grpc 1.17.1 → 1.18.0.pre1

Sign up to get free protection for your applications and to get access to all the features.

Potentially problematic release.


This version of grpc might be problematic; see the advisory details for this release.

Files changed (166)
  1. checksums.yaml +4 -4
  2. data/Makefile +1228 -988
  3. data/etc/roots.pem +242 -30
  4. data/include/grpc/grpc.h +2 -1
  5. data/include/grpc/grpc_security_constants.h +3 -3
  6. data/include/grpc/impl/codegen/atm_gcc_sync.h +2 -0
  7. data/include/grpc/impl/codegen/atm_windows.h +2 -0
  8. data/include/grpc/impl/codegen/compression_types.h +2 -1
  9. data/include/grpc/impl/codegen/grpc_types.h +1 -1
  10. data/include/grpc/impl/codegen/port_platform.h +9 -0
  11. data/src/core/ext/filters/client_channel/client_channel.cc +163 -882
  12. data/src/core/ext/filters/client_channel/health/health_check_client.cc +2 -4
  13. data/src/core/ext/filters/client_channel/health/health_check_client.h +2 -3
  14. data/src/core/ext/filters/client_channel/lb_policy.cc +1 -1
  15. data/src/core/ext/filters/client_channel/lb_policy.h +8 -17
  16. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +176 -216
  17. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h +1 -1
  18. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc +20 -23
  19. data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h +1 -1
  20. data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +49 -52
  21. data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +13 -35
  22. data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h +31 -30
  23. data/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc +69 -225
  24. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_channel.h +1 -1
  25. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_channel_secure.cc +20 -23
  26. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.h +1 -1
  27. data/src/core/ext/filters/client_channel/lb_policy_factory.h +2 -84
  28. data/src/core/ext/filters/client_channel/request_routing.cc +936 -0
  29. data/src/core/ext/filters/client_channel/request_routing.h +177 -0
  30. data/src/core/ext/filters/client_channel/resolver.cc +1 -1
  31. data/src/core/ext/filters/client_channel/resolver.h +1 -1
  32. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc +37 -26
  33. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc +30 -18
  34. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc +119 -100
  35. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h +8 -5
  36. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc +5 -4
  37. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc +2 -1
  38. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc +12 -14
  39. data/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc +5 -9
  40. data/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc +2 -1
  41. data/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h +1 -2
  42. data/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc +17 -17
  43. data/src/core/ext/filters/client_channel/resolver_result_parsing.cc +45 -52
  44. data/src/core/ext/filters/client_channel/resolver_result_parsing.h +13 -17
  45. data/src/core/ext/filters/client_channel/server_address.cc +103 -0
  46. data/src/core/ext/filters/client_channel/server_address.h +108 -0
  47. data/src/core/ext/filters/client_channel/subchannel.cc +10 -8
  48. data/src/core/ext/filters/client_channel/subchannel.h +9 -6
  49. data/src/core/ext/filters/client_channel/subchannel_index.cc +20 -27
  50. data/src/core/ext/transport/chttp2/client/chttp2_connector.cc +3 -2
  51. data/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc +8 -9
  52. data/src/core/ext/transport/chttp2/server/chttp2_server.cc +1 -1
  53. data/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc +1 -1
  54. data/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.cc +8 -11
  55. data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +24 -54
  56. data/src/core/ext/transport/chttp2/transport/chttp2_transport.h +3 -1
  57. data/src/core/ext/transport/chttp2/transport/context_list.cc +67 -0
  58. data/src/core/ext/transport/chttp2/transport/context_list.h +53 -0
  59. data/src/core/ext/transport/chttp2/transport/internal.h +38 -11
  60. data/src/core/ext/transport/chttp2/transport/writing.cc +5 -0
  61. data/src/core/ext/transport/inproc/inproc_transport.cc +1 -1
  62. data/src/core/lib/channel/channelz.cc +19 -18
  63. data/src/core/lib/channel/channelz.h +7 -1
  64. data/src/core/lib/channel/channelz_registry.cc +3 -2
  65. data/src/core/lib/debug/trace.cc +3 -0
  66. data/src/core/lib/debug/trace.h +5 -3
  67. data/src/core/lib/gpr/sync_posix.cc +96 -4
  68. data/src/core/lib/gprpp/inlined_vector.h +25 -19
  69. data/src/core/lib/gprpp/memory.h +2 -11
  70. data/src/core/lib/gprpp/orphanable.h +18 -82
  71. data/src/core/lib/gprpp/ref_counted.h +75 -84
  72. data/src/core/lib/gprpp/ref_counted_ptr.h +22 -17
  73. data/src/core/lib/http/httpcli_security_connector.cc +101 -94
  74. data/src/core/lib/http/parser.h +5 -5
  75. data/src/core/lib/iomgr/buffer_list.cc +16 -5
  76. data/src/core/lib/iomgr/buffer_list.h +10 -3
  77. data/src/core/lib/iomgr/call_combiner.cc +50 -2
  78. data/src/core/lib/iomgr/call_combiner.h +29 -2
  79. data/src/core/lib/iomgr/dynamic_annotations.h +67 -0
  80. data/src/core/lib/iomgr/endpoint.cc +4 -0
  81. data/src/core/lib/iomgr/endpoint.h +3 -0
  82. data/src/core/lib/iomgr/endpoint_pair_posix.cc +2 -2
  83. data/src/core/lib/iomgr/ev_epoll1_linux.cc +4 -0
  84. data/src/core/lib/iomgr/ev_epollex_linux.cc +4 -0
  85. data/src/core/lib/iomgr/ev_poll_posix.cc +4 -0
  86. data/src/core/lib/iomgr/ev_posix.cc +15 -7
  87. data/src/core/lib/iomgr/ev_posix.h +10 -0
  88. data/src/core/lib/iomgr/exec_ctx.cc +13 -0
  89. data/src/core/lib/iomgr/fork_posix.cc +1 -1
  90. data/src/core/lib/iomgr/internal_errqueue.cc +36 -3
  91. data/src/core/lib/iomgr/internal_errqueue.h +7 -1
  92. data/src/core/lib/iomgr/iomgr.cc +7 -0
  93. data/src/core/lib/iomgr/iomgr.h +4 -0
  94. data/src/core/lib/iomgr/iomgr_custom.cc +3 -1
  95. data/src/core/lib/iomgr/iomgr_internal.cc +4 -0
  96. data/src/core/lib/iomgr/iomgr_internal.h +4 -0
  97. data/src/core/lib/iomgr/iomgr_posix.cc +6 -1
  98. data/src/core/lib/iomgr/iomgr_windows.cc +4 -1
  99. data/src/core/lib/iomgr/port.h +1 -2
  100. data/src/core/lib/iomgr/resource_quota.cc +1 -0
  101. data/src/core/lib/iomgr/sockaddr_utils.cc +1 -0
  102. data/src/core/lib/iomgr/tcp_custom.cc +4 -1
  103. data/src/core/lib/iomgr/tcp_posix.cc +95 -35
  104. data/src/core/lib/iomgr/tcp_windows.cc +4 -1
  105. data/src/core/lib/iomgr/timer_manager.cc +6 -0
  106. data/src/core/lib/security/context/security_context.cc +75 -108
  107. data/src/core/lib/security/context/security_context.h +59 -35
  108. data/src/core/lib/security/credentials/alts/alts_credentials.cc +36 -48
  109. data/src/core/lib/security/credentials/alts/alts_credentials.h +37 -10
  110. data/src/core/lib/security/credentials/composite/composite_credentials.cc +97 -157
  111. data/src/core/lib/security/credentials/composite/composite_credentials.h +60 -24
  112. data/src/core/lib/security/credentials/credentials.cc +18 -142
  113. data/src/core/lib/security/credentials/credentials.h +119 -95
  114. data/src/core/lib/security/credentials/fake/fake_credentials.cc +46 -71
  115. data/src/core/lib/security/credentials/fake/fake_credentials.h +23 -5
  116. data/src/core/lib/security/credentials/google_default/google_default_credentials.cc +144 -51
  117. data/src/core/lib/security/credentials/google_default/google_default_credentials.h +28 -5
  118. data/src/core/lib/security/credentials/iam/iam_credentials.cc +27 -35
  119. data/src/core/lib/security/credentials/iam/iam_credentials.h +18 -4
  120. data/src/core/lib/security/credentials/jwt/jwt_credentials.cc +60 -69
  121. data/src/core/lib/security/credentials/jwt/jwt_credentials.h +29 -10
  122. data/src/core/lib/security/credentials/jwt/jwt_verifier.cc +2 -0
  123. data/src/core/lib/security/credentials/local/local_credentials.cc +19 -32
  124. data/src/core/lib/security/credentials/local/local_credentials.h +32 -11
  125. data/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc +130 -149
  126. data/src/core/lib/security/credentials/oauth2/oauth2_credentials.h +74 -29
  127. data/src/core/lib/security/credentials/plugin/plugin_credentials.cc +59 -77
  128. data/src/core/lib/security/credentials/plugin/plugin_credentials.h +40 -17
  129. data/src/core/lib/security/credentials/ssl/ssl_credentials.cc +66 -83
  130. data/src/core/lib/security/credentials/ssl/ssl_credentials.h +58 -15
  131. data/src/core/lib/security/security_connector/alts/alts_security_connector.cc +152 -177
  132. data/src/core/lib/security/security_connector/alts/alts_security_connector.h +12 -10
  133. data/src/core/lib/security/security_connector/fake/fake_security_connector.cc +210 -215
  134. data/src/core/lib/security/security_connector/fake/fake_security_connector.h +9 -6
  135. data/src/core/lib/security/security_connector/local/local_security_connector.cc +176 -169
  136. data/src/core/lib/security/security_connector/local/local_security_connector.h +10 -9
  137. data/src/core/lib/security/security_connector/security_connector.cc +41 -124
  138. data/src/core/lib/security/security_connector/security_connector.h +102 -105
  139. data/src/core/lib/security/security_connector/ssl/ssl_security_connector.cc +348 -370
  140. data/src/core/lib/security/security_connector/ssl/ssl_security_connector.h +14 -12
  141. data/src/core/lib/security/security_connector/ssl_utils.cc +13 -9
  142. data/src/core/lib/security/security_connector/ssl_utils.h +3 -1
  143. data/src/core/lib/security/transport/client_auth_filter.cc +50 -50
  144. data/src/core/lib/security/transport/secure_endpoint.cc +7 -1
  145. data/src/core/lib/security/transport/security_handshaker.cc +82 -66
  146. data/src/core/lib/security/transport/server_auth_filter.cc +15 -13
  147. data/src/core/lib/surface/init.cc +1 -0
  148. data/src/core/lib/surface/server.cc +13 -11
  149. data/src/core/lib/surface/server.h +6 -6
  150. data/src/core/lib/surface/version.cc +2 -2
  151. data/src/core/lib/transport/metadata.cc +1 -0
  152. data/src/core/lib/transport/static_metadata.cc +228 -221
  153. data/src/core/lib/transport/static_metadata.h +75 -71
  154. data/src/core/lib/transport/transport.cc +2 -1
  155. data/src/core/lib/transport/transport.h +5 -1
  156. data/src/core/tsi/alts/handshaker/alts_handshaker_client.cc +9 -2
  157. data/src/core/tsi/ssl_transport_security.cc +35 -24
  158. data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +1 -1
  159. data/src/ruby/lib/grpc/generic/rpc_server.rb +61 -0
  160. data/src/ruby/lib/grpc/generic/service.rb +1 -1
  161. data/src/ruby/lib/grpc/version.rb +1 -1
  162. data/src/ruby/pb/grpc/health/checker.rb +2 -3
  163. data/src/ruby/spec/generic/rpc_server_spec.rb +22 -0
  164. data/src/ruby/spec/support/services.rb +1 -0
  165. metadata +37 -32
  166. data/src/core/ext/filters/client_channel/lb_policy_factory.cc +0 -163
@@ -329,36 +329,6 @@ OCiNUW7dFGdTbHFcJoRNdVq2fmBWqU2t+5sel/MN2dKXVHfaPRK34B7vCAas+YWH
329
329
  QMAJKOSLakhT2+zNVVXxxvjpoixMptEmX36vWkzaH6byHCx+rgIW0lbQL1dTR+iS
330
330
  -----END CERTIFICATE-----
331
331
 
332
- # Issuer: CN=Visa eCommerce Root O=VISA OU=Visa International Service Association
333
- # Subject: CN=Visa eCommerce Root O=VISA OU=Visa International Service Association
334
- # Label: "Visa eCommerce Root"
335
- # Serial: 25952180776285836048024890241505565794
336
- # MD5 Fingerprint: fc:11:b8:d8:08:93:30:00:6d:23:f9:7e:eb:52:1e:02
337
- # SHA1 Fingerprint: 70:17:9b:86:8c:00:a4:fa:60:91:52:22:3f:9f:3e:32:bd:e0:05:62
338
- # SHA256 Fingerprint: 69:fa:c9:bd:55:fb:0a:c7:8d:53:bb:ee:5c:f1:d5:97:98:9f:d0:aa:ab:20:a2:51:51:bd:f1:73:3e:e7:d1:22
339
- -----BEGIN CERTIFICATE-----
340
- MIIDojCCAoqgAwIBAgIQE4Y1TR0/BvLB+WUF1ZAcYjANBgkqhkiG9w0BAQUFADBr
341
- MQswCQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRl
342
- cm5hdGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNv
343
- bW1lcmNlIFJvb3QwHhcNMDIwNjI2MDIxODM2WhcNMjIwNjI0MDAxNjEyWjBrMQsw
344
- CQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRlcm5h
345
- dGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNvbW1l
346
- cmNlIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvV95WHm6h
347
- 2mCxlCfLF9sHP4CFT8icttD0b0/Pmdjh28JIXDqsOTPHH2qLJj0rNfVIsZHBAk4E
348
- lpF7sDPwsRROEW+1QK8bRaVK7362rPKgH1g/EkZgPI2h4H3PVz4zHvtH8aoVlwdV
349
- ZqW1LS7YgFmypw23RuwhY/81q6UCzyr0TP579ZRdhE2o8mCP2w4lPJ9zcc+U30rq
350
- 299yOIzzlr3xF7zSujtFWsan9sYXiwGd/BmoKoMWuDpI/k4+oKsGGelT84ATB+0t
351
- vz8KPFUgOSwsAGl0lUq8ILKpeeUYiZGo3BxN77t+Nwtd/jmliFKMAGzsGHxBvfaL
352
- dXe6YJ2E5/4tAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD
353
- AgEGMB0GA1UdDgQWBBQVOIMPPyw/cDMezUb+B4wg4NfDtzANBgkqhkiG9w0BAQUF
354
- AAOCAQEAX/FBfXxcCLkr4NWSR/pnXKUTwwMhmytMiUbPWU3J/qVAtmPN3XEolWcR
355
- zCSs00Rsca4BIGsDoo8Ytyk6feUWYFN4PMCvFYP3j1IzJL1kk5fui/fbGKhtcbP3
356
- LBfQdCVp9/5rPJS+TUtBjE7ic9DjkCJzQ83z7+pzzkWKsKZJ/0x9nXGIxHYdkFsd
357
- 7v3M9+79YKWxehZx0RbQfBI8bGmX265fOZpwLwU8GUYEmSA20GBuYQa7FkKMcPcw
358
- ++DbZqMAAb3mLNqRX6BGi01qnD093QVG/na/oAo85ADmJ7f/hC3euiInlhBx6yLt
359
- 398znM/jra6O1I7mT1GvFpLgXPYHDw==
360
- -----END CERTIFICATE-----
361
-
362
332
  # Issuer: CN=AAA Certificate Services O=Comodo CA Limited
363
333
  # Subject: CN=AAA Certificate Services O=Comodo CA Limited
364
334
  # Label: "Comodo AAA Services root"
@@ -4340,3 +4310,245 @@ rYy0UGYwEAYJKwYBBAGCNxUBBAMCAQAwCgYIKoZIzj0EAwMDaAAwZQIwJsdpW9zV
4340
4310
  57LnyAyMjMPdeYwbY9XJUpROTYJKcx6ygISpJcBMWm1JKWB4E+J+SOtkAjEA2zQg
4341
4311
  Mgj/mkkCtojeFK9dbJlxjRo/i9fgojaGHAeCOnZT/cKi7e97sIBPWA9LUzm9
4342
4312
  -----END CERTIFICATE-----
4313
+
4314
+ # Issuer: CN=GTS Root R1 O=Google Trust Services LLC
4315
+ # Subject: CN=GTS Root R1 O=Google Trust Services LLC
4316
+ # Label: "GTS Root R1"
4317
+ # Serial: 146587175971765017618439757810265552097
4318
+ # MD5 Fingerprint: 82:1a:ef:d4:d2:4a:f2:9f:e2:3d:97:06:14:70:72:85
4319
+ # SHA1 Fingerprint: e1:c9:50:e6:ef:22:f8:4c:56:45:72:8b:92:20:60:d7:d5:a7:a3:e8
4320
+ # SHA256 Fingerprint: 2a:57:54:71:e3:13:40:bc:21:58:1c:bd:2c:f1:3e:15:84:63:20:3e:ce:94:bc:f9:d3:cc:19:6b:f0:9a:54:72
4321
+ -----BEGIN CERTIFICATE-----
4322
+ MIIFWjCCA0KgAwIBAgIQbkepxUtHDA3sM9CJuRz04TANBgkqhkiG9w0BAQwFADBH
4323
+ MQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExM
4324
+ QzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIy
4325
+ MDAwMDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNl
4326
+ cnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEB
4327
+ AQUAA4ICDwAwggIKAoICAQC2EQKLHuOhd5s73L+UPreVp0A8of2C+X0yBoJx9vaM
4328
+ f/vo27xqLpeXo4xL+Sv2sfnOhB2x+cWX3u+58qPpvBKJXqeqUqv4IyfLpLGcY9vX
4329
+ mX7wCl7raKb0xlpHDU0QM+NOsROjyBhsS+z8CZDfnWQpJSMHobTSPS5g4M/SCYe7
4330
+ zUjwTcLCeoiKu7rPWRnWr4+wB7CeMfGCwcDfLqZtbBkOtdh+JhpFAz2weaSUKK0P
4331
+ fyblqAj+lug8aJRT7oM6iCsVlgmy4HqMLnXWnOunVmSPlk9orj2XwoSPwLxAwAtc
4332
+ vfaHszVsrBhQf4TgTM2S0yDpM7xSma8ytSmzJSq0SPly4cpk9+aCEI3oncKKiPo4
4333
+ Zor8Y/kB+Xj9e1x3+naH+uzfsQ55lVe0vSbv1gHR6xYKu44LtcXFilWr06zqkUsp
4334
+ zBmkMiVOKvFlRNACzqrOSbTqn3yDsEB750Orp2yjj32JgfpMpf/VjsPOS+C12LOO
4335
+ Rc92wO1AK/1TD7Cn1TsNsYqiA94xrcx36m97PtbfkSIS5r762DL8EGMUUXLeXdYW
4336
+ k70paDPvOmbsB4om3xPXV2V4J95eSRQAogB/mqghtqmxlbCluQ0WEdrHbEg8QOB+
4337
+ DVrNVjzRlwW5y0vtOUucxD/SVRNuJLDWcfr0wbrM7Rv1/oFB2ACYPTrIrnqYNxgF
4338
+ lQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV
4339
+ HQ4EFgQU5K8rJnEaK0gnhS9SZizv8IkTcT4wDQYJKoZIhvcNAQEMBQADggIBADiW
4340
+ Cu49tJYeX++dnAsznyvgyv3SjgofQXSlfKqE1OXyHuY3UjKcC9FhHb8owbZEKTV1
4341
+ d5iyfNm9dKyKaOOpMQkpAWBz40d8U6iQSifvS9efk+eCNs6aaAyC58/UEBZvXw6Z
4342
+ XPYfcX3v73svfuo21pdwCxXu11xWajOl40k4DLh9+42FpLFZXvRq4d2h9mREruZR
4343
+ gyFmxhE+885H7pwoHyXa/6xmld01D1zvICxi/ZG6qcz8WpyTgYMpl0p8WnK0OdC3
4344
+ d8t5/Wk6kjftbjhlRn7pYL15iJdfOBL07q9bgsiG1eGZbYwE8na6SfZu6W0eX6Dv
4345
+ J4J2QPim01hcDyxC2kLGe4g0x8HYRZvBPsVhHdljUEn2NIVq4BjFbkerQUIpm/Zg
4346
+ DdIx02OYI5NaAIFItO/Nis3Jz5nu2Z6qNuFoS3FJFDYoOj0dzpqPJeaAcWErtXvM
4347
+ +SUWgeExX6GjfhaknBZqlxi9dnKlC54dNuYvoS++cJEPqOba+MSSQGwlfnuzCdyy
4348
+ F62ARPBopY+Udf90WuioAnwMCeKpSwughQtiue+hMZL77/ZRBIls6Kl0obsXs7X9
4349
+ SQ98POyDGCBDTtWTurQ0sR8WNh8M5mQ5Fkzc4P4dyKliPUDqysU0ArSuiYgzNdws
4350
+ E3PYJ/HQcu51OyLemGhmW/HGY0dVHLqlCFF1pkgl
4351
+ -----END CERTIFICATE-----
4352
+
4353
+ # Issuer: CN=GTS Root R2 O=Google Trust Services LLC
4354
+ # Subject: CN=GTS Root R2 O=Google Trust Services LLC
4355
+ # Label: "GTS Root R2"
4356
+ # Serial: 146587176055767053814479386953112547951
4357
+ # MD5 Fingerprint: 44:ed:9a:0e:a4:09:3b:00:f2:ae:4c:a3:c6:61:b0:8b
4358
+ # SHA1 Fingerprint: d2:73:96:2a:2a:5e:39:9f:73:3f:e1:c7:1e:64:3f:03:38:34:fc:4d
4359
+ # SHA256 Fingerprint: c4:5d:7b:b0:8e:6d:67:e6:2e:42:35:11:0b:56:4e:5f:78:fd:92:ef:05:8c:84:0a:ea:4e:64:55:d7:58:5c:60
4360
+ -----BEGIN CERTIFICATE-----
4361
+ MIIFWjCCA0KgAwIBAgIQbkepxlqz5yDFMJo/aFLybzANBgkqhkiG9w0BAQwFADBH
4362
+ MQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExM
4363
+ QzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIy
4364
+ MDAwMDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNl
4365
+ cnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwggIiMA0GCSqGSIb3DQEB
4366
+ AQUAA4ICDwAwggIKAoICAQDO3v2m++zsFDQ8BwZabFn3GTXd98GdVarTzTukk3Lv
4367
+ CvptnfbwhYBboUhSnznFt+4orO/LdmgUud+tAWyZH8QiHZ/+cnfgLFuv5AS/T3Kg
4368
+ GjSY6Dlo7JUle3ah5mm5hRm9iYz+re026nO8/4Piy33B0s5Ks40FnotJk9/BW9Bu
4369
+ XvAuMC6C/Pq8tBcKSOWIm8Wba96wyrQD8Nr0kLhlZPdcTK3ofmZemde4wj7I0BOd
4370
+ re7kRXuJVfeKH2JShBKzwkCX44ofR5GmdFrS+LFjKBC4swm4VndAoiaYecb+3yXu
4371
+ PuWgf9RhD1FLPD+M2uFwdNjCaKH5wQzpoeJ/u1U8dgbuak7MkogwTZq9TwtImoS1
4372
+ mKPV+3PBV2HdKFZ1E66HjucMUQkQdYhMvI35ezzUIkgfKtzra7tEscszcTJGr61K
4373
+ 8YzodDqs5xoic4DSMPclQsciOzsSrZYuxsN2B6ogtzVJV+mSSeh2FnIxZyuWfoqj
4374
+ x5RWIr9qS34BIbIjMt/kmkRtWVtd9QCgHJvGeJeNkP+byKq0rxFROV7Z+2et1VsR
4375
+ nTKaG73VululycslaVNVJ1zgyjbLiGH7HrfQy+4W+9OmTN6SpdTi3/UGVN4unUu0
4376
+ kzCqgc7dGtxRcw1PcOnlthYhGXmy5okLdWTK1au8CcEYof/UVKGFPP0UJAOyh9Ok
4377
+ twIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV
4378
+ HQ4EFgQUu//KjiOfT5nK2+JopqUVJxce2Q4wDQYJKoZIhvcNAQEMBQADggIBALZp
4379
+ 8KZ3/p7uC4Gt4cCpx/k1HUCCq+YEtN/L9x0Pg/B+E02NjO7jMyLDOfxA325BS0JT
4380
+ vhaI8dI4XsRomRyYUpOM52jtG2pzegVATX9lO9ZY8c6DR2Dj/5epnGB3GFW1fgiT
4381
+ z9D2PGcDFWEJ+YF59exTpJ/JjwGLc8R3dtyDovUMSRqodt6Sm2T4syzFJ9MHwAiA
4382
+ pJiS4wGWAqoC7o87xdFtCjMwc3i5T1QWvwsHoaRc5svJXISPD+AVdyx+Jn7axEvb
4383
+ pxZ3B7DNdehyQtaVhJ2Gg/LkkM0JR9SLA3DaWsYDQvTtN6LwG1BUSw7YhN4ZKJmB
4384
+ R64JGz9I0cNv4rBgF/XuIwKl2gBbbZCr7qLpGzvpx0QnRY5rn/WkhLx3+WuXrD5R
4385
+ RaIRpsyF7gpo8j5QOHokYh4XIDdtak23CZvJ/KRY9bb7nE4Yu5UC56GtmwfuNmsk
4386
+ 0jmGwZODUNKBRqhfYlcsu2xkiAhu7xNUX90txGdj08+JN7+dIPT7eoOboB6BAFDC
4387
+ 5AwiWVIQ7UNWhwD4FFKnHYuTjKJNRn8nxnGbJN7k2oaLDX5rIMHAnuFl2GqjpuiF
4388
+ izoHCBy69Y9Vmhh1fuXsgWbRIXOhNUQLgD1bnF5vKheW0YMjiGZt5obicDIvUiLn
4389
+ yOd/xCxgXS/Dr55FBcOEArf9LAhST4Ldo/DUhgkC
4390
+ -----END CERTIFICATE-----
4391
+
4392
+ # Issuer: CN=GTS Root R3 O=Google Trust Services LLC
4393
+ # Subject: CN=GTS Root R3 O=Google Trust Services LLC
4394
+ # Label: "GTS Root R3"
4395
+ # Serial: 146587176140553309517047991083707763997
4396
+ # MD5 Fingerprint: 1a:79:5b:6b:04:52:9c:5d:c7:74:33:1b:25:9a:f9:25
4397
+ # SHA1 Fingerprint: 30:d4:24:6f:07:ff:db:91:89:8a:0b:e9:49:66:11:eb:8c:5e:46:e5
4398
+ # SHA256 Fingerprint: 15:d5:b8:77:46:19:ea:7d:54:ce:1c:a6:d0:b0:c4:03:e0:37:a9:17:f1:31:e8:a0:4e:1e:6b:7a:71:ba:bc:e5
4399
+ -----BEGIN CERTIFICATE-----
4400
+ MIICDDCCAZGgAwIBAgIQbkepx2ypcyRAiQ8DVd2NHTAKBggqhkjOPQQDAzBHMQsw
4401
+ CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU
4402
+ MBIGA1UEAxMLR1RTIFJvb3QgUjMwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw
4403
+ MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp
4404
+ Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjMwdjAQBgcqhkjOPQIBBgUrgQQA
4405
+ IgNiAAQfTzOHMymKoYTey8chWEGJ6ladK0uFxh1MJ7x/JlFyb+Kf1qPKzEUURout
4406
+ 736GjOyxfi//qXGdGIRFBEFVbivqJn+7kAHjSxm65FSWRQmx1WyRRK2EE46ajA2A
4407
+ DDL24CejQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud
4408
+ DgQWBBTB8Sa6oC2uhYHP0/EqEr24Cmf9vDAKBggqhkjOPQQDAwNpADBmAjEAgFuk
4409
+ fCPAlaUs3L6JbyO5o91lAFJekazInXJ0glMLfalAvWhgxeG4VDvBNhcl2MG9AjEA
4410
+ njWSdIUlUfUk7GRSJFClH9voy8l27OyCbvWFGFPouOOaKaqW04MjyaR7YbPMAuhd
4411
+ -----END CERTIFICATE-----
4412
+
4413
+ # Issuer: CN=GTS Root R4 O=Google Trust Services LLC
4414
+ # Subject: CN=GTS Root R4 O=Google Trust Services LLC
4415
+ # Label: "GTS Root R4"
4416
+ # Serial: 146587176229350439916519468929765261721
4417
+ # MD5 Fingerprint: 5d:b6:6a:c4:60:17:24:6a:1a:99:a8:4b:ee:5e:b4:26
4418
+ # SHA1 Fingerprint: 2a:1d:60:27:d9:4a:b1:0a:1c:4d:91:5c:cd:33:a0:cb:3e:2d:54:cb
4419
+ # SHA256 Fingerprint: 71:cc:a5:39:1f:9e:79:4b:04:80:25:30:b3:63:e1:21:da:8a:30:43:bb:26:66:2f:ea:4d:ca:7f:c9:51:a4:bd
4420
+ -----BEGIN CERTIFICATE-----
4421
+ MIICCjCCAZGgAwIBAgIQbkepyIuUtui7OyrYorLBmTAKBggqhkjOPQQDAzBHMQsw
4422
+ CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU
4423
+ MBIGA1UEAxMLR1RTIFJvb3QgUjQwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw
4424
+ MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp
4425
+ Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjQwdjAQBgcqhkjOPQIBBgUrgQQA
4426
+ IgNiAATzdHOnaItgrkO4NcWBMHtLSZ37wWHO5t5GvWvVYRg1rkDdc/eJkTBa6zzu
4427
+ hXyiQHY7qca4R9gq55KRanPpsXI5nymfopjTX15YhmUPoYRlBtHci8nHc8iMai/l
4428
+ xKvRHYqjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud
4429
+ DgQWBBSATNbrdP9JNqPV2Py1PsVq8JQdjDAKBggqhkjOPQQDAwNnADBkAjBqUFJ0
4430
+ CMRw3J5QdCHojXohw0+WbhXRIjVhLfoIN+4Zba3bssx9BzT1YBkstTTZbyACMANx
4431
+ sbqjYAuG7ZoIapVon+Kz4ZNkfF6Tpt95LY2F45TPI11xzPKwTdb+mciUqXWi4w==
4432
+ -----END CERTIFICATE-----
4433
+
4434
+ # Issuer: CN=UCA Global G2 Root O=UniTrust
4435
+ # Subject: CN=UCA Global G2 Root O=UniTrust
4436
+ # Label: "UCA Global G2 Root"
4437
+ # Serial: 124779693093741543919145257850076631279
4438
+ # MD5 Fingerprint: 80:fe:f0:c4:4a:f0:5c:62:32:9f:1c:ba:78:a9:50:f8
4439
+ # SHA1 Fingerprint: 28:f9:78:16:19:7a:ff:18:25:18:aa:44:fe:c1:a0:ce:5c:b6:4c:8a
4440
+ # SHA256 Fingerprint: 9b:ea:11:c9:76:fe:01:47:64:c1:be:56:a6:f9:14:b5:a5:60:31:7a:bd:99:88:39:33:82:e5:16:1a:a0:49:3c
4441
+ -----BEGIN CERTIFICATE-----
4442
+ MIIFRjCCAy6gAwIBAgIQXd+x2lqj7V2+WmUgZQOQ7zANBgkqhkiG9w0BAQsFADA9
4443
+ MQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxGzAZBgNVBAMMElVDQSBH
4444
+ bG9iYWwgRzIgUm9vdDAeFw0xNjAzMTEwMDAwMDBaFw00MDEyMzEwMDAwMDBaMD0x
4445
+ CzAJBgNVBAYTAkNOMREwDwYDVQQKDAhVbmlUcnVzdDEbMBkGA1UEAwwSVUNBIEds
4446
+ b2JhbCBHMiBSb290MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxeYr
4447
+ b3zvJgUno4Ek2m/LAfmZmqkywiKHYUGRO8vDaBsGxUypK8FnFyIdK+35KYmToni9
4448
+ kmugow2ifsqTs6bRjDXVdfkX9s9FxeV67HeToI8jrg4aA3++1NDtLnurRiNb/yzm
4449
+ VHqUwCoV8MmNsHo7JOHXaOIxPAYzRrZUEaalLyJUKlgNAQLx+hVRZ2zA+te2G3/R
4450
+ VogvGjqNO7uCEeBHANBSh6v7hn4PJGtAnTRnvI3HLYZveT6OqTwXS3+wmeOwcWDc
4451
+ C/Vkw85DvG1xudLeJ1uK6NjGruFZfc8oLTW4lVYa8bJYS7cSN8h8s+1LgOGN+jIj
4452
+ tm+3SJUIsUROhYw6AlQgL9+/V087OpAh18EmNVQg7Mc/R+zvWr9LesGtOxdQXGLY
4453
+ D0tK3Cv6brxzks3sx1DoQZbXqX5t2Okdj4q1uViSukqSKwxW/YDrCPBeKW4bHAyv
4454
+ j5OJrdu9o54hyokZ7N+1wxrrFv54NkzWbtA+FxyQF2smuvt6L78RHBgOLXMDj6Dl
4455
+ NaBa4kx1HXHhOThTeEDMg5PXCp6dW4+K5OXgSORIskfNTip1KnvyIvbJvgmRlld6
4456
+ iIis7nCs+dwp4wwcOxJORNanTrAmyPPZGpeRaOrvjUYG0lZFWJo8DA+DuAUlwznP
4457
+ O6Q0ibd5Ei9Hxeepl2n8pndntd978XplFeRhVmUCAwEAAaNCMEAwDgYDVR0PAQH/
4458
+ BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFIHEjMz15DD/pQwIX4wV
4459
+ ZyF0Ad/fMA0GCSqGSIb3DQEBCwUAA4ICAQATZSL1jiutROTL/7lo5sOASD0Ee/oj
4460
+ L3rtNtqyzm325p7lX1iPyzcyochltq44PTUbPrw7tgTQvPlJ9Zv3hcU2tsu8+Mg5
4461
+ 1eRfB70VVJd0ysrtT7q6ZHafgbiERUlMjW+i67HM0cOU2kTC5uLqGOiiHycFutfl
4462
+ 1qnN3e92mI0ADs0b+gO3joBYDic/UvuUospeZcnWhNq5NXHzJsBPd+aBJ9J3O5oU
4463
+ b3n09tDh05S60FdRvScFDcH9yBIw7m+NESsIndTUv4BFFJqIRNow6rSn4+7vW4LV
4464
+ PtateJLbXDzz2K36uGt/xDYotgIVilQsnLAXc47QN6MUPJiVAAwpBVueSUmxX8fj
4465
+ y88nZY41F7dXyDDZQVu5FLbowg+UMaeUmMxq67XhJ/UQqAHojhJi6IjMtX9Gl8Cb
4466
+ EGY4GjZGXyJoPd/JxhMnq1MGrKI8hgZlb7F+sSlEmqO6SWkoaY/X5V+tBIZkbxqg
4467
+ DMUIYs6Ao9Dz7GjevjPHF1t/gMRMTLGmhIrDO7gJzRSBuhjjVFc2/tsvfEehOjPI
4468
+ +Vg7RE+xygKJBJYoaMVLuCaJu9YzL1DV/pqJuhgyklTGW+Cd+V7lDSKb9triyCGy
4469
+ YiGqhkCyLmTTX8jjfhFnRR8F/uOi77Oos/N9j/gMHyIfLXC0uAE0djAA5SN4p1bX
4470
+ UB+K+wb1whnw0A==
4471
+ -----END CERTIFICATE-----
4472
+
4473
+ # Issuer: CN=UCA Extended Validation Root O=UniTrust
4474
+ # Subject: CN=UCA Extended Validation Root O=UniTrust
4475
+ # Label: "UCA Extended Validation Root"
4476
+ # Serial: 106100277556486529736699587978573607008
4477
+ # MD5 Fingerprint: a1:f3:5f:43:c6:34:9b:da:bf:8c:7e:05:53:ad:96:e2
4478
+ # SHA1 Fingerprint: a3:a1:b0:6f:24:61:23:4a:e3:36:a5:c2:37:fc:a6:ff:dd:f0:d7:3a
4479
+ # SHA256 Fingerprint: d4:3a:f9:b3:54:73:75:5c:96:84:fc:06:d7:d8:cb:70:ee:5c:28:e7:73:fb:29:4e:b4:1e:e7:17:22:92:4d:24
4480
+ -----BEGIN CERTIFICATE-----
4481
+ MIIFWjCCA0KgAwIBAgIQT9Irj/VkyDOeTzRYZiNwYDANBgkqhkiG9w0BAQsFADBH
4482
+ MQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxJTAjBgNVBAMMHFVDQSBF
4483
+ eHRlbmRlZCBWYWxpZGF0aW9uIFJvb3QwHhcNMTUwMzEzMDAwMDAwWhcNMzgxMjMx
4484
+ MDAwMDAwWjBHMQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxJTAjBgNV
4485
+ BAMMHFVDQSBFeHRlbmRlZCBWYWxpZGF0aW9uIFJvb3QwggIiMA0GCSqGSIb3DQEB
4486
+ AQUAA4ICDwAwggIKAoICAQCpCQcoEwKwmeBkqh5DFnpzsZGgdT6o+uM4AHrsiWog
4487
+ D4vFsJszA1qGxliG1cGFu0/GnEBNyr7uaZa4rYEwmnySBesFK5pI0Lh2PpbIILvS
4488
+ sPGP2KxFRv+qZ2C0d35qHzwaUnoEPQc8hQ2E0B92CvdqFN9y4zR8V05WAT558aop
4489
+ O2z6+I9tTcg1367r3CTueUWnhbYFiN6IXSV8l2RnCdm/WhUFhvMJHuxYMjMR83dk
4490
+ sHYf5BA1FxvyDrFspCqjc/wJHx4yGVMR59mzLC52LqGj3n5qiAno8geK+LLNEOfi
4491
+ c0CTuwjRP+H8C5SzJe98ptfRr5//lpr1kXuYC3fUfugH0mK1lTnj8/FtDw5lhIpj
4492
+ VMWAtuCeS31HJqcBCF3RiJ7XwzJE+oJKCmhUfzhTA8ykADNkUVkLo4KRel7sFsLz
4493
+ KuZi2irbWWIQJUoqgQtHB0MGcIfS+pMRKXpITeuUx3BNr2fVUbGAIAEBtHoIppB/
4494
+ TuDvB0GHr2qlXov7z1CymlSvw4m6WC31MJixNnI5fkkE/SmnTHnkBVfblLkWU41G
4495
+ sx2VYVdWf6/wFlthWG82UBEL2KwrlRYaDh8IzTY0ZRBiZtWAXxQgXy0MoHgKaNYs
4496
+ 1+lvK9JKBZP8nm9rZ/+I8U6laUpSNwXqxhaN0sSZ0YIrO7o1dfdRUVjzyAfd5LQD
4497
+ fwIDAQABo0IwQDAdBgNVHQ4EFgQU2XQ65DA9DfcS3H5aBZ8eNJr34RQwDwYDVR0T
4498
+ AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQADggIBADaN
4499
+ l8xCFWQpN5smLNb7rhVpLGsaGvdftvkHTFnq88nIua7Mui563MD1sC3AO6+fcAUR
4500
+ ap8lTwEpcOPlDOHqWnzcSbvBHiqB9RZLcpHIojG5qtr8nR/zXUACE/xOHAbKsxSQ
4501
+ VBcZEhrxH9cMaVr2cXj0lH2RC47skFSOvG+hTKv8dGT9cZr4QQehzZHkPJrgmzI5
4502
+ c6sq1WnIeJEmMX3ixzDx/BR4dxIOE/TdFpS/S2d7cFOFyrC78zhNLJA5wA3CXWvp
4503
+ 4uXViI3WLL+rG761KIcSF3Ru/H38j9CHJrAb+7lsq+KePRXBOy5nAliRn+/4Qh8s
4504
+ t2j1da3Ptfb/EX3C8CSlrdP6oDyp+l3cpaDvRKS+1ujl5BOWF3sGPjLtx7dCvHaj
4505
+ 2GU4Kzg1USEODm8uNBNA4StnDG1KQTAYI1oyVZnJF+A83vbsea0rWBmirSwiGpWO
4506
+ vpaQXUJXxPkUAzUrHC1RVwinOt4/5Mi0A3PCwSaAuwtCH60NryZy2sy+s6ODWA2C
4507
+ xR9GUeOcGMyNm43sSet1UNWMKFnKdDTajAshqx7qG+XH/RU+wBeq+yNuJkbL+vmx
4508
+ cmtpzyKEC2IPrNkZAJSidjzULZrtBJ4tBmIQN1IchXIbJ+XMxjHsN+xjWZsLHXbM
4509
+ fjKaiJUINlK73nZfdklJrX+9ZSCyycErdhh2n1ax
4510
+ -----END CERTIFICATE-----
4511
+
4512
+ # Issuer: CN=Certigna Root CA O=Dhimyotis OU=0002 48146308100036
4513
+ # Subject: CN=Certigna Root CA O=Dhimyotis OU=0002 48146308100036
4514
+ # Label: "Certigna Root CA"
4515
+ # Serial: 269714418870597844693661054334862075617
4516
+ # MD5 Fingerprint: 0e:5c:30:62:27:eb:5b:bc:d7:ae:62:ba:e9:d5:df:77
4517
+ # SHA1 Fingerprint: 2d:0d:52:14:ff:9e:ad:99:24:01:74:20:47:6e:6c:85:27:27:f5:43
4518
+ # SHA256 Fingerprint: d4:8d:3d:23:ee:db:50:a4:59:e5:51:97:60:1c:27:77:4b:9d:7b:18:c9:4d:5a:05:95:11:a1:02:50:b9:31:68
4519
+ -----BEGIN CERTIFICATE-----
4520
+ MIIGWzCCBEOgAwIBAgIRAMrpG4nxVQMNo+ZBbcTjpuEwDQYJKoZIhvcNAQELBQAw
4521
+ WjELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCURoaW15b3RpczEcMBoGA1UECwwTMDAw
4522
+ MiA0ODE0NjMwODEwMDAzNjEZMBcGA1UEAwwQQ2VydGlnbmEgUm9vdCBDQTAeFw0x
4523
+ MzEwMDEwODMyMjdaFw0zMzEwMDEwODMyMjdaMFoxCzAJBgNVBAYTAkZSMRIwEAYD
4524
+ VQQKDAlEaGlteW90aXMxHDAaBgNVBAsMEzAwMDIgNDgxNDYzMDgxMDAwMzYxGTAX
4525
+ BgNVBAMMEENlcnRpZ25hIFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw
4526
+ ggIKAoICAQDNGDllGlmx6mQWDoyUJJV8g9PFOSbcDO8WV43X2KyjQn+Cyu3NW9sO
4527
+ ty3tRQgXstmzy9YXUnIo245Onoq2C/mehJpNdt4iKVzSs9IGPjA5qXSjklYcoW9M
4528
+ CiBtnyN6tMbaLOQdLNyzKNAT8kxOAkmhVECe5uUFoC2EyP+YbNDrihqECB63aCPu
4529
+ I9Vwzm1RaRDuoXrC0SIxwoKF0vJVdlB8JXrJhFwLrN1CTivngqIkicuQstDuI7pm
4530
+ TLtipPlTWmR7fJj6o0ieD5Wupxj0auwuA0Wv8HT4Ks16XdG+RCYyKfHx9WzMfgIh
4531
+ C59vpD++nVPiz32pLHxYGpfhPTc3GGYo0kDFUYqMwy3OU4gkWGQwFsWq4NYKpkDf
4532
+ ePb1BHxpE4S80dGnBs8B92jAqFe7OmGtBIyT46388NtEbVncSVmurJqZNjBBe3Yz
4533
+ IoejwpKGbvlw7q6Hh5UbxHq9MfPU0uWZ/75I7HX1eBYdpnDBfzwboZL7z8g81sWT
4534
+ Co/1VTp2lc5ZmIoJlXcymoO6LAQ6l73UL77XbJuiyn1tJslV1c/DeVIICZkHJC1k
4535
+ JWumIWmbat10TWuXekG9qxf5kBdIjzb5LdXF2+6qhUVB+s06RbFo5jZMm5BX7CO5
4536
+ hwjCxAnxl4YqKE3idMDaxIzb3+KhF1nOJFl0Mdp//TBt2dzhauH8XwIDAQABo4IB
4537
+ GjCCARYwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE
4538
+ FBiHVuBud+4kNTxOc5of1uHieX4rMB8GA1UdIwQYMBaAFBiHVuBud+4kNTxOc5of
4539
+ 1uHieX4rMEQGA1UdIAQ9MDswOQYEVR0gADAxMC8GCCsGAQUFBwIBFiNodHRwczov
4540
+ L3d3d3cuY2VydGlnbmEuZnIvYXV0b3JpdGVzLzBtBgNVHR8EZjBkMC+gLaArhilo
4541
+ dHRwOi8vY3JsLmNlcnRpZ25hLmZyL2NlcnRpZ25hcm9vdGNhLmNybDAxoC+gLYYr
4542
+ aHR0cDovL2NybC5kaGlteW90aXMuY29tL2NlcnRpZ25hcm9vdGNhLmNybDANBgkq
4543
+ hkiG9w0BAQsFAAOCAgEAlLieT/DjlQgi581oQfccVdV8AOItOoldaDgvUSILSo3L
4544
+ 6btdPrtcPbEo/uRTVRPPoZAbAh1fZkYJMyjhDSSXcNMQH+pkV5a7XdrnxIxPTGRG
4545
+ HVyH41neQtGbqH6mid2PHMkwgu07nM3A6RngatgCdTer9zQoKJHyBApPNeNgJgH6
4546
+ 0BGM+RFq7q89w1DTj18zeTyGqHNFkIwgtnJzFyO+B2XleJINugHA64wcZr+shncB
4547
+ lA2c5uk5jR+mUYyZDDl34bSb+hxnV29qao6pK0xXeXpXIs/NX2NGjVxZOob4Mkdi
4548
+ o2cNGJHc+6Zr9UhhcyNZjgKnvETq9Emd8VRY+WCv2hikLyhF3HqgiIZd8zvn/yk1
4549
+ gPxkQ5Tm4xxvvq0OKmOZK8l+hfZx6AYDlf7ej0gcWtSS6Cvu5zHbugRqh5jnxV/v
4550
+ faci9wHYTfmJ0A6aBVmknpjZbyvKcL5kwlWj9Omvw5Ip3IgWJJk8jSaYtlu3zM63
4551
+ Nwf9JtmYhST/WSMDmu2dnajkXjjO11INb9I/bbEFa0nOipFGc/T2L/Coc3cOZayh
4552
+ jWZSaX5LaAzHHjcng6WMxwLkFM1JAbBzs/3GkDpv0mztO+7skb6iQ12LAEpmJURw
4553
+ 3kAP+HwV96LOPNdeE4yBFxgX0b3xdxA61GU5wSesVywlVP+i2k+KYTlerj1KjL0=
4554
+ -----END CERTIFICATE-----
@@ -511,7 +511,8 @@ GRPCAPI char* grpc_channelz_get_server(intptr_t server_id);
511
511
 
512
512
  /* Gets all server sockets that exist in the server. */
513
513
  GRPCAPI char* grpc_channelz_get_server_sockets(intptr_t server_id,
514
- intptr_t start_socket_id);
514
+ intptr_t start_socket_id,
515
+ intptr_t max_results);
515
516
 
516
517
  /* Returns a single Channel, or else a NOT_FOUND code. The returned string
517
518
  is allocated and must be freed by the application. */
@@ -106,10 +106,10 @@ typedef enum {
106
106
  } grpc_ssl_client_certificate_request_type;
107
107
 
108
108
  /**
109
- * Type of local connection for which local channel/server credentials will be
110
- * applied. It only supports UDS for now.
109
+ * Type of local connections for which local channel/server credentials will be
110
+ * applied. It supports UDS and local TCP connections.
111
111
  */
112
- typedef enum { UDS = 0 } grpc_local_connect_type;
112
+ typedef enum { UDS = 0, LOCAL_TCP } grpc_local_connect_type;
113
113
 
114
114
  #ifdef __cplusplus
115
115
  }
@@ -26,6 +26,8 @@
26
26
  typedef intptr_t gpr_atm;
27
27
  #define GPR_ATM_MAX INTPTR_MAX
28
28
  #define GPR_ATM_MIN INTPTR_MIN
29
+ #define GPR_ATM_INC_CAS_THEN(blah) blah
30
+ #define GPR_ATM_INC_ADD_THEN(blah) blah
29
31
 
30
32
  #define GPR_ATM_COMPILE_BARRIER_() __asm__ __volatile__("" : : : "memory")
31
33
 
@@ -25,6 +25,8 @@
25
25
  typedef intptr_t gpr_atm;
26
26
  #define GPR_ATM_MAX INTPTR_MAX
27
27
  #define GPR_ATM_MIN INTPTR_MIN
28
+ #define GPR_ATM_INC_CAS_THEN(blah) blah
29
+ #define GPR_ATM_INC_ADD_THEN(blah) blah
28
30
 
29
31
  #define gpr_atm_full_barrier MemoryBarrier
30
32
 
@@ -52,7 +52,8 @@ extern "C" {
52
52
  "grpc.compression_enabled_algorithms_bitset"
53
53
  /** \} */
54
54
 
55
- /** The various compression algorithms supported by gRPC */
55
+ /** The various compression algorithms supported by gRPC (not sorted by
56
+ * compression level) */
56
57
  typedef enum {
57
58
  GRPC_COMPRESS_NONE = 0,
58
59
  GRPC_COMPRESS_DEFLATE,
@@ -293,7 +293,7 @@ typedef struct {
293
293
  "grpc.max_channel_trace_event_memory_per_node"
294
294
  /** If non-zero, gRPC library will track stats and information at at per channel
295
295
  * level. Disabling channelz naturally disables channel tracing. The default
296
- * is for channelz to be disabled. */
296
+ * is for channelz to be enabled. */
297
297
  #define GRPC_ARG_ENABLE_CHANNELZ "grpc.enable_channelz"
298
298
  /** If non-zero, Cronet transport will coalesce packets to fewer frames
299
299
  * when possible. */
@@ -526,6 +526,15 @@ typedef unsigned __int64 uint64_t;
526
526
  #endif /* GPR_ATTRIBUTE_NO_TSAN (2) */
527
527
  #endif /* GPR_ATTRIBUTE_NO_TSAN (1) */
528
528
 
529
+ /* GRPC_TSAN_ENABLED will be defined, when compiled with thread sanitizer. */
530
+ #if defined(__SANITIZE_THREAD__)
531
+ #define GRPC_TSAN_ENABLED
532
+ #elif defined(__has_feature)
533
+ #if __has_feature(thread_sanitizer)
534
+ #define GRPC_TSAN_ENABLED
535
+ #endif
536
+ #endif
537
+
529
538
  /* GRPC_ALLOW_EXCEPTIONS should be 0 or 1 if exceptions are allowed or not */
530
539
  #ifndef GRPC_ALLOW_EXCEPTIONS
531
540
  /* If not already set, set to 1 on Windows (style guide standard) but to
@@ -35,6 +35,7 @@
35
35
  #include "src/core/ext/filters/client_channel/http_connect_handshaker.h"
36
36
  #include "src/core/ext/filters/client_channel/lb_policy_registry.h"
37
37
  #include "src/core/ext/filters/client_channel/proxy_mapper_registry.h"
38
+ #include "src/core/ext/filters/client_channel/request_routing.h"
38
39
  #include "src/core/ext/filters/client_channel/resolver_registry.h"
39
40
  #include "src/core/ext/filters/client_channel/resolver_result_parsing.h"
40
41
  #include "src/core/ext/filters/client_channel/retry_throttle.h"
@@ -86,31 +87,18 @@ grpc_core::TraceFlag grpc_client_channel_trace(false, "client_channel");
86
87
  struct external_connectivity_watcher;
87
88
 
88
89
  typedef struct client_channel_channel_data {
89
- grpc_core::OrphanablePtr<grpc_core::Resolver> resolver;
90
- bool started_resolving;
90
+ grpc_core::ManualConstructor<grpc_core::RequestRouter> request_router;
91
+
91
92
  bool deadline_checking_enabled;
92
- grpc_client_channel_factory* client_channel_factory;
93
93
  bool enable_retries;
94
94
  size_t per_rpc_retry_buffer_size;
95
95
 
96
96
  /** combiner protecting all variables below in this data structure */
97
97
  grpc_combiner* combiner;
98
- /** currently active load balancer */
99
- grpc_core::OrphanablePtr<grpc_core::LoadBalancingPolicy> lb_policy;
100
98
  /** retry throttle data */
101
99
  grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data;
102
100
  /** maps method names to method_parameters structs */
103
101
  grpc_core::RefCountedPtr<ClientChannelMethodParamsTable> method_params_table;
104
- /** incoming resolver result - set by resolver.next() */
105
- grpc_channel_args* resolver_result;
106
- /** a list of closures that are all waiting for resolver result to come in */
107
- grpc_closure_list waiting_for_resolver_result_closures;
108
- /** resolver callback */
109
- grpc_closure on_resolver_result_changed;
110
- /** connectivity state being tracked */
111
- grpc_connectivity_state_tracker state_tracker;
112
- /** when an lb_policy arrives, should we try to exit idle */
113
- bool exit_idle_when_lb_policy_arrives;
114
102
  /** owning stack */
115
103
  grpc_channel_stack* owning_stack;
116
104
  /** interested parties (owned) */
@@ -127,424 +115,40 @@ typedef struct client_channel_channel_data {
127
115
  grpc_core::UniquePtr<char> info_lb_policy_name;
128
116
  /** service config in JSON form */
129
117
  grpc_core::UniquePtr<char> info_service_config_json;
130
- /* backpointer to grpc_channel's channelz node */
131
- grpc_core::channelz::ClientChannelNode* channelz_channel;
132
- /* caches if the last resolution event contained addresses */
133
- bool previous_resolution_contained_addresses;
134
118
  } channel_data;
135
119
 
136
- typedef struct {
137
- channel_data* chand;
138
- /** used as an identifier, don't dereference it because the LB policy may be
139
- * non-existing when the callback is run */
140
- grpc_core::LoadBalancingPolicy* lb_policy;
141
- grpc_closure closure;
142
- } reresolution_request_args;
143
-
144
- /** We create one watcher for each new lb_policy that is returned from a
145
- resolver, to watch for state changes from the lb_policy. When a state
146
- change is seen, we update the channel, and create a new watcher. */
147
- typedef struct {
148
- channel_data* chand;
149
- grpc_closure on_changed;
150
- grpc_connectivity_state state;
151
- grpc_core::LoadBalancingPolicy* lb_policy;
152
- } lb_policy_connectivity_watcher;
153
-
154
- static void watch_lb_policy_locked(channel_data* chand,
155
- grpc_core::LoadBalancingPolicy* lb_policy,
156
- grpc_connectivity_state current_state);
157
-
158
- static const char* channel_connectivity_state_change_string(
159
- grpc_connectivity_state state) {
160
- switch (state) {
161
- case GRPC_CHANNEL_IDLE:
162
- return "Channel state change to IDLE";
163
- case GRPC_CHANNEL_CONNECTING:
164
- return "Channel state change to CONNECTING";
165
- case GRPC_CHANNEL_READY:
166
- return "Channel state change to READY";
167
- case GRPC_CHANNEL_TRANSIENT_FAILURE:
168
- return "Channel state change to TRANSIENT_FAILURE";
169
- case GRPC_CHANNEL_SHUTDOWN:
170
- return "Channel state change to SHUTDOWN";
171
- }
172
- GPR_UNREACHABLE_CODE(return "UNKNOWN");
173
- }
174
-
175
- static void set_channel_connectivity_state_locked(channel_data* chand,
176
- grpc_connectivity_state state,
177
- grpc_error* error,
178
- const char* reason) {
179
- /* TODO: Improve failure handling:
180
- * - Make it possible for policies to return GRPC_CHANNEL_TRANSIENT_FAILURE.
181
- * - Hand over pending picks from old policies during the switch that happens
182
- * when resolver provides an update. */
183
- if (chand->lb_policy != nullptr) {
184
- if (state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
185
- /* cancel picks with wait_for_ready=false */
186
- chand->lb_policy->CancelMatchingPicksLocked(
187
- /* mask= */ GRPC_INITIAL_METADATA_WAIT_FOR_READY,
188
- /* check= */ 0, GRPC_ERROR_REF(error));
189
- } else if (state == GRPC_CHANNEL_SHUTDOWN) {
190
- /* cancel all picks */
191
- chand->lb_policy->CancelMatchingPicksLocked(/* mask= */ 0, /* check= */ 0,
192
- GRPC_ERROR_REF(error));
193
- }
194
- }
195
- if (grpc_client_channel_trace.enabled()) {
196
- gpr_log(GPR_INFO, "chand=%p: setting connectivity state to %s", chand,
197
- grpc_connectivity_state_name(state));
198
- }
199
- if (chand->channelz_channel != nullptr) {
200
- chand->channelz_channel->AddTraceEvent(
201
- grpc_core::channelz::ChannelTrace::Severity::Info,
202
- grpc_slice_from_static_string(
203
- channel_connectivity_state_change_string(state)));
204
- }
205
- grpc_connectivity_state_set(&chand->state_tracker, state, error, reason);
206
- }
207
-
208
- static void on_lb_policy_state_changed_locked(void* arg, grpc_error* error) {
209
- lb_policy_connectivity_watcher* w =
210
- static_cast<lb_policy_connectivity_watcher*>(arg);
211
- /* check if the notification is for the latest policy */
212
- if (w->lb_policy == w->chand->lb_policy.get()) {
213
- if (grpc_client_channel_trace.enabled()) {
214
- gpr_log(GPR_INFO, "chand=%p: lb_policy=%p state changed to %s", w->chand,
215
- w->lb_policy, grpc_connectivity_state_name(w->state));
216
- }
217
- set_channel_connectivity_state_locked(w->chand, w->state,
218
- GRPC_ERROR_REF(error), "lb_changed");
219
- if (w->state != GRPC_CHANNEL_SHUTDOWN) {
220
- watch_lb_policy_locked(w->chand, w->lb_policy, w->state);
221
- }
222
- }
223
- GRPC_CHANNEL_STACK_UNREF(w->chand->owning_stack, "watch_lb_policy");
224
- gpr_free(w);
225
- }
226
-
227
- static void watch_lb_policy_locked(channel_data* chand,
228
- grpc_core::LoadBalancingPolicy* lb_policy,
229
- grpc_connectivity_state current_state) {
230
- lb_policy_connectivity_watcher* w =
231
- static_cast<lb_policy_connectivity_watcher*>(gpr_malloc(sizeof(*w)));
232
- GRPC_CHANNEL_STACK_REF(chand->owning_stack, "watch_lb_policy");
233
- w->chand = chand;
234
- GRPC_CLOSURE_INIT(&w->on_changed, on_lb_policy_state_changed_locked, w,
235
- grpc_combiner_scheduler(chand->combiner));
236
- w->state = current_state;
237
- w->lb_policy = lb_policy;
238
- lb_policy->NotifyOnStateChangeLocked(&w->state, &w->on_changed);
239
- }
240
-
241
- static void start_resolving_locked(channel_data* chand) {
242
- if (grpc_client_channel_trace.enabled()) {
243
- gpr_log(GPR_INFO, "chand=%p: starting name resolution", chand);
244
- }
245
- GPR_ASSERT(!chand->started_resolving);
246
- chand->started_resolving = true;
247
- GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
248
- chand->resolver->NextLocked(&chand->resolver_result,
249
- &chand->on_resolver_result_changed);
250
- }
251
-
252
- // Invoked from the resolver NextLocked() callback when the resolver
253
- // is shutting down.
254
- static void on_resolver_shutdown_locked(channel_data* chand,
255
- grpc_error* error) {
256
- if (grpc_client_channel_trace.enabled()) {
257
- gpr_log(GPR_INFO, "chand=%p: shutting down", chand);
258
- }
259
- if (chand->lb_policy != nullptr) {
260
- if (grpc_client_channel_trace.enabled()) {
261
- gpr_log(GPR_INFO, "chand=%p: shutting down lb_policy=%p", chand,
262
- chand->lb_policy.get());
263
- }
264
- grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties(),
265
- chand->interested_parties);
266
- chand->lb_policy.reset();
267
- }
268
- if (chand->resolver != nullptr) {
269
- // This should never happen; it can only be triggered by a resolver
270
- // implementation spotaneously deciding to report shutdown without
271
- // being orphaned. This code is included just to be defensive.
272
- if (grpc_client_channel_trace.enabled()) {
273
- gpr_log(GPR_INFO, "chand=%p: spontaneous shutdown from resolver %p",
274
- chand, chand->resolver.get());
275
- }
276
- chand->resolver.reset();
277
- set_channel_connectivity_state_locked(
278
- chand, GRPC_CHANNEL_SHUTDOWN,
279
- GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
280
- "Resolver spontaneous shutdown", &error, 1),
281
- "resolver_spontaneous_shutdown");
282
- }
283
- grpc_closure_list_fail_all(&chand->waiting_for_resolver_result_closures,
284
- GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
285
- "Channel disconnected", &error, 1));
286
- GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
287
- GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "resolver");
288
- grpc_channel_args_destroy(chand->resolver_result);
289
- chand->resolver_result = nullptr;
290
- GRPC_ERROR_UNREF(error);
291
- }
292
-
293
- static void request_reresolution_locked(void* arg, grpc_error* error) {
294
- reresolution_request_args* args =
295
- static_cast<reresolution_request_args*>(arg);
296
- channel_data* chand = args->chand;
297
- // If this invocation is for a stale LB policy, treat it as an LB shutdown
298
- // signal.
299
- if (args->lb_policy != chand->lb_policy.get() || error != GRPC_ERROR_NONE ||
300
- chand->resolver == nullptr) {
301
- GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "re-resolution");
302
- gpr_free(args);
303
- return;
304
- }
305
- if (grpc_client_channel_trace.enabled()) {
306
- gpr_log(GPR_INFO, "chand=%p: started name re-resolving", chand);
307
- }
308
- chand->resolver->RequestReresolutionLocked();
309
- // Give back the closure to the LB policy.
310
- chand->lb_policy->SetReresolutionClosureLocked(&args->closure);
311
- }
312
-
313
- using TraceStringVector = grpc_core::InlinedVector<char*, 3>;
314
-
315
- // Creates a new LB policy, replacing any previous one.
316
- // If the new policy is created successfully, sets *connectivity_state and
317
- // *connectivity_error to its initial connectivity state; otherwise,
318
- // leaves them unchanged.
319
- static void create_new_lb_policy_locked(
320
- channel_data* chand, char* lb_policy_name, grpc_json* lb_config,
321
- grpc_connectivity_state* connectivity_state,
322
- grpc_error** connectivity_error, TraceStringVector* trace_strings) {
323
- grpc_core::LoadBalancingPolicy::Args lb_policy_args;
324
- lb_policy_args.combiner = chand->combiner;
325
- lb_policy_args.client_channel_factory = chand->client_channel_factory;
326
- lb_policy_args.args = chand->resolver_result;
327
- lb_policy_args.lb_config = lb_config;
328
- grpc_core::OrphanablePtr<grpc_core::LoadBalancingPolicy> new_lb_policy =
329
- grpc_core::LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
330
- lb_policy_name, lb_policy_args);
331
- if (GPR_UNLIKELY(new_lb_policy == nullptr)) {
332
- gpr_log(GPR_ERROR, "could not create LB policy \"%s\"", lb_policy_name);
333
- if (chand->channelz_channel != nullptr) {
334
- char* str;
335
- gpr_asprintf(&str, "Could not create LB policy \'%s\'", lb_policy_name);
336
- trace_strings->push_back(str);
337
- }
338
- } else {
339
- if (grpc_client_channel_trace.enabled()) {
340
- gpr_log(GPR_INFO, "chand=%p: created new LB policy \"%s\" (%p)", chand,
341
- lb_policy_name, new_lb_policy.get());
342
- }
343
- if (chand->channelz_channel != nullptr) {
344
- char* str;
345
- gpr_asprintf(&str, "Created new LB policy \'%s\'", lb_policy_name);
346
- trace_strings->push_back(str);
347
- }
348
- // Swap out the LB policy and update the fds in
349
- // chand->interested_parties.
350
- if (chand->lb_policy != nullptr) {
351
- if (grpc_client_channel_trace.enabled()) {
352
- gpr_log(GPR_INFO, "chand=%p: shutting down lb_policy=%p", chand,
353
- chand->lb_policy.get());
354
- }
355
- grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties(),
356
- chand->interested_parties);
357
- chand->lb_policy->HandOffPendingPicksLocked(new_lb_policy.get());
358
- }
359
- chand->lb_policy = std::move(new_lb_policy);
360
- grpc_pollset_set_add_pollset_set(chand->lb_policy->interested_parties(),
361
- chand->interested_parties);
362
- // Set up re-resolution callback.
363
- reresolution_request_args* args =
364
- static_cast<reresolution_request_args*>(gpr_zalloc(sizeof(*args)));
365
- args->chand = chand;
366
- args->lb_policy = chand->lb_policy.get();
367
- GRPC_CLOSURE_INIT(&args->closure, request_reresolution_locked, args,
368
- grpc_combiner_scheduler(chand->combiner));
369
- GRPC_CHANNEL_STACK_REF(chand->owning_stack, "re-resolution");
370
- chand->lb_policy->SetReresolutionClosureLocked(&args->closure);
371
- // Get the new LB policy's initial connectivity state and start a
372
- // connectivity watch.
373
- GRPC_ERROR_UNREF(*connectivity_error);
374
- *connectivity_state =
375
- chand->lb_policy->CheckConnectivityLocked(connectivity_error);
376
- if (chand->exit_idle_when_lb_policy_arrives) {
377
- chand->lb_policy->ExitIdleLocked();
378
- chand->exit_idle_when_lb_policy_arrives = false;
379
- }
380
- watch_lb_policy_locked(chand, chand->lb_policy.get(), *connectivity_state);
381
- }
382
- }
383
-
384
- static void maybe_add_trace_message_for_address_changes_locked(
385
- channel_data* chand, TraceStringVector* trace_strings) {
386
- int resolution_contains_addresses = false;
387
- const grpc_arg* channel_arg =
388
- grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_ADDRESSES);
389
- if (channel_arg != nullptr && channel_arg->type == GRPC_ARG_POINTER) {
390
- grpc_lb_addresses* addresses =
391
- static_cast<grpc_lb_addresses*>(channel_arg->value.pointer.p);
392
- if (addresses->num_addresses > 0) {
393
- resolution_contains_addresses = true;
394
- }
395
- }
396
- if (!resolution_contains_addresses &&
397
- chand->previous_resolution_contained_addresses) {
398
- trace_strings->push_back(gpr_strdup("Address list became empty"));
399
- } else if (resolution_contains_addresses &&
400
- !chand->previous_resolution_contained_addresses) {
401
- trace_strings->push_back(gpr_strdup("Address list became non-empty"));
402
- }
403
- chand->previous_resolution_contained_addresses =
404
- resolution_contains_addresses;
405
- }
406
-
407
- static void concatenate_and_add_channel_trace_locked(
408
- channel_data* chand, TraceStringVector* trace_strings) {
409
- if (!trace_strings->empty()) {
410
- gpr_strvec v;
411
- gpr_strvec_init(&v);
412
- gpr_strvec_add(&v, gpr_strdup("Resolution event: "));
413
- bool is_first = 1;
414
- for (size_t i = 0; i < trace_strings->size(); ++i) {
415
- if (!is_first) gpr_strvec_add(&v, gpr_strdup(", "));
416
- is_first = false;
417
- gpr_strvec_add(&v, (*trace_strings)[i]);
418
- }
419
- char* flat;
420
- size_t flat_len = 0;
421
- flat = gpr_strvec_flatten(&v, &flat_len);
422
- chand->channelz_channel->AddTraceEvent(
423
- grpc_core::channelz::ChannelTrace::Severity::Info,
424
- grpc_slice_new(flat, flat_len, gpr_free));
425
- gpr_strvec_destroy(&v);
426
- }
427
- }
428
-
429
- // Callback invoked when a resolver result is available.
430
- static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
120
+ // Synchronous callback from chand->request_router to process a resolver
121
+ // result update.
122
+ static bool process_resolver_result_locked(void* arg,
123
+ const grpc_channel_args& args,
124
+ const char** lb_policy_name,
125
+ grpc_json** lb_policy_config) {
431
126
  channel_data* chand = static_cast<channel_data*>(arg);
127
+ ProcessedResolverResult resolver_result(args, chand->enable_retries);
128
+ grpc_core::UniquePtr<char> service_config_json =
129
+ resolver_result.service_config_json();
432
130
  if (grpc_client_channel_trace.enabled()) {
433
- const char* disposition =
434
- chand->resolver_result != nullptr
435
- ? ""
436
- : (error == GRPC_ERROR_NONE ? " (transient error)"
437
- : " (resolver shutdown)");
438
- gpr_log(GPR_INFO,
439
- "chand=%p: got resolver result: resolver_result=%p error=%s%s",
440
- chand, chand->resolver_result, grpc_error_string(error),
441
- disposition);
131
+ gpr_log(GPR_INFO, "chand=%p: resolver returned service config: \"%s\"",
132
+ chand, service_config_json.get());
442
133
  }
443
- // Handle shutdown.
444
- if (error != GRPC_ERROR_NONE || chand->resolver == nullptr) {
445
- on_resolver_shutdown_locked(chand, GRPC_ERROR_REF(error));
446
- return;
447
- }
448
- // Data used to set the channel's connectivity state.
449
- bool set_connectivity_state = true;
450
- // We only want to trace the address resolution in the follow cases:
451
- // (a) Address resolution resulted in service config change.
452
- // (b) Address resolution that causes number of backends to go from
453
- // zero to non-zero.
454
- // (c) Address resolution that causes number of backends to go from
455
- // non-zero to zero.
456
- // (d) Address resolution that causes a new LB policy to be created.
457
- //
458
- // we track a list of strings to eventually be concatenated and traced.
459
- TraceStringVector trace_strings;
460
- grpc_connectivity_state connectivity_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
461
- grpc_error* connectivity_error =
462
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("No load balancing policy");
463
- // chand->resolver_result will be null in the case of a transient
464
- // resolution error. In that case, we don't have any new result to
465
- // process, which means that we keep using the previous result (if any).
466
- if (chand->resolver_result == nullptr) {
467
- if (grpc_client_channel_trace.enabled()) {
468
- gpr_log(GPR_INFO, "chand=%p: resolver transient failure", chand);
469
- }
470
- // Don't override connectivity state if we already have an LB policy.
471
- if (chand->lb_policy != nullptr) set_connectivity_state = false;
472
- } else {
473
- // Parse the resolver result.
474
- ProcessedResolverResult resolver_result(chand->resolver_result,
475
- chand->enable_retries);
476
- chand->retry_throttle_data = resolver_result.retry_throttle_data();
477
- chand->method_params_table = resolver_result.method_params_table();
478
- grpc_core::UniquePtr<char> service_config_json =
479
- resolver_result.service_config_json();
480
- if (service_config_json != nullptr && grpc_client_channel_trace.enabled()) {
481
- gpr_log(GPR_INFO, "chand=%p: resolver returned service config: \"%s\"",
482
- chand, service_config_json.get());
483
- }
484
- grpc_core::UniquePtr<char> lb_policy_name =
485
- resolver_result.lb_policy_name();
486
- grpc_json* lb_policy_config = resolver_result.lb_policy_config();
487
- // Check to see if we're already using the right LB policy.
488
- // Note: It's safe to use chand->info_lb_policy_name here without
489
- // taking a lock on chand->info_mu, because this function is the
490
- // only thing that modifies its value, and it can only be invoked
491
- // once at any given time.
492
- bool lb_policy_name_changed = chand->info_lb_policy_name == nullptr ||
493
- gpr_stricmp(chand->info_lb_policy_name.get(),
494
- lb_policy_name.get()) != 0;
495
- if (chand->lb_policy != nullptr && !lb_policy_name_changed) {
496
- // Continue using the same LB policy. Update with new addresses.
497
- if (grpc_client_channel_trace.enabled()) {
498
- gpr_log(GPR_INFO, "chand=%p: updating existing LB policy \"%s\" (%p)",
499
- chand, lb_policy_name.get(), chand->lb_policy.get());
500
- }
501
- chand->lb_policy->UpdateLocked(*chand->resolver_result, lb_policy_config);
502
- // No need to set the channel's connectivity state; the existing
503
- // watch on the LB policy will take care of that.
504
- set_connectivity_state = false;
505
- } else {
506
- // Instantiate new LB policy.
507
- create_new_lb_policy_locked(chand, lb_policy_name.get(), lb_policy_config,
508
- &connectivity_state, &connectivity_error,
509
- &trace_strings);
510
- }
511
- // Note: It's safe to use chand->info_service_config_json here without
512
- // taking a lock on chand->info_mu, because this function is the
513
- // only thing that modifies its value, and it can only be invoked
514
- // once at any given time.
515
- if (chand->channelz_channel != nullptr) {
516
- if (((service_config_json == nullptr) !=
517
- (chand->info_service_config_json == nullptr)) ||
518
- (service_config_json != nullptr &&
519
- strcmp(service_config_json.get(),
520
- chand->info_service_config_json.get()) != 0)) {
521
- // TODO(ncteisen): might be worth somehow including a snippet of the
522
- // config in the trace, at the risk of bloating the trace logs.
523
- trace_strings.push_back(gpr_strdup("Service config changed"));
524
- }
525
- maybe_add_trace_message_for_address_changes_locked(chand, &trace_strings);
526
- concatenate_and_add_channel_trace_locked(chand, &trace_strings);
527
- }
528
- // Swap out the data used by cc_get_channel_info().
529
- gpr_mu_lock(&chand->info_mu);
530
- chand->info_lb_policy_name = std::move(lb_policy_name);
531
- chand->info_service_config_json = std::move(service_config_json);
532
- gpr_mu_unlock(&chand->info_mu);
533
- // Clean up.
534
- grpc_channel_args_destroy(chand->resolver_result);
535
- chand->resolver_result = nullptr;
536
- }
537
- // Set the channel's connectivity state if needed.
538
- if (set_connectivity_state) {
539
- set_channel_connectivity_state_locked(
540
- chand, connectivity_state, connectivity_error, "resolver_result");
541
- } else {
542
- GRPC_ERROR_UNREF(connectivity_error);
543
- }
544
- // Invoke closures that were waiting for results and renew the watch.
545
- GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
546
- chand->resolver->NextLocked(&chand->resolver_result,
547
- &chand->on_resolver_result_changed);
134
+ // Update channel state.
135
+ chand->retry_throttle_data = resolver_result.retry_throttle_data();
136
+ chand->method_params_table = resolver_result.method_params_table();
137
+ // Swap out the data used by cc_get_channel_info().
138
+ gpr_mu_lock(&chand->info_mu);
139
+ chand->info_lb_policy_name = resolver_result.lb_policy_name();
140
+ const bool service_config_changed =
141
+ ((service_config_json == nullptr) !=
142
+ (chand->info_service_config_json == nullptr)) ||
143
+ (service_config_json != nullptr &&
144
+ strcmp(service_config_json.get(),
145
+ chand->info_service_config_json.get()) != 0);
146
+ chand->info_service_config_json = std::move(service_config_json);
147
+ gpr_mu_unlock(&chand->info_mu);
148
+ // Return results.
149
+ *lb_policy_name = chand->info_lb_policy_name.get();
150
+ *lb_policy_config = resolver_result.lb_policy_config();
151
+ return service_config_changed;
548
152
  }
549
153
 
550
154
  static void start_transport_op_locked(void* arg, grpc_error* error_ignored) {
@@ -554,15 +158,14 @@ static void start_transport_op_locked(void* arg, grpc_error* error_ignored) {
554
158
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
555
159
 
556
160
  if (op->on_connectivity_state_change != nullptr) {
557
- grpc_connectivity_state_notify_on_state_change(
558
- &chand->state_tracker, op->connectivity_state,
559
- op->on_connectivity_state_change);
161
+ chand->request_router->NotifyOnConnectivityStateChange(
162
+ op->connectivity_state, op->on_connectivity_state_change);
560
163
  op->on_connectivity_state_change = nullptr;
561
164
  op->connectivity_state = nullptr;
562
165
  }
563
166
 
564
167
  if (op->send_ping.on_initiate != nullptr || op->send_ping.on_ack != nullptr) {
565
- if (chand->lb_policy == nullptr) {
168
+ if (chand->request_router->lb_policy() == nullptr) {
566
169
  grpc_error* error =
567
170
  GRPC_ERROR_CREATE_FROM_STATIC_STRING("Ping with no load balancing");
568
171
  GRPC_CLOSURE_SCHED(op->send_ping.on_initiate, GRPC_ERROR_REF(error));
@@ -570,14 +173,9 @@ static void start_transport_op_locked(void* arg, grpc_error* error_ignored) {
570
173
  } else {
571
174
  grpc_error* error = GRPC_ERROR_NONE;
572
175
  grpc_core::LoadBalancingPolicy::PickState pick_state;
573
- pick_state.initial_metadata = nullptr;
574
- pick_state.initial_metadata_flags = 0;
575
- pick_state.on_complete = nullptr;
576
- memset(&pick_state.subchannel_call_context, 0,
577
- sizeof(pick_state.subchannel_call_context));
578
- pick_state.user_data = nullptr;
579
176
  // Pick must return synchronously, because pick_state.on_complete is null.
580
- GPR_ASSERT(chand->lb_policy->PickLocked(&pick_state, &error));
177
+ GPR_ASSERT(
178
+ chand->request_router->lb_policy()->PickLocked(&pick_state, &error));
581
179
  if (pick_state.connected_subchannel != nullptr) {
582
180
  pick_state.connected_subchannel->Ping(op->send_ping.on_initiate,
583
181
  op->send_ping.on_ack);
@@ -596,37 +194,14 @@ static void start_transport_op_locked(void* arg, grpc_error* error_ignored) {
596
194
  }
597
195
 
598
196
  if (op->disconnect_with_error != GRPC_ERROR_NONE) {
599
- if (chand->resolver != nullptr) {
600
- set_channel_connectivity_state_locked(
601
- chand, GRPC_CHANNEL_SHUTDOWN,
602
- GRPC_ERROR_REF(op->disconnect_with_error), "disconnect");
603
- chand->resolver.reset();
604
- if (!chand->started_resolving) {
605
- grpc_closure_list_fail_all(&chand->waiting_for_resolver_result_closures,
606
- GRPC_ERROR_REF(op->disconnect_with_error));
607
- GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
608
- }
609
- if (chand->lb_policy != nullptr) {
610
- grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties(),
611
- chand->interested_parties);
612
- chand->lb_policy.reset();
613
- }
614
- }
615
- GRPC_ERROR_UNREF(op->disconnect_with_error);
197
+ chand->request_router->ShutdownLocked(op->disconnect_with_error);
616
198
  }
617
199
 
618
200
  if (op->reset_connect_backoff) {
619
- if (chand->resolver != nullptr) {
620
- chand->resolver->ResetBackoffLocked();
621
- chand->resolver->RequestReresolutionLocked();
622
- }
623
- if (chand->lb_policy != nullptr) {
624
- chand->lb_policy->ResetBackoffLocked();
625
- }
201
+ chand->request_router->ResetConnectionBackoffLocked();
626
202
  }
627
203
 
628
204
  GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "start_transport_op");
629
-
630
205
  GRPC_CLOSURE_SCHED(op->on_consumed, GRPC_ERROR_NONE);
631
206
  }
632
207
 
@@ -677,12 +252,9 @@ static grpc_error* cc_init_channel_elem(grpc_channel_element* elem,
677
252
  gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu);
678
253
 
679
254
  chand->owning_stack = args->channel_stack;
680
- GRPC_CLOSURE_INIT(&chand->on_resolver_result_changed,
681
- on_resolver_result_changed_locked, chand,
682
- grpc_combiner_scheduler(chand->combiner));
255
+ chand->deadline_checking_enabled =
256
+ grpc_deadline_checking_enabled(args->channel_args);
683
257
  chand->interested_parties = grpc_pollset_set_create();
684
- grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
685
- "client_channel");
686
258
  grpc_client_channel_start_backup_polling(chand->interested_parties);
687
259
  // Record max per-RPC retry buffer size.
688
260
  const grpc_arg* arg = grpc_channel_args_find(
@@ -692,8 +264,6 @@ static grpc_error* cc_init_channel_elem(grpc_channel_element* elem,
692
264
  // Record enable_retries.
693
265
  arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_ENABLE_RETRIES);
694
266
  chand->enable_retries = grpc_channel_arg_get_bool(arg, true);
695
- chand->channelz_channel = nullptr;
696
- chand->previous_resolution_contained_addresses = false;
697
267
  // Record client channel factory.
698
268
  arg = grpc_channel_args_find(args->channel_args,
699
269
  GRPC_ARG_CLIENT_CHANNEL_FACTORY);
@@ -705,9 +275,7 @@ static grpc_error* cc_init_channel_elem(grpc_channel_element* elem,
705
275
  return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
706
276
  "client channel factory arg must be a pointer");
707
277
  }
708
- grpc_client_channel_factory_ref(
709
- static_cast<grpc_client_channel_factory*>(arg->value.pointer.p));
710
- chand->client_channel_factory =
278
+ grpc_client_channel_factory* client_channel_factory =
711
279
  static_cast<grpc_client_channel_factory*>(arg->value.pointer.p);
712
280
  // Get server name to resolve, using proxy mapper if needed.
713
281
  arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVER_URI);
@@ -723,39 +291,24 @@ static grpc_error* cc_init_channel_elem(grpc_channel_element* elem,
723
291
  grpc_channel_args* new_args = nullptr;
724
292
  grpc_proxy_mappers_map_name(arg->value.string, args->channel_args,
725
293
  &proxy_name, &new_args);
726
- // Instantiate resolver.
727
- chand->resolver = grpc_core::ResolverRegistry::CreateResolver(
728
- proxy_name != nullptr ? proxy_name : arg->value.string,
729
- new_args != nullptr ? new_args : args->channel_args,
730
- chand->interested_parties, chand->combiner);
731
- if (proxy_name != nullptr) gpr_free(proxy_name);
732
- if (new_args != nullptr) grpc_channel_args_destroy(new_args);
733
- if (chand->resolver == nullptr) {
734
- return GRPC_ERROR_CREATE_FROM_STATIC_STRING("resolver creation failed");
735
- }
736
- chand->deadline_checking_enabled =
737
- grpc_deadline_checking_enabled(args->channel_args);
738
- return GRPC_ERROR_NONE;
294
+ // Instantiate request router.
295
+ grpc_client_channel_factory_ref(client_channel_factory);
296
+ grpc_error* error = GRPC_ERROR_NONE;
297
+ chand->request_router.Init(
298
+ chand->owning_stack, chand->combiner, client_channel_factory,
299
+ chand->interested_parties, &grpc_client_channel_trace,
300
+ process_resolver_result_locked, chand,
301
+ proxy_name != nullptr ? proxy_name : arg->value.string /* target_uri */,
302
+ new_args != nullptr ? new_args : args->channel_args, &error);
303
+ gpr_free(proxy_name);
304
+ grpc_channel_args_destroy(new_args);
305
+ return error;
739
306
  }
740
307
 
741
308
  /* Destructor for channel_data */
742
309
  static void cc_destroy_channel_elem(grpc_channel_element* elem) {
743
310
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
744
- if (chand->resolver != nullptr) {
745
- // The only way we can get here is if we never started resolving,
746
- // because we take a ref to the channel stack when we start
747
- // resolving and do not release it until the resolver callback is
748
- // invoked after the resolver shuts down.
749
- chand->resolver.reset();
750
- }
751
- if (chand->client_channel_factory != nullptr) {
752
- grpc_client_channel_factory_unref(chand->client_channel_factory);
753
- }
754
- if (chand->lb_policy != nullptr) {
755
- grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties(),
756
- chand->interested_parties);
757
- chand->lb_policy.reset();
758
- }
311
+ chand->request_router.Destroy();
759
312
  // TODO(roth): Once we convert the filter API to C++, there will no
760
313
  // longer be any need to explicitly reset these smart pointer data members.
761
314
  chand->info_lb_policy_name.reset();
@@ -763,7 +316,6 @@ static void cc_destroy_channel_elem(grpc_channel_element* elem) {
763
316
  chand->retry_throttle_data.reset();
764
317
  chand->method_params_table.reset();
765
318
  grpc_client_channel_stop_backup_polling(chand->interested_parties);
766
- grpc_connectivity_state_destroy(&chand->state_tracker);
767
319
  grpc_pollset_set_destroy(chand->interested_parties);
768
320
  GRPC_COMBINER_UNREF(chand->combiner, "client_channel");
769
321
  gpr_mu_destroy(&chand->info_mu);
@@ -820,6 +372,7 @@ static void cc_destroy_channel_elem(grpc_channel_element* elem) {
820
372
  // - add census stats for retries
821
373
 
822
374
  namespace {
375
+
823
376
  struct call_data;
824
377
 
825
378
  // State used for starting a retryable batch on a subchannel call.
@@ -904,12 +457,12 @@ struct subchannel_call_retry_state {
904
457
  bool completed_recv_initial_metadata : 1;
905
458
  bool started_recv_trailing_metadata : 1;
906
459
  bool completed_recv_trailing_metadata : 1;
460
+ // State for callback processing.
907
461
  subchannel_batch_data* recv_initial_metadata_ready_deferred_batch = nullptr;
908
462
  grpc_error* recv_initial_metadata_error = GRPC_ERROR_NONE;
909
463
  subchannel_batch_data* recv_message_ready_deferred_batch = nullptr;
910
464
  grpc_error* recv_message_error = GRPC_ERROR_NONE;
911
465
  subchannel_batch_data* recv_trailing_metadata_internal_batch = nullptr;
912
- // State for callback processing.
913
466
  // NOTE: Do not move this next to the metadata bitfields above. That would
914
467
  // save space but will also result in a data race because compiler will
915
468
  // generate a 2 byte store which overwrites the meta-data fields upon
@@ -918,12 +471,12 @@ struct subchannel_call_retry_state {
918
471
  };
919
472
 
920
473
  // Pending batches stored in call data.
921
- typedef struct {
474
+ struct pending_batch {
922
475
  // The pending batch. If nullptr, this slot is empty.
923
476
  grpc_transport_stream_op_batch* batch;
924
477
  // Indicates whether payload for send ops has been cached in call data.
925
478
  bool send_ops_cached;
926
- } pending_batch;
479
+ };
927
480
 
928
481
  /** Call data. Holds a pointer to grpc_subchannel_call and the
929
482
  associated machinery to create such a pointer.
@@ -960,11 +513,8 @@ struct call_data {
960
513
  for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches); ++i) {
961
514
  GPR_ASSERT(pending_batches[i].batch == nullptr);
962
515
  }
963
- for (size_t i = 0; i < GRPC_CONTEXT_COUNT; ++i) {
964
- if (pick.subchannel_call_context[i].value != nullptr) {
965
- pick.subchannel_call_context[i].destroy(
966
- pick.subchannel_call_context[i].value);
967
- }
516
+ if (have_request) {
517
+ request.Destroy();
968
518
  }
969
519
  }
970
520
 
@@ -991,12 +541,11 @@ struct call_data {
991
541
  // Set when we get a cancel_stream op.
992
542
  grpc_error* cancel_error = GRPC_ERROR_NONE;
993
543
 
994
- grpc_core::LoadBalancingPolicy::PickState pick;
544
+ grpc_core::ManualConstructor<grpc_core::RequestRouter::Request> request;
545
+ bool have_request = false;
995
546
  grpc_closure pick_closure;
996
- grpc_closure pick_cancel_closure;
997
547
 
998
548
  grpc_polling_entity* pollent = nullptr;
999
- bool pollent_added_to_interested_parties = false;
1000
549
 
1001
550
  // Batches are added to this list when received from above.
1002
551
  // They are removed when we are done handling the batch (i.e., when
@@ -1046,6 +595,7 @@ struct call_data {
1046
595
  grpc_linked_mdelem* send_trailing_metadata_storage = nullptr;
1047
596
  grpc_metadata_batch send_trailing_metadata;
1048
597
  };
598
+
1049
599
  } // namespace
1050
600
 
1051
601
  // Forward declarations.
@@ -1448,8 +998,9 @@ static void do_retry(grpc_call_element* elem,
1448
998
  "client_channel_call_retry");
1449
999
  calld->subchannel_call = nullptr;
1450
1000
  }
1451
- if (calld->pick.connected_subchannel != nullptr) {
1452
- calld->pick.connected_subchannel.reset();
1001
+ if (calld->have_request) {
1002
+ calld->have_request = false;
1003
+ calld->request.Destroy();
1453
1004
  }
1454
1005
  // Compute backoff delay.
1455
1006
  grpc_millis next_attempt_time;
@@ -1598,6 +1149,7 @@ static bool maybe_retry(grpc_call_element* elem,
1598
1149
  //
1599
1150
 
1600
1151
  namespace {
1152
+
1601
1153
  subchannel_batch_data::subchannel_batch_data(grpc_call_element* elem,
1602
1154
  call_data* calld, int refcount,
1603
1155
  bool set_on_complete)
@@ -1638,6 +1190,7 @@ void subchannel_batch_data::destroy() {
1638
1190
  call_data* calld = static_cast<call_data*>(elem->call_data);
1639
1191
  GRPC_CALL_STACK_UNREF(calld->owning_call, "batch_data");
1640
1192
  }
1193
+
1641
1194
  } // namespace
1642
1195
 
1643
1196
  // Creates a subchannel_batch_data object on the call's arena with the
@@ -2654,17 +2207,18 @@ static void create_subchannel_call(grpc_call_element* elem, grpc_error* error) {
2654
2207
  const size_t parent_data_size =
2655
2208
  calld->enable_retries ? sizeof(subchannel_call_retry_state) : 0;
2656
2209
  const grpc_core::ConnectedSubchannel::CallArgs call_args = {
2657
- calld->pollent, // pollent
2658
- calld->path, // path
2659
- calld->call_start_time, // start_time
2660
- calld->deadline, // deadline
2661
- calld->arena, // arena
2662
- calld->pick.subchannel_call_context, // context
2663
- calld->call_combiner, // call_combiner
2664
- parent_data_size // parent_data_size
2210
+ calld->pollent, // pollent
2211
+ calld->path, // path
2212
+ calld->call_start_time, // start_time
2213
+ calld->deadline, // deadline
2214
+ calld->arena, // arena
2215
+ calld->request->pick()->subchannel_call_context, // context
2216
+ calld->call_combiner, // call_combiner
2217
+ parent_data_size // parent_data_size
2665
2218
  };
2666
- grpc_error* new_error = calld->pick.connected_subchannel->CreateCall(
2667
- call_args, &calld->subchannel_call);
2219
+ grpc_error* new_error =
2220
+ calld->request->pick()->connected_subchannel->CreateCall(
2221
+ call_args, &calld->subchannel_call);
2668
2222
  if (grpc_client_channel_trace.enabled()) {
2669
2223
  gpr_log(GPR_INFO, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
2670
2224
  chand, calld, calld->subchannel_call, grpc_error_string(new_error));
@@ -2676,7 +2230,8 @@ static void create_subchannel_call(grpc_call_element* elem, grpc_error* error) {
2676
2230
  if (parent_data_size > 0) {
2677
2231
  new (grpc_connected_subchannel_call_get_parent_data(
2678
2232
  calld->subchannel_call))
2679
- subchannel_call_retry_state(calld->pick.subchannel_call_context);
2233
+ subchannel_call_retry_state(
2234
+ calld->request->pick()->subchannel_call_context);
2680
2235
  }
2681
2236
  pending_batches_resume(elem);
2682
2237
  }
@@ -2688,7 +2243,7 @@ static void pick_done(void* arg, grpc_error* error) {
2688
2243
  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
2689
2244
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
2690
2245
  call_data* calld = static_cast<call_data*>(elem->call_data);
2691
- if (GPR_UNLIKELY(calld->pick.connected_subchannel == nullptr)) {
2246
+ if (GPR_UNLIKELY(calld->request->pick()->connected_subchannel == nullptr)) {
2692
2247
  // Failed to create subchannel.
2693
2248
  // If there was no error, this is an LB policy drop, in which case
2694
2249
  // we return an error; otherwise, we may retry.
@@ -2717,135 +2272,27 @@ static void pick_done(void* arg, grpc_error* error) {
2717
2272
  }
2718
2273
  }
2719
2274
 
2720
- static void maybe_add_call_to_channel_interested_parties_locked(
2721
- grpc_call_element* elem) {
2722
- channel_data* chand = static_cast<channel_data*>(elem->channel_data);
2723
- call_data* calld = static_cast<call_data*>(elem->call_data);
2724
- if (!calld->pollent_added_to_interested_parties) {
2725
- calld->pollent_added_to_interested_parties = true;
2726
- grpc_polling_entity_add_to_pollset_set(calld->pollent,
2727
- chand->interested_parties);
2728
- }
2729
- }
2730
-
2731
- static void maybe_del_call_from_channel_interested_parties_locked(
2732
- grpc_call_element* elem) {
2275
+ // If the channel is in TRANSIENT_FAILURE and the call is not
2276
+ // wait_for_ready=true, fails the call and returns true.
2277
+ static bool fail_call_if_in_transient_failure(grpc_call_element* elem) {
2733
2278
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
2734
2279
  call_data* calld = static_cast<call_data*>(elem->call_data);
2735
- if (calld->pollent_added_to_interested_parties) {
2736
- calld->pollent_added_to_interested_parties = false;
2737
- grpc_polling_entity_del_from_pollset_set(calld->pollent,
2738
- chand->interested_parties);
2280
+ grpc_transport_stream_op_batch* batch = calld->pending_batches[0].batch;
2281
+ if (chand->request_router->GetConnectivityState() ==
2282
+ GRPC_CHANNEL_TRANSIENT_FAILURE &&
2283
+ (batch->payload->send_initial_metadata.send_initial_metadata_flags &
2284
+ GRPC_INITIAL_METADATA_WAIT_FOR_READY) == 0) {
2285
+ pending_batches_fail(
2286
+ elem,
2287
+ grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
2288
+ "channel is in state TRANSIENT_FAILURE"),
2289
+ GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE),
2290
+ true /* yield_call_combiner */);
2291
+ return true;
2739
2292
  }
2293
+ return false;
2740
2294
  }
2741
2295
 
2742
- // Invoked when a pick is completed to leave the client_channel combiner
2743
- // and continue processing in the call combiner.
2744
- // If needed, removes the call's polling entity from chand->interested_parties.
2745
- static void pick_done_locked(grpc_call_element* elem, grpc_error* error) {
2746
- call_data* calld = static_cast<call_data*>(elem->call_data);
2747
- maybe_del_call_from_channel_interested_parties_locked(elem);
2748
- GRPC_CLOSURE_INIT(&calld->pick_closure, pick_done, elem,
2749
- grpc_schedule_on_exec_ctx);
2750
- GRPC_CLOSURE_SCHED(&calld->pick_closure, error);
2751
- }
2752
-
2753
- namespace grpc_core {
2754
-
2755
- // Performs subchannel pick via LB policy.
2756
- class LbPicker {
2757
- public:
2758
- // Starts a pick on chand->lb_policy.
2759
- static void StartLocked(grpc_call_element* elem) {
2760
- channel_data* chand = static_cast<channel_data*>(elem->channel_data);
2761
- call_data* calld = static_cast<call_data*>(elem->call_data);
2762
- if (grpc_client_channel_trace.enabled()) {
2763
- gpr_log(GPR_INFO, "chand=%p calld=%p: starting pick on lb_policy=%p",
2764
- chand, calld, chand->lb_policy.get());
2765
- }
2766
- // If this is a retry, use the send_initial_metadata payload that
2767
- // we've cached; otherwise, use the pending batch. The
2768
- // send_initial_metadata batch will be the first pending batch in the
2769
- // list, as set by get_batch_index() above.
2770
- calld->pick.initial_metadata =
2771
- calld->seen_send_initial_metadata
2772
- ? &calld->send_initial_metadata
2773
- : calld->pending_batches[0]
2774
- .batch->payload->send_initial_metadata.send_initial_metadata;
2775
- calld->pick.initial_metadata_flags =
2776
- calld->seen_send_initial_metadata
2777
- ? calld->send_initial_metadata_flags
2778
- : calld->pending_batches[0]
2779
- .batch->payload->send_initial_metadata
2780
- .send_initial_metadata_flags;
2781
- GRPC_CLOSURE_INIT(&calld->pick_closure, &LbPicker::DoneLocked, elem,
2782
- grpc_combiner_scheduler(chand->combiner));
2783
- calld->pick.on_complete = &calld->pick_closure;
2784
- GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback");
2785
- grpc_error* error = GRPC_ERROR_NONE;
2786
- const bool pick_done = chand->lb_policy->PickLocked(&calld->pick, &error);
2787
- if (GPR_LIKELY(pick_done)) {
2788
- // Pick completed synchronously.
2789
- if (grpc_client_channel_trace.enabled()) {
2790
- gpr_log(GPR_INFO, "chand=%p calld=%p: pick completed synchronously",
2791
- chand, calld);
2792
- }
2793
- pick_done_locked(elem, error);
2794
- GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback");
2795
- } else {
2796
- // Pick will be returned asynchronously.
2797
- // Add the polling entity from call_data to the channel_data's
2798
- // interested_parties, so that the I/O of the LB policy can be done
2799
- // under it. It will be removed in pick_done_locked().
2800
- maybe_add_call_to_channel_interested_parties_locked(elem);
2801
- // Request notification on call cancellation.
2802
- GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback_cancel");
2803
- grpc_call_combiner_set_notify_on_cancel(
2804
- calld->call_combiner,
2805
- GRPC_CLOSURE_INIT(&calld->pick_cancel_closure,
2806
- &LbPicker::CancelLocked, elem,
2807
- grpc_combiner_scheduler(chand->combiner)));
2808
- }
2809
- }
2810
-
2811
- private:
2812
- // Callback invoked by LoadBalancingPolicy::PickLocked() for async picks.
2813
- // Unrefs the LB policy and invokes pick_done_locked().
2814
- static void DoneLocked(void* arg, grpc_error* error) {
2815
- grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
2816
- channel_data* chand = static_cast<channel_data*>(elem->channel_data);
2817
- call_data* calld = static_cast<call_data*>(elem->call_data);
2818
- if (grpc_client_channel_trace.enabled()) {
2819
- gpr_log(GPR_INFO, "chand=%p calld=%p: pick completed asynchronously",
2820
- chand, calld);
2821
- }
2822
- pick_done_locked(elem, GRPC_ERROR_REF(error));
2823
- GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback");
2824
- }
2825
-
2826
- // Note: This runs under the client_channel combiner, but will NOT be
2827
- // holding the call combiner.
2828
- static void CancelLocked(void* arg, grpc_error* error) {
2829
- grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
2830
- channel_data* chand = static_cast<channel_data*>(elem->channel_data);
2831
- call_data* calld = static_cast<call_data*>(elem->call_data);
2832
- // Note: chand->lb_policy may have changed since we started our pick,
2833
- // in which case we will be cancelling the pick on a policy other than
2834
- // the one we started it on. However, this will just be a no-op.
2835
- if (GPR_UNLIKELY(error != GRPC_ERROR_NONE && chand->lb_policy != nullptr)) {
2836
- if (grpc_client_channel_trace.enabled()) {
2837
- gpr_log(GPR_INFO,
2838
- "chand=%p calld=%p: cancelling pick from LB policy %p", chand,
2839
- calld, chand->lb_policy.get());
2840
- }
2841
- chand->lb_policy->CancelPickLocked(&calld->pick, GRPC_ERROR_REF(error));
2842
- }
2843
- GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback_cancel");
2844
- }
2845
- };
2846
-
2847
- } // namespace grpc_core
2848
-
2849
2296
  // Applies service config to the call. Must be invoked once we know
2850
2297
  // that the resolver has returned results to the channel.
2851
2298
  static void apply_service_config_to_call_locked(grpc_call_element* elem) {
@@ -2902,224 +2349,66 @@ static void apply_service_config_to_call_locked(grpc_call_element* elem) {
2902
2349
  }
2903
2350
  }
2904
2351
 
2905
- // If the channel is in TRANSIENT_FAILURE and the call is not
2906
- // wait_for_ready=true, fails the call and returns true.
2907
- static bool fail_call_if_in_transient_failure(grpc_call_element* elem) {
2908
- channel_data* chand = static_cast<channel_data*>(elem->channel_data);
2909
- call_data* calld = static_cast<call_data*>(elem->call_data);
2910
- grpc_transport_stream_op_batch* batch = calld->pending_batches[0].batch;
2911
- if (grpc_connectivity_state_check(&chand->state_tracker) ==
2912
- GRPC_CHANNEL_TRANSIENT_FAILURE &&
2913
- (batch->payload->send_initial_metadata.send_initial_metadata_flags &
2914
- GRPC_INITIAL_METADATA_WAIT_FOR_READY) == 0) {
2915
- pending_batches_fail(
2916
- elem,
2917
- grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
2918
- "channel is in state TRANSIENT_FAILURE"),
2919
- GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE),
2920
- true /* yield_call_combiner */);
2921
- return true;
2922
- }
2923
- return false;
2924
- }
2925
-
2926
2352
  // Invoked once resolver results are available.
2927
- static void process_service_config_and_start_lb_pick_locked(
2928
- grpc_call_element* elem) {
2353
+ static bool maybe_apply_service_config_to_call_locked(void* arg) {
2354
+ grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
2929
2355
  call_data* calld = static_cast<call_data*>(elem->call_data);
2930
2356
  // Only get service config data on the first attempt.
2931
2357
  if (GPR_LIKELY(calld->num_attempts_completed == 0)) {
2932
2358
  apply_service_config_to_call_locked(elem);
2933
2359
  // Check this after applying service config, since it may have
2934
2360
  // affected the call's wait_for_ready value.
2935
- if (fail_call_if_in_transient_failure(elem)) return;
2361
+ if (fail_call_if_in_transient_failure(elem)) return false;
2936
2362
  }
2937
- // Start LB pick.
2938
- grpc_core::LbPicker::StartLocked(elem);
2363
+ return true;
2939
2364
  }
2940
2365
 
2941
- namespace grpc_core {
2942
-
2943
- // Handles waiting for a resolver result.
2944
- // Used only for the first call on an idle channel.
2945
- class ResolverResultWaiter {
2946
- public:
2947
- explicit ResolverResultWaiter(grpc_call_element* elem) : elem_(elem) {
2948
- channel_data* chand = static_cast<channel_data*>(elem->channel_data);
2949
- call_data* calld = static_cast<call_data*>(elem->call_data);
2950
- if (grpc_client_channel_trace.enabled()) {
2951
- gpr_log(GPR_INFO,
2952
- "chand=%p calld=%p: deferring pick pending resolver result",
2953
- chand, calld);
2954
- }
2955
- // Add closure to be run when a resolver result is available.
2956
- GRPC_CLOSURE_INIT(&done_closure_, &ResolverResultWaiter::DoneLocked, this,
2957
- grpc_combiner_scheduler(chand->combiner));
2958
- AddToWaitingList();
2959
- // Set cancellation closure, so that we abort if the call is cancelled.
2960
- GRPC_CLOSURE_INIT(&cancel_closure_, &ResolverResultWaiter::CancelLocked,
2961
- this, grpc_combiner_scheduler(chand->combiner));
2962
- grpc_call_combiner_set_notify_on_cancel(calld->call_combiner,
2963
- &cancel_closure_);
2964
- }
2965
-
2966
- private:
2967
- // Adds closure_ to chand->waiting_for_resolver_result_closures.
2968
- void AddToWaitingList() {
2969
- channel_data* chand = static_cast<channel_data*>(elem_->channel_data);
2970
- grpc_closure_list_append(&chand->waiting_for_resolver_result_closures,
2971
- &done_closure_, GRPC_ERROR_NONE);
2972
- }
2973
-
2974
- // Invoked when a resolver result is available.
2975
- static void DoneLocked(void* arg, grpc_error* error) {
2976
- ResolverResultWaiter* self = static_cast<ResolverResultWaiter*>(arg);
2977
- // If CancelLocked() has already run, delete ourselves without doing
2978
- // anything. Note that the call stack may have already been destroyed,
2979
- // so it's not safe to access anything in elem_.
2980
- if (GPR_UNLIKELY(self->finished_)) {
2981
- if (grpc_client_channel_trace.enabled()) {
2982
- gpr_log(GPR_INFO, "call cancelled before resolver result");
2983
- }
2984
- Delete(self);
2985
- return;
2986
- }
2987
- // Otherwise, process the resolver result.
2988
- grpc_call_element* elem = self->elem_;
2989
- channel_data* chand = static_cast<channel_data*>(elem->channel_data);
2990
- call_data* calld = static_cast<call_data*>(elem->call_data);
2991
- if (GPR_UNLIKELY(error != GRPC_ERROR_NONE)) {
2992
- if (grpc_client_channel_trace.enabled()) {
2993
- gpr_log(GPR_INFO, "chand=%p calld=%p: resolver failed to return data",
2994
- chand, calld);
2995
- }
2996
- pick_done_locked(elem, GRPC_ERROR_REF(error));
2997
- } else if (GPR_UNLIKELY(chand->resolver == nullptr)) {
2998
- // Shutting down.
2999
- if (grpc_client_channel_trace.enabled()) {
3000
- gpr_log(GPR_INFO, "chand=%p calld=%p: resolver disconnected", chand,
3001
- calld);
3002
- }
3003
- pick_done_locked(elem,
3004
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
3005
- } else if (GPR_UNLIKELY(chand->lb_policy == nullptr)) {
3006
- // Transient resolver failure.
3007
- // If call has wait_for_ready=true, try again; otherwise, fail.
3008
- uint32_t send_initial_metadata_flags =
3009
- calld->seen_send_initial_metadata
3010
- ? calld->send_initial_metadata_flags
3011
- : calld->pending_batches[0]
3012
- .batch->payload->send_initial_metadata
3013
- .send_initial_metadata_flags;
3014
- if (send_initial_metadata_flags & GRPC_INITIAL_METADATA_WAIT_FOR_READY) {
3015
- if (grpc_client_channel_trace.enabled()) {
3016
- gpr_log(GPR_INFO,
3017
- "chand=%p calld=%p: resolver returned but no LB policy; "
3018
- "wait_for_ready=true; trying again",
3019
- chand, calld);
3020
- }
3021
- // Re-add ourselves to the waiting list.
3022
- self->AddToWaitingList();
3023
- // Return early so that we don't set finished_ to true below.
3024
- return;
3025
- } else {
3026
- if (grpc_client_channel_trace.enabled()) {
3027
- gpr_log(GPR_INFO,
3028
- "chand=%p calld=%p: resolver returned but no LB policy; "
3029
- "wait_for_ready=false; failing",
3030
- chand, calld);
3031
- }
3032
- pick_done_locked(
3033
- elem,
3034
- grpc_error_set_int(
3035
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("Name resolution failure"),
3036
- GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE));
3037
- }
3038
- } else {
3039
- if (grpc_client_channel_trace.enabled()) {
3040
- gpr_log(GPR_INFO, "chand=%p calld=%p: resolver returned, doing LB pick",
3041
- chand, calld);
3042
- }
3043
- process_service_config_and_start_lb_pick_locked(elem);
3044
- }
3045
- self->finished_ = true;
3046
- }
3047
-
3048
- // Invoked when the call is cancelled.
3049
- // Note: This runs under the client_channel combiner, but will NOT be
3050
- // holding the call combiner.
3051
- static void CancelLocked(void* arg, grpc_error* error) {
3052
- ResolverResultWaiter* self = static_cast<ResolverResultWaiter*>(arg);
3053
- // If DoneLocked() has already run, delete ourselves without doing anything.
3054
- if (GPR_LIKELY(self->finished_)) {
3055
- Delete(self);
3056
- return;
3057
- }
3058
- // If we are being cancelled, immediately invoke pick_done_locked()
3059
- // to propagate the error back to the caller.
3060
- if (GPR_UNLIKELY(error != GRPC_ERROR_NONE)) {
3061
- grpc_call_element* elem = self->elem_;
3062
- channel_data* chand = static_cast<channel_data*>(elem->channel_data);
3063
- call_data* calld = static_cast<call_data*>(elem->call_data);
3064
- if (grpc_client_channel_trace.enabled()) {
3065
- gpr_log(GPR_INFO,
3066
- "chand=%p calld=%p: cancelling call waiting for name "
3067
- "resolution",
3068
- chand, calld);
3069
- }
3070
- // Note: Although we are not in the call combiner here, we are
3071
- // basically stealing the call combiner from the pending pick, so
3072
- // it's safe to call pick_done_locked() here -- we are essentially
3073
- // calling it here instead of calling it in DoneLocked().
3074
- pick_done_locked(elem, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
3075
- "Pick cancelled", &error, 1));
3076
- }
3077
- self->finished_ = true;
3078
- }
3079
-
3080
- grpc_call_element* elem_;
3081
- grpc_closure done_closure_;
3082
- grpc_closure cancel_closure_;
3083
- bool finished_ = false;
3084
- };
3085
-
3086
- } // namespace grpc_core
3087
-
3088
2366
  static void start_pick_locked(void* arg, grpc_error* ignored) {
3089
2367
  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
3090
2368
  call_data* calld = static_cast<call_data*>(elem->call_data);
3091
2369
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
3092
- GPR_ASSERT(calld->pick.connected_subchannel == nullptr);
2370
+ GPR_ASSERT(!calld->have_request);
3093
2371
  GPR_ASSERT(calld->subchannel_call == nullptr);
3094
- if (GPR_LIKELY(chand->lb_policy != nullptr)) {
3095
- // We already have resolver results, so process the service config
3096
- // and start an LB pick.
3097
- process_service_config_and_start_lb_pick_locked(elem);
3098
- } else if (GPR_UNLIKELY(chand->resolver == nullptr)) {
3099
- pick_done_locked(elem,
3100
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
3101
- } else {
3102
- // We do not yet have an LB policy, so wait for a resolver result.
3103
- if (GPR_UNLIKELY(!chand->started_resolving)) {
3104
- start_resolving_locked(chand);
3105
- } else {
3106
- // Normally, we want to do this check in
3107
- // process_service_config_and_start_lb_pick_locked(), so that we
3108
- // can honor the wait_for_ready setting in the service config.
3109
- // However, if the channel is in TRANSIENT_FAILURE at this point, that
3110
- // means that the resolver has returned a failure, so we're not going
3111
- // to get a service config right away. In that case, we fail the
3112
- // call now based on the wait_for_ready value passed in from the
3113
- // application.
3114
- if (fail_call_if_in_transient_failure(elem)) return;
3115
- }
3116
- // Create a new waiter, which will delete itself when done.
3117
- grpc_core::New<grpc_core::ResolverResultWaiter>(elem);
3118
- // Add the polling entity from call_data to the channel_data's
3119
- // interested_parties, so that the I/O of the resolver can be done
3120
- // under it. It will be removed in pick_done_locked().
3121
- maybe_add_call_to_channel_interested_parties_locked(elem);
2372
+ // Normally, we want to do this check until after we've processed the
2373
+ // service config, so that we can honor the wait_for_ready setting in
2374
+ // the service config. However, if the channel is in TRANSIENT_FAILURE
2375
+ // and we don't have an LB policy at this point, that means that the
2376
+ // resolver has returned a failure, so we're not going to get a service
2377
+ // config right away. In that case, we fail the call now based on the
2378
+ // wait_for_ready value passed in from the application.
2379
+ if (chand->request_router->lb_policy() == nullptr &&
2380
+ fail_call_if_in_transient_failure(elem)) {
2381
+ return;
3122
2382
  }
2383
+ // If this is a retry, use the send_initial_metadata payload that
2384
+ // we've cached; otherwise, use the pending batch. The
2385
+ // send_initial_metadata batch will be the first pending batch in the
2386
+ // list, as set by get_batch_index() above.
2387
+ // TODO(roth): What if the LB policy needs to add something to the
2388
+ // call's initial metadata, and then there's a retry? We don't want
2389
+ // the new metadata to be added twice. We might need to somehow
2390
+ // allocate the subchannel batch earlier so that we can give the
2391
+ // subchannel's copy of the metadata batch (which is copied for each
2392
+ // attempt) to the LB policy instead the one from the parent channel.
2393
+ grpc_metadata_batch* initial_metadata =
2394
+ calld->seen_send_initial_metadata
2395
+ ? &calld->send_initial_metadata
2396
+ : calld->pending_batches[0]
2397
+ .batch->payload->send_initial_metadata.send_initial_metadata;
2398
+ uint32_t* initial_metadata_flags =
2399
+ calld->seen_send_initial_metadata
2400
+ ? &calld->send_initial_metadata_flags
2401
+ : &calld->pending_batches[0]
2402
+ .batch->payload->send_initial_metadata
2403
+ .send_initial_metadata_flags;
2404
+ GRPC_CLOSURE_INIT(&calld->pick_closure, pick_done, elem,
2405
+ grpc_schedule_on_exec_ctx);
2406
+ calld->request.Init(calld->owning_call, calld->call_combiner, calld->pollent,
2407
+ initial_metadata, initial_metadata_flags,
2408
+ maybe_apply_service_config_to_call_locked, elem,
2409
+ &calld->pick_closure);
2410
+ calld->have_request = true;
2411
+ chand->request_router->RouteCallLocked(calld->request.get());
3123
2412
  }
3124
2413
 
3125
2414
  //
@@ -3259,23 +2548,10 @@ const grpc_channel_filter grpc_client_channel_filter = {
3259
2548
  "client-channel",
3260
2549
  };
3261
2550
 
3262
- static void try_to_connect_locked(void* arg, grpc_error* error_ignored) {
3263
- channel_data* chand = static_cast<channel_data*>(arg);
3264
- if (chand->lb_policy != nullptr) {
3265
- chand->lb_policy->ExitIdleLocked();
3266
- } else {
3267
- chand->exit_idle_when_lb_policy_arrives = true;
3268
- if (!chand->started_resolving && chand->resolver != nullptr) {
3269
- start_resolving_locked(chand);
3270
- }
3271
- }
3272
- GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "try_to_connect");
3273
- }
3274
-
3275
2551
  void grpc_client_channel_set_channelz_node(
3276
2552
  grpc_channel_element* elem, grpc_core::channelz::ClientChannelNode* node) {
3277
2553
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
3278
- chand->channelz_channel = node;
2554
+ chand->request_router->set_channelz_node(node);
3279
2555
  }
3280
2556
 
3281
2557
  void grpc_client_channel_populate_child_refs(
@@ -3283,17 +2559,22 @@ void grpc_client_channel_populate_child_refs(
3283
2559
  grpc_core::channelz::ChildRefsList* child_subchannels,
3284
2560
  grpc_core::channelz::ChildRefsList* child_channels) {
3285
2561
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
3286
- if (chand->lb_policy != nullptr) {
3287
- chand->lb_policy->FillChildRefsForChannelz(child_subchannels,
3288
- child_channels);
2562
+ if (chand->request_router->lb_policy() != nullptr) {
2563
+ chand->request_router->lb_policy()->FillChildRefsForChannelz(
2564
+ child_subchannels, child_channels);
3289
2565
  }
3290
2566
  }
3291
2567
 
2568
+ static void try_to_connect_locked(void* arg, grpc_error* error_ignored) {
2569
+ channel_data* chand = static_cast<channel_data*>(arg);
2570
+ chand->request_router->ExitIdleLocked();
2571
+ GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "try_to_connect");
2572
+ }
2573
+
3292
2574
  grpc_connectivity_state grpc_client_channel_check_connectivity_state(
3293
2575
  grpc_channel_element* elem, int try_to_connect) {
3294
2576
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
3295
- grpc_connectivity_state out =
3296
- grpc_connectivity_state_check(&chand->state_tracker);
2577
+ grpc_connectivity_state out = chand->request_router->GetConnectivityState();
3297
2578
  if (out == GRPC_CHANNEL_IDLE && try_to_connect) {
3298
2579
  GRPC_CHANNEL_STACK_REF(chand->owning_stack, "try_to_connect");
3299
2580
  GRPC_CLOSURE_SCHED(
@@ -3338,19 +2619,19 @@ static void external_connectivity_watcher_list_append(
3338
2619
  }
3339
2620
 
3340
2621
  static void external_connectivity_watcher_list_remove(
3341
- channel_data* chand, external_connectivity_watcher* too_remove) {
2622
+ channel_data* chand, external_connectivity_watcher* to_remove) {
3342
2623
  GPR_ASSERT(
3343
- lookup_external_connectivity_watcher(chand, too_remove->on_complete));
2624
+ lookup_external_connectivity_watcher(chand, to_remove->on_complete));
3344
2625
  gpr_mu_lock(&chand->external_connectivity_watcher_list_mu);
3345
- if (too_remove == chand->external_connectivity_watcher_list_head) {
3346
- chand->external_connectivity_watcher_list_head = too_remove->next;
2626
+ if (to_remove == chand->external_connectivity_watcher_list_head) {
2627
+ chand->external_connectivity_watcher_list_head = to_remove->next;
3347
2628
  gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu);
3348
2629
  return;
3349
2630
  }
3350
2631
  external_connectivity_watcher* w =
3351
2632
  chand->external_connectivity_watcher_list_head;
3352
2633
  while (w != nullptr) {
3353
- if (w->next == too_remove) {
2634
+ if (w->next == to_remove) {
3354
2635
  w->next = w->next->next;
3355
2636
  gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu);
3356
2637
  return;
@@ -3402,15 +2683,15 @@ static void watch_connectivity_state_locked(void* arg,
3402
2683
  GRPC_CLOSURE_RUN(w->watcher_timer_init, GRPC_ERROR_NONE);
3403
2684
  GRPC_CLOSURE_INIT(&w->my_closure, on_external_watch_complete_locked, w,
3404
2685
  grpc_combiner_scheduler(w->chand->combiner));
3405
- grpc_connectivity_state_notify_on_state_change(&w->chand->state_tracker,
3406
- w->state, &w->my_closure);
2686
+ w->chand->request_router->NotifyOnConnectivityStateChange(w->state,
2687
+ &w->my_closure);
3407
2688
  } else {
3408
2689
  GPR_ASSERT(w->watcher_timer_init == nullptr);
3409
2690
  found = lookup_external_connectivity_watcher(w->chand, w->on_complete);
3410
2691
  if (found) {
3411
2692
  GPR_ASSERT(found->on_complete == w->on_complete);
3412
- grpc_connectivity_state_notify_on_state_change(
3413
- &found->chand->state_tracker, nullptr, &found->my_closure);
2693
+ found->chand->request_router->NotifyOnConnectivityStateChange(
2694
+ nullptr, &found->my_closure);
3414
2695
  }
3415
2696
  grpc_polling_entity_del_from_pollset_set(&w->pollent,
3416
2697
  w->chand->interested_parties);