grpc 1.66.0.pre3 → 1.66.0.pre5

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: e52dc0f3d86134aa54814feb69ada6c3fa30f1627f3fecfa8fb2eda3b8e413a6
- data.tar.gz: '08083c099539b4d8c3712344e644cf14017503900a3a450ec045a996fcb17a85'
+ metadata.gz: 03cfd5fbb381ec7182d27851262101658df9448007bb3c8237b6ff2911898218
+ data.tar.gz: 2763220f4965b12a8f7f08fbb859a0262dc0986eb11138ddaf2570880d87f385
  SHA512:
- metadata.gz: 3bfe425204ec77309d3c969b8c7cb3aefbf4739120edb11e9a2c1343b8a74c6579792f3fc67d123f1bd7e743578b58e47334b0410b7fa1f8aa4cfa987b181b8d
- data.tar.gz: 62bc4b19bc3527c0975a58302f77d37fd3caf06f98333b14307d0ca429be04e3f165c7f61a3eabf591efafe0554f55e035e9058106613003ee42e74c4ada6c5f
+ metadata.gz: 1e46d31fffa792277c414994d6164d6e8700fca2d3ad62df09c29cf80406568bcc64366c2802319b4e995a05038a6f5fb03cbb06362d2b69399a56bfa45066de
+ data.tar.gz: 54400a7f795636e459e0bf3eedad2c0bf992a4558bb1bbc27700fd573132c16cd3064b85c2853f5c358794916152f844410ba778fcc46b157c90ac5c629c848e
data/Makefile CHANGED
@@ -368,7 +368,7 @@ Q = @
  endif

  CORE_VERSION = 43.0.0
- CPP_VERSION = 1.66.0-pre3
+ CPP_VERSION = 1.66.0-pre5

  CPPFLAGS_NO_ARCH += $(addprefix -I, $(INCLUDES)) $(addprefix -D, $(DEFINES))
  CPPFLAGS += $(CPPFLAGS_NO_ARCH) $(ARCH_FLAGS)
@@ -108,7 +108,6 @@ struct secure_endpoint : public grpc_endpoint {
  }

  ~secure_endpoint() {
- memory_owner.Reset();
  tsi_frame_protector_destroy(protector);
  tsi_zero_copy_grpc_protector_destroy(zero_copy_protector);
  grpc_slice_buffer_destroy(&source_buffer);
@@ -254,6 +253,13 @@ static void on_read(void* user_data, grpc_error_handle error) {

  {
  grpc_core::MutexLock l(&ep->read_mu);
+
+ // If we were shut down after this callback was scheduled with OK
+ // status but before it was invoked, we need to treat that as an error.
+ if (ep->wrapped_ep == nullptr && error.ok()) {
+ error = absl::CancelledError("secure endpoint shutdown");
+ }
+
  uint8_t* cur = GRPC_SLICE_START_PTR(ep->read_staging_buffer);
  uint8_t* end = GRPC_SLICE_END_PTR(ep->read_staging_buffer);

@@ -380,9 +386,12 @@ static void flush_write_staging_buffer(secure_endpoint* ep, uint8_t** cur,

  static void on_write(void* user_data, grpc_error_handle error) {
  secure_endpoint* ep = static_cast<secure_endpoint*>(user_data);
- grpc_core::ExecCtx::Run(DEBUG_LOCATION, std::exchange(ep->write_cb, nullptr),
- std::move(error));
+ grpc_closure* cb = ep->write_cb;
+ ep->write_cb = nullptr;
  SECURE_ENDPOINT_UNREF(ep, "write");
+ grpc_core::EnsureRunInExecCtx([cb, error = std::move(error)]() {
+ grpc_core::Closure::Run(DEBUG_LOCATION, cb, error);
+ });
  }

  static void endpoint_write(grpc_endpoint* secure_ep, grpc_slice_buffer* slices,
@@ -503,7 +512,10 @@ static void endpoint_write(grpc_endpoint* secure_ep, grpc_slice_buffer* slices,

  static void endpoint_destroy(grpc_endpoint* secure_ep) {
  secure_endpoint* ep = reinterpret_cast<secure_endpoint*>(secure_ep);
+ ep->read_mu.Lock();
  ep->wrapped_ep.reset();
+ ep->memory_owner.Reset();
+ ep->read_mu.Unlock();
  SECURE_ENDPOINT_UNREF(ep, "destroy");
  }

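The three hunks above close a race between a read callback that was already scheduled with an OK status and destruction of the endpoint: the destroy path now releases the wrapped endpoint and memory owner under `read_mu`, and `on_read` re-checks that state under the same mutex, downgrading OK to a cancellation. The following is a minimal standalone sketch of that pattern using plain `std::mutex` and hypothetical names, not gRPC's internal types:

```cpp
#include <mutex>
#include <optional>
#include <string>

// Illustrative sketch only; the class, members, and error type here are
// hypothetical stand-ins for the endpoint, mutex, and status used above.
class Endpoint {
 public:
  // Destroy path: release resources under the same mutex the read path uses.
  void Destroy() {
    std::lock_guard<std::mutex> lock(read_mu_);
    shut_down_ = true;  // stands in for resetting the wrapped endpoint/memory owner
  }

  // Read callback, possibly scheduled with "no error" before Destroy() ran.
  void OnRead(std::optional<std::string> error) {
    std::lock_guard<std::mutex> lock(read_mu_);
    if (shut_down_ && !error.has_value()) {
      // Scheduled as OK, but the endpoint is gone: surface a cancellation
      // instead of touching freed buffers.
      error = "endpoint shutdown";
    }
    if (error.has_value()) return;  // report the error to the caller
    // ...otherwise process the read into the staging buffer as usual.
  }

 private:
  std::mutex read_mu_;
  bool shut_down_ = false;
};
```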
@@ -497,6 +497,13 @@ void grpc_call_tracer_set(grpc_call* call,
  return arena->SetContext<grpc_core::CallTracerAnnotationInterface>(tracer);
  }

+ void grpc_call_tracer_set_and_manage(grpc_call* call,
+ grpc_core::ClientCallTracer* tracer) {
+ grpc_core::Arena* arena = grpc_call_get_arena(call);
+ arena->ManagedNew<ClientCallTracerWrapper>(tracer);
+ return arena->SetContext<grpc_core::CallTracerAnnotationInterface>(tracer);
+ }
+
  void* grpc_call_tracer_get(grpc_call* call) {
  grpc_core::Arena* arena = grpc_call_get_arena(call);
  auto* call_tracer =
@@ -265,6 +265,16 @@ void grpc_call_log_batch(const char* file, int line, const grpc_op* ops,

  void grpc_call_tracer_set(grpc_call* call, grpc_core::ClientCallTracer* tracer);

+ // Sets call tracer on the call and manages its life by using the call's arena.
+ // When using this API, the tracer will be destroyed by grpc_call arena when
+ // grpc_call is about to be destroyed. The caller of this API SHOULD NOT
+ // manually destroy the tracer. This API is used by Python as a way of using
+ // Arena to manage the lifetime of the call tracer. Python needs this API
+ // because the tracer was created within a separate shared object library which
+ // doesn't have access to core functions like arena->ManagedNew<>.
+ void grpc_call_tracer_set_and_manage(grpc_call* call,
+ grpc_core::ClientCallTracer* tracer);
+
  void* grpc_call_tracer_get(grpc_call* call);

  #define GRPC_CALL_LOG_BATCH(ops, nops) \
@@ -276,6 +286,15 @@ void* grpc_call_tracer_get(grpc_call* call);

  uint8_t grpc_call_is_client(grpc_call* call);

+ class ClientCallTracerWrapper {
+ public:
+ explicit ClientCallTracerWrapper(grpc_core::ClientCallTracer* tracer)
+ : tracer_(tracer) {}
+
+ private:
+ std::unique_ptr<grpc_core::ClientCallTracer> tracer_;
+ };
+
  // Return an appropriate compression algorithm for the requested compression \a
  // level in the context of \a call.
  grpc_compression_algorithm grpc_call_compression_for_level(
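The header comment above describes the intended usage of the new API: the caller allocates a `ClientCallTracer`, hands it to `grpc_call_tracer_set_and_manage`, and never deletes it, because the arena-owned `ClientCallTracerWrapper` frees it when the call's arena is torn down. Below is a self-contained sketch of that ownership handoff with a toy arena and tracer type; everything here is hypothetical except the wrapper's shape:

```cpp
#include <functional>
#include <iostream>
#include <memory>
#include <utility>
#include <vector>

struct Tracer {
  ~Tracer() { std::cout << "tracer destroyed with the call\n"; }
};

// Toy arena: runs the destructor of every ManagedNew object at teardown,
// roughly the behavior the real call arena provides via ManagedNew<>.
class ToyArena {
 public:
  ~ToyArena() {
    for (auto& destroy : destructors_) destroy();
  }
  template <typename T, typename... Args>
  T* ManagedNew(Args&&... args) {
    T* obj = new T(std::forward<Args>(args)...);
    destructors_.push_back([obj] { delete obj; });
    return obj;
  }

 private:
  std::vector<std::function<void()>> destructors_;
};

// Mirrors ClientCallTracerWrapper above: owning the raw pointer in a
// unique_ptr means deleting the wrapper deletes the tracer.
class TracerWrapper {
 public:
  explicit TracerWrapper(Tracer* tracer) : tracer_(tracer) {}

 private:
  std::unique_ptr<Tracer> tracer_;
};

int main() {
  ToyArena arena;               // stands in for the call's arena
  Tracer* tracer = new Tracer;  // allocated by the caller (e.g. the Python layer)
  arena.ManagedNew<TracerWrapper>(tracer);
  // The caller must NOT delete `tracer`; it is freed when `arena` is destroyed.
}
```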
@@ -353,7 +353,8 @@ class RlsLb final : public LoadBalancingPolicy {
  // is called after releasing it.
  //
  // Both methods grab the data they need from the parent object.
- void StartUpdate() ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_);
+ void StartUpdate(OrphanablePtr<ChildPolicyHandler>* child_policy_to_delete)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_);
  absl::Status MaybeFinishUpdate() ABSL_LOCKS_EXCLUDED(&RlsLb::mu_);

  void ExitIdleLocked() {
@@ -397,14 +398,14 @@ class RlsLb final : public LoadBalancingPolicy {
  };

  // Note: We are forced to disable lock analysis here because
- // Orphan() is called by Unref() which is called by RefCountedPtr<>, which
+ // Orphaned() is called by Unref() which is called by RefCountedPtr<>, which
  // cannot have lock annotations for this particular caller.
  void Orphaned() override ABSL_NO_THREAD_SAFETY_ANALYSIS;

  RefCountedPtr<RlsLb> lb_policy_;
  std::string target_;

- bool is_shutdown_ = false;
+ bool is_shutdown_ = false; // Protected by WorkSerializer

  OrphanablePtr<ChildPolicyHandler> child_policy_;
  RefCountedPtr<LoadBalancingPolicy::Config> pending_config_;
@@ -503,12 +504,25 @@ class RlsLb final : public LoadBalancingPolicy {
  // Returns a list of child policy wrappers on which FinishUpdate()
  // needs to be called after releasing the lock.
  std::vector<ChildPolicyWrapper*> OnRlsResponseLocked(
- ResponseInfo response, std::unique_ptr<BackOff> backoff_state)
+ ResponseInfo response, std::unique_ptr<BackOff> backoff_state,
+ OrphanablePtr<ChildPolicyHandler>* child_policy_to_delete)
  ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_);

  // Moves entry to the end of the LRU list.
  void MarkUsed() ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_);

+ // Takes entries from child_policy_wrappers_ and appends them to the end
+ // of \a child_policy_wrappers.
+ void TakeChildPolicyWrappers(
+ std::vector<RefCountedPtr<ChildPolicyWrapper>>* child_policy_wrappers)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_) {
+ child_policy_wrappers->insert(
+ child_policy_wrappers->end(),
+ std::make_move_iterator(child_policy_wrappers_.begin()),
+ std::make_move_iterator(child_policy_wrappers_.end()));
+ child_policy_wrappers_.clear();
+ }
+
  private:
  class BackoffTimer final : public InternallyRefCounted<BackoffTimer> {
  public:
@@ -566,19 +580,24 @@ class RlsLb final : public LoadBalancingPolicy {
  // the caller. Otherwise, the entry found is returned to the caller. The
  // entry returned to the user is considered recently used and its order in
  // the LRU list of the cache is updated.
- Entry* FindOrInsert(const RequestKey& key)
+ Entry* FindOrInsert(const RequestKey& key,
+ std::vector<RefCountedPtr<ChildPolicyWrapper>>*
+ child_policy_wrappers_to_delete)
  ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_);

  // Resizes the cache. If the new cache size is greater than the current size
  // of the cache, do nothing. Otherwise, evict the oldest entries that
  // exceed the new size limit of the cache.
- void Resize(size_t bytes) ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_);
+ void Resize(size_t bytes, std::vector<RefCountedPtr<ChildPolicyWrapper>>*
+ child_policy_wrappers_to_delete)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_);

  // Resets backoff of all the cache entries.
  void ResetAllBackoff() ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_);

  // Shutdown the cache; clean-up and orphan all the stored cache entries.
- void Shutdown() ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_);
+ GRPC_MUST_USE_RESULT std::vector<RefCountedPtr<ChildPolicyWrapper>>
+ Shutdown() ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_);

  void ReportMetricsLocked(CallbackMetricReporter& reporter)
  ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_);
@@ -594,7 +613,9 @@ class RlsLb final : public LoadBalancingPolicy {

  // Evicts oversized cache elements when the current size is greater than
  // the specified limit.
- void MaybeShrinkSize(size_t bytes)
+ void MaybeShrinkSize(size_t bytes,
+ std::vector<RefCountedPtr<ChildPolicyWrapper>>*
+ child_policy_wrappers_to_delete)
  ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_);

  RlsLb* lb_policy_;
@@ -859,7 +880,8 @@ absl::optional<Json> InsertOrUpdateChildPolicyField(const std::string& field,
  return Json::FromArray(std::move(array));
  }

- void RlsLb::ChildPolicyWrapper::StartUpdate() {
+ void RlsLb::ChildPolicyWrapper::StartUpdate(
+ OrphanablePtr<ChildPolicyHandler>* child_policy_to_delete) {
  ValidationErrors errors;
  auto child_policy_config = InsertOrUpdateChildPolicyField(
  lb_policy_->config_->child_policy_config_target_field_name(), target_,
@@ -884,7 +906,7 @@ void RlsLb::ChildPolicyWrapper::StartUpdate() {
  pending_config_.reset();
  picker_ = MakeRefCounted<TransientFailurePicker>(
  absl::UnavailableError(config.status().message()));
- child_policy_.reset();
+ *child_policy_to_delete = std::move(child_policy_);
  } else {
  pending_config_ = std::move(*config);
  }
@@ -939,9 +961,9 @@ void RlsLb::ChildPolicyWrapper::ChildPolicyHelper::UpdateState(
  << ": UpdateState(state=" << ConnectivityStateName(state)
  << ", status=" << status << ", picker=" << picker.get() << ")";
  }
+ if (wrapper_->is_shutdown_) return;
  {
  MutexLock lock(&wrapper_->lb_policy_->mu_);
- if (wrapper_->is_shutdown_) return;
  // TODO(roth): It looks like this ignores subsequent TF updates that
  // might change the status used to fail picks, which seems wrong.
  if (wrapper_->connectivity_state_ == GRPC_CHANNEL_TRANSIENT_FAILURE &&
@@ -951,7 +973,8 @@ void RlsLb::ChildPolicyWrapper::ChildPolicyHelper::UpdateState(
  wrapper_->connectivity_state_ = state;
  DCHECK(picker != nullptr);
  if (picker != nullptr) {
- wrapper_->picker_ = std::move(picker);
+ // We want to unref the picker after we release the lock.
+ wrapper_->picker_.swap(picker);
  }
  }
  wrapper_->lb_policy_->UpdatePickerLocked();
@@ -1204,19 +1227,19 @@ RlsLb::Cache::Entry::Entry(RefCountedPtr<RlsLb> lb_policy,
  lb_policy_->cache_.lru_list_.end(), key)) {}

  void RlsLb::Cache::Entry::Orphan() {
- if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
- LOG(INFO) << "[rlslb " << lb_policy_.get() << "] cache entry=" << this
- << " " << lru_iterator_->ToString() << ": cache entry evicted";
- }
+ // We should be holding RlsLB::mu_.
+ GRPC_TRACE_LOG(rls_lb, INFO)
+ << "[rlslb " << lb_policy_.get() << "] cache entry=" << this << " "
+ << lru_iterator_->ToString() << ": cache entry evicted";
  is_shutdown_ = true;
  lb_policy_->cache_.lru_list_.erase(lru_iterator_);
  lru_iterator_ = lb_policy_->cache_.lru_list_.end(); // Just in case.
+ CHECK(child_policy_wrappers_.empty());
  backoff_state_.reset();
  if (backoff_timer_ != nullptr) {
  backoff_timer_.reset();
  lb_policy_->UpdatePickerAsync();
  }
- child_policy_wrappers_.clear();
  Unref(DEBUG_LOCATION, "Orphan");
  }

@@ -1295,7 +1318,8 @@ void RlsLb::Cache::Entry::MarkUsed() {

  std::vector<RlsLb::ChildPolicyWrapper*>
  RlsLb::Cache::Entry::OnRlsResponseLocked(
- ResponseInfo response, std::unique_ptr<BackOff> backoff_state) {
+ ResponseInfo response, std::unique_ptr<BackOff> backoff_state,
+ OrphanablePtr<ChildPolicyHandler>* child_policy_to_delete) {
  // Move the entry to the end of the LRU list.
  MarkUsed();
  // If the request failed, store the failed status and update the
@@ -1356,7 +1380,7 @@ RlsLb::Cache::Entry::OnRlsResponseLocked(
  if (it == lb_policy_->child_policy_map_.end()) {
  auto new_child = MakeRefCounted<ChildPolicyWrapper>(
  lb_policy_.Ref(DEBUG_LOCATION, "ChildPolicyWrapper"), target);
- new_child->StartUpdate();
+ new_child->StartUpdate(child_policy_to_delete);
  child_policies_to_finish_update.push_back(new_child.get());
  new_child_policy_wrappers.emplace_back(std::move(new_child));
  } else {
@@ -1393,12 +1417,15 @@ RlsLb::Cache::Entry* RlsLb::Cache::Find(const RequestKey& key) {
  return it->second.get();
  }

- RlsLb::Cache::Entry* RlsLb::Cache::FindOrInsert(const RequestKey& key) {
+ RlsLb::Cache::Entry* RlsLb::Cache::FindOrInsert(
+ const RequestKey& key, std::vector<RefCountedPtr<ChildPolicyWrapper>>*
+ child_policy_wrappers_to_delete) {
  auto it = map_.find(key);
  // If not found, create new entry.
  if (it == map_.end()) {
  size_t entry_size = EntrySizeForKey(key);
- MaybeShrinkSize(size_limit_ - std::min(size_limit_, entry_size));
+ MaybeShrinkSize(size_limit_ - std::min(size_limit_, entry_size),
+ child_policy_wrappers_to_delete);
  Entry* entry = new Entry(
  lb_policy_->RefAsSubclass<RlsLb>(DEBUG_LOCATION, "CacheEntry"), key);
  map_.emplace(key, OrphanablePtr<Entry>(entry));
@@ -1418,13 +1445,13 @@ RlsLb::Cache::Entry* RlsLb::Cache::FindOrInsert(const RequestKey& key) {
  return it->second.get();
  }

- void RlsLb::Cache::Resize(size_t bytes) {
- if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
- LOG(INFO) << "[rlslb " << lb_policy_ << "] resizing cache to " << bytes
- << " bytes";
- }
+ void RlsLb::Cache::Resize(size_t bytes,
+ std::vector<RefCountedPtr<ChildPolicyWrapper>>*
+ child_policy_wrappers_to_delete) {
+ GRPC_TRACE_LOG(rls_lb, INFO)
+ << "[rlslb " << lb_policy_ << "] resizing cache to " << bytes << " bytes";
  size_limit_ = bytes;
- MaybeShrinkSize(size_limit_);
+ MaybeShrinkSize(size_limit_, child_policy_wrappers_to_delete);
  }

  void RlsLb::Cache::ResetAllBackoff() {
@@ -1434,7 +1461,12 @@ void RlsLb::Cache::ResetAllBackoff() {
  lb_policy_->UpdatePickerAsync();
  }

- void RlsLb::Cache::Shutdown() {
+ std::vector<RefCountedPtr<RlsLb::ChildPolicyWrapper>> RlsLb::Cache::Shutdown() {
+ std::vector<RefCountedPtr<ChildPolicyWrapper>>
+ child_policy_wrappers_to_delete;
+ for (auto& entry : map_) {
+ entry.second->TakeChildPolicyWrappers(&child_policy_wrappers_to_delete);
+ }
  map_.clear();
  lru_list_.clear();
  if (cleanup_timer_handle_.has_value() &&
@@ -1445,6 +1477,7 @@ void RlsLb::Cache::Shutdown() {
  }
  }
  cleanup_timer_handle_.reset();
+ return child_policy_wrappers_to_delete;
  }

  void RlsLb::Cache::ReportMetricsLocked(CallbackMetricReporter& reporter) {
@@ -1478,15 +1511,17 @@ void RlsLb::Cache::StartCleanupTimer() {
  }

  void RlsLb::Cache::OnCleanupTimer() {
- if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
- LOG(INFO) << "[rlslb " << lb_policy_ << "] cache cleanup timer fired";
- }
+ GRPC_TRACE_LOG(rls_lb, INFO)
+ << "[rlslb " << lb_policy_ << "] cache cleanup timer fired";
+ std::vector<RefCountedPtr<ChildPolicyWrapper>>
+ child_policy_wrappers_to_delete;
  MutexLock lock(&lb_policy_->mu_);
  if (!cleanup_timer_handle_.has_value()) return;
  if (lb_policy_->is_shutdown_) return;
  for (auto it = map_.begin(); it != map_.end();) {
  if (GPR_UNLIKELY(it->second->ShouldRemove() && it->second->CanEvict())) {
  size_ -= it->second->Size();
+ it->second->TakeChildPolicyWrappers(&child_policy_wrappers_to_delete);
  it = map_.erase(it);
  } else {
  ++it;
@@ -1500,7 +1535,9 @@ size_t RlsLb::Cache::EntrySizeForKey(const RequestKey& key) {
  return (key.Size() * 2) + sizeof(Entry);
  }

- void RlsLb::Cache::MaybeShrinkSize(size_t bytes) {
+ void RlsLb::Cache::MaybeShrinkSize(
+ size_t bytes, std::vector<RefCountedPtr<ChildPolicyWrapper>>*
+ child_policy_wrappers_to_delete) {
  while (size_ > bytes) {
  auto lru_it = lru_list_.begin();
  if (GPR_UNLIKELY(lru_it == lru_list_.end())) break;
@@ -1512,6 +1549,7 @@ void RlsLb::Cache::MaybeShrinkSize(size_t bytes) {
  << map_it->second.get() << " " << lru_it->ToString();
  }
  size_ -= map_it->second->Size();
+ map_it->second->TakeChildPolicyWrappers(child_policy_wrappers_to_delete);
  map_.erase(map_it);
  }
  if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
@@ -1841,13 +1879,18 @@ void RlsLb::RlsRequest::OnRlsCallCompleteLocked(grpc_error_handle error) {
  << ": response info: " << response.ToString();
  }
  std::vector<ChildPolicyWrapper*> child_policies_to_finish_update;
+ std::vector<RefCountedPtr<ChildPolicyWrapper>>
+ child_policy_wrappers_to_delete;
+ OrphanablePtr<ChildPolicyHandler> child_policy_to_delete;
  {
  MutexLock lock(&lb_policy_->mu_);
  if (lb_policy_->is_shutdown_) return;
  rls_channel_->ReportResponseLocked(response.status.ok());
- Cache::Entry* cache_entry = lb_policy_->cache_.FindOrInsert(key_);
+ Cache::Entry* cache_entry =
+ lb_policy_->cache_.FindOrInsert(key_, &child_policy_wrappers_to_delete);
  child_policies_to_finish_update = cache_entry->OnRlsResponseLocked(
- std::move(response), std::move(backoff_state_));
+ std::move(response), std::move(backoff_state_),
+ &child_policy_to_delete);
  lb_policy_->request_map_.erase(key_);
  }
  // Now that we've released the lock, finish the update on any newly
@@ -2041,6 +2084,9 @@ absl::Status RlsLb::UpdateLocked(UpdateArgs args) {
  }
  }
  // Now grab the lock to swap out the state it guards.
+ std::vector<RefCountedPtr<ChildPolicyWrapper>>
+ child_policy_wrappers_to_delete;
+ OrphanablePtr<ChildPolicyHandler> child_policy_to_delete;
  {
  MutexLock lock(&mu_);
  // Swap out RLS channel if needed.
@@ -2052,7 +2098,8 @@ absl::Status RlsLb::UpdateLocked(UpdateArgs args) {
  // Resize cache if needed.
  if (old_config == nullptr ||
  config_->cache_size_bytes() != old_config->cache_size_bytes()) {
- cache_.Resize(static_cast<size_t>(config_->cache_size_bytes()));
+ cache_.Resize(static_cast<size_t>(config_->cache_size_bytes()),
+ &child_policy_wrappers_to_delete);
  }
  // Start update of child policies if needed.
  if (update_child_policies) {
@@ -2060,14 +2107,12 @@ absl::Status RlsLb::UpdateLocked(UpdateArgs args) {
  LOG(INFO) << "[rlslb " << this << "] starting child policy updates";
  }
  for (auto& p : child_policy_map_) {
- p.second->StartUpdate();
+ p.second->StartUpdate(&child_policy_to_delete);
  }
  } else if (created_default_child) {
- if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
- LOG(INFO) << "[rlslb " << this
- << "] starting default child policy update";
- }
- default_child_policy_->StartUpdate();
+ GRPC_TRACE_LOG(rls_lb, INFO)
+ << "[rlslb " << this << "] starting default child policy update";
+ default_child_policy_->StartUpdate(&child_policy_to_delete);
  }
  }
  // Now that we've released the lock, finish update of child policies.
@@ -2133,14 +2178,20 @@ void RlsLb::ShutdownLocked() {
  LOG(INFO) << "[rlslb " << this << "] policy shutdown";
  }
  registered_metric_callback_.reset();
- MutexLock lock(&mu_);
- is_shutdown_ = true;
- config_.reset(DEBUG_LOCATION, "ShutdownLocked");
+ RefCountedPtr<ChildPolicyWrapper> child_policy_to_delete;
+ std::vector<RefCountedPtr<ChildPolicyWrapper>>
+ child_policy_wrappers_to_delete;
+ OrphanablePtr<RlsChannel> rls_channel_to_delete;
+ {
+ MutexLock lock(&mu_);
+ is_shutdown_ = true;
+ config_.reset(DEBUG_LOCATION, "ShutdownLocked");
+ child_policy_wrappers_to_delete = cache_.Shutdown();
+ request_map_.clear();
+ rls_channel_to_delete = std::move(rls_channel_);
+ child_policy_to_delete = std::move(default_child_policy_);
+ }
  channel_args_ = ChannelArgs();
- cache_.Shutdown();
- request_map_.clear();
- rls_channel_.reset();
- default_child_policy_.reset();
  }

  void RlsLb::UpdatePickerAsync() {
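A recurring shape in the RLS changes above: objects whose destructors may run arbitrary code (child policy handlers, pickers, the RLS channel) are no longer destroyed while `mu_` is held; they are moved into locals under the lock and destroyed only after it is released. The following is a minimal sketch of that pattern using standard-library types; the names are hypothetical and do not come from gRPC:

```cpp
#include <memory>
#include <mutex>
#include <utility>
#include <vector>

// Entry destructors may call back into code that takes mu_ (or other locks),
// so they must not run while mu_ is held.
struct Entry {
  ~Entry() { /* may acquire locks, notify other components, ... */ }
};

class Cache {
 public:
  void Shutdown() {
    std::vector<std::unique_ptr<Entry>> to_delete;
    {
      std::lock_guard<std::mutex> lock(mu_);
      to_delete.swap(entries_);  // detach under the lock, do not destroy yet
    }
    // to_delete goes out of scope here, after mu_ is released, so the entry
    // destructors can safely take whatever locks they need.
  }

 private:
  std::mutex mu_;
  std::vector<std::unique_ptr<Entry>> entries_;
};
```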
@@ -809,7 +809,7 @@ struct call_run_batch_args {
  };

  static void cancel_call_unblock_func(void* arg) {
- gpr_log(GPR_INFO, "GRPC_RUBY: cancel_call_unblock_func");
+ gpr_log(GPR_DEBUG, "GRPC_RUBY: cancel_call_unblock_func");
  grpc_call* call = (grpc_call*)arg;
  grpc_call_cancel(call, NULL);
  }
@@ -112,7 +112,7 @@ static VALUE grpc_rb_call_credentials_callback_rescue(VALUE args,
  VALUE rb_exception_info =
  rb_funcall(exception_object, rb_intern("inspect"), 0);
  (void)args;
- gpr_log(GPR_INFO,
+ gpr_log(GPR_DEBUG,
  "GRPC_RUBY call credentials callback failed, exception inspect:|%s| "
  "backtrace:|%s|",
  StringValueCStr(rb_exception_info), StringValueCStr(backtrace_str));
@@ -328,7 +328,7 @@ static void grpc_ruby_init_threads() {
  // in gpr_once_init. In general, it appears to be unsafe to call
  // into the ruby library while holding a non-ruby mutex, because a gil yield
  // could end up trying to lock onto that same mutex and deadlocking.
- gpr_log(GPR_INFO,
+ gpr_log(GPR_DEBUG,
  "GRPC_RUBY: grpc_ruby_init_threads g_bg_thread_init_done=%d",
  g_bg_thread_init_done);
  rb_mutex_lock(g_bg_thread_init_rb_mu);
@@ -68,7 +68,7 @@ static void grpc_rb_server_shutdown_and_notify_internal(grpc_rb_server* server,
  server->queue, tag, gpr_inf_future(GPR_CLOCK_REALTIME), NULL, NULL);
  }
  if (ev.type != GRPC_OP_COMPLETE) {
- gpr_log(GPR_INFO,
+ gpr_log(GPR_DEBUG,
  "GRPC_RUBY: bad grpc_server_shutdown_and_notify result:%d",
  ev.type);
  }
@@ -192,7 +192,7 @@ struct server_request_call_args {

  static void shutdown_server_unblock_func(void* arg) {
  grpc_rb_server* server = (grpc_rb_server*)arg;
- gpr_log(GPR_INFO, "GRPC_RUBY: shutdown_server_unblock_func");
+ gpr_log(GPR_DEBUG, "GRPC_RUBY: shutdown_server_unblock_func");
  GRPC_RUBY_ASSERT(server->wrapped != NULL);
  grpc_event event;
  void* tag = &event;
@@ -202,7 +202,7 @@ static void shutdown_server_unblock_func(void* arg) {
  // cancelled all calls.
  event = grpc_completion_queue_pluck(server->queue, tag,
  gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
- gpr_log(GPR_INFO,
+ gpr_log(GPR_DEBUG,
  "GRPC_RUBY: shutdown_server_unblock_func pluck event.type: %d "
  "event.success: %d",
  event.type, event.success);
@@ -14,5 +14,5 @@

  # GRPC contains the General RPC module.
  module GRPC
- VERSION = '1.66.0.pre3'
+ VERSION = '1.66.0.pre5'
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: grpc
  version: !ruby/object:Gem::Version
- version: 1.66.0.pre3
+ version: 1.66.0.pre5
  platform: ruby
  authors:
  - gRPC Authors
  autorequire:
  bindir: src/ruby/bin
  cert_chain: []
- date: 2024-08-07 00:00:00.000000000 Z
+ date: 2024-08-16 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: google-protobuf
@@ -3595,7 +3595,7 @@ required_ruby_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
  - !ruby/object:Gem::Version
- version: 2.5.0
+ version: '3.0'
  required_rubygems_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="