@temporalio/core-bridge 0.16.0 → 0.16.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. package/Cargo.lock +1 -0
  2. package/index.d.ts +14 -0
  3. package/index.node +0 -0
  4. package/package.json +2 -2
  5. package/releases/aarch64-apple-darwin/index.node +0 -0
  6. package/releases/aarch64-unknown-linux-gnu/index.node +0 -0
  7. package/releases/x86_64-apple-darwin/index.node +0 -0
  8. package/releases/x86_64-pc-windows-msvc/index.node +0 -0
  9. package/releases/x86_64-unknown-linux-gnu/index.node +0 -0
  10. package/sdk-core/Cargo.toml +1 -0
  11. package/sdk-core/fsm/rustfsm_procmacro/Cargo.toml +1 -1
  12. package/sdk-core/fsm/rustfsm_procmacro/src/lib.rs +8 -9
  13. package/sdk-core/fsm/rustfsm_trait/Cargo.toml +1 -1
  14. package/sdk-core/fsm/rustfsm_trait/src/lib.rs +1 -1
  15. package/sdk-core/sdk-core-protos/src/lib.rs +36 -48
  16. package/sdk-core/src/core_tests/activity_tasks.rs +5 -5
  17. package/sdk-core/src/core_tests/mod.rs +2 -2
  18. package/sdk-core/src/core_tests/workflow_tasks.rs +2 -2
  19. package/sdk-core/src/errors.rs +11 -9
  20. package/sdk-core/src/lib.rs +2 -2
  21. package/sdk-core/src/machines/activity_state_machine.rs +3 -3
  22. package/sdk-core/src/machines/child_workflow_state_machine.rs +5 -5
  23. package/sdk-core/src/machines/complete_workflow_state_machine.rs +1 -1
  24. package/sdk-core/src/machines/continue_as_new_workflow_state_machine.rs +1 -1
  25. package/sdk-core/src/machines/mod.rs +16 -22
  26. package/sdk-core/src/machines/patch_state_machine.rs +8 -8
  27. package/sdk-core/src/machines/signal_external_state_machine.rs +2 -2
  28. package/sdk-core/src/machines/timer_state_machine.rs +4 -4
  29. package/sdk-core/src/machines/transition_coverage.rs +3 -3
  30. package/sdk-core/src/machines/workflow_machines.rs +9 -9
  31. package/sdk-core/src/pending_activations.rs +19 -20
  32. package/sdk-core/src/pollers/gateway.rs +3 -3
  33. package/sdk-core/src/pollers/poll_buffer.rs +2 -2
  34. package/sdk-core/src/pollers/retry.rs +4 -4
  35. package/sdk-core/src/prototype_rust_sdk/workflow_context.rs +3 -3
  36. package/sdk-core/src/prototype_rust_sdk/workflow_future.rs +4 -4
  37. package/sdk-core/src/prototype_rust_sdk.rs +3 -11
  38. package/sdk-core/src/telemetry/metrics.rs +2 -4
  39. package/sdk-core/src/telemetry/mod.rs +6 -7
  40. package/sdk-core/src/test_help/canned_histories.rs +8 -5
  41. package/sdk-core/src/test_help/history_builder.rs +2 -2
  42. package/sdk-core/src/test_help/history_info.rs +2 -2
  43. package/sdk-core/src/test_help/mod.rs +18 -30
  44. package/sdk-core/src/worker/activities/activity_heartbeat_manager.rs +246 -138
  45. package/sdk-core/src/worker/activities.rs +46 -45
  46. package/sdk-core/src/worker/config.rs +11 -0
  47. package/sdk-core/src/worker/dispatcher.rs +5 -5
  48. package/sdk-core/src/worker/mod.rs +8 -6
  49. package/sdk-core/src/workflow/driven_workflow.rs +3 -3
  50. package/sdk-core/src/workflow/history_update.rs +1 -1
  51. package/sdk-core/src/workflow/mod.rs +1 -1
  52. package/sdk-core/src/workflow/workflow_tasks/cache_manager.rs +13 -17
  53. package/sdk-core/src/workflow/workflow_tasks/concurrency_manager.rs +4 -8
  54. package/sdk-core/src/workflow/workflow_tasks/mod.rs +14 -19
  55. package/sdk-core/test_utils/src/lib.rs +2 -2
  56. package/sdk-core/tests/integ_tests/workflow_tests/activities.rs +61 -1
  57. package/src/conversions.rs +17 -0
@@ -107,7 +107,7 @@ impl PatchMachine {
107
107
  .into(),
108
108
  ),
109
109
  };
110
- let mut machine = PatchMachine {
110
+ let mut machine = Self {
111
111
  state: initial_state,
112
112
  shared_state: state,
113
113
  };
@@ -210,7 +210,7 @@ impl TryFrom<HistoryEvent> for PatchMachineEvents {
210
210
 
211
211
  fn try_from(e: HistoryEvent) -> Result<Self, Self::Error> {
212
212
  match e.get_changed_marker_details() {
213
- Some((id, _)) => Ok(PatchMachineEvents::MarkerRecorded(id)),
213
+ Some((id, _)) => Ok(Self::MarkerRecorded(id)),
214
214
  _ => Err(WFMachinesError::Nondeterminism(format!(
215
215
  "Change machine cannot handle this event: {}",
216
216
  e
@@ -422,7 +422,9 @@ mod tests {
422
422
  commands[0].command_type,
423
423
  CommandType::ScheduleActivityTask as i32
424
424
  );
425
- let act = if !replaying {
425
+ let act = if replaying {
426
+ wfm.get_next_activation().await
427
+ } else {
426
428
  // Feed more history
427
429
  wfm.new_history(
428
430
  patch_marker_single_activity(marker_type)
@@ -431,8 +433,6 @@ mod tests {
431
433
  .into(),
432
434
  )
433
435
  .await
434
- } else {
435
- wfm.get_next_activation().await
436
436
  };
437
437
 
438
438
  if marker_type == MarkerType::Deprecated {
@@ -514,7 +514,9 @@ mod tests {
514
514
  if activity_id == expected_activity_id
515
515
  );
516
516
 
517
- let act = if !replaying {
517
+ let act = if replaying {
518
+ wfm.get_next_activation().await
519
+ } else {
518
520
  // Feed more history. Since we are *not* replaying, we *always* "have" the change
519
521
  // and the history should have the has-change timer. v3 of course always has the change
520
522
  // regardless.
@@ -525,8 +527,6 @@ mod tests {
525
527
  .into(),
526
528
  )
527
529
  .await
528
- } else {
529
- wfm.get_next_activation().await
530
530
  };
531
531
 
532
532
  let act = act.unwrap();
@@ -266,7 +266,7 @@ impl Cancellable for SignalExternalMachine {
266
266
  ..Default::default()
267
267
  }),
268
268
  }
269
- .into()]
269
+ .into()];
270
270
  }
271
271
  Some(_) => panic!("Signal external machine cancel produced unexpected result"),
272
272
  None => (),
@@ -400,7 +400,7 @@ mod tests {
400
400
  };
401
401
  let cmds = s.cancel().unwrap();
402
402
  assert_eq!(cmds.len(), 0);
403
- assert_eq!(discriminant(&state), discriminant(&s.state))
403
+ assert_eq!(discriminant(&state), discriminant(&s.state));
404
404
  }
405
405
  }
406
406
  }
@@ -199,13 +199,13 @@ impl StartCommandRecorded {
199
199
  dat: SharedState,
200
200
  attrs: TimerFiredEventAttributes,
201
201
  ) -> TimerMachineTransition<Fired> {
202
- if dat.attrs.seq.to_string() != attrs.timer_id {
202
+ if dat.attrs.seq.to_string() == attrs.timer_id {
203
+ TransitionResult::ok(vec![TimerMachineCommand::Complete], Fired::default())
204
+ } else {
203
205
  TransitionResult::Err(WFMachinesError::Fatal(format!(
204
206
  "Timer fired event did not have expected timer id {}, it was {}!",
205
207
  dat.attrs.seq, attrs.timer_id
206
208
  )))
207
- } else {
208
- TransitionResult::ok(vec![TimerMachineCommand::Complete], Fired::default())
209
209
  }
210
210
  }
211
211
 
@@ -435,7 +435,7 @@ mod test {
435
435
  };
436
436
  let cmds = s.cancel().unwrap();
437
437
  assert_eq!(cmds.len(), 0);
438
- assert_eq!(discriminant(&state), discriminant(&s.state))
438
+ assert_eq!(discriminant(&state), discriminant(&s.state));
439
439
  }
440
440
  }
441
441
  }
@@ -79,7 +79,7 @@ mod machine_coverage_report {
79
79
  workflow_task_state_machine::WorkflowTaskMachine,
80
80
  };
81
81
  use rustfsm::StateMachine;
82
- use std::{fs::File, io::Write, ops::Deref};
82
+ use std::{fs::File, io::Write};
83
83
 
84
84
  // This "test" needs to exist so that we have a way to join the spawned thread. Otherwise
85
85
  // it'll just get abandoned.
@@ -89,7 +89,7 @@ mod machine_coverage_report {
89
89
  #[ignore]
90
90
  fn reporter() {
91
91
  // Make sure thread handle exists
92
- let _ = COVERAGE_SENDER.deref();
92
+ let _ = &*COVERAGE_SENDER;
93
93
  // Join it
94
94
  THREAD_HANDLE
95
95
  .lock()
@@ -124,7 +124,7 @@ mod machine_coverage_report {
124
124
  m @ "WorkflowTaskMachine" => cover_transitions(m, &mut wf_task, coverage),
125
125
  m @ "FailWorkflowMachine" => cover_transitions(m, &mut fail_wf, coverage),
126
126
  m @ "ContinueAsNewWorkflowMachine" => {
127
- cover_transitions(m, &mut cont_as_new, coverage)
127
+ cover_transitions(m, &mut cont_as_new, coverage);
128
128
  }
129
129
  m @ "CancelWorkflowMachine" => cover_transitions(m, &mut cancel_wf, coverage),
130
130
  m @ "PatchMachine" => cover_transitions(m, &mut version, coverage),
@@ -150,7 +150,7 @@ where
150
150
  T: Into<wf_activation_job::Variant>,
151
151
  {
152
152
  fn from(v: T) -> Self {
153
- MachineResponse::PushWFJob(v.into())
153
+ Self::PushWFJob(v.into())
154
154
  }
155
155
  }
156
156
 
@@ -171,7 +171,7 @@ pub(crate) enum WFMachinesError {
171
171
 
172
172
  impl From<TimestampOutOfSystemRangeError> for WFMachinesError {
173
173
  fn from(_: TimestampOutOfSystemRangeError) -> Self {
174
- WFMachinesError::Fatal("Could not decode timestamp".to_string())
174
+ Self::Fatal("Could not decode timestamp".to_string())
175
175
  }
176
176
  }
177
177
 
@@ -210,7 +210,7 @@ impl WorkflowMachines {
210
210
  }
211
211
 
212
212
  /// Returns true if workflow has seen a terminal command
213
- pub(crate) fn workflow_is_finished(&self) -> bool {
213
+ pub(crate) const fn workflow_is_finished(&self) -> bool {
214
214
  self.workflow_end_time.is_some()
215
215
  }
216
216
 
@@ -486,7 +486,7 @@ impl WorkflowMachines {
486
486
  }
487
487
 
488
488
  fn set_current_time(&mut self, time: SystemTime) -> SystemTime {
489
- if self.current_wf_time.map(|t| t < time).unwrap_or(true) {
489
+ if self.current_wf_time.map_or(true, |t| t < time) {
490
490
  self.current_wf_time = Some(time);
491
491
  }
492
492
  self.current_wf_time
@@ -503,7 +503,7 @@ impl WorkflowMachines {
503
503
  let results = self.drive_me.fetch_workflow_iteration_output().await;
504
504
  let jobs = self.handle_driven_results(results)?;
505
505
  let has_new_lang_jobs = !jobs.is_empty();
506
- for job in jobs.into_iter() {
506
+ for job in jobs {
507
507
  self.drive_me.send_job(job);
508
508
  }
509
509
  self.prepare_commands()?;
@@ -695,7 +695,7 @@ impl WorkflowMachines {
695
695
  self.current_wf_task_commands.push_back(timer);
696
696
  }
697
697
  WFCommand::CancelTimer(attrs) => {
698
- jobs.extend(self.process_cancellation(CommandID::Timer(attrs.seq))?)
698
+ jobs.extend(self.process_cancellation(CommandID::Timer(attrs.seq))?);
699
699
  }
700
700
  WFCommand::AddActivity(attrs) => {
701
701
  let seq = attrs.seq;
@@ -705,7 +705,7 @@ impl WorkflowMachines {
705
705
  self.current_wf_task_commands.push_back(activity);
706
706
  }
707
707
  WFCommand::RequestCancelActivity(attrs) => {
708
- jobs.extend(self.process_cancellation(CommandID::Activity(attrs.seq))?)
708
+ jobs.extend(self.process_cancellation(CommandID::Activity(attrs.seq))?);
709
709
  }
710
710
  WFCommand::CompleteWorkflow(attrs) => {
711
711
  self.metrics.wf_completed();
@@ -815,7 +815,7 @@ impl WorkflowMachines {
815
815
  self.current_wf_task_commands.push_back(sigm);
816
816
  }
817
817
  WFCommand::CancelSignalWorkflow(attrs) => {
818
- jobs.extend(self.process_cancellation(CommandID::SignalExternal(attrs.seq))?)
818
+ jobs.extend(self.process_cancellation(CommandID::SignalExternal(attrs.seq))?);
819
819
  }
820
820
  WFCommand::QueryResponse(_) => {
821
821
  // Nothing to do here, queries are handled above the machine level
@@ -840,7 +840,7 @@ impl WorkflowMachines {
840
840
  self.current_wf_task_commands.push_back(CommandAndMachine {
841
841
  command: c,
842
842
  machine: m_key,
843
- })
843
+ });
844
844
  }
845
845
  MachineResponse::PushWFJob(j) => {
846
846
  jobs.push(j);
@@ -68,15 +68,14 @@ impl PendingActivations {
68
68
  let mut inner = self.inner.write();
69
69
  let mut key_queue = inner.queue.iter().copied();
70
70
  let maybe_key = key_queue.position(|k| {
71
- if let Some(activation) = inner.activations.get(k) {
72
- predicate(&activation.run_id)
73
- } else {
74
- false
75
- }
71
+ inner
72
+ .activations
73
+ .get(k)
74
+ .map_or(false, |activation| predicate(&activation.run_id))
76
75
  });
77
76
 
78
77
  let maybe_key = maybe_key.map(|pos| inner.queue.remove(pos).unwrap());
79
- if let Some(key) = maybe_key {
78
+ maybe_key.and_then(|key| {
80
79
  if let Some(pa) = inner.activations.remove(key) {
81
80
  inner.by_run_id.remove(&pa.run_id);
82
81
  Some(pa)
@@ -87,9 +86,7 @@ impl PendingActivations {
87
86
  drop(inner); // Will deadlock when we recurse w/o this
88
87
  self.pop()
89
88
  }
90
- } else {
91
- None
92
- }
89
+ })
93
90
  }
94
91
 
95
92
  pub fn pop(&self) -> Option<WfActivation> {
@@ -119,17 +116,19 @@ fn merge_joblists(
119
116
  .as_mut_slice()
120
117
  .sort_by(evictions_always_last_compare);
121
118
  // Drop any duplicate evictions
122
- let truncate_len = if let Some(last_non_evict_job) = existing_list.iter().rev().position(|j| {
123
- !matches!(
124
- j.variant,
125
- Some(wf_activation_job::Variant::RemoveFromCache(_))
126
- )
127
- }) {
128
- existing_list.len() - last_non_evict_job + 1
129
- } else {
130
- 1
131
- };
132
- existing_list.truncate(truncate_len)
119
+ let truncate_len = existing_list
120
+ .iter()
121
+ .rev()
122
+ .position(|j| {
123
+ !matches!(
124
+ j.variant,
125
+ Some(wf_activation_job::Variant::RemoveFromCache(_))
126
+ )
127
+ })
128
+ .map_or(1, |last_non_evict_job| {
129
+ existing_list.len() - last_non_evict_job + 1
130
+ });
131
+ existing_list.truncate(truncate_len);
133
132
  }
134
133
 
135
134
  fn evictions_always_last_compare(a: &WfActivationJob, b: &WfActivationJob) -> Ordering {
@@ -165,7 +165,7 @@ impl Default for RetryConfig {
165
165
  }
166
166
 
167
167
  impl RetryConfig {
168
- pub(crate) fn poll_retry_policy() -> Self {
168
+ pub(crate) const fn poll_retry_policy() -> Self {
169
169
  Self {
170
170
  initial_interval: Duration::from_millis(200),
171
171
  randomization_factor: 0.2,
@@ -179,7 +179,7 @@ impl RetryConfig {
179
179
 
180
180
  impl From<RetryConfig> for ExponentialBackoff {
181
181
  fn from(c: RetryConfig) -> Self {
182
- ExponentialBackoff {
182
+ Self {
183
183
  current_interval: c.initial_interval,
184
184
  initial_interval: c.initial_interval,
185
185
  randomization_factor: c.randomization_factor,
@@ -269,7 +269,7 @@ impl Interceptor for ServiceCallInterceptor {
269
269
  .parse()
270
270
  .unwrap_or_else(|_| MetadataValue::from_static("")),
271
271
  );
272
- for (k, v) in self.opts.static_headers.iter() {
272
+ for (k, v) in &self.opts.static_headers {
273
273
  if let (Ok(k), Ok(v)) = (MetadataKey::from_str(k), MetadataValue::from_str(v)) {
274
274
  metadata.insert(k, v);
275
275
  }
@@ -152,7 +152,7 @@ where
152
152
 
153
153
  async fn shutdown_box(self: Box<Self>) {
154
154
  let this = *self;
155
- this.shutdown().await
155
+ this.shutdown().await;
156
156
  }
157
157
  }
158
158
 
@@ -192,7 +192,7 @@ impl Poller<PollWorkflowTaskQueueResponse> for WorkflowTaskPoller {
192
192
 
193
193
  async fn shutdown_box(self: Box<Self>) {
194
194
  let this = *self;
195
- this.shutdown().await
195
+ this.shutdown().await;
196
196
  }
197
197
  }
198
198
 
@@ -27,7 +27,7 @@ pub struct RetryGateway<SG> {
27
27
 
28
28
  impl<SG> RetryGateway<SG> {
29
29
  /// Use the provided retry config with the provided gateway
30
- pub fn new(gateway: SG, retry_config: RetryConfig) -> Self {
30
+ pub const fn new(gateway: SG, retry_config: RetryConfig) -> Self {
31
31
  Self {
32
32
  gateway,
33
33
  retry_config,
@@ -76,7 +76,7 @@ impl TonicErrorHandler {
76
76
  }
77
77
  }
78
78
 
79
- fn should_log_retry_warning(&self, cur_attempt: usize) -> bool {
79
+ const fn should_log_retry_warning(&self, cur_attempt: usize) -> bool {
80
80
  // Warn on more than 5 retries for unlimited retrying
81
81
  if self.max_retries == 0 && cur_attempt > 5 {
82
82
  return true;
@@ -104,9 +104,9 @@ impl ErrorHandler<tonic::Status> for TonicErrorHandler {
104
104
  }
105
105
 
106
106
  if current_attempt == 1 {
107
- debug!(error=?e, "gRPC call {} failed on first attempt", self.call_name)
107
+ debug!(error=?e, "gRPC call {} failed on first attempt", self.call_name);
108
108
  } else if self.should_log_retry_warning(current_attempt) {
109
- warn!(error=?e, "gRPC call {} retried {} times", self.call_name, current_attempt)
109
+ warn!(error=?e, "gRPC call {} retried {} times", self.call_name, current_attempt);
110
110
  }
111
111
 
112
112
  // Long polls are OK with being cancelled or running into the timeout because there's
@@ -99,7 +99,7 @@ impl WfContext {
99
99
  self.am_cancelled
100
100
  .changed()
101
101
  .await
102
- .expect("Cancelled send half not dropped")
102
+ .expect("Cancelled send half not dropped");
103
103
  }
104
104
 
105
105
  /// Request to create a timer
@@ -206,7 +206,7 @@ impl WfContext {
206
206
 
207
207
  /// Force a workflow task failure (EX: in order to retry on non-sticky queue)
208
208
  pub fn force_task_fail(&self, with: anyhow::Error) {
209
- self.send(with.into())
209
+ self.send(with.into());
210
210
  }
211
211
 
212
212
  /// Request the cancellation of an external workflow. May resolve as a failure if the workflow
@@ -262,7 +262,7 @@ impl WfContext {
262
262
 
263
263
  /// Cancel any cancellable operation by ID
264
264
  fn cancel(&mut self, cancellable_id: CancellableID) {
265
- self.send(RustWfCmd::Cancel(cancellable_id))
265
+ self.send(RustWfCmd::Cancel(cancellable_id));
266
266
  }
267
267
 
268
268
  fn send(&self, c: RustWfCmd) {
@@ -169,7 +169,7 @@ impl WorkflowFuture {
169
169
  self.unblock(UnblockEvent::Activity(
170
170
  seq,
171
171
  Box::new(result.context("Activity must have result")?),
172
- ))?
172
+ ))?;
173
173
  }
174
174
  Variant::ResolveChildWorkflowExecutionStart(
175
175
  ResolveChildWorkflowExecutionStart { seq, status },
@@ -184,7 +184,7 @@ impl WorkflowFuture {
184
184
  seq,
185
185
  Box::new(result.context("Child Workflow execution must have a result")?),
186
186
  ))?,
187
- Variant::UpdateRandomSeed(_) => {}
187
+ Variant::UpdateRandomSeed(_) => (),
188
188
  Variant::QueryWorkflow(_) => {
189
189
  todo!()
190
190
  }
@@ -209,10 +209,10 @@ impl WorkflowFuture {
209
209
  self.ctx_shared.write().changes.insert(patch_id, true);
210
210
  }
211
211
  Variant::ResolveSignalExternalWorkflow(attrs) => {
212
- self.unblock(UnblockEvent::SignalExternal(attrs.seq, attrs.failure))?
212
+ self.unblock(UnblockEvent::SignalExternal(attrs.seq, attrs.failure))?;
213
213
  }
214
214
  Variant::ResolveRequestCancelExternalWorkflow(attrs) => {
215
- self.unblock(UnblockEvent::CancelExternal(attrs.seq, attrs.failure))?
215
+ self.unblock(UnblockEvent::CancelExternal(attrs.seq, attrs.failure))?;
216
216
  }
217
217
 
218
218
  Variant::RemoveFromCache(_) => {
@@ -250,7 +250,7 @@ impl Unblockable for PendingChildWorkflow {
250
250
  type OtherDat = ChildWfCommon;
251
251
  fn unblock(ue: UnblockEvent, od: Self::OtherDat) -> Self {
252
252
  match ue {
253
- UnblockEvent::WorkflowStart(_, result) => PendingChildWorkflow {
253
+ UnblockEvent::WorkflowStart(_, result) => Self {
254
254
  status: *result,
255
255
  common: od,
256
256
  },
@@ -274,11 +274,7 @@ impl Unblockable for SignalExternalWfResult {
274
274
  fn unblock(ue: UnblockEvent, _: Self::OtherDat) -> Self {
275
275
  match ue {
276
276
  UnblockEvent::SignalExternal(_, maybefail) => {
277
- if let Some(f) = maybefail {
278
- Err(f)
279
- } else {
280
- Ok(SignalExternalOk)
281
- }
277
+ maybefail.map_or(Ok(SignalExternalOk), Err)
282
278
  }
283
279
  _ => panic!("Invalid unblock event for signal external workflow result"),
284
280
  }
@@ -290,11 +286,7 @@ impl Unblockable for CancelExternalWfResult {
290
286
  fn unblock(ue: UnblockEvent, _: Self::OtherDat) -> Self {
291
287
  match ue {
292
288
  UnblockEvent::CancelExternal(_, maybefail) => {
293
- if let Some(f) = maybefail {
294
- Err(f)
295
- } else {
296
- Ok(CancelExternalOk)
297
- }
289
+ maybefail.map_or(Ok(CancelExternalOk), Err)
298
290
  }
299
291
  _ => panic!("Invalid unblock event for signal external workflow result"),
300
292
  }
@@ -360,16 +360,14 @@ impl AggregatorSelector for SDKAggSelector {
360
360
  if *descriptor.instrument_kind() == InstrumentKind::ValueRecorder {
361
361
  // Some recorders are just gauges
362
362
  match descriptor.name() {
363
- STICKY_CACHE_SIZE_NAME => return Some(Arc::new(last_value())),
364
- NUM_POLLERS_NAME => return Some(Arc::new(last_value())),
363
+ STICKY_CACHE_SIZE_NAME | NUM_POLLERS_NAME => return Some(Arc::new(last_value())),
365
364
  _ => (),
366
365
  }
367
366
 
368
367
  // Other recorders will select their appropriate buckets
369
368
  let buckets = match descriptor.name() {
370
369
  WF_E2E_LATENCY_NAME => WF_LATENCY_MS_BUCKETS,
371
- WF_TASK_EXECUTION_LATENCY_NAME => WF_TASK_MS_BUCKETS,
372
- WF_TASK_REPLAY_LATENCY_NAME => WF_TASK_MS_BUCKETS,
370
+ WF_TASK_EXECUTION_LATENCY_NAME | WF_TASK_REPLAY_LATENCY_NAME => WF_TASK_MS_BUCKETS,
373
371
  WF_TASK_SCHED_TO_START_LATENCY_NAME | ACT_SCHED_TO_START_LATENCY_NAME => {
374
372
  TASK_SCHED_TO_START_MS_BUCKETS
375
373
  }
@@ -17,7 +17,7 @@ use opentelemetry::{
17
17
  };
18
18
  use opentelemetry_otlp::WithExportConfig;
19
19
  use parking_lot::{const_mutex, Mutex};
20
- use std::{collections::VecDeque, net::SocketAddr, ops::Deref, time::Duration};
20
+ use std::{collections::VecDeque, net::SocketAddr, time::Duration};
21
21
  use tracing_subscriber::{layer::SubscriberExt, EnvFilter};
22
22
  use url::Url;
23
23
 
@@ -112,7 +112,7 @@ pub(crate) fn telemetry_init(opts: &TelemetryOptions) -> Result<(), anyhow::Erro
112
112
  std::thread::spawn(move || {
113
113
  let res = GLOBAL_TELEM_DAT.get_or_try_init::<_, anyhow::Error>(move || {
114
114
  // Ensure closure captures the mutex guard
115
- let _ = guard.deref();
115
+ let _ = &*guard;
116
116
 
117
117
  let runtime = tokio::runtime::Builder::new_multi_thread()
118
118
  .thread_name("telemetry")
@@ -120,13 +120,12 @@ pub(crate) fn telemetry_init(opts: &TelemetryOptions) -> Result<(), anyhow::Erro
120
120
  .enable_all()
121
121
  .build()?;
122
122
  let mut globaldat = GlobalTelemDat::default();
123
- let mut am_forwarding_logs = false;
123
+ let am_forwarding_logs = opts.log_forwarding_level != LevelFilter::Off;
124
124
 
125
- if opts.log_forwarding_level != LevelFilter::Off {
125
+ if am_forwarding_logs {
126
126
  log::set_max_level(opts.log_forwarding_level);
127
127
  globaldat.core_export_logger =
128
128
  Some(CoreExportLogger::new(opts.log_forwarding_level));
129
- am_forwarding_logs = true;
130
129
  }
131
130
 
132
131
  let filter_layer = EnvFilter::try_from_env(LOG_FILTER_ENV_VAR).or_else(|_| {
@@ -232,7 +231,7 @@ pub(crate) fn test_telem_console() {
232
231
  log_forwarding_level: LevelFilter::Off,
233
232
  prometheus_export_bind_address: None,
234
233
  })
235
- .unwrap()
234
+ .unwrap();
236
235
  }
237
236
 
238
237
  #[allow(dead_code)] // Not always used, called to enable for debugging when needed
@@ -244,7 +243,7 @@ pub(crate) fn test_telem_collector() {
244
243
  log_forwarding_level: LevelFilter::Off,
245
244
  prometheus_export_bind_address: None,
246
245
  })
247
- .unwrap()
246
+ .unwrap();
248
247
  }
249
248
 
250
249
  /// A trait for using [Display] on the contents of vecs, etc, which don't implement it.
@@ -875,7 +875,7 @@ pub fn long_sequential_timers(num_tasks: usize) -> TestHistoryBuilder {
875
875
  t.add_by_type(EventType::WorkflowExecutionStarted);
876
876
  t.add_full_wf_task();
877
877
 
878
- for i in 1..num_tasks + 1 {
878
+ for i in 1..=num_tasks {
879
879
  let timer_started_event_id = t.add_get_event_id(EventType::TimerStarted, None);
880
880
  t.add(
881
881
  EventType::TimerFired,
@@ -1122,13 +1122,16 @@ pub fn timer_then_continue_as_new(timer_id: &str) -> TestHistoryBuilder {
1122
1122
  /// 10: EVENT_TYPE_WORKFLOW_TASK_COMPLETED
1123
1123
  /// 11: EVENT_TYPE_WORKFLOW_EXECUTION_CANCELED
1124
1124
  pub fn timer_wf_cancel_req_cancelled(timer_id: &str) -> TestHistoryBuilder {
1125
- timer_cancel_req_then(timer_id, |t| t.add_cancelled())
1125
+ timer_cancel_req_then(timer_id, TestHistoryBuilder::add_cancelled)
1126
1126
  }
1127
1127
  pub fn timer_wf_cancel_req_completed(timer_id: &str) -> TestHistoryBuilder {
1128
- timer_cancel_req_then(timer_id, |t| t.add_workflow_execution_completed())
1128
+ timer_cancel_req_then(
1129
+ timer_id,
1130
+ TestHistoryBuilder::add_workflow_execution_completed,
1131
+ )
1129
1132
  }
1130
1133
  pub fn timer_wf_cancel_req_failed(timer_id: &str) -> TestHistoryBuilder {
1131
- timer_cancel_req_then(timer_id, |t| t.add_workflow_execution_failed())
1134
+ timer_cancel_req_then(timer_id, TestHistoryBuilder::add_workflow_execution_failed)
1132
1135
  }
1133
1136
 
1134
1137
  /// 1: EVENT_TYPE_WORKFLOW_EXECUTION_STARTED
@@ -1158,7 +1161,7 @@ pub fn timer_wf_cancel_req_do_another_timer_then_cancelled() -> TestHistoryBuild
1158
1161
  }),
1159
1162
  );
1160
1163
  t.add_full_wf_task();
1161
- t.add_cancelled()
1164
+ t.add_cancelled();
1162
1165
  })
1163
1166
  }
1164
1167
 
@@ -182,7 +182,7 @@ impl TestHistoryBuilder {
182
182
  ..Default::default()
183
183
  },
184
184
  ),
185
- )
185
+ );
186
186
  }
187
187
 
188
188
  pub fn add_activity_task_cancel_requested(&mut self, scheduled_event_id: i64) {
@@ -358,7 +358,7 @@ impl TestHistoryBuilder {
358
358
  },
359
359
  )) = &evt.attributes
360
360
  {
361
- self.original_run_id = original_execution_run_id.to_owned();
361
+ self.original_run_id = original_execution_run_id.clone();
362
362
  };
363
363
  self.events.push(evt);
364
364
  }
@@ -105,14 +105,14 @@ impl HistoryInfo {
105
105
  }
106
106
 
107
107
  /// Non-test code should *not* rely on just counting workflow tasks b/c of pagination
108
- pub(crate) fn wf_task_count(&self) -> usize {
108
+ pub(crate) const fn wf_task_count(&self) -> usize {
109
109
  self.wf_task_count
110
110
  }
111
111
  }
112
112
 
113
113
  impl From<HistoryInfo> for HistoryUpdate {
114
114
  fn from(v: HistoryInfo) -> Self {
115
- HistoryUpdate::new_from_events(v.events, v.previous_started_event_id)
115
+ Self::new_from_events(v.events, v.previous_started_event_id)
116
116
  }
117
117
  }
118
118