@temporalio/core-bridge 0.16.0 → 0.17.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. package/Cargo.lock +1 -0
  2. package/index.d.ts +14 -0
  3. package/index.node +0 -0
  4. package/package.json +3 -3
  5. package/releases/aarch64-apple-darwin/index.node +0 -0
  6. package/releases/aarch64-unknown-linux-gnu/index.node +0 -0
  7. package/releases/x86_64-apple-darwin/index.node +0 -0
  8. package/releases/x86_64-pc-windows-msvc/index.node +0 -0
  9. package/releases/x86_64-unknown-linux-gnu/index.node +0 -0
  10. package/sdk-core/Cargo.toml +1 -0
  11. package/sdk-core/fsm/rustfsm_procmacro/Cargo.toml +1 -1
  12. package/sdk-core/fsm/rustfsm_procmacro/src/lib.rs +8 -9
  13. package/sdk-core/fsm/rustfsm_trait/Cargo.toml +1 -1
  14. package/sdk-core/fsm/rustfsm_trait/src/lib.rs +1 -1
  15. package/sdk-core/sdk-core-protos/src/lib.rs +43 -48
  16. package/sdk-core/src/core_tests/activity_tasks.rs +5 -5
  17. package/sdk-core/src/core_tests/mod.rs +2 -2
  18. package/sdk-core/src/core_tests/queries.rs +9 -2
  19. package/sdk-core/src/core_tests/workflow_tasks.rs +87 -8
  20. package/sdk-core/src/errors.rs +13 -13
  21. package/sdk-core/src/lib.rs +2 -2
  22. package/sdk-core/src/machines/activity_state_machine.rs +3 -3
  23. package/sdk-core/src/machines/child_workflow_state_machine.rs +6 -15
  24. package/sdk-core/src/machines/complete_workflow_state_machine.rs +1 -1
  25. package/sdk-core/src/machines/continue_as_new_workflow_state_machine.rs +1 -1
  26. package/sdk-core/src/machines/mod.rs +16 -22
  27. package/sdk-core/src/machines/patch_state_machine.rs +8 -8
  28. package/sdk-core/src/machines/signal_external_state_machine.rs +2 -2
  29. package/sdk-core/src/machines/timer_state_machine.rs +4 -4
  30. package/sdk-core/src/machines/transition_coverage.rs +3 -3
  31. package/sdk-core/src/machines/workflow_machines.rs +26 -24
  32. package/sdk-core/src/pending_activations.rs +19 -20
  33. package/sdk-core/src/pollers/gateway.rs +3 -3
  34. package/sdk-core/src/pollers/poll_buffer.rs +2 -2
  35. package/sdk-core/src/pollers/retry.rs +4 -4
  36. package/sdk-core/src/prototype_rust_sdk/workflow_context.rs +3 -3
  37. package/sdk-core/src/prototype_rust_sdk/workflow_future.rs +4 -4
  38. package/sdk-core/src/prototype_rust_sdk.rs +3 -11
  39. package/sdk-core/src/telemetry/metrics.rs +2 -4
  40. package/sdk-core/src/telemetry/mod.rs +6 -7
  41. package/sdk-core/src/test_help/canned_histories.rs +8 -5
  42. package/sdk-core/src/test_help/history_builder.rs +12 -2
  43. package/sdk-core/src/test_help/history_info.rs +23 -3
  44. package/sdk-core/src/test_help/mod.rs +24 -40
  45. package/sdk-core/src/worker/activities/activity_heartbeat_manager.rs +246 -138
  46. package/sdk-core/src/worker/activities.rs +46 -45
  47. package/sdk-core/src/worker/config.rs +11 -0
  48. package/sdk-core/src/worker/dispatcher.rs +5 -5
  49. package/sdk-core/src/worker/mod.rs +71 -52
  50. package/sdk-core/src/workflow/driven_workflow.rs +3 -3
  51. package/sdk-core/src/workflow/history_update.rs +1 -1
  52. package/sdk-core/src/workflow/mod.rs +1 -1
  53. package/sdk-core/src/workflow/workflow_tasks/cache_manager.rs +13 -17
  54. package/sdk-core/src/workflow/workflow_tasks/concurrency_manager.rs +4 -8
  55. package/sdk-core/src/workflow/workflow_tasks/mod.rs +46 -53
  56. package/sdk-core/test_utils/src/lib.rs +2 -2
  57. package/sdk-core/tests/integ_tests/workflow_tests/activities.rs +61 -1
  58. package/src/conversions.rs +17 -0
package/Cargo.lock CHANGED
@@ -1550,6 +1550,7 @@ dependencies = [
1550
1550
  "thiserror",
1551
1551
  "tokio",
1552
1552
  "tokio-stream",
1553
+ "tokio-util",
1553
1554
  "tonic 0.6.1",
1554
1555
  "tonic-build 0.6.0",
1555
1556
  "tower",
package/index.d.ts CHANGED
@@ -125,6 +125,20 @@ export interface WorkerOptions {
125
125
  * Maximum number of Workflow instances to cache before automatic eviction
126
126
  */
127
127
  maxCachedWorkflows: number;
128
+ /**
129
+ * Longest interval for throttling activity heartbeats
130
+ * @default 60 seconds
131
+ */
132
+ maxHeartbeatThrottleIntervalMs: number;
133
+
134
+ /**
135
+ * Default interval for throttling activity heartbeats in case
136
+ * `ActivityOptions.heartbeat_timeout` is unset.
137
+ * When the timeout *is* set in the `ActivityOptions`, throttling is set to
138
+ * `heartbeat_timeout * 0.8`.
139
+ * @default 30 seconds
140
+ */
141
+ defaultHeartbeatThrottleIntervalMs: number;
128
142
  }
129
143
 
130
144
  /** Log level - must match rust log level names */
package/index.node CHANGED
Binary file
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@temporalio/core-bridge",
3
- "version": "0.16.0",
3
+ "version": "0.17.1",
4
4
  "description": "Temporal.io SDK Core<>Node bridge",
5
5
  "main": "index.node",
6
6
  "types": "index.d.ts",
@@ -19,7 +19,7 @@
19
19
  "license": "MIT",
20
20
  "dependencies": {
21
21
  "@opentelemetry/api": "^1.0.3",
22
- "@temporalio/common": "^0.16.0",
22
+ "@temporalio/common": "^0.17.1",
23
23
  "arg": "^5.0.1",
24
24
  "cargo-cp-artifact": "^0.1.4",
25
25
  "which": "^2.0.2"
@@ -40,5 +40,5 @@
40
40
  "publishConfig": {
41
41
  "access": "public"
42
42
  },
43
- "gitHead": "42638434f033db2b55c43c2a9a7751d883ba17ec"
43
+ "gitHead": "3ee26c92e9c18500f919654a402237a86e69652e"
44
44
  }
@@ -43,6 +43,7 @@ ringbuf = "0.2"
43
43
  slotmap = "1.0"
44
44
  thiserror = "1.0"
45
45
  tokio = { version = "1.1", features = ["rt", "rt-multi-thread", "parking_lot", "time", "fs"] }
46
+ tokio-util = { version = "0.6.9" }
46
47
  tokio-stream = "0.1"
47
48
  tonic = { version = "0.6", features = ["tls", "tls-roots"] }
48
49
  tower = "0.4"
@@ -24,4 +24,4 @@ rustfsm_trait = { version = "0.1", path = "../rustfsm_trait" }
24
24
  trybuild = { version = "1.0", features = ["diff"] }
25
25
 
26
26
  [package.metadata.workspaces]
27
- independent = true
27
+ independent = true
@@ -1,5 +1,3 @@
1
- extern crate proc_macro;
2
-
3
1
  use proc_macro::TokenStream;
4
2
  use quote::{quote, quote_spanned};
5
3
  use std::collections::{hash_map::Entry, HashMap, HashSet};
@@ -82,7 +80,7 @@ use syn::{
82
80
  /// ReadingCard { card_data: data.clone() }.into(),
83
81
  /// SharedState { last_id: Some(data) }
84
82
  /// )
85
- /// }
83
+ /// }
86
84
  /// }
87
85
  /// }
88
86
  /// }
@@ -200,7 +198,7 @@ impl Parse for StateMachineDefinition {
200
198
  // Parse visibility if present
201
199
  let visibility = input.parse()?;
202
200
  // parse the state machine name, command type, and error type
203
- let (name, command_type, error_type, shared_state_type) = parse_machine_types(&input)
201
+ let (name, command_type, error_type, shared_state_type) = parse_machine_types(input)
204
202
  .map_err(|mut e| {
205
203
  e.combine(Error::new(
206
204
  e.span(),
@@ -235,7 +233,7 @@ impl Parse for StateMachineDefinition {
235
233
  }
236
234
  }
237
235
 
238
- fn parse_machine_types(input: &ParseStream) -> Result<(Ident, Ident, Ident, Option<Type>)> {
236
+ fn parse_machine_types(input: ParseStream) -> Result<(Ident, Ident, Ident, Option<Type>)> {
239
237
  let _: kw::name = input.parse()?;
240
238
  let name: Ident = input.parse()?;
241
239
  input.parse::<Token![;]>()?;
@@ -443,7 +441,7 @@ impl StateMachineDefinition {
443
441
  },
444
442
  multi_dests => {
445
443
  let string_dests: Vec<_> = multi_dests.iter()
446
- .map(|i| i.to_string()).collect();
444
+ .map(ToString::to_string).collect();
447
445
  let enum_ident = Ident::new(&string_dests.join("Or"),
448
446
  multi_dests[0].span());
449
447
  let multi_dest_enum = quote! {
@@ -558,6 +556,7 @@ impl StateMachineDefinition {
558
556
  fn state(&self) -> &Self::State {
559
557
  &self.state
560
558
  }
559
+
561
560
  fn set_state(&mut self, new: Self::State) {
562
561
  self.state = new
563
562
  }
@@ -590,7 +589,7 @@ impl StateMachineDefinition {
590
589
  #trait_impl
591
590
  };
592
591
 
593
- output.into()
592
+ TokenStream::from(output)
594
593
  }
595
594
 
596
595
  fn all_states(&self) -> HashSet<Ident> {
@@ -628,7 +627,7 @@ impl StateMachineDefinition {
628
627
  /// Merge transition's dest state lists for those with the same from state & handler
629
628
  fn merge_transition_dests(transitions: Vec<Transition>) -> Vec<Transition> {
630
629
  let mut map = HashMap::<_, Transition>::new();
631
- transitions.into_iter().for_each(|t| {
630
+ for t in transitions {
632
631
  // We want to use the transition sans-destinations as the key
633
632
  let without_dests = {
634
633
  let mut wd = t.clone();
@@ -643,6 +642,6 @@ fn merge_transition_dests(transitions: Vec<Transition>) -> Vec<Transition> {
643
642
  v.insert(t);
644
643
  }
645
644
  }
646
- });
645
+ }
647
646
  map.into_iter().map(|(_, v)| v).collect()
648
647
  }
@@ -11,4 +11,4 @@ description = "Trait sub-crate of the `rustfsm` crate"
11
11
  [dependencies]
12
12
 
13
13
  [package.metadata.workspaces]
14
- independent = true
14
+ independent = true
@@ -112,7 +112,7 @@ where
112
112
  pub fn unwrap(self) -> Vec<M::Command> {
113
113
  match self {
114
114
  Self::Ok { commands } => commands,
115
- _ => panic!("Transition was not successful!"),
115
+ Self::InvalidTransition => panic!("Transition was not successful!"),
116
116
  }
117
117
  }
118
118
  }
@@ -38,7 +38,7 @@ pub mod coresdk {
38
38
  activity_id: String,
39
39
  reason: ActivityCancelReason,
40
40
  ) -> Self {
41
- ActivityTask {
41
+ Self {
42
42
  task_token,
43
43
  activity_id,
44
44
  variant: Some(activity_task::Variant::Cancel(Cancel {
@@ -75,7 +75,7 @@ pub mod coresdk {
75
75
  })),
76
76
  }
77
77
  }
78
- pub fn will_complete_async() -> Self {
78
+ pub const fn will_complete_async() -> Self {
79
79
  Self {
80
80
  status: Some(activity_result::Status::WillCompleteAsync(
81
81
  WillCompleteAsync {},
@@ -195,7 +195,7 @@ pub mod coresdk {
195
195
  "jobs: {})",
196
196
  self.jobs
197
197
  .iter()
198
- .map(|j| j.to_string())
198
+ .map(ToString::to_string)
199
199
  .collect::<Vec<_>>()
200
200
  .as_slice()
201
201
  .join(", ")
@@ -264,18 +264,25 @@ pub mod coresdk {
264
264
  }
265
265
 
266
266
  pub mod workflow_completion {
267
- use crate::coresdk::workflow_completion::wf_activation_completion::Status;
267
+ use crate::temporal::api::failure;
268
268
  tonic::include_proto!("coresdk.workflow_completion");
269
269
 
270
270
  impl wf_activation_completion::Status {
271
- pub fn is_success(&self) -> bool {
271
+ pub const fn is_success(&self) -> bool {
272
272
  match &self {
273
- Status::Successful(_) => true,
274
- Status::Failed(_) => false,
273
+ Self::Successful(_) => true,
274
+ Self::Failed(_) => false,
275
275
  }
276
276
  }
277
277
  }
278
+
279
+ impl From<failure::v1::Failure> for Failure {
280
+ fn from(f: failure::v1::Failure) -> Self {
281
+ Failure { failure: Some(f) }
282
+ }
283
+ }
278
284
  }
285
+
279
286
  pub mod child_workflow {
280
287
  tonic::include_proto!("coresdk.child_workflow");
281
288
  }
@@ -370,7 +377,7 @@ pub mod coresdk {
370
377
 
371
378
  impl From<wf_activation_job::Variant> for WfActivationJob {
372
379
  fn from(a: wf_activation_job::Variant) -> Self {
373
- WfActivationJob { variant: Some(a) }
380
+ Self { variant: Some(a) }
374
381
  }
375
382
  }
376
383
 
@@ -382,7 +389,7 @@ pub mod coresdk {
382
389
 
383
390
  impl From<workflow_command::Variant> for WorkflowCommand {
384
391
  fn from(v: workflow_command::Variant) -> Self {
385
- WorkflowCommand { variant: Some(v) }
392
+ Self { variant: Some(v) }
386
393
  }
387
394
  }
388
395
 
@@ -591,7 +598,7 @@ pub mod coresdk {
591
598
  }
592
599
 
593
600
  impl ActivityResult {
594
- pub fn ok(result: Payload) -> Self {
601
+ pub const fn ok(result: Payload) -> Self {
595
602
  Self {
596
603
  status: Some(activity_result::activity_result::Status::Completed(
597
604
  activity_result::Success {
@@ -615,24 +622,18 @@ pub mod coresdk {
615
622
  .workflow_execution
616
623
  .map(|we| (we.workflow_id, we.run_id))
617
624
  .unwrap_or_default();
618
- ActivityTask {
625
+ Self {
619
626
  task_token: r.task_token,
620
627
  activity_id: r.activity_id,
621
628
  variant: Some(activity_task::activity_task::Variant::Start(
622
629
  activity_task::Start {
623
630
  workflow_namespace: r.workflow_namespace,
624
- workflow_type: r
625
- .workflow_type
626
- .map(|wt| wt.name)
627
- .unwrap_or_else(|| "".to_string()),
631
+ workflow_type: r.workflow_type.map_or_else(|| "".to_string(), |wt| wt.name),
628
632
  workflow_execution: Some(common::WorkflowExecution {
629
633
  workflow_id,
630
634
  run_id,
631
635
  }),
632
- activity_type: r
633
- .activity_type
634
- .map(|at| at.name)
635
- .unwrap_or_else(|| "".to_string()),
636
+ activity_type: r.activity_type.map_or_else(|| "".to_string(), |at| at.name),
636
637
  header_fields: r.header.map(Into::into).unwrap_or_default(),
637
638
  input: Vec::from_payloads(r.input),
638
639
  heartbeat_details: Vec::from_payloads(r.heartbeat_details),
@@ -664,7 +665,7 @@ pub mod coresdk {
664
665
 
665
666
  impl From<common::WorkflowExecution> for WorkflowExecution {
666
667
  fn from(exc: common::WorkflowExecution) -> Self {
667
- WorkflowExecution {
668
+ Self {
668
669
  workflow_id: exc.workflow_id,
669
670
  run_id: exc.run_id,
670
671
  }
@@ -767,7 +768,7 @@ pub mod coresdk {
767
768
  T: AsRef<[u8]>,
768
769
  {
769
770
  fn from(v: T) -> Self {
770
- Payloads {
771
+ Self {
771
772
  payloads: vec![v.into()],
772
773
  }
773
774
  }
@@ -786,10 +787,10 @@ pub mod coresdk {
786
787
  match v.payloads.pop() {
787
788
  None => Err(PayloadsToPayloadError::NoPayload),
788
789
  Some(p) => {
789
- if !v.payloads.is_empty() {
790
- Err(PayloadsToPayloadError::MoreThanOnePayload)
791
- } else {
790
+ if v.payloads.is_empty() {
792
791
  Ok(p.into())
792
+ } else {
793
+ Err(PayloadsToPayloadError::MoreThanOnePayload)
793
794
  }
794
795
  }
795
796
  }
@@ -1091,26 +1092,22 @@ pub mod temporal {
1091
1092
  impl HistoryEvent {
1092
1093
  /// Returns true if this is an event created to mirror a command
1093
1094
  pub fn is_command_event(&self) -> bool {
1094
- if let Some(et) = EventType::from_i32(self.event_type) {
1095
- match et {
1096
- EventType::ActivityTaskScheduled
1097
- | EventType::ActivityTaskCancelRequested
1098
- | EventType::MarkerRecorded
1099
- | EventType::RequestCancelExternalWorkflowExecutionInitiated
1100
- | EventType::SignalExternalWorkflowExecutionInitiated
1101
- | EventType::StartChildWorkflowExecutionInitiated
1102
- | EventType::TimerCanceled
1103
- | EventType::TimerStarted
1104
- | EventType::UpsertWorkflowSearchAttributes
1105
- | EventType::WorkflowExecutionCanceled
1106
- | EventType::WorkflowExecutionCompleted
1107
- | EventType::WorkflowExecutionContinuedAsNew
1108
- | EventType::WorkflowExecutionFailed => true,
1109
- _ => false,
1110
- }
1111
- } else {
1112
- false
1113
- }
1095
+ EventType::from_i32(self.event_type).map_or(false, |et| match et {
1096
+ EventType::ActivityTaskScheduled
1097
+ | EventType::ActivityTaskCancelRequested
1098
+ | EventType::MarkerRecorded
1099
+ | EventType::RequestCancelExternalWorkflowExecutionInitiated
1100
+ | EventType::SignalExternalWorkflowExecutionInitiated
1101
+ | EventType::StartChildWorkflowExecutionInitiated
1102
+ | EventType::TimerCanceled
1103
+ | EventType::TimerStarted
1104
+ | EventType::UpsertWorkflowSearchAttributes
1105
+ | EventType::WorkflowExecutionCanceled
1106
+ | EventType::WorkflowExecutionCompleted
1107
+ | EventType::WorkflowExecutionContinuedAsNew
1108
+ | EventType::WorkflowExecutionFailed => true,
1109
+ _ => false,
1110
+ })
1114
1111
  }
1115
1112
 
1116
1113
  /// Returns the command's initiating event id, if present. This is the id of the
@@ -1262,16 +1259,14 @@ pub mod temporal {
1262
1259
  let last_event = self
1263
1260
  .history
1264
1261
  .as_ref()
1265
- .map(|h| h.events.last().map(|he| he.event_id))
1266
- .flatten()
1262
+ .and_then(|h| h.events.last().map(|he| he.event_id))
1267
1263
  .unwrap_or(0);
1268
1264
  write!(
1269
1265
  f,
1270
1266
  "PollWFTQResp(run_id: {}, attempt: {}, last_event: {})",
1271
1267
  self.workflow_execution
1272
1268
  .as_ref()
1273
- .map(|we| we.run_id.as_str())
1274
- .unwrap_or(""),
1269
+ .map_or("", |we| we.run_id.as_str()),
1275
1270
  self.attempt,
1276
1271
  last_event
1277
1272
  )
@@ -76,7 +76,7 @@ async fn max_activities_respected() {
76
76
  core.register_worker(
77
77
  WorkerConfigBuilder::default()
78
78
  .task_queue(TEST_Q)
79
- .max_outstanding_activities(2usize)
79
+ .max_outstanding_activities(2_usize)
80
80
  .build()
81
81
  .unwrap(),
82
82
  )
@@ -163,7 +163,7 @@ async fn heartbeats_report_cancels_only_once() {
163
163
  core.record_activity_heartbeat(ActivityHeartbeat {
164
164
  task_token: act.task_token.clone(),
165
165
  task_queue: TEST_Q.to_string(),
166
- details: vec![vec![1u8, 2, 3].into()],
166
+ details: vec![vec![1_u8, 2, 3].into()],
167
167
  });
168
168
  // We have to wait a beat for the heartbeat to be processed
169
169
  sleep(Duration::from_millis(10)).await;
@@ -183,7 +183,7 @@ async fn heartbeats_report_cancels_only_once() {
183
183
  core.record_activity_heartbeat(ActivityHeartbeat {
184
184
  task_token: act.task_token,
185
185
  task_queue: TEST_Q.to_string(),
186
- details: vec![vec![1u8, 2, 3].into()],
186
+ details: vec![vec![1_u8, 2, 3].into()],
187
187
  });
188
188
  sleep(Duration::from_millis(10)).await;
189
189
  // Since cancels always come before new tasks, if we get a new non-cancel task, we did not
@@ -251,7 +251,7 @@ async fn activity_cancel_interrupts_poll() {
251
251
  core.record_activity_heartbeat(ActivityHeartbeat {
252
252
  task_token: act.task_token,
253
253
  task_queue: TEST_Q.to_string(),
254
- details: vec![vec![1u8, 2, 3].into()],
254
+ details: vec![vec![1_u8, 2, 3].into()],
255
255
  });
256
256
  last_finisher.store(1, Ordering::SeqCst);
257
257
  },
@@ -358,7 +358,7 @@ async fn many_concurrent_heartbeat_cancels() {
358
358
  .task_queue(TEST_Q)
359
359
  .max_outstanding_activities(CONCURRENCY_NUM)
360
360
  // Only 1 poll at a time to avoid over-polling and running out of responses
361
- .max_concurrent_at_polls(1usize)
361
+ .max_concurrent_at_polls(1_usize)
362
362
  .build()
363
363
  .unwrap(),
364
364
  )
@@ -91,8 +91,8 @@ async fn shutdown_interrupts_both_polls() {
91
91
  WorkerConfigBuilder::default()
92
92
  .task_queue(TEST_Q)
93
93
  // Need only 1 concurrent pollers for mock expectations to work here
94
- .max_concurrent_wft_polls(1usize)
95
- .max_concurrent_at_polls(1usize)
94
+ .max_concurrent_wft_polls(1_usize)
95
+ .max_concurrent_at_polls(1_usize)
96
96
  .build()
97
97
  .unwrap(),
98
98
  )
@@ -290,10 +290,17 @@ async fn legacy_query_failure_on_wft_failure() {
290
290
  core.shutdown().await;
291
291
  }
292
292
 
293
+ #[rstest::rstest]
293
294
  #[tokio::test]
294
- async fn legacy_query_with_full_history_after_complete() {
295
+ async fn legacy_query_after_complete(#[values(false, true)] full_history: bool) {
295
296
  let wfid = "fake_wf_id";
296
- let t = canned_histories::single_timer_wf_completes("1");
297
+ let t = if full_history {
298
+ canned_histories::single_timer_wf_completes("1")
299
+ } else {
300
+ let mut t = canned_histories::single_timer("1");
301
+ t.add_workflow_task_completed();
302
+ t
303
+ };
297
304
  let query_with_hist_task = {
298
305
  let mut pr = hist_to_poll_resp(
299
306
  &t,
@@ -642,9 +642,9 @@ async fn workflow_update_random_seed_on_workflow_reset() {
642
642
  UpdateRandomSeed{randomness_seed})),
643
643
  }] => {
644
644
  assert_ne!(randomness_seed_from_start.load(Ordering::SeqCst),
645
- *randomness_seed)
645
+ *randomness_seed);
646
646
  }
647
- )
647
+ );
648
648
  },
649
649
  vec![CompleteWorkflowExecution { result: None }.into()],
650
650
  ),
@@ -1619,19 +1619,19 @@ async fn failing_wft_doesnt_eat_permit_forever() {
1619
1619
  t.add_by_type(EventType::WorkflowExecutionStarted);
1620
1620
  t.add_workflow_task_scheduled_and_started();
1621
1621
 
1622
- let failures = 5;
1623
- // One extra response for when we stop failing
1624
- let resps = (1..=(failures + 1)).map(|_| 1);
1625
1622
  let mock = MockServerGatewayApis::new();
1626
- let mut mock = single_hist_mock_sg("fake_wf_id", t, resps, mock, true);
1623
+ let mut mock = single_hist_mock_sg("fake_wf_id", t, [1, 1, 1], mock, true);
1627
1624
  mock.worker_cfg(TEST_Q, |cfg| {
1628
1625
  cfg.max_cached_workflows = 2;
1629
1626
  cfg.max_outstanding_workflow_tasks = 2;
1630
1627
  });
1628
+ let outstanding_mock_tasks = mock.outstanding_task_map.clone();
1631
1629
  let core = mock_core(mock);
1632
1630
 
1633
- // Spin failing the WFT to verify that we don't get stuck
1634
- for _ in 1..=failures {
1631
+ let mut run_id = "".to_string();
1632
+ // Fail twice, verifying a permit is eaten. We cannot fail the same run more than twice in a row
1633
+ // because we purposefully time out rather than spamming.
1634
+ for _ in 1..=2 {
1635
1635
  let activation = core.poll_workflow_activation(TEST_Q).await.unwrap();
1636
1636
  // Issue a nonsense completion that will trigger a WFT failure
1637
1637
  core.complete_workflow_activation(WfActivationCompletion::from_cmd(
@@ -1648,11 +1648,90 @@ async fn failing_wft_doesnt_eat_permit_forever() {
1648
1648
  variant: Some(wf_activation_job::Variant::RemoveFromCache(_)),
1649
1649
  },]
1650
1650
  );
1651
+ run_id = activation.run_id.clone();
1652
+ core.complete_workflow_activation(WfActivationCompletion::empty(TEST_Q, activation.run_id))
1653
+ .await
1654
+ .unwrap();
1655
+ assert_eq!(core.outstanding_wfts(TEST_Q), 0);
1656
+ assert_eq!(core.available_wft_permits(TEST_Q), 2);
1657
+ }
1658
+ // We should be "out of work" because the mock service thinks we didn't complete the last task,
1659
+ // which we didn't, because we don't spam failures. The real server would eventually time out
1660
+ // the task. Mock doesn't understand that, so the WFT permit is released because eventually a
1661
+ // new one will be generated. We manually clear the mock's outstanding task list so the next
1662
+ // poll will work.
1663
+ outstanding_mock_tasks
1664
+ .unwrap()
1665
+ .write()
1666
+ .remove_by_left(&run_id);
1667
+ let activation = core.poll_workflow_activation(TEST_Q).await.unwrap();
1668
+ core.complete_workflow_activation(WfActivationCompletion::from_cmd(
1669
+ TEST_Q,
1670
+ activation.run_id,
1671
+ CompleteWorkflowExecution { result: None }.into(),
1672
+ ))
1673
+ .await
1674
+ .unwrap();
1675
+
1676
+ core.shutdown().await;
1677
+ }
1678
+
1679
+ #[tokio::test]
1680
+ async fn cache_miss_doesnt_eat_permit_forever() {
1681
+ let mut t = TestHistoryBuilder::default();
1682
+ t.add_by_type(EventType::WorkflowExecutionStarted);
1683
+ t.add_full_wf_task();
1684
+ t.add_we_signaled("sig", vec![]);
1685
+ t.add_full_wf_task();
1686
+ t.add_workflow_execution_completed();
1687
+
1688
+ let mut mh = MockPollCfg::from_resp_batches(
1689
+ "fake_wf_id",
1690
+ t,
1691
+ [
1692
+ ResponseType::ToTaskNum(1),
1693
+ ResponseType::OneTask(2),
1694
+ ResponseType::ToTaskNum(1),
1695
+ ResponseType::OneTask(2),
1696
+ ResponseType::ToTaskNum(1),
1697
+ ResponseType::OneTask(2),
1698
+ // Last one to complete successfully
1699
+ ResponseType::ToTaskNum(1),
1700
+ ],
1701
+ MockServerGatewayApis::new(),
1702
+ );
1703
+ mh.num_expected_fails = Some(3);
1704
+ mh.expect_fail_wft_matcher =
1705
+ Box::new(|_, cause, _| matches!(cause, WorkflowTaskFailedCause::ResetStickyTaskQueue));
1706
+ let mut mock = build_mock_pollers(mh);
1707
+ mock.worker_cfg(TEST_Q, |cfg| {
1708
+ cfg.max_outstanding_workflow_tasks = 2;
1709
+ });
1710
+ let core = mock_core(mock);
1711
+
1712
+ // Spin missing the cache to verify that we don't get stuck
1713
+ for _ in 1..=3 {
1714
+ // Start
1715
+ let activation = core.poll_workflow_activation(TEST_Q).await.unwrap();
1716
+ core.complete_workflow_activation(WfActivationCompletion::empty(TEST_Q, activation.run_id))
1717
+ .await
1718
+ .unwrap();
1719
+ // Evict
1720
+ let activation = core.poll_workflow_activation(TEST_Q).await.unwrap();
1721
+ assert_matches!(
1722
+ activation.jobs.as_slice(),
1723
+ [WfActivationJob {
1724
+ variant: Some(wf_activation_job::Variant::RemoveFromCache(_)),
1725
+ },]
1726
+ );
1651
1727
  core.complete_workflow_activation(WfActivationCompletion::empty(TEST_Q, activation.run_id))
1652
1728
  .await
1653
1729
  .unwrap();
1654
1730
  assert_eq!(core.outstanding_wfts(TEST_Q), 0);
1655
1731
  assert_eq!(core.available_wft_permits(TEST_Q), 2);
1732
+ // When we loop back up, the poll will trigger a cache miss, which we should immediately
1733
+ // reply to WFT with failure, and then poll again, which will deliver the from-the-start
1734
+ // history
1656
1735
  }
1657
1736
  let activation = core.poll_workflow_activation(TEST_Q).await.unwrap();
1658
1737
  core.complete_workflow_activation(WfActivationCompletion::from_cmd(
@@ -1,6 +1,6 @@
1
1
  //! Error types exposed by public APIs
2
2
 
3
- use crate::{machines::WFMachinesError, task_token::TaskToken, WorkerLookupErr};
3
+ use crate::{machines::WFMachinesError, WorkerLookupErr};
4
4
  use temporal_sdk_core_protos::coresdk::{
5
5
  activity_result::ActivityResult, workflow_completion::WfActivationCompletion,
6
6
  };
@@ -11,17 +11,15 @@ pub(crate) struct WorkflowUpdateError {
11
11
  /// Underlying workflow error
12
12
  pub source: WFMachinesError,
13
13
  /// The run id of the erring workflow
14
+ #[allow(dead_code)] // Useful in debug output
14
15
  pub run_id: String,
15
- /// The task token associated with this update, if one existed yet.
16
- pub task_token: Option<TaskToken>,
17
16
  }
18
17
 
19
18
  impl From<WorkflowMissingError> for WorkflowUpdateError {
20
19
  fn from(wme: WorkflowMissingError) -> Self {
21
- WorkflowUpdateError {
20
+ Self {
22
21
  source: WFMachinesError::Fatal("Workflow machines missing".to_string()),
23
22
  run_id: wme.run_id,
24
- task_token: None,
25
23
  }
26
24
  }
27
25
  }
@@ -72,8 +70,8 @@ pub enum PollWfError {
72
70
  impl From<WorkerLookupErr> for PollWfError {
73
71
  fn from(e: WorkerLookupErr) -> Self {
74
72
  match e {
75
- WorkerLookupErr::Shutdown(_) => PollWfError::ShutDown,
76
- WorkerLookupErr::NoWorker(s) => PollWfError::NoWorkerForQueue(s),
73
+ WorkerLookupErr::Shutdown(_) => Self::ShutDown,
74
+ WorkerLookupErr::NoWorker(s) => Self::NoWorkerForQueue(s),
77
75
  }
78
76
  }
79
77
  }
@@ -97,8 +95,8 @@ pub enum PollActivityError {
97
95
  impl From<WorkerLookupErr> for PollActivityError {
98
96
  fn from(e: WorkerLookupErr) -> Self {
99
97
  match e {
100
- WorkerLookupErr::Shutdown(_) => PollActivityError::ShutDown,
101
- WorkerLookupErr::NoWorker(s) => PollActivityError::NoWorkerForQueue(s),
98
+ WorkerLookupErr::Shutdown(_) => Self::ShutDown,
99
+ WorkerLookupErr::NoWorker(s) => Self::NoWorkerForQueue(s),
102
100
  }
103
101
  }
104
102
  }
@@ -127,8 +125,9 @@ pub enum CompleteWfError {
127
125
  impl From<WorkerLookupErr> for CompleteWfError {
128
126
  fn from(e: WorkerLookupErr) -> Self {
129
127
  match e {
130
- WorkerLookupErr::Shutdown(s) => CompleteWfError::NoWorkerForQueue(s),
131
- WorkerLookupErr::NoWorker(s) => CompleteWfError::NoWorkerForQueue(s),
128
+ WorkerLookupErr::Shutdown(s) | WorkerLookupErr::NoWorker(s) => {
129
+ Self::NoWorkerForQueue(s)
130
+ }
132
131
  }
133
132
  }
134
133
  }
@@ -156,8 +155,9 @@ pub enum CompleteActivityError {
156
155
  impl From<WorkerLookupErr> for CompleteActivityError {
157
156
  fn from(e: WorkerLookupErr) -> Self {
158
157
  match e {
159
- WorkerLookupErr::Shutdown(s) => CompleteActivityError::NoWorkerForQueue(s),
160
- WorkerLookupErr::NoWorker(s) => CompleteActivityError::NoWorkerForQueue(s),
158
+ WorkerLookupErr::Shutdown(s) | WorkerLookupErr::NoWorker(s) => {
159
+ Self::NoWorkerForQueue(s)
160
+ }
161
161
  }
162
162
  }
163
163
  }