@temporalio/core-bridge 1.9.0 → 1.9.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. package/Cargo.lock +2 -33
  2. package/package.json +3 -3
  3. package/releases/aarch64-apple-darwin/index.node +0 -0
  4. package/releases/aarch64-unknown-linux-gnu/index.node +0 -0
  5. package/releases/x86_64-apple-darwin/index.node +0 -0
  6. package/releases/x86_64-pc-windows-msvc/index.node +0 -0
  7. package/releases/x86_64-unknown-linux-gnu/index.node +0 -0
  8. package/sdk-core/.github/workflows/per-pr.yml +1 -1
  9. package/sdk-core/Cargo.toml +1 -0
  10. package/sdk-core/README.md +1 -1
  11. package/sdk-core/client/src/lib.rs +40 -11
  12. package/sdk-core/client/src/workflow_handle/mod.rs +4 -0
  13. package/sdk-core/core/Cargo.toml +3 -2
  14. package/sdk-core/core/src/core_tests/activity_tasks.rs +69 -2
  15. package/sdk-core/core/src/core_tests/local_activities.rs +99 -4
  16. package/sdk-core/core/src/core_tests/queries.rs +90 -1
  17. package/sdk-core/core/src/core_tests/workflow_tasks.rs +8 -11
  18. package/sdk-core/core/src/telemetry/metrics.rs +4 -4
  19. package/sdk-core/core/src/telemetry/mod.rs +1 -3
  20. package/sdk-core/core/src/test_help/mod.rs +9 -0
  21. package/sdk-core/core/src/worker/activities/activity_heartbeat_manager.rs +1 -2
  22. package/sdk-core/core/src/worker/activities/local_activities.rs +1 -1
  23. package/sdk-core/core/src/worker/activities.rs +11 -4
  24. package/sdk-core/core/src/worker/mod.rs +6 -1
  25. package/sdk-core/core/src/worker/workflow/machines/local_activity_state_machine.rs +0 -1
  26. package/sdk-core/core/src/worker/workflow/machines/upsert_search_attributes_state_machine.rs +28 -6
  27. package/sdk-core/core/src/worker/workflow/machines/workflow_machines/local_acts.rs +15 -0
  28. package/sdk-core/core/src/worker/workflow/machines/workflow_machines.rs +19 -15
  29. package/sdk-core/core/src/worker/workflow/mod.rs +89 -59
  30. package/sdk-core/core/src/worker/workflow/workflow_stream/saved_wf_inputs.rs +1 -1
  31. package/sdk-core/core-api/Cargo.toml +2 -2
  32. package/sdk-core/fsm/rustfsm_procmacro/Cargo.toml +1 -1
  33. package/sdk-core/sdk/Cargo.toml +2 -2
  34. package/sdk-core/sdk/src/lib.rs +13 -8
  35. package/sdk-core/sdk/src/workflow_context.rs +2 -2
  36. package/sdk-core/sdk/src/workflow_future.rs +1 -1
  37. package/sdk-core/sdk-core-protos/Cargo.toml +1 -1
  38. package/sdk-core/sdk-core-protos/protos/api_upstream/temporal/api/enums/v1/event_type.proto +4 -0
  39. package/sdk-core/sdk-core-protos/protos/api_upstream/temporal/api/enums/v1/reset.proto +16 -3
  40. package/sdk-core/sdk-core-protos/protos/api_upstream/temporal/api/enums/v1/update.proto +11 -0
  41. package/sdk-core/sdk-core-protos/protos/api_upstream/temporal/api/history/v1/message.proto +10 -0
  42. package/sdk-core/sdk-core-protos/protos/api_upstream/temporal/api/workflowservice/v1/request_response.proto +4 -1
  43. package/sdk-core/sdk-core-protos/protos/testsrv_upstream/Makefile +3 -10
  44. package/sdk-core/sdk-core-protos/protos/testsrv_upstream/api-linter.yaml +0 -5
  45. package/sdk-core/sdk-core-protos/protos/testsrv_upstream/temporal/api/testservice/v1/request_response.proto +3 -4
  46. package/sdk-core/sdk-core-protos/src/history_info.rs +2 -2
  47. package/sdk-core/sdk-core-protos/src/lib.rs +1 -0
  48. package/sdk-core/tests/integ_tests/queries_tests.rs +12 -12
  49. package/sdk-core/tests/integ_tests/workflow_tests/patches.rs +48 -0
  50. package/src/conversions.rs +19 -17
  51. package/src/runtime.rs +32 -4
  52. package/sdk-core/sdk-core-protos/protos/testsrv_upstream/dependencies/gogoproto/gogo.proto +0 -141
@@ -632,14 +632,14 @@ pub struct MetricsCallBuffer<I>
 where
     I: BufferInstrumentRef,
 {
-    calls_rx: crossbeam::channel::Receiver<MetricEvent<I>>,
+    calls_rx: crossbeam_channel::Receiver<MetricEvent<I>>,
     calls_tx: LogErrOnFullSender<MetricEvent<I>>,
 }
 #[derive(Clone, Debug)]
-struct LogErrOnFullSender<I>(crossbeam::channel::Sender<I>);
+struct LogErrOnFullSender<I>(crossbeam_channel::Sender<I>);
 impl<I> LogErrOnFullSender<I> {
     fn send(&self, v: I) {
-        if let Err(crossbeam::channel::TrySendError::Full(_)) = self.0.try_send(v) {
+        if let Err(crossbeam_channel::TrySendError::Full(_)) = self.0.try_send(v) {
             error!(
                 "Core's metrics buffer is full! Dropping call to record metrics. \
                  Make sure you drain the metric buffer often!"
@@ -654,7 +654,7 @@ where
 {
     /// Create a new buffer with the given capacity
     pub fn new(buffer_size: usize) -> Self {
-        let (calls_tx, calls_rx) = crossbeam::channel::bounded(buffer_size);
+        let (calls_tx, calls_rx) = crossbeam_channel::bounded(buffer_size);
         MetricsCallBuffer {
             calls_rx,
             calls_tx: LogErrOnFullSender(calls_tx),
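[Editor's note] The changes above (together with the Cargo.toml change later in this diff) replace the `crossbeam` facade crate with a direct `crossbeam-channel` dependency; the buffer's drop-on-full behavior is unchanged. A minimal standalone sketch of that pattern, with illustrative names rather than the SDK's actual items:

```rust
// Sketch of the non-blocking, drop-on-full metrics buffer pattern.
use crossbeam_channel::{bounded, Receiver, Sender, TrySendError};

struct LogErrOnFullSender<T>(Sender<T>);

impl<T> LogErrOnFullSender<T> {
    fn send(&self, v: T) {
        // try_send never blocks; if the buffer is full the value is dropped
        // and we log instead of stalling the recording thread.
        if let Err(TrySendError::Full(_)) = self.0.try_send(v) {
            eprintln!("metrics buffer is full; dropping event");
        }
    }
}

fn main() {
    let (tx, rx): (Sender<u32>, Receiver<u32>) = bounded(2);
    let tx = LogErrOnFullSender(tx);
    for i in 0..4 {
        tx.send(i); // the last two sends are dropped, not blocked
    }
    assert_eq!(rx.try_iter().count(), 2);
}
```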
@@ -271,9 +271,7 @@ impl TemporalitySelector for ConstantTemporality {
         self.0
     }
 }
-fn metric_temporality_to_selector(
-    t: MetricTemporality,
-) -> impl TemporalitySelector + Send + Sync + Clone {
+fn metric_temporality_to_selector(t: MetricTemporality) -> impl TemporalitySelector + Clone {
     match t {
         MetricTemporality::Cumulative => ConstantTemporality(Temporality::Cumulative),
         MetricTemporality::Delta => ConstantTemporality(Temporality::Delta),
@@ -20,6 +20,7 @@ use mockall::TimesRange;
 use parking_lot::RwLock;
 use std::{
     collections::{BTreeMap, HashMap, HashSet, VecDeque},
+    fmt::Debug,
     ops::{Deref, DerefMut},
     pin::Pin,
     sync::{
@@ -722,6 +723,14 @@ impl<T> DerefMut for QueueResponse<T> {
         &mut self.resp
     }
 }
+impl<T> Debug for QueueResponse<T>
+where
+    T: Debug,
+{
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        self.resp.fmt(f)
+    }
+}

 pub fn hist_to_poll_resp(
     t: &TestHistoryBuilder,
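[Editor's note] The added impl is the standard delegating-`Debug` pattern: the wrapper formats exactly like the value it wraps, keeping test output readable. A self-contained sketch using a hypothetical `Wrapper` type:

```rust
// The wrapper forwards Debug formatting straight to its inner value.
use std::fmt::{self, Debug, Formatter};

struct Wrapper<T> {
    inner: T,
}

impl<T: Debug> Debug for Wrapper<T> {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        // Delegate so `{:?}` shows the payload, not the wrapper.
        self.inner.fmt(f)
    }
}

fn main() {
    let w = Wrapper { inner: vec![1, 2, 3] };
    assert_eq!(format!("{w:?}"), "[1, 2, 3]");
}
```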
@@ -450,8 +450,7 @@ mod test {
         hm.shutdown().await;
     }

-    /// Ensure that heartbeat can be called from a tight loop without any throttle_interval, resulting in two
-    /// interactions with the server - one immediately and one after 500ms after the throttle_interval.
+    /// Ensure that heartbeat can be called from a tight loop and correctly throttle
     #[tokio::test]
     async fn process_tight_loop_and_shutdown() {
         let mut mock_client = mock_workflow_client();
@@ -1246,7 +1246,7 @@ mod tests {
         // Wait more than the timeout before grabbing the task
         sleep(timeout + Duration::from_millis(10)).await;

-        assert!(dbg!(lam.next_pending().await.unwrap()).is_timeout(false));
+        assert!(lam.next_pending().await.unwrap().is_timeout(false));
         assert_eq!(lam.num_in_backoff(), 0);
         assert_eq!(lam.num_outstanding(), 0);
     }
@@ -152,6 +152,9 @@ pub(crate) struct WorkerActivityTasks {
     /// eager activities). Tasks received in this stream hold a "tracked" permit that is issued by
     /// the `eager_activities_semaphore`.
     eager_activities_tx: UnboundedSender<TrackedPermittedTqResp>,
+    /// Ensures that no activities are in the middle of flushing their results to server while we
+    /// try to shut down.
+    completers_lock: tokio::sync::RwLock<()>,

     metrics: MetricsContext,

@@ -230,6 +233,7 @@ impl WorkerActivityTasks {
             default_heartbeat_throttle_interval,
             poll_returned_shutdown_token: CancellationToken::new(),
             outstanding_activity_tasks,
+            completers_lock: Default::default(),
         }
     }

@@ -283,6 +287,7 @@

     pub(crate) async fn shutdown(&self) {
         self.initiate_shutdown();
+        let _ = self.completers_lock.write().await;
         self.poll_returned_shutdown_token.cancelled().await;
         self.heartbeat_manager.shutdown().await;
     }
@@ -321,10 +326,10 @@
             jh.abort()
         };
         self.heartbeat_manager.evict(task_token.clone()).await;
-        self.complete_notify.notify_waiters();

         // No need to report activities which we already know the server doesn't care about
         if !known_not_found {
+            let _flushing_guard = self.completers_lock.read().await;
             let maybe_net_err = match status {
                 aer::Status::WillCompleteAsync(_) => None,
                 aer::Status::Completed(ar::Success { result }) => client
@@ -364,8 +369,8 @@
                 {
                     details
                 } else {
-                    warn!(task_token = ? task_token,
-                          "Expected activity cancelled status with CanceledFailureInfo");
+                    warn!(task_token=?task_token,
+                        "Expected activity cancelled status with CanceledFailureInfo");
                     None
                 };
                 client
@@ -376,9 +381,11 @@
             }
         };

+        self.complete_notify.notify_waiters();
+
         if let Some(e) = maybe_net_err {
             if e.code() == tonic::Code::NotFound {
-                warn!(task_token = ?task_token, details = ?e, "Activity not found on \
+                warn!(task_token=?task_token, details=?e, "Activity not found on \
                 completion. This may happen if the activity has already been cancelled but \
                 completed anyway.");
             } else {
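[Editor's note] The new `completers_lock` carries no data; it only synchronizes shutdown against in-flight completions. Each completion holds a read guard while flushing its result, and `shutdown` takes the write guard, which is granted only once every read guard has been released. A reduced sketch of that idea, assuming only `tokio` (not the SDK's actual types):

```rust
use std::{sync::Arc, time::Duration};
use tokio::sync::RwLock;

#[tokio::main]
async fn main() {
    // The lock guards no data; it only tracks in-flight completions.
    let completers_lock = Arc::new(RwLock::new(()));

    let lock = completers_lock.clone();
    let completer = tokio::spawn(async move {
        let _flushing_guard = lock.read().await; // completion in progress
        tokio::time::sleep(Duration::from_millis(50)).await;
        println!("result flushed to server");
    });

    // Give the completer a chance to take its read guard first.
    tokio::time::sleep(Duration::from_millis(10)).await;

    // Shutdown path: the write guard is only granted once every
    // outstanding completion has finished flushing.
    let _ = completers_lock.write().await;
    println!("safe to shut down");
    completer.await.unwrap();
}
```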
@@ -179,8 +179,14 @@ impl WorkerTrait for Worker {
         // Let the manager know that shutdown has been initiated to try to unblock the local
         // activity poll in case this worker is an activity-only worker.
         self.local_act_mgr.shutdown_initiated();
+
         if !self.workflows.ever_polled() {
             self.local_act_mgr.workflows_have_shutdown();
+        } else {
+            // Bump the workflow stream with a pointless input, since if a client initiates shutdown
+            // and then immediately blocks waiting on a workflow activation poll, it's possible that
+            // there may not be any more inputs ever, and that poll will never resolve.
+            self.workflows.send_get_state_info_msg();
         }
     }

@@ -189,7 +195,6 @@
     }

     async fn finalize_shutdown(self) {
-        self.shutdown().await;
         self.finalize_shutdown().await
     }
 }
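[Editor's note] The `else` branch fixes a potential hang: a poller blocked on a stream that only yields in response to inputs can wait forever at shutdown if no further inputs arrive. Sending one no-op input wakes the stream so it can observe the shutdown state. A toy model of the mechanism (the `Input` enum and names here are illustrative; in the SDK the bump is `GetStateInfoMsg`):

```rust
use tokio::sync::mpsc;

enum Input {
    Work(u32),
    GetStateInfo, // no-op "bump" whose only job is to wake the stream
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::unbounded_channel();

    let poller = tokio::spawn(async move {
        // Blocks until an input arrives; without a bump this could wait forever.
        while let Some(input) = rx.recv().await {
            match input {
                Input::Work(n) => println!("processed {n}"),
                Input::GetStateInfo => {
                    println!("bumped; noticing shutdown and exiting");
                    break;
                }
            }
        }
    });

    tx.send(Input::Work(1)).unwrap();
    // Shutdown path: bump the stream so the blocked poll resolves.
    tx.send(Input::GetStateInfo).unwrap();
    poller.await.unwrap();
}
```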
@@ -1250,7 +1250,6 @@ mod tests {
     });

     let mut worker = build_fake_sdk(mock_cfg);
-    dbg!("Past thing");
     worker.register_wf(DEFAULT_WORKFLOW_TYPE, la_wf);
     worker.register_activity(
         DEFAULT_ACTIVITY_TYPE,
@@ -208,7 +208,9 @@ mod tests {
     use temporal_sdk_core_api::Worker;
     use temporal_sdk_core_protos::{
         coresdk::{
-            workflow_commands::SetPatchMarker, workflow_completion::WorkflowActivationCompletion,
+            workflow_activation::{workflow_activation_job, WorkflowActivationJob},
+            workflow_commands::SetPatchMarker,
+            workflow_completion::WorkflowActivationCompletion,
             AsJsonPayloadExt,
         },
         temporal::api::{
@@ -332,24 +334,38 @@ mod tests {
             t.add_has_change_marker(&patch_id, false);
         }
         t.add_upsert_search_attrs_for_patch(&[patch_id.clone()]);
+        t.add_we_signaled("hi", vec![]);
         t.add_full_wf_task();
         t.add_workflow_execution_completed();

         let mut mp = MockPollCfg::from_resp_batches(
             "fakeid",
             t,
-            [ResponseType::ToTaskNum(1), ResponseType::AllHistory],
+            [ResponseType::ToTaskNum(1), ResponseType::ToTaskNum(2)],
             mock_workflow_client(),
         );
         // Ensure the upsert command has an empty map when not using the patched command
         if !with_patched_cmd {
             mp.completion_asserts = Some(Box::new(|wftc| {
-                assert_matches!(wftc.commands.first().and_then(|c| c.attributes.as_ref()).unwrap(),
-                    Attributes::UpsertWorkflowSearchAttributesCommandAttributes(attrs)
-                    if attrs.search_attributes.as_ref().unwrap().indexed_fields.is_empty())
+                let cmd_attrs = wftc
+                    .commands
+                    .first()
+                    .and_then(|c| c.attributes.as_ref())
+                    .unwrap();
+                if matches!(
+                    cmd_attrs,
+                    Attributes::CompleteWorkflowExecutionCommandAttributes(_)
+                ) {
+                    return;
+                }
+                assert_matches!(cmd_attrs,
+                    Attributes::UpsertWorkflowSearchAttributesCommandAttributes(attrs)
+                    if attrs.search_attributes.clone().unwrap_or_default().indexed_fields.is_empty());
             }));
         }
-        let core = mock_worker(build_mock_pollers(mp));
+        let mut mock = build_mock_pollers(mp);
+        mock.worker_cfg(|w| w.max_cached_workflows = 1);
+        let core = mock_worker(mock);

         let mut ver_upsert = HashMap::new();
         ver_upsert.insert(
@@ -379,6 +395,12 @@ mod tests {
             .unwrap();
         // Now ensure that encountering the upsert in history works fine
         let act = core.poll_workflow_activation().await.unwrap();
+        assert_matches!(
+            act.jobs.as_slice(),
+            [WorkflowActivationJob {
+                variant: Some(workflow_activation_job::Variant::SignalWorkflow(_)),
+            }]
+        );
         core.complete_execution(&act.run_id).await;
     }
 }
@@ -20,6 +20,8 @@ pub(super) struct LocalActivityData {
     /// Maps local activity sequence numbers to their resolutions as found when looking ahead at
     /// next WFT
     preresolutions: HashMap<u32, ResolveDat>,
+    /// Set true if the workflow is terminating
+    am_terminating: bool,
 }

 impl LocalActivityData {
@@ -45,6 +47,10 @@ impl LocalActivityData {
         wf_id: &str,
         run_id: &str,
     ) -> Vec<LocalActRequest> {
+        if self.am_terminating {
+            return vec![LocalActRequest::CancelAllInRun(run_id.to_string())];
+        }
+
         self.cancel_requests
             .drain(..)
             .map(LocalActRequest::Cancel)
@@ -65,6 +71,9 @@

     /// Returns all outstanding local activities, whether executing or requested and in the queue
     pub(super) fn outstanding_la_count(&self) -> usize {
+        if self.am_terminating {
+            return 0;
+        }
         self.executing.len() + self.new_requests.len()
     }

@@ -82,4 +91,10 @@
             .position(|req| req.seq == seq)
             .map(|i| self.new_requests.remove(i))
     }
+
+    /// Store that this workflow is terminating, and thus no new LA requests need be processed,
+    /// and any executing LAs should not prevent us from shutting down.
+    pub(super) fn indicate_terminating(&mut self) {
+        self.am_terminating = true;
+    }
 }
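[Editor's note] A reduced illustration of the new `am_terminating` flag: once set, the bookkeeping reports zero outstanding local activities, so executing LAs cannot block workflow completion. Field and method names mirror the diff, but the types are simplified stand-ins:

```rust
#[derive(Default)]
struct LocalActivityData {
    executing: Vec<u32>,
    new_requests: Vec<u32>,
    am_terminating: bool,
}

impl LocalActivityData {
    fn indicate_terminating(&mut self) {
        self.am_terminating = true;
    }

    fn outstanding_la_count(&self) -> usize {
        if self.am_terminating {
            // Executing LAs no longer count against shutdown.
            return 0;
        }
        self.executing.len() + self.new_requests.len()
    }
}

fn main() {
    let mut d = LocalActivityData {
        executing: vec![1],
        new_requests: vec![2],
        ..Default::default()
    };
    assert_eq!(d.outstanding_la_count(), 2);
    d.indicate_terminating();
    assert_eq!(d.outstanding_la_count(), 0);
}
```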
@@ -781,7 +781,10 @@ impl WorkflowMachines {
                 Ok(EventHandlingOutcome::Normal)
             };
         }
-        if event.event_type() == EventType::Unspecified || event.attributes.is_none() {
+        if event.event_type() == EventType::Unspecified
+            || event.event_type() == EventType::WorkflowExecutionUpdateRequested
+            || event.attributes.is_none()
+        {
             return if !event.worker_may_ignore {
                 Err(WFMachinesError::Fatal(format!(
                     "Event type is unspecified! This history is invalid. Event detail: {event:?}"
@@ -861,16 +864,14 @@
         let event_id = event.event_id;

         let consumed_cmd = loop {
-            if let Some(peek_machine) = self.commands.front() {
-                let mach = self.machine(peek_machine.machine);
-                match patch_marker_handling(event, mach, next_event)? {
-                    EventHandlingOutcome::SkipCommand => {
-                        self.commands.pop_front();
-                        continue;
-                    }
-                    eho @ EventHandlingOutcome::SkipEvent { .. } => return Ok(eho),
-                    EventHandlingOutcome::Normal => {}
+            let maybe_machine = self.commands.front().map(|mk| self.machine(mk.machine));
+            match patch_marker_handling(event, maybe_machine, next_event)? {
+                EventHandlingOutcome::SkipCommand => {
+                    self.commands.pop_front();
+                    continue;
                 }
+                eho @ EventHandlingOutcome::SkipEvent { .. } => return Ok(eho),
+                EventHandlingOutcome::Normal => {}
             }

             let maybe_command = self.commands.pop_front();
@@ -1469,6 +1470,9 @@
         let cwfm = self.add_new_command_machine(machine);
         self.workflow_end_time = Some(SystemTime::now());
         self.current_wf_task_commands.push_back(cwfm);
+        // Wipe out any pending / executing local activity data since we're about to terminate
+        // and there's nothing to be done with them.
+        self.local_activity_data.indicate_terminating();
     }

     /// Add a new command/machines for that command to the current workflow task
@@ -1598,11 +1602,11 @@ enum EventHandlingOutcome {
 /// [WorkflowMachines::handle_command_event]
 fn patch_marker_handling(
     event: &HistoryEvent,
-    mach: &Machines,
+    mach: Option<&Machines>,
     next_event: Option<&HistoryEvent>,
 ) -> Result<EventHandlingOutcome> {
     let patch_machine = match mach {
-        Machines::PatchMachine(pm) => Some(pm),
+        Some(Machines::PatchMachine(pm)) => Some(pm),
         _ => None,
     };
     let patch_details = event.get_patch_marker_details();
@@ -1633,9 +1637,9 @@
             Ok(EventHandlingOutcome::Normal)
         }
     } else {
-        // Version markers can be skipped in the event they are deprecated
-        // Is deprecated. We can simply ignore this event, as deprecated change
-        // markers are allowed without matching changed calls.
+        // Version markers can be skipped in the event they are deprecated. We can simply
+        // ignore this event, as deprecated change markers are allowed without matching changed
+        // calls.
        if deprecated {
            debug!("Deprecated patch marker tried against non-patch machine, skipping.");
            skip_one_or_two_events(next_event)
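[Editor's note] The key behavioral change in this file: `patch_marker_handling` previously ran only when a command was queued (it sat inside `if let Some(...)`), and now receives an `Option<&Machines>` so it runs even when the command queue is empty. A toy illustration of that refactor's shape (names here are hypothetical):

```rust
// Peeking with Option::map lets the handler run on every event, receiving
// None when nothing is queued, where the old `if let` skipped it entirely.
use std::collections::VecDeque;

fn handle(front: Option<&i32>) -> &'static str {
    match front {
        Some(_) => "handled with a queued command",
        None => "handled without a queued command",
    }
}

fn main() {
    let queue: VecDeque<i32> = VecDeque::new();
    // Runs the handler regardless of whether a command is queued.
    let outcome = handle(queue.front());
    assert_eq!(outcome, "handled without a queued command");
}
```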
@@ -50,6 +50,7 @@ use std::{
     collections::VecDeque,
     fmt::Debug,
     future::Future,
+    mem,
     mem::discriminant,
     ops::DerefMut,
     rc::Rc,
@@ -179,58 +180,60 @@ impl Workflows {
         // We must spawn a task to constantly poll the activation stream, because otherwise
         // activation completions would not cause anything to happen until the next poll.
         let tracing_sub = telem_instance.and_then(|ti| ti.trace_subscriber());
-        let processing_task = thread::spawn(move || {
-            if let Some(ts) = tracing_sub {
-                set_trace_subscriber_for_current_thread(ts);
-            }
-            let rt = tokio::runtime::Builder::new_current_thread()
-                .enable_all()
-                .thread_name("workflow-processing")
-                .build()
-                .unwrap();
-            let local = LocalSet::new();
-            local.block_on(&rt, async move {
-                let mut stream = WFStream::build(
-                    basics,
-                    extracted_wft_stream,
-                    locals_stream,
-                    local_activity_request_sink,
-                );
+        let processing_task = thread::Builder::new()
+            .name("workflow-processing".to_string())
+            .spawn(move || {
+                if let Some(ts) = tracing_sub {
+                    set_trace_subscriber_for_current_thread(ts);
+                }
+                let rt = tokio::runtime::Builder::new_current_thread()
+                    .enable_all()
+                    .build()
+                    .unwrap();
+                let local = LocalSet::new();
+                local.block_on(&rt, async move {
+                    let mut stream = WFStream::build(
+                        basics,
+                        extracted_wft_stream,
+                        locals_stream,
+                        local_activity_request_sink,
+                    );

-                // However, we want to avoid plowing ahead until we've been asked to poll at least
-                // once. This supports activity-only workers.
-                let do_poll = tokio::select! {
-                    sp = start_polling_rx => {
-                        sp.is_ok()
-                    }
-                    _ = shutdown_tok.cancelled() => {
-                        false
+                    // However, we want to avoid plowing ahead until we've been asked to poll at least
+                    // once. This supports activity-only workers.
+                    let do_poll = tokio::select! {
+                        sp = start_polling_rx => {
+                            sp.is_ok()
+                        }
+                        _ = shutdown_tok.cancelled() => {
+                            false
+                        }
+                    };
+                    if !do_poll {
+                        return;
                     }
-                };
-                if !do_poll {
-                    return;
-                }
-                while let Some(output) = stream.next().await {
-                    match output {
-                        Ok(o) => {
-                            for fetchreq in o.fetch_histories {
-                                fetch_tx
-                                    .send(fetchreq)
-                                    .expect("Fetch channel must not be dropped");
-                            }
-                            for act in o.activations {
-                                activation_tx
-                                    .send(Ok(act))
-                                    .expect("Activation processor channel not dropped");
+                    while let Some(output) = stream.next().await {
+                        match output {
+                            Ok(o) => {
+                                for fetchreq in o.fetch_histories {
+                                    fetch_tx
+                                        .send(fetchreq)
+                                        .expect("Fetch channel must not be dropped");
+                                }
+                                for act in o.activations {
+                                    activation_tx
+                                        .send(Ok(act))
+                                        .expect("Activation processor channel not dropped");
+                                }
                             }
+                            Err(e) => activation_tx
+                                .send(Err(e))
+                                .expect("Activation processor channel not dropped"),
                         }
-                        Err(e) => activation_tx
-                            .send(Err(e))
-                            .expect("Activation processor channel not dropped"),
                     }
-                }
-            });
-        });
+                });
+            })
+            .expect("Must be able to spawn workflow processing thread");
         Self {
             task_queue,
             local_tx,
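[Editor's note] The refactor swaps `thread::spawn` for `std::thread::Builder`, which names the OS thread itself; the tokio runtime builder's `thread_name` only applies to threads the runtime spawns, which a `current_thread` runtime does not do for its main loop. A minimal sketch of the resulting shape, assuming only `tokio`:

```rust
use tokio::task::LocalSet;

fn main() {
    let handle = std::thread::Builder::new()
        // Names the OS thread, visible in debuggers and profilers.
        .name("workflow-processing".to_string())
        .spawn(|| {
            let rt = tokio::runtime::Builder::new_current_thread()
                .enable_all()
                .build()
                .unwrap();
            // LocalSet allows !Send futures to run on this single thread.
            let local = LocalSet::new();
            local.block_on(&rt, async {
                println!("running on {:?}", std::thread::current().name());
            });
        })
        .expect("must be able to spawn the processing thread");
    handle.join().unwrap();
}
```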
@@ -497,10 +500,17 @@ impl Workflows {
         });
     }

-    /// Query the state of workflow management. Can return `None` if workflow state is shut down.
-    pub(super) fn get_state_info(&self) -> impl Future<Output = Option<WorkflowStateInfo>> {
+    /// Send a `GetStateInfoMsg` to the workflow stream. Can be used to bump the stream if there
+    /// would otherwise be no new inputs.
+    pub(super) fn send_get_state_info_msg(&self) -> oneshot::Receiver<WorkflowStateInfo> {
         let (tx, rx) = oneshot::channel();
         self.send_local(GetStateInfoMsg { response_tx: tx });
+        rx
+    }
+
+    /// Query the state of workflow management. Can return `None` if workflow state is shut down.
+    pub(super) fn get_state_info(&self) -> impl Future<Output = Option<WorkflowStateInfo>> {
+        let rx = self.send_get_state_info_msg();
         async move { rx.await.ok() }
     }

@@ -1023,17 +1033,31 @@ struct BufferedTasks {
     /// supersede any old one.
     wft: Option<PermittedWFT>,
     /// For query only tasks, multiple may be received concurrently and it's OK to buffer more
-    /// than one - however they should be dropped if, by the time we try to process them, we
-    /// have already processed a newer real WFT than the one the query was targeting (otherwise
-    /// we'd return data from the "future").
+    /// than one - however they must all be handled before applying the next "real" wft (after the
+    /// current one has been processed).
     query_only_tasks: VecDeque<PermittedWFT>,
+    /// These are query-only tasks for the *buffered* wft, if any. They will all be discarded if
+    /// a buffered wft is replaced before being handled. They move to `query_only_tasks` once the
+    /// buffered task is taken.
+    query_only_tasks_for_buffered: VecDeque<PermittedWFT>,
 }

 impl BufferedTasks {
+    /// Buffers a new task. If it is a query-only task, multiple such tasks may be buffered which
+    /// all will be handled at the end of the current WFT. If a new WFT which would advance history
+    /// is provided, it will be buffered - but if another such task comes in while there is already
+    /// one buffered, the old one will be overriden, and all queries will be invalidated.
     fn buffer(&mut self, task: PermittedWFT) {
         if task.work.is_query_only() {
-            self.query_only_tasks.push_back(task);
+            if self.wft.is_none() {
+                self.query_only_tasks.push_back(task);
+            } else {
+                self.query_only_tasks_for_buffered.push_back(task);
+            }
         } else {
+            if self.wft.is_some() {
+                self.query_only_tasks_for_buffered.clear();
+            }
             let _ = self.wft.insert(task);
         }
     }
@@ -1042,12 +1066,18 @@ impl BufferedTasks {
         self.wft.is_some() || !self.query_only_tasks.is_empty()
     }

-    /// Remove and return the next WFT from the buffer that should be applied. WFTs which would
-    /// advance workflow state are returned before query-only tasks.
+    /// Remove and return the next WFT from the buffer that should be applied. Queries are returned
+    /// first for the current workflow task, if there are any. If not, the next WFT that would
+    /// advance history is returned.
     fn get_next_wft(&mut self) -> Option<PermittedWFT> {
-        self.wft
-            .take()
-            .or_else(|| self.query_only_tasks.pop_front())
+        if let Some(q) = self.query_only_tasks.pop_front() {
+            return Some(q);
+        }
+        if let Some(t) = self.wft.take() {
+            self.query_only_tasks = mem::take(&mut self.query_only_tasks_for_buffered);
+            return Some(t);
+        }
+        None
     }
 }
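[Editor's note] A reduced model of the new two-queue buffering: queries for the current task drain first; taking the buffered "real" task promotes the queries that were waiting on it via `mem::take`, and (per `buffer` above) replacing a buffered task discards its now-stale queries. Types and names below are simplified stand-ins for the SDK's:

```rust
use std::collections::VecDeque;
use std::mem;

#[derive(Default)]
struct Buffered {
    wft: Option<&'static str>,
    queries: VecDeque<&'static str>,
    queries_for_buffered: VecDeque<&'static str>,
}

impl Buffered {
    fn next(&mut self) -> Option<&'static str> {
        // Queries targeting the current task are served first.
        if let Some(q) = self.queries.pop_front() {
            return Some(q);
        }
        if let Some(t) = self.wft.take() {
            // Promote the queries that were waiting on the buffered task;
            // mem::take swaps in an empty queue without reallocating.
            self.queries = mem::take(&mut self.queries_for_buffered);
            return Some(t);
        }
        None
    }
}

fn main() {
    let mut b = Buffered::default();
    b.queries.push_back("query-1");
    b.wft = Some("wft-2");
    b.queries_for_buffered.push_back("query-for-2");
    assert_eq!(b.next(), Some("query-1"));
    assert_eq!(b.next(), Some("wft-2"));
    assert_eq!(b.next(), Some("query-for-2"));
}
```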
 
@@ -1310,7 +1340,7 @@ impl From<OutgoingJob> for WorkflowActivationJob {
 /// Errors thrown inside of workflow machines
 #[derive(thiserror::Error, Debug)]
 pub(crate) enum WFMachinesError {
-    #[error("Nondeterminism error: {0}")]
+    #[error("[TMPRL1100] Nondeterminism error: {0}")]
     Nondeterminism(String),
     #[error("Fatal error in workflow machines: {0}")]
     Fatal(String),
@@ -9,7 +9,7 @@ use crate::{
         LocalActRequest, LocalActivityResolution,
     },
 };
-use crossbeam::queue::SegQueue;
+use crossbeam_queue::SegQueue;
 use futures::Stream;
 use futures_util::StreamExt;
 use serde::{Deserialize, Serialize};
@@ -18,7 +18,7 @@ otel_impls = ["opentelemetry"]
 [dependencies]
 async-trait = "0.1"
 derive_builder = "0.12"
-derive_more = "0.99"
+derive_more = { workspace = true }
 opentelemetry = { workspace = true, optional = true }
 prost-types = "0.11"
 serde = { version = "1.0", default_features = false, features = ["derive"] }
@@ -31,4 +31,4 @@ url = "2.3"

 [dependencies.temporal-sdk-core-protos]
 path = "../sdk-core-protos"
-version = "0.1"
\ No newline at end of file
+version = "0.1"
@@ -14,7 +14,7 @@ name = "tests"
 path = "tests/progress.rs"

 [dependencies]
-derive_more = "0.99"
+derive_more = { workspace = true }
 proc-macro2 = "1.0"
 syn = { version = "2.0", features = ["default", "extra-traits"] }
 quote = "1.0"
@@ -17,8 +17,8 @@ async-trait = "0.1"
 thiserror = "1.0"
 anyhow = "1.0"
 base64 = "0.21"
-crossbeam = "0.8"
-derive_more = "0.99"
+crossbeam-channel = "0.5"
+derive_more = { workspace = true }
 futures = "0.3"
 once_cell = "1.10"
 parking_lot = { version = "0.12", features = ["send_guard"] }