@temporalio/core-bridge 1.7.1 → 1.7.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. package/Cargo.lock +21 -0
  2. package/lib/index.d.ts +10 -10
  3. package/package.json +4 -4
  4. package/releases/aarch64-apple-darwin/index.node +0 -0
  5. package/sdk-core/.buildkite/pipeline.yml +1 -1
  6. package/sdk-core/.cargo/config.toml +2 -0
  7. package/sdk-core/CODEOWNERS +1 -1
  8. package/sdk-core/client/src/raw.rs +15 -6
  9. package/sdk-core/core/Cargo.toml +1 -0
  10. package/sdk-core/core/src/core_tests/activity_tasks.rs +13 -5
  11. package/sdk-core/core/src/core_tests/workflow_tasks.rs +45 -77
  12. package/sdk-core/core/src/internal_flags.rs +132 -46
  13. package/sdk-core/core/src/worker/activities/activity_task_poller_stream.rs +10 -7
  14. package/sdk-core/core/src/worker/activities.rs +152 -142
  15. package/sdk-core/core/src/worker/client.rs +12 -8
  16. package/sdk-core/core/src/worker/mod.rs +7 -5
  17. package/sdk-core/core/src/worker/workflow/history_update.rs +733 -33
  18. package/sdk-core/core/src/worker/workflow/machines/child_workflow_state_machine.rs +1 -1
  19. package/sdk-core/core/src/worker/workflow/machines/signal_external_state_machine.rs +4 -1
  20. package/sdk-core/core/src/worker/workflow/machines/workflow_machines.rs +5 -2
  21. package/sdk-core/core/src/worker/workflow/managed_run/managed_wf_test.rs +1 -1
  22. package/sdk-core/core/src/worker/workflow/managed_run.rs +0 -4
  23. package/sdk-core/protos/api_upstream/.github/workflows/publish-docs.yml +23 -0
  24. package/sdk-core/protos/api_upstream/Makefile +1 -1
  25. package/sdk-core/protos/api_upstream/buf.yaml +5 -0
  26. package/sdk-core/protos/api_upstream/temporal/api/common/v1/message.proto +17 -0
  27. package/sdk-core/protos/api_upstream/temporal/api/enums/v1/failed_cause.proto +2 -0
  28. package/sdk-core/protos/api_upstream/temporal/api/history/v1/message.proto +6 -3
  29. package/sdk-core/protos/api_upstream/temporal/api/protocol/v1/message.proto +1 -1
  30. package/sdk-core/protos/api_upstream/temporal/api/taskqueue/v1/message.proto +12 -22
  31. package/sdk-core/protos/api_upstream/temporal/api/update/v1/message.proto +2 -2
  32. package/sdk-core/protos/api_upstream/temporal/api/workflow/v1/message.proto +2 -0
  33. package/sdk-core/protos/api_upstream/temporal/api/workflowservice/v1/request_response.proto +145 -48
  34. package/sdk-core/protos/api_upstream/temporal/api/workflowservice/v1/service.proto +19 -8
  35. package/sdk-core/sdk/src/workflow_context/options.rs +1 -1
  36. package/sdk-core/sdk/src/workflow_context.rs +9 -1
  37. package/sdk-core/test-utils/src/lib.rs +29 -7
  38. package/sdk-core/tests/integ_tests/activity_functions.rs +5 -0
  39. package/sdk-core/tests/integ_tests/workflow_tests/activities.rs +2 -4
  40. package/sdk-core/tests/integ_tests/workflow_tests/determinism.rs +0 -1
  41. package/sdk-core/tests/integ_tests/workflow_tests/local_activities.rs +5 -7
  42. package/sdk-core/tests/integ_tests/workflow_tests.rs +3 -7
  43. package/sdk-core/tests/main.rs +1 -0
  44. package/releases/aarch64-unknown-linux-gnu/index.node +0 -0
  45. package/releases/x86_64-apple-darwin/index.node +0 -0
  46. package/releases/x86_64-pc-windows-msvc/index.node +0 -0
  47. package/releases/x86_64-unknown-linux-gnu/index.node +0 -0
@@ -48,6 +48,7 @@ pub struct HistoryUpdate {
48
48
  /// True if this update contains the final WFT in history, and no more attempts to extract
49
49
  /// additional updates should be made.
50
50
  has_last_wft: bool,
51
+ wft_count: usize,
51
52
  }
52
53
  impl Debug for HistoryUpdate {
53
54
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
@@ -84,6 +85,7 @@ pub struct HistoryPaginator {
84
85
  pub(crate) run_id: String,
85
86
  pub(crate) previous_wft_started_id: i64,
86
87
  pub(crate) wft_started_event_id: i64,
88
+ id_of_last_event_in_last_extracted_update: Option<i64>,
87
89
 
88
90
  #[cfg_attr(feature = "save_wf_inputs", serde(skip))]
89
91
  client: Arc<dyn WorkerClient>,
@@ -175,10 +177,14 @@ impl HistoryPaginator {
175
177
  run_id: req.original_wft.work.execution.run_id.clone(),
176
178
  previous_wft_started_id: req.original_wft.work.update.previous_wft_started_id,
177
179
  wft_started_event_id: req.original_wft.work.update.wft_started_id,
180
+ id_of_last_event_in_last_extracted_update: req
181
+ .original_wft
182
+ .paginator
183
+ .id_of_last_event_in_last_extracted_update,
178
184
  client,
179
185
  event_queue: Default::default(),
180
186
  next_page_token: NextPageToken::FetchFromStart,
181
- final_events: vec![],
187
+ final_events: req.original_wft.work.update.events,
182
188
  };
183
189
  let first_update = paginator.extract_next_update().await?;
184
190
  req.original_wft.work.update = first_update;
@@ -211,6 +217,7 @@ impl HistoryPaginator {
211
217
  final_events,
212
218
  previous_wft_started_id,
213
219
  wft_started_event_id,
220
+ id_of_last_event_in_last_extracted_update: None,
214
221
  }
215
222
  }
216
223
 
@@ -226,6 +233,7 @@ impl HistoryPaginator {
226
233
  final_events: vec![],
227
234
  previous_wft_started_id: -2,
228
235
  wft_started_event_id: -2,
236
+ id_of_last_event_in_last_extracted_update: None,
229
237
  }
230
238
  }
231
239
 
@@ -247,12 +255,43 @@ impl HistoryPaginator {
247
255
  .map(|e| e.event_id)
248
256
  .unwrap_or_default()
249
257
  >= self.wft_started_event_id;
258
+
259
+ // This handles a special case where the server might send us a page token along with
260
+ // a real page which ends at the current end of history. The page token then points to
261
+ an empty page. We need to detect this, and consider it the end of history.
262
+ //
263
+ // This case unfortunately cannot be handled earlier, because we might fetch a page
264
+ // from the server which contains two complete WFTs, and thus we are happy to return
265
+ // an update at that time. But, if the page has a next page token, we *cannot* conclude
266
+ // we are done with replay until we fetch that page. So, we have to wait until the next
267
+ // extraction to determine (after fetching the next page and finding it to be empty)
268
+ // that we are done. Fetching the page eagerly is another option, but would be wasteful
269
+ // the overwhelming majority of the time.
270
+ let already_sent_update_with_enough_events = self
271
+ .id_of_last_event_in_last_extracted_update
272
+ .unwrap_or_default()
273
+ >= self.wft_started_event_id;
274
+ if current_events.is_empty() && no_next_page && already_sent_update_with_enough_events {
275
+ // We must return an empty update which also says it contains the final WFT so we
276
+ // know we're done with replay.
277
+ return Ok(HistoryUpdate::from_events(
278
+ [],
279
+ self.previous_wft_started_id,
280
+ self.wft_started_event_id,
281
+ true,
282
+ )
283
+ .0);
284
+ }
285
+
250
286
  if current_events.is_empty() || (no_next_page && !seen_enough_events) {
251
287
  // If next page fetching happened, and we still ended up with no or insufficient
252
288
  // events, something is wrong. We're expecting there to be more events to be able to
253
289
  // extract this update, but server isn't giving us any. We have no choice except to
254
290
  // give up and evict.
255
291
  error!(
292
+ current_events=?current_events,
293
+ no_next_page,
294
+ seen_enough_events,
256
295
  "We expected to be able to fetch more events but server says there are none"
257
296
  );
258
297
  return Err(EMPTY_FETCH_ERR.clone());
@@ -267,6 +306,16 @@ impl HistoryPaginator {
267
306
  self.wft_started_event_id,
268
307
  no_more,
269
308
  );
309
+
310
+ // If there are potentially more events and we haven't extracted two WFTs yet, keep
311
+ // trying.
312
+ if !matches!(self.next_page_token, NextPageToken::Done) && update.wft_count < 2 {
313
+ // Unwrap the update and stuff it all back in the queue
314
+ self.event_queue.extend(update.events);
315
+ self.event_queue.extend(extra);
316
+ continue;
317
+ }
318
+
270
319
  let extra_eid_same = extra
271
320
  .first()
272
321
  .map(|e| e.event_id == first_event_id)
@@ -278,17 +327,21 @@ impl HistoryPaginator {
278
327
  // There was not a meaningful WFT in the whole page. We must fetch more.
279
328
  continue;
280
329
  }
330
+ self.id_of_last_event_in_last_extracted_update =
331
+ update.events.last().map(|e| e.event_id);
332
+ #[cfg(debug_assertions)]
333
+ update.assert_contiguous();
281
334
  return Ok(update);
282
335
  }
283
336
  }
284
337
 
285
- /// Fetches the next page and adds it to the internal queue. Returns true if a fetch was
286
- /// performed, false if there is no next page.
338
+ /// Fetches the next page and adds it to the internal queue.
339
+ /// Returns true if we still have a next page token after fetching.
287
340
  async fn get_next_page(&mut self) -> Result<bool, tonic::Status> {
288
341
  let history = loop {
289
342
  let npt = match mem::replace(&mut self.next_page_token, NextPageToken::Done) {
290
343
  // If the last page token we got was empty, we're done.
291
- NextPageToken::Done => return Ok(false),
344
+ NextPageToken::Done => break None,
292
345
  NextPageToken::FetchFromStart => vec![],
293
346
  NextPageToken::Next(v) => v,
294
347
  };
@@ -315,8 +368,18 @@ impl HistoryPaginator {
315
368
  break fetch_res.history;
316
369
  };
317
370
 
318
- self.event_queue
319
- .extend(history.map(|h| h.events).unwrap_or_default());
371
+ let queue_back_id = self
372
+ .event_queue
373
+ .back()
374
+ .map(|e| e.event_id)
375
+ .unwrap_or_default();
376
+ self.event_queue.extend(
377
+ history
378
+ .map(|h| h.events)
379
+ .unwrap_or_default()
380
+ .into_iter()
381
+ .skip_while(|e| e.event_id <= queue_back_id),
382
+ );
320
383
  if matches!(&self.next_page_token, NextPageToken::Done) {
321
384
  // If finished, we need to extend the queue with the final events, skipping any
322
385
  // which are already present.
@@ -329,7 +392,7 @@ impl HistoryPaginator {
329
392
  );
330
393
  }
331
394
  };
332
- Ok(true)
395
+ Ok(!matches!(&self.next_page_token, NextPageToken::Done))
333
396
  }
334
397
  }
335
398
 
@@ -391,6 +454,7 @@ impl HistoryUpdate {
391
454
  previous_wft_started_id: -1,
392
455
  wft_started_id: -1,
393
456
  has_last_wft: false,
457
+ wft_count: 0,
394
458
  }
395
459
  }
396
460
  pub fn is_real(&self) -> bool {
@@ -400,6 +464,20 @@ impl HistoryUpdate {
400
464
  self.events.get(0).map(|e| e.event_id)
401
465
  }
402
466
 
467
+ #[cfg(debug_assertions)]
468
+ fn assert_contiguous(&self) -> bool {
469
+ use crate::abstractions::dbg_panic;
470
+
471
+ for win in self.events.as_slice().windows(2) {
472
+ if let &[e1, e2] = &win {
473
+ if e2.event_id != e1.event_id + 1 {
474
+ dbg_panic!("HistoryUpdate isn't contiguous! {:?} -> {:?}", e1, e2);
475
+ }
476
+ }
477
+ }
478
+ true
479
+ }
480
+
403
481
  /// Create an instance of an update directly from events. If the passed in event iterator has a
404
482
  /// partial WFT sequence at the end, all events after the last complete WFT sequence (ending
405
483
  /// with WFT started) are returned back to the caller, since the history update only works in
@@ -414,8 +492,11 @@ impl HistoryUpdate {
414
492
  <I as IntoIterator>::IntoIter: Send + 'static,
415
493
  {
416
494
  let mut all_events: Vec<_> = events.into_iter().collect();
417
- let mut last_end =
418
- find_end_index_of_next_wft_seq(all_events.as_slice(), previous_wft_started_id);
495
+ let mut last_end = find_end_index_of_next_wft_seq(
496
+ all_events.as_slice(),
497
+ previous_wft_started_id,
498
+ has_last_wft,
499
+ );
419
500
  if matches!(last_end, NextWFTSeqEndIndex::Incomplete(_)) {
420
501
  return if has_last_wft {
421
502
  (
@@ -424,6 +505,7 @@ impl HistoryUpdate {
424
505
  previous_wft_started_id,
425
506
  wft_started_id,
426
507
  has_last_wft,
508
+ wft_count: 1,
427
509
  },
428
510
  vec![],
429
511
  )
@@ -434,23 +516,32 @@ impl HistoryUpdate {
434
516
  previous_wft_started_id,
435
517
  wft_started_id,
436
518
  has_last_wft,
519
+ wft_count: 0,
437
520
  },
438
521
  all_events,
439
522
  )
440
523
  };
441
524
  }
525
+ let mut wft_count = 0;
442
526
  while let NextWFTSeqEndIndex::Complete(next_end_ix) = last_end {
527
+ wft_count += 1;
443
528
  let next_end_eid = all_events[next_end_ix].event_id;
444
529
  // To save skipping all events at the front of this slice, only pass the relevant
445
530
  // portion, but that means the returned index must be adjusted, hence the addition.
446
- let next_end = find_end_index_of_next_wft_seq(&all_events[next_end_ix..], next_end_eid)
447
- .add(next_end_ix);
531
+ let next_end = find_end_index_of_next_wft_seq(
532
+ &all_events[next_end_ix..],
533
+ next_end_eid,
534
+ has_last_wft,
535
+ )
536
+ .add(next_end_ix);
448
537
  if matches!(next_end, NextWFTSeqEndIndex::Incomplete(_)) {
449
538
  break;
450
539
  }
451
540
  last_end = next_end;
452
541
  }
453
- let remaining_events = if all_events.is_empty() {
542
+ // If we have the last WFT, there's no point in there being "remaining" events, because
543
+ // they must be considered part of the last sequence
544
+ let remaining_events = if all_events.is_empty() || has_last_wft {
454
545
  vec![]
455
546
  } else {
456
547
  all_events.split_off(last_end.index() + 1)
@@ -462,6 +553,7 @@ impl HistoryUpdate {
462
553
  previous_wft_started_id,
463
554
  wft_started_id,
464
555
  has_last_wft,
556
+ wft_count,
465
557
  },
466
558
  remaining_events,
467
559
  )
@@ -484,6 +576,7 @@ impl HistoryUpdate {
484
576
  previous_wft_started_id,
485
577
  wft_started_id,
486
578
  has_last_wft: true,
579
+ wft_count: 0,
487
580
  }
488
581
  }
489
582
 
@@ -499,7 +592,8 @@ impl HistoryUpdate {
499
592
  if let Some(ix_first_relevant) = self.starting_index_after_skipping(from_wft_started_id) {
500
593
  self.events.drain(0..ix_first_relevant);
501
594
  }
502
- let next_wft_ix = find_end_index_of_next_wft_seq(&self.events, from_wft_started_id);
595
+ let next_wft_ix =
596
+ find_end_index_of_next_wft_seq(&self.events, from_wft_started_id, self.has_last_wft);
503
597
  match next_wft_ix {
504
598
  NextWFTSeqEndIndex::Incomplete(siz) => {
505
599
  if self.has_last_wft {
@@ -540,14 +634,17 @@ impl HistoryUpdate {
540
634
  if relevant_events.is_empty() {
541
635
  return relevant_events;
542
636
  }
543
- let ix_end = find_end_index_of_next_wft_seq(relevant_events, from_wft_started_id).index();
637
+ let ix_end =
638
+ find_end_index_of_next_wft_seq(relevant_events, from_wft_started_id, self.has_last_wft)
639
+ .index();
544
640
  &relevant_events[0..=ix_end]
545
641
  }
546
642
 
547
643
  /// Returns true if this update has the next needed WFT sequence, false if events will need to
548
644
  /// be fetched in order to create a complete update with the entire next WFT sequence.
549
645
  pub fn can_take_next_wft_sequence(&self, from_wft_started_id: i64) -> bool {
550
- let next_wft_ix = find_end_index_of_next_wft_seq(&self.events, from_wft_started_id);
646
+ let next_wft_ix =
647
+ find_end_index_of_next_wft_seq(&self.events, from_wft_started_id, self.has_last_wft);
551
648
  if let NextWFTSeqEndIndex::Incomplete(_) = next_wft_ix {
552
649
  if !self.has_last_wft {
553
650
  return false;
@@ -607,12 +704,13 @@ impl NextWFTSeqEndIndex {
607
704
  fn find_end_index_of_next_wft_seq(
608
705
  events: &[HistoryEvent],
609
706
  from_event_id: i64,
707
+ has_last_wft: bool,
610
708
  ) -> NextWFTSeqEndIndex {
611
709
  if events.is_empty() {
612
710
  return NextWFTSeqEndIndex::Incomplete(0);
613
711
  }
614
712
  let mut last_index = 0;
615
- let mut saw_any_non_wft_event = false;
713
+ let mut saw_any_command_event = false;
616
714
  for (ix, e) in events.iter().enumerate() {
617
715
  last_index = ix;
618
716
 
@@ -622,15 +720,8 @@ fn find_end_index_of_next_wft_seq(
622
720
  continue;
623
721
  }
624
722
 
625
- if !matches!(
626
- e.event_type(),
627
- EventType::WorkflowTaskFailed
628
- | EventType::WorkflowTaskTimedOut
629
- | EventType::WorkflowTaskScheduled
630
- | EventType::WorkflowTaskStarted
631
- | EventType::WorkflowTaskCompleted
632
- ) {
633
- saw_any_non_wft_event = true;
723
+ if e.is_command_event() || e.event_type() == EventType::WorkflowExecutionStarted {
724
+ saw_any_command_event = true;
634
725
  }
635
726
  if e.is_final_wf_execution_event() {
636
727
  return NextWFTSeqEndIndex::Complete(last_index);
@@ -659,12 +750,20 @@ fn find_end_index_of_next_wft_seq(
659
750
  if next_next_event.event_type() == EventType::WorkflowTaskScheduled {
660
751
  continue;
661
752
  } else {
662
- saw_any_non_wft_event = true;
753
+ return NextWFTSeqEndIndex::Complete(ix);
663
754
  }
755
+ } else if !has_last_wft && !saw_any_command_event {
756
+ // Don't have enough events to look ahead of the WorkflowTaskCompleted. Need
757
+ // to fetch more.
758
+ continue;
664
759
  }
665
760
  }
761
+ } else if !has_last_wft && !saw_any_command_event {
762
+ // Don't have enough events to look ahead of the WorkflowTaskStarted. Need to fetch
763
+ // more.
764
+ continue;
666
765
  }
667
- if saw_any_non_wft_event {
766
+ if saw_any_command_event {
668
767
  return NextWFTSeqEndIndex::Complete(ix);
669
768
  }
670
769
  }
@@ -678,11 +777,21 @@ pub mod tests {
678
777
  use super::*;
679
778
  use crate::{
680
779
  replay::{HistoryInfo, TestHistoryBuilder},
681
- test_help::canned_histories,
780
+ test_help::{canned_histories, hist_to_poll_resp, mock_sdk_cfg, MockPollCfg, ResponseType},
682
781
  worker::client::mocks::mock_workflow_client,
683
782
  };
783
+ use futures::StreamExt;
684
784
  use futures_util::TryStreamExt;
685
- use temporal_sdk_core_protos::temporal::api::workflowservice::v1::GetWorkflowExecutionHistoryResponse;
785
+ use std::sync::atomic::{AtomicUsize, Ordering};
786
+ use temporal_client::WorkflowOptions;
787
+ use temporal_sdk::WfContext;
788
+ use temporal_sdk_core_protos::{
789
+ temporal::api::{
790
+ common::v1::WorkflowExecution, enums::v1::WorkflowTaskFailedCause,
791
+ workflowservice::v1::GetWorkflowExecutionHistoryResponse,
792
+ },
793
+ DEFAULT_WORKFLOW_TYPE,
794
+ };
686
795
 
687
796
  impl From<HistoryInfo> for HistoryUpdate {
688
797
  fn from(v: HistoryInfo) -> Self {
@@ -967,14 +1076,13 @@ pub mod tests {
967
1076
  }
968
1077
 
969
1078
  // Like the above, but if the history happens to be cut off at a wft boundary, (even though
970
- // there may have been many heartbeats after we have no way of knowing about), it's going to
971
- // count events 7-20 as a WFT since there is started, completed, timer command, ..heartbeats..
1079
+ // there may have been many heartbeats after we have no way of knowing about)
972
1080
  #[tokio::test]
973
1081
  async fn needs_fetch_after_complete_seq_with_heartbeats() {
974
1082
  let t = three_wfts_then_heartbeats();
975
1083
  let mut ends_in_middle_of_seq = t.as_history_update().events;
976
1084
  ends_in_middle_of_seq.truncate(20);
977
- let (mut update, remaining) = HistoryUpdate::from_events(
1085
+ let (mut update, _) = HistoryUpdate::from_events(
978
1086
  ends_in_middle_of_seq,
979
1087
  0,
980
1088
  t.get_full_history_info()
@@ -982,7 +1090,6 @@ pub mod tests {
982
1090
  .workflow_task_started_event_id(),
983
1091
  false,
984
1092
  );
985
- assert!(remaining.is_empty());
986
1093
  let seq = update.take_next_wft_sequence(0).unwrap_events();
987
1094
  assert_eq!(seq.last().unwrap().event_id, 3);
988
1095
  let seq = update.take_next_wft_sequence(3).unwrap_events();
@@ -1168,4 +1275,597 @@ pub mod tests {
1168
1275
 
1169
1276
  // TODO: Test we don't re-feed pointless updates if fetching returns <= events we already
1170
1277
  // processed
1278
+
1279
+ #[tokio::test]
1280
+ async fn handles_fetching_page_with_complete_wft_and_page_token_to_empty_page() {
1281
+ let timer_hist = canned_histories::single_timer("t");
1282
+ let workflow_task = timer_hist.get_full_history_info().unwrap();
1283
+ let prev_started_wft_id = workflow_task.previous_started_event_id();
1284
+ let wft_started_id = workflow_task.workflow_task_started_event_id();
1285
+
1286
+ let mut full_resp_with_npt: GetWorkflowExecutionHistoryResponse =
1287
+ timer_hist.get_full_history_info().unwrap().into();
1288
+ full_resp_with_npt.next_page_token = vec![1];
1289
+
1290
+ let mut mock_client = mock_workflow_client();
1291
+ mock_client
1292
+ .expect_get_workflow_execution_history()
1293
+ .returning(move |_, _, _| Ok(full_resp_with_npt.clone()))
1294
+ .times(1);
1295
+ mock_client
1296
+ .expect_get_workflow_execution_history()
1297
+ .returning(move |_, _, _| {
1298
+ Ok(GetWorkflowExecutionHistoryResponse {
1299
+ history: Some(History { events: vec![] }),
1300
+ raw_history: vec![],
1301
+ next_page_token: vec![],
1302
+ archived: false,
1303
+ })
1304
+ })
1305
+ .times(1);
1306
+
1307
+ let mut paginator = HistoryPaginator::new(
1308
+ workflow_task.into(),
1309
+ prev_started_wft_id,
1310
+ wft_started_id,
1311
+ "wfid".to_string(),
1312
+ "runid".to_string(),
1313
+ NextPageToken::FetchFromStart,
1314
+ Arc::new(mock_client),
1315
+ );
1316
+ let mut update = paginator.extract_next_update().await.unwrap();
1317
+ let seq = update.take_next_wft_sequence(0).unwrap_events();
1318
+ assert_eq!(seq.last().unwrap().event_id, 3);
1319
+ let seq = update.take_next_wft_sequence(3).unwrap_events();
1320
+ assert_eq!(seq.last().unwrap().event_id, 8);
1321
+ assert_matches!(update.take_next_wft_sequence(8), NextWFT::ReplayOver);
1322
+ }
1323
+
1324
+ #[tokio::test]
1325
+ async fn weird_pagination_doesnt_drop_wft_events() {
1326
+ let wf_id = "fakeid";
1327
+ // 1: EVENT_TYPE_WORKFLOW_EXECUTION_STARTED
1328
+ // 2: EVENT_TYPE_WORKFLOW_TASK_SCHEDULED
1329
+ // 3: EVENT_TYPE_WORKFLOW_TASK_STARTED
1330
+ // 4: EVENT_TYPE_WORKFLOW_TASK_COMPLETED
1331
+ // empty page
1332
+ // 5: EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED
1333
+ // 6: EVENT_TYPE_WORKFLOW_TASK_SCHEDULED
1334
+ // 7: EVENT_TYPE_WORKFLOW_TASK_STARTED
1335
+ // 8: EVENT_TYPE_WORKFLOW_TASK_FAILED
1336
+ // empty page
1337
+ // 9: EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED
1338
+ // 10: EVENT_TYPE_WORKFLOW_TASK_SCHEDULED
1339
+ // 11: EVENT_TYPE_WORKFLOW_TASK_STARTED
1340
+ // empty page
1341
+ let mut t = TestHistoryBuilder::default();
1342
+ t.add_by_type(EventType::WorkflowExecutionStarted);
1343
+ t.add_full_wf_task();
1344
+
1345
+ t.add_we_signaled("hi", vec![]);
1346
+ t.add_workflow_task_scheduled_and_started();
1347
+ t.add_workflow_task_failed_with_failure(
1348
+ WorkflowTaskFailedCause::UnhandledCommand,
1349
+ Default::default(),
1350
+ );
1351
+
1352
+ t.add_we_signaled("hi", vec![]);
1353
+ t.add_workflow_task_scheduled_and_started();
1354
+
1355
+ let workflow_task = t.get_full_history_info().unwrap();
1356
+ let mut wft_resp = workflow_task.as_poll_wft_response();
1357
+ wft_resp.workflow_execution = Some(WorkflowExecution {
1358
+ workflow_id: wf_id.to_string(),
1359
+ run_id: t.get_orig_run_id().to_string(),
1360
+ });
1361
+ // Just 9/10/11 in WFT
1362
+ wft_resp.history.as_mut().unwrap().events.drain(0..8);
1363
+
1364
+ let mut resp_1: GetWorkflowExecutionHistoryResponse =
1365
+ t.get_full_history_info().unwrap().into();
1366
+ resp_1.next_page_token = vec![1];
1367
+ resp_1.history.as_mut().unwrap().events.truncate(4);
1368
+
1369
+ let mut mock_client = mock_workflow_client();
1370
+ mock_client
1371
+ .expect_get_workflow_execution_history()
1372
+ .returning(move |_, _, _| Ok(resp_1.clone()))
1373
+ .times(1);
1374
+ mock_client
1375
+ .expect_get_workflow_execution_history()
1376
+ .returning(move |_, _, _| {
1377
+ Ok(GetWorkflowExecutionHistoryResponse {
1378
+ history: Some(History { events: vec![] }),
1379
+ raw_history: vec![],
1380
+ next_page_token: vec![2],
1381
+ archived: false,
1382
+ })
1383
+ })
1384
+ .times(1);
1385
+ let mut resp_2: GetWorkflowExecutionHistoryResponse =
1386
+ t.get_full_history_info().unwrap().into();
1387
+ resp_2.next_page_token = vec![3];
1388
+ resp_2.history.as_mut().unwrap().events.drain(0..4);
1389
+ resp_2.history.as_mut().unwrap().events.truncate(4);
1390
+ mock_client
1391
+ .expect_get_workflow_execution_history()
1392
+ .returning(move |_, _, _| Ok(resp_2.clone()))
1393
+ .times(1);
1394
+ mock_client
1395
+ .expect_get_workflow_execution_history()
1396
+ .returning(move |_, _, _| {
1397
+ Ok(GetWorkflowExecutionHistoryResponse {
1398
+ history: Some(History { events: vec![] }),
1399
+ raw_history: vec![],
1400
+ next_page_token: vec![],
1401
+ archived: false,
1402
+ })
1403
+ })
1404
+ .times(1);
1405
+
1406
+ let wf_type = DEFAULT_WORKFLOW_TYPE;
1407
+ let mh =
1408
+ MockPollCfg::from_resp_batches(wf_id, t, [ResponseType::Raw(wft_resp)], mock_client);
1409
+ let mut worker = mock_sdk_cfg(mh, |cfg| {
1410
+ cfg.max_cached_workflows = 2;
1411
+ cfg.ignore_evicts_on_shutdown = false;
1412
+ });
1413
+
1414
+ let sig_ctr = Arc::new(AtomicUsize::new(0));
1415
+ let sig_ctr_clone = sig_ctr.clone();
1416
+ worker.register_wf(wf_type.to_owned(), move |ctx: WfContext| {
1417
+ let sig_ctr_clone = sig_ctr_clone.clone();
1418
+ async move {
1419
+ let mut sigchan = ctx.make_signal_channel("hi");
1420
+ while sigchan.next().await.is_some() {
1421
+ if sig_ctr_clone.fetch_add(1, Ordering::AcqRel) == 1 {
1422
+ break;
1423
+ }
1424
+ }
1425
+ Ok(().into())
1426
+ }
1427
+ });
1428
+
1429
+ worker
1430
+ .submit_wf(
1431
+ wf_id.to_owned(),
1432
+ wf_type.to_owned(),
1433
+ vec![],
1434
+ WorkflowOptions::default(),
1435
+ )
1436
+ .await
1437
+ .unwrap();
1438
+ worker.run_until_done().await.unwrap();
1439
+ assert_eq!(sig_ctr.load(Ordering::Acquire), 2);
1440
+ }
1441
+
1442
+ #[tokio::test]
1443
+ async fn extreme_pagination_doesnt_drop_wft_events_paginator() {
1444
+ // 1: EVENT_TYPE_WORKFLOW_EXECUTION_STARTED
1445
+ // 2: EVENT_TYPE_WORKFLOW_TASK_SCHEDULED
1446
+ // 3: EVENT_TYPE_WORKFLOW_TASK_STARTED // <- previous_started_event_id
1447
+ // 4: EVENT_TYPE_WORKFLOW_TASK_COMPLETED
1448
+
1449
+ // 5: EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED
1450
+ // 6: EVENT_TYPE_WORKFLOW_TASK_SCHEDULED
1451
+ // 7: EVENT_TYPE_WORKFLOW_TASK_STARTED
1452
+ // 8: EVENT_TYPE_WORKFLOW_TASK_FAILED
1453
+
1454
+ // 9: EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED
1455
+ // 10: EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED
1456
+ // 11: EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED
1457
+ // 12: EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED
1458
+ // 13: EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED
1459
+ // 14: EVENT_TYPE_WORKFLOW_TASK_SCHEDULED
1460
+ // 15: EVENT_TYPE_WORKFLOW_TASK_STARTED // <- started_event_id
1461
+
1462
+ let mut t = TestHistoryBuilder::default();
1463
+ t.add_by_type(EventType::WorkflowExecutionStarted);
1464
+ t.add_full_wf_task();
1465
+
1466
+ t.add_we_signaled("hi", vec![]);
1467
+ t.add_workflow_task_scheduled_and_started();
1468
+ t.add_workflow_task_failed_with_failure(
1469
+ WorkflowTaskFailedCause::UnhandledCommand,
1470
+ Default::default(),
1471
+ );
1472
+
1473
+ t.add_we_signaled("hi", vec![]);
1474
+ t.add_we_signaled("hi", vec![]);
1475
+ t.add_we_signaled("hi", vec![]);
1476
+ t.add_we_signaled("hi", vec![]);
1477
+ t.add_we_signaled("hi", vec![]);
1478
+ t.add_workflow_task_scheduled_and_started();
1479
+
1480
+ let mut mock_client = mock_workflow_client();
1481
+
1482
+ let events: Vec<HistoryEvent> = t.get_full_history_info().unwrap().into_events();
1483
+ let first_event = events[0].clone();
1484
+ for (i, event) in events.into_iter().enumerate() {
1485
+ // Add an empty page
1486
+ mock_client
1487
+ .expect_get_workflow_execution_history()
1488
+ .returning(move |_, _, _| {
1489
+ Ok(GetWorkflowExecutionHistoryResponse {
1490
+ history: Some(History { events: vec![] }),
1491
+ raw_history: vec![],
1492
+ next_page_token: vec![(i * 10) as u8],
1493
+ archived: false,
1494
+ })
1495
+ })
1496
+ .times(1);
1497
+
1498
+ // Add a page with only event i
1499
+ mock_client
1500
+ .expect_get_workflow_execution_history()
1501
+ .returning(move |_, _, _| {
1502
+ Ok(GetWorkflowExecutionHistoryResponse {
1503
+ history: Some(History {
1504
+ events: vec![event.clone()],
1505
+ }),
1506
+ raw_history: vec![],
1507
+ next_page_token: vec![(i * 10 + 1) as u8],
1508
+ archived: false,
1509
+ })
1510
+ })
1511
+ .times(1);
1512
+ }
1513
+
1514
+ // Add an extra empty page at the end, with no NPT
1515
+ mock_client
1516
+ .expect_get_workflow_execution_history()
1517
+ .returning(move |_, _, _| {
1518
+ Ok(GetWorkflowExecutionHistoryResponse {
1519
+ history: Some(History { events: vec![] }),
1520
+ raw_history: vec![],
1521
+ next_page_token: vec![],
1522
+ archived: false,
1523
+ })
1524
+ })
1525
+ .times(1);
1526
+
1527
+ let mut paginator = HistoryPaginator::new(
1528
+ History {
1529
+ events: vec![first_event],
1530
+ },
1531
+ 3,
1532
+ 15,
1533
+ "wfid".to_string(),
1534
+ "runid".to_string(),
1535
+ vec![1],
1536
+ Arc::new(mock_client),
1537
+ );
1538
+
1539
+ let mut update = paginator.extract_next_update().await.unwrap();
1540
+ let seq = update.take_next_wft_sequence(0).unwrap_events();
1541
+ assert_eq!(seq.first().unwrap().event_id, 1);
1542
+ assert_eq!(seq.last().unwrap().event_id, 3);
1543
+
1544
+ let seq = update.take_next_wft_sequence(3).unwrap_events();
1545
+ assert_eq!(seq.first().unwrap().event_id, 4);
1546
+ assert_eq!(seq.last().unwrap().event_id, 15);
1547
+ }
1548
+
1549
+ #[tokio::test]
1550
+ async fn extreme_pagination_doesnt_drop_wft_events_worker() {
1551
+ let wf_id = "fakeid";
1552
+
1553
+ // In this test, we add empty pages between each event
1554
+
1555
+ // 1: EVENT_TYPE_WORKFLOW_EXECUTION_STARTED
1556
+ // 2: EVENT_TYPE_WORKFLOW_TASK_SCHEDULED
1557
+ // 3: EVENT_TYPE_WORKFLOW_TASK_STARTED // <- previous_started_event_id
1558
+ // 4: EVENT_TYPE_WORKFLOW_TASK_COMPLETED
1559
+
1560
+ // 5: EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED
1561
+ // 6: EVENT_TYPE_WORKFLOW_TASK_SCHEDULED
1562
+ // 7: EVENT_TYPE_WORKFLOW_TASK_STARTED
1563
+ // 8: EVENT_TYPE_WORKFLOW_TASK_FAILED
1564
+
1565
+ // 9: EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED
1566
+ // 10: EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED
1567
+ // 11: EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED
1568
+ // 12: EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED
1569
+ // 13: EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED
1570
+ // 14: EVENT_TYPE_WORKFLOW_TASK_SCHEDULED
1571
+ // 15: EVENT_TYPE_WORKFLOW_TASK_STARTED // <- started_event_id
1572
+
1573
+ let mut t = TestHistoryBuilder::default();
1574
+ t.add_by_type(EventType::WorkflowExecutionStarted);
1575
+ t.add_full_wf_task();
1576
+
1577
+ t.add_we_signaled("hi", vec![]);
1578
+ t.add_workflow_task_scheduled_and_started();
1579
+ t.add_workflow_task_failed_with_failure(
1580
+ WorkflowTaskFailedCause::UnhandledCommand,
1581
+ Default::default(),
1582
+ );
1583
+
1584
+ t.add_we_signaled("hi", vec![]);
1585
+ t.add_we_signaled("hi", vec![]);
1586
+ t.add_we_signaled("hi", vec![]);
1587
+ t.add_we_signaled("hi", vec![]);
1588
+ t.add_we_signaled("hi", vec![]);
1589
+ t.add_workflow_task_scheduled_and_started();
1590
+
1591
+ /////
1592
+
1593
+ let events: Vec<HistoryEvent> = t.get_full_history_info().unwrap().into_events();
1594
+ let first_event = events[0].clone();
1595
+
1596
+ let mut mock_client = mock_workflow_client();
1597
+
1598
+ for (i, event) in events.into_iter().enumerate() {
1599
+ // Add an empty page
1600
+ mock_client
1601
+ .expect_get_workflow_execution_history()
1602
+ .returning(move |_, _, _| {
1603
+ Ok(GetWorkflowExecutionHistoryResponse {
1604
+ history: Some(History { events: vec![] }),
1605
+ raw_history: vec![],
1606
+ next_page_token: vec![(i * 10 + 1) as u8],
1607
+ archived: false,
1608
+ })
1609
+ })
1610
+ .times(1);
1611
+
1612
+ // Add a page with just event i
1613
+ mock_client
1614
+ .expect_get_workflow_execution_history()
1615
+ .returning(move |_, _, _| {
1616
+ Ok(GetWorkflowExecutionHistoryResponse {
1617
+ history: Some(History {
1618
+ events: vec![event.clone()],
1619
+ }),
1620
+ raw_history: vec![],
1621
+ next_page_token: vec![(i * 10) as u8],
1622
+ archived: false,
1623
+ })
1624
+ })
1625
+ .times(1);
1626
+ }
1627
+
1628
+ // Add an extra empty page at the end, with no NPT
1629
+ mock_client
1630
+ .expect_get_workflow_execution_history()
1631
+ .returning(move |_, _, _| {
1632
+ Ok(GetWorkflowExecutionHistoryResponse {
1633
+ history: Some(History { events: vec![] }),
1634
+ raw_history: vec![],
1635
+ next_page_token: vec![],
1636
+ archived: false,
1637
+ })
1638
+ })
1639
+ .times(1);
1640
+
1641
+ let workflow_task = t.get_full_history_info().unwrap();
1642
+ let mut wft_resp = workflow_task.as_poll_wft_response();
1643
+ wft_resp.workflow_execution = Some(WorkflowExecution {
1644
+ workflow_id: wf_id.to_string(),
1645
+ run_id: t.get_orig_run_id().to_string(),
1646
+ });
1647
+ wft_resp.history = Some(History {
1648
+ events: vec![first_event],
1649
+ });
1650
+ wft_resp.next_page_token = vec![1];
1651
+ wft_resp.previous_started_event_id = 3;
1652
+ wft_resp.started_event_id = 15;
1653
+
1654
+ let wf_type = DEFAULT_WORKFLOW_TYPE;
1655
+ let mh =
1656
+ MockPollCfg::from_resp_batches(wf_id, t, [ResponseType::Raw(wft_resp)], mock_client);
1657
+ let mut worker = mock_sdk_cfg(mh, |cfg| {
1658
+ cfg.max_cached_workflows = 2;
1659
+ cfg.ignore_evicts_on_shutdown = false;
1660
+ });
1661
+
1662
+ let sig_ctr = Arc::new(AtomicUsize::new(0));
1663
+ let sig_ctr_clone = sig_ctr.clone();
1664
+ worker.register_wf(wf_type.to_owned(), move |ctx: WfContext| {
1665
+ let sig_ctr_clone = sig_ctr_clone.clone();
1666
+ async move {
1667
+ let mut sigchan = ctx.make_signal_channel("hi");
1668
+ while sigchan.next().await.is_some() {
1669
+ if sig_ctr_clone.fetch_add(1, Ordering::AcqRel) == 5 {
1670
+ break;
1671
+ }
1672
+ }
1673
+ Ok(().into())
1674
+ }
1675
+ });
1676
+
1677
+ worker
1678
+ .submit_wf(
1679
+ wf_id.to_owned(),
1680
+ wf_type.to_owned(),
1681
+ vec![],
1682
+ WorkflowOptions::default(),
1683
+ )
1684
+ .await
1685
+ .unwrap();
1686
+ worker.run_until_done().await.unwrap();
1687
+ assert_eq!(sig_ctr.load(Ordering::Acquire), 6);
1688
+ }
1689
+
1690
+ #[tokio::test]
1691
+ async fn finding_end_index_with_started_as_last_event() {
1692
+ let wf_id = "fakeid";
1693
+ let mut t = TestHistoryBuilder::default();
1694
+ t.add_by_type(EventType::WorkflowExecutionStarted);
1695
+ t.add_full_wf_task();
1696
+
1697
+ t.add_we_signaled("hi", vec![]);
1698
+ t.add_workflow_task_scheduled_and_started();
1699
+ // We need to see more after this - it's not sufficient to end on a started event when
1700
+ // we know there might be more
1701
+
1702
+ let workflow_task = t.get_history_info(1).unwrap();
1703
+ let prev_started_wft_id = workflow_task.previous_started_event_id();
1704
+ let wft_started_id = workflow_task.workflow_task_started_event_id();
1705
+ let mut wft_resp = workflow_task.as_poll_wft_response();
1706
+ wft_resp.workflow_execution = Some(WorkflowExecution {
1707
+ workflow_id: wf_id.to_string(),
1708
+ run_id: t.get_orig_run_id().to_string(),
1709
+ });
1710
+ wft_resp.next_page_token = vec![1];
1711
+
1712
+ let mut resp_1: GetWorkflowExecutionHistoryResponse =
1713
+ t.get_full_history_info().unwrap().into();
1714
+ resp_1.next_page_token = vec![2];
1715
+
1716
+ let mut mock_client = mock_workflow_client();
1717
+ mock_client
1718
+ .expect_get_workflow_execution_history()
1719
+ .returning(move |_, _, _| Ok(resp_1.clone()))
1720
+ .times(1);
1721
+ // Since there aren't sufficient events, we should try to see another fetch, and that'll
1722
+ // say there aren't any
1723
+ mock_client
1724
+ .expect_get_workflow_execution_history()
1725
+ .returning(move |_, _, _| Ok(Default::default()))
1726
+ .times(1);
1727
+
1728
+ let mut paginator = HistoryPaginator::new(
1729
+ workflow_task.into(),
1730
+ prev_started_wft_id,
1731
+ wft_started_id,
1732
+ "wfid".to_string(),
1733
+ "runid".to_string(),
1734
+ NextPageToken::FetchFromStart,
1735
+ Arc::new(mock_client),
1736
+ );
1737
+ let mut update = paginator.extract_next_update().await.unwrap();
1738
+ let seq = update.take_next_wft_sequence(0).unwrap_events();
1739
+ assert_eq!(seq.last().unwrap().event_id, 3);
1740
+ let seq = update.take_next_wft_sequence(3).unwrap_events();
1741
+ // We're done since the last fetch revealed nothing
1742
+ assert_eq!(seq.last().unwrap().event_id, 7);
1743
+ }
1744
+
1745
+ #[tokio::test]
1746
+ async fn just_signal_is_complete_wft() {
1747
+ let mut t = TestHistoryBuilder::default();
1748
+ t.add_by_type(EventType::WorkflowExecutionStarted);
1749
+ t.add_full_wf_task();
1750
+ t.add_we_signaled("whatever", vec![]);
1751
+ t.add_full_wf_task();
1752
+ t.add_we_signaled("whatever", vec![]);
1753
+ t.add_full_wf_task();
1754
+ t.add_workflow_execution_completed();
1755
+
1756
+ let workflow_task = t.get_full_history_info().unwrap();
1757
+ let prev_started_wft_id = workflow_task.previous_started_event_id();
1758
+ let wft_started_id = workflow_task.workflow_task_started_event_id();
1759
+ let mock_client = mock_workflow_client();
1760
+ let mut paginator = HistoryPaginator::new(
1761
+ workflow_task.into(),
1762
+ prev_started_wft_id,
1763
+ wft_started_id,
1764
+ "wfid".to_string(),
1765
+ "runid".to_string(),
1766
+ NextPageToken::Done,
1767
+ Arc::new(mock_client),
1768
+ );
1769
+ let mut update = paginator.extract_next_update().await.unwrap();
1770
+ let seq = next_check_peek(&mut update, 0);
1771
+ assert_eq!(seq.len(), 3);
1772
+ let seq = next_check_peek(&mut update, 3);
1773
+ assert_eq!(seq.len(), 4);
1774
+ let seq = next_check_peek(&mut update, 7);
1775
+ assert_eq!(seq.len(), 4);
1776
+ let seq = next_check_peek(&mut update, 11);
1777
+ assert_eq!(seq.len(), 2);
1778
+ }
1779
+
1780
+ #[tokio::test]
1781
+ async fn heartbeats_then_signal() {
1782
+ let mut t = TestHistoryBuilder::default();
1783
+ t.add_by_type(EventType::WorkflowExecutionStarted);
1784
+ t.add_full_wf_task();
1785
+ t.add_full_wf_task();
1786
+ let mut need_fetch_resp =
1787
+ hist_to_poll_resp(&t, "wfid".to_owned(), ResponseType::AllHistory).resp;
1788
+ need_fetch_resp.next_page_token = vec![1];
1789
+ t.add_full_wf_task();
1790
+ t.add_we_signaled("whatever", vec![]);
1791
+ t.add_workflow_task_scheduled_and_started();
1792
+
1793
+ let full_resp: GetWorkflowExecutionHistoryResponse =
1794
+ t.get_full_history_info().unwrap().into();
1795
+
1796
+ let mut mock_client = mock_workflow_client();
1797
+ mock_client
1798
+ .expect_get_workflow_execution_history()
1799
+ .returning(move |_, _, _| Ok(full_resp.clone()))
1800
+ .times(1);
1801
+
1802
+ let mut paginator = HistoryPaginator::new(
1803
+ need_fetch_resp.history.unwrap(),
1804
+ // Pretend we have already processed first WFT
1805
+ 3,
1806
+ 6,
1807
+ "wfid".to_string(),
1808
+ "runid".to_string(),
1809
+ NextPageToken::Next(vec![1]),
1810
+ Arc::new(mock_client),
1811
+ );
1812
+ let mut update = paginator.extract_next_update().await.unwrap();
1813
+ // Starting past first wft
1814
+ let seq = next_check_peek(&mut update, 3);
1815
+ assert_eq!(seq.len(), 6);
1816
+ let seq = next_check_peek(&mut update, 9);
1817
+ assert_eq!(seq.len(), 4);
1818
+ }
1819
+
1820
+ #[tokio::test]
1821
+ async fn cache_miss_with_only_one_wft_available_orders_properly() {
1822
+ let mut t = TestHistoryBuilder::default();
1823
+ t.add_by_type(EventType::WorkflowExecutionStarted);
1824
+ t.add_full_wf_task();
1825
+ t.add_by_type(EventType::TimerStarted);
1826
+ t.add_full_wf_task();
1827
+ t.add_by_type(EventType::TimerStarted);
1828
+ t.add_workflow_task_scheduled_and_started();
1829
+
1830
+ let incremental_task =
1831
+ hist_to_poll_resp(&t, "wfid".to_owned(), ResponseType::OneTask(3)).resp;
1832
+
1833
+ let mut mock_client = mock_workflow_client();
1834
+ let mut one_task_resp: GetWorkflowExecutionHistoryResponse =
1835
+ t.get_history_info(1).unwrap().into();
1836
+ one_task_resp.next_page_token = vec![1];
1837
+ mock_client
1838
+ .expect_get_workflow_execution_history()
1839
+ .returning(move |_, _, _| Ok(one_task_resp.clone()))
1840
+ .times(1);
1841
+ let mut up_to_sched_start: GetWorkflowExecutionHistoryResponse =
1842
+ t.get_full_history_info().unwrap().into();
1843
+ up_to_sched_start
1844
+ .history
1845
+ .as_mut()
1846
+ .unwrap()
1847
+ .events
1848
+ .truncate(9);
1849
+ mock_client
1850
+ .expect_get_workflow_execution_history()
1851
+ .returning(move |_, _, _| Ok(up_to_sched_start.clone()))
1852
+ .times(1);
1853
+
1854
+ let mut paginator = HistoryPaginator::new(
1855
+ incremental_task.history.unwrap(),
1856
+ 6,
1857
+ 9,
1858
+ "wfid".to_string(),
1859
+ "runid".to_string(),
1860
+ NextPageToken::FetchFromStart,
1861
+ Arc::new(mock_client),
1862
+ );
1863
+ let mut update = paginator.extract_next_update().await.unwrap();
1864
+ let seq = next_check_peek(&mut update, 0);
1865
+ assert_eq!(seq.last().unwrap().event_id, 3);
1866
+ let seq = next_check_peek(&mut update, 3);
1867
+ assert_eq!(seq.last().unwrap().event_id, 7);
1868
+ let seq = next_check_peek(&mut update, 7);
1869
+ assert_eq!(seq.last().unwrap().event_id, 11);
1870
+ }
1171
1871
  }