@temporalio/core-bridge 1.7.2 → 1.7.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/lib/index.d.ts CHANGED
@@ -1,7 +1,7 @@
  import { SpanContext } from '@opentelemetry/api';
  import type { TLSConfig } from '@temporalio/common/lib/internal-non-workflow';
  export { TLSConfig };
- declare type Shadow<Base, New> = Base extends object ? New extends object ? {
+ type Shadow<Base, New> = Base extends object ? New extends object ? {
  [K in keyof Base | keyof New]: K extends keyof Base ? K extends keyof New ? Shadow<Base[K], New[K]> : Base[K] : K extends keyof New ? New[K] : never;
  } : New : New;
  export interface RetryOptions {
@@ -74,7 +74,7 @@ export interface ForwardLogger {
  *
  * @experimental
  */
- export declare type Logger = ConsoleLogger | ForwardLogger;
+ export type Logger = ConsoleLogger | ForwardLogger;
  /**
  * OpenTelemetry Collector options for exporting metrics or traces
  *
@@ -100,13 +100,13 @@ export interface OtelCollectorExporter {
  };
  }
  /** @experimental */
- export declare type CompiledOtelTraceExporter = Shadow<OtelCollectorExporter, {
+ export type CompiledOtelTraceExporter = Shadow<OtelCollectorExporter, {
  otel: {
  metricsExportInterval?: never;
  };
  }>;
  /** @experimental */
- export declare type CompiledOtelMetricsExporter = Shadow<OtelCollectorExporter, {
+ export type CompiledOtelMetricsExporter = Shadow<OtelCollectorExporter, {
  otel: {
  metricsExportInterval: number;
  };
@@ -136,7 +136,7 @@ export interface PrometheusMetricsExporter {
  *
  * @experimental
  */
- export declare type MetricsExporter = {
+ export type MetricsExporter = {
  temporality?: 'cumulative' | 'delta';
  } & (PrometheusMetricsExporter | OtelCollectorExporter);
  /**
@@ -144,7 +144,7 @@ export declare type MetricsExporter = {
  *
  * @experimental
  */
- export declare type TraceExporter = OtelCollectorExporter;
+ export type TraceExporter = OtelCollectorExporter;
  /** @experimental */
  export interface TelemetryOptions {
  /**
@@ -216,7 +216,7 @@ export interface TelemetryOptions {
  metrics?: MetricsExporter;
  }
  /** @experimental */
- export declare type CompiledTelemetryOptions = {
+ export type CompiledTelemetryOptions = {
  noTemporalPrefixForMetrics?: boolean;
  logging: {
  filter: string;
@@ -290,7 +290,7 @@ export interface WorkerOptions {
  maxActivitiesPerSecond?: number;
  }
  /** Log level - must match rust log level names */
- export declare type LogLevel = 'TRACE' | 'DEBUG' | 'INFO' | 'WARN' | 'ERROR';
+ export type LogLevel = 'TRACE' | 'DEBUG' | 'INFO' | 'WARN' | 'ERROR';
  export interface LogEntry {
  /** Log message */
  message: string;
@@ -306,7 +306,7 @@ export interface LogEntry {
  /**
  * Which version of the executable to run.
  */
- export declare type EphemeralServerExecutable = {
+ export type EphemeralServerExecutable = {
  type: 'cached-download';
  /**
  * Download destination directory or the system's temp directory if none set.
@@ -390,7 +390,7 @@ export interface DevServerConfig {
  *
  * Both the time-skipping test server and Temporal CLI dev server are supported.
  */
- export declare type EphemeralServerConfig = TimeSkippingServerConfig | DevServerConfig;
+ export type EphemeralServerConfig = TimeSkippingServerConfig | DevServerConfig;
  export interface Worker {
  type: 'Worker';
  }
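
Note on the lib/index.d.ts changes above: the only edit is dropping the redundant `declare` modifier from exported type aliases (inside a `.d.ts` declaration file it adds nothing), so the public types are unchanged. Several of these aliases are built on the `Shadow` utility type shown in the first hunk; the sketch below illustrates how it recursively overlays one object type onto another. The `Defaults` and `Overrides` types are made up for illustration and are not part of the package.

// Same definition as in lib/index.d.ts: properties present in both types are
// merged recursively (the New side wins at the leaves), properties unique to
// either side are kept as-is.
type Shadow<Base, New> = Base extends object ? New extends object ? {
  [K in keyof Base | keyof New]: K extends keyof Base ? K extends keyof New ? Shadow<Base[K], New[K]> : Base[K] : K extends keyof New ? New[K] : never;
} : New : New;

// Hypothetical example types (not part of @temporalio/core-bridge):
interface Defaults {
  otel: { url: string; metricsExportInterval?: number };
}
interface Overrides {
  otel: { metricsExportInterval: number };
}

// Merged['otel'] keeps `url: string` from Defaults and takes the required
// `metricsExportInterval: number` from Overrides, the same pattern
// CompiledOtelMetricsExporter uses to narrow OtelCollectorExporter.
type Merged = Shadow<Defaults, Overrides>;
const example: Merged = { otel: { url: 'grpc://localhost:4317', metricsExportInterval: 1000 } };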
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@temporalio/core-bridge",
- "version": "1.7.2",
+ "version": "1.7.4",
  "description": "Temporal.io SDK Core<>Node bridge",
  "main": "index.js",
  "types": "lib/index.d.ts",
@@ -22,8 +22,8 @@
  "author": "Temporal Technologies Inc. <sdk@temporal.io>",
  "license": "MIT",
  "dependencies": {
- "@opentelemetry/api": "^1.3.0",
- "@temporalio/common": "1.7.2",
+ "@opentelemetry/api": "^1.4.1",
+ "@temporalio/common": "1.7.4",
  "arg": "^5.0.2",
  "cargo-cp-artifact": "^0.1.6",
  "which": "^2.0.2"
@@ -53,5 +53,5 @@
  "publishConfig": {
  "access": "public"
  },
- "gitHead": "779561124eecdec8396e658c0a1305d343dfaff7"
+ "gitHead": "fb4088a8174b60b7a3fc7763ed39dbfc514a3e56"
  }
@@ -17,7 +17,7 @@ steps:
  agents:
  queue: "default"
  docker: "*"
- command: "cargo lint"
+ command: "cargo lint && cargo test-lint"
  timeout_in_minutes: 15
  plugins:
  - docker-compose#v3.0.0:
@@ -4,3 +4,5 @@ wf-input-replay = ["run", "--package", "temporal-sdk-core", "--features", "save_
  "--example", "wf_input_replay", "--"]
  lint = ["clippy", "--workspace", "--examples", "--all-features",
  "--test", "integ_tests", "--test", "heavy_tests", "--", "--D", "warnings"]
+ test-lint = ["clippy", "--all", "--all-features", "--examples", "--workspace",
+ "--tests", "--", "--D", "warnings"]
@@ -1,3 +1,3 @@
  # Primary owners

- * @Sushisource @bergundy @cretz @Spikhalskiy
+ * @temporalio/sdk
@@ -896,10 +896,7 @@ async fn workflow_failures_only_reported_once() {
  #[tokio::test]
  async fn max_wft_respected() {
  let total_wfs = 100;
- let wf_ids: Vec<_> = (0..total_wfs)
- .into_iter()
- .map(|i| format!("fake-wf-{i}"))
- .collect();
+ let wf_ids: Vec<_> = (0..total_wfs).map(|i| format!("fake-wf-{i}")).collect();
  let hists = wf_ids.iter().map(|wf_id| {
  let hist = canned_histories::single_timer("1");
  FakeWfResponses {
@@ -1027,7 +1024,7 @@ async fn activity_not_canceled_when_also_completed_repro(hist_batches: &'static
  #[tokio::test]
  async fn lots_of_workflows() {
  let total_wfs = 500;
- let hists = (0..total_wfs).into_iter().map(|i| {
+ let hists = (0..total_wfs).map(|i| {
  let wf_id = format!("fake-wf-{i}");
  let hist = canned_histories::single_timer("1");
  FakeWfResponses {
@@ -1705,9 +1702,7 @@ async fn pagination_works_with_tasks_from_completion() {
  t.add_by_type(EventType::WorkflowExecutionStarted);
  t.add_full_wf_task();
  t.add_we_signaled("sig", vec![]);
- t.add_full_wf_task();
- t.add_workflow_execution_completed();
- let get_exec_resp: GetWorkflowExecutionHistoryResponse = t.get_history_info(2).unwrap().into();
+ t.add_workflow_task_scheduled_and_started();

  let mut mock = mock_workflow_client();
  let mut needs_pag_resp = hist_to_poll_resp(&t, wfid.to_owned(), ResponseType::OneTask(2)).resp;
@@ -1722,9 +1717,13 @@ async fn pagination_works_with_tasks_from_completion() {
  mock.expect_complete_workflow_task()
  .times(1)
  .returning(|_| Ok(Default::default()));
+
+ let get_exec_resp: GetWorkflowExecutionHistoryResponse =
+ t.get_full_history_info().unwrap().into();
  mock.expect_get_workflow_execution_history()
  .returning(move |_, _, _| Ok(get_exec_resp.clone()))
  .times(1);
+
  let mut mock = single_hist_mock_sg(wfid, t, [1], mock, true);
  mock.worker_cfg(|wc| wc.max_cached_workflows = 2);
  let core = mock_worker(mock);
@@ -2162,10 +2161,6 @@ async fn fetching_to_continue_replay_works() {
  t.add_full_wf_task(); // end 14
  let mut fetch_resp: GetWorkflowExecutionHistoryResponse =
  t.get_full_history_info().unwrap().into();
- // Should only contain events after 7
- if let Some(ref mut h) = fetch_resp.history {
- h.events.retain(|e| e.event_id >= 8);
- }
  // And indicate that even *more* needs to be fetched after this, so we see a request for the
  // next page happen.
  fetch_resp.next_page_token = vec![2];
@@ -2173,12 +2168,8 @@ async fn fetching_to_continue_replay_works() {
  let timer_started_event_id = t.add_by_type(EventType::TimerStarted);
  t.add_timer_fired(timer_started_event_id, "1".to_string());
  t.add_full_wf_task();
- let mut final_fetch_resp: GetWorkflowExecutionHistoryResponse =
+ let final_fetch_resp: GetWorkflowExecutionHistoryResponse =
  t.get_full_history_info().unwrap().into();
- // Should have only the final event
- if let Some(ref mut h) = final_fetch_resp.history {
- h.events.retain(|e| e.event_id >= 15);
- }

  let tasks = vec![
  ResponseType::ToTaskNum(1),
@@ -2273,15 +2264,25 @@ async fn ensure_fetching_fail_during_complete_sends_task_failure() {
  t.add_full_wf_task(); // started 3
  t.add_we_signaled("sig1", vec![]);
  t.add_full_wf_task(); // started 7
- t.add_we_signaled("sig2", vec![]);
+
+ // Need a command event after here so the paginator will know it has two complete WFTs and
+ // processing can begin before needing to fetch again
+ t.add_by_type(EventType::TimerStarted);
  t.add_full_wf_task(); // started 11
  t.add_workflow_execution_completed();

- let mut first_poll = hist_to_poll_resp(&t, wfid, ResponseType::ToTaskNum(1)).resp;
- first_poll.next_page_token = vec![1];
- first_poll.previous_started_event_id = 3;
+ let mut first_poll = hist_to_poll_resp(&t, wfid, ResponseType::OneTask(4)).resp;
+ // History is partial so fetch will happen. We have to lie here and make up a previous started
+ // which really makes no sense, otherwise the paginator eagerly fetches and will fail before we
+ // ever start anything -- which is good -- but this test wants to make sure a fetching failure
+ // during a completion is handled correctly. That may no longer actually be a thing that can
+ // happen.
+ first_poll.previous_started_event_id = 0;
+ first_poll.started_event_id = 11;

- let mut next_page: GetWorkflowExecutionHistoryResponse = t.get_history_info(2).unwrap().into();
+ let mut next_page: GetWorkflowExecutionHistoryResponse =
+ t.get_full_history_info().unwrap().into();
+ next_page.history.as_mut().unwrap().events.truncate(9);
  next_page.next_page_token = vec![2];

  let mut mock = mock_workflow_client();
@@ -2291,9 +2292,6 @@ async fn ensure_fetching_fail_during_complete_sends_task_failure() {
  Ok(next_page.clone())
  })
  .times(1);
- let mut really_empty_fetch_resp: GetWorkflowExecutionHistoryResponse =
- t.get_history_info(1).unwrap().into();
- really_empty_fetch_resp.history = Some(Default::default());
  mock.expect_get_workflow_execution_history()
  .returning(move |_, _, _| {
  error!("Called fetch second time!");
@@ -2314,24 +2312,13 @@ async fn ensure_fetching_fail_during_complete_sends_task_failure() {
  .await
  .unwrap();

- let wf_task = core.poll_workflow_activation().await.unwrap();
- assert_matches!(
- wf_task.jobs.as_slice(),
- [WorkflowActivationJob {
- variant: Some(workflow_activation_job::Variant::SignalWorkflow(_)),
- },]
- );
- core.complete_workflow_activation(WorkflowActivationCompletion::empty(wf_task.run_id))
- .await
- .unwrap();
-
  // Expect to see eviction b/c of history fetching error here.
  let wf_task = core.poll_workflow_activation().await.unwrap();
  assert_matches!(
  wf_task.jobs.as_slice(),
  [WorkflowActivationJob {
- variant: Some(workflow_activation_job::Variant::RemoveFromCache(_)),
- },]
+ variant: Some(workflow_activation_job::Variant::RemoveFromCache(c)),
+ }] if c.message.contains("Fetching history")
  );

  core.shutdown().await;
@@ -2401,7 +2388,6 @@ async fn core_internal_flags() {
  .copied()
  .collect::<HashSet<_>>(),
  CoreInternalFlags::all_except_too_high()
- .into_iter()
  .map(|f| f as u32)
  .collect()
  );
@@ -48,6 +48,7 @@ pub struct HistoryUpdate {
  /// True if this update contains the final WFT in history, and no more attempts to extract
  /// additional updates should be made.
  has_last_wft: bool,
+ wft_count: usize,
  }
  impl Debug for HistoryUpdate {
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
@@ -176,11 +177,14 @@ impl HistoryPaginator {
  run_id: req.original_wft.work.execution.run_id.clone(),
  previous_wft_started_id: req.original_wft.work.update.previous_wft_started_id,
  wft_started_event_id: req.original_wft.work.update.wft_started_id,
- id_of_last_event_in_last_extracted_update: None,
+ id_of_last_event_in_last_extracted_update: req
+ .original_wft
+ .paginator
+ .id_of_last_event_in_last_extracted_update,
  client,
  event_queue: Default::default(),
  next_page_token: NextPageToken::FetchFromStart,
- final_events: vec![],
+ final_events: req.original_wft.work.update.events,
  };
  let first_update = paginator.extract_next_update().await?;
  req.original_wft.work.update = first_update;
@@ -244,7 +248,7 @@ impl HistoryPaginator {
  /// we have two, or until we are at the end of history.
  pub(crate) async fn extract_next_update(&mut self) -> Result<HistoryUpdate, tonic::Status> {
  loop {
- let fetch_happened = !self.get_next_page().await?;
+ let no_next_page = !self.get_next_page().await?;
  let current_events = mem::take(&mut self.event_queue);
  let seen_enough_events = current_events
  .back()
@@ -267,10 +271,7 @@ impl HistoryPaginator {
  .id_of_last_event_in_last_extracted_update
  .unwrap_or_default()
  >= self.wft_started_event_id;
- if current_events.is_empty()
- && !fetch_happened
- && already_sent_update_with_enough_events
- {
+ if current_events.is_empty() && no_next_page && already_sent_update_with_enough_events {
  // We must return an empty update which also says is contains the final WFT so we
  // know we're done with replay.
  return Ok(HistoryUpdate::from_events(
@@ -282,12 +283,15 @@ impl HistoryPaginator {
  .0);
  }

- if current_events.is_empty() || (fetch_happened && !seen_enough_events) {
+ if current_events.is_empty() || (no_next_page && !seen_enough_events) {
  // If next page fetching happened, and we still ended up with no or insufficient
  // events, something is wrong. We're expecting there to be more events to be able to
  // extract this update, but server isn't giving us any. We have no choice except to
  // give up and evict.
  error!(
+ current_events=?current_events,
+ no_next_page,
+ seen_enough_events,
  "We expected to be able to fetch more events but server says there are none"
  );
  return Err(EMPTY_FETCH_ERR.clone());
@@ -302,6 +306,16 @@ impl HistoryPaginator {
  self.wft_started_event_id,
  no_more,
  );
+
+ // If there are potentially more events and we haven't extracted two WFTs yet, keep
+ // trying.
+ if !matches!(self.next_page_token, NextPageToken::Done) && update.wft_count < 2 {
+ // Unwrap the update and stuff it all back in the queue
+ self.event_queue.extend(update.events);
+ self.event_queue.extend(extra);
+ continue;
+ }
+
  let extra_eid_same = extra
  .first()
  .map(|e| e.event_id == first_event_id)
@@ -315,17 +329,19 @@ impl HistoryPaginator {
  }
  self.id_of_last_event_in_last_extracted_update =
  update.events.last().map(|e| e.event_id);
+ #[cfg(debug_assertions)]
+ update.assert_contiguous();
  return Ok(update);
  }
  }

- /// Fetches the next page and adds it to the internal queue. Returns true if a fetch was
- /// performed, false if there is no next page.
+ /// Fetches the next page and adds it to the internal queue.
+ /// Returns true if we still have a next page token after fetching.
  async fn get_next_page(&mut self) -> Result<bool, tonic::Status> {
  let history = loop {
  let npt = match mem::replace(&mut self.next_page_token, NextPageToken::Done) {
  // If the last page token we got was empty, we're done.
- NextPageToken::Done => return Ok(false),
+ NextPageToken::Done => break None,
  NextPageToken::FetchFromStart => vec![],
  NextPageToken::Next(v) => v,
  };
@@ -352,8 +368,18 @@ impl HistoryPaginator {
  break fetch_res.history;
  };

- self.event_queue
- .extend(history.map(|h| h.events).unwrap_or_default());
+ let queue_back_id = self
+ .event_queue
+ .back()
+ .map(|e| e.event_id)
+ .unwrap_or_default();
+ self.event_queue.extend(
+ history
+ .map(|h| h.events)
+ .unwrap_or_default()
+ .into_iter()
+ .skip_while(|e| e.event_id <= queue_back_id),
+ );
  if matches!(&self.next_page_token, NextPageToken::Done) {
  // If finished, we need to extend the queue with the final events, skipping any
  // which are already present.
@@ -366,7 +392,7 @@ impl HistoryPaginator {
  );
  }
  };
- Ok(true)
+ Ok(!matches!(&self.next_page_token, NextPageToken::Done))
  }
  }

@@ -428,6 +454,7 @@ impl HistoryUpdate {
  previous_wft_started_id: -1,
  wft_started_id: -1,
  has_last_wft: false,
+ wft_count: 0,
  }
  }
  pub fn is_real(&self) -> bool {
@@ -437,6 +464,20 @@ impl HistoryUpdate {
  self.events.get(0).map(|e| e.event_id)
  }

+ #[cfg(debug_assertions)]
+ fn assert_contiguous(&self) -> bool {
+ use crate::abstractions::dbg_panic;
+
+ for win in self.events.as_slice().windows(2) {
+ if let &[e1, e2] = &win {
+ if e2.event_id != e1.event_id + 1 {
+ dbg_panic!("HistoryUpdate isn't contiguous! {:?} -> {:?}", e1, e2);
+ }
+ }
+ }
+ true
+ }
+
  /// Create an instance of an update directly from events. If the passed in event iterator has a
  /// partial WFT sequence at the end, all events after the last complete WFT sequence (ending
  /// with WFT started) are returned back to the caller, since the history update only works in
@@ -451,8 +492,11 @@ impl HistoryUpdate {
  <I as IntoIterator>::IntoIter: Send + 'static,
  {
  let mut all_events: Vec<_> = events.into_iter().collect();
- let mut last_end =
- find_end_index_of_next_wft_seq(all_events.as_slice(), previous_wft_started_id);
+ let mut last_end = find_end_index_of_next_wft_seq(
+ all_events.as_slice(),
+ previous_wft_started_id,
+ has_last_wft,
+ );
  if matches!(last_end, NextWFTSeqEndIndex::Incomplete(_)) {
  return if has_last_wft {
  (
@@ -461,6 +505,7 @@ impl HistoryUpdate {
  previous_wft_started_id,
  wft_started_id,
  has_last_wft,
+ wft_count: 1,
  },
  vec![],
  )
@@ -471,23 +516,32 @@ impl HistoryUpdate {
  previous_wft_started_id,
  wft_started_id,
  has_last_wft,
+ wft_count: 0,
  },
  all_events,
  )
  };
  }
+ let mut wft_count = 0;
  while let NextWFTSeqEndIndex::Complete(next_end_ix) = last_end {
+ wft_count += 1;
  let next_end_eid = all_events[next_end_ix].event_id;
  // To save skipping all events at the front of this slice, only pass the relevant
  // portion, but that means the returned index must be adjusted, hence the addition.
- let next_end = find_end_index_of_next_wft_seq(&all_events[next_end_ix..], next_end_eid)
- .add(next_end_ix);
+ let next_end = find_end_index_of_next_wft_seq(
+ &all_events[next_end_ix..],
+ next_end_eid,
+ has_last_wft,
+ )
+ .add(next_end_ix);
  if matches!(next_end, NextWFTSeqEndIndex::Incomplete(_)) {
  break;
  }
  last_end = next_end;
  }
- let remaining_events = if all_events.is_empty() {
+ // If we have the last WFT, there's no point in there being "remaining" events, because
+ // they must be considered part of the last sequence
+ let remaining_events = if all_events.is_empty() || has_last_wft {
  vec![]
  } else {
  all_events.split_off(last_end.index() + 1)
@@ -499,6 +553,7 @@ impl HistoryUpdate {
  previous_wft_started_id,
  wft_started_id,
  has_last_wft,
+ wft_count,
  },
  remaining_events,
  )
@@ -521,6 +576,7 @@ impl HistoryUpdate {
  previous_wft_started_id,
  wft_started_id,
  has_last_wft: true,
+ wft_count: 0,
  }
  }

@@ -536,7 +592,8 @@ impl HistoryUpdate {
  if let Some(ix_first_relevant) = self.starting_index_after_skipping(from_wft_started_id) {
  self.events.drain(0..ix_first_relevant);
  }
- let next_wft_ix = find_end_index_of_next_wft_seq(&self.events, from_wft_started_id);
+ let next_wft_ix =
+ find_end_index_of_next_wft_seq(&self.events, from_wft_started_id, self.has_last_wft);
  match next_wft_ix {
  NextWFTSeqEndIndex::Incomplete(siz) => {
  if self.has_last_wft {
@@ -577,14 +634,17 @@ impl HistoryUpdate {
  if relevant_events.is_empty() {
  return relevant_events;
  }
- let ix_end = find_end_index_of_next_wft_seq(relevant_events, from_wft_started_id).index();
+ let ix_end =
+ find_end_index_of_next_wft_seq(relevant_events, from_wft_started_id, self.has_last_wft)
+ .index();
  &relevant_events[0..=ix_end]
  }

  /// Returns true if this update has the next needed WFT sequence, false if events will need to
  /// be fetched in order to create a complete update with the entire next WFT sequence.
  pub fn can_take_next_wft_sequence(&self, from_wft_started_id: i64) -> bool {
- let next_wft_ix = find_end_index_of_next_wft_seq(&self.events, from_wft_started_id);
+ let next_wft_ix =
+ find_end_index_of_next_wft_seq(&self.events, from_wft_started_id, self.has_last_wft);
  if let NextWFTSeqEndIndex::Incomplete(_) = next_wft_ix {
  if !self.has_last_wft {
  return false;
@@ -644,12 +704,13 @@ impl NextWFTSeqEndIndex {
  fn find_end_index_of_next_wft_seq(
  events: &[HistoryEvent],
  from_event_id: i64,
+ has_last_wft: bool,
  ) -> NextWFTSeqEndIndex {
  if events.is_empty() {
  return NextWFTSeqEndIndex::Incomplete(0);
  }
  let mut last_index = 0;
- let mut saw_any_non_wft_event = false;
+ let mut saw_any_command_event = false;
  for (ix, e) in events.iter().enumerate() {
  last_index = ix;

@@ -659,15 +720,8 @@ fn find_end_index_of_next_wft_seq(
  continue;
  }

- if !matches!(
- e.event_type(),
- EventType::WorkflowTaskFailed
- | EventType::WorkflowTaskTimedOut
- | EventType::WorkflowTaskScheduled
- | EventType::WorkflowTaskStarted
- | EventType::WorkflowTaskCompleted
- ) {
- saw_any_non_wft_event = true;
+ if e.is_command_event() || e.event_type() == EventType::WorkflowExecutionStarted {
+ saw_any_command_event = true;
  }
  if e.is_final_wf_execution_event() {
  return NextWFTSeqEndIndex::Complete(last_index);
@@ -696,12 +750,20 @@ fn find_end_index_of_next_wft_seq(
  if next_next_event.event_type() == EventType::WorkflowTaskScheduled {
  continue;
  } else {
- saw_any_non_wft_event = true;
+ return NextWFTSeqEndIndex::Complete(ix);
  }
+ } else if !has_last_wft && !saw_any_command_event {
+ // Don't have enough events to look ahead of the WorkflowTaskCompleted. Need
+ // to fetch more.
+ continue;
  }
  }
+ } else if !has_last_wft && !saw_any_command_event {
+ // Don't have enough events to look ahead of the WorkflowTaskStarted. Need to fetch
+ // more.
+ continue;
  }
- if saw_any_non_wft_event {
+ if saw_any_command_event {
  return NextWFTSeqEndIndex::Complete(ix);
  }
  }
@@ -715,11 +777,21 @@ pub mod tests {
  use super::*;
  use crate::{
  replay::{HistoryInfo, TestHistoryBuilder},
- test_help::canned_histories,
+ test_help::{canned_histories, hist_to_poll_resp, mock_sdk_cfg, MockPollCfg, ResponseType},
  worker::client::mocks::mock_workflow_client,
  };
+ use futures::StreamExt;
  use futures_util::TryStreamExt;
- use temporal_sdk_core_protos::temporal::api::workflowservice::v1::GetWorkflowExecutionHistoryResponse;
+ use std::sync::atomic::{AtomicUsize, Ordering};
+ use temporal_client::WorkflowOptions;
+ use temporal_sdk::WfContext;
+ use temporal_sdk_core_protos::{
+ temporal::api::{
+ common::v1::WorkflowExecution, enums::v1::WorkflowTaskFailedCause,
+ workflowservice::v1::GetWorkflowExecutionHistoryResponse,
+ },
+ DEFAULT_WORKFLOW_TYPE,
+ };

  impl From<HistoryInfo> for HistoryUpdate {
  fn from(v: HistoryInfo) -> Self {
@@ -1004,14 +1076,13 @@ pub mod tests {
  }

  // Like the above, but if the history happens to be cut off at a wft boundary, (even though
- // there may have been many heartbeats after we have no way of knowing about), it's going to
- // count events 7-20 as a WFT since there is started, completed, timer command, ..heartbeats..
+ // there may have been many heartbeats after we have no way of knowing about)
  #[tokio::test]
  async fn needs_fetch_after_complete_seq_with_heartbeats() {
  let t = three_wfts_then_heartbeats();
  let mut ends_in_middle_of_seq = t.as_history_update().events;
  ends_in_middle_of_seq.truncate(20);
- let (mut update, remaining) = HistoryUpdate::from_events(
+ let (mut update, _) = HistoryUpdate::from_events(
  ends_in_middle_of_seq,
  0,
  t.get_full_history_info()
@@ -1019,7 +1090,6 @@ pub mod tests {
  .workflow_task_started_event_id(),
  false,
  );
- assert!(remaining.is_empty());
  let seq = update.take_next_wft_sequence(0).unwrap_events();
  assert_eq!(seq.last().unwrap().event_id, 3);
  let seq = update.take_next_wft_sequence(3).unwrap_events();
@@ -1248,8 +1318,554 @@ pub mod tests {
  assert_eq!(seq.last().unwrap().event_id, 3);
  let seq = update.take_next_wft_sequence(3).unwrap_events();
  assert_eq!(seq.last().unwrap().event_id, 8);
- assert_matches!(update.take_next_wft_sequence(8), NextWFT::NeedFetch);
- let mut update = paginator.extract_next_update().await.unwrap();
  assert_matches!(update.take_next_wft_sequence(8), NextWFT::ReplayOver);
  }
+
+ #[tokio::test]
+ async fn weird_pagination_doesnt_drop_wft_events() {
+ let wf_id = "fakeid";
+ // 1: EVENT_TYPE_WORKFLOW_EXECUTION_STARTED
+ // 2: EVENT_TYPE_WORKFLOW_TASK_SCHEDULED
+ // 3: EVENT_TYPE_WORKFLOW_TASK_STARTED
+ // 4: EVENT_TYPE_WORKFLOW_TASK_COMPLETED
+ // empty page
+ // 5: EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED
+ // 6: EVENT_TYPE_WORKFLOW_TASK_SCHEDULED
+ // 7: EVENT_TYPE_WORKFLOW_TASK_STARTED
+ // 8: EVENT_TYPE_WORKFLOW_TASK_FAILED
+ // empty page
+ // 9: EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED
+ // 10: EVENT_TYPE_WORKFLOW_TASK_SCHEDULED
+ // 11: EVENT_TYPE_WORKFLOW_TASK_STARTED
+ // empty page
+ let mut t = TestHistoryBuilder::default();
+ t.add_by_type(EventType::WorkflowExecutionStarted);
+ t.add_full_wf_task();
+
+ t.add_we_signaled("hi", vec![]);
+ t.add_workflow_task_scheduled_and_started();
+ t.add_workflow_task_failed_with_failure(
+ WorkflowTaskFailedCause::UnhandledCommand,
+ Default::default(),
+ );
+
+ t.add_we_signaled("hi", vec![]);
+ t.add_workflow_task_scheduled_and_started();
+
+ let workflow_task = t.get_full_history_info().unwrap();
+ let mut wft_resp = workflow_task.as_poll_wft_response();
+ wft_resp.workflow_execution = Some(WorkflowExecution {
+ workflow_id: wf_id.to_string(),
+ run_id: t.get_orig_run_id().to_string(),
+ });
+ // Just 9/10/11 in WFT
+ wft_resp.history.as_mut().unwrap().events.drain(0..8);
+
+ let mut resp_1: GetWorkflowExecutionHistoryResponse =
+ t.get_full_history_info().unwrap().into();
+ resp_1.next_page_token = vec![1];
+ resp_1.history.as_mut().unwrap().events.truncate(4);
+
+ let mut mock_client = mock_workflow_client();
+ mock_client
+ .expect_get_workflow_execution_history()
+ .returning(move |_, _, _| Ok(resp_1.clone()))
+ .times(1);
+ mock_client
+ .expect_get_workflow_execution_history()
+ .returning(move |_, _, _| {
+ Ok(GetWorkflowExecutionHistoryResponse {
+ history: Some(History { events: vec![] }),
+ raw_history: vec![],
+ next_page_token: vec![2],
+ archived: false,
+ })
+ })
+ .times(1);
+ let mut resp_2: GetWorkflowExecutionHistoryResponse =
+ t.get_full_history_info().unwrap().into();
+ resp_2.next_page_token = vec![3];
+ resp_2.history.as_mut().unwrap().events.drain(0..4);
+ resp_2.history.as_mut().unwrap().events.truncate(4);
+ mock_client
+ .expect_get_workflow_execution_history()
+ .returning(move |_, _, _| Ok(resp_2.clone()))
+ .times(1);
+ mock_client
+ .expect_get_workflow_execution_history()
+ .returning(move |_, _, _| {
+ Ok(GetWorkflowExecutionHistoryResponse {
+ history: Some(History { events: vec![] }),
+ raw_history: vec![],
+ next_page_token: vec![],
+ archived: false,
+ })
+ })
+ .times(1);
+
+ let wf_type = DEFAULT_WORKFLOW_TYPE;
+ let mh =
+ MockPollCfg::from_resp_batches(wf_id, t, [ResponseType::Raw(wft_resp)], mock_client);
+ let mut worker = mock_sdk_cfg(mh, |cfg| {
+ cfg.max_cached_workflows = 2;
+ cfg.ignore_evicts_on_shutdown = false;
+ });
+
+ let sig_ctr = Arc::new(AtomicUsize::new(0));
+ let sig_ctr_clone = sig_ctr.clone();
+ worker.register_wf(wf_type.to_owned(), move |ctx: WfContext| {
+ let sig_ctr_clone = sig_ctr_clone.clone();
+ async move {
+ let mut sigchan = ctx.make_signal_channel("hi");
+ while sigchan.next().await.is_some() {
+ if sig_ctr_clone.fetch_add(1, Ordering::AcqRel) == 1 {
+ break;
+ }
+ }
+ Ok(().into())
+ }
+ });
+
+ worker
+ .submit_wf(
+ wf_id.to_owned(),
+ wf_type.to_owned(),
+ vec![],
+ WorkflowOptions::default(),
+ )
+ .await
+ .unwrap();
+ worker.run_until_done().await.unwrap();
+ assert_eq!(sig_ctr.load(Ordering::Acquire), 2);
+ }
+
+ #[tokio::test]
+ async fn extreme_pagination_doesnt_drop_wft_events_paginator() {
+ // 1: EVENT_TYPE_WORKFLOW_EXECUTION_STARTED
+ // 2: EVENT_TYPE_WORKFLOW_TASK_SCHEDULED
+ // 3: EVENT_TYPE_WORKFLOW_TASK_STARTED // <- previous_started_event_id
+ // 4: EVENT_TYPE_WORKFLOW_TASK_COMPLETED
+
+ // 5: EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED
+ // 6: EVENT_TYPE_WORKFLOW_TASK_SCHEDULED
+ // 7: EVENT_TYPE_WORKFLOW_TASK_STARTED
+ // 8: EVENT_TYPE_WORKFLOW_TASK_FAILED
+
+ // 9: EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED
+ // 10: EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED
+ // 11: EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED
+ // 12: EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED
+ // 13: EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED
+ // 14: EVENT_TYPE_WORKFLOW_TASK_SCHEDULED
+ // 15: EVENT_TYPE_WORKFLOW_TASK_STARTED // <- started_event_id
+
+ let mut t = TestHistoryBuilder::default();
+ t.add_by_type(EventType::WorkflowExecutionStarted);
+ t.add_full_wf_task();
+
+ t.add_we_signaled("hi", vec![]);
+ t.add_workflow_task_scheduled_and_started();
+ t.add_workflow_task_failed_with_failure(
+ WorkflowTaskFailedCause::UnhandledCommand,
+ Default::default(),
+ );
+
+ t.add_we_signaled("hi", vec![]);
+ t.add_we_signaled("hi", vec![]);
+ t.add_we_signaled("hi", vec![]);
+ t.add_we_signaled("hi", vec![]);
+ t.add_we_signaled("hi", vec![]);
+ t.add_workflow_task_scheduled_and_started();
+
+ let mut mock_client = mock_workflow_client();
+
+ let events: Vec<HistoryEvent> = t.get_full_history_info().unwrap().into_events();
+ let first_event = events[0].clone();
+ for (i, event) in events.into_iter().enumerate() {
+ // Add an empty page
+ mock_client
+ .expect_get_workflow_execution_history()
+ .returning(move |_, _, _| {
+ Ok(GetWorkflowExecutionHistoryResponse {
+ history: Some(History { events: vec![] }),
+ raw_history: vec![],
+ next_page_token: vec![(i * 10) as u8],
+ archived: false,
+ })
+ })
+ .times(1);
+
+ // Add a page with only event i
+ mock_client
+ .expect_get_workflow_execution_history()
+ .returning(move |_, _, _| {
+ Ok(GetWorkflowExecutionHistoryResponse {
+ history: Some(History {
+ events: vec![event.clone()],
+ }),
+ raw_history: vec![],
+ next_page_token: vec![(i * 10 + 1) as u8],
+ archived: false,
+ })
+ })
+ .times(1);
+ }
+
+ // Add an extra empty page at the end, with no NPT
+ mock_client
+ .expect_get_workflow_execution_history()
+ .returning(move |_, _, _| {
+ Ok(GetWorkflowExecutionHistoryResponse {
+ history: Some(History { events: vec![] }),
+ raw_history: vec![],
+ next_page_token: vec![],
+ archived: false,
+ })
+ })
+ .times(1);
+
+ let mut paginator = HistoryPaginator::new(
+ History {
+ events: vec![first_event],
+ },
+ 3,
+ 15,
+ "wfid".to_string(),
+ "runid".to_string(),
+ vec![1],
+ Arc::new(mock_client),
+ );
+
+ let mut update = paginator.extract_next_update().await.unwrap();
+ let seq = update.take_next_wft_sequence(0).unwrap_events();
+ assert_eq!(seq.first().unwrap().event_id, 1);
+ assert_eq!(seq.last().unwrap().event_id, 3);
+
+ let seq = update.take_next_wft_sequence(3).unwrap_events();
+ assert_eq!(seq.first().unwrap().event_id, 4);
+ assert_eq!(seq.last().unwrap().event_id, 15);
+ }
+
+ #[tokio::test]
+ async fn extreme_pagination_doesnt_drop_wft_events_worker() {
+ let wf_id = "fakeid";
+
+ // In this test, we add empty pages between each event
+
+ // 1: EVENT_TYPE_WORKFLOW_EXECUTION_STARTED
+ // 2: EVENT_TYPE_WORKFLOW_TASK_SCHEDULED
+ // 3: EVENT_TYPE_WORKFLOW_TASK_STARTED // <- previous_started_event_id
+ // 4: EVENT_TYPE_WORKFLOW_TASK_COMPLETED
+
+ // 5: EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED
+ // 6: EVENT_TYPE_WORKFLOW_TASK_SCHEDULED
+ // 7: EVENT_TYPE_WORKFLOW_TASK_STARTED
+ // 8: EVENT_TYPE_WORKFLOW_TASK_FAILED
+
+ // 9: EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED
+ // 10: EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED
+ // 11: EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED
+ // 12: EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED
+ // 13: EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED
+ // 14: EVENT_TYPE_WORKFLOW_TASK_SCHEDULED
+ // 15: EVENT_TYPE_WORKFLOW_TASK_STARTED // <- started_event_id
+
+ let mut t = TestHistoryBuilder::default();
+ t.add_by_type(EventType::WorkflowExecutionStarted);
+ t.add_full_wf_task();
+
+ t.add_we_signaled("hi", vec![]);
+ t.add_workflow_task_scheduled_and_started();
+ t.add_workflow_task_failed_with_failure(
+ WorkflowTaskFailedCause::UnhandledCommand,
+ Default::default(),
+ );
+
+ t.add_we_signaled("hi", vec![]);
+ t.add_we_signaled("hi", vec![]);
+ t.add_we_signaled("hi", vec![]);
+ t.add_we_signaled("hi", vec![]);
+ t.add_we_signaled("hi", vec![]);
+ t.add_workflow_task_scheduled_and_started();
+
+ /////
+
+ let events: Vec<HistoryEvent> = t.get_full_history_info().unwrap().into_events();
+ let first_event = events[0].clone();
+
+ let mut mock_client = mock_workflow_client();
+
+ for (i, event) in events.into_iter().enumerate() {
+ // Add an empty page
+ mock_client
+ .expect_get_workflow_execution_history()
+ .returning(move |_, _, _| {
+ Ok(GetWorkflowExecutionHistoryResponse {
+ history: Some(History { events: vec![] }),
+ raw_history: vec![],
+ next_page_token: vec![(i * 10 + 1) as u8],
+ archived: false,
+ })
+ })
+ .times(1);
+
+ // Add a page with just event i
+ mock_client
+ .expect_get_workflow_execution_history()
+ .returning(move |_, _, _| {
+ Ok(GetWorkflowExecutionHistoryResponse {
+ history: Some(History {
+ events: vec![event.clone()],
+ }),
+ raw_history: vec![],
+ next_page_token: vec![(i * 10) as u8],
+ archived: false,
+ })
+ })
+ .times(1);
+ }
+
+ // Add an extra empty page at the end, with no NPT
+ mock_client
+ .expect_get_workflow_execution_history()
+ .returning(move |_, _, _| {
+ Ok(GetWorkflowExecutionHistoryResponse {
+ history: Some(History { events: vec![] }),
+ raw_history: vec![],
+ next_page_token: vec![],
+ archived: false,
+ })
+ })
+ .times(1);
+
+ let workflow_task = t.get_full_history_info().unwrap();
+ let mut wft_resp = workflow_task.as_poll_wft_response();
+ wft_resp.workflow_execution = Some(WorkflowExecution {
+ workflow_id: wf_id.to_string(),
+ run_id: t.get_orig_run_id().to_string(),
+ });
+ wft_resp.history = Some(History {
+ events: vec![first_event],
+ });
+ wft_resp.next_page_token = vec![1];
+ wft_resp.previous_started_event_id = 3;
+ wft_resp.started_event_id = 15;
+
+ let wf_type = DEFAULT_WORKFLOW_TYPE;
+ let mh =
+ MockPollCfg::from_resp_batches(wf_id, t, [ResponseType::Raw(wft_resp)], mock_client);
+ let mut worker = mock_sdk_cfg(mh, |cfg| {
+ cfg.max_cached_workflows = 2;
+ cfg.ignore_evicts_on_shutdown = false;
+ });
+
+ let sig_ctr = Arc::new(AtomicUsize::new(0));
+ let sig_ctr_clone = sig_ctr.clone();
+ worker.register_wf(wf_type.to_owned(), move |ctx: WfContext| {
+ let sig_ctr_clone = sig_ctr_clone.clone();
+ async move {
+ let mut sigchan = ctx.make_signal_channel("hi");
+ while sigchan.next().await.is_some() {
+ if sig_ctr_clone.fetch_add(1, Ordering::AcqRel) == 5 {
+ break;
+ }
+ }
+ Ok(().into())
+ }
+ });
+
+ worker
+ .submit_wf(
+ wf_id.to_owned(),
+ wf_type.to_owned(),
+ vec![],
+ WorkflowOptions::default(),
+ )
+ .await
+ .unwrap();
+ worker.run_until_done().await.unwrap();
+ assert_eq!(sig_ctr.load(Ordering::Acquire), 6);
+ }
+
+ #[tokio::test]
+ async fn finding_end_index_with_started_as_last_event() {
+ let wf_id = "fakeid";
+ let mut t = TestHistoryBuilder::default();
+ t.add_by_type(EventType::WorkflowExecutionStarted);
+ t.add_full_wf_task();
+
+ t.add_we_signaled("hi", vec![]);
+ t.add_workflow_task_scheduled_and_started();
+ // We need to see more after this - it's not sufficient to end on a started event when
+ // we know there might be more
+
+ let workflow_task = t.get_history_info(1).unwrap();
+ let prev_started_wft_id = workflow_task.previous_started_event_id();
+ let wft_started_id = workflow_task.workflow_task_started_event_id();
+ let mut wft_resp = workflow_task.as_poll_wft_response();
+ wft_resp.workflow_execution = Some(WorkflowExecution {
+ workflow_id: wf_id.to_string(),
+ run_id: t.get_orig_run_id().to_string(),
+ });
+ wft_resp.next_page_token = vec![1];
+
+ let mut resp_1: GetWorkflowExecutionHistoryResponse =
+ t.get_full_history_info().unwrap().into();
+ resp_1.next_page_token = vec![2];
+
+ let mut mock_client = mock_workflow_client();
+ mock_client
+ .expect_get_workflow_execution_history()
+ .returning(move |_, _, _| Ok(resp_1.clone()))
+ .times(1);
+ // Since there aren't sufficient events, we should try to see another fetch, and that'll
+ // say there aren't any
+ mock_client
+ .expect_get_workflow_execution_history()
+ .returning(move |_, _, _| Ok(Default::default()))
+ .times(1);
+
+ let mut paginator = HistoryPaginator::new(
+ workflow_task.into(),
+ prev_started_wft_id,
+ wft_started_id,
+ "wfid".to_string(),
+ "runid".to_string(),
+ NextPageToken::FetchFromStart,
+ Arc::new(mock_client),
+ );
+ let mut update = paginator.extract_next_update().await.unwrap();
+ let seq = update.take_next_wft_sequence(0).unwrap_events();
+ assert_eq!(seq.last().unwrap().event_id, 3);
+ let seq = update.take_next_wft_sequence(3).unwrap_events();
+ // We're done since the last fetch revealed nothing
+ assert_eq!(seq.last().unwrap().event_id, 7);
+ }
+
+ #[tokio::test]
+ async fn just_signal_is_complete_wft() {
+ let mut t = TestHistoryBuilder::default();
+ t.add_by_type(EventType::WorkflowExecutionStarted);
+ t.add_full_wf_task();
+ t.add_we_signaled("whatever", vec![]);
+ t.add_full_wf_task();
+ t.add_we_signaled("whatever", vec![]);
+ t.add_full_wf_task();
+ t.add_workflow_execution_completed();
+
+ let workflow_task = t.get_full_history_info().unwrap();
+ let prev_started_wft_id = workflow_task.previous_started_event_id();
+ let wft_started_id = workflow_task.workflow_task_started_event_id();
+ let mock_client = mock_workflow_client();
+ let mut paginator = HistoryPaginator::new(
+ workflow_task.into(),
+ prev_started_wft_id,
+ wft_started_id,
+ "wfid".to_string(),
+ "runid".to_string(),
+ NextPageToken::Done,
+ Arc::new(mock_client),
+ );
+ let mut update = paginator.extract_next_update().await.unwrap();
+ let seq = next_check_peek(&mut update, 0);
+ assert_eq!(seq.len(), 3);
+ let seq = next_check_peek(&mut update, 3);
+ assert_eq!(seq.len(), 4);
+ let seq = next_check_peek(&mut update, 7);
+ assert_eq!(seq.len(), 4);
+ let seq = next_check_peek(&mut update, 11);
+ assert_eq!(seq.len(), 2);
+ }
+
+ #[tokio::test]
+ async fn heartbeats_then_signal() {
+ let mut t = TestHistoryBuilder::default();
+ t.add_by_type(EventType::WorkflowExecutionStarted);
+ t.add_full_wf_task();
+ t.add_full_wf_task();
+ let mut need_fetch_resp =
+ hist_to_poll_resp(&t, "wfid".to_owned(), ResponseType::AllHistory).resp;
+ need_fetch_resp.next_page_token = vec![1];
+ t.add_full_wf_task();
+ t.add_we_signaled("whatever", vec![]);
+ t.add_workflow_task_scheduled_and_started();
+
+ let full_resp: GetWorkflowExecutionHistoryResponse =
+ t.get_full_history_info().unwrap().into();
+
+ let mut mock_client = mock_workflow_client();
+ mock_client
+ .expect_get_workflow_execution_history()
+ .returning(move |_, _, _| Ok(full_resp.clone()))
+ .times(1);
+
+ let mut paginator = HistoryPaginator::new(
+ need_fetch_resp.history.unwrap(),
+ // Pretend we have already processed first WFT
+ 3,
+ 6,
+ "wfid".to_string(),
+ "runid".to_string(),
+ NextPageToken::Next(vec![1]),
+ Arc::new(mock_client),
+ );
+ let mut update = paginator.extract_next_update().await.unwrap();
+ // Starting past first wft
+ let seq = next_check_peek(&mut update, 3);
+ assert_eq!(seq.len(), 6);
+ let seq = next_check_peek(&mut update, 9);
+ assert_eq!(seq.len(), 4);
+ }
+
+ #[tokio::test]
+ async fn cache_miss_with_only_one_wft_available_orders_properly() {
+ let mut t = TestHistoryBuilder::default();
+ t.add_by_type(EventType::WorkflowExecutionStarted);
+ t.add_full_wf_task();
+ t.add_by_type(EventType::TimerStarted);
+ t.add_full_wf_task();
+ t.add_by_type(EventType::TimerStarted);
+ t.add_workflow_task_scheduled_and_started();
+
+ let incremental_task =
+ hist_to_poll_resp(&t, "wfid".to_owned(), ResponseType::OneTask(3)).resp;
+
+ let mut mock_client = mock_workflow_client();
+ let mut one_task_resp: GetWorkflowExecutionHistoryResponse =
+ t.get_history_info(1).unwrap().into();
+ one_task_resp.next_page_token = vec![1];
+ mock_client
+ .expect_get_workflow_execution_history()
+ .returning(move |_, _, _| Ok(one_task_resp.clone()))
+ .times(1);
+ let mut up_to_sched_start: GetWorkflowExecutionHistoryResponse =
+ t.get_full_history_info().unwrap().into();
+ up_to_sched_start
+ .history
+ .as_mut()
+ .unwrap()
+ .events
+ .truncate(9);
+ mock_client
+ .expect_get_workflow_execution_history()
+ .returning(move |_, _, _| Ok(up_to_sched_start.clone()))
+ .times(1);
+
+ let mut paginator = HistoryPaginator::new(
+ incremental_task.history.unwrap(),
+ 6,
+ 9,
+ "wfid".to_string(),
+ "runid".to_string(),
+ NextPageToken::FetchFromStart,
+ Arc::new(mock_client),
+ );
+ let mut update = paginator.extract_next_update().await.unwrap();
+ let seq = next_check_peek(&mut update, 0);
+ assert_eq!(seq.last().unwrap().event_id, 3);
+ let seq = next_check_peek(&mut update, 3);
+ assert_eq!(seq.last().unwrap().event_id, 7);
+ let seq = next_check_peek(&mut update, 7);
+ assert_eq!(seq.last().unwrap().event_id, 11);
+ }
  }
@@ -531,7 +531,7 @@ impl TryFrom<HistEventData> for ChildWorkflowMachineEvents {
  Self::ChildWorkflowExecutionCancelled
  }
  _ => {
- return Err(WFMachinesError::Fatal(format!(
+ return Err(WFMachinesError::Nondeterminism(format!(
  "Child workflow machine does not handle this event: {e:?}"
  )))
  }
@@ -95,7 +95,7 @@ impl ManagedWFFunc {
  run_id: "runid".to_string(),
  history: hist,
  metrics: MetricsContext::no_op(),
- capabilities: &DEFAULT_TEST_CAPABILITIES,
+ capabilities: DEFAULT_TEST_CAPABILITIES,
  },
  Box::new(driver).into(),
  );
@@ -1031,9 +1031,6 @@ impl ManagedRun {
  new_local_acts: Vec<LocalActRequest>,
  ) -> Result<(), WFMachinesError> {
  let immediate_resolutions = self.local_activity_request_sink.sink_reqs(new_local_acts);
- if !immediate_resolutions.is_empty() {
- warn!("Immediate res: {:?}", &immediate_resolutions);
- }
  for resolution in immediate_resolutions {
  self.wfm
  .notify_of_local_result(LocalResolution::LocalActivity(resolution))?;
@@ -272,7 +272,7 @@ impl Signal {
  }

  /// Data contained within a signal
- #[derive(Default)]
+ #[derive(Default, Debug)]
  pub struct SignalData {
  /// The arguments the signal will receive
  pub input: Vec<Payload>,
@@ -11,7 +11,7 @@ use crate::{
  SignalExternalWfResult, TimerResult, UnblockEvent, Unblockable,
  };
  use crossbeam::channel::{Receiver, Sender};
- use futures::{task::Context, FutureExt, Stream};
+ use futures::{task::Context, FutureExt, Stream, StreamExt};
  use parking_lot::RwLock;
  use std::{
  collections::HashMap,
@@ -398,6 +398,14 @@ impl DrainableSignalStream {
  }
  signals
  }
+
+ pub fn drain_ready(&mut self) -> Vec<SignalData> {
+ let mut signals = vec![];
+ while let Some(s) = self.0.next().now_or_never().flatten() {
+ signals.push(s);
+ }
+ signals
+ }
  }

  impl Stream for DrainableSignalStream {