@temporalio/core-bridge 1.16.0 → 1.16.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@temporalio/core-bridge",
3
- "version": "1.16.0",
3
+ "version": "1.16.1",
4
4
  "description": "Temporal.io SDK Core<>Node bridge",
5
5
  "main": "index.js",
6
6
  "types": "lib/index.d.ts",
@@ -14,7 +14,7 @@
14
14
  "license": "MIT",
15
15
  "dependencies": {
16
16
  "@grpc/grpc-js": "^1.12.4",
17
- "@temporalio/common": "1.16.0"
17
+ "@temporalio/common": "1.16.1"
18
18
  },
19
19
  "devDependencies": {
20
20
  "arg": "^5.0.2",
@@ -1433,7 +1433,9 @@ impl WorkflowMachines {
1433
1433
  self.replaying,
1434
1434
  attrs.deprecated,
1435
1435
  encountered_entry.is_some(),
1436
- self.encountered_patch_markers.keys().map(|s| s.as_str()),
1436
+ self.encountered_patch_markers
1437
+ .iter()
1438
+ .filter_map(|(k, ci)| ci.created_command.then_some(k.as_str())),
1437
1439
  self.observed_internal_flags.clone(),
1438
1440
  )?;
1439
1441
  let mkey = self.add_cmd_to_wf_task(
@@ -1,4 +1,6 @@
1
- use crate::common::{ActivationAssertionsInterceptor, CoreWfStarter, build_fake_sdk};
1
+ use crate::common::{
2
+ ActivationAssertionsInterceptor, CoreWfStarter, WorkflowHandleExt, build_fake_sdk,
3
+ };
2
4
  use std::{
3
5
  collections::{HashSet, VecDeque, hash_map::RandomState},
4
6
  sync::{
@@ -28,6 +30,7 @@ use temporalio_common::{
28
30
  history::v1::{
29
31
  ActivityTaskCompletedEventAttributes, ActivityTaskScheduledEventAttributes,
30
32
  ActivityTaskStartedEventAttributes, TimerFiredEventAttributes,
33
+ history_event::Attributes as EventAttributes,
31
34
  },
32
35
  },
33
36
  },
@@ -874,3 +877,73 @@ async fn many_patches_combine_in_search_attrib_update(#[case] num_patches: usize
874
877
  worker.register_workflow_with_factory(move || ManyPatchesWf { num_patches });
875
878
  worker.run().await.unwrap();
876
879
  }
880
+
881
+ const MANY_PATCHES_IN_ONE_WFT_COUNT: usize = 200;
882
+
883
+ #[workflow]
884
+ #[derive(Default)]
885
+ struct ManyPatchesInOneWftWf;
886
+
887
+ #[workflow_methods]
888
+ impl ManyPatchesInOneWftWf {
889
+ #[run(name = DEFAULT_WORKFLOW_TYPE)]
890
+ async fn run(ctx: &mut WorkflowContext<Self>) -> WorkflowResult<()> {
891
+ for i in 1..=MANY_PATCHES_IN_ONE_WFT_COUNT {
892
+ let _ = ctx.patched(&format!("patch-{i}"));
893
+ }
894
+ ctx.timer(Duration::from_millis(1)).await;
895
+ Ok(())
896
+ }
897
+ }
898
+
899
+ // The main difference with many_patches_combine_in_search_attrib_update is that
900
+ // this one creates multiple patches in a single WFT, rather than spreading them out
901
+ // over multiple WFTs. See https://github.com/temporalio/sdk-core/issues/1223.
902
+ #[tokio::test]
903
+ async fn patch_marker_size_overflow_replay_is_deterministic() {
904
+ let wf_name = "patch_marker_size_overflow_replay_is_deterministic";
905
+ let mut starter = CoreWfStarter::new(wf_name);
906
+ starter.sdk_config.task_types = WorkerTaskTypes::workflow_only();
907
+ let mut worker = starter.worker().await;
908
+ worker.register_workflow::<ManyPatchesInOneWftWf>();
909
+
910
+ let task_queue = starter.get_task_queue().to_owned();
911
+ let handle = worker
912
+ .submit_workflow(
913
+ ManyPatchesInOneWftWf::run,
914
+ (),
915
+ WorkflowStartOptions::new(task_queue, wf_name.to_owned()).build(),
916
+ )
917
+ .await
918
+ .unwrap();
919
+ worker.run_until_done().await.unwrap();
920
+
921
+ // Confirm that the original execution did in fact hit the size limit: the last upsert SA
922
+ // event in history must contain fewer than the total number of patches issued by the workflow.
923
+ let history = handle.fetch_history(Default::default()).await.unwrap();
924
+ let last_upsert_patches = history
925
+ .events()
926
+ .iter()
927
+ .rev()
928
+ .find_map(|e| match &e.attributes {
929
+ Some(EventAttributes::UpsertWorkflowSearchAttributesEventAttributes(a)) => a
930
+ .search_attributes
931
+ .as_ref()
932
+ .and_then(|sa| sa.indexed_fields.get(VERSION_SEARCH_ATTR_KEY))
933
+ .map(|p| HashSet::<String, RandomState>::from_json_payload(p).unwrap()),
934
+ _ => None,
935
+ })
936
+ .expect("history should contain at least one UpsertWorkflowSearchAttributes event");
937
+ assert!(
938
+ last_upsert_patches.len() < MANY_PATCHES_IN_ONE_WFT_COUNT,
939
+ "expected the last upsert SA event to be missing patches due to size overflow, \
940
+ but it contained all {MANY_PATCHES_IN_ONE_WFT_COUNT} of them",
941
+ );
942
+
943
+ // Replay the workflow from the fetched history. This must succeed: the SDK must produce the
944
+ // same sequence of upsert SA commands during replay as it did during the original execution.
945
+ handle
946
+ .fetch_history_and_replay(worker.inner_mut())
947
+ .await
948
+ .unwrap();
949
+ }
package/src/worker.rs CHANGED
@@ -868,7 +868,7 @@ mod custom_slot_supplier {
868
868
  }
869
869
  Err(err) => {
870
870
  warn!("Error reserving slot: {err:?}");
871
- tokio::time::sleep(std::time::Duration::from_millis(1000)).await;
871
+ tokio::time::sleep(std::time::Duration::from_secs(1)).await;
872
872
  }
873
873
  }
874
874
  }