@temporalio/core-bridge 1.3.0 → 1.4.0

This diff shows the changes between publicly released versions of the package, as published to their public registries, and is provided for informational purposes only.
Files changed (36)
  1. package/Cargo.lock +117 -212
  2. package/index.d.ts +8 -2
  3. package/package.json +2 -3
  4. package/releases/aarch64-apple-darwin/index.node +0 -0
  5. package/releases/aarch64-unknown-linux-gnu/index.node +0 -0
  6. package/releases/x86_64-apple-darwin/index.node +0 -0
  7. package/releases/x86_64-pc-windows-msvc/index.node +0 -0
  8. package/releases/x86_64-unknown-linux-gnu/index.node +0 -0
  9. package/sdk-core/.buildkite/pipeline.yml +2 -0
  10. package/sdk-core/.cargo/config.toml +1 -1
  11. package/sdk-core/bridge-ffi/src/lib.rs +2 -2
  12. package/sdk-core/client/Cargo.toml +1 -1
  13. package/sdk-core/client/src/lib.rs +16 -11
  14. package/sdk-core/client/src/metrics.rs +13 -11
  15. package/sdk-core/client/src/raw.rs +1 -2
  16. package/sdk-core/client/src/retry.rs +57 -42
  17. package/sdk-core/core/Cargo.toml +12 -8
  18. package/sdk-core/core/src/core_tests/activity_tasks.rs +65 -40
  19. package/sdk-core/core/src/ephemeral_server/mod.rs +19 -3
  20. package/sdk-core/core/src/lib.rs +2 -2
  21. package/sdk-core/core/src/pollers/mod.rs +2 -0
  22. package/sdk-core/core/src/telemetry/metrics.rs +48 -39
  23. package/sdk-core/core/src/telemetry/mod.rs +53 -22
  24. package/sdk-core/core/src/telemetry/prometheus_server.rs +17 -13
  25. package/sdk-core/core/src/worker/client/mocks.rs +1 -0
  26. package/sdk-core/core/src/worker/workflow/mod.rs +4 -1
  27. package/sdk-core/core-api/Cargo.toml +1 -1
  28. package/sdk-core/test-utils/src/lib.rs +21 -2
  29. package/sdk-core/tests/integ_tests/ephemeral_server_tests.rs +27 -40
  30. package/sdk-core/tests/integ_tests/polling_tests.rs +1 -0
  31. package/sdk-core/tests/integ_tests/queries_tests.rs +1 -1
  32. package/sdk-core/tests/integ_tests/workflow_tests/replay.rs +1 -5
  33. package/sdk-core/tests/integ_tests/workflow_tests/upsert_search_attrs.rs +8 -2
  34. package/sdk-core/tests/main.rs +7 -0
  35. package/sdk-core/tests/runner.rs +93 -0
  36. package/src/conversions.rs +19 -3
package/sdk-core/client/Cargo.toml

@@ -20,7 +20,7 @@ futures = "0.3"
 futures-retry = "0.6.0"
 http = "0.2"
 once_cell = "1.13"
-opentelemetry = { version = "0.17", features = ["metrics"] }
+opentelemetry = { version = "0.18", features = ["metrics"] }
 parking_lot = "0.12"
 prost-types = "0.11"
 thiserror = "1.0"
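
The opentelemetry 0.17 → 0.18 bump is a breaking upgrade; the metrics.rs hunks below carry the matching code migration (ValueRecorder becomes Histogram, and instrument calls gain an explicit Context argument).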
package/sdk-core/client/src/lib.rs

@@ -29,7 +29,7 @@ use crate::{
     sealed::WfHandleClient,
     workflow_handle::UntypedWorkflowHandle,
 };
-use backoff::{ExponentialBackoff, SystemClock};
+use backoff::{exponential, ExponentialBackoff, SystemClock};
 use http::uri::InvalidUri;
 use once_cell::sync::OnceCell;
 use opentelemetry::metrics::Meter;
@@ -73,6 +73,7 @@ use uuid::Uuid;
 
 static CLIENT_NAME_HEADER_KEY: &str = "client-name";
 static CLIENT_VERSION_HEADER_KEY: &str = "client-version";
+/// These must match the gRPC method names, not the snake case versions that exist in the Rust code.
 static LONG_POLL_METHOD_NAMES: [&str; 2] = ["PollWorkflowTaskQueue", "PollActivityTaskQueue"];
 /// The server times out polls after 60 seconds. Set our timeout to be slightly beyond that.
 const LONG_POLL_TIMEOUT: Duration = Duration::from_secs(70);
@@ -179,20 +180,24 @@ impl RetryConfig {
             max_retries: 0,
         }
     }
+
+    pub(crate) fn into_exp_backoff<C>(self, clock: C) -> exponential::ExponentialBackoff<C> {
+        exponential::ExponentialBackoff {
+            current_interval: self.initial_interval,
+            initial_interval: self.initial_interval,
+            randomization_factor: self.randomization_factor,
+            multiplier: self.multiplier,
+            max_interval: self.max_interval,
+            max_elapsed_time: self.max_elapsed_time,
+            clock,
+            start_time: Instant::now(),
+        }
+    }
 }
 
 impl From<RetryConfig> for ExponentialBackoff {
     fn from(c: RetryConfig) -> Self {
-        Self {
-            current_interval: c.initial_interval,
-            initial_interval: c.initial_interval,
-            randomization_factor: c.randomization_factor,
-            multiplier: c.multiplier,
-            max_interval: c.max_interval,
-            max_elapsed_time: c.max_elapsed_time,
-            clock: SystemClock::default(),
-            start_time: Instant::now(),
-        }
+        c.into_exp_backoff(SystemClock::default())
     }
 }
 
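This refactor is what makes the retry handler testable with a controllable clock: `into_exp_backoff` is generic over the `backoff` crate's `Clock` trait instead of being hard-wired to `SystemClock`. A minimal sketch of the pattern, assuming only the `backoff` crate; the `FixedClock` here mirrors the one used in the retry tests further down:

```rust
use backoff::Clock;
use std::time::Instant;

// A clock that reports whatever instant it holds; a test can mutate the
// tuple field to "advance" time without sleeping.
struct FixedClock(Instant);

impl Clock for FixedClock {
    fn now(&self) -> Instant {
        self.0
    }
}

// Production code keeps using the From impl, which now just delegates:
//     let b: ExponentialBackoff = retry_config.into();
// Tests inject their own clock instead:
//     let b = retry_config.into_exp_backoff(FixedClock(Instant::now()));
```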
package/sdk-core/client/src/metrics.rs

@@ -1,7 +1,7 @@
 use crate::{AttachMetricLabels, LONG_POLL_METHOD_NAMES};
 use futures::{future::BoxFuture, FutureExt};
 use opentelemetry::{
-    metrics::{Counter, Meter, ValueRecorder},
+    metrics::{Counter, Histogram, Meter},
     KeyValue,
 };
 use std::{
@@ -17,6 +17,7 @@ use tower::Service;
 // appropriate k/vs have already been set.
 #[derive(Clone, Debug)]
 pub struct MetricsContext {
+    ctx: opentelemetry::Context,
     kvs: Arc<Vec<KeyValue>>,
     poll_is_long: bool,
 
@@ -25,21 +26,22 @@ pub struct MetricsContext {
     long_svc_request: Counter<u64>,
     long_svc_request_failed: Counter<u64>,
 
-    svc_request_latency: ValueRecorder<u64>,
-    long_svc_request_latency: ValueRecorder<u64>,
+    svc_request_latency: Histogram<u64>,
+    long_svc_request_latency: Histogram<u64>,
 }
 
 impl MetricsContext {
     pub(crate) fn new(kvs: Vec<KeyValue>, meter: &Meter) -> Self {
         Self {
+            ctx: opentelemetry::Context::current(),
             kvs: Arc::new(kvs),
             poll_is_long: false,
             svc_request: meter.u64_counter("request").init(),
             svc_request_failed: meter.u64_counter("request_failure").init(),
             long_svc_request: meter.u64_counter("long_request").init(),
             long_svc_request_failed: meter.u64_counter("long_request_failure").init(),
-            svc_request_latency: meter.u64_value_recorder("request_latency").init(),
-            long_svc_request_latency: meter.u64_value_recorder("long_request_latency").init(),
+            svc_request_latency: meter.u64_histogram("request_latency").init(),
+            long_svc_request_latency: meter.u64_histogram("long_request_latency").init(),
         }
     }
 
@@ -62,18 +64,18 @@ impl MetricsContext {
     /// A request to the temporal service was made
     pub(crate) fn svc_request(&self) {
         if self.poll_is_long {
-            self.long_svc_request.add(1, &self.kvs);
+            self.long_svc_request.add(&self.ctx, 1, &self.kvs);
         } else {
-            self.svc_request.add(1, &self.kvs);
+            self.svc_request.add(&self.ctx, 1, &self.kvs);
         }
     }
 
     /// A request to the temporal service failed
     pub(crate) fn svc_request_failed(&self) {
         if self.poll_is_long {
-            self.long_svc_request_failed.add(1, &self.kvs);
+            self.long_svc_request_failed.add(&self.ctx, 1, &self.kvs);
         } else {
-            self.svc_request_failed.add(1, &self.kvs);
+            self.svc_request_failed.add(&self.ctx, 1, &self.kvs);
         }
     }
 
@@ -81,10 +83,10 @@ impl MetricsContext {
     pub(crate) fn record_svc_req_latency(&self, dur: Duration) {
         if self.poll_is_long {
             self.long_svc_request_latency
-                .record(dur.as_millis() as u64, &self.kvs);
+                .record(&self.ctx, dur.as_millis() as u64, &self.kvs);
         } else {
             self.svc_request_latency
-                .record(dur.as_millis() as u64, &self.kvs);
+                .record(&self.ctx, dur.as_millis() as u64, &self.kvs);
         }
     }
 }
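These metrics.rs changes are the API migration forced by opentelemetry 0.18: `Histogram` replaces the removed `ValueRecorder` (`u64_value_recorder` → `u64_histogram`), and every instrument call now takes an explicit `Context` as its first argument. A minimal standalone sketch of the 0.18 shape — the meter name and attribute here are illustrative, not from the SDK:

```rust
use opentelemetry::{global, metrics::Histogram, Context, KeyValue};

fn record_latency(hist: &Histogram<u64>, millis: u64) {
    // In 0.18, instruments take the Context explicitly as the first argument.
    hist.record(&Context::current(), millis, &[KeyValue::new("rpc", "example")]);
}

fn main() {
    let meter = global::meter("example");
    // u64_histogram replaces the removed u64_value_recorder.
    let hist = meter.u64_histogram("request_latency").init();
    record_latency(&hist, 42);
}
```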
package/sdk-core/client/src/raw.rs

@@ -95,7 +95,6 @@ where
     F: Send + Sync + Unpin + 'static,
 {
     let rtc = self.get_retry_config(call_name);
-    let req = req_cloner(&req);
     let fact = || {
         let req_clone = req_cloner(&req);
         callfn(self, req_clone)
@@ -892,7 +891,7 @@ mod tests {
             .filter(|l| l.starts_with("rpc"))
             .map(|l| {
                 let stripped = l.strip_prefix("rpc ").unwrap();
-                (&stripped[..stripped.find('(').unwrap()]).trim()
+                (stripped[..stripped.find('(').unwrap()]).trim()
             })
             .collect();
         let no_underscores: HashSet<_> = impl_list.iter().map(|x| x.replace('_', "")).collect();
package/sdk-core/client/src/retry.rs

@@ -29,6 +29,10 @@ pub const RETRYABLE_ERROR_CODES: [Code; 7] = [
     Code::Unavailable,
 ];
 const LONG_POLL_FATAL_GRACE: Duration = Duration::from_secs(60);
+/// Must match the method name in [crate::raw::WorkflowService]
+const POLL_WORKFLOW_METH_NAME: &str = "poll_workflow_task_queue";
+/// Must match the method name in [crate::raw::WorkflowService]
+const POLL_ACTIVITY_METH_NAME: &str = "poll_activity_task_queue";
 
 /// A wrapper for a [WorkflowClientTrait] or [crate::WorkflowService] implementor which performs
 /// auto-retries
@@ -82,8 +86,7 @@ impl<SG> RetryClient<SG> {
     }
 
     pub(crate) fn get_retry_config(&self, call_name: &'static str) -> RetryConfig {
-        let call_type = Self::determine_call_type(call_name);
-        match call_type {
+        match CallType::from_call_name(call_name) {
             CallType::Normal => (*self.retry_config).clone(),
             CallType::LongPoll => RetryConfig::poll_retry_policy(),
         }
@@ -98,15 +101,7 @@ impl<SG> RetryClient<SG> {
         F: FnMut() -> Fut + Unpin,
         Fut: Future<Output = Result<R>>,
     {
-        let call_type = Self::determine_call_type(call_name);
-        FutureRetry::new(factory, TonicErrorHandler::new(rtc, call_type, call_name))
-    }
-
-    fn determine_call_type(call_name: &str) -> CallType {
-        match call_name {
-            "poll_workflow_task" | "poll_activity_task" => CallType::LongPoll,
-            _ => CallType::Normal,
-        }
+        FutureRetry::new(factory, TonicErrorHandler::new(rtc, call_name))
     }
 }
 
@@ -118,19 +113,22 @@ pub(crate) struct TonicErrorHandler<C: Clock> {
     call_name: &'static str,
 }
 impl TonicErrorHandler<SystemClock> {
-    fn new(cfg: RetryConfig, call_type: CallType, call_name: &'static str) -> Self {
-        Self {
-            max_retries: cfg.max_retries,
-            call_type,
-            call_name,
-            backoff: cfg.into(),
-        }
+    fn new(cfg: RetryConfig, call_name: &'static str) -> Self {
+        Self::new_with_clock(cfg, call_name, SystemClock::default())
     }
 }
 impl<C> TonicErrorHandler<C>
 where
     C: Clock,
 {
+    fn new_with_clock(cfg: RetryConfig, call_name: &'static str, clock: C) -> Self {
+        Self {
+            max_retries: cfg.max_retries,
+            call_type: CallType::from_call_name(call_name),
+            call_name,
+            backoff: cfg.into_exp_backoff(clock),
+        }
+    }
     const fn should_log_retry_warning(&self, cur_attempt: usize) -> bool {
         // Warn on more than 5 retries for unlimited retrying
         if self.max_retries == 0 && cur_attempt > 5 {
@@ -149,6 +147,14 @@ pub enum CallType {
     Normal,
     LongPoll,
 }
+impl CallType {
+    fn from_call_name(call_name: &str) -> Self {
+        match call_name {
+            POLL_WORKFLOW_METH_NAME | POLL_ACTIVITY_METH_NAME => CallType::LongPoll,
+            _ => CallType::Normal,
+        }
+    }
+}
 
 impl<C> ErrorHandler<tonic::Status> for TonicErrorHandler<C>
 where
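Worth noting: this refactor appears to be a fix as well as a cleanup. The removed `determine_call_type` matched `"poll_workflow_task"` and `"poll_activity_task"`, which don't match the real snake_case method names, so long polls were evidently falling through to the `Normal` arm; the new constants are documented to track [crate::raw::WorkflowService]. Roughly, the new mapping behaves like this (a sketch; the non-poll method name is just an arbitrary example):

```rust
// Long-poll RPCs get the dedicated poll retry policy; everything else keeps
// the client's configured policy.
assert!(matches!(
    CallType::from_call_name(POLL_WORKFLOW_METH_NAME), // "poll_workflow_task_queue"
    CallType::LongPoll
));
assert!(matches!(
    CallType::from_call_name("signal_workflow_execution"),
    CallType::Normal
));
```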
@@ -568,22 +574,13 @@ mod tests {
             Code::Unauthenticated,
             Code::Unimplemented,
         ] {
-            for call_name in ["poll_workflow_task", "poll_activity_task"] {
-                let retry_cfg = RetryConfig::default();
+            for call_name in [POLL_WORKFLOW_METH_NAME, POLL_ACTIVITY_METH_NAME] {
+                let retry_cfg = RetryConfig::poll_retry_policy();
                 let mut err_handler = TonicErrorHandler {
                     max_retries: retry_cfg.max_retries,
                     call_type: CallType::LongPoll,
                     call_name,
-                    backoff: ExponentialBackoff {
-                        current_interval: retry_cfg.initial_interval,
-                        initial_interval: retry_cfg.initial_interval,
-                        randomization_factor: retry_cfg.randomization_factor,
-                        multiplier: retry_cfg.multiplier,
-                        max_interval: retry_cfg.max_interval,
-                        max_elapsed_time: retry_cfg.max_elapsed_time,
-                        clock: FixedClock(Instant::now()),
-                        start_time: Instant::now(),
-                    },
+                    backoff: retry_cfg.into_exp_backoff(FixedClock(Instant::now())),
                 };
                 let result = err_handler.handle(1, Status::new(code, "Ahh"));
                 assert_matches!(result, RetryPolicy::WaitRetry(_));
@@ -598,6 +595,30 @@ mod tests {
         }
     }
 
+    #[tokio::test]
+    async fn long_poll_retryable_errors_never_fatal() {
+        for code in RETRYABLE_ERROR_CODES {
+            for call_name in [POLL_WORKFLOW_METH_NAME, POLL_ACTIVITY_METH_NAME] {
+                let retry_cfg = RetryConfig::poll_retry_policy();
+                let mut err_handler = TonicErrorHandler {
+                    max_retries: retry_cfg.max_retries,
+                    call_type: CallType::LongPoll,
+                    call_name,
+                    backoff: retry_cfg.into_exp_backoff(FixedClock(Instant::now())),
+                };
+                let result = err_handler.handle(1, Status::new(code, "Ahh"));
+                assert_matches!(result, RetryPolicy::WaitRetry(_));
+                err_handler.backoff.clock.0 = err_handler
+                    .backoff
+                    .clock
+                    .0
+                    .add(LONG_POLL_FATAL_GRACE + Duration::from_secs(1));
+                let result = err_handler.handle(2, Status::new(code, "Ahh"));
+                assert_matches!(result, RetryPolicy::WaitRetry(_));
+            }
+        }
+    }
+
     #[tokio::test]
     async fn retryable_errors() {
         for code in RETRYABLE_ERROR_CODES {
@@ -626,12 +647,9 @@ mod tests {
         // correct retry config
         let fake_retry = RetryClient::new((), Default::default());
         for i in 1..=50 {
-            for call in ["poll_workflow_task", "poll_activity_task"] {
-                let mut err_handler = TonicErrorHandler::new(
-                    fake_retry.get_retry_config(call),
-                    CallType::LongPoll,
-                    call,
-                );
+            for call in [POLL_WORKFLOW_METH_NAME, POLL_ACTIVITY_METH_NAME] {
+                let mut err_handler =
+                    TonicErrorHandler::new(fake_retry.get_retry_config(call), call);
                 let result = err_handler.handle(i, Status::new(Code::Unknown, "Ahh"));
                 assert_matches!(result, RetryPolicy::WaitRetry(_));
             }
@@ -643,12 +661,9 @@ mod tests {
         let fake_retry = RetryClient::new((), Default::default());
         // For some reason we will get cancelled in these situations occasionally (always?) too
         for code in [Code::Cancelled, Code::DeadlineExceeded] {
-            for call in ["poll_workflow_task", "poll_activity_task"] {
-                let mut err_handler = TonicErrorHandler::new(
-                    fake_retry.get_retry_config(call),
-                    CallType::LongPoll,
-                    call,
-                );
+            for call in [POLL_WORKFLOW_METH_NAME, POLL_ACTIVITY_METH_NAME] {
+                let mut err_handler =
+                    TonicErrorHandler::new(fake_retry.get_retry_config(call), call);
                 for i in 1..=5 {
                     let result = err_handler.handle(i, Status::new(code, "retryable failure"));
                     assert_matches!(result, RetryPolicy::WaitRetry(_));
package/sdk-core/core/Cargo.toml

@@ -26,7 +26,7 @@ enum_dispatch = "0.3"
 flate2 = "1.0"
 futures = "0.3"
 futures-util = "0.3"
-governor = "0.4"
+governor = "0.5"
 http = "0.2"
 hyper = "0.14"
 itertools = "0.10"
@@ -36,9 +36,9 @@ lru = "0.8"
 mockall = "0.11"
 nix = "0.25"
 once_cell = "1.5"
-opentelemetry = { version = "0.17", features = ["rt-tokio"] }
-opentelemetry-otlp = { version = "0.10.0", features = ["tokio", "metrics"] }
-opentelemetry-prometheus = "0.10.0"
+opentelemetry = { version = "0.18", features = ["rt-tokio"] }
+opentelemetry-otlp = { version = "0.11", features = ["tokio", "metrics"] }
+opentelemetry-prometheus = "0.11"
 parking_lot = { version = "0.12", features = ["send_guard"] }
 prometheus = "0.13"
 prost = "0.11"
@@ -56,11 +56,9 @@ tokio = { version = "1.1", features = ["rt", "rt-multi-thread", "parking_lot", "
 tokio-util = { version = "0.7", features = ["io", "io-util"] }
 tokio-stream = "0.1"
 tonic = { version = "0.8", features = ["tls", "tls-roots"] }
-# TODO: Get rid of this once otel updates its tonic dep
-tonic_otel = { version = "0.6", package = "tonic" }
 tracing = { version = "0.1", features = ["log-always"] }
 tracing-futures = "0.2"
-tracing-opentelemetry = "0.17"
+tracing-opentelemetry = "0.18"
 tracing-subscriber = { version = "0.3", features = ["parking_lot", "env-filter"] }
 url = "2.2"
 uuid = { version = "1.1", features = ["v4"] }
@@ -87,7 +85,7 @@ version = "0.1"
 [dev-dependencies]
 assert_matches = "1.4"
 bimap = "0.6.1"
-criterion = "0.3"
+criterion = "0.4"
 rstest = "0.15"
 temporal-sdk-core-test-utils = { path = "../test-utils" }
 temporal-sdk = { path = "../sdk" }
@@ -110,3 +108,9 @@ test = false
 [[bench]]
 name = "workflow_replay"
 harness = false
+
+# This is maybe a bit hacky, but we call the runner an "example" because that gets it compiling with
+# the dev-dependencies, which we want.
+[[example]]
+name = "integ_runner"
+path = "../tests/runner.rs"
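
Because Cargo compiles `[[example]]` targets against dev-dependencies, this entry lets the new `tests/runner.rs` (file 35 in the list above) be launched as `cargo run --example integ_runner` without promoting test-only crates to regular dependencies.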
package/sdk-core/core/src/core_tests/activity_tasks.rs

@@ -10,6 +10,7 @@ use crate::{
     ActivityHeartbeat, Worker, WorkerConfigBuilder,
 };
 use futures::FutureExt;
+use itertools::Itertools;
 use std::{
     cell::RefCell,
     collections::{hash_map::Entry, HashMap, VecDeque},
@@ -617,20 +618,23 @@ async fn max_tq_acts_set_passed_to_poll_properly() {
 /// delivered via polling.
 #[tokio::test]
 async fn activity_tasks_from_completion_are_delivered() {
+    // Construct the history - one task with 5 activities, 4 on the same task queue, and 1 on a
+    // different queue, 3 activities will be executed eagerly as specified by the
+    // MAX_EAGER_ACTIVITY_RESERVATIONS_PER_WORKFLOW_TASK constant.
     let wfid = "fake_wf_id";
     let mut t = TestHistoryBuilder::default();
     t.add_by_type(EventType::WorkflowExecutionStarted);
     t.add_full_wf_task();
-    let act_same_queue_sched_id = t.add_activity_task_scheduled("act_id_same_queue");
-    let act_different_queue_sched_id = t.add_activity_task_scheduled("act_id_different_queue");
-    let act_same_queue_start_id = t.add_activity_task_started(act_same_queue_sched_id);
-    t.add_activity_task_completed(
-        act_same_queue_sched_id,
-        act_same_queue_start_id,
-        b"hi".into(),
-    );
+    let act_same_queue_scheduled_ids = (1..4)
+        .map(|i| t.add_activity_task_scheduled(format!("act_id_{}_same_queue", i)))
+        .collect_vec();
+    t.add_activity_task_scheduled("act_id_same_queue_not_eager");
+    t.add_activity_task_scheduled("act_id_different_queue");
+    for scheduled_event_id in act_same_queue_scheduled_ids {
+        let started_event_id = t.add_activity_task_started(scheduled_event_id);
+        t.add_activity_task_completed(scheduled_event_id, started_event_id, b"hi".into());
+    }
     t.add_full_wf_task();
-    t.add_activity_task_cancel_requested(act_different_queue_sched_id);
     t.add_workflow_execution_completed();
 
     let num_eager_requested = Arc::new(AtomicUsize::new(0));
@@ -658,15 +662,17 @@ async fn activity_tasks_from_completion_are_delivered() {
             num_eager_requested_clone.store(count, Ordering::Relaxed);
             Ok(RespondWorkflowTaskCompletedResponse {
                 workflow_task: None,
-                activity_tasks: vec![PollActivityTaskQueueResponse {
-                    task_token: vec![1],
-                    activity_id: "act_id_same_queue".to_string(),
-                    ..Default::default()
-                }],
+                activity_tasks: (1..4)
+                    .map(|i| PollActivityTaskQueueResponse {
+                        task_token: vec![i],
+                        activity_id: format!("act_id_{}_same_queue", i),
+                        ..Default::default()
+                    })
+                    .collect_vec(),
             })
         });
     mock.expect_complete_activity_task()
-        .times(1)
+        .times(3)
         .returning(|_, _| Ok(RespondActivityTaskCompletedResponse::default()));
     let mut mock = single_hist_mock_sg(wfid, t, [1], mock, true);
     let mut mock_poller = mock_manual_poller();
@@ -677,46 +683,65 @@ async fn activity_tasks_from_completion_are_delivered() {
     mock.worker_cfg(|wc| wc.max_cached_workflows = 2);
     let core = mock_worker(mock);
 
+    // Test start
     let wf_task = core.poll_workflow_activation().await.unwrap();
-    core.complete_workflow_activation(WorkflowActivationCompletion::from_cmds(
-        wf_task.run_id,
-        vec![
+    let mut cmds = (1..4)
+        .map(|seq| {
             ScheduleActivity {
-                seq: 1,
-                activity_id: "act_id_same_queue".to_string(),
+                seq,
+                activity_id: format!("act_id_{}_same_queue", seq),
                 task_queue: TEST_Q.to_string(),
                 cancellation_type: ActivityCancellationType::TryCancel as i32,
                 ..Default::default()
             }
-            .into(),
-            ScheduleActivity {
-                seq: 2,
-                activity_id: "act_id_different_queue".to_string(),
-                task_queue: "different_queue".to_string(),
-                cancellation_type: ActivityCancellationType::Abandon as i32,
-                ..Default::default()
-            }
-            .into(),
-        ],
+            .into()
+        })
+        .collect_vec();
+    cmds.push(
+        ScheduleActivity {
+            seq: 4,
+            activity_id: "act_id_same_queue_not_eager".to_string(),
+            task_queue: TEST_Q.to_string(),
+            cancellation_type: ActivityCancellationType::TryCancel as i32,
+            ..Default::default()
+        }
+        .into(),
+    );
+    cmds.push(
+        ScheduleActivity {
+            seq: 5,
+            activity_id: "act_id_different_queue".to_string(),
+            task_queue: "different_queue".to_string(),
+            cancellation_type: ActivityCancellationType::Abandon as i32,
+            ..Default::default()
+        }
+        .into(),
+    );
+
+    core.complete_workflow_activation(WorkflowActivationCompletion::from_cmds(
+        wf_task.run_id,
+        cmds,
     ))
     .await
    .unwrap();
 
-    // We should see the activity when we poll now
-    let act_task = core.poll_activity_task().await.unwrap();
-    assert_eq!(act_task.task_token, vec![1]);
+    // We should see the 3 eager activities when we poll now
+    for i in 1..4 {
+        let act_task = core.poll_activity_task().await.unwrap();
+        assert_eq!(act_task.task_token, vec![i]);
 
-    core.complete_activity_task(ActivityTaskCompletion {
-        task_token: act_task.task_token.clone(),
-        result: Some(ActivityExecutionResult::ok("hi".into())),
-    })
-    .await
-    .unwrap();
+        core.complete_activity_task(ActivityTaskCompletion {
+            task_token: act_task.task_token.clone(),
+            result: Some(ActivityExecutionResult::ok("hi".into())),
+        })
+        .await
+        .unwrap();
+    }
 
     core.shutdown().await;
 
     // Verify only a single eager activity was scheduled (the one on our worker's task queue)
-    assert_eq!(num_eager_requested.load(Ordering::Relaxed), 1);
+    assert_eq!(num_eager_requested.load(Ordering::Relaxed), 3);
 }
 
 #[tokio::test]
package/sdk-core/core/src/ephemeral_server/mod.rs

@@ -20,6 +20,7 @@ use zip::read::read_zipfile_from_stream;
 
 #[cfg(target_family = "unix")]
 use std::os::unix::fs::OpenOptionsExt;
+use std::process::Stdio;
 
 /// Configuration for Temporalite.
 #[derive(Debug, Clone, derive_builder::Builder)]
@@ -52,6 +53,11 @@ pub struct TemporaliteConfig {
 impl TemporaliteConfig {
     /// Start a Temporalite server.
     pub async fn start_server(&self) -> anyhow::Result<EphemeralServer> {
+        self.start_server_with_output(Stdio::inherit()).await
+    }
+
+    /// Start a Temporalite server with configurable stdout destination.
+    pub async fn start_server_with_output(&self, output: Stdio) -> anyhow::Result<EphemeralServer> {
         // Get exe path
         let exe_path = self.exe.get_or_download("temporalite").await?;
 
@@ -89,6 +95,7 @@ impl TemporaliteConfig {
             port,
             args,
             has_test_service: false,
+            output,
         })
         .await
     }
@@ -110,6 +117,11 @@ pub struct TestServerConfig {
 impl TestServerConfig {
     /// Start a test server.
     pub async fn start_server(&self) -> anyhow::Result<EphemeralServer> {
+        self.start_server_with_output(Stdio::inherit()).await
+    }
+
+    /// Start a test server with configurable stdout.
+    pub async fn start_server_with_output(&self, output: Stdio) -> anyhow::Result<EphemeralServer> {
         // Get exe path
         let exe_path = self.exe.get_or_download("temporal-test-server").await?;
 
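Both config types gain the same `start_server_with_output` entry point, with `start_server` delegating to it using `Stdio::inherit()` so existing callers keep the old behavior. A usage sketch, assuming the builder that `derive_builder` generates for `TemporaliteConfig` and that its defaults are usable:

```rust
use std::process::Stdio;

// Hypothetical caller; TemporaliteConfigBuilder is the derive_builder-generated type.
async fn start_quiet() -> anyhow::Result<EphemeralServer> {
    let config = TemporaliteConfigBuilder::default()
        .build()
        .expect("default Temporalite config should build");
    // New in 1.4.0: route the child process's stdout wherever you like;
    // Stdio::null() silences it, Stdio::inherit() matches the old behavior.
    config.start_server_with_output(Stdio::null()).await
}
```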
@@ -126,6 +138,7 @@ impl TestServerConfig {
             port,
             args,
             has_test_service: true,
+            output,
         })
         .await
     }
@@ -136,9 +149,11 @@ struct EphemeralServerConfig {
     port: u16,
     args: Vec<String>,
     has_test_service: bool,
+    output: Stdio,
 }
 
 /// Server that will be stopped when dropped.
+#[derive(Debug)]
 pub struct EphemeralServer {
     /// gRPC target host:port for the server frontend.
     pub target: String,
@@ -153,7 +168,8 @@ impl EphemeralServer {
         // TODO(cretz): Offer stdio suppression?
         let child = tokio::process::Command::new(config.exe_path)
             .args(config.args)
-            .stdin(std::process::Stdio::null())
+            .stdin(Stdio::null())
+            .stdout(config.output)
             .spawn()?;
         let target = format!("127.0.0.1:{}", config.port);
         let target_url = format!("http://{}", target);
@@ -208,12 +224,12 @@ impl EphemeralServer {
         // For whatever reason, Tokio is not properly waiting on result
         // after sending kill in some cases which is causing defunct zombie
        // processes to remain and kill() to hang. Therefore, we are sending
-        // SIGINT and waiting on the process ourselves using a low-level call.
+        // SIGKILL and waiting on the process ourselves using a low-level call.
        //
         // WARNING: This is based on empirical evidence starting a Python test
         // run on Linux with Python 3.7 (does not happen on Python 3.10 nor does
         // it happen on Temporalite nor does it happen in Rust integration
-        // tests). Do not consider this fixed without running that scenario.
+        // tests). Don't alter without running that scenario. EX: SIGINT works but not SIGKILL
         if let Some(pid) = self.child.id() {
             let nix_pid = nix::unistd::Pid::from_raw(pid as i32);
             Ok(spawn_blocking(move || {
package/sdk-core/core/src/lib.rs

@@ -34,8 +34,8 @@ pub use pollers::{
     TlsConfig, WorkflowClientTrait,
 };
 pub use telemetry::{
-    fetch_global_buffered_logs, telemetry_init, Logger, MetricsExporter, OtelCollectorOptions,
-    TelemetryOptions, TelemetryOptionsBuilder, TraceExporter,
+    fetch_global_buffered_logs, telemetry_init, Logger, MetricTemporality, MetricsExporter,
+    OtelCollectorOptions, TelemetryOptions, TelemetryOptionsBuilder, TraceExporter,
 };
 pub use temporal_sdk_core_api as api;
 pub use temporal_sdk_core_protos as protos;
package/sdk-core/core/src/pollers/mod.rs

@@ -19,6 +19,7 @@ pub type Result<T, E = tonic::Status> = std::result::Result<T, E>;
 /// A trait for things that poll the server. Hides complexity of concurrent polling or polling
 /// on sticky/nonsticky queues simultaneously.
 #[cfg_attr(test, mockall::automock)]
+#[cfg_attr(test, allow(unused))]
 #[async_trait::async_trait]
 pub trait Poller<PollResult>
 where
@@ -37,6 +38,7 @@ pub type BoxedActPoller = BoxedPoller<PollActivityTaskQueueResponse>;
 #[cfg(test)]
 mockall::mock! {
     pub ManualPoller<T: Send + Sync + 'static> {}
+    #[allow(unused)]
     impl<T: Send + Sync + 'static> Poller<T> for ManualPoller<T> {
         fn poll<'a, 'b>(&self)
             -> impl Future<Output = Option<Result<T>>> + Send + 'b