@temporalio/core-bridge 1.7.1 → 1.7.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47):
  1. package/Cargo.lock +21 -0
  2. package/lib/index.d.ts +10 -10
  3. package/package.json +4 -4
  4. package/releases/aarch64-apple-darwin/index.node +0 -0
  5. package/sdk-core/.buildkite/pipeline.yml +1 -1
  6. package/sdk-core/.cargo/config.toml +2 -0
  7. package/sdk-core/CODEOWNERS +1 -1
  8. package/sdk-core/client/src/raw.rs +15 -6
  9. package/sdk-core/core/Cargo.toml +1 -0
  10. package/sdk-core/core/src/core_tests/activity_tasks.rs +13 -5
  11. package/sdk-core/core/src/core_tests/workflow_tasks.rs +45 -77
  12. package/sdk-core/core/src/internal_flags.rs +132 -46
  13. package/sdk-core/core/src/worker/activities/activity_task_poller_stream.rs +10 -7
  14. package/sdk-core/core/src/worker/activities.rs +152 -142
  15. package/sdk-core/core/src/worker/client.rs +12 -8
  16. package/sdk-core/core/src/worker/mod.rs +7 -5
  17. package/sdk-core/core/src/worker/workflow/history_update.rs +733 -33
  18. package/sdk-core/core/src/worker/workflow/machines/child_workflow_state_machine.rs +1 -1
  19. package/sdk-core/core/src/worker/workflow/machines/signal_external_state_machine.rs +4 -1
  20. package/sdk-core/core/src/worker/workflow/machines/workflow_machines.rs +5 -2
  21. package/sdk-core/core/src/worker/workflow/managed_run/managed_wf_test.rs +1 -1
  22. package/sdk-core/core/src/worker/workflow/managed_run.rs +0 -4
  23. package/sdk-core/protos/api_upstream/.github/workflows/publish-docs.yml +23 -0
  24. package/sdk-core/protos/api_upstream/Makefile +1 -1
  25. package/sdk-core/protos/api_upstream/buf.yaml +5 -0
  26. package/sdk-core/protos/api_upstream/temporal/api/common/v1/message.proto +17 -0
  27. package/sdk-core/protos/api_upstream/temporal/api/enums/v1/failed_cause.proto +2 -0
  28. package/sdk-core/protos/api_upstream/temporal/api/history/v1/message.proto +6 -3
  29. package/sdk-core/protos/api_upstream/temporal/api/protocol/v1/message.proto +1 -1
  30. package/sdk-core/protos/api_upstream/temporal/api/taskqueue/v1/message.proto +12 -22
  31. package/sdk-core/protos/api_upstream/temporal/api/update/v1/message.proto +2 -2
  32. package/sdk-core/protos/api_upstream/temporal/api/workflow/v1/message.proto +2 -0
  33. package/sdk-core/protos/api_upstream/temporal/api/workflowservice/v1/request_response.proto +145 -48
  34. package/sdk-core/protos/api_upstream/temporal/api/workflowservice/v1/service.proto +19 -8
  35. package/sdk-core/sdk/src/workflow_context/options.rs +1 -1
  36. package/sdk-core/sdk/src/workflow_context.rs +9 -1
  37. package/sdk-core/test-utils/src/lib.rs +29 -7
  38. package/sdk-core/tests/integ_tests/activity_functions.rs +5 -0
  39. package/sdk-core/tests/integ_tests/workflow_tests/activities.rs +2 -4
  40. package/sdk-core/tests/integ_tests/workflow_tests/determinism.rs +0 -1
  41. package/sdk-core/tests/integ_tests/workflow_tests/local_activities.rs +5 -7
  42. package/sdk-core/tests/integ_tests/workflow_tests.rs +3 -7
  43. package/sdk-core/tests/main.rs +1 -0
  44. package/releases/aarch64-unknown-linux-gnu/index.node +0 -0
  45. package/releases/x86_64-apple-darwin/index.node +0 -0
  46. package/releases/x86_64-pc-windows-msvc/index.node +0 -0
  47. package/releases/x86_64-unknown-linux-gnu/index.node +0 -0
@@ -1,7 +1,11 @@
1
1
  //! Utilities for and tracking of internal versions which alter history in incompatible ways
2
2
  //! so that we can use older code paths for workflows executed on older core versions.
3
3
 
4
- use std::collections::{BTreeSet, HashSet};
4
+ use itertools::Either;
5
+ use std::{
6
+ collections::{BTreeSet, HashSet},
7
+ iter,
8
+ };
5
9
  use temporal_sdk_core_protos::temporal::api::{
6
10
  history::v1::WorkflowTaskCompletedEventAttributes, sdk::v1::WorkflowTaskCompletedMetadata,
7
11
  workflowservice::v1::get_system_info_response,
@@ -15,7 +19,7 @@ use temporal_sdk_core_protos::temporal::api::{
15
19
  /// that removing older variants does not create any change in existing values. Removed flag
16
20
  /// variants must be reserved forever (a-la protobuf), and should be called out in a comment.
17
21
  #[repr(u32)]
18
- #[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Copy, Clone, Debug)]
22
+ #[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Copy, Clone, Debug, enum_iterator::Sequence)]
19
23
  pub(crate) enum CoreInternalFlags {
20
24
  /// In this flag additional checks were added to a number of state machines to ensure that
21
25
  /// the ID and type of activities, local activities, and child workflows match during replay.
@@ -28,64 +32,85 @@ pub(crate) enum CoreInternalFlags {
28
32
  }
29
33
 
30
34
  #[derive(Debug, Clone, PartialEq, Eq)]
31
- pub(crate) struct InternalFlags {
32
- enabled: bool,
33
- core: BTreeSet<CoreInternalFlags>,
34
- lang: BTreeSet<u32>,
35
- core_since_last_complete: HashSet<CoreInternalFlags>,
36
- lang_since_last_complete: HashSet<u32>,
35
+ pub(crate) enum InternalFlags {
36
+ Enabled {
37
+ core: BTreeSet<CoreInternalFlags>,
38
+ lang: BTreeSet<u32>,
39
+ core_since_last_complete: HashSet<CoreInternalFlags>,
40
+ lang_since_last_complete: HashSet<u32>,
41
+ },
42
+ Disabled,
37
43
  }
38
44
 
39
45
  impl InternalFlags {
40
46
  pub fn new(server_capabilities: &get_system_info_response::Capabilities) -> Self {
41
- Self {
42
- enabled: server_capabilities.sdk_metadata,
43
- core: Default::default(),
44
- lang: Default::default(),
45
- core_since_last_complete: Default::default(),
46
- lang_since_last_complete: Default::default(),
47
+ match server_capabilities.sdk_metadata {
48
+ true => Self::Enabled {
49
+ core: Default::default(),
50
+ lang: Default::default(),
51
+ core_since_last_complete: Default::default(),
52
+ lang_since_last_complete: Default::default(),
53
+ },
54
+ false => Self::Disabled,
47
55
  }
48
56
  }
49
57
 
50
58
  pub fn add_from_complete(&mut self, e: &WorkflowTaskCompletedEventAttributes) {
51
- if !self.enabled {
52
- return;
53
- }
54
-
55
- if let Some(metadata) = e.sdk_metadata.as_ref() {
56
- self.core.extend(
57
- metadata
58
- .core_used_flags
59
- .iter()
60
- .map(|u| CoreInternalFlags::from_u32(*u)),
61
- );
62
- self.lang.extend(metadata.lang_used_flags.iter());
59
+ if let Self::Enabled { core, lang, .. } = self {
60
+ if let Some(metadata) = e.sdk_metadata.as_ref() {
61
+ core.extend(
62
+ metadata
63
+ .core_used_flags
64
+ .iter()
65
+ .map(|u| CoreInternalFlags::from_u32(*u)),
66
+ );
67
+ lang.extend(metadata.lang_used_flags.iter());
68
+ }
63
69
  }
64
70
  }
65
71
 
66
72
  pub fn add_lang_used(&mut self, flags: impl IntoIterator<Item = u32>) {
67
- if !self.enabled {
68
- return;
73
+ if let Self::Enabled {
74
+ lang_since_last_complete,
75
+ ..
76
+ } = self
77
+ {
78
+ lang_since_last_complete.extend(flags.into_iter());
69
79
  }
70
-
71
- self.lang_since_last_complete.extend(flags.into_iter());
72
80
  }
73
81
 
74
82
  /// Returns true if this flag may currently be used. If `should_record` is true, always returns
75
83
  /// true and records the flag as being used, for taking later via
76
84
  /// [Self::gather_for_wft_complete].
77
85
  pub fn try_use(&mut self, core_patch: CoreInternalFlags, should_record: bool) -> bool {
78
- if !self.enabled {
86
+ match self {
87
+ Self::Enabled {
88
+ core,
89
+ core_since_last_complete,
90
+ ..
91
+ } => {
92
+ if should_record {
93
+ core_since_last_complete.insert(core_patch);
94
+ true
95
+ } else {
96
+ core.contains(&core_patch)
97
+ }
98
+ }
79
99
  // If the server does not support the metadata field, we must assume we can never use
80
100
  // any internal flags since they can't be recorded for future use
81
- return false;
101
+ Self::Disabled => false,
82
102
  }
103
+ }
83
104
 
84
- if should_record {
85
- self.core_since_last_complete.insert(core_patch);
86
- true
87
- } else {
88
- self.core.contains(&core_patch)
105
+ /// Writes all known core flags to the set which should be recorded in the current WFT if not
106
+ /// already known. Must only be called if not replaying.
107
+ pub fn write_all_known(&mut self) {
108
+ if let Self::Enabled {
109
+ core_since_last_complete,
110
+ ..
111
+ } = self
112
+ {
113
+ core_since_last_complete.extend(CoreInternalFlags::all_except_too_high());
89
114
  }
90
115
  }
91
116
 
@@ -93,18 +118,39 @@ impl InternalFlags {
93
118
  /// sdk metadata message that can be combined with any existing data before sending the WFT
94
119
  /// complete
95
120
  pub fn gather_for_wft_complete(&mut self) -> WorkflowTaskCompletedMetadata {
96
- WorkflowTaskCompletedMetadata {
97
- core_used_flags: self
98
- .core_since_last_complete
99
- .drain()
100
- .map(|p| p as u32)
101
- .collect(),
102
- lang_used_flags: self.lang_since_last_complete.drain().collect(),
121
+ match self {
122
+ Self::Enabled {
123
+ core_since_last_complete,
124
+ lang_since_last_complete,
125
+ core,
126
+ lang,
127
+ } => {
128
+ let core_newly_used: Vec<_> = core_since_last_complete
129
+ .iter()
130
+ .filter(|f| !core.contains(f))
131
+ .map(|p| *p as u32)
132
+ .collect();
133
+ let lang_newly_used: Vec<_> = lang_since_last_complete
134
+ .iter()
135
+ .filter(|f| !lang.contains(f))
136
+ .copied()
137
+ .collect();
138
+ core.extend(core_since_last_complete.iter());
139
+ lang.extend(lang_since_last_complete.iter());
140
+ WorkflowTaskCompletedMetadata {
141
+ core_used_flags: core_newly_used,
142
+ lang_used_flags: lang_newly_used,
143
+ }
144
+ }
145
+ Self::Disabled => WorkflowTaskCompletedMetadata::default(),
103
146
  }
104
147
  }
105
148
 
106
- pub fn all_lang(&self) -> &BTreeSet<u32> {
107
- &self.lang
149
+ pub fn all_lang(&self) -> impl Iterator<Item = u32> + '_ {
150
+ match self {
151
+ Self::Enabled { lang, .. } => Either::Left(lang.iter().copied()),
152
+ Self::Disabled => Either::Right(iter::empty()),
153
+ }
108
154
  }
109
155
  }
110
156
 
@@ -116,6 +162,11 @@ impl CoreInternalFlags {
116
162
  _ => Self::TooHigh,
117
163
  }
118
164
  }
165
+
166
+ pub fn all_except_too_high() -> impl Iterator<Item = CoreInternalFlags> {
167
+ enum_iterator::all::<CoreInternalFlags>()
168
+ .filter(|f| !matches!(f, CoreInternalFlags::TooHigh))
169
+ }
119
170
  }
120
171
 
121
172
  #[cfg(test)]
@@ -138,4 +189,39 @@ mod tests {
138
189
  assert_matches!(gathered.core_used_flags.as_slice(), &[]);
139
190
  assert_matches!(gathered.lang_used_flags.as_slice(), &[]);
140
191
  }
192
+
193
+ #[test]
194
+ fn all_have_u32_from_impl() {
195
+ let all_known = CoreInternalFlags::all_except_too_high();
196
+ for flag in all_known {
197
+ let as_u32 = flag as u32;
198
+ assert_eq!(CoreInternalFlags::from_u32(as_u32), flag);
199
+ }
200
+ }
201
+
202
+ #[test]
203
+ fn only_writes_new_flags() {
204
+ let mut f = InternalFlags::new(&Capabilities {
205
+ sdk_metadata: true,
206
+ ..Default::default()
207
+ });
208
+ f.add_lang_used([1]);
209
+ f.try_use(CoreInternalFlags::IdAndTypeDeterminismChecks, true);
210
+ let gathered = f.gather_for_wft_complete();
211
+ assert_matches!(gathered.core_used_flags.as_slice(), &[1]);
212
+ assert_matches!(gathered.lang_used_flags.as_slice(), &[1]);
213
+
214
+ f.add_from_complete(&WorkflowTaskCompletedEventAttributes {
215
+ sdk_metadata: Some(WorkflowTaskCompletedMetadata {
216
+ core_used_flags: vec![2],
217
+ lang_used_flags: vec![2],
218
+ }),
219
+ ..Default::default()
220
+ });
221
+ f.add_lang_used([2]);
222
+ f.try_use(CoreInternalFlags::UpsertSearchAttributeOnPatch, true);
223
+ let gathered = f.gather_for_wft_complete();
224
+ assert_matches!(gathered.core_used_flags.as_slice(), &[]);
225
+ assert_matches!(gathered.lang_used_flags.as_slice(), &[]);
226
+ }
141
227
  }
@@ -1,11 +1,14 @@
1
- use crate::abstractions::MeteredSemaphore;
2
- use crate::worker::activities::PermittedTqResp;
3
- use crate::{pollers::BoxedActPoller, MetricsContext};
1
+ use crate::{
2
+ abstractions::MeteredSemaphore, pollers::BoxedActPoller, worker::activities::PermittedTqResp,
3
+ MetricsContext,
4
+ };
4
5
  use futures::{stream, Stream};
5
- use governor::clock::DefaultClock;
6
- use governor::middleware::NoOpMiddleware;
7
- use governor::state::{InMemoryState, NotKeyed};
8
- use governor::RateLimiter;
6
+ use governor::{
7
+ clock::DefaultClock,
8
+ middleware::NoOpMiddleware,
9
+ state::{InMemoryState, NotKeyed},
10
+ RateLimiter,
11
+ };
9
12
  use std::sync::Arc;
10
13
  use temporal_sdk_core_protos::temporal::api::workflowservice::v1::PollActivityTaskQueueResponse;
11
14
  use tokio::select;
@@ -37,7 +37,10 @@ use governor::{Quota, RateLimiter};
37
37
  use std::{
38
38
  convert::TryInto,
39
39
  future,
40
- sync::Arc,
40
+ sync::{
41
+ atomic::{AtomicBool, Ordering},
42
+ Arc,
43
+ },
41
44
  time::{Duration, Instant},
42
45
  };
43
46
  use temporal_sdk_core_protos::{
@@ -117,8 +120,8 @@ impl RemoteInFlightActInfo {
117
120
  }
118
121
 
119
122
  pub(crate) struct WorkerActivityTasks {
120
- /// Token used to signal the server task poller that shutdown is beginning
121
- poller_shutdown_token: CancellationToken,
123
+ /// Token which is cancelled once shutdown is beginning
124
+ shutdown_initiated_token: CancellationToken,
122
125
  /// Centralizes management of heartbeat issuing / throttling
123
126
  heartbeat_manager: ActivityHeartbeatManager,
124
127
  /// Combined stream for any ActivityTask producing source (polls, eager activities,
@@ -169,7 +172,7 @@ impl WorkerActivityTasks {
169
172
  metrics.with_new_attrs([activity_worker_type()]),
170
173
  MetricsContext::available_task_slots,
171
174
  ));
172
- let poller_shutdown_token = CancellationToken::new();
175
+ let shutdown_initiated_token = CancellationToken::new();
173
176
  let rate_limiter = max_worker_act_per_sec.and_then(|ps| {
174
177
  Quota::with_period(Duration::from_secs_f64(ps.recip())).map(RateLimiter::direct)
175
178
  });
@@ -179,7 +182,7 @@ impl WorkerActivityTasks {
179
182
  semaphore.clone(),
180
183
  rate_limiter,
181
184
  metrics.clone(),
182
- poller_shutdown_token.clone(),
185
+ shutdown_initiated_token.clone(),
183
186
  );
184
187
  let (eager_activities_tx, eager_activities_rx) = unbounded_channel();
185
188
  let eager_activities_semaphore = ClosableMeteredSemaphore::new_arc(semaphore);
@@ -199,22 +202,21 @@ impl WorkerActivityTasks {
199
202
  starts_stream.map(ActivityTaskSource::from),
200
203
  |_: &mut ()| PollNext::Left,
201
204
  );
202
- // Create a task stream composed of (in poll preference order):
203
- // cancels_stream ------------------------------+--- activity_task_stream
204
- // eager_activities_rx ---+--- starts_stream ---|
205
- // server_poll_stream ---|
206
- let activity_task_stream = Self::merge_source_streams(
205
+
206
+ let activity_task_stream = ActivityTaskStream {
207
207
  source_stream,
208
- outstanding_activity_tasks.clone(),
208
+ outstanding_tasks: outstanding_activity_tasks.clone(),
209
209
  start_tasks_stream_complete,
210
- complete_notify.clone(),
211
- graceful_shutdown,
210
+ complete_notify: complete_notify.clone(),
211
+ grace_period: graceful_shutdown,
212
212
  cancels_tx,
213
- metrics.clone(),
214
- );
213
+ shutdown_initiated_token: shutdown_initiated_token.clone(),
214
+ metrics: metrics.clone(),
215
+ }
216
+ .streamify();
215
217
 
216
218
  Self {
217
- poller_shutdown_token,
219
+ shutdown_initiated_token,
218
220
  eager_activities_tx,
219
221
  heartbeat_manager,
220
222
  activity_task_stream: Mutex::new(activity_task_stream.boxed()),
@@ -263,9 +265,7 @@ impl WorkerActivityTasks {
263
265
 
264
266
  // Prefer eager activities over polling the server
265
267
  stream::select_with_strategy(non_poll_stream, poller_stream, |_: &mut ()| PollNext::Left)
266
- .map(|res| res.map_err(|err| err.into()))
267
- // This map, chain, filter_map sequence is here to cancel the token when this stream ends.
268
- .map(Some)
268
+ .map(|res| Some(res.map_err(Into::into)))
269
269
  .chain(futures::stream::once(async move {
270
270
  on_complete_token.cancel();
271
271
  None
@@ -273,98 +273,8 @@ impl WorkerActivityTasks {
273
273
  .filter_map(future::ready)
274
274
  }
275
275
 
276
- /// Builds an [ActivityTask] stream for both cancellation tasks from cancels delivered from
277
- /// heartbeats as well as new activity starts
278
- fn merge_source_streams(
279
- source_stream: impl Stream<Item = ActivityTaskSource>,
280
- outstanding_tasks: Arc<DashMap<TaskToken, RemoteInFlightActInfo>>,
281
- start_tasks_stream_complete: CancellationToken,
282
- complete_notify: Arc<Notify>,
283
- grace_period: Option<Duration>,
284
- cancels_tx: UnboundedSender<PendingActivityCancel>,
285
- metrics: MetricsContext,
286
- ) -> impl Stream<Item = Result<ActivityTask, PollActivityError>> {
287
- let outstanding_tasks_clone = outstanding_tasks.clone();
288
- source_stream
289
- .filter_map(move |source| {
290
- let outstanding_tasks = outstanding_tasks.clone();
291
- let metrics = metrics.clone();
292
- async move {
293
- match source {
294
- ActivityTaskSource::PendingCancel(next_pc) => {
295
- // It's possible that activity has been completed and we no longer have
296
- // an outstanding activity task. This is fine because it means that we
297
- // no longer need to cancel this activity, so we'll just ignore such
298
- // orphaned cancellations.
299
- if let Some(mut details) =
300
- outstanding_tasks.get_mut(&next_pc.task_token)
301
- {
302
- if details.issued_cancel_to_lang.is_some() {
303
- // Don't double-issue cancellations
304
- return None;
305
- }
306
-
307
- details.issued_cancel_to_lang = Some(next_pc.reason);
308
- if next_pc.reason == ActivityCancelReason::NotFound {
309
- details.known_not_found = true;
310
- }
311
- Some(Ok(ActivityTask::cancel_from_ids(
312
- next_pc.task_token.0,
313
- next_pc.reason,
314
- )))
315
- } else {
316
- debug!(task_token = ?next_pc.task_token,
317
- "Unknown activity task when issuing cancel");
318
- // If we can't find the activity here, it's already been completed,
319
- // in which case issuing a cancel again is pointless.
320
- None
321
- }
322
- }
323
- ActivityTaskSource::PendingStart(res) => {
324
- Some(res.map(|(task, is_eager)| {
325
- Self::about_to_issue_task(
326
- outstanding_tasks,
327
- task,
328
- is_eager,
329
- metrics,
330
- )
331
- }))
332
- }
333
- }
334
- }
335
- })
336
- .take_until(async move {
337
- start_tasks_stream_complete.cancelled().await;
338
- // Issue cancels for any still-living act tasks after the grace period
339
- let (grace_killer, stop_grace) = futures_util::future::abortable(async {
340
- if let Some(gp) = grace_period {
341
- // Make sure we've waited at least the grace period. This way if waiting for
342
- // starts to finish took a while, we subtract that from the grace period.
343
- tokio::time::sleep(gp).await;
344
- for mapref in outstanding_tasks_clone.iter() {
345
- let _ = cancels_tx.send(PendingActivityCancel::new(
346
- mapref.key().clone(),
347
- ActivityCancelReason::WorkerShutdown,
348
- ));
349
- }
350
- }
351
- });
352
- join!(
353
- async {
354
- while !outstanding_tasks_clone.is_empty() {
355
- complete_notify.notified().await
356
- }
357
- // If we were waiting for the grace period but everything already finished,
358
- // we don't need to keep waiting.
359
- stop_grace.abort();
360
- },
361
- grace_killer
362
- )
363
- })
364
- }
365
-
366
276
  pub(crate) fn initiate_shutdown(&self) {
367
- self.poller_shutdown_token.cancel();
277
+ self.shutdown_initiated_token.cancel();
368
278
  self.eager_activities_semaphore.close();
369
279
  }
370
280
 
@@ -518,42 +428,142 @@ impl WorkerActivityTasks {
518
428
  }
519
429
  }
520
430
 
521
- /// Called when there is a new [ActivityTask] about to be bubbled up out of the poller
522
- fn about_to_issue_task(
523
- outstanding_tasks: Arc<DashMap<TaskToken, RemoteInFlightActInfo>>,
524
- task: PermittedTqResp,
525
- is_eager: bool,
526
- metrics: MetricsContext,
527
- ) -> ActivityTask {
528
- if let Some(ref act_type) = task.resp.activity_type {
529
- if let Some(ref wf_type) = task.resp.workflow_type {
530
- metrics
531
- .with_new_attrs([
532
- activity_type(act_type.name.clone()),
533
- workflow_type(wf_type.name.clone()),
534
- eager(is_eager),
535
- ])
536
- .act_task_received();
537
- }
538
- }
539
- // There could be an else statement here but since the response should always contain both
540
- // activity_type and workflow_type, we won't bother.
431
+ #[cfg(test)]
432
+ pub(crate) fn remaining_activity_capacity(&self) -> usize {
433
+ self.eager_activities_semaphore.available_permits()
434
+ }
435
+ }
541
436
 
542
- if let Some(dur) = task.resp.sched_to_start() {
543
- metrics.act_sched_to_start_latency(dur);
544
- };
437
+ struct ActivityTaskStream<SrcStrm> {
438
+ source_stream: SrcStrm,
439
+ outstanding_tasks: Arc<DashMap<TaskToken, RemoteInFlightActInfo>>,
440
+ start_tasks_stream_complete: CancellationToken,
441
+ complete_notify: Arc<Notify>,
442
+ grace_period: Option<Duration>,
443
+ cancels_tx: UnboundedSender<PendingActivityCancel>,
444
+ /// Token which is cancelled once shutdown is beginning
445
+ shutdown_initiated_token: CancellationToken,
446
+ metrics: MetricsContext,
447
+ }
545
448
 
546
- outstanding_tasks.insert(
547
- task.resp.task_token.clone().into(),
548
- RemoteInFlightActInfo::new(&task.resp, task.permit.into_used()),
549
- );
449
+ impl<SrcStrm> ActivityTaskStream<SrcStrm>
450
+ where
451
+ SrcStrm: Stream<Item = ActivityTaskSource>,
452
+ {
453
+ /// Create a task stream composed of (in poll preference order):
454
+ /// cancels_stream ------------------------------+--- activity_task_stream
455
+ /// eager_activities_rx ---+--- starts_stream ---|
456
+ /// server_poll_stream ---|
457
+ fn streamify(self) -> impl Stream<Item = Result<ActivityTask, PollActivityError>> {
458
+ let outstanding_tasks_clone = self.outstanding_tasks.clone();
459
+ let should_issue_immediate_cancel = Arc::new(AtomicBool::new(false));
460
+ let should_issue_immediate_cancel_clone = should_issue_immediate_cancel.clone();
461
+ let cancels_tx = self.cancels_tx.clone();
462
+ self.source_stream
463
+ .filter_map(move |source| {
464
+ let res = match source {
465
+ ActivityTaskSource::PendingCancel(next_pc) => {
466
+ // It's possible that activity has been completed and we no longer have
467
+ // an outstanding activity task. This is fine because it means that we
468
+ // no longer need to cancel this activity, so we'll just ignore such
469
+ // orphaned cancellations.
470
+ if let Some(mut details) =
471
+ self.outstanding_tasks.get_mut(&next_pc.task_token)
472
+ {
473
+ if details.issued_cancel_to_lang.is_some() {
474
+ // Don't double-issue cancellations
475
+ None
476
+ } else {
477
+ details.issued_cancel_to_lang = Some(next_pc.reason);
478
+ if next_pc.reason == ActivityCancelReason::NotFound {
479
+ details.known_not_found = true;
480
+ }
481
+ Some(Ok(ActivityTask::cancel_from_ids(
482
+ next_pc.task_token.0,
483
+ next_pc.reason,
484
+ )))
485
+ }
486
+ } else {
487
+ debug!(task_token = ?next_pc.task_token,
488
+ "Unknown activity task when issuing cancel");
489
+ // If we can't find the activity here, it's already been completed,
490
+ // in which case issuing a cancel again is pointless.
491
+ None
492
+ }
493
+ }
494
+ ActivityTaskSource::PendingStart(res) => {
495
+ Some(res.map(|(task, is_eager)| {
496
+ if let Some(ref act_type) = task.resp.activity_type {
497
+ if let Some(ref wf_type) = task.resp.workflow_type {
498
+ self.metrics
499
+ .with_new_attrs([
500
+ activity_type(act_type.name.clone()),
501
+ workflow_type(wf_type.name.clone()),
502
+ eager(is_eager),
503
+ ])
504
+ .act_task_received();
505
+ }
506
+ }
507
+ // There could be an else statement here but since the response
508
+ // should always contain both activity_type and workflow_type, we
509
+ // won't bother.
550
510
 
551
- ActivityTask::start_from_poll_resp(task.resp)
552
- }
511
+ if let Some(dur) = task.resp.sched_to_start() {
512
+ self.metrics.act_sched_to_start_latency(dur);
513
+ };
553
514
 
554
- #[cfg(test)]
555
- pub(crate) fn remaining_activity_capacity(&self) -> usize {
556
- self.eager_activities_semaphore.available_permits()
515
+ let tt: TaskToken = task.resp.task_token.clone().into();
516
+ self.outstanding_tasks.insert(
517
+ tt.clone(),
518
+ RemoteInFlightActInfo::new(&task.resp, task.permit.into_used()),
519
+ );
520
+ // If we have already waited the grace period and issued cancels,
521
+ // this will have been set true, indicating anything that happened
522
+ // to be buffered/in-flight/etc should get an immediate cancel. This
523
+ // is to allow the user to potentially decide to ignore cancels and
524
+ // do work on polls that got received during shutdown.
525
+ if should_issue_immediate_cancel.load(Ordering::Acquire) {
526
+ let _ = cancels_tx.send(PendingActivityCancel::new(
527
+ tt,
528
+ ActivityCancelReason::WorkerShutdown,
529
+ ));
530
+ }
531
+
532
+ ActivityTask::start_from_poll_resp(task.resp)
533
+ }))
534
+ }
535
+ };
536
+ async move { res }
537
+ })
538
+ .take_until(async move {
539
+ // Once we've been told to begin cancelling, wait the grace period and then start
540
+ // cancelling anything outstanding.
541
+ let (grace_killer, stop_grace) = futures_util::future::abortable(async {
542
+ if let Some(gp) = self.grace_period {
543
+ self.shutdown_initiated_token.cancelled().await;
544
+ tokio::time::sleep(gp).await;
545
+ should_issue_immediate_cancel_clone.store(true, Ordering::Release);
546
+ for mapref in outstanding_tasks_clone.iter() {
547
+ let _ = self.cancels_tx.send(PendingActivityCancel::new(
548
+ mapref.key().clone(),
549
+ ActivityCancelReason::WorkerShutdown,
550
+ ));
551
+ }
552
+ }
553
+ });
554
+ join!(
555
+ async {
556
+ self.start_tasks_stream_complete.cancelled().await;
557
+ while !outstanding_tasks_clone.is_empty() {
558
+ self.complete_notify.notified().await
559
+ }
560
+ // If we were waiting for the grace period but everything already finished,
561
+ // we don't need to keep waiting.
562
+ stop_grace.abort();
563
+ },
564
+ grace_killer
565
+ )
566
+ })
557
567
  }
558
568
  }
559
569
 
@@ -7,12 +7,15 @@ use temporal_sdk_core_protos::{
7
7
  coresdk::workflow_commands::QueryResult,
8
8
  temporal::api::{
9
9
  command::v1::Command,
10
- common::v1::{MeteringMetadata, Payloads, WorkflowExecution},
10
+ common::v1::{
11
+ MeteringMetadata, Payloads, WorkerVersionCapabilities, WorkerVersionStamp,
12
+ WorkflowExecution,
13
+ },
11
14
  enums::v1::{TaskQueueKind, WorkflowTaskFailedCause},
12
15
  failure::v1::Failure,
13
16
  query::v1::WorkflowQueryResult,
14
17
  sdk::v1::WorkflowTaskCompletedMetadata,
15
- taskqueue::v1::{StickyExecutionAttributes, TaskQueue, TaskQueueMetadata, VersionId},
18
+ taskqueue::v1::{StickyExecutionAttributes, TaskQueue, TaskQueueMetadata},
16
19
  workflowservice::v1::{get_system_info_response::Capabilities, *},
17
20
  },
18
21
  TaskToken,
@@ -138,8 +141,8 @@ impl WorkerClient for WorkerClientBag {
138
141
  } else {
139
142
  self.worker_build_id.clone()
140
143
  },
141
- worker_versioning_id: Some(VersionId {
142
- worker_build_id: self.versioning_build_id(),
144
+ worker_version_capabilities: Some(WorkerVersionCapabilities {
145
+ build_id: self.versioning_build_id(),
143
146
  }),
144
147
  };
145
148
 
@@ -166,8 +169,8 @@ impl WorkerClient for WorkerClientBag {
166
169
  task_queue_metadata: max_tasks_per_sec.map(|tps| TaskQueueMetadata {
167
170
  max_tasks_per_second: Some(tps),
168
171
  }),
169
- worker_versioning_id: Some(VersionId {
170
- worker_build_id: self.versioning_build_id(),
172
+ worker_version_capabilities: Some(WorkerVersionCapabilities {
173
+ build_id: self.versioning_build_id(),
171
174
  }),
172
175
  };
173
176
 
@@ -190,8 +193,9 @@ impl WorkerClient for WorkerClientBag {
190
193
  sticky_attributes: request.sticky_attributes,
191
194
  return_new_workflow_task: request.return_new_workflow_task,
192
195
  force_create_new_workflow_task: request.force_create_new_workflow_task,
193
- worker_versioning_id: Some(VersionId {
194
- worker_build_id: self.versioning_build_id(),
196
+ worker_version_stamp: Some(WorkerVersionStamp {
197
+ build_id: self.versioning_build_id(),
198
+ bundle_id: "".to_string(),
195
199
  }),
196
200
  messages: vec![],
197
201
  binary_checksum: self.worker_build_id.clone(),