@temporalio/core-bridge 1.12.0 → 1.12.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/Cargo.lock +64 -119
- package/Cargo.toml +1 -1
- package/index.js +3 -2
- package/package.json +3 -3
- package/releases/aarch64-apple-darwin/index.node +0 -0
- package/releases/aarch64-unknown-linux-gnu/index.node +0 -0
- package/releases/x86_64-apple-darwin/index.node +0 -0
- package/releases/x86_64-pc-windows-msvc/index.node +0 -0
- package/releases/x86_64-unknown-linux-gnu/index.node +0 -0
- package/sdk-core/.cargo/config.toml +1 -2
- package/sdk-core/.github/workflows/per-pr.yml +2 -0
- package/sdk-core/AGENTS.md +7 -0
- package/sdk-core/Cargo.toml +9 -5
- package/sdk-core/README.md +6 -5
- package/sdk-core/client/Cargo.toml +3 -2
- package/sdk-core/client/src/lib.rs +17 -8
- package/sdk-core/client/src/metrics.rs +57 -23
- package/sdk-core/client/src/raw.rs +33 -15
- package/sdk-core/core/Cargo.toml +11 -9
- package/sdk-core/core/benches/workflow_replay.rs +114 -15
- package/sdk-core/core/src/core_tests/activity_tasks.rs +18 -18
- package/sdk-core/core/src/core_tests/child_workflows.rs +4 -4
- package/sdk-core/core/src/core_tests/determinism.rs +6 -6
- package/sdk-core/core/src/core_tests/local_activities.rs +20 -20
- package/sdk-core/core/src/core_tests/mod.rs +40 -5
- package/sdk-core/core/src/core_tests/queries.rs +25 -16
- package/sdk-core/core/src/core_tests/replay_flag.rs +3 -3
- package/sdk-core/core/src/core_tests/updates.rs +3 -3
- package/sdk-core/core/src/core_tests/workers.rs +9 -7
- package/sdk-core/core/src/core_tests/workflow_tasks.rs +40 -42
- package/sdk-core/core/src/ephemeral_server/mod.rs +1 -19
- package/sdk-core/core/src/lib.rs +10 -1
- package/sdk-core/core/src/pollers/poll_buffer.rs +2 -2
- package/sdk-core/core/src/replay/mod.rs +3 -3
- package/sdk-core/core/src/telemetry/metrics.rs +306 -152
- package/sdk-core/core/src/telemetry/mod.rs +11 -4
- package/sdk-core/core/src/telemetry/otel.rs +134 -131
- package/sdk-core/core/src/telemetry/prometheus_meter.rs +885 -0
- package/sdk-core/core/src/telemetry/prometheus_server.rs +48 -28
- package/sdk-core/core/src/test_help/mod.rs +27 -12
- package/sdk-core/core/src/worker/activities/activity_heartbeat_manager.rs +7 -7
- package/sdk-core/core/src/worker/activities.rs +4 -4
- package/sdk-core/core/src/worker/client/mocks.rs +10 -3
- package/sdk-core/core/src/worker/client.rs +68 -5
- package/sdk-core/core/src/worker/heartbeat.rs +229 -0
- package/sdk-core/core/src/worker/mod.rs +35 -14
- package/sdk-core/core/src/worker/tuner/resource_based.rs +4 -4
- package/sdk-core/core/src/worker/workflow/history_update.rs +71 -19
- package/sdk-core/core/src/worker/workflow/machines/cancel_external_state_machine.rs +1 -2
- package/sdk-core/core/src/worker/workflow/machines/child_workflow_state_machine.rs +1 -1
- package/sdk-core/core/src/worker/workflow/machines/nexus_operation_state_machine.rs +31 -48
- package/sdk-core/core/src/worker/workflow/machines/signal_external_state_machine.rs +1 -2
- package/sdk-core/core/src/worker/workflow/machines/upsert_search_attributes_state_machine.rs +3 -3
- package/sdk-core/core/src/worker/workflow/machines/workflow_machines.rs +4 -1
- package/sdk-core/core/src/worker/workflow/managed_run.rs +1 -1
- package/sdk-core/core/src/worker/workflow/mod.rs +15 -15
- package/sdk-core/core-api/Cargo.toml +2 -2
- package/sdk-core/core-api/src/envconfig.rs +204 -99
- package/sdk-core/core-api/src/lib.rs +9 -0
- package/sdk-core/core-api/src/telemetry/metrics.rs +548 -100
- package/sdk-core/core-api/src/worker.rs +11 -5
- package/sdk-core/core-c-bridge/Cargo.toml +49 -0
- package/sdk-core/core-c-bridge/build.rs +26 -0
- package/sdk-core/core-c-bridge/include/temporal-sdk-core-c-bridge.h +817 -0
- package/sdk-core/core-c-bridge/src/client.rs +679 -0
- package/sdk-core/core-c-bridge/src/lib.rs +245 -0
- package/sdk-core/core-c-bridge/src/metric.rs +682 -0
- package/sdk-core/core-c-bridge/src/random.rs +61 -0
- package/sdk-core/core-c-bridge/src/runtime.rs +445 -0
- package/sdk-core/core-c-bridge/src/testing.rs +282 -0
- package/sdk-core/core-c-bridge/src/tests/context.rs +644 -0
- package/sdk-core/core-c-bridge/src/tests/mod.rs +178 -0
- package/sdk-core/core-c-bridge/src/tests/utils.rs +108 -0
- package/sdk-core/core-c-bridge/src/worker.rs +1069 -0
- package/sdk-core/etc/deps.svg +64 -64
- package/sdk-core/sdk/src/activity_context.rs +6 -4
- package/sdk-core/sdk/src/lib.rs +49 -27
- package/sdk-core/sdk/src/workflow_future.rs +18 -25
- package/sdk-core/sdk-core-protos/protos/api_upstream/README.md +4 -0
- package/sdk-core/sdk-core-protos/protos/api_upstream/buf.yaml +0 -2
- package/sdk-core/sdk-core-protos/protos/api_upstream/openapi/openapiv2.json +630 -83
- package/sdk-core/sdk-core-protos/protos/api_upstream/openapi/openapiv3.yaml +632 -78
- package/sdk-core/sdk-core-protos/protos/api_upstream/temporal/api/batch/v1/message.proto +4 -4
- package/sdk-core/sdk-core-protos/protos/api_upstream/temporal/api/command/v1/message.proto +6 -4
- package/sdk-core/sdk-core-protos/protos/api_upstream/temporal/api/common/v1/message.proto +2 -2
- package/sdk-core/sdk-core-protos/protos/api_upstream/temporal/api/deployment/v1/message.proto +32 -2
- package/sdk-core/sdk-core-protos/protos/api_upstream/temporal/api/enums/v1/common.proto +10 -1
- package/sdk-core/sdk-core-protos/protos/api_upstream/temporal/api/enums/v1/deployment.proto +26 -0
- package/sdk-core/sdk-core-protos/protos/api_upstream/temporal/api/enums/v1/failed_cause.proto +2 -0
- package/sdk-core/sdk-core-protos/protos/api_upstream/temporal/api/enums/v1/reset.proto +4 -4
- package/sdk-core/sdk-core-protos/protos/api_upstream/temporal/api/failure/v1/message.proto +2 -2
- package/sdk-core/sdk-core-protos/protos/api_upstream/temporal/api/history/v1/message.proto +47 -31
- package/sdk-core/sdk-core-protos/protos/api_upstream/temporal/api/nexus/v1/message.proto +4 -4
- package/sdk-core/sdk-core-protos/protos/api_upstream/temporal/api/schedule/v1/message.proto +7 -1
- package/sdk-core/sdk-core-protos/protos/api_upstream/temporal/api/worker/v1/message.proto +134 -0
- package/sdk-core/sdk-core-protos/protos/api_upstream/temporal/api/workflow/v1/message.proto +14 -11
- package/sdk-core/sdk-core-protos/protos/api_upstream/temporal/api/workflowservice/v1/request_response.proto +148 -37
- package/sdk-core/sdk-core-protos/protos/api_upstream/temporal/api/workflowservice/v1/service.proto +21 -0
- package/sdk-core/sdk-core-protos/protos/local/temporal/sdk/core/workflow_activation/workflow_activation.proto +4 -4
- package/sdk-core/sdk-core-protos/src/history_builder.rs +9 -5
- package/sdk-core/sdk-core-protos/src/lib.rs +96 -6
- package/sdk-core/test-utils/src/lib.rs +11 -3
- package/sdk-core/tests/cloud_tests.rs +3 -3
- package/sdk-core/tests/heavy_tests.rs +11 -3
- package/sdk-core/tests/integ_tests/client_tests.rs +12 -13
- package/sdk-core/tests/integ_tests/ephemeral_server_tests.rs +1 -1
- package/sdk-core/tests/integ_tests/metrics_tests.rs +188 -83
- package/sdk-core/tests/integ_tests/polling_tests.rs +1 -1
- package/sdk-core/tests/integ_tests/queries_tests.rs +56 -40
- package/sdk-core/tests/integ_tests/update_tests.rs +2 -7
- package/sdk-core/tests/integ_tests/worker_tests.rs +3 -4
- package/sdk-core/tests/integ_tests/worker_versioning_tests.rs +3 -7
- package/sdk-core/tests/integ_tests/workflow_tests/local_activities.rs +3 -5
- package/sdk-core/tests/integ_tests/workflow_tests/nexus.rs +24 -17
- package/src/client.rs +6 -0
- package/src/metrics.rs +6 -6
|
@@ -0,0 +1,1069 @@
|
|
|
1
|
+
use crate::{
|
|
2
|
+
ByteArray, ByteArrayRef, ByteArrayRefArray, UserDataHandle, client::Client, runtime::Runtime,
|
|
3
|
+
};
|
|
4
|
+
use anyhow::{Context, bail};
|
|
5
|
+
use prost::Message;
|
|
6
|
+
use std::{
|
|
7
|
+
collections::{HashMap, HashSet},
|
|
8
|
+
sync::Arc,
|
|
9
|
+
time::Duration,
|
|
10
|
+
};
|
|
11
|
+
use temporal_sdk_core::{
|
|
12
|
+
WorkerConfigBuilder,
|
|
13
|
+
replay::{HistoryForReplay, ReplayWorkerInput},
|
|
14
|
+
};
|
|
15
|
+
use temporal_sdk_core_api::{
|
|
16
|
+
Worker as CoreWorker,
|
|
17
|
+
errors::{PollError, WorkflowErrorType},
|
|
18
|
+
worker::{
|
|
19
|
+
SlotInfoTrait, SlotKind, SlotMarkUsedContext, SlotReleaseContext, SlotReservationContext,
|
|
20
|
+
SlotSupplierPermit,
|
|
21
|
+
},
|
|
22
|
+
};
|
|
23
|
+
use temporal_sdk_core_protos::{
|
|
24
|
+
coresdk::{
|
|
25
|
+
ActivityHeartbeat, ActivityTaskCompletion,
|
|
26
|
+
workflow_completion::WorkflowActivationCompletion,
|
|
27
|
+
},
|
|
28
|
+
temporal::api::history::v1::History,
|
|
29
|
+
};
|
|
30
|
+
use tokio::sync::{
|
|
31
|
+
mpsc::{Sender, channel},
|
|
32
|
+
oneshot,
|
|
33
|
+
};
|
|
34
|
+
use tokio_stream::wrappers::ReceiverStream;
|
|
35
|
+
|
|
36
|
+
/// Worker creation options passed over the C ABI.
///
/// All `ByteArrayRef` fields borrow caller-owned memory and must stay valid
/// for the duration of the call that receives this struct. Field semantics
/// mirror the core worker config (conversion is performed elsewhere — not
/// visible in this chunk).
#[repr(C)]
pub struct WorkerOptions {
    pub namespace: ByteArrayRef,
    pub task_queue: ByteArrayRef,
    pub versioning_strategy: WorkerVersioningStrategy,
    pub identity_override: ByteArrayRef,
    pub max_cached_workflows: u32,
    pub tuner: TunerHolder,
    pub no_remote_activities: bool,
    pub sticky_queue_schedule_to_start_timeout_millis: u64,
    pub max_heartbeat_throttle_interval_millis: u64,
    pub default_heartbeat_throttle_interval_millis: u64,
    pub max_activities_per_second: f64,
    pub max_task_queue_activities_per_second: f64,
    pub graceful_shutdown_period_millis: u64,
    pub workflow_task_poller_behavior: PollerBehavior,
    pub nonsticky_to_sticky_poll_ratio: f32,
    pub activity_task_poller_behavior: PollerBehavior,
    pub nondeterminism_as_workflow_fail: bool,
    pub nondeterminism_as_workflow_fail_for_types: ByteArrayRefArray,
}

/// Poller behavior with a fixed maximum number of concurrent pollers.
#[repr(C)]
pub struct PollerBehaviorSimpleMaximum {
    pub simple_maximum: usize,
}

/// Poller behavior that autoscales between `minimum` and `maximum` pollers,
/// starting from `initial`.
#[repr(C)]
pub struct PollerBehaviorAutoscaling {
    pub minimum: usize,
    pub maximum: usize,
    pub initial: usize,
}

// Only one of simple_maximum and autoscaling can be present.
/// C-ABI tagged-union substitute: exactly one pointer should be non-null
/// (enforced by the `TryFrom` conversion below).
#[repr(C)]
pub struct PollerBehavior {
    pub simple_maximum: *const PollerBehaviorSimpleMaximum,
    pub autoscaling: *const PollerBehaviorAutoscaling,
}
|
|
76
|
+
|
|
77
|
+
/// Converts the C-ABI [`PollerBehavior`] (pair of nullable pointers) into the
/// core enum form. Errors if both pointers are set or both are null.
impl TryFrom<&PollerBehavior> for temporal_sdk_core_api::worker::PollerBehavior {
    type Error = anyhow::Error;
    fn try_from(value: &PollerBehavior) -> Result<Self, Self::Error> {
        // Reject ambiguous input before touching either pointer.
        if !value.simple_maximum.is_null() && !value.autoscaling.is_null() {
            bail!("simple_maximum and autoscaling cannot both be non-null values");
        }
        // SAFETY: caller guarantees any non-null pointer references a valid,
        // properly-aligned struct for the duration of this call.
        if let Some(value) = unsafe { value.simple_maximum.as_ref() } {
            return Ok(
                temporal_sdk_core_api::worker::PollerBehavior::SimpleMaximum(value.simple_maximum),
            );
        } else if let Some(value) = unsafe { value.autoscaling.as_ref() } {
            return Ok(temporal_sdk_core_api::worker::PollerBehavior::Autoscaling {
                minimum: value.minimum,
                maximum: value.maximum,
                initial: value.initial,
            });
        }
        bail!("simple_maximum and autoscaling cannot both be null values");
    }
}
|
|
97
|
+
|
|
98
|
+
/// How the worker participates in worker versioning.
#[repr(C)]
pub enum WorkerVersioningStrategy {
    None(WorkerVersioningNone),
    DeploymentBased(WorkerDeploymentOptions),
    LegacyBuildIdBased(LegacyBuildIdBasedStrategy),
}

/// No versioning; an optional build id may still be reported.
#[repr(C)]
pub struct WorkerVersioningNone {
    pub build_id: ByteArrayRef,
}

/// Deployment-based versioning options.
#[repr(C)]
pub struct WorkerDeploymentOptions {
    pub version: WorkerDeploymentVersion,
    pub use_worker_versioning: bool,
    // Encoded enum value; presumably maps to a proto VersioningBehavior —
    // TODO(review): confirm against the options conversion code.
    pub default_versioning_behavior: i32,
}

/// Legacy build-id-based versioning.
#[repr(C)]
pub struct LegacyBuildIdBasedStrategy {
    pub build_id: ByteArrayRef,
}

/// Identifies a worker deployment version: deployment name + build id.
#[repr(C)]
pub struct WorkerDeploymentVersion {
    pub deployment_name: ByteArrayRef,
    pub build_id: ByteArrayRef,
}

/// Slot suppliers for each of the three primary slot kinds.
#[repr(C)]
pub struct TunerHolder {
    pub workflow_slot_supplier: SlotSupplier,
    pub activity_slot_supplier: SlotSupplier,
    pub local_activity_slot_supplier: SlotSupplier,
}
|
|
134
|
+
|
|
135
|
+
/// C-ABI choice of slot supplier implementation.
#[repr(C)]
#[derive(Clone, Copy)]
pub enum SlotSupplier {
    FixedSize(FixedSizeSlotSupplier),
    ResourceBased(ResourceBasedSlotSupplier),
    Custom(CustomSlotSupplierCallbacksImpl),
}

/// Fixed number of available slots.
#[repr(C)]
#[derive(Clone, Copy)]
pub struct FixedSizeSlotSupplier {
    pub num_slots: usize,
}

/// Resource-based (CPU/memory-target) slot supplier configuration.
#[repr(C)]
#[derive(Clone, Copy)]
pub struct ResourceBasedSlotSupplier {
    pub minimum_slots: usize,
    pub maximum_slots: usize,
    pub ramp_throttle_ms: u64,
    pub tuner_options: ResourceBasedTunerOptions,
}

/// Rust-side wrapper adapting foreign callbacks to the core `SlotSupplier`
/// trait for a particular slot kind `SK` (carried only as a type marker).
struct CustomSlotSupplier<SK> {
    inner: CustomSlotSupplierCallbacksImpl,
    _pd: std::marker::PhantomData<SK>,
}

// SAFETY: the wrapped value is a raw pointer to a foreign callback table;
// the foreign side is responsible for making the callbacks callable from any
// thread — TODO(review): confirm that contract is documented for callers.
unsafe impl<SK> Send for CustomSlotSupplier<SK> {}
unsafe impl<SK> Sync for CustomSlotSupplier<SK> {}

/// Async reserve: the foreign side must eventually complete the `sender`.
pub type CustomReserveSlotCallback =
    unsafe extern "C" fn(ctx: *const SlotReserveCtx, sender: *mut libc::c_void);
/// Cancels a pending reserve identified by its token source pointer.
pub type CustomCancelReserveCallback = unsafe extern "C" fn(token_source: *mut libc::c_void);
/// Must return C#-tracked id for the permit. A zero value means no permit was reserved.
pub type CustomTryReserveSlotCallback = unsafe extern "C" fn(ctx: *const SlotReserveCtx) -> usize;
pub type CustomMarkSlotUsedCallback = unsafe extern "C" fn(ctx: *const SlotMarkUsedCtx);
pub type CustomReleaseSlotCallback = unsafe extern "C" fn(ctx: *const SlotReleaseCtx);
/// Invoked when the callback table itself is dropped on the Rust side.
pub type CustomSlotImplFreeCallback =
    unsafe extern "C" fn(userimpl: *const CustomSlotSupplierCallbacks);
|
|
175
|
+
|
|
176
|
+
/// Copyable handle to the foreign-owned callback table.
#[repr(C)]
#[derive(Clone, Copy)]
pub struct CustomSlotSupplierCallbacksImpl(pub *const CustomSlotSupplierCallbacks);

/// Table of foreign callbacks implementing a custom slot supplier.
#[repr(C)]
pub struct CustomSlotSupplierCallbacks {
    pub reserve: CustomReserveSlotCallback,
    pub cancel_reserve: CustomCancelReserveCallback,
    pub try_reserve: CustomTryReserveSlotCallback,
    pub mark_used: CustomMarkSlotUsedCallback,
    pub release: CustomReleaseSlotCallback,
    pub free: CustomSlotImplFreeCallback,
}

impl CustomSlotSupplierCallbacksImpl {
    /// Wraps the callback handle as a core `SlotSupplier` trait object for
    /// slot kind `SK`.
    fn into_ss<SK: SlotKind + Send + Sync + 'static>(
        self,
    ) -> Arc<dyn temporal_sdk_core_api::worker::SlotSupplier<SlotKind = SK> + Send + Sync + 'static>
    {
        Arc::new(CustomSlotSupplier {
            inner: self,
            _pd: Default::default(),
        })
    }
}
impl Drop for CustomSlotSupplierCallbacks {
    fn drop(&mut self) {
        // Let the foreign side free whatever backs this callback table.
        unsafe {
            (self.free)(&*self);
        }
    }
}
|
|
208
|
+
|
|
209
|
+
/// Discriminates which kind of slot an operation applies to.
#[repr(C)]
pub enum SlotKindType {
    WorkflowSlotKindType,
    ActivitySlotKindType,
    LocalActivitySlotKindType,
    NexusSlotKindType,
}

/// Context handed to the foreign reserve/try-reserve callbacks.
#[repr(C)]
pub struct SlotReserveCtx {
    pub slot_type: SlotKindType,
    pub task_queue: ByteArrayRef,
    pub worker_identity: ByteArrayRef,
    pub worker_build_id: ByteArrayRef,
    pub is_sticky: bool,
    // The C# side will store a pointer here to the cancellation token source
    pub token_src: *mut libc::c_void,
}
// SAFETY: `token_src` is opaque to Rust — it is only copied and passed back to
// foreign callbacks, never dereferenced on the Rust side.
unsafe impl Send for SlotReserveCtx {}

/// C-ABI mirror of core slot info, one variant per slot kind.
#[repr(C)]
pub enum SlotInfo {
    WorkflowSlotInfo {
        workflow_type: ByteArrayRef,
        is_sticky: bool,
    },
    ActivitySlotInfo {
        activity_type: ByteArrayRef,
    },
    LocalActivitySlotInfo {
        activity_type: ByteArrayRef,
    },
    NexusSlotInfo {
        operation: ByteArrayRef,
        service: ByteArrayRef,
    },
}

/// Context handed to the foreign mark-used callback.
#[repr(C)]
pub struct SlotMarkUsedCtx {
    pub slot_info: SlotInfo,
    /// C# id for the slot permit.
    pub slot_permit: usize,
}

/// Context handed to the foreign release callback. `slot_info` is null when
/// no slot info is available for the released permit.
#[repr(C)]
pub struct SlotReleaseCtx {
    pub slot_info: *const SlotInfo,
    /// C# id for the slot permit.
    pub slot_permit: usize,
}
|
|
260
|
+
|
|
261
|
+
/// Guard that invokes the foreign cancel-reserve callback on drop, if a
/// non-null token source was recorded.
struct CancelReserveGuard {
    token_src: *mut libc::c_void,
    callback: CustomCancelReserveCallback,
}
impl Drop for CancelReserveGuard {
    fn drop(&mut self) {
        // A null token source means there is nothing to cancel.
        if !self.token_src.is_null() {
            unsafe {
                (self.callback)(self.token_src);
            }
        }
    }
}
// SAFETY: the pointer is opaque to Rust and only handed back to the foreign
// callback that owns the pointee.
unsafe impl Send for CancelReserveGuard {}
|
|
275
|
+
|
|
276
|
+
/// Bridges the core `SlotSupplier` trait to the foreign callback table.
#[async_trait::async_trait]
impl<SK: SlotKind + Send + Sync> temporal_sdk_core_api::worker::SlotSupplier
    for CustomSlotSupplier<SK>
{
    type SlotKind = SK;

    async fn reserve_slot(&self, ctx: &dyn SlotReservationContext) -> SlotSupplierPermit {
        // The foreign side completes the oneshot sender with a permit; the raw
        // pointer transfers ownership of the sender across the FFI boundary.
        let (tx, rx) = oneshot::channel();
        let ctx = Self::convert_reserve_ctx(ctx);
        let tx = Box::into_raw(Box::new(tx)) as *mut libc::c_void;
        unsafe {
            // NOTE(review): the guard captures `ctx.token_src` *before* the
            // reserve callback runs; `convert_reserve_ctx` initializes it to
            // null, so verify the foreign side's token pointer actually
            // reaches the guard for cancellation-on-drop.
            let _drop_guard = CancelReserveGuard {
                token_src: ctx.token_src,
                callback: (*self.inner.0).cancel_reserve,
            };
            ((*self.inner.0).reserve)(&ctx, tx);
            rx.await.expect("reserve channel is not closed")
        }
    }

    fn try_reserve_slot(&self, ctx: &dyn SlotReservationContext) -> Option<SlotSupplierPermit> {
        let ctx = Self::convert_reserve_ctx(ctx);
        // Zero is the foreign side's sentinel for "no permit available".
        let permit_id = unsafe { ((*self.inner.0).try_reserve)(&ctx) };
        if permit_id == 0 {
            None
        } else {
            Some(SlotSupplierPermit::with_user_data(permit_id))
        }
    }

    fn mark_slot_used(&self, ctx: &dyn SlotMarkUsedContext<SlotKind = Self::SlotKind>) {
        let ctx = SlotMarkUsedCtx {
            slot_info: Self::convert_slot_info(ctx.info().downcast()),
            // Permits without foreign user data are reported as id 0.
            slot_permit: ctx.permit().user_data::<usize>().copied().unwrap_or(0),
        };
        unsafe {
            ((*self.inner.0).mark_used)(&ctx);
        }
    }

    fn release_slot(&self, ctx: &dyn SlotReleaseContext<SlotKind = Self::SlotKind>) {
        // Keep the converted info alive in a local so the raw pointer handed
        // to the callback stays valid for the duration of the call.
        let mut info_ptr = std::ptr::null();
        let converted_slot_info = ctx.info().map(|i| Self::convert_slot_info(i.downcast()));
        if let Some(ref converted) = converted_slot_info {
            info_ptr = converted;
        }
        let ctx = SlotReleaseCtx {
            slot_info: info_ptr,
            slot_permit: ctx.permit().user_data::<usize>().copied().unwrap_or(0),
        };
        unsafe {
            ((*self.inner.0).release)(&ctx);
        }
    }

    fn available_slots(&self) -> Option<usize> {
        // Custom suppliers do not report an available-slot count.
        None
    }
}
|
|
335
|
+
|
|
336
|
+
impl<SK: SlotKind + Send + Sync> CustomSlotSupplier<SK> {
|
|
337
|
+
fn convert_reserve_ctx(ctx: &dyn SlotReservationContext) -> SlotReserveCtx {
|
|
338
|
+
SlotReserveCtx {
|
|
339
|
+
slot_type: match SK::kind() {
|
|
340
|
+
temporal_sdk_core_api::worker::SlotKindType::Workflow => {
|
|
341
|
+
SlotKindType::WorkflowSlotKindType
|
|
342
|
+
}
|
|
343
|
+
temporal_sdk_core_api::worker::SlotKindType::Activity => {
|
|
344
|
+
SlotKindType::ActivitySlotKindType
|
|
345
|
+
}
|
|
346
|
+
temporal_sdk_core_api::worker::SlotKindType::LocalActivity => {
|
|
347
|
+
SlotKindType::LocalActivitySlotKindType
|
|
348
|
+
}
|
|
349
|
+
temporal_sdk_core_api::worker::SlotKindType::Nexus => {
|
|
350
|
+
SlotKindType::NexusSlotKindType
|
|
351
|
+
}
|
|
352
|
+
},
|
|
353
|
+
task_queue: ctx.task_queue().into(),
|
|
354
|
+
worker_identity: ctx.worker_identity().into(),
|
|
355
|
+
worker_build_id: if let Some(vers) = ctx.worker_deployment_version() {
|
|
356
|
+
vers.build_id.as_str().into()
|
|
357
|
+
} else {
|
|
358
|
+
ByteArrayRef::empty()
|
|
359
|
+
},
|
|
360
|
+
is_sticky: ctx.is_sticky(),
|
|
361
|
+
token_src: std::ptr::null_mut(),
|
|
362
|
+
}
|
|
363
|
+
}
|
|
364
|
+
|
|
365
|
+
fn convert_slot_info(info: temporal_sdk_core_api::worker::SlotInfo) -> SlotInfo {
|
|
366
|
+
match info {
|
|
367
|
+
temporal_sdk_core_api::worker::SlotInfo::Workflow(w) => SlotInfo::WorkflowSlotInfo {
|
|
368
|
+
workflow_type: w.workflow_type.as_str().into(),
|
|
369
|
+
is_sticky: w.is_sticky,
|
|
370
|
+
},
|
|
371
|
+
temporal_sdk_core_api::worker::SlotInfo::Activity(a) => SlotInfo::ActivitySlotInfo {
|
|
372
|
+
activity_type: a.activity_type.as_str().into(),
|
|
373
|
+
},
|
|
374
|
+
temporal_sdk_core_api::worker::SlotInfo::LocalActivity(a) => {
|
|
375
|
+
SlotInfo::LocalActivitySlotInfo {
|
|
376
|
+
activity_type: a.activity_type.as_str().into(),
|
|
377
|
+
}
|
|
378
|
+
}
|
|
379
|
+
temporal_sdk_core_api::worker::SlotInfo::Nexus(n) => SlotInfo::NexusSlotInfo {
|
|
380
|
+
operation: n.operation.as_str().into(),
|
|
381
|
+
service: n.operation.as_str().into(),
|
|
382
|
+
},
|
|
383
|
+
}
|
|
384
|
+
}
|
|
385
|
+
}
|
|
386
|
+
|
|
387
|
+
/// Targets for the resource-based tuner, as fractions of total capacity.
#[repr(C)]
#[derive(Clone, Copy, PartialEq, Debug)]
pub struct ResourceBasedTunerOptions {
    pub target_memory_usage: f64,
    pub target_cpu_usage: f64,
}

/// Opaque worker handle exposed over the C ABI. `worker` is `Option` so it
/// can be detached independently of the handle's lifetime.
#[derive(Clone)]
pub struct Worker {
    worker: Option<Arc<temporal_sdk_core::Worker>>,
    runtime: Runtime,
}

/// Only runtime or fail will be non-null. Whichever is must be freed when done.
#[repr(C)]
pub struct WorkerOrFail {
    pub worker: *mut Worker,
    pub fail: *const ByteArray,
}

/// Channel endpoint for pushing replay histories to a replay worker.
pub struct WorkerReplayPusher {
    tx: Sender<HistoryForReplay>,
}

/// Result of creating a replay worker; `fail` is set on error, otherwise the
/// worker and pusher are set.
#[repr(C)]
pub struct WorkerReplayerOrFail {
    pub worker: *mut Worker,
    pub worker_replay_pusher: *mut WorkerReplayPusher,
    pub fail: *const ByteArray,
}

/// Result of pushing a single replay history; `fail` is set on error.
#[repr(C)]
pub struct WorkerReplayPushResult {
    pub fail: *const ByteArray,
}
|
|
422
|
+
|
|
423
|
+
/// Should be called at the top of any C bridge call that will need to use the tokio runtime from
/// the Core runtime provided as an argument. Also sets up tracing for the duration of the scope in
/// which the call was made.
macro_rules! enter_sync {
    ($runtime:expr) => {
        // Install the runtime's trace subscriber (if any) for this scope.
        let _trace_guard = $runtime
            .core
            .telemetry()
            .trace_subscriber()
            .map(|s| tracing::subscriber::set_default(s));
        // Enter the tokio runtime so `spawn`/timers work in this scope.
        let _guard = $runtime.core.tokio_handle().enter();
    };
}
|
|
436
|
+
|
|
437
|
+
/// Creates a new worker bound to `client`'s runtime.
///
/// Exactly one of the returned `worker`/`fail` pointers is non-null; the
/// caller must free whichever is set.
#[unsafe(no_mangle)]
pub extern "C" fn temporal_core_worker_new(
    client: *mut Client,
    options: *const WorkerOptions,
) -> WorkerOrFail {
    // SAFETY: caller guarantees both pointers are valid for the call duration.
    let client = unsafe { &mut *client };
    enter_sync!(client.runtime);
    let options = unsafe { &*options };

    let (worker, fail) = match options.try_into() {
        // Options failed conversion to a core worker config.
        Err(err) => (
            std::ptr::null_mut(),
            client
                .runtime
                .alloc_utf8(&format!("Invalid options: {err}"))
                .into_raw()
                .cast_const(),
        ),
        Ok(config) => match temporal_sdk_core::init_worker(
            &client.runtime.core,
            config,
            client.core.clone().into_inner(),
        ) {
            Err(err) => (
                std::ptr::null_mut(),
                client
                    .runtime
                    .alloc_utf8(&format!("Worker start failed: {err}"))
                    .into_raw()
                    .cast_const(),
            ),
            Ok(worker) => (
                // Ownership of the boxed handle transfers to the caller;
                // reclaimed by temporal_core_worker_free.
                Box::into_raw(Box::new(Worker {
                    worker: Some(Arc::new(worker)),
                    runtime: client.runtime.clone(),
                })),
                std::ptr::null(),
            ),
        },
    };
    WorkerOrFail { worker, fail }
}
|
|
479
|
+
|
|
480
|
+
#[unsafe(no_mangle)]
|
|
481
|
+
pub extern "C" fn temporal_core_worker_free(worker: *mut Worker) {
|
|
482
|
+
if worker.is_null() {
|
|
483
|
+
return;
|
|
484
|
+
}
|
|
485
|
+
unsafe {
|
|
486
|
+
let _ = Box::from_raw(worker);
|
|
487
|
+
}
|
|
488
|
+
}
|
|
489
|
+
|
|
490
|
+
/// If fail is present, it must be freed.
pub type WorkerCallback =
    unsafe extern "C" fn(user_data: *mut libc::c_void, fail: *const ByteArray);

/// Asynchronously validates the worker against the server, invoking
/// `callback` with a null `fail` on success or an allocated error message.
#[unsafe(no_mangle)]
pub extern "C" fn temporal_core_worker_validate(
    worker: *mut Worker,
    user_data: *mut libc::c_void,
    callback: WorkerCallback,
) {
    // SAFETY: caller guarantees a valid worker pointer for the call duration.
    let worker = unsafe { &*worker };
    let user_data = UserDataHandle(user_data);
    let core_worker = worker.worker.as_ref().unwrap().clone();
    worker.runtime.core.tokio_handle().spawn(async move {
        let fail = match core_worker.validate().await {
            Ok(_) => std::ptr::null(),
            Err(err) => worker
                .runtime
                .clone()
                .alloc_utf8(&format!("Worker validation failed: {err}"))
                .into_raw()
                .cast_const(),
        };
        unsafe {
            callback(user_data.into(), fail);
        }
    });
}
|
|
518
|
+
|
|
519
|
+
/// Replaces the worker's underlying client with `new_client`'s client.
#[unsafe(no_mangle)]
pub extern "C" fn temporal_core_worker_replace_client(
    worker: *mut Worker,
    new_client: *mut Client,
) {
    // SAFETY: caller guarantees both pointers are valid for the call duration.
    let worker = unsafe { &*worker };
    let core_worker = worker.worker.as_ref().expect("missing worker").clone();
    let client = unsafe { &*new_client };
    core_worker.replace_client(client.core.get_client().clone());
}
|
|
529
|
+
|
|
530
|
+
/// If success or fail are present, they must be freed. They will both be null
/// if this is a result of a poll shutdown.
pub type WorkerPollCallback = unsafe extern "C" fn(
    user_data: *mut libc::c_void,
    success: *const ByteArray,
    fail: *const ByteArray,
);

/// Polls for the next workflow activation and delivers it to `callback` as a
/// proto-encoded byte array; shutdown yields two null pointers.
#[unsafe(no_mangle)]
pub extern "C" fn temporal_core_worker_poll_workflow_activation(
    worker: *mut Worker,
    user_data: *mut libc::c_void,
    callback: WorkerPollCallback,
) {
    // SAFETY: caller guarantees a valid worker pointer for the call duration.
    let worker = unsafe { &*worker };
    let user_data = UserDataHandle(user_data);
    let core_worker = worker.worker.as_ref().unwrap().clone();
    worker.runtime.core.tokio_handle().spawn(async move {
        let (success, fail) = match core_worker.poll_workflow_activation().await {
            Ok(act) => (
                ByteArray::from_vec(act.encode_to_vec())
                    .into_raw()
                    .cast_const(),
                std::ptr::null(),
            ),
            // Graceful shutdown: both pointers null by contract above.
            Err(PollError::ShutDown) => (std::ptr::null(), std::ptr::null()),
            Err(err) => (
                std::ptr::null(),
                worker
                    .runtime
                    .clone()
                    .alloc_utf8(&format!("Poll failure: {err}"))
                    .into_raw()
                    .cast_const(),
            ),
        };
        unsafe {
            callback(user_data.into(), success, fail);
        }
    });
}
|
|
571
|
+
|
|
572
|
+
/// Polls for the next activity task and delivers it to `callback` as a
/// proto-encoded byte array; shutdown yields two null pointers.
#[unsafe(no_mangle)]
pub extern "C" fn temporal_core_worker_poll_activity_task(
    worker: *mut Worker,
    user_data: *mut libc::c_void,
    callback: WorkerPollCallback,
) {
    // SAFETY: caller guarantees a valid worker pointer for the call duration.
    let worker = unsafe { &*worker };
    let user_data = UserDataHandle(user_data);
    let core_worker = worker.worker.as_ref().unwrap().clone();
    worker.runtime.core.tokio_handle().spawn(async move {
        let (success, fail) = match core_worker.poll_activity_task().await {
            Ok(act) => (
                ByteArray::from_vec(act.encode_to_vec())
                    .into_raw()
                    .cast_const(),
                std::ptr::null(),
            ),
            // Graceful shutdown: both pointers null by contract.
            Err(PollError::ShutDown) => (std::ptr::null(), std::ptr::null()),
            Err(err) => (
                std::ptr::null(),
                worker
                    .runtime
                    .clone()
                    .alloc_utf8(&format!("Poll failure: {err}"))
                    .into_raw()
                    .cast_const(),
            ),
        };
        unsafe {
            callback(user_data.into(), success, fail);
        }
    });
}
|
|
605
|
+
|
|
606
|
+
/// Completes a workflow activation from a proto-encoded
/// `WorkflowActivationCompletion`. Decode errors invoke `callback`
/// synchronously; otherwise completion happens on the runtime's tokio handle.
#[unsafe(no_mangle)]
pub extern "C" fn temporal_core_worker_complete_workflow_activation(
    worker: *mut Worker,
    completion: ByteArrayRef,
    user_data: *mut libc::c_void,
    callback: WorkerCallback,
) {
    // SAFETY: caller guarantees a valid worker pointer for the call duration.
    let worker = unsafe { &*worker };
    let completion = match WorkflowActivationCompletion::decode(completion.to_slice()) {
        Ok(completion) => completion,
        Err(err) => {
            // Report the decode failure immediately on the calling thread.
            unsafe {
                callback(
                    user_data,
                    worker
                        .runtime
                        .clone()
                        .alloc_utf8(&format!("Decode failure: {err}"))
                        .into_raw(),
                );
            }
            return;
        }
    };
    let user_data = UserDataHandle(user_data);
    let core_worker = worker.worker.as_ref().unwrap().clone();
    worker.runtime.core.tokio_handle().spawn(async move {
        let fail = match core_worker.complete_workflow_activation(completion).await {
            Ok(_) => std::ptr::null(),
            Err(err) => worker
                .runtime
                .clone()
                .alloc_utf8(&format!("Completion failure: {err}"))
                .into_raw()
                .cast_const(),
        };
        unsafe {
            callback(user_data.into(), fail);
        }
    });
}
|
|
647
|
+
|
|
648
|
+
/// Completes an activity task from a proto-encoded `ActivityTaskCompletion`.
/// Decode errors invoke `callback` synchronously; otherwise completion
/// happens on the runtime's tokio handle.
#[unsafe(no_mangle)]
pub extern "C" fn temporal_core_worker_complete_activity_task(
    worker: *mut Worker,
    completion: ByteArrayRef,
    user_data: *mut libc::c_void,
    callback: WorkerCallback,
) {
    // SAFETY: caller guarantees a valid worker pointer for the call duration.
    let worker = unsafe { &*worker };
    let completion = match ActivityTaskCompletion::decode(completion.to_slice()) {
        Ok(completion) => completion,
        Err(err) => {
            // Report the decode failure immediately on the calling thread.
            unsafe {
                callback(
                    user_data,
                    worker
                        .runtime
                        .clone()
                        .alloc_utf8(&format!("Decode failure: {err}"))
                        .into_raw(),
                );
            }
            return;
        }
    };
    let user_data = UserDataHandle(user_data);
    let core_worker = worker.worker.as_ref().unwrap().clone();
    worker.runtime.core.tokio_handle().spawn(async move {
        let fail = match core_worker.complete_activity_task(completion).await {
            Ok(_) => std::ptr::null(),
            Err(err) => worker
                .runtime
                .clone()
                .alloc_utf8(&format!("Completion failure: {err}"))
                .into_raw()
                .cast_const(),
        };
        unsafe {
            callback(user_data.into(), fail);
        }
    });
}
|
|
689
|
+
|
|
690
|
+
/// Returns error if any. Must be freed if returned.
|
|
691
|
+
#[unsafe(no_mangle)]
|
|
692
|
+
pub extern "C" fn temporal_core_worker_record_activity_heartbeat(
|
|
693
|
+
worker: *mut Worker,
|
|
694
|
+
heartbeat: ByteArrayRef,
|
|
695
|
+
) -> *const ByteArray {
|
|
696
|
+
let worker = unsafe { &*worker };
|
|
697
|
+
enter_sync!(worker.runtime);
|
|
698
|
+
match ActivityHeartbeat::decode(heartbeat.to_slice()) {
|
|
699
|
+
Ok(heartbeat) => {
|
|
700
|
+
worker
|
|
701
|
+
.worker
|
|
702
|
+
.as_ref()
|
|
703
|
+
.unwrap()
|
|
704
|
+
.record_activity_heartbeat(heartbeat);
|
|
705
|
+
std::ptr::null()
|
|
706
|
+
}
|
|
707
|
+
Err(err) => worker
|
|
708
|
+
.runtime
|
|
709
|
+
.clone()
|
|
710
|
+
.alloc_utf8(&format!("Decode failure: {err}"))
|
|
711
|
+
.into_raw(),
|
|
712
|
+
}
|
|
713
|
+
}
|
|
714
|
+
|
|
715
|
+
#[unsafe(no_mangle)]
|
|
716
|
+
pub extern "C" fn temporal_core_worker_request_workflow_eviction(
|
|
717
|
+
worker: *mut Worker,
|
|
718
|
+
run_id: ByteArrayRef,
|
|
719
|
+
) {
|
|
720
|
+
let worker = unsafe { &*worker };
|
|
721
|
+
enter_sync!(worker.runtime);
|
|
722
|
+
worker
|
|
723
|
+
.worker
|
|
724
|
+
.as_ref()
|
|
725
|
+
.unwrap()
|
|
726
|
+
.request_workflow_eviction(run_id.to_str());
|
|
727
|
+
}
|
|
728
|
+
|
|
729
|
+
#[unsafe(no_mangle)]
|
|
730
|
+
pub extern "C" fn temporal_core_worker_initiate_shutdown(worker: *mut Worker) {
|
|
731
|
+
let worker = unsafe { &*worker };
|
|
732
|
+
worker.worker.as_ref().unwrap().initiate_shutdown();
|
|
733
|
+
}
|
|
734
|
+
|
|
735
|
+
/// Finalizes shutdown of a worker. Invokes `callback` exactly once with a
/// null pointer on success or an allocated error message if the worker is
/// still referenced elsewhere (e.g. a poll or completion is outstanding).
#[unsafe(no_mangle)]
pub extern "C" fn temporal_core_worker_finalize_shutdown(
    worker: *mut Worker,
    user_data: *mut libc::c_void,
    callback: WorkerCallback,
) {
    let worker = unsafe { &mut *worker };
    // Wrap the raw user-data pointer so it can be moved into the async task.
    let user_data = UserDataHandle(user_data);
    worker.runtime.core.tokio_handle().spawn(async move {
        // Take the worker out of the option and leave None. This should be the
        // only reference remaining to the worker so try_unwrap will work.
        let core_worker = match Arc::try_unwrap(worker.worker.take().unwrap()) {
            Ok(core_worker) => core_worker,
            Err(arc) => {
                // NOTE(review): on this failure path the Arc taken out of
                // `worker.worker` is dropped rather than restored, so a later
                // call into this worker would panic on the `unwrap` above —
                // confirm callers treat this error as terminal for the worker.
                unsafe {
                    callback(
                        user_data.into(),
                        worker
                            .runtime
                            .clone()
                            .alloc_utf8(&format!(
                                "Cannot finalize, expected 1 reference, got {}",
                                Arc::strong_count(&arc)
                            ))
                            .into_raw(),
                    );
                }
                return;
            }
        };
        core_worker.finalize_shutdown().await;
        unsafe {
            callback(user_data.into(), std::ptr::null());
        }
    });
}
|
|
771
|
+
|
|
772
|
+
/// Creates a replay worker plus a pusher used to feed it histories.
///
/// On success, `worker` and `worker_replay_pusher` are heap-allocated (via
/// `Box::into_raw`) and `fail` is null; on failure both pointers are null and
/// `fail` holds an allocated error message. NOTE(review): the returned
/// pointers presumably must be freed through the corresponding `_free`
/// functions in this bridge — confirm.
#[unsafe(no_mangle)]
pub extern "C" fn temporal_core_worker_replayer_new(
    runtime: *mut Runtime,
    options: *const WorkerOptions,
) -> WorkerReplayerOrFail {
    let runtime = unsafe { &mut *runtime };
    enter_sync!(runtime);
    let options = unsafe { &*options };

    let (worker, worker_replay_pusher, fail) = match options.try_into() {
        // Option conversion failed: report without touching core.
        Err(err) => (
            std::ptr::null_mut(),
            std::ptr::null_mut(),
            runtime
                .alloc_utf8(&format!("Invalid options: {err}"))
                .into_raw()
                .cast_const(),
        ),
        Ok(config) => {
            // Bounded channel of 1: the pusher applies backpressure so only
            // one history is in flight toward the replay worker at a time.
            let (tx, rx) = channel(1);
            match temporal_sdk_core::init_replay_worker(ReplayWorkerInput::new(
                config,
                ReceiverStream::new(rx),
            )) {
                Err(err) => (
                    std::ptr::null_mut(),
                    std::ptr::null_mut(),
                    runtime
                        .alloc_utf8(&format!("Worker replay init failed: {err}"))
                        .into_raw()
                        .cast_const(),
                ),
                Ok(worker) => (
                    Box::into_raw(Box::new(Worker {
                        worker: Some(Arc::new(worker)),
                        runtime: runtime.clone(),
                    })),
                    Box::into_raw(Box::new(WorkerReplayPusher { tx })),
                    std::ptr::null(),
                ),
            }
        }
    };
    WorkerReplayerOrFail {
        worker,
        worker_replay_pusher,
        fail,
    }
}
|
|
821
|
+
|
|
822
|
+
#[unsafe(no_mangle)]
|
|
823
|
+
pub extern "C" fn temporal_core_worker_replay_pusher_free(
|
|
824
|
+
worker_replay_pusher: *mut WorkerReplayPusher,
|
|
825
|
+
) {
|
|
826
|
+
unsafe {
|
|
827
|
+
let _ = Box::from_raw(worker_replay_pusher);
|
|
828
|
+
}
|
|
829
|
+
}
|
|
830
|
+
|
|
831
|
+
#[unsafe(no_mangle)]
|
|
832
|
+
pub extern "C" fn temporal_core_worker_replay_push(
|
|
833
|
+
worker: *mut Worker,
|
|
834
|
+
worker_replay_pusher: *mut WorkerReplayPusher,
|
|
835
|
+
workflow_id: ByteArrayRef,
|
|
836
|
+
history: ByteArrayRef,
|
|
837
|
+
) -> WorkerReplayPushResult {
|
|
838
|
+
let worker = unsafe { &mut *worker };
|
|
839
|
+
let worker_replay_pusher = unsafe { &*worker_replay_pusher };
|
|
840
|
+
let workflow_id = workflow_id.to_string();
|
|
841
|
+
match History::decode(history.to_slice()) {
|
|
842
|
+
Err(err) => {
|
|
843
|
+
return WorkerReplayPushResult {
|
|
844
|
+
fail: worker
|
|
845
|
+
.runtime
|
|
846
|
+
.alloc_utf8(&format!("Worker replay init failed: {err}"))
|
|
847
|
+
.into_raw()
|
|
848
|
+
.cast_const(),
|
|
849
|
+
};
|
|
850
|
+
}
|
|
851
|
+
Ok(history) => worker.runtime.core.tokio_handle().spawn(async move {
|
|
852
|
+
// Intentionally ignoring error here
|
|
853
|
+
let _ = worker_replay_pusher
|
|
854
|
+
.tx
|
|
855
|
+
.send(HistoryForReplay::new(history, workflow_id))
|
|
856
|
+
.await;
|
|
857
|
+
}),
|
|
858
|
+
};
|
|
859
|
+
WorkerReplayPushResult {
|
|
860
|
+
fail: std::ptr::null(),
|
|
861
|
+
}
|
|
862
|
+
}
|
|
863
|
+
|
|
864
|
+
/// Completes an asynchronous slot reservation by sending a permit (tagged
/// with `permit_id` as user data) through the oneshot sender that was handed
/// to the foreign slot supplier.
#[unsafe(no_mangle)]
pub extern "C" fn temporal_core_complete_async_reserve(
    sender: *mut libc::c_void,
    permit_id: usize,
) {
    if !sender.is_null() {
        unsafe {
            // Reclaim ownership of the boxed sender created on the Rust side.
            let sender = Box::from_raw(sender as *mut oneshot::Sender<SlotSupplierPermit>);
            let permit = SlotSupplierPermit::with_user_data(permit_id);
            // Send result ignored: the receiver may already have been dropped
            // (e.g. reservation cancelled), which is fine here.
            let _ = sender.send(permit);
        }
    } else {
        // NOTE(review): panicking across an extern "C" boundary aborts (or is
        // UB on older toolchains) — confirm an abort is the intended contract
        // for a null sender rather than returning an error.
        panic!("ReserveSlot sender must not be null!");
    }
}
|
|
879
|
+
|
|
880
|
+
#[unsafe(no_mangle)]
|
|
881
|
+
pub extern "C" fn temporal_core_set_reserve_cancel_target(
|
|
882
|
+
ctx: *mut SlotReserveCtx,
|
|
883
|
+
token_ptr: *mut libc::c_void,
|
|
884
|
+
) {
|
|
885
|
+
if let Some(ctx) = unsafe { ctx.as_mut() } {
|
|
886
|
+
ctx.token_src = token_ptr;
|
|
887
|
+
}
|
|
888
|
+
}
|
|
889
|
+
|
|
890
|
+
/// Converts the C-API worker options into core's `WorkerConfig`, validating
/// tuner, versioning-strategy, and poller-behavior sub-options along the way.
impl TryFrom<&WorkerOptions> for temporal_sdk_core::WorkerConfig {
    type Error = anyhow::Error;

    fn try_from(opt: &WorkerOptions) -> anyhow::Result<Self> {
        // Convert the tuner first so its validation errors surface before any
        // builder work happens.
        let converted_tuner: temporal_sdk_core::TunerHolder = (&opt.tuner).try_into()?;
        WorkerConfigBuilder::default()
            .namespace(opt.namespace.to_str())
            .task_queue(opt.task_queue.to_str())
            .versioning_strategy({
                match &opt.versioning_strategy {
                    WorkerVersioningStrategy::None(n) => {
                        temporal_sdk_core_api::worker::WorkerVersioningStrategy::None {
                            build_id: n.build_id.to_string(),
                        }
                    }
                    WorkerVersioningStrategy::DeploymentBased(dopts) => {
                        // The default versioning behavior arrives as a raw
                        // value; reject anything that doesn't convert.
                        let dvb = if let Ok(v) = dopts.default_versioning_behavior.try_into() {
                            Some(v)
                        } else {
                            bail!("Invalid default versioning behavior {}", dopts.default_versioning_behavior)
                        };
                        temporal_sdk_core_api::worker::WorkerVersioningStrategy::WorkerDeploymentBased(
                            temporal_sdk_core_api::worker::WorkerDeploymentOptions {
                                version: temporal_sdk_core_api::worker::WorkerDeploymentVersion {
                                    deployment_name: dopts.version.deployment_name.to_string(),
                                    build_id: dopts.version.build_id.to_string(),
                                },
                                use_worker_versioning: dopts.use_worker_versioning,
                                default_versioning_behavior: dvb,
                            }
                        )
                    }
                    WorkerVersioningStrategy::LegacyBuildIdBased(l) => {
                        temporal_sdk_core_api::worker::WorkerVersioningStrategy::LegacyBuildIdBased {
                            build_id: l.build_id.to_string(),
                        }
                    }
                }
            })
            .client_identity_override(opt.identity_override.to_option_string())
            .max_cached_workflows(opt.max_cached_workflows as usize)
            .tuner(Arc::new(converted_tuner))
            .no_remote_activities(opt.no_remote_activities)
            .sticky_queue_schedule_to_start_timeout(Duration::from_millis(
                opt.sticky_queue_schedule_to_start_timeout_millis,
            ))
            .max_heartbeat_throttle_interval(Duration::from_millis(
                opt.max_heartbeat_throttle_interval_millis,
            ))
            .default_heartbeat_throttle_interval(Duration::from_millis(
                opt.default_heartbeat_throttle_interval_millis,
            ))
            // 0.0 is mapped to None here — presumably meaning "no rate
            // limit"; confirm against core's interpretation of None.
            .max_worker_activities_per_second(if opt.max_activities_per_second == 0.0 {
                None
            } else {
                Some(opt.max_activities_per_second)
            })
            .max_task_queue_activities_per_second(
                if opt.max_task_queue_activities_per_second == 0.0 {
                    None
                } else {
                    Some(opt.max_task_queue_activities_per_second)
                },
            )
            // Even though grace period is optional, if it is not set then the
            // auto-cancel-activity behavior or shutdown will not occur, so we
            // always set it even if 0.
            .graceful_shutdown_period(Duration::from_millis(opt.graceful_shutdown_period_millis))
            .workflow_task_poller_behavior(temporal_sdk_core_api::worker::PollerBehavior::try_from(&opt.workflow_task_poller_behavior)?)
            .nonsticky_to_sticky_poll_ratio(opt.nonsticky_to_sticky_poll_ratio)
            .activity_task_poller_behavior(temporal_sdk_core_api::worker::PollerBehavior::try_from(&opt.activity_task_poller_behavior)?)
            // Global toggle: treat nondeterminism as workflow failure.
            .workflow_failure_errors(if opt.nondeterminism_as_workflow_fail {
                HashSet::from([WorkflowErrorType::Nondeterminism])
            } else {
                HashSet::new()
            })
            // Per-workflow-type version of the same toggle.
            .workflow_types_to_failure_errors(
                opt.nondeterminism_as_workflow_fail_for_types
                    .to_str_vec()
                    .into_iter()
                    .map(|s| {
                        (
                            s.to_owned(),
                            HashSet::from([WorkflowErrorType::Nondeterminism]),
                        )
                    })
                    .collect::<HashMap<String, HashSet<WorkflowErrorType>>>(),
            )
            .build()
            .map_err(|err| anyhow::anyhow!(err))
    }
}
|
|
982
|
+
|
|
983
|
+
/// Converts the C-API tuner description into core's `TunerHolder`, enforcing
/// that every resource-based slot supplier (workflow / activity / local
/// activity) shares identical resource-tuner options.
impl TryFrom<&TunerHolder> for temporal_sdk_core::TunerHolder {
    type Error = anyhow::Error;

    fn try_from(holder: &TunerHolder) -> anyhow::Result<Self> {
        // Verify all resource-based options are the same if any are set
        let maybe_wf_resource_opts =
            if let SlotSupplier::ResourceBased(ref ss) = holder.workflow_slot_supplier {
                Some(&ss.tuner_options)
            } else {
                None
            };
        let maybe_act_resource_opts =
            if let SlotSupplier::ResourceBased(ref ss) = holder.activity_slot_supplier {
                Some(&ss.tuner_options)
            } else {
                None
            };
        let maybe_local_act_resource_opts =
            if let SlotSupplier::ResourceBased(ref ss) = holder.local_activity_slot_supplier {
                Some(&ss.tuner_options)
            } else {
                None
            };
        let all_resource_opts = [
            maybe_wf_resource_opts,
            maybe_act_resource_opts,
            maybe_local_act_resource_opts,
        ];
        // `flatten` skips the Nones; compare the remaining options pairwise
        // against the first one found.
        let mut set_resource_opts = all_resource_opts.iter().flatten();
        let first = set_resource_opts.next();
        let all_are_same = if let Some(first) = first {
            set_resource_opts.all(|elem| elem == first)
        } else {
            // No resource-based suppliers at all: nothing to disagree.
            true
        };
        if !all_are_same {
            bail!("All resource-based slot suppliers must have the same ResourceBasedTunerOptions",);
        }

        let mut options = temporal_sdk_core::TunerHolderOptionsBuilder::default();
        // Only configure resource-based options when at least one supplier
        // actually uses them.
        if let Some(first) = first {
            options.resource_based_options(
                temporal_sdk_core::ResourceBasedSlotsOptionsBuilder::default()
                    .target_mem_usage(first.target_memory_usage)
                    .target_cpu_usage(first.target_cpu_usage)
                    .build()
                    .expect("Building ResourceBasedSlotsOptions is infallible"),
            );
        };
        options
            .workflow_slot_options(holder.workflow_slot_supplier.try_into()?)
            .activity_slot_options(holder.activity_slot_supplier.try_into()?)
            .local_activity_slot_options(holder.local_activity_slot_supplier.try_into()?)
            .build()
            .context("Invalid tuner holder options")?
            .build_tuner_holder()
            .context("Failed building tuner holder")
    }
}
|
|
1042
|
+
|
|
1043
|
+
impl<SK: SlotKind + Send + Sync + 'static> TryFrom<SlotSupplier>
|
|
1044
|
+
for temporal_sdk_core::SlotSupplierOptions<SK>
|
|
1045
|
+
{
|
|
1046
|
+
type Error = anyhow::Error;
|
|
1047
|
+
|
|
1048
|
+
fn try_from(
|
|
1049
|
+
supplier: SlotSupplier,
|
|
1050
|
+
) -> anyhow::Result<temporal_sdk_core::SlotSupplierOptions<SK>> {
|
|
1051
|
+
Ok(match supplier {
|
|
1052
|
+
SlotSupplier::FixedSize(fs) => temporal_sdk_core::SlotSupplierOptions::FixedSize {
|
|
1053
|
+
slots: fs.num_slots,
|
|
1054
|
+
},
|
|
1055
|
+
SlotSupplier::ResourceBased(ss) => {
|
|
1056
|
+
temporal_sdk_core::SlotSupplierOptions::ResourceBased(
|
|
1057
|
+
temporal_sdk_core::ResourceSlotOptions::new(
|
|
1058
|
+
ss.minimum_slots,
|
|
1059
|
+
ss.maximum_slots,
|
|
1060
|
+
Duration::from_millis(ss.ramp_throttle_ms),
|
|
1061
|
+
),
|
|
1062
|
+
)
|
|
1063
|
+
}
|
|
1064
|
+
SlotSupplier::Custom(cs) => {
|
|
1065
|
+
temporal_sdk_core::SlotSupplierOptions::Custom(cs.into_ss())
|
|
1066
|
+
}
|
|
1067
|
+
})
|
|
1068
|
+
}
|
|
1069
|
+
}
|