temporalio 0.0.2 → 0.1.0
This diff shows the changes between publicly released versions of this package, as they appear in their respective public registries. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/README.md +25 -23
- data/bridge/Cargo.lock +168 -59
- data/bridge/Cargo.toml +4 -2
- data/bridge/sdk-core/README.md +19 -6
- data/bridge/sdk-core/client/src/lib.rs +215 -39
- data/bridge/sdk-core/client/src/metrics.rs +17 -8
- data/bridge/sdk-core/client/src/raw.rs +4 -4
- data/bridge/sdk-core/client/src/retry.rs +32 -20
- data/bridge/sdk-core/core/Cargo.toml +22 -9
- data/bridge/sdk-core/core/src/abstractions.rs +203 -14
- data/bridge/sdk-core/core/src/core_tests/activity_tasks.rs +76 -41
- data/bridge/sdk-core/core/src/core_tests/determinism.rs +165 -2
- data/bridge/sdk-core/core/src/core_tests/local_activities.rs +204 -83
- data/bridge/sdk-core/core/src/core_tests/queries.rs +3 -4
- data/bridge/sdk-core/core/src/core_tests/workers.rs +1 -3
- data/bridge/sdk-core/core/src/core_tests/workflow_tasks.rs +397 -54
- data/bridge/sdk-core/core/src/ephemeral_server/mod.rs +106 -12
- data/bridge/sdk-core/core/src/internal_flags.rs +136 -0
- data/bridge/sdk-core/core/src/lib.rs +16 -9
- data/bridge/sdk-core/core/src/telemetry/log_export.rs +1 -1
- data/bridge/sdk-core/core/src/telemetry/metrics.rs +69 -35
- data/bridge/sdk-core/core/src/telemetry/mod.rs +29 -13
- data/bridge/sdk-core/core/src/telemetry/prometheus_server.rs +17 -12
- data/bridge/sdk-core/core/src/test_help/mod.rs +62 -12
- data/bridge/sdk-core/core/src/worker/activities/activity_heartbeat_manager.rs +112 -156
- data/bridge/sdk-core/core/src/worker/activities/activity_task_poller_stream.rs +89 -0
- data/bridge/sdk-core/core/src/worker/activities/local_activities.rs +352 -122
- data/bridge/sdk-core/core/src/worker/activities.rs +233 -157
- data/bridge/sdk-core/core/src/worker/client/mocks.rs +22 -2
- data/bridge/sdk-core/core/src/worker/client.rs +18 -2
- data/bridge/sdk-core/core/src/worker/mod.rs +165 -58
- data/bridge/sdk-core/core/src/worker/workflow/bridge.rs +1 -3
- data/bridge/sdk-core/core/src/worker/workflow/driven_workflow.rs +3 -5
- data/bridge/sdk-core/core/src/worker/workflow/history_update.rs +856 -277
- data/bridge/sdk-core/core/src/worker/workflow/machines/activity_state_machine.rs +100 -43
- data/bridge/sdk-core/core/src/worker/workflow/machines/cancel_external_state_machine.rs +7 -7
- data/bridge/sdk-core/core/src/worker/workflow/machines/cancel_workflow_state_machine.rs +5 -4
- data/bridge/sdk-core/core/src/worker/workflow/machines/child_workflow_state_machine.rs +87 -27
- data/bridge/sdk-core/core/src/worker/workflow/machines/complete_workflow_state_machine.rs +5 -4
- data/bridge/sdk-core/core/src/worker/workflow/machines/continue_as_new_workflow_state_machine.rs +5 -4
- data/bridge/sdk-core/core/src/worker/workflow/machines/fail_workflow_state_machine.rs +5 -4
- data/bridge/sdk-core/core/src/worker/workflow/machines/local_activity_state_machine.rs +137 -62
- data/bridge/sdk-core/core/src/worker/workflow/machines/mod.rs +25 -17
- data/bridge/sdk-core/core/src/worker/workflow/machines/modify_workflow_properties_state_machine.rs +7 -6
- data/bridge/sdk-core/core/src/worker/workflow/machines/patch_state_machine.rs +103 -152
- data/bridge/sdk-core/core/src/worker/workflow/machines/signal_external_state_machine.rs +7 -7
- data/bridge/sdk-core/core/src/worker/workflow/machines/timer_state_machine.rs +9 -9
- data/bridge/sdk-core/core/src/worker/workflow/machines/transition_coverage.rs +2 -2
- data/bridge/sdk-core/core/src/worker/workflow/machines/upsert_search_attributes_state_machine.rs +14 -7
- data/bridge/sdk-core/core/src/worker/workflow/machines/workflow_machines/local_acts.rs +5 -16
- data/bridge/sdk-core/core/src/worker/workflow/machines/workflow_machines.rs +201 -121
- data/bridge/sdk-core/core/src/worker/workflow/machines/workflow_task_state_machine.rs +11 -14
- data/bridge/sdk-core/core/src/worker/workflow/managed_run/managed_wf_test.rs +30 -15
- data/bridge/sdk-core/core/src/worker/workflow/managed_run.rs +1026 -376
- data/bridge/sdk-core/core/src/worker/workflow/mod.rs +460 -384
- data/bridge/sdk-core/core/src/worker/workflow/run_cache.rs +40 -57
- data/bridge/sdk-core/core/src/worker/workflow/wft_extraction.rs +125 -0
- data/bridge/sdk-core/core/src/worker/workflow/wft_poller.rs +1 -4
- data/bridge/sdk-core/core/src/worker/workflow/workflow_stream/saved_wf_inputs.rs +117 -0
- data/bridge/sdk-core/core/src/worker/workflow/workflow_stream/tonic_status_serde.rs +24 -0
- data/bridge/sdk-core/core/src/worker/workflow/workflow_stream.rs +448 -718
- data/bridge/sdk-core/core-api/Cargo.toml +2 -1
- data/bridge/sdk-core/core-api/src/errors.rs +1 -34
- data/bridge/sdk-core/core-api/src/lib.rs +6 -2
- data/bridge/sdk-core/core-api/src/telemetry.rs +0 -6
- data/bridge/sdk-core/core-api/src/worker.rs +14 -1
- data/bridge/sdk-core/fsm/rustfsm_procmacro/src/lib.rs +18 -15
- data/bridge/sdk-core/fsm/rustfsm_trait/src/lib.rs +8 -3
- data/bridge/sdk-core/histories/evict_while_la_running_no_interference-16_history.bin +0 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/command/v1/message.proto +5 -17
- data/bridge/sdk-core/protos/api_upstream/temporal/api/common/v1/message.proto +11 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/command_type.proto +1 -6
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/event_type.proto +6 -6
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/failed_cause.proto +5 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/update.proto +22 -6
- data/bridge/sdk-core/protos/api_upstream/temporal/api/history/v1/message.proto +48 -19
- data/bridge/sdk-core/protos/api_upstream/temporal/api/namespace/v1/message.proto +2 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/operatorservice/v1/request_response.proto +3 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/{enums/v1/interaction_type.proto → protocol/v1/message.proto} +29 -11
- data/bridge/sdk-core/protos/api_upstream/temporal/api/sdk/v1/task_complete_metadata.proto +63 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/update/v1/message.proto +111 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/workflowservice/v1/request_response.proto +59 -28
- data/bridge/sdk-core/protos/api_upstream/temporal/api/workflowservice/v1/service.proto +2 -2
- data/bridge/sdk-core/protos/local/temporal/sdk/core/activity_result/activity_result.proto +1 -0
- data/bridge/sdk-core/protos/local/temporal/sdk/core/activity_task/activity_task.proto +1 -0
- data/bridge/sdk-core/protos/local/temporal/sdk/core/child_workflow/child_workflow.proto +1 -0
- data/bridge/sdk-core/protos/local/temporal/sdk/core/common/common.proto +1 -0
- data/bridge/sdk-core/protos/local/temporal/sdk/core/core_interface.proto +1 -0
- data/bridge/sdk-core/protos/local/temporal/sdk/core/external_data/external_data.proto +1 -0
- data/bridge/sdk-core/protos/local/temporal/sdk/core/workflow_activation/workflow_activation.proto +7 -0
- data/bridge/sdk-core/protos/local/temporal/sdk/core/workflow_commands/workflow_commands.proto +1 -0
- data/bridge/sdk-core/protos/local/temporal/sdk/core/workflow_completion/workflow_completion.proto +6 -0
- data/bridge/sdk-core/sdk/Cargo.toml +3 -2
- data/bridge/sdk-core/sdk/src/lib.rs +87 -20
- data/bridge/sdk-core/sdk/src/workflow_future.rs +9 -8
- data/bridge/sdk-core/sdk-core-protos/Cargo.toml +5 -2
- data/bridge/sdk-core/sdk-core-protos/build.rs +36 -1
- data/bridge/sdk-core/sdk-core-protos/src/history_builder.rs +100 -87
- data/bridge/sdk-core/sdk-core-protos/src/history_info.rs +5 -1
- data/bridge/sdk-core/sdk-core-protos/src/lib.rs +175 -57
- data/bridge/sdk-core/sdk-core-protos/src/task_token.rs +12 -2
- data/bridge/sdk-core/test-utils/Cargo.toml +3 -1
- data/bridge/sdk-core/test-utils/src/canned_histories.rs +106 -296
- data/bridge/sdk-core/test-utils/src/histfetch.rs +1 -1
- data/bridge/sdk-core/test-utils/src/lib.rs +82 -23
- data/bridge/sdk-core/test-utils/src/wf_input_saver.rs +50 -0
- data/bridge/sdk-core/test-utils/src/workflows.rs +29 -0
- data/bridge/sdk-core/tests/fuzzy_workflow.rs +130 -0
- data/bridge/sdk-core/tests/{load_tests.rs → heavy_tests.rs} +125 -51
- data/bridge/sdk-core/tests/integ_tests/ephemeral_server_tests.rs +25 -3
- data/bridge/sdk-core/tests/integ_tests/heartbeat_tests.rs +5 -3
- data/bridge/sdk-core/tests/integ_tests/metrics_tests.rs +218 -16
- data/bridge/sdk-core/tests/integ_tests/polling_tests.rs +4 -47
- data/bridge/sdk-core/tests/integ_tests/queries_tests.rs +5 -128
- data/bridge/sdk-core/tests/integ_tests/visibility_tests.rs +83 -25
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/activities.rs +93 -69
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/cancel_external.rs +1 -0
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/cancel_wf.rs +6 -13
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/child_workflows.rs +1 -0
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/continue_as_new.rs +6 -2
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/determinism.rs +3 -10
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/local_activities.rs +72 -191
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/modify_wf_properties.rs +1 -0
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/patches.rs +7 -28
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/replay.rs +12 -7
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/resets.rs +1 -0
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/signals.rs +18 -14
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/stickyness.rs +6 -20
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/timers.rs +10 -21
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/upsert_search_attrs.rs +6 -4
- data/bridge/sdk-core/tests/integ_tests/workflow_tests.rs +10 -11
- data/bridge/sdk-core/tests/main.rs +3 -13
- data/bridge/sdk-core/tests/runner.rs +75 -36
- data/bridge/sdk-core/tests/wf_input_replay.rs +32 -0
- data/bridge/src/connection.rs +41 -25
- data/bridge/src/lib.rs +269 -14
- data/bridge/src/runtime.rs +1 -1
- data/bridge/src/test_server.rs +153 -0
- data/bridge/src/worker.rs +89 -16
- data/lib/gen/temporal/api/command/v1/message_pb.rb +4 -18
- data/lib/gen/temporal/api/common/v1/message_pb.rb +4 -0
- data/lib/gen/temporal/api/enums/v1/command_type_pb.rb +1 -3
- data/lib/gen/temporal/api/enums/v1/event_type_pb.rb +3 -3
- data/lib/gen/temporal/api/enums/v1/failed_cause_pb.rb +2 -0
- data/lib/gen/temporal/api/enums/v1/update_pb.rb +6 -4
- data/lib/gen/temporal/api/history/v1/message_pb.rb +27 -19
- data/lib/gen/temporal/api/namespace/v1/message_pb.rb +1 -0
- data/lib/gen/temporal/api/operatorservice/v1/request_response_pb.rb +3 -0
- data/lib/gen/temporal/api/protocol/v1/message_pb.rb +30 -0
- data/lib/gen/temporal/api/sdk/v1/task_complete_metadata_pb.rb +23 -0
- data/lib/gen/temporal/api/testservice/v1/request_response_pb.rb +49 -0
- data/lib/gen/temporal/api/testservice/v1/service_pb.rb +21 -0
- data/lib/gen/temporal/api/update/v1/message_pb.rb +72 -0
- data/lib/gen/temporal/api/workflowservice/v1/request_response_pb.rb +26 -16
- data/lib/gen/temporal/sdk/core/activity_result/activity_result_pb.rb +13 -9
- data/lib/gen/temporal/sdk/core/activity_task/activity_task_pb.rb +10 -6
- data/lib/gen/temporal/sdk/core/child_workflow/child_workflow_pb.rb +13 -9
- data/lib/gen/temporal/sdk/core/common/common_pb.rb +7 -3
- data/lib/gen/temporal/sdk/core/core_interface_pb.rb +9 -3
- data/lib/gen/temporal/sdk/core/external_data/external_data_pb.rb +7 -3
- data/lib/gen/temporal/sdk/core/workflow_activation/workflow_activation_pb.rb +27 -21
- data/lib/gen/temporal/sdk/core/workflow_commands/workflow_commands_pb.rb +28 -24
- data/lib/gen/temporal/sdk/core/workflow_completion/workflow_completion_pb.rb +12 -5
- data/lib/temporalio/activity/context.rb +13 -8
- data/lib/temporalio/activity/info.rb +1 -1
- data/lib/temporalio/bridge/connect_options.rb +15 -0
- data/lib/temporalio/bridge/retry_config.rb +24 -0
- data/lib/temporalio/bridge/tls_options.rb +19 -0
- data/lib/temporalio/client/implementation.rb +8 -8
- data/lib/temporalio/connection/retry_config.rb +44 -0
- data/lib/temporalio/connection/service.rb +20 -0
- data/lib/temporalio/connection/test_service.rb +92 -0
- data/lib/temporalio/connection/tls_options.rb +51 -0
- data/lib/temporalio/connection/workflow_service.rb +731 -0
- data/lib/temporalio/connection.rb +55 -720
- data/lib/temporalio/interceptor/activity_inbound.rb +22 -0
- data/lib/temporalio/interceptor/activity_outbound.rb +24 -0
- data/lib/temporalio/interceptor/chain.rb +5 -5
- data/lib/temporalio/interceptor/client.rb +8 -4
- data/lib/temporalio/interceptor.rb +22 -0
- data/lib/temporalio/retry_policy.rb +13 -3
- data/lib/temporalio/testing/time_skipping_handle.rb +32 -0
- data/lib/temporalio/testing/time_skipping_interceptor.rb +23 -0
- data/lib/temporalio/testing/workflow_environment.rb +112 -0
- data/lib/temporalio/testing.rb +175 -0
- data/lib/temporalio/version.rb +1 -1
- data/lib/temporalio/worker/activity_runner.rb +26 -4
- data/lib/temporalio/worker/activity_worker.rb +44 -18
- data/lib/temporalio/worker/sync_worker.rb +47 -11
- data/lib/temporalio/worker.rb +27 -21
- data/lib/temporalio/workflow/async.rb +46 -0
- data/lib/temporalio/workflow/future.rb +138 -0
- data/lib/temporalio/workflow/info.rb +76 -0
- data/temporalio.gemspec +4 -3
- metadata +67 -17
- data/bridge/sdk-core/Cargo.lock +0 -2606
- data/bridge/sdk-core/protos/api_upstream/temporal/api/interaction/v1/message.proto +0 -87
- data/lib/bridge.so +0 -0
- data/lib/gen/temporal/api/enums/v1/interaction_type_pb.rb +0 -25
- data/lib/gen/temporal/api/interaction/v1/message_pb.rb +0 -49
- data/lib/gen/temporal/sdk/core/bridge/bridge_pb.rb +0 -222
|
@@ -1,57 +1,95 @@
|
|
|
1
1
|
use crate::{
|
|
2
|
-
|
|
3
|
-
worker::
|
|
2
|
+
protosext::ValidPollWFTQResponse,
|
|
3
|
+
worker::{
|
|
4
|
+
client::WorkerClient,
|
|
5
|
+
workflow::{CacheMissFetchReq, PermittedWFT, PreparedWFT},
|
|
6
|
+
},
|
|
4
7
|
};
|
|
5
|
-
use futures::{future::BoxFuture,
|
|
8
|
+
use futures::{future::BoxFuture, FutureExt, Stream};
|
|
9
|
+
use itertools::Itertools;
|
|
6
10
|
use std::{
|
|
7
11
|
collections::VecDeque,
|
|
8
12
|
fmt::Debug,
|
|
9
13
|
future::Future,
|
|
14
|
+
mem,
|
|
15
|
+
mem::transmute,
|
|
10
16
|
pin::Pin,
|
|
11
17
|
sync::Arc,
|
|
12
18
|
task::{Context, Poll},
|
|
13
19
|
};
|
|
14
20
|
use temporal_sdk_core_protos::temporal::api::{
|
|
15
21
|
enums::v1::EventType,
|
|
16
|
-
history::v1::{History, HistoryEvent},
|
|
17
|
-
workflowservice::v1::GetWorkflowExecutionHistoryResponse,
|
|
22
|
+
history::v1::{history_event, History, HistoryEvent, WorkflowTaskCompletedEventAttributes},
|
|
18
23
|
};
|
|
19
24
|
use tracing::Instrument;
|
|
20
25
|
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
|
|
26
|
+
lazy_static::lazy_static! {
|
|
27
|
+
static ref EMPTY_FETCH_ERR: tonic::Status
|
|
28
|
+
= tonic::Status::data_loss("Fetched empty history page");
|
|
29
|
+
static ref EMPTY_TASK_ERR: tonic::Status
|
|
30
|
+
= tonic::Status::data_loss("Received an empty workflow task with no queries or history");
|
|
31
|
+
}
|
|
32
|
+
|
|
33
|
+
/// Represents one or more complete WFT sequences. History events are expected to be consumed from
|
|
34
|
+
/// it and applied to the state machines via [HistoryUpdate::take_next_wft_sequence]
|
|
35
|
+
#[cfg_attr(
|
|
36
|
+
feature = "save_wf_inputs",
|
|
37
|
+
derive(serde::Serialize, serde::Deserialize)
|
|
38
|
+
)]
|
|
24
39
|
pub struct HistoryUpdate {
|
|
25
|
-
events:
|
|
26
|
-
///
|
|
27
|
-
///
|
|
28
|
-
///
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
40
|
+
events: Vec<HistoryEvent>,
|
|
41
|
+
/// The event ID of the last started WFT, as according to the WFT which this update was
|
|
42
|
+
/// extracted from. Hence, while processing multiple logical WFTs during replay which were part
|
|
43
|
+
/// of one large history fetched from server, multiple updates may have the same value here.
|
|
44
|
+
pub previous_wft_started_id: i64,
|
|
45
|
+
/// True if this update contains the final WFT in history, and no more attempts to extract
|
|
46
|
+
/// additional updates should be made.
|
|
47
|
+
has_last_wft: bool,
|
|
32
48
|
}
|
|
33
49
|
impl Debug for HistoryUpdate {
|
|
34
50
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
51
|
+
if self.is_real() {
|
|
52
|
+
write!(
|
|
53
|
+
f,
|
|
54
|
+
"HistoryUpdate(previous_started_event_id: {}, length: {}, first_event_id: {:?})",
|
|
55
|
+
self.previous_wft_started_id,
|
|
56
|
+
self.events.len(),
|
|
57
|
+
self.events.first().map(|e| e.event_id)
|
|
58
|
+
)
|
|
59
|
+
} else {
|
|
60
|
+
write!(f, "DummyHistoryUpdate")
|
|
61
|
+
}
|
|
40
62
|
}
|
|
41
63
|
}
|
|
42
64
|
|
|
65
|
+
#[derive(Debug)]
|
|
66
|
+
pub enum NextWFT {
|
|
67
|
+
ReplayOver,
|
|
68
|
+
WFT(Vec<HistoryEvent>, bool),
|
|
69
|
+
NeedFetch,
|
|
70
|
+
}
|
|
71
|
+
|
|
72
|
+
#[derive(derive_more::DebugCustom)]
|
|
73
|
+
#[debug(fmt = "HistoryPaginator(run_id: {run_id})")]
|
|
74
|
+
#[cfg_attr(
|
|
75
|
+
feature = "save_wf_inputs",
|
|
76
|
+
derive(serde::Serialize, serde::Deserialize),
|
|
77
|
+
serde(default = "HistoryPaginator::fake_deserialized")
|
|
78
|
+
)]
|
|
43
79
|
pub struct HistoryPaginator {
|
|
44
|
-
|
|
80
|
+
pub(crate) wf_id: String,
|
|
81
|
+
pub(crate) run_id: String,
|
|
82
|
+
pub(crate) previous_wft_started_id: i64,
|
|
83
|
+
|
|
84
|
+
#[cfg_attr(feature = "save_wf_inputs", serde(skip))]
|
|
45
85
|
client: Arc<dyn WorkerClient>,
|
|
86
|
+
#[cfg_attr(feature = "save_wf_inputs", serde(skip))]
|
|
46
87
|
event_queue: VecDeque<HistoryEvent>,
|
|
47
|
-
|
|
48
|
-
run_id: String,
|
|
88
|
+
#[cfg_attr(feature = "save_wf_inputs", serde(skip))]
|
|
49
89
|
next_page_token: NextPageToken,
|
|
50
|
-
open_history_request:
|
|
51
|
-
Option<BoxFuture<'static, Result<GetWorkflowExecutionHistoryResponse, tonic::Status>>>,
|
|
52
90
|
/// These are events that should be returned once pagination has finished. This only happens
|
|
53
91
|
/// during cache misses, where we got a partial task but need to fetch history from the start.
|
|
54
|
-
|
|
92
|
+
#[cfg_attr(feature = "save_wf_inputs", serde(skip))]
|
|
55
93
|
final_events: Vec<HistoryEvent>,
|
|
56
94
|
}
|
|
57
95
|
|
|
@@ -77,8 +115,68 @@ impl From<Vec<u8>> for NextPageToken {
|
|
|
77
115
|
}
|
|
78
116
|
|
|
79
117
|
impl HistoryPaginator {
|
|
80
|
-
|
|
118
|
+
/// Use a new poll response to create a new [WFTPaginator], returning it and the
|
|
119
|
+
/// [PreparedWFT] extracted from it that can be fed into workflow state.
|
|
120
|
+
pub(super) async fn from_poll(
|
|
121
|
+
wft: ValidPollWFTQResponse,
|
|
122
|
+
client: Arc<dyn WorkerClient>,
|
|
123
|
+
) -> Result<(Self, PreparedWFT), tonic::Status> {
|
|
124
|
+
let empty_hist = wft.history.events.is_empty();
|
|
125
|
+
let npt = if empty_hist {
|
|
126
|
+
NextPageToken::FetchFromStart
|
|
127
|
+
} else {
|
|
128
|
+
wft.next_page_token.into()
|
|
129
|
+
};
|
|
130
|
+
let mut paginator = HistoryPaginator::new(
|
|
131
|
+
wft.history,
|
|
132
|
+
wft.previous_started_event_id,
|
|
133
|
+
wft.workflow_execution.workflow_id.clone(),
|
|
134
|
+
wft.workflow_execution.run_id.clone(),
|
|
135
|
+
npt,
|
|
136
|
+
client,
|
|
137
|
+
);
|
|
138
|
+
if empty_hist && wft.legacy_query.is_none() && wft.query_requests.is_empty() {
|
|
139
|
+
return Err(EMPTY_TASK_ERR.clone());
|
|
140
|
+
}
|
|
141
|
+
let update = if empty_hist {
|
|
142
|
+
HistoryUpdate::from_events([], wft.previous_started_event_id, true).0
|
|
143
|
+
} else {
|
|
144
|
+
paginator.extract_next_update().await?
|
|
145
|
+
};
|
|
146
|
+
let prepared = PreparedWFT {
|
|
147
|
+
task_token: wft.task_token,
|
|
148
|
+
attempt: wft.attempt,
|
|
149
|
+
execution: wft.workflow_execution,
|
|
150
|
+
workflow_type: wft.workflow_type,
|
|
151
|
+
legacy_query: wft.legacy_query,
|
|
152
|
+
query_requests: wft.query_requests,
|
|
153
|
+
update,
|
|
154
|
+
};
|
|
155
|
+
Ok((paginator, prepared))
|
|
156
|
+
}
|
|
157
|
+
|
|
158
|
+
pub(super) async fn from_fetchreq(
|
|
159
|
+
mut req: CacheMissFetchReq,
|
|
160
|
+
client: Arc<dyn WorkerClient>,
|
|
161
|
+
) -> Result<PermittedWFT, tonic::Status> {
|
|
162
|
+
let mut paginator = Self {
|
|
163
|
+
wf_id: req.original_wft.work.execution.workflow_id.clone(),
|
|
164
|
+
run_id: req.original_wft.work.execution.run_id.clone(),
|
|
165
|
+
previous_wft_started_id: req.original_wft.work.update.previous_wft_started_id,
|
|
166
|
+
client,
|
|
167
|
+
event_queue: Default::default(),
|
|
168
|
+
next_page_token: NextPageToken::FetchFromStart,
|
|
169
|
+
final_events: vec![],
|
|
170
|
+
};
|
|
171
|
+
let first_update = paginator.extract_next_update().await?;
|
|
172
|
+
req.original_wft.work.update = first_update;
|
|
173
|
+
req.original_wft.paginator = paginator;
|
|
174
|
+
Ok(req.original_wft)
|
|
175
|
+
}
|
|
176
|
+
|
|
177
|
+
fn new(
|
|
81
178
|
initial_history: History,
|
|
179
|
+
previous_wft_started_id: i64,
|
|
82
180
|
wf_id: String,
|
|
83
181
|
run_id: String,
|
|
84
182
|
next_page_token: impl Into<NextPageToken>,
|
|
@@ -97,20 +195,107 @@ impl HistoryPaginator {
|
|
|
97
195
|
wf_id,
|
|
98
196
|
run_id,
|
|
99
197
|
next_page_token,
|
|
100
|
-
open_history_request: None,
|
|
101
198
|
final_events,
|
|
199
|
+
previous_wft_started_id,
|
|
200
|
+
}
|
|
201
|
+
}
|
|
202
|
+
|
|
203
|
+
#[cfg(feature = "save_wf_inputs")]
|
|
204
|
+
pub(super) fn fake_deserialized() -> HistoryPaginator {
|
|
205
|
+
use crate::worker::client::mocks::mock_manual_workflow_client;
|
|
206
|
+
HistoryPaginator {
|
|
207
|
+
client: Arc::new(mock_manual_workflow_client()),
|
|
208
|
+
event_queue: Default::default(),
|
|
209
|
+
wf_id: "".to_string(),
|
|
210
|
+
run_id: "".to_string(),
|
|
211
|
+
next_page_token: NextPageToken::FetchFromStart,
|
|
212
|
+
final_events: vec![],
|
|
213
|
+
previous_wft_started_id: -2,
|
|
214
|
+
}
|
|
215
|
+
}
|
|
216
|
+
|
|
217
|
+
/// Return at least the next two WFT sequences (as determined by the passed-in ID) as a
|
|
218
|
+
/// [HistoryUpdate]. Two sequences supports the required peek-ahead during replay without
|
|
219
|
+
/// unnecessary back-and-forth.
|
|
220
|
+
///
|
|
221
|
+
/// If there are already enough events buffered in memory, they will all be returned. Including
|
|
222
|
+
/// possibly (likely, during replay) more than just the next two WFTs.
|
|
223
|
+
///
|
|
224
|
+
/// If there are insufficient events to constitute two WFTs, then we will fetch pages until
|
|
225
|
+
/// we have two, or until we are at the end of history.
|
|
226
|
+
pub(crate) async fn extract_next_update(&mut self) -> Result<HistoryUpdate, tonic::Status> {
|
|
227
|
+
loop {
|
|
228
|
+
self.get_next_page().await?;
|
|
229
|
+
let current_events = mem::take(&mut self.event_queue);
|
|
230
|
+
if current_events.is_empty() {
|
|
231
|
+
// If next page fetching happened, and we still ended up with no events, something
|
|
232
|
+
// is wrong. We're expecting there to be more events to be able to extract this
|
|
233
|
+
// update, but server isn't giving us any. We have no choice except to give up and
|
|
234
|
+
// evict.
|
|
235
|
+
error!(
|
|
236
|
+
"We expected to be able to fetch more events but server says there are none"
|
|
237
|
+
);
|
|
238
|
+
return Err(EMPTY_FETCH_ERR.clone());
|
|
239
|
+
}
|
|
240
|
+
let first_event_id = current_events.front().unwrap().event_id;
|
|
241
|
+
// If there are some events at the end of the fetched events which represent only a
|
|
242
|
+
// portion of a complete WFT, retain them to be used in the next extraction.
|
|
243
|
+
let no_more = matches!(self.next_page_token, NextPageToken::Done);
|
|
244
|
+
let (update, extra) =
|
|
245
|
+
HistoryUpdate::from_events(current_events, self.previous_wft_started_id, no_more);
|
|
246
|
+
let extra_eid_same = extra
|
|
247
|
+
.first()
|
|
248
|
+
.map(|e| e.event_id == first_event_id)
|
|
249
|
+
.unwrap_or_default();
|
|
250
|
+
self.event_queue = extra.into();
|
|
251
|
+
if !no_more && extra_eid_same {
|
|
252
|
+
// There was not a meaningful WFT in the whole page. We must fetch more
|
|
253
|
+
continue;
|
|
254
|
+
}
|
|
255
|
+
return Ok(update);
|
|
102
256
|
}
|
|
103
257
|
}
|
|
104
258
|
|
|
105
|
-
|
|
106
|
-
|
|
259
|
+
/// Fetches the next page and adds it to the internal queue. Returns true if a fetch was
|
|
260
|
+
/// performed, false if there is no next page.
|
|
261
|
+
async fn get_next_page(&mut self) -> Result<bool, tonic::Status> {
|
|
262
|
+
let history = loop {
|
|
263
|
+
let npt = match mem::replace(&mut self.next_page_token, NextPageToken::Done) {
|
|
264
|
+
// If there's no open request and the last page token we got was empty, we're done.
|
|
265
|
+
NextPageToken::Done => return Ok(false),
|
|
266
|
+
NextPageToken::FetchFromStart => vec![],
|
|
267
|
+
NextPageToken::Next(v) => v,
|
|
268
|
+
};
|
|
269
|
+
debug!(run_id=%self.run_id, "Fetching new history page");
|
|
270
|
+
let fetch_res = self
|
|
271
|
+
.client
|
|
272
|
+
.get_workflow_execution_history(self.wf_id.clone(), Some(self.run_id.clone()), npt)
|
|
273
|
+
.instrument(span!(tracing::Level::TRACE, "fetch_history_in_paginator"))
|
|
274
|
+
.await?;
|
|
275
|
+
|
|
276
|
+
self.next_page_token = fetch_res.next_page_token.into();
|
|
277
|
+
|
|
278
|
+
let history_is_empty = fetch_res
|
|
279
|
+
.history
|
|
280
|
+
.as_ref()
|
|
281
|
+
.map(|h| h.events.is_empty())
|
|
282
|
+
.unwrap_or(true);
|
|
283
|
+
if history_is_empty && matches!(&self.next_page_token, NextPageToken::Next(_)) {
|
|
284
|
+
// If the fetch returned an empty history, but there *was* a next page token,
|
|
285
|
+
// immediately try to get that.
|
|
286
|
+
continue;
|
|
287
|
+
}
|
|
288
|
+
// Async doesn't love recursion so we do this instead.
|
|
289
|
+
break fetch_res.history;
|
|
290
|
+
};
|
|
291
|
+
|
|
107
292
|
self.event_queue
|
|
108
|
-
.extend(
|
|
293
|
+
.extend(history.map(|h| h.events).unwrap_or_default());
|
|
109
294
|
if matches!(&self.next_page_token, NextPageToken::Done) {
|
|
110
295
|
// If finished, we need to extend the queue with the final events, skipping any
|
|
111
296
|
// which are already present.
|
|
112
297
|
if let Some(last_event_id) = self.event_queue.back().map(|e| e.event_id) {
|
|
113
|
-
let final_events =
|
|
298
|
+
let final_events = mem::take(&mut self.final_events);
|
|
114
299
|
self.event_queue.extend(
|
|
115
300
|
final_events
|
|
116
301
|
.into_iter()
|
|
@@ -118,63 +303,143 @@ impl HistoryPaginator {
|
|
|
118
303
|
);
|
|
119
304
|
}
|
|
120
305
|
};
|
|
306
|
+
Ok(true)
|
|
121
307
|
}
|
|
122
308
|
}
|
|
123
309
|
|
|
124
|
-
|
|
310
|
+
#[pin_project::pin_project]
|
|
311
|
+
struct StreamingHistoryPaginator {
|
|
312
|
+
inner: HistoryPaginator,
|
|
313
|
+
#[pin]
|
|
314
|
+
open_history_request: Option<BoxFuture<'static, Result<(), tonic::Status>>>,
|
|
315
|
+
}
|
|
316
|
+
|
|
317
|
+
impl StreamingHistoryPaginator {
|
|
318
|
+
// Kept since can be used for history downloading
|
|
319
|
+
#[cfg(test)]
|
|
320
|
+
pub fn new(inner: HistoryPaginator) -> Self {
|
|
321
|
+
Self {
|
|
322
|
+
inner,
|
|
323
|
+
open_history_request: None,
|
|
324
|
+
}
|
|
325
|
+
}
|
|
326
|
+
}
|
|
327
|
+
|
|
328
|
+
impl Stream for StreamingHistoryPaginator {
|
|
125
329
|
type Item = Result<HistoryEvent, tonic::Status>;
|
|
126
330
|
|
|
127
|
-
fn poll_next(
|
|
128
|
-
|
|
331
|
+
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
|
|
332
|
+
let mut this = self.project();
|
|
333
|
+
|
|
334
|
+
if let Some(e) = this.inner.event_queue.pop_front() {
|
|
129
335
|
return Poll::Ready(Some(Ok(e)));
|
|
130
336
|
}
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
};
|
|
140
|
-
debug!(run_id=%self.run_id, "Fetching new history page");
|
|
141
|
-
let gw = self.client.clone();
|
|
142
|
-
let wid = self.wf_id.clone();
|
|
143
|
-
let rid = self.run_id.clone();
|
|
144
|
-
let resp_fut = async move {
|
|
145
|
-
gw.get_workflow_execution_history(wid, Some(rid), npt)
|
|
146
|
-
.instrument(span!(tracing::Level::TRACE, "fetch_history_in_paginator"))
|
|
147
|
-
.await
|
|
148
|
-
};
|
|
149
|
-
self.open_history_request.insert(resp_fut.boxed())
|
|
150
|
-
};
|
|
337
|
+
if this.open_history_request.is_none() {
|
|
338
|
+
// SAFETY: This is safe because the inner paginator cannot be dropped before the future,
|
|
339
|
+
// and the future won't be moved from out of this struct.
|
|
340
|
+
this.open_history_request.set(Some(unsafe {
|
|
341
|
+
transmute(HistoryPaginator::get_next_page(this.inner).boxed())
|
|
342
|
+
}));
|
|
343
|
+
}
|
|
344
|
+
let history_req = this.open_history_request.as_mut().as_pin_mut().unwrap();
|
|
151
345
|
|
|
152
|
-
|
|
346
|
+
match Future::poll(history_req, cx) {
|
|
153
347
|
Poll::Ready(resp) => {
|
|
154
|
-
|
|
348
|
+
this.open_history_request.set(None);
|
|
155
349
|
match resp {
|
|
156
350
|
Err(neterr) => Poll::Ready(Some(Err(neterr))),
|
|
157
|
-
Ok(
|
|
158
|
-
self.extend_queue_with_new_page(resp);
|
|
159
|
-
Poll::Ready(self.event_queue.pop_front().map(Ok))
|
|
160
|
-
}
|
|
351
|
+
Ok(_) => Poll::Ready(this.inner.event_queue.pop_front().map(Ok)),
|
|
161
352
|
}
|
|
162
353
|
}
|
|
163
354
|
Poll::Pending => Poll::Pending,
|
|
164
|
-
}
|
|
355
|
+
}
|
|
165
356
|
}
|
|
166
357
|
}
|
|
167
358
|
|
|
168
359
|
impl HistoryUpdate {
|
|
169
|
-
|
|
360
|
+
/// Sometimes it's useful to take an update out of something without needing to use an option
|
|
361
|
+
/// field. Use this to replace the field with an empty update.
|
|
362
|
+
pub fn dummy() -> Self {
|
|
170
363
|
Self {
|
|
171
|
-
events:
|
|
172
|
-
|
|
173
|
-
|
|
364
|
+
events: vec![],
|
|
365
|
+
previous_wft_started_id: -1,
|
|
366
|
+
has_last_wft: false,
|
|
367
|
+
}
|
|
368
|
+
}
|
|
369
|
+
pub fn is_real(&self) -> bool {
|
|
370
|
+
self.previous_wft_started_id >= 0
|
|
371
|
+
}
|
|
372
|
+
pub fn first_event_id(&self) -> Option<i64> {
|
|
373
|
+
self.events.get(0).map(|e| e.event_id)
|
|
374
|
+
}
|
|
375
|
+
|
|
376
|
+
/// Create an instance of an update directly from events. If the passed in event iterator has a
|
|
377
|
+
/// partial WFT sequence at the end, all events after the last complete WFT sequence (ending
|
|
378
|
+
/// with WFT started) are returned back to the caller, since the history update only works in
|
|
379
|
+
/// terms of complete WFT sequences.
|
|
380
|
+
pub fn from_events<I: IntoIterator<Item = HistoryEvent>>(
|
|
381
|
+
events: I,
|
|
382
|
+
previous_wft_started_id: i64,
|
|
383
|
+
has_last_wft: bool,
|
|
384
|
+
) -> (Self, Vec<HistoryEvent>)
|
|
385
|
+
where
|
|
386
|
+
<I as IntoIterator>::IntoIter: Send + 'static,
|
|
387
|
+
{
|
|
388
|
+
let mut all_events: Vec<_> = events.into_iter().collect();
|
|
389
|
+
let mut last_end =
|
|
390
|
+
find_end_index_of_next_wft_seq(all_events.as_slice(), previous_wft_started_id);
|
|
391
|
+
if matches!(last_end, NextWFTSeqEndIndex::Incomplete(_)) {
|
|
392
|
+
return if has_last_wft {
|
|
393
|
+
(
|
|
394
|
+
Self {
|
|
395
|
+
events: all_events,
|
|
396
|
+
previous_wft_started_id,
|
|
397
|
+
has_last_wft,
|
|
398
|
+
},
|
|
399
|
+
vec![],
|
|
400
|
+
)
|
|
401
|
+
} else {
|
|
402
|
+
(
|
|
403
|
+
Self {
|
|
404
|
+
events: vec![],
|
|
405
|
+
previous_wft_started_id,
|
|
406
|
+
has_last_wft,
|
|
407
|
+
},
|
|
408
|
+
all_events,
|
|
409
|
+
)
|
|
410
|
+
};
|
|
411
|
+
}
|
|
412
|
+
while let NextWFTSeqEndIndex::Complete(next_end_ix) = last_end {
|
|
413
|
+
let next_end_eid = all_events[next_end_ix].event_id;
|
|
414
|
+
// To save skipping all events at the front of this slice, only pass the relevant
|
|
415
|
+
// portion, but that means the returned index must be adjusted, hence the addition.
|
|
416
|
+
let next_end = find_end_index_of_next_wft_seq(&all_events[next_end_ix..], next_end_eid)
|
|
417
|
+
.add(next_end_ix);
|
|
418
|
+
if matches!(next_end, NextWFTSeqEndIndex::Incomplete(_)) {
|
|
419
|
+
break;
|
|
420
|
+
}
|
|
421
|
+
last_end = next_end;
|
|
174
422
|
}
|
|
423
|
+
let remaining_events = if all_events.is_empty() {
|
|
424
|
+
vec![]
|
|
425
|
+
} else {
|
|
426
|
+
all_events.split_off(last_end.index() + 1)
|
|
427
|
+
};
|
|
428
|
+
|
|
429
|
+
(
|
|
430
|
+
Self {
|
|
431
|
+
events: all_events,
|
|
432
|
+
previous_wft_started_id,
|
|
433
|
+
has_last_wft,
|
|
434
|
+
},
|
|
435
|
+
remaining_events,
|
|
436
|
+
)
|
|
175
437
|
}
|
|
176
438
|
|
|
177
|
-
/// Create an instance of an update directly from events
|
|
439
|
+
/// Create an instance of an update directly from events. The passed in events *must* consist
|
|
440
|
+
/// of one or more complete WFT sequences. IE: The event iterator must not end in the middle
|
|
441
|
+
/// of a WFT sequence.
|
|
442
|
+
#[cfg(test)]
|
|
178
443
|
pub fn new_from_events<I: IntoIterator<Item = HistoryEvent>>(
|
|
179
444
|
events: I,
|
|
180
445
|
previous_wft_started_id: i64,
|
|
@@ -183,309 +448,417 @@ impl HistoryUpdate {
|
|
|
183
448
|
<I as IntoIterator>::IntoIter: Send + 'static,
|
|
184
449
|
{
|
|
185
450
|
Self {
|
|
186
|
-
events:
|
|
187
|
-
|
|
188
|
-
|
|
451
|
+
events: events.into_iter().collect(),
|
|
452
|
+
previous_wft_started_id,
|
|
453
|
+
has_last_wft: true,
|
|
189
454
|
}
|
|
190
455
|
}
|
|
191
456
|
|
|
192
|
-
/// Given a workflow task started id, return all events starting at that number (
|
|
193
|
-
/// the next WFT started event (inclusive).
|
|
194
|
-
/// remaining history is returned.
|
|
195
|
-
///
|
|
196
|
-
/// Events are *consumed* by this process, to keep things efficient in workflow machines, and
|
|
197
|
-
/// the function may call out to server to fetch more pages if they are known to exist and
|
|
198
|
-
/// needed to complete the WFT sequence.
|
|
457
|
+
/// Given a workflow task started id, return all events starting at that number (exclusive) to
|
|
458
|
+
/// the next WFT started event (inclusive).
|
|
199
459
|
///
|
|
200
|
-
///
|
|
460
|
+
/// Events are *consumed* by this process, to keep things efficient in workflow machines.
|
|
201
461
|
///
|
|
202
|
-
///
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
.take_next_wft_sequence_impl(from_wft_started_id)
|
|
209
|
-
.await?;
|
|
210
|
-
if !maybe_bonus_events.is_empty() {
|
|
211
|
-
self.buffered.extend(maybe_bonus_events);
|
|
462
|
+
/// If we are out of WFT sequences that can be yielded by this update, it will return an empty
|
|
463
|
+
/// vec, indicating more pages will need to be fetched.
|
|
464
|
+
pub fn take_next_wft_sequence(&mut self, from_wft_started_id: i64) -> NextWFT {
|
|
465
|
+
// First, drop any events from the queue which are earlier than the passed-in id.
|
|
466
|
+
if let Some(ix_first_relevant) = self.starting_index_after_skipping(from_wft_started_id) {
|
|
467
|
+
self.events.drain(0..ix_first_relevant);
|
|
212
468
|
}
|
|
213
|
-
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
|
|
469
|
+
let next_wft_ix = find_end_index_of_next_wft_seq(&self.events, from_wft_started_id);
|
|
470
|
+
match next_wft_ix {
|
|
471
|
+
NextWFTSeqEndIndex::Incomplete(siz) => {
|
|
472
|
+
if self.has_last_wft {
|
|
473
|
+
if siz == 0 {
|
|
474
|
+
NextWFT::ReplayOver
|
|
475
|
+
} else {
|
|
476
|
+
self.build_next_wft(siz)
|
|
477
|
+
}
|
|
478
|
+
} else {
|
|
479
|
+
if siz != 0 {
|
|
480
|
+
panic!(
|
|
481
|
+
"HistoryUpdate was created with an incomplete WFT. This is an SDK bug."
|
|
482
|
+
);
|
|
483
|
+
}
|
|
484
|
+
NextWFT::NeedFetch
|
|
485
|
+
}
|
|
221
486
|
}
|
|
487
|
+
NextWFTSeqEndIndex::Complete(next_wft_ix) => self.build_next_wft(next_wft_ix),
|
|
222
488
|
}
|
|
489
|
+
}
|
|
223
490
|
|
|
224
|
-
|
|
491
|
+
fn build_next_wft(&mut self, drain_this_much: usize) -> NextWFT {
|
|
492
|
+
NextWFT::WFT(
|
|
493
|
+
self.events.drain(0..=drain_this_much).collect(),
|
|
494
|
+
self.events.is_empty() && self.has_last_wft,
|
|
495
|
+
)
|
|
225
496
|
}
|
|
226
497
|
|
|
227
498
|
/// Lets the caller peek ahead at the next WFT sequence that will be returned by
|
|
228
|
-
/// [take_next_wft_sequence]. Will always return
|
|
229
|
-
/// first. May also return an empty iterator or incomplete sequence if we are at
|
|
230
|
-
/// history.
|
|
231
|
-
pub fn peek_next_wft_sequence(&self) ->
|
|
232
|
-
self
|
|
233
|
-
|
|
234
|
-
|
|
235
|
-
|
|
236
|
-
|
|
237
|
-
|
|
238
|
-
|
|
239
|
-
|
|
240
|
-
|
|
241
|
-
|
|
242
|
-
|
|
243
|
-
|
|
244
|
-
|
|
245
|
-
|
|
246
|
-
|
|
247
|
-
|
|
248
|
-
|
|
249
|
-
|
|
250
|
-
let mut should_pop = |e: &HistoryEvent| {
|
|
251
|
-
if e.event_id <= from_event_id {
|
|
252
|
-
return true;
|
|
253
|
-
} else if e.event_type() == EventType::WorkflowTaskStarted {
|
|
254
|
-
next_wft_state = NextWftState::Seen;
|
|
255
|
-
return true;
|
|
499
|
+
/// [take_next_wft_sequence]. Will always return the first available WFT sequence if that has
|
|
500
|
+
/// not been called first. May also return an empty iterator or incomplete sequence if we are at
|
|
501
|
+
/// the end of history.
|
|
502
|
+
pub fn peek_next_wft_sequence(&self, from_wft_started_id: i64) -> &[HistoryEvent] {
|
|
503
|
+
let ix_first_relevant = self
|
|
504
|
+
.starting_index_after_skipping(from_wft_started_id)
|
|
505
|
+
.unwrap_or_default();
|
|
506
|
+
let relevant_events = &self.events[ix_first_relevant..];
|
|
507
|
+
if relevant_events.is_empty() {
|
|
508
|
+
return relevant_events;
|
|
509
|
+
}
|
|
510
|
+
let ix_end = find_end_index_of_next_wft_seq(relevant_events, from_wft_started_id).index();
|
|
511
|
+
&relevant_events[0..=ix_end]
|
|
512
|
+
}
|
|
513
|
+
|
|
514
|
+
/// Returns true if this update has the next needed WFT sequence, false if events will need to
|
|
515
|
+
/// be fetched in order to create a complete update with the entire next WFT sequence.
|
|
516
|
+
pub fn can_take_next_wft_sequence(&self, from_wft_started_id: i64) -> bool {
|
|
517
|
+
let next_wft_ix = find_end_index_of_next_wft_seq(&self.events, from_wft_started_id);
|
|
518
|
+
if let NextWFTSeqEndIndex::Incomplete(_) = next_wft_ix {
|
|
519
|
+
if !self.has_last_wft {
|
|
520
|
+
return false;
|
|
256
521
|
}
|
|
522
|
+
}
|
|
523
|
+
true
|
|
524
|
+
}
|
|
257
525
|
|
|
258
|
-
|
|
259
|
-
|
|
260
|
-
|
|
261
|
-
|
|
262
|
-
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
|
|
267
|
-
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
false
|
|
271
|
-
}
|
|
272
|
-
NextWftState::SeenCompleted => {
|
|
273
|
-
// If we've seen the WFT be completed, and this event is another scheduled, then
|
|
274
|
-
// this was an empty heartbeat we should ignore.
|
|
275
|
-
if e.event_type() == EventType::WorkflowTaskScheduled {
|
|
276
|
-
next_wft_state = NextWftState::NotSeen;
|
|
277
|
-
return true;
|
|
278
|
-
}
|
|
279
|
-
// Otherwise, we're done here
|
|
280
|
-
false
|
|
526
|
+
/// Returns the next WFT completed event attributes, if any, starting at (inclusive) the
|
|
527
|
+
/// `from_id`
|
|
528
|
+
pub fn peek_next_wft_completed(
|
|
529
|
+
&self,
|
|
530
|
+
from_id: i64,
|
|
531
|
+
) -> Option<&WorkflowTaskCompletedEventAttributes> {
|
|
532
|
+
self.events
|
|
533
|
+
.iter()
|
|
534
|
+
.skip_while(|e| e.event_id < from_id)
|
|
535
|
+
.find_map(|e| match &e.attributes {
|
|
536
|
+
Some(history_event::Attributes::WorkflowTaskCompletedEventAttributes(ref a)) => {
|
|
537
|
+
Some(a)
|
|
281
538
|
}
|
|
282
|
-
|
|
283
|
-
}
|
|
284
|
-
|
|
539
|
+
_ => None,
|
|
540
|
+
})
|
|
541
|
+
}
|
|
285
542
|
|
|
286
|
-
|
|
287
|
-
|
|
543
|
+
fn starting_index_after_skipping(&self, from_wft_started_id: i64) -> Option<usize> {
|
|
544
|
+
self.events
|
|
545
|
+
.iter()
|
|
546
|
+
.find_position(|e| e.event_id > from_wft_started_id)
|
|
547
|
+
.map(|(ix, _)| ix)
|
|
548
|
+
}
|
|
549
|
+
}
|
|
288
550
|
|
|
289
|
-
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
|
|
551
|
+
#[derive(Debug, Copy, Clone)]
|
|
552
|
+
enum NextWFTSeqEndIndex {
|
|
553
|
+
/// The next WFT sequence is completely contained within the passed-in iterator
|
|
554
|
+
Complete(usize),
|
|
555
|
+
/// The next WFT sequence is not found within the passed-in iterator, and the contained
|
|
556
|
+
/// value is the last index of the iterator.
|
|
557
|
+
Incomplete(usize),
|
|
558
|
+
}
|
|
559
|
+
impl NextWFTSeqEndIndex {
|
|
560
|
+
fn index(self) -> usize {
|
|
561
|
+
match self {
|
|
562
|
+
NextWFTSeqEndIndex::Complete(ix) | NextWFTSeqEndIndex::Incomplete(ix) => ix,
|
|
563
|
+
}
|
|
564
|
+
}
|
|
565
|
+
fn add(self, val: usize) -> Self {
|
|
566
|
+
match self {
|
|
567
|
+
NextWFTSeqEndIndex::Complete(ix) => NextWFTSeqEndIndex::Complete(ix + val),
|
|
568
|
+
NextWFTSeqEndIndex::Incomplete(ix) => NextWFTSeqEndIndex::Incomplete(ix + val),
|
|
569
|
+
}
|
|
570
|
+
}
|
|
571
|
+
}
|
|
572
|
+
|
|
573
|
+
/// Discovers the index of the last event in next WFT sequence within the passed-in slice
|
|
574
|
+
fn find_end_index_of_next_wft_seq(
|
|
575
|
+
events: &[HistoryEvent],
|
|
576
|
+
from_event_id: i64,
|
|
577
|
+
) -> NextWFTSeqEndIndex {
|
|
578
|
+
if events.is_empty() {
|
|
579
|
+
return NextWFTSeqEndIndex::Incomplete(0);
|
|
580
|
+
}
|
|
581
|
+
let mut last_index = 0;
|
|
582
|
+
let mut saw_any_non_wft_event = false;
|
|
583
|
+
for (ix, e) in events.iter().enumerate() {
|
|
584
|
+
last_index = ix;
|
|
585
|
+
|
|
586
|
+
// It's possible to have gotten a new history update without eviction (ex: unhandled
|
|
587
|
+
// command on completion), where we may need to skip events we already handled.
|
|
588
|
+
if e.event_id <= from_event_id {
|
|
589
|
+
continue;
|
|
590
|
+
}
|
|
591
|
+
|
|
592
|
+
if !matches!(
|
|
593
|
+
e.event_type(),
|
|
594
|
+
EventType::WorkflowTaskFailed
|
|
595
|
+
| EventType::WorkflowTaskTimedOut
|
|
596
|
+
| EventType::WorkflowTaskScheduled
|
|
597
|
+
| EventType::WorkflowTaskStarted
|
|
598
|
+
| EventType::WorkflowTaskCompleted
|
|
599
|
+
) {
|
|
600
|
+
saw_any_non_wft_event = true;
|
|
601
|
+
}
|
|
602
|
+
if e.is_final_wf_execution_event() {
|
|
603
|
+
return NextWFTSeqEndIndex::Complete(last_index);
|
|
604
|
+
}
|
|
293
605
|
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
606
|
+
if e.event_type() == EventType::WorkflowTaskStarted {
|
|
607
|
+
if let Some(next_event) = events.get(ix + 1) {
|
|
608
|
+
let et = next_event.event_type();
|
|
609
|
+
// If the next event is WFT timeout or fail, or abrupt WF execution end, that
|
|
610
|
+
// doesn't conclude a WFT sequence.
|
|
611
|
+
if matches!(
|
|
612
|
+
et,
|
|
613
|
+
EventType::WorkflowTaskFailed
|
|
614
|
+
| EventType::WorkflowTaskTimedOut
|
|
615
|
+
| EventType::WorkflowExecutionTimedOut
|
|
616
|
+
| EventType::WorkflowExecutionTerminated
|
|
617
|
+
| EventType::WorkflowExecutionCanceled
|
|
618
|
+
) {
|
|
619
|
+
continue;
|
|
300
620
|
}
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
|
|
306
|
-
|
|
307
|
-
|
|
308
|
-
|
|
309
|
-
|
|
310
|
-
|
|
311
|
-
// re-buffering along with the event we're currently on.
|
|
312
|
-
extra_e.push(
|
|
313
|
-
events_to_next_wft_started
|
|
314
|
-
.pop()
|
|
315
|
-
.expect("There is an element here by definition"),
|
|
316
|
-
);
|
|
621
|
+
// If we've never seen an interesting event and the next two events are a completion
|
|
622
|
+
// followed immediately again by scheduled, then this is a WFT heartbeat and also
|
|
623
|
+
// doesn't conclude the sequence.
|
|
624
|
+
else if et == EventType::WorkflowTaskCompleted {
|
|
625
|
+
if let Some(next_next_event) = events.get(ix + 2) {
|
|
626
|
+
if next_next_event.event_type() == EventType::WorkflowTaskScheduled {
|
|
627
|
+
continue;
|
|
628
|
+
} else {
|
|
629
|
+
saw_any_non_wft_event = true;
|
|
630
|
+
}
|
|
317
631
|
}
|
|
318
|
-
extra_e.push(e);
|
|
319
|
-
break;
|
|
320
632
|
}
|
|
321
|
-
|
|
633
|
+
}
|
|
634
|
+
if saw_any_non_wft_event {
|
|
635
|
+
return NextWFTSeqEndIndex::Complete(ix);
|
|
322
636
|
}
|
|
323
637
|
}
|
|
324
|
-
|
|
325
|
-
Ok((events_to_next_wft_started, extra_e))
|
|
326
638
|
}
|
|
327
|
-
}
|
|
328
639
|
|
|
329
|
-
|
|
330
|
-
enum NextWftState {
|
|
331
|
-
NotSeen,
|
|
332
|
-
Seen,
|
|
333
|
-
SeenCompleted,
|
|
640
|
+
NextWFTSeqEndIndex::Incomplete(last_index)
|
|
334
641
|
}
|
|
335
642
|
|
|
336
|
-
|
|
337
|
-
|
|
338
|
-
|
|
643
|
+
#[cfg(test)]
|
|
644
|
+
pub mod tests {
|
|
645
|
+
use super::*;
|
|
646
|
+
use crate::{
|
|
647
|
+
replay::{HistoryInfo, TestHistoryBuilder},
|
|
648
|
+
test_help::canned_histories,
|
|
649
|
+
worker::client::mocks::mock_workflow_client,
|
|
650
|
+
};
|
|
651
|
+
use futures_util::TryStreamExt;
|
|
652
|
+
use temporal_sdk_core_protos::temporal::api::workflowservice::v1::GetWorkflowExecutionHistoryResponse;
|
|
653
|
+
|
|
654
|
+
impl From<HistoryInfo> for HistoryUpdate {
|
|
655
|
+
fn from(v: HistoryInfo) -> Self {
|
|
656
|
+
Self::new_from_events(v.events().to_vec(), v.previous_started_event_id())
|
|
657
|
+
}
|
|
339
658
|
}
|
|
340
|
-
}
|
|
341
659
|
|
|
342
|
-
pub trait TestHBExt {
|
|
343
|
-
|
|
344
|
-
}
|
|
660
|
+
pub trait TestHBExt {
|
|
661
|
+
fn as_history_update(&self) -> HistoryUpdate;
|
|
662
|
+
}
|
|
345
663
|
|
|
346
|
-
impl TestHBExt for TestHistoryBuilder {
|
|
347
|
-
|
|
348
|
-
|
|
664
|
+
impl TestHBExt for TestHistoryBuilder {
|
|
665
|
+
fn as_history_update(&self) -> HistoryUpdate {
|
|
666
|
+
self.get_full_history_info().unwrap().into()
|
|
667
|
+
}
|
|
349
668
|
}
|
|
350
|
-
}
|
|
351
669
|
|
|
352
|
-
|
|
353
|
-
|
|
354
|
-
|
|
355
|
-
|
|
670
|
+
impl NextWFT {
|
|
671
|
+
fn unwrap_events(self) -> Vec<HistoryEvent> {
|
|
672
|
+
match self {
|
|
673
|
+
NextWFT::WFT(e, _) => e,
|
|
674
|
+
o => panic!("Must be complete WFT: {o:?}"),
|
|
675
|
+
}
|
|
676
|
+
}
|
|
677
|
+
}
|
|
356
678
|
|
|
357
|
-
|
|
358
|
-
|
|
679
|
+
fn next_check_peek(update: &mut HistoryUpdate, from_id: i64) -> Vec<HistoryEvent> {
|
|
680
|
+
let seq_peeked = update.peek_next_wft_sequence(from_id).to_vec();
|
|
681
|
+
let seq = update.take_next_wft_sequence(from_id).unwrap_events();
|
|
682
|
+
assert_eq!(seq, seq_peeked);
|
|
683
|
+
seq
|
|
684
|
+
}
|
|
685
|
+
|
|
686
|
+
#[test]
|
|
687
|
+
fn consumes_standard_wft_sequence() {
|
|
359
688
|
let timer_hist = canned_histories::single_timer("t");
|
|
360
689
|
let mut update = timer_hist.as_history_update();
|
|
361
|
-
let seq_1 = update
|
|
690
|
+
let seq_1 = next_check_peek(&mut update, 0);
|
|
362
691
|
assert_eq!(seq_1.len(), 3);
|
|
363
692
|
assert_eq!(seq_1.last().unwrap().event_id, 3);
|
|
364
|
-
let
|
|
693
|
+
let seq_2_peeked = update.peek_next_wft_sequence(0).to_vec();
|
|
694
|
+
let seq_2 = next_check_peek(&mut update, 3);
|
|
695
|
+
assert_eq!(seq_2, seq_2_peeked);
|
|
365
696
|
assert_eq!(seq_2.len(), 5);
|
|
366
697
|
assert_eq!(seq_2.last().unwrap().event_id, 8);
|
|
367
698
|
}
|
|
368
699
|
|
|
369
|
-
#[
|
|
370
|
-
|
|
700
|
+
#[test]
|
|
701
|
+
fn skips_wft_failed() {
|
|
371
702
|
let failed_hist = canned_histories::workflow_fails_with_reset_after_timer("t", "runid");
|
|
372
703
|
let mut update = failed_hist.as_history_update();
|
|
373
|
-
let seq_1 = update
|
|
704
|
+
let seq_1 = next_check_peek(&mut update, 0);
|
|
374
705
|
assert_eq!(seq_1.len(), 3);
|
|
375
706
|
assert_eq!(seq_1.last().unwrap().event_id, 3);
|
|
376
|
-
let seq_2 = update
|
|
707
|
+
let seq_2 = next_check_peek(&mut update, 3);
|
|
377
708
|
assert_eq!(seq_2.len(), 8);
|
|
378
709
|
assert_eq!(seq_2.last().unwrap().event_id, 11);
|
|
379
710
|
}
|
|
380
711
|
|
|
381
|
-
#[
|
|
382
|
-
|
|
712
|
+
#[test]
|
|
713
|
+
fn skips_wft_timeout() {
|
|
383
714
|
let failed_hist = canned_histories::wft_timeout_repro();
|
|
384
715
|
let mut update = failed_hist.as_history_update();
|
|
385
|
-
let seq_1 = update
|
|
716
|
+
let seq_1 = next_check_peek(&mut update, 0);
|
|
386
717
|
assert_eq!(seq_1.len(), 3);
|
|
387
718
|
assert_eq!(seq_1.last().unwrap().event_id, 3);
|
|
388
|
-
let seq_2 = update
|
|
719
|
+
let seq_2 = next_check_peek(&mut update, 3);
|
|
389
720
|
assert_eq!(seq_2.len(), 11);
|
|
390
721
|
assert_eq!(seq_2.last().unwrap().event_id, 14);
|
|
391
722
|
}
|
|
392
723
|
|
|
393
|
-
#[
|
|
394
|
-
|
|
724
|
+
#[test]
|
|
725
|
+
fn skips_events_before_desired_wft() {
|
|
395
726
|
let timer_hist = canned_histories::single_timer("t");
|
|
396
727
|
let mut update = timer_hist.as_history_update();
|
|
397
728
|
// We haven't processed the first 3 events, but we should still only get the second sequence
|
|
398
|
-
let seq_2 = update.take_next_wft_sequence(3).
|
|
729
|
+
let seq_2 = update.take_next_wft_sequence(3).unwrap_events();
|
|
399
730
|
assert_eq!(seq_2.len(), 5);
|
|
400
731
|
assert_eq!(seq_2.last().unwrap().event_id, 8);
|
|
401
732
|
}
|
|
402
733
|
|
|
403
|
-
#[
|
|
404
|
-
|
|
734
|
+
#[test]
|
|
735
|
+
fn history_ends_abruptly() {
|
|
405
736
|
let mut timer_hist = canned_histories::single_timer("t");
|
|
406
737
|
timer_hist.add_workflow_execution_terminated();
|
|
407
738
|
let mut update = timer_hist.as_history_update();
|
|
408
|
-
let seq_2 = update.take_next_wft_sequence(3).
|
|
409
|
-
assert_eq!(seq_2.len(),
|
|
410
|
-
assert_eq!(seq_2.last().unwrap().event_id,
|
|
739
|
+
let seq_2 = update.take_next_wft_sequence(3).unwrap_events();
|
|
740
|
+
assert_eq!(seq_2.len(), 6);
|
|
741
|
+
assert_eq!(seq_2.last().unwrap().event_id, 9);
|
|
411
742
|
}
|
|
412
743
|
|
|
413
|
-
#[
|
|
414
|
-
|
|
744
|
+
#[test]
|
|
745
|
+
fn heartbeats_skipped() {
|
|
415
746
|
let mut t = TestHistoryBuilder::default();
|
|
416
747
|
t.add_by_type(EventType::WorkflowExecutionStarted);
|
|
417
748
|
t.add_full_wf_task();
|
|
418
|
-
t.add_full_wf_task();
|
|
419
|
-
t.
|
|
420
|
-
t.add_full_wf_task();
|
|
421
|
-
t.add_full_wf_task();
|
|
749
|
+
t.add_full_wf_task(); // wft started 6
|
|
750
|
+
t.add_by_type(EventType::TimerStarted);
|
|
751
|
+
t.add_full_wf_task(); // wft started 10
|
|
422
752
|
t.add_full_wf_task();
|
|
423
753
|
t.add_full_wf_task();
|
|
424
|
-
t.
|
|
425
|
-
t.
|
|
754
|
+
t.add_full_wf_task(); // wft started 19
|
|
755
|
+
t.add_by_type(EventType::TimerStarted);
|
|
756
|
+
t.add_full_wf_task(); // wft started 23
|
|
426
757
|
t.add_we_signaled("whee", vec![]);
|
|
427
758
|
t.add_full_wf_task();
|
|
428
759
|
t.add_workflow_execution_completed();
|
|
429
760
|
|
|
430
761
|
let mut update = t.as_history_update();
|
|
431
|
-
let seq = update
|
|
762
|
+
let seq = next_check_peek(&mut update, 0);
|
|
432
763
|
assert_eq!(seq.len(), 6);
|
|
433
|
-
let seq = update
|
|
764
|
+
let seq = next_check_peek(&mut update, 6);
|
|
434
765
|
assert_eq!(seq.len(), 13);
|
|
435
|
-
let seq = update
|
|
766
|
+
let seq = next_check_peek(&mut update, 19);
|
|
436
767
|
assert_eq!(seq.len(), 4);
|
|
437
|
-
let seq = update
|
|
768
|
+
let seq = next_check_peek(&mut update, 23);
|
|
438
769
|
assert_eq!(seq.len(), 4);
|
|
439
|
-
let seq = update
|
|
770
|
+
let seq = next_check_peek(&mut update, 27);
|
|
440
771
|
assert_eq!(seq.len(), 2);
|
|
441
772
|
}
|
|
442
773
|
|
|
443
|
-
#[
|
|
444
|
-
|
|
445
|
-
|
|
446
|
-
|
|
447
|
-
|
|
448
|
-
|
|
449
|
-
|
|
450
|
-
|
|
774
|
+
#[test]
|
|
775
|
+
fn heartbeat_marker_end() {
|
|
776
|
+
let mut t = TestHistoryBuilder::default();
|
|
777
|
+
t.add_by_type(EventType::WorkflowExecutionStarted);
|
|
778
|
+
t.add_full_wf_task();
|
|
779
|
+
t.add_full_wf_task();
|
|
780
|
+
t.add_local_activity_result_marker(1, "1", "done".into());
|
|
781
|
+
t.add_workflow_execution_completed();
|
|
782
|
+
|
|
783
|
+
let mut update = t.as_history_update();
|
|
784
|
+
let seq = next_check_peek(&mut update, 3);
|
|
785
|
+
// completed, sched, started
|
|
786
|
+
assert_eq!(seq.len(), 3);
|
|
787
|
+
let seq = next_check_peek(&mut update, 6);
|
|
788
|
+
assert_eq!(seq.len(), 3);
|
|
789
|
+
}
|
|
790
|
+
|
|
791
|
+
fn paginator_setup(history: TestHistoryBuilder, chunk_size: usize) -> HistoryPaginator {
|
|
792
|
+
let full_hist = history.get_full_history_info().unwrap().into_events();
|
|
793
|
+
let initial_hist = full_hist.chunks(chunk_size).next().unwrap().to_vec();
|
|
451
794
|
let mut mock_client = mock_workflow_client();
|
|
452
795
|
|
|
453
|
-
let mut npt =
|
|
796
|
+
let mut npt = 1;
|
|
454
797
|
mock_client
|
|
455
798
|
.expect_get_workflow_execution_history()
|
|
456
799
|
.returning(move |_, _, passed_npt| {
|
|
457
800
|
assert_eq!(passed_npt, vec![npt]);
|
|
458
|
-
let
|
|
801
|
+
let mut hist_chunks = full_hist.chunks(chunk_size).peekable();
|
|
802
|
+
let next_chunks = hist_chunks.nth(npt.into()).unwrap_or_default();
|
|
459
803
|
npt += 1;
|
|
804
|
+
let next_page_token = if hist_chunks.peek().is_none() {
|
|
805
|
+
vec![]
|
|
806
|
+
} else {
|
|
807
|
+
vec![npt]
|
|
808
|
+
};
|
|
460
809
|
Ok(GetWorkflowExecutionHistoryResponse {
|
|
461
|
-
history: Some(
|
|
810
|
+
history: Some(History {
|
|
811
|
+
events: next_chunks.into(),
|
|
812
|
+
}),
|
|
462
813
|
raw_history: vec![],
|
|
463
|
-
next_page_token
|
|
814
|
+
next_page_token,
|
|
464
815
|
archived: false,
|
|
465
816
|
})
|
|
466
817
|
});
|
|
467
818
|
|
|
468
|
-
|
|
469
|
-
|
|
470
|
-
initial_hist
|
|
471
|
-
|
|
472
|
-
|
|
473
|
-
|
|
474
|
-
|
|
475
|
-
|
|
476
|
-
|
|
819
|
+
HistoryPaginator::new(
|
|
820
|
+
History {
|
|
821
|
+
events: initial_hist,
|
|
822
|
+
},
|
|
823
|
+
0,
|
|
824
|
+
"wfid".to_string(),
|
|
825
|
+
"runid".to_string(),
|
|
826
|
+
vec![1],
|
|
827
|
+
Arc::new(mock_client),
|
|
828
|
+
)
|
|
829
|
+
}
|
|
830
|
+
|
|
831
|
+
#[rstest::rstest]
|
|
832
|
+
#[tokio::test]
|
|
833
|
+
async fn paginator_extracts_updates(#[values(10, 11, 12, 13, 14)] chunk_size: usize) {
|
|
834
|
+
let wft_count = 100;
|
|
835
|
+
let mut paginator = paginator_setup(
|
|
836
|
+
canned_histories::long_sequential_timers(wft_count),
|
|
837
|
+
chunk_size,
|
|
477
838
|
);
|
|
839
|
+
let mut update = paginator.extract_next_update().await.unwrap();
|
|
478
840
|
|
|
479
|
-
let seq = update.take_next_wft_sequence(0).
|
|
841
|
+
let seq = update.take_next_wft_sequence(0).unwrap_events();
|
|
480
842
|
assert_eq!(seq.len(), 3);
|
|
481
843
|
|
|
482
844
|
let mut last_event_id = 3;
|
|
483
845
|
let mut last_started_id = 3;
|
|
484
|
-
for
|
|
485
|
-
let seq =
|
|
486
|
-
.take_next_wft_sequence(last_started_id)
|
|
487
|
-
|
|
488
|
-
|
|
846
|
+
for i in 1..wft_count {
|
|
847
|
+
let seq = {
|
|
848
|
+
match update.take_next_wft_sequence(last_started_id) {
|
|
849
|
+
NextWFT::WFT(seq, _) => seq,
|
|
850
|
+
NextWFT::NeedFetch => {
|
|
851
|
+
update = paginator.extract_next_update().await.unwrap();
|
|
852
|
+
update
|
|
853
|
+
.take_next_wft_sequence(last_started_id)
|
|
854
|
+
.unwrap_events()
|
|
855
|
+
}
|
|
856
|
+
NextWFT::ReplayOver => {
|
|
857
|
+
assert_eq!(i, wft_count - 1);
|
|
858
|
+
break;
|
|
859
|
+
}
|
|
860
|
+
}
|
|
861
|
+
};
|
|
489
862
|
for e in &seq {
|
|
490
863
|
last_event_id += 1;
|
|
491
864
|
assert_eq!(e.event_id, last_event_id);
|
|
@@ -495,10 +868,124 @@ pub mod tests {
|
|
|
495
868
|
}
|
|
496
869
|
}
|
|
497
870
|
|
|
871
|
+
#[tokio::test]
|
|
872
|
+
async fn paginator_streams() {
|
|
873
|
+
let wft_count = 10;
|
|
874
|
+
let paginator = StreamingHistoryPaginator::new(paginator_setup(
|
|
875
|
+
canned_histories::long_sequential_timers(wft_count),
|
|
876
|
+
10,
|
|
877
|
+
));
|
|
878
|
+
let everything: Vec<_> = paginator.try_collect().await.unwrap();
|
|
879
|
+
assert_eq!(everything.len(), (wft_count + 1) * 5);
|
|
880
|
+
everything.iter().fold(1, |event_id, e| {
|
|
881
|
+
assert_eq!(event_id, e.event_id);
|
|
882
|
+
e.event_id + 1
|
|
883
|
+
});
|
|
884
|
+
}
|
|
885
|
+
|
|
886
|
+
fn three_wfts_then_heartbeats() -> TestHistoryBuilder {
|
|
887
|
+
let mut t = TestHistoryBuilder::default();
|
|
888
|
+
// Start with two complete normal WFTs
|
|
889
|
+
t.add_by_type(EventType::WorkflowExecutionStarted);
|
|
890
|
+
t.add_full_wf_task(); // wft start - 3
|
|
891
|
+
t.add_by_type(EventType::TimerStarted);
|
|
892
|
+
t.add_full_wf_task(); // wft start - 7
|
|
893
|
+
t.add_by_type(EventType::TimerStarted);
|
|
894
|
+
t.add_full_wf_task(); // wft start - 11
|
|
895
|
+
for _ in 1..50 {
|
|
896
|
+
// Add a bunch of heartbeats with no commands, which count as one task
|
|
897
|
+
t.add_full_wf_task();
|
|
898
|
+
}
|
|
899
|
+
t.add_workflow_execution_completed();
|
|
900
|
+
t
|
|
901
|
+
}
|
|
902
|
+
|
|
      903 +     #[tokio::test]
      904 +     async fn needs_fetch_if_ending_in_middle_of_wft_seq() {
      905 +         let t = three_wfts_then_heartbeats();
      906 +         let mut ends_in_middle_of_seq = t.as_history_update().events;
      907 +         ends_in_middle_of_seq.truncate(19);
      908 +         // The update should contain the first two complete WFTs, ending on the 8th event which
      909 +         // is WFT started. The remaining events should be returned. False flags means the creator
      910 +         // knows there are more events, so we should return need fetch
      911 +         let (mut update, remaining) = HistoryUpdate::from_events(ends_in_middle_of_seq, 0, false);
      912 +         assert_eq!(remaining[0].event_id, 8);
      913 +         assert_eq!(remaining.last().unwrap().event_id, 19);
      914 +         let seq = update.take_next_wft_sequence(0).unwrap_events();
      915 +         assert_eq!(seq.last().unwrap().event_id, 3);
      916 +         let seq = update.take_next_wft_sequence(3).unwrap_events();
      917 +         assert_eq!(seq.last().unwrap().event_id, 7);
      918 +         let next = update.take_next_wft_sequence(7);
      919 +         assert_matches!(next, NextWFT::NeedFetch);
      920 +     }
      921 +
      922 +     // Like the above, but if the history happens to be cut off at a wft boundary, (even though
      923 +     // there may have been many heartbeats after we have no way of knowing about), it's going to
      924 +     // count events 7-20 as a WFT since there is started, completed, timer command, ..heartbeats..
      925 +     #[tokio::test]
      926 +     async fn needs_fetch_after_complete_seq_with_heartbeats() {
      927 +         let t = three_wfts_then_heartbeats();
      928 +         let mut ends_in_middle_of_seq = t.as_history_update().events;
      929 +         ends_in_middle_of_seq.truncate(20);
      930 +         let (mut update, remaining) = HistoryUpdate::from_events(ends_in_middle_of_seq, 0, false);
      931 +         assert!(remaining.is_empty());
      932 +         let seq = update.take_next_wft_sequence(0).unwrap_events();
      933 +         assert_eq!(seq.last().unwrap().event_id, 3);
      934 +         let seq = update.take_next_wft_sequence(3).unwrap_events();
      935 +         assert_eq!(seq.last().unwrap().event_id, 7);
      936 +         let seq = update.take_next_wft_sequence(7).unwrap_events();
      937 +         assert_eq!(seq.last().unwrap().event_id, 20);
      938 +         let next = update.take_next_wft_sequence(20);
      939 +         assert_matches!(next, NextWFT::NeedFetch);
      940 +     }
      941 +
      942 +     #[rstest::rstest]
      943 +     #[tokio::test]
      944 +     async fn paginator_works_with_wft_over_multiple_pages(
      945 +         #[values(10, 11, 12, 13, 14)] chunk_size: usize,
      946 +     ) {
      947 +         let t = three_wfts_then_heartbeats();
      948 +         let mut paginator = paginator_setup(t, chunk_size);
      949 +         let mut update = paginator.extract_next_update().await.unwrap();
      950 +         let mut last_id = 0;
      951 +         loop {
      952 +             let seq = update.take_next_wft_sequence(last_id);
      953 +             match seq {
      954 +                 NextWFT::WFT(seq, _) => {
      955 +                     last_id = seq.last().unwrap().event_id;
      956 +                 }
      957 +                 NextWFT::NeedFetch => {
      958 +                     update = paginator.extract_next_update().await.unwrap();
      959 +                 }
      960 +                 NextWFT::ReplayOver => break,
      961 +             }
      962 +         }
      963 +         assert_eq!(last_id, 160);
      964 +     }
      965 +
      966 +     #[tokio::test]
      967 +     async fn task_just_before_heartbeat_chain_is_taken() {
      968 +         let t = three_wfts_then_heartbeats();
      969 +         let mut update = t.as_history_update();
      970 +         let seq = update.take_next_wft_sequence(0).unwrap_events();
      971 +         assert_eq!(seq.last().unwrap().event_id, 3);
      972 +         let seq = update.take_next_wft_sequence(3).unwrap_events();
      973 +         assert_eq!(seq.last().unwrap().event_id, 7);
      974 +         let seq = update.take_next_wft_sequence(7).unwrap_events();
      975 +         assert_eq!(seq.last().unwrap().event_id, 158);
      976 +         let seq = update.take_next_wft_sequence(158).unwrap_events();
      977 +         assert_eq!(seq.last().unwrap().event_id, 160);
      978 +         assert_eq!(
      979 +             seq.last().unwrap().event_type(),
      980 +             EventType::WorkflowExecutionCompleted
      981 +         );
      982 +     }
      983 +
 498  984       #[tokio::test]
 499  985       async fn handles_cache_misses() {
 500  986           let timer_hist = canned_histories::single_timer("t");
 501  987           let partial_task = timer_hist.get_one_wft(2).unwrap();
      988 +         let prev_started_wft_id = partial_task.previous_started_event_id();
 502  989           let mut history_from_get: GetWorkflowExecutionHistoryResponse =
 503  990               timer_hist.get_history_info(2).unwrap().into();
 504  991           // Chop off the last event, which is WFT started, which server doesn't return in get
@@ -509,24 +996,116 @@ pub mod tests {
 509  996               .expect_get_workflow_execution_history()
 510  997               .returning(move |_, _, _| Ok(history_from_get.clone()));
 511  998
 512       -         let mut
 513       -
 514       -
 515       -
 516       -
 517       -
 518       -
 519       -
 520       -             ),
 521       -             1,
      999 +         let mut paginator = HistoryPaginator::new(
     1000 +             partial_task.into(),
     1001 +             prev_started_wft_id,
     1002 +             "wfid".to_string(),
     1003 +             "runid".to_string(),
     1004 +             // A cache miss means we'll try to fetch from start
     1005 +             NextPageToken::FetchFromStart,
     1006 +             Arc::new(mock_client),
 522 1007           );
     1008 +         let mut update = paginator.extract_next_update().await.unwrap();
 523 1009           // We expect if we try to take the first task sequence that the first event is the first
 524 1010           // event in the sequence.
 525       -         let seq = update.take_next_wft_sequence(0).
     1011 +         let seq = update.take_next_wft_sequence(0).unwrap_events();
 526 1012           assert_eq!(seq[0].event_id, 1);
 527       -         let seq = update.take_next_wft_sequence(3).
     1013 +         let seq = update.take_next_wft_sequence(3).unwrap_events();
 528 1014           // Verify anything extra (which should only ever be WFT started) was re-appended to the
 529 1015           // end of the event iteration after fetching the old history.
 530 1016           assert_eq!(seq.last().unwrap().event_id, 8);
 531 1017       }
     1018 +
     1019 +     #[test]
     1020 +     fn la_marker_chunking() {
     1021 +         let mut t = TestHistoryBuilder::default();
     1022 +         t.add_by_type(EventType::WorkflowExecutionStarted);
     1023 +         t.add_full_wf_task();
     1024 +         t.add_we_signaled("whatever", vec![]);
     1025 +         t.add_full_wf_task(); // started - 7
     1026 +         t.add_local_activity_result_marker(1, "hi", Default::default());
     1027 +         let act_s = t.add_activity_task_scheduled("1");
     1028 +         let act_st = t.add_activity_task_started(act_s);
     1029 +         t.add_activity_task_completed(act_s, act_st, Default::default());
     1030 +         t.add_workflow_task_scheduled_and_started();
     1031 +         t.add_workflow_task_timed_out();
     1032 +         t.add_workflow_task_scheduled_and_started();
     1033 +         t.add_workflow_task_timed_out();
     1034 +         t.add_workflow_task_scheduled_and_started();
     1035 +
     1036 +         let mut update = t.as_history_update();
     1037 +         let seq = next_check_peek(&mut update, 0);
     1038 +         assert_eq!(seq.len(), 3);
     1039 +         let seq = next_check_peek(&mut update, 3);
     1040 +         assert_eq!(seq.len(), 4);
     1041 +         let seq = next_check_peek(&mut update, 7);
     1042 +         assert_eq!(seq.len(), 13);
     1043 +     }
     1044 +
|
+
#[tokio::test]
|
|
1046
|
+
async fn handles_blank_fetch_response() {
|
|
1047
|
+
let timer_hist = canned_histories::single_timer("t");
|
|
1048
|
+
let partial_task = timer_hist.get_one_wft(2).unwrap();
|
|
1049
|
+
let prev_started_wft_id = partial_task.previous_started_event_id();
|
|
1050
|
+
let mut mock_client = mock_workflow_client();
|
|
1051
|
+
mock_client
|
|
1052
|
+
.expect_get_workflow_execution_history()
|
|
1053
|
+
.returning(move |_, _, _| Ok(Default::default()));
|
|
1054
|
+
|
|
1055
|
+
let mut paginator = HistoryPaginator::new(
|
|
1056
|
+
partial_task.into(),
|
|
1057
|
+
prev_started_wft_id,
|
|
1058
|
+
"wfid".to_string(),
|
|
1059
|
+
"runid".to_string(),
|
|
1060
|
+
// A cache miss means we'll try to fetch from start
|
|
1061
|
+
NextPageToken::FetchFromStart,
|
|
1062
|
+
Arc::new(mock_client),
|
|
1063
|
+
);
|
|
1064
|
+
let err = paginator.extract_next_update().await.unwrap_err();
|
|
1065
|
+
assert_matches!(err.code(), tonic::Code::DataLoss);
|
|
1066
|
+
}
|
|
1067
|
+
|
|
1068
|
+
#[tokio::test]
|
|
1069
|
+
async fn handles_empty_page_with_next_token() {
|
|
1070
|
+
let timer_hist = canned_histories::single_timer("t");
|
|
1071
|
+
let partial_task = timer_hist.get_one_wft(2).unwrap();
|
|
1072
|
+
let prev_started_wft_id = partial_task.previous_started_event_id();
|
|
1073
|
+
let full_resp: GetWorkflowExecutionHistoryResponse =
|
|
1074
|
+
timer_hist.get_full_history_info().unwrap().into();
|
|
1075
|
+
let mut mock_client = mock_workflow_client();
|
|
1076
|
+
mock_client
|
|
1077
|
+
.expect_get_workflow_execution_history()
|
|
1078
|
+
.returning(move |_, _, _| {
|
|
1079
|
+
Ok(GetWorkflowExecutionHistoryResponse {
|
|
1080
|
+
history: Some(History { events: vec![] }),
|
|
1081
|
+
raw_history: vec![],
|
|
1082
|
+
next_page_token: vec![2],
|
|
1083
|
+
archived: false,
|
|
1084
|
+
})
|
|
1085
|
+
})
|
|
1086
|
+
.times(1);
|
|
1087
|
+
mock_client
|
|
1088
|
+
.expect_get_workflow_execution_history()
|
|
1089
|
+
.returning(move |_, _, _| Ok(full_resp.clone()))
|
|
1090
|
+
.times(1);
|
|
1091
|
+
|
|
1092
|
+
let mut paginator = HistoryPaginator::new(
|
|
1093
|
+
partial_task.into(),
|
|
1094
|
+
prev_started_wft_id,
|
|
1095
|
+
"wfid".to_string(),
|
|
1096
|
+
"runid".to_string(),
|
|
1097
|
+
// A cache miss means we'll try to fetch from start
|
|
1098
|
+
NextPageToken::FetchFromStart,
|
|
1099
|
+
Arc::new(mock_client),
|
|
1100
|
+
);
|
|
1101
|
+
let mut update = paginator.extract_next_update().await.unwrap();
|
|
1102
|
+
let seq = update.take_next_wft_sequence(0).unwrap_events();
|
|
1103
|
+
assert_eq!(seq.last().unwrap().event_id, 3);
|
|
1104
|
+
let seq = update.take_next_wft_sequence(3).unwrap_events();
|
|
1105
|
+
assert_eq!(seq.last().unwrap().event_id, 8);
|
|
1106
|
+
assert_matches!(update.take_next_wft_sequence(8), NextWFT::ReplayOver);
|
|
1107
|
+
}
|
|
1108
|
+
|
|
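`handles_empty_page_with_next_token` leans on how mockall treats multiple expectations on the same method: with `.times(1)`, the first expectation serves exactly one call and is then saturated, so the retry falls through to the second. A minimal, self-contained sketch of that pattern with a hypothetical `Fetcher` type (not sdk-core's client):

```rust
use mockall::mock;

// Hypothetical struct used only to demonstrate expectation saturation.
mock! {
    Fetcher {
        fn fetch(&self) -> usize;
    }
}

#[test]
fn saturated_expectations_fall_through() {
    let mut m = MockFetcher::new();
    m.expect_fetch().returning(|| 0).times(1); // first call: the empty page
    m.expect_fetch().returning(|| 8).times(1); // second call: the full history
    assert_eq!(m.fetch(), 0);
    assert_eq!(m.fetch(), 8);
}
```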
     1109 +     // TODO: Test we dont re-feed pointless updates if fetching returns <= events we already
     1110 +     //  processed
 532 1111   }