temporalio 0.1.0 → 0.2.0
- checksums.yaml +4 -4
- data/Cargo.lock +4035 -0
- data/Cargo.toml +25 -0
- data/Gemfile +20 -0
- data/LICENSE +16 -15
- data/README.md +455 -195
- data/Rakefile +387 -0
- data/ext/Cargo.toml +25 -0
- data/lib/temporalio/activity/complete_async_error.rb +11 -0
- data/lib/temporalio/activity/context.rb +82 -77
- data/lib/temporalio/activity/definition.rb +77 -0
- data/lib/temporalio/activity/info.rb +42 -46
- data/lib/temporalio/activity.rb +49 -65
- data/lib/temporalio/api/batch/v1/message.rb +31 -0
- data/lib/temporalio/api/cloud/cloudservice/v1/request_response.rb +93 -0
- data/lib/temporalio/api/cloud/cloudservice/v1/service.rb +25 -0
- data/lib/temporalio/api/cloud/cloudservice.rb +3 -0
- data/lib/temporalio/api/cloud/identity/v1/message.rb +36 -0
- data/lib/temporalio/api/cloud/namespace/v1/message.rb +35 -0
- data/lib/temporalio/api/cloud/operation/v1/message.rb +27 -0
- data/lib/temporalio/api/cloud/region/v1/message.rb +23 -0
- data/lib/temporalio/api/command/v1/message.rb +46 -0
- data/lib/temporalio/api/common/v1/grpc_status.rb +23 -0
- data/lib/temporalio/api/common/v1/message.rb +41 -0
- data/lib/temporalio/api/enums/v1/batch_operation.rb +22 -0
- data/lib/temporalio/api/enums/v1/command_type.rb +21 -0
- data/lib/temporalio/api/enums/v1/common.rb +26 -0
- data/lib/temporalio/api/enums/v1/event_type.rb +21 -0
- data/lib/temporalio/api/enums/v1/failed_cause.rb +26 -0
- data/lib/temporalio/api/enums/v1/namespace.rb +23 -0
- data/lib/temporalio/api/enums/v1/query.rb +22 -0
- data/lib/temporalio/api/enums/v1/reset.rb +23 -0
- data/lib/temporalio/api/enums/v1/schedule.rb +21 -0
- data/lib/temporalio/api/enums/v1/task_queue.rb +25 -0
- data/lib/temporalio/api/enums/v1/update.rb +22 -0
- data/lib/temporalio/api/enums/v1/workflow.rb +30 -0
- data/lib/temporalio/api/errordetails/v1/message.rb +42 -0
- data/lib/temporalio/api/export/v1/message.rb +24 -0
- data/lib/temporalio/api/failure/v1/message.rb +35 -0
- data/lib/temporalio/api/filter/v1/message.rb +27 -0
- data/lib/temporalio/api/history/v1/message.rb +90 -0
- data/lib/temporalio/api/namespace/v1/message.rb +31 -0
- data/lib/temporalio/api/nexus/v1/message.rb +40 -0
- data/lib/temporalio/api/operatorservice/v1/request_response.rb +49 -0
- data/lib/temporalio/api/operatorservice/v1/service.rb +23 -0
- data/lib/temporalio/api/operatorservice.rb +3 -0
- data/lib/temporalio/api/protocol/v1/message.rb +23 -0
- data/lib/temporalio/api/query/v1/message.rb +27 -0
- data/lib/temporalio/api/replication/v1/message.rb +26 -0
- data/lib/temporalio/api/schedule/v1/message.rb +42 -0
- data/lib/temporalio/api/sdk/v1/enhanced_stack_trace.rb +25 -0
- data/lib/temporalio/api/sdk/v1/task_complete_metadata.rb +21 -0
- data/lib/temporalio/api/sdk/v1/user_metadata.rb +23 -0
- data/lib/temporalio/api/sdk/v1/workflow_metadata.rb +23 -0
- data/lib/temporalio/api/taskqueue/v1/message.rb +45 -0
- data/lib/temporalio/api/update/v1/message.rb +33 -0
- data/lib/temporalio/api/version/v1/message.rb +26 -0
- data/lib/temporalio/api/workflow/v1/message.rb +43 -0
- data/lib/temporalio/api/workflowservice/v1/request_response.rb +189 -0
- data/lib/temporalio/api/workflowservice/v1/service.rb +23 -0
- data/lib/temporalio/api/workflowservice.rb +3 -0
- data/lib/temporalio/api.rb +13 -0
- data/lib/temporalio/cancellation.rb +150 -0
- data/lib/temporalio/client/activity_id_reference.rb +32 -0
- data/lib/temporalio/client/async_activity_handle.rb +110 -0
- data/lib/temporalio/client/connection/cloud_service.rb +648 -0
- data/lib/temporalio/client/connection/operator_service.rb +249 -0
- data/lib/temporalio/client/connection/service.rb +41 -0
- data/lib/temporalio/client/connection/workflow_service.rb +1218 -0
- data/lib/temporalio/client/connection.rb +270 -0
- data/lib/temporalio/client/interceptor.rb +316 -0
- data/lib/temporalio/client/workflow_execution.rb +103 -0
- data/lib/temporalio/client/workflow_execution_count.rb +36 -0
- data/lib/temporalio/client/workflow_execution_status.rb +18 -0
- data/lib/temporalio/client/workflow_handle.rb +380 -177
- data/lib/temporalio/client/workflow_query_reject_condition.rb +14 -0
- data/lib/temporalio/client/workflow_update_handle.rb +67 -0
- data/lib/temporalio/client/workflow_update_wait_stage.rb +17 -0
- data/lib/temporalio/client.rb +366 -93
- data/lib/temporalio/common_enums.rb +24 -0
- data/lib/temporalio/converters/data_converter.rb +102 -0
- data/lib/temporalio/converters/failure_converter.rb +200 -0
- data/lib/temporalio/converters/payload_codec.rb +26 -0
- data/lib/temporalio/converters/payload_converter/binary_null.rb +34 -0
- data/lib/temporalio/converters/payload_converter/binary_plain.rb +35 -0
- data/lib/temporalio/converters/payload_converter/binary_protobuf.rb +42 -0
- data/lib/temporalio/converters/payload_converter/composite.rb +62 -0
- data/lib/temporalio/converters/payload_converter/encoding.rb +35 -0
- data/lib/temporalio/converters/payload_converter/json_plain.rb +44 -0
- data/lib/temporalio/converters/payload_converter/json_protobuf.rb +41 -0
- data/lib/temporalio/converters/payload_converter.rb +73 -0
- data/lib/temporalio/converters.rb +9 -0
- data/lib/temporalio/error/failure.rb +119 -94
- data/lib/temporalio/error.rb +147 -0
- data/lib/temporalio/internal/bridge/api/activity_result/activity_result.rb +34 -0
- data/lib/temporalio/internal/bridge/api/activity_task/activity_task.rb +31 -0
- data/lib/temporalio/internal/bridge/api/child_workflow/child_workflow.rb +33 -0
- data/lib/temporalio/internal/bridge/api/common/common.rb +26 -0
- data/lib/temporalio/internal/bridge/api/core_interface.rb +36 -0
- data/lib/temporalio/internal/bridge/api/external_data/external_data.rb +27 -0
- data/lib/temporalio/internal/bridge/api/workflow_activation/workflow_activation.rb +52 -0
- data/lib/temporalio/internal/bridge/api/workflow_commands/workflow_commands.rb +54 -0
- data/lib/temporalio/internal/bridge/api/workflow_completion/workflow_completion.rb +30 -0
- data/lib/temporalio/internal/bridge/api.rb +3 -0
- data/lib/temporalio/internal/bridge/client.rb +90 -0
- data/lib/temporalio/internal/bridge/runtime.rb +53 -0
- data/lib/temporalio/internal/bridge/testing.rb +46 -0
- data/lib/temporalio/internal/bridge/worker.rb +83 -0
- data/lib/temporalio/internal/bridge.rb +36 -0
- data/lib/temporalio/internal/client/implementation.rb +525 -0
- data/lib/temporalio/internal/proto_utils.rb +54 -0
- data/lib/temporalio/internal/worker/activity_worker.rb +345 -0
- data/lib/temporalio/internal/worker/multi_runner.rb +169 -0
- data/lib/temporalio/internal.rb +7 -0
- data/lib/temporalio/retry_policy.rb +39 -80
- data/lib/temporalio/runtime.rb +259 -13
- data/lib/temporalio/scoped_logger.rb +96 -0
- data/lib/temporalio/search_attributes.rb +300 -0
- data/lib/temporalio/testing/activity_environment.rb +132 -0
- data/lib/temporalio/testing/workflow_environment.rb +113 -88
- data/lib/temporalio/testing.rb +4 -169
- data/lib/temporalio/version.rb +3 -1
- data/lib/temporalio/worker/activity_executor/fiber.rb +49 -0
- data/lib/temporalio/worker/activity_executor/thread_pool.rb +254 -0
- data/lib/temporalio/worker/activity_executor.rb +55 -0
- data/lib/temporalio/worker/interceptor.rb +88 -0
- data/lib/temporalio/worker/tuner.rb +151 -0
- data/lib/temporalio/worker.rb +385 -163
- data/lib/temporalio/workflow_history.rb +22 -0
- data/lib/temporalio.rb +2 -7
- data/temporalio.gemspec +20 -38
- metadata +131 -596
- data/bridge/Cargo.lock +0 -2997
- data/bridge/Cargo.toml +0 -29
- data/bridge/sdk-core/ARCHITECTURE.md +0 -76
- data/bridge/sdk-core/Cargo.toml +0 -2
- data/bridge/sdk-core/LICENSE.txt +0 -23
- data/bridge/sdk-core/README.md +0 -117
- data/bridge/sdk-core/arch_docs/diagrams/README.md +0 -10
- data/bridge/sdk-core/arch_docs/diagrams/sticky_queues.puml +0 -40
- data/bridge/sdk-core/arch_docs/diagrams/workflow_internals.svg +0 -1
- data/bridge/sdk-core/arch_docs/sticky_queues.md +0 -51
- data/bridge/sdk-core/client/Cargo.toml +0 -40
- data/bridge/sdk-core/client/LICENSE.txt +0 -23
- data/bridge/sdk-core/client/src/lib.rs +0 -1462
- data/bridge/sdk-core/client/src/metrics.rs +0 -174
- data/bridge/sdk-core/client/src/raw.rs +0 -932
- data/bridge/sdk-core/client/src/retry.rs +0 -763
- data/bridge/sdk-core/client/src/workflow_handle/mod.rs +0 -185
- data/bridge/sdk-core/core/Cargo.toml +0 -129
- data/bridge/sdk-core/core/LICENSE.txt +0 -23
- data/bridge/sdk-core/core/benches/workflow_replay.rs +0 -76
- data/bridge/sdk-core/core/src/abstractions.rs +0 -355
- data/bridge/sdk-core/core/src/core_tests/activity_tasks.rs +0 -1049
- data/bridge/sdk-core/core/src/core_tests/child_workflows.rs +0 -221
- data/bridge/sdk-core/core/src/core_tests/determinism.rs +0 -270
- data/bridge/sdk-core/core/src/core_tests/local_activities.rs +0 -1046
- data/bridge/sdk-core/core/src/core_tests/mod.rs +0 -100
- data/bridge/sdk-core/core/src/core_tests/queries.rs +0 -893
- data/bridge/sdk-core/core/src/core_tests/replay_flag.rs +0 -65
- data/bridge/sdk-core/core/src/core_tests/workers.rs +0 -257
- data/bridge/sdk-core/core/src/core_tests/workflow_cancels.rs +0 -124
- data/bridge/sdk-core/core/src/core_tests/workflow_tasks.rs +0 -2433
- data/bridge/sdk-core/core/src/ephemeral_server/mod.rs +0 -609
- data/bridge/sdk-core/core/src/internal_flags.rs +0 -136
- data/bridge/sdk-core/core/src/lib.rs +0 -289
- data/bridge/sdk-core/core/src/pollers/mod.rs +0 -54
- data/bridge/sdk-core/core/src/pollers/poll_buffer.rs +0 -297
- data/bridge/sdk-core/core/src/protosext/mod.rs +0 -428
- data/bridge/sdk-core/core/src/replay/mod.rs +0 -215
- data/bridge/sdk-core/core/src/retry_logic.rs +0 -202
- data/bridge/sdk-core/core/src/telemetry/log_export.rs +0 -190
- data/bridge/sdk-core/core/src/telemetry/metrics.rs +0 -462
- data/bridge/sdk-core/core/src/telemetry/mod.rs +0 -423
- data/bridge/sdk-core/core/src/telemetry/prometheus_server.rs +0 -83
- data/bridge/sdk-core/core/src/test_help/mod.rs +0 -939
- data/bridge/sdk-core/core/src/worker/activities/activity_heartbeat_manager.rs +0 -536
- data/bridge/sdk-core/core/src/worker/activities/activity_task_poller_stream.rs +0 -89
- data/bridge/sdk-core/core/src/worker/activities/local_activities.rs +0 -1278
- data/bridge/sdk-core/core/src/worker/activities.rs +0 -557
- data/bridge/sdk-core/core/src/worker/client/mocks.rs +0 -107
- data/bridge/sdk-core/core/src/worker/client.rs +0 -389
- data/bridge/sdk-core/core/src/worker/mod.rs +0 -677
- data/bridge/sdk-core/core/src/worker/workflow/bridge.rs +0 -35
- data/bridge/sdk-core/core/src/worker/workflow/driven_workflow.rs +0 -99
- data/bridge/sdk-core/core/src/worker/workflow/history_update.rs +0 -1111
- data/bridge/sdk-core/core/src/worker/workflow/machines/activity_state_machine.rs +0 -964
- data/bridge/sdk-core/core/src/worker/workflow/machines/cancel_external_state_machine.rs +0 -294
- data/bridge/sdk-core/core/src/worker/workflow/machines/cancel_workflow_state_machine.rs +0 -168
- data/bridge/sdk-core/core/src/worker/workflow/machines/child_workflow_state_machine.rs +0 -918
- data/bridge/sdk-core/core/src/worker/workflow/machines/complete_workflow_state_machine.rs +0 -137
- data/bridge/sdk-core/core/src/worker/workflow/machines/continue_as_new_workflow_state_machine.rs +0 -158
- data/bridge/sdk-core/core/src/worker/workflow/machines/fail_workflow_state_machine.rs +0 -130
- data/bridge/sdk-core/core/src/worker/workflow/machines/local_activity_state_machine.rs +0 -1525
- data/bridge/sdk-core/core/src/worker/workflow/machines/mod.rs +0 -324
- data/bridge/sdk-core/core/src/worker/workflow/machines/modify_workflow_properties_state_machine.rs +0 -179
- data/bridge/sdk-core/core/src/worker/workflow/machines/patch_state_machine.rs +0 -659
- data/bridge/sdk-core/core/src/worker/workflow/machines/signal_external_state_machine.rs +0 -439
- data/bridge/sdk-core/core/src/worker/workflow/machines/timer_state_machine.rs +0 -435
- data/bridge/sdk-core/core/src/worker/workflow/machines/transition_coverage.rs +0 -175
- data/bridge/sdk-core/core/src/worker/workflow/machines/upsert_search_attributes_state_machine.rs +0 -249
- data/bridge/sdk-core/core/src/worker/workflow/machines/workflow_machines/local_acts.rs +0 -85
- data/bridge/sdk-core/core/src/worker/workflow/machines/workflow_machines.rs +0 -1280
- data/bridge/sdk-core/core/src/worker/workflow/machines/workflow_task_state_machine.rs +0 -269
- data/bridge/sdk-core/core/src/worker/workflow/managed_run/managed_wf_test.rs +0 -213
- data/bridge/sdk-core/core/src/worker/workflow/managed_run.rs +0 -1305
- data/bridge/sdk-core/core/src/worker/workflow/mod.rs +0 -1276
- data/bridge/sdk-core/core/src/worker/workflow/run_cache.rs +0 -128
- data/bridge/sdk-core/core/src/worker/workflow/wft_extraction.rs +0 -125
- data/bridge/sdk-core/core/src/worker/workflow/wft_poller.rs +0 -85
- data/bridge/sdk-core/core/src/worker/workflow/workflow_stream/saved_wf_inputs.rs +0 -117
- data/bridge/sdk-core/core/src/worker/workflow/workflow_stream/tonic_status_serde.rs +0 -24
- data/bridge/sdk-core/core/src/worker/workflow/workflow_stream.rs +0 -715
- data/bridge/sdk-core/core-api/Cargo.toml +0 -33
- data/bridge/sdk-core/core-api/LICENSE.txt +0 -23
- data/bridge/sdk-core/core-api/src/errors.rs +0 -62
- data/bridge/sdk-core/core-api/src/lib.rs +0 -113
- data/bridge/sdk-core/core-api/src/telemetry.rs +0 -141
- data/bridge/sdk-core/core-api/src/worker.rs +0 -161
- data/bridge/sdk-core/etc/deps.svg +0 -162
- data/bridge/sdk-core/etc/dynamic-config.yaml +0 -2
- data/bridge/sdk-core/etc/otel-collector-config.yaml +0 -36
- data/bridge/sdk-core/etc/prometheus.yaml +0 -6
- data/bridge/sdk-core/etc/regen-depgraph.sh +0 -5
- data/bridge/sdk-core/fsm/Cargo.toml +0 -18
- data/bridge/sdk-core/fsm/LICENSE.txt +0 -23
- data/bridge/sdk-core/fsm/README.md +0 -3
- data/bridge/sdk-core/fsm/rustfsm_procmacro/Cargo.toml +0 -27
- data/bridge/sdk-core/fsm/rustfsm_procmacro/LICENSE.txt +0 -23
- data/bridge/sdk-core/fsm/rustfsm_procmacro/src/lib.rs +0 -650
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/progress.rs +0 -8
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/dupe_transitions_fail.rs +0 -18
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/dupe_transitions_fail.stderr +0 -12
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/dynamic_dest_pass.rs +0 -41
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/forgot_name_fail.rs +0 -14
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/forgot_name_fail.stderr +0 -11
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/handler_arg_pass.rs +0 -32
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/handler_pass.rs +0 -31
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/medium_complex_pass.rs +0 -46
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/no_handle_conversions_require_into_fail.rs +0 -29
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/no_handle_conversions_require_into_fail.stderr +0 -12
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/simple_pass.rs +0 -32
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/struct_event_variant_fail.rs +0 -18
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/struct_event_variant_fail.stderr +0 -5
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/tuple_more_item_event_variant_fail.rs +0 -11
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/tuple_more_item_event_variant_fail.stderr +0 -5
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/tuple_zero_item_event_variant_fail.rs +0 -11
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/tuple_zero_item_event_variant_fail.stderr +0 -5
- data/bridge/sdk-core/fsm/rustfsm_trait/Cargo.toml +0 -14
- data/bridge/sdk-core/fsm/rustfsm_trait/LICENSE.txt +0 -23
- data/bridge/sdk-core/fsm/rustfsm_trait/src/lib.rs +0 -254
- data/bridge/sdk-core/fsm/src/lib.rs +0 -2
- data/bridge/sdk-core/histories/evict_while_la_running_no_interference-16_history.bin +0 -0
- data/bridge/sdk-core/histories/evict_while_la_running_no_interference-23_history.bin +0 -0
- data/bridge/sdk-core/histories/evict_while_la_running_no_interference-85_history.bin +0 -0
- data/bridge/sdk-core/histories/fail_wf_task.bin +0 -0
- data/bridge/sdk-core/histories/timer_workflow_history.bin +0 -0
- data/bridge/sdk-core/integ-with-otel.sh +0 -7
- data/bridge/sdk-core/protos/api_upstream/README.md +0 -9
- data/bridge/sdk-core/protos/api_upstream/api-linter.yaml +0 -40
- data/bridge/sdk-core/protos/api_upstream/buf.yaml +0 -9
- data/bridge/sdk-core/protos/api_upstream/build/go.mod +0 -7
- data/bridge/sdk-core/protos/api_upstream/build/go.sum +0 -5
- data/bridge/sdk-core/protos/api_upstream/build/tools.go +0 -29
- data/bridge/sdk-core/protos/api_upstream/dependencies/gogoproto/gogo.proto +0 -141
- data/bridge/sdk-core/protos/api_upstream/go.mod +0 -6
- data/bridge/sdk-core/protos/api_upstream/temporal/api/batch/v1/message.proto +0 -89
- data/bridge/sdk-core/protos/api_upstream/temporal/api/command/v1/message.proto +0 -248
- data/bridge/sdk-core/protos/api_upstream/temporal/api/common/v1/message.proto +0 -123
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/batch_operation.proto +0 -47
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/command_type.proto +0 -52
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/common.proto +0 -56
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/event_type.proto +0 -170
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/failed_cause.proto +0 -123
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/namespace.proto +0 -51
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/query.proto +0 -50
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/reset.proto +0 -41
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/schedule.proto +0 -60
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/task_queue.proto +0 -59
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/update.proto +0 -56
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/workflow.proto +0 -122
- data/bridge/sdk-core/protos/api_upstream/temporal/api/errordetails/v1/message.proto +0 -108
- data/bridge/sdk-core/protos/api_upstream/temporal/api/failure/v1/message.proto +0 -114
- data/bridge/sdk-core/protos/api_upstream/temporal/api/filter/v1/message.proto +0 -56
- data/bridge/sdk-core/protos/api_upstream/temporal/api/history/v1/message.proto +0 -787
- data/bridge/sdk-core/protos/api_upstream/temporal/api/namespace/v1/message.proto +0 -99
- data/bridge/sdk-core/protos/api_upstream/temporal/api/operatorservice/v1/request_response.proto +0 -124
- data/bridge/sdk-core/protos/api_upstream/temporal/api/operatorservice/v1/service.proto +0 -80
- data/bridge/sdk-core/protos/api_upstream/temporal/api/protocol/v1/message.proto +0 -57
- data/bridge/sdk-core/protos/api_upstream/temporal/api/query/v1/message.proto +0 -61
- data/bridge/sdk-core/protos/api_upstream/temporal/api/replication/v1/message.proto +0 -55
- data/bridge/sdk-core/protos/api_upstream/temporal/api/schedule/v1/message.proto +0 -379
- data/bridge/sdk-core/protos/api_upstream/temporal/api/sdk/v1/task_complete_metadata.proto +0 -63
- data/bridge/sdk-core/protos/api_upstream/temporal/api/taskqueue/v1/message.proto +0 -108
- data/bridge/sdk-core/protos/api_upstream/temporal/api/update/v1/message.proto +0 -111
- data/bridge/sdk-core/protos/api_upstream/temporal/api/version/v1/message.proto +0 -59
- data/bridge/sdk-core/protos/api_upstream/temporal/api/workflow/v1/message.proto +0 -146
- data/bridge/sdk-core/protos/api_upstream/temporal/api/workflowservice/v1/request_response.proto +0 -1199
- data/bridge/sdk-core/protos/api_upstream/temporal/api/workflowservice/v1/service.proto +0 -415
- data/bridge/sdk-core/protos/grpc/health/v1/health.proto +0 -63
- data/bridge/sdk-core/protos/local/temporal/sdk/core/activity_result/activity_result.proto +0 -79
- data/bridge/sdk-core/protos/local/temporal/sdk/core/activity_task/activity_task.proto +0 -80
- data/bridge/sdk-core/protos/local/temporal/sdk/core/child_workflow/child_workflow.proto +0 -78
- data/bridge/sdk-core/protos/local/temporal/sdk/core/common/common.proto +0 -16
- data/bridge/sdk-core/protos/local/temporal/sdk/core/core_interface.proto +0 -31
- data/bridge/sdk-core/protos/local/temporal/sdk/core/external_data/external_data.proto +0 -31
- data/bridge/sdk-core/protos/local/temporal/sdk/core/workflow_activation/workflow_activation.proto +0 -270
- data/bridge/sdk-core/protos/local/temporal/sdk/core/workflow_commands/workflow_commands.proto +0 -305
- data/bridge/sdk-core/protos/local/temporal/sdk/core/workflow_completion/workflow_completion.proto +0 -35
- data/bridge/sdk-core/protos/testsrv_upstream/api-linter.yaml +0 -38
- data/bridge/sdk-core/protos/testsrv_upstream/buf.yaml +0 -13
- data/bridge/sdk-core/protos/testsrv_upstream/dependencies/gogoproto/gogo.proto +0 -141
- data/bridge/sdk-core/protos/testsrv_upstream/temporal/api/testservice/v1/request_response.proto +0 -63
- data/bridge/sdk-core/protos/testsrv_upstream/temporal/api/testservice/v1/service.proto +0 -90
- data/bridge/sdk-core/rustfmt.toml +0 -1
- data/bridge/sdk-core/sdk/Cargo.toml +0 -48
- data/bridge/sdk-core/sdk/LICENSE.txt +0 -23
- data/bridge/sdk-core/sdk/src/activity_context.rs +0 -230
- data/bridge/sdk-core/sdk/src/app_data.rs +0 -37
- data/bridge/sdk-core/sdk/src/interceptors.rs +0 -50
- data/bridge/sdk-core/sdk/src/lib.rs +0 -861
- data/bridge/sdk-core/sdk/src/payload_converter.rs +0 -11
- data/bridge/sdk-core/sdk/src/workflow_context/options.rs +0 -295
- data/bridge/sdk-core/sdk/src/workflow_context.rs +0 -694
- data/bridge/sdk-core/sdk/src/workflow_future.rs +0 -500
- data/bridge/sdk-core/sdk-core-protos/Cargo.toml +0 -33
- data/bridge/sdk-core/sdk-core-protos/LICENSE.txt +0 -23
- data/bridge/sdk-core/sdk-core-protos/build.rs +0 -142
- data/bridge/sdk-core/sdk-core-protos/src/constants.rs +0 -7
- data/bridge/sdk-core/sdk-core-protos/src/history_builder.rs +0 -557
- data/bridge/sdk-core/sdk-core-protos/src/history_info.rs +0 -234
- data/bridge/sdk-core/sdk-core-protos/src/lib.rs +0 -2088
- data/bridge/sdk-core/sdk-core-protos/src/task_token.rs +0 -48
- data/bridge/sdk-core/sdk-core-protos/src/utilities.rs +0 -14
- data/bridge/sdk-core/test-utils/Cargo.toml +0 -38
- data/bridge/sdk-core/test-utils/src/canned_histories.rs +0 -1389
- data/bridge/sdk-core/test-utils/src/histfetch.rs +0 -28
- data/bridge/sdk-core/test-utils/src/lib.rs +0 -709
- data/bridge/sdk-core/test-utils/src/wf_input_saver.rs +0 -50
- data/bridge/sdk-core/test-utils/src/workflows.rs +0 -29
- data/bridge/sdk-core/tests/fuzzy_workflow.rs +0 -130
- data/bridge/sdk-core/tests/heavy_tests.rs +0 -265
- data/bridge/sdk-core/tests/integ_tests/client_tests.rs +0 -36
- data/bridge/sdk-core/tests/integ_tests/ephemeral_server_tests.rs +0 -150
- data/bridge/sdk-core/tests/integ_tests/heartbeat_tests.rs +0 -223
- data/bridge/sdk-core/tests/integ_tests/metrics_tests.rs +0 -239
- data/bridge/sdk-core/tests/integ_tests/polling_tests.rs +0 -90
- data/bridge/sdk-core/tests/integ_tests/queries_tests.rs +0 -314
- data/bridge/sdk-core/tests/integ_tests/visibility_tests.rs +0 -151
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/activities.rs +0 -902
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/appdata_propagation.rs +0 -61
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/cancel_external.rs +0 -60
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/cancel_wf.rs +0 -51
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/child_workflows.rs +0 -51
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/continue_as_new.rs +0 -64
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/determinism.rs +0 -47
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/local_activities.rs +0 -669
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/modify_wf_properties.rs +0 -54
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/patches.rs +0 -92
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/replay.rs +0 -228
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/resets.rs +0 -94
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/signals.rs +0 -171
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/stickyness.rs +0 -85
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/timers.rs +0 -120
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/upsert_search_attrs.rs +0 -77
- data/bridge/sdk-core/tests/integ_tests/workflow_tests.rs +0 -596
- data/bridge/sdk-core/tests/main.rs +0 -103
- data/bridge/sdk-core/tests/runner.rs +0 -132
- data/bridge/sdk-core/tests/wf_input_replay.rs +0 -32
- data/bridge/src/connection.rs +0 -202
- data/bridge/src/lib.rs +0 -494
- data/bridge/src/runtime.rs +0 -54
- data/bridge/src/test_server.rs +0 -153
- data/bridge/src/worker.rs +0 -197
- data/ext/Rakefile +0 -9
- data/lib/gen/dependencies/gogoproto/gogo_pb.rb +0 -14
- data/lib/gen/temporal/api/batch/v1/message_pb.rb +0 -50
- data/lib/gen/temporal/api/command/v1/message_pb.rb +0 -160
- data/lib/gen/temporal/api/common/v1/message_pb.rb +0 -73
- data/lib/gen/temporal/api/enums/v1/batch_operation_pb.rb +0 -33
- data/lib/gen/temporal/api/enums/v1/command_type_pb.rb +0 -37
- data/lib/gen/temporal/api/enums/v1/common_pb.rb +0 -42
- data/lib/gen/temporal/api/enums/v1/event_type_pb.rb +0 -68
- data/lib/gen/temporal/api/enums/v1/failed_cause_pb.rb +0 -79
- data/lib/gen/temporal/api/enums/v1/namespace_pb.rb +0 -37
- data/lib/gen/temporal/api/enums/v1/query_pb.rb +0 -31
- data/lib/gen/temporal/api/enums/v1/reset_pb.rb +0 -24
- data/lib/gen/temporal/api/enums/v1/schedule_pb.rb +0 -28
- data/lib/gen/temporal/api/enums/v1/task_queue_pb.rb +0 -30
- data/lib/gen/temporal/api/enums/v1/update_pb.rb +0 -25
- data/lib/gen/temporal/api/enums/v1/workflow_pb.rb +0 -89
- data/lib/gen/temporal/api/errordetails/v1/message_pb.rb +0 -84
- data/lib/gen/temporal/api/failure/v1/message_pb.rb +0 -83
- data/lib/gen/temporal/api/filter/v1/message_pb.rb +0 -40
- data/lib/gen/temporal/api/history/v1/message_pb.rb +0 -498
- data/lib/gen/temporal/api/namespace/v1/message_pb.rb +0 -64
- data/lib/gen/temporal/api/operatorservice/v1/request_response_pb.rb +0 -88
- data/lib/gen/temporal/api/operatorservice/v1/service_pb.rb +0 -20
- data/lib/gen/temporal/api/protocol/v1/message_pb.rb +0 -30
- data/lib/gen/temporal/api/query/v1/message_pb.rb +0 -38
- data/lib/gen/temporal/api/replication/v1/message_pb.rb +0 -37
- data/lib/gen/temporal/api/schedule/v1/message_pb.rb +0 -149
- data/lib/gen/temporal/api/sdk/v1/task_complete_metadata_pb.rb +0 -23
- data/lib/gen/temporal/api/taskqueue/v1/message_pb.rb +0 -73
- data/lib/gen/temporal/api/testservice/v1/request_response_pb.rb +0 -49
- data/lib/gen/temporal/api/testservice/v1/service_pb.rb +0 -21
- data/lib/gen/temporal/api/update/v1/message_pb.rb +0 -72
- data/lib/gen/temporal/api/version/v1/message_pb.rb +0 -41
- data/lib/gen/temporal/api/workflow/v1/message_pb.rb +0 -111
- data/lib/gen/temporal/api/workflowservice/v1/request_response_pb.rb +0 -798
- data/lib/gen/temporal/api/workflowservice/v1/service_pb.rb +0 -20
- data/lib/gen/temporal/sdk/core/activity_result/activity_result_pb.rb +0 -62
- data/lib/gen/temporal/sdk/core/activity_task/activity_task_pb.rb +0 -61
- data/lib/gen/temporal/sdk/core/child_workflow/child_workflow_pb.rb +0 -61
- data/lib/gen/temporal/sdk/core/common/common_pb.rb +0 -26
- data/lib/gen/temporal/sdk/core/core_interface_pb.rb +0 -40
- data/lib/gen/temporal/sdk/core/external_data/external_data_pb.rb +0 -31
- data/lib/gen/temporal/sdk/core/workflow_activation/workflow_activation_pb.rb +0 -171
- data/lib/gen/temporal/sdk/core/workflow_commands/workflow_commands_pb.rb +0 -200
- data/lib/gen/temporal/sdk/core/workflow_completion/workflow_completion_pb.rb +0 -41
- data/lib/temporalio/bridge/connect_options.rb +0 -15
- data/lib/temporalio/bridge/error.rb +0 -8
- data/lib/temporalio/bridge/retry_config.rb +0 -24
- data/lib/temporalio/bridge/tls_options.rb +0 -19
- data/lib/temporalio/bridge.rb +0 -14
- data/lib/temporalio/client/implementation.rb +0 -340
- data/lib/temporalio/connection/retry_config.rb +0 -44
- data/lib/temporalio/connection/service.rb +0 -20
- data/lib/temporalio/connection/test_service.rb +0 -92
- data/lib/temporalio/connection/tls_options.rb +0 -51
- data/lib/temporalio/connection/workflow_service.rb +0 -731
- data/lib/temporalio/connection.rb +0 -86
- data/lib/temporalio/data_converter.rb +0 -191
- data/lib/temporalio/error/workflow_failure.rb +0 -19
- data/lib/temporalio/errors.rb +0 -40
- data/lib/temporalio/failure_converter/base.rb +0 -26
- data/lib/temporalio/failure_converter/basic.rb +0 -319
- data/lib/temporalio/failure_converter.rb +0 -7
- data/lib/temporalio/interceptor/activity_inbound.rb +0 -22
- data/lib/temporalio/interceptor/activity_outbound.rb +0 -24
- data/lib/temporalio/interceptor/chain.rb +0 -28
- data/lib/temporalio/interceptor/client.rb +0 -127
- data/lib/temporalio/interceptor.rb +0 -22
- data/lib/temporalio/payload_codec/base.rb +0 -32
- data/lib/temporalio/payload_converter/base.rb +0 -24
- data/lib/temporalio/payload_converter/bytes.rb +0 -27
- data/lib/temporalio/payload_converter/composite.rb +0 -49
- data/lib/temporalio/payload_converter/encoding_base.rb +0 -35
- data/lib/temporalio/payload_converter/json.rb +0 -26
- data/lib/temporalio/payload_converter/nil.rb +0 -26
- data/lib/temporalio/payload_converter.rb +0 -14
- data/lib/temporalio/retry_state.rb +0 -35
- data/lib/temporalio/testing/time_skipping_handle.rb +0 -32
- data/lib/temporalio/testing/time_skipping_interceptor.rb +0 -23
- data/lib/temporalio/timeout_type.rb +0 -29
- data/lib/temporalio/worker/activity_runner.rb +0 -114
- data/lib/temporalio/worker/activity_worker.rb +0 -164
- data/lib/temporalio/worker/reactor.rb +0 -46
- data/lib/temporalio/worker/runner.rb +0 -63
- data/lib/temporalio/worker/sync_worker.rb +0 -124
- data/lib/temporalio/worker/thread_pool_executor.rb +0 -51
- data/lib/temporalio/workflow/async.rb +0 -46
- data/lib/temporalio/workflow/execution_info.rb +0 -54
- data/lib/temporalio/workflow/execution_status.rb +0 -36
- data/lib/temporalio/workflow/future.rb +0 -138
- data/lib/temporalio/workflow/id_reuse_policy.rb +0 -36
- data/lib/temporalio/workflow/info.rb +0 -76
- data/lib/temporalio/workflow/query_reject_condition.rb +0 -33
- data/lib/thermite_patch.rb +0 -23
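The shape of the change: the vendored Rust sources under data/bridge (including the whole sdk-core tree) and the generated lib/gen protobuf files are removed, replaced by a Cargo-built extension (data/Cargo.toml, data/ext) with glue under lib/temporalio/internal/bridge and API stubs under lib/temporalio/api, while the client, converter, and worker layers are rewritten. For orientation only, here is a minimal sketch of the reworked 0.2.0 client API suggested by the new client.rb and client/workflow_handle.rb; the address, namespace, workflow name, ID, and task queue are illustrative placeholders, not values taken from this diff.

  require 'temporalio/client'

  # Connect to a Temporal server (address and namespace are placeholders).
  client = Temporalio::Client.connect('localhost:7233', 'my-namespace')

  # Start a workflow, then block on its result via the returned handle
  # (see lib/temporalio/client/workflow_handle.rb in the file list above).
  handle = client.start_workflow(
    'SayHello',        # workflow type name (illustrative)
    'Temporal',        # workflow argument (illustrative)
    id: 'my-workflow-id',
    task_queue: 'my-task-queue'
  )
  puts handle.result

The deleted diff hunk below is the removed bridge file data/bridge/sdk-core/core/src/worker/workflow/history_update.rs (the only 1111-line deletion in the list above); the rendered view is truncated partway through the file.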
@@ -1,1111 +0,0 @@
|
|
1
|
-
use crate::{
|
2
|
-
protosext::ValidPollWFTQResponse,
|
3
|
-
worker::{
|
4
|
-
client::WorkerClient,
|
5
|
-
workflow::{CacheMissFetchReq, PermittedWFT, PreparedWFT},
|
6
|
-
},
|
7
|
-
};
|
8
|
-
use futures::{future::BoxFuture, FutureExt, Stream};
|
9
|
-
use itertools::Itertools;
|
10
|
-
use std::{
|
11
|
-
collections::VecDeque,
|
12
|
-
fmt::Debug,
|
13
|
-
future::Future,
|
14
|
-
mem,
|
15
|
-
mem::transmute,
|
16
|
-
pin::Pin,
|
17
|
-
sync::Arc,
|
18
|
-
task::{Context, Poll},
|
19
|
-
};
|
20
|
-
use temporal_sdk_core_protos::temporal::api::{
|
21
|
-
enums::v1::EventType,
|
22
|
-
history::v1::{history_event, History, HistoryEvent, WorkflowTaskCompletedEventAttributes},
|
23
|
-
};
|
24
|
-
use tracing::Instrument;
|
25
|
-
|
26
|
-
lazy_static::lazy_static! {
|
27
|
-
static ref EMPTY_FETCH_ERR: tonic::Status
|
28
|
-
= tonic::Status::data_loss("Fetched empty history page");
|
29
|
-
static ref EMPTY_TASK_ERR: tonic::Status
|
30
|
-
= tonic::Status::data_loss("Received an empty workflow task with no queries or history");
|
31
|
-
}
|
32
|
-
|
33
|
-
/// Represents one or more complete WFT sequences. History events are expected to be consumed from
|
34
|
-
/// it and applied to the state machines via [HistoryUpdate::take_next_wft_sequence]
|
35
|
-
#[cfg_attr(
|
36
|
-
feature = "save_wf_inputs",
|
37
|
-
derive(serde::Serialize, serde::Deserialize)
|
38
|
-
)]
|
39
|
-
pub struct HistoryUpdate {
|
40
|
-
events: Vec<HistoryEvent>,
|
41
|
-
/// The event ID of the last started WFT, as according to the WFT which this update was
|
42
|
-
/// extracted from. Hence, while processing multiple logical WFTs during replay which were part
|
43
|
-
/// of one large history fetched from server, multiple updates may have the same value here.
|
44
|
-
pub previous_wft_started_id: i64,
|
45
|
-
/// True if this update contains the final WFT in history, and no more attempts to extract
|
46
|
-
/// additional updates should be made.
|
47
|
-
has_last_wft: bool,
|
48
|
-
}
|
49
|
-
impl Debug for HistoryUpdate {
|
50
|
-
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
51
|
-
if self.is_real() {
|
52
|
-
write!(
|
53
|
-
f,
|
54
|
-
"HistoryUpdate(previous_started_event_id: {}, length: {}, first_event_id: {:?})",
|
55
|
-
self.previous_wft_started_id,
|
56
|
-
self.events.len(),
|
57
|
-
self.events.first().map(|e| e.event_id)
|
58
|
-
)
|
59
|
-
} else {
|
60
|
-
write!(f, "DummyHistoryUpdate")
|
61
|
-
}
|
62
|
-
}
|
63
|
-
}
|
64
|
-
|
65
|
-
#[derive(Debug)]
|
66
|
-
pub enum NextWFT {
|
67
|
-
ReplayOver,
|
68
|
-
WFT(Vec<HistoryEvent>, bool),
|
69
|
-
NeedFetch,
|
70
|
-
}
|
71
|
-
|
72
|
-
#[derive(derive_more::DebugCustom)]
|
73
|
-
#[debug(fmt = "HistoryPaginator(run_id: {run_id})")]
|
74
|
-
#[cfg_attr(
|
75
|
-
feature = "save_wf_inputs",
|
76
|
-
derive(serde::Serialize, serde::Deserialize),
|
77
|
-
serde(default = "HistoryPaginator::fake_deserialized")
|
78
|
-
)]
|
79
|
-
pub struct HistoryPaginator {
|
80
|
-
pub(crate) wf_id: String,
|
81
|
-
pub(crate) run_id: String,
|
82
|
-
pub(crate) previous_wft_started_id: i64,
|
83
|
-
|
84
|
-
#[cfg_attr(feature = "save_wf_inputs", serde(skip))]
|
85
|
-
client: Arc<dyn WorkerClient>,
|
86
|
-
#[cfg_attr(feature = "save_wf_inputs", serde(skip))]
|
87
|
-
event_queue: VecDeque<HistoryEvent>,
|
88
|
-
#[cfg_attr(feature = "save_wf_inputs", serde(skip))]
|
89
|
-
next_page_token: NextPageToken,
|
90
|
-
/// These are events that should be returned once pagination has finished. This only happens
|
91
|
-
/// during cache misses, where we got a partial task but need to fetch history from the start.
|
92
|
-
#[cfg_attr(feature = "save_wf_inputs", serde(skip))]
|
93
|
-
final_events: Vec<HistoryEvent>,
|
94
|
-
}
|
95
|
-
|
96
|
-
#[derive(Clone, Debug)]
|
97
|
-
pub enum NextPageToken {
|
98
|
-
/// There is no page token, we need to fetch history from the beginning
|
99
|
-
FetchFromStart,
|
100
|
-
/// There is a page token
|
101
|
-
Next(Vec<u8>),
|
102
|
-
/// There is no page token, we are done fetching history
|
103
|
-
Done,
|
104
|
-
}
|
105
|
-
|
106
|
-
// If we're converting from a page token from the server, if it's empty, then we're done.
|
107
|
-
impl From<Vec<u8>> for NextPageToken {
|
108
|
-
fn from(page_token: Vec<u8>) -> Self {
|
109
|
-
if page_token.is_empty() {
|
110
|
-
NextPageToken::Done
|
111
|
-
} else {
|
112
|
-
NextPageToken::Next(page_token)
|
113
|
-
}
|
114
|
-
}
|
115
|
-
}
|
116
|
-
|
117
|
-
impl HistoryPaginator {
|
118
|
-
/// Use a new poll response to create a new [WFTPaginator], returning it and the
|
119
|
-
/// [PreparedWFT] extracted from it that can be fed into workflow state.
|
120
|
-
pub(super) async fn from_poll(
|
121
|
-
wft: ValidPollWFTQResponse,
|
122
|
-
client: Arc<dyn WorkerClient>,
|
123
|
-
) -> Result<(Self, PreparedWFT), tonic::Status> {
|
124
|
-
let empty_hist = wft.history.events.is_empty();
|
125
|
-
let npt = if empty_hist {
|
126
|
-
NextPageToken::FetchFromStart
|
127
|
-
} else {
|
128
|
-
wft.next_page_token.into()
|
129
|
-
};
|
130
|
-
let mut paginator = HistoryPaginator::new(
|
131
|
-
wft.history,
|
132
|
-
wft.previous_started_event_id,
|
133
|
-
wft.workflow_execution.workflow_id.clone(),
|
134
|
-
wft.workflow_execution.run_id.clone(),
|
135
|
-
npt,
|
136
|
-
client,
|
137
|
-
);
|
138
|
-
if empty_hist && wft.legacy_query.is_none() && wft.query_requests.is_empty() {
|
139
|
-
return Err(EMPTY_TASK_ERR.clone());
|
140
|
-
}
|
141
|
-
let update = if empty_hist {
|
142
|
-
HistoryUpdate::from_events([], wft.previous_started_event_id, true).0
|
143
|
-
} else {
|
144
|
-
paginator.extract_next_update().await?
|
145
|
-
};
|
146
|
-
let prepared = PreparedWFT {
|
147
|
-
task_token: wft.task_token,
|
148
|
-
attempt: wft.attempt,
|
149
|
-
execution: wft.workflow_execution,
|
150
|
-
workflow_type: wft.workflow_type,
|
151
|
-
legacy_query: wft.legacy_query,
|
152
|
-
query_requests: wft.query_requests,
|
153
|
-
update,
|
154
|
-
};
|
155
|
-
Ok((paginator, prepared))
|
156
|
-
}
|
157
|
-
|
158
|
-
pub(super) async fn from_fetchreq(
|
159
|
-
mut req: CacheMissFetchReq,
|
160
|
-
client: Arc<dyn WorkerClient>,
|
161
|
-
) -> Result<PermittedWFT, tonic::Status> {
|
162
|
-
let mut paginator = Self {
|
163
|
-
wf_id: req.original_wft.work.execution.workflow_id.clone(),
|
164
|
-
run_id: req.original_wft.work.execution.run_id.clone(),
|
165
|
-
previous_wft_started_id: req.original_wft.work.update.previous_wft_started_id,
|
166
|
-
client,
|
167
|
-
event_queue: Default::default(),
|
168
|
-
next_page_token: NextPageToken::FetchFromStart,
|
169
|
-
final_events: vec![],
|
170
|
-
};
|
171
|
-
let first_update = paginator.extract_next_update().await?;
|
172
|
-
req.original_wft.work.update = first_update;
|
173
|
-
req.original_wft.paginator = paginator;
|
174
|
-
Ok(req.original_wft)
|
175
|
-
}
|
176
|
-
|
177
|
-
fn new(
|
178
|
-
initial_history: History,
|
179
|
-
previous_wft_started_id: i64,
|
180
|
-
wf_id: String,
|
181
|
-
run_id: String,
|
182
|
-
next_page_token: impl Into<NextPageToken>,
|
183
|
-
client: Arc<dyn WorkerClient>,
|
184
|
-
) -> Self {
|
185
|
-
let next_page_token = next_page_token.into();
|
186
|
-
let (event_queue, final_events) =
|
187
|
-
if matches!(next_page_token, NextPageToken::FetchFromStart) {
|
188
|
-
(VecDeque::new(), initial_history.events)
|
189
|
-
} else {
|
190
|
-
(initial_history.events.into(), vec![])
|
191
|
-
};
|
192
|
-
Self {
|
193
|
-
client,
|
194
|
-
event_queue,
|
195
|
-
wf_id,
|
196
|
-
run_id,
|
197
|
-
next_page_token,
|
198
|
-
final_events,
|
199
|
-
previous_wft_started_id,
|
200
|
-
}
|
201
|
-
}
|
202
|
-
|
203
|
-
#[cfg(feature = "save_wf_inputs")]
|
204
|
-
pub(super) fn fake_deserialized() -> HistoryPaginator {
|
205
|
-
use crate::worker::client::mocks::mock_manual_workflow_client;
|
206
|
-
HistoryPaginator {
|
207
|
-
client: Arc::new(mock_manual_workflow_client()),
|
208
|
-
event_queue: Default::default(),
|
209
|
-
wf_id: "".to_string(),
|
210
|
-
run_id: "".to_string(),
|
211
|
-
next_page_token: NextPageToken::FetchFromStart,
|
212
|
-
final_events: vec![],
|
213
|
-
previous_wft_started_id: -2,
|
214
|
-
}
|
215
|
-
}
|
216
|
-
|
217
|
-
/// Return at least the next two WFT sequences (as determined by the passed-in ID) as a
|
218
|
-
/// [HistoryUpdate]. Two sequences supports the required peek-ahead during replay without
|
219
|
-
/// unnecessary back-and-forth.
|
220
|
-
///
|
221
|
-
/// If there are already enough events buffered in memory, they will all be returned. Including
|
222
|
-
/// possibly (likely, during replay) more than just the next two WFTs.
|
223
|
-
///
|
224
|
-
/// If there are insufficient events to constitute two WFTs, then we will fetch pages until
|
225
|
-
/// we have two, or until we are at the end of history.
|
226
|
-
pub(crate) async fn extract_next_update(&mut self) -> Result<HistoryUpdate, tonic::Status> {
|
227
|
-
loop {
|
228
|
-
self.get_next_page().await?;
|
229
|
-
let current_events = mem::take(&mut self.event_queue);
|
230
|
-
if current_events.is_empty() {
|
231
|
-
// If next page fetching happened, and we still ended up with no events, something
|
232
|
-
// is wrong. We're expecting there to be more events to be able to extract this
|
233
|
-
// update, but server isn't giving us any. We have no choice except to give up and
|
234
|
-
// evict.
|
235
|
-
error!(
|
236
|
-
"We expected to be able to fetch more events but server says there are none"
|
237
|
-
);
|
238
|
-
return Err(EMPTY_FETCH_ERR.clone());
|
239
|
-
}
|
240
|
-
let first_event_id = current_events.front().unwrap().event_id;
|
241
|
-
// If there are some events at the end of the fetched events which represent only a
|
242
|
-
// portion of a complete WFT, retain them to be used in the next extraction.
|
243
|
-
let no_more = matches!(self.next_page_token, NextPageToken::Done);
|
244
|
-
let (update, extra) =
|
245
|
-
HistoryUpdate::from_events(current_events, self.previous_wft_started_id, no_more);
|
246
|
-
let extra_eid_same = extra
|
247
|
-
.first()
|
248
|
-
.map(|e| e.event_id == first_event_id)
|
249
|
-
.unwrap_or_default();
|
250
|
-
self.event_queue = extra.into();
|
251
|
-
if !no_more && extra_eid_same {
|
252
|
-
// There was not a meaningful WFT in the whole page. We must fetch more
|
253
|
-
continue;
|
254
|
-
}
|
255
|
-
return Ok(update);
|
256
|
-
}
|
257
|
-
}
|
258
|
-
|
259
|
-
/// Fetches the next page and adds it to the internal queue. Returns true if a fetch was
|
260
|
-
/// performed, false if there is no next page.
|
261
|
-
async fn get_next_page(&mut self) -> Result<bool, tonic::Status> {
|
262
|
-
let history = loop {
|
263
|
-
let npt = match mem::replace(&mut self.next_page_token, NextPageToken::Done) {
|
264
|
-
// If there's no open request and the last page token we got was empty, we're done.
|
265
|
-
NextPageToken::Done => return Ok(false),
|
266
|
-
NextPageToken::FetchFromStart => vec![],
|
267
|
-
NextPageToken::Next(v) => v,
|
268
|
-
};
|
269
|
-
debug!(run_id=%self.run_id, "Fetching new history page");
|
270
|
-
let fetch_res = self
|
271
|
-
.client
|
272
|
-
.get_workflow_execution_history(self.wf_id.clone(), Some(self.run_id.clone()), npt)
|
273
|
-
.instrument(span!(tracing::Level::TRACE, "fetch_history_in_paginator"))
|
274
|
-
.await?;
|
275
|
-
|
276
|
-
self.next_page_token = fetch_res.next_page_token.into();
|
277
|
-
|
278
|
-
let history_is_empty = fetch_res
|
279
|
-
.history
|
280
|
-
.as_ref()
|
281
|
-
.map(|h| h.events.is_empty())
|
282
|
-
.unwrap_or(true);
|
283
|
-
if history_is_empty && matches!(&self.next_page_token, NextPageToken::Next(_)) {
|
284
|
-
// If the fetch returned an empty history, but there *was* a next page token,
|
285
|
-
// immediately try to get that.
|
286
|
-
continue;
|
287
|
-
}
|
288
|
-
// Async doesn't love recursion so we do this instead.
|
289
|
-
break fetch_res.history;
|
290
|
-
};
|
291
|
-
|
292
|
-
self.event_queue
|
293
|
-
.extend(history.map(|h| h.events).unwrap_or_default());
|
294
|
-
if matches!(&self.next_page_token, NextPageToken::Done) {
|
295
|
-
// If finished, we need to extend the queue with the final events, skipping any
|
296
|
-
// which are already present.
|
297
|
-
if let Some(last_event_id) = self.event_queue.back().map(|e| e.event_id) {
|
298
|
-
let final_events = mem::take(&mut self.final_events);
|
299
|
-
self.event_queue.extend(
|
300
|
-
final_events
|
301
|
-
.into_iter()
|
302
|
-
.skip_while(|e2| e2.event_id <= last_event_id),
|
303
|
-
);
|
304
|
-
}
|
305
|
-
};
|
306
|
-
Ok(true)
|
307
|
-
}
|
308
|
-
}
|
309
|
-
|
310
|
-
#[pin_project::pin_project]
|
311
|
-
struct StreamingHistoryPaginator {
|
312
|
-
inner: HistoryPaginator,
|
313
|
-
#[pin]
|
314
|
-
open_history_request: Option<BoxFuture<'static, Result<(), tonic::Status>>>,
|
315
|
-
}
|
316
|
-
|
317
|
-
impl StreamingHistoryPaginator {
|
318
|
-
// Kept since can be used for history downloading
|
319
|
-
#[cfg(test)]
|
320
|
-
pub fn new(inner: HistoryPaginator) -> Self {
|
321
|
-
Self {
|
322
|
-
inner,
|
323
|
-
open_history_request: None,
|
324
|
-
}
|
325
|
-
}
|
326
|
-
}
|
327
|
-
|
328
|
-
impl Stream for StreamingHistoryPaginator {
|
329
|
-
type Item = Result<HistoryEvent, tonic::Status>;
|
330
|
-
|
331
|
-
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
|
332
|
-
let mut this = self.project();
|
333
|
-
|
334
|
-
if let Some(e) = this.inner.event_queue.pop_front() {
|
335
|
-
return Poll::Ready(Some(Ok(e)));
|
336
|
-
}
|
337
|
-
if this.open_history_request.is_none() {
|
338
|
-
// SAFETY: This is safe because the inner paginator cannot be dropped before the future,
|
339
|
-
// and the future won't be moved from out of this struct.
|
340
|
-
this.open_history_request.set(Some(unsafe {
|
341
|
-
transmute(HistoryPaginator::get_next_page(this.inner).boxed())
|
342
|
-
}));
|
343
|
-
}
|
344
|
-
let history_req = this.open_history_request.as_mut().as_pin_mut().unwrap();
|
345
|
-
|
346
|
-
match Future::poll(history_req, cx) {
|
347
|
-
Poll::Ready(resp) => {
|
348
|
-
this.open_history_request.set(None);
|
349
|
-
match resp {
|
350
|
-
Err(neterr) => Poll::Ready(Some(Err(neterr))),
|
351
|
-
Ok(_) => Poll::Ready(this.inner.event_queue.pop_front().map(Ok)),
|
352
|
-
}
|
353
|
-
}
|
354
|
-
Poll::Pending => Poll::Pending,
|
355
|
-
}
|
356
|
-
}
|
357
|
-
}
|
358
|
-
|
359
|
-
impl HistoryUpdate {
|
360
|
-
/// Sometimes it's useful to take an update out of something without needing to use an option
|
361
|
-
/// field. Use this to replace the field with an empty update.
|
362
|
-
pub fn dummy() -> Self {
|
363
|
-
Self {
|
364
|
-
events: vec![],
|
365
|
-
previous_wft_started_id: -1,
|
366
|
-
has_last_wft: false,
|
367
|
-
}
|
368
|
-
}
|
369
|
-
pub fn is_real(&self) -> bool {
|
370
|
-
self.previous_wft_started_id >= 0
|
371
|
-
}
|
372
|
-
pub fn first_event_id(&self) -> Option<i64> {
|
373
|
-
self.events.get(0).map(|e| e.event_id)
|
374
|
-
}
|
375
|
-
|
376
|
-
/// Create an instance of an update directly from events. If the passed in event iterator has a
|
377
|
-
/// partial WFT sequence at the end, all events after the last complete WFT sequence (ending
|
378
|
-
/// with WFT started) are returned back to the caller, since the history update only works in
|
379
|
-
/// terms of complete WFT sequences.
|
380
|
-
pub fn from_events<I: IntoIterator<Item = HistoryEvent>>(
|
381
|
-
events: I,
|
382
|
-
previous_wft_started_id: i64,
|
383
|
-
has_last_wft: bool,
|
384
|
-
) -> (Self, Vec<HistoryEvent>)
|
385
|
-
where
|
386
|
-
<I as IntoIterator>::IntoIter: Send + 'static,
|
387
|
-
{
|
388
|
-
let mut all_events: Vec<_> = events.into_iter().collect();
|
389
|
-
let mut last_end =
|
390
|
-
find_end_index_of_next_wft_seq(all_events.as_slice(), previous_wft_started_id);
|
391
|
-
if matches!(last_end, NextWFTSeqEndIndex::Incomplete(_)) {
|
392
|
-
return if has_last_wft {
|
393
|
-
(
|
394
|
-
Self {
|
395
|
-
events: all_events,
|
396
|
-
previous_wft_started_id,
|
397
|
-
has_last_wft,
|
398
|
-
},
|
399
|
-
vec![],
|
400
|
-
)
|
401
|
-
} else {
|
402
|
-
(
|
403
|
-
Self {
|
404
|
-
events: vec![],
|
405
|
-
previous_wft_started_id,
|
406
|
-
has_last_wft,
|
407
|
-
},
|
408
|
-
all_events,
|
409
|
-
)
|
410
|
-
};
|
411
|
-
}
|
412
|
-
while let NextWFTSeqEndIndex::Complete(next_end_ix) = last_end {
|
413
|
-
let next_end_eid = all_events[next_end_ix].event_id;
|
414
|
-
// To save skipping all events at the front of this slice, only pass the relevant
|
415
|
-
// portion, but that means the returned index must be adjusted, hence the addition.
|
416
|
-
let next_end = find_end_index_of_next_wft_seq(&all_events[next_end_ix..], next_end_eid)
|
417
|
-
.add(next_end_ix);
|
418
|
-
if matches!(next_end, NextWFTSeqEndIndex::Incomplete(_)) {
|
419
|
-
break;
|
420
|
-
}
|
421
|
-
last_end = next_end;
|
422
|
-
}
|
423
|
-
let remaining_events = if all_events.is_empty() {
|
424
|
-
vec![]
|
425
|
-
} else {
|
426
|
-
all_events.split_off(last_end.index() + 1)
|
427
|
-
};
|
428
|
-
|
429
|
-
(
|
430
|
-
Self {
|
431
|
-
events: all_events,
|
432
|
-
previous_wft_started_id,
|
433
|
-
has_last_wft,
|
434
|
-
},
|
435
|
-
remaining_events,
|
436
|
-
)
|
437
|
-
}
|
438
|
-
|
439
|
-
/// Create an instance of an update directly from events. The passed in events *must* consist
|
440
|
-
/// of one or more complete WFT sequences. IE: The event iterator must not end in the middle
|
441
|
-
/// of a WFT sequence.
|
442
|
-
#[cfg(test)]
|
443
|
-
pub fn new_from_events<I: IntoIterator<Item = HistoryEvent>>(
|
444
|
-
events: I,
|
445
|
-
previous_wft_started_id: i64,
|
446
|
-
) -> Self
|
447
|
-
where
|
448
|
-
<I as IntoIterator>::IntoIter: Send + 'static,
|
449
|
-
{
|
450
|
-
Self {
|
451
|
-
events: events.into_iter().collect(),
|
452
|
-
previous_wft_started_id,
|
453
|
-
has_last_wft: true,
|
454
|
-
}
|
455
|
-
}
|
456
|
-
|
457
|
-
/// Given a workflow task started id, return all events starting at that number (exclusive) to
|
458
|
-
/// the next WFT started event (inclusive).
|
459
|
-
///
|
460
|
-
/// Events are *consumed* by this process, to keep things efficient in workflow machines.
|
461
|
-
///
|
462
|
-
/// If we are out of WFT sequences that can be yielded by this update, it will return an empty
|
463
|
-
/// vec, indicating more pages will need to be fetched.
|
464
|
-
pub fn take_next_wft_sequence(&mut self, from_wft_started_id: i64) -> NextWFT {
|
465
|
-
// First, drop any events from the queue which are earlier than the passed-in id.
|
466
|
-
if let Some(ix_first_relevant) = self.starting_index_after_skipping(from_wft_started_id) {
|
467
|
-
self.events.drain(0..ix_first_relevant);
|
468
|
-
}
|
469
|
-
let next_wft_ix = find_end_index_of_next_wft_seq(&self.events, from_wft_started_id);
|
470
|
-
match next_wft_ix {
|
471
|
-
NextWFTSeqEndIndex::Incomplete(siz) => {
|
472
|
-
if self.has_last_wft {
|
473
|
-
if siz == 0 {
|
474
|
-
NextWFT::ReplayOver
|
475
|
-
} else {
|
476
|
-
self.build_next_wft(siz)
|
477
|
-
}
|
478
|
-
} else {
|
479
|
-
if siz != 0 {
|
480
|
-
panic!(
|
481
|
-
"HistoryUpdate was created with an incomplete WFT. This is an SDK bug."
|
482
|
-
);
|
483
|
-
}
|
484
|
-
NextWFT::NeedFetch
|
485
|
-
}
|
486
|
-
}
|
487
|
-
NextWFTSeqEndIndex::Complete(next_wft_ix) => self.build_next_wft(next_wft_ix),
|
488
|
-
}
|
489
|
-
}
|
490
|
-
|
491
|
-
fn build_next_wft(&mut self, drain_this_much: usize) -> NextWFT {
|
492
|
-
NextWFT::WFT(
|
493
|
-
self.events.drain(0..=drain_this_much).collect(),
|
494
|
-
self.events.is_empty() && self.has_last_wft,
|
495
|
-
)
|
496
|
-
}
|
497
|
-
|
498
|
-
/// Lets the caller peek ahead at the next WFT sequence that will be returned by
|
499
|
-
/// [take_next_wft_sequence]. Will always return the first available WFT sequence if that has
|
500
|
-
/// not been called first. May also return an empty iterator or incomplete sequence if we are at
|
501
|
-
/// the end of history.
|
502
|
-
pub fn peek_next_wft_sequence(&self, from_wft_started_id: i64) -> &[HistoryEvent] {
|
503
|
-
let ix_first_relevant = self
|
504
|
-
.starting_index_after_skipping(from_wft_started_id)
|
505
|
-
.unwrap_or_default();
|
506
|
-
let relevant_events = &self.events[ix_first_relevant..];
|
507
|
-
if relevant_events.is_empty() {
|
508
|
-
return relevant_events;
|
509
|
-
}
|
510
|
-
let ix_end = find_end_index_of_next_wft_seq(relevant_events, from_wft_started_id).index();
|
511
|
-
&relevant_events[0..=ix_end]
|
512
|
-
}
|
513
|
-
|
514
|
-
/// Returns true if this update has the next needed WFT sequence, false if events will need to
|
515
|
-
/// be fetched in order to create a complete update with the entire next WFT sequence.
|
516
|
-
pub fn can_take_next_wft_sequence(&self, from_wft_started_id: i64) -> bool {
|
517
|
-
let next_wft_ix = find_end_index_of_next_wft_seq(&self.events, from_wft_started_id);
|
518
|
-
if let NextWFTSeqEndIndex::Incomplete(_) = next_wft_ix {
|
519
|
-
if !self.has_last_wft {
|
520
|
-
return false;
|
521
|
-
}
|
522
|
-
}
|
523
|
-
true
|
524
|
-
}
|
525
|
-
|
526
|
-
/// Returns the next WFT completed event attributes, if any, starting at (inclusive) the
|
527
|
-
/// `from_id`
|
528
|
-
pub fn peek_next_wft_completed(
|
529
|
-
&self,
|
530
|
-
from_id: i64,
|
531
|
-
) -> Option<&WorkflowTaskCompletedEventAttributes> {
|
532
|
-
self.events
|
533
|
-
.iter()
|
534
|
-
.skip_while(|e| e.event_id < from_id)
|
535
|
-
.find_map(|e| match &e.attributes {
|
536
|
-
Some(history_event::Attributes::WorkflowTaskCompletedEventAttributes(ref a)) => {
|
537
|
-
Some(a)
|
538
|
-
}
|
539
|
-
_ => None,
|
540
|
-
})
|
541
|
-
}
|
542
|
-
|
543
|
-
fn starting_index_after_skipping(&self, from_wft_started_id: i64) -> Option<usize> {
|
544
|
-
self.events
|
545
|
-
.iter()
|
546
|
-
.find_position(|e| e.event_id > from_wft_started_id)
|
547
|
-
.map(|(ix, _)| ix)
|
548
|
-
}
|
549
|
-
}
|
550
|
-
|
551
|
-
#[derive(Debug, Copy, Clone)]
|
552
|
-
enum NextWFTSeqEndIndex {
|
553
|
-
/// The next WFT sequence is completely contained within the passed-in iterator
|
554
|
-
Complete(usize),
|
555
|
-
/// The next WFT sequence is not found within the passed-in iterator, and the contained
|
556
|
-
/// value is the last index of the iterator.
|
557
|
-
Incomplete(usize),
|
558
|
-
}
|
559
|
-
impl NextWFTSeqEndIndex {
|
560
|
-
fn index(self) -> usize {
|
561
|
-
match self {
|
562
|
-
NextWFTSeqEndIndex::Complete(ix) | NextWFTSeqEndIndex::Incomplete(ix) => ix,
|
563
|
-
}
|
564
|
-
}
|
565
|
-
fn add(self, val: usize) -> Self {
|
566
|
-
match self {
|
567
|
-
NextWFTSeqEndIndex::Complete(ix) => NextWFTSeqEndIndex::Complete(ix + val),
|
568
|
-
NextWFTSeqEndIndex::Incomplete(ix) => NextWFTSeqEndIndex::Incomplete(ix + val),
|
569
|
-
}
|
570
|
-
}
|
571
|
-
}
|
572
|
-
|
573
|
-
/// Discovers the index of the last event in next WFT sequence within the passed-in slice
|
574
|
-
fn find_end_index_of_next_wft_seq(
|
575
|
-
events: &[HistoryEvent],
|
576
|
-
from_event_id: i64,
|
577
|
-
) -> NextWFTSeqEndIndex {
|
578
|
-
if events.is_empty() {
|
579
|
-
return NextWFTSeqEndIndex::Incomplete(0);
|
580
|
-
}
|
581
|
-
let mut last_index = 0;
|
582
|
-
let mut saw_any_non_wft_event = false;
|
583
|
-
for (ix, e) in events.iter().enumerate() {
|
584
|
-
last_index = ix;
|
585
|
-
|
586
|
-
// It's possible to have gotten a new history update without eviction (ex: unhandled
|
587
|
-
// command on completion), where we may need to skip events we already handled.
|
588
|
-
if e.event_id <= from_event_id {
|
589
|
-
continue;
|
590
|
-
}
|
591
|
-
|
592
|
-
if !matches!(
|
593
|
-
e.event_type(),
|
594
|
-
EventType::WorkflowTaskFailed
|
595
|
-
| EventType::WorkflowTaskTimedOut
|
596
|
-
| EventType::WorkflowTaskScheduled
|
597
|
-
| EventType::WorkflowTaskStarted
|
598
|
-
| EventType::WorkflowTaskCompleted
|
599
|
-
) {
|
600
|
-
saw_any_non_wft_event = true;
|
601
|
-
}
|
602
|
-
if e.is_final_wf_execution_event() {
|
603
|
-
return NextWFTSeqEndIndex::Complete(last_index);
|
604
|
-
}
|
605
|
-
|
606
|
-
if e.event_type() == EventType::WorkflowTaskStarted {
|
607
|
-
if let Some(next_event) = events.get(ix + 1) {
|
608
|
-
let et = next_event.event_type();
|
609
|
-
// If the next event is WFT timeout or fail, or abrupt WF execution end, that
|
610
|
-
// doesn't conclude a WFT sequence.
|
611
|
-
if matches!(
|
612
|
-
et,
|
613
|
-
EventType::WorkflowTaskFailed
|
614
|
-
| EventType::WorkflowTaskTimedOut
|
615
|
-
| EventType::WorkflowExecutionTimedOut
|
616
|
-
| EventType::WorkflowExecutionTerminated
|
617
|
-
| EventType::WorkflowExecutionCanceled
|
618
|
-
) {
|
619
|
-
continue;
|
620
|
-
}
|
621
|
-
// If we've never seen an interesting event and the next two events are a completion
|
622
|
-
// followed immediately again by scheduled, then this is a WFT heartbeat and also
|
623
|
-
// doesn't conclude the sequence.
|
624
|
-
else if et == EventType::WorkflowTaskCompleted {
|
625
|
-
if let Some(next_next_event) = events.get(ix + 2) {
|
626
|
-
if next_next_event.event_type() == EventType::WorkflowTaskScheduled {
|
627
|
-
continue;
|
628
|
-
} else {
|
629
|
-
saw_any_non_wft_event = true;
|
630
|
-
}
|
631
|
-
}
|
632
|
-
}
|
633
|
-
}
|
634
|
-
if saw_any_non_wft_event {
|
635
|
-
return NextWFTSeqEndIndex::Complete(ix);
|
636
|
-
}
|
637
|
-
}
|
638
|
-
}
|
639
|
-
|
640
|
-
NextWFTSeqEndIndex::Incomplete(last_index)
|
641
|
-
}
-
-#[cfg(test)]
-pub mod tests {
-    use super::*;
-    use crate::{
-        replay::{HistoryInfo, TestHistoryBuilder},
-        test_help::canned_histories,
-        worker::client::mocks::mock_workflow_client,
-    };
-    use futures_util::TryStreamExt;
-    use temporal_sdk_core_protos::temporal::api::workflowservice::v1::GetWorkflowExecutionHistoryResponse;
-
-    impl From<HistoryInfo> for HistoryUpdate {
-        fn from(v: HistoryInfo) -> Self {
-            Self::new_from_events(v.events().to_vec(), v.previous_started_event_id())
-        }
-    }
-
-    pub trait TestHBExt {
-        fn as_history_update(&self) -> HistoryUpdate;
-    }
-
-    impl TestHBExt for TestHistoryBuilder {
-        fn as_history_update(&self) -> HistoryUpdate {
-            self.get_full_history_info().unwrap().into()
-        }
-    }
-
-    impl NextWFT {
-        fn unwrap_events(self) -> Vec<HistoryEvent> {
-            match self {
-                NextWFT::WFT(e, _) => e,
-                o => panic!("Must be complete WFT: {o:?}"),
-            }
-        }
-    }
-
-    fn next_check_peek(update: &mut HistoryUpdate, from_id: i64) -> Vec<HistoryEvent> {
-        let seq_peeked = update.peek_next_wft_sequence(from_id).to_vec();
-        let seq = update.take_next_wft_sequence(from_id).unwrap_events();
-        assert_eq!(seq, seq_peeked);
-        seq
-    }
-
-    #[test]
-    fn consumes_standard_wft_sequence() {
-        let timer_hist = canned_histories::single_timer("t");
-        let mut update = timer_hist.as_history_update();
-        let seq_1 = next_check_peek(&mut update, 0);
-        assert_eq!(seq_1.len(), 3);
-        assert_eq!(seq_1.last().unwrap().event_id, 3);
-        let seq_2_peeked = update.peek_next_wft_sequence(0).to_vec();
-        let seq_2 = next_check_peek(&mut update, 3);
-        assert_eq!(seq_2, seq_2_peeked);
-        assert_eq!(seq_2.len(), 5);
-        assert_eq!(seq_2.last().unwrap().event_id, 8);
-    }
-
-    #[test]
-    fn skips_wft_failed() {
-        let failed_hist = canned_histories::workflow_fails_with_reset_after_timer("t", "runid");
-        let mut update = failed_hist.as_history_update();
-        let seq_1 = next_check_peek(&mut update, 0);
-        assert_eq!(seq_1.len(), 3);
-        assert_eq!(seq_1.last().unwrap().event_id, 3);
-        let seq_2 = next_check_peek(&mut update, 3);
-        assert_eq!(seq_2.len(), 8);
-        assert_eq!(seq_2.last().unwrap().event_id, 11);
-    }
-
-    #[test]
-    fn skips_wft_timeout() {
-        let failed_hist = canned_histories::wft_timeout_repro();
-        let mut update = failed_hist.as_history_update();
-        let seq_1 = next_check_peek(&mut update, 0);
-        assert_eq!(seq_1.len(), 3);
-        assert_eq!(seq_1.last().unwrap().event_id, 3);
-        let seq_2 = next_check_peek(&mut update, 3);
-        assert_eq!(seq_2.len(), 11);
-        assert_eq!(seq_2.last().unwrap().event_id, 14);
-    }
-
-    #[test]
-    fn skips_events_before_desired_wft() {
-        let timer_hist = canned_histories::single_timer("t");
-        let mut update = timer_hist.as_history_update();
-        // We haven't processed the first 3 events, but we should still only get the second sequence
-        let seq_2 = update.take_next_wft_sequence(3).unwrap_events();
-        assert_eq!(seq_2.len(), 5);
-        assert_eq!(seq_2.last().unwrap().event_id, 8);
-    }
-
-    #[test]
-    fn history_ends_abruptly() {
-        let mut timer_hist = canned_histories::single_timer("t");
-        timer_hist.add_workflow_execution_terminated();
-        let mut update = timer_hist.as_history_update();
-        let seq_2 = update.take_next_wft_sequence(3).unwrap_events();
-        assert_eq!(seq_2.len(), 6);
-        assert_eq!(seq_2.last().unwrap().event_id, 9);
-    }
-
-    #[test]
-    fn heartbeats_skipped() {
-        let mut t = TestHistoryBuilder::default();
-        t.add_by_type(EventType::WorkflowExecutionStarted);
-        t.add_full_wf_task();
-        t.add_full_wf_task(); // wft started 6
-        t.add_by_type(EventType::TimerStarted);
-        t.add_full_wf_task(); // wft started 10
-        t.add_full_wf_task();
-        t.add_full_wf_task();
-        t.add_full_wf_task(); // wft started 19
-        t.add_by_type(EventType::TimerStarted);
-        t.add_full_wf_task(); // wft started 23
-        t.add_we_signaled("whee", vec![]);
-        t.add_full_wf_task();
-        t.add_workflow_execution_completed();
-
-        let mut update = t.as_history_update();
-        let seq = next_check_peek(&mut update, 0);
-        assert_eq!(seq.len(), 6);
-        let seq = next_check_peek(&mut update, 6);
-        assert_eq!(seq.len(), 13);
-        let seq = next_check_peek(&mut update, 19);
-        assert_eq!(seq.len(), 4);
-        let seq = next_check_peek(&mut update, 23);
-        assert_eq!(seq.len(), 4);
-        let seq = next_check_peek(&mut update, 27);
-        assert_eq!(seq.len(), 2);
-    }
-
-    #[test]
-    fn heartbeat_marker_end() {
-        let mut t = TestHistoryBuilder::default();
-        t.add_by_type(EventType::WorkflowExecutionStarted);
-        t.add_full_wf_task();
-        t.add_full_wf_task();
-        t.add_local_activity_result_marker(1, "1", "done".into());
-        t.add_workflow_execution_completed();
-
-        let mut update = t.as_history_update();
-        let seq = next_check_peek(&mut update, 3);
-        // completed, sched, started
-        assert_eq!(seq.len(), 3);
-        let seq = next_check_peek(&mut update, 6);
-        assert_eq!(seq.len(), 3);
-    }
-
-    fn paginator_setup(history: TestHistoryBuilder, chunk_size: usize) -> HistoryPaginator {
-        let full_hist = history.get_full_history_info().unwrap().into_events();
-        let initial_hist = full_hist.chunks(chunk_size).next().unwrap().to_vec();
-        let mut mock_client = mock_workflow_client();
-
-        let mut npt = 1;
-        mock_client
-            .expect_get_workflow_execution_history()
-            .returning(move |_, _, passed_npt| {
-                assert_eq!(passed_npt, vec![npt]);
-                let mut hist_chunks = full_hist.chunks(chunk_size).peekable();
-                let next_chunks = hist_chunks.nth(npt.into()).unwrap_or_default();
-                npt += 1;
-                let next_page_token = if hist_chunks.peek().is_none() {
-                    vec![]
-                } else {
-                    vec![npt]
-                };
-                Ok(GetWorkflowExecutionHistoryResponse {
-                    history: Some(History {
-                        events: next_chunks.into(),
-                    }),
-                    raw_history: vec![],
-                    next_page_token,
-                    archived: false,
-                })
-            });
-
-        HistoryPaginator::new(
-            History {
-                events: initial_hist,
-            },
-            0,
-            "wfid".to_string(),
-            "runid".to_string(),
-            vec![1],
-            Arc::new(mock_client),
-        )
-    }
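The mock in `paginator_setup` above serves history one fixed-size chunk per call, echoing the requested page token and bumping it until the chunks run out. The same token handshake, reduced to plain integers (a sketch with a hypothetical `fetch_page` helper, not an SDK API):

```rust
// Minimal model of the paging contract the mock implements: a request
// carries a token, the response carries the next token, and an absent
// token means the history is exhausted.
fn fetch_page(full: &[i64], chunk_size: usize, token: usize) -> (Vec<i64>, Option<usize>) {
    let mut chunks = full.chunks(chunk_size).skip(token).peekable();
    let page = chunks.next().unwrap_or_default().to_vec();
    let next = if chunks.peek().is_some() { Some(token + 1) } else { None };
    (page, next)
}

fn main() {
    let history: Vec<i64> = (1..=25).collect();
    let mut token = Some(0);
    let mut seen = Vec::new();
    while let Some(t) = token {
        let (page, next) = fetch_page(&history, 10, t);
        seen.extend(page);
        token = next;
    }
    assert_eq!(seen, history); // pages reassemble the full history in order
}
```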
-
-    #[rstest::rstest]
-    #[tokio::test]
-    async fn paginator_extracts_updates(#[values(10, 11, 12, 13, 14)] chunk_size: usize) {
-        let wft_count = 100;
-        let mut paginator = paginator_setup(
-            canned_histories::long_sequential_timers(wft_count),
-            chunk_size,
-        );
-        let mut update = paginator.extract_next_update().await.unwrap();
-
-        let seq = update.take_next_wft_sequence(0).unwrap_events();
-        assert_eq!(seq.len(), 3);
-
-        let mut last_event_id = 3;
-        let mut last_started_id = 3;
-        for i in 1..wft_count {
-            let seq = {
-                match update.take_next_wft_sequence(last_started_id) {
-                    NextWFT::WFT(seq, _) => seq,
-                    NextWFT::NeedFetch => {
-                        update = paginator.extract_next_update().await.unwrap();
-                        update
-                            .take_next_wft_sequence(last_started_id)
-                            .unwrap_events()
-                    }
-                    NextWFT::ReplayOver => {
-                        assert_eq!(i, wft_count - 1);
-                        break;
-                    }
-                }
-            };
-            for e in &seq {
-                last_event_id += 1;
-                assert_eq!(e.event_id, last_event_id);
-            }
-            assert_eq!(seq.len(), 5);
-            last_started_id += 5;
-        }
-    }
-
-    #[tokio::test]
-    async fn paginator_streams() {
-        let wft_count = 10;
-        let paginator = StreamingHistoryPaginator::new(paginator_setup(
-            canned_histories::long_sequential_timers(wft_count),
-            10,
-        ));
-        let everything: Vec<_> = paginator.try_collect().await.unwrap();
-        assert_eq!(everything.len(), (wft_count + 1) * 5);
-        everything.iter().fold(1, |event_id, e| {
-            assert_eq!(event_id, e.event_id);
-            e.event_id + 1
-        });
-    }
-
-    fn three_wfts_then_heartbeats() -> TestHistoryBuilder {
-        let mut t = TestHistoryBuilder::default();
-        // Start with two complete normal WFTs
-        t.add_by_type(EventType::WorkflowExecutionStarted);
-        t.add_full_wf_task(); // wft start - 3
-        t.add_by_type(EventType::TimerStarted);
-        t.add_full_wf_task(); // wft start - 7
-        t.add_by_type(EventType::TimerStarted);
-        t.add_full_wf_task(); // wft start - 11
-        for _ in 1..50 {
-            // Add a bunch of heartbeats with no commands, which count as one task
-            t.add_full_wf_task();
-        }
-        t.add_workflow_execution_completed();
-        t
-    }
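As a quick cross-check on the canned history above, the event-id arithmetic lines up with the assertions used in the tests that follow (a worked count, not SDK code):

```rust
// Worked event-id count for three_wfts_then_heartbeats; the numbers 158
// and 160 match the assertions in the tests below.
fn main() {
    let third_wft_started = 11; // per the "wft start - 11" comment above
    let heartbeats = 49; // `for _ in 1..50` adds 49 no-command tasks
    let last_started = third_wft_started + heartbeats * 3; // each task adds 3 events
    let final_event = last_started + 2; // task completed + WorkflowExecutionCompleted
    assert_eq!((last_started, final_event), (158, 160));
}
```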
-
-    #[tokio::test]
-    async fn needs_fetch_if_ending_in_middle_of_wft_seq() {
-        let t = three_wfts_then_heartbeats();
-        let mut ends_in_middle_of_seq = t.as_history_update().events;
-        ends_in_middle_of_seq.truncate(19);
-        // The update should contain the first two complete WFTs, ending on the 8th event, which
-        // is WFT started. The remaining events should be returned. A false flag means the
-        // creator knows there are more events, so we should return need-fetch.
-        let (mut update, remaining) = HistoryUpdate::from_events(ends_in_middle_of_seq, 0, false);
-        assert_eq!(remaining[0].event_id, 8);
-        assert_eq!(remaining.last().unwrap().event_id, 19);
-        let seq = update.take_next_wft_sequence(0).unwrap_events();
-        assert_eq!(seq.last().unwrap().event_id, 3);
-        let seq = update.take_next_wft_sequence(3).unwrap_events();
-        assert_eq!(seq.last().unwrap().event_id, 7);
-        let next = update.take_next_wft_sequence(7);
-        assert_matches!(next, NextWFT::NeedFetch);
-    }
-
-    // Like the above, but if the history happens to be cut off at a WFT boundary (even though
-    // there may have been many heartbeats after it that we have no way of knowing about), it's
-    // going to count events 7-20 as a WFT, since there is started, completed, timer command,
-    // ..heartbeats..
-    #[tokio::test]
-    async fn needs_fetch_after_complete_seq_with_heartbeats() {
-        let t = three_wfts_then_heartbeats();
-        let mut ends_in_middle_of_seq = t.as_history_update().events;
-        ends_in_middle_of_seq.truncate(20);
-        let (mut update, remaining) = HistoryUpdate::from_events(ends_in_middle_of_seq, 0, false);
-        assert!(remaining.is_empty());
-        let seq = update.take_next_wft_sequence(0).unwrap_events();
-        assert_eq!(seq.last().unwrap().event_id, 3);
-        let seq = update.take_next_wft_sequence(3).unwrap_events();
-        assert_eq!(seq.last().unwrap().event_id, 7);
-        let seq = update.take_next_wft_sequence(7).unwrap_events();
-        assert_eq!(seq.last().unwrap().event_id, 20);
-        let next = update.take_next_wft_sequence(20);
-        assert_matches!(next, NextWFT::NeedFetch);
-    }
-
-    #[rstest::rstest]
-    #[tokio::test]
-    async fn paginator_works_with_wft_over_multiple_pages(
-        #[values(10, 11, 12, 13, 14)] chunk_size: usize,
-    ) {
-        let t = three_wfts_then_heartbeats();
-        let mut paginator = paginator_setup(t, chunk_size);
-        let mut update = paginator.extract_next_update().await.unwrap();
-        let mut last_id = 0;
-        loop {
-            let seq = update.take_next_wft_sequence(last_id);
-            match seq {
-                NextWFT::WFT(seq, _) => {
-                    last_id = seq.last().unwrap().event_id;
-                }
-                NextWFT::NeedFetch => {
-                    update = paginator.extract_next_update().await.unwrap();
-                }
-                NextWFT::ReplayOver => break,
-            }
-        }
-        assert_eq!(last_id, 160);
-    }
-
-    #[tokio::test]
-    async fn task_just_before_heartbeat_chain_is_taken() {
-        let t = three_wfts_then_heartbeats();
-        let mut update = t.as_history_update();
-        let seq = update.take_next_wft_sequence(0).unwrap_events();
-        assert_eq!(seq.last().unwrap().event_id, 3);
-        let seq = update.take_next_wft_sequence(3).unwrap_events();
-        assert_eq!(seq.last().unwrap().event_id, 7);
-        let seq = update.take_next_wft_sequence(7).unwrap_events();
-        assert_eq!(seq.last().unwrap().event_id, 158);
-        let seq = update.take_next_wft_sequence(158).unwrap_events();
-        assert_eq!(seq.last().unwrap().event_id, 160);
-        assert_eq!(
-            seq.last().unwrap().event_type(),
-            EventType::WorkflowExecutionCompleted
-        );
-    }
-
-    #[tokio::test]
-    async fn handles_cache_misses() {
-        let timer_hist = canned_histories::single_timer("t");
-        let partial_task = timer_hist.get_one_wft(2).unwrap();
-        let prev_started_wft_id = partial_task.previous_started_event_id();
-        let mut history_from_get: GetWorkflowExecutionHistoryResponse =
-            timer_hist.get_history_info(2).unwrap().into();
-        // Chop off the last event, which is WFT started, which the server doesn't return in
-        // get history
-        history_from_get.history.as_mut().map(|h| h.events.pop());
-        let mut mock_client = mock_workflow_client();
-        mock_client
-            .expect_get_workflow_execution_history()
-            .returning(move |_, _, _| Ok(history_from_get.clone()));
-
-        let mut paginator = HistoryPaginator::new(
-            partial_task.into(),
-            prev_started_wft_id,
-            "wfid".to_string(),
-            "runid".to_string(),
-            // A cache miss means we'll try to fetch from start
-            NextPageToken::FetchFromStart,
-            Arc::new(mock_client),
-        );
-        let mut update = paginator.extract_next_update().await.unwrap();
-        // We expect that if we try to take the first task sequence, the first event is the
-        // first event in the sequence.
-        let seq = update.take_next_wft_sequence(0).unwrap_events();
-        assert_eq!(seq[0].event_id, 1);
-        let seq = update.take_next_wft_sequence(3).unwrap_events();
-        // Verify anything extra (which should only ever be WFT started) was re-appended to the
-        // end of the event iteration after fetching the old history.
-        assert_eq!(seq.last().unwrap().event_id, 8);
-    }
-
-    #[test]
-    fn la_marker_chunking() {
-        let mut t = TestHistoryBuilder::default();
-        t.add_by_type(EventType::WorkflowExecutionStarted);
-        t.add_full_wf_task();
-        t.add_we_signaled("whatever", vec![]);
-        t.add_full_wf_task(); // started - 7
-        t.add_local_activity_result_marker(1, "hi", Default::default());
-        let act_s = t.add_activity_task_scheduled("1");
-        let act_st = t.add_activity_task_started(act_s);
-        t.add_activity_task_completed(act_s, act_st, Default::default());
-        t.add_workflow_task_scheduled_and_started();
-        t.add_workflow_task_timed_out();
-        t.add_workflow_task_scheduled_and_started();
-        t.add_workflow_task_timed_out();
-        t.add_workflow_task_scheduled_and_started();
-
-        let mut update = t.as_history_update();
-        let seq = next_check_peek(&mut update, 0);
-        assert_eq!(seq.len(), 3);
-        let seq = next_check_peek(&mut update, 3);
-        assert_eq!(seq.len(), 4);
-        let seq = next_check_peek(&mut update, 7);
-        assert_eq!(seq.len(), 13);
-    }
-
-    #[tokio::test]
-    async fn handles_blank_fetch_response() {
-        let timer_hist = canned_histories::single_timer("t");
-        let partial_task = timer_hist.get_one_wft(2).unwrap();
-        let prev_started_wft_id = partial_task.previous_started_event_id();
-        let mut mock_client = mock_workflow_client();
-        mock_client
-            .expect_get_workflow_execution_history()
-            .returning(move |_, _, _| Ok(Default::default()));
-
-        let mut paginator = HistoryPaginator::new(
-            partial_task.into(),
-            prev_started_wft_id,
-            "wfid".to_string(),
-            "runid".to_string(),
-            // A cache miss means we'll try to fetch from start
-            NextPageToken::FetchFromStart,
-            Arc::new(mock_client),
-        );
-        let err = paginator.extract_next_update().await.unwrap_err();
-        assert_matches!(err.code(), tonic::Code::DataLoss);
-    }
-
-    #[tokio::test]
-    async fn handles_empty_page_with_next_token() {
-        let timer_hist = canned_histories::single_timer("t");
-        let partial_task = timer_hist.get_one_wft(2).unwrap();
-        let prev_started_wft_id = partial_task.previous_started_event_id();
-        let full_resp: GetWorkflowExecutionHistoryResponse =
-            timer_hist.get_full_history_info().unwrap().into();
-        let mut mock_client = mock_workflow_client();
-        mock_client
-            .expect_get_workflow_execution_history()
-            .returning(move |_, _, _| {
-                Ok(GetWorkflowExecutionHistoryResponse {
-                    history: Some(History { events: vec![] }),
-                    raw_history: vec![],
-                    next_page_token: vec![2],
-                    archived: false,
-                })
-            })
-            .times(1);
-        mock_client
-            .expect_get_workflow_execution_history()
-            .returning(move |_, _, _| Ok(full_resp.clone()))
-            .times(1);
-
-        let mut paginator = HistoryPaginator::new(
-            partial_task.into(),
-            prev_started_wft_id,
-            "wfid".to_string(),
-            "runid".to_string(),
-            // A cache miss means we'll try to fetch from start
-            NextPageToken::FetchFromStart,
-            Arc::new(mock_client),
-        );
-        let mut update = paginator.extract_next_update().await.unwrap();
-        let seq = update.take_next_wft_sequence(0).unwrap_events();
-        assert_eq!(seq.last().unwrap().event_id, 3);
-        let seq = update.take_next_wft_sequence(3).unwrap_events();
-        assert_eq!(seq.last().unwrap().event_id, 8);
-        assert_matches!(update.take_next_wft_sequence(8), NextWFT::ReplayOver);
-    }
-
-    // TODO: Test that we don't re-feed pointless updates if fetching returns <= events we
-    // already processed
-}