temporalio 0.0.0 → 0.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +301 -0
- data/bridge/Cargo.lock +2888 -0
- data/bridge/Cargo.toml +27 -0
- data/bridge/sdk-core/ARCHITECTURE.md +76 -0
- data/bridge/sdk-core/Cargo.lock +2606 -0
- data/bridge/sdk-core/Cargo.toml +2 -0
- data/bridge/sdk-core/LICENSE.txt +23 -0
- data/bridge/sdk-core/README.md +104 -0
- data/bridge/sdk-core/arch_docs/diagrams/README.md +10 -0
- data/bridge/sdk-core/arch_docs/diagrams/sticky_queues.puml +40 -0
- data/bridge/sdk-core/arch_docs/diagrams/workflow_internals.svg +1 -0
- data/bridge/sdk-core/arch_docs/sticky_queues.md +51 -0
- data/bridge/sdk-core/client/Cargo.toml +40 -0
- data/bridge/sdk-core/client/LICENSE.txt +23 -0
- data/bridge/sdk-core/client/src/lib.rs +1286 -0
- data/bridge/sdk-core/client/src/metrics.rs +165 -0
- data/bridge/sdk-core/client/src/raw.rs +932 -0
- data/bridge/sdk-core/client/src/retry.rs +751 -0
- data/bridge/sdk-core/client/src/workflow_handle/mod.rs +185 -0
- data/bridge/sdk-core/core/Cargo.toml +116 -0
- data/bridge/sdk-core/core/LICENSE.txt +23 -0
- data/bridge/sdk-core/core/benches/workflow_replay.rs +76 -0
- data/bridge/sdk-core/core/src/abstractions.rs +166 -0
- data/bridge/sdk-core/core/src/core_tests/activity_tasks.rs +1014 -0
- data/bridge/sdk-core/core/src/core_tests/child_workflows.rs +221 -0
- data/bridge/sdk-core/core/src/core_tests/determinism.rs +107 -0
- data/bridge/sdk-core/core/src/core_tests/local_activities.rs +925 -0
- data/bridge/sdk-core/core/src/core_tests/mod.rs +100 -0
- data/bridge/sdk-core/core/src/core_tests/queries.rs +894 -0
- data/bridge/sdk-core/core/src/core_tests/replay_flag.rs +65 -0
- data/bridge/sdk-core/core/src/core_tests/workers.rs +259 -0
- data/bridge/sdk-core/core/src/core_tests/workflow_cancels.rs +124 -0
- data/bridge/sdk-core/core/src/core_tests/workflow_tasks.rs +2090 -0
- data/bridge/sdk-core/core/src/ephemeral_server/mod.rs +515 -0
- data/bridge/sdk-core/core/src/lib.rs +282 -0
- data/bridge/sdk-core/core/src/pollers/mod.rs +54 -0
- data/bridge/sdk-core/core/src/pollers/poll_buffer.rs +297 -0
- data/bridge/sdk-core/core/src/protosext/mod.rs +428 -0
- data/bridge/sdk-core/core/src/replay/mod.rs +215 -0
- data/bridge/sdk-core/core/src/retry_logic.rs +202 -0
- data/bridge/sdk-core/core/src/telemetry/log_export.rs +190 -0
- data/bridge/sdk-core/core/src/telemetry/metrics.rs +428 -0
- data/bridge/sdk-core/core/src/telemetry/mod.rs +407 -0
- data/bridge/sdk-core/core/src/telemetry/prometheus_server.rs +78 -0
- data/bridge/sdk-core/core/src/test_help/mod.rs +889 -0
- data/bridge/sdk-core/core/src/worker/activities/activity_heartbeat_manager.rs +580 -0
- data/bridge/sdk-core/core/src/worker/activities/local_activities.rs +1048 -0
- data/bridge/sdk-core/core/src/worker/activities.rs +481 -0
- data/bridge/sdk-core/core/src/worker/client/mocks.rs +87 -0
- data/bridge/sdk-core/core/src/worker/client.rs +373 -0
- data/bridge/sdk-core/core/src/worker/mod.rs +570 -0
- data/bridge/sdk-core/core/src/worker/workflow/bridge.rs +37 -0
- data/bridge/sdk-core/core/src/worker/workflow/driven_workflow.rs +101 -0
- data/bridge/sdk-core/core/src/worker/workflow/history_update.rs +532 -0
- data/bridge/sdk-core/core/src/worker/workflow/machines/activity_state_machine.rs +907 -0
- data/bridge/sdk-core/core/src/worker/workflow/machines/cancel_external_state_machine.rs +294 -0
- data/bridge/sdk-core/core/src/worker/workflow/machines/cancel_workflow_state_machine.rs +167 -0
- data/bridge/sdk-core/core/src/worker/workflow/machines/child_workflow_state_machine.rs +858 -0
- data/bridge/sdk-core/core/src/worker/workflow/machines/complete_workflow_state_machine.rs +136 -0
- data/bridge/sdk-core/core/src/worker/workflow/machines/continue_as_new_workflow_state_machine.rs +157 -0
- data/bridge/sdk-core/core/src/worker/workflow/machines/fail_workflow_state_machine.rs +129 -0
- data/bridge/sdk-core/core/src/worker/workflow/machines/local_activity_state_machine.rs +1450 -0
- data/bridge/sdk-core/core/src/worker/workflow/machines/mod.rs +316 -0
- data/bridge/sdk-core/core/src/worker/workflow/machines/modify_workflow_properties_state_machine.rs +178 -0
- data/bridge/sdk-core/core/src/worker/workflow/machines/patch_state_machine.rs +708 -0
- data/bridge/sdk-core/core/src/worker/workflow/machines/signal_external_state_machine.rs +439 -0
- data/bridge/sdk-core/core/src/worker/workflow/machines/timer_state_machine.rs +435 -0
- data/bridge/sdk-core/core/src/worker/workflow/machines/transition_coverage.rs +175 -0
- data/bridge/sdk-core/core/src/worker/workflow/machines/upsert_search_attributes_state_machine.rs +242 -0
- data/bridge/sdk-core/core/src/worker/workflow/machines/workflow_machines/local_acts.rs +96 -0
- data/bridge/sdk-core/core/src/worker/workflow/machines/workflow_machines.rs +1200 -0
- data/bridge/sdk-core/core/src/worker/workflow/machines/workflow_task_state_machine.rs +272 -0
- data/bridge/sdk-core/core/src/worker/workflow/managed_run/managed_wf_test.rs +198 -0
- data/bridge/sdk-core/core/src/worker/workflow/managed_run.rs +655 -0
- data/bridge/sdk-core/core/src/worker/workflow/mod.rs +1200 -0
- data/bridge/sdk-core/core/src/worker/workflow/run_cache.rs +145 -0
- data/bridge/sdk-core/core/src/worker/workflow/wft_poller.rs +88 -0
- data/bridge/sdk-core/core/src/worker/workflow/workflow_stream.rs +985 -0
- data/bridge/sdk-core/core-api/Cargo.toml +32 -0
- data/bridge/sdk-core/core-api/LICENSE.txt +23 -0
- data/bridge/sdk-core/core-api/src/errors.rs +95 -0
- data/bridge/sdk-core/core-api/src/lib.rs +109 -0
- data/bridge/sdk-core/core-api/src/telemetry.rs +147 -0
- data/bridge/sdk-core/core-api/src/worker.rs +148 -0
- data/bridge/sdk-core/etc/deps.svg +162 -0
- data/bridge/sdk-core/etc/dynamic-config.yaml +2 -0
- data/bridge/sdk-core/etc/otel-collector-config.yaml +36 -0
- data/bridge/sdk-core/etc/prometheus.yaml +6 -0
- data/bridge/sdk-core/etc/regen-depgraph.sh +5 -0
- data/bridge/sdk-core/fsm/Cargo.toml +18 -0
- data/bridge/sdk-core/fsm/LICENSE.txt +23 -0
- data/bridge/sdk-core/fsm/README.md +3 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/Cargo.toml +27 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/LICENSE.txt +23 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/src/lib.rs +647 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/progress.rs +8 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/dupe_transitions_fail.rs +18 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/dupe_transitions_fail.stderr +12 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/dynamic_dest_pass.rs +41 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/forgot_name_fail.rs +14 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/forgot_name_fail.stderr +11 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/handler_arg_pass.rs +32 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/handler_pass.rs +31 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/medium_complex_pass.rs +46 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/no_handle_conversions_require_into_fail.rs +29 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/no_handle_conversions_require_into_fail.stderr +12 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/simple_pass.rs +32 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/struct_event_variant_fail.rs +18 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/struct_event_variant_fail.stderr +5 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/tuple_more_item_event_variant_fail.rs +11 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/tuple_more_item_event_variant_fail.stderr +5 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/tuple_zero_item_event_variant_fail.rs +11 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/tuple_zero_item_event_variant_fail.stderr +5 -0
- data/bridge/sdk-core/fsm/rustfsm_trait/Cargo.toml +14 -0
- data/bridge/sdk-core/fsm/rustfsm_trait/LICENSE.txt +23 -0
- data/bridge/sdk-core/fsm/rustfsm_trait/src/lib.rs +249 -0
- data/bridge/sdk-core/fsm/src/lib.rs +2 -0
- data/bridge/sdk-core/histories/evict_while_la_running_no_interference-23_history.bin +0 -0
- data/bridge/sdk-core/histories/evict_while_la_running_no_interference-85_history.bin +0 -0
- data/bridge/sdk-core/histories/fail_wf_task.bin +0 -0
- data/bridge/sdk-core/histories/timer_workflow_history.bin +0 -0
- data/bridge/sdk-core/integ-with-otel.sh +7 -0
- data/bridge/sdk-core/protos/api_upstream/README.md +9 -0
- data/bridge/sdk-core/protos/api_upstream/api-linter.yaml +40 -0
- data/bridge/sdk-core/protos/api_upstream/buf.yaml +9 -0
- data/bridge/sdk-core/protos/api_upstream/build/go.mod +7 -0
- data/bridge/sdk-core/protos/api_upstream/build/go.sum +5 -0
- data/bridge/sdk-core/protos/api_upstream/build/tools.go +29 -0
- data/bridge/sdk-core/protos/api_upstream/dependencies/gogoproto/gogo.proto +141 -0
- data/bridge/sdk-core/protos/api_upstream/go.mod +6 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/batch/v1/message.proto +89 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/command/v1/message.proto +260 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/common/v1/message.proto +112 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/batch_operation.proto +47 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/command_type.proto +57 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/common.proto +56 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/event_type.proto +170 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/failed_cause.proto +118 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/interaction_type.proto +39 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/namespace.proto +51 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/query.proto +50 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/reset.proto +41 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/schedule.proto +60 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/task_queue.proto +59 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/update.proto +40 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/workflow.proto +122 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/errordetails/v1/message.proto +108 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/failure/v1/message.proto +114 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/filter/v1/message.proto +56 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/history/v1/message.proto +758 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/interaction/v1/message.proto +87 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/namespace/v1/message.proto +97 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/operatorservice/v1/request_response.proto +121 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/operatorservice/v1/service.proto +80 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/query/v1/message.proto +61 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/replication/v1/message.proto +55 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/schedule/v1/message.proto +379 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/taskqueue/v1/message.proto +108 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/version/v1/message.proto +59 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/workflow/v1/message.proto +146 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/workflowservice/v1/request_response.proto +1168 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/workflowservice/v1/service.proto +415 -0
- data/bridge/sdk-core/protos/grpc/health/v1/health.proto +63 -0
- data/bridge/sdk-core/protos/local/temporal/sdk/core/activity_result/activity_result.proto +78 -0
- data/bridge/sdk-core/protos/local/temporal/sdk/core/activity_task/activity_task.proto +79 -0
- data/bridge/sdk-core/protos/local/temporal/sdk/core/child_workflow/child_workflow.proto +77 -0
- data/bridge/sdk-core/protos/local/temporal/sdk/core/common/common.proto +15 -0
- data/bridge/sdk-core/protos/local/temporal/sdk/core/core_interface.proto +30 -0
- data/bridge/sdk-core/protos/local/temporal/sdk/core/external_data/external_data.proto +30 -0
- data/bridge/sdk-core/protos/local/temporal/sdk/core/workflow_activation/workflow_activation.proto +263 -0
- data/bridge/sdk-core/protos/local/temporal/sdk/core/workflow_commands/workflow_commands.proto +304 -0
- data/bridge/sdk-core/protos/local/temporal/sdk/core/workflow_completion/workflow_completion.proto +29 -0
- data/bridge/sdk-core/protos/testsrv_upstream/api-linter.yaml +38 -0
- data/bridge/sdk-core/protos/testsrv_upstream/buf.yaml +13 -0
- data/bridge/sdk-core/protos/testsrv_upstream/dependencies/gogoproto/gogo.proto +141 -0
- data/bridge/sdk-core/protos/testsrv_upstream/temporal/api/testservice/v1/request_response.proto +63 -0
- data/bridge/sdk-core/protos/testsrv_upstream/temporal/api/testservice/v1/service.proto +90 -0
- data/bridge/sdk-core/rustfmt.toml +1 -0
- data/bridge/sdk-core/sdk/Cargo.toml +47 -0
- data/bridge/sdk-core/sdk/LICENSE.txt +23 -0
- data/bridge/sdk-core/sdk/src/activity_context.rs +230 -0
- data/bridge/sdk-core/sdk/src/app_data.rs +37 -0
- data/bridge/sdk-core/sdk/src/interceptors.rs +50 -0
- data/bridge/sdk-core/sdk/src/lib.rs +794 -0
- data/bridge/sdk-core/sdk/src/payload_converter.rs +11 -0
- data/bridge/sdk-core/sdk/src/workflow_context/options.rs +295 -0
- data/bridge/sdk-core/sdk/src/workflow_context.rs +694 -0
- data/bridge/sdk-core/sdk/src/workflow_future.rs +499 -0
- data/bridge/sdk-core/sdk-core-protos/Cargo.toml +30 -0
- data/bridge/sdk-core/sdk-core-protos/LICENSE.txt +23 -0
- data/bridge/sdk-core/sdk-core-protos/build.rs +107 -0
- data/bridge/sdk-core/sdk-core-protos/src/constants.rs +7 -0
- data/bridge/sdk-core/sdk-core-protos/src/history_builder.rs +544 -0
- data/bridge/sdk-core/sdk-core-protos/src/history_info.rs +230 -0
- data/bridge/sdk-core/sdk-core-protos/src/lib.rs +1970 -0
- data/bridge/sdk-core/sdk-core-protos/src/task_token.rs +38 -0
- data/bridge/sdk-core/sdk-core-protos/src/utilities.rs +14 -0
- data/bridge/sdk-core/test-utils/Cargo.toml +36 -0
- data/bridge/sdk-core/test-utils/src/canned_histories.rs +1579 -0
- data/bridge/sdk-core/test-utils/src/histfetch.rs +28 -0
- data/bridge/sdk-core/test-utils/src/lib.rs +650 -0
- data/bridge/sdk-core/tests/integ_tests/client_tests.rs +36 -0
- data/bridge/sdk-core/tests/integ_tests/ephemeral_server_tests.rs +128 -0
- data/bridge/sdk-core/tests/integ_tests/heartbeat_tests.rs +221 -0
- data/bridge/sdk-core/tests/integ_tests/metrics_tests.rs +37 -0
- data/bridge/sdk-core/tests/integ_tests/polling_tests.rs +133 -0
- data/bridge/sdk-core/tests/integ_tests/queries_tests.rs +437 -0
- data/bridge/sdk-core/tests/integ_tests/visibility_tests.rs +93 -0
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/activities.rs +878 -0
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/appdata_propagation.rs +61 -0
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/cancel_external.rs +59 -0
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/cancel_wf.rs +58 -0
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/child_workflows.rs +50 -0
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/continue_as_new.rs +60 -0
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/determinism.rs +54 -0
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/local_activities.rs +788 -0
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/modify_wf_properties.rs +53 -0
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/patches.rs +113 -0
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/replay.rs +223 -0
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/resets.rs +93 -0
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/signals.rs +167 -0
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/stickyness.rs +99 -0
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/timers.rs +131 -0
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/upsert_search_attrs.rs +75 -0
- data/bridge/sdk-core/tests/integ_tests/workflow_tests.rs +597 -0
- data/bridge/sdk-core/tests/load_tests.rs +191 -0
- data/bridge/sdk-core/tests/main.rs +113 -0
- data/bridge/sdk-core/tests/runner.rs +93 -0
- data/bridge/src/connection.rs +186 -0
- data/bridge/src/lib.rs +239 -0
- data/bridge/src/runtime.rs +54 -0
- data/bridge/src/worker.rs +124 -0
- data/ext/Rakefile +9 -0
- data/lib/bridge.so +0 -0
- data/lib/gen/dependencies/gogoproto/gogo_pb.rb +14 -0
- data/lib/gen/temporal/api/batch/v1/message_pb.rb +50 -0
- data/lib/gen/temporal/api/command/v1/message_pb.rb +174 -0
- data/lib/gen/temporal/api/common/v1/message_pb.rb +69 -0
- data/lib/gen/temporal/api/enums/v1/batch_operation_pb.rb +33 -0
- data/lib/gen/temporal/api/enums/v1/command_type_pb.rb +39 -0
- data/lib/gen/temporal/api/enums/v1/common_pb.rb +42 -0
- data/lib/gen/temporal/api/enums/v1/event_type_pb.rb +68 -0
- data/lib/gen/temporal/api/enums/v1/failed_cause_pb.rb +77 -0
- data/lib/gen/temporal/api/enums/v1/interaction_type_pb.rb +25 -0
- data/lib/gen/temporal/api/enums/v1/namespace_pb.rb +37 -0
- data/lib/gen/temporal/api/enums/v1/query_pb.rb +31 -0
- data/lib/gen/temporal/api/enums/v1/reset_pb.rb +24 -0
- data/lib/gen/temporal/api/enums/v1/schedule_pb.rb +28 -0
- data/lib/gen/temporal/api/enums/v1/task_queue_pb.rb +30 -0
- data/lib/gen/temporal/api/enums/v1/update_pb.rb +23 -0
- data/lib/gen/temporal/api/enums/v1/workflow_pb.rb +89 -0
- data/lib/gen/temporal/api/errordetails/v1/message_pb.rb +84 -0
- data/lib/gen/temporal/api/failure/v1/message_pb.rb +83 -0
- data/lib/gen/temporal/api/filter/v1/message_pb.rb +40 -0
- data/lib/gen/temporal/api/history/v1/message_pb.rb +490 -0
- data/lib/gen/temporal/api/interaction/v1/message_pb.rb +49 -0
- data/lib/gen/temporal/api/namespace/v1/message_pb.rb +63 -0
- data/lib/gen/temporal/api/operatorservice/v1/request_response_pb.rb +85 -0
- data/lib/gen/temporal/api/operatorservice/v1/service_pb.rb +20 -0
- data/lib/gen/temporal/api/query/v1/message_pb.rb +38 -0
- data/lib/gen/temporal/api/replication/v1/message_pb.rb +37 -0
- data/lib/gen/temporal/api/schedule/v1/message_pb.rb +149 -0
- data/lib/gen/temporal/api/taskqueue/v1/message_pb.rb +73 -0
- data/lib/gen/temporal/api/version/v1/message_pb.rb +41 -0
- data/lib/gen/temporal/api/workflow/v1/message_pb.rb +111 -0
- data/lib/gen/temporal/api/workflowservice/v1/request_response_pb.rb +788 -0
- data/lib/gen/temporal/api/workflowservice/v1/service_pb.rb +20 -0
- data/lib/gen/temporal/sdk/core/activity_result/activity_result_pb.rb +58 -0
- data/lib/gen/temporal/sdk/core/activity_task/activity_task_pb.rb +57 -0
- data/lib/gen/temporal/sdk/core/bridge/bridge_pb.rb +222 -0
- data/lib/gen/temporal/sdk/core/child_workflow/child_workflow_pb.rb +57 -0
- data/lib/gen/temporal/sdk/core/common/common_pb.rb +22 -0
- data/lib/gen/temporal/sdk/core/core_interface_pb.rb +34 -0
- data/lib/gen/temporal/sdk/core/external_data/external_data_pb.rb +27 -0
- data/lib/gen/temporal/sdk/core/workflow_activation/workflow_activation_pb.rb +165 -0
- data/lib/gen/temporal/sdk/core/workflow_commands/workflow_commands_pb.rb +196 -0
- data/lib/gen/temporal/sdk/core/workflow_completion/workflow_completion_pb.rb +34 -0
- data/lib/temporalio/activity/context.rb +97 -0
- data/lib/temporalio/activity/info.rb +67 -0
- data/lib/temporalio/activity.rb +85 -0
- data/lib/temporalio/bridge/error.rb +8 -0
- data/lib/temporalio/bridge.rb +14 -0
- data/lib/temporalio/client/implementation.rb +340 -0
- data/lib/temporalio/client/workflow_handle.rb +243 -0
- data/lib/temporalio/client.rb +131 -0
- data/lib/temporalio/connection.rb +751 -0
- data/lib/temporalio/data_converter.rb +191 -0
- data/lib/temporalio/error/failure.rb +194 -0
- data/lib/temporalio/error/workflow_failure.rb +19 -0
- data/lib/temporalio/errors.rb +40 -0
- data/lib/temporalio/failure_converter/base.rb +26 -0
- data/lib/temporalio/failure_converter/basic.rb +319 -0
- data/lib/temporalio/failure_converter.rb +7 -0
- data/lib/temporalio/interceptor/chain.rb +28 -0
- data/lib/temporalio/interceptor/client.rb +123 -0
- data/lib/temporalio/payload_codec/base.rb +32 -0
- data/lib/temporalio/payload_converter/base.rb +24 -0
- data/lib/temporalio/payload_converter/bytes.rb +27 -0
- data/lib/temporalio/payload_converter/composite.rb +49 -0
- data/lib/temporalio/payload_converter/encoding_base.rb +35 -0
- data/lib/temporalio/payload_converter/json.rb +26 -0
- data/lib/temporalio/payload_converter/nil.rb +26 -0
- data/lib/temporalio/payload_converter.rb +14 -0
- data/lib/temporalio/retry_policy.rb +82 -0
- data/lib/temporalio/retry_state.rb +35 -0
- data/lib/temporalio/runtime.rb +25 -0
- data/lib/temporalio/timeout_type.rb +29 -0
- data/lib/temporalio/version.rb +3 -0
- data/lib/temporalio/worker/activity_runner.rb +92 -0
- data/lib/temporalio/worker/activity_worker.rb +138 -0
- data/lib/temporalio/worker/reactor.rb +46 -0
- data/lib/temporalio/worker/runner.rb +63 -0
- data/lib/temporalio/worker/sync_worker.rb +88 -0
- data/lib/temporalio/worker/thread_pool_executor.rb +51 -0
- data/lib/temporalio/worker.rb +198 -0
- data/lib/temporalio/workflow/execution_info.rb +54 -0
- data/lib/temporalio/workflow/execution_status.rb +36 -0
- data/lib/temporalio/workflow/id_reuse_policy.rb +36 -0
- data/lib/temporalio/workflow/query_reject_condition.rb +33 -0
- data/lib/temporalio.rb +12 -1
- data/lib/thermite_patch.rb +23 -0
- data/temporalio.gemspec +45 -0
- metadata +566 -9
- data/lib/temporal/version.rb +0 -3
- data/lib/temporal.rb +0 -4
- data/temporal.gemspec +0 -20
|
@@ -0,0 +1,101 @@
|
|
|
1
|
+
use crate::worker::workflow::{OutgoingJob, WFCommand, WorkflowStartedInfo};
|
|
2
|
+
use prost_types::Timestamp;
|
|
3
|
+
use temporal_sdk_core_protos::{
|
|
4
|
+
coresdk::workflow_activation::{start_workflow_from_attribs, WorkflowActivationJob},
|
|
5
|
+
temporal::api::history::v1::WorkflowExecutionStartedEventAttributes,
|
|
6
|
+
utilities::TryIntoOrNone,
|
|
7
|
+
};
|
|
8
|
+
|
|
9
|
+
/// Abstracts away the concept of an actual workflow implementation, handling sending it new
/// jobs and fetching output from it.
pub struct DrivenWorkflow {
    /// Populated by [DrivenWorkflow::start]; `None` until the workflow has been started.
    started_attrs: Option<WorkflowStartedInfo>,
    /// The underlying workflow implementation (or mock) that produces commands.
    fetcher: Box<dyn WorkflowFetcher>,
    /// Outgoing activation jobs that need to be sent to the lang sdk
    outgoing_wf_activation_jobs: Vec<OutgoingJob>,
}
|
|
17
|
+
|
|
18
|
+
impl<WF> From<Box<WF>> for DrivenWorkflow
|
|
19
|
+
where
|
|
20
|
+
WF: WorkflowFetcher + 'static,
|
|
21
|
+
{
|
|
22
|
+
fn from(wf: Box<WF>) -> Self {
|
|
23
|
+
Self {
|
|
24
|
+
started_attrs: None,
|
|
25
|
+
fetcher: wf,
|
|
26
|
+
outgoing_wf_activation_jobs: Default::default(),
|
|
27
|
+
}
|
|
28
|
+
}
|
|
29
|
+
}
|
|
30
|
+
|
|
31
|
+
impl DrivenWorkflow {
    /// Start the workflow: record the started-event attributes we care about and enqueue
    /// the initial start-workflow activation job for the lang side.
    pub fn start(
        &mut self,
        workflow_id: String,
        randomness_seed: u64,
        start_time: Timestamp,
        attribs: WorkflowExecutionStartedEventAttributes,
    ) {
        debug!(run_id = %attribs.original_execution_run_id, "Driven WF start");
        // Capture the subset of the start attributes needed later. Fields are cloned here
        // because `attribs` is consumed below when building the activation job.
        let started_info = WorkflowStartedInfo {
            workflow_task_timeout: attribs.workflow_task_timeout.clone().try_into_or_none(),
            workflow_execution_timeout: attribs
                .workflow_execution_timeout
                .clone()
                .try_into_or_none(),
            memo: attribs.memo.clone(),
            search_attrs: attribs.search_attributes.clone(),
            retry_policy: attribs.retry_policy.clone(),
        };
        self.send_job(
            start_workflow_from_attribs(attribs, workflow_id, randomness_seed, start_time).into(),
        );
        self.started_attrs = Some(started_info);
    }

    /// Return the attributes from the workflow execution started event if this workflow has started
    pub fn get_started_info(&self) -> Option<&WorkflowStartedInfo> {
        self.started_attrs.as_ref()
    }

    /// Enqueue a new job to be sent to the driven workflow
    pub(super) fn send_job(&mut self, job: OutgoingJob) {
        self.outgoing_wf_activation_jobs.push(job);
    }

    /// Observe pending jobs without removing them from the queue
    pub(super) fn peek_pending_jobs(&self) -> &[OutgoingJob] {
        self.outgoing_wf_activation_jobs.as_slice()
    }

    /// Drain all pending jobs, so that they may be sent to the driven workflow
    pub fn drain_jobs(&mut self) -> Vec<WorkflowActivationJob> {
        self.outgoing_wf_activation_jobs
            .drain(..)
            .map(Into::into)
            .collect()
    }
}
|
|
80
|
+
|
|
81
|
+
#[async_trait::async_trait]
impl WorkflowFetcher for DrivenWorkflow {
    // Pure delegation to the wrapped fetcher implementation.
    async fn fetch_workflow_iteration_output(&mut self) -> Vec<WFCommand> {
        self.fetcher.fetch_workflow_iteration_output().await
    }
}
|
|
87
|
+
|
|
88
|
+
/// Implementors of this trait represent a way to fetch output from executing/iterating some
/// workflow code (or a mocked workflow).
// NOTE(review): the intra-doc links below reference `crate::Core` and `WorkflowBridge`;
// confirm these paths still resolve under rustdoc.
#[async_trait::async_trait]
pub trait WorkflowFetcher: Send {
    /// Obtain any output from the workflow's recent execution(s). Because the lang sdk is
    /// responsible for calling workflow code as a result of receiving tasks from
    /// [crate::Core::poll_task], we cannot directly iterate it here. Thus implementations of this
    /// trait are expected to either buffer output or otherwise produce it on demand when this
    /// function is called.
    ///
    /// In the case of the real [WorkflowBridge] implementation, commands are simply pulled from
    /// a buffer that the language side sinks into when it calls [crate::Core::complete_task].
    async fn fetch_workflow_iteration_output(&mut self) -> Vec<WFCommand>;
}
|
|
@@ -0,0 +1,532 @@
|
|
|
1
|
+
use crate::{
|
|
2
|
+
replay::{HistoryInfo, TestHistoryBuilder},
|
|
3
|
+
worker::client::WorkerClient,
|
|
4
|
+
};
|
|
5
|
+
use futures::{future::BoxFuture, stream, stream::BoxStream, FutureExt, Stream, StreamExt};
|
|
6
|
+
use std::{
|
|
7
|
+
collections::VecDeque,
|
|
8
|
+
fmt::Debug,
|
|
9
|
+
future::Future,
|
|
10
|
+
pin::Pin,
|
|
11
|
+
sync::Arc,
|
|
12
|
+
task::{Context, Poll},
|
|
13
|
+
};
|
|
14
|
+
use temporal_sdk_core_protos::temporal::api::{
|
|
15
|
+
enums::v1::EventType,
|
|
16
|
+
history::v1::{History, HistoryEvent},
|
|
17
|
+
workflowservice::v1::GetWorkflowExecutionHistoryResponse,
|
|
18
|
+
};
|
|
19
|
+
use tracing::Instrument;
|
|
20
|
+
|
|
21
|
+
/// A slimmed down version of a poll workflow task response which includes just the info needed
/// by [WorkflowManager]. History events are expected to be consumed from it and applied to the
/// state machines.
pub struct HistoryUpdate {
    /// Fused stream of history events, lazily paginated from the server.
    events: BoxStream<'static, Result<HistoryEvent, tonic::Status>>,
    /// It is useful to be able to look ahead up to one workflow task beyond the currently
    /// requested one. The initial (possibly only) motivation for this being to be able to
    /// pre-emptively notify lang about patch markers so that calls to `changed` do not need to
    /// be async.
    buffered: VecDeque<HistoryEvent>,
    /// Event id of the previously-started workflow task, as supplied at construction time.
    pub previous_started_event_id: i64,
}
|
|
33
|
+
impl Debug for HistoryUpdate {
|
|
34
|
+
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
|
35
|
+
write!(
|
|
36
|
+
f,
|
|
37
|
+
"HistoryUpdate(previous_started_event_id: {})",
|
|
38
|
+
self.previous_started_event_id
|
|
39
|
+
)
|
|
40
|
+
}
|
|
41
|
+
}
|
|
42
|
+
|
|
43
|
+
pub struct HistoryPaginator {
    // Potentially this could actually be a ref w/ lifetime here
    client: Arc<dyn WorkerClient>,
    /// Events already fetched from the server but not yet yielded by the stream.
    event_queue: VecDeque<HistoryEvent>,
    wf_id: String,
    run_id: String,
    /// Where the next history fetch should resume from.
    next_page_token: NextPageToken,
    /// The in-flight history request, if any, so a later `poll_next` can resume polling it.
    open_history_request:
        Option<BoxFuture<'static, Result<GetWorkflowExecutionHistoryResponse, tonic::Status>>>,
    /// These are events that should be returned once pagination has finished. This only happens
    /// during cache misses, where we got a partial task but need to fetch history from the start.
    /// We use this to apply any remaining events from that partial task after the paginated
    /// events have been delivered, skipping those already seen.
    final_events: Vec<HistoryEvent>,
}
|
|
57
|
+
|
|
58
|
+
/// Tracks what, if anything, remains to be fetched while paginating workflow history.
#[derive(Clone, Debug)]
pub enum NextPageToken {
    /// There is no page token, we need to fetch history from the beginning
    FetchFromStart,
    /// There is a page token
    Next(Vec<u8>),
    /// There is no page token, we are done fetching history
    Done,
}

// If we're converting from a page token from the server, if it's empty, then we're done.
impl From<Vec<u8>> for NextPageToken {
    fn from(page_token: Vec<u8>) -> Self {
        match page_token.is_empty() {
            true => NextPageToken::Done,
            false => NextPageToken::Next(page_token),
        }
    }
}
|
|
78
|
+
|
|
79
|
+
impl HistoryPaginator {
    /// Build a paginator over the history of the workflow identified by `wf_id` / `run_id`.
    ///
    /// If `next_page_token` resolves to [NextPageToken::FetchFromStart], the events of
    /// `initial_history` are stashed as `final_events` to be appended after pagination
    /// completes; otherwise they seed the queue and fetching resumes from the token.
    pub(crate) fn new(
        initial_history: History,
        wf_id: String,
        run_id: String,
        next_page_token: impl Into<NextPageToken>,
        client: Arc<dyn WorkerClient>,
    ) -> Self {
        let next_page_token = next_page_token.into();
        let (event_queue, final_events) =
            if matches!(next_page_token, NextPageToken::FetchFromStart) {
                (VecDeque::new(), initial_history.events)
            } else {
                (initial_history.events.into(), vec![])
            };
        Self {
            client,
            event_queue,
            wf_id,
            run_id,
            next_page_token,
            open_history_request: None,
            final_events,
        }
    }

    /// Absorb a freshly fetched page: record its next-page token and append its events to
    /// the queue. When the server reports no further pages, also append the stashed
    /// `final_events`, de-duplicating by event id.
    fn extend_queue_with_new_page(&mut self, resp: GetWorkflowExecutionHistoryResponse) {
        self.next_page_token = resp.next_page_token.into();
        self.event_queue
            .extend(resp.history.map(|h| h.events).unwrap_or_default());
        if matches!(&self.next_page_token, NextPageToken::Done) {
            // If finished, we need to extend the queue with the final events, skipping any
            // which are already present.
            // NOTE(review): if the queue is empty at this point the stashed final events
            // are never appended — presumably the queue always holds events once pagination
            // finishes; confirm this invariant.
            if let Some(last_event_id) = self.event_queue.back().map(|e| e.event_id) {
                let final_events = std::mem::take(&mut self.final_events);
                self.event_queue.extend(
                    final_events
                        .into_iter()
                        .skip_while(|e2| e2.event_id <= last_event_id),
                );
            }
        };
    }
}
|
|
123
|
+
|
|
124
|
+
impl Stream for HistoryPaginator {
    type Item = Result<HistoryEvent, tonic::Status>;

    // Yields any queued event first; otherwise drives (or starts) a page fetch and, on
    // completion, refills the queue from the response before yielding.
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        if let Some(e) = self.event_queue.pop_front() {
            return Poll::Ready(Some(Ok(e)));
        }
        let history_req = if let Some(req) = self.open_history_request.as_mut() {
            req
        } else {
            // `Done` is swapped in as a placeholder here; the real token is restored when
            // the response arrives via `extend_queue_with_new_page`.
            let npt = match std::mem::replace(&mut self.next_page_token, NextPageToken::Done) {
                // If there's no open request and the last page token we got was empty, we're done.
                NextPageToken::Done => return Poll::Ready(None),
                NextPageToken::FetchFromStart => vec![],
                NextPageToken::Next(v) => v,
            };
            debug!(run_id=%self.run_id, "Fetching new history page");
            // Clone what the 'static future needs so it does not borrow `self`.
            let gw = self.client.clone();
            let wid = self.wf_id.clone();
            let rid = self.run_id.clone();
            let resp_fut = async move {
                gw.get_workflow_execution_history(wid, Some(rid), npt)
                    .instrument(span!(tracing::Level::TRACE, "fetch_history_in_paginator"))
                    .await
            };
            // Stash the future so a later poll can resume it, and borrow it for this poll.
            self.open_history_request.insert(resp_fut.boxed())
        };

        return match Future::poll(history_req.as_mut(), cx) {
            Poll::Ready(resp) => {
                // The request completed (ok or err) — clear it so the next poll starts fresh.
                self.open_history_request = None;
                match resp {
                    Err(neterr) => Poll::Ready(Some(Err(neterr))),
                    Ok(resp) => {
                        self.extend_queue_with_new_page(resp);
                        Poll::Ready(self.event_queue.pop_front().map(Ok))
                    }
                }
            }
            Poll::Pending => Poll::Pending,
        };
    }
}
|
|
167
|
+
|
|
168
|
+
impl HistoryUpdate {
    /// Construct an update which pulls events lazily from the given paginator, which may call
    /// out to the server for additional pages as events are consumed.
    pub fn new(history_iterator: HistoryPaginator, previous_wft_started_id: i64) -> Self {
        Self {
            events: history_iterator.fuse().boxed(),
            buffered: VecDeque::new(),
            previous_started_event_id: previous_wft_started_id,
        }
    }

    /// Create an instance of an update directly from events - should only be used for replaying.
    pub fn new_from_events<I: IntoIterator<Item = HistoryEvent>>(
        events: I,
        previous_wft_started_id: i64,
    ) -> Self
    where
        <I as IntoIterator>::IntoIter: Send + 'static,
    {
        Self {
            events: stream::iter(events.into_iter().map(Ok)).boxed(),
            buffered: VecDeque::new(),
            previous_started_event_id: previous_wft_started_id,
        }
    }

    /// Given a workflow task started id, return all events starting at that number (inclusive) to
    /// the next WFT started event (inclusive). If there is no subsequent WFT started event,
    /// remaining history is returned.
    ///
    /// Events are *consumed* by this process, to keep things efficient in workflow machines, and
    /// the function may call out to server to fetch more pages if they are known to exist and
    /// needed to complete the WFT sequence.
    ///
    /// Always buffers the WFT sequence *after* the returned one as well, if it is available.
    ///
    /// Can return a tonic error in the event that fetching additional history was needed and failed
    pub async fn take_next_wft_sequence(
        &mut self,
        from_wft_started_id: i64,
    ) -> Result<Vec<HistoryEvent>, tonic::Status> {
        let (next_wft_events, maybe_bonus_events) = self
            .take_next_wft_sequence_impl(from_wft_started_id)
            .await?;
        // Over-taken events go back onto the buffer so the next call sees them first.
        if !maybe_bonus_events.is_empty() {
            self.buffered.extend(maybe_bonus_events);
        }

        if let Some(last_event_id) = next_wft_events.last().map(|he| he.event_id) {
            // Always attempt to fetch the *next* WFT sequence as well, to buffer it for lookahead
            let (buffer_these_events, maybe_bonus_events) =
                self.take_next_wft_sequence_impl(last_event_id).await?;
            self.buffered.extend(buffer_these_events);
            if !maybe_bonus_events.is_empty() {
                self.buffered.extend(maybe_bonus_events);
            }
        }

        Ok(next_wft_events)
    }

    /// Lets the caller peek ahead at the next WFT sequence that will be returned by
    /// [take_next_wft_sequence]. Will always return an empty iterator if that has not been called
    /// first. May also return an empty iterator or incomplete sequence if we are at the end of
    /// history.
    pub fn peek_next_wft_sequence(&self) -> impl Iterator<Item = &HistoryEvent> {
        self.buffered.iter()
    }

    /// Retrieve the next WFT sequence, first from buffered events and then from the real stream.
    /// Returns (events up to the next logical wft sequence, extra events that were taken but
    /// should be re-appended to the end of the buffer).
    async fn take_next_wft_sequence_impl(
        &mut self,
        from_event_id: i64,
    ) -> Result<(Vec<HistoryEvent>, Vec<HistoryEvent>), tonic::Status> {
        let mut events_to_next_wft_started: Vec<HistoryEvent> = vec![];

        // This flag tracks if, while determining events to be returned, we have seen the next
        // logically significant WFT started event which follows the one that was passed in as a
        // parameter. If a WFT fails, times out, or is devoid of commands (ie: a heartbeat) it is
        // not significant. So we will stop returning events (exclusive) as soon as we see an event
        // following a WFT started that is *not* failed, timed out, or completed with a command.
        let mut next_wft_state = NextWftState::NotSeen;
        // Returns true while the event should be taken into the current sequence; the first
        // event for which it returns false ends the sequence. The closure mutably captures
        // `next_wft_state` to drive the small state machine described above.
        let mut should_pop = |e: &HistoryEvent| {
            if e.event_id <= from_event_id {
                return true;
            } else if e.event_type() == EventType::WorkflowTaskStarted {
                next_wft_state = NextWftState::Seen;
                return true;
            }

            match next_wft_state {
                NextWftState::Seen => {
                    // Must ignore failures and timeouts
                    if e.event_type() == EventType::WorkflowTaskFailed
                        || e.event_type() == EventType::WorkflowTaskTimedOut
                    {
                        next_wft_state = NextWftState::NotSeen;
                        return true;
                    } else if e.event_type() == EventType::WorkflowTaskCompleted {
                        next_wft_state = NextWftState::SeenCompleted;
                        return true;
                    }
                    false
                }
                NextWftState::SeenCompleted => {
                    // If we've seen the WFT be completed, and this event is another scheduled, then
                    // this was an empty heartbeat we should ignore.
                    if e.event_type() == EventType::WorkflowTaskScheduled {
                        next_wft_state = NextWftState::NotSeen;
                        return true;
                    }
                    // Otherwise, we're done here
                    false
                }
                NextWftState::NotSeen => true,
            }
        };

        // Fetch events from the buffer first, then from the network
        let mut event_q = stream::iter(self.buffered.drain(..).map(Ok)).chain(&mut self.events);

        let mut extra_e = vec![];
        let mut last_seen_id = None;
        while let Some(e) = event_q.next().await {
            let e = e?;

            // This little block prevents us from infinitely fetching work from the server in the
            // event that, for whatever reason, it keeps returning stuff we've already seen.
            if let Some(last_id) = last_seen_id {
                if e.event_id <= last_id {
                    error!("Server returned history event IDs that went backwards!");
                    break;
                }
            }
            last_seen_id = Some(e.event_id);

            // It's possible to have gotten a new history update without eviction (ex: unhandled
            // command on completion), where we may need to skip events we already handled.
            if e.event_id > from_event_id {
                if !should_pop(&e) {
                    if next_wft_state == NextWftState::SeenCompleted {
                        // We have seen the wft completed event, but decided to exit. We don't
                        // want to return that event as part of this sequence, so include it for
                        // re-buffering along with the event we're currently on.
                        extra_e.push(
                            events_to_next_wft_started
                                .pop()
                                .expect("There is an element here by definition"),
                        );
                    }
                    extra_e.push(e);
                    break;
                }
                events_to_next_wft_started.push(e);
            }
        }

        Ok((events_to_next_wft_started, extra_e))
    }
}
|
|
328
|
+
|
|
329
|
+
/// Tracks whether, while scanning history for the end of a WFT sequence, the next
/// logically-significant workflow-task-started event has been observed. Transitions are driven
/// entirely by the `should_pop` closure in `take_next_wft_sequence_impl`.
#[derive(Eq, PartialEq, Debug)]
enum NextWftState {
    /// No subsequent WFT started event has been seen yet (or the one seen was invalidated by a
    /// failure/timeout/heartbeat).
    NotSeen,
    /// A WFT started event was seen and has not (yet) been invalidated.
    Seen,
    /// The seen WFT started event was followed by a WFT completed event.
    SeenCompleted,
}
|
|
335
|
+
|
|
336
|
+
impl From<HistoryInfo> for HistoryUpdate {
    /// Build a replay-only update from pre-recorded history info, preserving its previous
    /// WFT-started id.
    fn from(v: HistoryInfo) -> Self {
        let owned_events = v.events().to_vec();
        let prev_started = v.previous_started_event_id();
        Self::new_from_events(owned_events, prev_started)
    }
}
|
|
341
|
+
|
|
342
|
+
/// Test-support conversion: turn a canned history builder into a [HistoryUpdate] containing its
/// full recorded history.
pub trait TestHBExt {
    fn as_history_update(&self) -> HistoryUpdate;
}
|
|
345
|
+
|
|
346
|
+
impl TestHBExt for TestHistoryBuilder {
|
|
347
|
+
fn as_history_update(&self) -> HistoryUpdate {
|
|
348
|
+
self.get_full_history_info().unwrap().into()
|
|
349
|
+
}
|
|
350
|
+
}
|
|
351
|
+
|
|
352
|
+
/// Tests covering WFT-sequence consumption, heartbeat/failure/timeout skipping, and paginator
/// fetching behavior of [HistoryUpdate] / [HistoryPaginator].
#[cfg(test)]
pub mod tests {
    use super::*;
    use crate::{test_help::canned_histories, worker::client::mocks::mock_workflow_client};

    // A plain history should split into its WFT sequences at each WFT-started event.
    #[tokio::test]
    async fn consumes_standard_wft_sequence() {
        let timer_hist = canned_histories::single_timer("t");
        let mut update = timer_hist.as_history_update();
        let seq_1 = update.take_next_wft_sequence(0).await.unwrap();
        assert_eq!(seq_1.len(), 3);
        assert_eq!(seq_1.last().unwrap().event_id, 3);
        let seq_2 = update.take_next_wft_sequence(3).await.unwrap();
        assert_eq!(seq_2.len(), 5);
        assert_eq!(seq_2.last().unwrap().event_id, 8);
    }

    // A WFT started followed by a WFT failure is not a sequence boundary.
    #[tokio::test]
    async fn skips_wft_failed() {
        let failed_hist = canned_histories::workflow_fails_with_reset_after_timer("t", "runid");
        let mut update = failed_hist.as_history_update();
        let seq_1 = update.take_next_wft_sequence(0).await.unwrap();
        assert_eq!(seq_1.len(), 3);
        assert_eq!(seq_1.last().unwrap().event_id, 3);
        let seq_2 = update.take_next_wft_sequence(3).await.unwrap();
        assert_eq!(seq_2.len(), 8);
        assert_eq!(seq_2.last().unwrap().event_id, 11);
    }

    // A WFT started followed by a WFT timeout is not a sequence boundary either.
    #[tokio::test]
    async fn skips_wft_timeout() {
        let failed_hist = canned_histories::wft_timeout_repro();
        let mut update = failed_hist.as_history_update();
        let seq_1 = update.take_next_wft_sequence(0).await.unwrap();
        assert_eq!(seq_1.len(), 3);
        assert_eq!(seq_1.last().unwrap().event_id, 3);
        let seq_2 = update.take_next_wft_sequence(3).await.unwrap();
        assert_eq!(seq_2.len(), 11);
        assert_eq!(seq_2.last().unwrap().event_id, 14);
    }

    #[tokio::test]
    async fn skips_events_before_desired_wft() {
        let timer_hist = canned_histories::single_timer("t");
        let mut update = timer_hist.as_history_update();
        // We haven't processed the first 3 events, but we should still only get the second sequence
        let seq_2 = update.take_next_wft_sequence(3).await.unwrap();
        assert_eq!(seq_2.len(), 5);
        assert_eq!(seq_2.last().unwrap().event_id, 8);
    }

    // When history terminates without a trailing WFT started, the remainder is returned as-is.
    #[tokio::test]
    async fn history_ends_abruptly() {
        let mut timer_hist = canned_histories::single_timer("t");
        timer_hist.add_workflow_execution_terminated();
        let mut update = timer_hist.as_history_update();
        let seq_2 = update.take_next_wft_sequence(3).await.unwrap();
        assert_eq!(seq_2.len(), 5);
        assert_eq!(seq_2.last().unwrap().event_id, 8);
    }

    // Command-less (heartbeat) WFTs are folded into the surrounding sequence rather than
    // treated as boundaries.
    #[tokio::test]
    async fn heartbeats_skipped() {
        let mut t = TestHistoryBuilder::default();
        t.add_by_type(EventType::WorkflowExecutionStarted);
        t.add_full_wf_task();
        t.add_full_wf_task();
        t.add_get_event_id(EventType::TimerStarted, None);
        t.add_full_wf_task();
        t.add_full_wf_task();
        t.add_full_wf_task();
        t.add_full_wf_task();
        t.add_get_event_id(EventType::TimerStarted, None);
        t.add_full_wf_task();
        t.add_we_signaled("whee", vec![]);
        t.add_full_wf_task();
        t.add_workflow_execution_completed();

        let mut update = t.as_history_update();
        let seq = update.take_next_wft_sequence(0).await.unwrap();
        assert_eq!(seq.len(), 6);
        let seq = update.take_next_wft_sequence(6).await.unwrap();
        assert_eq!(seq.len(), 13);
        let seq = update.take_next_wft_sequence(19).await.unwrap();
        assert_eq!(seq.len(), 4);
        let seq = update.take_next_wft_sequence(23).await.unwrap();
        assert_eq!(seq.len(), 4);
        let seq = update.take_next_wft_sequence(27).await.unwrap();
        assert_eq!(seq.len(), 2);
    }

    #[tokio::test]
    async fn paginator_fetches_new_pages() {
        // Note that this test triggers the "event ids that went backwards" error, acceptably.
        // Can be fixed by having mock not return earlier events.
        let wft_count = 500;
        let long_hist = canned_histories::long_sequential_timers(wft_count);
        let initial_hist = long_hist.get_history_info(10).unwrap();
        let prev_started = initial_hist.previous_started_event_id();
        let mut mock_client = mock_workflow_client();

        // The mock serves successive ever-larger prefixes of the history, advancing the page
        // token it hands back each call and asserting the paginator echoed the previous one.
        let mut npt = 2;
        mock_client
            .expect_get_workflow_execution_history()
            .returning(move |_, _, passed_npt| {
                assert_eq!(passed_npt, vec![npt]);
                let history = long_hist.get_history_info(10 * npt as usize).unwrap();
                npt += 1;
                Ok(GetWorkflowExecutionHistoryResponse {
                    history: Some(history.into()),
                    raw_history: vec![],
                    next_page_token: vec![npt],
                    archived: false,
                })
            });

        let mut update = HistoryUpdate::new(
            HistoryPaginator::new(
                initial_hist.into(),
                "wfid".to_string(),
                "runid".to_string(),
                vec![2], // Start at page "2"
                Arc::new(mock_client),
            ),
            prev_started,
        );

        let seq = update.take_next_wft_sequence(0).await.unwrap();
        assert_eq!(seq.len(), 3);

        // Every subsequent sequence should be 5 contiguous, monotonically increasing events.
        let mut last_event_id = 3;
        let mut last_started_id = 3;
        for _ in 1..wft_count {
            let seq = update
                .take_next_wft_sequence(last_started_id)
                .await
                .unwrap();
            for e in &seq {
                last_event_id += 1;
                assert_eq!(e.event_id, last_event_id);
            }
            assert_eq!(seq.len(), 5);
            last_started_id += 5;
        }
    }

    #[tokio::test]
    async fn handles_cache_misses() {
        let timer_hist = canned_histories::single_timer("t");
        let partial_task = timer_hist.get_one_wft(2).unwrap();
        let mut history_from_get: GetWorkflowExecutionHistoryResponse =
            timer_hist.get_history_info(2).unwrap().into();
        // Chop off the last event, which is WFT started, which server doesn't return in get
        // history
        history_from_get.history.as_mut().map(|h| h.events.pop());
        let mut mock_client = mock_workflow_client();
        mock_client
            .expect_get_workflow_execution_history()
            .returning(move |_, _, _| Ok(history_from_get.clone()));

        let mut update = HistoryUpdate::new(
            HistoryPaginator::new(
                partial_task.into(),
                "wfid".to_string(),
                "runid".to_string(),
                // A cache miss means we'll try to fetch from start
                NextPageToken::FetchFromStart,
                Arc::new(mock_client),
            ),
            1,
        );
        // We expect if we try to take the first task sequence that the first event is the first
        // event in the sequence.
        let seq = update.take_next_wft_sequence(0).await.unwrap();
        assert_eq!(seq[0].event_id, 1);
        let seq = update.take_next_wft_sequence(3).await.unwrap();
        // Verify anything extra (which should only ever be WFT started) was re-appended to the
        // end of the event iteration after fetching the old history.
        assert_eq!(seq.last().unwrap().event_id, 8);
    }
}
|