temporalio 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/Gemfile +3 -0
- data/LICENSE +20 -0
- data/README.md +130 -0
- data/bridge/Cargo.lock +2865 -0
- data/bridge/Cargo.toml +26 -0
- data/bridge/sdk-core/ARCHITECTURE.md +76 -0
- data/bridge/sdk-core/Cargo.lock +2606 -0
- data/bridge/sdk-core/Cargo.toml +2 -0
- data/bridge/sdk-core/LICENSE.txt +23 -0
- data/bridge/sdk-core/README.md +107 -0
- data/bridge/sdk-core/arch_docs/diagrams/README.md +10 -0
- data/bridge/sdk-core/arch_docs/diagrams/sticky_queues.puml +40 -0
- data/bridge/sdk-core/arch_docs/diagrams/workflow_internals.svg +1 -0
- data/bridge/sdk-core/arch_docs/sticky_queues.md +51 -0
- data/bridge/sdk-core/bridge-ffi/Cargo.toml +24 -0
- data/bridge/sdk-core/bridge-ffi/LICENSE.txt +23 -0
- data/bridge/sdk-core/bridge-ffi/build.rs +25 -0
- data/bridge/sdk-core/bridge-ffi/include/sdk-core-bridge.h +249 -0
- data/bridge/sdk-core/bridge-ffi/src/lib.rs +825 -0
- data/bridge/sdk-core/bridge-ffi/src/wrappers.rs +211 -0
- data/bridge/sdk-core/client/Cargo.toml +40 -0
- data/bridge/sdk-core/client/LICENSE.txt +23 -0
- data/bridge/sdk-core/client/src/lib.rs +1294 -0
- data/bridge/sdk-core/client/src/metrics.rs +165 -0
- data/bridge/sdk-core/client/src/raw.rs +931 -0
- data/bridge/sdk-core/client/src/retry.rs +674 -0
- data/bridge/sdk-core/client/src/workflow_handle/mod.rs +185 -0
- data/bridge/sdk-core/core/Cargo.toml +116 -0
- data/bridge/sdk-core/core/LICENSE.txt +23 -0
- data/bridge/sdk-core/core/benches/workflow_replay.rs +73 -0
- data/bridge/sdk-core/core/src/abstractions.rs +166 -0
- data/bridge/sdk-core/core/src/core_tests/activity_tasks.rs +911 -0
- data/bridge/sdk-core/core/src/core_tests/child_workflows.rs +221 -0
- data/bridge/sdk-core/core/src/core_tests/determinism.rs +107 -0
- data/bridge/sdk-core/core/src/core_tests/local_activities.rs +515 -0
- data/bridge/sdk-core/core/src/core_tests/mod.rs +100 -0
- data/bridge/sdk-core/core/src/core_tests/queries.rs +736 -0
- data/bridge/sdk-core/core/src/core_tests/replay_flag.rs +65 -0
- data/bridge/sdk-core/core/src/core_tests/workers.rs +259 -0
- data/bridge/sdk-core/core/src/core_tests/workflow_cancels.rs +124 -0
- data/bridge/sdk-core/core/src/core_tests/workflow_tasks.rs +2070 -0
- data/bridge/sdk-core/core/src/ephemeral_server/mod.rs +515 -0
- data/bridge/sdk-core/core/src/lib.rs +175 -0
- data/bridge/sdk-core/core/src/log_export.rs +62 -0
- data/bridge/sdk-core/core/src/pollers/mod.rs +54 -0
- data/bridge/sdk-core/core/src/pollers/poll_buffer.rs +297 -0
- data/bridge/sdk-core/core/src/protosext/mod.rs +428 -0
- data/bridge/sdk-core/core/src/replay/mod.rs +71 -0
- data/bridge/sdk-core/core/src/retry_logic.rs +202 -0
- data/bridge/sdk-core/core/src/telemetry/metrics.rs +383 -0
- data/bridge/sdk-core/core/src/telemetry/mod.rs +412 -0
- data/bridge/sdk-core/core/src/telemetry/prometheus_server.rs +77 -0
- data/bridge/sdk-core/core/src/test_help/mod.rs +875 -0
- data/bridge/sdk-core/core/src/worker/activities/activity_heartbeat_manager.rs +580 -0
- data/bridge/sdk-core/core/src/worker/activities/local_activities.rs +1042 -0
- data/bridge/sdk-core/core/src/worker/activities.rs +464 -0
- data/bridge/sdk-core/core/src/worker/client/mocks.rs +87 -0
- data/bridge/sdk-core/core/src/worker/client.rs +347 -0
- data/bridge/sdk-core/core/src/worker/mod.rs +566 -0
- data/bridge/sdk-core/core/src/worker/workflow/bridge.rs +37 -0
- data/bridge/sdk-core/core/src/worker/workflow/driven_workflow.rs +110 -0
- data/bridge/sdk-core/core/src/worker/workflow/history_update.rs +458 -0
- data/bridge/sdk-core/core/src/worker/workflow/machines/activity_state_machine.rs +911 -0
- data/bridge/sdk-core/core/src/worker/workflow/machines/cancel_external_state_machine.rs +298 -0
- data/bridge/sdk-core/core/src/worker/workflow/machines/cancel_workflow_state_machine.rs +171 -0
- data/bridge/sdk-core/core/src/worker/workflow/machines/child_workflow_state_machine.rs +860 -0
- data/bridge/sdk-core/core/src/worker/workflow/machines/complete_workflow_state_machine.rs +140 -0
- data/bridge/sdk-core/core/src/worker/workflow/machines/continue_as_new_workflow_state_machine.rs +161 -0
- data/bridge/sdk-core/core/src/worker/workflow/machines/fail_workflow_state_machine.rs +133 -0
- data/bridge/sdk-core/core/src/worker/workflow/machines/local_activity_state_machine.rs +1448 -0
- data/bridge/sdk-core/core/src/worker/workflow/machines/mod.rs +342 -0
- data/bridge/sdk-core/core/src/worker/workflow/machines/mutable_side_effect_state_machine.rs +127 -0
- data/bridge/sdk-core/core/src/worker/workflow/machines/patch_state_machine.rs +712 -0
- data/bridge/sdk-core/core/src/worker/workflow/machines/side_effect_state_machine.rs +71 -0
- data/bridge/sdk-core/core/src/worker/workflow/machines/signal_external_state_machine.rs +443 -0
- data/bridge/sdk-core/core/src/worker/workflow/machines/timer_state_machine.rs +439 -0
- data/bridge/sdk-core/core/src/worker/workflow/machines/transition_coverage.rs +169 -0
- data/bridge/sdk-core/core/src/worker/workflow/machines/upsert_search_attributes_state_machine.rs +246 -0
- data/bridge/sdk-core/core/src/worker/workflow/machines/workflow_machines/local_acts.rs +96 -0
- data/bridge/sdk-core/core/src/worker/workflow/machines/workflow_machines.rs +1184 -0
- data/bridge/sdk-core/core/src/worker/workflow/machines/workflow_task_state_machine.rs +277 -0
- data/bridge/sdk-core/core/src/worker/workflow/managed_run/managed_wf_test.rs +198 -0
- data/bridge/sdk-core/core/src/worker/workflow/managed_run.rs +647 -0
- data/bridge/sdk-core/core/src/worker/workflow/mod.rs +1143 -0
- data/bridge/sdk-core/core/src/worker/workflow/run_cache.rs +145 -0
- data/bridge/sdk-core/core/src/worker/workflow/wft_poller.rs +88 -0
- data/bridge/sdk-core/core/src/worker/workflow/workflow_stream.rs +940 -0
- data/bridge/sdk-core/core-api/Cargo.toml +31 -0
- data/bridge/sdk-core/core-api/LICENSE.txt +23 -0
- data/bridge/sdk-core/core-api/src/errors.rs +95 -0
- data/bridge/sdk-core/core-api/src/lib.rs +151 -0
- data/bridge/sdk-core/core-api/src/worker.rs +135 -0
- data/bridge/sdk-core/etc/deps.svg +187 -0
- data/bridge/sdk-core/etc/dynamic-config.yaml +2 -0
- data/bridge/sdk-core/etc/otel-collector-config.yaml +36 -0
- data/bridge/sdk-core/etc/prometheus.yaml +6 -0
- data/bridge/sdk-core/fsm/Cargo.toml +18 -0
- data/bridge/sdk-core/fsm/LICENSE.txt +23 -0
- data/bridge/sdk-core/fsm/README.md +3 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/Cargo.toml +27 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/LICENSE.txt +23 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/src/lib.rs +647 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/progress.rs +8 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/dupe_transitions_fail.rs +18 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/dupe_transitions_fail.stderr +12 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/dynamic_dest_pass.rs +41 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/forgot_name_fail.rs +14 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/forgot_name_fail.stderr +11 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/handler_arg_pass.rs +32 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/handler_pass.rs +31 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/medium_complex_pass.rs +46 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/no_handle_conversions_require_into_fail.rs +29 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/no_handle_conversions_require_into_fail.stderr +12 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/simple_pass.rs +32 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/struct_event_variant_fail.rs +18 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/struct_event_variant_fail.stderr +5 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/tuple_more_item_event_variant_fail.rs +11 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/tuple_more_item_event_variant_fail.stderr +5 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/tuple_zero_item_event_variant_fail.rs +11 -0
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/tuple_zero_item_event_variant_fail.stderr +5 -0
- data/bridge/sdk-core/fsm/rustfsm_trait/Cargo.toml +14 -0
- data/bridge/sdk-core/fsm/rustfsm_trait/LICENSE.txt +23 -0
- data/bridge/sdk-core/fsm/rustfsm_trait/src/lib.rs +249 -0
- data/bridge/sdk-core/fsm/src/lib.rs +2 -0
- data/bridge/sdk-core/histories/fail_wf_task.bin +0 -0
- data/bridge/sdk-core/histories/timer_workflow_history.bin +0 -0
- data/bridge/sdk-core/integ-with-otel.sh +7 -0
- data/bridge/sdk-core/protos/api_upstream/README.md +9 -0
- data/bridge/sdk-core/protos/api_upstream/api-linter.yaml +40 -0
- data/bridge/sdk-core/protos/api_upstream/buf.yaml +12 -0
- data/bridge/sdk-core/protos/api_upstream/dependencies/gogoproto/gogo.proto +141 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/batch/v1/message.proto +86 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/cluster/v1/message.proto +83 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/command/v1/message.proto +259 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/common/v1/message.proto +112 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/batch_operation.proto +46 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/cluster.proto +40 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/command_type.proto +57 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/common.proto +55 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/event_type.proto +168 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/failed_cause.proto +97 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/namespace.proto +51 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/query.proto +50 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/reset.proto +41 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/schedule.proto +60 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/task_queue.proto +59 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/update.proto +51 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/workflow.proto +122 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/errordetails/v1/message.proto +108 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/failure/v1/message.proto +114 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/filter/v1/message.proto +56 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/history/v1/message.proto +751 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/namespace/v1/message.proto +97 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/operatorservice/v1/request_response.proto +161 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/operatorservice/v1/service.proto +99 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/query/v1/message.proto +61 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/replication/v1/message.proto +55 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/schedule/v1/message.proto +300 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/taskqueue/v1/message.proto +108 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/update/v1/message.proto +46 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/version/v1/message.proto +59 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/workflow/v1/message.proto +145 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/workflowservice/v1/request_response.proto +1124 -0
- data/bridge/sdk-core/protos/api_upstream/temporal/api/workflowservice/v1/service.proto +401 -0
- data/bridge/sdk-core/protos/grpc/health/v1/health.proto +63 -0
- data/bridge/sdk-core/protos/local/temporal/sdk/core/activity_result/activity_result.proto +78 -0
- data/bridge/sdk-core/protos/local/temporal/sdk/core/activity_task/activity_task.proto +79 -0
- data/bridge/sdk-core/protos/local/temporal/sdk/core/bridge/bridge.proto +210 -0
- data/bridge/sdk-core/protos/local/temporal/sdk/core/child_workflow/child_workflow.proto +77 -0
- data/bridge/sdk-core/protos/local/temporal/sdk/core/common/common.proto +15 -0
- data/bridge/sdk-core/protos/local/temporal/sdk/core/core_interface.proto +30 -0
- data/bridge/sdk-core/protos/local/temporal/sdk/core/external_data/external_data.proto +30 -0
- data/bridge/sdk-core/protos/local/temporal/sdk/core/workflow_activation/workflow_activation.proto +261 -0
- data/bridge/sdk-core/protos/local/temporal/sdk/core/workflow_commands/workflow_commands.proto +297 -0
- data/bridge/sdk-core/protos/local/temporal/sdk/core/workflow_completion/workflow_completion.proto +29 -0
- data/bridge/sdk-core/protos/testsrv_upstream/api-linter.yaml +38 -0
- data/bridge/sdk-core/protos/testsrv_upstream/buf.yaml +13 -0
- data/bridge/sdk-core/protos/testsrv_upstream/dependencies/gogoproto/gogo.proto +141 -0
- data/bridge/sdk-core/protos/testsrv_upstream/temporal/api/testservice/v1/request_response.proto +63 -0
- data/bridge/sdk-core/protos/testsrv_upstream/temporal/api/testservice/v1/service.proto +90 -0
- data/bridge/sdk-core/rustfmt.toml +1 -0
- data/bridge/sdk-core/sdk/Cargo.toml +47 -0
- data/bridge/sdk-core/sdk/LICENSE.txt +23 -0
- data/bridge/sdk-core/sdk/src/activity_context.rs +230 -0
- data/bridge/sdk-core/sdk/src/app_data.rs +37 -0
- data/bridge/sdk-core/sdk/src/conversions.rs +8 -0
- data/bridge/sdk-core/sdk/src/interceptors.rs +17 -0
- data/bridge/sdk-core/sdk/src/lib.rs +792 -0
- data/bridge/sdk-core/sdk/src/payload_converter.rs +11 -0
- data/bridge/sdk-core/sdk/src/workflow_context/options.rs +295 -0
- data/bridge/sdk-core/sdk/src/workflow_context.rs +683 -0
- data/bridge/sdk-core/sdk/src/workflow_future.rs +503 -0
- data/bridge/sdk-core/sdk-core-protos/Cargo.toml +30 -0
- data/bridge/sdk-core/sdk-core-protos/LICENSE.txt +23 -0
- data/bridge/sdk-core/sdk-core-protos/build.rs +108 -0
- data/bridge/sdk-core/sdk-core-protos/src/constants.rs +7 -0
- data/bridge/sdk-core/sdk-core-protos/src/history_builder.rs +497 -0
- data/bridge/sdk-core/sdk-core-protos/src/history_info.rs +230 -0
- data/bridge/sdk-core/sdk-core-protos/src/lib.rs +1910 -0
- data/bridge/sdk-core/sdk-core-protos/src/task_token.rs +38 -0
- data/bridge/sdk-core/sdk-core-protos/src/utilities.rs +14 -0
- data/bridge/sdk-core/test-utils/Cargo.toml +35 -0
- data/bridge/sdk-core/test-utils/src/canned_histories.rs +1579 -0
- data/bridge/sdk-core/test-utils/src/histfetch.rs +28 -0
- data/bridge/sdk-core/test-utils/src/lib.rs +598 -0
- data/bridge/sdk-core/tests/integ_tests/client_tests.rs +36 -0
- data/bridge/sdk-core/tests/integ_tests/ephemeral_server_tests.rs +128 -0
- data/bridge/sdk-core/tests/integ_tests/heartbeat_tests.rs +218 -0
- data/bridge/sdk-core/tests/integ_tests/polling_tests.rs +146 -0
- data/bridge/sdk-core/tests/integ_tests/queries_tests.rs +437 -0
- data/bridge/sdk-core/tests/integ_tests/visibility_tests.rs +93 -0
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/activities.rs +878 -0
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/appdata_propagation.rs +61 -0
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/cancel_external.rs +59 -0
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/cancel_wf.rs +58 -0
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/child_workflows.rs +50 -0
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/continue_as_new.rs +60 -0
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/determinism.rs +54 -0
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/local_activities.rs +634 -0
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/patches.rs +113 -0
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/replay.rs +137 -0
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/resets.rs +93 -0
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/signals.rs +167 -0
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/stickyness.rs +99 -0
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/timers.rs +131 -0
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/upsert_search_attrs.rs +75 -0
- data/bridge/sdk-core/tests/integ_tests/workflow_tests.rs +587 -0
- data/bridge/sdk-core/tests/load_tests.rs +191 -0
- data/bridge/sdk-core/tests/main.rs +111 -0
- data/bridge/sdk-core/tests/runner.rs +93 -0
- data/bridge/src/connection.rs +167 -0
- data/bridge/src/lib.rs +180 -0
- data/bridge/src/runtime.rs +47 -0
- data/bridge/src/worker.rs +73 -0
- data/ext/Rakefile +9 -0
- data/lib/bridge.so +0 -0
- data/lib/gen/dependencies/gogoproto/gogo_pb.rb +14 -0
- data/lib/gen/temporal/api/batch/v1/message_pb.rb +48 -0
- data/lib/gen/temporal/api/cluster/v1/message_pb.rb +67 -0
- data/lib/gen/temporal/api/command/v1/message_pb.rb +166 -0
- data/lib/gen/temporal/api/common/v1/message_pb.rb +69 -0
- data/lib/gen/temporal/api/enums/v1/batch_operation_pb.rb +32 -0
- data/lib/gen/temporal/api/enums/v1/cluster_pb.rb +26 -0
- data/lib/gen/temporal/api/enums/v1/command_type_pb.rb +37 -0
- data/lib/gen/temporal/api/enums/v1/common_pb.rb +41 -0
- data/lib/gen/temporal/api/enums/v1/event_type_pb.rb +67 -0
- data/lib/gen/temporal/api/enums/v1/failed_cause_pb.rb +71 -0
- data/lib/gen/temporal/api/enums/v1/namespace_pb.rb +37 -0
- data/lib/gen/temporal/api/enums/v1/query_pb.rb +31 -0
- data/lib/gen/temporal/api/enums/v1/reset_pb.rb +24 -0
- data/lib/gen/temporal/api/enums/v1/schedule_pb.rb +28 -0
- data/lib/gen/temporal/api/enums/v1/task_queue_pb.rb +30 -0
- data/lib/gen/temporal/api/enums/v1/update_pb.rb +28 -0
- data/lib/gen/temporal/api/enums/v1/workflow_pb.rb +89 -0
- data/lib/gen/temporal/api/errordetails/v1/message_pb.rb +84 -0
- data/lib/gen/temporal/api/failure/v1/message_pb.rb +83 -0
- data/lib/gen/temporal/api/filter/v1/message_pb.rb +40 -0
- data/lib/gen/temporal/api/history/v1/message_pb.rb +489 -0
- data/lib/gen/temporal/api/namespace/v1/message_pb.rb +63 -0
- data/lib/gen/temporal/api/operatorservice/v1/request_response_pb.rb +125 -0
- data/lib/gen/temporal/api/operatorservice/v1/service_pb.rb +20 -0
- data/lib/gen/temporal/api/query/v1/message_pb.rb +38 -0
- data/lib/gen/temporal/api/replication/v1/message_pb.rb +37 -0
- data/lib/gen/temporal/api/schedule/v1/message_pb.rb +128 -0
- data/lib/gen/temporal/api/taskqueue/v1/message_pb.rb +73 -0
- data/lib/gen/temporal/api/update/v1/message_pb.rb +26 -0
- data/lib/gen/temporal/api/version/v1/message_pb.rb +41 -0
- data/lib/gen/temporal/api/workflow/v1/message_pb.rb +110 -0
- data/lib/gen/temporal/api/workflowservice/v1/request_response_pb.rb +771 -0
- data/lib/gen/temporal/api/workflowservice/v1/service_pb.rb +20 -0
- data/lib/gen/temporal/sdk/core/activity_result/activity_result_pb.rb +58 -0
- data/lib/gen/temporal/sdk/core/activity_task/activity_task_pb.rb +57 -0
- data/lib/gen/temporal/sdk/core/bridge/bridge_pb.rb +222 -0
- data/lib/gen/temporal/sdk/core/child_workflow/child_workflow_pb.rb +57 -0
- data/lib/gen/temporal/sdk/core/common/common_pb.rb +22 -0
- data/lib/gen/temporal/sdk/core/core_interface_pb.rb +34 -0
- data/lib/gen/temporal/sdk/core/external_data/external_data_pb.rb +27 -0
- data/lib/gen/temporal/sdk/core/workflow_activation/workflow_activation_pb.rb +164 -0
- data/lib/gen/temporal/sdk/core/workflow_commands/workflow_commands_pb.rb +192 -0
- data/lib/gen/temporal/sdk/core/workflow_completion/workflow_completion_pb.rb +34 -0
- data/lib/temporal/bridge.rb +14 -0
- data/lib/temporal/client/implementation.rb +339 -0
- data/lib/temporal/client/workflow_handle.rb +243 -0
- data/lib/temporal/client.rb +144 -0
- data/lib/temporal/connection.rb +736 -0
- data/lib/temporal/data_converter.rb +150 -0
- data/lib/temporal/error/failure.rb +194 -0
- data/lib/temporal/error/workflow_failure.rb +17 -0
- data/lib/temporal/errors.rb +22 -0
- data/lib/temporal/failure_converter/base.rb +26 -0
- data/lib/temporal/failure_converter/basic.rb +313 -0
- data/lib/temporal/failure_converter.rb +8 -0
- data/lib/temporal/interceptor/chain.rb +27 -0
- data/lib/temporal/interceptor/client.rb +102 -0
- data/lib/temporal/payload_codec/base.rb +32 -0
- data/lib/temporal/payload_converter/base.rb +24 -0
- data/lib/temporal/payload_converter/bytes.rb +26 -0
- data/lib/temporal/payload_converter/composite.rb +47 -0
- data/lib/temporal/payload_converter/encoding_base.rb +35 -0
- data/lib/temporal/payload_converter/json.rb +25 -0
- data/lib/temporal/payload_converter/nil.rb +25 -0
- data/lib/temporal/payload_converter.rb +14 -0
- data/lib/temporal/retry_policy.rb +82 -0
- data/lib/temporal/retry_state.rb +35 -0
- data/lib/temporal/runtime.rb +22 -0
- data/lib/temporal/timeout_type.rb +29 -0
- data/lib/temporal/version.rb +3 -0
- data/lib/temporal/workflow/execution_info.rb +54 -0
- data/lib/temporal/workflow/execution_status.rb +36 -0
- data/lib/temporal/workflow/id_reuse_policy.rb +36 -0
- data/lib/temporal/workflow/query_reject_condition.rb +33 -0
- data/lib/temporal.rb +8 -0
- data/lib/temporalio.rb +3 -0
- data/lib/thermite_patch.rb +23 -0
- data/temporalio.gemspec +41 -0
- metadata +583 -0
@@ -0,0 +1,647 @@
|
|
1
|
+
#[cfg(test)]
|
2
|
+
mod managed_wf_test;
|
3
|
+
|
4
|
+
use crate::{
|
5
|
+
worker::{
|
6
|
+
workflow::{
|
7
|
+
machines::WorkflowMachines, ActivationAction, ActivationCompleteOutcome, HistoryUpdate,
|
8
|
+
LocalResolution, NewIncomingWFT, OutgoingServerCommands, RequestEvictMsg, RunActions,
|
9
|
+
RunActivationCompletion, RunUpdateResponse, ServerCommandsWithWorkflowInfo, WFCommand,
|
10
|
+
WorkflowBridge,
|
11
|
+
},
|
12
|
+
LocalActRequest,
|
13
|
+
},
|
14
|
+
MetricsContext,
|
15
|
+
};
|
16
|
+
use futures::{stream, StreamExt};
|
17
|
+
use std::{
|
18
|
+
ops::Add,
|
19
|
+
sync::mpsc::Sender,
|
20
|
+
time::{Duration, Instant},
|
21
|
+
};
|
22
|
+
use temporal_sdk_core_api::errors::WFMachinesError;
|
23
|
+
use temporal_sdk_core_protos::coresdk::{
|
24
|
+
workflow_activation::{RemoveFromCache, WorkflowActivation},
|
25
|
+
workflow_commands::QueryResult,
|
26
|
+
};
|
27
|
+
use tokio::{
|
28
|
+
sync::{
|
29
|
+
mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender},
|
30
|
+
oneshot,
|
31
|
+
},
|
32
|
+
task,
|
33
|
+
task::JoinHandle,
|
34
|
+
};
|
35
|
+
use tokio_stream::wrappers::UnboundedReceiverStream;
|
36
|
+
use tracing::Span;
|
37
|
+
use tracing_futures::Instrument;
|
38
|
+
|
39
|
+
use crate::worker::workflow::{
|
40
|
+
ActivationCompleteResult, ActivationOrAuto, FailRunUpdate, FulfillableActivationComplete,
|
41
|
+
GoodRunUpdate, LocalActivityRequestSink, RunAction, RunUpdateResponseKind,
|
42
|
+
};
|
43
|
+
use temporal_sdk_core_protos::TaskToken;
|
44
|
+
|
45
|
+
use crate::abstractions::dbg_panic;
|
46
|
+
#[cfg(test)]
|
47
|
+
pub(crate) use managed_wf_test::ManagedWFFunc;
|
48
|
+
|
49
|
+
/// Module-local result alias; errors default to [`WFMachinesError`].
type Result<T, E = WFMachinesError> = std::result::Result<T, E>;

/// What percentage of a WFT timeout we are willing to wait before sending a WFT heartbeat when
/// necessary.
const WFT_HEARTBEAT_TIMEOUT_FRACTION: f32 = 0.8;
|
53
|
+
|
54
|
+
/// Drives a single workflow run: feeds it incoming workflow tasks, applies activation
/// completions, forwards local-activity resolutions, and reports outcomes back to the
/// owning workflow stream over `update_tx`.
pub(super) struct ManagedRun {
    /// The underlying workflow state machines / manager for this run.
    wfm: WorkflowManager,
    /// Channel on which all run-update responses (success or failure) are sent.
    update_tx: UnboundedSender<RunUpdateResponse>,
    /// Invoked with newly-drained local activity requests; returns any resolutions
    /// that can be applied immediately.
    local_activity_request_sink: LocalActivityRequestSink,
    /// Set while an activation completion must wait on outstanding local activities
    /// (or a WFT heartbeat timeout) before it can finish. `None` otherwise.
    waiting_on_la: Option<WaitingOnLAs>,
    // Is set to true if the machines encounter an error and the only subsequent thing we should
    // do is be evicted.
    am_broken: bool,
}
|
63
|
+
|
64
|
+
/// If an activation completion needed to wait on LA completions (or heartbeat timeout) we use
/// this struct to store the data we need to finish the completion once that has happened
struct WaitingOnLAs {
    /// The workflow task timeout; used when (re)starting the heartbeat timeout task.
    wft_timeout: Duration,
    /// If set, we are waiting for LAs to complete as part of a just-finished workflow activation.
    /// If unset, we already had a heartbeat timeout and got a new WFT without any new work while
    /// there are still incomplete LAs.
    completion_dat: Option<(
        CompletionDataForWFT,
        oneshot::Sender<ActivationCompleteResult>,
    )>,
    /// Channel the heartbeat-timeout task fires on, carrying the tracing span to run under.
    hb_chan: UnboundedSender<Span>,
    /// Handle to the currently-scheduled heartbeat timeout task; aborted and replaced
    /// whenever a new WFT or completion restarts the heartbeat clock.
    heartbeat_timeout_task: JoinHandle<()>,
}
|
78
|
+
|
79
|
+
/// Data carried through an activation completion so the WFT response can be assembled
/// once any outstanding local activities have resolved (or immediately, if none are).
#[derive(Debug)]
struct CompletionDataForWFT {
    /// Task token identifying the workflow task being completed.
    task_token: TaskToken,
    /// Query results to attach to the completion.
    query_responses: Vec<QueryResult>,
    /// Whether the completed activation still has a pending query to answer.
    has_pending_query: bool,
    /// True if the activation being completed contained nothing but an eviction job.
    activation_was_only_eviction: bool,
}
|
86
|
+
|
87
|
+
impl ManagedRun {
|
88
|
+
pub(super) fn new(
|
89
|
+
wfm: WorkflowManager,
|
90
|
+
update_tx: UnboundedSender<RunUpdateResponse>,
|
91
|
+
local_activity_request_sink: LocalActivityRequestSink,
|
92
|
+
) -> Self {
|
93
|
+
Self {
|
94
|
+
wfm,
|
95
|
+
update_tx,
|
96
|
+
local_activity_request_sink,
|
97
|
+
waiting_on_la: None,
|
98
|
+
am_broken: false,
|
99
|
+
}
|
100
|
+
}
|
101
|
+
|
102
|
+
/// The main event loop for this run. Consumes `self` and processes [`RunAction`]s from
/// `run_actions_rx`, merged with internally-generated heartbeat-timeout actions, until
/// both streams close.
///
/// Each action is dispatched to the matching handler; a successful outcome is forwarded
/// via `send_update_response`, while an error marks the run broken and sends a
/// [`FailRunUpdate`] on `update_tx`. Every iteration is instrumented with the action's
/// tracing span.
pub(super) async fn run(self, run_actions_rx: UnboundedReceiver<RunAction>) {
    // Heartbeat timeout tasks send their span on this channel; we convert each such
    // message into a HeartbeatTimeout action merged into the main action stream.
    let (heartbeat_tx, heartbeat_rx) = unbounded_channel();
    stream::select(
        UnboundedReceiverStream::new(run_actions_rx),
        UnboundedReceiverStream::new(heartbeat_rx).map(|trace_span| RunAction {
            action: RunActions::HeartbeatTimeout,
            trace_span,
        }),
    )
    // `fold` threads ownership of (self, heartbeat_tx) through each iteration so the
    // async closure can mutate the run without shared state.
    .fold((self, heartbeat_tx), |(mut me, heartbeat_tx), action| {
        let span = action.trace_span;
        let action = action.action;
        // Tracks whether this CheckMoreWork action arrived without an outstanding WFT,
        // which is passed through to `send_update_response`.
        let mut no_wft = false;
        async move {
            let res = match action {
                RunActions::NewIncomingWFT(wft) => me
                    .incoming_wft(wft)
                    .await
                    .map(RunActionOutcome::AfterNewWFT),
                RunActions::ActivationCompletion(completion) => me
                    .completion(completion, &heartbeat_tx)
                    .await
                    .map(RunActionOutcome::AfterCompletion),
                RunActions::CheckMoreWork {
                    want_to_evict,
                    has_pending_queries,
                    has_wft,
                } => {
                    if !has_wft {
                        no_wft = true;
                    }
                    me.check_more_work(want_to_evict, has_pending_queries, has_wft)
                        .await
                        .map(RunActionOutcome::AfterCheckWork)
                }
                RunActions::LocalResolution(r) => me
                    .local_resolution(r)
                    .await
                    .map(RunActionOutcome::AfterLocalResolution),
                RunActions::HeartbeatTimeout => {
                    // `heartbeat_timeout()` returning true means the task should be
                    // autocompleted rather than waiting further.
                    let maybe_act = if me.heartbeat_timeout() {
                        Some(ActivationOrAuto::Autocomplete {
                            run_id: me.wfm.machines.run_id.clone(),
                        })
                    } else {
                        None
                    };
                    Ok(RunActionOutcome::AfterHeartbeatTimeout(maybe_act))
                }
            };
            match res {
                Ok(outcome) => {
                    me.send_update_response(outcome, no_wft);
                }
                Err(e) => {
                    error!(error=?e, "Error in run machines");
                    // After any machine error the run is only good for eviction.
                    me.am_broken = true;
                    me.update_tx
                        .send(RunUpdateResponse {
                            kind: RunUpdateResponseKind::Fail(FailRunUpdate {
                                run_id: me.wfm.machines.run_id.clone(),
                                err: e.source,
                                completion_resp: e.complete_resp_chan,
                            }),
                            span: Span::current(),
                        })
                        .expect("Machine can send update");
                }
            }
            // Hand ownership back to the fold for the next iteration.
            (me, heartbeat_tx)
        }
        .instrument(span)
    })
    .await;
}
|
177
|
+
|
178
|
+
/// Handle a new workflow task arriving for this run.
///
/// If the WFT carries a history update it is fed to the machines; otherwise the next
/// buffered activation is pulled (it is a fatal error for freshly-created machines to
/// produce an empty activation here). Returns:
/// - `Ok(Some(LangActivation))` when there is work for lang,
/// - `Ok(Some(Autocomplete))` when the task has no jobs and no outstanding LAs,
/// - `Ok(None)` when we only needed to restart the WFT heartbeat while LAs finish.
///
/// # Panics
/// Panics if a job-less WFT arrives while LAs are outstanding but the waiting-on-LA
/// bookkeeping is absent or inconsistent (a broken internal invariant).
async fn incoming_wft(
    &mut self,
    wft: NewIncomingWFT,
) -> Result<Option<ActivationOrAuto>, RunUpdateErr> {
    let activation = if let Some(h) = wft.history_update {
        self.wfm.feed_history_from_server(h).await?
    } else {
        let r = self.wfm.get_next_activation().await?;
        if r.jobs.is_empty() {
            // A WFT without a history update must produce jobs; otherwise something
            // went wrong constructing the machines.
            return Err(RunUpdateErr {
                source: WFMachinesError::Fatal(format!(
                    "Machines created for {} with no jobs",
                    self.wfm.machines.run_id
                )),
                complete_resp_chan: None,
            });
        }
        r
    };

    if activation.jobs.is_empty() {
        if self.wfm.machines.outstanding_local_activity_count() > 0 {
            // If the activation has no jobs but there are outstanding LAs, we need to restart the
            // WFT heartbeat.
            if let Some(ref mut lawait) = self.waiting_on_la {
                if lawait.completion_dat.is_some() {
                    panic!("Should not have completion dat when getting new wft & empty jobs")
                }
                // Replace the old heartbeat task with one based on this WFT's start time.
                lawait.heartbeat_timeout_task.abort();
                lawait.heartbeat_timeout_task = start_heartbeat_timeout_task(
                    lawait.hb_chan.clone(),
                    wft.start_time,
                    lawait.wft_timeout,
                );
                // No activation needs to be sent to lang. We just need to wait for another
                // heartbeat timeout or LAs to resolve
                return Ok(None);
            } else {
                panic!(
                    "Got a new WFT while there are outstanding local activities, but there \
                     was no waiting on LA info."
                )
            }
        } else {
            // Nothing for lang to do and no LAs pending: autocomplete the task.
            return Ok(Some(ActivationOrAuto::Autocomplete {
                run_id: self.wfm.machines.run_id.clone(),
            }));
        }
    }

    Ok(Some(ActivationOrAuto::LangActivation(activation)))
}
|
230
|
+
|
231
|
+
/// Apply an activation completion from lang to the workflow machines.
///
/// Pushes the completion's commands into the machines, advances to the next task
/// (unless the activation was an eviction), drains and dispatches any new local
/// activity requests, then either:
/// - returns `Ok(Some(..))` with a fulfillable completion when no LAs are outstanding, or
/// - stores the completion data in `waiting_on_la` (restarting the heartbeat timeout
///   task) and returns `Ok(None)` so it can be finished once LAs resolve.
///
/// On any machine error the taken response channel is handed back inside the
/// [`RunUpdateErr`] so the caller can still be answered.
///
/// # Panics
/// Panics if the completion arrives without its response channel populated.
async fn completion(
    &mut self,
    mut completion: RunActivationCompletion,
    heartbeat_tx: &UnboundedSender<Span>,
) -> Result<Option<FulfillableActivationComplete>, RunUpdateErr> {
    // Take the channel out up front so it survives the `self`-consuming async block
    // below and can be attached to an error if anything fails.
    let resp_chan = completion
        .resp_chan
        .take()
        .expect("Completion response channel must be populated");

    let outcome = async move {
        // Send commands from lang into the machines then check if the workflow run
        // needs another activation and mark it if so
        self.wfm.push_commands(completion.commands).await?;
        // Don't bother applying the next task if we're evicting at the end of
        // this activation
        if !completion.activation_was_eviction {
            self.wfm.apply_next_task_if_ready().await?;
        }
        let new_local_acts = self.wfm.drain_queued_local_activities();

        // The sink may resolve some LAs synchronously; feed those straight back in.
        let immediate_resolutions = (self.local_activity_request_sink)(new_local_acts);
        for resolution in immediate_resolutions {
            self.wfm
                .notify_of_local_result(LocalResolution::LocalActivity(resolution))?;
        }

        let data = CompletionDataForWFT {
            task_token: completion.task_token,
            query_responses: completion.query_responses,
            has_pending_query: completion.has_pending_query,
            activation_was_only_eviction: completion.activation_was_only_eviction,
        };
        if self.wfm.machines.outstanding_local_activity_count() == 0 {
            // Nothing to wait on -- complete immediately.
            Ok((None, data, self))
        } else {
            // LAs are outstanding: compute the WFT timeout so a heartbeat task can
            // be scheduled while we wait.
            let wft_timeout: Duration = self
                .wfm
                .machines
                .get_started_info()
                .and_then(|attrs| attrs.workflow_task_timeout)
                .ok_or_else(|| {
                    WFMachinesError::Fatal(
                        "Workflow's start attribs were missing a well formed task timeout"
                            .to_string(),
                    )
                })?;
            let heartbeat_tx = heartbeat_tx.clone();
            Ok((
                Some((heartbeat_tx, completion.start_time, wft_timeout)),
                data,
                self,
            ))
        }
    }
    .await;

    match outcome {
        Ok((None, data, me)) => Ok(Some(me.prepare_complete_resp(resp_chan, data, false))),
        Ok((Some((chan, start_t, wft_timeout)), data, me)) => {
            // Replace any previous LA wait's heartbeat task before installing the new wait.
            if let Some(wola) = me.waiting_on_la.as_mut() {
                wola.heartbeat_timeout_task.abort();
            }
            me.waiting_on_la = Some(WaitingOnLAs {
                wft_timeout,
                completion_dat: Some((data, resp_chan)),
                hb_chan: chan.clone(),
                heartbeat_timeout_task: start_heartbeat_timeout_task(
                    chan,
                    start_t,
                    wft_timeout,
                ),
            });
            Ok(None)
        }
        Err(e) => Err(RunUpdateErr {
            source: e,
            complete_resp_chan: Some(resp_chan),
        }),
    }
}
|
312
|
+
|
313
|
+
async fn check_more_work(
|
314
|
+
&mut self,
|
315
|
+
want_to_evict: Option<RequestEvictMsg>,
|
316
|
+
has_pending_queries: bool,
|
317
|
+
has_wft: bool,
|
318
|
+
) -> Result<Option<ActivationOrAuto>, RunUpdateErr> {
|
319
|
+
if !has_wft {
|
320
|
+
// It doesn't make sense to do work unless we have a WFT
|
321
|
+
return Ok(None);
|
322
|
+
}
|
323
|
+
if self.wfm.machines.has_pending_jobs() && !self.am_broken {
|
324
|
+
Ok(Some(ActivationOrAuto::LangActivation(
|
325
|
+
self.wfm.get_next_activation().await?,
|
326
|
+
)))
|
327
|
+
} else {
|
328
|
+
if has_pending_queries && !self.am_broken {
|
329
|
+
return Ok(Some(ActivationOrAuto::ReadyForQueries(
|
330
|
+
self.wfm.machines.get_wf_activation(),
|
331
|
+
)));
|
332
|
+
}
|
333
|
+
if let Some(wte) = want_to_evict {
|
334
|
+
let mut act = self.wfm.machines.get_wf_activation();
|
335
|
+
// No other jobs make any sense to send if we encountered an error.
|
336
|
+
if self.am_broken {
|
337
|
+
act.jobs = vec![];
|
338
|
+
}
|
339
|
+
act.append_evict_job(RemoveFromCache {
|
340
|
+
message: wte.message,
|
341
|
+
reason: wte.reason as i32,
|
342
|
+
});
|
343
|
+
Ok(Some(ActivationOrAuto::LangActivation(act)))
|
344
|
+
} else {
|
345
|
+
Ok(None)
|
346
|
+
}
|
347
|
+
}
|
348
|
+
}
|
349
|
+
|
350
|
+
fn prepare_complete_resp(
|
351
|
+
&mut self,
|
352
|
+
resp_chan: oneshot::Sender<ActivationCompleteResult>,
|
353
|
+
data: CompletionDataForWFT,
|
354
|
+
due_to_heartbeat_timeout: bool,
|
355
|
+
) -> FulfillableActivationComplete {
|
356
|
+
let outgoing_cmds = self.wfm.get_server_commands();
|
357
|
+
let query_responses = data.query_responses;
|
358
|
+
let has_query_responses = !query_responses.is_empty();
|
359
|
+
let is_query_playback = data.has_pending_query && !has_query_responses;
|
360
|
+
|
361
|
+
// We only actually want to send commands back to the server if there are no more
|
362
|
+
// pending activations and we are caught up on replay. We don't want to complete a wft
|
363
|
+
// if we already saw the final event in the workflow, or if we are playing back for the
|
364
|
+
// express purpose of fulfilling a query. If the activation we sent was *only* an
|
365
|
+
// eviction, and there were no commands produced during iteration, don't send that
|
366
|
+
// either.
|
367
|
+
let no_commands_and_evicting =
|
368
|
+
outgoing_cmds.commands.is_empty() && data.activation_was_only_eviction;
|
369
|
+
let to_be_sent = ServerCommandsWithWorkflowInfo {
|
370
|
+
task_token: data.task_token,
|
371
|
+
action: ActivationAction::WftComplete {
|
372
|
+
force_new_wft: due_to_heartbeat_timeout,
|
373
|
+
commands: outgoing_cmds.commands,
|
374
|
+
query_responses,
|
375
|
+
},
|
376
|
+
};
|
377
|
+
|
378
|
+
let should_respond = !(self.wfm.machines.has_pending_jobs()
|
379
|
+
|| outgoing_cmds.replaying
|
380
|
+
|| is_query_playback
|
381
|
+
|| no_commands_and_evicting);
|
382
|
+
let outcome = if should_respond || has_query_responses {
|
383
|
+
ActivationCompleteOutcome::ReportWFTSuccess(to_be_sent)
|
384
|
+
} else {
|
385
|
+
ActivationCompleteOutcome::DoNothing
|
386
|
+
};
|
387
|
+
FulfillableActivationComplete {
|
388
|
+
result: ActivationCompleteResult {
|
389
|
+
most_recently_processed_event: self.wfm.machines.last_processed_event as usize,
|
390
|
+
outcome,
|
391
|
+
},
|
392
|
+
resp_chan,
|
393
|
+
}
|
394
|
+
}
|
395
|
+
|
396
|
+
async fn local_resolution(
|
397
|
+
&mut self,
|
398
|
+
res: LocalResolution,
|
399
|
+
) -> Result<Option<FulfillableActivationComplete>, RunUpdateErr> {
|
400
|
+
debug!(resolution=?res, "Applying local resolution");
|
401
|
+
self.wfm.notify_of_local_result(res)?;
|
402
|
+
if self.wfm.machines.outstanding_local_activity_count() == 0 {
|
403
|
+
if let Some(mut wait_dat) = self.waiting_on_la.take() {
|
404
|
+
// Cancel the heartbeat timeout
|
405
|
+
wait_dat.heartbeat_timeout_task.abort();
|
406
|
+
if let Some((completion_dat, resp_chan)) = wait_dat.completion_dat.take() {
|
407
|
+
return Ok(Some(self.prepare_complete_resp(
|
408
|
+
resp_chan,
|
409
|
+
completion_dat,
|
410
|
+
false,
|
411
|
+
)));
|
412
|
+
}
|
413
|
+
}
|
414
|
+
}
|
415
|
+
Ok(None)
|
416
|
+
}
|
417
|
+
|
418
|
+
/// Returns `true` if autocompletion should be issued, which will actually cause us to end up
|
419
|
+
/// in [completion] again, at which point we'll start a new heartbeat timeout, which will
|
420
|
+
/// immediately trigger and thus finish the completion, forcing a new task as it should.
|
421
|
+
fn heartbeat_timeout(&mut self) -> bool {
|
422
|
+
if let Some(ref mut wait_dat) = self.waiting_on_la {
|
423
|
+
// Cancel the heartbeat timeout
|
424
|
+
wait_dat.heartbeat_timeout_task.abort();
|
425
|
+
if let Some((completion_dat, resp_chan)) = wait_dat.completion_dat.take() {
|
426
|
+
let compl = self.prepare_complete_resp(resp_chan, completion_dat, true);
|
427
|
+
// Immediately fulfill the completion since the run update will already have
|
428
|
+
// been replied to
|
429
|
+
compl.fulfill();
|
430
|
+
} else {
|
431
|
+
// Auto-reply WFT complete
|
432
|
+
return true;
|
433
|
+
}
|
434
|
+
} else {
|
435
|
+
// If a heartbeat timeout happened, we should always have been waiting on LAs
|
436
|
+
dbg_panic!("WFT heartbeat timeout fired but we were not waiting on any LAs");
|
437
|
+
}
|
438
|
+
false
|
439
|
+
}
|
440
|
+
|
441
|
+
fn send_update_response(&self, outcome: RunActionOutcome, no_wft: bool) {
|
442
|
+
let mut in_response_to_wft = false;
|
443
|
+
let (outgoing_activation, fulfillable_complete) = match outcome {
|
444
|
+
RunActionOutcome::AfterNewWFT(a) => {
|
445
|
+
in_response_to_wft = true;
|
446
|
+
(a, None)
|
447
|
+
}
|
448
|
+
RunActionOutcome::AfterCheckWork(a) => (a, None),
|
449
|
+
RunActionOutcome::AfterLocalResolution(f) => (None, f),
|
450
|
+
RunActionOutcome::AfterCompletion(f) => (None, f),
|
451
|
+
RunActionOutcome::AfterHeartbeatTimeout(a) => (a, None),
|
452
|
+
};
|
453
|
+
let mut more_pending_work = self.wfm.machines.has_pending_jobs();
|
454
|
+
// We don't want to consider there to be more local-only work to be done if there is no
|
455
|
+
// workflow task associated with the run right now. This can happen if, ex, we complete
|
456
|
+
// a local activity while waiting for server to send us the next WFT. Activating lang would
|
457
|
+
// be harmful at this stage, as there might be work returned in that next WFT which should
|
458
|
+
// be part of the next activation.
|
459
|
+
if no_wft {
|
460
|
+
more_pending_work = false;
|
461
|
+
}
|
462
|
+
self.update_tx
|
463
|
+
.send(RunUpdateResponse {
|
464
|
+
kind: RunUpdateResponseKind::Good(GoodRunUpdate {
|
465
|
+
run_id: self.wfm.machines.run_id.clone(),
|
466
|
+
outgoing_activation,
|
467
|
+
fulfillable_complete,
|
468
|
+
have_seen_terminal_event: self.wfm.machines.have_seen_terminal_event,
|
469
|
+
more_pending_work,
|
470
|
+
most_recently_processed_event_number: self.wfm.machines.last_processed_event
|
471
|
+
as usize,
|
472
|
+
in_response_to_wft,
|
473
|
+
}),
|
474
|
+
span: Span::current(),
|
475
|
+
})
|
476
|
+
.expect("Machine can send update");
|
477
|
+
}
|
478
|
+
}
|
479
|
+
|
480
|
+
fn start_heartbeat_timeout_task(
|
481
|
+
chan: UnboundedSender<Span>,
|
482
|
+
wft_start_time: Instant,
|
483
|
+
wft_timeout: Duration,
|
484
|
+
) -> JoinHandle<()> {
|
485
|
+
// The heartbeat deadline is 80% of the WFT timeout
|
486
|
+
let wft_heartbeat_deadline =
|
487
|
+
wft_start_time.add(wft_timeout.mul_f32(WFT_HEARTBEAT_TIMEOUT_FRACTION));
|
488
|
+
task::spawn(async move {
|
489
|
+
tokio::time::sleep_until(wft_heartbeat_deadline.into()).await;
|
490
|
+
let _ = chan.send(Span::current());
|
491
|
+
})
|
492
|
+
}
|
493
|
+
|
494
|
+
enum RunActionOutcome {
|
495
|
+
AfterNewWFT(Option<ActivationOrAuto>),
|
496
|
+
AfterCheckWork(Option<ActivationOrAuto>),
|
497
|
+
AfterLocalResolution(Option<FulfillableActivationComplete>),
|
498
|
+
AfterCompletion(Option<FulfillableActivationComplete>),
|
499
|
+
AfterHeartbeatTimeout(Option<ActivationOrAuto>),
|
500
|
+
}
|
501
|
+
|
502
|
+
#[derive(derive_more::DebugCustom)]
|
503
|
+
#[debug(fmt = "RunUpdateErr({:?})", source)]
|
504
|
+
struct RunUpdateErr {
|
505
|
+
source: WFMachinesError,
|
506
|
+
complete_resp_chan: Option<oneshot::Sender<ActivationCompleteResult>>,
|
507
|
+
}
|
508
|
+
|
509
|
+
impl From<WFMachinesError> for RunUpdateErr {
|
510
|
+
fn from(e: WFMachinesError) -> Self {
|
511
|
+
RunUpdateErr {
|
512
|
+
source: e,
|
513
|
+
complete_resp_chan: None,
|
514
|
+
}
|
515
|
+
}
|
516
|
+
}
|
517
|
+
|
518
|
+
/// Manages an instance of a [WorkflowMachines], which is not thread-safe, as well as other data
|
519
|
+
/// associated with that specific workflow run.
|
520
|
+
pub(crate) struct WorkflowManager {
|
521
|
+
machines: WorkflowMachines,
|
522
|
+
/// Is always `Some` in normal operation. Optional to allow for unit testing with the test
|
523
|
+
/// workflow driver, which does not need to complete activations the normal way.
|
524
|
+
command_sink: Option<Sender<Vec<WFCommand>>>,
|
525
|
+
}
|
526
|
+
|
527
|
+
impl WorkflowManager {
|
528
|
+
/// Create a new workflow manager given workflow history and execution info as would be found
|
529
|
+
/// in [PollWorkflowTaskQueueResponse]
|
530
|
+
pub fn new(
|
531
|
+
history: HistoryUpdate,
|
532
|
+
namespace: String,
|
533
|
+
workflow_id: String,
|
534
|
+
workflow_type: String,
|
535
|
+
run_id: String,
|
536
|
+
metrics: MetricsContext,
|
537
|
+
) -> Self {
|
538
|
+
let (wfb, cmd_sink) = WorkflowBridge::new();
|
539
|
+
let state_machines = WorkflowMachines::new(
|
540
|
+
namespace,
|
541
|
+
workflow_id,
|
542
|
+
workflow_type,
|
543
|
+
run_id,
|
544
|
+
history,
|
545
|
+
Box::new(wfb).into(),
|
546
|
+
metrics,
|
547
|
+
);
|
548
|
+
Self {
|
549
|
+
machines: state_machines,
|
550
|
+
command_sink: Some(cmd_sink),
|
551
|
+
}
|
552
|
+
}
|
553
|
+
|
554
|
+
#[cfg(test)]
|
555
|
+
pub const fn new_from_machines(workflow_machines: WorkflowMachines) -> Self {
|
556
|
+
Self {
|
557
|
+
machines: workflow_machines,
|
558
|
+
command_sink: None,
|
559
|
+
}
|
560
|
+
}
|
561
|
+
|
562
|
+
/// Given history that was just obtained from the server, pipe it into this workflow's machines.
|
563
|
+
///
|
564
|
+
/// Should only be called when a workflow has caught up on replay (or is just beginning). It
|
565
|
+
/// will return a workflow activation if one is needed.
|
566
|
+
async fn feed_history_from_server(
|
567
|
+
&mut self,
|
568
|
+
update: HistoryUpdate,
|
569
|
+
) -> Result<WorkflowActivation> {
|
570
|
+
self.machines.new_history_from_server(update).await?;
|
571
|
+
self.get_next_activation().await
|
572
|
+
}
|
573
|
+
|
574
|
+
/// Let this workflow know that something we've been waiting locally on has resolved, like a
|
575
|
+
/// local activity or side effect
|
576
|
+
///
|
577
|
+
/// Returns true if the resolution did anything. EX: If the activity is already canceled and
|
578
|
+
/// used the TryCancel or Abandon modes, the resolution is uninteresting.
|
579
|
+
fn notify_of_local_result(&mut self, resolved: LocalResolution) -> Result<bool> {
|
580
|
+
self.machines.local_resolution(resolved)
|
581
|
+
}
|
582
|
+
|
583
|
+
/// Fetch the next workflow activation for this workflow if one is required. Doing so will apply
|
584
|
+
/// the next unapplied workflow task if such a sequence exists in history we already know about.
|
585
|
+
///
|
586
|
+
/// Callers may also need to call [get_server_commands] after this to issue any pending commands
|
587
|
+
/// to the server.
|
588
|
+
async fn get_next_activation(&mut self) -> Result<WorkflowActivation> {
|
589
|
+
// First check if there are already some pending jobs, which can be a result of replay.
|
590
|
+
let activation = self.machines.get_wf_activation();
|
591
|
+
if !activation.jobs.is_empty() {
|
592
|
+
return Ok(activation);
|
593
|
+
}
|
594
|
+
|
595
|
+
self.machines.apply_next_wft_from_history().await?;
|
596
|
+
Ok(self.machines.get_wf_activation())
|
597
|
+
}
|
598
|
+
|
599
|
+
/// If there are no pending jobs for the workflow, apply the next workflow task and check
|
600
|
+
/// again if there are any jobs. Importantly, does not *drain* jobs.
|
601
|
+
///
|
602
|
+
/// Returns true if there are jobs (before or after applying the next WFT).
|
603
|
+
async fn apply_next_task_if_ready(&mut self) -> Result<bool> {
|
604
|
+
if self.machines.has_pending_jobs() {
|
605
|
+
return Ok(true);
|
606
|
+
}
|
607
|
+
loop {
|
608
|
+
let consumed_events = self.machines.apply_next_wft_from_history().await?;
|
609
|
+
|
610
|
+
if consumed_events == 0 || !self.machines.replaying || self.machines.has_pending_jobs()
|
611
|
+
{
|
612
|
+
// Keep applying tasks while there are events, we are still replaying, and there are
|
613
|
+
// no jobs
|
614
|
+
break;
|
615
|
+
}
|
616
|
+
}
|
617
|
+
Ok(self.machines.has_pending_jobs())
|
618
|
+
}
|
619
|
+
|
620
|
+
/// Typically called after [get_next_activation], use this to retrieve commands to be sent to
|
621
|
+
/// the server which have been generated by the machines. Does *not* drain those commands.
|
622
|
+
/// See [WorkflowMachines::get_commands].
|
623
|
+
fn get_server_commands(&self) -> OutgoingServerCommands {
|
624
|
+
OutgoingServerCommands {
|
625
|
+
commands: self.machines.get_commands(),
|
626
|
+
replaying: self.machines.replaying,
|
627
|
+
}
|
628
|
+
}
|
629
|
+
|
630
|
+
/// Remove and return all queued local activities. Once this is called, they need to be
|
631
|
+
/// dispatched for execution.
|
632
|
+
fn drain_queued_local_activities(&mut self) -> Vec<LocalActRequest> {
|
633
|
+
self.machines.drain_queued_local_activities()
|
634
|
+
}
|
635
|
+
|
636
|
+
/// Feed the workflow machines new commands issued by the executing workflow code, and iterate
|
637
|
+
/// the machines.
|
638
|
+
async fn push_commands(&mut self, cmds: Vec<WFCommand>) -> Result<()> {
|
639
|
+
if let Some(cs) = self.command_sink.as_mut() {
|
640
|
+
cs.send(cmds).map_err(|_| {
|
641
|
+
WFMachinesError::Fatal("Internal error buffering workflow commands".to_string())
|
642
|
+
})?;
|
643
|
+
}
|
644
|
+
self.machines.iterate_machines().await?;
|
645
|
+
Ok(())
|
646
|
+
}
|
647
|
+
}
|