temporalio 0.1.1 → 0.2.0
This diff shows the changes between package versions that have been publicly released to one of the supported registries. The information in this diff is provided for informational purposes only and reflects the package contents as published in the respective public registry.
- checksums.yaml +4 -4
- data/Cargo.lock +4035 -0
- data/Cargo.toml +25 -0
- data/Gemfile +20 -0
- data/LICENSE +16 -15
- data/README.md +455 -195
- data/Rakefile +387 -0
- data/ext/Cargo.toml +25 -0
- data/lib/temporalio/activity/complete_async_error.rb +11 -0
- data/lib/temporalio/activity/context.rb +82 -77
- data/lib/temporalio/activity/definition.rb +77 -0
- data/lib/temporalio/activity/info.rb +42 -46
- data/lib/temporalio/activity.rb +49 -65
- data/lib/temporalio/api/batch/v1/message.rb +31 -0
- data/lib/temporalio/api/cloud/cloudservice/v1/request_response.rb +93 -0
- data/lib/temporalio/api/cloud/cloudservice/v1/service.rb +25 -0
- data/lib/temporalio/api/cloud/cloudservice.rb +3 -0
- data/lib/temporalio/api/cloud/identity/v1/message.rb +36 -0
- data/lib/temporalio/api/cloud/namespace/v1/message.rb +35 -0
- data/lib/temporalio/api/cloud/operation/v1/message.rb +27 -0
- data/lib/temporalio/api/cloud/region/v1/message.rb +23 -0
- data/lib/temporalio/api/command/v1/message.rb +46 -0
- data/lib/temporalio/api/common/v1/grpc_status.rb +23 -0
- data/lib/temporalio/api/common/v1/message.rb +41 -0
- data/lib/temporalio/api/enums/v1/batch_operation.rb +22 -0
- data/lib/temporalio/api/enums/v1/command_type.rb +21 -0
- data/lib/temporalio/api/enums/v1/common.rb +26 -0
- data/lib/temporalio/api/enums/v1/event_type.rb +21 -0
- data/lib/temporalio/api/enums/v1/failed_cause.rb +26 -0
- data/lib/temporalio/api/enums/v1/namespace.rb +23 -0
- data/lib/temporalio/api/enums/v1/query.rb +22 -0
- data/lib/temporalio/api/enums/v1/reset.rb +23 -0
- data/lib/temporalio/api/enums/v1/schedule.rb +21 -0
- data/lib/temporalio/api/enums/v1/task_queue.rb +25 -0
- data/lib/temporalio/api/enums/v1/update.rb +22 -0
- data/lib/temporalio/api/enums/v1/workflow.rb +30 -0
- data/lib/temporalio/api/errordetails/v1/message.rb +42 -0
- data/lib/temporalio/api/export/v1/message.rb +24 -0
- data/lib/temporalio/api/failure/v1/message.rb +35 -0
- data/lib/temporalio/api/filter/v1/message.rb +27 -0
- data/lib/temporalio/api/history/v1/message.rb +90 -0
- data/lib/temporalio/api/namespace/v1/message.rb +31 -0
- data/lib/temporalio/api/nexus/v1/message.rb +40 -0
- data/lib/temporalio/api/operatorservice/v1/request_response.rb +49 -0
- data/lib/temporalio/api/operatorservice/v1/service.rb +23 -0
- data/lib/temporalio/api/operatorservice.rb +3 -0
- data/lib/temporalio/api/protocol/v1/message.rb +23 -0
- data/lib/temporalio/api/query/v1/message.rb +27 -0
- data/lib/temporalio/api/replication/v1/message.rb +26 -0
- data/lib/temporalio/api/schedule/v1/message.rb +42 -0
- data/lib/temporalio/api/sdk/v1/enhanced_stack_trace.rb +25 -0
- data/lib/temporalio/api/sdk/v1/task_complete_metadata.rb +21 -0
- data/lib/temporalio/api/sdk/v1/user_metadata.rb +23 -0
- data/lib/temporalio/api/sdk/v1/workflow_metadata.rb +23 -0
- data/lib/temporalio/api/taskqueue/v1/message.rb +45 -0
- data/lib/temporalio/api/update/v1/message.rb +33 -0
- data/lib/temporalio/api/version/v1/message.rb +26 -0
- data/lib/temporalio/api/workflow/v1/message.rb +43 -0
- data/lib/temporalio/api/workflowservice/v1/request_response.rb +189 -0
- data/lib/temporalio/api/workflowservice/v1/service.rb +23 -0
- data/lib/temporalio/api/workflowservice.rb +3 -0
- data/lib/temporalio/api.rb +13 -0
- data/lib/temporalio/cancellation.rb +150 -0
- data/lib/temporalio/client/activity_id_reference.rb +32 -0
- data/lib/temporalio/client/async_activity_handle.rb +110 -0
- data/lib/temporalio/client/connection/cloud_service.rb +648 -0
- data/lib/temporalio/client/connection/operator_service.rb +249 -0
- data/lib/temporalio/client/connection/service.rb +41 -0
- data/lib/temporalio/client/connection/workflow_service.rb +1218 -0
- data/lib/temporalio/client/connection.rb +270 -0
- data/lib/temporalio/client/interceptor.rb +316 -0
- data/lib/temporalio/client/workflow_execution.rb +103 -0
- data/lib/temporalio/client/workflow_execution_count.rb +36 -0
- data/lib/temporalio/client/workflow_execution_status.rb +18 -0
- data/lib/temporalio/client/workflow_handle.rb +380 -177
- data/lib/temporalio/client/workflow_query_reject_condition.rb +14 -0
- data/lib/temporalio/client/workflow_update_handle.rb +67 -0
- data/lib/temporalio/client/workflow_update_wait_stage.rb +17 -0
- data/lib/temporalio/client.rb +366 -93
- data/lib/temporalio/common_enums.rb +24 -0
- data/lib/temporalio/converters/data_converter.rb +102 -0
- data/lib/temporalio/converters/failure_converter.rb +200 -0
- data/lib/temporalio/converters/payload_codec.rb +26 -0
- data/lib/temporalio/converters/payload_converter/binary_null.rb +34 -0
- data/lib/temporalio/converters/payload_converter/binary_plain.rb +35 -0
- data/lib/temporalio/converters/payload_converter/binary_protobuf.rb +42 -0
- data/lib/temporalio/converters/payload_converter/composite.rb +62 -0
- data/lib/temporalio/converters/payload_converter/encoding.rb +35 -0
- data/lib/temporalio/converters/payload_converter/json_plain.rb +44 -0
- data/lib/temporalio/converters/payload_converter/json_protobuf.rb +41 -0
- data/lib/temporalio/converters/payload_converter.rb +73 -0
- data/lib/temporalio/converters.rb +9 -0
- data/lib/temporalio/error/failure.rb +119 -94
- data/lib/temporalio/error.rb +147 -0
- data/lib/temporalio/internal/bridge/api/activity_result/activity_result.rb +34 -0
- data/lib/temporalio/internal/bridge/api/activity_task/activity_task.rb +31 -0
- data/lib/temporalio/internal/bridge/api/child_workflow/child_workflow.rb +33 -0
- data/lib/temporalio/internal/bridge/api/common/common.rb +26 -0
- data/lib/temporalio/internal/bridge/api/core_interface.rb +36 -0
- data/lib/temporalio/internal/bridge/api/external_data/external_data.rb +27 -0
- data/lib/temporalio/internal/bridge/api/workflow_activation/workflow_activation.rb +52 -0
- data/lib/temporalio/internal/bridge/api/workflow_commands/workflow_commands.rb +54 -0
- data/lib/temporalio/internal/bridge/api/workflow_completion/workflow_completion.rb +30 -0
- data/lib/temporalio/internal/bridge/api.rb +3 -0
- data/lib/temporalio/internal/bridge/client.rb +90 -0
- data/lib/temporalio/internal/bridge/runtime.rb +53 -0
- data/lib/temporalio/internal/bridge/testing.rb +46 -0
- data/lib/temporalio/internal/bridge/worker.rb +83 -0
- data/lib/temporalio/internal/bridge.rb +36 -0
- data/lib/temporalio/internal/client/implementation.rb +525 -0
- data/lib/temporalio/internal/proto_utils.rb +54 -0
- data/lib/temporalio/internal/worker/activity_worker.rb +345 -0
- data/lib/temporalio/internal/worker/multi_runner.rb +169 -0
- data/lib/temporalio/internal.rb +7 -0
- data/lib/temporalio/retry_policy.rb +39 -80
- data/lib/temporalio/runtime.rb +259 -13
- data/lib/temporalio/scoped_logger.rb +96 -0
- data/lib/temporalio/search_attributes.rb +300 -0
- data/lib/temporalio/testing/activity_environment.rb +132 -0
- data/lib/temporalio/testing/workflow_environment.rb +113 -88
- data/lib/temporalio/testing.rb +4 -169
- data/lib/temporalio/version.rb +3 -1
- data/lib/temporalio/worker/activity_executor/fiber.rb +49 -0
- data/lib/temporalio/worker/activity_executor/thread_pool.rb +254 -0
- data/lib/temporalio/worker/activity_executor.rb +55 -0
- data/lib/temporalio/worker/interceptor.rb +88 -0
- data/lib/temporalio/worker/tuner.rb +151 -0
- data/lib/temporalio/worker.rb +385 -163
- data/lib/temporalio/workflow_history.rb +22 -0
- data/lib/temporalio.rb +2 -7
- data/temporalio.gemspec +20 -39
- metadata +131 -712
- data/bridge/Cargo.lock +0 -2997
- data/bridge/Cargo.toml +0 -29
- data/bridge/sdk-core/ARCHITECTURE.md +0 -76
- data/bridge/sdk-core/Cargo.toml +0 -2
- data/bridge/sdk-core/LICENSE.txt +0 -23
- data/bridge/sdk-core/README.md +0 -117
- data/bridge/sdk-core/arch_docs/diagrams/README.md +0 -10
- data/bridge/sdk-core/arch_docs/diagrams/sticky_queues.puml +0 -40
- data/bridge/sdk-core/arch_docs/diagrams/workflow_internals.svg +0 -1
- data/bridge/sdk-core/arch_docs/sticky_queues.md +0 -51
- data/bridge/sdk-core/client/Cargo.toml +0 -40
- data/bridge/sdk-core/client/LICENSE.txt +0 -23
- data/bridge/sdk-core/client/src/lib.rs +0 -1462
- data/bridge/sdk-core/client/src/metrics.rs +0 -174
- data/bridge/sdk-core/client/src/raw.rs +0 -932
- data/bridge/sdk-core/client/src/retry.rs +0 -763
- data/bridge/sdk-core/client/src/workflow_handle/mod.rs +0 -185
- data/bridge/sdk-core/core/Cargo.toml +0 -129
- data/bridge/sdk-core/core/LICENSE.txt +0 -23
- data/bridge/sdk-core/core/benches/workflow_replay.rs +0 -76
- data/bridge/sdk-core/core/src/abstractions.rs +0 -355
- data/bridge/sdk-core/core/src/core_tests/activity_tasks.rs +0 -1049
- data/bridge/sdk-core/core/src/core_tests/child_workflows.rs +0 -221
- data/bridge/sdk-core/core/src/core_tests/determinism.rs +0 -270
- data/bridge/sdk-core/core/src/core_tests/local_activities.rs +0 -1046
- data/bridge/sdk-core/core/src/core_tests/mod.rs +0 -100
- data/bridge/sdk-core/core/src/core_tests/queries.rs +0 -893
- data/bridge/sdk-core/core/src/core_tests/replay_flag.rs +0 -65
- data/bridge/sdk-core/core/src/core_tests/workers.rs +0 -257
- data/bridge/sdk-core/core/src/core_tests/workflow_cancels.rs +0 -124
- data/bridge/sdk-core/core/src/core_tests/workflow_tasks.rs +0 -2433
- data/bridge/sdk-core/core/src/ephemeral_server/mod.rs +0 -609
- data/bridge/sdk-core/core/src/internal_flags.rs +0 -136
- data/bridge/sdk-core/core/src/lib.rs +0 -289
- data/bridge/sdk-core/core/src/pollers/mod.rs +0 -54
- data/bridge/sdk-core/core/src/pollers/poll_buffer.rs +0 -297
- data/bridge/sdk-core/core/src/protosext/mod.rs +0 -428
- data/bridge/sdk-core/core/src/replay/mod.rs +0 -215
- data/bridge/sdk-core/core/src/retry_logic.rs +0 -202
- data/bridge/sdk-core/core/src/telemetry/log_export.rs +0 -190
- data/bridge/sdk-core/core/src/telemetry/metrics.rs +0 -462
- data/bridge/sdk-core/core/src/telemetry/mod.rs +0 -423
- data/bridge/sdk-core/core/src/telemetry/prometheus_server.rs +0 -83
- data/bridge/sdk-core/core/src/test_help/mod.rs +0 -939
- data/bridge/sdk-core/core/src/worker/activities/activity_heartbeat_manager.rs +0 -536
- data/bridge/sdk-core/core/src/worker/activities/activity_task_poller_stream.rs +0 -89
- data/bridge/sdk-core/core/src/worker/activities/local_activities.rs +0 -1278
- data/bridge/sdk-core/core/src/worker/activities.rs +0 -557
- data/bridge/sdk-core/core/src/worker/client/mocks.rs +0 -107
- data/bridge/sdk-core/core/src/worker/client.rs +0 -389
- data/bridge/sdk-core/core/src/worker/mod.rs +0 -677
- data/bridge/sdk-core/core/src/worker/workflow/bridge.rs +0 -35
- data/bridge/sdk-core/core/src/worker/workflow/driven_workflow.rs +0 -99
- data/bridge/sdk-core/core/src/worker/workflow/history_update.rs +0 -1111
- data/bridge/sdk-core/core/src/worker/workflow/machines/activity_state_machine.rs +0 -964
- data/bridge/sdk-core/core/src/worker/workflow/machines/cancel_external_state_machine.rs +0 -294
- data/bridge/sdk-core/core/src/worker/workflow/machines/cancel_workflow_state_machine.rs +0 -168
- data/bridge/sdk-core/core/src/worker/workflow/machines/child_workflow_state_machine.rs +0 -918
- data/bridge/sdk-core/core/src/worker/workflow/machines/complete_workflow_state_machine.rs +0 -137
- data/bridge/sdk-core/core/src/worker/workflow/machines/continue_as_new_workflow_state_machine.rs +0 -158
- data/bridge/sdk-core/core/src/worker/workflow/machines/fail_workflow_state_machine.rs +0 -130
- data/bridge/sdk-core/core/src/worker/workflow/machines/local_activity_state_machine.rs +0 -1525
- data/bridge/sdk-core/core/src/worker/workflow/machines/mod.rs +0 -324
- data/bridge/sdk-core/core/src/worker/workflow/machines/modify_workflow_properties_state_machine.rs +0 -179
- data/bridge/sdk-core/core/src/worker/workflow/machines/patch_state_machine.rs +0 -659
- data/bridge/sdk-core/core/src/worker/workflow/machines/signal_external_state_machine.rs +0 -439
- data/bridge/sdk-core/core/src/worker/workflow/machines/timer_state_machine.rs +0 -435
- data/bridge/sdk-core/core/src/worker/workflow/machines/transition_coverage.rs +0 -175
- data/bridge/sdk-core/core/src/worker/workflow/machines/upsert_search_attributes_state_machine.rs +0 -249
- data/bridge/sdk-core/core/src/worker/workflow/machines/workflow_machines/local_acts.rs +0 -85
- data/bridge/sdk-core/core/src/worker/workflow/machines/workflow_machines.rs +0 -1280
- data/bridge/sdk-core/core/src/worker/workflow/machines/workflow_task_state_machine.rs +0 -269
- data/bridge/sdk-core/core/src/worker/workflow/managed_run/managed_wf_test.rs +0 -213
- data/bridge/sdk-core/core/src/worker/workflow/managed_run.rs +0 -1305
- data/bridge/sdk-core/core/src/worker/workflow/mod.rs +0 -1276
- data/bridge/sdk-core/core/src/worker/workflow/run_cache.rs +0 -128
- data/bridge/sdk-core/core/src/worker/workflow/wft_extraction.rs +0 -125
- data/bridge/sdk-core/core/src/worker/workflow/wft_poller.rs +0 -85
- data/bridge/sdk-core/core/src/worker/workflow/workflow_stream/saved_wf_inputs.rs +0 -117
- data/bridge/sdk-core/core/src/worker/workflow/workflow_stream/tonic_status_serde.rs +0 -24
- data/bridge/sdk-core/core/src/worker/workflow/workflow_stream.rs +0 -715
- data/bridge/sdk-core/core-api/Cargo.toml +0 -33
- data/bridge/sdk-core/core-api/LICENSE.txt +0 -23
- data/bridge/sdk-core/core-api/src/errors.rs +0 -62
- data/bridge/sdk-core/core-api/src/lib.rs +0 -113
- data/bridge/sdk-core/core-api/src/telemetry.rs +0 -141
- data/bridge/sdk-core/core-api/src/worker.rs +0 -161
- data/bridge/sdk-core/etc/deps.svg +0 -162
- data/bridge/sdk-core/etc/dynamic-config.yaml +0 -2
- data/bridge/sdk-core/etc/otel-collector-config.yaml +0 -36
- data/bridge/sdk-core/etc/prometheus.yaml +0 -6
- data/bridge/sdk-core/etc/regen-depgraph.sh +0 -5
- data/bridge/sdk-core/fsm/Cargo.toml +0 -18
- data/bridge/sdk-core/fsm/LICENSE.txt +0 -23
- data/bridge/sdk-core/fsm/README.md +0 -3
- data/bridge/sdk-core/fsm/rustfsm_procmacro/Cargo.toml +0 -27
- data/bridge/sdk-core/fsm/rustfsm_procmacro/LICENSE.txt +0 -23
- data/bridge/sdk-core/fsm/rustfsm_procmacro/src/lib.rs +0 -650
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/progress.rs +0 -8
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/dupe_transitions_fail.rs +0 -18
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/dupe_transitions_fail.stderr +0 -12
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/dynamic_dest_pass.rs +0 -41
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/forgot_name_fail.rs +0 -14
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/forgot_name_fail.stderr +0 -11
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/handler_arg_pass.rs +0 -32
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/handler_pass.rs +0 -31
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/medium_complex_pass.rs +0 -46
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/no_handle_conversions_require_into_fail.rs +0 -29
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/no_handle_conversions_require_into_fail.stderr +0 -12
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/simple_pass.rs +0 -32
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/struct_event_variant_fail.rs +0 -18
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/struct_event_variant_fail.stderr +0 -5
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/tuple_more_item_event_variant_fail.rs +0 -11
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/tuple_more_item_event_variant_fail.stderr +0 -5
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/tuple_zero_item_event_variant_fail.rs +0 -11
- data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/tuple_zero_item_event_variant_fail.stderr +0 -5
- data/bridge/sdk-core/fsm/rustfsm_trait/Cargo.toml +0 -14
- data/bridge/sdk-core/fsm/rustfsm_trait/LICENSE.txt +0 -23
- data/bridge/sdk-core/fsm/rustfsm_trait/src/lib.rs +0 -254
- data/bridge/sdk-core/fsm/src/lib.rs +0 -2
- data/bridge/sdk-core/histories/evict_while_la_running_no_interference-16_history.bin +0 -0
- data/bridge/sdk-core/histories/evict_while_la_running_no_interference-23_history.bin +0 -0
- data/bridge/sdk-core/histories/evict_while_la_running_no_interference-85_history.bin +0 -0
- data/bridge/sdk-core/histories/fail_wf_task.bin +0 -0
- data/bridge/sdk-core/histories/timer_workflow_history.bin +0 -0
- data/bridge/sdk-core/integ-with-otel.sh +0 -7
- data/bridge/sdk-core/protos/api_upstream/README.md +0 -9
- data/bridge/sdk-core/protos/api_upstream/api-linter.yaml +0 -40
- data/bridge/sdk-core/protos/api_upstream/buf.yaml +0 -9
- data/bridge/sdk-core/protos/api_upstream/build/go.mod +0 -7
- data/bridge/sdk-core/protos/api_upstream/build/go.sum +0 -5
- data/bridge/sdk-core/protos/api_upstream/build/tools.go +0 -29
- data/bridge/sdk-core/protos/api_upstream/dependencies/gogoproto/gogo.proto +0 -141
- data/bridge/sdk-core/protos/api_upstream/go.mod +0 -6
- data/bridge/sdk-core/protos/api_upstream/temporal/api/batch/v1/message.proto +0 -89
- data/bridge/sdk-core/protos/api_upstream/temporal/api/command/v1/message.proto +0 -248
- data/bridge/sdk-core/protos/api_upstream/temporal/api/common/v1/message.proto +0 -123
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/batch_operation.proto +0 -47
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/command_type.proto +0 -52
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/common.proto +0 -56
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/event_type.proto +0 -170
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/failed_cause.proto +0 -123
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/namespace.proto +0 -51
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/query.proto +0 -50
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/reset.proto +0 -41
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/schedule.proto +0 -60
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/task_queue.proto +0 -59
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/update.proto +0 -56
- data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/workflow.proto +0 -122
- data/bridge/sdk-core/protos/api_upstream/temporal/api/errordetails/v1/message.proto +0 -108
- data/bridge/sdk-core/protos/api_upstream/temporal/api/failure/v1/message.proto +0 -114
- data/bridge/sdk-core/protos/api_upstream/temporal/api/filter/v1/message.proto +0 -56
- data/bridge/sdk-core/protos/api_upstream/temporal/api/history/v1/message.proto +0 -787
- data/bridge/sdk-core/protos/api_upstream/temporal/api/namespace/v1/message.proto +0 -99
- data/bridge/sdk-core/protos/api_upstream/temporal/api/operatorservice/v1/request_response.proto +0 -124
- data/bridge/sdk-core/protos/api_upstream/temporal/api/operatorservice/v1/service.proto +0 -80
- data/bridge/sdk-core/protos/api_upstream/temporal/api/protocol/v1/message.proto +0 -57
- data/bridge/sdk-core/protos/api_upstream/temporal/api/query/v1/message.proto +0 -61
- data/bridge/sdk-core/protos/api_upstream/temporal/api/replication/v1/message.proto +0 -55
- data/bridge/sdk-core/protos/api_upstream/temporal/api/schedule/v1/message.proto +0 -379
- data/bridge/sdk-core/protos/api_upstream/temporal/api/sdk/v1/task_complete_metadata.proto +0 -63
- data/bridge/sdk-core/protos/api_upstream/temporal/api/taskqueue/v1/message.proto +0 -108
- data/bridge/sdk-core/protos/api_upstream/temporal/api/update/v1/message.proto +0 -111
- data/bridge/sdk-core/protos/api_upstream/temporal/api/version/v1/message.proto +0 -59
- data/bridge/sdk-core/protos/api_upstream/temporal/api/workflow/v1/message.proto +0 -146
- data/bridge/sdk-core/protos/api_upstream/temporal/api/workflowservice/v1/request_response.proto +0 -1199
- data/bridge/sdk-core/protos/api_upstream/temporal/api/workflowservice/v1/service.proto +0 -415
- data/bridge/sdk-core/protos/grpc/health/v1/health.proto +0 -63
- data/bridge/sdk-core/protos/local/temporal/sdk/core/activity_result/activity_result.proto +0 -79
- data/bridge/sdk-core/protos/local/temporal/sdk/core/activity_task/activity_task.proto +0 -80
- data/bridge/sdk-core/protos/local/temporal/sdk/core/child_workflow/child_workflow.proto +0 -78
- data/bridge/sdk-core/protos/local/temporal/sdk/core/common/common.proto +0 -16
- data/bridge/sdk-core/protos/local/temporal/sdk/core/core_interface.proto +0 -31
- data/bridge/sdk-core/protos/local/temporal/sdk/core/external_data/external_data.proto +0 -31
- data/bridge/sdk-core/protos/local/temporal/sdk/core/workflow_activation/workflow_activation.proto +0 -270
- data/bridge/sdk-core/protos/local/temporal/sdk/core/workflow_commands/workflow_commands.proto +0 -305
- data/bridge/sdk-core/protos/local/temporal/sdk/core/workflow_completion/workflow_completion.proto +0 -35
- data/bridge/sdk-core/protos/testsrv_upstream/api-linter.yaml +0 -38
- data/bridge/sdk-core/protos/testsrv_upstream/buf.yaml +0 -13
- data/bridge/sdk-core/protos/testsrv_upstream/dependencies/gogoproto/gogo.proto +0 -141
- data/bridge/sdk-core/protos/testsrv_upstream/temporal/api/testservice/v1/request_response.proto +0 -63
- data/bridge/sdk-core/protos/testsrv_upstream/temporal/api/testservice/v1/service.proto +0 -90
- data/bridge/sdk-core/rustfmt.toml +0 -1
- data/bridge/sdk-core/sdk/Cargo.toml +0 -48
- data/bridge/sdk-core/sdk/LICENSE.txt +0 -23
- data/bridge/sdk-core/sdk/src/activity_context.rs +0 -230
- data/bridge/sdk-core/sdk/src/app_data.rs +0 -37
- data/bridge/sdk-core/sdk/src/interceptors.rs +0 -50
- data/bridge/sdk-core/sdk/src/lib.rs +0 -861
- data/bridge/sdk-core/sdk/src/payload_converter.rs +0 -11
- data/bridge/sdk-core/sdk/src/workflow_context/options.rs +0 -295
- data/bridge/sdk-core/sdk/src/workflow_context.rs +0 -694
- data/bridge/sdk-core/sdk/src/workflow_future.rs +0 -500
- data/bridge/sdk-core/sdk-core-protos/Cargo.toml +0 -33
- data/bridge/sdk-core/sdk-core-protos/LICENSE.txt +0 -23
- data/bridge/sdk-core/sdk-core-protos/build.rs +0 -142
- data/bridge/sdk-core/sdk-core-protos/src/constants.rs +0 -7
- data/bridge/sdk-core/sdk-core-protos/src/history_builder.rs +0 -557
- data/bridge/sdk-core/sdk-core-protos/src/history_info.rs +0 -234
- data/bridge/sdk-core/sdk-core-protos/src/lib.rs +0 -2088
- data/bridge/sdk-core/sdk-core-protos/src/task_token.rs +0 -48
- data/bridge/sdk-core/sdk-core-protos/src/utilities.rs +0 -14
- data/bridge/sdk-core/test-utils/Cargo.toml +0 -38
- data/bridge/sdk-core/test-utils/src/canned_histories.rs +0 -1389
- data/bridge/sdk-core/test-utils/src/histfetch.rs +0 -28
- data/bridge/sdk-core/test-utils/src/lib.rs +0 -709
- data/bridge/sdk-core/test-utils/src/wf_input_saver.rs +0 -50
- data/bridge/sdk-core/test-utils/src/workflows.rs +0 -29
- data/bridge/sdk-core/tests/fuzzy_workflow.rs +0 -130
- data/bridge/sdk-core/tests/heavy_tests.rs +0 -265
- data/bridge/sdk-core/tests/integ_tests/client_tests.rs +0 -36
- data/bridge/sdk-core/tests/integ_tests/ephemeral_server_tests.rs +0 -150
- data/bridge/sdk-core/tests/integ_tests/heartbeat_tests.rs +0 -223
- data/bridge/sdk-core/tests/integ_tests/metrics_tests.rs +0 -239
- data/bridge/sdk-core/tests/integ_tests/polling_tests.rs +0 -90
- data/bridge/sdk-core/tests/integ_tests/queries_tests.rs +0 -314
- data/bridge/sdk-core/tests/integ_tests/visibility_tests.rs +0 -151
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/activities.rs +0 -902
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/appdata_propagation.rs +0 -61
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/cancel_external.rs +0 -60
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/cancel_wf.rs +0 -51
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/child_workflows.rs +0 -51
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/continue_as_new.rs +0 -64
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/determinism.rs +0 -47
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/local_activities.rs +0 -669
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/modify_wf_properties.rs +0 -54
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/patches.rs +0 -92
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/replay.rs +0 -228
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/resets.rs +0 -94
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/signals.rs +0 -171
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/stickyness.rs +0 -85
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/timers.rs +0 -120
- data/bridge/sdk-core/tests/integ_tests/workflow_tests/upsert_search_attrs.rs +0 -77
- data/bridge/sdk-core/tests/integ_tests/workflow_tests.rs +0 -596
- data/bridge/sdk-core/tests/main.rs +0 -103
- data/bridge/sdk-core/tests/runner.rs +0 -132
- data/bridge/sdk-core/tests/wf_input_replay.rs +0 -32
- data/bridge/src/connection.rs +0 -202
- data/bridge/src/lib.rs +0 -494
- data/bridge/src/runtime.rs +0 -54
- data/bridge/src/test_server.rs +0 -153
- data/bridge/src/worker.rs +0 -197
- data/ext/Rakefile +0 -9
- data/lib/gen/dependencies/gogoproto/gogo_pb.rb +0 -14
- data/lib/gen/temporal/api/batch/v1/message_pb.rb +0 -50
- data/lib/gen/temporal/api/command/v1/message_pb.rb +0 -160
- data/lib/gen/temporal/api/common/v1/message_pb.rb +0 -73
- data/lib/gen/temporal/api/enums/v1/batch_operation_pb.rb +0 -33
- data/lib/gen/temporal/api/enums/v1/command_type_pb.rb +0 -37
- data/lib/gen/temporal/api/enums/v1/common_pb.rb +0 -42
- data/lib/gen/temporal/api/enums/v1/event_type_pb.rb +0 -68
- data/lib/gen/temporal/api/enums/v1/failed_cause_pb.rb +0 -79
- data/lib/gen/temporal/api/enums/v1/namespace_pb.rb +0 -37
- data/lib/gen/temporal/api/enums/v1/query_pb.rb +0 -31
- data/lib/gen/temporal/api/enums/v1/reset_pb.rb +0 -24
- data/lib/gen/temporal/api/enums/v1/schedule_pb.rb +0 -28
- data/lib/gen/temporal/api/enums/v1/task_queue_pb.rb +0 -30
- data/lib/gen/temporal/api/enums/v1/update_pb.rb +0 -25
- data/lib/gen/temporal/api/enums/v1/workflow_pb.rb +0 -89
- data/lib/gen/temporal/api/errordetails/v1/message_pb.rb +0 -84
- data/lib/gen/temporal/api/failure/v1/message_pb.rb +0 -83
- data/lib/gen/temporal/api/filter/v1/message_pb.rb +0 -40
- data/lib/gen/temporal/api/history/v1/message_pb.rb +0 -498
- data/lib/gen/temporal/api/namespace/v1/message_pb.rb +0 -64
- data/lib/gen/temporal/api/operatorservice/v1/request_response_pb.rb +0 -88
- data/lib/gen/temporal/api/operatorservice/v1/service_pb.rb +0 -20
- data/lib/gen/temporal/api/protocol/v1/message_pb.rb +0 -30
- data/lib/gen/temporal/api/query/v1/message_pb.rb +0 -38
- data/lib/gen/temporal/api/replication/v1/message_pb.rb +0 -37
- data/lib/gen/temporal/api/schedule/v1/message_pb.rb +0 -149
- data/lib/gen/temporal/api/sdk/v1/task_complete_metadata_pb.rb +0 -23
- data/lib/gen/temporal/api/taskqueue/v1/message_pb.rb +0 -73
- data/lib/gen/temporal/api/testservice/v1/request_response_pb.rb +0 -49
- data/lib/gen/temporal/api/testservice/v1/service_pb.rb +0 -21
- data/lib/gen/temporal/api/update/v1/message_pb.rb +0 -72
- data/lib/gen/temporal/api/version/v1/message_pb.rb +0 -41
- data/lib/gen/temporal/api/workflow/v1/message_pb.rb +0 -111
- data/lib/gen/temporal/api/workflowservice/v1/request_response_pb.rb +0 -798
- data/lib/gen/temporal/api/workflowservice/v1/service_pb.rb +0 -20
- data/lib/gen/temporal/sdk/core/activity_result/activity_result_pb.rb +0 -62
- data/lib/gen/temporal/sdk/core/activity_task/activity_task_pb.rb +0 -61
- data/lib/gen/temporal/sdk/core/child_workflow/child_workflow_pb.rb +0 -61
- data/lib/gen/temporal/sdk/core/common/common_pb.rb +0 -26
- data/lib/gen/temporal/sdk/core/core_interface_pb.rb +0 -40
- data/lib/gen/temporal/sdk/core/external_data/external_data_pb.rb +0 -31
- data/lib/gen/temporal/sdk/core/workflow_activation/workflow_activation_pb.rb +0 -171
- data/lib/gen/temporal/sdk/core/workflow_commands/workflow_commands_pb.rb +0 -200
- data/lib/gen/temporal/sdk/core/workflow_completion/workflow_completion_pb.rb +0 -41
- data/lib/temporalio/bridge/connect_options.rb +0 -15
- data/lib/temporalio/bridge/error.rb +0 -8
- data/lib/temporalio/bridge/retry_config.rb +0 -24
- data/lib/temporalio/bridge/tls_options.rb +0 -19
- data/lib/temporalio/bridge.rb +0 -14
- data/lib/temporalio/client/implementation.rb +0 -340
- data/lib/temporalio/connection/retry_config.rb +0 -44
- data/lib/temporalio/connection/service.rb +0 -20
- data/lib/temporalio/connection/test_service.rb +0 -92
- data/lib/temporalio/connection/tls_options.rb +0 -51
- data/lib/temporalio/connection/workflow_service.rb +0 -731
- data/lib/temporalio/connection.rb +0 -86
- data/lib/temporalio/data_converter.rb +0 -191
- data/lib/temporalio/error/workflow_failure.rb +0 -19
- data/lib/temporalio/errors.rb +0 -40
- data/lib/temporalio/failure_converter/base.rb +0 -26
- data/lib/temporalio/failure_converter/basic.rb +0 -319
- data/lib/temporalio/failure_converter.rb +0 -7
- data/lib/temporalio/interceptor/activity_inbound.rb +0 -22
- data/lib/temporalio/interceptor/activity_outbound.rb +0 -24
- data/lib/temporalio/interceptor/chain.rb +0 -28
- data/lib/temporalio/interceptor/client.rb +0 -127
- data/lib/temporalio/interceptor.rb +0 -22
- data/lib/temporalio/payload_codec/base.rb +0 -32
- data/lib/temporalio/payload_converter/base.rb +0 -24
- data/lib/temporalio/payload_converter/bytes.rb +0 -27
- data/lib/temporalio/payload_converter/composite.rb +0 -49
- data/lib/temporalio/payload_converter/encoding_base.rb +0 -35
- data/lib/temporalio/payload_converter/json.rb +0 -26
- data/lib/temporalio/payload_converter/nil.rb +0 -26
- data/lib/temporalio/payload_converter.rb +0 -14
- data/lib/temporalio/retry_state.rb +0 -35
- data/lib/temporalio/testing/time_skipping_handle.rb +0 -32
- data/lib/temporalio/testing/time_skipping_interceptor.rb +0 -23
- data/lib/temporalio/timeout_type.rb +0 -29
- data/lib/temporalio/worker/activity_runner.rb +0 -114
- data/lib/temporalio/worker/activity_worker.rb +0 -164
- data/lib/temporalio/worker/reactor.rb +0 -46
- data/lib/temporalio/worker/runner.rb +0 -63
- data/lib/temporalio/worker/sync_worker.rb +0 -124
- data/lib/temporalio/worker/thread_pool_executor.rb +0 -51
- data/lib/temporalio/workflow/async.rb +0 -46
- data/lib/temporalio/workflow/execution_info.rb +0 -54
- data/lib/temporalio/workflow/execution_status.rb +0 -36
- data/lib/temporalio/workflow/future.rb +0 -138
- data/lib/temporalio/workflow/id_reuse_policy.rb +0 -36
- data/lib/temporalio/workflow/info.rb +0 -76
- data/lib/temporalio/workflow/query_reject_condition.rb +0 -33
- data/lib/thermite_patch.rb +0 -33
- data/sig/async.rbs +0 -17
- data/sig/protobuf.rbs +0 -16
- data/sig/protos/dependencies/gogoproto/gogo.rbs +0 -914
- data/sig/protos/google/protobuf/any.rbs +0 -157
- data/sig/protos/google/protobuf/descriptor.rbs +0 -2825
- data/sig/protos/google/protobuf/duration.rbs +0 -114
- data/sig/protos/google/protobuf/empty.rbs +0 -36
- data/sig/protos/google/protobuf/timestamp.rbs +0 -145
- data/sig/protos/google/protobuf/wrappers.rbs +0 -358
- data/sig/protos/temporal/api/batch/v1/message.rbs +0 -300
- data/sig/protos/temporal/api/command/v1/message.rbs +0 -1399
- data/sig/protos/temporal/api/common/v1/message.rbs +0 -528
- data/sig/protos/temporal/api/enums/v1/batch_operation.rbs +0 -79
- data/sig/protos/temporal/api/enums/v1/command_type.rbs +0 -68
- data/sig/protos/temporal/api/enums/v1/common.rbs +0 -118
- data/sig/protos/temporal/api/enums/v1/event_type.rbs +0 -264
- data/sig/protos/temporal/api/enums/v1/failed_cause.rbs +0 -277
- data/sig/protos/temporal/api/enums/v1/namespace.rbs +0 -108
- data/sig/protos/temporal/api/enums/v1/query.rbs +0 -81
- data/sig/protos/temporal/api/enums/v1/reset.rbs +0 -44
- data/sig/protos/temporal/api/enums/v1/schedule.rbs +0 -72
- data/sig/protos/temporal/api/enums/v1/task_queue.rbs +0 -92
- data/sig/protos/temporal/api/enums/v1/update.rbs +0 -64
- data/sig/protos/temporal/api/enums/v1/workflow.rbs +0 -371
- data/sig/protos/temporal/api/errordetails/v1/message.rbs +0 -551
- data/sig/protos/temporal/api/failure/v1/message.rbs +0 -581
- data/sig/protos/temporal/api/filter/v1/message.rbs +0 -171
- data/sig/protos/temporal/api/history/v1/message.rbs +0 -4609
- data/sig/protos/temporal/api/namespace/v1/message.rbs +0 -410
- data/sig/protos/temporal/api/operatorservice/v1/request_response.rbs +0 -643
- data/sig/protos/temporal/api/operatorservice/v1/service.rbs +0 -17
- data/sig/protos/temporal/api/protocol/v1/message.rbs +0 -84
- data/sig/protos/temporal/api/query/v1/message.rbs +0 -182
- data/sig/protos/temporal/api/replication/v1/message.rbs +0 -148
- data/sig/protos/temporal/api/schedule/v1/message.rbs +0 -1488
- data/sig/protos/temporal/api/sdk/v1/task_complete_metadata.rbs +0 -110
- data/sig/protos/temporal/api/taskqueue/v1/message.rbs +0 -486
- data/sig/protos/temporal/api/testservice/v1/request_response.rbs +0 -249
- data/sig/protos/temporal/api/testservice/v1/service.rbs +0 -15
- data/sig/protos/temporal/api/update/v1/message.rbs +0 -489
- data/sig/protos/temporal/api/version/v1/message.rbs +0 -184
- data/sig/protos/temporal/api/workflow/v1/message.rbs +0 -824
- data/sig/protos/temporal/api/workflowservice/v1/request_response.rbs +0 -7250
- data/sig/protos/temporal/api/workflowservice/v1/service.rbs +0 -22
- data/sig/protos/temporal/sdk/core/activity_result/activity_result.rbs +0 -380
- data/sig/protos/temporal/sdk/core/activity_task/activity_task.rbs +0 -386
- data/sig/protos/temporal/sdk/core/child_workflow/child_workflow.rbs +0 -323
- data/sig/protos/temporal/sdk/core/common/common.rbs +0 -62
- data/sig/protos/temporal/sdk/core/core_interface.rbs +0 -101
- data/sig/protos/temporal/sdk/core/external_data/external_data.rbs +0 -119
- data/sig/protos/temporal/sdk/core/workflow_activation/workflow_activation.rbs +0 -1473
- data/sig/protos/temporal/sdk/core/workflow_commands/workflow_commands.rbs +0 -1784
- data/sig/protos/temporal/sdk/core/workflow_completion/workflow_completion.rbs +0 -180
- data/sig/ruby.rbs +0 -12
- data/sig/temporalio/activity/context.rbs +0 -29
- data/sig/temporalio/activity/info.rbs +0 -43
- data/sig/temporalio/activity.rbs +0 -19
- data/sig/temporalio/bridge/connect_options.rbs +0 -19
- data/sig/temporalio/bridge/error.rbs +0 -8
- data/sig/temporalio/bridge/retry_config.rbs +0 -21
- data/sig/temporalio/bridge/tls_options.rbs +0 -17
- data/sig/temporalio/bridge.rbs +0 -71
- data/sig/temporalio/client/implementation.rbs +0 -38
- data/sig/temporalio/client/workflow_handle.rbs +0 -41
- data/sig/temporalio/client.rbs +0 -35
- data/sig/temporalio/connection/retry_config.rbs +0 -37
- data/sig/temporalio/connection/service.rbs +0 -14
- data/sig/temporalio/connection/test_service.rbs +0 -13
- data/sig/temporalio/connection/tls_options.rbs +0 -43
- data/sig/temporalio/connection/workflow_service.rbs +0 -48
- data/sig/temporalio/connection.rbs +0 -30
- data/sig/temporalio/data_converter.rbs +0 -35
- data/sig/temporalio/error/failure.rbs +0 -121
- data/sig/temporalio/error/workflow_failure.rbs +0 -9
- data/sig/temporalio/errors.rbs +0 -36
- data/sig/temporalio/failure_converter/base.rbs +0 -12
- data/sig/temporalio/failure_converter/basic.rbs +0 -86
- data/sig/temporalio/failure_converter.rbs +0 -5
- data/sig/temporalio/interceptor/activity_inbound.rbs +0 -21
- data/sig/temporalio/interceptor/activity_outbound.rbs +0 -10
- data/sig/temporalio/interceptor/chain.rbs +0 -24
- data/sig/temporalio/interceptor/client.rbs +0 -148
- data/sig/temporalio/interceptor.rbs +0 -6
- data/sig/temporalio/payload_codec/base.rbs +0 -12
- data/sig/temporalio/payload_converter/base.rbs +0 -12
- data/sig/temporalio/payload_converter/bytes.rbs +0 -9
- data/sig/temporalio/payload_converter/composite.rbs +0 -19
- data/sig/temporalio/payload_converter/encoding_base.rbs +0 -14
- data/sig/temporalio/payload_converter/json.rbs +0 -9
- data/sig/temporalio/payload_converter/nil.rbs +0 -9
- data/sig/temporalio/payload_converter.rbs +0 -5
- data/sig/temporalio/retry_policy.rbs +0 -25
- data/sig/temporalio/retry_state.rbs +0 -20
- data/sig/temporalio/runtime.rbs +0 -12
- data/sig/temporalio/testing/time_skipping_handle.rbs +0 -15
- data/sig/temporalio/testing/time_skipping_interceptor.rbs +0 -13
- data/sig/temporalio/testing/workflow_environment.rbs +0 -22
- data/sig/temporalio/testing.rbs +0 -35
- data/sig/temporalio/timeout_type.rbs +0 -15
- data/sig/temporalio/version.rbs +0 -3
- data/sig/temporalio/worker/activity_runner.rbs +0 -35
- data/sig/temporalio/worker/activity_worker.rbs +0 -44
- data/sig/temporalio/worker/reactor.rbs +0 -22
- data/sig/temporalio/worker/runner.rbs +0 -21
- data/sig/temporalio/worker/sync_worker.rbs +0 -23
- data/sig/temporalio/worker/thread_pool_executor.rbs +0 -23
- data/sig/temporalio/worker.rbs +0 -46
- data/sig/temporalio/workflow/async.rbs +0 -9
- data/sig/temporalio/workflow/execution_info.rbs +0 -55
- data/sig/temporalio/workflow/execution_status.rbs +0 -21
- data/sig/temporalio/workflow/future.rbs +0 -40
- data/sig/temporalio/workflow/id_reuse_policy.rbs +0 -15
- data/sig/temporalio/workflow/info.rbs +0 -55
- data/sig/temporalio/workflow/query_reject_condition.rbs +0 -14
- data/sig/temporalio.rbs +0 -2
- data/sig/thermite_patch.rbs +0 -15
|
@@ -1,1305 +0,0 @@
|
|
|
1
|
-
#[cfg(test)]
|
|
2
|
-
mod managed_wf_test;
|
|
3
|
-
|
|
4
|
-
#[cfg(test)]
|
|
5
|
-
pub(crate) use managed_wf_test::ManagedWFFunc;
|
|
6
|
-
|
|
7
|
-
use crate::{
|
|
8
|
-
abstractions::dbg_panic,
|
|
9
|
-
protosext::WorkflowActivationExt,
|
|
10
|
-
worker::{
|
|
11
|
-
workflow::{
|
|
12
|
-
history_update::HistoryPaginator, machines::WorkflowMachines, ActivationAction,
|
|
13
|
-
ActivationCompleteOutcome, ActivationCompleteResult, ActivationOrAuto,
|
|
14
|
-
EvictionRequestResult, FailedActivationWFTReport, HeartbeatTimeoutMsg, HistoryUpdate,
|
|
15
|
-
LocalActivityRequestSink, LocalResolution, NextPageReq, OutgoingServerCommands,
|
|
16
|
-
OutstandingActivation, OutstandingTask, PermittedWFT, RequestEvictMsg, RunBasics,
|
|
17
|
-
ServerCommandsWithWorkflowInfo, WFCommand, WFMachinesError, WFTReportStatus,
|
|
18
|
-
WorkflowBridge, WorkflowTaskInfo, WFT_HEARTBEAT_TIMEOUT_FRACTION,
|
|
19
|
-
},
|
|
20
|
-
LocalActRequest, LEGACY_QUERY_ID,
|
|
21
|
-
},
|
|
22
|
-
MetricsContext,
|
|
23
|
-
};
|
|
24
|
-
use futures_util::future::AbortHandle;
|
|
25
|
-
use std::{
|
|
26
|
-
collections::HashSet,
|
|
27
|
-
ops::Add,
|
|
28
|
-
rc::Rc,
|
|
29
|
-
sync::mpsc::Sender,
|
|
30
|
-
time::{Duration, Instant},
|
|
31
|
-
};
|
|
32
|
-
use temporal_sdk_core_protos::{
|
|
33
|
-
coresdk::{
|
|
34
|
-
workflow_activation::{
|
|
35
|
-
create_evict_activation, query_to_job, remove_from_cache::EvictionReason,
|
|
36
|
-
workflow_activation_job, RemoveFromCache, WorkflowActivation,
|
|
37
|
-
},
|
|
38
|
-
workflow_commands::QueryResult,
|
|
39
|
-
workflow_completion,
|
|
40
|
-
},
|
|
41
|
-
temporal::api::{enums::v1::WorkflowTaskFailedCause, failure::v1::Failure},
|
|
42
|
-
TaskToken,
|
|
43
|
-
};
|
|
44
|
-
use tokio::sync::oneshot;
|
|
45
|
-
use tracing::Span;
|
|
46
|
-
|
|
47
|
-
type Result<T, E = WFMachinesError> = std::result::Result<T, E>;
|
|
48
|
-
pub(super) type RunUpdateAct = Option<ActivationOrAuto>;
|
|
49
|
-
|
|
50
|
-
/// Manages access to a specific workflow run. Everything inside is entirely synchronous and should
|
|
51
|
-
/// remain that way.
|
|
52
|
-
#[derive(derive_more::DebugCustom)]
|
|
53
|
-
#[debug(
|
|
54
|
-
fmt = "ManagedRun {{ wft: {:?}, activation: {:?}, buffered_resp: {:?} \
|
|
55
|
-
trying_to_evict: {} }}",
|
|
56
|
-
wft,
|
|
57
|
-
activation,
|
|
58
|
-
buffered_resp,
|
|
59
|
-
"trying_to_evict.is_some()"
|
|
60
|
-
)]
|
|
61
|
-
pub(super) struct ManagedRun {
|
|
62
|
-
wfm: WorkflowManager,
|
|
63
|
-
/// Called when the machines need to produce local activity requests. This can't be lifted up
|
|
64
|
-
/// easily as return values, because sometimes local activity requests trigger immediate
|
|
65
|
-
/// resolutions (ex: too many attempts). Thus lifting it up creates a lot of unneeded complexity
|
|
66
|
-
/// pushing things out and then directly back in. The downside is this is the only "impure" part
|
|
67
|
-
/// of the in/out nature of workflow state management. If there's ever a sensible way to lift it
|
|
68
|
-
/// up, that'd be nice.
|
|
69
|
-
local_activity_request_sink: Rc<dyn LocalActivityRequestSink>,
|
|
70
|
-
/// Set if the run is currently waiting on the execution of some local activities.
|
|
71
|
-
waiting_on_la: Option<WaitingOnLAs>,
|
|
72
|
-
/// Is set to true if the machines encounter an error and the only subsequent thing we should
|
|
73
|
-
/// do is be evicted.
|
|
74
|
-
am_broken: bool,
|
|
75
|
-
/// If set, the WFT this run is currently/will be processing.
|
|
76
|
-
wft: Option<OutstandingTask>,
|
|
77
|
-
/// An outstanding activation to lang
|
|
78
|
-
activation: Option<OutstandingActivation>,
|
|
79
|
-
/// If set, it indicates there is a buffered poll response from the server that applies to this
|
|
80
|
-
/// run. This can happen when lang takes too long to complete a task and the task times out, for
|
|
81
|
-
/// example. Upon next completion, the buffered response will be removed and can be made ready
|
|
82
|
-
/// to be returned from polling
|
|
83
|
-
buffered_resp: Option<PermittedWFT>,
|
|
84
|
-
/// Is set if an eviction has been requested for this run
|
|
85
|
-
trying_to_evict: Option<RequestEvictMsg>,
|
|
86
|
-
|
|
87
|
-
/// We track if we have recorded useful debugging values onto a certain span yet, to overcome
|
|
88
|
-
/// duplicating field values. Remove this once https://github.com/tokio-rs/tracing/issues/2334
|
|
89
|
-
/// is fixed.
|
|
90
|
-
recorded_span_ids: HashSet<tracing::Id>,
|
|
91
|
-
metrics: MetricsContext,
|
|
92
|
-
/// We store the paginator used for our own run's history fetching
|
|
93
|
-
paginator: Option<HistoryPaginator>,
|
|
94
|
-
completion_waiting_on_page_fetch: Option<RunActivationCompletion>,
|
|
95
|
-
}
|
|
96
|
-
impl ManagedRun {
|
|
97
|
-
pub(super) fn new(
|
|
98
|
-
basics: RunBasics,
|
|
99
|
-
local_activity_request_sink: Rc<dyn LocalActivityRequestSink>,
|
|
100
|
-
) -> Self {
|
|
101
|
-
let metrics = basics.metrics.clone();
|
|
102
|
-
let wfm = WorkflowManager::new(basics);
|
|
103
|
-
Self {
|
|
104
|
-
wfm,
|
|
105
|
-
local_activity_request_sink,
|
|
106
|
-
waiting_on_la: None,
|
|
107
|
-
am_broken: false,
|
|
108
|
-
wft: None,
|
|
109
|
-
activation: None,
|
|
110
|
-
buffered_resp: None,
|
|
111
|
-
trying_to_evict: None,
|
|
112
|
-
recorded_span_ids: Default::default(),
|
|
113
|
-
metrics,
|
|
114
|
-
paginator: None,
|
|
115
|
-
completion_waiting_on_page_fetch: None,
|
|
116
|
-
}
|
|
117
|
-
}
|
|
118
|
-
|
|
119
|
-
/// Returns true if there are pending jobs that need to be sent to lang.
|
|
120
|
-
pub(super) fn more_pending_work(&self) -> bool {
|
|
121
|
-
// We don't want to consider there to be more local-only work to be done if there is
|
|
122
|
-
// no workflow task associated with the run right now. This can happen if, ex, we
|
|
123
|
-
// complete a local activity while waiting for server to send us the next WFT.
|
|
124
|
-
// Activating lang would be harmful at this stage, as there might be work returned
|
|
125
|
-
// in that next WFT which should be part of the next activation.
|
|
126
|
-
self.wft.is_some() && self.wfm.machines.has_pending_jobs()
|
|
127
|
-
}
|
|
128
|
-
|
|
129
|
-
pub(super) fn have_seen_terminal_event(&self) -> bool {
|
|
130
|
-
self.wfm.machines.have_seen_terminal_event
|
|
131
|
-
}
|
|
132
|
-
|
|
133
|
-
/// Returns a ref to info about the currently tracked workflow task, if any.
|
|
134
|
-
pub(super) fn wft(&self) -> Option<&OutstandingTask> {
|
|
135
|
-
self.wft.as_ref()
|
|
136
|
-
}
|
|
137
|
-
|
|
138
|
-
/// Returns a ref to info about the currently tracked workflow activation, if any.
|
|
139
|
-
pub(super) fn activation(&self) -> Option<&OutstandingActivation> {
|
|
140
|
-
self.activation.as_ref()
|
|
141
|
-
}
|
|
142
|
-
|
|
143
|
-
/// Returns true if this run has already been told it will be evicted.
|
|
144
|
-
pub(super) fn is_trying_to_evict(&self) -> bool {
|
|
145
|
-
self.trying_to_evict.is_some()
|
|
146
|
-
}
|
|
147
|
-
|
|
148
|
-
/// Called whenever a new workflow task is obtained for this run
|
|
149
|
-
pub(super) fn incoming_wft(&mut self, pwft: PermittedWFT) -> RunUpdateAct {
|
|
150
|
-
let res = self._incoming_wft(pwft);
|
|
151
|
-
self.update_to_acts(res.map(Into::into), true)
|
|
152
|
-
}
|
|
153
|
-
|
|
154
|
-
fn _incoming_wft(
|
|
155
|
-
&mut self,
|
|
156
|
-
pwft: PermittedWFT,
|
|
157
|
-
) -> Result<Option<ActivationOrAuto>, RunUpdateErr> {
|
|
158
|
-
if self.wft.is_some() {
|
|
159
|
-
dbg_panic!("Trying to send a new WFT for a run which already has one!");
|
|
160
|
-
}
|
|
161
|
-
let start_time = Instant::now();
|
|
162
|
-
|
|
163
|
-
let work = pwft.work;
|
|
164
|
-
let did_miss_cache = !work.is_incremental() || !work.update.is_real();
|
|
165
|
-
debug!(
|
|
166
|
-
run_id = %work.execution.run_id,
|
|
167
|
-
task_token = %&work.task_token,
|
|
168
|
-
update = ?work.update,
|
|
169
|
-
has_legacy_query = %work.legacy_query.is_some(),
|
|
170
|
-
attempt = %work.attempt,
|
|
171
|
-
"Applying new workflow task from server"
|
|
172
|
-
);
|
|
173
|
-
let wft_info = WorkflowTaskInfo {
|
|
174
|
-
attempt: work.attempt,
|
|
175
|
-
task_token: work.task_token,
|
|
176
|
-
wf_id: work.execution.workflow_id.clone(),
|
|
177
|
-
};
|
|
178
|
-
|
|
179
|
-
let legacy_query_from_poll = work
|
|
180
|
-
.legacy_query
|
|
181
|
-
.map(|q| query_to_job(LEGACY_QUERY_ID.to_string(), q));
|
|
182
|
-
|
|
183
|
-
let mut pending_queries = work.query_requests;
|
|
184
|
-
if !pending_queries.is_empty() && legacy_query_from_poll.is_some() {
|
|
185
|
-
error!(
|
|
186
|
-
"Server issued both normal and legacy queries. This should not happen. Please \
|
|
187
|
-
file a bug report."
|
|
188
|
-
);
|
|
189
|
-
return Err(RunUpdateErr {
|
|
190
|
-
source: WFMachinesError::Fatal(
|
|
191
|
-
"Server issued both normal and legacy query".to_string(),
|
|
192
|
-
),
|
|
193
|
-
complete_resp_chan: None,
|
|
194
|
-
});
|
|
195
|
-
}
|
|
196
|
-
if let Some(lq) = legacy_query_from_poll {
|
|
197
|
-
pending_queries.push(lq);
|
|
198
|
-
}
|
|
199
|
-
|
|
200
|
-
self.paginator = Some(pwft.paginator);
|
|
201
|
-
self.wft = Some(OutstandingTask {
|
|
202
|
-
info: wft_info,
|
|
203
|
-
hit_cache: !did_miss_cache,
|
|
204
|
-
pending_queries,
|
|
205
|
-
start_time,
|
|
206
|
-
permit: pwft.permit,
|
|
207
|
-
});
|
|
208
|
-
|
|
209
|
-
// The update field is only populated in the event we hit the cache
|
|
210
|
-
let activation = if work.update.is_real() {
|
|
211
|
-
self.metrics.sticky_cache_hit();
|
|
212
|
-
self.wfm.feed_history_from_server(work.update)?
|
|
213
|
-
} else {
|
|
214
|
-
let r = self.wfm.get_next_activation()?;
|
|
215
|
-
if r.jobs.is_empty() {
|
|
216
|
-
return Err(RunUpdateErr {
|
|
217
|
-
source: WFMachinesError::Fatal(format!(
|
|
218
|
-
"Machines created for {} with no jobs",
|
|
219
|
-
self.wfm.machines.run_id
|
|
220
|
-
)),
|
|
221
|
-
complete_resp_chan: None,
|
|
222
|
-
});
|
|
223
|
-
}
|
|
224
|
-
r
|
|
225
|
-
};
|
|
226
|
-
|
|
227
|
-
if activation.jobs.is_empty() {
|
|
228
|
-
if self.wfm.machines.outstanding_local_activity_count() > 0 {
|
|
229
|
-
// If the activation has no jobs but there are outstanding LAs, we need to restart
|
|
230
|
-
// the WFT heartbeat.
|
|
231
|
-
if let Some(ref mut lawait) = self.waiting_on_la {
|
|
232
|
-
if lawait.completion_dat.is_some() {
|
|
233
|
-
panic!("Should not have completion dat when getting new wft & empty jobs")
|
|
234
|
-
}
|
|
235
|
-
lawait.hb_timeout_handle.abort();
|
|
236
|
-
lawait.hb_timeout_handle = sink_heartbeat_timeout_start(
|
|
237
|
-
self.wfm.machines.run_id.clone(),
|
|
238
|
-
self.local_activity_request_sink.as_ref(),
|
|
239
|
-
start_time,
|
|
240
|
-
lawait.wft_timeout,
|
|
241
|
-
);
|
|
242
|
-
// No activation needs to be sent to lang. We just need to wait for another
|
|
243
|
-
// heartbeat timeout or LAs to resolve
|
|
244
|
-
return Ok(None);
|
|
245
|
-
} else {
|
|
246
|
-
panic!(
|
|
247
|
-
"Got a new WFT while there are outstanding local activities, but there \
|
|
248
|
-
was no waiting on LA info."
|
|
249
|
-
)
|
|
250
|
-
}
|
|
251
|
-
} else {
|
|
252
|
-
return Ok(Some(ActivationOrAuto::Autocomplete {
|
|
253
|
-
run_id: self.wfm.machines.run_id.clone(),
|
|
254
|
-
}));
|
|
255
|
-
}
|
|
256
|
-
}
|
|
257
|
-
|
|
258
|
-
Ok(Some(ActivationOrAuto::LangActivation(activation)))
|
|
259
|
-
}
|
|
260
|
-
|
|
261
|
-
/// Deletes the currently tracked WFT & records latency metrics. Should be called after it has
|
|
262
|
-
/// been responded to (server has been told). Returns the WFT if there was one.
|
|
263
|
-
pub(super) fn mark_wft_complete(
|
|
264
|
-
&mut self,
|
|
265
|
-
report_status: WFTReportStatus,
|
|
266
|
-
) -> Option<OutstandingTask> {
|
|
267
|
-
debug!("Marking WFT completed");
|
|
268
|
-
let retme = self.wft.take();
|
|
269
|
-
|
|
270
|
-
// Only record latency metrics if we genuinely reported to server
|
|
271
|
-
if matches!(report_status, WFTReportStatus::Reported) {
|
|
272
|
-
if let Some(ot) = &retme {
|
|
273
|
-
self.metrics.wf_task_latency(ot.start_time.elapsed());
|
|
274
|
-
}
|
|
275
|
-
// Tell the LA manager that we're done with the WFT
|
|
276
|
-
self.local_activity_request_sink.sink_reqs(vec![
|
|
277
|
-
LocalActRequest::IndicateWorkflowTaskCompleted(self.wfm.machines.run_id.clone()),
|
|
278
|
-
]);
|
|
279
|
-
}
|
|
280
|
-
|
|
281
|
-
retme
|
|
282
|
-
}
|
|
283
|
-
|
|
284
|
-
/// Checks if any further activations need to go out for this run and produces them if so.
|
|
285
|
-
pub(super) fn check_more_activations(&mut self) -> RunUpdateAct {
|
|
286
|
-
let res = self._check_more_activations();
|
|
287
|
-
self.update_to_acts(res.map(Into::into), false)
|
|
288
|
-
}
|
|
289
|
-
|
|
290
|
-
fn _check_more_activations(&mut self) -> Result<Option<ActivationOrAuto>, RunUpdateErr> {
|
|
291
|
-
// No point in checking for more activations if there's already an outstanding activation.
|
|
292
|
-
if self.activation.is_some() {
|
|
293
|
-
return Ok(None);
|
|
294
|
-
}
|
|
295
|
-
// In the event it's time to evict this run, cancel any outstanding LAs
|
|
296
|
-
if self.trying_to_evict.is_some() {
|
|
297
|
-
self.sink_la_requests(vec![LocalActRequest::CancelAllInRun(
|
|
298
|
-
self.wfm.machines.run_id.clone(),
|
|
299
|
-
)])?;
|
|
300
|
-
}
|
|
301
|
-
|
|
302
|
-
if self.wft.is_none() {
|
|
303
|
-
// It doesn't make sense to do workflow work unless we have a WFT
|
|
304
|
-
return Ok(None);
|
|
305
|
-
}
|
|
306
|
-
|
|
307
|
-
if self.wfm.machines.has_pending_jobs() && !self.am_broken {
|
|
308
|
-
Ok(Some(ActivationOrAuto::LangActivation(
|
|
309
|
-
self.wfm.get_next_activation()?,
|
|
310
|
-
)))
|
|
311
|
-
} else {
|
|
312
|
-
if !self.am_broken {
|
|
313
|
-
let has_pending_queries = self
|
|
314
|
-
.wft
|
|
315
|
-
.as_ref()
|
|
316
|
-
.map(|wft| !wft.pending_queries.is_empty())
|
|
317
|
-
.unwrap_or_default();
|
|
318
|
-
if has_pending_queries {
|
|
319
|
-
return Ok(Some(ActivationOrAuto::ReadyForQueries(
|
|
320
|
-
self.wfm.machines.get_wf_activation(),
|
|
321
|
-
)));
|
|
322
|
-
}
|
|
323
|
-
}
|
|
324
|
-
if let Some(wte) = self.trying_to_evict.clone() {
|
|
325
|
-
let mut act = self.wfm.machines.get_wf_activation();
|
|
326
|
-
// No other jobs make any sense to send if we encountered an error.
|
|
327
|
-
if self.am_broken {
|
|
328
|
-
act.jobs = vec![];
|
|
329
|
-
}
|
|
330
|
-
act.append_evict_job(RemoveFromCache {
|
|
331
|
-
message: wte.message,
|
|
332
|
-
reason: wte.reason as i32,
|
|
333
|
-
});
|
|
334
|
-
Ok(Some(ActivationOrAuto::LangActivation(act)))
|
|
335
|
-
} else {
|
|
336
|
-
Ok(None)
|
|
337
|
-
}
|
|
338
|
-
}
|
|
339
|
-
}
|
|
340
|
-
|
|
341
|
-
/// Called whenever lang successfully completes a workflow activation. Commands produced by the
|
|
342
|
-
/// activation are passed in. `resp_chan` will be used to unblock the completion call when
|
|
343
|
-
/// everything we need to do to fulfill it has happened.
|
|
344
|
-
///
|
|
345
|
-
/// Can return an error in the event that another page of history needs to be fetched before
|
|
346
|
-
/// the completion can proceed.
|
|
347
|
-
pub(super) fn successful_completion(
|
|
348
|
-
&mut self,
|
|
349
|
-
mut commands: Vec<WFCommand>,
|
|
350
|
-
used_flags: Vec<u32>,
|
|
351
|
-
resp_chan: Option<oneshot::Sender<ActivationCompleteResult>>,
|
|
352
|
-
) -> Result<RunUpdateAct, NextPageReq> {
|
|
353
|
-
let activation_was_only_eviction = self.activation_has_only_eviction();
|
|
354
|
-
let (task_token, has_pending_query, start_time) = if let Some(entry) = self.wft.as_ref() {
|
|
355
|
-
(
|
|
356
|
-
entry.info.task_token.clone(),
|
|
357
|
-
!entry.pending_queries.is_empty(),
|
|
358
|
-
entry.start_time,
|
|
359
|
-
)
|
|
360
|
-
} else {
|
|
361
|
-
if !activation_was_only_eviction {
|
|
362
|
-
// Not an error if this was an eviction, since it's normal to issue eviction
|
|
363
|
-
// activations without an associated workflow task in that case.
|
|
364
|
-
dbg_panic!(
|
|
365
|
-
"Attempted to complete activation for run {} without associated workflow task",
|
|
366
|
-
self.run_id()
|
|
367
|
-
);
|
|
368
|
-
}
|
|
369
|
-
self.reply_to_complete(ActivationCompleteOutcome::DoNothing, resp_chan);
|
|
370
|
-
return Ok(None);
|
|
371
|
-
};
|
|
372
|
-
|
|
373
|
-
// If the only command from the activation is a legacy query response, that means we need
|
|
374
|
-
// to respond differently than a typical activation.
|
|
375
|
-
if matches!(&commands.as_slice(),
|
|
376
|
-
&[WFCommand::QueryResponse(qr)] if qr.query_id == LEGACY_QUERY_ID)
|
|
377
|
-
{
|
|
378
|
-
let qr = match commands.remove(0) {
|
|
379
|
-
WFCommand::QueryResponse(qr) => qr,
|
|
380
|
-
_ => unreachable!("We just verified this is the only command"),
|
|
381
|
-
};
|
|
382
|
-
self.reply_to_complete(
|
|
383
|
-
ActivationCompleteOutcome::ReportWFTSuccess(ServerCommandsWithWorkflowInfo {
|
|
384
|
-
task_token,
|
|
385
|
-
action: ActivationAction::RespondLegacyQuery {
|
|
386
|
-
result: Box::new(qr),
|
|
387
|
-
},
|
|
388
|
-
}),
|
|
389
|
-
resp_chan,
|
|
390
|
-
);
|
|
391
|
-
Ok(None)
|
|
392
|
-
} else {
|
|
393
|
-
// First strip out query responses from other commands that actually affect machines
|
|
394
|
-
// Would be prettier with `drain_filter`
|
|
395
|
-
let mut i = 0;
|
|
396
|
-
let mut query_responses = vec![];
|
|
397
|
-
while i < commands.len() {
|
|
398
|
-
if matches!(commands[i], WFCommand::QueryResponse(_)) {
|
|
399
|
-
if let WFCommand::QueryResponse(qr) = commands.remove(i) {
|
|
400
|
-
query_responses.push(qr);
|
|
401
|
-
}
|
|
402
|
-
} else {
|
|
403
|
-
i += 1;
|
|
404
|
-
}
|
|
405
|
-
}
|
|
406
|
-
|
|
407
|
-
if activation_was_only_eviction && !commands.is_empty() {
|
|
408
|
-
dbg_panic!("Reply to an eviction only containing an eviction included commands");
|
|
409
|
-
}
|
|
410
|
-
|
|
411
|
-
let rac = RunActivationCompletion {
|
|
412
|
-
task_token,
|
|
413
|
-
start_time,
|
|
414
|
-
commands,
|
|
415
|
-
activation_was_eviction: self.activation_has_eviction(),
|
|
416
|
-
activation_was_only_eviction,
|
|
417
|
-
has_pending_query,
|
|
418
|
-
query_responses,
|
|
419
|
-
used_flags,
|
|
420
|
-
resp_chan,
|
|
421
|
-
};
|
|
422
|
-
|
|
423
|
-
// Verify we can actually apply the next workflow task, which will happen as part of
|
|
424
|
-
// applying the completion to machines. If we can't, return early indicating we need
|
|
425
|
-
// to fetch a page.
|
|
426
|
-
if !self.wfm.ready_to_apply_next_wft() {
|
|
427
|
-
return if let Some(paginator) = self.paginator.take() {
|
|
428
|
-
debug!("Need to fetch a history page before next WFT can be applied");
|
|
429
|
-
self.completion_waiting_on_page_fetch = Some(rac);
|
|
430
|
-
Err(NextPageReq {
|
|
431
|
-
paginator,
|
|
432
|
-
span: Span::current(),
|
|
433
|
-
})
|
|
434
|
-
} else {
|
|
435
|
-
Ok(self.update_to_acts(
|
|
436
|
-
Err(RunUpdateErr {
|
|
437
|
-
source: WFMachinesError::Fatal(
|
|
438
|
-
"Run's paginator was absent when attempting to fetch next history \
|
|
439
|
-
page. This is a Core SDK bug."
|
|
440
|
-
.to_string(),
|
|
441
|
-
),
|
|
442
|
-
complete_resp_chan: rac.resp_chan,
|
|
443
|
-
}),
|
|
444
|
-
false,
|
|
445
|
-
))
|
|
446
|
-
};
|
|
447
|
-
}
|
|
448
|
-
|
|
449
|
-
Ok(self.process_completion(rac))
|
|
450
|
-
}
|
|
451
|
-
}
|
|
452
|
-
|
|
453
|
-
/// Called after the higher-up machinery has fetched more pages of event history needed to apply
|
|
454
|
-
/// the next workflow task. The history update and paginator used to perform the fetch are
|
|
455
|
-
/// passed in, with the update being used to apply the task, and the paginator stored to be
|
|
456
|
-
/// attached with another fetch request if needed.
|
|
457
|
-
pub(super) fn fetched_page_completion(
|
|
458
|
-
&mut self,
|
|
459
|
-
update: HistoryUpdate,
|
|
460
|
-
paginator: HistoryPaginator,
|
|
461
|
-
) -> RunUpdateAct {
|
|
462
|
-
let res = self._fetched_page_completion(update, paginator);
|
|
463
|
-
self.update_to_acts(res.map(Into::into), false)
|
|
464
|
-
}
|
|
465
|
-
fn _fetched_page_completion(
|
|
466
|
-
&mut self,
|
|
467
|
-
update: HistoryUpdate,
|
|
468
|
-
paginator: HistoryPaginator,
|
|
469
|
-
) -> Result<Option<FulfillableActivationComplete>, RunUpdateErr> {
|
|
470
|
-
self.paginator = Some(paginator);
|
|
471
|
-
if let Some(d) = self.completion_waiting_on_page_fetch.take() {
|
|
472
|
-
self._process_completion(d, Some(update))
|
|
473
|
-
} else {
|
|
474
|
-
dbg_panic!(
|
|
475
|
-
"Shouldn't be possible to be applying a next-page-fetch update when \
|
|
476
|
-
doing anything other than completing an activation."
|
|
477
|
-
);
|
|
478
|
-
Err(RunUpdateErr::from(WFMachinesError::Fatal(
|
|
479
|
-
"Tried to apply next-page-fetch update to a run that wasn't handling a completion"
|
|
480
|
-
.to_string(),
|
|
481
|
-
)))
|
|
482
|
-
}
|
|
483
|
-
}
|
|
484
|
-
|
|
485
|
-
/// Called whenever either core lang cannot complete a workflow activation. EX: Nondeterminism
|
|
486
|
-
/// or user code threw/panicked, respectively. The `cause` and `reason` fields are determined
|
|
487
|
-
/// inside core always. The `failure` field may come from lang. `resp_chan` will be used to
|
|
488
|
-
/// unblock the completion call when everything we need to do to fulfill it has happened.
|
|
489
|
-
pub(super) fn failed_completion(
|
|
490
|
-
&mut self,
|
|
491
|
-
cause: WorkflowTaskFailedCause,
|
|
492
|
-
reason: EvictionReason,
|
|
493
|
-
failure: workflow_completion::Failure,
|
|
494
|
-
resp_chan: Option<oneshot::Sender<ActivationCompleteResult>>,
|
|
495
|
-
) -> RunUpdateAct {
|
|
496
|
-
let tt = if let Some(tt) = self.wft.as_ref().map(|t| t.info.task_token.clone()) {
|
|
497
|
-
tt
|
|
498
|
-
} else {
|
|
499
|
-
dbg_panic!(
|
|
500
|
-
"No workflow task for run id {} found when trying to fail activation",
|
|
501
|
-
self.run_id()
|
|
502
|
-
);
|
|
503
|
-
self.reply_to_complete(ActivationCompleteOutcome::DoNothing, resp_chan);
|
|
504
|
-
return None;
|
|
505
|
-
};
|
|
506
|
-
|
|
507
|
-
self.metrics.wf_task_failed();
|
|
508
|
-
let message = format!("Workflow activation completion failed: {:?}", &failure);
|
|
509
|
-
// Blow up any cached data associated with the workflow
|
|
510
|
-
let evict_req_outcome = self.request_eviction(RequestEvictMsg {
|
|
511
|
-
run_id: self.run_id().to_string(),
|
|
512
|
-
message,
|
|
513
|
-
reason,
|
|
514
|
-
});
|
|
515
|
-
let should_report = match &evict_req_outcome {
|
|
516
|
-
EvictionRequestResult::EvictionRequested(Some(attempt), _)
|
|
517
|
-
| EvictionRequestResult::EvictionAlreadyRequested(Some(attempt)) => *attempt <= 1,
|
|
518
|
-
_ => false,
|
|
519
|
-
};
|
|
520
|
-
let rur = evict_req_outcome.into_run_update_resp();
|
|
521
|
-
// If the outstanding WFT is a legacy query task, report that we need to fail it
|
|
522
|
-
let outcome = if self.pending_work_is_legacy_query() {
|
|
523
|
-
ActivationCompleteOutcome::ReportWFTFail(
|
|
524
|
-
FailedActivationWFTReport::ReportLegacyQueryFailure(tt, failure),
|
|
525
|
-
)
|
|
526
|
-
} else if should_report {
|
|
527
|
-
ActivationCompleteOutcome::ReportWFTFail(FailedActivationWFTReport::Report(
|
|
528
|
-
tt, cause, failure,
|
|
529
|
-
))
|
|
530
|
-
} else {
|
|
531
|
-
ActivationCompleteOutcome::WFTFailedDontReport
|
|
532
|
-
};
|
|
533
|
-
self.reply_to_complete(outcome, resp_chan);
|
|
534
|
-
rur
|
|
535
|
-
}
|
|
536
|
-
|
|
537
|
-
/// Delete the currently tracked workflow activation and return it, if any. Should be called
|
|
538
|
-
/// after the processing of the activation completion, and WFT reporting.
|
|
539
|
-
pub(super) fn delete_activation(&mut self) -> Option<OutstandingActivation> {
|
|
540
|
-
self.activation.take()
|
|
541
|
-
}
|
|
542
|
-
|
|
543
|
-
/// Called when local activities resolve
|
|
544
|
-
pub(super) fn local_resolution(&mut self, res: LocalResolution) -> RunUpdateAct {
|
|
545
|
-
let res = self._local_resolution(res);
|
|
546
|
-
self.update_to_acts(res.map(Into::into), false)
|
|
547
|
-
}
|
|
548
|
-
|
|
549
|
-
fn process_completion(&mut self, completion: RunActivationCompletion) -> RunUpdateAct {
|
|
550
|
-
let res = self._process_completion(completion, None);
|
|
551
|
-
self.update_to_acts(res.map(Into::into), false)
|
|
552
|
-
}
|
|
553
|
-
|
|
554
|
-
/// Process an activation completion that lang sent back for this run.
///
/// Feeds lang's commands (and any history page included with the completion) into the
/// workflow machines, then either prepares a completion response immediately or, if local
/// activities are still outstanding, parks the completion data until they resolve.
///
/// Returns `Ok(Some(..))` with a fulfillable response when the completion can be answered
/// now, `Ok(None)` when it was parked waiting on LAs, or a `RunUpdateErr` carrying the
/// response channel if updating the machines failed.
fn _process_completion(
    &mut self,
    completion: RunActivationCompletion,
    new_update: Option<HistoryUpdate>,
) -> Result<Option<FulfillableActivationComplete>, RunUpdateErr> {
    // Snapshot the pieces of the completion needed to build the WFT reply later, since the
    // rest of `completion` is consumed below.
    let data = CompletionDataForWFT {
        task_token: completion.task_token,
        query_responses: completion.query_responses,
        has_pending_query: completion.has_pending_query,
        activation_was_only_eviction: completion.activation_was_only_eviction,
    };

    self.wfm.machines.add_lang_used_flags(completion.used_flags);

    // If this is just bookkeeping after a reply to an only-eviction activation, we can bypass
    // everything, since there is no reason to continue trying to update machines.
    if completion.activation_was_only_eviction {
        return Ok(Some(self.prepare_complete_resp(
            completion.resp_chan,
            data,
            false,
        )));
    }

    // Closure so any `?` error inside is captured and handled uniformly in the match below
    // (keeping the resp_chan attached to the error).
    let outcome = (|| {
        // Send commands from lang into the machines then check if the workflow run needs
        // another activation and mark it if so
        self.wfm.push_commands_and_iterate(completion.commands)?;
        // If there was a new update included as part of the completion, apply it.
        if let Some(update) = new_update {
            self.wfm.feed_history_from_new_page(update)?;
        }
        // Don't bother applying the next task if we're evicting at the end of this activation
        if !completion.activation_was_eviction {
            self.wfm.apply_next_task_if_ready()?;
        }
        // Dispatch any local activities the machines queued during iteration.
        let new_local_acts = self.wfm.drain_queued_local_activities();
        self.sink_la_requests(new_local_acts)?;

        if self.wfm.machines.outstanding_local_activity_count() == 0 {
            // Nothing left to wait on — the completion can be answered right away.
            Ok(None)
        } else {
            // LAs are still running; we need the WFT timeout so a heartbeat timer can be
            // started while we wait for them.
            let wft_timeout: Duration = self
                .wfm
                .machines
                .get_started_info()
                .and_then(|attrs| attrs.workflow_task_timeout)
                .ok_or_else(|| {
                    WFMachinesError::Fatal(
                        "Workflow's start attribs were missing a well formed task timeout"
                            .to_string(),
                    )
                })?;
            Ok(Some((completion.start_time, wft_timeout)))
        }
    })();

    match outcome {
        // No outstanding LAs — reply to the completion now.
        Ok(None) => Ok(Some(self.prepare_complete_resp(
            completion.resp_chan,
            data,
            false,
        ))),
        // Outstanding LAs — stash the completion data and start a heartbeat timeout so we
        // can heartbeat the WFT if the LAs take a long time.
        Ok(Some((start_t, wft_timeout))) => {
            // Abort any previously running heartbeat timer before replacing it.
            if let Some(wola) = self.waiting_on_la.as_mut() {
                wola.hb_timeout_handle.abort();
            }
            self.waiting_on_la = Some(WaitingOnLAs {
                wft_timeout,
                completion_dat: Some((data, completion.resp_chan)),
                hb_timeout_handle: sink_heartbeat_timeout_start(
                    self.run_id().to_string(),
                    self.local_activity_request_sink.as_ref(),
                    start_t,
                    wft_timeout,
                ),
            });
            Ok(None)
        }
        // Machines update failed — surface the error together with the response channel so
        // the caller can fail the WFT properly.
        Err(e) => Err(RunUpdateErr {
            source: e,
            complete_resp_chan: completion.resp_chan,
        }),
    }
}
|
|
639
|
-
|
|
640
|
-
/// Apply a local resolution (ex: a local activity result) to the workflow machines.
///
/// If this was the last outstanding local activity and an activation completion was parked
/// waiting on LA results, the parked completion is prepared and returned so it can be
/// fulfilled; otherwise returns `Ok(None)`.
fn _local_resolution(
    &mut self,
    res: LocalResolution,
) -> Result<Option<FulfillableActivationComplete>, RunUpdateErr> {
    debug!(resolution=?res, "Applying local resolution");
    self.wfm.notify_of_local_result(res)?;
    if self.wfm.machines.outstanding_local_activity_count() == 0 {
        if let Some(mut wait_dat) = self.waiting_on_la.take() {
            // Cancel the heartbeat timeout
            wait_dat.hb_timeout_handle.abort();
            // If the wait was holding on to completion data, finish that completion now.
            if let Some((completion_dat, resp_chan)) = wait_dat.completion_dat.take() {
                return Ok(Some(self.prepare_complete_resp(
                    resp_chan,
                    completion_dat,
                    false,
                )));
            }
        }
    }
    Ok(None)
}
|
|
661
|
-
|
|
662
|
-
pub(super) fn heartbeat_timeout(&mut self) -> RunUpdateAct {
|
|
663
|
-
let maybe_act = if self._heartbeat_timeout() {
|
|
664
|
-
Some(ActivationOrAuto::Autocomplete {
|
|
665
|
-
run_id: self.wfm.machines.run_id.clone(),
|
|
666
|
-
})
|
|
667
|
-
} else {
|
|
668
|
-
None
|
|
669
|
-
};
|
|
670
|
-
self.update_to_acts(Ok(maybe_act).map(Into::into), false)
|
|
671
|
-
}
|
|
672
|
-
/// Returns `true` if autocompletion should be issued, which will actually cause us to end up
/// in [completion] again, at which point we'll start a new heartbeat timeout, which will
/// immediately trigger and thus finish the completion, forcing a new task as it should.
fn _heartbeat_timeout(&mut self) -> bool {
    if let Some(ref mut wait_dat) = self.waiting_on_la {
        // Cancel the heartbeat timeout
        wait_dat.hb_timeout_handle.abort();
        if let Some((completion_dat, resp_chan)) = wait_dat.completion_dat.take() {
            // `true` here forces a new WFT in the prepared response, since we heartbeated.
            let compl = self.prepare_complete_resp(resp_chan, completion_dat, true);
            // Immediately fulfill the completion since the run update will already have
            // been replied to
            compl.fulfill();
        } else {
            // Auto-reply WFT complete
            return true;
        }
    }
    false
}
|
|
691
|
-
|
|
692
|
-
/// Returns true if the managed run has any form of pending work
|
|
693
|
-
/// If `ignore_evicts` is true, pending evictions do not count as pending work.
|
|
694
|
-
/// If `ignore_buffered` is true, buffered workflow tasks do not count as pending work.
|
|
695
|
-
pub(super) fn has_any_pending_work(&self, ignore_evicts: bool, ignore_buffered: bool) -> bool {
|
|
696
|
-
let evict_work = if ignore_evicts {
|
|
697
|
-
false
|
|
698
|
-
} else {
|
|
699
|
-
self.trying_to_evict.is_some()
|
|
700
|
-
};
|
|
701
|
-
let act_work = if ignore_evicts {
|
|
702
|
-
if let Some(ref act) = self.activation {
|
|
703
|
-
!act.has_only_eviction()
|
|
704
|
-
} else {
|
|
705
|
-
false
|
|
706
|
-
}
|
|
707
|
-
} else {
|
|
708
|
-
self.activation.is_some()
|
|
709
|
-
};
|
|
710
|
-
let buffered = if ignore_buffered {
|
|
711
|
-
false
|
|
712
|
-
} else {
|
|
713
|
-
self.buffered_resp.is_some()
|
|
714
|
-
};
|
|
715
|
-
trace!(wft=self.wft.is_some(), buffered=?buffered, more_work=?self.more_pending_work(),
|
|
716
|
-
act_work, evict_work, "Does run have pending work?");
|
|
717
|
-
self.wft.is_some() || buffered || self.more_pending_work() || act_work || evict_work
|
|
718
|
-
}
|
|
719
|
-
|
|
720
|
-
/// Stores some work if there is any outstanding WFT or activation for the run. If there was
|
|
721
|
-
/// not, returns the work back out inside the option.
|
|
722
|
-
pub(super) fn buffer_wft_if_outstanding_work(
|
|
723
|
-
&mut self,
|
|
724
|
-
work: PermittedWFT,
|
|
725
|
-
) -> Option<PermittedWFT> {
|
|
726
|
-
let about_to_issue_evict = self.trying_to_evict.is_some();
|
|
727
|
-
let has_wft = self.wft().is_some();
|
|
728
|
-
let has_activation = self.activation().is_some();
|
|
729
|
-
if has_wft || has_activation || about_to_issue_evict || self.more_pending_work() {
|
|
730
|
-
debug!(run_id = %self.run_id(),
|
|
731
|
-
"Got new WFT for a run with outstanding work, buffering it");
|
|
732
|
-
self.buffered_resp = Some(work);
|
|
733
|
-
None
|
|
734
|
-
} else {
|
|
735
|
-
Some(work)
|
|
736
|
-
}
|
|
737
|
-
}
|
|
738
|
-
|
|
739
|
-
/// Returns true if there is a buffered workflow task for this run.
|
|
740
|
-
pub(super) fn has_buffered_wft(&self) -> bool {
|
|
741
|
-
self.buffered_resp.is_some()
|
|
742
|
-
}
|
|
743
|
-
|
|
744
|
-
/// Removes and returns the buffered workflow task, if any.
|
|
745
|
-
pub(super) fn take_buffered_wft(&mut self) -> Option<PermittedWFT> {
|
|
746
|
-
self.buffered_resp.take()
|
|
747
|
-
}
|
|
748
|
-
|
|
749
|
-
/// Request that this run be evicted from the cache.
///
/// Returns whether the eviction was newly requested (along with any resulting run-update
/// action) or had already been requested.
pub(super) fn request_eviction(&mut self, info: RequestEvictMsg) -> EvictionRequestResult {
    // Attempt count of the outstanding WFT, if any, is included in the result.
    let attempts = self.wft.as_ref().map(|wt| wt.info.attempt);

    // If we were waiting on a page fetch and we're getting evicted because fetching failed,
    // then make sure we allow the completion to proceed, otherwise we're stuck waiting forever.
    if self.completion_waiting_on_page_fetch.is_some()
        && matches!(info.reason, EvictionReason::PaginationOrHistoryFetch)
    {
        // We just checked it is some, unwrap OK.
        let c = self.completion_waiting_on_page_fetch.take().unwrap();
        // Fail the parked completion so its waiter unblocks before the eviction proceeds.
        let run_upd = self.failed_completion(
            WorkflowTaskFailedCause::Unspecified,
            info.reason,
            Failure::application_failure(info.message, false).into(),
            c.resp_chan,
        );
        return EvictionRequestResult::EvictionRequested(attempts, run_upd);
    }

    // Only record a new eviction request if one isn't already outstanding (either pending
    // as `trying_to_evict` or already part of the outgoing activation).
    if !self.activation_has_eviction() && self.trying_to_evict.is_none() {
        debug!(run_id=%info.run_id, reason=%info.message, "Eviction requested");
        self.trying_to_evict = Some(info);
        EvictionRequestResult::EvictionRequested(attempts, self.check_more_activations())
    } else {
        EvictionRequestResult::EvictionAlreadyRequested(attempts)
    }
}
|
|
776
|
-
|
|
777
|
-
/// Record identifying fields (currently the workflow id) on the provided tracing span.
///
/// Each span is only ever recorded once; the span's id is remembered in
/// `recorded_span_ids` to guarantee that.
pub(super) fn record_span_fields(&mut self, span: &Span) {
    if let Some(spid) = span.id() {
        // `HashSet::insert` returns `false` when the id was already present, which lets us
        // test membership and mark the span recorded with a single set lookup instead of
        // a `contains` followed by an `insert`.
        if !self.recorded_span_ids.insert(spid) {
            return;
        }

        if let Some(wid) = self.wft().map(|wft| &wft.info.wf_id) {
            span.record("workflow_id", wid.as_str());
        }
    }
}
|
|
789
|
-
|
|
790
|
-
/// Take the result of some update to ourselves and turn it into a return value of zero or more
|
|
791
|
-
/// actions
|
|
792
|
-
fn update_to_acts(
|
|
793
|
-
&mut self,
|
|
794
|
-
outcome: Result<ActOrFulfill, RunUpdateErr>,
|
|
795
|
-
in_response_to_wft: bool,
|
|
796
|
-
) -> RunUpdateAct {
|
|
797
|
-
match outcome {
|
|
798
|
-
Ok(act_or_fulfill) => {
|
|
799
|
-
let (mut maybe_act, maybe_fulfill) = match act_or_fulfill {
|
|
800
|
-
ActOrFulfill::OutgoingAct(a) => (a, None),
|
|
801
|
-
ActOrFulfill::FulfillableComplete(c) => (None, c),
|
|
802
|
-
};
|
|
803
|
-
// If there's no activation but is pending work, check and possibly generate one
|
|
804
|
-
if self.more_pending_work() && maybe_act.is_none() {
|
|
805
|
-
match self._check_more_activations() {
|
|
806
|
-
Ok(oa) => maybe_act = oa,
|
|
807
|
-
Err(e) => {
|
|
808
|
-
return self.update_to_acts(Err(e), in_response_to_wft);
|
|
809
|
-
}
|
|
810
|
-
}
|
|
811
|
-
}
|
|
812
|
-
let r = match maybe_act {
|
|
813
|
-
Some(ActivationOrAuto::LangActivation(mut activation)) => {
|
|
814
|
-
if in_response_to_wft {
|
|
815
|
-
let wft = self
|
|
816
|
-
.wft
|
|
817
|
-
.as_mut()
|
|
818
|
-
.expect("WFT must exist for run just updated with one");
|
|
819
|
-
// If there are in-poll queries, insert jobs for those queries into the
|
|
820
|
-
// activation, but only if we hit the cache. If we didn't, those queries
|
|
821
|
-
// will need to be dealt with once replay is over
|
|
822
|
-
if wft.hit_cache {
|
|
823
|
-
put_queries_in_act(&mut activation, wft);
|
|
824
|
-
}
|
|
825
|
-
}
|
|
826
|
-
|
|
827
|
-
if activation.jobs.is_empty() {
|
|
828
|
-
dbg_panic!("Should not send lang activation with no jobs");
|
|
829
|
-
}
|
|
830
|
-
Some(ActivationOrAuto::LangActivation(activation))
|
|
831
|
-
}
|
|
832
|
-
Some(ActivationOrAuto::ReadyForQueries(mut act)) => {
|
|
833
|
-
if let Some(wft) = self.wft.as_mut() {
|
|
834
|
-
put_queries_in_act(&mut act, wft);
|
|
835
|
-
Some(ActivationOrAuto::LangActivation(act))
|
|
836
|
-
} else {
|
|
837
|
-
dbg_panic!("Ready for queries but no WFT!");
|
|
838
|
-
None
|
|
839
|
-
}
|
|
840
|
-
}
|
|
841
|
-
a @ Some(
|
|
842
|
-
ActivationOrAuto::Autocomplete { .. } | ActivationOrAuto::AutoFail { .. },
|
|
843
|
-
) => a,
|
|
844
|
-
None => {
|
|
845
|
-
if let Some(reason) = self.trying_to_evict.as_ref() {
|
|
846
|
-
// If we had nothing to do, but we're trying to evict, just do that now
|
|
847
|
-
// as long as there's no other outstanding work.
|
|
848
|
-
if self.activation.is_none() && !self.more_pending_work() {
|
|
849
|
-
let mut evict_act = create_evict_activation(
|
|
850
|
-
self.run_id().to_string(),
|
|
851
|
-
reason.message.clone(),
|
|
852
|
-
reason.reason,
|
|
853
|
-
);
|
|
854
|
-
evict_act.history_length =
|
|
855
|
-
self.most_recently_processed_event_number() as u32;
|
|
856
|
-
Some(ActivationOrAuto::LangActivation(evict_act))
|
|
857
|
-
} else {
|
|
858
|
-
None
|
|
859
|
-
}
|
|
860
|
-
} else {
|
|
861
|
-
None
|
|
862
|
-
}
|
|
863
|
-
}
|
|
864
|
-
};
|
|
865
|
-
if let Some(f) = maybe_fulfill {
|
|
866
|
-
f.fulfill();
|
|
867
|
-
}
|
|
868
|
-
|
|
869
|
-
match r {
|
|
870
|
-
// After each run update, check if it's ready to handle any buffered poll
|
|
871
|
-
None | Some(ActivationOrAuto::Autocomplete { .. })
|
|
872
|
-
if !self.has_any_pending_work(false, true) =>
|
|
873
|
-
{
|
|
874
|
-
if let Some(bufft) = self.buffered_resp.take() {
|
|
875
|
-
self.incoming_wft(bufft)
|
|
876
|
-
} else {
|
|
877
|
-
None
|
|
878
|
-
}
|
|
879
|
-
}
|
|
880
|
-
Some(r) => {
|
|
881
|
-
self.insert_outstanding_activation(&r);
|
|
882
|
-
Some(r)
|
|
883
|
-
}
|
|
884
|
-
None => None,
|
|
885
|
-
}
|
|
886
|
-
}
|
|
887
|
-
Err(fail) => {
|
|
888
|
-
self.am_broken = true;
|
|
889
|
-
let rur = if let Some(resp_chan) = fail.complete_resp_chan {
|
|
890
|
-
// Automatically fail the workflow task in the event we couldn't update machines
|
|
891
|
-
let fail_cause = if matches!(&fail.source, WFMachinesError::Nondeterminism(_)) {
|
|
892
|
-
WorkflowTaskFailedCause::NonDeterministicError
|
|
893
|
-
} else {
|
|
894
|
-
WorkflowTaskFailedCause::Unspecified
|
|
895
|
-
};
|
|
896
|
-
let wft_fail_str = format!("{:?}", fail.source);
|
|
897
|
-
self.failed_completion(
|
|
898
|
-
fail_cause,
|
|
899
|
-
fail.source.evict_reason(),
|
|
900
|
-
Failure::application_failure(wft_fail_str, false).into(),
|
|
901
|
-
Some(resp_chan),
|
|
902
|
-
)
|
|
903
|
-
} else {
|
|
904
|
-
warn!(error=?fail.source, "Error while updating workflow");
|
|
905
|
-
Some(ActivationOrAuto::AutoFail {
|
|
906
|
-
run_id: self.run_id().to_owned(),
|
|
907
|
-
machines_err: fail.source,
|
|
908
|
-
})
|
|
909
|
-
};
|
|
910
|
-
rur
|
|
911
|
-
}
|
|
912
|
-
}
|
|
913
|
-
}
|
|
914
|
-
|
|
915
|
-
/// Record that an activation has been handed out for this run.
///
/// Panics if there is already an outstanding activation — at most one may exist per run at
/// any time, and violating that means core logic is broken.
fn insert_outstanding_activation(&mut self, act: &ActivationOrAuto) {
    // Classify the outgoing activation so later logic knows what kind is outstanding.
    let act_type = match &act {
        ActivationOrAuto::LangActivation(act) | ActivationOrAuto::ReadyForQueries(act) => {
            if act.is_legacy_query() {
                OutstandingActivation::LegacyQuery
            } else {
                OutstandingActivation::Normal {
                    contains_eviction: act.eviction_index().is_some(),
                    num_jobs: act.jobs.len(),
                }
            }
        }
        ActivationOrAuto::Autocomplete { .. } | ActivationOrAuto::AutoFail { .. } => {
            OutstandingActivation::Autocomplete
        }
    };
    if let Some(old_act) = self.activation {
        // This is a panic because we have screwed up core logic if this is violated. It must be
        // upheld.
        panic!(
            "Attempted to insert a new outstanding activation {act:?}, but there already was \
             one outstanding: {old_act:?}"
        );
    }
    self.activation = Some(act_type);
}
|
|
941
|
-
|
|
942
|
-
/// Build the (not yet sent) response to an activation completion.
///
/// Decides whether the WFT should actually be completed server-side, whether a new WFT must
/// be forced, and packages commands/query responses accordingly. `due_to_heartbeat_timeout`
/// forces a new WFT since the server will be expecting one after a heartbeat.
fn prepare_complete_resp(
    &mut self,
    resp_chan: Option<oneshot::Sender<ActivationCompleteResult>>,
    data: CompletionDataForWFT,
    due_to_heartbeat_timeout: bool,
) -> FulfillableActivationComplete {
    let mut outgoing_cmds = self.wfm.get_server_commands();
    // An eviction-only activation must not produce commands; if it did, either the run is
    // already known-broken (wipe them) or it's an SDK bug (debug-panic).
    if data.activation_was_only_eviction && !outgoing_cmds.commands.is_empty() {
        if self.am_broken {
            // If we broke there could be commands in the pipe that we didn't get a chance to
            // handle properly during replay, just wipe them all out.
            outgoing_cmds.commands = vec![];
        } else {
            dbg_panic!(
                "There should not be any outgoing commands when preparing a completion response \
                 if the activation was only an eviction. This is an SDK bug."
            );
        }
    }

    let query_responses = data.query_responses;
    let has_query_responses = !query_responses.is_empty();
    // A pending query with no responses yet means we're replaying purely to answer a query.
    let is_query_playback = data.has_pending_query && !has_query_responses;
    let mut force_new_wft = due_to_heartbeat_timeout;

    // We only actually want to send commands back to the server if there are no more pending
    // activations and we are caught up on replay. We don't want to complete a wft if we already
    // saw the final event in the workflow, or if we are playing back for the express purpose of
    // fulfilling a query. If the activation we sent was *only* an eviction, don't send that
    // either.
    let should_respond = !(self.wfm.machines.has_pending_jobs()
        || outgoing_cmds.replaying
        || is_query_playback
        || data.activation_was_only_eviction);
    // If there are pending LA resolutions, and we're responding to a query here,
    // we want to make sure to force a new task, as otherwise once we tell lang about
    // the LA resolution there wouldn't be any task to reply to with the result of iterating
    // the workflow.
    if has_query_responses && self.wfm.machines.has_pending_la_resolutions() {
        force_new_wft = true;
    }

    let outcome = if should_respond || has_query_responses {
        ActivationCompleteOutcome::ReportWFTSuccess(ServerCommandsWithWorkflowInfo {
            task_token: data.task_token,
            action: ActivationAction::WftComplete {
                force_new_wft,
                commands: outgoing_cmds.commands,
                query_responses,
                sdk_metadata: self.wfm.machines.get_metadata_for_wft_complete(),
            },
        })
    } else {
        ActivationCompleteOutcome::DoNothing
    };
    FulfillableActivationComplete {
        result: ActivationCompleteResult {
            most_recently_processed_event: self.wfm.machines.last_processed_event as usize,
            outcome,
        },
        resp_chan,
    }
}
|
|
1005
|
-
|
|
1006
|
-
/// Pump some local activity requests into the sink, applying any immediate results to the
/// workflow machines.
fn sink_la_requests(
    &mut self,
    new_local_acts: Vec<LocalActRequest>,
) -> Result<(), WFMachinesError> {
    // The sink may resolve some requests synchronously (e.g. cancels of LAs that never
    // started — TODO confirm exact cases against sink implementation).
    let immediate_resolutions = self.local_activity_request_sink.sink_reqs(new_local_acts);
    if !immediate_resolutions.is_empty() {
        warn!("Immediate res: {:?}", &immediate_resolutions);
    }
    // Feed each immediate resolution back into the machines right away.
    for resolution in immediate_resolutions {
        self.wfm
            .notify_of_local_result(LocalResolution::LocalActivity(resolution))?;
    }
    Ok(())
}
|
|
1022
|
-
|
|
1023
|
-
fn reply_to_complete(
|
|
1024
|
-
&self,
|
|
1025
|
-
outcome: ActivationCompleteOutcome,
|
|
1026
|
-
chan: Option<oneshot::Sender<ActivationCompleteResult>>,
|
|
1027
|
-
) {
|
|
1028
|
-
if let Some(chan) = chan {
|
|
1029
|
-
chan.send(ActivationCompleteResult {
|
|
1030
|
-
most_recently_processed_event: self.most_recently_processed_event_number() as usize,
|
|
1031
|
-
outcome,
|
|
1032
|
-
})
|
|
1033
|
-
.expect("Rcv half of activation reply not dropped");
|
|
1034
|
-
}
|
|
1035
|
-
}
|
|
1036
|
-
|
|
1037
|
-
/// Returns true if the handle is currently processing a WFT which contains a legacy query.
|
|
1038
|
-
fn pending_work_is_legacy_query(&self) -> bool {
|
|
1039
|
-
// Either we know because there is a pending legacy query, or it's already been drained and
|
|
1040
|
-
// sent as an activation.
|
|
1041
|
-
matches!(self.activation, Some(OutstandingActivation::LegacyQuery))
|
|
1042
|
-
|| self
|
|
1043
|
-
.wft
|
|
1044
|
-
.as_ref()
|
|
1045
|
-
.map(|t| t.has_pending_legacy_query())
|
|
1046
|
-
.unwrap_or_default()
|
|
1047
|
-
}
|
|
1048
|
-
|
|
1049
|
-
/// Event id of the most recently processed history event for this run.
fn most_recently_processed_event_number(&self) -> i64 {
    self.wfm.machines.last_processed_event
}
|
|
1052
|
-
|
|
1053
|
-
fn activation_has_eviction(&mut self) -> bool {
|
|
1054
|
-
self.activation
|
|
1055
|
-
.map(OutstandingActivation::has_eviction)
|
|
1056
|
-
.unwrap_or_default()
|
|
1057
|
-
}
|
|
1058
|
-
|
|
1059
|
-
fn activation_has_only_eviction(&mut self) -> bool {
|
|
1060
|
-
self.activation
|
|
1061
|
-
.map(OutstandingActivation::has_only_eviction)
|
|
1062
|
-
.unwrap_or_default()
|
|
1063
|
-
}
|
|
1064
|
-
|
|
1065
|
-
fn run_id(&self) -> &str {
|
|
1066
|
-
&self.wfm.machines.run_id
|
|
1067
|
-
}
|
|
1068
|
-
}
|
|
1069
|
-
|
|
1070
|
-
/// Drains pending queries from the workflow task and appends them to the activation's jobs
fn put_queries_in_act(act: &mut WorkflowActivation, wft: &mut OutstandingTask) {
    // Nothing to do if there are no pending queries
    if wft.pending_queries.is_empty() {
        return;
    }

    let has_legacy = wft.has_pending_legacy_query();
    // Cannot dispatch legacy query if there are any other jobs - which can happen if, ex, a local
    // activity resolves while we've gotten a legacy query after heartbeating.
    if has_legacy && !act.jobs.is_empty() {
        return;
    }

    debug!(queries=?wft.pending_queries, "Dispatching queries");
    // Move every pending query out of the WFT and into the activation as query jobs.
    let query_jobs = wft
        .pending_queries
        .drain(..)
        .map(|q| workflow_activation_job::Variant::QueryWorkflow(q).into());
    act.jobs.extend(query_jobs);
}
|
|
1091
|
-
/// Ask the local-activity request sink to start a WFT heartbeat timeout for `run_id`.
///
/// Returns an [AbortHandle] the caller can use to cancel the timeout (e.g. when all local
/// activities finish before the deadline).
fn sink_heartbeat_timeout_start(
    run_id: String,
    sink: &dyn LocalActivityRequestSink,
    wft_start_time: Instant,
    wft_timeout: Duration,
) -> AbortHandle {
    // The heartbeat deadline is 80% of the WFT timeout
    let deadline = wft_start_time.add(wft_timeout.mul_f32(WFT_HEARTBEAT_TIMEOUT_FRACTION));
    let (abort_handle, abort_reg) = AbortHandle::new_pair();
    sink.sink_reqs(vec![LocalActRequest::StartHeartbeatTimeout {
        send_on_elapse: HeartbeatTimeoutMsg {
            run_id,
            // Propagate the current tracing span so the elapse message is correlated.
            span: Span::current(),
        },
        deadline,
        abort_reg,
    }]);
    abort_handle
}
|
|
1110
|
-
|
|
1111
|
-
/// If an activation completion needed to wait on LA completions (or heartbeat timeout) we use
/// this struct to store the data we need to finish the completion once that has happened
struct WaitingOnLAs {
    // WFT timeout of the task being waited on; used to size heartbeat timers.
    wft_timeout: Duration,
    /// If set, we are waiting for LAs to complete as part of a just-finished workflow activation.
    /// If unset, we already had a heartbeat timeout and got a new WFT without any new work while
    /// there are still incomplete LAs.
    completion_dat: Option<(
        CompletionDataForWFT,
        Option<oneshot::Sender<ActivationCompleteResult>>,
    )>,
    /// Can be used to abort heartbeat timeouts
    hb_timeout_handle: AbortHandle,
}
|
|
1125
|
-
/// The subset of an activation completion needed later to build the WFT completion reply.
#[derive(Debug)]
struct CompletionDataForWFT {
    // Task token of the WFT being completed.
    task_token: TaskToken,
    // Query results produced by lang during the activation.
    query_responses: Vec<QueryResult>,
    // True if the WFT still has a query pending (e.g. legacy query during replay).
    has_pending_query: bool,
    // True if the completed activation contained nothing but an eviction.
    activation_was_only_eviction: bool,
}
|
|
1132
|
-
|
|
1133
|
-
/// Manages an instance of a [WorkflowMachines], which is not thread-safe, as well as other data
/// associated with that specific workflow run.
struct WorkflowManager {
    // The state machines driving this run.
    machines: WorkflowMachines,
    /// Is always `Some` in normal operation. Optional to allow for unit testing with the test
    /// workflow driver, which does not need to complete activations the normal way.
    command_sink: Option<Sender<Vec<WFCommand>>>,
}
|
|
1141
|
-
|
|
1142
|
-
impl WorkflowManager {
    /// Create a new workflow manager given workflow history and execution info as would be found
    /// in [PollWorkflowTaskQueueResponse]
    fn new(basics: RunBasics) -> Self {
        // The bridge carries commands from lang into the machines via `command_sink`.
        let (wfb, cmd_sink) = WorkflowBridge::new();
        let state_machines = WorkflowMachines::new(basics, Box::new(wfb).into());
        Self {
            machines: state_machines,
            command_sink: Some(cmd_sink),
        }
    }

    // Test-only constructor: no command sink, so activations can't be completed normally.
    #[cfg(test)]
    const fn new_from_machines(workflow_machines: WorkflowMachines) -> Self {
        Self {
            machines: workflow_machines,
            command_sink: None,
        }
    }

    /// Given history that was just obtained from the server, pipe it into this workflow's machines.
    ///
    /// Should only be called when a workflow has caught up on replay (or is just beginning). It
    /// will return a workflow activation if one is needed.
    fn feed_history_from_server(&mut self, update: HistoryUpdate) -> Result<WorkflowActivation> {
        self.machines.new_history_from_server(update)?;
        self.get_next_activation()
    }

    /// Update the machines with some events from fetching another page of history. Does *not*
    /// attempt to pull the next activation, unlike [Self::feed_history_from_server].
    fn feed_history_from_new_page(&mut self, update: HistoryUpdate) -> Result<()> {
        self.machines.new_history_from_server(update)
    }

    /// Let this workflow know that something we've been waiting locally on has resolved, like a
    /// local activity or side effect
    ///
    /// Returns true if the resolution did anything. EX: If the activity is already canceled and
    /// used the TryCancel or Abandon modes, the resolution is uninteresting.
    fn notify_of_local_result(&mut self, resolved: LocalResolution) -> Result<bool> {
        self.machines.local_resolution(resolved)
    }

    /// Fetch the next workflow activation for this workflow if one is required. Doing so will apply
    /// the next unapplied workflow task if such a sequence exists in history we already know about.
    ///
    /// Callers may also need to call [get_server_commands] after this to issue any pending commands
    /// to the server.
    fn get_next_activation(&mut self) -> Result<WorkflowActivation> {
        // First check if there are already some pending jobs, which can be a result of replay.
        let activation = self.machines.get_wf_activation();
        if !activation.jobs.is_empty() {
            return Ok(activation);
        }

        // No jobs yet — apply the next WFT from known history and try again.
        self.machines.apply_next_wft_from_history()?;
        Ok(self.machines.get_wf_activation())
    }

    /// Returns true if machines are ready to apply the next WFT sequence, false if events will need
    /// to be fetched in order to create a complete update with the entire next WFT sequence.
    pub(crate) fn ready_to_apply_next_wft(&self) -> bool {
        self.machines.ready_to_apply_next_wft()
    }

    /// If there are no pending jobs for the workflow, apply the next workflow task and check
    /// again if there are any jobs. Importantly, does not *drain* jobs.
    ///
    /// Returns true if there are jobs (before or after applying the next WFT).
    fn apply_next_task_if_ready(&mut self) -> Result<bool> {
        if self.machines.has_pending_jobs() {
            return Ok(true);
        }
        loop {
            let consumed_events = self.machines.apply_next_wft_from_history()?;

            if consumed_events == 0 || !self.machines.replaying || self.machines.has_pending_jobs()
            {
                // Keep applying tasks while there are events, we are still replaying, and there are
                // no jobs
                break;
            }
        }
        Ok(self.machines.has_pending_jobs())
    }

    /// Typically called after [get_next_activation], use this to retrieve commands to be sent to
    /// the server which have been generated by the machines. Does *not* drain those commands.
    /// See [WorkflowMachines::get_commands].
    fn get_server_commands(&self) -> OutgoingServerCommands {
        OutgoingServerCommands {
            commands: self.machines.get_commands(),
            replaying: self.machines.replaying,
        }
    }

    /// Remove and return all queued local activities. Once this is called, they need to be
    /// dispatched for execution.
    fn drain_queued_local_activities(&mut self) -> Vec<LocalActRequest> {
        self.machines.drain_queued_local_activities()
    }

    /// Feed the workflow machines new commands issued by the executing workflow code, and iterate
    /// the machines.
    fn push_commands_and_iterate(&mut self, cmds: Vec<WFCommand>) -> Result<()> {
        if let Some(cs) = self.command_sink.as_mut() {
            cs.send(cmds).map_err(|_| {
                WFMachinesError::Fatal("Internal error buffering workflow commands".to_string())
            })?;
        }
        self.machines.iterate_machines()?;
        Ok(())
    }
}
|
|
1257
|
-
|
|
1258
|
-
/// A fully-prepared activation completion result paired with the channel (if any) over
/// which it should eventually be delivered.
#[derive(Debug)]
struct FulfillableActivationComplete {
    // The outcome to deliver to whoever is waiting on the completion.
    result: ActivationCompleteResult,
    // `None` when no caller is waiting for the reply.
    resp_chan: Option<oneshot::Sender<ActivationCompleteResult>>,
}
|
|
1263
|
-
impl FulfillableActivationComplete {
|
|
1264
|
-
fn fulfill(self) {
|
|
1265
|
-
if let Some(resp_chan) = self.resp_chan {
|
|
1266
|
-
let _ = resp_chan.send(self.result);
|
|
1267
|
-
}
|
|
1268
|
-
}
|
|
1269
|
-
}
|
|
1270
|
-
|
|
1271
|
-
/// Everything lang sent back when completing a workflow activation for this run.
#[derive(Debug)]
struct RunActivationCompletion {
    // Token of the WFT the activation belonged to.
    task_token: TaskToken,
    // When the WFT started; used to compute heartbeat deadlines.
    start_time: Instant,
    // Commands issued by workflow code during the activation.
    commands: Vec<WFCommand>,
    // True if the activation contained an eviction job.
    activation_was_eviction: bool,
    // True if the eviction was the activation's *only* job.
    activation_was_only_eviction: bool,
    // True if the WFT has a query that still needs answering.
    has_pending_query: bool,
    // Responses to queries answered during the activation.
    query_responses: Vec<QueryResult>,
    // SDK flags lang reported using during the activation.
    used_flags: Vec<u32>,
    /// Used to notify the worker when the completion is done processing and the completion can
    /// unblock. Must always be `Some` when initialized.
    resp_chan: Option<oneshot::Sender<ActivationCompleteResult>>,
}
|
|
1285
|
-
/// The result of an internal run update: either an activation to hand out, or a prepared
/// completion waiting to be fulfilled. Either payload may be absent.
#[derive(Debug, derive_more::From)]
enum ActOrFulfill {
    OutgoingAct(Option<ActivationOrAuto>),
    FulfillableComplete(Option<FulfillableActivationComplete>),
}
|
|
1290
|
-
|
|
1291
|
-
/// An error encountered while updating a run's state machines, optionally carrying the
/// response channel of the completion that triggered the update so it can still be answered.
#[derive(derive_more::DebugCustom)]
#[debug(fmt = "RunUpdateErr({source:?})")]
struct RunUpdateErr {
    // The underlying machines error.
    source: WFMachinesError,
    // Channel of the in-flight completion, if the error occurred while processing one.
    complete_resp_chan: Option<oneshot::Sender<ActivationCompleteResult>>,
}
|
|
1297
|
-
|
|
1298
|
-
impl From<WFMachinesError> for RunUpdateErr {
    /// Wrap a bare machines error with no associated completion channel.
    fn from(e: WFMachinesError) -> Self {
        Self {
            source: e,
            complete_resp_chan: None,
        }
    }
}
|