amit-temporalio 0.3.1-x86_64-linux-musl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.yardopts +2 -0
- data/Gemfile +23 -0
- data/Rakefile +101 -0
- data/lib/temporalio/activity/complete_async_error.rb +11 -0
- data/lib/temporalio/activity/context.rb +116 -0
- data/lib/temporalio/activity/definition.rb +189 -0
- data/lib/temporalio/activity/info.rb +64 -0
- data/lib/temporalio/activity.rb +12 -0
- data/lib/temporalio/api/activity/v1/message.rb +25 -0
- data/lib/temporalio/api/batch/v1/message.rb +31 -0
- data/lib/temporalio/api/cloud/account/v1/message.rb +28 -0
- data/lib/temporalio/api/cloud/cloudservice/v1/request_response.rb +126 -0
- data/lib/temporalio/api/cloud/cloudservice/v1/service.rb +25 -0
- data/lib/temporalio/api/cloud/cloudservice.rb +3 -0
- data/lib/temporalio/api/cloud/identity/v1/message.rb +41 -0
- data/lib/temporalio/api/cloud/namespace/v1/message.rb +42 -0
- data/lib/temporalio/api/cloud/nexus/v1/message.rb +31 -0
- data/lib/temporalio/api/cloud/operation/v1/message.rb +28 -0
- data/lib/temporalio/api/cloud/region/v1/message.rb +24 -0
- data/lib/temporalio/api/cloud/resource/v1/message.rb +23 -0
- data/lib/temporalio/api/cloud/sink/v1/message.rb +24 -0
- data/lib/temporalio/api/cloud/usage/v1/message.rb +31 -0
- data/lib/temporalio/api/command/v1/message.rb +46 -0
- data/lib/temporalio/api/common/v1/grpc_status.rb +23 -0
- data/lib/temporalio/api/common/v1/message.rb +47 -0
- data/lib/temporalio/api/enums/v1/batch_operation.rb +22 -0
- data/lib/temporalio/api/enums/v1/command_type.rb +21 -0
- data/lib/temporalio/api/enums/v1/common.rb +26 -0
- data/lib/temporalio/api/enums/v1/event_type.rb +21 -0
- data/lib/temporalio/api/enums/v1/failed_cause.rb +26 -0
- data/lib/temporalio/api/enums/v1/namespace.rb +23 -0
- data/lib/temporalio/api/enums/v1/query.rb +22 -0
- data/lib/temporalio/api/enums/v1/reset.rb +23 -0
- data/lib/temporalio/api/enums/v1/schedule.rb +21 -0
- data/lib/temporalio/api/enums/v1/task_queue.rb +25 -0
- data/lib/temporalio/api/enums/v1/update.rb +22 -0
- data/lib/temporalio/api/enums/v1/workflow.rb +30 -0
- data/lib/temporalio/api/errordetails/v1/message.rb +42 -0
- data/lib/temporalio/api/export/v1/message.rb +24 -0
- data/lib/temporalio/api/failure/v1/message.rb +35 -0
- data/lib/temporalio/api/filter/v1/message.rb +27 -0
- data/lib/temporalio/api/history/v1/message.rb +90 -0
- data/lib/temporalio/api/namespace/v1/message.rb +31 -0
- data/lib/temporalio/api/nexus/v1/message.rb +40 -0
- data/lib/temporalio/api/operatorservice/v1/request_response.rb +49 -0
- data/lib/temporalio/api/operatorservice/v1/service.rb +23 -0
- data/lib/temporalio/api/operatorservice.rb +3 -0
- data/lib/temporalio/api/payload_visitor.rb +1513 -0
- data/lib/temporalio/api/protocol/v1/message.rb +23 -0
- data/lib/temporalio/api/query/v1/message.rb +27 -0
- data/lib/temporalio/api/replication/v1/message.rb +26 -0
- data/lib/temporalio/api/schedule/v1/message.rb +43 -0
- data/lib/temporalio/api/sdk/v1/enhanced_stack_trace.rb +25 -0
- data/lib/temporalio/api/sdk/v1/task_complete_metadata.rb +21 -0
- data/lib/temporalio/api/sdk/v1/user_metadata.rb +23 -0
- data/lib/temporalio/api/sdk/v1/workflow_metadata.rb +23 -0
- data/lib/temporalio/api/taskqueue/v1/message.rb +45 -0
- data/lib/temporalio/api/testservice/v1/request_response.rb +31 -0
- data/lib/temporalio/api/testservice/v1/service.rb +23 -0
- data/lib/temporalio/api/update/v1/message.rb +33 -0
- data/lib/temporalio/api/version/v1/message.rb +26 -0
- data/lib/temporalio/api/workflow/v1/message.rb +43 -0
- data/lib/temporalio/api/workflowservice/v1/request_response.rb +204 -0
- data/lib/temporalio/api/workflowservice/v1/service.rb +23 -0
- data/lib/temporalio/api/workflowservice.rb +3 -0
- data/lib/temporalio/api.rb +14 -0
- data/lib/temporalio/cancellation.rb +170 -0
- data/lib/temporalio/client/activity_id_reference.rb +32 -0
- data/lib/temporalio/client/async_activity_handle.rb +85 -0
- data/lib/temporalio/client/connection/cloud_service.rb +726 -0
- data/lib/temporalio/client/connection/operator_service.rb +201 -0
- data/lib/temporalio/client/connection/service.rb +42 -0
- data/lib/temporalio/client/connection/test_service.rb +111 -0
- data/lib/temporalio/client/connection/workflow_service.rb +1041 -0
- data/lib/temporalio/client/connection.rb +316 -0
- data/lib/temporalio/client/interceptor.rb +416 -0
- data/lib/temporalio/client/schedule.rb +967 -0
- data/lib/temporalio/client/schedule_handle.rb +126 -0
- data/lib/temporalio/client/workflow_execution.rb +100 -0
- data/lib/temporalio/client/workflow_execution_count.rb +36 -0
- data/lib/temporalio/client/workflow_execution_status.rb +18 -0
- data/lib/temporalio/client/workflow_handle.rb +389 -0
- data/lib/temporalio/client/workflow_query_reject_condition.rb +14 -0
- data/lib/temporalio/client/workflow_update_handle.rb +65 -0
- data/lib/temporalio/client/workflow_update_wait_stage.rb +17 -0
- data/lib/temporalio/client.rb +484 -0
- data/lib/temporalio/common_enums.rb +41 -0
- data/lib/temporalio/converters/data_converter.rb +99 -0
- data/lib/temporalio/converters/failure_converter.rb +202 -0
- data/lib/temporalio/converters/payload_codec.rb +26 -0
- data/lib/temporalio/converters/payload_converter/binary_null.rb +34 -0
- data/lib/temporalio/converters/payload_converter/binary_plain.rb +35 -0
- data/lib/temporalio/converters/payload_converter/binary_protobuf.rb +42 -0
- data/lib/temporalio/converters/payload_converter/composite.rb +66 -0
- data/lib/temporalio/converters/payload_converter/encoding.rb +35 -0
- data/lib/temporalio/converters/payload_converter/json_plain.rb +44 -0
- data/lib/temporalio/converters/payload_converter/json_protobuf.rb +41 -0
- data/lib/temporalio/converters/payload_converter.rb +71 -0
- data/lib/temporalio/converters/raw_value.rb +20 -0
- data/lib/temporalio/converters.rb +9 -0
- data/lib/temporalio/error/failure.rb +219 -0
- data/lib/temporalio/error.rb +155 -0
- data/lib/temporalio/internal/bridge/3.2/temporalio_bridge.so +0 -0
- data/lib/temporalio/internal/bridge/3.3/temporalio_bridge.so +0 -0
- data/lib/temporalio/internal/bridge/3.4/temporalio_bridge.so +0 -0
- data/lib/temporalio/internal/bridge/api/activity_result/activity_result.rb +34 -0
- data/lib/temporalio/internal/bridge/api/activity_task/activity_task.rb +31 -0
- data/lib/temporalio/internal/bridge/api/child_workflow/child_workflow.rb +33 -0
- data/lib/temporalio/internal/bridge/api/common/common.rb +26 -0
- data/lib/temporalio/internal/bridge/api/core_interface.rb +40 -0
- data/lib/temporalio/internal/bridge/api/external_data/external_data.rb +27 -0
- data/lib/temporalio/internal/bridge/api/nexus/nexus.rb +33 -0
- data/lib/temporalio/internal/bridge/api/workflow_activation/workflow_activation.rb +56 -0
- data/lib/temporalio/internal/bridge/api/workflow_commands/workflow_commands.rb +57 -0
- data/lib/temporalio/internal/bridge/api/workflow_completion/workflow_completion.rb +30 -0
- data/lib/temporalio/internal/bridge/api.rb +3 -0
- data/lib/temporalio/internal/bridge/client.rb +95 -0
- data/lib/temporalio/internal/bridge/runtime.rb +53 -0
- data/lib/temporalio/internal/bridge/testing.rb +66 -0
- data/lib/temporalio/internal/bridge/worker.rb +85 -0
- data/lib/temporalio/internal/bridge.rb +36 -0
- data/lib/temporalio/internal/client/implementation.rb +700 -0
- data/lib/temporalio/internal/metric.rb +122 -0
- data/lib/temporalio/internal/proto_utils.rb +133 -0
- data/lib/temporalio/internal/worker/activity_worker.rb +376 -0
- data/lib/temporalio/internal/worker/multi_runner.rb +213 -0
- data/lib/temporalio/internal/worker/workflow_instance/child_workflow_handle.rb +54 -0
- data/lib/temporalio/internal/worker/workflow_instance/context.rb +333 -0
- data/lib/temporalio/internal/worker/workflow_instance/details.rb +44 -0
- data/lib/temporalio/internal/worker/workflow_instance/external_workflow_handle.rb +32 -0
- data/lib/temporalio/internal/worker/workflow_instance/externally_immutable_hash.rb +22 -0
- data/lib/temporalio/internal/worker/workflow_instance/handler_execution.rb +25 -0
- data/lib/temporalio/internal/worker/workflow_instance/handler_hash.rb +41 -0
- data/lib/temporalio/internal/worker/workflow_instance/illegal_call_tracer.rb +97 -0
- data/lib/temporalio/internal/worker/workflow_instance/inbound_implementation.rb +62 -0
- data/lib/temporalio/internal/worker/workflow_instance/outbound_implementation.rb +415 -0
- data/lib/temporalio/internal/worker/workflow_instance/replay_safe_logger.rb +37 -0
- data/lib/temporalio/internal/worker/workflow_instance/replay_safe_metric.rb +40 -0
- data/lib/temporalio/internal/worker/workflow_instance/scheduler.rb +163 -0
- data/lib/temporalio/internal/worker/workflow_instance.rb +730 -0
- data/lib/temporalio/internal/worker/workflow_worker.rb +236 -0
- data/lib/temporalio/internal.rb +7 -0
- data/lib/temporalio/metric.rb +109 -0
- data/lib/temporalio/retry_policy.rb +74 -0
- data/lib/temporalio/runtime.rb +314 -0
- data/lib/temporalio/scoped_logger.rb +96 -0
- data/lib/temporalio/search_attributes.rb +343 -0
- data/lib/temporalio/testing/activity_environment.rb +136 -0
- data/lib/temporalio/testing/workflow_environment.rb +383 -0
- data/lib/temporalio/testing.rb +10 -0
- data/lib/temporalio/version.rb +5 -0
- data/lib/temporalio/worker/activity_executor/fiber.rb +49 -0
- data/lib/temporalio/worker/activity_executor/thread_pool.rb +46 -0
- data/lib/temporalio/worker/activity_executor.rb +55 -0
- data/lib/temporalio/worker/interceptor.rb +362 -0
- data/lib/temporalio/worker/thread_pool.rb +237 -0
- data/lib/temporalio/worker/tuner.rb +189 -0
- data/lib/temporalio/worker/workflow_executor/thread_pool.rb +230 -0
- data/lib/temporalio/worker/workflow_executor.rb +26 -0
- data/lib/temporalio/worker/workflow_replayer.rb +343 -0
- data/lib/temporalio/worker.rb +569 -0
- data/lib/temporalio/workflow/activity_cancellation_type.rb +20 -0
- data/lib/temporalio/workflow/child_workflow_cancellation_type.rb +21 -0
- data/lib/temporalio/workflow/child_workflow_handle.rb +43 -0
- data/lib/temporalio/workflow/definition.rb +566 -0
- data/lib/temporalio/workflow/external_workflow_handle.rb +41 -0
- data/lib/temporalio/workflow/future.rb +151 -0
- data/lib/temporalio/workflow/handler_unfinished_policy.rb +13 -0
- data/lib/temporalio/workflow/info.rb +82 -0
- data/lib/temporalio/workflow/parent_close_policy.rb +19 -0
- data/lib/temporalio/workflow/update_info.rb +20 -0
- data/lib/temporalio/workflow.rb +529 -0
- data/lib/temporalio/workflow_history.rb +47 -0
- data/lib/temporalio.rb +11 -0
- data/temporalio.gemspec +28 -0
- metadata +238 -0

data/lib/temporalio/worker.rb
@@ -0,0 +1,569 @@
# frozen_string_literal: true

require 'temporalio/activity'
require 'temporalio/cancellation'
require 'temporalio/client'
require 'temporalio/error'
require 'temporalio/internal/bridge'
require 'temporalio/internal/bridge/worker'
require 'temporalio/internal/worker/activity_worker'
require 'temporalio/internal/worker/multi_runner'
require 'temporalio/internal/worker/workflow_instance'
require 'temporalio/internal/worker/workflow_worker'
require 'temporalio/worker/activity_executor'
require 'temporalio/worker/interceptor'
require 'temporalio/worker/thread_pool'
require 'temporalio/worker/tuner'
require 'temporalio/worker/workflow_executor'

module Temporalio
  # Worker for processing activities and workflows on a task queue.
  #
  # Workers are created for a task queue and the items they can run. Then {run} is used for running a single worker, or
  # {run_all} is used for a collection of workers. These can wait until a block is complete or a {Cancellation} is
  # canceled.
  class Worker
    Options = Data.define(
      :client,
      :task_queue,
      :activities,
      :workflows,
      :tuner,
      :activity_executors,
      :workflow_executor,
      :interceptors,
      :build_id,
      :identity,
      :logger,
      :max_cached_workflows,
      :max_concurrent_workflow_task_polls,
      :nonsticky_to_sticky_poll_ratio,
      :max_concurrent_activity_task_polls,
      :no_remote_activities,
      :sticky_queue_schedule_to_start_timeout,
      :max_heartbeat_throttle_interval,
      :default_heartbeat_throttle_interval,
      :max_activities_per_second,
      :max_task_queue_activities_per_second,
      :graceful_shutdown_period,
      :use_worker_versioning,
      :disable_eager_activity_execution,
      :illegal_workflow_calls,
      :workflow_failure_exception_types,
      :workflow_payload_codec_thread_pool,
      :debug_mode
    )

    # Options as returned from {options} for `**to_h` splat use in {initialize}. See {initialize} for details.
    class Options; end # rubocop:disable Lint/EmptyClass

    # @return [String] Memoized default build ID. This default value is built as a checksum of all of the loaded Ruby
    #   source files in `$LOADED_FEATURES`. Users may prefer to set the build ID to a better representation of the
    #   source.
    def self.default_build_id
      @default_build_id ||= _load_default_build_id
    end

    # @!visibility private
    def self._load_default_build_id
      # The goal is to get a hash of runtime code, both Temporal's and the
      # user's. After all options were explored, we have decided to default to
      # hashing all bytecode of required files. This means later/dynamic require
      # won't be accounted for because this is memoized. It also means the
      # tiniest code change will affect this, which is what we want since this
      # is meant to be a "binary checksum". We have chosen to use MD5 for speed,
      # similarity with other SDKs, and because security is not a factor.
      require 'digest'

      saw_bridge = false
      build_id = $LOADED_FEATURES.each_with_object(Digest::MD5.new) do |file, digest|
        saw_bridge = true if file.include?('temporalio_bridge.')
        digest.update(File.read(file)) if File.file?(file)
      end.hexdigest
      raise 'Temporal bridge library not in $LOADED_FEATURES, unable to calculate default build ID' unless saw_bridge

      build_id
    end

    # Run all workers until cancellation or optional block completes. When the cancellation or block is complete, the
    # workers are shut down. This will return the block result if everything successful or raise an error if not. See
    # {run} for details on how worker shutdown works.
    #
    # @param workers [Array<Worker>] Workers to run.
    # @param cancellation [Cancellation] Cancellation that can be canceled to shut down all workers.
    # @param shutdown_signals [Array] Signals to trap and cause worker shutdown.
    # @param raise_in_block_on_shutdown [Exception, nil] Exception to {::Thread.raise} or {::Fiber.raise} if a block is
    #   present and still running on shutdown. If nil, `raise` is not used.
    # @param wait_block_complete [Boolean] If block given and shutdown caused by something else (e.g. cancellation
    #   canceled), whether to wait on the block to complete before returning.
    # @yield Optional block. This will be run in a new background thread or fiber. Workers will shut down upon
    #   completion of this and, assuming no other failures, return/bubble success/exception of the block.
    # @return [Object] Return value of the block or nil of no block given.
    def self.run_all(
      *workers,
      cancellation: Cancellation.new,
      shutdown_signals: [],
      raise_in_block_on_shutdown: Error::CanceledError.new('Workers finished'),
      wait_block_complete: true,
      &block
    )
      # Confirm there is at least one and they are all workers
      raise ArgumentError, 'At least one worker required' if workers.empty?
      raise ArgumentError, 'Not all parameters are workers' unless workers.all? { |w| w.is_a?(Worker) }

      Internal::Bridge.assert_fiber_compatibility!

      # Start the multi runner
      runner = Internal::Worker::MultiRunner.new(workers:, shutdown_signals:)

      # Apply block
      runner.apply_thread_or_fiber_block(&block)

      # Reuse first worker logger
      logger = workers.first&.options&.logger or raise # Never nil

      # On cancel, initiate shutdown
      cancellation.add_cancel_callback do
        logger.info('Cancel invoked, beginning worker shutdown')
        runner.initiate_shutdown
      end

      # Poller loop, run until all pollers shut down
      first_error = nil
      block_result = nil
      loop do
        event = runner.next_event
        # TODO(cretz): Consider improving performance instead of this case statement
        case event
        when Internal::Worker::MultiRunner::Event::PollSuccess
          # Successful poll
          event.worker #: Worker
               ._on_poll_bytes(runner, event.worker_type, event.bytes)
        when Internal::Worker::MultiRunner::Event::PollFailure
          # Poll failure, this causes shutdown of all workers
          logger.error('Poll failure (beginning worker shutdown if not already occurring)')
          logger.error(event.error)
          first_error ||= event.error
          runner.initiate_shutdown
        when Internal::Worker::MultiRunner::Event::WorkflowActivationDecoded
          # Came back from a codec as decoded
          event.workflow_worker.handle_activation(runner:, activation: event.activation, decoded: true)
        when Internal::Worker::MultiRunner::Event::WorkflowActivationComplete
          # An activation is complete
          event.workflow_worker.handle_activation_complete(
            runner:,
            activation_completion: event.activation_completion,
            encoded: event.encoded,
            completion_complete_queue: event.completion_complete_queue
          )
        when Internal::Worker::MultiRunner::Event::WorkflowActivationCompletionComplete
          # Completion complete, only need to log error if it occurs here
          if event.error
            logger.error("Activation completion failed to record on run ID #{event.run_id}")
            logger.error(event.error)
          end
        when Internal::Worker::MultiRunner::Event::PollerShutDown
          # Individual poller shut down. Nothing to do here until we support
          # worker status or something.
        when Internal::Worker::MultiRunner::Event::AllPollersShutDown
          # This is where we break the loop, no more polling can happen
          break
        when Internal::Worker::MultiRunner::Event::BlockSuccess
          logger.info('Block completed, beginning worker shutdown')
          block_result = event
          runner.initiate_shutdown
        when Internal::Worker::MultiRunner::Event::BlockFailure
          logger.error('Block failure (beginning worker shutdown)')
          logger.error(event.error)
          block_result = event
          first_error ||= event.error
          runner.initiate_shutdown
        when Internal::Worker::MultiRunner::Event::ShutdownSignalReceived
          logger.info('Signal received, beginning worker shutdown')
          runner.initiate_shutdown
        else
          raise "Unexpected event: #{event}"
        end
      end

      # Now that all pollers have stopped, let's wait for all to complete
      begin
        runner.wait_complete_and_finalize_shutdown
      rescue StandardError => e
        logger.warn('Failed waiting and finalizing')
        logger.warn(e)
      end

      # If there was a block but not a result yet, we want to raise if that is
      # wanted, and wait if that is wanted
      if block_given? && block_result.nil?
        runner.raise_in_thread_or_fiber_block(raise_in_block_on_shutdown) unless raise_in_block_on_shutdown.nil?
        if wait_block_complete
          event = runner.next_event
          case event
          when Internal::Worker::MultiRunner::Event::BlockSuccess
            logger.info('Block completed (after worker shutdown)')
            block_result = event
          when Internal::Worker::MultiRunner::Event::BlockFailure
            logger.error('Block failure (after worker shutdown)')
            logger.error(event.error)
            block_result = event
            first_error ||= event.error
          when Internal::Worker::MultiRunner::Event::ShutdownSignalReceived
            # Do nothing, waiting for block
          else
            raise "Unexpected event: #{event}"
          end
        end
      end

      # Notify each worker we're done with it
      workers.each(&:_on_shutdown_complete)

      # If there was an shutdown-causing error, we raise that
      if !first_error.nil?
        raise first_error
      elsif block_result.is_a?(Internal::Worker::MultiRunner::Event::BlockSuccess)
        block_result.result
      end
    end

    # @return [Hash<String, [:all, Array<Symbol>]>] Default, immutable set illegal calls used for the
    #   `illegal_workflow_calls` worker option. See the documentation of that option for more details.
    def self.default_illegal_workflow_calls
      @default_illegal_workflow_calls ||= begin
        hash = {
          'BasicSocket' => :all,
          'Date' => %i[initialize today],
          'DateTime' => %i[initialize now],
          'Dir' => :all,
          'Fiber' => [:set_scheduler],
          'File' => :all,
          'FileTest' => :all,
          'FileUtils' => :all,
          'Find' => :all,
          'GC' => :all,
          'IO' => [
            :read
            # Intentionally leaving out write so puts will work. We don't want to add heavy logic replacing stdout or
            # trying to derive whether it's file vs stdout write.
            #:write
          ],
          'Kernel' => %i[abort at_exit autoload autoload? eval exec exit fork gets load open rand readline readlines
                         spawn srand system test trap],
          'Net::HTTP' => :all,
          'Pathname' => :all,
          # TODO(cretz): Investigate why clock_gettime called from Timeout thread affects this code at all. Stack trace
          # test executing activities inside a timeout will fail if clock_gettime is blocked.
          'Process' => %i[abort argv0 daemon detach exec exit exit! fork kill setpriority setproctitle setrlimit setsid
                          spawn times wait wait2 waitall warmup],
          # TODO(cretz): Allow Ractor.current since exception formatting in error_highlight references it
          # 'Ractor' => :all,
          'Random::Base' => [:initialize],
          'Resolv' => :all,
          'SecureRandom' => :all,
          'Signal' => :all,
          'Socket' => :all,
          'Tempfile' => :all,
          'Thread' => %i[abort_on_exception= exit fork handle_interrupt ignore_deadlock= kill new pass
                         pending_interrupt? report_on_exception= start stop initialize join name= priority= raise run
                         terminate thread_variable_set wakeup],
          'Time' => %i[initialize now]
        } #: Hash[String, :all | Array[Symbol]]
        hash.each_value(&:freeze)
        hash.freeze
      end
    end

    # @return [Options] Options for this worker which has the same attributes as {initialize}.
    attr_reader :options

    # Create a new worker. At least one activity or workflow must be present.
    #
    # @param client [Client] Client for this worker.
    # @param task_queue [String] Task queue for this worker.
    # @param activities [Array<Activity::Definition, Class<Activity::Definition>, Activity::Definition::Info>]
    #   Activities for this worker.
    # @param workflows [Array<Class<Workflow::Definition>>] Workflows for this worker.
    # @param tuner [Tuner] Tuner that controls the amount of concurrent activities/workflows that run at a time.
    # @param activity_executors [Hash<Symbol, Worker::ActivityExecutor>] Executors that activities can run within.
    # @param workflow_executor [WorkflowExecutor] Workflow executor that workflow tasks run within. This must be a
    #   {WorkflowExecutor::ThreadPool} currently.
    # @param interceptors [Array<Interceptor::Activity, Interceptor::Workflow>] Interceptors specific to this worker.
    #   Note, interceptors set on the client that include the {Interceptor::Activity} or {Interceptor::Workflow} module
    #   are automatically included here, so no need to specify them again.
    # @param build_id [String] Unique identifier for the current runtime. This is best set as a unique value
    #   representing all code and should change only when code does. This can be something like a git commit hash. If
    #   unset, default is hash of known Ruby code.
    # @param identity [String, nil] Override the identity for this worker. If unset, client identity is used.
    # @param logger [Logger] Logger to override client logger with. Default is the client logger.
    # @param max_cached_workflows [Integer] Number of workflows held in cache for use by sticky task queue. If set to 0,
    #   workflow caching and sticky queuing are disabled.
    # @param max_concurrent_workflow_task_polls [Integer] Maximum number of concurrent poll workflow task requests we
    #   will perform at a time on this worker's task queue.
    # @param nonsticky_to_sticky_poll_ratio [Float] `max_concurrent_workflow_task_polls` * this number = the number of
    #   max pollers that will be allowed for the nonsticky queue when sticky tasks are enabled. If both defaults are
    #   used, the sticky queue will allow 4 max pollers while the nonsticky queue will allow one. The minimum for either
    #   poller is 1, so if `max_concurrent_workflow_task_polls` is 1 and sticky queues are enabled, there will be 2
    #   concurrent polls.
    # @param max_concurrent_activity_task_polls [Integer] Maximum number of concurrent poll activity task requests we
    #   will perform at a time on this worker's task queue.
    # @param no_remote_activities [Boolean] If true, this worker will only handle workflow tasks and local activities,
    #   it will not poll for activity tasks.
    # @param sticky_queue_schedule_to_start_timeout [Float] How long a workflow task is allowed to sit on the sticky
    #   queue before it is timed out and moved to the non-sticky queue where it may be picked up by any worker.
    # @param max_heartbeat_throttle_interval [Float] Longest interval for throttling activity heartbeats.
    # @param default_heartbeat_throttle_interval [Float] Default interval for throttling activity heartbeats in case
    #   per-activity heartbeat timeout is unset. Otherwise, it's the per-activity heartbeat timeout * 0.8.
    # @param max_activities_per_second [Float, nil] Limits the number of activities per second that this worker will
    #   process. The worker will not poll for new activities if by doing so it might receive and execute an activity
    #   which would cause it to exceed this limit.
    # @param max_task_queue_activities_per_second [Float, nil] Sets the maximum number of activities per second the task
    #   queue will dispatch, controlled server-side. Note that this only takes effect upon an activity poll request. If
    #   multiple workers on the same queue have different values set, they will thrash with the last poller winning.
    # @param graceful_shutdown_period [Float] Amount of time after shutdown is called that activities are given to
    #   complete before their tasks are canceled.
    # @param use_worker_versioning [Boolean] If true, the `build_id` argument must be specified, and this worker opts
    #   into the worker versioning feature. This ensures it only receives workflow tasks for workflows which it claims
    #   to be compatible with. For more information, see https://docs.temporal.io/workers#worker-versioning.
    # @param disable_eager_activity_execution [Boolean] If true, disables eager activity execution. Eager activity
    #   execution is an optimization on some servers that sends activities back to the same worker as the calling
    #   workflow if they can run there. This should be set to true for `max_task_queue_activities_per_second` to work
    #   and in a future version of this API may be implied as such (i.e. this setting will be ignored if that setting is
    #   set).
    # @param illegal_workflow_calls [Hash<String, [:all, Array<Symbol>]>] Set of illegal workflow calls that are
    #   considered unsafe/non-deterministic and will raise if seen. The key of the hash is the fully qualified string
    #   class name (no leading `::`). The value is either `:all` which means any use of the class, or an array of
    #   symbols for methods on the class that cannot be used. The methods refer to either instance or class methods,
    #   there is no way to differentiate at this time.
    # @param workflow_failure_exception_types [Array<Class<Exception>>] Workflow failure exception types. This is the
    #   set of exception types that, if a workflow-thrown exception extends, will cause the workflow/update to fail
    #   instead of suspending the workflow via task failure. These are applied in addition to the
    #   `workflow_failure_exception_type` on the workflow definition class itself. If {::Exception} is set, it
    #   effectively will fail a workflow/update in all user exception cases.
    # @param workflow_payload_codec_thread_pool [ThreadPool, nil] Thread pool to run payload codec encode/decode within.
    #   This is required if a payload codec exists and the worker is not fiber based. Codecs can potentially block
    #   execution which is why they need to be run in the background.
    # @param debug_mode [Boolean] If true, deadlock detection is disabled. Deadlock detection will fail workflow tasks
    #   if they block the thread for too long. This defaults to true if the `TEMPORAL_DEBUG` environment variable is
    #   `true` or `1`.
    def initialize(
      client:,
      task_queue:,
      activities: [],
      workflows: [],
      tuner: Tuner.create_fixed,
      activity_executors: ActivityExecutor.defaults,
      workflow_executor: WorkflowExecutor::ThreadPool.default,
      interceptors: [],
      build_id: Worker.default_build_id,
      identity: nil,
      logger: client.options.logger,
      max_cached_workflows: 1000,
      max_concurrent_workflow_task_polls: 5,
      nonsticky_to_sticky_poll_ratio: 0.2,
      max_concurrent_activity_task_polls: 5,
      no_remote_activities: false,
      sticky_queue_schedule_to_start_timeout: 10,
      max_heartbeat_throttle_interval: 60,
      default_heartbeat_throttle_interval: 30,
      max_activities_per_second: nil,
      max_task_queue_activities_per_second: nil,
      graceful_shutdown_period: 0,
      use_worker_versioning: false,
      disable_eager_activity_execution: false,
      illegal_workflow_calls: Worker.default_illegal_workflow_calls,
      workflow_failure_exception_types: [],
      workflow_payload_codec_thread_pool: nil,
      debug_mode: %w[true 1].include?(ENV['TEMPORAL_DEBUG'].to_s.downcase)
    )
      raise ArgumentError, 'Must have at least one activity or workflow' if activities.empty? && workflows.empty?

      @options = Options.new(
        client:,
        task_queue:,
        activities:,
        workflows:,
        tuner:,
        activity_executors:,
        workflow_executor:,
        interceptors:,
        build_id:,
        identity:,
        logger:,
        max_cached_workflows:,
        max_concurrent_workflow_task_polls:,
        nonsticky_to_sticky_poll_ratio:,
        max_concurrent_activity_task_polls:,
        no_remote_activities:,
        sticky_queue_schedule_to_start_timeout:,
        max_heartbeat_throttle_interval:,
        default_heartbeat_throttle_interval:,
        max_activities_per_second:,
        max_task_queue_activities_per_second:,
        graceful_shutdown_period:,
        use_worker_versioning:,
        disable_eager_activity_execution:,
        illegal_workflow_calls:,
        workflow_failure_exception_types:,
        workflow_payload_codec_thread_pool:,
        debug_mode:
      ).freeze

      # Preload workflow definitions and some workflow settings for the bridge
      workflow_definitions = Internal::Worker::WorkflowWorker.workflow_definitions(workflows)
      nondeterminism_as_workflow_fail, nondeterminism_as_workflow_fail_for_types =
        Internal::Worker::WorkflowWorker.bridge_workflow_failure_exception_type_options(
          workflow_failure_exception_types:, workflow_definitions:
        )

      # Create the bridge worker
      @bridge_worker = Internal::Bridge::Worker.new(
        client.connection._core_client,
        Internal::Bridge::Worker::Options.new(
          activity: !activities.empty?,
          workflow: !workflows.empty?,
          namespace: client.namespace,
          task_queue:,
          tuner: tuner._to_bridge_options,
          build_id:,
          identity_override: identity,
          max_cached_workflows:,
          max_concurrent_workflow_task_polls:,
          nonsticky_to_sticky_poll_ratio:,
          max_concurrent_activity_task_polls:,
          # For shutdown to work properly, we must disable remote activities
          # ourselves if there are no activities
          no_remote_activities: no_remote_activities || activities.empty?,
          sticky_queue_schedule_to_start_timeout:,
          max_heartbeat_throttle_interval:,
          default_heartbeat_throttle_interval:,
          max_worker_activities_per_second: max_activities_per_second,
          max_task_queue_activities_per_second:,
          graceful_shutdown_period:,
          use_worker_versioning:,
          nondeterminism_as_workflow_fail:,
          nondeterminism_as_workflow_fail_for_types:
        )
      )

      # Collect interceptors from client and params
      @activity_interceptors = (client.options.interceptors + interceptors).select do |i|
        i.is_a?(Interceptor::Activity)
      end
      @workflow_interceptors = (client.options.interceptors + interceptors).select do |i|
        i.is_a?(Interceptor::Workflow)
      end

      # Cancellation for the whole worker
      @worker_shutdown_cancellation = Cancellation.new

      # Create workers
      unless activities.empty?
        @activity_worker = Internal::Worker::ActivityWorker.new(worker: self,
                                                                bridge_worker: @bridge_worker)
      end
      unless workflows.empty?
        @workflow_worker = Internal::Worker::WorkflowWorker.new(
          bridge_worker: @bridge_worker,
          namespace: client.namespace,
          task_queue:,
          workflow_definitions:,
          workflow_executor:,
          logger:,
          data_converter: client.data_converter,
          metric_meter: client.connection.options.runtime.metric_meter,
          workflow_interceptors: @workflow_interceptors,
          disable_eager_activity_execution:,
          illegal_workflow_calls:,
          workflow_failure_exception_types:,
          workflow_payload_codec_thread_pool:,
          debug_mode:
        )
      end

      # Validate worker
      @bridge_worker.validate
    end

    # @return [String] Task queue set on the worker options.
    def task_queue
      @options.task_queue
    end

    # Run this worker until cancellation or optional block completes. When the cancellation or block is complete, the
    # worker is shut down. This will return the block result if everything successful or raise an error if not.
    #
    # Upon shutdown (either via cancellation, block completion, or worker fatal error), the worker immediately stops
    # accepting new work. Then, after an optional grace period, all activities are canceled. This call then waits for
    # every activity and workflow task to complete before returning.
    #
    # @param cancellation [Cancellation] Cancellation that can be canceled to shut down this worker.
    # @param shutdown_signals [Array] Signals to trap and cause worker shutdown.
    # @param raise_in_block_on_shutdown [Exception, nil] Exception to {::Thread.raise} or {::Fiber.raise} if a block is
    #   present and still running on shutdown. If nil, `raise` is not used.
    # @param wait_block_complete [Boolean] If block given and shutdown caused by something else (e.g. cancellation
    #   canceled), whether to wait on the block to complete before returning.
    # @yield Optional block. This will be run in a new background thread or fiber. Worker will shut down upon completion
    #   of this and, assuming no other failures, return/bubble success/exception of the block.
    # @return [Object] Return value of the block or nil of no block given.
    def run(
      cancellation: Cancellation.new,
      shutdown_signals: [],
      raise_in_block_on_shutdown: Error::CanceledError.new('Workers finished'),
      wait_block_complete: true,
      &block
    )
      Worker.run_all(self, cancellation:, shutdown_signals:, raise_in_block_on_shutdown:, wait_block_complete:, &block)
    end

    # @!visibility private
    def _worker_shutdown_cancellation
      @worker_shutdown_cancellation
    end

    # @!visibility private
    def _initiate_shutdown
      _bridge_worker.initiate_shutdown
      _, cancel_proc = _worker_shutdown_cancellation
      cancel_proc.call
    end

    # @!visibility private
    def _wait_all_complete
      @activity_worker&.wait_all_complete
    end

    # @!visibility private
    def _bridge_worker
      @bridge_worker
    end

    # @!visibility private
    def _activity_interceptors
      @activity_interceptors
    end

    # @!visibility private
    def _on_poll_bytes(runner, worker_type, bytes)
      case worker_type
      when :activity
        @activity_worker.handle_task(Internal::Bridge::Api::ActivityTask::ActivityTask.decode(bytes))
      when :workflow
        @workflow_worker.handle_activation(
          runner:,
          activation: Internal::Bridge::Api::WorkflowActivation::WorkflowActivation.decode(bytes),
          decoded: false
        )
      else
        raise "Unrecognized worker type #{worker_type}"
      end
    end

    # @!visibility private
    def _on_shutdown_complete
      @workflow_worker&.on_shutdown_complete
      @workflow_worker = nil
    end
  end
end
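
A minimal usage sketch of the worker API documented above. The activity and workflow classes, task queue name, and the client connection target/namespace are placeholders, and trapping a signal via `shutdown_signals` is only one of the documented ways to stop the worker (a `Cancellation` or a block works too):

# Hypothetical usage sketch; MyActivity and MyWorkflow are user-defined placeholders.
client = Temporalio::Client.connect('localhost:7233', 'my-namespace')

worker = Temporalio::Worker.new(
  client: client,
  task_queue: 'my-task-queue',
  activities: [MyActivity],   # at least one activity or workflow is required
  workflows: [MyWorkflow]
)

# Runs until SIGINT is received, then performs the graceful shutdown described in {run}
worker.run(shutdown_signals: ['SIGINT'])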

data/lib/temporalio/workflow/activity_cancellation_type.rb
@@ -0,0 +1,20 @@
# frozen_string_literal: true

require 'temporalio/internal/bridge/api'

module Temporalio
  module Workflow
    # Cancellation types for activities.
    module ActivityCancellationType
      # Initiate a cancellation request and immediately report cancellation to the workflow.
      TRY_CANCEL = Internal::Bridge::Api::WorkflowCommands::ActivityCancellationType::TRY_CANCEL
      # Wait for activity cancellation completion. Note that activity must heartbeat to receive a cancellation
      # notification. This can block the cancellation for a long time if activity doesn't heartbeat or chooses to ignore
      # the cancellation request.
      WAIT_CANCELLATION_COMPLETED =
        Internal::Bridge::Api::WorkflowCommands::ActivityCancellationType::WAIT_CANCELLATION_COMPLETED
      # Do not request cancellation of the activity and immediately report cancellation to the workflow.
      ABANDON = Internal::Bridge::Api::WorkflowCommands::ActivityCancellationType::ABANDON
    end
  end
end
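
These constants are intended to be passed when scheduling an activity from workflow code. A hedged illustration follows; it assumes `Temporalio::Workflow.execute_activity` (defined in workflow.rb, not shown in this excerpt) accepts a `cancellation_type:` keyword, and `MyActivity` is a placeholder:

# Hypothetical sketch; the exact execute_activity keywords live in workflow.rb.
Temporalio::Workflow.execute_activity(
  MyActivity,
  start_to_close_timeout: 30,
  cancellation_type: Temporalio::Workflow::ActivityCancellationType::WAIT_CANCELLATION_COMPLETED
)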

data/lib/temporalio/workflow/child_workflow_cancellation_type.rb
@@ -0,0 +1,21 @@
# frozen_string_literal: true

require 'temporalio/internal/bridge/api'

module Temporalio
  module Workflow
    # Cancellation types for child workflows.
    module ChildWorkflowCancellationType
      # Do not request cancellation of the child workflow if already scheduled.
      ABANDON = Internal::Bridge::Api::ChildWorkflow::ChildWorkflowCancellationType::ABANDON
      # Initiate a cancellation request and immediately report cancellation to the parent.
      TRY_CANCEL = Internal::Bridge::Api::ChildWorkflow::ChildWorkflowCancellationType::TRY_CANCEL
      # Wait for child cancellation completion.
      WAIT_CANCELLATION_COMPLETED =
        Internal::Bridge::Api::ChildWorkflow::ChildWorkflowCancellationType::WAIT_CANCELLATION_COMPLETED
      # Request cancellation of the child and wait for confirmation that the request was received.
      WAIT_CANCELLATION_REQUESTED =
        Internal::Bridge::Api::ChildWorkflow::ChildWorkflowCancellationType::WAIT_CANCELLATION_REQUESTED
    end
  end
end

data/lib/temporalio/workflow/child_workflow_handle.rb
@@ -0,0 +1,43 @@
# frozen_string_literal: true

module Temporalio
  module Workflow
    # Handle for interacting with a child workflow.
    #
    # This is created via {Workflow.start_child_workflow}, it is never instantiated directly.
    class ChildWorkflowHandle
      # @!visibility private
      def initialize
        raise NotImplementedError, 'Cannot instantiate a child handle directly'
      end

      # @return [String] ID for the workflow.
      def id
        raise NotImplementedError
      end

      # @return [String] Run ID for the workflow.
      def first_execution_run_id
        raise NotImplementedError
      end

      # Wait for the result.
      #
      # @return [Object] Result of the child workflow.
      #
      # @raise [Error::ChildWorkflowError] Workflow failed with +cause+ as the cause.
      def result
        raise NotImplementedError
      end

      # Signal the child workflow.
      #
      # @param signal [Workflow::Definition::Signal, Symbol, String] Signal definition or name.
      # @param args [Array<Object>] Signal args.
      # @param cancellation [Cancellation] Cancellation for canceling the signalling.
      def signal(signal, *args, cancellation: Workflow.cancellation)
        raise NotImplementedError
      end
    end
  end
end
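
A hedged sketch tying the two child-workflow pieces above together. It assumes `Temporalio::Workflow.start_child_workflow` (referenced in the class comment, defined in workflow.rb) accepts a `cancellation_type:` keyword; `MyChildWorkflow` and `:my_signal` are placeholders:

# Hypothetical sketch; start_child_workflow options live in workflow.rb.
handle = Temporalio::Workflow.start_child_workflow(
  MyChildWorkflow, 'some-arg',
  cancellation_type: Temporalio::Workflow::ChildWorkflowCancellationType::TRY_CANCEL
)
handle.signal(:my_signal, 'payload') # signal definition or name, per the docs above
result = handle.result               # raises Error::ChildWorkflowError if the child fails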