temporalio 0.2.0 → 0.3.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/.yardopts +2 -0
- data/Cargo.lock +659 -370
- data/Cargo.toml +2 -2
- data/Gemfile +3 -3
- data/README.md +589 -47
- data/Rakefile +10 -296
- data/ext/Cargo.toml +1 -0
- data/lib/temporalio/activity/complete_async_error.rb +1 -1
- data/lib/temporalio/activity/context.rb +5 -2
- data/lib/temporalio/activity/definition.rb +163 -65
- data/lib/temporalio/activity/info.rb +22 -21
- data/lib/temporalio/activity.rb +2 -59
- data/lib/temporalio/api/activity/v1/message.rb +25 -0
- data/lib/temporalio/api/cloud/account/v1/message.rb +28 -0
- data/lib/temporalio/api/cloud/cloudservice/v1/request_response.rb +34 -1
- data/lib/temporalio/api/cloud/cloudservice/v1/service.rb +1 -1
- data/lib/temporalio/api/cloud/identity/v1/message.rb +6 -1
- data/lib/temporalio/api/cloud/namespace/v1/message.rb +8 -1
- data/lib/temporalio/api/cloud/nexus/v1/message.rb +31 -0
- data/lib/temporalio/api/cloud/operation/v1/message.rb +2 -1
- data/lib/temporalio/api/cloud/region/v1/message.rb +2 -1
- data/lib/temporalio/api/cloud/resource/v1/message.rb +23 -0
- data/lib/temporalio/api/cloud/sink/v1/message.rb +24 -0
- data/lib/temporalio/api/cloud/usage/v1/message.rb +31 -0
- data/lib/temporalio/api/common/v1/message.rb +7 -1
- data/lib/temporalio/api/enums/v1/event_type.rb +1 -1
- data/lib/temporalio/api/enums/v1/failed_cause.rb +1 -1
- data/lib/temporalio/api/enums/v1/reset.rb +1 -1
- data/lib/temporalio/api/history/v1/message.rb +1 -1
- data/lib/temporalio/api/nexus/v1/message.rb +2 -2
- data/lib/temporalio/api/operatorservice/v1/service.rb +1 -1
- data/lib/temporalio/api/payload_visitor.rb +1513 -0
- data/lib/temporalio/api/schedule/v1/message.rb +2 -1
- data/lib/temporalio/api/testservice/v1/request_response.rb +31 -0
- data/lib/temporalio/api/testservice/v1/service.rb +23 -0
- data/lib/temporalio/api/workflow/v1/message.rb +1 -1
- data/lib/temporalio/api/workflowservice/v1/request_response.rb +17 -2
- data/lib/temporalio/api/workflowservice/v1/service.rb +1 -1
- data/lib/temporalio/api.rb +1 -0
- data/lib/temporalio/cancellation.rb +34 -14
- data/lib/temporalio/client/async_activity_handle.rb +12 -37
- data/lib/temporalio/client/connection/cloud_service.rb +309 -231
- data/lib/temporalio/client/connection/operator_service.rb +36 -84
- data/lib/temporalio/client/connection/service.rb +6 -5
- data/lib/temporalio/client/connection/test_service.rb +111 -0
- data/lib/temporalio/client/connection/workflow_service.rb +264 -441
- data/lib/temporalio/client/connection.rb +90 -44
- data/lib/temporalio/client/interceptor.rb +160 -60
- data/lib/temporalio/client/schedule.rb +967 -0
- data/lib/temporalio/client/schedule_handle.rb +126 -0
- data/lib/temporalio/client/workflow_execution.rb +7 -10
- data/lib/temporalio/client/workflow_handle.rb +38 -95
- data/lib/temporalio/client/workflow_update_handle.rb +3 -5
- data/lib/temporalio/client.rb +122 -42
- data/lib/temporalio/common_enums.rb +17 -0
- data/lib/temporalio/converters/data_converter.rb +4 -7
- data/lib/temporalio/converters/failure_converter.rb +5 -3
- data/lib/temporalio/converters/payload_converter/composite.rb +4 -0
- data/lib/temporalio/converters/payload_converter.rb +6 -8
- data/lib/temporalio/converters/raw_value.rb +20 -0
- data/lib/temporalio/error/failure.rb +1 -1
- data/lib/temporalio/error.rb +10 -2
- data/lib/temporalio/internal/bridge/api/core_interface.rb +5 -1
- data/lib/temporalio/internal/bridge/api/nexus/nexus.rb +33 -0
- data/lib/temporalio/internal/bridge/api/workflow_activation/workflow_activation.rb +5 -1
- data/lib/temporalio/internal/bridge/api/workflow_commands/workflow_commands.rb +4 -1
- data/lib/temporalio/internal/bridge/client.rb +11 -6
- data/lib/temporalio/internal/bridge/testing.rb +20 -0
- data/lib/temporalio/internal/bridge/worker.rb +2 -0
- data/lib/temporalio/internal/bridge.rb +1 -1
- data/lib/temporalio/internal/client/implementation.rb +245 -70
- data/lib/temporalio/internal/metric.rb +122 -0
- data/lib/temporalio/internal/proto_utils.rb +86 -7
- data/lib/temporalio/internal/worker/activity_worker.rb +52 -24
- data/lib/temporalio/internal/worker/multi_runner.rb +51 -7
- data/lib/temporalio/internal/worker/workflow_instance/child_workflow_handle.rb +54 -0
- data/lib/temporalio/internal/worker/workflow_instance/context.rb +329 -0
- data/lib/temporalio/internal/worker/workflow_instance/details.rb +44 -0
- data/lib/temporalio/internal/worker/workflow_instance/external_workflow_handle.rb +32 -0
- data/lib/temporalio/internal/worker/workflow_instance/externally_immutable_hash.rb +22 -0
- data/lib/temporalio/internal/worker/workflow_instance/handler_execution.rb +25 -0
- data/lib/temporalio/internal/worker/workflow_instance/handler_hash.rb +41 -0
- data/lib/temporalio/internal/worker/workflow_instance/illegal_call_tracer.rb +97 -0
- data/lib/temporalio/internal/worker/workflow_instance/inbound_implementation.rb +62 -0
- data/lib/temporalio/internal/worker/workflow_instance/outbound_implementation.rb +415 -0
- data/lib/temporalio/internal/worker/workflow_instance/replay_safe_logger.rb +37 -0
- data/lib/temporalio/internal/worker/workflow_instance/replay_safe_metric.rb +40 -0
- data/lib/temporalio/internal/worker/workflow_instance/scheduler.rb +163 -0
- data/lib/temporalio/internal/worker/workflow_instance.rb +730 -0
- data/lib/temporalio/internal/worker/workflow_worker.rb +196 -0
- data/lib/temporalio/metric.rb +109 -0
- data/lib/temporalio/retry_policy.rb +37 -14
- data/lib/temporalio/runtime.rb +118 -75
- data/lib/temporalio/search_attributes.rb +80 -37
- data/lib/temporalio/testing/activity_environment.rb +2 -2
- data/lib/temporalio/testing/workflow_environment.rb +251 -5
- data/lib/temporalio/version.rb +1 -1
- data/lib/temporalio/worker/activity_executor/thread_pool.rb +9 -217
- data/lib/temporalio/worker/activity_executor.rb +3 -3
- data/lib/temporalio/worker/interceptor.rb +340 -66
- data/lib/temporalio/worker/thread_pool.rb +237 -0
- data/lib/temporalio/worker/workflow_executor/thread_pool.rb +230 -0
- data/lib/temporalio/worker/workflow_executor.rb +26 -0
- data/lib/temporalio/worker.rb +201 -30
- data/lib/temporalio/workflow/activity_cancellation_type.rb +20 -0
- data/lib/temporalio/workflow/child_workflow_cancellation_type.rb +21 -0
- data/lib/temporalio/workflow/child_workflow_handle.rb +43 -0
- data/lib/temporalio/workflow/definition.rb +566 -0
- data/lib/temporalio/workflow/external_workflow_handle.rb +41 -0
- data/lib/temporalio/workflow/future.rb +151 -0
- data/lib/temporalio/workflow/handler_unfinished_policy.rb +13 -0
- data/lib/temporalio/workflow/info.rb +82 -0
- data/lib/temporalio/workflow/parent_close_policy.rb +19 -0
- data/lib/temporalio/workflow/update_info.rb +20 -0
- data/lib/temporalio/workflow.rb +523 -0
- data/lib/temporalio.rb +4 -0
- data/temporalio.gemspec +2 -2
- metadata +50 -8
@@ -0,0 +1,730 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
require 'json'
|
4
|
+
require 'temporalio'
|
5
|
+
require 'temporalio/activity/definition'
|
6
|
+
require 'temporalio/api'
|
7
|
+
require 'temporalio/converters/raw_value'
|
8
|
+
require 'temporalio/error'
|
9
|
+
require 'temporalio/internal/bridge/api'
|
10
|
+
require 'temporalio/internal/proto_utils'
|
11
|
+
require 'temporalio/internal/worker/workflow_instance/child_workflow_handle'
|
12
|
+
require 'temporalio/internal/worker/workflow_instance/context'
|
13
|
+
require 'temporalio/internal/worker/workflow_instance/details'
|
14
|
+
require 'temporalio/internal/worker/workflow_instance/externally_immutable_hash'
|
15
|
+
require 'temporalio/internal/worker/workflow_instance/handler_execution'
|
16
|
+
require 'temporalio/internal/worker/workflow_instance/handler_hash'
|
17
|
+
require 'temporalio/internal/worker/workflow_instance/illegal_call_tracer'
|
18
|
+
require 'temporalio/internal/worker/workflow_instance/inbound_implementation'
|
19
|
+
require 'temporalio/internal/worker/workflow_instance/outbound_implementation'
|
20
|
+
require 'temporalio/internal/worker/workflow_instance/replay_safe_logger'
|
21
|
+
require 'temporalio/internal/worker/workflow_instance/replay_safe_metric'
|
22
|
+
require 'temporalio/internal/worker/workflow_instance/scheduler'
|
23
|
+
require 'temporalio/retry_policy'
|
24
|
+
require 'temporalio/scoped_logger'
|
25
|
+
require 'temporalio/worker/interceptor'
|
26
|
+
require 'temporalio/workflow/info'
|
27
|
+
require 'temporalio/workflow/update_info'
|
28
|
+
require 'timeout'
|
29
|
+
|
30
|
+
module Temporalio
|
31
|
+
module Internal
|
32
|
+
module Worker
|
33
|
+
# Instance of a user workflow. This is the instance with all state needed to run the workflow and is expected to
|
34
|
+
# be cached by the worker for sticky execution.
|
35
|
+
class WorkflowInstance
|
36
|
+
# Build a WorkflowActivationCompletion proto that reports the activation as
# failed with the given error.
#
# If converting +error+ itself raises, falls back to a generic
# ApplicationFailureInfo failure so a completion can always be produced.
#
# @param run_id [String] Run ID the completion applies to.
# @param error [Exception] Error to convert into the proto failure.
# @param failure_converter [Object] Converter with +to_failure(error, payload_converter)+.
# @param payload_converter [Object] Payload converter passed through to the failure converter.
# @return [Bridge::Api::WorkflowCompletion::WorkflowActivationCompletion]
def self.new_completion_with_failure(run_id:, error:, failure_converter:, payload_converter:)
  Bridge::Api::WorkflowCompletion::WorkflowActivationCompletion.new(
    run_id: run_id,
    failed: Bridge::Api::WorkflowCompletion::Failure.new(
      failure: begin
        failure_converter.to_failure(error, payload_converter)
      rescue Exception => e # rubocop:disable Lint/RescueException
        # Conversion itself failed; report both the conversion error and the
        # original error message so neither is lost.
        Api::Failure::V1::Failure.new(
          message: "Failed converting error to failure: #{e.message}, " \
                   "original error message: #{error.message}",
          application_failure_info: Api::Failure::V1::ApplicationFailureInfo.new
        )
      end
    )
  )
end
|
52
|
+
|
53
|
+
# Read-only state shared with the context, scheduler, and the
# inbound/outbound interceptor implementations.
attr_reader :context, :logger, :info, :scheduler, :disable_eager_activity_execution, :pending_activities,
            :pending_timers, :pending_child_workflow_starts, :pending_child_workflows,
            :pending_external_signals, :pending_external_cancels, :in_progress_handlers, :payload_converter,
            :failure_converter, :cancellation, :continue_as_new_suggested, :current_history_length,
            :current_history_size, :replaying, :random, :signal_handlers, :query_handlers, :update_handlers,
            :context_frozen
|
59
|
+
|
60
|
+
# Create a workflow instance from worker-provided details.
#
# Extracts the mandatory +initialize_workflow+ job from the first activation,
# builds the frozen Workflow::Info, and seeds the deterministic RNG from the
# server-provided randomness seed.
#
# @param details [Details] Worker-supplied bundle (definition, converters,
#   interceptors, logger, metric meter, initial activation, etc.).
# @raise [RuntimeError] If the first activation has no init job.
def initialize(details)
  # Initialize general state
  @context = Context.new(self)
  # Tracer is only created when there are illegal calls to enforce; it may
  # remain nil for the lifetime of the instance.
  if details.illegal_calls && !details.illegal_calls.empty?
    @tracer = IllegalCallTracer.new(details.illegal_calls)
  end
  @logger = ReplaySafeLogger.new(logger: details.logger, instance: self)
  @logger.scoped_values_getter = proc { scoped_logger_info }
  @runtime_metric_meter = details.metric_meter
  @scheduler = Scheduler.new(self)
  @payload_converter = details.payload_converter
  @failure_converter = details.failure_converter
  @disable_eager_activity_execution = details.disable_eager_activity_execution
  @pending_activities = {} # Keyed by sequence, value is fiber to resume with proto result
  @pending_timers = {} # Keyed by sequence, value is fiber to resume with proto result
  @pending_child_workflow_starts = {} # Keyed by sequence, value is fiber to resume with proto result
  @pending_child_workflows = {} # Keyed by sequence, value is ChildWorkflowHandle to resolve with proto result
  @pending_external_signals = {} # Keyed by sequence, value is fiber to resume with proto result
  @pending_external_cancels = {} # Keyed by sequence, value is fiber to resume with proto result
  @buffered_signals = {} # Keyed by signal name, value is array of signal jobs
  # TODO(cretz): Should these be sets instead? Both should be fairly low counts.
  @in_progress_handlers = [] # Value is HandlerExecution
  @patches_notified = []
  @definition = details.definition
  @interceptors = details.interceptors
  # Cancellation.new returns [cancellation, proc]; the proc is invoked when a
  # cancel_workflow job arrives (see #apply).
  @cancellation, @cancellation_proc = Cancellation.new
  @continue_as_new_suggested = false
  @current_history_length = 0
  @current_history_size = 0
  @replaying = false
  @failure_exception_types = details.workflow_failure_exception_types + @definition.failure_exception_types
  @signal_handlers = HandlerHash.new(
    details.definition.signals,
    Workflow::Definition::Signal
  ) do |defn|
    # New definition, drain buffer. If it's dynamic (i.e. no name) drain them all.
    to_drain = if defn.name.nil?
                 all_signals = @buffered_signals.values.flatten
                 @buffered_signals.clear
                 all_signals
               else
                 @buffered_signals.delete(defn.name)
               end
    to_drain&.each { |job| apply_signal(job) }
  end
  @query_handlers = HandlerHash.new(details.definition.queries, Workflow::Definition::Query)
  @update_handlers = HandlerHash.new(details.definition.updates, Workflow::Definition::Update)

  # Create all things needed from initial job
  @init_job = details.initial_activation.jobs.find { |j| !j.initialize_workflow.nil? }&.initialize_workflow
  raise 'Missing init job from first activation' unless @init_job

  # Info construction may touch otherwise-restricted APIs (e.g. Time/Random),
  # so tracing is disabled around it.
  illegal_call_tracing_disabled do
    @info = Workflow::Info.new(
      attempt: @init_job.attempt,
      continued_run_id: ProtoUtils.string_or(@init_job.continued_from_execution_run_id),
      cron_schedule: ProtoUtils.string_or(@init_job.cron_schedule),
      execution_timeout: ProtoUtils.duration_to_seconds(@init_job.workflow_execution_timeout),
      last_failure: if @init_job.continued_failure
                      @failure_converter.from_failure(@init_job.continued_failure, @payload_converter)
                    end,
      last_result: if @init_job.last_completion_result
                     @payload_converter.from_payloads(@init_job.last_completion_result).first
                   end,
      namespace: details.namespace,
      parent: if @init_job.parent_workflow_info
                Workflow::Info::ParentInfo.new(
                  namespace: @init_job.parent_workflow_info.namespace,
                  run_id: @init_job.parent_workflow_info.run_id,
                  workflow_id: @init_job.parent_workflow_info.workflow_id
                )
              end,
      retry_policy: (RetryPolicy._from_proto(@init_job.retry_policy) if @init_job.retry_policy),
      run_id: details.initial_activation.run_id,
      run_timeout: ProtoUtils.duration_to_seconds(@init_job.workflow_run_timeout),
      start_time: ProtoUtils.timestamp_to_time(details.initial_activation.timestamp) || raise,
      task_queue: details.task_queue,
      task_timeout: ProtoUtils.duration_to_seconds(@init_job.workflow_task_timeout) || raise,
      workflow_id: @init_job.workflow_id,
      workflow_type: @init_job.workflow_type
    ).freeze

    # Deterministic RNG; reseeded via update_random_seed jobs (see #apply).
    @random = Random.new(@init_job.randomness_seed)
  end
end
|
145
|
+
|
146
|
+
# Process a single workflow activation and return the completion proto.
# All job application and user code runs on the fiber scheduler.
#
# @param activation [Bridge::Api::WorkflowActivation::WorkflowActivation]
# @return [Bridge::Api::WorkflowCompletion::WorkflowActivationCompletion]
def activate(activation)
  # Run inside of scheduler
  run_in_scheduler { activate_internal(activation) }
end
|
150
|
+
|
151
|
+
# Append a workflow command to the current activation's command list.
#
# @param command [Bridge::Api::WorkflowCommands::WorkflowCommand]
# @raise [Workflow::InvalidWorkflowStateError] When the context is frozen
#   (e.g. inside a query or update validator), since commands must not be
#   produced there.
def add_command(command)
  if @context_frozen
    raise Workflow::InvalidWorkflowStateError, 'Cannot add commands in this context'
  end

  @commands.push(command)
end
|
156
|
+
|
157
|
+
# The user workflow instance.
#
# @return [Object] The instantiated workflow class.
# @raise [RuntimeError] If accessed before the instance has been created.
def instance
  raise 'Instance accessed before created' unless @instance

  @instance
end
|
160
|
+
|
161
|
+
# Search attributes from the init job, lazily converted on first access and
# cached. Mutations are disabled so callers cannot alter workflow state.
#
# @return [SearchAttributes]
def search_attributes
  # Lazy on first access
  @search_attributes ||= SearchAttributes._from_proto(
    @init_job.search_attributes, disable_mutations: true, never_nil: true
  ) || raise
end
|
167
|
+
|
168
|
+
# Workflow memo from the init job, lazily converted on first access and
# wrapped so external callers cannot mutate it.
#
# @return [ExternallyImmutableHash]
def memo
  # Lazy on first access
  @memo ||= ExternallyImmutableHash.new(ProtoUtils.memo_from_proto(@init_job.memo, payload_converter) || {})
end
|
172
|
+
|
173
|
+
# Deterministic current time, derived from the latest activation's
# timestamp (set in #activate_internal) rather than the wall clock.
#
# @return [Time]
def now
  # Create each time
  ProtoUtils.timestamp_to_time(@now_timestamp) or raise 'Time unexpectedly not present'
end
|
177
|
+
|
178
|
+
# Run the given block with illegal-call tracing disabled, used for internal
# code that may legitimately touch otherwise-restricted APIs.
#
# @tracer is only assigned in #initialize when illegal calls are configured,
# yet this method is invoked unconditionally (e.g. from #initialize and
# #apply), so a nil tracer must be a plain passthrough — mirroring the
# @tracer guard in #run_in_scheduler.
def illegal_call_tracing_disabled(&block)
  return yield unless @tracer

  @tracer.disable(&block)
end
|
181
|
+
|
182
|
+
# Patch/versioning support. Returns whether the patch is considered applied
# for this run and, when applied, records a SetPatchMarker command.
#
# When not replaying, a patch is always considered applied; when replaying it
# is applied only if the history notified the patch id (see :notify_has_patch
# in #apply). The result is memoized per patch id so repeated calls (and the
# deprecated form) do not emit duplicate commands.
#
# @param patch_id [String, Symbol] Patch identifier (stringified).
# @param deprecated [Boolean] Whether this is Workflow.deprecate_patch.
# @return [Boolean] Whether the patched (new) code path should run.
def patch(patch_id:, deprecated:)
  # Use memoized result if present. If this is being deprecated, we can still use memoized result and skip the
  # command.
  patch_id = patch_id.to_s
  @patches_memoized ||= {}
  @patches_memoized.fetch(patch_id) do
    patched = !replaying || @patches_notified.include?(patch_id)
    @patches_memoized[patch_id] = patched
    if patched
      add_command(
        Bridge::Api::WorkflowCommands::WorkflowCommand.new(
          set_patch_marker: Bridge::Api::WorkflowCommands::SetPatchMarker.new(patch_id:, deprecated:)
        )
      )
    end
    patched
  end
end
|
200
|
+
|
201
|
+
# Replay-safe metric meter for this workflow, lazily built from the runtime
# meter with namespace/task-queue/workflow-type attributes attached.
#
# @return [ReplaySafeMetric::Meter]
def metric_meter
  @metric_meter ||= ReplaySafeMetric::Meter.new(
    @runtime_metric_meter.with_additional_attributes(
      {
        namespace: info.namespace,
        task_queue: info.task_queue,
        workflow_type: info.workflow_type
      }
    )
  )
end
|
212
|
+
|
213
|
+
private
|
214
|
+
|
215
|
+
# Install this instance's fiber scheduler for the duration of the block
# (enabling illegal-call tracing when configured), and always uninstall it
# afterward so the worker thread is left clean.
def run_in_scheduler(&)
  Fiber.set_scheduler(@scheduler)
  if @tracer
    # Tracing only enforced while user code can run
    @tracer.enable(&)
  else
    yield
  end
ensure
  Fiber.set_scheduler(nil)
end
|
225
|
+
|
226
|
+
# Core activation processing: resets per-activation state, applies every job,
# drives the fiber event loop, then builds a success or failure completion.
# Must run inside the scheduler (see #activate).
#
# @param activation [Bridge::Api::WorkflowActivation::WorkflowActivation]
# @return [Bridge::Api::WorkflowCompletion::WorkflowActivationCompletion]
def activate_internal(activation)
  # Reset some activation state
  @commands = []
  @current_activation_error = nil
  @continue_as_new_suggested = activation.continue_as_new_suggested
  @current_history_length = activation.history_length
  @current_history_size = activation.history_size_bytes
  @replaying = activation.is_replaying
  @now_timestamp = activation.timestamp

  # Apply jobs and run event loop
  begin
    # Create instance if it doesn't already exist. Context is frozen during
    # creation so constructors cannot emit commands.
    @instance ||= with_context_frozen { create_instance }

    # Apply jobs
    activation.jobs.each { |job| apply(job) }

    # Schedule primary 'execute' if not already running (i.e. this is
    # the first activation)
    @primary_fiber ||= schedule(top_level: true) { run_workflow }

    # Run the event loop
    @scheduler.run_until_all_yielded
  rescue Exception => e # rubocop:disable Lint/RescueException
    on_top_level_exception(e)
  end

  # If we are not replaying and workflow is complete but not a
  # failure (i.e. success, continue as new, or cancel), we warn for
  # any unfinished handlers.
  if !@replaying && @commands.any? do |c|
    !c.complete_workflow_execution.nil? ||
      !c.continue_as_new_workflow_execution.nil? ||
      !c.cancel_workflow_execution.nil?
  end
    warn_on_any_unfinished_handlers
  end

  # Return success or failure
  if @current_activation_error
    # Warnings are intentionally emitted even while replaying
    @logger.replay_safety_disabled do
      @logger.warn('Failed activation')
      @logger.warn(@current_activation_error)
    end
    WorkflowInstance.new_completion_with_failure(
      run_id: activation.run_id,
      error: @current_activation_error,
      failure_converter: @failure_converter,
      payload_converter: @payload_converter
    )
  else
    Bridge::Api::WorkflowCompletion::WorkflowActivationCompletion.new(
      run_id: activation.run_id,
      successful: Bridge::Api::WorkflowCompletion::Success.new(commands: @commands)
    )
  end
ensure
  # Commands are only valid within an activation
  @commands = nil
  @current_activation_error = nil
end
|
287
|
+
|
288
|
+
# Convert workflow arguments, build the interceptor chain, and instantiate
# the user workflow class.
#
# @return [Object] The new user workflow instance.
def create_instance
  # Convert workflow arguments
  @workflow_arguments = convert_args(payload_array: @init_job.arguments,
                                     method_name: :execute,
                                     raw_args: @definition.raw_args)

  # Initialize interceptors. Reduced in reverse so the first-listed
  # interceptor is outermost in the chain.
  @inbound = @interceptors.reverse_each.reduce(InboundImplementation.new(self)) do |acc, int|
    int.intercept_workflow(acc)
  end
  @inbound.init(OutboundImplementation.new(self))

  # Create the user instance; arguments are only passed to the constructor
  # when the definition opted in via workflow_init.
  if @definition.init
    @definition.workflow_class.new(*@workflow_arguments)
  else
    @definition.workflow_class.new
  end
end
|
307
|
+
|
308
|
+
# Apply a single activation job by dispatching on its variant: resuming
# pending fibers/handles by sequence number, invoking handler application,
# or updating instance state.
#
# @param job [Bridge::Api::WorkflowActivation::WorkflowActivationJob]
# @raise [RuntimeError] On an unrecognized variant.
def apply(job)
  case job.variant
  when :initialize_workflow
    # Ignore — already consumed in #initialize as @init_job
  when :fire_timer
    # Safe-navigation: the pending entry may have been removed (e.g. canceled)
    pending_timers.delete(job.fire_timer.seq)&.resume
  when :update_random_seed
    @random = illegal_call_tracing_disabled { Random.new(job.update_random_seed.randomness_seed) }
  when :query_workflow
    apply_query(job.query_workflow)
  when :cancel_workflow
    # TODO(cretz): Use the details somehow?
    @cancellation_proc.call(reason: 'Workflow canceled')
  when :signal_workflow
    apply_signal(job.signal_workflow)
  when :resolve_activity
    pending_activities.delete(job.resolve_activity.seq)&.resume(job.resolve_activity.result)
  when :notify_has_patch
    @patches_notified << job.notify_has_patch.patch_id
  when :resolve_child_workflow_execution_start
    pending_child_workflow_starts.delete(job.resolve_child_workflow_execution_start.seq)&.resume(
      job.resolve_child_workflow_execution_start
    )
  when :resolve_child_workflow_execution
    pending_child_workflows.delete(job.resolve_child_workflow_execution.seq)&._resolve(
      job.resolve_child_workflow_execution.result
    )
  when :resolve_signal_external_workflow
    pending_external_signals.delete(job.resolve_signal_external_workflow.seq)&.resume(
      job.resolve_signal_external_workflow
    )
  when :resolve_request_cancel_external_workflow
    pending_external_cancels.delete(job.resolve_request_cancel_external_workflow.seq)&.resume(
      job.resolve_request_cancel_external_workflow
    )
  when :do_update
    apply_update(job.do_update)
  else
    raise "Unrecognized activation job variant: #{job.variant}"
  end
end
|
349
|
+
|
350
|
+
# Apply a signal job: dispatch to the matching (or dynamic) handler through
# the interceptor chain, or buffer the job until a handler is registered
# (buffers are drained by the @signal_handlers hook in #initialize).
#
# @param job [Bridge::Api::WorkflowActivation::SignalWorkflow]
def apply_signal(job)
  defn = signal_handlers[job.signal_name] || signal_handlers[nil]
  handler_exec =
    if defn
      HandlerExecution.new(name: job.signal_name, update_id: nil, unfinished_policy: defn.unfinished_policy)
    end
  # Process as a top level handler so that errors are treated as if in primary workflow method
  schedule(top_level: true, handler_exec:) do
    # Send to interceptor if there is a definition, buffer otherwise
    if defn
      @inbound.handle_signal(
        Temporalio::Worker::Interceptor::Workflow::HandleSignalInput.new(
          signal: job.signal_name,
          args: begin
            convert_handler_args(payload_array: job.input, defn:)
          rescue StandardError => e
            # Signals argument conversion failure must not fail task
            @logger.error("Failed converting signal input arguments for #{job.signal_name}, dropping signal")
            @logger.error(e)
            # `next` exits the scheduled block, dropping the signal entirely
            next
          end,
          definition: defn,
          headers: ProtoUtils.headers_from_proto_map(job.headers, @payload_converter) || {}
        )
      )
    else
      buffered = @buffered_signals[job.signal_name]
      buffered = @buffered_signals[job.signal_name] = [] if buffered.nil?
      buffered << job
    end
  end
end
|
382
|
+
|
383
|
+
# Apply a query job: resolve the handler (with a built-in '__stack_trace'
# query), invoke it with the context frozen (queries must not mutate state
# or emit workflow commands), and respond with a success or failed
# QueryResult command. Any exception becomes a query failure, not a task
# failure.
#
# @param job [Bridge::Api::WorkflowActivation::QueryWorkflow]
def apply_query(job)
  # TODO(cretz): __temporal_workflow_metadata
  defn = case job.query_type
         when '__stack_trace'
           # Built-in query answered from the scheduler's fiber stacks
           Workflow::Definition::Query.new(
             name: '__stack_trace',
             to_invoke: proc { scheduler.stack_trace }
           )
         else
           # Fall back to the dynamic (nil-named) handler if present
           query_handlers[job.query_type] || query_handlers[nil]
         end
  schedule do
    unless defn
      raise "Query handler for #{job.query_type} expected but not found, " \
            "known queries: [#{query_handlers.keys.compact.sort.join(', ')}]"
    end

    result = with_context_frozen do
      @inbound.handle_query(
        Temporalio::Worker::Interceptor::Workflow::HandleQueryInput.new(
          id: job.query_id,
          query: job.query_type,
          args: begin
            convert_handler_args(payload_array: job.arguments, defn:)
          rescue StandardError => e
            raise "Failed converting query input arguments: #{e}"
          end,
          definition: defn,
          headers: ProtoUtils.headers_from_proto_map(job.headers, @payload_converter) || {}
        )
      )
    end
    add_command(
      Bridge::Api::WorkflowCommands::WorkflowCommand.new(
        respond_to_query: Bridge::Api::WorkflowCommands::QueryResult.new(
          query_id: job.query_id,
          succeeded: Bridge::Api::WorkflowCommands::QuerySuccess.new(
            response: @payload_converter.to_payload(result)
          )
        )
      )
    )
  rescue Exception => e # rubocop:disable Lint/RescueException
    add_command(
      Bridge::Api::WorkflowCommands::WorkflowCommand.new(
        respond_to_query: Bridge::Api::WorkflowCommands::QueryResult.new(
          query_id: job.query_id,
          failed: @failure_converter.to_failure(e, @payload_converter)
        )
      )
    )
  end
end
|
436
|
+
|
437
|
+
# Apply an update job following the accept/reject protocol: run the optional
# validator with the context frozen, emit an "accepted" UpdateResponse,
# invoke the handler, then emit "completed". Until acceptance all errors are
# rejections; after acceptance a non-failure exception fails the task while
# a failure exception rejects the update.
#
# @param job [Bridge::Api::WorkflowActivation::DoUpdate]
def apply_update(job)
  defn = update_handlers[job.name] || update_handlers[nil]
  handler_exec =
    (HandlerExecution.new(name: job.name, update_id: job.id, unfinished_policy: defn.unfinished_policy) if defn)
  schedule(handler_exec:) do
    # Until this is accepted, all errors are rejections
    accepted = false

    # Set update info (fiber-storage so it is inherited by child fibers)
    Fiber[:__temporal_update_info] = Workflow::UpdateInfo.new(id: job.id, name: job.name).freeze

    # Reject if not present
    unless defn
      raise "Update handler for #{job.name} expected but not found, " \
            "known updates: [#{update_handlers.keys.compact.sort.join(', ')}]"
    end

    # To match other SDKs, we are only calling the validation interceptor if there is a validator. Also to match
    # other SDKs, we are re-converting the args between validate and update to disallow user mutation in
    # validator/interceptor.
    if job.run_validator && defn.validator_to_invoke
      with_context_frozen do
        @inbound.validate_update(
          Temporalio::Worker::Interceptor::Workflow::HandleUpdateInput.new(
            id: job.id,
            update: job.name,
            args: begin
              convert_handler_args(payload_array: job.input, defn:)
            rescue StandardError => e
              raise "Failed converting update input arguments: #{e}"
            end,
            definition: defn,
            headers: ProtoUtils.headers_from_proto_map(job.headers, @payload_converter) || {}
          )
        )
      end
    end

    # We build the input before marking accepted so the exception can reject instead of fail task
    input = Temporalio::Worker::Interceptor::Workflow::HandleUpdateInput.new(
      id: job.id,
      update: job.name,
      args: begin
        convert_handler_args(payload_array: job.input, defn:)
      rescue StandardError => e
        raise "Failed converting update input arguments: #{e}"
      end,
      definition: defn,
      headers: ProtoUtils.headers_from_proto_map(job.headers, @payload_converter) || {}
    )

    # Accept
    add_command(
      Bridge::Api::WorkflowCommands::WorkflowCommand.new(
        update_response: Bridge::Api::WorkflowCommands::UpdateResponse.new(
          protocol_instance_id: job.protocol_instance_id,
          accepted: Google::Protobuf::Empty.new
        )
      )
    )
    accepted = true

    # Issue update
    result = @inbound.handle_update(input)

    add_command(
      Bridge::Api::WorkflowCommands::WorkflowCommand.new(
        update_response: Bridge::Api::WorkflowCommands::UpdateResponse.new(
          protocol_instance_id: job.protocol_instance_id,
          completed: @payload_converter.to_payload(result)
        )
      )
    )
  rescue Exception => e # rubocop:disable Lint/RescueException
    # Re-raise to cause task failure if this is accepted but this is not a failure exception
    raise if accepted && !failure_exception?(e)

    # Reject
    add_command(
      Bridge::Api::WorkflowCommands::WorkflowCommand.new(
        update_response: Bridge::Api::WorkflowCommands::UpdateResponse.new(
          protocol_instance_id: job.protocol_instance_id,
          rejected: @failure_converter.to_failure(e, @payload_converter)
        )
      )
    )
  end
end
|
525
|
+
|
526
|
+
# Run the primary workflow method through the interceptor chain and, on
# normal return, emit the CompleteWorkflowExecution command with the
# converted result. Exceptions propagate to the scheduling fiber's handler
# (see #schedule / #on_top_level_exception).
def run_workflow
  result = @inbound.execute(
    Temporalio::Worker::Interceptor::Workflow::ExecuteInput.new(
      args: @workflow_arguments,
      headers: ProtoUtils.headers_from_proto_map(@init_job.headers, @payload_converter) || {}
    )
  )
  add_command(
    Bridge::Api::WorkflowCommands::WorkflowCommand.new(
      complete_workflow_execution: Bridge::Api::WorkflowCommands::CompleteWorkflowExecution.new(
        result: @payload_converter.to_payload(result)
      )
    )
  )
end
|
541
|
+
|
542
|
+
# Schedule a block on a new fiber via the installed scheduler.
#
# Top-level fibers (primary method, signals) route exceptions through
# #on_top_level_exception; other fibers record the first exception as the
# activation error. Handler executions are tracked for the
# unfinished-handler warning and always removed on completion.
#
# @param top_level [Boolean] Treat errors as primary-workflow errors.
# @param handler_exec [HandlerExecution, nil] Tracking record, if a handler.
def schedule(
  top_level: false,
  handler_exec: nil,
  &
)
  in_progress_handlers << handler_exec if handler_exec
  Fiber.schedule do
    yield
  rescue Exception => e # rubocop:disable Lint/RescueException
    if top_level
      on_top_level_exception(e)
    else
      # Only the first error is kept for the activation
      @current_activation_error ||= e
    end
  ensure
    in_progress_handlers.delete(handler_exec) if handler_exec
  end
end
|
560
|
+
|
561
|
+
# Translate an exception escaping top-level workflow code into the proper
# command: continue-as-new, cancel, or fail-workflow for failure exceptions;
# anything else becomes the activation (task) error.
#
# @param err [Exception]
def on_top_level_exception(err)
  if err.is_a?(Workflow::ContinueAsNewError)
    @logger.debug('Workflow requested continue as new')
    add_command(
      Bridge::Api::WorkflowCommands::WorkflowCommand.new(
        continue_as_new_workflow_execution: Bridge::Api::WorkflowCommands::ContinueAsNewWorkflowExecution.new(
          workflow_type: if err.workflow
                           Workflow::Definition._workflow_type_from_workflow_parameter(err.workflow)
                         end,
          task_queue: err.task_queue,
          arguments: ProtoUtils.convert_to_payload_array(payload_converter, err.args),
          workflow_run_timeout: ProtoUtils.seconds_to_duration(err.run_timeout),
          workflow_task_timeout: ProtoUtils.seconds_to_duration(err.task_timeout),
          memo: ProtoUtils.memo_to_proto_hash(err.memo, payload_converter),
          headers: ProtoUtils.headers_to_proto_hash(err.headers, payload_converter),
          search_attributes: err.search_attributes&._to_proto,
          retry_policy: err.retry_policy&._to_proto
        )
      )
    )
  elsif @cancellation.canceled? && Error.canceled?(err)
    # If cancel was ever requested and this is a cancellation or an activity/child cancellation, we add a
    # cancel command. Technically this means that a swallowed cancel followed by, say, an activity cancel
    # later on will show the workflow as cancelled. But this is a Temporal limitation in that cancellation is
    # a state not an event.
    @logger.debug('Workflow requested to cancel and properly raised cancel')
    @logger.debug(err)
    add_command(
      Bridge::Api::WorkflowCommands::WorkflowCommand.new(
        cancel_workflow_execution: Bridge::Api::WorkflowCommands::CancelWorkflowExecution.new
      )
    )
  elsif failure_exception?(err)
    @logger.debug('Workflow raised failure')
    @logger.debug(err)
    add_command(
      Bridge::Api::WorkflowCommands::WorkflowCommand.new(
        fail_workflow_execution: Bridge::Api::WorkflowCommands::FailWorkflowExecution.new(
          failure: @failure_converter.to_failure(err, @payload_converter)
        )
      )
    )
  else
    # Unknown error: fail the task (activation), not the workflow
    @current_activation_error ||= err
  end
end
|
607
|
+
|
608
|
+
# Whether +err+ is considered a workflow failure (failing the workflow
# execution) as opposed to a task failure. True for Temporal failure
# errors, Ruby timeout errors, and any user-configured failure exception
# types.
def failure_exception?(err)
  return true if err.is_a?(Error::Failure)
  return true if err.is_a?(Timeout::Error)

  @failure_exception_types.any? { |cls| err.is_a?(cls) }
end
|
613
|
+
|
614
|
+
# Run the given block with the workflow context marked frozen, restoring
# the unfrozen state when the block exits (normally or via exception).
# Returns the block's value.
def with_context_frozen(&)
  begin
    @context_frozen = true
    yield
  ensure
    # Always unfreeze, even if the block raised
    @context_frozen = false
  end
end
|
620
|
+
|
621
|
+
# Convert raw handler payloads into Ruby arguments using the handler
# definition's invocation target and raw-args setting.
def convert_handler_args(payload_array:, defn:)
  to_invoke = defn.to_invoke
  convert_args(
    payload_array:,
    # Only a symbol target can be arity-checked against the workflow class
    method_name: to_invoke.is_a?(Symbol) ? to_invoke : nil,
    raw_args: defn.raw_args,
    # A nil definition name means a dynamic handler whose first parameter
    # is the handler name, so it is excluded from arity checks
    ignore_first_param: defn.name.nil?
  )
end
|
629
|
+
|
630
|
+
# Convert a payload array into positional Ruby argument values.
#
# When +method_name+ resolves to a method on the workflow class, excess
# values beyond its positional parameters are trimmed and an ArgumentError
# is raised when fewer values than required parameters are given. When
# +raw_args+ is true, payloads are wrapped in RawValue instead of decoded.
# +ignore_first_param+ excludes the method's first parameter from arity
# accounting (used for dynamic handlers whose first parameter is the name).
def convert_args(payload_array:, method_name:, raw_args:, ignore_first_param: false)
  # Just in case it is not an array
  payload_array = payload_array.to_ary

  # We want to discard extra arguments if we can. If there is a method
  # name, try to look it up. Then, assuming there's no :rest, trim args
  # to the amount of :req or :opt there are.
  if method_name && @definition.workflow_class.method_defined?(method_name)
    params = @definition.workflow_class.instance_method(method_name).parameters
    # A splat (:rest) accepts anything, so no trimming or arity check then
    unless params.any? { |type, _| type == :rest }
      max_args = params.count { |type, _| type == :req || type == :opt }
      req_args = params.count { |type, _| type == :req }
      if ignore_first_param
        max_args -= 1
        req_args -= 1
      end
      if req_args > payload_array.size
        # We have to fail here instead of let Ruby fail the invocation because some handlers, such as signals,
        # want to log and ignore invalid arguments instead of fail and if we used Ruby failure, we can't
        # differentiate between too-few-param caused by us or somewhere else by a user.
        raise ArgumentError, "wrong number of required arguments for #{method_name} " \
                             "(given #{payload_array.size}, expected #{req_args})"
      end
      # Trim off excess values beyond what the method can accept
      payload_array = payload_array.take(max_args)
    end
  end

  # Convert
  if raw_args
    payload_array.map { |p| Converters::RawValue.new(p) }
  else
    ProtoUtils.convert_from_payload_array(@payload_converter, payload_array)
  end
end
|
673
|
+
|
674
|
+
# Hash of workflow-identifying fields attached to scoped-logger output.
# The base hash is memoized; update id/name are merged in (without
# mutating the memo) when called from within an update handler fiber.
def scoped_logger_info
  @scoped_logger_info ||= {
    attempt: info.attempt,
    namespace: info.namespace,
    run_id: info.run_id,
    task_queue: info.task_queue,
    workflow_id: info.workflow_id,
    workflow_type: info.workflow_type
  }
  # Append update info if there is any
  update_info = Fiber[:__temporal_update_info]
  if update_info
    @scoped_logger_info.merge({ update_id: update_info.id, update_name: update_info.name })
  else
    @scoped_logger_info
  end
end
|
689
|
+
|
690
|
+
# Emit TMPRL1102 warnings when the workflow finishes while update or
# signal handlers whose unfinished policy is WARN_AND_ABANDON are still
# running.
def warn_on_any_unfinished_handlers
  warnable = in_progress_handlers.select do |handler|
    handler.unfinished_policy == Workflow::HandlerUnfinishedPolicy::WARN_AND_ABANDON
  end
  # Handlers with an update ID are updates; the rest are signals
  updates, signals = warnable.partition(&:update_id)

  unless updates.empty?
    updates_str = JSON.generate(updates.map { |u| { name: u.name, id: u.update_id } })
    warn(
      "[TMPRL1102] Workflow #{info.workflow_id} finished while update handlers are still running. This may " \
      'have interrupted work that the update handler was doing, and the client that sent the update will ' \
      "receive a 'workflow execution already completed' RPCError instead of the update result. You can wait " \
      'for all update and signal handlers to complete by using ' \
      '`Temporalio::Workflow.wait_condition { Temporalio::Workflow.handlers_finished? }`. ' \
      'Alternatively, if both you and the clients sending the update are okay with interrupting running ' \
      'handlers when the workflow finishes, and causing clients to receive errors, then you can disable this ' \
      'warning via the update handler definition: ' \
      '`workflow_update unfinished_policy: Temporalio::Workflow::HandlerUnfinishedPolicy.ABANDON`. ' \
      "The following updates were unfinished (and warnings were not disabled for their handler): #{updates_str}"
    )
  end

  return if signals.empty?

  # Report signals grouped by name with counts, most frequent first
  name_counts = signals.group_by(&:name).transform_values(&:size).sort_by { |_, v| -v }
  signals_str = JSON.generate(name_counts.map { |name, count| { name:, count: } })
  warn(
    "[TMPRL1102] Workflow #{info.workflow_id} finished while signal handlers are still running. This may " \
    'have interrupted work that the signal handler was doing. You can wait for all update and signal ' \
    'handlers to complete by using ' \
    '`Temporalio::Workflow.wait_condition { Temporalio::Workflow.handlers_finished? }`. ' \
    'Alternatively, if both you and the clients sending the signal are okay with interrupting running ' \
    'handlers when the workflow finishes, then you can disable this warning via the signal handler ' \
    'definition: ' \
    '`workflow_signal unfinished_policy: Temporalio::Workflow::HandlerUnfinishedPolicy.ABANDON`. ' \
    "The following signals were unfinished (and warnings were not disabled for their handler): #{signals_str}"
  )
end
|
728
|
+
end
|
729
|
+
end
|
730
|
+
end
|