aws-flow 2.0.2 → 2.1.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +8 -8
- data/Rakefile +3 -3
- data/lib/aws/decider/activity_definition.rb +9 -4
- data/lib/aws/decider/async_decider.rb +11 -22
- data/lib/aws/decider/executor.rb +1 -1
- data/lib/aws/decider/flow_defaults.rb +14 -0
- data/lib/aws/decider/task_poller.rb +84 -20
- data/lib/aws/decider/utilities.rb +86 -0
- data/lib/aws/decider/version.rb +1 -1
- data/lib/aws/decider/workflow_client.rb +2 -2
- data/lib/aws/decider/workflow_definition.rb +22 -5
- data/lib/aws/flow/flow_utils.rb +1 -0
- data/lib/aws/runner.rb +1 -1
- data/spec/aws/decider/integration/activity_spec.rb +3 -4
- data/spec/aws/decider/integration/integration_spec.rb +288 -27
- data/spec/aws/decider/unit/decider_spec.rb +256 -0
- data/spec/aws/decider/unit/workflow_client_spec.rb +23 -0
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,15 +1,15 @@
|
|
1
1
|
---
|
2
2
|
!binary "U0hBMQ==":
|
3
3
|
metadata.gz: !binary |-
|
4
|
-
|
4
|
+
YzZjOTA1Y2MzMjE0MTg0N2I2ZjAwMjQ0ZjY0NDBhNGQ1YjhjMTA1NQ==
|
5
5
|
data.tar.gz: !binary |-
|
6
|
-
|
6
|
+
MzI3ZjhmNTE4ZTZhNWFkOTc2NGFiYTgxYjhlN2JiMDNmYWIwYmUwYw==
|
7
7
|
SHA512:
|
8
8
|
metadata.gz: !binary |-
|
9
|
-
|
10
|
-
|
11
|
-
|
9
|
+
Mzc5NDdjMzcyMGZlNmZmODQ1YTMxYzU5ZTU2MjVhZDZhZjcyMWZmZjMzZDU0
|
10
|
+
ODgyNmU3ZjdlZDk5MDc1MWYyMDVlY2IzZjk4NGFlN2Q2NDkwYTI1YjAxOWFl
|
11
|
+
M2Q0NDQ3MDI3NTkyNzZiNTZiNGM5ZDYwNGZiMDM2MDhhMjRlMGQ=
|
12
12
|
data.tar.gz: !binary |-
|
13
|
-
|
14
|
-
|
15
|
-
|
13
|
+
Njk4Y2ZkMzUxNGY5ZGI3MjA1NGI2YTQ5N2QwYTBlZDFiMzU3ZWIxZDIxOGZl
|
14
|
+
NjViN2YyZjE3ODgzZjBjMGNhYzA2YjU5MTllODI5ODdiMjFkZjQ4M2QzYTM1
|
15
|
+
NzZhNmFmNTYxM2E2MTMyMTQyOGI5NjY5OTU0N2I0YzBiZGI3N2Q=
|
data/Rakefile
CHANGED
@@ -17,19 +17,19 @@ require "rspec/core/rake_task"
|
|
17
17
|
|
18
18
|
desc "Run unit tests"
|
19
19
|
RSpec::Core::RakeTask.new(:unit_tests) do |spec|
|
20
|
-
spec.rspec_opts = ['--color', '--format nested']
|
20
|
+
spec.rspec_opts = ['--color', '--format nested', '--profile']
|
21
21
|
spec.pattern = 'spec/**/unit/*.rb'
|
22
22
|
end
|
23
23
|
|
24
24
|
desc "Run integration tests"
|
25
25
|
RSpec::Core::RakeTask.new(:integration_tests) do |spec|
|
26
|
-
spec.rspec_opts = ['--color', '--format nested']
|
26
|
+
spec.rspec_opts = ['--color', '--format nested', '--profile']
|
27
27
|
spec.pattern = 'spec/**/integration/*.rb'
|
28
28
|
end
|
29
29
|
|
30
30
|
desc "Run all tests"
|
31
31
|
RSpec::Core::RakeTask.new(:all_tests) do |spec|
|
32
|
-
spec.rspec_opts = ['--color', '--format nested']
|
32
|
+
spec.rspec_opts = ['--color', '--format nested', '--profile']
|
33
33
|
spec.pattern = 'spec/**/*.rb'
|
34
34
|
end
|
35
35
|
|
@@ -69,16 +69,21 @@ module AWS
|
|
69
69
|
result = @instance.send(@activity_method, *ruby_input)
|
70
70
|
end
|
71
71
|
rescue Exception => e
|
72
|
-
#TODO we need the proper error handling here
|
73
72
|
raise e if e.is_a? CancellationException
|
74
|
-
|
73
|
+
|
74
|
+
# Check if serialized exception violates the 32k limit and truncate it
|
75
|
+
reason, converted_failure = AWS::Flow::Utilities::check_and_truncate_exception(e, @converter)
|
76
|
+
|
77
|
+
# Wrap the exception that we got into an ActivityFailureException so
|
78
|
+
# that the task poller can handle it properly.
|
79
|
+
raise ActivityFailureException.new(reason, converted_failure)
|
75
80
|
ensure
|
76
81
|
@instance._activity_execution_context = nil
|
77
82
|
end
|
78
83
|
converted_result = @converter.dump(result)
|
79
84
|
# We are going to have to convert this object into a string to submit it, and that's where the 32k limit will be enforced, so it's valid to turn the object to a string and check the size of the result
|
80
|
-
if converted_result.to_s.size >
|
81
|
-
return
|
85
|
+
if converted_result.to_s.size > FlowConstants::DATA_LIMIT
|
86
|
+
return converted_result, result, true
|
82
87
|
end
|
83
88
|
return converted_result, result, false
|
84
89
|
end
|
@@ -278,27 +278,16 @@ module AWS
|
|
278
278
|
def make_fail_decision(decision_id, failure)
|
279
279
|
decision_type = "FailWorkflowExecution"
|
280
280
|
|
281
|
-
#
|
282
|
-
#
|
283
|
-
#
|
284
|
-
|
285
|
-
|
286
|
-
|
287
|
-
|
288
|
-
|
289
|
-
|
290
|
-
|
291
|
-
# the stack trace and pop that out.
|
292
|
-
details = failure.details if (failure.respond_to? :details)
|
293
|
-
details ||= failure.backtrace.join("")
|
294
|
-
new_details = details[0..(max_response_size - truncation_overhead)]
|
295
|
-
if details.length > (max_response_size - truncation_overhead)
|
296
|
-
new_details += "->->->->->THIS BACKTRACE WAS TRUNCATED"
|
297
|
-
end
|
298
|
-
# details.unshift(reason)
|
299
|
-
# details = details.join("\n")
|
300
|
-
|
301
|
-
fail_workflow_execution_decision_attributes = {:reason => reason, :details => new_details}
|
281
|
+
# Get the reason from the failure. Or get the message if a
|
282
|
+
# CancellationException is initialized without a reason. Fall back to
|
283
|
+
# a default string if nothing is provided
|
284
|
+
reason = failure.reason || failure.message || "Workflow failure did not provide any reason."
|
285
|
+
# Get the details from the failure. Or get the backtrace if a
|
286
|
+
# CancellationException is initialized without a details. Fall back to
|
287
|
+
# a default string if nothing is provided
|
288
|
+
details = failure.details || failure.backtrace.to_s || "Workflow failure did not provide any details."
|
289
|
+
|
290
|
+
fail_workflow_execution_decision_attributes = { reason: reason, details: details }
|
302
291
|
decision = {:decision_type => decision_type, :fail_workflow_execution_decision_attributes => fail_workflow_execution_decision_attributes}
|
303
292
|
CompleteWorkflowStateMachine.new(decision_id, decision)
|
304
293
|
|
@@ -352,7 +341,7 @@ module AWS
|
|
352
341
|
else
|
353
342
|
@decision_helper[decision_id] = make_completion_decision(decision_id, {
|
354
343
|
:decision_type => "CompleteWorkflowExecution",
|
355
|
-
:complete_workflow_execution_decision_attributes => {:result => @result.get}})
|
344
|
+
:complete_workflow_execution_decision_attributes => {:result => @result.get }})
|
356
345
|
end
|
357
346
|
end
|
358
347
|
end
|
data/lib/aws/decider/executor.rb
CHANGED
@@ -158,7 +158,7 @@ module AWS
|
|
158
158
|
if status.success?
|
159
159
|
@log.debug "Child process #{pid} exited successfully"
|
160
160
|
else
|
161
|
-
@log.error "Child process #{pid} exited with non-zero status code"
|
161
|
+
@log.error "Child process #{pid} exited with non-zero status code: #{status}"
|
162
162
|
end
|
163
163
|
|
164
164
|
# Reap
|
@@ -96,6 +96,20 @@ module AWS
|
|
96
96
|
attr_reader :exponential_retry_maximum_retry_interval_seconds, :exponential_retry_retry_expiration_seconds, :exponential_retry_backoff_coefficient, :exponential_retry_maximum_attempts, :exponential_retry_function, :default_data_converter, :exponential_retry_exceptions_to_include, :exponential_retry_exceptions_to_exclude, :jitter_function, :should_jitter, :exponential_retry_initial_retry_interval, :use_worker_task_list
|
97
97
|
end
|
98
98
|
|
99
|
+
# Sizes taken from
|
100
|
+
# http://docs.aws.amazon.com/amazonswf/latest/apireference/API_FailWorkflowExecutionDecisionAttributes.html
|
101
|
+
DATA_LIMIT = 32768
|
102
|
+
|
103
|
+
# Number of chars that can fit in FlowException's reason
|
104
|
+
REASON_LIMIT = 256
|
105
|
+
# Number of chars that can fit in FlowException's details. Same as
|
106
|
+
# DATA_LIMIT
|
107
|
+
DETAILS_LIMIT = DATA_LIMIT
|
108
|
+
# This is the truncation overhead for serialization.
|
109
|
+
TRUNCATION_OVERHEAD = 8000
|
110
|
+
# Truncation string added to the end of a truncated string
|
111
|
+
TRUNCATED = "[TRUNCATED]"
|
112
|
+
|
99
113
|
INFINITY = -1
|
100
114
|
RETENTION_DEFAULT = 7
|
101
115
|
NUM_OF_WORKERS_DEFAULT = 1
|
@@ -64,19 +64,40 @@ module AWS
|
|
64
64
|
@logger.info Utilities.workflow_task_to_debug_string("Got decision task", task)
|
65
65
|
|
66
66
|
task_completed_request = @handler.handle_decision_task(task)
|
67
|
-
@logger.debug "Response to the task will be #{task_completed_request}"
|
67
|
+
@logger.debug "Response to the task will be #{task_completed_request.inspect}"
|
68
|
+
|
68
69
|
if !task_completed_request[:decisions].empty? && (task_completed_request[:decisions].first.keys.include?(:fail_workflow_execution_decision_attributes))
|
69
70
|
fail_hash = task_completed_request[:decisions].first[:fail_workflow_execution_decision_attributes]
|
70
71
|
reason = fail_hash[:reason]
|
71
72
|
details = fail_hash[:details]
|
72
|
-
@logger.debug "#{reason}, #{details}"
|
73
73
|
end
|
74
|
-
|
74
|
+
|
75
|
+
begin
|
76
|
+
@service.respond_decision_task_completed(task_completed_request)
|
77
|
+
rescue AWS::SimpleWorkflow::Errors::ValidationException => e
|
78
|
+
if e.message.include? "failed to satisfy constraint: Member must have length less than or equal to"
|
79
|
+
# We want to ensure that the WorkflowWorker doesn't just sit around and
|
80
|
+
# time the workflow out. If there is a validation failure possibly
|
81
|
+
# because of large inputs to child workflows/activities or large custom
|
82
|
+
# exceptions we should fail the workflow with some minimal details.
|
83
|
+
task_completed_request[:decisions] = [
|
84
|
+
{
|
85
|
+
decision_type: "FailWorkflowExecution",
|
86
|
+
fail_workflow_execution_decision_attributes: {
|
87
|
+
reason: Utilities.validation_error_string("Workflow"),
|
88
|
+
details: "AWS::SimpleWorkflow::Errors::ValidationException"
|
89
|
+
}
|
90
|
+
}
|
91
|
+
]
|
92
|
+
@service.respond_decision_task_completed(task_completed_request)
|
93
|
+
end
|
94
|
+
@logger.error "#{task.workflow_type.inspect} failed with exception: #{e.inspect}"
|
95
|
+
end
|
75
96
|
@logger.info Utilities.workflow_task_to_debug_string("Finished executing task", task)
|
76
97
|
rescue AWS::SimpleWorkflow::Errors::UnknownResourceFault => e
|
77
|
-
@logger.error "Error in the poller, #{e.
|
98
|
+
@logger.error "Error in the poller, #{e.inspect}"
|
78
99
|
rescue Exception => e
|
79
|
-
@logger.error "Error in the poller, #{e.
|
100
|
+
@logger.error "Error in the poller, #{e.inspect}"
|
80
101
|
end
|
81
102
|
end
|
82
103
|
end
|
@@ -135,19 +156,23 @@ module AWS
|
|
135
156
|
begin
|
136
157
|
context = ActivityExecutionContext.new(@service, @domain, task)
|
137
158
|
unless activity_implementation = @activity_definition_map[activity_type]
|
138
|
-
raise "This activity worker was told to work on activity type
|
159
|
+
raise "This activity worker was told to work on activity type "\
|
160
|
+
"#{activity_type.inspect}, but this activity worker only knows "\
|
161
|
+
"how to work on #{@activity_definition_map.keys.map(&:name).join' '}"
|
139
162
|
end
|
140
163
|
|
141
164
|
output, original_result, too_large = activity_implementation.execute(task.input, context)
|
142
165
|
|
143
|
-
@logger.debug "Responding on task_token #{task.task_token}
|
166
|
+
@logger.debug "Responding on task_token #{task.task_token.inspect}."
|
144
167
|
if too_large
|
145
|
-
@logger.error "
|
168
|
+
@logger.error "#{task.activity_type.inspect} failed: "\
|
169
|
+
"#{Utilities.validation_error_string_partial("Activity")} For "\
|
170
|
+
"reference, the result was #{original_result}"
|
146
171
|
|
147
172
|
respond_activity_task_failed_with_retry(
|
148
173
|
task.task_token,
|
149
|
-
|
150
|
-
|
174
|
+
Utilities.validation_error_string("Activity"),
|
175
|
+
""
|
151
176
|
)
|
152
177
|
elsif ! activity_implementation.execution_options.manual_completion
|
153
178
|
@service.respond_activity_task_completed(
|
@@ -156,15 +181,13 @@ module AWS
|
|
156
181
|
)
|
157
182
|
end
|
158
183
|
rescue ActivityFailureException => e
|
159
|
-
@logger.error "
|
160
|
-
|
184
|
+
@logger.error "#{task.activity_type.inspect} failed with exception: #{e.inspect}."
|
161
185
|
respond_activity_task_failed_with_retry(
|
162
186
|
task.task_token,
|
163
187
|
e.message,
|
164
188
|
e.details
|
165
189
|
)
|
166
190
|
end
|
167
|
-
#TODO all the completion stuffs
|
168
191
|
end
|
169
192
|
|
170
193
|
# Responds to the decider that the activity task has failed, and attempts
|
@@ -208,7 +231,28 @@ module AWS
|
|
208
231
|
# is cancelled.
|
209
232
|
#
|
210
233
|
def respond_activity_task_canceled(task_token, message)
|
211
|
-
|
234
|
+
|
235
|
+
begin
|
236
|
+
@service.respond_activity_task_canceled(
|
237
|
+
:task_token => task_token,
|
238
|
+
:details => message
|
239
|
+
)
|
240
|
+
rescue AWS::SimpleWorkflow::Errors::ValidationException => e
|
241
|
+
if e.message.include? "failed to satisfy constraint: Member must have length less than or equal to"
|
242
|
+
# We want to ensure that the ActivityWorker doesn't just sit
|
243
|
+
# around and time the activity out. If there is a validation failure
|
244
|
+
# possibly because of large custom exceptions we should fail the
|
245
|
+
# activity task with some minimal details
|
246
|
+
respond_activity_task_failed_with_retry(
|
247
|
+
task_token,
|
248
|
+
Utilities.validation_error_string("Activity"),
|
249
|
+
"AWS::SimpleWorkflow::Errors::ValidationException"
|
250
|
+
)
|
251
|
+
end
|
252
|
+
@logger.error "respond_activity_task_canceled call failed with "\
|
253
|
+
"exception: #{e.inspect}"
|
254
|
+
end
|
255
|
+
|
212
256
|
end
|
213
257
|
|
214
258
|
# Responds to the decider that the activity task has failed. No retry is
|
@@ -231,7 +275,28 @@ module AWS
|
|
231
275
|
#
|
232
276
|
def respond_activity_task_failed(task_token, reason, details)
|
233
277
|
@logger.debug "The task token to be reported on is #{task_token}"
|
234
|
-
|
278
|
+
|
279
|
+
begin
|
280
|
+
@service.respond_activity_task_failed(
|
281
|
+
task_token: task_token,
|
282
|
+
reason: reason.to_s,
|
283
|
+
details: details.to_s
|
284
|
+
)
|
285
|
+
rescue AWS::SimpleWorkflow::Errors::ValidationException => e
|
286
|
+
if e.message.include? "failed to satisfy constraint: Member must have length less than or equal to"
|
287
|
+
# We want to ensure that the ActivityWorker doesn't just sit
|
288
|
+
# around and time the activity out. If there is a validation failure
|
289
|
+
# possibly because of large custom exceptions we should fail the
|
290
|
+
# activity task with some minimal details
|
291
|
+
respond_activity_task_failed_with_retry(
|
292
|
+
task.task_token,
|
293
|
+
Utilities.validation_error_string("Activity"),
|
294
|
+
"AWS::SimpleWorkflow::Errors::ValidationException"
|
295
|
+
)
|
296
|
+
end
|
297
|
+
@logger.error "respond_activity_task_failed call failed with "\
|
298
|
+
"exception: #{e.inspect}"
|
299
|
+
end
|
235
300
|
end
|
236
301
|
|
237
302
|
# Processes the specified activity task.
|
@@ -263,18 +328,17 @@ module AWS
|
|
263
328
|
begin
|
264
329
|
execute(task)
|
265
330
|
rescue CancellationException => e
|
266
|
-
@logger.error "
|
331
|
+
@logger.error "#{task.activity_type.inspect} failed with exception: #{e.inspect}"
|
267
332
|
respond_activity_task_canceled_with_retry(task.task_token, e.message)
|
268
333
|
rescue Exception => e
|
269
|
-
@logger.error "
|
334
|
+
@logger.error "#{task.activity_type.inspect} failed with exception: #{e.inspect}"
|
270
335
|
respond_activity_task_failed_with_retry(task.task_token, e.message, e.backtrace)
|
271
|
-
#Do rescue stuff
|
272
336
|
ensure
|
273
337
|
@poll_semaphore.release
|
274
338
|
end
|
275
339
|
rescue Exception => e
|
276
340
|
semaphore_needs_release = true
|
277
|
-
@logger.error "
|
341
|
+
@logger.error "Error in the poller, exception: #{e.inspect}. stacktrace: #{e.backtrace}"
|
278
342
|
raise e
|
279
343
|
ensure
|
280
344
|
@poll_semaphore.release if semaphore_needs_release
|
@@ -307,7 +371,7 @@ module AWS
|
|
307
371
|
@logger.info Utilities.activity_task_to_debug_string("Got activity task", task)
|
308
372
|
end
|
309
373
|
rescue Exception => e
|
310
|
-
@logger.error "Error in the poller, #{e.
|
374
|
+
@logger.error "Error in the poller, #{e.inspect}"
|
311
375
|
@poll_semaphore.release
|
312
376
|
return false
|
313
377
|
end
|
@@ -40,6 +40,20 @@ module AWS
|
|
40
40
|
return "#{message} #{task.activity_type.name}.#{task.activity_type.version} with input: #{task.input} and task_token: #{task.task_token}"
|
41
41
|
end
|
42
42
|
|
43
|
+
# The following two methods are used to generate an error string when
|
44
|
+
# response size of a workflow or activity is greater than 32k.
|
45
|
+
def self.validation_error_string_partial(infix)
|
46
|
+
str = infix.downcase == "workflow" ? "A" : "An"
|
47
|
+
str += " #{infix} cannot send a response with data larger than "\
|
48
|
+
"#{FlowConstants::DATA_LIMIT} characters. Please limit the size of the "\
|
49
|
+
"response."
|
50
|
+
str
|
51
|
+
end
|
52
|
+
|
53
|
+
def self.validation_error_string(infix)
|
54
|
+
"#{self.validation_error_string_partial(infix)} You can look at the "\
|
55
|
+
"#{infix} Worker logs to see the original response."
|
56
|
+
end
|
43
57
|
|
44
58
|
# @api private
|
45
59
|
def self.drill_on_future(future)
|
@@ -73,6 +87,78 @@ module AWS
|
|
73
87
|
client_options
|
74
88
|
end
|
75
89
|
|
90
|
+
# @api private
|
91
|
+
# This method is used to truncate Activity and Workflow exceptions to
|
92
|
+
# fit them into responses to the SWF service.
|
93
|
+
def self.check_and_truncate_exception error, converter
|
94
|
+
|
95
|
+
# serialize the exception so that we can check the actual size of the
|
96
|
+
# payload.
|
97
|
+
converted_failure = converter.dump(error)
|
98
|
+
# get the reason/message of the exception
|
99
|
+
reason = error.message
|
100
|
+
|
101
|
+
# truncate the reason if needed and add a smaller version of the
|
102
|
+
# truncation string at the end
|
103
|
+
if reason.size > FlowConstants::REASON_LIMIT
|
104
|
+
# saving some space at the end to add the truncation string
|
105
|
+
reason = reason.slice(0, FlowConstants::REASON_LIMIT - FlowConstants::TRUNCATED.size)
|
106
|
+
reason += FlowConstants::TRUNCATED
|
107
|
+
end
|
108
|
+
|
109
|
+
if converted_failure.to_s.size > FlowConstants::DETAILS_LIMIT
|
110
|
+
detail_limit = FlowConstants::DETAILS_LIMIT - (reason.size + FlowConstants::TRUNCATION_OVERHEAD)
|
111
|
+
# Get the exception details if the exception is from the flow family of
|
112
|
+
# exceptions
|
113
|
+
details = error.details if error.respond_to? :details
|
114
|
+
# If you don't have details, you must be some other type of
|
115
|
+
# exception. We can't do anything exceedingly clever, so lets just get
|
116
|
+
# the stack trace and pop that out.
|
117
|
+
details ||= error.backtrace.join unless error.backtrace.nil?
|
118
|
+
details ||= ""
|
119
|
+
|
120
|
+
# If the exception was indeed a flow family of exceptions, then details
|
121
|
+
# inside would most likely be another exception. Instead of digging for
|
122
|
+
# more exceptions inside this one, let's just get all the information
|
123
|
+
# from this class and put it in a string so that we can truncate and
|
124
|
+
# serialize it.
|
125
|
+
if details.is_a? Exception
|
126
|
+
details = "exception.class=#{details.class}|exception.message=#{details.message}|exception.backtrace=#{details.backtrace}"
|
127
|
+
if details.respond_to? :details
|
128
|
+
details += "|exception.details=#{details.details}"
|
129
|
+
end
|
130
|
+
end
|
131
|
+
|
132
|
+
# truncate the details if needed and add truncation string at the end
|
133
|
+
if details.size > detail_limit
|
134
|
+
# saving some space at the end to add the truncation string
|
135
|
+
details = details.slice(0, detail_limit - FlowConstants::TRUNCATED.size)
|
136
|
+
details += FlowConstants::TRUNCATED
|
137
|
+
end
|
138
|
+
|
139
|
+
# Here we generate a new exception with the reason and details that we
|
140
|
+
# got above. We are using the 'exception' factory method instead of
|
141
|
+
# initializing it directly because Flow Exceptions' constructors are not
|
142
|
+
# uniform and could require 2..4 arguments. Whereas a regular ruby
|
143
|
+
# exception only requires 0..1. Other custom exceptions could require
|
144
|
+
# arbitrary number of arguments.
|
145
|
+
new_exception = error.exception(reason)
|
146
|
+
if new_exception.respond_to? :details
|
147
|
+
new_exception.details = details
|
148
|
+
else
|
149
|
+
new_exception.set_backtrace(details)
|
150
|
+
end
|
151
|
+
converted_failure = converter.dump(new_exception)
|
152
|
+
|
153
|
+
end
|
154
|
+
|
155
|
+
# Return back both - reason and exception so that the caller doesn't
|
156
|
+
# need to check whether this exception responds to :reason or not, i.e.
|
157
|
+
# whether this is a flow exception or a regular ruby exception
|
158
|
+
[reason, converted_failure]
|
159
|
+
|
160
|
+
end
|
161
|
+
|
76
162
|
|
77
163
|
# @api private
|
78
164
|
def self.interpret_block_for_options(option_class, block, use_defaults = false)
|
data/lib/aws/decider/version.rb
CHANGED
@@ -346,10 +346,10 @@ module AWS
|
|
346
346
|
client_options = Utilities::client_options_from_method_name(method_name, @options)
|
347
347
|
options = Utilities::merge_all_options(client_options, options)
|
348
348
|
|
349
|
-
@
|
349
|
+
@data_converter = options[:data_converter]
|
350
350
|
# Basically, we want to avoid the special "NoInput, but allow stuff like nil in"
|
351
351
|
if ! (input.class <= NoInput || input.empty?)
|
352
|
-
options[:input] = @
|
352
|
+
options[:input] = @data_converter.dump input
|
353
353
|
end
|
354
354
|
if @workflow_class.nil?
|
355
355
|
execution_method = @options.execution_method
|
@@ -50,15 +50,32 @@ module AWS
|
|
50
50
|
method_output.set(@instance.send(@workflow_method, *ruby_input))
|
51
51
|
end
|
52
52
|
end
|
53
|
-
t.rescue(Exception) do |
|
54
|
-
|
55
|
-
#
|
53
|
+
t.rescue(Exception) do |e|
|
54
|
+
|
55
|
+
# Check if serialized exception violates the 32k limit and truncate it
|
56
|
+
reason, converted_failure = AWS::Flow::Utilities::check_and_truncate_exception(e, @converter)
|
57
|
+
|
58
|
+
# Wrap the exception that we got into a WorkflowException so that it
|
59
|
+
# can be handled correctly.
|
60
|
+
|
61
|
+
@failure = WorkflowException.new(reason, converted_failure)
|
56
62
|
end
|
57
63
|
t.ensure do
|
58
64
|
raise @failure if @failure
|
59
|
-
|
65
|
+
# We are going to have to convert this object into a string to submit it,
|
66
|
+
# and that's where the 32k limit will be enforced, so it's valid to turn
|
67
|
+
# the object to a string and check the size of the result
|
68
|
+
output = @converter.dump method_output.get
|
69
|
+
|
70
|
+
if output.to_s.size > FlowConstants::DATA_LIMIT
|
71
|
+
raise WorkflowException.new(
|
72
|
+
Utilities.validation_error_string_partial("Workflow"),
|
73
|
+
""
|
74
|
+
)
|
75
|
+
end
|
76
|
+
result.set(output)
|
77
|
+
end
|
60
78
|
end
|
61
|
-
end
|
62
79
|
return result
|
63
80
|
end
|
64
81
|
|
data/lib/aws/flow/flow_utils.rb
CHANGED
data/lib/aws/runner.rb
CHANGED
@@ -194,7 +194,7 @@ module AWS
|
|
194
194
|
task_list = expand_task_list(w['task_list'])
|
195
195
|
|
196
196
|
# create a worker
|
197
|
-
worker = ActivityWorker.new(swf.client, domain, task_list, *w['activities']) {{
|
197
|
+
worker = ActivityWorker.new(swf.client, domain, task_list, *w['activities']) {{ execution_workers: fork_count }}
|
198
198
|
add_implementations(worker, w, {config_key: 'activity_classes',
|
199
199
|
clazz: AWS::Flow::Activities})
|
200
200
|
|
@@ -189,12 +189,11 @@ describe Activities do
|
|
189
189
|
|
190
190
|
activity :run_activity1, :run_activity2, :run_activity3, :run_activity4 do
|
191
191
|
{
|
192
|
-
default_task_heartbeat_timeout: 60,
|
193
192
|
version: "1.0",
|
194
193
|
default_task_list: "large_activity_task_list",
|
195
|
-
default_task_schedule_to_close_timeout:
|
196
|
-
default_task_schedule_to_start_timeout:
|
197
|
-
default_task_start_to_close_timeout:
|
194
|
+
default_task_schedule_to_close_timeout: 60,
|
195
|
+
default_task_schedule_to_start_timeout: 30,
|
196
|
+
default_task_start_to_close_timeout: 30,
|
198
197
|
exponential_retry: {
|
199
198
|
retries_per_exception: {
|
200
199
|
ActivityTaskTimedOutException => Float::INFINITY,
|
@@ -191,29 +191,288 @@ describe "RubyFlowDecider" do
|
|
191
191
|
@my_workflow_client = workflow_client(@domain.client, @domain) { { from_class: @workflow_class } }
|
192
192
|
end
|
193
193
|
|
194
|
-
|
195
|
-
|
196
|
-
|
197
|
-
|
198
|
-
|
199
|
-
|
200
|
-
|
201
|
-
|
194
|
+
describe "Workflow/Activity return values/exceptions" do
|
195
|
+
it "ensures that an activity returning more than 32k data fails the activity" do
|
196
|
+
general_test(:task_list => "ActivityTaskLargeOutput", :class_name => "ActivityTaskLargeOutput")
|
197
|
+
@activity_class.class_eval do
|
198
|
+
def run_activity1
|
199
|
+
# Make sure we return something that's over 32k. Note this won't
|
200
|
+
# necessarily work with all converters, as it's pretty trivially
|
201
|
+
# compressible
|
202
|
+
return ":" + "a" * 33000
|
203
|
+
end
|
202
204
|
end
|
205
|
+
workflow_execution = @my_workflow_client.start_execution
|
206
|
+
@worker.run_once
|
207
|
+
@activity_worker.run_once
|
208
|
+
@worker.run_once
|
209
|
+
wait_for_execution(workflow_execution)
|
210
|
+
history_events = workflow_execution.events.map(&:event_type)
|
211
|
+
# Previously, it would time out, as the failure would include the original
|
212
|
+
# large output that killed the completion and failure call. Thus, we need to
|
213
|
+
# check that we fail the ActivityTask.
|
214
|
+
history_events.should include "ActivityTaskFailed"
|
215
|
+
|
216
|
+
workflow_execution.events.to_a.last.attributes.details.should_not =~ /Psych/
|
217
|
+
workflow_execution.events.to_a.last.attributes.reason.should == Utilities.validation_error_string("Activity")
|
218
|
+
history_events.last.should == "WorkflowExecutionFailed"
|
219
|
+
end
|
220
|
+
|
221
|
+
it "ensures that an activity returning an exception of size more than 32k fails the activity correctly and truncates the message" do
|
222
|
+
general_test(:task_list => "ActivityTaskExceptionLargeOutput", :class_name => "ActivityTaskExceptionLargeOutput")
|
223
|
+
@activity_class.class_eval do
|
224
|
+
def run_activity1
|
225
|
+
raise ":" + "a" * 33000
|
226
|
+
end
|
227
|
+
end
|
228
|
+
workflow_execution = @my_workflow_client.start_execution
|
229
|
+
@worker.run_once
|
230
|
+
@activity_worker.run_once
|
231
|
+
@worker.run_once
|
232
|
+
wait_for_execution(workflow_execution)
|
233
|
+
history_events = workflow_execution.events.map(&:event_type)
|
234
|
+
# Previously, it would time out, as the failure would include the original
|
235
|
+
# large output that killed the completion and failure call. Thus, we need to
|
236
|
+
# check that we fail the ActivityTask.
|
237
|
+
history_events.should include "ActivityTaskFailed"
|
238
|
+
|
239
|
+
workflow_execution.events.to_a.last.attributes.details.should_not =~ /Psych/
|
240
|
+
history_events.last.should == "WorkflowExecutionFailed"
|
241
|
+
workflow_execution.events.to_a.last.attributes.reason.should include("[TRUNCATED]")
|
242
|
+
details = workflow_execution.events.to_a.last.attributes.details
|
243
|
+
exception = FlowConstants.default_data_converter.load(details)
|
244
|
+
exception.class.should == AWS::Flow::ActivityTaskFailedException
|
245
|
+
end
|
246
|
+
|
247
|
+
it "ensures that an activity returning a Cancellation Exception of size more than 32k fails the activity" do
|
248
|
+
general_test(:task_list => "ActivityTaskCancellationExceptionLargeOutput", :class_name => "ActivityTaskCancellationExceptionLargeOutput")
|
249
|
+
@activity_class.class_eval do
|
250
|
+
def run_activity1
|
251
|
+
raise CancellationException.new("a" * 33000)
|
252
|
+
end
|
253
|
+
end
|
254
|
+
workflow_execution = @my_workflow_client.start_execution
|
255
|
+
@worker.run_once
|
256
|
+
@activity_worker.run_once
|
257
|
+
@worker.run_once
|
258
|
+
wait_for_execution(workflow_execution)
|
259
|
+
history_events = workflow_execution.events.map(&:event_type)
|
260
|
+
history_events.should include "ActivityTaskFailed"
|
261
|
+
|
262
|
+
history_events.last.should == "WorkflowExecutionFailed"
|
263
|
+
event = workflow_execution.events.to_a.select { |x| x.event_type == "ActivityTaskFailed"}
|
264
|
+
event.first.attributes.reason.should == Utilities.validation_error_string("Activity")
|
265
|
+
event.first.attributes.details.should == "AWS::SimpleWorkflow::Errors::ValidationException"
|
266
|
+
end
|
267
|
+
|
268
|
+
it "ensures that a workflow output > 32k fails the workflow" do
|
269
|
+
general_test(:task_list => "WorkflowOutputTooLarge", :class_name => "WorkflowOutputTooLarge")
|
270
|
+
@workflow_class.class_eval do
|
271
|
+
def entry_point
|
272
|
+
return ":" + "a" * 33000
|
273
|
+
end
|
274
|
+
end
|
275
|
+
workflow_execution = @my_workflow_client.start_execution
|
276
|
+
@worker.run_once
|
277
|
+
wait_for_execution(workflow_execution)
|
278
|
+
last_event = workflow_execution.events.to_a.last
|
279
|
+
last_event.event_type.should == "WorkflowExecutionFailed"
|
280
|
+
last_event.attributes.reason.should == Utilities.validation_error_string_partial("Workflow")
|
281
|
+
end
|
282
|
+
|
283
|
+
it "ensures that a workflow exception details > 32k fails the workflow correctly and truncates the details" do
|
284
|
+
general_test(:task_list => "WorkflowExceptionDetailsTooLarge", :class_name => "WorkflowExceptionDetailsTooLarge")
|
285
|
+
@workflow_class.class_eval do
|
286
|
+
def entry_point
|
287
|
+
e = RuntimeError.new("a")
|
288
|
+
e.set_backtrace("a"*25769)
|
289
|
+
raise e
|
290
|
+
end
|
291
|
+
end
|
292
|
+
workflow_execution = @my_workflow_client.start_execution
|
293
|
+
@worker.run_once
|
294
|
+
wait_for_execution(workflow_execution)
|
295
|
+
last_event = workflow_execution.events.to_a.last
|
296
|
+
last_event.event_type.should == "WorkflowExecutionFailed"
|
297
|
+
details = workflow_execution.events.to_a.last.attributes.details
|
298
|
+
exception = FlowConstants.default_data_converter.load(details)
|
299
|
+
exception.class.should == RuntimeError
|
300
|
+
exception.backtrace.first.should include ("[TRUNCATED]")
|
301
|
+
end
|
302
|
+
|
303
|
+
it "ensures that a workflow exception message > 256 characters fails the workflow correctly and truncates the message" do
|
304
|
+
general_test(:task_list => "WorkflowExceptionMessageTooLarge", :class_name => "WorkflowExceptionMessageTooLarge")
|
305
|
+
@workflow_class.class_eval do
|
306
|
+
def entry_point
|
307
|
+
raise "a" * 257
|
308
|
+
end
|
309
|
+
end
|
310
|
+
workflow_execution = @my_workflow_client.start_execution
|
311
|
+
@worker.run_once
|
312
|
+
wait_for_execution(workflow_execution)
|
313
|
+
last_event = workflow_execution.events.to_a.last
|
314
|
+
last_event.event_type.should == "WorkflowExecutionFailed"
|
315
|
+
workflow_execution.events.to_a.last.attributes.reason.should include("[TRUNCATED]")
|
316
|
+
details = workflow_execution.events.to_a.last.attributes.details
|
317
|
+
exception = FlowConstants.default_data_converter.load(details)
|
318
|
+
exception.class.should == RuntimeError
|
319
|
+
end
|
320
|
+
|
321
|
+
|
322
|
+
it "ensures that a respond_decision_task_completed call with response > 32k that we can't truncate fails the workflow correctly" do
|
323
|
+
class CustomException < FlowException
|
324
|
+
def initialize(reason, details)
|
325
|
+
@something = "a"*50000
|
326
|
+
super(reason, details)
|
327
|
+
end
|
328
|
+
end
|
329
|
+
general_test(:task_list => "CustomWorkflowExceptionTooLarge", :class_name => "CustomWorkflowExceptionTooLarge")
|
330
|
+
@workflow_class.class_eval do
|
331
|
+
def entry_point
|
332
|
+
raise CustomException.new("asdf", "sdf")
|
333
|
+
end
|
334
|
+
end
|
335
|
+
workflow_execution = @my_workflow_client.start_execution
|
336
|
+
@worker.run_once
|
337
|
+
wait_for_execution(workflow_execution)
|
338
|
+
last_event = workflow_execution.events.to_a.last
|
339
|
+
last_event.event_type.should == "WorkflowExecutionFailed"
|
340
|
+
workflow_execution.events.to_a.last.attributes.reason.should == Utilities.validation_error_string("Workflow")
|
341
|
+
end
|
342
|
+
|
343
|
+
it "ensures that an activity input > 32k data fails the workflow" do
|
344
|
+
general_test(:task_list => "ActivityTaskLargeInput", :class_name => "ActivityTaskLargeInput")
|
345
|
+
@workflow_class.class_eval do
|
346
|
+
def entry_point
|
347
|
+
activity.run_activity1("A"*50000)
|
348
|
+
end
|
349
|
+
end
|
350
|
+
workflow_execution = @my_workflow_client.start_execution
|
351
|
+
worker = WorkflowWorker.new(@domain.client, @domain, "ActivityTaskLargeInput", @workflow_class)
|
352
|
+
worker.register
|
353
|
+
worker.run_once
|
354
|
+
wait_for_execution(workflow_execution)
|
355
|
+
last_event = workflow_execution.events.to_a.last
|
356
|
+
last_event.event_type.should == "WorkflowExecutionFailed"
|
357
|
+
last_event.attributes.reason.should == Utilities.validation_error_string("Workflow")
|
358
|
+
last_event.attributes.details.should == "AWS::SimpleWorkflow::Errors::ValidationException"
|
359
|
+
end
|
360
|
+
|
361
|
+
|
362
|
+
it "ensures that a child workflow input > 32k fails the workflow" do
|
363
|
+
general_test(:task_list => "ChildWorkflowInputTooLarge", :class_name => "ChildWorkflowInputTooLarge")
|
364
|
+
@workflow_class.class_eval do
|
365
|
+
workflow(:child) do
|
366
|
+
{
|
367
|
+
version: "1.0",
|
368
|
+
default_execution_start_to_close_timeout: 300,
|
369
|
+
default_task_list: "ChildWorkflowInputTooLarge",
|
370
|
+
prefix_name: "ChildWorkflowInputTooLargeWorkflow"
|
371
|
+
}
|
372
|
+
end
|
373
|
+
def entry_point
|
374
|
+
child_client = AWS::Flow::workflow_client(nil, nil) { { from_class: "ChildWorkflowInputTooLargeWorkflow" } }
|
375
|
+
child_client.child("A"*50000)
|
376
|
+
end
|
377
|
+
def child(input); end
|
378
|
+
end
|
379
|
+
|
380
|
+
worker = WorkflowWorker.new(@domain.client, @domain, "ChildWorkflowInputTooLarge", @workflow_class)
|
381
|
+
worker.register
|
382
|
+
workflow_execution = @my_workflow_client.start_execution
|
383
|
+
worker.run_once
|
384
|
+
|
385
|
+
wait_for_execution(workflow_execution)
|
386
|
+
last_event = workflow_execution.events.to_a.last
|
387
|
+
last_event.event_type.should == "WorkflowExecutionFailed"
|
388
|
+
workflow_execution.events.to_a.last.attributes.reason.should == Utilities.validation_error_string("Workflow")
|
389
|
+
workflow_execution.events.to_a.last.attributes.details.should == "AWS::SimpleWorkflow::Errors::ValidationException"
|
390
|
+
end
|
391
|
+
|
392
|
+
|
393
|
+
|
394
|
+
it "ensures that a child workflow exception > 32k fails the workflow correctly and truncates the stacktrace" do
|
395
|
+
general_test(:task_list => "ChildWorkflowExceptionTooLarge", :class_name => "ChildWorkflowExceptionTooLarge")
|
396
|
+
@workflow_class.class_eval do
|
397
|
+
workflow(:child) do
|
398
|
+
{
|
399
|
+
version: "1.0",
|
400
|
+
default_execution_start_to_close_timeout: 300,
|
401
|
+
default_task_list: "ChildWorkflowExceptionTooLarge",
|
402
|
+
prefix_name: "ChildWorkflowExceptionTooLargeWorkflow"
|
403
|
+
}
|
404
|
+
end
|
405
|
+
def entry_point
|
406
|
+
child_client = AWS::Flow::workflow_client(nil, nil) { { from_class: "ChildWorkflowExceptionTooLargeWorkflow" } }
|
407
|
+
child_client.child
|
408
|
+
end
|
409
|
+
def child
|
410
|
+
raise ":" + "a" * 33000
|
411
|
+
end
|
412
|
+
end
|
413
|
+
|
414
|
+
worker = WorkflowWorker.new(@domain.client, @domain, "ChildWorkflowExceptionTooLarge", @workflow_class)
|
415
|
+
worker.register
|
416
|
+
workflow_execution = @my_workflow_client.start_execution
|
417
|
+
worker.run_once
|
418
|
+
worker.run_once
|
419
|
+
worker.run_once
|
420
|
+
worker.run_once
|
421
|
+
|
422
|
+
wait_for_execution(workflow_execution)
|
423
|
+
last_event = workflow_execution.events.to_a.last
|
424
|
+
last_event.event_type.should == "WorkflowExecutionFailed"
|
425
|
+
workflow_execution.events.to_a.last.attributes.reason.should include("[TRUNCATED]")
|
426
|
+
details = workflow_execution.events.to_a.last.attributes.details
|
427
|
+
exception = FlowConstants.default_data_converter.load(details)
|
428
|
+
exception.class.should == AWS::Flow::ChildWorkflowFailedException
|
429
|
+
exception.cause.class.should == RuntimeError
|
430
|
+
end
|
431
|
+
|
432
|
+
|
433
|
+
it "ensures that a child child workflow exception > 32k fails the workflow correctly and truncates the stacktrace" do
|
434
|
+
general_test(:task_list => "ChildChildWorkflowExceptionTooLarge", :class_name => "ChildChildWorkflowExceptionTooLarge")
|
435
|
+
@workflow_class.class_eval do
|
436
|
+
workflow(:child, :child_1) do
|
437
|
+
{
|
438
|
+
version: "1.0",
|
439
|
+
default_execution_start_to_close_timeout: 300,
|
440
|
+
default_task_list: "ChildChildWorkflowExceptionTooLarge",
|
441
|
+
prefix_name: "ChildChildWorkflowExceptionTooLargeWorkflow"
|
442
|
+
}
|
443
|
+
end
|
444
|
+
def entry_point
|
445
|
+
child_client = AWS::Flow::workflow_client(nil, nil) { { from_class: "ChildChildWorkflowExceptionTooLargeWorkflow" } }
|
446
|
+
child_client.child
|
447
|
+
end
|
448
|
+
def child
|
449
|
+
child_1_client = AWS::Flow::workflow_client(nil, nil) { { from_class: "ChildChildWorkflowExceptionTooLargeWorkflow" } }
|
450
|
+
child_1_client.child_1
|
451
|
+
end
|
452
|
+
def child_1
|
453
|
+
raise ":" + "a" * 33000
|
454
|
+
end
|
455
|
+
end
|
456
|
+
worker = WorkflowWorker.new(@domain.client, @domain, "ChildChildWorkflowExceptionTooLarge", @workflow_class)
|
457
|
+
worker.register
|
458
|
+
workflow_execution = @my_workflow_client.start_execution
|
459
|
+
worker.run_once
|
460
|
+
worker.run_once
|
461
|
+
worker.run_once
|
462
|
+
worker.run_once
|
463
|
+
worker.run_once
|
464
|
+
worker.run_once
|
465
|
+
worker.run_once
|
466
|
+
|
467
|
+
wait_for_execution(workflow_execution)
|
468
|
+
last_event = workflow_execution.events.to_a.last
|
469
|
+
last_event.event_type.should == "WorkflowExecutionFailed"
|
470
|
+
workflow_execution.events.to_a.last.attributes.reason.should include("[TRUNCATED]")
|
471
|
+
details = workflow_execution.events.to_a.last.attributes.details
|
472
|
+
exception = FlowConstants.default_data_converter.load(details)
|
473
|
+
exception.class.should == AWS::Flow::ChildWorkflowFailedException
|
474
|
+
exception.cause.class.should == AWS::Flow::ChildWorkflowFailedException
|
203
475
|
end
|
204
|
-
workflow_execution = @my_workflow_client.start_execution
|
205
|
-
@worker.run_once
|
206
|
-
@activity_worker.run_once
|
207
|
-
@worker.run_once
|
208
|
-
wait_for_execution(workflow_execution)
|
209
|
-
history_events = workflow_execution.events.map(&:event_type)
|
210
|
-
# Previously, it would time out, as the failure would include the original
|
211
|
-
# large output that killed the completion and failure call. Thus, we need to
|
212
|
-
# check that we fail the ActivityTask.
|
213
|
-
history_events.should include "ActivityTaskFailed"
|
214
|
-
|
215
|
-
workflow_execution.events.to_a.last.attributes.details.should_not =~ /Psych/
|
216
|
-
history_events.last.should == "WorkflowExecutionFailed"
|
217
476
|
end
|
218
477
|
|
219
478
|
it "ensures that activities can be processed with different configurations" do
|
@@ -508,7 +767,7 @@ describe "RubyFlowDecider" do
|
|
508
767
|
def entry_point
|
509
768
|
domain = get_test_domain
|
510
769
|
wf = AWS::Flow.workflow_client(domain.client, domain) { { from_class: "FooBar" } }
|
511
|
-
wf.start_execution
|
770
|
+
wf.start_execution
|
512
771
|
end
|
513
772
|
end
|
514
773
|
workflow_execution = @my_workflow_client.start_execution
|
@@ -517,6 +776,7 @@ describe "RubyFlowDecider" do
|
|
517
776
|
@worker.run_once
|
518
777
|
child_worker.run_once
|
519
778
|
@worker.run_once
|
779
|
+
@worker.run_once
|
520
780
|
wait_for_execution(workflow_execution)
|
521
781
|
workflow_execution.events.map(&:event_type).last.should == "WorkflowExecutionFailed"
|
522
782
|
# Make sure this is actually caused by a child workflow failed
|
@@ -732,16 +992,17 @@ describe "RubyFlowDecider" do
|
|
732
992
|
general_test(:task_list => "exponential_retry_key", :class_name => "ExponentialRetryKey")
|
733
993
|
@workflow_class.class_eval do
|
734
994
|
def entry_point
|
735
|
-
activity.
|
995
|
+
activity.run_activity1 do
|
736
996
|
{
|
737
997
|
:exponential_retry => {:maximum_attempts => 1},
|
738
|
-
:
|
739
|
-
|
740
|
-
|
998
|
+
:schedule_to_start_timeout => 1
|
999
|
+
}
|
1000
|
+
end
|
741
1001
|
end
|
742
1002
|
end
|
1003
|
+
worker = WorkflowWorker.new(@domain.client, @domain, "exponential_retry_key", @workflow_class)
|
743
1004
|
workflow_execution = @my_workflow_client.start_execution
|
744
|
-
4.times {
|
1005
|
+
4.times { worker.run_once }
|
745
1006
|
wait_for_execution(workflow_execution)
|
746
1007
|
workflow_execution.events.to_a.last.event_type.should == "WorkflowExecutionFailed"
|
747
1008
|
end
|
@@ -1232,6 +1232,7 @@ describe "FakeHistory" do
|
|
1232
1232
|
#workflow_execution = my_workflow.start_execution
|
1233
1233
|
swf_client.trace.first[:decisions].first[:decision_type].should == "CompleteWorkflowExecution"
|
1234
1234
|
end
|
1235
|
+
|
1235
1236
|
end
|
1236
1237
|
|
1237
1238
|
describe "Misc tests" do
|
@@ -1283,3 +1284,258 @@ describe "Misc tests" do
|
|
1283
1284
|
end
|
1284
1285
|
end
|
1285
1286
|
|
1287
|
+
describe "Workflow/Activity return values/exceptions" do
|
1288
|
+
it "ensures that a workflow exception message > 32k fails the workflow correctly and truncates the message" do
|
1289
|
+
|
1290
|
+
class WorkflowOutputTooLarge
|
1291
|
+
extend Workflows
|
1292
|
+
workflow(:entry_point) do
|
1293
|
+
{
|
1294
|
+
version: "1.0",
|
1295
|
+
default_execution_start_to_close_timeout: 600,
|
1296
|
+
}
|
1297
|
+
end
|
1298
|
+
|
1299
|
+
def entry_point
|
1300
|
+
raise "a"*33000
|
1301
|
+
end
|
1302
|
+
end
|
1303
|
+
|
1304
|
+
class SynchronousWorkflowTaskPoller < WorkflowTaskPoller
|
1305
|
+
def get_decision_task
|
1306
|
+
TestHistoryWrapper.new($workflow_type, FakeWorkflowExecution.new(nil, nil),
|
1307
|
+
FakeEvents.new(["WorkflowExecutionStarted",
|
1308
|
+
"DecisionTaskScheduled",
|
1309
|
+
"DecisionTaskStarted",
|
1310
|
+
]))
|
1311
|
+
end
|
1312
|
+
end
|
1313
|
+
|
1314
|
+
$workflow_type = FakeWorkflowType.new(nil, "WorkflowOutputTooLarge.entry_point", "1.0")
|
1315
|
+
swf_client = FakeServiceClient.new
|
1316
|
+
domain = FakeDomain.new($workflow_type)
|
1317
|
+
client = AWS::Flow::workflow_client(swf_client, domain) { { from_class: "WorkflowOutputTooLarge" } }
|
1318
|
+
|
1319
|
+
task_list = "WorkflowsOutputTooLarge"
|
1320
|
+
|
1321
|
+
client.start_execution
|
1322
|
+
worker = SynchronousWorkflowWorker.new(swf_client, domain, task_list, WorkflowOutputTooLarge)
|
1323
|
+
worker.start
|
1324
|
+
|
1325
|
+
swf_client.trace.first[:decisions].first[:decision_type].should == "FailWorkflowExecution"
|
1326
|
+
|
1327
|
+
reason = swf_client.trace.first[:decisions].first[:fail_workflow_execution_decision_attributes][:reason]
|
1328
|
+
details = swf_client.trace.first[:decisions].first[:fail_workflow_execution_decision_attributes][:details]
|
1329
|
+
reason.should include("TRUNCATED")
|
1330
|
+
exception = FlowConstants.default_data_converter.load(details)
|
1331
|
+
exception.class.should == RuntimeError
|
1332
|
+
exception.message.should == "a"*245+"[TRUNCATED]"
|
1333
|
+
end
|
1334
|
+
|
1335
|
+
it "ensures that a workflow backtrace > 32k fails the workflow correctly and truncates the backtrace" do
|
1336
|
+
|
1337
|
+
class WorkflowOutputTooLarge
|
1338
|
+
extend Workflows
|
1339
|
+
workflow(:entry_point) do
|
1340
|
+
{
|
1341
|
+
version: "1.0",
|
1342
|
+
default_execution_start_to_close_timeout: 600,
|
1343
|
+
}
|
1344
|
+
end
|
1345
|
+
|
1346
|
+
def entry_point
|
1347
|
+
a = StandardError.new("SIMULATION")
|
1348
|
+
a.set_backtrace("a"*33000)
|
1349
|
+
raise a
|
1350
|
+
end
|
1351
|
+
end
|
1352
|
+
|
1353
|
+
class SynchronousWorkflowTaskPoller < WorkflowTaskPoller
|
1354
|
+
def get_decision_task
|
1355
|
+
TestHistoryWrapper.new($workflow_type, FakeWorkflowExecution.new(nil, nil),
|
1356
|
+
FakeEvents.new(["WorkflowExecutionStarted",
|
1357
|
+
"DecisionTaskScheduled",
|
1358
|
+
"DecisionTaskStarted",
|
1359
|
+
]))
|
1360
|
+
end
|
1361
|
+
end
|
1362
|
+
|
1363
|
+
$workflow_type = FakeWorkflowType.new(nil, "WorkflowOutputTooLarge.entry_point", "1.0")
|
1364
|
+
swf_client = FakeServiceClient.new
|
1365
|
+
domain = FakeDomain.new($workflow_type)
|
1366
|
+
client = AWS::Flow::workflow_client(swf_client, domain) { { from_class: "WorkflowOutputTooLarge" } }
|
1367
|
+
|
1368
|
+
task_list = "WorkflowsOutputTooLarge"
|
1369
|
+
|
1370
|
+
client.start_execution
|
1371
|
+
worker = SynchronousWorkflowWorker.new(swf_client, domain, task_list, WorkflowOutputTooLarge)
|
1372
|
+
worker.start
|
1373
|
+
|
1374
|
+
reason = swf_client.trace.first[:decisions].first[:fail_workflow_execution_decision_attributes][:reason]
|
1375
|
+
details = swf_client.trace.first[:decisions].first[:fail_workflow_execution_decision_attributes][:details]
|
1376
|
+
exception = FlowConstants.default_data_converter.load(details)
|
1377
|
+
exception.class.should == StandardError
|
1378
|
+
exception.message.should == "SIMULATION"
|
1379
|
+
exception.backtrace.first.should include("[TRUNCATED]")
|
1380
|
+
swf_client.trace.first[:decisions].first[:decision_type].should == "FailWorkflowExecution"
|
1381
|
+
end
|
1382
|
+
|
1383
|
+
it "ensures that a workflow output > 32k fails the workflow correctly" do
|
1384
|
+
|
1385
|
+
class WorkflowOutputTooLarge
|
1386
|
+
extend Workflows
|
1387
|
+
workflow(:entry_point) do
|
1388
|
+
{
|
1389
|
+
version: "1.0",
|
1390
|
+
default_execution_start_to_close_timeout: 600,
|
1391
|
+
}
|
1392
|
+
end
|
1393
|
+
|
1394
|
+
def entry_point
|
1395
|
+
return "a"*33000
|
1396
|
+
end
|
1397
|
+
end
|
1398
|
+
|
1399
|
+
class SynchronousWorkflowTaskPoller < WorkflowTaskPoller
|
1400
|
+
def get_decision_task
|
1401
|
+
TestHistoryWrapper.new($workflow_type, FakeWorkflowExecution.new(nil, nil),
|
1402
|
+
FakeEvents.new(["WorkflowExecutionStarted",
|
1403
|
+
"DecisionTaskScheduled",
|
1404
|
+
"DecisionTaskStarted",
|
1405
|
+
]))
|
1406
|
+
end
|
1407
|
+
end
|
1408
|
+
|
1409
|
+
$workflow_type = FakeWorkflowType.new(nil, "WorkflowOutputTooLarge.entry_point", "1.0")
|
1410
|
+
swf_client = FakeServiceClient.new
|
1411
|
+
domain = FakeDomain.new($workflow_type)
|
1412
|
+
client = AWS::Flow::workflow_client(swf_client, domain) { { from_class: "WorkflowOutputTooLarge" } }
|
1413
|
+
|
1414
|
+
task_list = "WorkflowsOutputTooLarge"
|
1415
|
+
|
1416
|
+
client.start_execution
|
1417
|
+
worker = SynchronousWorkflowWorker.new(swf_client, domain, task_list, WorkflowOutputTooLarge)
|
1418
|
+
worker.start
|
1419
|
+
swf_client.trace.first[:decisions].first[:decision_type].should == "FailWorkflowExecution"
|
1420
|
+
end
|
1421
|
+
|
1422
|
+
|
1423
|
+
it "ensures that child workflows returning exceptions > 32k get wrapped correctly" do
|
1424
|
+
|
1425
|
+
class ChildWorkflowOutputTooLargeTestWorkflow
|
1426
|
+
extend Workflows
|
1427
|
+
workflow(:entry_point, :child) do
|
1428
|
+
{
|
1429
|
+
version: "1.0",
|
1430
|
+
default_execution_start_to_close_timeout: 600,
|
1431
|
+
}
|
1432
|
+
end
|
1433
|
+
|
1434
|
+
def entry_point
|
1435
|
+
$my_workflow_client.child { { workflow_id: "child_workflow_test" }}
|
1436
|
+
end
|
1437
|
+
def child; end
|
1438
|
+
end
|
1439
|
+
class SynchronousWorkflowTaskPoller < WorkflowTaskPoller
|
1440
|
+
def get_decision_task
|
1441
|
+
fake_workflow_type = FakeWorkflowType.new(nil, "ChildWorkflowOutputTooLargeTestWorkflow.entry_point", "1.0")
|
1442
|
+
TestHistoryWrapper.new(fake_workflow_type, FakeWorkflowExecution.new(nil, nil),
|
1443
|
+
FakeEvents.new(["WorkflowExecutionStarted",
|
1444
|
+
"DecisionTaskScheduled",
|
1445
|
+
"DecisionTaskStarted",
|
1446
|
+
"DecisionTaskCompleted",
|
1447
|
+
["StartChildWorkflowExecutionInitiated", {:workflow_id => "child_workflow_test"}],
|
1448
|
+
["ChildWorkflowExecutionStarted", {:workflow_execution => FakeWorkflowExecution.new("1", "child_workflow_test"), :workflow_id => "child_workflow_test"}],
|
1449
|
+
"DecisionTaskScheduled",
|
1450
|
+
"DecisionTaskStarted",
|
1451
|
+
["ChildWorkflowExecutionFailed", {:workflow_execution => FakeWorkflowExecution.new("1", "child_workflow_test"), :workflow_id => "child_workflow_test", :workflow_type => fake_workflow_type, :reason => "a"*245+"[TRUNCATED]", :details => "SIMULATED"}],
|
1452
|
+
"DecisionTaskScheduled",
|
1453
|
+
"DecisionTaskStarted",
|
1454
|
+
]))
|
1455
|
+
end
|
1456
|
+
end
|
1457
|
+
workflow_type = FakeWorkflowType.new(nil, "ChildWorkflowOutputTooLargeTestWorkflow.entry_point", "1")
|
1458
|
+
|
1459
|
+
domain = FakeDomain.new(workflow_type)
|
1460
|
+
swf_client = FakeServiceClient.new
|
1461
|
+
$my_workflow_client = workflow_client(swf_client, domain) { { from_class: "ChildWorkflowOutputTooLargeTestWorkflow" } }
|
1462
|
+
|
1463
|
+
task_list = "ChildWorkflowOutputTooLargeTestWorkflow"
|
1464
|
+
|
1465
|
+
$my_workflow_client.start_execution
|
1466
|
+
worker = SynchronousWorkflowWorker.new(swf_client, domain, task_list, ChildWorkflowOutputTooLargeTestWorkflow)
|
1467
|
+
worker.start
|
1468
|
+
swf_client.trace.first[:decisions].first[:decision_type].should == "FailWorkflowExecution"
|
1469
|
+
reason = swf_client.trace.first[:decisions].first[:fail_workflow_execution_decision_attributes][:reason]
|
1470
|
+
details = swf_client.trace.first[:decisions].first[:fail_workflow_execution_decision_attributes][:details]
|
1471
|
+
reason.should include("TRUNCATED")
|
1472
|
+
exception = FlowConstants.default_data_converter.load(details)
|
1473
|
+
exception.class.should == AWS::Flow::ChildWorkflowFailedException
|
1474
|
+
exception.reason.should == "a"*245+"[TRUNCATED]"
|
1475
|
+
exception.details.should == "SIMULATED"
|
1476
|
+
end
|
1477
|
+
|
1478
|
+
it "ensures that activities returning exceptions > 32k get wrapped correctly" do
|
1479
|
+
|
1480
|
+
class ActivityExceptionTooLargeActivity
|
1481
|
+
extend Activities
|
1482
|
+
activity(:activity_a) do
|
1483
|
+
{
|
1484
|
+
version: "1.0"
|
1485
|
+
}
|
1486
|
+
end
|
1487
|
+
def activity_a; end
|
1488
|
+
end
|
1489
|
+
class ActivityExceptionTooLargeTestWorkflow
|
1490
|
+
extend Workflows
|
1491
|
+
workflow(:entry_point) do
|
1492
|
+
{
|
1493
|
+
version: "1.0",
|
1494
|
+
default_execution_start_to_close_timeout: 600,
|
1495
|
+
}
|
1496
|
+
end
|
1497
|
+
|
1498
|
+
activity_client(:client) { { from_class: "ActivityExceptionTooLargeActivity" } }
|
1499
|
+
def entry_point
|
1500
|
+
client.activity_a
|
1501
|
+
end
|
1502
|
+
end
|
1503
|
+
class SynchronousWorkflowTaskPoller < WorkflowTaskPoller
|
1504
|
+
def get_decision_task
|
1505
|
+
fake_workflow_type = FakeWorkflowType.new(nil, "ActivityExceptionTooLargeTestWorkflow.entry_point", "1.0")
|
1506
|
+
TestHistoryWrapper.new(fake_workflow_type, FakeWorkflowExecution.new(nil, nil),
|
1507
|
+
FakeEvents.new(["WorkflowExecutionStarted",
|
1508
|
+
"DecisionTaskScheduled",
|
1509
|
+
"DecisionTaskStarted",
|
1510
|
+
"DecisionTaskCompleted",
|
1511
|
+
["ActivityTaskScheduled", {:activity_id => "Activity1"}],
|
1512
|
+
"ActivityTaskStarted",
|
1513
|
+
["ActivityTaskFailed", scheduled_event_id: 5, activity_id: "Activity1", reason: "a"*245+"[TRUNCATED]", details: "SIMULATED"],
|
1514
|
+
"DecisionTaskScheduled",
|
1515
|
+
"DecisionTaskStarted",
|
1516
|
+
]))
|
1517
|
+
end
|
1518
|
+
end
|
1519
|
+
workflow_type = FakeWorkflowType.new(nil, "ActivityExceptionTooLargeTestWorkflow.entry_point", "1")
|
1520
|
+
|
1521
|
+
domain = FakeDomain.new(workflow_type)
|
1522
|
+
swf_client = FakeServiceClient.new
|
1523
|
+
$my_workflow_client = workflow_client(swf_client, domain) { { from_class: "ActivityExceptionTooLargeTestWorkflow" } }
|
1524
|
+
|
1525
|
+
task_list = "ActivityExceptionTooLargeTestWorkflow"
|
1526
|
+
|
1527
|
+
$my_workflow_client.start_execution
|
1528
|
+
worker = SynchronousWorkflowWorker.new(swf_client, domain, task_list, ActivityExceptionTooLargeTestWorkflow)
|
1529
|
+
worker.start
|
1530
|
+
swf_client.trace.first[:decisions].first[:decision_type].should == "FailWorkflowExecution"
|
1531
|
+
reason = swf_client.trace.first[:decisions].first[:fail_workflow_execution_decision_attributes][:reason]
|
1532
|
+
details = swf_client.trace.first[:decisions].first[:fail_workflow_execution_decision_attributes][:details]
|
1533
|
+
reason.should include("TRUNCATED")
|
1534
|
+
exception = FlowConstants.default_data_converter.load(details)
|
1535
|
+
exception.class.should == AWS::Flow::ActivityTaskFailedException
|
1536
|
+
exception.reason.should == "a"*245+"[TRUNCATED]"
|
1537
|
+
exception.details.should == "SIMULATED"
|
1538
|
+
end
|
1539
|
+
|
1540
|
+
end
|
1541
|
+
|
@@ -75,6 +75,29 @@ describe WorkflowClient do
|
|
75
75
|
client.workflow_b
|
76
76
|
end
|
77
77
|
|
78
|
+
it "ensures workflow client uses user supplied data_converter" do
|
79
|
+
class FooWorkflow
|
80
|
+
extend AWS::Flow::Workflows
|
81
|
+
workflow :foo_workflow do
|
82
|
+
{ version: "1.0" }
|
83
|
+
end
|
84
|
+
end
|
85
|
+
class FooDataConverter; end
|
86
|
+
|
87
|
+
swf = double(AWS::SimpleWorkflow)
|
88
|
+
domain = double(AWS::SimpleWorkflow::Domain)
|
89
|
+
|
90
|
+
swf.stub(:start_workflow_execution).and_return({"runId" => "111"})
|
91
|
+
domain.stub(:name)
|
92
|
+
array = []
|
93
|
+
domain.stub(:workflow_executions).and_return(array)
|
94
|
+
array.stub(:at)
|
95
|
+
|
96
|
+
client = AWS::Flow::workflow_client(swf, domain) { { from_class: "WorkflowClientTestWorkflow" } }
|
97
|
+
expect_any_instance_of(FooDataConverter).to receive(:dump)
|
98
|
+
client.start_execution(:foo_workflow, "some_input") { { data_converter: FooDataConverter.new } }
|
99
|
+
end
|
100
|
+
|
78
101
|
end
|
79
102
|
|
80
103
|
end
|
metadata
CHANGED
@@ -1,14 +1,14 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: aws-flow
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 2.0
|
4
|
+
version: 2.1.0
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- Michael Steger, Paritosh Mohan, Jacques Thomas
|
8
8
|
autorequire:
|
9
9
|
bindir: bin
|
10
10
|
cert_chain: []
|
11
|
-
date: 2014-
|
11
|
+
date: 2014-10-30 00:00:00.000000000 Z
|
12
12
|
dependencies:
|
13
13
|
- !ruby/object:Gem::Dependency
|
14
14
|
name: aws-sdk-v1
|