aws-flow 1.0.4 → 1.0.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
data/Rakefile CHANGED
@@ -20,9 +20,15 @@ Spec::Rake::SpecTask.new(:unit_tests) do |t|
20
20
  t.libs << 'lib'
21
21
  t.spec_opts = ['--color', '--format nested']
22
22
  t.spec_files = FileList['test/**/*.rb']
23
- t.spec_files.delete_if {|x| x =~ /.*factories.rb/ || x =~ /.*spec_helper.rb/ || x =~ /.*integration.*/}
23
+ t.spec_files.delete_if do |x|
24
+ (x =~ /.*factories.rb/) ||
25
+ (x =~ /.*spec_helper.rb/) ||
26
+ (x =~ /.*preinclude_tests.rb/) ||
27
+ (x =~ /.*integration.*/)
28
+ end
24
29
  t.spec_files.unshift("test/aws/factories.rb")
25
30
  t.spec_files.unshift("test/aws/spec_helper.rb")
31
+ t.spec_files.unshift("test/aws/preinclude_tests.rb")
26
32
  end
27
33
  task :test => :unit_tests
28
34
 
@@ -32,8 +38,13 @@ Spec::Rake::SpecTask.new do |t|
32
38
  #t.ruby_opts = ['-rspec/test/unit'] # Add this line in if you're using Test::Unit instead of RSpec
33
39
  t.spec_opts = ['--color', '--format nested']
34
40
  t.spec_files = FileList['test/**/*.rb']
35
- t.spec_files.delete_if {|x| x =~ /.*factories.rb/ || x =~ /.*spec_helper.rb/}
41
+ t.spec_files.delete_if do |x|
42
+ (x =~ /.*factories.rb/) ||
43
+ (x =~ /.*spec_helper.rb/) ||
44
+ (x =~ /.*preinclude_tests.rb/)
45
+ end
36
46
  t.spec_files.unshift("test/aws/factories.rb")
37
47
  t.spec_files.unshift("test/aws/spec_helper.rb")
48
+ t.spec_files.unshift("test/aws/preinclude_tests.rb")
38
49
  end
39
50
  task :integration_tests => :spec
@@ -170,8 +170,11 @@ module AWS
170
170
  activity_id = @decision_helper.get_activity_id(attributes[:scheduled_event_id])
171
171
  @decision_helper[activity_id].consume(:handle_completion_event)
172
172
  open_request_info = @decision_helper.scheduled_activities.delete(activity_id)
173
- reason = attributes[:reason]
174
- details = attributes[:details]
173
+ reason = attributes[:reason] if attributes.keys.include? :reason
174
+ reason ||= "The activity which failed did not provide a reason"
175
+ details = attributes[:details] if attributes.keys.include? :details
176
+ details ||= "The activity which failed did not provide details"
177
+
175
178
  # TODO consider adding user_context to open request, and adding it here
176
179
  # @decision_helper[@decision_helper.activity_scheduling_event_id_to_activity_id[event.attributes.scheduled_event_id]].attributes[:options].data_converter
177
180
  failure = ActivityTaskFailedException.new(event.id, activity_id, reason, details)
@@ -25,28 +25,42 @@ module AWS
25
25
  end
26
26
  def execute(command, options = nil)
27
27
  return schedule_with_retry(command, nil, Hash.new { |hash, key| hash[key] = 1 }, @clock.current_time, 0) if @return_on_start
28
- task do
29
- schedule_with_retry(command, nil, Hash.new { |hash, key| hash[key] = 1 }, @clock.current_time, 0)
28
+ output = Utilities::AddressableFuture.new
29
+ result_lock = Utilities::AddressableFuture.new
30
+ error_handler do |t|
31
+ t.begin do
32
+ output.set(schedule_with_retry(command, nil, Hash.new { |hash, key| hash[key] = 1 }, @clock.current_time, 0))
33
+ end
34
+ t.rescue(Exception) do |error|
35
+ @error_seen = error
36
+ end
37
+ t.ensure do
38
+ output.set unless output.set?
39
+ result_lock.set
40
+ end
30
41
  end
42
+ result_lock.get
43
+ raise @error_seen if @error_seen
44
+ output
31
45
  end
32
46
 
33
- def schedule_with_retry(command, failure, attempt, first_attempt_time, time_of_recorded_failure)
47
+ def schedule_with_retry(command, failure, attempts, first_attempt_time, time_of_recorded_failure)
34
48
  delay = -1
35
- if attempt.values.reduce(0, :+) > 1
49
+ if attempts.values.reduce(0, :+) > 1
36
50
  raise failure unless @retrying_policy.isRetryable(failure)
37
- delay = @retrying_policy.next_retry_delay_seconds(first_attempt_time, time_of_recorded_failure, attempt, failure, @execution_id)
51
+ delay = @retrying_policy.next_retry_delay_seconds(first_attempt_time, time_of_recorded_failure, attempts, failure, @execution_id)
38
52
  raise failure if delay < 0
39
53
  end
40
54
  if delay > 0
41
55
  task do
42
- @clock.create_timer(delay, lambda { invoke(command, attempt, first_attempt_time) })
56
+ @clock.create_timer(delay, lambda { invoke(command, attempts, first_attempt_time) })
43
57
  end
44
58
  else
45
- invoke(command, attempt, first_attempt_time)
59
+ invoke(command, attempts, first_attempt_time)
46
60
  end
47
61
  end
48
62
 
49
- def invoke(command, attempt, first_attempt_time)
63
+ def invoke(command, attempts, first_attempt_time)
50
64
  failure_to_retry = nil
51
65
  should_retry = Future.new
52
66
  return_value = Future.new
@@ -62,8 +76,8 @@ module AWS
62
76
  task do
63
77
  failure = should_retry.get
64
78
  if ! failure.nil?
65
- attempt[failure.class] += 1
66
- output.set(schedule_with_retry(command, failure, attempt, first_attempt_time, @clock.current_time - first_attempt_time))
79
+ attempts[failure.class] += 1
80
+ output.set(schedule_with_retry(command, failure, attempts, first_attempt_time, @clock.current_time - first_attempt_time))
67
81
  else
68
82
  output.set(return_value.get)
69
83
  end
@@ -94,6 +108,7 @@ module AWS
94
108
  @retries_per_exception = options.retries_per_exception
95
109
  @should_jitter = options.should_jitter
96
110
  @jitter_function = options.jitter_function
111
+ @options = options
97
112
  end
98
113
 
99
114
  # @param failure
@@ -125,29 +140,40 @@ module AWS
125
140
  #
126
141
  # @param time_of_recorded_failure
127
142
  #
128
- # @param attempt
143
+ # @param attempts
129
144
  #
130
145
  # @param failure
131
146
  #
132
- def next_retry_delay_seconds(first_attempt, time_of_recorded_failure, attempt, failure = nil, execution_id)
133
- if attempt.values.reduce(0, :+) < 2
134
- raise "This is bad, you have less than 2 attempts. More precisely, #{attempt} attempts"
147
+ def next_retry_delay_seconds(first_attempt, time_of_recorded_failure, attempts, failure = nil, execution_id)
148
+ if attempts.values.reduce(0, :+) < 2
149
+ raise "This is bad, you have less than 2 attempts. More precisely, #{attempts} attempts"
135
150
  end
136
151
  if @max_attempts && @max_attempts != "NONE"
137
- return -1 if attempt.values.reduce(0, :+) > @max_attempts + 1
152
+ return -1 if attempts.values.reduce(0, :+) > @max_attempts + 1
138
153
  end
139
154
  if failure && @retries_per_exception && @retries_per_exception.keys.include?(failure.class)
140
- return -1 if attempt[failure.class] > @retries_per_exception[failure.class]
155
+ return -1 if attempts[failure.class] > @retries_per_exception[failure.class]
141
156
  end
142
157
  return -1 if failure == nil
143
158
 
144
- # Check to see if we should jitter or not and pass in the jitter function to retry function accordingly.
145
- retry_seconds = @retry_function.call(first_attempt, time_of_recorded_failure, attempt)
159
+
160
+ # For reverse compatibility purposes, we must ensure that this function
161
+ # can take 3 arguments. However, we must also consume options in order
162
+ # for the default retry function to work correctly. Because we support
163
+ # ruby 1.9, we cannot use default arguments in a lambda, so we resort to
164
+ # the following workaround to supply a 4th argument if the function
165
+ # expects it.
166
+ call_args = [first_attempt, time_of_recorded_failure, attempts]
167
+ call_args << @options if @retry_function.arity == 4
168
+ retry_seconds = @retry_function.call(*call_args)
169
+ # Check to see if we should jitter or not and pass in the jitter
170
+ # function to retry function accordingly.
146
171
  if @should_jitter
147
172
  retry_seconds += @jitter_function.call(execution_id, retry_seconds/2)
148
173
  end
149
174
  return retry_seconds
150
175
  end
151
176
  end
177
+
152
178
  end
153
179
  end
@@ -111,8 +111,11 @@ module AWS
111
111
  :consume_symbol => :handle_completion_event,
112
112
  :decision_helper_scheduled => :scheduled_external_workflows,
113
113
  :handle_open_request => lambda do |event, open_request|
114
- reason = event.attributes.reason
115
- details = event.attributes.details
114
+ attributes = event.attributes
115
+ reason = attributes[:reason] if attributes.keys.include? :reason
116
+ reason ||= "The activity which failed did not provide a reason"
117
+ details = attributes[:details] if attributes.keys.include? :details
118
+ details ||= "The activity which failed did not provide details"
116
119
  # workflow_id = @decision_helper.child_initiated_event_id_to_workflow_id[event.attributes.initiated_event_id]
117
120
  # @decision_helper.scheduled_external_workflows[workflow_id]
118
121
  failure = ChildWorkflowFailedException.new(event.id, event.attributes[:workflow_execution], event.attributes.workflow_type, reason, details )
@@ -202,7 +205,7 @@ module AWS
202
205
  })
203
206
  end
204
207
 
205
- # Handler for the StartExternalWorkflowExecutionFailed event.
208
+ # Handler for the StartChildWorkflowExecutionFailed event.
206
209
  #
207
210
  # @param [Object] event The event instance.
208
211
  #
@@ -212,7 +215,7 @@ module AWS
212
215
  :consume_symbol => :handle_initiation_failed_event,
213
216
  :decision_helper_scheduled => :scheduled_external_workflows,
214
217
  :handle_open_request => lambda do |event, open_request|
215
- workflow_execution = AWS::SimpleWorkflow::WorkflowExecution.new("",event.attributes.workflow_id, event.attributes.run_id)
218
+ workflow_execution = AWS::SimpleWorkflow::WorkflowExecution.new("",event.attributes.workflow_id, nil)
216
219
  workflow_type = event.attributes.workflow_type
217
220
  cause = event.attributes.cause
218
221
  failure = StartChildWorkflowFailedException.new(event.id, workflow_execution, workflow_type, cause)
@@ -17,7 +17,7 @@ module AWS
17
17
  module Flow
18
18
 
19
19
 
20
- # Exception used to communicate failure during fulfillment of a decision sent to SWF. This exception and all its
20
+ # Exception used to communicate failure during fulfillment of a decision sent to SWF. This exception and all its
21
21
  # subclasses are expected to be thrown by the framework.
22
22
  class DecisionException < Exception
23
23
  attr_accessor :event_id
@@ -91,11 +91,19 @@ module AWS
91
91
  unless @pids.empty?
92
92
  @log.info "Exit requested, waiting up to #{timeout_seconds} seconds for child processes to finish"
93
93
 
94
- # check every second for child processes to finish
95
- timeout_seconds.times do
96
- sleep 1
97
- remove_completed_pids
98
- break if @pids.empty?
94
+ # If the timeout_seconds value is set to Float::INFINITY, it will wait indefinitely till all workers finish
95
+ # their work. This allows us to handle graceful shutdown of workers.
96
+ if timeout_seconds == Float::INFINITY
97
+ @log.info "Exit requested, waiting indefinitely till all child processes finish"
98
+ remove_completed_pids true while !@pids.empty?
99
+ else
100
+ @log.info "Exit requested, waiting up to #{timeout_seconds} seconds for child processes to finish"
101
+ # check every second for child processes to finish
102
+ timeout_seconds.times do
103
+ sleep 1
104
+ remove_completed_pids
105
+ break if @pids.empty?
106
+ end
99
107
  end
100
108
 
101
109
  # forcibly kill all remaining children
@@ -17,7 +17,7 @@ module AWS
17
17
  module Flow
18
18
  class FlowConstants
19
19
  class << self
20
- attr_reader :exponential_retry_maximum_retry_interval_seconds, :exponential_retry_retry_expiration_seconds, :exponential_retry_backoff_coefficient, :exponential_retry_maximum_attempts, :exponential_retry_function, :default_data_converter, :exponential_retry_exceptions_to_include, :exponential_retry_exceptions_to_exclude, :jitter_function, :should_jitter
20
+ attr_reader :exponential_retry_maximum_retry_interval_seconds, :exponential_retry_retry_expiration_seconds, :exponential_retry_backoff_coefficient, :exponential_retry_maximum_attempts, :exponential_retry_function, :default_data_converter, :exponential_retry_exceptions_to_include, :exponential_retry_exceptions_to_exclude, :jitter_function, :should_jitter, :exponential_retry_initial_retry_interval
21
21
  # # The maximum exponential retry interval, in seconds. Use the value -1 (the default) to set <i>no maximum</i>.
22
22
  # attr_reader :exponential_retry_maximum_retry_interval_seconds
23
23
 
@@ -48,14 +48,24 @@ module AWS
48
48
  @should_jitter = true
49
49
  @exponential_retry_exceptions_to_exclude = []
50
50
  @exponential_retry_exceptions_to_include = [Exception]
51
- @exponential_retry_function = lambda do |first, time_of_failure, attempts|
51
+ @exponential_retry_function = lambda do |first, time_of_failure, attempts, options|
52
+
52
53
  raise ArgumentError.new("first is not an instance of Time") unless first.instance_of?(Time)
53
54
  raise ArgumentError.new("time_of_failure can't be negative") if time_of_failure < 0
54
55
  raise ArgumentError.new("number of attempts can't be negative") if (attempts.values.find {|x| x < 0})
55
- result = @exponential_retry_initial_retry_interval * (@exponential_retry_backoff_coefficient ** (attempts.values.reduce(0, :+) - 2))
56
- result = @exponential_retry_maximum_retry_interval_seconds if @exponential_retry_maximum_retry_interval_seconds != INFINITY && result > @exponential_retry_maximum_retry_interval_seconds
56
+ raise ArgumentError.new("number of attempts should be more than 2") if (attempts.values.reduce(0,:+) < 2)
57
+ raise ArgumentError.new("user options must be of type ExponentialRetryOptions") unless options.is_a? ExponentialRetryOptions
58
+
59
+ initial_retry_interval = options.initial_retry_interval
60
+ backoff_coefficient = options.backoff_coefficient
61
+ maximum_retry_interval_seconds = options.maximum_retry_interval_seconds
62
+ retry_expiration_interval_seconds = options.retry_expiration_interval_seconds
63
+ result = initial_retry_interval * (backoff_coefficient ** (attempts.values.reduce(0, :+) - 2))
64
+ result = maximum_retry_interval_seconds if (! maximum_retry_interval_seconds.nil? && maximum_retry_interval_seconds != INFINITY && result > maximum_retry_interval_seconds)
57
65
  seconds_since_first_attempt = time_of_failure.zero? ? 0 : -(first - time_of_failure).to_i
58
- result = -1 if @exponential_retry_retry_expiration_seconds != INFINITY && (result + seconds_since_first_attempt) >= @exponential_retry_retry_expiration_seconds
66
+ result = -1 if (! retry_expiration_interval_seconds.nil? &&
67
+ retry_expiration_interval_seconds != INFINITY &&
68
+ (result + seconds_since_first_attempt) >= retry_expiration_interval_seconds)
59
69
  return result.to_i
60
70
  end
61
71
 
@@ -139,7 +139,7 @@ module AWS
139
139
  # @!visibility private
140
140
  def _retry(method_name, retry_function, block, args = NoInput.new)
141
141
  bail_if_external
142
- retry_options = Utilities::interpret_block_for_options(RetryOptions, block)
142
+ retry_options = Utilities::interpret_block_for_options(ExponentialRetryOptions, block)
143
143
  _retry_with_options(lambda { self.send(method_name, *args) }, retry_function, retry_options)
144
144
  end
145
145
 
@@ -41,6 +41,7 @@ module AWS
41
41
  AWS::Flow.send(:workflow_client, service, domain, &block)
42
42
  end
43
43
 
44
+
44
45
  # Execute a block with retries within a workflow context.
45
46
  #
46
47
  # @param options
@@ -50,14 +51,22 @@ module AWS
50
51
  # The block to execute.
51
52
  #
52
53
  def with_retry(options = {}, &block)
54
+ # TODO raise a specific error instead of a runtime error
53
55
  raise "with_retry can only be used inside a workflow context!" if Utilities::is_external
54
- retry_options = RetryOptions.new(options)
56
+ retry_options = ExponentialRetryOptions.new(options)
55
57
  retry_policy = RetryPolicy.new(retry_options.retry_function, retry_options)
56
58
  async_retrying_executor = AsyncRetryingExecutor.new(retry_policy, self.decision_context.workflow_clock, retry_options.return_on_start)
57
59
  future = async_retrying_executor.execute(lambda { block.call })
58
60
  Utilities::drill_on_future(future) unless retry_options.return_on_start
59
61
  end
60
62
 
63
+ def decision_context
64
+ FlowFiber.current[:decision_context]
65
+ end
66
+
67
+ module_function :decision_context
68
+
69
+ module_function :with_retry
61
70
 
62
71
  # @!visibility private
63
72
  def self.workflow_client(service = nil, domain = nil, &block)
@@ -156,13 +156,16 @@ module AWS
156
156
  def retry_function; FlowConstants.exponential_retry_function; end
157
157
  def exceptions_to_include; FlowConstants.exponential_retry_exceptions_to_include; end
158
158
  def exceptions_to_exclude; FlowConstants.exponential_retry_exceptions_to_exclude; end
159
+ def backoff_coefficient; FlowConstants.exponential_retry_backoff_coefficient; end
159
160
  def should_jitter; FlowConstants.should_jitter; end
160
161
  def jitter_function; FlowConstants.jitter_function; end
162
+ def initial_retry_interval; FlowConstants.exponential_retry_initial_retry_interval; end
161
163
  end
162
164
 
163
165
  # Retry options used with {GenericClient#retry} and {ActivityClient#exponential_retry}
164
166
  class RetryOptions < Options
165
167
  property(:is_retryable_function, [])
168
+ property(:initial_retry_interval, [])
166
169
  property(:exceptions_to_allow, [])
167
170
  property(:maximum_attempts, [lambda {|x| x == "NONE" ? "NONE" : x.to_i}])
168
171
  property(:maximum_retry_interval_seconds, [lambda {|x| x == "NONE" ? "NONE" : x.to_i}])
@@ -238,13 +241,14 @@ module AWS
238
241
  # The backoff coefficient to use. This is a floating point value that is multiplied with the current retry
239
242
  # interval after every retry attempt. The default value is 2.0, which means that each retry will take twice as
240
243
  # long as the previous.
241
- attr_accessor :backoff_coefficient
244
+ default_classes << RetryDefaults.new
245
+ property(:backoff_coefficient, [lambda(&:to_i)])
242
246
 
243
247
  # The retry expiration interval, in seconds. This will be increased after every retry attempt by the factor
244
248
  # provided in +backoff_coefficient+.
245
- attr_accessor :retry_expiration_interval_seconds
249
+ property(:retry_expiration_interval_seconds, [lambda(&:to_i)])
246
250
 
247
- def next_retry_delay_seconds(first_attmept, recorded_failure, attempts)
251
+ def next_retry_delay_seconds(first_attempt, recorded_failure, attempts)
248
252
  raise IllegalArgumentException "Attempt number is #{attempts}, when it needs to be greater than 1"
249
253
  if @maximum_attempts
250
254
  end
@@ -139,10 +139,10 @@ module AWS
139
139
  [:created, :handle_decision_task_started_event, :decision_sent],
140
140
  [:decision_sent, :cancel, :cancelled_before_initiated],
141
141
  [:decision_sent, :handle_initiated_event, :initiated],
142
- [:decision_sent, :handle_initiation_failed_event, :decision_sent],
142
+ [:decision_sent, :handle_initiation_failed_event, :completed],
143
143
  [:initiated, :cancel, :cancelled_after_initiated],
144
- [:initiated, :handle_completion_event, :completed],
145
- [:started, :handle_decision_task_started_event, :started],
144
+ [:initiated, :handle_completion_event, :completed],
145
+ [:started, :handle_decision_task_started_event, :started],
146
146
  ]
147
147
  self_transitions(:handle_decision_task_started_event)
148
148
 
@@ -80,16 +80,14 @@ module AWS
80
80
  end
81
81
 
82
82
  class ActivityTaskPoller
83
- def initialize(service, domain, task_list, activity_definition_map, options=nil)
83
+ def initialize(service, domain, task_list, activity_definition_map, executor, options=nil)
84
84
  @service = service
85
85
  @domain = domain
86
86
  @task_list = task_list
87
87
  @activity_definition_map = activity_definition_map
88
88
  @logger = options.logger if options
89
89
  @logger ||= Utilities::LogFactory.make_logger(self, "debug")
90
- max_workers = options.execution_workers if options
91
- max_workers = 20 if (max_workers.nil? || max_workers.zero?)
92
- @executor = ForkingExecutor.new(:max_workers => max_workers, :logger => @logger)
90
+ @executor = executor
93
91
 
94
92
  end
95
93
 
@@ -16,7 +16,7 @@
16
16
  module AWS
17
17
  module Flow
18
18
  def self.version
19
- "1.0.4"
19
+ "1.0.5"
20
20
  end
21
21
  end
22
22
  end
@@ -230,7 +230,27 @@ module AWS
230
230
  @activity_definition_map = {}
231
231
  @activity_type_options = []
232
232
  @options = Utilities::interpret_block_for_options(WorkerOptions, block)
233
+ @logger = @options.logger if @options
234
+ @logger ||= Utilities::LogFactory.make_logger(self, "debug")
235
+ max_workers = @options.execution_workers if @options
236
+ max_workers = 20 if (max_workers.nil? || max_workers.zero?)
237
+ @executor = ForkingExecutor.new(:max_workers => max_workers, :logger => @logger)
233
238
  super(service, domain, task_list, *args)
239
+
240
+ @shutting_down = false
241
+ %w{ TERM INT }.each do |signal|
242
+ Signal.trap(signal) do
243
+ if @shutting_down
244
+ @executor.shutdown 0
245
+ Kernel.exit! 1
246
+ else
247
+ @shutting_down = true
248
+ @executor.shutdown Float::INFINITY
249
+ Kernel.exit
250
+ end
251
+ end
252
+ end
253
+
234
254
  end
235
255
 
236
256
  # Adds an Activity implementation to this ActivityWorker.
@@ -297,7 +317,7 @@ module AWS
297
317
  #
298
318
  def start(should_register = true)
299
319
  register if should_register
300
- poller = ActivityTaskPoller.new(@service, @domain, @task_list, @activity_definition_map, @options)
320
+ poller = ActivityTaskPoller.new(@service, @domain, @task_list, @activity_definition_map, @executor, @options)
301
321
  loop do
302
322
  run_once(false, poller)
303
323
  end
@@ -313,7 +333,7 @@ module AWS
313
333
  #
314
334
  def run_once(should_register = true, poller = nil)
315
335
  register if should_register
316
- poller = ActivityTaskPoller.new(@service, @domain, @task_list, @activity_definition_map, @options) if poller.nil?
336
+ poller = ActivityTaskPoller.new(@service, @domain, @task_list, @activity_definition_map, @executor, @options) if poller.nil?
317
337
  poller.poll_and_process_single_task(@options.use_forking)
318
338
  end
319
339
  end
@@ -82,6 +82,7 @@ module AWS
82
82
  converter = method_pair.data_converter
83
83
  method_name = method_pair.method_name
84
84
  error_handler do |t|
85
+ parameters = nil
85
86
  t.begin do
86
87
  if input.class <= NoInput
87
88
  @instance.send(method_name)
@@ -389,6 +389,17 @@ describe "FakeHistory" do
389
389
  @trace ||= []
390
390
  @trace << task_completed_request
391
391
  end
392
+ def start_workflow_execution(options)
393
+ @trace ||= []
394
+ @trace << options
395
+ {"runId" => "blah"}
396
+ end
397
+ def register_activity_type(options)
398
+ end
399
+ def register_workflow_type(options)
400
+ end
401
+ def respond_activity_task_completed(task_token, result)
402
+ end
392
403
  def start_workflow_execution(options)
393
404
  {"runId" => "blah"}
394
405
  end
@@ -1174,8 +1185,8 @@ describe "FakeHistory" do
1174
1185
  worker.start
1175
1186
  swf_client.trace.first[:decisions].first[:start_timer_decision_attributes][:start_to_fire_timeout].should == "5"
1176
1187
  end
1177
-
1178
1188
  end
1189
+
1179
1190
  describe "Misc tests" do
1180
1191
  it "makes sure that Workflows is equivalent to Decider" do
1181
1192
  class TestDecider
@@ -1216,31 +1227,124 @@ describe "Misc tests" do
1216
1227
  end
1217
1228
 
1218
1229
  describe FlowConstants do
1230
+ options = {
1231
+ :initial_retry_interval => 1,
1232
+ :backoff_coefficient => 2,
1233
+ :should_jitter => false,
1234
+ :maximum_retry_interval_seconds => 100
1235
+ }
1236
+ options = ExponentialRetryOptions.new(options)
1219
1237
 
1220
1238
  it "will test the default retry function with regular cases" do
1221
1239
  test_first = [Time.now, Time.now, Time.now]
1222
1240
  test_time_of_failure = [0, 10, 100]
1223
- test_attempts = [{}, {Exception=>1}, {ActivityTaskTimedOutException=>5, Exception=>2}]
1224
- test_output = [0, 1, 64]
1241
+ test_attempts = [{Exception=>2}, {Exception=>4}, {ActivityTaskTimedOutException=>5, Exception=>2}]
1242
+ test_output = [1, 4, 32]
1225
1243
  arr = test_first.zip(test_time_of_failure, test_attempts, test_output)
1226
1244
  arr.each do |first, time_of_failure, attempts, output|
1227
- result = FlowConstants.exponential_retry_function.call(first, time_of_failure, attempts)
1245
+ result = FlowConstants.exponential_retry_function.call(first, time_of_failure, attempts, options)
1228
1246
  (result == output).should == true
1229
1247
  end
1230
1248
  end
1231
1249
 
1232
1250
  it "will test for exceptions" do
1233
- expect { FlowConstants.exponential_retry_function.call(-1, 1, {}) }.to raise_error(ArgumentError, "first is not an instance of Time")
1234
- expect { FlowConstants.exponential_retry_function.call(Time.now, -1, {}) }.to raise_error(ArgumentError, "time_of_failure can't be negative")
1235
- expect { FlowConstants.exponential_retry_function.call(Time.now, 1, {Exception=>-1}) }.to raise_error(ArgumentError, "number of attempts can't be negative")
1236
- expect { FlowConstants.exponential_retry_function.call(Time.now, 1, {Exception=>-1, ActivityTaskTimedOutException=>-10}) }.to raise_error(ArgumentError, "number of attempts can't be negative")
1237
- expect { FlowConstants.exponential_retry_function.call(Time.now, 1, {Exception=>2, ActivityTaskTimedOutException=>-10}) }.to raise_error(ArgumentError, "number of attempts can't be negative")
1251
+ expect { FlowConstants.exponential_retry_function.call(-1, 1, {}, options) }.to raise_error(ArgumentError, "first is not an instance of Time")
1252
+ expect { FlowConstants.exponential_retry_function.call(Time.now, -1, {}, options) }.to raise_error(ArgumentError, "time_of_failure can't be negative")
1253
+ expect { FlowConstants.exponential_retry_function.call(Time.now, 1, {Exception=>-1}, options) }.to raise_error(ArgumentError, "number of attempts can't be negative")
1254
+ expect { FlowConstants.exponential_retry_function.call(Time.now, 1, {Exception=>-1, ActivityTaskTimedOutException=>-10}, options) }.to raise_error(ArgumentError, "number of attempts can't be negative")
1255
+ expect { FlowConstants.exponential_retry_function.call(Time.now, 1, {Exception=>2, ActivityTaskTimedOutException=>-10}, options) }.to raise_error(ArgumentError, "number of attempts can't be negative")
1238
1256
  end
1239
1257
 
1240
1258
  end
1241
1259
 
1260
+ class TestActivity
1261
+ extend Activity
1262
+
1263
+ activity :run_activity1 do |o|
1264
+ o.default_task_heartbeat_timeout = "3600"
1265
+ o.default_task_list = "activity_task_list"
1266
+ o.default_task_schedule_to_close_timeout = "3600"
1267
+ o.default_task_schedule_to_start_timeout = "3600"
1268
+ o.default_task_start_to_close_timeout = "3600"
1269
+ o.version = "1"
1270
+ end
1271
+ def run_activity1
1272
+ "first regular activity"
1273
+ end
1274
+ def run_activity2
1275
+ "second regular activity"
1276
+ end
1277
+ end
1278
+
1279
+ class TestActivityWorker < ActivityWorker
1280
+
1281
+ attr_accessor :executor
1282
+ def initialize(service, domain, task_list, forking_executor, *args, &block)
1283
+ super(service, domain, task_list, *args)
1284
+ @executor = forking_executor
1285
+ end
1286
+ end
1242
1287
 
1288
+ describe ActivityWorker do
1243
1289
 
1290
+ it "will test whether the ActivityWorker shuts down cleanly when an interrupt is received" do
1291
+
1292
+ task_list = "TestWorkflow_tasklist"
1293
+ service = FakeServiceClient.new
1294
+ workflow_type_object = double("workflow_type", :name => "TestWorkflow.entry_point", :start_execution => "" )
1295
+ domain = FakeDomain.new(workflow_type_object)
1296
+ forking_executor = ForkingExecutor.new
1297
+ activity_worker = TestActivityWorker.new(service, domain, task_list, forking_executor)
1298
+
1299
+ activity_worker.add_activities_implementation(TestActivity)
1300
+ # Starts the activity worker in a forked process. Also, attaches an at_exit handler to the process. When the process
1301
+ # exits, the handler checks whether the executor's internal is_shutdown variable is set correctly or not.
1302
+ pid = fork do
1303
+ at_exit {
1304
+ activity_worker.executor.is_shutdown.should == true
1305
+ }
1306
+ activity_worker.start true
1307
+ end
1308
+ # Adding a sleep to let things get setup correctly (not ideal but going with this for now)
1309
+ sleep 1
1310
+ # Send an interrupt to the child process
1311
+ Process.kill("INT", pid)
1312
+ status = Process.waitall
1313
+ status[0][1].success?.should be_true
1314
+ end
1315
+
1316
+ # This method will take a long time to run, allowing us to test our shutdown scenarios
1317
+ def dumb_fib(n)
1318
+ n < 1 ? 1 : dumb_fib(n - 1) + dumb_fib(n - 2)
1319
+ end
1320
+
1321
+ it "will test whether the ActivityWorker shuts down immediately if two or more interrupts are received" do
1322
+ task_list = "TestWorkflow_tasklist"
1323
+ service = FakeServiceClient.new
1324
+ workflow_type_object = double("workflow_type", :name => "TestWorkflow.entry_point", :start_execution => "" )
1325
+ domain = FakeDomain.new(workflow_type_object)
1326
+ forking_executor = ForkingExecutor.new
1327
+ activity_worker = TestActivityWorker.new(service, domain, task_list, forking_executor)
1328
+
1329
+ activity_worker.add_activities_implementation(TestActivity)
1330
+ # Starts the activity worker in a forked process. Also, executes a task using the forking executor of the activity
1331
+ # worker. The executor will create a child process to run that task. The task (dumb_fib) is purposefully designed to
1332
+ # be long running so that we can test our shutdown scenario.
1333
+ pid = fork do
1334
+ activity_worker.executor.execute {
1335
+ dumb_fib(1000)
1336
+ }
1337
+ activity_worker.start true
1338
+ end
1339
+ # Adding a sleep to let things get setup correctly (not ideal but going with this for now)
1340
+ sleep 2
1341
+ # Send 2 interrupts to the child process
1342
+ 2.times { Process.kill("INT", pid); sleep 3 }
1343
+ status = Process.waitall
1344
+ status[0][1].success?.should be_false
1345
+ end
1346
+
1347
+ end
1244
1348
 
1245
1349
  describe "testing changing default values in RetryOptions and RetryPolicy" do
1246
1350
 
@@ -1340,4 +1444,20 @@ describe "testing changing default values in RetryOptions and RetryPolicy" do
1340
1444
  result = retry_policy.next_retry_delay_seconds(Time.now, 0, {Exception=>4}, Exception.new, 1)
1341
1445
  result.should == 10
1342
1446
  end
1447
+
1448
+ it "makes sure that the default retry function will use the user provided options" do
1449
+
1450
+ first = Time.now
1451
+ time_of_failure = 0
1452
+ attempts = {Exception=>2}
1453
+ options = {
1454
+ :initial_retry_interval => 10,
1455
+ :backoff_coefficient => 2,
1456
+ :should_jitter => false,
1457
+ :maximum_retry_interval_seconds => 5
1458
+ }
1459
+ options = ExponentialRetryOptions.new(options)
1460
+ result = FlowConstants.exponential_retry_function.call(first, time_of_failure, attempts, options)
1461
+ result.should == 5
1462
+ end
1343
1463
  end
@@ -510,6 +510,78 @@ describe "RubyFlowDecider" do
510
510
  @my_workflow_client = workflow_client(@swf.client, @domain) { {:from_class => @workflow_class} }
511
511
  end
512
512
 
513
+ it "ensures that not filling in details/reason for activity_task_failed is handled correctly" do
514
+ general_test(:task_list => "ActivityTaskFailedManually", :class_name => "ActivityTaskFailedManually")
515
+ $task_token = nil
516
+
517
+ @activity_class.class_eval do
518
+ activity :run_activityManual do
519
+ {
520
+ :default_task_heartbeat_timeout => "3600",
521
+ :default_task_list => task_list,
522
+ :task_schedule_to_start_timeout => 120,
523
+ :task_start_to_close_timeout => 120,
524
+ :version => "1",
525
+ :manual_completion => true
526
+ }
527
+ end
528
+ def run_activityManual
529
+ $task_token = activity_execution_context.task_token
530
+ end
531
+ end
532
+
533
+ @workflow_class.class_eval do
534
+ def entry_point
535
+ begin
536
+ activity.run_activityManual
537
+ rescue Exception => e
538
+ #pass
539
+ end
540
+ end
541
+ end
542
+
543
+ activity_worker = ActivityWorker.new(@swf.client, @domain, "ActivityTaskFailedManually", @activity_class) {{ :use_forking => false }}
544
+ activity_worker.register
545
+
546
+ workflow_execution = @my_workflow_client.start_execution
547
+ @worker.run_once
548
+ activity_worker.run_once
549
+
550
+ $swf.client.respond_activity_task_failed(:task_token => $task_token)
551
+
552
+ @worker.run_once
553
+ workflow_execution.events.map(&:event_type).last.should == "WorkflowExecutionCompleted"
554
+ end
555
+
556
+ it "ensures that raising inside a with_retry propagates up correctly" do
557
+ general_test(:task_list => "WithRetryPropagation", :class_name => "WithRetryPropagation")
558
+ @workflow_class.class_eval do
559
+ def entry_point
560
+ error = nil
561
+ begin
562
+ with_retry(:maximum_attempts => 1) { activity.run_activity1 }
563
+ rescue ActivityTaskFailedException => e
564
+ error = e
565
+ end
566
+ return error
567
+ end
568
+ end
569
+ @activity_class.class_eval do
570
+ def run_activity1
571
+ raise "Error!"
572
+ end
573
+ end
574
+ workflow_execution = @my_workflow_client.start_execution
575
+ @worker.run_once
576
+ @activity_worker.run_once
577
+ @worker.run_once
578
+ @worker.run_once
579
+ @activity_worker.run_once
580
+ @worker.run_once
581
+ workflow_execution.events.map(&:event_type).last.should == "WorkflowExecutionCompleted"
582
+ workflow_execution.events.to_a[-1].attributes.result.should =~ /Error!/
583
+ end
584
+
513
585
  it "ensures that backtraces are set correctly with yaml" do
514
586
  general_test(:task_list => "Backtrace_test", :class_name => "BacktraceTest")
515
587
  @workflow_class.class_eval do
@@ -538,114 +610,144 @@ describe "RubyFlowDecider" do
538
610
  # This also effectively tests "RequestCancelExternalWorkflowExecutionInitiated"
539
611
 
540
612
  # TODO: These three tests will sometimes fail, seemingly at random. We need to fix this.
541
- # it "ensures that handle_child_workflow_execution_canceled is correct" do
542
- # class OtherCancellationChildWorkflow
543
- # extend Workflows
544
- # workflow(:entry_point) { {:version => 1, :task_list => "new_child_workflow", :execution_start_to_close_timeout => 3600} }
545
- # def entry_point(arg)
546
- # create_timer(5)
547
- # end
548
- # end
549
- # class BadCancellationChildWorkflow
550
- # extend Workflows
551
- # workflow(:entry_point) { {:version => 1, :task_list => "new_parent_workflow", :execution_start_to_close_timeout => 3600} }
552
- # def other_entry_point
553
- # end
613
+ it "ensures that handle_child_workflow_execution_canceled is correct" do
614
+ class OtherCancellationChildWorkflow
615
+ extend Workflows
616
+ workflow(:entry_point) { {:version => 1, :task_list => "new_child_cancelled_workflow", :execution_start_to_close_timeout => 3600} }
617
+ def entry_point(arg)
618
+ create_timer(5)
619
+ end
620
+ end
621
+ class BadCancellationChildWorkflow
622
+ extend Workflows
623
+ workflow(:entry_point) { {:version => 1, :task_list => "new_parent_cancelled_workflow", :execution_start_to_close_timeout => 3600} }
554
624
 
555
- # def entry_point(arg)
556
- # client = workflow_client($swf.client, $domain) { {:from_class => "OtherCancellationChildWorkflow"} }
557
- # workflow_future = client.send_async(:start_execution, 5)
558
- # client.request_cancel_workflow_execution(workflow_future)
559
- # end
560
- # end
561
- # worker2 = WorkflowWorker.new(@swf.client, @domain, "new_child_workflow", OtherCancellationChildWorkflow)
562
- # worker2.register
563
- # worker = WorkflowWorker.new(@swf.client, @domain, "new_parent_workflow", BadCancellationChildWorkflow)
564
- # worker.register
565
- # client = workflow_client(@swf.client, @domain) { {:from_class => "BadCancellationChildWorkflow"} }
566
- # workflow_execution = client.entry_point(5)
567
-
568
- # worker.run_once
569
- # worker2.run_once
570
- # worker.run_once
571
- # workflow_execution.events.map(&:event_type).should include "ExternalWorkflowExecutionCancelRequested"
572
- # worker2.run_once
573
- # workflow_execution.events.map(&:event_type).should include "ChildWorkflowExecutionCanceled"
574
- # worker.run_once
575
- # workflow_execution.events.to_a.last.attributes.details.should =~ /AWS::Flow::Core::Cancellation/
576
- # end
625
+ def entry_point(arg)
626
+ client = workflow_client($swf.client, $domain) { {:from_class => "OtherCancellationChildWorkflow"} }
627
+ workflow_future = client.send_async(:start_execution, 5)
628
+ client.request_cancel_workflow_execution(workflow_future)
629
+ end
630
+ end
631
+ worker2 = WorkflowWorker.new(@swf.client, @domain, "new_child_cancelled_workflow", OtherCancellationChildWorkflow)
632
+ worker2.register
633
+ worker = WorkflowWorker.new(@swf.client, @domain, "new_parent_cancelled_workflow", BadCancellationChildWorkflow)
634
+ worker.register
635
+ client = workflow_client(@swf.client, @domain) { {:from_class => "BadCancellationChildWorkflow"} }
636
+ workflow_execution = client.entry_point(5)
577
637
 
578
- # it "ensures that handle_child_workflow_terminated is handled correctly" do
579
- # class OtherTerminationChildWorkflow
580
- # extend Workflows
581
- # workflow(:entry_point) { {:version => 1, :task_list => "new_child_workflow", :execution_start_to_close_timeout => 3600} }
638
+ worker.run_once
639
+ worker2.run_once
640
+ worker.run_once
641
+ workflow_execution.events.map(&:event_type).should include "ExternalWorkflowExecutionCancelRequested"
642
+ worker2.run_once
643
+ workflow_execution.events.map(&:event_type).should include "ChildWorkflowExecutionCanceled"
644
+ worker.run_once
645
+ workflow_execution.events.to_a.last.attributes.details.should =~ /AWS::Flow::Core::Cancellation/
646
+ end
582
647
 
583
- # def entry_point(arg)
584
- # create_timer(5)
585
- # end
648
+ it "ensures that handle_child_workflow_terminated is handled correctly" do
649
+ class OtherTerminationChildWorkflow
650
+ extend Workflows
651
+ workflow(:entry_point) { {:version => 1, :task_list => "new_child_terminated_workflow", :execution_start_to_close_timeout => 3600} }
586
652
 
587
- # end
588
- # $workflow_id = nil
589
- # class BadTerminationChildWorkflow
590
- # extend Workflows
591
- # workflow(:entry_point) { {:version => 1, :task_list => "new_parent_workflow", :execution_start_to_close_timeout => 3600} }
592
- # def other_entry_point
593
- # end
653
+ def entry_point(arg)
654
+ create_timer(5)
655
+ end
594
656
 
595
- # def entry_point(arg)
596
- # client = workflow_client($swf.client, $domain) { {:from_class => "OtherTerminationChildWorkflow"} }
597
- # workflow_future = client.send_async(:start_execution, 5)
598
- # $workflow_id = workflow_future.workflow_execution.workflow_id.get
599
- # end
600
- # end
601
- # worker2 = WorkflowWorker.new(@swf.client, @domain, "new_child_workflow", OtherTerminationChildWorkflow)
602
- # worker2.register
603
- # worker = WorkflowWorker.new(@swf.client, @domain, "new_parent_workflow", BadTerminationChildWorkflow)
604
- # worker.register
605
- # client = workflow_client(@swf.client, @domain) { {:from_class => "BadTerminationChildWorkflow"} }
606
- # workflow_execution = client.entry_point(5)
607
-
608
- # worker.run_once
609
- # worker2.run_once
610
- # $swf.client.terminate_workflow_execution({:workflow_id => $workflow_id, :domain => $domain.name})
611
- # worker.run_once
612
- # workflow_execution.events.to_a.last.attributes.details.should =~ /AWS::Flow::ChildWorkflowTerminatedException/
613
- # end
657
+ end
658
+ $workflow_id = nil
659
+ class BadTerminationChildWorkflow
660
+ extend Workflows
661
+ workflow(:entry_point) { {:version => 1, :task_list => "new_parent_terminated_workflow", :execution_start_to_close_timeout => 3600} }
662
+ def other_entry_point
663
+ end
614
664
 
615
- # it "ensures that handle_child_workflow_timed_out is handled correctly" do
616
- # class OtherTimedOutChildWorkflow
617
- # extend Workflows
618
- # workflow(:entry_point) { {:version => 1, :task_list => "new_child_workflow", :execution_start_to_close_timeout => 5} }
665
+ def entry_point(arg)
666
+ client = workflow_client($swf.client, $domain) { {:from_class => "OtherTerminationChildWorkflow"} }
667
+ workflow_future = client.send_async(:start_execution, 5)
668
+ $workflow_id = workflow_future.workflow_execution.workflow_id.get
669
+ end
670
+ end
671
+ worker2 = WorkflowWorker.new(@swf.client, @domain, "new_child_terminated_workflow", OtherTerminationChildWorkflow)
672
+ worker2.register
673
+ worker = WorkflowWorker.new(@swf.client, @domain, "new_parent_terminated_workflow", BadTerminationChildWorkflow)
674
+ worker.register
675
+ client = workflow_client(@swf.client, @domain) { {:from_class => "BadTerminationChildWorkflow"} }
676
+ workflow_execution = client.entry_point(5)
619
677
 
620
- # def entry_point(arg)
621
- # create_timer(5)
622
- # end
678
+ worker.run_once
679
+ worker2.run_once
680
+ $swf.client.terminate_workflow_execution({:workflow_id => $workflow_id, :domain => $domain.name})
681
+ worker.run_once
682
+ workflow_execution.events.to_a.last.attributes.details.should =~ /AWS::Flow::ChildWorkflowTerminatedException/
683
+ end
623
684
 
624
- # end
625
- # $workflow_id = nil
626
- # class BadTimedOutChildWorkflow
627
- # extend Workflows
628
- # workflow(:entry_point) { {:version => 1, :task_list => "new_parent_workflow", :execution_start_to_close_timeout => 3600} }
629
- # def other_entry_point
630
- # end
685
+ it "ensures that handle_child_workflow_timed_out is handled correctly" do
686
+ class OtherTimedOutChildWorkflow
687
+ extend Workflows
688
+ workflow(:entry_point) { {:version => 1, :task_list => "new_child_timed_out_workflow", :execution_start_to_close_timeout => 5} }
631
689
 
632
- # def entry_point(arg)
633
- # client = workflow_client($swf.client, $domain) { {:from_class => "OtherTimedOutChildWorkflow"} }
634
- # workflow_future = client.send_async(:start_execution, 5)
635
- # $workflow_id = workflow_future.workflow_execution.workflow_id.get
636
- # end
637
- # end
638
- # worker2 = WorkflowWorker.new(@swf.client, @domain, "new_child_workflow", OtherTimedOutChildWorkflow)
639
- # worker2.register
640
- # worker = WorkflowWorker.new(@swf.client, @domain, "new_parent_workflow", BadTimedOutChildWorkflow)
641
- # worker.register
642
- # client = workflow_client(@swf.client, @domain) { {:from_class => "BadTimedOutChildWorkflow"} }
643
- # workflow_execution = client.entry_point(5)
644
- # worker.run_once
645
- # sleep 8
646
- # worker.run_once
647
- # workflow_execution.events.to_a.last.attributes.details.should =~ /AWS::Flow::ChildWorkflowTimedOutException/
648
- # end
690
+ def entry_point(arg)
691
+ create_timer(5)
692
+ end
693
+
694
+ end
695
+ $workflow_id = nil
696
+ class BadTimedOutChildWorkflow
697
+ extend Workflows
698
+ workflow(:entry_point) { {:version => 1, :task_list => "new_parent_timed_out_workflow", :execution_start_to_close_timeout => 3600} }
699
+ def other_entry_point
700
+ end
701
+
702
+ def entry_point(arg)
703
+ client = workflow_client($swf.client, $domain) { {:from_class => "OtherTimedOutChildWorkflow"} }
704
+ workflow_future = client.send_async(:start_execution, 5)
705
+ $workflow_id = workflow_future.workflow_execution.workflow_id.get
706
+ end
707
+ end
708
+ worker2 = WorkflowWorker.new(@swf.client, @domain, "new_child_timed_out_workflow", OtherTimedOutChildWorkflow)
709
+ worker2.register
710
+ worker = WorkflowWorker.new(@swf.client, @domain, "new_parent_timed_out_workflow", BadTimedOutChildWorkflow)
711
+ worker.register
712
+ client = workflow_client(@swf.client, @domain) { {:from_class => "BadTimedOutChildWorkflow"} }
713
+ workflow_execution = client.entry_point(5)
714
+ worker.run_once
715
+ sleep 8
716
+ worker.run_once
717
+ workflow_execution.events.to_a.last.attributes.details.should =~ /AWS::Flow::ChildWorkflowTimedOutException/
718
+ end
719
+
720
+ it "ensures that handle_start_child_workflow_execution_failed is fine" do
721
+ general_test(:task_list => "handle_start_child_workflow_execution_failed", :class_name => "HandleStartChildWorkflowExecutionFailed")
722
+ class FooBar
723
+ extend Workflows
724
+ workflow :bad_workflow do
725
+ {
726
+ :version => "1",
727
+ :execution_start_to_close_timeout => 3600,
728
+ :task_list => "handle_start_child_workflow_execution_failed_child"
729
+ }
730
+ end
731
+ def bad_workflow
732
+ raise "Child workflow died"
733
+ end
734
+ end
735
+ @workflow_class.class_eval do
736
+ def entry_point
737
+ wf = AWS::Flow.workflow_client { { :prefix_name => "FooBar", :execution_method => 'bad_workflow', :version => "1", :execution_start_to_close_timeout => 3600, :task_list => "handle_start_child_workflow_execution_failed_child" } }
738
+ wf.start_execution("foo")
739
+ end
740
+ end
741
+ workflow_execution = @my_workflow_client.start_execution
742
+ child_worker = WorkflowWorker.new($swf.client, $domain, "handle_start_child_workflow_execution_failed_child", FooBar)
743
+ child_worker.register
744
+ @worker.run_once
745
+ child_worker.run_once
746
+ @worker.run_once
747
+ workflow_execution.events.map(&:event_type).last.should == "WorkflowExecutionFailed"
748
+ # Make sure this is actually caused by a child workflow failed
749
+ workflow_execution.events.to_a.last.attributes.details.should =~ /ChildWorkflowFailed/
750
+ end
649
751
 
650
752
  it "ensures that handle_timer_canceled is fine" do
651
753
  general_test(:task_list => "handle_timer_canceled", :class_name => "HandleTimerCanceled")
@@ -1057,7 +1159,8 @@ describe "RubyFlowDecider" do
1057
1159
  worker.run_once
1058
1160
  activity_worker.run_once
1059
1161
  worker.run_once
1060
- workflow_execution.events.to_a[6].attributes[:result].should =~ /1/
1162
+ activity_completed_index = workflow_execution.events.map(&:event_type).index("ActivityTaskCompleted")
1163
+ workflow_execution.events.to_a[activity_completed_index].attributes.result.should =~ /1\z/
1061
1164
  end
1062
1165
 
1063
1166
  it "makes sure that timers work" do
@@ -1316,8 +1419,10 @@ describe "RubyFlowDecider" do
1316
1419
  worker.run_once
1317
1420
  worker2.run_once
1318
1421
  worker.run_once
1422
+ # We have to find the index dynamically, because due to how scheduled/starts work, it isn't necessarily in the same place in our history.
1423
+ child_execution_completed_index = workflow_execution.events.map(&:event_type).index("ChildWorkflowExecutionCompleted")
1319
1424
 
1320
- workflow_execution.events.to_a[7].attributes.result.should =~ /1/
1425
+ workflow_execution.events.to_a[child_execution_completed_index].attributes.result.should =~ /1\z/
1321
1426
  end
1322
1427
 
1323
1428
  it "makes sure that the new way of doing child workflows works" do
@@ -2249,6 +2354,7 @@ describe "RubyFlowDecider" do
2249
2354
 
2250
2355
  worker.run_once
2251
2356
  internal_worker.run_once
2357
+
2252
2358
  # Make sure that we finish the execution and fail before reporting ack
2253
2359
  sleep 10
2254
2360
  worker.run_once
@@ -2587,7 +2693,7 @@ describe "RubyFlowDecider" do
2587
2693
  execution.events.map(&:event_type).last.should == "WorkflowExecutionCompleted"
2588
2694
  end
2589
2695
 
2590
- it "makes sure that workers don't error out on schedule_activity_task_failed" do
2696
+ it "makes sure that workflow errors out on schedule_activity_task_failed" do
2591
2697
  class BadActivityActivity
2592
2698
  extend Activity
2593
2699
  activity(:run_activity1) do
@@ -2608,7 +2714,7 @@ describe "RubyFlowDecider" do
2608
2714
  execution = client.with_opts(:task_list => "Foobarbaz").start_execution
2609
2715
  worker.run_once
2610
2716
  worker.run_once
2611
- execution.events.map(&:event_type).last.should == "DecisionTaskCompleted"
2717
+ execution.events.map(&:event_type).last.should == "WorkflowExecutionFailed"
2612
2718
  end
2613
2719
 
2614
2720
  it "makes sure that you can have arbitrary activity names with from_class" do
@@ -0,0 +1,148 @@
1
+
2
+ describe "will test a patch that makes sure with_retry and decision_context can be called without importing AWS::Flow module" do
3
+ before(:all) do
4
+ class FakeDomain
5
+ def initialize(workflow_type_object)
6
+ @workflow_type_object = workflow_type_object
7
+ end
8
+ def page; FakePage.new(@workflow_type_object); end
9
+ def workflow_executions; FakeWorkflowExecutionCollecton.new; end
10
+ def name; "fake_domain"; end
11
+ end
12
+
13
+ class FakeWorkflowExecutionCollecton
14
+ def at(workflow_id, run_id); "Workflow_execution"; end
15
+ end
16
+
17
+ # A fake service client used to mock out calls to the Simple Workflow Service
18
+ class FakeServiceClient
19
+ attr_accessor :trace
20
+ def respond_decision_task_completed(task_completed_request)
21
+ @trace ||= []
22
+ @trace << task_completed_request
23
+ end
24
+ def start_workflow_execution(options)
25
+ @trace ||= []
26
+ @trace << options
27
+ {"runId" => "blah"}
28
+ end
29
+ def register_workflow_type(options)
30
+ end
31
+ end
32
+
33
+ class SynchronousWorkflowWorker < AWS::Flow::WorkflowWorker
34
+ def start
35
+ poller = SynchronousWorkflowTaskPoller.new(@service, nil, AWS::Flow::DecisionTaskHandler.new(@workflow_definition_map), @task_list)
36
+ poller.poll_and_process_single_task
37
+ end
38
+ end
39
+
40
+ class FakeWorkflowType < AWS::Flow::WorkflowType
41
+ attr_accessor :domain, :name, :version
42
+ def initialize(domain, name, version)
43
+ @domain = domain
44
+ @name = name
45
+ @version = version
46
+ end
47
+ end
48
+
49
+ class TestHistoryWrapper
50
+ def initialize(workflow_type, events)
51
+ @workflow_type = workflow_type
52
+ @events = events
53
+ end
54
+ def workflow_execution
55
+ FakeWorkflowExecution.new
56
+ end
57
+ def task_token
58
+ "1"
59
+ end
60
+ def previous_started_event_id
61
+ 1
62
+ end
63
+ attr_reader :events, :workflow_type
64
+ end
65
+
66
+ class FakeWorkflowExecution
67
+ def run_id
68
+ "1"
69
+ end
70
+ end
71
+
72
+ class TestHistoryEvent < AWS::SimpleWorkflow::HistoryEvent
73
+ def initialize(event_type, event_id, attributes)
74
+ @event_type = event_type
75
+ @attributes = attributes
76
+ @event_id = event_id
77
+ @created_at = Time.now
78
+ end
79
+ end
80
+
81
+ class SynchronousWorkflowTaskPoller < AWS::Flow::WorkflowTaskPoller
82
+ def get_decision_tasks
83
+ workflow_type = FakeWorkflowType.new(nil, "TestWorkflow.entry_point", "1")
84
+ TestHistoryWrapper.new(workflow_type,
85
+ [TestHistoryEvent.new("WorkflowExecutionStarted", 1, {:parent_initiated_event_id=>0, :child_policy=>:request_cancel, :execution_start_to_close_timeout=>3600, :task_start_to_close_timeout=>5, :workflow_type=> workflow_type, :task_list=>"TestWorkflow_tasklist"}),
86
+ TestHistoryEvent.new("DecisionTaskScheduled", 2, {:parent_initiated_event_id=>0, :child_policy=>:request_cancel, :execution_start_to_close_timeout=>3600, :task_start_to_close_timeout=>5, :workflow_type=> workflow_type, :task_list=>"TestWorkflow_tastlist"}),
87
+ TestHistoryEvent.new("DecisionTaskStarted", 3, {:scheduled_event_id=>2, :identity=>"some_identity"}),
88
+ ])
89
+
90
+ end
91
+ end
92
+
93
+ end
94
+ it "makes sure that with_retry can be called without including AWS::Flow" do
95
+
96
+ class TestWorkflow
97
+ extend AWS::Flow::Workflows
98
+ workflow (:entry_point) { {:version => "1"} }
99
+
100
+ def entry_point
101
+ AWS::Flow::with_retry do
102
+ return "This is the entry point"
103
+ end
104
+ end
105
+ end
106
+
107
+ workflow_type_object = double("workflow_type", :name => "TestWorkflow.entry_point", :start_execution => "" )
108
+ domain = FakeDomain.new(workflow_type_object)
109
+ swf_client = FakeServiceClient.new
110
+ task_list = "TestWorkflow_tasklist"
111
+
112
+ workflow_worker = SynchronousWorkflowWorker.new(swf_client, domain, task_list)
113
+ workflow_worker.add_workflow_implementation(TestWorkflow)
114
+
115
+ workflow_client = AWS::Flow::WorkflowClient.new(swf_client, domain, TestWorkflow, AWS::Flow::StartWorkflowOptions.new)
116
+ workflow_client.start_execution
117
+
118
+ workflow_worker.start
119
+ end
120
+
121
+ it "makes sure that decision_context can be called without including AWS::Flow" do
122
+
123
+ class TestWorkflow
124
+ extend AWS::Flow::Workflows
125
+ workflow (:entry_point) { {:version => "1"} }
126
+
127
+ def entry_point
128
+ return AWS::Flow::decision_context.workflow_clock.current_time
129
+ end
130
+ end
131
+
132
+ workflow_type_object = double("workflow_type", :name => "TestWorkflow.entry_point", :start_execution => "" )
133
+ domain = FakeDomain.new(workflow_type_object)
134
+ swf_client = FakeServiceClient.new
135
+ task_list = "TestWorkflow_tasklist"
136
+
137
+ workflow_worker = SynchronousWorkflowWorker.new(swf_client, domain, task_list)
138
+ workflow_worker.add_workflow_implementation(TestWorkflow)
139
+
140
+ workflow_client = AWS::Flow::WorkflowClient.new(swf_client, domain, TestWorkflow, AWS::Flow::StartWorkflowOptions.new)
141
+ workflow_client.start_execution
142
+
143
+ workflow_worker.start
144
+ end
145
+
146
+ end
147
+
148
+
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: aws-flow
3
3
  version: !ruby/object:Gem::Version
4
- version: 1.0.4
4
+ version: 1.0.5
5
5
  prerelease:
6
6
  platform: ruby
7
7
  authors:
@@ -9,7 +9,7 @@ authors:
9
9
  autorequire:
10
10
  bindir: bin
11
11
  cert_chain: []
12
- date: 2013-10-04 00:00:00.000000000 Z
12
+ date: 2013-11-01 00:00:00.000000000 Z
13
13
  dependencies:
14
14
  - !ruby/object:Gem::Dependency
15
15
  name: aws-sdk
@@ -83,6 +83,7 @@ files:
83
83
  - test/aws/decider_spec.rb
84
84
  - test/aws/factories.rb
85
85
  - test/aws/integration_spec.rb
86
+ - test/aws/preinclude_tests.rb
86
87
  - test/aws/spec_helper.rb
87
88
  homepage:
88
89
  licenses: []