taskinator 0.0.15 → 0.0.16

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47) hide show
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +77 -0
  3. data/Gemfile.lock +2 -1
  4. data/README.md +1 -1
  5. data/lib/taskinator/api.rb +3 -1
  6. data/lib/taskinator/create_process_worker.rb +60 -0
  7. data/lib/taskinator/definition.rb +47 -8
  8. data/lib/taskinator/job_worker.rb +2 -2
  9. data/lib/taskinator/logger.rb +13 -75
  10. data/lib/taskinator/persistence.rb +48 -40
  11. data/lib/taskinator/process.rb +13 -6
  12. data/lib/taskinator/process_worker.rb +3 -1
  13. data/lib/taskinator/queues/delayed_job.rb +12 -5
  14. data/lib/taskinator/queues/resque.rb +16 -5
  15. data/lib/taskinator/queues/sidekiq.rb +14 -5
  16. data/lib/taskinator/queues.rb +12 -0
  17. data/lib/taskinator/task.rb +21 -8
  18. data/lib/taskinator/task_worker.rb +3 -1
  19. data/lib/taskinator/tasks.rb +1 -1
  20. data/lib/taskinator/version.rb +1 -1
  21. data/lib/taskinator.rb +20 -1
  22. data/spec/examples/queue_adapter_examples.rb +10 -0
  23. data/spec/spec_helper.rb +11 -0
  24. data/spec/support/mock_definition.rb +22 -0
  25. data/spec/support/spec_support.rb +3 -1
  26. data/spec/support/test_definition.rb +3 -0
  27. data/spec/support/test_process.rb +2 -0
  28. data/spec/support/test_queue.rb +7 -1
  29. data/spec/support/test_task.rb +2 -0
  30. data/spec/taskinator/api_spec.rb +2 -2
  31. data/spec/taskinator/create_process_worker_spec.rb +44 -0
  32. data/spec/taskinator/definition/builder_spec.rb +5 -5
  33. data/spec/taskinator/definition_spec.rb +103 -6
  34. data/spec/taskinator/executor_spec.rb +1 -1
  35. data/spec/taskinator/job_worker_spec.rb +3 -3
  36. data/spec/taskinator/persistence_spec.rb +12 -19
  37. data/spec/taskinator/process_spec.rb +29 -5
  38. data/spec/taskinator/process_worker_spec.rb +3 -3
  39. data/spec/taskinator/queues/delayed_job_spec.rb +23 -6
  40. data/spec/taskinator/queues/resque_spec.rb +27 -6
  41. data/spec/taskinator/queues/sidekiq_spec.rb +28 -10
  42. data/spec/taskinator/task_spec.rb +80 -7
  43. data/spec/taskinator/task_worker_spec.rb +3 -3
  44. data/spec/taskinator/{intermodal_spec.rb → taskinator_spec.rb} +35 -1
  45. data/spec/taskinator/tasks_spec.rb +1 -2
  46. data/taskinator.gemspec +17 -15
  47. metadata +30 -4
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA1:
3
- metadata.gz: bc6244f01294aaf18ec4f1236e7c77f1ed0ef79a
4
- data.tar.gz: ba1c09e29ee13118858d9af4e149002a94361018
3
+ metadata.gz: 1c4844721e58ee539e62099b0cf6a9181bad692b
4
+ data.tar.gz: 00d8e01dbf3b6170231946bbeb18856bde5a6e8d
5
5
  SHA512:
6
- metadata.gz: 4130f514ee353149642139cf6c1aac940850c697a42018d5204db7361dc9918f49d270ffb678fd9c99d9c9e53ffe6aea7598f522dd8d7620ec02e40ba58fead1
7
- data.tar.gz: 79f411acfc01f1da54bd639f323c93c442312ee374de99b3228c67ebdb4ca0cd8f6b6cb295c329ef54493b3ba2f0793e3a2dc709044be284ed0f1e1c7712a746
6
+ metadata.gz: e2b21e17464032c8e59cbe5909ba1121b83e9cb77623f0314352563e416f10056b11e36e30717a3091ffa725d5f4ae6f5cc7f337bedf50b7cc41e606721ca672
7
+ data.tar.gz: 9e342b0f899929985f87c86e063b503a310227adec593e6506a591ac8acc7ffc3cebd81b0159b091264f2da33574d842c05721a2d3c55c43759ec463ad6484e9
data/CHANGELOG.md ADDED
@@ -0,0 +1,77 @@
1
+ v0.0.16 - 25 Jun 2015
2
+ ---
3
+ Added ability to enqueue the creation of processes; added a new worker, `CreateProcessWorker`
4
+ Added support for instrumentation
5
+ Improvements to error handling
6
+ Bug fix for the persistence of the `queue` attribute for `Process` and `Task`
7
+ Code clean up and additional specs added
8
+
9
+ v0.0.15 - 28 May 2015
10
+ ---
11
+ Added ability to specify the queue to use when enqueuing processes, tasks and jobs
12
+ Improvements to specs for testing with sidekiq; added `rspec-sidekiq` as development dependency
13
+ Gem dependencies updated as per Gemnasium advisory
14
+
15
+ v0.0.14 - 12 May 2015
16
+ ---
17
+ Bug fix for fail! methods
18
+ Bug fix to parameter handling by for_each method
19
+
20
+ v0.0.13 - 11 May 2015
21
+ ---
22
+ Bug fix to `Taskinator::Api` for listing of processes; should only include top-level processes
23
+ Gem dependencies updated as per Gemnasium advisory
24
+
25
+ v0.0.12 - 20 Apr 2015
26
+ ---
27
+ Gem dependencies updated as per Gemnasium advisory
28
+
29
+ v0.0.11 - 2 Mar 2015
30
+ ---
31
+ Gem dependencies updated as per Gemnasium advisory
32
+
33
+ v0.0.10 - 26 Feb 2015
34
+ ---
35
+ Documentation updates
36
+
37
+ v0.0.9 - 19 Dec 2014
38
+ ---
39
+ Various bug fixes
40
+ Added error logging
41
+ Workflow states now include `complete` event
42
+ Gem dependencies updated as per Gemnasium advisory
43
+
44
+ v0.0.8 - 11 Nov 2014
45
+ ---
46
+ Added support for argument chaining with `for_each` and `transform`
47
+ Documentation updates
48
+ Gem dependencies updated as per Gemnasium advisory
49
+
50
+ v0.0.7 - 16 Oct 2014
51
+ ---
52
+ Added better option handling; introduced `option?(key)` method
53
+ Added support for defining the expected arguments for a process
54
+ Gem dependencies updated as per Gemnasium advisory
55
+
56
+ v0.0.5 - 17 Sep 2014
57
+ ---
58
+ Various bug fixes
59
+ Improved error handling
60
+ Added logging for queuing of processes, tasks and jobs
61
+
62
+ v0.0.4 - 12 Sep 2014
63
+ ---
64
+ Improvements to serialization; make use of GlobalID functionality
65
+ Added support for "job" tasks; reusing existing workers as tasks
66
+
67
+ v0.0.3 - 2 Sep 2014
68
+ ---
69
+ Added failure steps to workflow of processes and tasks
70
+
71
+ v0.0.2 - 12 Aug 2014
72
+ ---
73
+ Refactored how tasks are defined in definitions
74
+
75
+ v0.0.1 - 12 Aug 2014
76
+ ---
77
+ Initial release
data/Gemfile.lock CHANGED
@@ -8,7 +8,7 @@ GIT
8
8
  PATH
9
9
  remote: .
10
10
  specs:
11
- taskinator (0.0.15)
11
+ taskinator (0.0.16)
12
12
  connection_pool (>= 2.2.0)
13
13
  json (>= 1.8.2)
14
14
  redis (>= 3.2.1)
@@ -134,6 +134,7 @@ PLATFORMS
134
134
  ruby
135
135
 
136
136
  DEPENDENCIES
137
+ activesupport (>= 4.0.0)
137
138
  bundler (>= 1.6.0)
138
139
  coveralls (>= 0.7.0)
139
140
  delayed_job (>= 4.0.0)
data/README.md CHANGED
@@ -543,7 +543,7 @@ In reality, each task is executed by a worker process, possibly on another host,
543
543
  To monitor the state of the processes, use the `Taskinator::Api::Processes` class. This is still a work in progress.
544
544
 
545
545
  ```ruby
546
- processes = Taskinator::Api::Processes.new()
546
+ processes = Taskinator::Api::Processes.new
547
547
  processes.each do |process|
548
548
  # => output the unique process identifier and current state
549
549
  puts [:process, process.uuid, process.current_state.name]
@@ -3,7 +3,9 @@ module Taskinator
3
3
  class Processes
4
4
  include Enumerable
5
5
 
6
- def each(&block)
6
+ def each
7
+ return to_enum(__method__) unless block_given?
8
+
7
9
  instance_cache = {}
8
10
  Taskinator.redis do |conn|
9
11
  uuids = conn.smembers("taskinator:processes")
@@ -0,0 +1,60 @@
1
+ module Taskinator
2
+ class CreateProcessWorker
3
+
4
+ attr_reader :definition
5
+ attr_reader :uuid
6
+ attr_reader :args
7
+
8
+ def initialize(definition_name, uuid, args)
9
+
10
+ # convert to the module
11
+ @definition = constantize(definition_name)
12
+
13
+ # this will be uuid of the created process
14
+ @uuid = uuid
15
+
16
+ # convert to the typed arguments
17
+ @args = Taskinator::Persistence.deserialize(args)
18
+
19
+ end
20
+
21
+ def perform
22
+ @definition._create_process_(*@args, :uuid => @uuid).enqueue!
23
+ end
24
+
25
+ private
26
+
27
+ # borrowed from activesupport/lib/active_support/inflector/methods.rb
28
+ def constantize(camel_cased_word)
29
+ names = camel_cased_word.split('::')
30
+
31
+ # Trigger a built-in NameError exception including the ill-formed constant in the message.
32
+ Object.const_get(camel_cased_word) if names.empty?
33
+
34
+ # Remove the first blank element in case of '::ClassName' notation.
35
+ names.shift if names.size > 1 && names.first.empty?
36
+
37
+ names.inject(Object) do |constant, name|
38
+ if constant == Object
39
+ constant.const_get(name)
40
+ else
41
+ candidate = constant.const_get(name)
42
+ next candidate if constant.const_defined?(name, false)
43
+ next candidate unless Object.const_defined?(name)
44
+
45
+ # Go down the ancestors to check if it is owned directly. The check
46
+ # stops when we reach Object or the end of ancestors tree.
47
+ constant = constant.ancestors.inject do |const, ancestor|
48
+ break const if ancestor == Object
49
+ break ancestor if ancestor.const_defined?(name, false)
50
+ const
51
+ end
52
+
53
+ # owner is in Object, so raise
54
+ constant.const_get(name, false)
55
+ end
56
+ end
57
+ end
58
+
59
+ end
60
+ end
@@ -1,9 +1,17 @@
1
1
  module Taskinator
2
2
  module Definition
3
- class UndefinedProcessError < RuntimeError; end
3
+
4
+ # errors
5
+ class ProcessUndefinedError < StandardError; end
6
+ class ProcessAlreadyDefinedError < StandardError; end
7
+
8
+ # for backward compatibility
9
+ UndefinedProcessError = ProcessUndefinedError
4
10
 
5
11
  # defines a process
6
12
  def define_process(*arg_list, &block)
13
+ raise ProcessAlreadyDefinedError if respond_to?(:_create_process_)
14
+
7
15
  define_singleton_method :_create_process_ do |args, options={}|
8
16
 
9
17
  # TODO: better validation of arguments
@@ -12,28 +20,59 @@ module Taskinator
12
20
 
13
21
  raise ArgumentError, "wrong number of arguments (#{args.length} for #{arg_list.length})" if args.length < arg_list.length
14
22
 
15
- process = Process.define_sequential_process_for(self)
16
- Builder.new(process, self, *args).instance_eval(&block)
17
- process.save
23
+ process = Process.define_sequential_process_for(self, options)
18
24
 
19
- # if this is a root process, then add it to the list
20
- Persistence.add_process_to_list(process) unless options[:subprocess]
25
+ # this may take long... up to users definition
26
+ Taskinator.instrumenter.instrument(:create_process, :uuid => process.uuid) do
27
+ Builder.new(process, self, *args).instance_eval(&block)
28
+ end
29
+
30
+ # instrument separately
31
+ Taskinator.instrumenter.instrument(:save_process, :uuid => process.uuid) do
32
+ process.save
33
+ # if this is a root process, then add it to the list
34
+ Persistence.add_process_to_list(process) unless options[:subprocess]
35
+ end
21
36
 
22
37
  process
23
38
  end
24
39
  end
25
40
 
41
+ attr_accessor :queue
42
+
43
+ #
26
44
  # creates an instance of the process
27
45
  # NOTE: the supplied @args are serialized and ultimately passed to each method of the defined process
46
+ #
28
47
  def create_process(*args)
29
- raise UndefinedProcessError unless respond_to?(:_create_process_)
48
+ assert_valid_process_module
30
49
  _create_process_(args)
31
50
  end
32
51
 
52
+ #
53
+ # returns a placeholder process, with the uuid attribute of the
54
+ # actual process. the callee can call `reload` if required to
55
+ # get the actual process, once it has been built by the CreateProcessWorker
56
+ #
57
+ def create_process_remotely(*args)
58
+ assert_valid_process_module
59
+ uuid = SecureRandom.uuid
60
+ Taskinator.queue.enqueue_create_process(self, uuid, args)
61
+
62
+ Taskinator::Persistence::LazyLoader.new(Taskinator::Process, uuid)
63
+ end
64
+
33
65
  def create_sub_process(*args)
34
- raise UndefinedProcessError unless respond_to?(:_create_process_)
66
+ assert_valid_process_module
35
67
  _create_process_(args, :subprocess => true)
36
68
  end
69
+
70
+ private
71
+
72
+ def assert_valid_process_module
73
+ raise ProcessUndefinedError unless respond_to?(:_create_process_)
74
+ end
75
+
37
76
  end
38
77
  end
39
78
 
@@ -10,9 +10,9 @@ module Taskinator
10
10
  return if task.paused? || task.cancelled?
11
11
  begin
12
12
  task.start!
13
- task.perform &block
13
+ task.perform(&block)
14
14
  task.complete!
15
- rescue Exception => e
15
+ rescue => e
16
16
  Taskinator.logger.error(e)
17
17
  task.fail!(e)
18
18
  raise e
@@ -5,11 +5,6 @@
5
5
  # the LGPLv3 license. Please see <http://www.gnu.org/licenses/lgpl-3.0.html>
6
6
  # for license text.
7
7
  #
8
- # Sidekiq Pro has a commercial-friendly license allowing private forks
9
- # and modifications of Sidekiq. Please see http://sidekiq.org/pro/ for
10
- # more detail. You can find the commercial license terms in COMM-LICENSE.
11
- #
12
-
13
8
  require 'time'
14
9
  require 'logger'
15
10
 
@@ -28,82 +23,25 @@ module Taskinator
28
23
  end
29
24
  end
30
25
 
31
- def self.with_context(msg)
32
- begin
33
- Thread.current[:taskinator_context] = msg
34
- yield
35
- ensure
36
- Thread.current[:taskinator_context] = nil
37
- end
38
- end
39
-
40
- def self.initialize_logger(log_target = STDOUT)
41
- oldlogger = defined?(@logger) ? @logger : nil
42
- @logger = Logger.new(log_target)
43
- @logger.level = Logger::INFO
44
- @logger.formatter = Pretty.new
45
- oldlogger.close if oldlogger && !$TESTING # don't want to close testing's STDOUT logging
46
- @logger
47
- end
48
-
49
- def self.logger
50
- defined?(@logger) ? @logger : initialize_logger
51
- end
52
-
53
- def self.logger=(log)
54
- @logger = (log ? log : Logger.new('/dev/null'))
55
- end
56
-
57
- # This reopens ALL logfiles in the process that have been rotated
58
- # using logrotate(8) (without copytruncate) or similar tools.
59
- # A +File+ object is considered for reopening if it is:
60
- # 1) opened with the O_APPEND and O_WRONLY flags
61
- # 2) the current open file handle does not match its original open path
62
- # 3) unbuffered (as far as userspace buffering goes, not O_SYNC)
63
- # Returns the number of files reopened
64
- def self.reopen_logs
65
- to_reopen = []
66
- append_flags = File::WRONLY | File::APPEND
26
+ class << self
67
27
 
68
- ObjectSpace.each_object(File) do |fp|
69
- begin
70
- if !fp.closed? && fp.stat.file? && fp.sync && (fp.fcntl(Fcntl::F_GETFL) & append_flags) == append_flags
71
- to_reopen << fp
72
- end
73
- rescue IOError, Errno::EBADF
74
- end
28
+ def initialize_logger(log_target = STDOUT)
29
+ oldlogger = defined?(@logger) ? @logger : nil
30
+ @logger = Logger.new(log_target)
31
+ @logger.level = Logger::INFO
32
+ @logger.formatter = Pretty.new
33
+ oldlogger.close if oldlogger && !$TESTING # don't want to close testing's STDOUT logging
34
+ @logger
75
35
  end
76
36
 
77
- nr = 0
78
- to_reopen.each do |fp|
79
- orig_st = begin
80
- fp.stat
81
- rescue IOError, Errno::EBADF
82
- next
83
- end
84
-
85
- begin
86
- b = File.stat(fp.path)
87
- next if orig_st.ino == b.ino && orig_st.dev == b.dev
88
- rescue Errno::ENOENT
89
- end
37
+ def logger
38
+ defined?(@logger) ? @logger : initialize_logger
39
+ end
90
40
 
91
- begin
92
- File.open(fp.path, 'a') { |tmpfp| fp.reopen(tmpfp) }
93
- fp.sync = true
94
- nr += 1
95
- rescue IOError, Errno::EBADF
96
- # not much we can do...
97
- end
41
+ def logger=(log)
42
+ @logger = (log ? log : Logger.new('/dev/null'))
98
43
  end
99
- nr
100
- rescue RuntimeError => ex
101
- # RuntimeError: ObjectSpace is disabled; each_object will only work with Class, pass -X+O to enable
102
- puts "Unable to reopen logs: #{ex.message}"
103
- end
104
44
 
105
- def logger
106
- Taskinator::Logging.logger
107
45
  end
108
46
  end
109
47
  end
@@ -94,13 +94,14 @@ module Taskinator
94
94
 
95
95
  # persists the error information
96
96
  def fail(error=nil)
97
- return unless error
97
+ return unless error && error.is_a?(Exception)
98
+
98
99
  Taskinator.redis do |conn|
99
100
  conn.hmset(
100
101
  self.key,
101
102
  :error_type, error.class.name,
102
103
  :error_message, error.message,
103
- :error_backtrace, JSON.generate(error.backtrace)
104
+ :error_backtrace, JSON.generate(error.backtrace || [])
104
105
  )
105
106
  end
106
107
  end
@@ -182,23 +183,8 @@ module Taskinator
182
183
 
183
184
  def visit_args(attribute)
184
185
  values = @instance.send(attribute)
185
-
186
- # special case, convert models to global id's
187
- if values.is_a?(Array)
188
-
189
- values = values.collect {|value|
190
- value.respond_to?(:global_id) ? value.global_id : value
191
- }
192
-
193
- elsif values.is_a?(Hash)
194
-
195
- values.each {|key, value|
196
- values[key] = value.global_id if value.respond_to?(:global_id)
197
- }
198
-
199
- end
200
-
201
- @hmset += [attribute, YAML.dump(values)]
186
+ yaml = Taskinator::Persistence.serialize(values)
187
+ @hmset += [attribute, yaml]
202
188
  end
203
189
  end
204
190
 
@@ -289,30 +275,12 @@ module Taskinator
289
275
  def visit_args(attribute)
290
276
  yaml = @attribute_values[attribute]
291
277
  if yaml
292
- values = YAML.load(yaml)
293
-
294
- # special case for models, so find model
295
- if values.is_a?(Array)
296
-
297
- values = values.collect {|value|
298
- # is it a global id?
299
- value.respond_to?(:model_id) && value.respond_to?(:find) ? value.find : value
300
- }
301
-
302
- elsif values.is_a?(Hash)
303
-
304
- values.each {|key, value|
305
- # is it a global id?
306
- values[key] = value.find if value.respond_to?(:model_id) && value.respond_to?(:find)
307
- }
308
-
309
- end
310
-
278
+ values = Taskinator::Persistence.deserialize(yaml)
311
279
  @instance.instance_variable_set("@#{attribute}", values)
312
280
  end
313
281
  end
314
282
 
315
- private
283
+ private
316
284
 
317
285
  #
318
286
  # creates a proxy for the instance which
@@ -342,17 +310,57 @@ module Taskinator
342
310
  # E.g. this is useful for tasks which refer to their parent processes
343
311
  #
344
312
 
345
- def initialize(type, uuid, instance_cache)
313
+ def initialize(type, uuid, instance_cache={})
346
314
  @type = type
347
315
  @uuid = uuid
348
316
  @instance_cache = instance_cache
349
317
  end
350
318
 
319
+ attr_reader :uuid # shadows the real method, but will be the same!
320
+
321
+ # attempts to reload the actual process
322
+ def reload
323
+ @instance = nil
324
+ __getobj__
325
+ @instance ? true : false
326
+ end
327
+
351
328
  def __getobj__
352
329
  # only fetch the object as needed
353
330
  # and memoize for subsequent calls
354
331
  @instance ||= @type.fetch(@uuid, @instance_cache)
355
332
  end
356
333
  end
334
+
335
+ class << self
336
+ def serialize(values)
337
+ # special case, convert models to global id's
338
+ if values.is_a?(Array)
339
+ values = values.collect {|value|
340
+ value.respond_to?(:global_id) ? value.global_id : value
341
+ }
342
+ elsif values.is_a?(Hash)
343
+ values.each {|key, value|
344
+ values[key] = value.global_id if value.respond_to?(:global_id)
345
+ }
346
+ end
347
+ YAML.dump(values)
348
+ end
349
+
350
+ def deserialize(yaml)
351
+ values = YAML.load(yaml)
352
+ if values.is_a?(Array)
353
+ values = values.collect {|value|
354
+ (value.respond_to?(:model_id) && value.respond_to?(:find)) ? value.find : value
355
+ }
356
+ elsif values.is_a?(Hash)
357
+ values.each {|key, value|
358
+ values[key] = value.find if value.respond_to?(:model_id) && value.respond_to?(:find)
359
+ }
360
+ end
361
+ values
362
+ end
363
+ end
364
+
357
365
  end
358
366
  end
@@ -20,6 +20,7 @@ module Taskinator
20
20
  attr_reader :uuid
21
21
  attr_reader :definition
22
22
  attr_reader :options
23
+ attr_reader :queue
23
24
 
24
25
  # in the case of sub process tasks, the containing task
25
26
  attr_accessor :parent
@@ -28,13 +29,14 @@ module Taskinator
28
29
  raise ArgumentError, 'definition' if definition.nil?
29
30
  raise ArgumentError, "#{definition.name} does not extend the #{Definition.name} module" unless definition.kind_of?(Definition)
30
31
 
31
- @uuid = SecureRandom.uuid
32
+ @uuid = options.delete(:uuid) || SecureRandom.uuid
32
33
  @definition = definition
33
34
  @options = options
35
+ @queue = options.delete(:queue)
34
36
  end
35
37
 
36
38
  def tasks
37
- @tasks ||= Tasks.new()
39
+ @tasks ||= Tasks.new
38
40
  end
39
41
 
40
42
  def accept(visitor)
@@ -43,6 +45,7 @@ module Taskinator
43
45
  visitor.visit_type(:definition)
44
46
  visitor.visit_tasks(tasks)
45
47
  visitor.visit_args(:options)
48
+ visitor.visit_attribute(:queue)
46
49
  end
47
50
 
48
51
  def <=>(other)
@@ -53,10 +56,6 @@ module Taskinator
53
56
  "#<#{self.class.name}:#{uuid}>"
54
57
  end
55
58
 
56
- def queue
57
- options[:queue]
58
- end
59
-
60
59
  workflow do
61
60
  state :initial do
62
61
  event :enqueue, :transitions_to => :enqueued
@@ -201,5 +200,13 @@ module Taskinator
201
200
  visitor.visit_attribute(:complete_on)
202
201
  end
203
202
  end
203
+
204
+ # reloads the process from storage
205
+ # NB: only implemented by LazyLoader so that
206
+ # the process can be lazy loaded, thereafter
207
+ # it has no effect
208
+ def reload
209
+ false
210
+ end
204
211
  end
205
212
  end
@@ -1,5 +1,7 @@
1
1
  module Taskinator
2
2
  class ProcessWorker
3
+ attr_reader :uuid
4
+
3
5
  def initialize(uuid)
4
6
  @uuid = uuid
5
7
  end
@@ -9,7 +11,7 @@ module Taskinator
9
11
  return if process.paused? || process.cancelled?
10
12
  begin
11
13
  process.start!
12
- rescue Exception => e
14
+ rescue => e
13
15
  Taskinator.logger.error(e)
14
16
  process.fail!(e)
15
17
  raise e
@@ -9,11 +9,12 @@ module Taskinator
9
9
 
10
10
  class DelayedJobAdapter
11
11
  def initialize(config={})
12
- @config = {
13
- :process_queue => :default,
14
- :task_queue => :default,
15
- :job_queue => :default,
16
- }.merge(config)
12
+ @config = Taskinator::Queues::DefaultConfig.merge(config)
13
+ end
14
+
15
+ def enqueue_create_process(definition, uuid, args)
16
+ queue = definition.queue || @config[:definition_queue]
17
+ ::Delayed::Job.enqueue CreateProcessWorker.new(definition.name, uuid, Taskinator::Persistence.serialize(args)), :queue => queue
17
18
  end
18
19
 
19
20
  def enqueue_process(process)
@@ -32,6 +33,12 @@ module Taskinator
32
33
  ::Delayed::Job.enqueue JobWorker.new(job.uuid), :queue => queue
33
34
  end
34
35
 
36
+ CreateProcessWorker = Struct.new(:definition_name, :uuid, :args) do
37
+ def perform
38
+ Taskinator::CreateProcessWorker.new(definition_name, uuid, args).perform
39
+ end
40
+ end
41
+
35
42
  ProcessWorker = Struct.new(:process_uuid) do
36
43
  def perform
37
44
  Taskinator::ProcessWorker.new(process_uuid).perform
@@ -9,11 +9,11 @@ module Taskinator
9
9
 
10
10
  class ResqueAdapter
11
11
  def initialize(config={})
12
- config = {
13
- :process_queue => :default,
14
- :task_queue => :default,
15
- :job_queue => :default
16
- }.merge(config)
12
+ config = Taskinator::Queues::DefaultConfig.merge(config)
13
+
14
+ CreateProcessWorker.class_eval do
15
+ @queue = config[:definition_queue]
16
+ end
17
17
 
18
18
  ProcessWorker.class_eval do
19
19
  @queue = config[:process_queue]
@@ -28,6 +28,11 @@ module Taskinator
28
28
  end
29
29
  end
30
30
 
31
+ def enqueue_create_process(definition, uuid, args)
32
+ queue = definition.queue || Resque.queue_from_class(CreateProcessWorker)
33
+ Resque.enqueue_to(queue, CreateProcessWorker, definition.name, uuid, Taskinator::Persistence.serialize(args))
34
+ end
35
+
31
36
  def enqueue_process(process)
32
37
  queue = process.queue || Resque.queue_from_class(ProcessWorker)
33
38
  Resque.enqueue_to(queue, ProcessWorker, process.uuid)
@@ -46,6 +51,12 @@ module Taskinator
46
51
  Resque.enqueue_to(queue, JobWorker, job.uuid)
47
52
  end
48
53
 
54
+ class CreateProcessWorker
55
+ def self.perform(definition_name, uuid, args)
56
+ Taskinator::CreateProcessWorker.new(definition_name, uuid, args).perform
57
+ end
58
+ end
59
+
49
60
  class ProcessWorker
50
61
  def self.perform(process_uuid)
51
62
  Taskinator::ProcessWorker.new(process_uuid).perform