procrastinator 0.6.1 → 1.0.0.pre.rc2

Sign up to get free protection for your applications and to get access to all the features.
data/RELEASE_NOTES.md ADDED
@@ -0,0 +1,44 @@
1
+ # Release Notes
2
+
3
+ ## 1.0.0 ( )
4
+
5
+ ### Major Changes
6
+
7
+ * Minimum supported Ruby is now 2.4
8
+ * Added generic `Procrastinator::Config#log_with`
9
+ * Removed `Procrastinator::Config#log_inside`
10
+ * Removed `Procrastinator::Config#log_at_level`
11
+ * falsey log level is now the control for whether logging occurs, instead of falsey log directory
12
+ * Queues are managed as threads rather than sub processes
13
+ * These unnecessary methods no longer exist:
14
+ * `Procrastinator.test_mode`
15
+ * `Procrastinator::Config#enable_test_mode`
16
+ * `Procrastinator::Config#test_mode?`
17
+ * `Procrastinator::Config#test_mode`
18
+ * `Procrastinator::Config#prefix`
19
+ * `Procrastinator::Config#pid_dir`
20
+ * `Procrastinator::Config#each_process`
21
+ * `Procrastinator::Config#run_process_block`
22
+ * Removed use of envvar `PROCRASTINATOR_STOP`
23
+ * `Procrastinator::QueueManager` is merged into `Procrastinator::Scheduler`
24
+ * Removed rake task to halt queue processes
25
+ * Renamed `Procrastinator::Config#provide_context` to `provide_container`
26
+ * You must now call `Scheduler#work` on the result of `Procrastinator.config`
27
+ * Use a dedicated process monitor (like `monit`) instead in production environments
28
+ * Supply a block to `daemonized!` to run code in the spawned process.
29
+ * `max_tasks` is removed as it only added concurrency complexity
30
+ * Data is now stored as JSON instead of YAML
31
+ * Added `with_store` that applies its settings to its block
32
+ * `load_with` has been removed
33
+ * Removed `task_attr` and `Procrastinator::Task` module. Task handlers are now duck-type checked for accessors instead.
34
+
35
+ ### Minor Changes
36
+
37
+ * Started release notes file
38
+ * Updated development gems
39
+ * Logs now include the queue name in log lines
40
+ * Logs can now set the shift size or age (like Ruby's Logger)
41
+
42
+ ### Bugfixes
43
+
44
+ * none
data/Rakefile CHANGED
@@ -1,6 +1,8 @@
1
- require "bundler/gem_tasks"
2
- require "rspec/core/rake_task"
1
+ # frozen_string_literal: true
2
+
3
+ require 'bundler/gem_tasks'
4
+ require 'rspec/core/rake_task'
3
5
 
4
6
  RSpec::Core::RakeTask.new(:spec)
5
7
 
6
- task :default => :spec
8
+ task default: :spec
@@ -0,0 +1,149 @@
1
+ # frozen_string_literal: true
2
+
3
+ require 'time'
4
+
5
+ module Procrastinator
6
+ # Configuration object (State Pattern) used to coordinate settings across
7
+ # various components within Procrastinator.
8
+ #
9
+ # It is immutable after init; use the config DSL in the configuration block to set its state.
10
+ #
11
+ # @author Robin Miller
12
+ #
13
+ # @!attribute [r] :queues
14
+ # @return [Array] List of defined queues
15
+ # @!attribute [r] :container
16
+ # @return [Object] Container object that will be forwarded to tasks
17
+ # @!attribute [r] :log_dir
18
+ # @return [Pathname] Directory to write log files in
19
+ # @!attribute [r] :log_level
20
+ # @return [Integer] Logging level to use
21
+ # @!attribute [r] :log_shift_age
22
+ # @return [Integer] Number of previous files to keep (see Ruby Logger for details)
23
+ # @!attribute [r] :log_shift_size
24
+ # @return [Integer] Filesize before rotating to a new logfile (see Ruby Logger for details)
25
+ class Config
26
+ attr_reader :queues, :log_dir, :log_level, :log_shift_age, :log_shift_size, :container
27
+
28
+ DEFAULT_LOG_DIRECTORY = Pathname.new('log').freeze
29
+ DEFAULT_LOG_SHIFT_AGE = 0
30
+ DEFAULT_LOG_SHIFT_SIZE = 2 ** 20 # 1 MB
31
+ DEFAULT_LOG_FORMATTER = proc do |severity, datetime, progname, msg|
32
+ [datetime.iso8601(8),
33
+ severity,
34
+ "#{ progname } (#{ Process.pid }):",
35
+ msg].join("\t") << "\n"
36
+ end
37
+
38
+ def initialize
39
+ @queues = []
40
+ @container = nil
41
+ @log_dir = DEFAULT_LOG_DIRECTORY
42
+ @log_level = Logger::INFO
43
+ @log_shift_age = DEFAULT_LOG_SHIFT_AGE
44
+ @log_shift_size = DEFAULT_LOG_SHIFT_SIZE
45
+
46
+ with_store(csv: TaskStore::SimpleCommaStore::DEFAULT_FILE) do
47
+ if block_given?
48
+ yield(self)
49
+ raise SetupError, SetupError::ERR_NO_QUEUE if @queues.empty?
50
+ end
51
+ end
52
+
53
+ @log_dir = @log_dir.expand_path
54
+
55
+ @queues.freeze
56
+ freeze
57
+ end
58
+
59
+ # Collection of all of the methods intended for use within Procrastinator.setup
60
+ #
61
+ # @see Procrastinator
62
+ module DSL
63
+ # Assigns a task loader
64
+ def with_store(store)
65
+ raise(ArgumentError, 'with_store must be provided a block') unless block_given?
66
+
67
+ old_store = @default_store
68
+ @default_store = interpret_store(store)
69
+ yield
70
+ @default_store = old_store
71
+ end
72
+
73
+ def provide_container(container)
74
+ @container = container
75
+ end
76
+
77
+ def define_queue(name, task_class, properties = {})
78
+ raise ArgumentError, 'queue name cannot be nil' if name.nil?
79
+ raise ArgumentError, 'queue task class cannot be nil' if task_class.nil?
80
+
81
+ properties[:store] = interpret_store(properties[:store]) if properties.key? :store
82
+
83
+ @queues << Queue.new(**{name: name, task_class: task_class, store: @default_store}.merge(properties))
84
+ end
85
+
86
+ # Sets details of logging behaviour
87
+ #
88
+ # @param directory [Pathname,String] the directory to save logs within.
89
+ # @param level [Logger::UNKNOWN,Logger::FATAL,Logger::ERROR,Logger::WARN,Logger::INFO,Logger::DEBUG,Integer,Boolean] the Ruby Logger level to use. If falsey, no logging is performed.
90
+ # @param shift_age [Integer] number of old log files to keep (see Ruby Logger for details)
91
+ # @param shift_size [Integer] filesize before log is rotated to a fresh file (see Ruby Logger for details)
92
+ def log_with(directory: @log_dir, level: @log_level, shift_age: @log_shift_age, shift_size: @log_shift_size)
93
+ @log_dir = directory ? Pathname.new(directory) : directory
94
+ @log_level = level
95
+ @log_shift_age = shift_age
96
+ @log_shift_size = shift_size
97
+ end
98
+ end
99
+
100
+ include DSL
101
+
102
+ def queue(name: nil)
103
+ queue = if name
104
+ @queues.find do |q|
105
+ q.name == name
106
+ end
107
+ else
108
+ if name.nil? && @queues.length > 1
109
+ raise ArgumentError,
110
+ "queue must be specified when more than one is defined. #{ known_queues }"
111
+ end
112
+
113
+ @queues.first
114
+ end
115
+
116
+ raise ArgumentError, "there is no :#{ name } queue registered. #{ known_queues }" unless queue
117
+
118
+ queue
119
+ end
120
+
121
+ private
122
+
123
+ def known_queues
124
+ "Known queues are: #{ @queues.map { |queue| ":#{ queue.name }" }.join(', ') }"
125
+ end
126
+
127
+ def interpret_store(store)
128
+ raise(ArgumentError, 'task store cannot be nil') if store.nil?
129
+
130
+ case store
131
+ when Hash
132
+ store_strategy = :csv
133
+ unless store.key? store_strategy
134
+ raise ArgumentError, "Must pass keyword :#{ store_strategy } if specifying a location for CSV file"
135
+ end
136
+
137
+ TaskStore::SimpleCommaStore.new(store[store_strategy])
138
+ when String, Pathname
139
+ TaskStore::SimpleCommaStore.new(store)
140
+ else
141
+ store
142
+ end
143
+ end
144
+
145
+ class SetupError < RuntimeError
146
+ ERR_NO_QUEUE = 'setup block must call #define_queue on the environment'
147
+ end
148
+ end
149
+ end
@@ -0,0 +1,50 @@
1
+ # frozen_string_literal: true
2
+
3
+ require 'json'
4
+ require 'ostruct'
5
+ require 'timeout'
6
+ # require 'forwardable'
7
+ require 'delegate'
8
+ require_relative 'task'
9
+
10
+ module Procrastinator
11
+ # Task wrapper that adds logging to each step.
12
+ #
13
+ # @author Robin Miller
14
+ #
15
+ # @see Task
16
+ class LoggedTask < DelegateClass(Task)
17
+ # extend Forwardable
18
+ #
19
+ # def_delegators :@task, :id, :to_h
20
+
21
+ attr_reader :logger
22
+
23
+ alias task __getobj__
24
+
25
+ def initialize(task, logger: Logger.new(StringIO.new))
26
+ super task
27
+ @logger = logger || raise(ArgumentError, 'Logger cannot be nil')
28
+ end
29
+
30
+ def run
31
+ task.run
32
+
33
+ begin
34
+ @logger.info("Task completed: #{ task }")
35
+ rescue StandardError => e
36
+ warn "Task logging error: #{ e.message }"
37
+ end
38
+ end
39
+
40
+ def fail(error)
41
+ hook = task.fail(error)
42
+ begin
43
+ @logger.error("Task #{ hook }ed: #{ task }")
44
+ rescue StandardError => e
45
+ warn "Task logging error: #{ e.message }"
46
+ end
47
+ hook
48
+ end
49
+ end
50
+ end
@@ -0,0 +1,206 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Procrastinator
4
+ # A Queue defines how a certain type task will be processed.
5
+ #
6
+ # @author Robin Miller
7
+ #
8
+ # @!attribute [r] :name
9
+ # @return [Symbol] The queue's identifier symbol
10
+ # @!attribute [r] :task_class
11
+ # @return [Class] Class that defines the work to be done for jobs in this queue.
12
+ # @!attribute [r] :timeout
13
+ # @return [Object] Duration (seconds) after which tasks in this queue should fail for taking too long.
14
+ # @!attribute [r] :max_attempts
15
+ # @return [Object] Maximum number of attempts for tasks in this queue.
16
+ # @!attribute [r] :update_period
17
+ # @return [Pathname] Delay (seconds) between reloads of tasks from the task store.
18
+ class Queue
19
+ extend Forwardable
20
+
21
+ DEFAULT_TIMEOUT = 3600 # in seconds; one hour total
22
+ DEFAULT_MAX_ATTEMPTS = 20
23
+ DEFAULT_UPDATE_PERIOD = 10 # seconds
24
+
25
+ attr_reader :name, :max_attempts, :timeout, :update_period, :task_store, :task_class
26
+
27
+ alias store task_store
28
+ alias storage task_store
29
+
30
+ def_delegators :@task_store, :read, :update, :delete
31
+
32
+ # Timeout is in seconds
33
+ def initialize(name:, task_class:,
34
+ max_attempts: DEFAULT_MAX_ATTEMPTS,
35
+ timeout: DEFAULT_TIMEOUT,
36
+ update_period: DEFAULT_UPDATE_PERIOD,
37
+ store: TaskStore::SimpleCommaStore.new)
38
+ raise ArgumentError, ':name cannot be nil' unless name
39
+
40
+ raise ArgumentError, ':task_class cannot be nil' unless task_class
41
+ raise ArgumentError, 'Task class must be initializable' unless task_class.respond_to? :new
42
+
43
+ raise ArgumentError, ':timeout cannot be negative' if timeout&.negative?
44
+
45
+ @name = name.to_s.strip.gsub(/[^A-Za-z0-9]+/, '_').to_sym
46
+ @task_class = task_class
47
+ @task_store = store
48
+ @max_attempts = max_attempts
49
+ @timeout = timeout
50
+ @update_period = update_period
51
+
52
+ validate!
53
+
54
+ freeze
55
+ end
56
+
57
+ def next_task(logger: Logger.new(StringIO.new), container: nil, scheduler: nil)
58
+ metadata = next_metas.find(&:runnable?)
59
+
60
+ return nil unless metadata
61
+
62
+ task = Task.new(metadata, task_handler(data: metadata.data,
63
+ container: container,
64
+ logger: logger,
65
+ scheduler: scheduler))
66
+
67
+ LoggedTask.new(task, logger: logger)
68
+ end
69
+
70
+ def fetch_task(identifier)
71
+ identifier[:data] = JSON.dump(identifier[:data]) if identifier[:data]
72
+
73
+ tasks = read(**identifier)
74
+
75
+ raise "no task found matching #{ identifier }" if tasks.nil? || tasks.empty?
76
+ raise "too many (#{ tasks.size }) tasks match #{ identifier }. Found: #{ tasks }" if tasks.size > 1
77
+
78
+ TaskMetaData.new(tasks.first.merge(queue: self))
79
+ end
80
+
81
+ def create(run_at:, expire_at:, data:)
82
+ if data.nil? && expects_data?
83
+ raise ArgumentError, "task #{ @task_class } expects to receive :data. Provide :data to #delay."
84
+ end
85
+
86
+ unless data.nil? || expects_data?
87
+ raise MalformedTaskError, <<~ERROR
88
+ found unexpected :data argument. Either do not provide :data when scheduling a task,
89
+ or add this in the #{ @task_class } class definition:
90
+ attr_accessor :data
91
+ ERROR
92
+ end
93
+
94
+ # TODO: shorten to using slice once updated to Ruby 2.5+
95
+ attrs = {queue: self, run_at: run_at, initial_run_at: run_at, expire_at: expire_at, data: JSON.dump(data)}
96
+
97
+ create_data = TaskMetaData.new(**attrs).to_h
98
+ create_data.delete(:id)
99
+ create_data.delete(:attempts)
100
+ create_data.delete(:last_fail_at)
101
+ create_data.delete(:last_error)
102
+ @task_store.create(**create_data)
103
+ end
104
+
105
+ def expects_data?
106
+ @task_class.method_defined?(:data=)
107
+ end
108
+
109
+ private
110
+
111
+ def task_handler(data: nil, container: nil, logger: nil, scheduler: nil)
112
+ handler = @task_class.new
113
+ handler.data = data if handler.respond_to?(:data=)
114
+ handler.container = container
115
+ handler.logger = logger
116
+ handler.scheduler = scheduler
117
+ handler
118
+ end
119
+
120
+ def next_metas
121
+ tasks = read(queue: @name).reject { |t| t[:run_at].nil? }.collect do |t|
122
+ t.to_h.delete_if { |key| !TaskMetaData::EXPECTED_DATA.include?(key) }.merge(queue: self)
123
+ end
124
+
125
+ sort_tasks(tasks.collect { |t| TaskMetaData.new(**t) })
126
+ end
127
+
128
+ def sort_tasks(tasks)
129
+ # TODO: improve this
130
+ # shuffling and re-sorting to avoid worst case O(n^2) when receiving already sorted data
131
+ # on quicksort (which is default ruby sort). It is not unreasonable that the persister could return sorted
132
+ # results
133
+ # Ideally, we'd use a better algo than qsort for this, but this will do for now
134
+ tasks.shuffle.sort_by(&:run_at)
135
+ end
136
+
137
+ # Internal queue validator
138
+ module QueueValidation
139
+ def validate!
140
+ verify_task_class!
141
+ verify_task_store!
142
+ end
143
+
144
+ def verify_task_class!
145
+ verify_run_method!
146
+ verify_accessors!
147
+ verify_hooks!
148
+ end
149
+
150
+ # The interface compliance is checked on init because it's one of those rare cases where you want to know early;
151
+ # otherwise, you wouldn't know until task execution and that could be far in the future.
152
+ # UX is important for devs, too.
153
+ # - R
154
+ def verify_run_method!
155
+ unless @task_class.method_defined? :run
156
+ raise MalformedTaskError, "task #{ @task_class } does not support #run method"
157
+ end
158
+
159
+ return unless @task_class.instance_method(:run).arity.positive?
160
+
161
+ raise MalformedTaskError, "task #{ @task_class } cannot require parameters to its #run method"
162
+ end
163
+
164
+ def verify_accessors!
165
+ [:logger, :container, :scheduler].each do |method_name|
166
+ next if @task_class.method_defined?(method_name) && @task_class.method_defined?("#{ method_name }=")
167
+
168
+ raise MalformedTaskError, <<~ERR
169
+ Task handler is missing a #{ method_name } accessor. Add this to the #{ @task_class } class definition:
170
+ attr_accessor :logger, :container, :scheduler
171
+ ERR
172
+ end
173
+ end
174
+
175
+ def verify_hooks!
176
+ expected_arity = 1
177
+
178
+ [:success, :fail, :final_fail].each do |method_name|
179
+ next unless @task_class.method_defined?(method_name)
180
+ next if @task_class.instance_method(method_name).arity == expected_arity
181
+
182
+ err = "task #{ @task_class } must accept #{ expected_arity } parameter to its ##{ method_name } method"
183
+
184
+ raise MalformedTaskError, err
185
+ end
186
+ end
187
+
188
+ def verify_task_store!
189
+ raise ArgumentError, ':store cannot be nil' if @task_store.nil?
190
+
191
+ [:read, :create, :update, :delete].each do |method|
192
+ unless @task_store.respond_to? method
193
+ raise MalformedTaskStoreError, "task store #{ @task_store.class } must respond to ##{ method }"
194
+ end
195
+ end
196
+ end
197
+ end
198
+ include QueueValidation
199
+ end
200
+
201
+ class MalformedTaskError < StandardError
202
+ end
203
+
204
+ class MalformedTaskStoreError < RuntimeError
205
+ end
206
+ end
@@ -1,116 +1,91 @@
1
+ # frozen_string_literal: true
2
+
1
3
  module Procrastinator
4
+ # A QueueWorker checks for tasks to run from the task store and executes them, updating information in the task
5
+ # store as necessary.
6
+ #
7
+ # @author Robin Miller
2
8
  class QueueWorker
3
- DEFAULT_TIMEOUT = 3600 # in seconds; one hour total
4
- DEFAULT_MAX_ATTEMPTS = 20
5
- DEFAULT_UPDATE_PERIOD = 10 # seconds
6
- DEFAULT_MAX_TASKS = 10
7
-
8
- attr_reader :name, :timeout, :max_attempts, :update_period, :max_tasks
9
-
10
- # Timeout is in seconds
11
- def initialize(name:,
12
- persister:,
13
- log_dir: nil,
14
- log_level: Logger::INFO,
15
- max_attempts: DEFAULT_MAX_ATTEMPTS,
16
- timeout: DEFAULT_TIMEOUT,
17
- update_period: DEFAULT_UPDATE_PERIOD,
18
- max_tasks: DEFAULT_MAX_TASKS)
19
- raise ArgumentError.new('Queue name may not be nil') unless name
20
- raise ArgumentError.new('Persister may not be nil') unless persister
21
-
22
- raise(MalformedTaskPersisterError.new('The supplied IO object must respond to #read_tasks')) unless persister.respond_to? :read_tasks
23
- raise(MalformedTaskPersisterError.new('The supplied IO object must respond to #update_task')) unless persister.respond_to? :update_task
24
- raise(MalformedTaskPersisterError.new('The supplied IO object must respond to #delete_task')) unless persister.respond_to? :delete_task
25
-
26
- @name = name.to_s.gsub(/\s/, '_').to_sym
27
- @timeout = timeout
28
- @max_attempts = max_attempts
29
- @update_period = update_period
30
- @max_tasks = max_tasks
31
- @persister = persister
32
- @log_dir = log_dir
33
- @log_level = log_level
34
-
35
- start_log
36
- end
9
+ extend Forwardable
37
10
 
38
- def work
39
- begin
40
- loop do
41
- sleep(@update_period)
11
+ def_delegators :@queue, :name, :next_task
42
12
 
43
- act
44
- end
45
- rescue StandardError => e
46
- @logger.fatal(e)
47
- # raise e
48
- end
49
- end
13
+ # expected methods for all persistence strategies
14
+ PERSISTER_METHODS = [:read, :update, :delete].freeze
15
+
16
+ def initialize(queue:, config:)
17
+ raise ArgumentError, ':queue cannot be nil' if queue.nil?
18
+ raise ArgumentError, ':config cannot be nil' if config.nil?
50
19
 
51
- def act
52
- # shuffling and re-sorting to avoid worst case O(n^2) on quicksort (which is default ruby sort)
53
- # when receiving already sorted data. Ideally, we'd use a better algo, but this will do for now
54
- tasks = @persister.read_tasks(@name).reject { |t| t[:run_at].nil? }.shuffle.sort_by { |t| t[:run_at] }
20
+ @config = config
55
21
 
56
- tasks.first(@max_tasks).each do |task_data|
57
- if Time.now.to_i >= task_data[:run_at].to_i
58
- task_data.merge!(logger: @logger) if @logger
22
+ @queue = if queue.is_a? Symbol
23
+ config.queue(name: queue)
24
+ else
25
+ queue
26
+ end
27
+
28
+ @scheduler = Scheduler.new(config)
29
+ @logger = Logger.new(StringIO.new)
30
+ end
59
31
 
60
- tw = TaskWorker.new(task_data)
32
+ # Works on jobs forever
33
+ def work!
34
+ @logger = open_log!("#{ name }-queue-worker", @config)
35
+ @logger.info("Started worker thread to consume queue: #{ name }")
61
36
 
62
- tw.work
37
+ loop do
38
+ sleep(@queue.update_period)
63
39
 
64
- if tw.successful?
65
- @persister.delete_task(task_data[:id])
66
- else
67
- @persister.update_task(tw.to_hash.merge(queue: @name))
68
- end
69
- end
40
+ work_one
70
41
  end
71
- end
42
+ rescue StandardError => e
43
+ @logger.fatal(e)
72
44
 
73
- def long_name
74
- "#{@name}-queue-worker"
45
+ raise
75
46
  end
76
47
 
77
- # Starts a log file and stores the logger within this queue worker.
78
- #
79
- # Separate from init because logging is context-dependent
80
- def start_log
81
- if @log_dir
82
- log_path = Pathname.new("#{@log_dir}/#{long_name}.log")
83
-
84
- log_path.dirname.mkpath
85
- File.open(log_path.to_path, 'a+') do |f|
86
- f.write ''
87
- end
48
+ # Performs exactly one task on the queue
49
+ def work_one
50
+ task = next_task(logger: @logger,
51
+ container: @config.container,
52
+ scheduler: @scheduler) || return
88
53
 
89
- @logger = Logger.new(log_path.to_path)
54
+ begin
55
+ task.run
90
56
 
91
- @logger.level = @log_level
57
+ @queue.delete(task.id)
58
+ rescue StandardError => e
59
+ task.fail(e)
92
60
 
93
- @logger.info(['',
94
- '===================================',
95
- "Started worker process, #{long_name}, to work off queue #{@name}.",
96
- "Worker pid=#{Process.pid}; parent pid=#{Process.ppid}.",
97
- '==================================='].join("\n"))
61
+ task_info = task.to_h
62
+ id = task_info.delete(:id)
63
+ @queue.update(id, **task_info)
98
64
  end
99
65
  end
100
66
 
101
- # Logs a termination due to parent process termination
102
- #
103
- # == Parameters:
104
- # @param ppid the parent's process id
105
- # @param pid the child's process id
106
- #
107
- def log_parent_exit(ppid:, pid:)
108
- raise RuntimeError.new('Cannot log when logger not defined. Call #start_log first.') unless @logger
67
+ def halt
68
+ @logger&.info("Halted worker on queue: #{ name }")
69
+ @logger&.close
70
+ end
71
+
72
+ # Starts a log file and returns the created Logger
73
+ def open_log!(name, config)
74
+ return @logger unless config.log_level
75
+
76
+ log_path = config.log_dir / "#{ name }.log"
77
+
78
+ config.log_dir.mkpath
79
+ FileUtils.touch(log_path)
109
80
 
110
- @logger.error("Terminated worker process (pid=#{pid}) due to main process (ppid=#{ppid}) disappearing.")
81
+ Logger.new(log_path.to_path,
82
+ config.log_shift_age, config.log_shift_size,
83
+ level: config.log_level || Logger::FATAL,
84
+ progname: name,
85
+ formatter: Config::DEFAULT_LOG_FORMATTER)
111
86
  end
112
87
  end
113
88
 
114
89
  class MalformedTaskPersisterError < StandardError
115
90
  end
116
- end
91
+ end