rocketjob 5.4.0.beta1 → 6.0.0.rc2

Files changed (60)
  1. checksums.yaml +4 -4
  2. data/README.md +19 -5
  3. data/bin/rocketjob_batch_perf +1 -1
  4. data/bin/rocketjob_perf +1 -1
  5. data/lib/rocket_job/batch.rb +3 -0
  6. data/lib/rocket_job/batch/categories.rb +341 -0
  7. data/lib/rocket_job/batch/io.rb +128 -60
  8. data/lib/rocket_job/batch/model.rb +20 -68
  9. data/lib/rocket_job/batch/performance.rb +19 -7
  10. data/lib/rocket_job/batch/statistics.rb +34 -12
  11. data/lib/rocket_job/batch/tabular.rb +2 -0
  12. data/lib/rocket_job/batch/tabular/input.rb +8 -6
  13. data/lib/rocket_job/batch/tabular/output.rb +4 -2
  14. data/lib/rocket_job/batch/throttle_running_workers.rb +8 -17
  15. data/lib/rocket_job/batch/worker.rb +27 -24
  16. data/lib/rocket_job/category/base.rb +78 -0
  17. data/lib/rocket_job/category/input.rb +110 -0
  18. data/lib/rocket_job/category/output.rb +25 -0
  19. data/lib/rocket_job/cli.rb +25 -17
  20. data/lib/rocket_job/dirmon_entry.rb +22 -12
  21. data/lib/rocket_job/event.rb +1 -1
  22. data/lib/rocket_job/extensions/iostreams/path.rb +32 -0
  23. data/lib/rocket_job/extensions/mongoid/contextual/mongo.rb +2 -2
  24. data/lib/rocket_job/extensions/mongoid/factory.rb +4 -12
  25. data/lib/rocket_job/extensions/mongoid/stringified_symbol.rb +50 -0
  26. data/lib/rocket_job/extensions/psych/yaml_tree.rb +8 -0
  27. data/lib/rocket_job/extensions/rocket_job_adapter.rb +2 -2
  28. data/lib/rocket_job/jobs/dirmon_job.rb +2 -2
  29. data/lib/rocket_job/jobs/housekeeping_job.rb +7 -7
  30. data/lib/rocket_job/jobs/on_demand_batch_job.rb +15 -6
  31. data/lib/rocket_job/jobs/on_demand_job.rb +1 -2
  32. data/lib/rocket_job/jobs/performance_job.rb +3 -1
  33. data/lib/rocket_job/jobs/re_encrypt/relational_job.rb +103 -96
  34. data/lib/rocket_job/jobs/upload_file_job.rb +44 -8
  35. data/lib/rocket_job/lookup_collection.rb +69 -0
  36. data/lib/rocket_job/plugins/job/model.rb +25 -50
  37. data/lib/rocket_job/plugins/job/throttle.rb +2 -2
  38. data/lib/rocket_job/plugins/job/throttle_running_jobs.rb +12 -4
  39. data/lib/rocket_job/plugins/job/worker.rb +2 -7
  40. data/lib/rocket_job/plugins/restart.rb +12 -5
  41. data/lib/rocket_job/plugins/state_machine.rb +2 -1
  42. data/lib/rocket_job/plugins/throttle_dependent_jobs.rb +38 -0
  43. data/lib/rocket_job/ractor_worker.rb +42 -0
  44. data/lib/rocket_job/server/model.rb +1 -1
  45. data/lib/rocket_job/sliced.rb +15 -70
  46. data/lib/rocket_job/sliced/bzip2_output_slice.rb +2 -2
  47. data/lib/rocket_job/sliced/input.rb +1 -1
  48. data/lib/rocket_job/sliced/slice.rb +5 -13
  49. data/lib/rocket_job/sliced/slices.rb +14 -2
  50. data/lib/rocket_job/sliced/writer/output.rb +33 -45
  51. data/lib/rocket_job/subscribers/server.rb +1 -1
  52. data/lib/rocket_job/thread_worker.rb +46 -0
  53. data/lib/rocket_job/throttle_definitions.rb +7 -1
  54. data/lib/rocket_job/version.rb +1 -1
  55. data/lib/rocket_job/worker.rb +21 -55
  56. data/lib/rocket_job/worker_pool.rb +5 -7
  57. data/lib/rocketjob.rb +53 -43
  58. metadata +35 -26
  59. data/lib/rocket_job/extensions/mongoid/remove_warnings.rb +0 -12
  60. data/lib/rocket_job/jobs/on_demand_batch_tabular_job.rb +0 -28
@@ -0,0 +1,69 @@
+ module RocketJob
+ class LookupCollection < Mongo::Collection
+ # Rapidly upload individual records in batches.
+ #
+ # Operates directly on a Mongo Collection to avoid the overhead of creating Mongoid objects
+ # for each and every row.
+ #
+ # Example:
+ # lookup_collection(:my_lookup).upload do |io|
+ # io << {id: 123, data: "first record"}
+ # io << {id: 124, data: "second record"}
+ # end
+ #
+ # input_category(:my_lookup).find(id: 123).first
+ def upload(batch_size: 10_000, &block)
+ BatchUploader.upload(batch_size: batch_size, &block)
+ end
+
+ # Looks up the value at the specified id.
+ # Returns [nil] if no record was found with the supplied id.
+ def lookup(id)
+ find(id: id).first
+ end
+
+ # Internal class for uploading records in batches
+ class BatchUploader
+ attr_reader :record_count
+
+ def self.upload(collection, **args)
+ writer = new(collection, **args)
+ yield(writer)
+ writer.record_count
+ ensure
+ writer&.close
+ end
+
+ def initialize(collection, batch_size:)
+ @batch_size = batch_size
+ @record_count = 0
+ @batch_count = 0
+ @documents = []
+ @collection = collection
+ end
+
+ def <<(record)
+ raise(ArgumentError, "Record must be a Hash") unless record.is_a?(Hash)
+
+ unless record.key?(:id) || record.key?("id") || record.key?("_id")
+ raise(ArgumentError, "Record must include an :id key")
+ end
+
+ @documents << record
+ @record_count += 1
+ @batch_count += 1
+ if @batch_count >= @batch_size
+ @collection.insert_many(@documents)
+ @documents.clear
+ @batch_count = 0
+ end
+
+ self
+ end
+
+ def close
+ @collection.insert_many(@documents) unless @documents.empty?
+ end
+ end
+ end
+ end
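As a usage sketch (drawn only from the example in the comments above; the :my_lookup category name is illustrative), the new lookup collection can be bulk-loaded and then queried by id:

    # Sketch only: assumes a batch job context that exposes lookup_collection(:my_lookup).
    lookup_collection(:my_lookup).upload do |io|
      io << {id: 123, data: "first record"}
      io << {id: 124, data: "second record"}
    end

    # Fetch a single record back by its id; returns nil when no record matches.
    lookup_collection(:my_lookup).lookup(123)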
@@ -37,12 +37,10 @@ module RocketJob
  # arrives, then the current job will complete the current slices and process
  # the new higher priority job
  field :priority, type: Integer, default: 50, class_attribute: true, user_editable: true, copy_on_restart: true
+ validates_inclusion_of :priority, in: 1..100
 
  # When the job completes destroy it from both the database and the UI
- field :destroy_on_complete, type: Boolean, default: true, class_attribute: true, copy_on_restart: true
-
- # Whether to store the results from this job
- field :collect_output, type: Boolean, default: false, class_attribute: true
+ field :destroy_on_complete, type: Mongoid::Boolean, default: true, class_attribute: true, copy_on_restart: true
 
  # Run this job no earlier than this time
  field :run_at, type: Time, user_editable: true
@@ -54,14 +52,15 @@ module RocketJob
  # Can be used to reduce log noise, especially during high volume calls
  # For debugging a single job can be logged at a low level such as :trace
  # Levels supported: :trace, :debug, :info, :warn, :error, :fatal
- field :log_level, type: Symbol, class_attribute: true, user_editable: true, copy_on_restart: true
+ field :log_level, type: Mongoid::StringifiedSymbol, class_attribute: true, user_editable: true, copy_on_restart: true
+ validates_inclusion_of :log_level, in: SemanticLogger::LEVELS + [nil]
 
  #
  # Read-only attributes
  #
 
  # Current state, as set by the state machine. Do not modify this value directly.
- field :state, type: Symbol, default: :queued
+ field :state, type: Mongoid::StringifiedSymbol, default: :queued
 
  # When the job was created
  field :created_at, type: Time, default: -> { Time.now }
@@ -89,17 +88,12 @@ module RocketJob
  # Store the last exception for this job
  embeds_one :exception, class_name: "RocketJob::JobException"
 
- # Store the Hash result from this job if collect_output is true,
- # and the job returned actually returned a Hash, otherwise nil
- # Not applicable to SlicedJob jobs, since its output is stored in a
- # separate collection
- field :result, type: Hash
-
+ # Used when workers fetch jobs to work on.
  index({state: 1, priority: 1, _id: 1}, background: true)
+ # Used by Mission Control to display completed jobs sorted by completion.
+ index({completed_at: 1}, background: true)
 
  validates_presence_of :state, :failure_count, :created_at
- validates :priority, inclusion: 1..100
- validates :log_level, inclusion: SemanticLogger::LEVELS + [nil]
  end
 
  module ClassMethods
@@ -155,14 +149,8 @@ module RocketJob
 
  # Scope for queued jobs that can run now
  # I.e. Queued jobs excluding scheduled jobs
- if Mongoid::VERSION.to_f >= 7.1
- def queued_now
- queued.and(RocketJob::Job.where(run_at: nil).or(:run_at.lte => Time.now))
- end
- else
- def queued_now
- queued.or({run_at: nil}, :run_at.lte => Time.now)
- end
+ def queued_now
+ queued.and(RocketJob::Job.where(run_at: nil).or(:run_at.lte => Time.now))
  end
 
  # Defines all the fields that are accessible on the Document
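With the Mongoid version check gone, queued_now presumably assumes Mongoid 7.1+ query merging. Conceptually the scope selects queued jobs whose run_at is either unset or already due; a minimal usage sketch:

    # Sketch only: queued jobs that are eligible to run right now.
    RocketJob::Job.queued_now.count
    RocketJob::Job.queued_now.order_by(priority: 1, _id: 1).first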
@@ -183,43 +171,30 @@ module RocketJob
  #
  # @return [ Field ] The generated field
  def field(name, options)
- if options.delete(:user_editable) == true
- self.user_editable_fields += [name.to_sym] unless user_editable_fields.include?(name.to_sym)
+ if (options.delete(:user_editable) == true) && !user_editable_fields.include?(name.to_sym)
+ self.user_editable_fields += [name.to_sym]
  end
+
  if options.delete(:class_attribute) == true
  class_attribute(name, instance_accessor: false)
  public_send("#{name}=", options[:default]) if options.key?(:default)
  options[:default] = -> { self.class.public_send(name) }
  end
- if options.delete(:copy_on_restart) == true
- self.rocket_job_restart_attributes += [name.to_sym] unless rocket_job_restart_attributes.include?(name.to_sym)
+
+ if (options.delete(:copy_on_restart) == true) && !rocket_job_restart_attributes.include?(name.to_sym)
+ self.rocket_job_restart_attributes += [name.to_sym]
  end
- super(name, options)
- end
 
- # DEPRECATED
- def rocket_job
- warn "Replace calls to .rocket_job with calls to set class instance variables. For example: self.priority = 50"
- yield(self)
+ super(name, options)
  end
 
- # DEPRECATED
- def public_rocket_job_properties(*args)
- warn "Replace calls to .public_rocket_job_properties by adding `user_editable: true` option to the field declaration in #{name} for: #{args.inspect}"
- self.user_editable_fields += args.collect(&:to_sym)
+ # Builds this job instance from the supplied properties hash.
+ # Overridden by batch to support child objects.
+ def from_properties(properties)
+ new(properties)
  end
  end
 
- # Returns [true|false] whether to collect nil results from running this batch
- def collect_nil_output?
- collect_output? ? (collect_nil_output == true) : false
- end
-
- # Returns [true|false] whether to collect the results from running this batch
- def collect_output?
- collect_output == true
- end
-
  # Returns [Float] the number of seconds the job has taken
  # - Elapsed seconds to process the job from when a worker first started working on it
  # until now if still running, or until it was completed
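The field override above layers Rocket Job options on top of Mongoid's field macro. A hedged sketch of a job declaring a field with those options (the field name is illustrative):

    # Sketch only:
    class MyJob < RocketJob::Job
      # class_attribute: also settable at the class level;
      # user_editable:   exposed for editing in the web interface;
      # copy_on_restart: carried over to the next instance created on restart.
      field :report_date, type: Date, class_attribute: true, user_editable: true, copy_on_restart: true

      def perform
        # ...
      end
    end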
@@ -282,7 +257,6 @@ module RocketJob
  # Returns [Hash] status of this job
  def as_json
  attrs = serializable_hash(methods: %i[seconds duration])
- attrs.delete("result") unless collect_output?
  attrs.delete("failure_count") unless failure_count.positive?
  if queued?
  attrs.delete("started_at")
@@ -319,16 +293,17 @@ module RocketJob
  h = as_json
  h.delete("seconds")
  h.dup.each_pair do |k, v|
- if v.is_a?(Time)
+ case v
+ when Time
  h[k] = v.in_time_zone(time_zone).to_s
- elsif v.is_a?(BSON::ObjectId)
+ when BSON::ObjectId
  h[k] = v.to_s
  end
  end
  h
  end
 
- # Returns [Boolean] whether the worker runs on a particular server.
+ # Returns [true|false] whether the worker runs on a particular server.
  def worker_on_server?(server_name)
  return false unless worker_name.present? && server_name.present?
 
@@ -48,7 +48,7 @@ module RocketJob
  # Note: Throttles are executed in the order they are defined.
  def define_throttle(method_name, filter: :throttle_filter_class)
  # Duplicate to prevent modifying parent class throttles
- definitions = rocket_job_throttles ? rocket_job_throttles.dup : ThrottleDefinitions.new
+ definitions = rocket_job_throttles ? rocket_job_throttles.deep_dup : ThrottleDefinitions.new
  definitions.add(method_name, filter)
  self.rocket_job_throttles = definitions
  end
@@ -57,7 +57,7 @@ module RocketJob
  def undefine_throttle(method_name)
  return unless rocket_job_throttles
 
- definitions = rocket_job_throttles.dup
+ definitions = rocket_job_throttles.deep_dup
  definitions.remove(method_name)
  self.rocket_job_throttles = definitions
  end
@@ -29,20 +29,28 @@ module RocketJob
  class_attribute :throttle_running_jobs
  self.throttle_running_jobs = nil
 
+ # Allow jobs to be throttled by group name instance of the job class name.
+ field :throttle_group, type: String, class_attribute: true, user_editable: true, copy_on_restart: true
+
  define_throttle :throttle_running_jobs_exceeded?
  end
 
  private
 
- # Returns [Boolean] whether the throttle for this job has been exceeded
+ # Returns [true|false] whether the throttle for this job has been exceeded
  def throttle_running_jobs_exceeded?
- return unless throttle_running_jobs&.positive?
+ return false unless throttle_running_jobs&.positive?
 
- # Cannot use this class since it will include instances of parent job classes.
  RocketJob::Job.with(read: {mode: :primary}) do |conn|
- conn.running.where("_type" => self.class.name, :id.ne => id).count >= throttle_running_jobs
+ query = throttle_running_jobs_base_query
+ throttle_group ? query["throttle_group"] = throttle_group : query["_type"] = self.class.name
+ conn.running.where(query).count >= throttle_running_jobs
  end
  end
+
+ def throttle_running_jobs_base_query
+ {:id.ne => id}
+ end
  end
  end
  end
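A hedged sketch of the new throttle_group field: because the running-jobs query filters on throttle_group instead of _type when a group is set, several job classes can share a single concurrency limit (class and group names are illustrative):

    # Sketch only: at most 2 jobs from the "imports" group run at the same time,
    # counted across both classes.
    class CustomerImportJob < RocketJob::Job
      self.throttle_running_jobs = 2
      self.throttle_group        = "imports"

      def perform
        # ...
      end
    end

    class VendorImportJob < RocketJob::Job
      self.throttle_running_jobs = 2
      self.throttle_group        = "imports"

      def perform
        # ...
      end
    end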
@@ -48,11 +48,11 @@ module RocketJob
  def perform_now
  raise(::Mongoid::Errors::Validations, self) unless valid?
 
- worker = RocketJob::Worker.new(inline: true)
+ worker = RocketJob::Worker.new
  start if may_start?
  # Re-Raise exceptions
  rocket_job_work(worker, true) if running?
- result
+ @rocket_job_output
  end
 
  def perform(*)
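Since perform_now now returns @rocket_job_output directly and no longer passes inline: true to the worker, running a job synchronously (for example in a test) still goes through perform_now; a minimal sketch assuming an illustrative MyJob class like the one sketched earlier:

    # Sketch only: run the job inline on the calling thread and capture its output.
    job    = MyJob.new(report_date: Date.today)
    output = job.perform_now
    job.completed? # => true when perform did not raise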
@@ -106,11 +106,6 @@ module RocketJob
  end
  end
 
- if collect_output?
- # Result must be a Hash, if not put it in a Hash
- self.result = @rocket_job_output.is_a?(Hash) ? @rocket_job_output : {"result" => @rocket_job_output}
- end
-
  if new_record? || destroyed?
  complete if may_complete?
  else
@@ -91,8 +91,16 @@ module RocketJob
  logger.info("Job has expired. Not creating a new instance.")
  return
  end
- attributes = rocket_job_restart_attributes.each_with_object({}) { |attr, attrs| attrs[attr] = send(attr) }
- rocket_job_restart_create(attributes)
+ job_attrs =
+ rocket_job_restart_attributes.each_with_object({}) { |attr, attrs| attrs[attr] = send(attr) }
+ job = self.class.new(job_attrs)
+
+ # Copy across input and output categories to new scheduled job so that all of the
+ # settings are remembered between instance. Example: slice_size
+ job.input_categories = input_categories if respond_to?(:input_categories)
+ job.output_categories = output_categories if respond_to?(:output_categories)
+
+ rocket_job_restart_save(job)
  end
 
  def rocket_job_restart_abort
@@ -101,11 +109,10 @@ module RocketJob
 
  # Allow Singleton to prevent the creation of a new job if one is already running
  # Retry since the delete may not have persisted to disk yet.
- def rocket_job_restart_create(attrs, retry_limit = 3, sleep_interval = 0.1)
+ def rocket_job_restart_save(job, retry_limit = 10, sleep_interval = 0.5)
  count = 0
  while count < retry_limit
- job = self.class.create(attrs)
- if job.persisted?
+ if job.save
  logger.info("Created a new job instance: #{job.id}")
  return true
  else
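The restart path now builds the replacement job in memory (copying copy_on_restart fields plus any input/output categories) before saving it with retries. A hedged sketch of a job that uses the Restart plugin directly (whether a job includes it directly or via another plugin is outside this diff):

    # Sketch only: a job that creates a fresh instance of itself once the current one finishes.
    class PollingJob < RocketJob::Job
      include RocketJob::Plugins::Restart

      def perform
        # ...
      end
    end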
@@ -51,7 +51,8 @@ module RocketJob
  # Validate methods are any of Symbol String Proc
  methods.each do |method|
  unless method.is_a?(Symbol) || method.is_a?(String)
- raise(ArgumentError, "#{action}_#{event_name} currently does not support any options. Only Symbol and String method names can be supplied.")
+ raise(ArgumentError,
+ "#{action}_#{event_name} currently does not support any options. Only Symbol and String method names can be supplied.")
  end
  end
  methods
@@ -0,0 +1,38 @@
+ require "active_support/concern"
+ module RocketJob
+ module Plugins
+ # Prevent this job from starting, or a batch slice from starting if the dependent jobs are running.
+ #
+ # Features:
+ # - Ensures dependent jobs won't run
+ # When the throttle has been exceeded all jobs of this class will be ignored until the
+ # next refresh. `RocketJob::Config::re_check_seconds` which by default is 60 seconds.
+ module ThrottleDependentJobs
+ extend ActiveSupport::Concern
+
+ included do
+ class_attribute :dependent_jobs
+ self.dependent_jobs = nil
+
+ define_throttle :dependent_job_exists?
+ define_batch_throttle :dependent_job_exists? if respond_to?(:define_batch_throttle)
+ end
+
+ private
+
+ # Checks if there are any dependent jobs are running
+ def dependent_job_exists?
+ return false if dependent_jobs.blank?
+
+ jobs_count = RocketJob::Job.running.where(:_type.in => dependent_jobs).count
+ return false if jobs_count.zero?
+
+ logger.info(
+ message: "#{jobs_count} Dependent Jobs are running from #{dependent_jobs.join(', ')}",
+ metric: "#{self.class.name}/dependent_jobs_throttle"
+ )
+ true
+ end
+ end
+ end
+ end
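A hedged sketch of configuring the new dependent-jobs throttle (class names are illustrative; the include line is written out explicitly since this diff does not show whether RocketJob::Job mixes the plugin in by default):

    # Sketch only: hold this job back while any of the listed jobs are running.
    class NightlyReportJob < RocketJob::Job
      include RocketJob::Plugins::ThrottleDependentJobs

      self.dependent_jobs = ["ImportCustomersJob", "ImportOrdersJob"]

      def perform
        # ...
      end
    end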
@@ -0,0 +1,42 @@
+ module RocketJob
+ # Run each worker in its own "Ractor".
+ class RactorWorker < Worker
+ attr_reader :thread
+
+ def initialize(id:, server_name:)
+ super(id: id, server_name: server_name)
+ @shutdown = Concurrent::Event.new
+ @thread = Ractor.new(name: "rocketjob-#{id}") { run }
+ end
+
+ def alive?
+ @thread.alive?
+ end
+
+ def backtrace
+ @thread.backtrace
+ end
+
+ def join(*args)
+ @thread.join(*args)
+ end
+
+ # Send each active worker the RocketJob::ShutdownException so that stops processing immediately.
+ def kill
+ @thread.raise(Shutdown, "Shutdown due to kill request for worker: #{name}") if @thread.alive?
+ end
+
+ def shutdown?
+ @shutdown.set?
+ end
+
+ def shutdown!
+ @shutdown.set
+ end
+
+ # Returns [true|false] whether the shutdown indicator was set
+ def wait_for_shutdown?(timeout = nil)
+ @shutdown.wait(timeout)
+ end
+ end
+ end
@@ -28,7 +28,7 @@ module RocketJob
 
  # Current state
  # Internal use only. Do not set this field directly
- field :state, type: Symbol, default: :starting
+ field :state, type: Mongoid::StringifiedSymbol, default: :starting
 
  index({name: 1}, background: true, unique: true)
 
@@ -14,78 +14,23 @@ module RocketJob
  autoload :Output, "rocket_job/sliced/writer/output"
  end
 
- # Returns [RocketJob::Sliced::Slices] for the relevant type and category.
- #
- # Supports compress and encrypt with [true|false|Hash] values.
- # When [Hash] they must specify whether the apply to the input or output collection types.
- #
- # Example, compress both input and output collections:
- # class MyJob < RocketJob::Job
- # include RocketJob::Batch
- # self.compress = true
- # end
- #
- # Example, compress just the output collections:
- # class MyJob < RocketJob::Job
- # include RocketJob::Batch
- # self.compress = {output: true}
- # end
- #
- # To use the specialized BZip output compressor, and the regular compressor for the input collections:
- # class MyJob < RocketJob::Job
- # include RocketJob::Batch
- # self.compress = {output: :bzip2, input: true}
- # end
- def self.factory(type, category, job)
- raise(ArgumentError, "Unknown type: #{type.inspect}") unless %i[input output].include?(type)
-
- collection_name = "rocket_job.#{type}s.#{job.id}"
- collection_name << ".#{category}" unless category == :main
-
- args = {collection_name: collection_name, slice_size: job.slice_size}
- klass = slice_class(type, job)
- args[:slice_class] = klass if klass
-
- if type == :input
- RocketJob::Sliced::Input.new(args)
+ # Returns [RocketJob::Sliced::Slices] for the relevant direction and category.
+ def self.factory(direction, category, job)
+ collection_name = "rocket_job.#{direction}s.#{job.id}"
+ collection_name << ".#{category.name}" unless category.name == :main
+
+ case direction
+ when :input
+ RocketJob::Sliced::Input.new(
+ collection_name: collection_name,
+ slice_class: category.serializer_class,
+ slice_size: category.slice_size
+ )
+ when :output
+ RocketJob::Sliced::Output.new(collection_name: collection_name, slice_class: category.serializer_class)
  else
- RocketJob::Sliced::Output.new(args)
- end
- end
-
- private
-
- # Parses the encrypt and compress options to determine which slice serializer to use.
- # `encrypt` takes priority over any `compress` option.
- def self.slice_class(type, job)
- encrypt = extract_value(type, job.encrypt)
- compress = extract_value(type, job.compress)
-
- if encrypt
- case encrypt
- when true
- EncryptedSlice
- else
- raise(ArgumentError, "Unknown job `encrypt` value: #{compress}") unless compress.is_a?(Slices)
- # Returns the supplied class to use for encryption.
- encrypt
- end
- elsif compress
- case compress
- when true
- CompressedSlice
- when :bzip2
- BZip2OutputSlice
- else
- raise(ArgumentError, "Unknown job `compress` value: #{compress}") unless compress.is_a?(Slices)
- # Returns the supplied class to use for compression.
- compress
- end
+ raise(ArgumentError, "Unknown direction: #{direction.inspect}")
  end
  end
-
- def self.extract_value(type, value)
- value.is_a?(Hash) ? value[type] : value
- end
 
  end
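The factory now takes the slice size and serializer from the category object instead of the job-wide compress/encrypt flags that the removed slice_class helper parsed. A hedged sketch of the corresponding v6-style job configuration (the keyword names follow the new Category classes listed above, e.g. data/lib/rocket_job/category/input.rb, and should be treated as illustrative):

    # Sketch only: per-category settings replace self.compress / self.encrypt.
    class MyJob < RocketJob::Job
      include RocketJob::Batch

      input_category  slice_size: 1_000, serializer: :compress
      output_category serializer: :bzip2

      def perform(record)
        # ...
      end
    end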