procrastinator 1.0.0.pre.rc4 → 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
data/RELEASE_NOTES.md CHANGED
@@ -1,6 +1,6 @@
  # Release Notes
 
- ## 1.0.0 ( )
+ ## 1.0.0 (2022-09-18)
 
  ### Major Changes
 
@@ -21,16 +21,19 @@
  * `Procrastinator::Config#run_process_block`
  * Removed use of envvar `PROCRASTINATOR_STOP`
  * `Procrastinator::QueueManager` is merged into `Procrastinator::Scheduler`
- * Removed rake task to halt queue processes
+ * Removed rake task to halt individual queue processes
  * Renamed `Procrastinator::Config#provide_context` to `provide_container`
  * You must now call `Scheduler#work` on the result of `Procrastinator.config`
- * Use a dedicated process monitor (like `monit`) instead in production environments
- * Suuply a block to `daemonized!` to run code in the spawned process.
- * `max_tasks` is removed as it only added concurrency complexity
+ * Use a dedicated process monitor (like `monit`) instead in production environments to maintain uptime
+ * `max_tasks` is removed as it only added concurrency complexity. Each queue worker only selects one task from only its
+ queue.
  * Data is now stored as JSON instead of YAML
  * Added with_store that applies its settings to its block
  * `load_with` has been removed
  * Removed `task_attr` and `Procrastinator::Task` module. Tasks is now duck-type checked for accessors instead.
+ * Added Rake tasks to manage process daemon
+ * Times are passed to Task Store as a Ruby Time object instead of an epoch time integer
+ * `#delay` is now `#defer`
 
  ### Minor Changes
 
@@ -38,6 +41,8 @@
  * Updated development gems
  * Logs now include the queue name in log lines
  * Logs can now set the shift size or age (like Ruby's Logger)
+ * Log format is now tab-separated to align better and work with POSIX cut
+ * General renaming of terms
 
  ### Bugfixes
 
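For orientation, the renamed calls described in these notes might fit together roughly as follows. This is a hedged sketch, not taken from the gem's documentation: the `:email` queue, the `SendEmail` handler, the data payload, and the assumption that `Procrastinator.config` accepts a setup block and that `#work` can be chained into `#threaded` are all illustrative.

```ruby
# Illustrative only: queue name, handler class, and data are made up.
require 'procrastinator'

class SendEmail
   # duck-typed handler: no Procrastinator::Task module; accessors are detected instead
   attr_accessor :container, :logger, :scheduler, :data

   def run
      logger.info("emailing #{ data[:to] }")
   end
end

# assumption: block form of Procrastinator.config, per the SetupError message below
scheduler = Procrastinator.config do |config|
   config.define_queue :email, SendEmail
end

# #delay is now #defer (the old name remains as an alias)
scheduler.defer(:email, data: { to: 'someone@example.com' }, run_at: Time.now + 60)

scheduler.work.threaded # or run the daemon via the new Rake tasks in production
```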
data/Rakefile CHANGED
@@ -2,7 +2,14 @@
 
  require 'bundler/gem_tasks'
  require 'rspec/core/rake_task'
+ require 'yard'
 
  RSpec::Core::RakeTask.new(:spec)
 
  task default: :spec
+
+ YARD::Rake::YardocTask.new do |t|
+ t.files = %w[lib/**/*.rb]
+ # t.options = %w[--some-option]
+ t.stats_options = ['--list-undoc']
+ end
@@ -25,10 +25,20 @@ module Procrastinator
  class Config
  attr_reader :queues, :log_dir, :log_level, :log_shift_age, :log_shift_size, :container
 
- DEFAULT_LOG_DIRECTORY = Pathname.new('log').freeze
- DEFAULT_LOG_SHIFT_AGE = 0
+ # Default directory to keep logs in.
+ DEFAULT_LOG_DIRECTORY = Pathname.new('log').freeze
+
+ # Default age to keep log files for.
+ # @see Logger
+ DEFAULT_LOG_SHIFT_AGE = 0
+
+ # Default max size to keep log files under.
+ # @see Logger
  DEFAULT_LOG_SHIFT_SIZE = 2 ** 20 # 1 MB
- DEFAULT_LOG_FORMATTER = proc do |severity, datetime, progname, msg|
+
+ # Default log formatter
+ # @see Logger
+ DEFAULT_LOG_FORMATTER = proc do |severity, datetime, progname, msg|
  [datetime.iso8601(8),
  severity,
  "#{ progname } (#{ Process.pid }):",
@@ -70,10 +80,29 @@ module Procrastinator
  @default_store = old_store
  end
 
+ # Defines the container to assign to each Task Handler's :container attribute.
+ #
+ # @param container [Object] the container
  def provide_container(container)
  @container = container
  end
 
+ # Defines a queue in the Procrastinator Scheduler.
+ #
+ # The Task Handler will be initialized for each task and assigned each of these attributes:
+ # :container, :logger, :scheduler
+ #
+ # @param name [Symbol, String] the identifier to label the queue. Referenced in #defer calls
+ # @param task_class [Class] the Task Handler class.
+ # @param properties [Hash] Settings options object for this queue
+ # @option properties [Object] :store (Procrastinator::TaskStore::SimpleCommaStore)
+ # Storage strategy for tasks in the queue.
+ # @option properties [Integer] :max_attempts (Procrastinator::Queue::DEFAULT_MAX_ATTEMPTS)
+ # Maximum number of times a task may be attempted before giving up.
+ # @option properties [Integer] :timeout (Procrastinator::Queue::DEFAULT_TIMEOUT)
+ # Maximum number of seconds to wait for a single task to complete.
+ # @option properties [Integer] :update_period (Procrastinator::Queue::DEFAULT_UPDATE_PERIOD)
+ # Time to wait before checking for new tasks.
  def define_queue(name, task_class, properties = {})
  raise ArgumentError, 'queue name cannot be nil' if name.nil?
  raise ArgumentError, 'queue task class cannot be nil' if task_class.nil?
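Putting the documented options together, a queue definition with explicit settings might look like the sketch below. The queue name, handler class, and option values are invented for illustration; the defaults fall back to the `Queue` constants named in the docs above, and the `:store` option is omitted rather than guessing at the store's constructor.

```ruby
# Sketch only: values are arbitrary; omit any option to use the documented default.
config.define_queue :reminders, SendReminder,
                    max_attempts:  5,   # instead of Queue::DEFAULT_MAX_ATTEMPTS (20)
                    timeout:       600, # seconds allowed per task attempt
                    update_period: 5    # seconds between checks for new tasks
```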
@@ -142,7 +171,9 @@ module Procrastinator
  end
  end
 
+ # Raised when there is an error in the setup configuration.
  class SetupError < RuntimeError
+ # Standard error message for when there is no queue defined
  ERR_NO_QUEUE = 'setup block must call #define_queue on the environment'
  end
  end
@@ -14,10 +14,6 @@ module Procrastinator
  #
  # @see Task
  class LoggedTask < DelegateClass(Task)
- # extend Forwardable
- #
- # def_delegators :@task, :id, :to_h
-
  attr_reader :logger
 
  alias task __getobj__
@@ -27,6 +23,7 @@ module Procrastinator
  @logger = logger || raise(ArgumentError, 'Logger cannot be nil')
  end
 
+ # (see Task#run)
  def run
  task.run
 
@@ -37,6 +34,7 @@ module Procrastinator
  end
  end
 
+ # @param (see Task#fail)
  def fail(error)
  hook = task.fail(error)
  begin
@@ -18,8 +18,13 @@ module Procrastinator
  class Queue
  extend Forwardable
 
- DEFAULT_TIMEOUT = 3600 # in seconds; one hour total
- DEFAULT_MAX_ATTEMPTS = 20
+ # Default number of seconds to wait for a task to complete
+ DEFAULT_TIMEOUT = 3600 # in seconds; one hour total
+
+ # Default number of times to retry a task
+ DEFAULT_MAX_ATTEMPTS = 20
+
+ # Default amount of time between checks for new Tasks
  DEFAULT_UPDATE_PERIOD = 10 # seconds
 
  attr_reader :name, :max_attempts, :timeout, :update_period, :task_store, :task_class
@@ -54,6 +59,12 @@ module Procrastinator
  freeze
  end
 
+ # Constructs the next available task on the queue.
+ #
+ # @param logger [Logger] logger to provide to the constructed task handler
+ # @param container [Object, nil] container to provide to the constructed task handler
+ # @param scheduler [Procrastinator::Scheduler, nil] the scheduler to provide to the constructed task handler
+ # @return [LoggedTask, nil] A Task or nil if no task is found
  def next_task(logger: Logger.new(StringIO.new), container: nil, scheduler: nil)
  metadata = next_metas.find(&:runnable?)
 
@@ -67,6 +78,9 @@ module Procrastinator
  LoggedTask.new(task, logger: logger)
  end
 
+ # Fetch a task matching the given identifier
+ #
+ # @param identifier [Hash] attributes to match
  def fetch_task(identifier)
  identifier[:data] = JSON.dump(identifier[:data]) if identifier[:data]
 
@@ -78,6 +92,11 @@ module Procrastinator
  TaskMetaData.new(tasks.first.merge(queue: self))
  end
 
+ # Creates a task on the queue, saved using the Task Store strategy.
+ #
+ # @param run_at [Time] Earliest time to attempt running the task
+ # @param expire_at [Time, nil] Time after which the task will not be attempted
+ # @param data [Hash, String, Numeric, nil] The data to save
  def create(run_at:, expire_at:, data:)
  if data.nil? && expects_data?
  raise ArgumentError, "task #{ @task_class } expects to receive :data. Provide :data to #delay."
@@ -102,6 +121,7 @@ module Procrastinator
  @task_store.create(**create_data)
  end
 
+ # @return [Boolean] whether the task handler will accept data to be assigned via its :data attribute
  def expects_data?
  @task_class.method_defined?(:data=)
  end
@@ -136,6 +156,8 @@
 
  # Internal queue validator
  module QueueValidation
+ private
+
  def validate!
  verify_task_class!
  verify_task_store!
@@ -198,9 +220,11 @@
  include QueueValidation
  end
 
+ # Raised when a Task Handler does not conform to the expected API
  class MalformedTaskError < StandardError
  end
 
+ # Raised when a Task Store strategy does not conform to the expected API
  class MalformedTaskStoreError < RuntimeError
  end
  end
@@ -64,6 +64,7 @@ module Procrastinator
  end
  end
 
+ # Logs halting the queue
  def halt
  @logger&.info("Halted worker on queue: #{ name }")
  @logger&.close
@@ -86,6 +87,9 @@ module Procrastinator
  end
  end
 
+ # Raised when a Task Storage strategy is missing a required part of the API.
+ #
+ # @see TaskStore
  class MalformedTaskPersisterError < StandardError
  end
  end
@@ -3,14 +3,20 @@
  require 'rake'
 
  module Procrastinator
+ # Rake tasks specific to Procrastinator.
+ #
+ # Provide this in your Rakefile:
+ #
+ # require 'procrastinator/rake/task'
+ # Procrastinator::RakeTask.new do
+ # # return your Procrastinator::Scheduler here or construct it using Procrastinator.config
+ # end
+ #
+ # And then you will be able to run rake tasks like:
+ #
+ # bundle exec rake procrastinator:start
  module Rake
- # RakeTask builder. Provide this in your Rakefile:
- #
- # require 'procrastinator/rake/task'
- # Procrastinator::RakeTask.new('/var/run') do
- # # return your Procrastinator::Scheduler here or construct it using Procrastinator.config
- # end
- #
+ # RakeTask builder class. Use DaemonTasks.define to generate the needed tasks.
  class DaemonTasks
  include ::Rake::Cloneable
  include ::Rake::DSL
@@ -26,7 +32,7 @@ module Procrastinator
  # Defines procrastinator:start and procrastinator:stop Rake tasks that operate on the given scheduler.
  #
  # @param pid_path [Pathname, File, String, nil] The pid file path
- # @yieldreturn scheduler [Procrastinator::Scheduler]
+ # @yieldreturn [Procrastinator::Scheduler] Constructed Scheduler to use as basis for starting tasks
  #
  # @see Scheduler::DaemonWorking#daemonized!
  def define(pid_path: nil)
@@ -78,8 +84,9 @@ module Procrastinator
  def stop
  return unless Scheduler::DaemonWorking.running?(@pid_path)
 
+ pid = File.read(@pid_path)
  Scheduler::DaemonWorking.halt!(@pid_path)
- warn "Procrastinator pid #{ File.read(@pid_path) } halted."
+ warn "Procrastinator pid #{ pid } halted."
  end
  end
  end
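Based on the comment block and the `define(pid_path:)` signature above, a Rakefile might wire the daemon tasks up roughly as follows. This is a sketch: the pid path is a placeholder, and calling `define` directly on `DaemonTasks` with `pid_path` as a keyword argument is assumed from the docs rather than shown in this hunk.

```ruby
# Sketch based on the documentation above; pid_path is illustrative.
require 'procrastinator/rake/task'

Procrastinator::Rake::DaemonTasks.define(pid_path: '/tmp/procrastinator.pid') do
   # return your Procrastinator::Scheduler here or construct it using Procrastinator.config
end
```

With the tasks defined, `bundle exec rake procrastinator:start` and `procrastinator:stop` manage the daemon, as the comment above notes.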
@@ -0,0 +1,30 @@
+ # frozen_string_literal: true
+
+ require 'rspec/expectations'
+
+ # Determines if the given task store has a task that matches the expectation hash
+ RSpec::Matchers.define :have_task do |expected_task|
+ match do |task_store|
+ task_store.read.any? do |task|
+ task_hash = task.to_h
+ task_hash[:data] = JSON.parse(task_hash[:data], symbolize_names: true) unless task_hash[:data].empty?
+
+ expected_task.all? do |field, expected_value|
+ expected_value = case field
+ when :queue
+ expected_value.to_sym
+ when :run_at, :initial_run_at, :expire_at, :last_fail_at
+ Time.at(expected_value.to_i)
+ else
+ expected_value
+ end
+
+ values_match? expected_value, task_hash[field]
+ end
+ end
+ end
+
+ description do
+ "have a task with properties #{ expected_task.collect { |k, v| "#{ k }=#{ v }" }.join(', ') }"
+ end
+ end
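In a spec, the new matcher checks the expectation hash against each task returned by the store's `#read`. A hypothetical usage, where `scheduler`, `task_store`, the queue name, and the data are all illustrative:

```ruby
# Hypothetical spec snippet; scheduler and task_store are assumed to be set up elsewhere.
RSpec.describe 'deferring work' do
   it 'saves the task in the store' do
      scheduler.defer(:email, data: { user_id: 5 })

      expect(task_store).to have_task(queue: :email, data: { user_id: 5 })
   end
end
```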
@@ -19,7 +19,7 @@ module Procrastinator
  # @param run_at [Time, Integer] Optional time when this task should be executed. Defaults to the current time.
  # @param data [Hash, Array, String, Integer] Optional simple data object to be provided to the task on execution.
  # @param expire_at [Time, Integer] Optional time when the task should be abandoned
- def delay(queue_name = nil, data: nil, run_at: Time.now, expire_at: nil)
+ def defer(queue_name = nil, data: nil, run_at: Time.now, expire_at: nil)
  raise ArgumentError, <<~ERR unless queue_name.nil? || queue_name.is_a?(Symbol)
  must provide a queue name as the first argument. Received: #{ queue_name }
  ERR
@@ -29,13 +29,15 @@ module Procrastinator
  queue.create(run_at: run_at, expire_at: expire_at, data: data)
  end
 
+ alias delay defer
+
  # Alters an existing task to run at a new time, expire at a new time, or both.
  #
  # Call #to on the result and pass in the new :run_at and/or :expire_at.
  #
  # Example:
  #
- # scheduler.reschedule(:alerts, data: {user_id: 5}).to(run_at: Time.now, expire_at: Time.now + 10)
+ # scheduler.reschedule(:alerts, data: {user_id: 5}).to(run_at: Time.now, expire_at: Time.now + 10)
  #
  # The identifier can include any data field stored in the task loader. Often this is the information in :data.
  #
@@ -123,6 +125,7 @@
  #
  # @see WorkProxy
  module ThreadedWorking
+ # Program name. Used as default for pid file names and in logging.
  PROG_NAME = 'Procrastinator'
 
  # Work off jobs per queue, each in its own thread.
@@ -135,7 +138,7 @@ module Procrastinator
  begin
  @threads = spawn_threads
 
- @logger.info "Procrastinator running. Process ID: #{ Process.pid }"
+ @logger.info "#{ PROG_NAME } running. Process ID: #{ Process.pid }"
  @threads.each do |thread|
  thread.join(timeout)
  end
@@ -243,10 +246,13 @@
  #
  # @see WorkProxy
  module DaemonWorking
- PID_EXT = '.pid'
- DEFAULT_PID_DIR = Pathname.new('/var/run/').freeze
+ # File extension for process ID files
+ PID_EXT = '.pid'
+
+ # Default directory to store PID files in.
+ DEFAULT_PID_DIR = Pathname.new('/tmp').freeze
 
- # 15 chars is linux limit
+ # Maximum process name size. 15 chars is linux limit
  MAX_PROC_LEN = 15
 
  # Consumes the current process and turns it into a background daemon and proceed as #threaded.
@@ -290,6 +296,10 @@ module Procrastinator
  false
  end
 
+ # Raised when a process is already found to exist using the same pid_path
+ class ProcessExistsError < RuntimeError
+ end
+
  private
 
  def spawn_daemon(pid_path)
@@ -384,7 +394,4 @@ module Procrastinator
  end
  end
  end
-
- class ProcessExistsError < RuntimeError
- end
  end
@@ -21,6 +21,12 @@ module Procrastinator
  @handler = handler
  end
 
+ # Executes the Task Handler's #run hook and records the attempt.
+ #
+ # If the #run hook completes successfully, the #success hook will also be executed, if defined.
+ #
+ # @raise [ExpiredError] when the task run_at is after the expired_at.
+ # @raise [AttemptsExhaustedError] when the task has been attempted more times than allowed by the queue settings.
  def run
  raise ExpiredError, "task is over its expiry time of #{ @metadata.expire_at.iso8601 }" if @metadata.expired?
 
@@ -45,19 +51,25 @@ module Procrastinator
  hook
  end
 
+ # Attempts to run the given optional event hook on the handler, catching any resultant errors to prevent the whole
+ # task from failing despite the actual work in #run completing.
  def try_hook(method, *params)
  @handler.send(method, *params) if @handler.respond_to? method
  rescue StandardError => e
  warn "#{ method.to_s.capitalize } hook error: #{ e.message }"
  end
 
+ # Convert the task into a human-legible string.
+ # @return [String] Including the queue name, id, and serialized data.
  def to_s
  "#{ @metadata.queue.name }##{ id } [#{ serialized_data }]"
  end
 
+ # Raised when a Task's run_at is beyond its expire_at
  class ExpiredError < RuntimeError
  end
 
+ # Raised when a Task's attempts has exceeded the max_attempts defined for its queue (if any).
  class AttemptsExhaustedError < RuntimeError
  end
  end
@@ -50,6 +50,9 @@ module Procrastinator
  @data = data ? JSON.parse(data, symbolize_names: true) : nil
  end
 
+ # Increases the number of attempts on this task by one, unless the limit has been reached.
+ #
+ # @raise [Task::AttemptsExhaustedError] when the number of attempts has exceeded the Queue's defined maximum.
  def add_attempt
  raise Task::AttemptsExhaustedError unless attempts_left?
 
@@ -72,22 +75,28 @@ module Procrastinator
  end
  end
 
+ # @return [Boolean] whether the task has attempts left and is not expired
  def retryable?
  attempts_left? && !expired?
  end
 
+ # @return [Boolean] whether the task is expired
  def expired?
  !@expire_at.nil? && @expire_at < Time.now
  end
 
+ # @return [Boolean] whether there are attempts left until the Queue's defined maximum is reached (if any)
  def attempts_left?
  @queue.max_attempts.nil? || @attempts < @queue.max_attempts
  end
 
+ # @return [Boolean] whether the task's run_at is exceeded
  def runnable?
  !@run_at.nil? && @run_at <= Time.now
  end
 
+ # @return [Boolean] whether the task's last execution completed successfully.
+ # @raise [RuntimeError] when the task has not been attempted yet or when it is expired
  def successful?
  raise 'you cannot check for success before running #work' if !expired? && @attempts <= 0
 
@@ -117,6 +126,7 @@ module Procrastinator
  @run_at += 30 + (@attempts ** 4) unless @run_at.nil?
  end
 
+ # @return [Hash] representation of the task metadata as a hash
  def to_h
  {id: @id,
  queue: @queue.name.to_s,
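The context line `@run_at += 30 + (@attempts ** 4)` in the hunk above gives a quartic back-off on retries. For the first few attempts the added delay works out as:

```ruby
# Delay in seconds added to run_at after each failed attempt, per the line above
(1..5).map { |attempts| 30 + attempts ** 4 }
#=> [31, 46, 111, 286, 655]
```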
@@ -129,10 +139,12 @@ module Procrastinator
  data: serialized_data}
  end
 
+ # @return [String] :data serialized as a JSON string
  def serialized_data
  JSON.dump(@data)
  end
 
+ # Resets the last failure time and error.
  def clear_fails
  @last_error = nil
  @last_fail_at = nil
@@ -3,74 +3,72 @@
  require 'pathname'
 
  module Procrastinator
- module TaskStore
- # The general idea is that there may be two threads that need to do these actions on the same file:
- # thread A: read
- # thread B: read
- # thread A/B: write
- # thread A/B: write
- #
- # When this sequence happens, the second file write is based on old information and loses the info from
- # the prior write. Using a global mutex per file path prevents this case.
- #
- # This situation can also occur with multi processing, so file locking is also used for solitary access.
- # File locking is only advisory in some systems, though, so it may only work against other applications
- # that request a lock.
- #
- # @author Robin Miller
- class FileTransaction
- # Holds the mutual exclusion locks for file paths by name
- @file_mutex = {}
+ # The general idea is that there may be two threads that need to do these actions on the same file:
+ # thread A: read
+ # thread B: read
+ # thread A/B: write
+ # thread A/B: write
+ #
+ # When this sequence happens, the second file write is based on old information and loses the info from
+ # the prior write. Using a global mutex per file path prevents this case.
+ #
+ # This situation can also occur with multi processing, so file locking is also used for solitary access.
+ # File locking is only advisory in some systems, though, so it may only work against other applications
+ # that request a lock.
+ #
+ # @author Robin Miller
+ class FileTransaction
+ # Holds the mutual exclusion locks for file paths by name
+ @file_mutex = {}
 
- class << self
- attr_reader :file_mutex
- end
+ class << self
+ attr_reader :file_mutex
+ end
 
- def initialize(path)
- @path = ensure_path(path)
- end
+ def initialize(path)
+ @path = ensure_path(path)
+ end
 
- # Alias for transact(writable: false)
- def read(&block)
- transact(writable: false, &block)
- end
+ # Alias for transact(writable: false)
+ def read(&block)
+ transact(writable: false, &block)
+ end
 
- # Alias for transact(writable: true)
- def write(&block)
- transact(writable: true, &block)
- end
+ # Alias for transact(writable: true)
+ def write(&block)
+ transact(writable: true, &block)
+ end
 
- # Completes the given block as an atomic transaction locked using a global mutex table.
- # The block is provided the current file contents.
- # The block's result is written to the file.
- def transact(writable: false)
- semaphore = FileTransaction.file_mutex[@path.to_s] ||= Mutex.new
+ # Completes the given block as an atomic transaction locked using a global mutex table.
+ # The block is provided the current file contents.
+ # The block's result is written to the file.
+ def transact(writable: false)
+ semaphore = FileTransaction.file_mutex[@path.to_s] ||= Mutex.new
 
- semaphore.synchronize do
- @path.open(writable ? 'r+' : 'r') do |file|
- file.flock(File::LOCK_EX)
+ semaphore.synchronize do
+ @path.open(writable ? 'r+' : 'r') do |file|
+ file.flock(File::LOCK_EX)
 
- yield_result = yield(file.read)
- if writable
- file.rewind
- file.write yield_result
- file.truncate(file.pos)
- end
- yield_result
+ yield_result = yield(file.read)
+ if writable
+ file.rewind
+ file.write yield_result
+ file.truncate(file.pos)
  end
+ yield_result
  end
  end
+ end
 
- private
+ private
 
- def ensure_path(path)
- path = Pathname.new path
- unless path.exist?
- path.dirname.mkpath
- FileUtils.touch path
- end
- path
+ def ensure_path(path)
+ path = Pathname.new path
+ unless path.exist?
+ path.dirname.mkpath
+ FileUtils.touch path
  end
+ path
  end
  end
  end
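Since `FileTransaction` is moved out of the `TaskStore` namespace in this hunk, calling code now references it directly under `Procrastinator`. A small sketch of the transaction API described above; the file name and appended content are invented for illustration:

```ruby
# Sketch: the block receives the current file contents and, for #write,
# its return value is written back under the path's mutex and an advisory file lock.
transaction = Procrastinator::FileTransaction.new('procrastinator-tasks.csv')

transaction.write do |current_content|
   current_content + "another task row\n"
end

contents = transaction.read { |current_content| current_content }
```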