fiber_job 0.2.1 → 0.2.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 902c8f2047330d092fdcadda50067f8bef6a47bc43039ed6bad7172d6976a1b7
-  data.tar.gz: 9cd001ba179f3f6a16619441b448e1d46dc7917431a3deea753615135af499f7
+  metadata.gz: e66c22f05345c01f395ff4c12d2552166dacdbf793beb21d33a80d5039d8614b
+  data.tar.gz: d05d0d697c0160b4b1265e10034cc8f6b305b6b6b71cb08394225f5d2d3573b4
 SHA512:
-  metadata.gz: 391d2e8826377526d5c852f1ba1a07a84767d3c57544d73ce93cb7cb6e3f3cf381591e6a76049219d3122259af047d6f3dce492a00aea764319f53583ba20385
-  data.tar.gz: e5dce3e93ae978c1c0d02169cfe2b95cd92888a92d9f35675f372cd31fe6ae17e2a6a551a977abaed40095e2d5de3068dc94cbe59b69c0a01ef54ed005b46ab8
+  metadata.gz: 71fd43833e9d47668af585d97cfe114950fe59e0197bbc2597e8b0b479c97c5d19eef8a877690f2f8910aaadccdc05718e938eed870e62e24a14a0d23a92d8dc
+  data.tar.gz: 7234510238408774dfba23fb6377d4b0a21be216ec2b549ff25bc9a27ecf29f0970b374fd12e82d4c992a60c67e203875e14530c1e46c263907513ba023f376c

@@ -3,6 +3,24 @@
 require 'logger'
 
 module FiberJob
+  # Null logger implementation for maximum performance when logging is disabled.
+  # All logging methods are no-ops, eliminating the overhead of log message formatting.
+  class NullLogger
+    %i[debug info warn error fatal unknown].each do |level|
+      define_method(level) { |*_args| nil }
+      define_method("#{level}?") { false }
+    end
+
+    def level=(*_args); end
+    def level = 0
+    def add(*_args); end
+    def log(*_args); end
+    def close; end
+    def reopen(*_args); end
+    def formatter=(*_args); end
+    def datetime_format=(*_args); end
+  end
+
   # Configuration class for FiberJob library settings.
   # Manages Redis connection, worker concurrency, queue configuration,
   # and logging settings. Supports both global and per-queue configuration.
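
The NullLogger added here is a textbook null object: it answers the full Logger interface but does nothing, so disabled logging costs neither I/O nor message formatting when callers guard with the `level?` predicates. A minimal standalone sketch of that behavior (not the gem's actual class):

```ruby
# Stand-in mirroring the FiberJob::NullLogger added in this diff.
class NullLogger
  %i[debug info warn error fatal unknown].each do |level|
    define_method(level) { |*_args| nil }   # every log call is a no-op
    define_method("#{level}?") { false }    # severity checks report "disabled"
  end
end

logger = NullLogger.new
logger.info('hello')  # => nil, nothing is written
logger.debug?         # => false

# Guarding with the predicate skips message construction entirely:
big = Array.new(10_000) { rand }
logger.debug("payload: #{big.inspect}") if logger.debug?  # inspect never runs
```
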
@@ -44,7 +62,12 @@ module FiberJob
     #   @return [Symbol] Logging level (:debug, :info, :warn, :error)
     # @!attribute [rw] job_paths
     #   @return [Array<String>] List of paths to auto-load job classes from
-    attr_accessor :redis_url, :concurrency, :queues, :queue_concurrency, :logger, :log_level, :job_paths
+    # @!attribute [rw] pool_size
+    #   @return [Integer] Redis connection pool size
+    # @!attribute [rw] enable_logging
+    #   @return [Boolean] Whether to enable logging (set to false for maximum performance)
+    attr_accessor :redis_url, :concurrency, :queues, :queue_concurrency, :log_level, :job_paths, :pool_size,
+                  :enable_logging
 
     # Initializes configuration with sensible defaults.
     # Values can be overridden through environment variables or configuration blocks.
@@ -54,15 +77,19 @@ module FiberJob
     # Environment variables:
     # - REDIS_URL: Redis connection URL (default: redis://localhost:6379)
     # - FIBER_JOB_LOG_LEVEL: Logging level (default: info)
+    # - FIBER_JOB_CONCURRENCY: Global concurrency level (default: 2)
+    # - FIBER_JOB_ENABLE_LOGGING: Enable/disable logging (default: true)
+    # - FIBER_JOB_POOL_SIZE: Redis connection pool size (default: 5)
     def initialize
-      @redis_url = ENV['REDIS_URL'] || 'redis://localhost:6379'
-      @concurrency = 2
+      @redis_url = ENV.fetch('REDIS_URL', 'redis://localhost:6379')
+      @concurrency = ENV.fetch('FIBER_JOB_CONCURRENCY', 2).to_i
       @queues = [:default]
-      @queue_concurrency = { default: 2 } # Per-queue concurrency
-      @log_level = ENV['FIBER_JOB_LOG_LEVEL']&.to_sym || :info
-      @logger = ::Logger.new($stdout)
-      @logger.level = ::Logger.const_get(@log_level.to_s.upcase)
+      @queue_concurrency = { default: 10 } # Per-queue concurrency
+      @log_level = ENV.fetch('FIBER_JOB_LOG_LEVEL', 'info').to_sym
+      @enable_logging = ENV.fetch('FIBER_JOB_ENABLE_LOGGING', 'true') == 'true'
+      @logger = nil # Lazy loaded
       @job_paths = []
+      @pool_size = ENV.fetch('FIBER_JOB_POOL_SIZE', 5).to_i
     end
 
     # Returns the concurrency setting for a specific queue.
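
Worth noting how the new ENV.fetch defaults resolve; in particular, FIBER_JOB_ENABLE_LOGGING is compared against the exact string 'true'. A quick sketch:

```ruby
# ENV.fetch with a default, as used in initialize above:
ENV.delete('FIBER_JOB_CONCURRENCY')
ENV.fetch('FIBER_JOB_CONCURRENCY', 2).to_i        # => 2 (Integer default; to_i is a no-op)

ENV['FIBER_JOB_CONCURRENCY'] = '8'
ENV.fetch('FIBER_JOB_CONCURRENCY', 2).to_i        # => 8 (String from ENV, coerced)

# The logging flag compares against the literal string 'true', case-sensitively:
ENV['FIBER_JOB_ENABLE_LOGGING'] = 'TRUE'
ENV.fetch('FIBER_JOB_ENABLE_LOGGING', 'true') == 'true'  # => false
```
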
@@ -98,33 +125,50 @@ module FiberJob
           next
         end
 
-        @logger.info "Loading jobs from: #{path}"
+        Dir.glob("#{path}/**/*.rb").each do |file|
+          # Track classes before requiring the file
+          classes_before = job_classes
 
-        Dir.glob("#{path}/**/*.rb").sort.each do |file|
-          begin
-            # Track classes before requiring the file
-            classes_before = job_classes
+          require_relative File.expand_path(file)
 
-            require_relative File.expand_path(file)
+          # Find newly loaded job classes
+          new_classes = job_classes - classes_before
 
-            # Find newly loaded job classes
-            new_classes = job_classes - classes_before
-
-            new_classes.each do |job_class|
-              @logger.debug "Loaded job class: #{job_class}"
-              loaded_classes << job_class
-            end
-
-          rescue => e
-            @logger.error "Failed to load job file #{file}: #{e.message}"
+          new_classes.each do |job_class|
+            @logger.debug "Loaded job class: #{job_class}"
+            loaded_classes << job_class
           end
+        rescue StandardError => e
+          @logger.error "Failed to load job file #{file}: #{e.message}"
         end
       end
 
-      @logger.info "Loaded #{loaded_classes.size} job classes"
       loaded_classes
     end
 
+    # Returns the logger instance, creating it lazily if needed.
+    # When logging is disabled, returns a null logger for maximum performance.
+    #
+    # @return [Logger, NullLogger] Logger instance or null logger
+    def logger
+      return @logger if @logger
+
+      @logger = if @enable_logging
+                  create_logger
+                else
+                  NullLogger.new
+                end
+    end
+
+    # Sets a custom logger instance.
+    # Setting a logger will override the enable_logging setting.
+    #
+    # @param value [Logger, nil] Logger instance or nil
+    def logger=(value)
+      @logger = value
+      @enable_logging = !value.nil?
+    end
+
    private
 
    # Returns all classes that inherit from FiberJob::Job
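
The logger is now built on first access rather than in initialize, and assigning a custom logger re-derives enable_logging; assigning nil clears the memo so the next read falls back to the null logger. A condensed sketch of that behavior (ConfigSketch and this NullLogger stand-in are illustrative, not the gem's code):

```ruby
require 'logger'

NullLogger = Class.new # stand-in for the NullLogger added earlier in this diff

class ConfigSketch
  def initialize(enable_logging: true)
    @enable_logging = enable_logging
    @logger = nil # lazy loaded
  end

  def logger
    return @logger if @logger

    @logger = @enable_logging ? Logger.new($stdout) : NullLogger.new
  end

  def logger=(value)
    @logger = value
    @enable_logging = !value.nil?
  end
end

cfg = ConfigSketch.new(enable_logging: false)
cfg.logger.class   # => NullLogger; no real Logger is ever constructed
cfg.logger = Logger.new($stdout)
cfg.logger.class   # => Logger; @enable_logging flips back to true
cfg.logger = nil   # clears the memo; the next read rebuilds a NullLogger
```
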
@@ -135,5 +179,12 @@ module FiberJob
         klass < FiberJob::Job
       end
     end
+
+    # Creates a logger instance with the configured level
+    def create_logger
+      logger = ::Logger.new($stdout)
+      logger.level = ::Logger.const_get(@log_level.to_s.upcase)
+      logger
+    end
   end
 end
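
`create_logger` resolves the configured symbol to Logger's integer severity with `const_get`, so only names matching Logger's constants are valid. A short sketch:

```ruby
require 'logger'

level = :warn                          # e.g. from FIBER_JOB_LOG_LEVEL
Logger.const_get(level.to_s.upcase)    # => 2 (Logger::WARN)

logger = Logger.new($stdout)
logger.level = Logger.const_get(level.to_s.upcase)
logger.info('suppressed')              # below WARN, not emitted
logger.warn('emitted')

# An unrecognized level (e.g. :verbose) raises NameError in const_get.
```
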

@@ -1,6 +1,7 @@
 # frozen_string_literal: true
 
 require 'redis'
+require 'connection_pool'
 
 module FiberJob
   # Queue provides Redis-based queue operations for job storage and retrieval.
@@ -35,7 +36,19 @@ module FiberJob
       end
     end
 
+    # Returns a Redis connection from the pool.
+    # This is the preferred method for getting Redis connections as it
+    # provides connection pooling and thread/fiber safety.
+    #
+    # @return [ConnectionPool] The Redis connection pool
+    def self.redis_pool
+      @redis_pool ||= ConnectionPool.new(size: config.pool_size, timeout: 5) do
+        Redis.new(url: config.redis_url)
+      end
+    end
+
     # Returns the shared Redis connection instance.
+    # @deprecated Use redis_pool.with { |redis| ... } instead for better performance
     # Creates a new connection if one doesn't exist.
     #
     # @return [Redis] The shared Redis connection
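
For readers unfamiliar with the connection_pool gem: the pool lazily builds up to `size` connections, and `with` borrows one for the duration of the block. A minimal sketch of the same shape as `redis_pool` above, assuming a local Redis (size, timeout, and URL are illustrative):

```ruby
require 'redis'
require 'connection_pool'

pool = ConnectionPool.new(size: 5, timeout: 5) do
  Redis.new(url: 'redis://localhost:6379')
end

# `with` checks a connection out for the block and returns it to the pool.
# If all 5 are busy, the caller blocks for up to `timeout` seconds, then
# raises ConnectionPool::TimeoutError.
pool.with do |redis|
  redis.ping # => "PONG"
end
```
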
@@ -44,6 +57,7 @@
     end
 
     # Creates a new Redis connection for fiber-safe operations.
+    # @deprecated Use redis_pool.with { |redis| ... } instead for better performance
     # Used when concurrent operations need separate connections.
     #
     # @return [Redis] A new Redis connection instance
@@ -64,7 +78,7 @@
     #   payload = { 'class' => 'EmailJob', 'args' => [123, 'message'] }
     #   FiberJob::Queue.push(:default, payload)
     def self.push(queue_name, payload)
-      redis.lpush("queue:#{queue_name}", JSON.dump(payload))
+      redis_pool.with { |redis| redis.lpush("queue:#{queue_name}", JSON.dump(payload)) }
     end
 
     # Adds a job to the head of the queue for priority processing.
@@ -78,15 +92,16 @@
     #   FiberJob::Queue.push_priority(:default, urgent_job_data)
     def self.push_priority(queue_name, payload)
       # Add to the head of the queue for priority execution
-      redis.rpush("queue:#{queue_name}", JSON.dump(payload))
+      redis_pool.with { |redis| redis.rpush("queue:#{queue_name}", JSON.dump(payload)) }
     end
 
     # Removes and returns a job from the specified queue.
     # Blocks for the specified timeout waiting for jobs to become available.
+    # Uses connection pool by default for better performance.
     #
     # @param queue_name [String, Symbol] Name of the source queue
     # @param timeout [Float] Maximum time to wait for a job (default: 0.1)
-    # @param redis_conn [Redis, nil] Optional Redis connection to use
+    # @param redis_conn [Redis, nil] Optional Redis connection to use (bypasses pool)
     # @return [Hash, nil] Job data hash or nil if timeout reached
 
     # @example Pop from queue with timeout
@@ -95,8 +110,13 @@
     #     puts "Processing job: #{job['class']}"
     #   end
     def self.pop(queue_name, timeout: 0.1, redis_conn: nil)
-      conn = redis_conn || redis
-      data = conn.brpop("queue:#{queue_name}", timeout: timeout)
+      data = if redis_conn
+               # Use provided connection (for legacy compatibility)
+               redis_conn.brpop("queue:#{queue_name}", timeout: timeout)
+             else
+               # Use connection pool for better performance
+               redis_pool.with { |redis| redis.brpop("queue:#{queue_name}", timeout: timeout) }
+             end
       data ? JSON.parse(data[1]) : nil
     end
 
@@ -113,7 +133,7 @@
     #   future_time = Time.now.to_f + 3600 # 1 hour from now
     #   FiberJob::Queue.schedule(:default, job_data, future_time)
     def self.schedule(queue_name, payload, scheduled_at)
-      redis.zadd("schedule:#{queue_name}", scheduled_at, JSON.dump(payload))
+      redis_pool.with { |redis| redis.zadd("schedule:#{queue_name}", scheduled_at, JSON.dump(payload)) }
     end
 
     # Schedules a job with priority for execution at a specific time.
@@ -126,38 +146,49 @@
     def self.schedule_priority(queue_name, payload, scheduled_at)
       # Mark as priority retry for head-of-queue execution
       priority_payload = payload.merge('priority_retry' => true)
-      redis.zadd("schedule:#{queue_name}", scheduled_at, JSON.dump(priority_payload))
+      redis_pool.with { |redis| redis.zadd("schedule:#{queue_name}", scheduled_at, JSON.dump(priority_payload)) }
     end
 
     # Processes scheduled jobs that are ready for execution.
     # Moves jobs from the scheduled set to the appropriate queue
     # when their scheduled time has arrived.
+    # Uses Redis pipeline for better performance when processing multiple jobs.
     #
     # @param queue_name [String, Symbol] Name of the queue to process
-    # @return [void]
+    # @return [Integer] Number of jobs moved to queue
     #
     # @example Process scheduled jobs
-    #   FiberJob::Queue.scheduled_jobs(:default) # Called by workers
+    #   moved_count = FiberJob::Queue.scheduled_jobs(:default) # Called by workers
     def self.scheduled_jobs(queue_name)
-      now = Time.now.to_f
-      jobs = redis.zrangebyscore("schedule:#{queue_name}", 0, now)
-
-      jobs.each do |job_json|
-        redis.zrem("schedule:#{queue_name}", job_json)
-        job_data = JSON.parse(job_json)
-
-        # Use priority queue for retries
-        if job_data['priority_retry']
-          job_data.delete('priority_retry') # Clean up the flag
-          push_priority(queue_name, job_data)
-        else
-          push(queue_name, job_data)
+      redis_pool.with do |redis|
+        now = Time.now.to_f
+        jobs = redis.zrangebyscore("schedule:#{queue_name}", 0, now)
+
+        return 0 if jobs.empty?
+
+        # Use pipeline for better performance with multiple operations
+        redis.pipelined do |pipeline|
+          jobs.each do |job_json|
+            pipeline.zrem("schedule:#{queue_name}", job_json)
+            job_data = JSON.parse(job_json)
+
+            # Use priority queue for retries
+            if job_data['priority_retry']
+              job_data.delete('priority_retry') # Clean up the flag
+              pipeline.rpush("queue:#{queue_name}", JSON.dump(job_data))
+            else
+              pipeline.lpush("queue:#{queue_name}", JSON.dump(job_data))
+            end
+          end
         end
+
+        jobs.size
      end
    end
 
     # Returns statistics for the specified queue.
     # Provides insight into queue depth, scheduled jobs, and processing status.
+    # Uses pipeline for efficient batch operations.
     #
     # @param queue_name [String, Symbol] Name of the queue
     # @return [Hash] Statistics hash with :size, :scheduled, and :processing keys
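
The rewrite batches one ZREM plus one push per job into a single pipelined round trip instead of issuing 2N sequential commands. Note that pipelined calls return futures inside the block, which is why the code above branches on `job_data` parsed from the already-fetched `jobs` array rather than on pipeline replies. A simplified sketch (priority handling omitted), assuming a local Redis:

```ruby
require 'json'
require 'redis'

redis = Redis.new(url: 'redis://localhost:6379')
redis.zadd('schedule:demo', Time.now.to_f - 1, JSON.dump('class' => 'DemoJob'))

due = redis.zrangebyscore('schedule:demo', 0, Time.now.to_f)

# All queued commands go out in one network round trip; inside the block
# each call returns a future, not a value.
redis.pipelined do |pipeline|
  due.each do |job_json|
    pipeline.zrem('schedule:demo', job_json)
    pipeline.lpush('queue:demo', job_json)
  end
end
```
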
@@ -166,11 +197,19 @@
     #   stats = FiberJob::Queue.stats(:default)
     #   puts "Pending: #{stats[:size]}, Scheduled: #{stats[:scheduled]}"
     def self.stats(queue_name)
-      {
-        size: redis.llen("queue:#{queue_name}"),
-        scheduled: redis.zcard("schedule:#{queue_name}"),
-        processing: redis.get("processing:#{queue_name}").to_i
-      }
+      redis_pool.with do |redis|
+        results = redis.pipelined do |pipeline|
+          pipeline.llen("queue:#{queue_name}")
+          pipeline.zcard("schedule:#{queue_name}")
+          pipeline.get("processing:#{queue_name}")
+        end
+
+        {
+          size: results[0],
+          scheduled: results[1],
+          processing: results[2].to_i
+        }
+      end
     end
 
     # Stores a failed job with error information for later analysis.
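
`pipelined` returns the replies as an array in command order, which is what makes the positional `results[0..2]` indexing safe; GET may return nil, and `nil.to_i` is 0, so a missing processing counter reads as zero. Sketch:

```ruby
require 'redis'

redis = Redis.new(url: 'redis://localhost:6379')

results = redis.pipelined do |pipeline|
  pipeline.llen('queue:default')       # -> results[0], Integer
  pipeline.zcard('schedule:default')   # -> results[1], Integer
  pipeline.get('processing:default')   # -> results[2], String or nil
end

{ size: results[0], scheduled: results[1], processing: results[2].to_i }
```
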
@@ -188,11 +227,11 @@
     #   end
     def self.store_failed_job(job_data, error)
       failed_job_data = job_data.merge({
-        'failed_at' => Time.now.to_f,
-        'error' => error.message,
-        'backtrace' => error.backtrace&.first(10)
-      })
-      redis.lpush("failed", JSON.dump(failed_job_data))
+                                         'failed_at' => Time.now.to_f,
+                                         'error' => error.message,
+                                         'backtrace' => error.backtrace&.first(10)
+                                       })
+      redis_pool.with { |redis| redis.lpush('failed', JSON.dump(failed_job_data)) }
     end
 
     # Retrieves all failed jobs for inspection and debugging.
@@ -203,7 +242,9 @@
     #   failed = FiberJob::Queue.failed_jobs
     #   failed.each { |job| puts "Failed: #{job['class']} - #{job['error']}" }
     def self.failed_jobs
-      redis.lrange("failed", 0, -1).map { |job_json| JSON.parse(job_json) }
+      redis_pool.with do |redis|
+        redis.lrange('failed', 0, -1).map { |job_json| JSON.parse(job_json) }
+      end
    end
  end
 end
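
Putting the two failure helpers together, a worker's rescue path and a later inspection pass might look like this hedged sketch (the require, DemoJob payload, and error are illustrative; a running Redis is assumed):

```ruby
require 'fiber_job' # assumes the gem is installed and Redis is reachable

job_data = { 'class' => 'DemoJob', 'args' => [42] } # illustrative payload

begin
  raise ArgumentError, 'boom' # stand-in for a job that blows up
rescue StandardError => e
  FiberJob::Queue.store_failed_job(job_data, e)
end

FiberJob::Queue.failed_jobs.each do |job|
  puts "#{job['class']} failed at #{Time.at(job['failed_at'])}: #{job['error']}"
end
```
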

@@ -1,5 +1,5 @@
 # frozen_string_literal: true
 
 module FiberJob
-  VERSION = "0.2.1"
-end
+  VERSION = '0.2.3'
+end
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: fiber_job
 version: !ruby/object:Gem::Version
-  version: 0.2.1
+  version: 0.2.3
 platform: ruby
 authors:
 - Caio Mendonca
@@ -37,6 +37,20 @@ dependencies:
     - - "~>"
       - !ruby/object:Gem::Version
         version: 5.4.1
+- !ruby/object:Gem::Dependency
+  name: connection_pool
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: 2.5.3
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: 2.5.3
 executables:
 - fiber_job
 extensions: []
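
The new connection_pool runtime dependency is declared with a pessimistic constraint: `~> 2.5.3` resolves to `>= 2.5.3, < 2.6.0`. Applications pick it up transitively, but an explicit Gemfile pin would look like:

```ruby
# Gemfile
gem 'fiber_job', '~> 0.2.3'
gem 'connection_pool', '~> 2.5.3' # optional explicit pin; normally pulled in transitively
```
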