fiber_job 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CHANGELOG.md +29 -0
- data/README.md +290 -0
- data/bin/fiber_job +32 -0
- data/lib/fiber_job/client.rb +103 -0
- data/lib/fiber_job/concurrency.rb +18 -0
- data/lib/fiber_job/config.rb +74 -0
- data/lib/fiber_job/cron.rb +61 -0
- data/lib/fiber_job/cron_job.rb +41 -0
- data/lib/fiber_job/cron_parser.rb +65 -0
- data/lib/fiber_job/job.rb +143 -0
- data/lib/fiber_job/logger.rb +117 -0
- data/lib/fiber_job/process_manager.rb +18 -0
- data/lib/fiber_job/queue.rb +201 -0
- data/lib/fiber_job/version.rb +5 -0
- data/lib/fiber_job/worker.rb +195 -0
- data/lib/fiber_job.rb +108 -0
- metadata +85 -0
data/lib/fiber_job/worker.rb
ADDED
@@ -0,0 +1,195 @@
# frozen_string_literal: true

require 'async'
require 'async/queue'

module FiberJob
  class Worker
    def initialize(queues: nil, concurrency: nil)
      @queues = queues || FiberJob.config.queues
      @concurrency = concurrency || FiberJob.config.concurrency
      @running = false
      @managers = {}
      @job_queues = {} # In-memory Async::Queue per Redis queue
    end

    def start
      @running = true

      Sync do |task|
        # Initialize all queues first
        @queues.each do |queue_name|
          @job_queues[queue_name] = Async::Queue.new
          queue_concurrency = FiberJob.config.concurrency_for_queue(queue_name)
          @managers[queue_name] = ConcurrencyManager.new(max_concurrency: queue_concurrency)
        end

        # Start independent pollers for each queue
        @queues.each do |queue_name|
          task.async do
            poll_redis_queue(queue_name)
          end
        end

        # Start independent worker pools for each queue
        @queues.each do |queue_name|
          queue_concurrency = FiberJob.config.concurrency_for_queue(queue_name)
          queue_concurrency.times do
            task.async do
              process_job_queue(queue_name)
            end
          end
        end

        # Global support fibers
        task.async do
          process_scheduled_jobs
        end

        task.async do
          process_cron_jobs
        end
      end
    end

    def stop
      @running = false

      # Close all in-memory queues to signal completion to workers
      @job_queues.each_value(&:close)
    end

    private

    # Single poller fiber that fetches jobs from Redis and distributes to workers
    # This eliminates Redis brpop contention by having only one fiber per queue accessing Redis
    def poll_redis_queue(queue_name)
      # Create dedicated Redis connection for this poller to avoid blocking other pollers
      redis_conn = Queue.redis_connection
      while @running
        begin
          # Use longer timeout since we're the only poller - no contention
          job_data = Queue.pop(queue_name, timeout: 1.0, redis_conn: redis_conn)

          if job_data
            # Push to in-memory queue for worker fibers to process
            @job_queues[queue_name].push(job_data)
          end
        rescue StandardError => e
          FiberJob.logger.error "Redis polling error for queue #{queue_name}: #{e.message}"
          sleep(1) # Brief pause on error
        end
      end
    end

    # Worker fibers process jobs from the fast in-memory queue
    # Multiple workers can process concurrently without Redis contention
    def process_job_queue(queue_name)
      while @running
        begin
          # Fast in-memory dequeue operation
          job_data = @job_queues[queue_name].dequeue

          if job_data
            # Use semaphore to control actual job execution concurrency
            @managers[queue_name].execute do
              execute_job(job_data)
            end
          end
        rescue Async::Queue::ClosedError
          # Queue closed during shutdown - exit gracefully
          break
        rescue StandardError => e
          FiberJob.logger.error "Job processing error for queue #{queue_name}: #{e.message}"
        end
      end
    end

    def execute_job(job_data)
      job_class = Object.const_get(job_data['class'])
      job = job_class.new

      job.retry_count = job_data['retry_count'] || 0

      # if job.retry_count > 0
      #   FiberJob.logger.info "Executing #{job_class} (retry #{job.retry_count}/#{job.max_retries})"
      # end

      begin
        Timeout.timeout(job.timeout) do
          args = (job_data['args'] || []).dup
          args << job_data['enqueued_at'] if job_data['enqueued_at']
          job.perform(*args)
        end
      rescue => e
        handle_failure(job, job_data, e)
      end
    end

    def execute_cron_job(job_data)
      job_class = Object.const_get(job_data['class'])
      job = job_class.new

      begin
        Timeout.timeout(job.timeout) do
          job.perform
        end
      rescue => e
        FiberJob.logger.error "Cron job #{job_data['class']} failed: #{e.message}"
        FiberJob.logger.error e.backtrace.join("\n")

        next_time = job_class.next_run_time
        Cron.schedule_job(job_class, next_time)
      end
    end

    def handle_failure(job, job_data, error)
      if job.retry_count < job.max_retries
        job.retry_count += 1
        delay = job.retry_delay(job.retry_count)

        retry_job_data = job_data.dup
        retry_job_data['retry_count'] = job.retry_count

        message = "#{job.class} failed: #{error.message}. "
        message += "Retrying in #{delay.round(1)}s (attempt #{job.retry_count}/#{job.max_retries})"
        FiberJob.logger.warn message

        if job.priority_retry?
          Queue.schedule_priority(job.queue, retry_job_data, Time.now.to_f + delay)
        else
          Queue.schedule(job.queue, retry_job_data, Time.now.to_f + delay)
        end
      else
        FiberJob.logger.error "#{job.class} permanently failed after #{job.max_retries} retries: #{error.message}"
        Queue.store_failed_job(job_data, error)
      end
    end

    def process_scheduled_jobs
      while @running
        @queues.each do |queue_name|
          Queue.scheduled_jobs(queue_name)
        end

        sleep(1)
      end
    end

    def process_cron_jobs
      while @running
        due_jobs = Cron.due_jobs

        due_jobs.each do |job_data|
          queue_name = job_data['queue']

          next unless @queues.include?(queue_name)

          @managers[queue_name].execute { execute_cron_job(job_data) }
        end

        sleep(1)
      end
    end
  end
end
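The worker above uses one poller fiber per Redis queue feeding an in-memory Async::Queue, with a per-queue pool of worker fibers (sized by concurrency_for_queue) draining it. The sketch below is not part of the gem; it is a minimal, hypothetical boot script showing how that lifecycle could be driven using only the constructor, start, and stop shown above, plus configuration keys taken from the gem's own documentation. The file name and the signal wiring are illustrative assumptions.

    # boot_worker.rb (hypothetical) -- assumes Redis is reachable and job classes are loaded
    require 'fiber_job'

    FiberJob.configure do |config|
      config.redis_url = 'redis://localhost:6379/0'  # value from the gem's docs
      config.queues = { default: 5, high: 10 }       # queue name => concurrency, per the docs
    end

    worker = FiberJob::Worker.new(queues: ['default', 'high'])

    # stop flips @running and closes the in-memory queues so worker fibers drain and exit.
    # Trapping signals this way is illustrative, not something the gem documents.
    Signal.trap('INT')  { worker.stop }
    Signal.trap('TERM') { worker.stop }

    worker.start # blocks inside Sync until stop is called and the fibers wind down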
data/lib/fiber_job.rb
ADDED
@@ -0,0 +1,108 @@
# frozen_string_literal: true

require_relative 'fiber_job/version'
require_relative 'fiber_job/config'
require_relative 'fiber_job/logger'
require_relative 'fiber_job/job'
require_relative 'fiber_job/queue'
require_relative 'fiber_job/worker'
require_relative 'fiber_job/concurrency'
require_relative 'fiber_job/process_manager'
require_relative 'fiber_job/client'
require_relative 'fiber_job/cron_parser'
require_relative 'fiber_job/cron'
require_relative 'fiber_job/cron_job'

# FiberJob is a high-performance, Redis-based background job processing library for Ruby
# built on modern fiber-based concurrency. It combines the persistence of Redis with the
# speed of async fibers to deliver exceptional performance and reliability.
#
# @example Basic usage
#   # Configure the library
#   FiberJob.configure do |config|
#     config.redis_url = 'redis://localhost:6379/0'
#     config.queues = { default: 5, high: 10 }
#   end
#
#   # Define a job
#   class EmailJob < FiberJob::Job
#     def perform(user_id, message)
#       # Send email logic here
#     end
#   end
#
#   # Enqueue a job
#   EmailJob.perform_async(123, "Welcome!")
#
# @example Starting a worker
#   worker = FiberJob::Worker.new(queues: ['default', 'high'])
#   worker.start
#
module FiberJob
  class << self
    # Configures the FiberJob library with custom settings.
    # Yields the configuration object to the provided block for customization.
    #
    # @example Basic configuration
    #   FiberJob.configure do |config|
    #     config.redis_url = 'redis://localhost:6379/0'
    #     config.queues = { default: 5, high: 10, low: 2 }
    #     config.log_level = :info
    #   end
    #
    # @yield [config] Configuration object for customization
    # @yieldparam config [FiberJob::Config] The configuration instance
    # @return [void]
    # @see FiberJob::Config
    def configure
      yield(config)
    end

    # Returns the current configuration instance.
    # Creates a new configuration if one doesn't exist.
    #
    # @example Accessing configuration
    #   redis_url = FiberJob.config.redis_url
    #   queues = FiberJob.config.queues
    #
    # @return [FiberJob::Config] The current configuration instance
    # @see FiberJob::Config
    def config
      @config ||= Config.new
    end

    # Returns the current logger instance.
    # Creates a new logger with the current configuration if one doesn't exist.
    #
    # @example Logging messages
    #   FiberJob.logger.info("Job completed successfully")
    #   FiberJob.logger.error("Job failed: #{error.message}")
    #
    # @return [FiberJob::Logger] The current logger instance
    # @see FiberJob::Logger
    def logger
      @logger ||= FiberJob::Logger.new(config)
    end

    # Automatically discovers and registers all cron job classes.
    # Scans ObjectSpace for classes that inherit from FiberJob::CronJob
    # and registers them for scheduling.
    #
    # @example Manual registration
    #   class DailySummaryJob < FiberJob::CronJob
    #     cron '0 9 * * *' # Every day at 9 AM
    #   end
    #
    #   FiberJob.register_cron_jobs
    #
    # @return [void]
    # @see FiberJob::CronJob
    # @see FiberJob::Cron
    def register_cron_jobs
      ObjectSpace.each_object(Class).select { |klass| klass < FiberJob::CronJob }.each(&:register)
    end
  end
end

# Register cron jobs after module is fully loaded
FiberJob.register_cron_jobs
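The module docs above outline the public surface exercised by the worker: FiberJob.configure, FiberJob::Job subclasses with perform, Class.perform_async, and FiberJob::CronJob with a cron expression that register_cron_jobs discovers via ObjectSpace. The sketch below simply restates those @example blocks as one runnable file; the job bodies and the trailing re-registration call are illustrative, not code from the gem.

    require 'fiber_job'

    class EmailJob < FiberJob::Job
      def perform(user_id, message)
        # Placeholder body; deliver the email here.
        FiberJob.logger.info("Emailing user #{user_id}: #{message}")
      end
    end

    class DailySummaryJob < FiberJob::CronJob
      cron '0 9 * * *' # every day at 9 AM, as in the docs above

      def perform
        # Placeholder body; build and send the summary here.
      end
    end

    # The automatic ObjectSpace scan ran when 'fiber_job' was required, before these
    # classes existed, so register cron jobs again explicitly (as the docs also show).
    FiberJob.register_cron_jobs

    # Enqueue a one-off job for a running worker to pick up.
    EmailJob.perform_async(123, 'Welcome!')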
metadata
ADDED
@@ -0,0 +1,85 @@
--- !ruby/object:Gem::Specification
name: fiber_job
version: !ruby/object:Gem::Version
  version: 0.1.0
platform: ruby
authors:
- Caio Mendonca
bindir: bin
cert_chain: []
date: 1980-01-02 00:00:00.000000000 Z
dependencies:
- !ruby/object:Gem::Dependency
  name: async
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: 2.26.0
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: 2.26.0
- !ruby/object:Gem::Dependency
  name: redis
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: 5.4.1
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: 5.4.1
executables:
- fiber_job
extensions: []
extra_rdoc_files: []
files:
- CHANGELOG.md
- README.md
- bin/fiber_job
- lib/fiber_job.rb
- lib/fiber_job/client.rb
- lib/fiber_job/concurrency.rb
- lib/fiber_job/config.rb
- lib/fiber_job/cron.rb
- lib/fiber_job/cron_job.rb
- lib/fiber_job/cron_parser.rb
- lib/fiber_job/job.rb
- lib/fiber_job/logger.rb
- lib/fiber_job/process_manager.rb
- lib/fiber_job/queue.rb
- lib/fiber_job/version.rb
- lib/fiber_job/worker.rb
homepage: https://github.com/caieras/fiber_job
licenses:
- MIT
metadata:
  source_code_uri: https://github.com/caieras/fiber_job
  changelog_uri: https://github.com/caieras/fiber_job/blob/main/CHANGELOG.md
rdoc_options: []
require_paths:
- lib
required_ruby_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      version: '3.2'
required_rubygems_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      version: '0'
requirements: []
rubygems_version: 3.6.7
specification_version: 4
summary: Experimental High-performance, Redis-based background job processing library
  for Ruby built on fiber-based concurrency
test_files: []
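Based on this gemspec, a consumer's Gemfile might look like the sketch below. Only the gem name, version, Ruby requirement, and runtime dependency constraints come from the metadata above; the source line is standard Bundler boilerplate.

    # Gemfile (sketch)
    source 'https://rubygems.org'

    # Requires Ruby >= 3.2 per required_ruby_version; installing the gem pulls in
    # its declared runtime dependencies: async (~> 2.26.0) and redis (~> 5.4.1).
    gem 'fiber_job', '0.1.0'

After bundling, the library is loaded with require 'fiber_job', and the gem also ships a fiber_job executable (listed under executables above) that can be run via bundle exec fiber_job.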