fiber_job 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,67 @@
+# frozen_string_literal: true
+
+module FiberJob
+  # Simple cron parser - supports basic patterns.
+  # Format: [second] minute hour day month weekday
+  # Examples: "*/30 * * * * *" (every 30 seconds), "0 2 * * *" (daily at 2am)
+  class CronParser
+    def self.next_run(cron_expression, from_time = Time.now)
+      fields = cron_expression.split
+
+      if fields.length == 6
+        # 6-field format: second minute hour day month weekday
+        second, minute, hour, day, month, weekday = fields
+        current = from_time + 1 # Start from the next second
+        current = Time.new(current.year, current.month, current.day, current.hour, current.min, current.sec)
+        increment = 1
+        max_iterations = 86_400 # 24 hours in seconds
+      elsif fields.length == 5
+        # 5-field format: minute hour day month weekday
+        minute, hour, day, month, weekday = fields
+        second = nil
+        current = from_time + 60 # Start from the next minute
+        current = Time.new(current.year, current.month, current.day, current.hour, current.min, 0)
+        increment = 60
+        max_iterations = 1440 # 24 hours in minutes
+      else
+        raise ArgumentError, "Invalid cron expression: #{cron_expression}. Must have 5 or 6 fields."
+      end
+
+      # Simple implementation - scan forward until a matching time is found
+      max_iterations.times do
+        if matches?(current, second, minute, hour, day, month, weekday)
+          return current
+        end
+        current += increment
+      end
+
+      raise "No matching time found for cron expression: #{cron_expression}"
+    end
+
+    def self.matches?(time, second, minute, hour, day, month, weekday)
+      (second.nil? || match_field?(time.sec, second)) &&
+        match_field?(time.min, minute) &&
+        match_field?(time.hour, hour) &&
+        match_field?(time.day, day) &&
+        match_field?(time.month, month) &&
+        match_field?(time.wday, weekday)
+    end
+
+    def self.match_field?(value, pattern)
+      return true if pattern == '*'
+
+      if pattern.start_with?('*/')
+        # Handle */n step patterns
+        interval = pattern[2..-1].to_i
+        return (value % interval) == 0
+      end
+
+      # Handle exact numeric matches
+      value == pattern.to_i
+    end
+
+    # Note: 'private' does not hide class-level (self.) methods, so the
+    # helpers are hidden explicitly instead.
+    private_class_method :matches?, :match_field?
+  end
+end
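The parser above only understands "*", "*/n" steps, and exact numeric values. A minimal usage sketch (the times are illustrative; it assumes the class above is loaded):

now = Time.new(2024, 1, 1, 1, 59, 45)

# 6-field expression: next second divisible by 30
FiberJob::CronParser.next_run('*/30 * * * * *', now) # => 2024-01-01 02:00:00

# 5-field expression: next daily 2am run
FiberJob::CronParser.next_run('0 2 * * *', now)      # => 2024-01-01 02:00:00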
@@ -0,0 +1,143 @@
+# frozen_string_literal: true
+
+module FiberJob
+  # Base class for all background jobs in the FiberJob system.
+  # Provides the interface for job execution, retry logic, and scheduling.
+  #
+  # All job classes should inherit from this class and implement the {#perform}
+  # method to define their specific behavior.
+  #
+  # @example Basic job definition
+  #   class EmailJob < FiberJob::Job
+  #     def perform(user_id, message)
+  #       user = User.find(user_id)
+  #       UserMailer.notification(user, message).deliver_now
+  #     end
+  #   end
+  #
+  # @example Job with custom configuration
+  #   class ComplexJob < FiberJob::Job
+  #     def initialize
+  #       super
+  #       @queue = :high_priority
+  #       @max_retries = 5
+  #       @timeout = 600 # 10 minutes
+  #     end
+  #
+  #     def perform(data)
+  #       # Complex processing logic
+  #     end
+  #
+  #     def retry_delay(attempt)
+  #       attempt * 60 # Linear backoff: 1min, 2min, 3min...
+  #     end
+  #   end
+  class Job
+    # @!attribute [rw] queue
+    #   @return [Symbol] The queue name where this job will be processed
+    # @!attribute [rw] retry_count
+    #   @return [Integer] Current number of retry attempts
+    # @!attribute [rw] max_retries
+    #   @return [Integer] Maximum number of retry attempts before giving up
+    # @!attribute [rw] priority
+    #   @return [Integer] Job priority (higher numbers = higher priority)
+    # @!attribute [rw] timeout
+    #   @return [Integer] Maximum execution time in seconds before timeout
+    attr_accessor :queue, :retry_count, :max_retries, :priority, :timeout
+
+    # Initializes a new job instance with default configuration.
+    # Sets reasonable defaults for queue, retries, and timeout values.
+    #
+    # @return [void]
+    def initialize
+      @queue = :default
+      @retry_count = 0
+      @max_retries = 3
+      @priority = 0
+      @timeout = 300 # 5 minutes
+    end
+
+    # Executes the job with the provided arguments.
+    # This method must be implemented by all job subclasses.
+    #
+    # @param args [Array] Arguments passed to the job
+    # @raise [NotImplementedError] When called on the base Job class
+    # @return [void]
+    # @abstract Subclasses must implement this method
+    #
+    # @example Implementation in a subclass
+    #   def perform(user_id, message)
+    #     user = User.find(user_id)
+    #     # Process the job...
+    #   end
+    def perform(*args)
+      raise NotImplementedError, 'Subclasses must implement perform'
+    end
+
+    # Calculates the delay before retrying a failed job.
+    # Uses exponential backoff with random jitter by default.
+    #
+    # @param attempt [Integer] The current retry attempt number (0-based)
+    # @return [Integer] Delay in seconds before retry
+    #
+    # @example Custom retry delay
+    #   def retry_delay(attempt)
+    #     [30, 60, 120, 300][attempt] || 300 # Fixed intervals
+    #   end
+    def retry_delay(attempt)
+      # Default exponential backoff: 2^attempt + random jitter
+      (2**attempt) + rand(10)
+    end
+
+    # Determines if failed jobs should be retried with priority.
+    # Priority retries are processed before regular jobs in the queue.
+    #
+    # @return [Boolean] Whether to use the priority retry queue
+    def priority_retry?
+      true
+    end
+
+    # Returns the queue name for this job class.
+    # Creates a temporary instance to read the configured queue name.
+    #
+    # @return [Symbol] The queue name where jobs of this class will be processed
+    def self.queue
+      new.queue
+    end
+
+    # Enqueues the job for immediate asynchronous execution.
+    #
+    # @param args [Array] Arguments to pass to the job's perform method
+    # @return [String] Job ID for tracking
+    #
+    # @example Enqueue a job
+    #   EmailJob.perform_async(user.id, "Welcome message")
+    def self.perform_async(*args)
+      Client.enqueue(self, *args)
+    end
+
+    # Enqueues the job for execution after a specified delay.
+    #
+    # @param delay [Numeric] Delay in seconds before execution
+    # @param args [Array] Arguments to pass to the job's perform method
+    # @return [String] Job ID for tracking
+    #
+    # @example Enqueue with delay
+    #   EmailJob.perform_in(3600, user.id, "Reminder message") # 1 hour delay
+    def self.perform_in(delay, *args)
+      Client.enqueue_in(delay, self, *args)
+    end
+
+    # Enqueues the job for execution at a specific time.
+    #
+    # @param time [Time, Integer] Specific time or timestamp for execution
+    # @param args [Array] Arguments to pass to the job's perform method
+    # @return [String] Job ID for tracking
+    #
+    # @example Enqueue at a specific time
+    #   EmailJob.perform_at(tomorrow_9am, user.id, "Daily summary")
+    def self.perform_at(time, *args)
+      Client.enqueue_at(time, self, *args)
+    end
+  end
+end
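A sketch of defining and enqueuing a job built on this class. WelcomeEmailJob and its argument are hypothetical, and the Client.enqueue* calls only do something once the gem's backend is running:

class WelcomeEmailJob < FiberJob::Job
  def initialize
    super
    @queue = :mailers    # override the :default queue
    @max_retries = 5
  end

  def perform(user_id)
    puts "Sending welcome email to user #{user_id}"
  end
end

WelcomeEmailJob.perform_async(42)               # run as soon as a worker is free
WelcomeEmailJob.perform_in(900, 42)             # run in 15 minutes
WelcomeEmailJob.perform_at(Time.now + 3600, 42) # run in one hour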
@@ -0,0 +1,117 @@
+# frozen_string_literal: true
+
+module FiberJob
+  # Logger provides structured logging for FiberJob operations.
+  # Wraps the configured logger and provides level-based filtering
+  # and FiberJob-specific formatting.
+  #
+  # The logger respects the configured log level and only outputs
+  # messages at or above the current threshold.
+  #
+  # @example Using the logger
+  #   FiberJob.logger.info("Job processing started")
+  #   FiberJob.logger.error("Job failed: #{error.message}")
+  #   FiberJob.logger.debug("Processing queue: #{queue_name}")
+  #
+  # @see FiberJob::Config
+  class Logger
+    # Log level hierarchy for filtering messages.
+    # Lower numbers indicate more verbose logging.
+    LOG_LEVELS = {
+      debug: 0,
+      info: 1,
+      warn: 2,
+      error: 3,
+      fatal: 4
+    }.freeze
+
+    # Initializes the logger with configuration settings.
+    #
+    # @param config [FiberJob::Config] Configuration object containing log level and logger instance
+    def initialize(config)
+      @config = config
+      @level = LOG_LEVELS[@config.log_level.to_sym] || LOG_LEVELS[:info]
+    end
+
+    # Logs a debug message if debug level is enabled.
+    # Used for detailed diagnostic information.
+    #
+    # @param message [String] The message to log
+    # @return [void]
+    #
+    # @example Debug logging
+    #   FiberJob.logger.debug("Processing job #{job.class.name} with args: #{args.inspect}")
+    def debug(message)
+      log(:debug, message)
+    end
+
+    # Logs an info message if info level is enabled.
+    # Used for general operational messages.
+    #
+    # @param message [String] The message to log
+    # @return [void]
+    #
+    # @example Info logging
+    #   FiberJob.logger.info("Worker started processing queue: #{queue_name}")
+    def info(message)
+      log(:info, message)
+    end
+
+    # Logs a warning message if warn level is enabled.
+    # Used for concerning but non-fatal conditions.
+    #
+    # @param message [String] The message to log
+    # @return [void]
+    #
+    # @example Warning logging
+    #   FiberJob.logger.warn("Queue #{queue_name} is getting full")
+    def warn(message)
+      log(:warn, message)
+    end
+
+    # Logs an error message if error level is enabled.
+    # Used for error conditions and exceptions.
+    #
+    # @param message [String] The message to log
+    # @return [void]
+    #
+    # @example Error logging
+    #   FiberJob.logger.error("Job failed: #{error.class.name} - #{error.message}")
+    def error(message)
+      log(:error, message)
+    end
+
+    # Logs a fatal message if fatal level is enabled.
+    # Used for severe error conditions that may terminate the process.
+    #
+    # @param message [String] The message to log
+    # @return [void]
+    #
+    # @example Fatal logging
+    #   FiberJob.logger.fatal("Unable to connect to Redis, shutting down")
+    def fatal(message)
+      log(:fatal, message)
+    end
+
+    private
+
+    # Internal logging method that checks the level and delegates to the configured logger.
+    #
+    # @param level [Symbol] The log level
+    # @param message [String] The message to log
+    # @return [void]
+    def log(level, message)
+      return unless should_log?(level)
+
+      @config.logger.send(level, message)
+    end
+
+    # Determines if a message should be logged based on the current log level.
+    #
+    # @param level [Symbol] The level to check
+    # @return [Boolean] Whether the message should be logged
+    def should_log?(level)
+      LOG_LEVELS[level] >= @level
+    end
+  end
+end
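A minimal sketch of the level filtering, using an OpenStruct as a stand-in for the config object (the real FiberJob::Config is defined elsewhere in the gem):

require 'logger'
require 'ostruct'

config = OpenStruct.new(log_level: :warn, logger: ::Logger.new($stdout))
log = FiberJob::Logger.new(config)

log.debug('suppressed: below the :warn threshold')
log.warn('printed: at the threshold')
log.error('printed: above the threshold')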
@@ -0,0 +1,18 @@
+# frozen_string_literal: true
+
+module FiberJob
+  # ProcessManager is responsible for managing the lifecycle of workers
+  # and ensuring they run with the correct configuration.
+  class ProcessManager
+    def self.start_worker(queues: nil, concurrency: nil)
+      queues ||= FiberJob.config.queues
+      concurrency ||= FiberJob.config.concurrency
+      worker = Worker.new(queues: queues, concurrency: concurrency)
+
+      trap('INT') { worker.stop }
+      trap('TERM') { worker.stop }
+
+      worker.start
+    end
+  end
+end
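A usage sketch; the queue names and concurrency are illustrative, and omitting either argument falls back to the FiberJob.config defaults:

# INT and TERM are trapped to call worker.stop for a graceful shutdown.
FiberJob::ProcessManager.start_worker(queues: [:default, :mailers], concurrency: 4)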
@@ -0,0 +1,201 @@
+# frozen_string_literal: true
+
+require 'redis'
+require 'json'
+
+module FiberJob
+  # Queue provides Redis-based queue operations for job storage and retrieval.
+  # Handles immediate execution queues, scheduled jobs, priority handling,
+  # and failure tracking using Redis data structures.
+  #
+  # The queue system uses Redis lists for immediate jobs, sorted sets for
+  # scheduled jobs, and separate lists for failed job tracking.
+  #
+  # @example Basic queue operations
+  #   # Push a job to a queue
+  #   FiberJob::Queue.push(:default, job_data)
+  #
+  #   # Pop a job from a queue
+  #   job = FiberJob::Queue.pop(:default, timeout: 1.0)
+  #
+  #   # Schedule a job for later
+  #   FiberJob::Queue.schedule(:default, job_data, Time.now.to_f + 3600)
+  #
+  # @example Queue statistics
+  #   stats = FiberJob::Queue.stats(:default)
+  #   puts "Queue size: #{stats[:size]}"
+  #   puts "Scheduled jobs: #{stats[:scheduled]}"
+  #
+  # @see FiberJob::Worker
+  class Queue
+    # Returns the shared Redis connection instance.
+    # Creates a new connection if one doesn't exist.
+    #
+    # @return [Redis] The shared Redis connection
+    def self.redis
+      @redis ||= Redis.new(url: FiberJob.config.redis_url)
+    end
+
+    # Creates a new Redis connection for fiber-safe operations.
+    # Used when concurrent operations need separate connections.
+    #
+    # @return [Redis] A new Redis connection instance
+    def self.redis_connection
+      Redis.new(url: FiberJob.config.redis_url)
+    end
+
+    # Adds a job to the specified queue for immediate processing.
+    # Jobs are added to the left side of the list (LPUSH) and popped
+    # from the right side (BRPOP), implementing FIFO behavior.
+    #
+    # @param queue_name [String, Symbol] Name of the target queue
+    # @param payload [Hash] Job data including class, args, and metadata
+    # @return [Integer] The length of the queue after the push
+    #
+    # @example Push a job
+    #   payload = { 'class' => 'EmailJob', 'args' => [123, 'message'] }
+    #   FiberJob::Queue.push(:default, payload)
+    def self.push(queue_name, payload)
+      redis.lpush("queue:#{queue_name}", JSON.dump(payload))
+    end
+
+    # Adds a job to the consuming end of the queue for priority processing.
+    # Priority jobs are processed before regular jobs in the same queue.
+    #
+    # @param queue_name [String, Symbol] Name of the target queue
+    # @param payload [Hash] Job data including class, args, and metadata
+    # @return [Integer] The length of the queue after the push
+    #
+    # @example Push a priority job
+    #   FiberJob::Queue.push_priority(:default, urgent_job_data)
+    def self.push_priority(queue_name, payload)
+      # RPUSH places the job at the end BRPOP consumes from, so it runs next
+      redis.rpush("queue:#{queue_name}", JSON.dump(payload))
+    end
+
+    # Removes and returns a job from the specified queue.
+    # Blocks for the specified timeout waiting for jobs to become available.
+    #
+    # @param queue_name [String, Symbol] Name of the source queue
+    # @param timeout [Float] Maximum time to wait for a job (default: 0.1)
+    # @param redis_conn [Redis, nil] Optional Redis connection to use
+    # @return [Hash, nil] Job data hash, or nil if the timeout was reached
+    #
+    # @example Pop from a queue with a timeout
+    #   job = FiberJob::Queue.pop(:default, timeout: 5.0)
+    #   if job
+    #     puts "Processing job: #{job['class']}"
+    #   end
+    def self.pop(queue_name, timeout: 0.1, redis_conn: nil)
+      conn = redis_conn || redis
+      data = conn.brpop("queue:#{queue_name}", timeout: timeout)
+      data ? JSON.parse(data[1]) : nil
+    end
+
+    # Schedules a job for execution at a specific time.
+    # Uses Redis sorted sets with the timestamp as score for efficient
+    # time-based retrieval.
+    #
+    # @param queue_name [String, Symbol] Name of the target queue
+    # @param payload [Hash] Job data including class, args, and metadata
+    # @param scheduled_at [Float] Unix timestamp for execution
+    # @return [Boolean] True if the job was added to the schedule
+    #
+    # @example Schedule a job
+    #   future_time = Time.now.to_f + 3600 # 1 hour from now
+    #   FiberJob::Queue.schedule(:default, job_data, future_time)
+    def self.schedule(queue_name, payload, scheduled_at)
+      redis.zadd("schedule:#{queue_name}", scheduled_at, JSON.dump(payload))
+    end
+
+    # Schedules a job with priority for execution at a specific time.
+    # Priority scheduled jobs are moved to the head of the queue when ready.
+    #
+    # @param queue_name [String, Symbol] Name of the target queue
+    # @param payload [Hash] Job data including class, args, and metadata
+    # @param scheduled_at [Float] Unix timestamp for execution
+    # @return [Boolean] True if the job was added to the schedule
+    def self.schedule_priority(queue_name, payload, scheduled_at)
+      # Mark as a priority retry for head-of-queue execution
+      priority_payload = payload.merge('priority_retry' => true)
+      redis.zadd("schedule:#{queue_name}", scheduled_at, JSON.dump(priority_payload))
+    end
+
+    # Processes scheduled jobs that are ready for execution.
+    # Moves jobs from the scheduled set to the appropriate queue
+    # when their scheduled time has arrived.
+    #
+    # @param queue_name [String, Symbol] Name of the queue to process
+    # @return [void]
+    #
+    # @example Process scheduled jobs
+    #   FiberJob::Queue.scheduled_jobs(:default) # Called by workers
+    def self.scheduled_jobs(queue_name)
+      now = Time.now.to_f
+      jobs = redis.zrangebyscore("schedule:#{queue_name}", 0, now)
+
+      jobs.each do |job_json|
+        redis.zrem("schedule:#{queue_name}", job_json)
+        job_data = JSON.parse(job_json)
+
+        # Use the priority queue for retries
+        if job_data['priority_retry']
+          job_data.delete('priority_retry') # Clean up the flag
+          push_priority(queue_name, job_data)
+        else
+          push(queue_name, job_data)
+        end
+      end
+    end
+
+    # Returns statistics for the specified queue.
+    # Provides insight into queue depth, scheduled jobs, and processing status.
+    #
+    # @param queue_name [String, Symbol] Name of the queue
+    # @return [Hash] Statistics hash with :size, :scheduled, and :processing keys
+    #
+    # @example Get queue statistics
+    #   stats = FiberJob::Queue.stats(:default)
+    #   puts "Pending: #{stats[:size]}, Scheduled: #{stats[:scheduled]}"
+    def self.stats(queue_name)
+      {
+        size: redis.llen("queue:#{queue_name}"),
+        scheduled: redis.zcard("schedule:#{queue_name}"),
+        processing: redis.get("processing:#{queue_name}").to_i
+      }
+    end
+
+    # Stores a failed job with error information for later analysis.
+    # Failed jobs are stored with the original job data plus failure metadata.
+    #
+    # @param job_data [Hash] Original job data
+    # @param error [Exception] The error that caused the failure
+    # @return [Integer] Length of the failed jobs list after storage
+    #
+    # @example Store a failed job
+    #   begin
+    #     job.perform
+    #   rescue => e
+    #     FiberJob::Queue.store_failed_job(job_data, e)
+    #   end
+    def self.store_failed_job(job_data, error)
+      failed_job_data = job_data.merge({
+        'failed_at' => Time.now.to_f,
+        'error' => error.message,
+        'backtrace' => error.backtrace&.first(10)
+      })
+      redis.lpush('failed', JSON.dump(failed_job_data))
+    end
+
+    # Retrieves all failed jobs for inspection and debugging.
+    #
+    # @return [Array<Hash>] Array of failed job data hashes
+    #
+    # @example Get failed jobs
+    #   failed = FiberJob::Queue.failed_jobs
+    #   failed.each { |job| puts "Failed: #{job['class']} - #{job['error']}" }
+    def self.failed_jobs
+      redis.lrange('failed', 0, -1).map { |job_json| JSON.parse(job_json) }
+    end
+  end
+end
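A sketch of the raw queue API against a reachable Redis; the payload shape is illustrative, and workers normally drive pop and scheduled_jobs:

payload = { 'class' => 'WelcomeEmailJob', 'args' => [42] }

FiberJob::Queue.push(:default, payload)                          # FIFO enqueue
FiberJob::Queue.schedule(:default, payload, Time.now.to_f + 300) # due in 5 minutes

FiberJob::Queue.scheduled_jobs(:default) # promote due jobs onto the live queue
job = FiberJob::Queue.pop(:default, timeout: 1.0)
puts job['class'] if job

p FiberJob::Queue.stats(:default) # e.g. {size: 0, scheduled: 1, processing: 0}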
@@ -0,0 +1,5 @@
+# frozen_string_literal: true
+
+module FiberJob
+  VERSION = "0.1.0"
+end