riverqueue 0.6.1 → 0.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: db28757576fce6283b32ae3509deb6eb3a66c2e20691e30a67f107fff5b553aa
4
- data.tar.gz: d5e6b8447b5e5e9defcd2ab72f95cc1266d9bb488b0f9bf20a6ede96f47ce71f
3
+ metadata.gz: 8aa1b2a14e085df2b2a7e79fa6c6cd5b19aeb2c893a4fd369b99e469dd5916b9
4
+ data.tar.gz: 3cebd975dcadb223ecc163918b2e8bb0ce67bb671be99c6f706d3d9ec2da6507
5
5
  SHA512:
6
- metadata.gz: 9fe6c2f467f9d79d5819a267f3b620481a179b6024339d3bafc20b2811599364daf4a7d85928d873a2a06b250b6a3ebc1783009b08b7e1251920d6d2d1b672c3
7
- data.tar.gz: d06cfdf233641f120e2170f3ac694bb98ca04fa2556e9de8bbeaed117d98b56384de9b552e71c9526d6bda2ee15eea79c2475beec625bfb663329e77edfb30c0
6
+ metadata.gz: 868459a39a19fe9aacec7552b03148d97228ff58bdfb6b79a6e724c800cc97ef10017097a46f5132684d5d4e97ea9c788b83c813dcb6dc812835e9c0c9ec9645
7
+ data.tar.gz: '05538541ee6466adc722af9d6ab4cbf4c1a081a59c9eb887089b8abea8474415d61dfa9fdf827e4863ee393874258f09e6c54b06ffac87ddc90931f346f12f0b'
data/lib/client.rb ADDED
@@ -0,0 +1,330 @@
1
+ require "digest"
2
+ require "fnv"
3
+ require "time"
4
+
5
+ module River
6
+ # Default number of maximum attempts for a job.
7
+ MAX_ATTEMPTS_DEFAULT = 25
8
+
9
+ # Default priority for a job.
10
+ PRIORITY_DEFAULT = 1
11
+
12
+ # Default queue for a job.
13
+ QUEUE_DEFAULT = "default"
14
+
15
+ # Provides a client for River that inserts jobs. Unlike the Go version of the
16
+ # River client, this one can insert jobs only. Jobs can only be worked from Go
17
+ # code, so job arg kinds and JSON encoding details must be shared between Ruby
18
+ # and Go code.
19
+ #
20
+ # Used in conjunction with a River driver like:
21
+ #
22
+ # DB = Sequel.connect(...)
23
+ # client = River::Client.new(River::Driver::Sequel.new(DB))
24
+ #
25
+ # River drivers are found in separate gems like `riverqueue-sequel` to help
26
+ # minimize transitive dependencies.
27
+ class Client
28
+ def initialize(driver, advisory_lock_prefix: nil)
29
+ @driver = driver
30
+ @advisory_lock_prefix = check_advisory_lock_prefix_bounds(advisory_lock_prefix)
31
+ @time_now_utc = -> { Time.now.utc } # for test time stubbing
32
+ end
33
+
34
+ # Inserts a new job for work given a job args implementation and insertion
35
+ # options (which may be omitted).
36
+ #
37
+ # With job args only:
38
+ #
39
+ # insert_res = client.insert(SimpleArgs.new(job_num: 1))
40
+ # insert_res.job # inserted job row
41
+ #
42
+ # With insert opts:
43
+ #
44
+ # insert_res = client.insert(SimpleArgs.new(job_num: 1), insert_opts: InsertOpts.new(queue: "high_priority"))
45
+ # insert_res.job # inserted job row
46
+ #
47
+ # Job arg implementations are expected to respond to:
48
+ #
49
+ # * `#kind`: A string that uniquely identifies the job in the database.
50
+ # * `#to_json`: Encodes the args to JSON for persistence in the database.
51
+ # Must match the encoding of an args struct on the Go side to be workable.
52
+ #
53
+ # They may also respond to `#insert_opts` which is expected to return an
54
+ # `InsertOpts` that contains options that will apply to all jobs of this
55
+ # kind. Insertion options provided as an argument to `#insert` override
56
+ # those returned by job args.
57
+ #
58
+ # For example:
59
+ #
60
+ # class SimpleArgs
61
+ # attr_accessor :job_num
62
+ #
63
+ # def initialize(job_num:)
64
+ # self.job_num = job_num
65
+ # end
66
+ #
67
+ # def kind = "simple"
68
+ #
69
+ # def to_json = JSON.dump({job_num: job_num})
70
+ # end
71
+ #
72
+ # See also JobArgsHash for an easy way to insert a job from a hash.
73
+ #
74
+ # Returns an instance of InsertResult.
75
+ def insert(args, insert_opts: EMPTY_INSERT_OPTS)
76
+ insert_params, unique_opts = make_insert_params(args, insert_opts)
77
+ check_unique_job(insert_params, unique_opts) do
78
+ job = @driver.job_insert(insert_params)
79
+ InsertResult.new(job)
80
+ end
81
+ end
82
+
83
+ # Inserts many new jobs as part of a single batch operation for improved
84
+ # efficiency.
85
+ #
86
+ # Takes an array of job args or InsertManyParams which encapsulate job args
87
+ # and a paired InsertOpts.
88
+ #
89
+ # With job args:
90
+ #
91
+ # num_inserted = client.insert_many([
92
+ # SimpleArgs.new(job_num: 1),
93
+ # SimpleArgs.new(job_num: 2)
94
+ # ])
95
+ #
96
+ # With InsertManyParams:
97
+ #
98
+ # num_inserted = client.insert_many([
99
+ # River::InsertManyParams.new(SimpleArgs.new(job_num: 1), insert_opts: InsertOpts.new(max_attempts: 5)),
100
+ # River::InsertManyParams.new(SimpleArgs.new(job_num: 2), insert_opts: InsertOpts.new(queue: "high_priority"))
101
+ # ])
102
+ #
103
+ # Job arg implementations are expected to respond to:
104
+ #
105
+ # * `#kind`: A string that uniquely identifies the job in the database.
106
+ # * `#to_json`: Encodes the args to JSON for persistence in the database.
107
+ # Must match the encoding of an args struct on the Go side to be workable.
108
+ #
109
+ # For example:
110
+ #
111
+ # class SimpleArgs
112
+ # attr_accessor :job_num
113
+ #
114
+ # def initialize(job_num:)
115
+ # self.job_num = job_num
116
+ # end
117
+ #
118
+ # def kind = "simple"
119
+ #
120
+ # def to_json = JSON.dump({job_num: job_num})
121
+ # end
122
+ #
123
+ # See also JobArgsHash for an easy way to insert a job from a hash.
124
+ #
125
+ # Unique job insertion isn't supported with bulk insertion because it'd run
126
+ # the risk of major lock contention.
127
+ #
128
+ # Returns the number of jobs inserted.
129
+ def insert_many(args)
130
+ all_params = args.map do |arg|
131
+ if arg.is_a?(InsertManyParams)
132
+ make_insert_params(arg.args, arg.insert_opts || EMPTY_INSERT_OPTS, is_insert_many: true).first # unique opts ignored on batch insert
133
+ else # jobArgs
134
+ make_insert_params(arg, EMPTY_INSERT_OPTS, is_insert_many: true).first # unique opts ignored on batch insert
135
+ end
136
+ end
137
+
138
+ @driver.job_insert_many(all_params)
139
+ end
140
+
141
+ private def check_advisory_lock_prefix_bounds(advisory_lock_prefix)
142
+ return nil if advisory_lock_prefix.nil?
143
+
144
+ # 2**32-1 is 0xffffffff (the largest number that fits in four bytes)
145
+ if advisory_lock_prefix < 0 || advisory_lock_prefix > 2**32 - 1
146
+ raise ArgumentError, "advisory lock prefix must fit inside four bytes"
147
+ end
148
+ advisory_lock_prefix
149
+ end
150
+
151
+ # Default states that are used during a unique insert. Can be overridden by
152
+ # setting UniqueOpts#by_state.
153
+ DEFAULT_UNIQUE_STATES = [
154
+ JOB_STATE_AVAILABLE,
155
+ JOB_STATE_COMPLETED,
156
+ JOB_STATE_RETRYABLE,
157
+ JOB_STATE_RUNNING,
158
+ JOB_STATE_SCHEDULED
159
+ ].freeze
160
+ private_constant :DEFAULT_UNIQUE_STATES
161
+
162
+ EMPTY_INSERT_OPTS = InsertOpts.new.freeze
163
+ private_constant :EMPTY_INSERT_OPTS
164
+
165
+ private def check_unique_job(insert_params, unique_opts, &block)
166
+ return block.call if unique_opts.nil?
167
+
168
+ any_unique_opts = false
169
+ get_params = Driver::JobGetByKindAndUniquePropertiesParam.new(kind: insert_params.kind)
170
+ unique_key = ""
171
+
172
+ # It's extremely important here that this lock string format and algorithm
173
+ # match the one in the main River library _exactly_. Don't change them
174
+ # unless they're updated everywhere.
175
+ if unique_opts.by_args
176
+ any_unique_opts = true
177
+ get_params.encoded_args = insert_params.encoded_args
178
+ unique_key += "&args=#{insert_params.encoded_args}"
179
+ end
180
+
181
+ if unique_opts.by_period
182
+ lower_period_bound = truncate_time(@time_now_utc.call, unique_opts.by_period).utc
183
+
184
+ any_unique_opts = true
185
+ get_params.created_at = [lower_period_bound, lower_period_bound + unique_opts.by_period]
186
+ unique_key += "&period=#{lower_period_bound.strftime("%FT%TZ")}"
187
+ end
188
+
189
+ if unique_opts.by_queue
190
+ any_unique_opts = true
191
+ get_params.queue = insert_params.queue
192
+ unique_key += "&queue=#{insert_params.queue}"
193
+ end
194
+
195
+ if unique_opts.by_state
196
+ any_unique_opts = true
197
+ get_params.state = unique_opts.by_state
198
+ unique_key += "&state=#{unique_opts.by_state.join(",")}"
199
+ else
200
+ get_params.state = DEFAULT_UNIQUE_STATES
201
+ unique_key += "&state=#{DEFAULT_UNIQUE_STATES.join(",")}"
202
+ end
203
+
204
+ return block.call unless any_unique_opts
205
+
206
+ # fast path
207
+ if !unique_opts.by_state || unique_opts.by_state.sort == DEFAULT_UNIQUE_STATES
208
+ unique_key_hash = Digest::SHA256.digest(unique_key)
209
+ job, unique_skipped_as_duplicate = @driver.job_insert_unique(insert_params, unique_key_hash)
210
+ return InsertResult.new(job, unique_skipped_as_duplicated: unique_skipped_as_duplicate)
211
+ end
212
+
213
+ @driver.transaction do
214
+ lock_str = "unique_key"
215
+ lock_str += "kind=#{insert_params.kind}"
216
+ lock_str += unique_key
217
+
218
+ lock_key = if @advisory_lock_prefix.nil?
219
+ FNV.fnv1_hash(lock_str, size: 64)
220
+ else
221
+ # Steep should be able to tell that this is not nil, but it can't.
222
+ prefix = @advisory_lock_prefix #: Integer # rubocop:disable Layout/LeadingCommentSpace
223
+ prefix << 32 | FNV.fnv1_hash(lock_str, size: 32)
224
+ end
225
+
226
+ # Packs a uint64 then unpacks to int64, which we need to do to keep the
227
+ # value within the bounds of Postgres' bigint. Overflow is okay because
228
+ # we can use the full bigint space (including negative numbers) for the
229
+ # advisory lock.
230
+ lock_key = uint64_to_int64(lock_key)
231
+
232
+ @driver.advisory_lock(lock_key)
233
+
234
+ existing_job = @driver.job_get_by_kind_and_unique_properties(get_params)
235
+ return InsertResult.new(existing_job, unique_skipped_as_duplicated: true) if existing_job
236
+
237
+ block.call
238
+ end
239
+ end
240
+
241
+ private def make_insert_params(args, insert_opts, is_insert_many: false)
242
+ raise "args should respond to `#kind`" if !args.respond_to?(:kind)
243
+
244
+ # ~all objects in Ruby respond to `#to_json`, so check non-nil instead.
245
+ args_json = args.to_json
246
+ raise "args should return non-nil from `#to_json`" if !args_json
247
+
248
+ args_insert_opts = if args.respond_to?(:insert_opts)
249
+ args_with_insert_opts = args #: _JobArgsWithInsertOpts # rubocop:disable Layout/LeadingCommentSpace
250
+ args_with_insert_opts.insert_opts || EMPTY_INSERT_OPTS
251
+ else
252
+ EMPTY_INSERT_OPTS
253
+ end
254
+
255
+ scheduled_at = insert_opts.scheduled_at || args_insert_opts.scheduled_at
256
+ unique_opts = insert_opts.unique_opts || args_insert_opts.unique_opts
257
+
258
+ raise ArgumentError, "unique opts can't be used with `#insert_many`" if is_insert_many && unique_opts
259
+
260
+ [
261
+ Driver::JobInsertParams.new(
262
+ encoded_args: args_json,
263
+ kind: args.kind,
264
+ max_attempts: insert_opts.max_attempts || args_insert_opts.max_attempts || MAX_ATTEMPTS_DEFAULT,
265
+ priority: insert_opts.priority || args_insert_opts.priority || PRIORITY_DEFAULT,
266
+ queue: insert_opts.queue || args_insert_opts.queue || QUEUE_DEFAULT,
267
+ scheduled_at: scheduled_at&.utc, # database defaults to now
268
+ state: scheduled_at ? JOB_STATE_SCHEDULED : JOB_STATE_AVAILABLE,
269
+ tags: validate_tags(insert_opts.tags || args_insert_opts.tags)
270
+ ),
271
+ unique_opts
272
+ ]
273
+ end
274
+
275
+ # Truncates the given time down to the interval. For example:
276
+ #
277
+ # Thu Jan 15 21:26:36 UTC 2024 @ 15 minutes ->
278
+ # Thu Jan 15 21:15:00 UTC 2024
279
+ private def truncate_time(time, interval_seconds)
280
+ Time.at((time.to_f / interval_seconds).floor * interval_seconds)
281
+ end
282
+
283
+ # Moves an integer that may occupy the entire uint64 space to one that's
284
+ # bounded within int64. Allows overflow.
285
+ private def uint64_to_int64(int)
286
+ [int].pack("Q").unpack1("q") #: Integer # rubocop:disable Layout/LeadingCommentSpace
287
+ end
288
+
289
+ TAG_RE = /\A[\w][\w\-]+[\w]\z/
290
+ private_constant :TAG_RE
291
+
292
+ private def validate_tags(tags)
293
+ tags&.each do |tag|
294
+ raise ArgumentError, "tags should be 255 characters or less" if tag.length > 255
295
+ raise ArgumentError, "tag should match regex #{TAG_RE.inspect}" unless TAG_RE.match(tag)
296
+ end
297
+ end
298
+ end
299
+
300
+ # A single job to insert that's part of an #insert_many batch insert. Unlike
301
+ # sending raw job args, this supports an InsertOpts to pair with the job.
302
+ class InsertManyParams
303
+ # Job args to insert.
304
+ attr_reader :args
305
+
306
+ # Insertion options to use with the insert.
307
+ attr_reader :insert_opts
308
+
309
+ def initialize(args, insert_opts: nil)
310
+ @args = args
311
+ @insert_opts = insert_opts
312
+ end
313
+ end
314
+
315
+ # Result of a single insertion.
316
+ class InsertResult
317
+ # Inserted job row, or an existing job row if insert was skipped due to a
318
+ # previously existing unique job.
319
+ attr_reader :job
320
+
321
+ # True if, for a unique job, the insertion was skipped because an equivalent
322
+ # job matching its unique properties was already present.
323
+ attr_reader :unique_skipped_as_duplicated
324
+
325
+ def initialize(job, unique_skipped_as_duplicated: false)
326
+ @job = job
327
+ @unique_skipped_as_duplicated = unique_skipped_as_duplicated
328
+ end
329
+ end
330
+ end
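For orientation, here is a brief illustrative sketch (not part of the gem) of the insertion API added in this file. It assumes the separate riverqueue-sequel driver gem and the SimpleArgs example class from the docstrings above; only River::Client#insert, #insert_many, InsertOpts, and InsertManyParams come from this release.

    # Assumes the riverqueue and riverqueue-sequel gems are already loaded.
    DB = Sequel.connect("postgres://localhost/river_test") # hypothetical database URL
    client = River::Client.new(River::Driver::Sequel.new(DB))

    # Single insert; SimpleArgs is the example args class from the docstrings.
    insert_res = client.insert(SimpleArgs.new(job_num: 1))
    insert_res.job # inserted job row

    # Insert opts given here override any returned by SimpleArgs#insert_opts.
    client.insert(
      SimpleArgs.new(job_num: 2),
      insert_opts: River::InsertOpts.new(queue: "high_priority", max_attempts: 5)
    )

    # Batch insert for efficiency; returns the number of jobs inserted. Unique
    # opts aren't supported here and raise ArgumentError if supplied.
    client.insert_many([
      SimpleArgs.new(job_num: 3),
      River::InsertManyParams.new(
        SimpleArgs.new(job_num: 4),
        insert_opts: River::InsertOpts.new(queue: "batch")
      )
    ])
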
data/lib/driver.rb ADDED
@@ -0,0 +1,63 @@
1
+ module River
2
+ # Contains the interface used by the top-level River module to communicate with
3
+ # its driver implementations. All types and methods in this module should be
4
+ # considered to be for internal use only and subject to change. API stability
5
+ # is not guaranteed.
6
+ module Driver
7
+ # Parameters for looking up a job by kind and unique properties.
8
+ class JobGetByKindAndUniquePropertiesParam
9
+ attr_accessor :created_at
10
+ attr_accessor :encoded_args
11
+ attr_accessor :kind
12
+ attr_accessor :queue
13
+ attr_accessor :state
14
+
15
+ def initialize(
16
+ kind:,
17
+ created_at: nil,
18
+ encoded_args: nil,
19
+ queue: nil,
20
+ state: nil
21
+ )
22
+ self.kind = kind
23
+ self.created_at = created_at
24
+ self.encoded_args = encoded_args
25
+ self.queue = queue
26
+ self.state = state
27
+ end
28
+ end
29
+
30
+ # Insert parameters for a job. This is sent to underlying drivers and is meant
31
+ # for internal use only. Its interface is subject to change.
32
+ class JobInsertParams
33
+ attr_accessor :encoded_args
34
+ attr_accessor :kind
35
+ attr_accessor :max_attempts
36
+ attr_accessor :priority
37
+ attr_accessor :queue
38
+ attr_accessor :scheduled_at
39
+ attr_accessor :state
40
+ attr_accessor :tags
41
+
42
+ def initialize(
43
+ encoded_args:,
44
+ kind:,
45
+ max_attempts:,
46
+ priority:,
47
+ queue:,
48
+ scheduled_at:,
49
+ state:,
50
+ tags:
51
+ )
52
+ self.encoded_args = encoded_args
53
+ self.kind = kind
54
+ self.max_attempts = max_attempts
55
+ self.priority = priority
56
+ self.queue = queue
57
+ self.scheduled_at = scheduled_at
58
+ self.state = state
59
+ self.tags = tags
60
+ end
61
+ end
62
+ end
63
+ end
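The Driver module above only defines parameter structs; the concrete driver object passed to River::Client.new is duck-typed. The following no-op stub is inferred from the call sites in client.rb (it is not a documented interface), and real implementations live in gems like riverqueue-sequel and riverqueue-activerecord.

    # Hypothetical stub showing the methods Client invokes on a driver.
    class StubDriver
      # Used by Client#insert for non-unique inserts; returns a job row.
      def job_insert(insert_params) = nil

      # Used by Client#insert_many; returns the number of jobs inserted.
      def job_insert_many(all_params) = all_params.length

      # Fast-path unique insert; returns [job, unique_skipped_as_duplicate].
      def job_insert_unique(insert_params, unique_key_hash) = [nil, false]

      # Slow-path unique insert support.
      def transaction(&block) = block.call
      def advisory_lock(lock_key) = nil
      def job_get_by_kind_and_unique_properties(get_params) = nil
    end
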
data/lib/fnv.rb ADDED
@@ -0,0 +1,35 @@
1
+ module River
2
+ # FNV is the Fowler–Noll–Vo hash function, a simple hash that's very easy to
3
+ # implement, and has the perfect characteristics for use with the 64 bits of
4
+ # available space in a PG advisory lock.
5
+ #
6
+ # I implemented it myself so that the River gem can stay dependency free
7
+ # (and because it's quite easy to do).
8
+ module FNV
9
+ def self.fnv1_hash(str, size:)
10
+ hash = OFFSET_BASIS.fetch(size)
11
+ mask = (2**size - 1).to_int # creates a mask of 1s that's `size` bits long, like 0xffffffff for size 32
12
+ prime = PRIME.fetch(size)
13
+
14
+ str.each_byte do |byte|
15
+ hash *= prime
16
+ hash &= mask
17
+ hash ^= byte
18
+ end
19
+
20
+ hash
21
+ end
22
+
23
+ OFFSET_BASIS = {
24
+ 32 => 0x811c9dc5,
25
+ 64 => 0xcbf29ce484222325
26
+ }.freeze
27
+ private_constant :OFFSET_BASIS
28
+
29
+ PRIME = {
30
+ 32 => 0x01000193,
31
+ 64 => 0x00000100000001B3
32
+ }.freeze
33
+ private_constant :PRIME
34
+ end
35
+ end
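To make the advisory lock math concrete, here is a small illustrative sketch (not part of the gem) of how client.rb combines this FNV-1 hash with the optional advisory lock prefix and folds the result into Postgres' signed bigint range. The lock string and prefix values below are made up for the example.

    # A made-up lock string in the same format Client#check_unique_job builds.
    lock_str = "unique_key" + "kind=simple" + "&queue=default"

    # 64-bit FNV-1 hash, used directly when no advisory lock prefix is configured.
    lock_key = River::FNV.fnv1_hash(lock_str, size: 64)

    # With a 32-bit prefix: the prefix occupies the high 32 bits and a 32-bit
    # hash the low bits.
    prefix = 123_456 # hypothetical advisory_lock_prefix
    prefixed_key = prefix << 32 | River::FNV.fnv1_hash(lock_str, size: 32)

    # Reinterpret the unsigned value as a signed 64-bit integer so it fits in a
    # Postgres bigint, exactly as Client#uint64_to_int64 does.
    bigint_key = [lock_key].pack("Q").unpack1("q")
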
data/lib/insert_opts.rb ADDED
@@ -0,0 +1,136 @@
1
+ module River
2
+ # Options for job insertion, and which can be provided by implementing
3
+ # #insert_opts on job args, or specified as a parameter on #insert or
4
+ # #insert_many.
5
+ class InsertOpts
6
+ # The maximum number of total attempts (including both the original run and
7
+ # all retries) before a job is abandoned and set as discarded.
8
+ attr_accessor :max_attempts
9
+
10
+ # The priority of the job, with 1 being the highest priority and 4 being the
11
+ # lowest. When fetching available jobs to work, the highest priority jobs
12
+ # will always be fetched before any lower priority jobs are fetched. Note
13
+ # that if your workers are swamped with more high-priority jobs then they
14
+ # can handle, lower priority jobs may not be fetched.
15
+ #
16
+ # Defaults to PRIORITY_DEFAULT.
17
+ attr_accessor :priority
18
+
19
+ # The name of the job queue in which to insert the job.
20
+ #
21
+ # Defaults to QUEUE_DEFAULT.
22
+ attr_accessor :queue
23
+
24
+ # A time in future at which to schedule the job (i.e. in cases where it
25
+ # shouldn't be run immediately). The job is guaranteed not to run before
26
+ # this time, but may run slightly after depending on the number of other
27
+ # scheduled jobs and how busy the queue is.
28
+ #
29
+ # Use of this option generally only makes sense when passing options into
30
+ # Insert rather than when a job args is returning `#insert_opts`, however,
31
+ # it will work in both cases.
32
+ attr_accessor :scheduled_at
33
+
34
+ # An arbitrary list of keywords to add to the job. They have no functional
35
+ # behavior and are meant entirely as a user-specified construct to help
36
+ # group and categorize jobs.
37
+ #
38
+ # If tags are specified from both a job args override and from options on
39
+ # Insert, the latter takes precedence. Tags are not merged.
40
+ attr_accessor :tags
41
+
42
+ # Options relating to job uniqueness. No unique options means that the job
43
+ # is never treated as unique.
44
+ attr_accessor :unique_opts
45
+
46
+ def initialize(
47
+ max_attempts: nil,
48
+ priority: nil,
49
+ queue: nil,
50
+ scheduled_at: nil,
51
+ tags: nil,
52
+ unique_opts: nil
53
+ )
54
+ self.max_attempts = max_attempts
55
+ self.priority = priority
56
+ self.queue = queue
57
+ self.scheduled_at = scheduled_at
58
+ self.tags = tags
59
+ self.unique_opts = unique_opts
60
+ end
61
+ end
62
+
63
+ # Parameters for uniqueness for a job.
64
+ #
65
+ # If all properties are nil, no uniqueness at is enforced. As each property is
66
+ # initialized, it's added as a dimension on the uniqueness matrix, and with
67
+ # any property on, the job's kind always counts toward uniqueness.
68
+ #
69
+ # So for example, if only #by_queue is on, then for the given job kind, only a
70
+ # single instance is allowed in any given queue, regardless of other
71
+ # properties on the job. If both #by_args and #by_queue are on, then for the
72
+ # given job kind, a single instance is allowed for each combination of args
73
+ # and queues. If either args or queue is changed on a new job, it's allowed to
74
+ # be inserted as a new job.
75
+ #
76
+ # Uniquenes is checked at insert time by taking a Postgres advisory lock,
77
+ # doing a look up for an equivalent row, and inserting only if none was found.
78
+ # There's no database-level mechanism that guarantees jobs stay unique, so if
79
+ # an equivalent row is inserted out of band (or batch inserted, where a unique
80
+ # check doesn't occur), it's conceivable that duplicates could coexist.
81
+ class UniqueOpts
82
+ # Indicates that uniqueness should be enforced for any specific instance of
83
+ # encoded args for a job.
84
+ #
85
+ # Default is false, meaning that as long as any other unique property is
86
+ # enabled, uniqueness will be enforced for a kind regardless of input args.
87
+ attr_accessor :by_args
88
+
89
+ # Defines uniqueness within a given period. On an insert time is rounded
90
+ # down to the nearest multiple of the given period, and a job is only
91
+ # inserted if there isn't an existing job that will run between then and the
92
+ # next multiple of the period.
93
+ #
94
+ # The period should be specified in seconds. So a job that's unique every 15
95
+ # minute period would have a value of 900.
96
+ #
97
+ # Default is no unique period, meaning that as long as any other unique
98
+ # property is enabled, uniqueness will be enforced across all jobs of the
99
+ # kind in the database, regardless of when they were scheduled.
100
+ attr_accessor :by_period
101
+
102
+ # Indicates that uniqueness should be enforced within each queue.
103
+ #
104
+ # Default is false, meaning that as long as any other unique property is
105
+ # enabled, uniqueness will be enforced for a kind across all queues.
106
+ attr_accessor :by_queue
107
+
108
+ # Indicates that uniqueness should be enforced across any of the states in
109
+ # the given set. For example, if the given states were `(scheduled,
110
+ # running)` then a new job could be inserted even if one of the same kind
111
+ # was already being worked by the queue (new jobs are inserted as
112
+ # `available`).
113
+ #
114
+ # Unlike other unique options, ByState gets a default when it's not set for
115
+ # user convenience. The default is equivalent to:
116
+ #
117
+ # by_state: [River::JOB_STATE_AVAILABLE, River::JOB_STATE_COMPLETED, River::JOB_STATE_RUNNING, River::JOB_STATE_RETRYABLE, River::JOB_STATE_SCHEDULED]
118
+ #
119
+ # With this setting, any jobs of the same kind that have been completed or
120
+ # discarded, but not yet cleaned out by the system, won't count towards the
121
+ # uniqueness of a new insert.
122
+ attr_accessor :by_state
123
+
124
+ def initialize(
125
+ by_args: nil,
126
+ by_period: nil,
127
+ by_queue: nil,
128
+ by_state: nil
129
+ )
130
+ self.by_args = by_args
131
+ self.by_period = by_period
132
+ self.by_queue = by_queue
133
+ self.by_state = by_state
134
+ end
135
+ end
136
+ end
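A brief illustrative sketch of combining these options on an insert, reusing the hypothetical client and SimpleArgs from the earlier examples; the queue names and period are arbitrary.

    # At most one job of this kind per queue, per args, per 15-minute window,
    # considering only the default unique states.
    client.insert(
      SimpleArgs.new(job_num: 1),
      insert_opts: River::InsertOpts.new(
        queue: "reports",
        unique_opts: River::UniqueOpts.new(
          by_args: true,
          by_period: 15 * 60, # seconds
          by_queue: true
        )
      )
    )

    # Schedule a job for an hour from now; it's inserted in the `scheduled`
    # state rather than `available`.
    client.insert(
      SimpleArgs.new(job_num: 2),
      insert_opts: River::InsertOpts.new(scheduled_at: Time.now + 3600)
    )
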
data/lib/job.rb ADDED
@@ -0,0 +1,187 @@
1
+ module River
2
+ JOB_STATE_AVAILABLE = "available"
3
+ JOB_STATE_CANCELLED = "cancelled"
4
+ JOB_STATE_COMPLETED = "completed"
5
+ JOB_STATE_DISCARDED = "discarded"
6
+ JOB_STATE_RETRYABLE = "retryable"
7
+ JOB_STATE_RUNNING = "running"
8
+ JOB_STATE_SCHEDULED = "scheduled"
9
+
10
+ # Provides a way of creating a job args from a simple Ruby hash for a quick
11
+ # way to insert a job without having to define a class. The first argument is
12
+ # a "kind" string for identifying the job in the database and the second is a
13
+ # hash that will be encoded to JSON.
14
+ #
15
+ # For example:
16
+ #
17
+ # insert_res = client.insert(River::JobArgsHash.new("job_kind", {
18
+ # job_num: 1
19
+ # }))
20
+ class JobArgsHash
21
+ def initialize(kind, hash)
22
+ raise "kind should be non-nil" if !kind
23
+ raise "hash should be non-nil" if !hash
24
+
25
+ @kind = kind
26
+ @hash = hash
27
+ end
28
+
29
+ attr_reader :kind
30
+
31
+ def to_json
32
+ JSON.dump(@hash)
33
+ end
34
+ end
35
+
36
+ # JobRow contains the properties of a job that are persisted to the database.
37
+ class JobRow
38
+ # ID of the job. Generated as part of a Postgres sequence and generally
39
+ # ascending in nature, but there may be gaps in it as transactions roll
40
+ # back.
41
+ attr_accessor :id
42
+
43
+ # The job's args as a hash decoded from JSON.
44
+ attr_accessor :args
45
+
46
+ # The attempt number of the job. Jobs are inserted at 0, the number is
47
+ # incremented to 1 the first time work its worked, and may increment further
48
+ # if it's either snoozed or errors.
49
+ attr_accessor :attempt
50
+
51
+ # The time that the job was last worked. Starts out as `nil` on a new
52
+ # insert.
53
+ attr_accessor :attempted_at
54
+
55
+ # The set of worker IDs that have worked this job. A worker ID differs
56
+ # between different programs, but is shared by all executors within any
57
+ # given one. (i.e. Different Go processes have different IDs, but IDs are
58
+ # shared within any given process.) A process generates a new ID based on
59
+ # host and current time when it starts up.
60
+ attr_accessor :attempted_by
61
+
62
+ # When the job record was created.
63
+ attr_accessor :created_at
64
+
65
+ # A set of errors that occurred when the job was worked, one for each
66
+ # attempt. Ordered from earliest error to the latest error.
67
+ attr_accessor :errors
68
+
69
+ # The time at which the job was "finalized", meaning it was either completed
70
+ # successfully or errored for the last time such that it'll no longer be
71
+ # retried.
72
+ attr_accessor :finalized_at
73
+
74
+ # Kind uniquely identifies the type of job and instructs which worker
75
+ # should work it. It is set at insertion time via `#kind` on job args.
76
+ attr_accessor :kind
77
+
78
+ # The maximum number of attempts that the job will be tried before it errors
79
+ # for the last time and will no longer be worked.
80
+ attr_accessor :max_attempts
81
+
82
+ # Arbitrary metadata associated with the job.
83
+ attr_accessor :metadata
84
+
85
+ # The priority of the job, with 1 being the highest priority and 4 being the
86
+ # lowest. When fetching available jobs to work, the highest priority jobs
87
+ # will always be fetched before any lower priority jobs are fetched. Note
88
+ # that if your workers are swamped with more high-priority jobs then they
89
+ # can handle, lower priority jobs may not be fetched.
90
+ attr_accessor :priority
91
+
92
+ # The name of the queue where the job will be worked. Queues can be
93
+ # configured independently and be used to isolate jobs.
94
+ attr_accessor :queue
95
+
96
+ # When the job is scheduled to become available to be worked. Jobs default
97
+ # to running immediately, but may be scheduled for the future when they're
98
+ # inserted. They may also be scheduled for later because they were snoozed
99
+ # or because they errored and have additional retry attempts remaining.
100
+ attr_accessor :scheduled_at
101
+
102
+ # The state of job like `available` or `completed`. Jobs are `available`
103
+ # when they're first inserted.
104
+ attr_accessor :state
105
+
106
+ # Tags are an arbitrary list of keywords to add to the job. They have no
107
+ # functional behavior and are meant entirely as a user-specified construct
108
+ # to help group and categorize jobs.
109
+ attr_accessor :tags
110
+
111
+ # A unique key for the job within its kind that's used for unique job
112
+ # insertions. It's generated by hashing an inserted job's unique opts
113
+ # configuration.
114
+ attr_accessor :unique_key
115
+
116
+ def initialize(
117
+ id:,
118
+ args:,
119
+ attempt:,
120
+ created_at:,
121
+ kind:,
122
+ max_attempts:,
123
+ metadata:,
124
+ priority:,
125
+ queue:,
126
+ scheduled_at:,
127
+ state:,
128
+
129
+ # nullable/optional
130
+ attempted_at: nil,
131
+ attempted_by: nil,
132
+ errors: nil,
133
+ finalized_at: nil,
134
+ tags: nil,
135
+ unique_key: nil
136
+ )
137
+ self.id = id
138
+ self.args = args
139
+ self.attempt = attempt
140
+ self.attempted_at = attempted_at
141
+ self.attempted_by = attempted_by
142
+ self.created_at = created_at
143
+ self.errors = errors
144
+ self.finalized_at = finalized_at
145
+ self.kind = kind
146
+ self.max_attempts = max_attempts
147
+ self.metadata = metadata
148
+ self.priority = priority
149
+ self.queue = queue
150
+ self.scheduled_at = scheduled_at
151
+ self.state = state
152
+ self.tags = tags
153
+ self.unique_key = unique_key
154
+ end
155
+ end
156
+
157
+ # A failed job work attempt containing information about the error or panic
158
+ # that occurred.
159
+ class AttemptError
160
+ # The time at which the error occurred.
161
+ attr_accessor :at
162
+
163
+ # The attempt number on which the error occurred (maps to #attempt on a job
164
+ # row).
165
+ attr_accessor :attempt
166
+
167
+ # Contains the stringified error of an error returned from a job or a panic
168
+ # value in case of a panic.
169
+ attr_accessor :error
170
+
171
+ # Contains a stack trace from a job that panicked. The trace is produced by
172
+ # invoking `debug.Trace()` in Go.
173
+ attr_accessor :trace
174
+
175
+ def initialize(
176
+ at:,
177
+ attempt:,
178
+ error:,
179
+ trace:
180
+ )
181
+ self.at = at
182
+ self.attempt = attempt
183
+ self.error = error
184
+ self.trace = trace
185
+ end
186
+ end
187
+ end
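Finally, a small sketch of JobArgsHash together with the returned row, again reusing the hypothetical client from the earlier examples; the kind, args, and values shown in comments are only indicative.

    insert_res = client.insert(River::JobArgsHash.new("email_send", {to: "user@example.com"}))

    job = insert_res.job                    # River::JobRow
    job.state                               # => "available" (JOB_STATE_AVAILABLE)
    job.max_attempts                        # => 25 unless overridden (MAX_ATTEMPTS_DEFAULT)
    job.args                                # the args, decoded back to a hash by the driver
    insert_res.unique_skipped_as_duplicated # => false unless a unique insert was skipped
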
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: riverqueue
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.6.1
4
+ version: 0.7.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Blake Gentry
@@ -9,7 +9,7 @@ authors:
9
9
  autorequire:
10
10
  bindir: bin
11
11
  cert_chain: []
12
- date: 2024-08-21 00:00:00.000000000 Z
12
+ date: 2024-08-31 00:00:00.000000000 Z
13
13
  dependencies: []
14
14
  description: River is a fast job queue for Go. Use this gem in conjunction with gems
15
15
  riverqueue-activerecord or riverqueue-sequel to insert jobs in Ruby which will be
@@ -19,6 +19,11 @@ executables: []
19
19
  extensions: []
20
20
  extra_rdoc_files: []
21
21
  files:
22
+ - lib/client.rb
23
+ - lib/driver.rb
24
+ - lib/fnv.rb
25
+ - lib/insert_opts.rb
26
+ - lib/job.rb
22
27
  - lib/riverqueue.rb
23
28
  homepage: https://riverqueue.com
24
29
  licenses: