solid_queue 0.4.1 → 0.7.1
- checksums.yaml +4 -4
- data/README.md +45 -25
- data/UPGRADING.md +102 -0
- data/app/jobs/solid_queue/recurring_job.rb +9 -0
- data/app/models/solid_queue/claimed_execution.rb +21 -8
- data/app/models/solid_queue/process/executor.rb +13 -1
- data/app/models/solid_queue/process/prunable.rb +8 -1
- data/app/models/solid_queue/process.rb +13 -6
- data/app/models/solid_queue/recurring_execution.rb +17 -4
- data/app/models/solid_queue/recurring_task/arguments.rb +17 -0
- data/app/models/solid_queue/recurring_task.rb +122 -0
- data/app/models/solid_queue/semaphore.rb +18 -5
- data/db/migrate/20240719134516_create_recurring_tasks.rb +20 -0
- data/db/migrate/20240811173327_add_name_to_processes.rb +5 -0
- data/db/migrate/20240813160053_make_name_not_null.rb +16 -0
- data/db/migrate/20240819165045_change_solid_queue_recurring_tasks_static_to_not_null.rb +5 -0
- data/lib/generators/solid_queue/install/USAGE +1 -0
- data/lib/generators/solid_queue/install/install_generator.rb +21 -7
- data/lib/generators/solid_queue/install/templates/jobs +6 -0
- data/lib/puma/plugin/solid_queue.rb +10 -32
- data/lib/solid_queue/cli.rb +20 -0
- data/lib/solid_queue/configuration.rb +40 -29
- data/lib/solid_queue/dispatcher/recurring_schedule.rb +21 -12
- data/lib/solid_queue/dispatcher.rb +8 -8
- data/lib/solid_queue/lifecycle_hooks.rb +43 -0
- data/lib/solid_queue/log_subscriber.rb +13 -6
- data/lib/solid_queue/processes/base.rb +11 -0
- data/lib/solid_queue/processes/poller.rb +8 -4
- data/lib/solid_queue/processes/process_exit_error.rb +20 -0
- data/lib/solid_queue/processes/process_missing_error.rb +9 -0
- data/lib/solid_queue/processes/process_pruned_error.rb +11 -0
- data/lib/solid_queue/processes/registrable.rb +1 -0
- data/lib/solid_queue/processes/runnable.rb +10 -16
- data/lib/solid_queue/supervisor/maintenance.rb +5 -3
- data/lib/solid_queue/supervisor.rb +126 -10
- data/lib/solid_queue/version.rb +1 -1
- data/lib/solid_queue/worker.rb +5 -0
- data/lib/solid_queue.rb +10 -0
- metadata +33 -7
- data/lib/solid_queue/dispatcher/recurring_task.rb +0 -99
- data/lib/solid_queue/supervisor/async_supervisor.rb +0 -44
- data/lib/solid_queue/supervisor/fork_supervisor.rb +0 -108
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: a7234dc4430998648196bf2d3905b66bb85e265c2393121a7c5343fed36b1996
+  data.tar.gz: 216a0918e29194e6d11fe4bf365183228127f8390658a11ace329523ad41468c
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: a080aedf20f39940d8c25e8a14628811801b8fd4832fbcb926beecd0d1ce4c694ec8d80b608656f9de80cc8cc69b525f62d4fe7bc7258408aa6d6af62292d4fd
+  data.tar.gz: 66d24c1bb1c7cb1b9afbcf8a0b667df624dd3f47cea92887fbcd69d93b1832af3c44afb570d1cfb459c37e2b3c163900e0dd2febb067ee9179b4c49514e4b359
data/README.md
CHANGED
@@ -2,7 +2,7 @@
 
 Solid Queue is a DB-based queuing backend for [Active Job](https://edgeguides.rubyonrails.org/active_job_basics.html), designed with simplicity and performance in mind.
 
-Besides regular job enqueuing and processing, Solid Queue supports delayed jobs, concurrency controls, pausing queues, numeric priorities per job, priorities by queue order, and bulk enqueuing (`enqueue_all` for Active Job's `perform_all_later`).
+Besides regular job enqueuing and processing, Solid Queue supports delayed jobs, concurrency controls, pausing queues, numeric priorities per job, priorities by queue order, and bulk enqueuing (`enqueue_all` for Active Job's `perform_all_later`).
 
 Solid Queue can be used with SQL databases such as MySQL, PostgreSQL or SQLite, and it leverages the `FOR UPDATE SKIP LOCKED` clause, if available, to avoid blocking and waiting on locks when polling jobs. It relies on Active Job for retries, discarding, error handling, serialization, or delays, and it's compatible with Ruby on Rails multi-threading.
 
@@ -31,9 +31,9 @@ $ bin/rails generate solid_queue:install
 
 This will set `solid_queue` as the Active Job's adapter in production, and will copy the required migration over to your app.
 
-Alternatively, you can
+Alternatively, you can skip setting the Active Job's adapter with:
 ```bash
-$ bin/rails solid_queue:install
+$ bin/rails generate solid_queue:install --skip_adapter
 ```
 
 And set Solid Queue as your Active Job's queue backend manually, in your environment config:
@@ -42,7 +42,7 @@ And set Solid Queue as your Active Job's queue backend manually, in your environ
 config.active_job.queue_adapter = :solid_queue
 ```
 
-
+Or you can set only specific jobs to use Solid Queue as their backend if you're migrating from another adapter and want to move jobs progressively:
 
 ```ruby
 # app/jobs/my_job.rb
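The hunk stops right after the file comment; for context, pointing a single job at Solid Queue is plain Active Job API. A minimal sketch (the `MyJob` body is illustrative, not the README's own example):

```ruby
# app/jobs/my_job.rb
class MyJob < ApplicationJob
  # Only this job uses Solid Queue while the rest of the app keeps its current adapter.
  self.queue_adapter = :solid_queue

  def perform(*args)
    # ...
  end
end
```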
@@ -59,14 +59,14 @@ Finally, you need to run the migrations:
 $ bin/rails db:migrate
 ```
 
-After this, you'll be ready to enqueue jobs using Solid Queue, but you need to start Solid Queue's supervisor to run them.
+After this, you'll be ready to enqueue jobs using Solid Queue, but you need to start Solid Queue's supervisor to run them. You can use the provided binstub:
 ```bash
-$
+$ bin/jobs
 ```
 
 This will start processing jobs in all queues using the default configuration. See [below](#configuration) to learn more about configuring Solid Queue.
 
-For small projects, you can run Solid Queue on the same machine as your webserver. When you're ready to scale, Solid Queue supports horizontal scaling out-of-the-box. You can run Solid Queue on a separate server from your webserver, or even run `
+For small projects, you can run Solid Queue on the same machine as your webserver. When you're ready to scale, Solid Queue supports horizontal scaling out-of-the-box. You can run Solid Queue on a separate server from your webserver, or even run `bin/jobs` on multiple machines at the same time. Depending on the configuration, you can designate some machines to run only dispatchers or only workers. See the [configuration](#configuration) section for more details on this.
 
 ## Requirements
 Besides Rails 7.1, Solid Queue works best with MySQL 8+ or PostgreSQL 9.5+, as they support `FOR UPDATE SKIP LOCKED`. You can use it with older versions, but in that case, you might run into lock waits if you run multiple workers for the same queue.
@@ -80,7 +80,7 @@ We have three types of actors in Solid Queue:
 - _Dispatchers_ are in charge of selecting jobs scheduled to run in the future that are due and _dispatching_ them, which is simply moving them from the `solid_queue_scheduled_executions` table over to the `solid_queue_ready_executions` table so that workers can pick them up. They're also in charge of managing [recurring tasks](#recurring-tasks), dispatching jobs to process them according to their schedule. On top of that, they do some maintenance work related to [concurrency controls](#concurrency-controls).
 - The _supervisor_ runs workers and dispatchers according to the configuration, controls their heartbeats, and stops and starts them when needed.
 
-
+Solid Queue's supervisor will fork a separate process for each supervised worker/dispatcher.
 
 By default, Solid Queue will try to find your configuration under `config/solid_queue.yml`, but you can set a different path using the environment variable `SOLID_QUEUE_CONFIG`. This is what this configuration looks like:
 
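The YAML example itself falls outside this hunk. As a rough sketch of the shape such a file takes, assembled from the options described in this README (the values are illustrative, not the gem's defaults):

```yml
production:
  dispatchers:
    - polling_interval: 1
      batch_size: 500
  workers:
    - queues: "*"
      threads: 3
      processes: 1
      polling_interval: 0.1
```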
@@ -131,7 +131,7 @@ Here's an overview of the different options:
 
 Finally, you can combine prefixes with exact names, like `[ staging*, background ]`, and the behaviour with respect to order will be the same as with only exact names.
 - `threads`: this is the max size of the thread pool that each worker will have to run jobs. Each worker will fetch this number of jobs from their queue(s), at most and will post them to the thread pool to be run. By default, this is `3`. Only workers have this setting.
-- `processes`: this is the number of worker processes that will be forked by the supervisor with the settings given. By default, this is `1`, just a single process. This setting is useful if you want to dedicate more than one CPU core to a queue or queues with the same configuration. Only workers have this setting.
+- `processes`: this is the number of worker processes that will be forked by the supervisor with the settings given. By default, this is `1`, just a single process. This setting is useful if you want to dedicate more than one CPU core to a queue or queues with the same configuration. Only workers have this setting.
 - `concurrency_maintenance`: whether the dispatcher will perform the concurrency maintenance work. This is `true` by default, and it's useful if you don't use any [concurrency controls](#concurrency-controls) and want to disable it or if you run multiple dispatchers and want some of them to just dispatch jobs without doing anything else.
 - `recurring_tasks`: a list of recurring tasks the dispatcher will manage. Read more details about this one in the [Recurring tasks](#recurring-tasks) section.
 
@@ -194,13 +194,13 @@ development:
 # ...
 ```
 
-Install migrations and specify the dedicated database name with the `
+Install migrations and specify the dedicated database name with the `--database` option. This will create the Solid Queue migration files in a separate directory, matching the value provided in `migrations_paths` in `config/database.yml`.
 
 ```bash
-$ bin/rails solid_queue:install
+$ bin/rails g solid_queue:install --database solid_queue
 ```
 
-Note: If you've already run the solid queue install command (`bin/rails generate solid_queue:install`), the migration files will have already been generated under the primary database's `db/migrate/` directory. You can remove these files and keep the ones generated by the database-specific migration installation above.
+Note: If you've already run the solid queue install command (`bin/rails generate solid_queue:install`) without a `--database` option, the migration files will have already been generated under the primary database's `db/migrate/` directory. You can remove these files and keep the ones generated by the database-specific migration installation above.
 
 Finally, run the migrations:
 
@@ -208,17 +208,47 @@ Finally, run the migrations:
 $ bin/rails db:migrate
 ```
 
+## Lifecycle hooks
+
+In Solid Queue, you can hook into two different points in the supervisor's life:
+- `start`: after the supervisor has finished booting and right before it forks workers and dispatchers.
+- `stop`: after receiving a signal (`TERM`, `INT` or `QUIT`) and right before starting graceful or immediate shutdown.
+
+And into two different points in a worker's life:
+- `worker_start`: after the worker has finished booting and right before it starts the polling loop.
+- `worker_stop`: after receiving a signal (`TERM`, `INT` or `QUIT`) and right before starting graceful or immediate shutdown (which is just `exit!`).
+
+You can use the following methods with a block to do this:
+```ruby
+SolidQueue.on_start
+SolidQueue.on_stop
+
+SolidQueue.on_worker_start
+SolidQueue.on_worker_stop
+```
+
+For example:
+```ruby
+SolidQueue.on_start { start_metrics_server }
+SolidQueue.on_stop { stop_metrics_server }
+```
+
+These can be called several times to add multiple hooks, but it needs to happen before Solid Queue is started. An initializer would be a good place to do this.
+
+
 ### Other configuration settings
 _Note_: The settings in this section should be set in your `config/application.rb` or your environment config like this: `config.solid_queue.silence_polling = true`
 
 There are several settings that control how Solid Queue works that you can set as well:
 - `logger`: the logger you want Solid Queue to use. Defaults to the app logger.
 - `app_executor`: the [Rails executor](https://guides.rubyonrails.org/threading_and_code_execution.html#executor) used to wrap asynchronous operations, defaults to the app executor
-- `on_thread_error`: custom lambda/Proc to call when there's an error within a thread that takes the exception raised as argument. Defaults to
+- `on_thread_error`: custom lambda/Proc to call when there's an error within a Solid Queue thread that takes the exception raised as argument. Defaults to
 
 ```ruby
 -> (exception) { Rails.error.report(exception, handled: false) }
 ```
+
+**This is not used for errors raised within a job execution**. Errors happening in jobs are handled by Active Job's `retry_on` or `discard_on`, and ultimately will result in [failed jobs](#failed-jobs-and-retries). This is for errors happening within Solid Queue itself.
 - `use_skip_locked`: whether to use `FOR UPDATE SKIP LOCKED` when performing locking reads. This will be automatically detected in the future, and for now, you'd only need to set this to `false` if your database doesn't support it. For MySQL, that'd be versions < 8, and for PostgreSQL, versions < 9.5. If you use SQLite, this has no effect, as writes are sequential.
 - `process_heartbeat_interval`: the heartbeat interval that all processes will follow—defaults to 60 seconds.
 - `process_alive_threshold`: how long to wait until a process is considered dead after its last heartbeat—defaults to 5 minutes.
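As a concrete illustration of the lifecycle hooks added above, such an initializer could look like this (a sketch; the file name and logging calls are illustrative, not part of the gem):

```ruby
# config/initializers/solid_queue_hooks.rb
SolidQueue.on_start { Rails.logger.info "Solid Queue supervisor started (pid #{Process.pid})" }
SolidQueue.on_stop  { Rails.logger.info "Solid Queue supervisor stopping (pid #{Process.pid})" }

SolidQueue.on_worker_start { Rails.logger.info "Solid Queue worker started (pid #{Process.pid})" }
SolidQueue.on_worker_stop  { Rails.logger.info "Solid Queue worker stopping (pid #{Process.pid})" }
```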
@@ -283,6 +313,8 @@ In this case, if we have a `Box::MovePostingsByContactToDesignatedBoxJob` job en
 
 Note that the `duration` setting depends indirectly on the value for `concurrency_maintenance_interval` that you set for your dispatcher(s), as that'd be the frequency with which blocked jobs are checked and unblocked. In general, you should set `duration` in a way that all your jobs would finish well under that duration and think of the concurrency maintenance task as a failsafe in case something goes wrong.
 
+Jobs are unblocked in order of priority but queue order is not taken into account for unblocking jobs. That means that if you have a group of jobs that share a concurrency group but are in different queues, or jobs of the same class that you enqueue in different queues, the queue order you set for a worker is not taken into account when unblocking blocked ones. The reason is that a job that runs unblocks the next one, and the job itself doesn't know about a particular worker's queue order (you could even have different workers with different queue orders), it can only know about priority. Once blocked jobs are unblocked and available for polling, they'll be picked up by a worker following its queue order.
+
 Finally, failed jobs that are automatically or manually retried work in the same way as new jobs that get enqueued: they get in the queue for gaining the lock, and whenever they get it, they'll be run. It doesn't matter if they had gained the lock already in the past.
 
 ## Failed jobs and retries
|
@@ -305,18 +337,6 @@ plugin :solid_queue
|
|
305
337
|
```
|
306
338
|
to your `puma.rb` configuration.
|
307
339
|
|
308
|
-
### Running as a fork or asynchronously
|
309
|
-
|
310
|
-
By default, the Puma plugin will fork additional processes for each worker and dispatcher so that they run in different processes. This provides the best isolation and performance, but can have additional memory usage.
|
311
|
-
|
312
|
-
Alternatively, workers and dispatchers can be run within the same Puma process(s). To do so just configure the plugin as:
|
313
|
-
|
314
|
-
```ruby
|
315
|
-
plugin :solid_queue
|
316
|
-
solid_queue_mode :async
|
317
|
-
```
|
318
|
-
|
319
|
-
Note that in this case, the `processes` configuration option will be ignored.
|
320
340
|
|
321
341
|
## Jobs and transactional integrity
|
322
342
|
:warning: Having your jobs in the same ACID-compliant database as your application data enables a powerful yet sharp tool: taking advantage of transactional integrity to ensure some action in your app is not committed unless your job is also committed. This can be very powerful and useful, but it can also backfire if you base some of your logic on this behaviour, and in the future, you move to another active job backend, or if you simply move Solid Queue to its own database, and suddenly the behaviour changes under you.
|
data/UPGRADING.md
ADDED
@@ -0,0 +1,102 @@
# Upgrading to version 0.7.x

This version removed the async mode introduced in version 0.4.0 and introduced a new binstub that can be used to start Solid Queue's supervisor. It also includes a minor migration.

To install both the binstub `bin/jobs` and the migration, you can just run
```
bin/rails generate solid_queue:install
```

Or, if you're using a different database for Solid Queue:

```bash
$ bin/rails generate solid_queue:install --database <the_name_of_your_solid_queue_db>
```


# Upgrading to version 0.6.x

## New migration in 3 steps
This version adds two new migrations to modify the `solid_queue_processes` table. The goal of that migration is to add a new column that needs to be `NOT NULL`. This needs to be done with two migrations and the following steps to ensure it happens without downtime and with new processes being able to register just fine:
1. Run the first migration that adds the new column, nullable
2. Deploy the updated Solid Queue code that uses this column
3. Run the second migration. This migration does two things:
   - Backfill existing rows that would have the column as NULL
   - Make the column not nullable and add a new index

Besides, it adds another migration with no effects to the `solid_queue_recurring_tasks` table. This one can be run just fine whenever, as the column affected is not used.

To install the migrations:
```bash
$ bin/rails solid_queue:install:migrations
```

Or, if you're using a different database for Solid Queue:

```bash
$ bin/rails solid_queue:install:migrations DATABASE=<the_name_of_your_solid_queue_db>
```

And then follow the steps above, running the first one, then deploying the code, then running the second one.

## New behaviour when workers are killed
From this version onwards, when a worker is killed and the supervisor can detect that, it'll fail in-progress jobs claimed by that worker. For this to work correctly, you need to run the above migration and ensure you restart any supervisors you'd have.


# Upgrading to version 0.5.x
This version includes a new migration to improve recurring tasks. To install it, just run:

```bash
$ bin/rails solid_queue:install:migrations
```

Or, if you're using a different database for Solid Queue:

```bash
$ bin/rails solid_queue:install:migrations DATABASE=<the_name_of_your_solid_queue_db>
```

And then run the migrations.


# Upgrading to version 0.4.x
This version introduced an _async_ mode (this mode has been removed in version 0.7.0) to run the supervisor and have all workers and dispatchers run as part of the same process as the supervisor, instead of separate, forked, processes. Together with this, we introduced some changes in how the supervisor is started. Prior to this change, you could choose whether you wanted to run workers, dispatchers or both, by starting Solid Queue as `solid_queue:work` or `solid_queue:dispatch`. From version 0.4.0, the only option available is:

```
$ bundle exec rake solid_queue:start
```
Whether the supervisor starts workers, dispatchers or both will depend on your configuration. For example, if you don't configure any dispatchers, only workers will be started. That is, with this configuration:

```yml
production:
  workers:
    - queues: [ real_time, background ]
      threads: 5
      polling_interval: 0.1
      processes: 3
```
the supervisor will run 3 workers, each one with 5 threads, and no dispatchers. With this configuration:
```yml
production:
  dispatchers:
    - polling_interval: 1
      batch_size: 500
      concurrency_maintenance_interval: 300
```
the supervisor will run 1 dispatcher and no workers.


# Upgrading to version 0.3.x
This version introduced support for [recurring (cron-style) jobs](https://github.com/rails/solid_queue/blob/main/README.md#recurring-tasks), and it needs a new DB migration for it. To install it, just run:

```bash
$ bin/rails solid_queue:install:migrations
```

Or, if you're using a different database for Solid Queue:

```bash
$ bin/rails solid_queue:install:migrations DATABASE=<the_name_of_your_solid_queue_db>
```

And then run the migrations.
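As a sketch of the no-downtime pattern those steps describe: the new column is `name`, per the migration file names in the changed-files list above, but the backfill value below is only a placeholder, since the gem's own migration computes real names.

```ruby
# Step 1 — ships first: add the column as nullable so running processes keep registering.
class AddNameToProcesses < ActiveRecord::Migration[7.1]
  def change
    add_column :solid_queue_processes, :name, :string
  end
end

# Step 2 — run only after deploying code that writes the column: backfill, then enforce NOT NULL.
class MakeNameNotNull < ActiveRecord::Migration[7.1]
  def up
    # Placeholder backfill; the real migration assigns meaningful names to each process.
    SolidQueue::Process.where(name: nil).find_each do |process|
      process.update_columns(name: "process-#{process.id}")
    end

    change_column_null :solid_queue_processes, :name, false
  end

  def down
    change_column_null :solid_queue_processes, :name, true
  end
end
```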
data/app/models/solid_queue/claimed_execution.rb
CHANGED
@@ -29,8 +29,21 @@ class SolidQueue::ClaimedExecution < SolidQueue::Execution
     def release_all
       SolidQueue.instrument(:release_many_claimed) do |payload|
         includes(:job).tap do |executions|
-          payload[:size] = executions.size
           executions.each(&:release)
+
+          payload[:size] = executions.size
+        end
+      end
+    end
+
+    def fail_all_with(error)
+      SolidQueue.instrument(:fail_many_claimed) do |payload|
+        includes(:job).tap do |executions|
+          executions.each { |execution| execution.failed_with(error) }
+
+          payload[:process_ids] = executions.map(&:process_id).uniq
+          payload[:job_ids] = executions.map(&:job_id).uniq
+          payload[:size] = executions.size
         end
       end
     end
@@ -69,6 +82,13 @@ class SolidQueue::ClaimedExecution < SolidQueue::Execution
     raise UndiscardableError, "Can't discard a job in progress"
   end
 
+  def failed_with(error)
+    transaction do
+      job.failed_with(error)
+      destroy!
+    end
+  end
+
   private
     def execute
       ActiveJob::Base.execute(job.arguments)
@@ -83,11 +103,4 @@ class SolidQueue::ClaimedExecution < SolidQueue::Execution
         destroy!
       end
     end
-
-    def failed_with(error)
-      transaction do
-        job.failed_with(error)
-        destroy!
-      end
-    end
 end
data/app/models/solid_queue/process/executor.rb
CHANGED
@@ -8,7 +8,19 @@ module SolidQueue
       included do
         has_many :claimed_executions
 
-        after_destroy
+        after_destroy :release_all_claimed_executions
+      end
+
+      def fail_all_claimed_executions_with(error)
+        if claims_executions?
+          claimed_executions.fail_all_with(error)
+        end
+      end
+
+      def release_all_claimed_executions
+        if claims_executions?
+          claimed_executions.release_all
+        end
       end
 
       private
data/app/models/solid_queue/process/prunable.rb
CHANGED
@@ -15,11 +15,18 @@ module SolidQueue
             prunable.non_blocking_lock.find_in_batches(batch_size: 50) do |batch|
               payload[:size] += batch.size
 
-              batch.each
+              batch.each(&:prune)
             end
           end
         end
       end
+
+      def prune
+        error = Processes::ProcessPrunedError.new(last_heartbeat_at)
+        fail_all_claimed_executions_with(error)
+
+        deregister(pruned: true)
+      end
     end
   end
 end
data/app/models/solid_queue/process.rb
CHANGED
@@ -4,7 +4,7 @@ class SolidQueue::Process < SolidQueue::Record
   include Executor, Prunable
 
   belongs_to :supervisor, class_name: "SolidQueue::Process", optional: true, inverse_of: :supervisees
-  has_many :supervisees, class_name: "SolidQueue::Process", inverse_of: :supervisor, foreign_key: :supervisor_id
+  has_many :supervisees, class_name: "SolidQueue::Process", inverse_of: :supervisor, foreign_key: :supervisor_id
 
   store :metadata, coder: JSON
 
@@ -13,10 +13,10 @@ class SolidQueue::Process < SolidQueue::Record
       create!(attributes.merge(last_heartbeat_at: Time.current)).tap do |process|
         payload[:process_id] = process.id
       end
+    rescue Exception => error
+      payload[:error] = error
+      raise
     end
-  rescue Exception => error
-    SolidQueue.instrument :register_process, **attributes.merge(error: error)
-    raise
   end
 
   def heartbeat
@@ -25,12 +25,19 @@ class SolidQueue::Process < SolidQueue::Record
 
   def deregister(pruned: false)
     SolidQueue.instrument :deregister_process, process: self, pruned: pruned do |payload|
-      payload[:claimed_size] = claimed_executions.size if claims_executions?
-
       destroy!
+
+      unless supervised? || pruned
+        supervisees.each(&:deregister)
+      end
     rescue Exception => error
       payload[:error] = error
       raise
     end
   end
+
+  private
+    def supervised?
+      supervisor_id.present?
+    end
 end
data/app/models/solid_queue/recurring_execution.rb
CHANGED
@@ -7,16 +7,29 @@ module SolidQueue
     scope :clearable, -> { where.missing(:job) }
 
     class << self
+      def create_or_insert!(**attributes)
+        if connection.supports_insert_conflict_target?
+          # PostgreSQL fails and aborts the current transaction when it hits a duplicate key conflict
+          # during two concurrent INSERTs for the same value of an unique index. We need to explicitly
+          # indicate unique_by to ignore duplicate rows by this value when inserting
+          unless insert(attributes, unique_by: [ :task_key, :run_at ]).any?
+            raise AlreadyRecorded
+          end
+        else
+          create!(**attributes)
+        end
+      rescue ActiveRecord::RecordNotUnique
+        raise AlreadyRecorded
+      end
+
       def record(task_key, run_at, &block)
         transaction do
           block.call.tap do |active_job|
-            if active_job
-
+            if active_job && active_job.successfully_enqueued?
+              create_or_insert!(job_id: active_job.provider_job_id, task_key: task_key, run_at: run_at)
             end
           end
         end
-      rescue ActiveRecord::RecordNotUnique => e
-        raise AlreadyRecorded
       end
 
       def clear_in_batches(batch_size: 500)
data/app/models/solid_queue/recurring_task/arguments.rb
ADDED
@@ -0,0 +1,17 @@
# frozen_string_literal: true

require "active_job/arguments"

module SolidQueue
  class RecurringTask::Arguments
    class << self
      def load(data)
        data.nil? ? [] : ActiveJob::Arguments.deserialize(ActiveSupport::JSON.load(data))
      end

      def dump(data)
        ActiveSupport::JSON.dump(ActiveJob::Arguments.serialize(Array(data)))
      end
    end
  end
end
data/app/models/solid_queue/recurring_task.rb
ADDED
@@ -0,0 +1,122 @@
# frozen_string_literal: true

require "fugit"

module SolidQueue
  class RecurringTask < Record
    serialize :arguments, coder: Arguments, default: []

    validate :supported_schedule
    validate :existing_job_class

    scope :static, -> { where(static: true) }

    class << self
      def wrap(args)
        args.is_a?(self) ? args : from_configuration(args.first, **args.second)
      end

      def from_configuration(key, **options)
        new(key: key, class_name: options[:class], schedule: options[:schedule], arguments: options[:args])
      end

      def create_or_update_all(tasks)
        if connection.supports_insert_conflict_target?
          # PostgreSQL fails and aborts the current transaction when it hits a duplicate key conflict
          # during two concurrent INSERTs for the same value of an unique index. We need to explicitly
          # indicate unique_by to ignore duplicate rows by this value when inserting
          upsert_all tasks.map(&:attributes_for_upsert), unique_by: :key
        else
          upsert_all tasks.map(&:attributes_for_upsert)
        end
      end
    end

    def delay_from_now
      [ (next_time - Time.current).to_f, 0 ].max
    end

    def next_time
      parsed_schedule.next_time.utc
    end

    def enqueue(at:)
      SolidQueue.instrument(:enqueue_recurring_task, task: key, at: at) do |payload|
        active_job = if using_solid_queue_adapter?
          enqueue_and_record(run_at: at)
        else
          payload[:other_adapter] = true

          perform_later do |job|
            unless job.successfully_enqueued?
              payload[:enqueue_error] = job.enqueue_error&.message
            end
          end
        end

        payload[:active_job_id] = active_job.job_id if active_job
      rescue RecurringExecution::AlreadyRecorded
        payload[:skipped] = true
      rescue Job::EnqueueError => error
        payload[:enqueue_error] = error.message
      end
    end

    def to_s
      "#{class_name}.perform_later(#{arguments.map(&:inspect).join(",")}) [ #{parsed_schedule.original} ]"
    end

    def attributes_for_upsert
      attributes.without("id", "created_at", "updated_at")
    end

    private
      def supported_schedule
        unless parsed_schedule.instance_of?(Fugit::Cron)
          errors.add :schedule, :unsupported, message: "is not a supported recurring schedule"
        end
      end

      def existing_job_class
        unless job_class.present?
          errors.add :class_name, :undefined, message: "doesn't correspond to an existing class"
        end
      end

      def using_solid_queue_adapter?
        job_class.queue_adapter_name.inquiry.solid_queue?
      end

      def enqueue_and_record(run_at:)
        RecurringExecution.record(key, run_at) do
          job_class.new(*arguments_with_kwargs).tap do |active_job|
            active_job.run_callbacks(:enqueue) do
              Job.enqueue(active_job)
            end
            active_job.successfully_enqueued = true
          end
        end
      end

      def perform_later(&block)
        job_class.perform_later(*arguments_with_kwargs, &block)
      end

      def arguments_with_kwargs
        if arguments.last.is_a?(Hash)
          arguments[0...-1] + [ Hash.ruby2_keywords_hash(arguments.last) ]
        else
          arguments
        end
      end


      def parsed_schedule
        @parsed_schedule ||= Fugit.parse(schedule)
      end

      def job_class
        @job_class ||= class_name&.safe_constantize
      end
  end
end
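For context on how these records get populated: recurring tasks are declared under a dispatcher's `recurring_tasks` key and loaded via `from_configuration` above, which maps the `class`, `schedule` and `args` options onto this model. A sketch of such a declaration (the task key, job class and schedule are illustrative):

```yml
dispatchers:
  - polling_interval: 1
    recurring_tasks:
      my_periodic_job:
        class: MyJob
        args: [ 42, { status: "custom_status" } ]
        schedule: every second
```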
data/app/models/solid_queue/semaphore.rb
CHANGED
@@ -17,6 +17,17 @@ module SolidQueue
     def signal_all(jobs)
       Proxy.signal_all(jobs)
     end
+
+    # Requires a unique index on key
+    def create_unique_by(attributes)
+      if connection.supports_insert_conflict_target?
+        insert({ **attributes }, unique_by: :key).any?
+      else
+        create!(**attributes)
+      end
+    rescue ActiveRecord::RecordNotUnique
+      false
+    end
   end
 
   class Proxy
@@ -44,15 +55,17 @@ module SolidQueue
       attr_accessor :job
 
       def attempt_creation
-        Semaphore.
-
-      rescue ActiveRecord::RecordNotUnique
-        if limit == 1 then false
+        if Semaphore.create_unique_by(key: key, value: limit - 1, expires_at: expires_at)
+          true
         else
-
+          check_limit_or_decrement
         end
       end
 
+      def check_limit_or_decrement
+        limit == 1 ? false : attempt_decrement
+      end
+
       def attempt_decrement
         Semaphore.available.where(key: key).update_all([ "value = value - 1, expires_at = ?", expires_at ]) > 0
       end
data/db/migrate/20240719134516_create_recurring_tasks.rb
ADDED
@@ -0,0 +1,20 @@
class CreateRecurringTasks < ActiveRecord::Migration[7.1]
  def change
    create_table :solid_queue_recurring_tasks do |t|
      t.string :key, null: false, index: { unique: true }
      t.string :schedule, null: false
      t.string :command, limit: 2048
      t.string :class_name
      t.text :arguments

      t.string :queue_name
      t.integer :priority, default: 0

      t.boolean :static, default: true, index: true

      t.text :description

      t.timestamps
    end
  end
end