inst-jobs 0.13.2 → 0.13.3
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/lib/delayed/backend/active_record.rb +12 -7
- data/lib/delayed/backend/base.rb +3 -3
- data/lib/delayed/backend/redis/job.rb +2 -2
- data/lib/delayed/settings.rb +1 -1
- data/lib/delayed/version.rb +1 -1
- data/lib/delayed/work_queue/parent_process/client.rb +7 -1
- data/lib/delayed/work_queue/parent_process/server.rb +36 -27
- data/spec/active_record_job_spec.rb +8 -8
- data/spec/delayed/work_queue/parent_process/server_spec.rb +51 -13
- data/spec/spec_helper.rb +1 -1
- metadata +3 -3
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA1:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: 87a4b51b71b02dd1ab9e4817dffbc6b4780c4224
|
4
|
+
data.tar.gz: e6a9a26a33f424950c37c83238085233be136e72
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: 21bb8f405b710a970a8e70570934d6344ebdb25d91b811542f6d8cc4fc1008d8550570960497a1f15d8a27c110a61e2dba1a819cf1c7982980f5269cb49bedfe
|
7
|
+
data.tar.gz: 2eff9e07b7b083bee75d4536d50434f92b12f860c6390160ab90ff1737164fd83917b4d59fe64495278be2f75affe3cf63684d64b3a3930dfeb15a7d9a96feee
|
@@ -202,8 +202,8 @@ module Delayed
|
|
202
202
|
queue = Delayed::Settings.queue,
|
203
203
|
min_priority = nil,
|
204
204
|
max_priority = nil,
|
205
|
-
|
206
|
-
|
205
|
+
prefetch: 0,
|
206
|
+
prefetch_owner: nil)
|
207
207
|
|
208
208
|
check_queue(queue)
|
209
209
|
check_priorities(min_priority, max_priority)
|
@@ -218,7 +218,7 @@ module Delayed
|
|
218
218
|
effective_worker_names = Array(worker_names)
|
219
219
|
|
220
220
|
target_jobs = all_available(queue, min_priority, max_priority).
|
221
|
-
limit(effective_worker_names.length +
|
221
|
+
limit(effective_worker_names.length + prefetch).
|
222
222
|
lock
|
223
223
|
jobs_with_row_number = all.from(target_jobs).
|
224
224
|
select("id, ROW_NUMBER() OVER () AS row_number")
|
@@ -226,8 +226,8 @@ module Delayed
|
|
226
226
|
effective_worker_names.each_with_index do |worker, i|
|
227
227
|
updates << "WHEN #{i + 1} THEN #{connection.quote(worker)} "
|
228
228
|
end
|
229
|
-
if
|
230
|
-
updates << "ELSE #{connection.quote(
|
229
|
+
if prefetch_owner
|
230
|
+
updates << "ELSE #{connection.quote(prefetch_owner)} "
|
231
231
|
end
|
232
232
|
updates << "END, locked_at = #{connection.quote(db_time_now)}"
|
233
233
|
# joins and returning in an update! just bypass AR
|
@@ -238,8 +238,8 @@ module Delayed
|
|
238
238
|
# all of the jobs we tried to lock had already been locked by someone else
|
239
239
|
if worker_names.is_a?(Array)
|
240
240
|
result = jobs.index_by(&:locked_by)
|
241
|
-
# all of the
|
242
|
-
result[
|
241
|
+
# all of the prefetched jobs can come back as an array
|
242
|
+
result[prefetch_owner] = jobs.select { |j| j.locked_by == prefetch_owner } if prefetch_owner
|
243
243
|
return result
|
244
244
|
else
|
245
245
|
return jobs.first
|
@@ -326,6 +326,11 @@ module Delayed
|
|
326
326
|
end
|
327
327
|
end
|
328
328
|
|
329
|
+
def self.unlock_orphaned_prefetched_jobs
|
330
|
+
horizon = db_time_now - Settings.parent_process[:prefetched_jobs_timeout] * 4
|
331
|
+
where("locked_by LIKE 'prefetch:%' AND locked_at<?", horizon).update_all(locked_at: nil, locked_by: nil)
|
332
|
+
end
|
333
|
+
|
329
334
|
def self.unlock(jobs)
|
330
335
|
unlocked = where(id: jobs).update_all(locked_at: nil, locked_by: nil)
|
331
336
|
jobs.each(&:unlock)
|
data/lib/delayed/backend/base.rb
CHANGED
@@ -118,9 +118,9 @@ module Delayed
|
|
118
118
|
Time.now.utc
|
119
119
|
end
|
120
120
|
|
121
|
-
def
|
122
|
-
horizon = db_time_now - Settings.parent_process[:
|
123
|
-
orphaned_jobs = running_jobs.select { |job| job.locked_by.start_with?('
|
121
|
+
def unlock_orphaned_prefetched_jobs
|
122
|
+
horizon = db_time_now - Settings.parent_process[:prefetched_jobs_timeout] * 4
|
123
|
+
orphaned_jobs = running_jobs.select { |job| job.locked_by.start_with?('prefetch:') && job.locked_at < horizon }
|
124
124
|
return 0 if orphaned_jobs.empty?
|
125
125
|
unlock(orphaned_jobs)
|
126
126
|
end
|
@@ -222,8 +222,8 @@ class Job
|
|
222
222
|
queue = Delayed::Settings.queue,
|
223
223
|
min_priority = Delayed::MIN_PRIORITY,
|
224
224
|
max_priority = Delayed::MAX_PRIORITY,
|
225
|
-
|
226
|
-
|
225
|
+
prefetch: nil,
|
226
|
+
prefetch_owner: nil)
|
227
227
|
|
228
228
|
check_queue(queue)
|
229
229
|
check_priorities(min_priority, max_priority)
|
data/lib/delayed/settings.rb
CHANGED
data/lib/delayed/version.rb
CHANGED
@@ -18,11 +18,17 @@ class ParentProcess
|
|
18
18
|
# to wait for anything to be available on the 'wire', this is a valid
|
19
19
|
# assumption because we control the server and it's a Unix domain socket,
|
20
20
|
# not TCP.
|
21
|
-
|
21
|
+
if socket.eof?
|
22
|
+
# Other end closed gracefully, so should we
|
23
|
+
logger.debug("server closed connection")
|
24
|
+
return reset_connection
|
25
|
+
end
|
26
|
+
|
22
27
|
Marshal.load(socket).tap do |response|
|
23
28
|
unless response.nil? || (response.is_a?(Delayed::Job) && response.locked_by == worker_name)
|
24
29
|
raise(ProtocolError, "response is not a locked job: #{response.inspect}")
|
25
30
|
end
|
31
|
+
logger.debug("Received job #{response.id}")
|
26
32
|
end
|
27
33
|
rescue SystemCallError, IOError => ex
|
28
34
|
logger.error("Work queue connection lost, reestablishing on next poll. (#{ex})")
|
@@ -11,7 +11,7 @@ class ParentProcess
|
|
11
11
|
@parent_pid = parent_pid
|
12
12
|
@clients = {}
|
13
13
|
@waiting_clients = {}
|
14
|
-
@
|
14
|
+
@prefetched_jobs = {}
|
15
15
|
|
16
16
|
@config = config
|
17
17
|
@client_timeout = config['server_socket_timeout'] || 10.0 # left for backwards compat
|
@@ -30,12 +30,12 @@ class ParentProcess
|
|
30
30
|
def run
|
31
31
|
logger.debug "Starting work queue process"
|
32
32
|
|
33
|
-
|
33
|
+
last_orphaned_prefetched_jobs_purge = Job.db_time_now - rand(15 * 60)
|
34
34
|
while !exit?
|
35
35
|
run_once
|
36
|
-
if
|
37
|
-
Job.
|
38
|
-
|
36
|
+
if last_orphaned_prefetched_jobs_purge + 15 * 60 < Job.db_time_now
|
37
|
+
Job.unlock_orphaned_prefetched_jobs
|
38
|
+
last_orphaned_prefetched_jobs_purge = Job.db_time_now
|
39
39
|
end
|
40
40
|
end
|
41
41
|
|
@@ -43,7 +43,7 @@ class ParentProcess
|
|
43
43
|
logger.debug "WorkQueue Server died: #{e.inspect}", :error
|
44
44
|
raise
|
45
45
|
ensure
|
46
|
-
|
46
|
+
unlock_all_prefetched_jobs
|
47
47
|
end
|
48
48
|
|
49
49
|
def run_once
|
@@ -54,7 +54,7 @@ class ParentProcess
|
|
54
54
|
readable.each { |s| handle_read(s) }
|
55
55
|
end
|
56
56
|
check_for_work
|
57
|
-
|
57
|
+
unlock_timed_out_prefetched_jobs
|
58
58
|
end
|
59
59
|
|
60
60
|
def handle_read(socket)
|
@@ -82,9 +82,12 @@ class ParentProcess
|
|
82
82
|
# request and then leave the socket open. Doing so would leave us hanging
|
83
83
|
# in Marshal.load forever. This is only a reasonable assumption because we
|
84
84
|
# control the client.
|
85
|
-
return drop_socket(socket) if socket.eof?
|
86
|
-
worker_name, worker_config = Marshal.load(socket)
|
87
85
|
client = @clients[socket]
|
86
|
+
if socket.eof?
|
87
|
+
logger.debug("Client #{client.name} closed connection")
|
88
|
+
return drop_socket(socket)
|
89
|
+
end
|
90
|
+
worker_name, worker_config = Marshal.load(socket)
|
88
91
|
client.name = worker_name
|
89
92
|
client.working = false
|
90
93
|
(@waiting_clients[worker_config] ||= []) << client
|
@@ -95,17 +98,18 @@ class ParentProcess
|
|
95
98
|
|
96
99
|
def check_for_work
|
97
100
|
@waiting_clients.each do |(worker_config, workers)|
|
98
|
-
|
99
|
-
logger.debug("I have #{
|
100
|
-
while !
|
101
|
-
job =
|
101
|
+
prefetched_jobs = @prefetched_jobs[worker_config] ||= []
|
102
|
+
logger.debug("I have #{prefetched_jobs.length} jobs for #{workers.length} waiting workers")
|
103
|
+
while !prefetched_jobs.empty? && !workers.empty?
|
104
|
+
job = prefetched_jobs.shift
|
102
105
|
client = workers.shift
|
103
106
|
# couldn't re-lock it for some reason
|
104
|
-
unless job.transfer_lock!(from:
|
107
|
+
unless job.transfer_lock!(from: prefetch_owner, to: client.name)
|
105
108
|
workers.unshift(client)
|
106
109
|
next
|
107
110
|
end
|
108
111
|
begin
|
112
|
+
logger.debug("Sending prefetched job #{job.id} to #{client.name}")
|
109
113
|
client_timeout { Marshal.dump(job, client.socket) }
|
110
114
|
rescue SystemCallError, IOError, Timeout::Error => ex
|
111
115
|
logger.error("Failed to send pre-fetched job to #{client.name}: #{ex.inspect}")
|
@@ -124,18 +128,19 @@ class ParentProcess
|
|
124
128
|
worker_config[:queue],
|
125
129
|
worker_config[:min_priority],
|
126
130
|
worker_config[:max_priority],
|
127
|
-
|
128
|
-
|
131
|
+
prefetch: Settings.fetch_batch_size * (worker_config[:workers] || 1) - recipients.length,
|
132
|
+
prefetch_owner: prefetch_owner)
|
129
133
|
response.each do |(worker_name, job)|
|
130
|
-
if worker_name ==
|
134
|
+
if worker_name == prefetch_owner
|
131
135
|
# it's actually an array of all the extra jobs
|
132
|
-
|
136
|
+
prefetched_jobs.concat(job)
|
133
137
|
next
|
134
138
|
end
|
135
139
|
client = workers.find { |worker| worker.name == worker_name }
|
136
140
|
client.working = true
|
137
141
|
@waiting_clients[worker_config].delete(client)
|
138
142
|
begin
|
143
|
+
logger.debug("Sending job #{job.id} to #{client.name}")
|
139
144
|
client_timeout { Marshal.dump(job, client.socket) }
|
140
145
|
rescue SystemCallError, IOError, Timeout::Error => ex
|
141
146
|
logger.error("Failed to send job to #{client.name}: #{ex.inspect}")
|
@@ -147,22 +152,22 @@ class ParentProcess
|
|
147
152
|
end
|
148
153
|
end
|
149
154
|
|
150
|
-
def
|
151
|
-
@
|
155
|
+
def unlock_timed_out_prefetched_jobs
|
156
|
+
@prefetched_jobs.each do |(worker_config, jobs)|
|
152
157
|
next if jobs.empty?
|
153
|
-
if jobs.first.locked_at < Time.now.utc - Settings.parent_process[:
|
158
|
+
if jobs.first.locked_at < Time.now.utc - Settings.parent_process[:prefetched_jobs_timeout]
|
154
159
|
Delayed::Job.unlock(jobs)
|
155
|
-
@
|
160
|
+
@prefetched_jobs[worker_config] = []
|
156
161
|
end
|
157
162
|
end
|
158
163
|
end
|
159
164
|
|
160
|
-
def
|
161
|
-
@
|
165
|
+
def unlock_all_prefetched_jobs
|
166
|
+
@prefetched_jobs.each do |(_worker_config, jobs)|
|
162
167
|
next if jobs.empty?
|
163
168
|
Delayed::Job.unlock(jobs)
|
164
169
|
end
|
165
|
-
@
|
170
|
+
@prefetched_jobs = {}
|
166
171
|
end
|
167
172
|
|
168
173
|
def drop_socket(socket)
|
@@ -171,15 +176,19 @@ class ParentProcess
|
|
171
176
|
socket.close
|
172
177
|
rescue IOError
|
173
178
|
end
|
179
|
+
client = @clients[socket]
|
174
180
|
@clients.delete(socket)
|
181
|
+
@waiting_clients.each do |(_config, workers)|
|
182
|
+
workers.delete(client)
|
183
|
+
end
|
175
184
|
end
|
176
185
|
|
177
186
|
def exit?
|
178
187
|
parent_exited?
|
179
188
|
end
|
180
189
|
|
181
|
-
def
|
182
|
-
"
|
190
|
+
def prefetch_owner
|
191
|
+
"prefetch:#{Socket.gethostname rescue 'X'}"
|
183
192
|
end
|
184
193
|
|
185
194
|
def parent_exited?
|
@@ -244,20 +244,20 @@ describe 'Delayed::Backed::ActiveRecord::Job' do
|
|
244
244
|
end
|
245
245
|
end
|
246
246
|
|
247
|
-
it "unlocks orphaned
|
247
|
+
it "unlocks orphaned prefetched_jobs" do
|
248
248
|
job1 = Delayed::Job.new(:tag => 'tag')
|
249
249
|
job2 = Delayed::Job.new(:tag => 'tag')
|
250
250
|
|
251
|
-
job1.create_and_lock!("
|
251
|
+
job1.create_and_lock!("prefetch:a")
|
252
252
|
job1.locked_at = Delayed::Job.db_time_now - 15 * 60
|
253
253
|
job1.save!
|
254
|
-
job2.create_and_lock!("
|
254
|
+
job2.create_and_lock!("prefetch:a")
|
255
255
|
|
256
|
-
expect(Delayed::Job.
|
257
|
-
expect(Delayed::Job.
|
256
|
+
expect(Delayed::Job.unlock_orphaned_prefetched_jobs).to eq 1
|
257
|
+
expect(Delayed::Job.unlock_orphaned_prefetched_jobs).to eq 0
|
258
258
|
|
259
259
|
expect(Delayed::Job.find(job1.id).locked_by).to be_nil
|
260
|
-
expect(Delayed::Job.find(job2.id).locked_by).to eq '
|
260
|
+
expect(Delayed::Job.find(job2.id).locked_by).to eq 'prefetch:a'
|
261
261
|
end
|
262
262
|
|
263
263
|
it "allows fetching multiple jobs at once" do
|
@@ -272,8 +272,8 @@ describe 'Delayed::Backed::ActiveRecord::Job' do
|
|
272
272
|
it "allows fetching extra jobs" do
|
273
273
|
jobs = 5.times.map { Delayed::Job.create :payload_object => SimpleJob.new }
|
274
274
|
locked_jobs = Delayed::Job.get_and_lock_next_available(['worker1'],
|
275
|
-
|
276
|
-
|
275
|
+
prefetch: 2,
|
276
|
+
prefetch_owner: 'work_queue')
|
277
277
|
expect(locked_jobs.length).to eq 2
|
278
278
|
expect(locked_jobs.keys).to eq ['worker1', 'work_queue']
|
279
279
|
expect(locked_jobs['worker1']).to eq jobs[0]
|
@@ -1,13 +1,25 @@
|
|
1
1
|
require 'spec_helper'
|
2
2
|
|
3
3
|
RSpec.describe Delayed::WorkQueue::ParentProcess::Server do
|
4
|
+
class JobClass
|
5
|
+
attr_reader :id
|
6
|
+
|
7
|
+
def initialize
|
8
|
+
@id = rand
|
9
|
+
end
|
10
|
+
|
11
|
+
def ==(other)
|
12
|
+
self.id == other.id
|
13
|
+
end
|
14
|
+
end
|
15
|
+
|
4
16
|
let(:parent) { Delayed::WorkQueue::ParentProcess.new }
|
5
17
|
let(:subject) { described_class.new(listen_socket) }
|
6
18
|
let(:listen_socket) { Socket.unix_server_socket(parent.server_address) }
|
7
|
-
let(:job) {
|
19
|
+
let(:job) { JobClass.new }
|
8
20
|
let(:worker_config) { { queue: "queue_name", min_priority: 1, max_priority: 2 } }
|
9
21
|
let(:args) { ["worker_name", worker_config] }
|
10
|
-
let(:job_args) { [["worker_name"], "queue_name", 1, 2, hash_including(
|
22
|
+
let(:job_args) { [["worker_name"], "queue_name", 1, 2, hash_including(prefetch: 4)] }
|
11
23
|
|
12
24
|
before :all do
|
13
25
|
Delayed.select_backend(Delayed::Backend::ActiveRecord::Job)
|
@@ -47,33 +59,35 @@ RSpec.describe Delayed::WorkQueue::ParentProcess::Server do
|
|
47
59
|
client2 = Socket.unix(subject.listen_socket.local_address.unix_path)
|
48
60
|
subject.run_once
|
49
61
|
|
50
|
-
|
51
|
-
|
62
|
+
job1 = JobClass.new
|
63
|
+
job2 = JobClass.new
|
64
|
+
job_args = [["worker_name1", "worker_name2"], "queue_name", 1, 2, hash_including(prefetch: 3)]
|
65
|
+
jobs = { 'worker_name1' => job1, 'worker_name2' => job2 }
|
52
66
|
|
53
67
|
expect(Delayed::Job).to receive(:get_and_lock_next_available).with(*job_args).and_return(jobs)
|
54
68
|
Marshal.dump(["worker_name1", worker_config], client1)
|
55
69
|
Marshal.dump(["worker_name2", worker_config], client2)
|
56
70
|
subject.run_once
|
57
|
-
expect(Marshal.load(client1)).to eq(
|
58
|
-
expect(Marshal.load(client2)).to eq(
|
71
|
+
expect(Marshal.load(client1)).to eq(job1)
|
72
|
+
expect(Marshal.load(client2)).to eq(job2)
|
59
73
|
end
|
60
74
|
|
61
|
-
it 'will
|
75
|
+
it 'will prefetch and use jobs' do
|
62
76
|
client = Socket.unix(subject.listen_socket.local_address.unix_path)
|
63
77
|
subject.run_once
|
64
78
|
|
65
|
-
allow(subject).to receive(:
|
66
|
-
job_args = [["worker_name1"], "queue_name", 1, 2,
|
79
|
+
allow(subject).to receive(:prefetch_owner).and_return('work_queue:X')
|
80
|
+
job_args = [["worker_name1"], "queue_name", 1, 2, prefetch: 4, prefetch_owner: 'work_queue:X']
|
67
81
|
job2 = Delayed::Job.new(:tag => 'tag')
|
68
82
|
job2.create_and_lock!('work_queue:X')
|
69
83
|
job3 = Delayed::Job.new(:tag => 'tag')
|
70
84
|
job3.create_and_lock!('work_queue:X')
|
71
|
-
jobs = { 'worker_name1' =>
|
85
|
+
jobs = { 'worker_name1' => job, 'work_queue:X' => [job2, job3]}
|
72
86
|
|
73
87
|
expect(Delayed::Job).to receive(:get_and_lock_next_available).once.with(*job_args).and_return(jobs)
|
74
88
|
Marshal.dump(["worker_name1", worker_config], client)
|
75
89
|
subject.run_once
|
76
|
-
expect(Marshal.load(client)).to eq(
|
90
|
+
expect(Marshal.load(client)).to eq(job)
|
77
91
|
Marshal.dump(["worker_name1", worker_config], client)
|
78
92
|
subject.run_once
|
79
93
|
expect(Marshal.load(client)).to eq(job2)
|
@@ -112,11 +126,35 @@ RSpec.describe Delayed::WorkQueue::ParentProcess::Server do
|
|
112
126
|
subject.run_once
|
113
127
|
|
114
128
|
Marshal.dump(args, client)
|
129
|
+
# make sure the server knows the client is waiting for a job
|
130
|
+
subject.run_once
|
131
|
+
|
132
|
+
client.close
|
133
|
+
expect { subject.run_once }.to change(subject, :connected_clients).by(-1)
|
134
|
+
expect(subject.instance_variable_get(:@waiting_clients).first.last).to eq []
|
135
|
+
end
|
136
|
+
|
137
|
+
it 'drops the client when a write fails' do
|
138
|
+
client = Socket.unix(subject.listen_socket.local_address.unix_path)
|
139
|
+
subject.run_once
|
140
|
+
|
141
|
+
Marshal.dump(args, client)
|
142
|
+
subject.run_once
|
143
|
+
|
144
|
+
client.close
|
115
145
|
|
116
146
|
server_client_socket = subject.clients.keys.first
|
147
|
+
# don't let the server see the close and process it there; we want to check a failure later
|
148
|
+
expect(subject).to receive(:handle_request).with(server_client_socket)
|
117
149
|
|
118
|
-
expect(
|
119
|
-
|
150
|
+
expect(Delayed::Job).to receive(:get_and_lock_next_available).with(*job_args).and_return('worker_name' => job)
|
151
|
+
# the job gets unlocked
|
152
|
+
expect(Delayed::Job).to receive(:unlock).with([job])
|
153
|
+
subject.run_once
|
154
|
+
|
155
|
+
# and the server removes the client from both of its internal state arrays
|
156
|
+
expect(subject.connected_clients).to eq 0
|
157
|
+
expect(subject.instance_variable_get(:@waiting_clients).first.last).to eq []
|
120
158
|
end
|
121
159
|
|
122
160
|
it 'tracks when clients are idle' do
|
data/spec/spec_helper.rb
CHANGED
@@ -41,7 +41,7 @@ Delayed::Backend::Redis::Job.redis.select ENV['TEST_ENV_NUMBER']
|
|
41
41
|
|
42
42
|
connection_config = {
|
43
43
|
adapter: :postgresql,
|
44
|
-
host: ENV['TEST_DB_HOST'],
|
44
|
+
host: ENV['TEST_DB_HOST'].presence,
|
45
45
|
encoding: 'utf8',
|
46
46
|
username: ENV['TEST_DB_USERNAME'],
|
47
47
|
database: ENV['TEST_DB_DATABASE'],
|
metadata
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: inst-jobs
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 0.13.2
|
4
|
+
version: 0.13.3
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- Tobias Luetke
|
@@ -45,14 +45,14 @@ dependencies:
|
|
45
45
|
requirements:
|
46
46
|
- - "~>"
|
47
47
|
- !ruby/object:Gem::Version
|
48
|
-
version: 3.
|
48
|
+
version: '3.4'
|
49
49
|
type: :runtime
|
50
50
|
prerelease: false
|
51
51
|
version_requirements: !ruby/object:Gem::Requirement
|
52
52
|
requirements:
|
53
53
|
- - "~>"
|
54
54
|
- !ruby/object:Gem::Version
|
55
|
-
version: 3.
|
55
|
+
version: '3.4'
|
56
56
|
- !ruby/object:Gem::Dependency
|
57
57
|
name: redis
|
58
58
|
requirement: !ruby/object:Gem::Requirement
|