rage-rb 1.15.1 → 1.17.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +25 -0
- data/Gemfile +1 -0
- data/OVERVIEW.md +32 -2
- data/README.md +3 -2
- data/lib/rage/all.rb +1 -0
- data/lib/rage/cable/adapters/redis.rb +1 -1
- data/lib/rage/cable/cable.rb +7 -2
- data/lib/rage/cable/channel.rb +4 -4
- data/lib/rage/cable/{protocol → protocols}/actioncable_v1_json.rb +30 -62
- data/lib/rage/cable/protocols/base.rb +88 -0
- data/lib/rage/cable/protocols/raw_web_socket_json.rb +144 -0
- data/lib/rage/cli.rb +20 -0
- data/lib/rage/code_loader.rb +25 -14
- data/lib/rage/configuration.rb +187 -3
- data/lib/rage/controller/api.rb +5 -1
- data/lib/rage/deferred/backends/disk.rb +258 -0
- data/lib/rage/deferred/backends/nil.rb +16 -0
- data/lib/rage/deferred/deferred.rb +95 -0
- data/lib/rage/deferred/metadata.rb +43 -0
- data/lib/rage/deferred/proxy.rb +34 -0
- data/lib/rage/deferred/queue.rb +78 -0
- data/lib/rage/deferred/task.rb +96 -0
- data/lib/rage/fiber_scheduler.rb +1 -0
- data/lib/rage/hooks.rb +25 -0
- data/lib/rage/openapi/parsers/ext/alba.rb +23 -9
- data/lib/rage/request.rb +0 -1
- data/lib/rage/response.rb +39 -0
- data/lib/rage/router/constrainer.rb +0 -2
- data/lib/rage/router/node.rb +0 -2
- data/lib/rage/rspec.rb +1 -1
- data/lib/rage/setup.rb +3 -0
- data/lib/rage/templates/controller-template/controller.rb +2 -0
- data/lib/rage/version.rb +1 -1
- data/lib/rage-rb.rb +9 -0
- data/rage.gemspec +2 -2
- metadata +17 -6
data/lib/rage/configuration.rb
CHANGED
@@ -35,6 +35,15 @@ require "erb"
 #
 # > Defines one or several old secrets that need to be rotated. Can accept a single key or an array of keys. Rage will fall back to the `FALLBACK_SECRET_KEY_BASE` environment variable if this is not set.
 #
+# • _config.after_initialize_
+#
+# > Schedule a block of code to run after Rage has finished loading the application code. Use this to reference application-level constants during the initialization process.
+# > ```
+# Rage.config.after_initialize do
+#   SUPER_USER = User.find_by!(super: true)
+# end
+# > ```
+#
 # # Middleware Configuration
 #
 # • _config.middleware.use_
@@ -115,7 +124,7 @@ require "erb"
 #
 # • _config.cable.protocol_
 #
-# > Specifies the protocol the server will use.
+# > Specifies the protocol the server will use. Supported values include {Rage::Cable::Protocols::ActioncableV1Json :actioncable_v1_json} and {Rage::Cable::Protocols::RawWebSocketJson :raw_websocket_json}. Defaults to {Rage::Cable::Protocols::ActioncableV1Json :actioncable_v1_json}.
 #
 # • _config.cable.allowed_request_origins_
 #
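
The new `:raw_websocket_json` value documented above is selected through the same `Rage.config` object used elsewhere in these comments; a minimal sketch (placing it in `config/application.rb` is an assumption, not something this diff shows):

```ruby
# opt into the new raw WebSocket JSON protocol (assumed to live in config/application.rb)
Rage.config.cable.protocol = :raw_websocket_json
```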
@@ -136,6 +145,38 @@ require "erb"
 # end
 # > ```
 #
+# # Deferred Configuration
+# • _config.deferred.backend_
+#
+# > Specifies the backend for deferred tasks. Supported values are `:disk`, which uses disk storage, or `nil`, which disables persistence of deferred tasks.
+# > The `:disk` backend accepts the following options:
+# >
+# > - `:path` - the path to the directory where deferred tasks will be stored. Defaults to `storage`.
+# > - `:prefix` - the prefix for the deferred task files. Defaults to `deferred-`.
+# > - `:fsync_frequency` - the frequency of `fsync` calls in seconds. Defaults to `0.5`.
+#
+# > ```ruby
+# config.deferred.backend = :disk, { path: "storage" }
+# > ```
+#
+# • _config.deferred.backpressure_
+#
+# > Enables the backpressure for deferred tasks. The backpressure is used to limit the number of pending tasks in the queue. It accepts a hash with the following options:
+# >
+# > - `:high_water_mark` - the maximum number of pending tasks in the queue. Defaults to `1000`.
+# > - `:low_water_mark` - the minimum number of pending tasks in the queue before the backpressure is released. Defaults to `800`.
+# > - `:timeout` - the timeout for the backpressure in seconds. Defaults to `2`.
+#
+# > ```ruby
+# config.deferred.backpressure = { high_water_mark: 1000, low_water_mark: 800, timeout: 2 }
+# > ```
+#
+# > Additionally, you can set the backpressure value to `true` to use the default values:
+#
+# > ```ruby
+# config.deferred.backpressure = true
+# ```
+#
 # # Transient Settings
 #
 # The settings described in this section should be configured using **environment variables** and are either temporary or will become the default in the future.
@@ -153,6 +194,8 @@ require "erb"
 # > Instructs Rage to not reuse Active Record connections between different fibers.
 #
 class Rage::Configuration
+  include Hooks
+
   attr_accessor :logger
   attr_reader :log_formatter, :log_level
   attr_writer :secret_key_base, :fallback_secret_key_base
@@ -197,10 +240,22 @@ class Rage::Configuration
     @openapi ||= OpenAPI.new
   end
 
+  def deferred
+    @deferred ||= Deferred.new
+  end
+
   def internal
     @internal ||= Internal.new
   end
 
+  def after_initialize(&block)
+    push_hook(block, :after_initialize)
+  end
+
+  def run_after_initialize!
+    run_hooks_for!(:after_initialize, self)
+  end
+
   class Server
     attr_accessor :port, :workers_count, :timeout, :max_clients
     attr_reader :threads_count
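
`push_hook` and `run_hooks_for!` are provided by the new `Hooks` mixin (data/lib/rage/hooks.rb in the file list above, whose body is not part of this section). The following is only a hypothetical sketch consistent with the two calls shown, not the gem's actual module:

```ruby
# hypothetical sketch — the real implementation lives in data/lib/rage/hooks.rb
module Hooks
  # remember a block under a named hook family
  def push_hook(block, family)
    @hooks ||= Hash.new { |hash, key| hash[key] = [] }
    @hooks[family] << block if block
  end

  # run every block registered for the family in the given context
  def run_hooks_for!(family, context = nil)
    return unless defined?(@hooks) && @hooks.key?(family)

    @hooks[family].each do |hook|
      context ? context.instance_exec(&hook) : hook.call
    end
  end
end
```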
@@ -257,15 +312,29 @@ class Rage::Configuration
   end
 
   class Cable
-    attr_accessor :
+    attr_accessor :allowed_request_origins, :disable_request_forgery_protection
+    attr_reader :protocol
 
     def initialize
-      @protocol = Rage::Cable::
+      @protocol = Rage::Cable::Protocols::ActioncableV1Json
       @allowed_request_origins = if Rage.env.development? || Rage.env.test?
         /localhost/
       end
     end
 
+    def protocol=(protocol)
+      @protocol = case protocol
+      when Class
+        protocol
+      when :actioncable_v1_json
+        Rage::Cable::Protocols::ActioncableV1Json
+      when :raw_websocket_json
+        Rage::Cable::Protocols::RawWebSocketJson
+      else
+        raise ArgumentError, "Unknown protocol. Supported values are `:actioncable_v1_json` and `:raw_websocket_json`."
+      end
+    end
+
     # @private
     def middlewares
       @middlewares ||= begin
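
Because the setter above also accepts a `Class`, an application can plug in a custom protocol implementation in addition to the two built-in symbols; a brief hedged sketch (the custom class name is hypothetical):

```ruby
# built-in symbols resolve to the bundled protocol classes
Rage.config.cable.protocol = :actioncable_v1_json

# a Class is stored as-is, so a custom protocol could be supplied
# (MyApp::CustomCableProtocol is a made-up name, not part of the gem)
Rage.config.cable.protocol = MyApp::CustomCableProtocol
```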
@@ -314,6 +383,121 @@ class Rage::Configuration
     attr_accessor :tag_resolver
   end
 
+  class Deferred
+    attr_reader :backpressure
+
+    def initialize
+      @configured = false
+    end
+
+    def backend
+      unless @backend_class
+        @backend_class = Rage::Deferred::Backends::Disk
+        @backend_options = parse_disk_backend_options({})
+      end
+
+      @backend_class.new(**@backend_options)
+    end
+
+    def backend=(config)
+      @configured = true
+
+      backend_id, opts = if config.is_a?(Array)
+        [config[0], config[1]]
+      else
+        [config, {}]
+      end
+
+      @backend_class = case backend_id
+      when :disk
+        @backend_options = parse_disk_backend_options(opts)
+        Rage::Deferred::Backends::Disk
+      when nil
+        Rage::Deferred::Backends::Nil
+      else
+        raise ArgumentError, "unsupported backend value; supported keys are `:disk` and `nil`"
+      end
+    end
+
+    class Backpressure
+      attr_reader :high_water_mark, :low_water_mark, :timeout, :sleep_interval, :timeout_iterations
+
+      def initialize(high_water_mark = nil, low_water_mark = nil, timeout = nil)
+        @high_water_mark = high_water_mark || 1_000
+        @low_water_mark = low_water_mark || (@high_water_mark * 0.8).round
+
+        @timeout = timeout || 2
+        @sleep_interval = 0.05
+        @timeout_iterations = (@timeout / @sleep_interval).round
+      end
+    end
+
+    def backpressure=(config)
+      @configured = true
+
+      if config == true
+        @backpressure = Backpressure.new
+        return
+      elsif config == false
+        @backpressure = nil
+        return
+      end
+
+      if opts.except(:high_water_mark, :low_water_mark, :timeout).any?
+        raise ArgumentError, "unsupported backpressure options; supported keys are `:high_water_mark`, `:low_water_mark`, `:timeout`"
+      end
+
+      high_water_mark, low_water_mark, timeout = config.values_at(:high_water_mark, :low_water_mark, :timeout)
+      @backpressure = Backpressure.new(high_water_mark, low_water_mark, timeout)
+    end
+
+    def default_disk_storage_path
+      Pathname.new("storage")
+    end
+
+    def default_disk_storage_prefix
+      "deferred-"
+    end
+
+    def has_default_disk_storage?
+      default_disk_storage_path.glob("#{default_disk_storage_prefix}*").any?
+    end
+
+    def configured?
+      @configured
+    end
+
+    private
+
+    def parse_disk_backend_options(opts)
+      if opts.except(:path, :prefix, :fsync_frequency).any?
+        raise ArgumentError, "unsupported backend options; supported values are `:path`, `:prefix`, `:fsync_frequency`"
+      end
+
+      parsed_options = {}
+
+      parsed_options[:path] = if opts[:path]
+        opts[:path].is_a?(Pathname) ? opts[:path] : Pathname.new(opts[:path])
+      else
+        default_disk_storage_path
+      end
+
+      parsed_options[:prefix] = if opts[:prefix]
+        opts[:prefix].end_with?("-") ? opts[:prefix] : "#{opts[:prefix]}-"
+      else
+        default_disk_storage_prefix
+      end
+
+      parsed_options[:fsync_frequency] = if opts[:fsync_frequency]
+        (opts[:fsync_frequency].to_i * 1_000).round
+      else
+        500
+      end
+
+      parsed_options
+    end
+  end
+
   # @private
   class Internal
     attr_accessor :rails_mode
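
To tie the `Deferred` settings class above back to the documented options, here is how a `backend=` assignment is normalized by `parse_disk_backend_options` — an illustration derived from the code in the hunk above, with made-up option values:

```ruby
Rage.config.deferred.backend = :disk, { path: "tmp/deferred", prefix: "jobs", fsync_frequency: 2 }
# :path            => wrapped into Pathname.new("tmp/deferred")
# :prefix          => becomes "jobs-" (a trailing dash is appended when missing)
# :fsync_frequency => converted to milliseconds (2_000); omitted options fall back to
#                     Pathname.new("storage"), "deferred-", and 500 ms respectively
```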
data/lib/rage/controller/api.rb
CHANGED
@@ -618,6 +618,7 @@ class RageController::API
   # stale?(etag: "123", last_modified: Time.utc(2023, 12, 15))
   # stale?(last_modified: Time.utc(2023, 12, 15))
   # stale?(etag: "123")
+  # @note `stale?` will set ETag and Last-Modified response headers made of passed arguments in the method. Value for ETag will be additionally hashified using SHA1 algorithm, whereas value for Last-Modified will be converted to the string which represents time as RFC 1123 date of HTTP-date defined by RFC 2616.
   # @note `stale?` will set the response status to 304 if the request is fresh. This side effect will cause a double render error, if `render` gets called after this method. Make sure to implement a proper conditional in your action to prevent this from happening:
   # ```ruby
   # if stale?(etag: "123")
@@ -625,7 +626,10 @@ class RageController::API
   # end
   # ```
   def stale?(etag: nil, last_modified: nil)
-
+    response.etag = etag
+    response.last_modified = last_modified
+
+    still_fresh = request.fresh?(etag: response.etag, last_modified: last_modified)
 
     head :not_modified if still_fresh
     !still_fresh
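
A minimal controller sketch of the updated behavior — `stale?` now writes the `ETag`/`Last-Modified` response headers from its arguments before checking freshness. `Post` is a hypothetical model used only for illustration:

```ruby
class PostsController < RageController::API
  def show
    post = Post.find(params[:id]) # hypothetical model lookup

    # sets the ETag / Last-Modified response headers, then returns true when the client copy is stale
    if stale?(etag: post.updated_at.to_s, last_modified: post.updated_at)
      render json: post
    end
  end
end
```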
data/lib/rage/deferred/backends/disk.rb
ADDED
@@ -0,0 +1,258 @@
+# frozen_string_literal: true
+
+require "zlib"
+
+##
+# `Rage::Deferred::Backends` implements a storage layer to persist deferred tasks.
+# A storage should implement the following instance methods:
+#
+# * `add` - called when a task has to be added to the storage;
+# * `remove` - called when a task has to be removed from the storage;
+# * `pending_tasks` - the method should iterate over the underlying storage and return a list of tasks to replay;
+#
+class Rage::Deferred::Backends::Disk
+  STORAGE_VERSION = "0"
+  STORAGE_SIZE_INCREASE_RATIO = 1.5
+
+  DEFAULT_PUBLISH_AT = "0"
+  DEFAULT_STORAGE_SIZE_LIMIT = 2_000_000
+
+  def initialize(path:, prefix:, fsync_frequency:)
+    @storage_path = path
+    @storage_prefix = "#{prefix}#{STORAGE_VERSION}"
+    @fsync_frequency = fsync_frequency
+
+    @storage_path.mkpath
+
+    # try to open and take ownership of all storage files in the storage directory
+    storage_files = @storage_path.glob("#{@storage_prefix}-*").filter_map do |file_path|
+      file = file_path.open("a+b")
+      if file.flock(File::LOCK_EX | File::LOCK_NB)
+        sleep 0.01 # reduce contention between workers
+        file
+      else
+        file.close
+      end
+    end
+
+    # if there are no storage files - create one;
+    # otherwise the first one is used as the main storage; the rest will be merged into the main storage
+    if storage_files.empty?
+      @storage = create_storage
+    else
+      @storage = storage_files[0]
+      @recovered_storages = storage_files[1..] if storage_files.length > 1
+    end
+
+    # create seed value for the task IDs
+    task_id_seed = Time.now.to_i # TODO: ensure timestamps in the file are not higher
+    @task_id_base, @task_id_i = "#{task_id_seed}-#{Process.pid}", 0
+    Iodine.run_every(1_000) do
+      task_id_seed += 1
+      @task_id_base, @task_id_i = "#{task_id_seed}-#{Process.pid}", 0
+    end
+
+    @storage_size_limit = DEFAULT_STORAGE_SIZE_LIMIT
+    @storage_size = @storage.size
+    @fsync_scheduled = false
+    @should_rotate = false
+
+    # we use different counters for different tasks:
+    # delayed tasks are stored in the hash; for regular tasks we only maintain a counter;
+    # this information is only used during storage rotation
+    @immediate_tasks_in_queue = 0
+    @delayed_tasks = {}
+
+    # ensure data is written to disk
+    @storage_has_changes = false
+    Iodine.run_every(@fsync_frequency) do
+      if @storage_has_changes
+        @storage_has_changes = false
+        @storage.fsync
+      end
+    end
+  end
+
+  # Add a record to the log representing a new task.
+  # @param task [Rage::Deferred::Task]
+  # @param publish_at [Integer, nil]
+  # @param task_id [String, nil]
+  # @return [String]
+  def add(task, publish_at: nil, task_id: nil)
+    serialized_task = Marshal.dump(task).dump
+
+    persisted_task_id = task_id || generate_task_id
+
+    entry = build_add_entry(persisted_task_id, serialized_task, publish_at)
+    write_to_storage(entry)
+
+    if publish_at
+      @delayed_tasks[persisted_task_id] = [serialized_task, publish_at]
+    else
+      @immediate_tasks_in_queue += 1
+    end
+
+    persisted_task_id
+  end
+
+  # Add a record to the log representing a task removal.
+  # @param task_id [String]
+  def remove(task_id)
+    write_to_storage(build_remove_entry(task_id))
+
+    if @delayed_tasks.has_key?(task_id)
+      @delayed_tasks.delete(task_id)
+    else
+      @immediate_tasks_in_queue -= 1
+    end
+
+    # rotate the storage once the size is over the limit and all non-delayed tasks are processed
+    rotate_storage if @should_rotate && @immediate_tasks_in_queue == 0
+  end
+
+  # Return a list of pending tasks in the storage.
+  # @return [Array<(String, Rage::Deferred::Task, Integer, Integer)>]
+  def pending_tasks
+    if @recovered_storages
+      # `@recovered_storages` will only be present if the server has previously crashed and left
+      # some storage files behind, or if the new cluster is started with fewer workers than before;
+      # TLDR: this code is expected to execute very rarely
+      @recovered_storages.each { |storage| recover_tasks(storage) }
+    end
+
+    tasks = {}
+    corrupted_tasks_count = 0
+
+    # find pending tasks in the storage
+    @storage.tap(&:rewind).each_line(chomp: true) do |entry|
+      signature, op, payload = entry[0...8], entry[9...12], entry[9..]
+      next if signature&.empty? || payload&.empty? || op&.empty?
+
+      unless signature == Zlib.crc32(payload).to_s(16).rjust(8, "0")
+        corrupted_tasks_count += 1
+        next
+      end
+
+      if op == "add"
+        task_id = entry[13...entry.index(":", 13).to_i]
+        tasks[task_id] = entry
+      elsif op == "rem"
+        task_id = entry[13..]
+        tasks.delete(task_id)
+      end
+    end
+
+    if corrupted_tasks_count != 0
+      puts "WARNING: Detected #{corrupted_tasks_count} corrupted deferred task(s)"
+    end
+
+    tasks.filter_map do |task_id, entry|
+      _, _, _, serialized_publish_at, serialized_task = entry.split(":", 5)
+
+      task = Marshal.load(serialized_task.undump)
+
+      publish_at = (serialized_publish_at == DEFAULT_PUBLISH_AT ? nil : serialized_publish_at.to_i)
+
+      if publish_at
+        @delayed_tasks[task_id] = [serialized_task, publish_at]
+      else
+        @immediate_tasks_in_queue += 1
+      end
+
+      [task_id, task, publish_at]
+
+    rescue ArgumentError, NameError => e
+      puts "ERROR: Can't deserialize the task with id #{task_id}: (#{e.class}) #{e.message}"
+      nil
+    end
+  end
+
+  private
+
+  def generate_task_id
+    @task_id_i += 1
+    "#{@task_id_base}-#{@task_id_i}"
+  end
+
+  def create_storage
+    file = @storage_path.join("#{@storage_prefix}-#{Time.now.strftime("%Y%m%d")}-#{Process.pid}-#{rand(0x100000000).to_s(36)}")
+
+    file.open("a+b").tap { |f| f.flock(File::LOCK_EX) }
+  end
+
+  def write_to_storage(content, adjust_size_limit: false)
+    @storage.write(content)
+    @storage_has_changes = true
+
+    @storage_size += content.bytesize
+    @should_rotate = true if @storage_size >= @storage_size_limit
+
+    if adjust_size_limit
+      # if the data copied from recovered storages or during the rotation takes up most of the storage, we might
+      # end up in an infinite rotation loop; instead, we dynamically increase the storage size limit
+      if @storage_size * STORAGE_SIZE_INCREASE_RATIO >= @storage_size_limit
+        @storage_size_limit *= STORAGE_SIZE_INCREASE_RATIO
+        @should_rotate = false
+      end
+    end
+  end
+
+  def rotate_storage
+    old_storage = @storage
+    @storage = nil # in case `create_storage` ends up blocking the fiber
+
+    # create a new storage and update internal state;
+    # after this point all new tasks will be written to the new storage
+    @should_rotate = false
+    @storage_size = 0
+    @storage_size_limit = DEFAULT_STORAGE_SIZE_LIMIT
+    @storage = create_storage
+
+    # copy delayed tasks to the new storage in batches
+    @delayed_tasks.keys.each_slice(100) do |task_ids|
+      entries = task_ids.filter_map do |task_id|
+        # don't copy the task if it has already been processed during the rotation
+        next unless @delayed_tasks.has_key?(task_id)
+
+        serialized_task, publish_at = @delayed_tasks[task_id]
+        build_add_entry(task_id, serialized_task, publish_at)
+      end
+
+      write_to_storage(entries.join, adjust_size_limit: true)
+
+      Fiber.pause
+    end
+
+    # delete the old storage ensuring the copied data has already been written to disk
+    Iodine.run_after(@fsync_frequency) do
+      old_storage.close
+      File.unlink(old_storage.path)
+    end
+  end
+
+  def build_add_entry(task_id, serialized_task, publish_at)
+    entry = "add:#{task_id}:#{publish_at || DEFAULT_PUBLISH_AT}:#{serialized_task}"
+    crc = Zlib.crc32(entry).to_s(16).rjust(8, "0")
+
+    "#{crc}:#{entry}\n"
+  end
+
+  def build_remove_entry(task_id)
+    entry = "rem:#{task_id}"
+    crc = Zlib.crc32(entry).to_s(16).rjust(8, "0")
+
+    "#{crc}:#{entry}\n"
+  end
+
+  def recover_tasks(storage)
+    # copy records to the main storage
+    while (content = storage.read(262_144))
+      write_to_storage(content, adjust_size_limit: true)
+    end
+
+    Iodine.run_after(@fsync_frequency) do
+      storage.close
+      File.unlink(storage.path)
+    end
+  end
+end
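
For readers of the backend above, this is what one record in its append-only log looks like, derived from `build_add_entry` and the checksum verification in `pending_tasks` (the task id and payload are stand-ins):

```ruby
require "zlib"

# one "add" record, framed exactly like build_add_entry above;
# the real payload is Marshal.dump(task).dump — a stand-in object is used here
payload = "add:1700000000-123-1:0:#{Marshal.dump(:example_task).dump}"
record  = "#{Zlib.crc32(payload).to_s(16).rjust(8, "0")}:#{payload}\n"

# on replay, pending_tasks recomputes the CRC32 prefix before trusting the record
crc, rest = record.chomp.split(":", 2)
crc == Zlib.crc32(rest).to_s(16).rjust(8, "0") # => true
```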
data/lib/rage/deferred/deferred.rb
ADDED
@@ -0,0 +1,95 @@
+# frozen_string_literal: true
+
+##
+# `Rage::Deferred` is an in-process background task queue with at-least-once delivery guarantee that allows you to schedule tasks to be executed later.
+# It can be used to offload long-running operations, such as sending emails or communicating with external APIs.
+#
+# To schedule a task, first define a task class that includes `Rage::Deferred::Task` and implements the `#perform` method.
+#
+# ```ruby
+# class SendWelcomeEmail
+#   include Rage::Deferred::Task
+#
+#   def perform(email)
+#     # logic to send the welcome email
+#   end
+# end
+# ```
+#
+# Then, push the task to the deferred queue:
+#
+# ```ruby
+# SendWelcomeEmail.enqueue(email: user.email)
+# ```
+#
+# You can also specify a delay for the task execution using the `delay` option:
+#
+# ```ruby
+# SendWelcomeEmail.enqueue(email: user.email, delay: 10) # execute after 10 seconds
+# ```
+#
+# Or you can specify a specific time in the future when the task should be executed:
+#
+# ```ruby
+# SendWelcomeEmail.enqueue(email: user.email, delay_until: Time.now + 3600) # execute in 1 hour
+# ```
+#
+module Rage::Deferred
+  # Push an instance to the deferred queue without including the `Rage::Deferred::Task` module.
+  # @param instance [Object] the instance to wrap
+  # @param delay [Integer, nil] the delay in seconds before the task is executed
+  # @param delay_until [Time, nil] the specific time when the task should be executed
+  # @example Schedule an arbitrary method to be called in the background
+  #   class SendWelcomeEmail < Struct.new(:email)
+  #     def call
+  #     end
+  #   end
+  #
+  #   email_service = SendWelcomeEmail.new(email: user.email)
+  #   Rage::Deferred.wrap(email_service).call
+  def self.wrap(instance, delay: nil, delay_until: nil)
+    Rage::Deferred::Proxy.new(instance, delay:, delay_until:)
+  end
+
+  # @private
+  def self.__backend
+    @__backend ||= Rage.config.deferred.backend
+  end
+
+  # @private
+  def self.__queue
+    @__queue ||= Rage::Deferred::Queue.new(__backend)
+  end
+
+  # @private
+  def self.__load_tasks
+    current_time = Time.now.to_i
+
+    __backend.pending_tasks.each do |task_id, task_wrapper, publish_at|
+      publish_in = publish_at - current_time if publish_at
+      __queue.schedule(task_id, task_wrapper, publish_in:)
+    rescue => e
+      puts "ERROR: Failed to load deferred task #{task_id}: #{e.class} (#{e.message}). Removing task from the queue."
+      __backend.remove(task_id)
+    end
+  end
+
+  module Backends
+  end
+
+  class PushTimeout < StandardError
+  end
+end
+
+require_relative "task"
+require_relative "queue"
+require_relative "proxy"
+require_relative "metadata"
+require_relative "backends/disk"
+require_relative "backends/nil"
+
+if Iodine.running?
+  Rage::Deferred.__load_tasks
+else
+  Iodine.on_state(:on_start) { Rage::Deferred.__load_tasks }
+end
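
The `wrap` helper above accepts the same scheduling options as `enqueue`; a short usage sketch reusing the `SendWelcomeEmail` service object from the `@example` in this file:

```ruby
email_service = SendWelcomeEmail.new(email: user.email)

# run the wrapped call roughly 30 seconds from now
Rage::Deferred.wrap(email_service, delay: 30).call

# or at a specific point in time
Rage::Deferred.wrap(email_service, delay_until: Time.now + 3600).call
```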
data/lib/rage/deferred/metadata.rb
ADDED
@@ -0,0 +1,43 @@
+# frozen_string_literal: true
+
+##
+# Metadata for deferred tasks.
+# The class encapsulates the metadata associated with a deferred task, and allows to store it without modifying the task instance.
+#
+class Rage::Deferred::Metadata
+  def self.build(task, args, kwargs)
+    request_id = Thread.current[:rage_logger][:tags][0] if Thread.current[:rage_logger]
+
+    [
+      task,
+      args.empty? ? nil : args,
+      kwargs.empty? ? nil : kwargs,
+      nil,
+      request_id
+    ]
+  end
+
+  def self.get_task(metadata)
+    metadata[0]
+  end
+
+  def self.get_args(metadata)
+    metadata[1]
+  end
+
+  def self.get_kwargs(metadata)
+    metadata[2]
+  end
+
+  def self.get_attempts(metadata)
+    metadata[3]
+  end
+
+  def self.get_request_id(metadata)
+    metadata[4]
+  end
+
+  def self.inc_attempts(metadata)
+    metadata[3] = metadata[3].to_i + 1
+  end
+end
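
As an illustration of the positional layout the class above encapsulates (derived from `build` and the accessors; the task instance is the `SendWelcomeEmail` example from deferred.rb):

```ruby
meta = Rage::Deferred::Metadata.build(SendWelcomeEmail.new, [], { email: "user@example.com" })

Rage::Deferred::Metadata.get_task(meta)     # => the task instance
Rage::Deferred::Metadata.get_args(meta)     # => nil (empty args are stored as nil)
Rage::Deferred::Metadata.get_kwargs(meta)   # => { email: "user@example.com" }
Rage::Deferred::Metadata.get_attempts(meta) # => nil until inc_attempts is called
Rage::Deferred::Metadata.inc_attempts(meta) # => 1
```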