rage-rb 1.16.0 → 1.17.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +18 -0
- data/Gemfile +1 -0
- data/README.md +2 -1
- data/lib/rage/cli.rb +20 -0
- data/lib/rage/code_loader.rb +25 -14
- data/lib/rage/configuration.rb +151 -0
- data/lib/rage/controller/api.rb +5 -1
- data/lib/rage/deferred/backends/disk.rb +258 -0
- data/lib/rage/deferred/backends/nil.rb +16 -0
- data/lib/rage/deferred/deferred.rb +95 -0
- data/lib/rage/deferred/metadata.rb +43 -0
- data/lib/rage/deferred/proxy.rb +34 -0
- data/lib/rage/deferred/queue.rb +76 -0
- data/lib/rage/deferred/task.rb +96 -0
- data/lib/rage/fiber_scheduler.rb +1 -0
- data/lib/rage/response.rb +39 -0
- data/lib/rage/templates/controller-template/controller.rb +2 -0
- data/lib/rage/version.rb +1 -1
- data/lib/rage-rb.rb +5 -0
- data/rage.gemspec +1 -1
- metadata +12 -4
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: aa6f9f4b7b4e1f422fd778585be775834fbe1e8e30c523e2cac096bab71d0046
+  data.tar.gz: 776090fd9779c994c95bf4ec368cb4562c544b8fc2d99043fa0c2cb5c91903af
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: c5bbbfc62d7da523ae2f7e1d9b3be2acaa67072fd34fe464f23ab909324c46c707983e828c451969c46cb019f11c6ca0abc026d641b44c4df5340add13c830dd
+  data.tar.gz: 814cebf4625dc114d07de548981e0a593b78f2d55dd514ca9824d68296cfabd99f79e838b2580fdc07480ed906c8081585a38d6361f33bd64c3a06a070155bed
data/CHANGELOG.md
CHANGED
@@ -1,5 +1,23 @@
 ## [Unreleased]
 
+## [1.17.1] - 2025-08-21
+
+### Fixed
+
+- Apply backpressure to every `enqueue` call (#166).
+
+## [1.17.0] - 2025-08-20
+
+### Added
+
+- Add `Rage::Deferred` (#164).
+- Add a controller generator by [@alex-rogachev](https://github.com/alex-rogachev) (#160).
+- Update `stale?` to set cache headers by [@serhii-sadovskyi](https://github.com/serhii-sadovskyi) (#159).
+
+### Fixed
+
+- Sub-millisecond sleep results in hang (#161).
+
 ## [1.16.0] - 2025-05-20
 
 ### Added
data/Gemfile
CHANGED
data/README.md
CHANGED
@@ -62,11 +62,12 @@ Built-in middleware:
 - [CORS](https://rage-rb.pages.dev/Rage/Cors)
 - [RequestId](https://rage-rb.pages.dev/Rage/RequestId)
 
-Also, see the following
+Also, see the following guides:
 
 - [Rails Integration](https://github.com/rage-rb/rage/wiki/Rails-integration)
 - [RSpec Integration](https://github.com/rage-rb/rage/wiki/RSpec-integration)
 - [WebSockets Guide](https://github.com/rage-rb/rage/wiki/WebSockets-guide)
+- [Background Tasks Guide](https://github.com/rage-rb/rage/wiki/Background-Tasks-Guide)
 
 If you are a first-time contributor, make sure to check the [overview doc](https://github.com/rage-rb/rage/blob/master/OVERVIEW.md) that shows how Rage's core components interact with each other.
 
data/lib/rage/cli.rb
CHANGED
@@ -30,6 +30,26 @@ module Rage
       template("model-template/model.rb", "app/models/#{name.singularize.underscore}.rb")
     end
 
+    desc "controller NAME", "Generate a new controller."
+    def controller(name = nil)
+      return help("controller") if name.nil?
+
+      setup
+      unless defined?(ActiveSupport::Inflector)
+        raise LoadError, <<~ERR
+          ActiveSupport::Inflector is required to run this command. Add the following line to your Gemfile:
+          gem "activesupport", require: "active_support/inflector"
+        ERR
+      end
+
+      # remove trailing Controller if already present
+      normalized_name = name.sub(/_?controller$/i, "")
+      @controller_name = "#{normalized_name.camelize}Controller"
+      file_name = "#{normalized_name.underscore}_controller.rb"
+
+      template("controller-template/controller.rb", "app/controllers/#{file_name}")
+    end
+
     private
 
     def setup
data/lib/rage/code_loader.rb
CHANGED
@@ -22,6 +22,8 @@ class Rage::CodeLoader
     @loader.enable_reloading if enable_reloading
     @loader.setup
     @loader.eager_load if enable_eager_loading
+
+    configure_components
   end
 
   # in standalone mode - reload the code and the routes
@@ -34,13 +36,7 @@ class Rage::CodeLoader
     Rage.__router.reset_routes
     load("#{Rage.root}/config/routes.rb")
 
-    unless Rage.autoload?(:Cable) # the `Cable` component is loaded
-      Rage::Cable.__router.reset
-    end
-
-    unless Rage.autoload?(:OpenAPI) # the `OpenAPI` component is loaded
-      Rage::OpenAPI.__reset_data_cache
-    end
+    reload_components
   end
 
   # in Rails mode - reset the routes; everything else will be done by Rails
@@ -50,13 +46,7 @@ class Rage::CodeLoader
     @reloading = true
     Rage.__router.reset_routes
 
-    unless Rage.autoload?(:Cable) # the `Cable` component is loaded
-      Rage::Cable.__router.reset
-    end
-
-    unless Rage.autoload?(:OpenAPI) # the `OpenAPI` component is loaded
-      Rage::OpenAPI.__reset_data_cache
-    end
+    reload_components
  end
 
   def reloading?
@@ -73,4 +63,25 @@ class Rage::CodeLoader
   ensure
     @last_watched, @last_update_at = current_watched, current_update_at
   end
+
+  private
+
+  def configure_components
+    if Rage.env.development? && (Rage.config.deferred.configured? || Rage.config.deferred.has_default_disk_storage?)
+      # if there's at least one task, `Rage::Deferred` will be automatically loaded in production;
+      # in development, however, eager loading is disabled, and we want to automatically load
+      # the module in case it was explicitly configured or if a disk storage exists
+      Rage::Deferred
+    end
+  end
+
+  def reload_components
+    unless Rage.autoload?(:Cable) # the `Cable` component is loaded
+      Rage::Cable.__router.reset
+    end
+
+    unless Rage.autoload?(:OpenAPI) # the `OpenAPI` component is loaded
+      Rage::OpenAPI.__reset_data_cache
+    end
+  end
 end
data/lib/rage/configuration.rb
CHANGED
@@ -145,6 +145,38 @@ require "erb"
 #   end
 # > ```
 #
+# # Deferred Configuration
+# • _config.deferred.backend_
+#
+# > Specifies the backend for deferred tasks. Supported values are `:disk`, which uses disk storage, or `nil`, which disables persistence of deferred tasks.
+# > The `:disk` backend accepts the following options:
+# >
+# > - `:path` - the path to the directory where deferred tasks will be stored. Defaults to `storage`.
+# > - `:prefix` - the prefix for the deferred task files. Defaults to `deferred-`.
+# > - `:fsync_frequency` - the frequency of `fsync` calls in seconds. Defaults to `0.5`.
+#
+# > ```ruby
+# > config.deferred.backend = :disk, { path: "storage" }
+# > ```
+#
+# • _config.deferred.backpressure_
+#
+# > Enables the backpressure for deferred tasks. The backpressure is used to limit the number of pending tasks in the queue. It accepts a hash with the following options:
+# >
+# > - `:high_water_mark` - the maximum number of pending tasks in the queue. Defaults to `1000`.
+# > - `:low_water_mark` - the minimum number of pending tasks in the queue before the backpressure is released. Defaults to `high_water_mark * 0.8`.
+# > - `:timeout` - the timeout for the backpressure in seconds. Defaults to `2`.
+#
+# > ```ruby
+# > config.deferred.backpressure = { high_water_mark: 1000, low_water_mark: 800, timeout: 2 }
+# > ```
+#
+# > Additionally, you can set the backpressure value to `true` to use the default values:
+#
+# > ```ruby
+# > config.deferred.backpressure = true
+# > ```
+#
 # # Transient Settings
 #
 # The settings described in this section should be configured using **environment variables** and are either temporary or will become the default in the future.
@@ -208,6 +240,10 @@ class Rage::Configuration
     @openapi ||= OpenAPI.new
   end
 
+  def deferred
+    @deferred ||= Deferred.new
+  end
+
   def internal
     @internal ||= Internal.new
   end
@@ -347,6 +383,121 @@ class Rage::Configuration
     attr_accessor :tag_resolver
   end
 
+  class Deferred
+    attr_reader :backpressure
+
+    def initialize
+      @configured = false
+    end
+
+    def backend
+      unless @backend_class
+        @backend_class = Rage::Deferred::Backends::Disk
+        @backend_options = parse_disk_backend_options({})
+      end
+
+      @backend_class.new(**@backend_options)
+    end
+
+    def backend=(config)
+      @configured = true
+
+      backend_id, opts = if config.is_a?(Array)
+        [config[0], config[1]]
+      else
+        [config, {}]
+      end
+
+      @backend_class = case backend_id
+      when :disk
+        @backend_options = parse_disk_backend_options(opts)
+        Rage::Deferred::Backends::Disk
+      when nil
+        Rage::Deferred::Backends::Nil
+      else
+        raise ArgumentError, "unsupported backend value; supported keys are `:disk` and `nil`"
+      end
+    end
+
+    class Backpressure
+      attr_reader :high_water_mark, :low_water_mark, :timeout, :sleep_interval, :timeout_iterations
+
+      def initialize(high_water_mark = nil, low_water_mark = nil, timeout = nil)
+        @high_water_mark = high_water_mark || 1_000
+        @low_water_mark = low_water_mark || (@high_water_mark * 0.8).round
+
+        @timeout = timeout || 2
+        @sleep_interval = 0.05
+        @timeout_iterations = (@timeout / @sleep_interval).round
+      end
+    end
+
+    def backpressure=(config)
+      @configured = true
+
+      if config == true
+        @backpressure = Backpressure.new
+        return
+      elsif config == false
+        @backpressure = nil
+        return
+      end
+
+      if config.except(:high_water_mark, :low_water_mark, :timeout).any?
+        raise ArgumentError, "unsupported backpressure options; supported keys are `:high_water_mark`, `:low_water_mark`, `:timeout`"
+      end
+
+      high_water_mark, low_water_mark, timeout = config.values_at(:high_water_mark, :low_water_mark, :timeout)
+      @backpressure = Backpressure.new(high_water_mark, low_water_mark, timeout)
+    end
+
+    def default_disk_storage_path
+      Pathname.new("storage")
+    end
+
+    def default_disk_storage_prefix
+      "deferred-"
+    end
+
+    def has_default_disk_storage?
+      default_disk_storage_path.glob("#{default_disk_storage_prefix}*").any?
+    end
+
+    def configured?
+      @configured
+    end
+
+    private
+
+    def parse_disk_backend_options(opts)
+      if opts.except(:path, :prefix, :fsync_frequency).any?
+        raise ArgumentError, "unsupported backend options; supported values are `:path`, `:prefix`, `:fsync_frequency`"
+      end
+
+      parsed_options = {}
+
+      parsed_options[:path] = if opts[:path]
+        opts[:path].is_a?(Pathname) ? opts[:path] : Pathname.new(opts[:path])
+      else
+        default_disk_storage_path
+      end
+
+      parsed_options[:prefix] = if opts[:prefix]
+        opts[:prefix].end_with?("-") ? opts[:prefix] : "#{opts[:prefix]}-"
+      else
+        default_disk_storage_prefix
+      end
+
+      parsed_options[:fsync_frequency] = if opts[:fsync_frequency]
+        (opts[:fsync_frequency].to_i * 1_000).round
+      else
+        500
+      end
+
+      parsed_options
+    end
+  end
+
   # @private
   class Internal
     attr_accessor :rails_mode
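The new `config.deferred` settings above would typically be applied in an environment config or an initializer. A minimal sketch, assuming the usual `Rage.configure` block and an illustrative `config/environments/production.rb` location; the specific option values are examples, not defaults:

```ruby
# config/environments/production.rb (illustrative location)
Rage.configure do
  # persist deferred tasks on disk under ./storage, using files named "deferred-*"
  config.deferred.backend = :disk, { path: "storage", prefix: "deferred" }

  # cap the in-memory backlog: enqueue blocks once 2_000 tasks are pending
  # and raises Rage::Deferred::PushTimeout after 5 seconds of waiting
  config.deferred.backpressure = { high_water_mark: 2_000, timeout: 5 }
end
```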
data/lib/rage/controller/api.rb
CHANGED
@@ -618,6 +618,7 @@ class RageController::API
   #   stale?(etag: "123", last_modified: Time.utc(2023, 12, 15))
   #   stale?(last_modified: Time.utc(2023, 12, 15))
   #   stale?(etag: "123")
+  # @note `stale?` will set ETag and Last-Modified response headers made of passed arguments in the method. Value for ETag will be additionally hashified using SHA1 algorithm, whereas value for Last-Modified will be converted to the string which represents time as RFC 1123 date of HTTP-date defined by RFC 2616.
   # @note `stale?` will set the response status to 304 if the request is fresh. This side effect will cause a double render error, if `render` gets called after this method. Make sure to implement a proper conditional in your action to prevent this from happening:
   #   ```ruby
   #   if stale?(etag: "123")
@@ -625,7 +626,10 @@ class RageController::API
   #   end
   #   ```
   def stale?(etag: nil, last_modified: nil)
-
+    response.etag = etag
+    response.last_modified = last_modified
+
+    still_fresh = request.fresh?(etag: response.etag, last_modified: last_modified)
 
     head :not_modified if still_fresh
     !still_fresh
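With this change, `stale?` writes the `ETag` and `Last-Modified` headers it was given before checking request freshness. A usage sketch following the pattern from the method's own docstring; `Post`, `cache_key`, and `updated_at` stand in for any ActiveRecord-style model and are illustrative:

```ruby
class PostsController < RageController::API
  def show
    post = Post.find(params[:id])

    # sets the ETag (SHA1-hashed, weak) and Last-Modified headers on the response,
    # and responds with 304 Not Modified when the client's validators still match
    if stale?(etag: post.cache_key, last_modified: post.updated_at)
      render json: post
    end
  end
end
```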
data/lib/rage/deferred/backends/disk.rb
ADDED
@@ -0,0 +1,258 @@
+# frozen_string_literal: true
+
+require "zlib"
+
+##
+# `Rage::Deferred::Backends` implements a storage layer to persist deferred tasks.
+# A storage should implement the following instance methods:
+#
+# * `add` - called when a task has to be added to the storage;
+# * `remove` - called when a task has to be removed from the storage;
+# * `pending_tasks` - the method should iterate over the underlying storage and return a list of tasks to replay;
+#
+class Rage::Deferred::Backends::Disk
+  STORAGE_VERSION = "0"
+  STORAGE_SIZE_INCREASE_RATIO = 1.5
+
+  DEFAULT_PUBLISH_AT = "0"
+  DEFAULT_STORAGE_SIZE_LIMIT = 2_000_000
+
+  def initialize(path:, prefix:, fsync_frequency:)
+    @storage_path = path
+    @storage_prefix = "#{prefix}#{STORAGE_VERSION}"
+    @fsync_frequency = fsync_frequency
+
+    @storage_path.mkpath
+
+    # try to open and take ownership of all storage files in the storage directory
+    storage_files = @storage_path.glob("#{@storage_prefix}-*").filter_map do |file_path|
+      file = file_path.open("a+b")
+      if file.flock(File::LOCK_EX | File::LOCK_NB)
+        sleep 0.01 # reduce contention between workers
+        file
+      else
+        file.close
+      end
+    end
+
+    # if there are no storage files - create one;
+    # otherwise the first one is used as the main storage; the rest will be merged into the main storage
+    if storage_files.empty?
+      @storage = create_storage
+    else
+      @storage = storage_files[0]
+      @recovered_storages = storage_files[1..] if storage_files.length > 1
+    end
+
+    # create seed value for the task IDs
+    task_id_seed = Time.now.to_i # TODO: ensure timestamps in the file are not higher
+    @task_id_base, @task_id_i = "#{task_id_seed}-#{Process.pid}", 0
+    Iodine.run_every(1_000) do
+      task_id_seed += 1
+      @task_id_base, @task_id_i = "#{task_id_seed}-#{Process.pid}", 0
+    end
+
+    @storage_size_limit = DEFAULT_STORAGE_SIZE_LIMIT
+    @storage_size = @storage.size
+    @fsync_scheduled = false
+    @should_rotate = false
+
+    # we use different counters for different tasks:
+    # delayed tasks are stored in the hash; for regular tasks we only maintain a counter;
+    # this information is only used during storage rotation
+    @immediate_tasks_in_queue = 0
+    @delayed_tasks = {}
+
+    # ensure data is written to disk
+    @storage_has_changes = false
+    Iodine.run_every(@fsync_frequency) do
+      if @storage_has_changes
+        @storage_has_changes = false
+        @storage.fsync
+      end
+    end
+  end
+
+  # Add a record to the log representing a new task.
+  # @param task [Rage::Deferred::Task]
+  # @param publish_at [Integer, nil]
+  # @param task_id [String, nil]
+  # @return [String]
+  def add(task, publish_at: nil, task_id: nil)
+    serialized_task = Marshal.dump(task).dump
+
+    persisted_task_id = task_id || generate_task_id
+
+    entry = build_add_entry(persisted_task_id, serialized_task, publish_at)
+    write_to_storage(entry)
+
+    if publish_at
+      @delayed_tasks[persisted_task_id] = [serialized_task, publish_at]
+    else
+      @immediate_tasks_in_queue += 1
+    end
+
+    persisted_task_id
+  end
+
+  # Add a record to the log representing a task removal.
+  # @param task_id [String]
+  def remove(task_id)
+    write_to_storage(build_remove_entry(task_id))
+
+    if @delayed_tasks.has_key?(task_id)
+      @delayed_tasks.delete(task_id)
+    else
+      @immediate_tasks_in_queue -= 1
+    end
+
+    # rotate the storage once the size is over the limit and all non-delayed tasks are processed
+    rotate_storage if @should_rotate && @immediate_tasks_in_queue == 0
+  end
+
+  # Return a list of pending tasks in the storage.
+  # @return [Array<(String, Rage::Deferred::Task, Integer, Integer)>
+  def pending_tasks
+    if @recovered_storages
+      # `@recovered_storages` will only be present if the server has previously crashed and left
+      # some storage files behind, or if the new cluster is started with fewer workers than before;
+      # TLDR: this code is expected to execute very rarely
+      @recovered_storages.each { |storage| recover_tasks(storage) }
+    end
+
+    tasks = {}
+    corrupted_tasks_count = 0
+
+    # find pending tasks in the storage
+    @storage.tap(&:rewind).each_line(chomp: true) do |entry|
+      signature, op, payload = entry[0...8], entry[9...12], entry[9..]
+      next if signature&.empty? || payload&.empty? || op&.empty?
+
+      unless signature == Zlib.crc32(payload).to_s(16).rjust(8, "0")
+        corrupted_tasks_count += 1
+        next
+      end
+
+      if op == "add"
+        task_id = entry[13...entry.index(":", 13).to_i]
+        tasks[task_id] = entry
+      elsif op == "rem"
+        task_id = entry[13..]
+        tasks.delete(task_id)
+      end
+    end
+
+    if corrupted_tasks_count != 0
+      puts "WARNING: Detected #{corrupted_tasks_count} corrupted deferred task(s)"
+    end
+
+    tasks.filter_map do |task_id, entry|
+      _, _, _, serialized_publish_at, serialized_task = entry.split(":", 5)
+
+      task = Marshal.load(serialized_task.undump)
+
+      publish_at = (serialized_publish_at == DEFAULT_PUBLISH_AT ? nil : serialized_publish_at.to_i)
+
+      if publish_at
+        @delayed_tasks[task_id] = [serialized_task, publish_at]
+      else
+        @immediate_tasks_in_queue += 1
+      end
+
+      [task_id, task, publish_at]
+
+    rescue ArgumentError, NameError => e
+      puts "ERROR: Can't deserialize the task with id #{task_id}: (#{e.class}) #{e.message}"
+      nil
+    end
+  end
+
+  private
+
+  def generate_task_id
+    @task_id_i += 1
+    "#{@task_id_base}-#{@task_id_i}"
+  end
+
+  def create_storage
+    file = @storage_path.join("#{@storage_prefix}-#{Time.now.strftime("%Y%m%d")}-#{Process.pid}-#{rand(0x100000000).to_s(36)}")
+
+    file.open("a+b").tap { |f| f.flock(File::LOCK_EX) }
+  end
+
+  def write_to_storage(content, adjust_size_limit: false)
+    @storage.write(content)
+    @storage_has_changes = true
+
+    @storage_size += content.bytesize
+    @should_rotate = true if @storage_size >= @storage_size_limit
+
+    if adjust_size_limit
+      # if the data copied from recovered storages or during the rotation takes up most of the storage, we might
+      # end up in an infinite rotation loop; instead, we dynamically increase the storage size limit
+      if @storage_size * STORAGE_SIZE_INCREASE_RATIO >= @storage_size_limit
+        @storage_size_limit *= STORAGE_SIZE_INCREASE_RATIO
+        @should_rotate = false
+      end
+    end
+  end
+
+  def rotate_storage
+    old_storage = @storage
+    @storage = nil # in case `create_storage` ends up blocking the fiber
+
+    # create a new storage and update internal state;
+    # after this point all new tasks will be written to the new storage
+    @should_rotate = false
+    @storage_size = 0
+    @storage_size_limit = DEFAULT_STORAGE_SIZE_LIMIT
+    @storage = create_storage
+
+    # copy delayed tasks to the new storage in batches
+    @delayed_tasks.keys.each_slice(100) do |task_ids|
+      entries = task_ids.filter_map do |task_id|
+        # don't copy the task if it has already been processed during the rotation
+        next unless @delayed_tasks.has_key?(task_id)
+
+        serialized_task, publish_at = @delayed_tasks[task_id]
+        build_add_entry(task_id, serialized_task, publish_at)
+      end
+
+      write_to_storage(entries.join, adjust_size_limit: true)
+
+      Fiber.pause
+    end
+
+    # delete the old storage ensuring the copied data has already been written to disk
+    Iodine.run_after(@fsync_frequency) do
+      old_storage.close
+      File.unlink(old_storage.path)
+    end
+  end
+
+  def build_add_entry(task_id, serialized_task, publish_at)
+    entry = "add:#{task_id}:#{publish_at || DEFAULT_PUBLISH_AT}:#{serialized_task}"
+    crc = Zlib.crc32(entry).to_s(16).rjust(8, "0")
+
+    "#{crc}:#{entry}\n"
+  end
+
+  def build_remove_entry(task_id)
+    entry = "rem:#{task_id}"
+    crc = Zlib.crc32(entry).to_s(16).rjust(8, "0")
+
+    "#{crc}:#{entry}\n"
+  end
+
+  def recover_tasks(storage)
+    # copy records to the main storage
+    while (content = storage.read(262_144))
+      write_to_storage(content, adjust_size_limit: true)
+    end
+
+    Iodine.run_after(@fsync_frequency) do
+      storage.close
+      File.unlink(storage.path)
+    end
+  end
+end
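For orientation, the disk backend appends one CRC32-prefixed record per operation, as built by `build_add_entry` and `build_remove_entry` above. A rough, standalone sketch of that record format; the task ID and the serialized payload are illustrative values, not real data:

```ruby
require "zlib"

# task IDs come from generate_task_id ("#{seed}-#{pid}-#{counter}"); the payload is
# Marshal.dump(metadata).dump, so the binary blob is escaped onto a single line
task_id = "1724227200-42-1"
serialized_task = Marshal.dump(["SendWelcomeEmail", nil, nil, nil, nil]).dump

entry = "add:#{task_id}:0:#{serialized_task}" # "0" == DEFAULT_PUBLISH_AT, i.e. no scheduled delay
crc = Zlib.crc32(entry).to_s(16).rjust(8, "0") # signature verified by pending_tasks on replay
add_record = "#{crc}:#{entry}\n"

# completing the task later appends a matching removal record
removal = "rem:#{task_id}"
remove_record = "#{Zlib.crc32(removal).to_s(16).rjust(8, "0")}:#{removal}\n"
```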
data/lib/rage/deferred/deferred.rb
ADDED
@@ -0,0 +1,95 @@
+# frozen_string_literal: true
+
+##
+# `Rage::Deferred` is an in-process background task queue with at-least-once delivery guarantee that allows you to schedule tasks to be executed later.
+# It can be used to offload long-running operations, such as sending emails or communicating with external APIs.
+#
+# To schedule a task, first define a task class that includes `Rage::Deferred::Task` and implements the `#perform` method.
+#
+# ```ruby
+# class SendWelcomeEmail
+#   include Rage::Deferred::Task
+#
+#   def perform(email)
+#     # logic to send the welcome email
+#   end
+# end
+# ```
+#
+# Then, push the task to the deferred queue:
+#
+# ```ruby
+# SendWelcomeEmail.enqueue(email: user.email)
+# ```
+#
+# You can also specify a delay for the task execution using the `delay` option:
+#
+# ```ruby
+# SendWelcomeEmail.enqueue(email: user.email, delay: 10) # execute after 10 seconds
+# ```
+#
+# Or you can specify a specific time in the future when the task should be executed:
+#
+# ```ruby
+# SendWelcomeEmail.enqueue(email: user.email, delay_until: Time.now + 3600) # execute in 1 hour
+# ```
+#
+module Rage::Deferred
+  # Push an instance to the deferred queue without including the `Rage::Deferred::Task` module.
+  # @param instance [Object] the instance to wrap
+  # @param delay [Integer, nil] the delay in seconds before the task is executed
+  # @param delay_until [Time, nil] the specific time when the task should be executed
+  # @example Schedule an arbitrary method to be called in the background
+  #   class SendWelcomeEmail < Struct.new(:email)
+  #     def call
+  #     end
+  #   end
+  #
+  #   email_service = SendWelcomeEmail.new(email: user.email)
+  #   Rage::Deferred.wrap(email_service).call
+  def self.wrap(instance, delay: nil, delay_until: nil)
+    Rage::Deferred::Proxy.new(instance, delay:, delay_until:)
+  end
+
+  # @private
+  def self.__backend
+    @__backend ||= Rage.config.deferred.backend
+  end
+
+  # @private
+  def self.__queue
+    @__queue ||= Rage::Deferred::Queue.new(__backend)
+  end
+
+  # @private
+  def self.__load_tasks
+    current_time = Time.now.to_i
+
+    __backend.pending_tasks.each do |task_id, task_wrapper, publish_at|
+      publish_in = publish_at - current_time if publish_at
+      __queue.schedule(task_id, task_wrapper, publish_in:)
+    rescue => e
+      puts "ERROR: Failed to load deferred task #{task_id}: #{e.class} (#{e.message}). Removing task from the queue."
+      __backend.remove(task_id)
+    end
+  end
+
+  module Backends
+  end
+
+  class PushTimeout < StandardError
+  end
+end
+
+require_relative "task"
+require_relative "queue"
+require_relative "proxy"
+require_relative "metadata"
+require_relative "backends/disk"
+require_relative "backends/nil"
+
+if Iodine.running?
+  Rage::Deferred.__load_tasks
+else
+  Iodine.on_state(:on_start) { Rage::Deferred.__load_tasks }
+end
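Beyond the docstring examples above, `Rage::Deferred.wrap` accepts the same `delay`/`delay_until` options; the wrapped call is persisted through the configured backend before it runs. A small sketch, with an illustrative mailer class:

```ruby
class WeeklyDigestMailer
  def deliver(user_id)
    # logic to build and send the digest for user_id
  end
end

mailer = WeeklyDigestMailer.new

Rage::Deferred.wrap(mailer).deliver(42)             # runs in the background as soon as possible
Rage::Deferred.wrap(mailer, delay: 600).deliver(42) # runs in roughly ten minutes
```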
data/lib/rage/deferred/metadata.rb
ADDED
@@ -0,0 +1,43 @@
+# frozen_string_literal: true
+
+##
+# Metadata for deferred tasks.
+# The class encapsulates the metadata associated with a deferred task, and allows to store it without modifying the task instance.
+#
+class Rage::Deferred::Metadata
+  def self.build(task, args, kwargs)
+    request_id = Thread.current[:rage_logger][:tags][0] if Thread.current[:rage_logger]
+
+    [
+      task,
+      args.empty? ? nil : args,
+      kwargs.empty? ? nil : kwargs,
+      nil,
+      request_id
+    ]
+  end
+
+  def self.get_task(metadata)
+    metadata[0]
+  end
+
+  def self.get_args(metadata)
+    metadata[1]
+  end
+
+  def self.get_kwargs(metadata)
+    metadata[2]
+  end
+
+  def self.get_attempts(metadata)
+    metadata[3]
+  end
+
+  def self.get_request_id(metadata)
+    metadata[4]
+  end
+
+  def self.inc_attempts(metadata)
+    metadata[3] = metadata[3].to_i + 1
+  end
+end
data/lib/rage/deferred/proxy.rb
ADDED
@@ -0,0 +1,34 @@
+# frozen_string_literal: true
+
+class Rage::Deferred::Proxy
+  class Wrapper
+    include Rage::Deferred::Task
+
+    def perform(instance, method_name, *, **)
+      instance.public_send(method_name, *, **)
+    end
+  end
+
+  def initialize(instance, delay: nil, delay_until: nil)
+    @instance = instance
+
+    @delay = delay
+    @delay_until = delay_until
+  end
+
+  def method_missing(method_name, *, **)
+    if @instance.respond_to?(method_name)
+      self.class.define_method(method_name) do |*args, **kwargs|
+        Wrapper.enqueue(@instance, method_name, *args, delay: @delay, delay_until: @delay_until, **kwargs)
+      end
+
+      send(method_name, *, **)
+    else
+      @instance.public_send(method_name, *, **)
+    end
+  end
+
+  def respond_to_missing?(method_name, _)
+    @instance.respond_to?(method_name)
+  end
+end
data/lib/rage/deferred/queue.rb
ADDED
@@ -0,0 +1,76 @@
+# frozen_string_literal: true
+
+class Rage::Deferred::Queue
+  attr_reader :backlog_size
+
+  def initialize(backend)
+    @backend = backend
+    @backlog_size = 0
+    @backpressure = Rage.config.deferred.backpressure
+  end
+
+  # Write the task to the storage and schedule it for execution.
+  def enqueue(task_metadata, delay: nil, delay_until: nil, task_id: nil)
+    apply_backpressure if @backpressure
+
+    publish_in, publish_at = if delay
+      delay_i = delay.to_i
+      [delay_i, Time.now.to_i + delay_i] if delay_i > 0
+    elsif delay_until
+      delay_until_i, current_time_i = delay_until.to_i, Time.now.to_i
+      [delay_until_i - current_time_i, delay_until_i] if delay_until_i > current_time_i
+    end
+
+    persisted_task_id = @backend.add(task_metadata, publish_at:, task_id:)
+    schedule(persisted_task_id, task_metadata, publish_in:)
+  end
+
+  # Schedule the task for execution.
+  def schedule(task_id, task_metadata, publish_in: nil)
+    publish_in_ms = publish_in.to_i * 1_000 if publish_in && publish_in > 0
+    task = Rage::Deferred::Metadata.get_task(task_metadata)
+    @backlog_size += 1 unless publish_in_ms
+
+    Iodine.run_after(publish_in_ms) do
+      @backlog_size -= 1 unless publish_in_ms
+
+      unless Iodine.stopping?
+        Fiber.schedule do
+          Iodine.task_inc!
+
+          is_completed = task.new.__perform(task_metadata)
+
+          if is_completed
+            @backend.remove(task_id)
+          else
+            attempts = Rage::Deferred::Metadata.inc_attempts(task_metadata)
+            if task.__should_retry?(attempts)
+              enqueue(task_metadata, delay: task.__next_retry_in(attempts), task_id:)
+            else
+              @backend.remove(task_id)
+            end
+          end
+
+        ensure
+          Iodine.task_dec!
+        end
+      end
+    end
+  end
+
+  private
+
+  def apply_backpressure
+    if @backlog_size > @backpressure.high_water_mark
+      i, target_backlog_size = 0, @backpressure.low_water_mark
+      while @backlog_size > target_backlog_size && i < @backpressure.timeout_iterations
+        sleep @backpressure.sleep_interval
+        i += 1
+      end
+
+      if i == @backpressure.timeout_iterations
+        raise Rage::Deferred::PushTimeout, "could not enqueue deferred task within #{@backpressure.timeout} seconds"
+      end
+    end
+  end
+end
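When backpressure is configured (see the `config.deferred.backpressure` section above), `apply_backpressure` makes `enqueue` block while the backlog is above the high-water mark and raise `Rage::Deferred::PushTimeout` if it does not drain in time. Callers that prefer to degrade gracefully can rescue the error; a sketch, with an illustrative controller and task:

```ruby
class ImportsController < RageController::API
  def create
    ProcessImport.enqueue(import_id: params[:id])
    head :accepted
  rescue Rage::Deferred::PushTimeout
    # the backlog stayed above the low-water mark for the whole timeout window
    render json: { error: "queue is busy, retry later" }, status: :service_unavailable
  end
end
```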
data/lib/rage/deferred/task.rb
ADDED
@@ -0,0 +1,96 @@
+# frozen_string_literal: true
+
+##
+# `Rage::Deferred::Task` is a module that should be included in classes that represent tasks to be executed
+# in the background by the `Rage::Deferred` queue. It ensures the tasks can be retried in case of a failure,
+# with a maximum number of attempts and an exponential backoff strategy.
+#
+# To define a task, include the module and implement the `#perform` method:
+#
+# ```ruby
+# class ProcessImage
+#   include Rage::Deferred::Task
+#
+#   def perform(image_path:)
+#     # logic to process the image
+#   end
+# end
+# ```
+#
+# The task can be enqueued using the `enqueue` method:
+#
+# ```ruby
+# ProcessImage.enqueue(image_path: 'path/to/image.jpg')
+# ```
+#
+# The `delay` and `delay_until` options can be used to specify when the task should be executed.
+#
+# ```ruby
+# ProcessImage.enqueue(image_path: 'path/to/image.jpg', delay: 10) # delays execution by 10 seconds
+# ProcessImage.enqueue(image_path: 'path/to/image.jpg', delay_until: Time.now + 3600) # executes after 1 hour
+# ```
+#
+module Rage::Deferred::Task
+  MAX_ATTEMPTS = 5
+  private_constant :MAX_ATTEMPTS
+
+  BACKOFF_INTERVAL = 5
+  private_constant :BACKOFF_INTERVAL
+
+  def perform
+  end
+
+  # @private
+  def __with_optional_log_tag(tag)
+    if tag
+      Rage.logger.tagged(tag) { yield }
+    else
+      yield
+    end
+  end
+
+  # @private
+  def __perform(metadata)
+    args = Rage::Deferred::Metadata.get_args(metadata)
+    kwargs = Rage::Deferred::Metadata.get_kwargs(metadata)
+    attempts = Rage::Deferred::Metadata.get_attempts(metadata)
+    request_id = Rage::Deferred::Metadata.get_request_id(metadata)
+
+    context = { task: self.class.name }
+    context[:attempt] = attempts + 1 if attempts
+
+    Rage.logger.with_context(context) do
+      __with_optional_log_tag(request_id) do
+        perform(*args, **kwargs)
+        true
+      rescue Exception => e
+        Rage.logger.error("Deferred task failed with exception: #{e.class} (#{e.message}):\n#{e.backtrace.join("\n")}")
+        false
+      end
+    end
+  end
+
+  def self.included(klass)
+    klass.extend(ClassMethods)
+  end
+
+  module ClassMethods
+    def enqueue(*args, delay: nil, delay_until: nil, **kwargs)
+      Rage::Deferred.__queue.enqueue(
+        Rage::Deferred::Metadata.build(self, args, kwargs),
+        delay:,
+        delay_until:
+      )
+    end
+
+    # @private
+    def __should_retry?(attempts)
+      attempts < MAX_ATTEMPTS
+    end
+
+    # @private
+    def __next_retry_in(attempts)
+      rand(BACKOFF_INTERVAL * 2**attempts.to_i) + 1
+    end
+  end
+end
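As a worked example of the retry policy above (the exact delays are random, only the bounds are fixed):

```ruby
# __next_retry_in(attempts) == rand(BACKOFF_INTERVAL * 2**attempts) + 1, with BACKOFF_INTERVAL == 5;
# the queue increments the attempt counter before deciding whether to retry:
# 1st failure (attempts == 1) -> re-enqueued with a delay of 1..10 seconds
# 2nd failure (attempts == 2) -> re-enqueued with a delay of 1..20 seconds
# 3rd failure (attempts == 3) -> re-enqueued with a delay of 1..40 seconds
# 4th failure (attempts == 4) -> re-enqueued with a delay of 1..80 seconds
# 5th failure (attempts == 5) -> __should_retry?(5) is false, the task is removed from the backend
```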
data/lib/rage/fiber_scheduler.rb
CHANGED
data/lib/rage/response.rb
CHANGED
@@ -1,6 +1,12 @@
 # frozen_string_literal: true
 
+require "digest"
+require "time"
+
 class Rage::Response
+  ETAG_HEADER = "ETag"
+  LAST_MODIFIED_HEADER = "Last-Modified"
+
   # @private
   def initialize(headers, body)
     @headers = headers
@@ -18,4 +24,37 @@ class Rage::Response
   def headers
     @headers
   end
+
+  # Returns ETag response header or +nil+ if it's empty.
+  #
+  # @return [String, nil]
+  def etag
+    headers[Rage::Response::ETAG_HEADER]
+  end
+
+  # Sets ETag header to the response. Additionally, it will hashify the value using +Digest::SHA1.hexdigest+. Pass +nil+ for resetting it.
+  # @note ETag will be always Weak since no strong validation is implemented.
+  # @note ArgumentError is raised if ETag value is neither +String+, nor +nil+
+  # @param etag [String, nil] The etag of the resource in the response.
+  def etag=(etag)
+    raise ArgumentError, "Expected `String` but `#{etag.class}` is received" unless etag.is_a?(String) || etag.nil?
+
+    headers[Rage::Response::ETAG_HEADER] = etag.nil? ? nil : %(W/"#{Digest::SHA1.hexdigest(etag)}")
+  end
+
+  # Returns Last-Modified response header or +nil+ if it's empty.
+  #
+  # @return [String, nil]
+  def last_modified
+    headers[Rage::Response::LAST_MODIFIED_HEADER]
+  end
+
+  # Sets Last-Modified header to the response by calling httpdate on the argument.
+  # @note ArgumentError is raised if +last_modified+ is not a +Time+ object instance
+  # @param last_modified [Time, nil] The last modified time of the resource in the response.
+  def last_modified=(last_modified)
+    raise ArgumentError, "Expected `Time` but `#{last_modified.class}` is received" unless last_modified.is_a?(Time) || last_modified.nil?
+
+    headers[Rage::Response::LAST_MODIFIED_HEADER] = last_modified&.httpdate
+  end
 end
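The new `Rage::Response` accessors can also be used directly, outside of `stale?`. A brief sketch; `fetch_report` and the `version`/`updated_at` attributes are illustrative placeholders:

```ruby
class ReportsController < RageController::API
  def show
    report = fetch_report(params[:id]) # illustrative helper

    response.etag = report.version.to_s        # stored as a weak validator: W/"<SHA1 of the string>"
    response.last_modified = report.updated_at # must be a Time; stored via Time#httpdate

    render json: report
  end
end
```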
data/lib/rage/version.rb
CHANGED
data/lib/rage-rb.rb
CHANGED
@@ -22,6 +22,10 @@ module Rage
     Rage::OpenAPI
   end
 
+  def self.deferred
+    Rage::Deferred
+  end
+
   def self.routes
     Rage::Router::DSL.new(__router)
   end
@@ -130,6 +134,7 @@ module Rage
   autoload :Session, "rage/session"
   autoload :Cable, "rage/cable/cable"
   autoload :OpenAPI, "rage/openapi/openapi"
+  autoload :Deferred, "rage/deferred/deferred"
 end
 
 module RageController
CHANGED
@@ -29,7 +29,7 @@ Gem::Specification.new do |spec|
|
|
29
29
|
|
30
30
|
spec.add_dependency "thor", "~> 1.0"
|
31
31
|
spec.add_dependency "rack", "~> 2.0"
|
32
|
-
spec.add_dependency "rage-iodine", "~> 4.
|
32
|
+
spec.add_dependency "rage-iodine", "~> 4.3"
|
33
33
|
spec.add_dependency "zeitwerk", "~> 2.6"
|
34
34
|
spec.add_dependency "rack-test", "~> 2.1"
|
35
35
|
spec.add_dependency "rake", ">= 12.0"
|
metadata
CHANGED
@@ -1,13 +1,13 @@
 --- !ruby/object:Gem::Specification
 name: rage-rb
 version: !ruby/object:Gem::Version
-  version: 1.
+  version: 1.17.1
 platform: ruby
 authors:
 - Roman Samoilov
 bindir: exe
 cert_chain: []
-date: 2025-
+date: 2025-08-21 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: thor
@@ -43,14 +43,14 @@ dependencies:
   requirements:
   - - "~>"
     - !ruby/object:Gem::Version
-      version: '4.
+      version: '4.3'
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
      - !ruby/object:Gem::Version
-        version: '4.
+        version: '4.3'
 - !ruby/object:Gem::Dependency
   name: zeitwerk
   requirement: !ruby/object:Gem::Requirement
@@ -128,6 +128,13 @@ files:
 - lib/rage/configuration.rb
 - lib/rage/controller/api.rb
 - lib/rage/cookies.rb
+- lib/rage/deferred/backends/disk.rb
+- lib/rage/deferred/backends/nil.rb
+- lib/rage/deferred/deferred.rb
+- lib/rage/deferred/metadata.rb
+- lib/rage/deferred/proxy.rb
+- lib/rage/deferred/queue.rb
+- lib/rage/deferred/task.rb
 - lib/rage/env.rb
 - lib/rage/errors.rb
 - lib/rage/ext/active_record/connection_pool.rb
@@ -189,6 +196,7 @@ files:
 - lib/rage/templates/config-initializers-.keep
 - lib/rage/templates/config-routes.rb
 - lib/rage/templates/config.ru
+- lib/rage/templates/controller-template/controller.rb
 - lib/rage/templates/db-templates/app-models-application_record.rb
 - lib/rage/templates/db-templates/db-seeds.rb
 - lib/rage/templates/db-templates/mysql/config-database.yml