cloudtasker 0.8.1 → 0.10.rc4
- checksums.yaml +4 -4
- data/.github/workflows/test.yml +41 -0
- data/.rubocop.yml +4 -1
- data/CHANGELOG.md +40 -0
- data/README.md +51 -5
- data/app/controllers/cloudtasker/worker_controller.rb +21 -3
- data/cloudtasker.gemspec +2 -2
- data/gemfiles/google_cloud_tasks_1.0.gemfile +3 -5
- data/gemfiles/google_cloud_tasks_1.1.gemfile +3 -5
- data/gemfiles/google_cloud_tasks_1.2.gemfile +3 -5
- data/gemfiles/google_cloud_tasks_1.3.gemfile +3 -5
- data/gemfiles/rails_5.2.gemfile +3 -5
- data/gemfiles/rails_6.0.gemfile +3 -5
- data/lib/cloudtasker.rb +1 -0
- data/lib/cloudtasker/backend/google_cloud_task.rb +25 -5
- data/lib/cloudtasker/backend/memory_task.rb +8 -23
- data/lib/cloudtasker/backend/redis_task.rb +1 -1
- data/lib/cloudtasker/cloud_task.rb +2 -0
- data/lib/cloudtasker/config.rb +52 -4
- data/lib/cloudtasker/local_server.rb +5 -2
- data/lib/cloudtasker/max_task_size_exceeded_error.rb +14 -0
- data/lib/cloudtasker/redis_client.rb +11 -2
- data/lib/cloudtasker/testing.rb +2 -2
- data/lib/cloudtasker/version.rb +1 -1
- data/lib/cloudtasker/worker.rb +54 -14
- data/lib/cloudtasker/worker_handler.rb +144 -7
- data/lib/cloudtasker/worker_logger.rb +3 -2
- metadata +15 -15
- data/.travis.yml +0 -16
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 228f7fc8ef7ca52a1a9827b233ae1ebfea77e2a6d6683d68caa3fffef697ffb4
+  data.tar.gz: b58fee4392d02847c462684744a5b90ff413564a02e3e74b51514b7f8aee4107
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 88d7bfd9d4bbdb38ca48cf5e3af1fd9f901a3f89b84bce7d41609baa22d7ded5ce0c3a657d8aaaa9ad3b9274ad389825bdd7e17272f65ef80a68c6cb96538d25
+  data.tar.gz: b0a1f08dd892a9c709ecb24fb9a38c6507489179ce06779e49c9641d1ed849b02af6c3717476d85e7cf852879d0a5c8429123bc843a4ce60933f8319d0241e5a
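As context for the checksum block above: `checksums.yaml` records the SHA256/SHA512 digests of the gem's inner `metadata.gz` and `data.tar.gz` archives. A minimal, hedged Ruby sketch of how such a digest can be recomputed for comparison (the unpacking step and file names are illustrative, not part of the gem):

```ruby
require 'digest'

# A .gem file is a plain tar archive containing metadata.gz and data.tar.gz.
# After unpacking it (e.g. `tar -xf cloudtasker-0.10.rc4.gem`), the digests
# below can be compared against the values listed in checksums.yaml.
%w[metadata.gz data.tar.gz].each do |file|
  puts "#{file}: #{Digest::SHA256.file(file).hexdigest}"
end
```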
data/.github/workflows/test.yml
ADDED
@@ -0,0 +1,41 @@
+name: Test
+
+on:
+  push:
+    branches: [ master ]
+  pull_request:
+    branches: [ master ]
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        ruby:
+          - '2.5.x'
+          - '2.6.x'
+        appraisal:
+          - 'google-cloud-tasks-1.0'
+          - 'google-cloud-tasks-1.1'
+          - 'google-cloud-tasks-1.2'
+          - 'google-cloud-tasks-1.3'
+          - 'rails-5.2'
+          - 'rails-6.0'
+    steps:
+      - name: Setup System
+        run: sudo apt-get install libsqlite3-dev
+      - uses: actions/checkout@v2
+      - uses: zhulik/redis-action@1.1.0
+      - name: Set up Ruby 2.6
+        uses: actions/setup-ruby@v1
+        with:
+          ruby-version: ${{ matrix.ruby }}
+      - name: Build and test with Rake
+        env:
+          APPRAISAL_CONTEXT: ${{ matrix.appraisal }}
+        run: |
+          gem install bundler
+          bundle install --jobs 4 --retry 3
+          bundle exec rubocop
+          bundle exec appraisal ${APPRAISAL_CONTEXT} bundle
+          bundle exec appraisal ${APPRAISAL_CONTEXT} rspec
data/.rubocop.yml
CHANGED
data/CHANGELOG.md
CHANGED
@@ -1,5 +1,45 @@
 # Changelog
 
+## [v0.9.2](https://github.com/keypup-io/cloudtasker/tree/v0.9.2) (2020-03-04)
+
+[Full Changelog](https://github.com/keypup-io/cloudtasker/compare/v0.9.1...v0.9.2)
+
+**Fixed bugs:**
+- Cloud Task: ignore "not found" errors when trying to delete an already deleted task.
+
+## [v0.9.1](https://github.com/keypup-io/cloudtasker/tree/v0.9.1) (2020-02-11)
+
+[Full Changelog](https://github.com/keypup-io/cloudtasker/compare/v0.9.0...v0.9.1)
+
+**Fixed bugs:**
+- Cloud Task: raise `Cloudtasker::MaxTaskSizeExceededError` if the job payload exceeds 100 KB. This is mainly to have production parity in development when running the local processing server.
+
+## [v0.9.0](https://github.com/keypup-io/cloudtasker/tree/v0.9.0) (2020-01-23)
+
+[Full Changelog](https://github.com/keypup-io/cloudtasker/compare/v0.8.2...v0.9.0)
+
+**Fixed bugs:**
+- Cloud Task: Base64-encode the task body to support UTF-8 characters (e.g. emojis).
+- Redis: restrict to one connection (class level) to avoid too many DNS lookups.
+
+**Migration**
+For Sinatra applications please update your Cloudtasker controller according to [this diff](https://github.com/keypup-io/cloudtasker/commit/311fa8f9beec91fbae012164a25b2ee6e261a2e4#diff-c2a0ea6c6e6c31c749d2e1acdc574f0f).
+
+## [v0.8.2](https://github.com/keypup-io/cloudtasker/tree/v0.8.2) (2019-12-05)
+
+[Full Changelog](https://github.com/keypup-io/cloudtasker/compare/v0.8.1...v0.8.2)
+
+**Fixed bugs:**
+- Config: do not add the processor host to `Rails.application.config.hosts` if that list is originally empty.
+
+## [v0.8.1](https://github.com/keypup-io/cloudtasker/tree/v0.8.1) (2019-12-03)
+
+[Full Changelog](https://github.com/keypup-io/cloudtasker/compare/v0.8.0...v0.8.1)
+
+**Fixed bugs:**
+- Local dev server: ensure the job queue name is kept when a task is retried.
+- Rails/Controller: bypass the Rails munge logic to preserve nil values inside job arguments.
+
 ## [v0.8.0](https://github.com/keypup-io/cloudtasker/tree/v0.8.0) (2019-11-27)
 
 [Full Changelog](https://github.com/keypup-io/cloudtasker/compare/v0.7.0...v0.8.0)
data/README.md
CHANGED
@@ -1,4 +1,4 @@
-
+![Build Status](https://github.com/keypup-io/cloudtasker/workflows/Test/badge.svg) [![Gem Version](https://badge.fury.io/rb/cloudtasker.svg)](https://badge.fury.io/rb/cloudtasker)
 
 # Cloudtasker
 
@@ -224,7 +224,7 @@ Cloudtasker.configure do |config|
 #
 # config.max_retries = 10
 
-#
+#
 # Specify the redis connection hash.
 #
 # This is ONLY required in development for the Cloudtasker local server and in
@@ -235,6 +235,26 @@ Cloudtasker.configure do |config|
 # Default: redis-rb connects to redis://127.0.0.1:6379/0
 #
 # config.redis = { url: 'redis://localhost:6379/5' }
+
+#
+# Set to true to store job arguments in Redis instead of sending arguments as part
+# of the job payload to Google Cloud Tasks.
+#
+# This is useful if you expect to process jobs with payloads exceeding 100 KB, which
+# is the limit enforced by Google Cloud Tasks.
+#
+# You can set this configuration parameter to a KB value if you want to store job
+# args in Redis only if the JSONified arguments payload exceeds that threshold.
+#
+# Supported since: v0.10.rc1
+#
+# Default: false
+#
+# Store all job payloads in Redis:
+# config.store_payloads_in_redis = true
+#
+# Store only job payloads exceeding 50 KB in Redis:
+# config.store_payloads_in_redis = 50
 end
 ```
 
@@ -485,7 +505,7 @@ See the [Cloudtasker::Worker class](lib/cloudtasker/worker.rb) for more informat
 
 ## Error Handling
 
-Jobs failing will automatically return an HTTP error to Cloud Task and trigger a retry at a later time. The number of retries Cloud Task will
+Jobs failing will automatically return an HTTP error to Cloud Task and trigger a retry at a later time. The number of retries Cloud Task will perform depends on the configuration of your queue in Cloud Tasks.
 
 ### HTTP Error codes
 
@@ -531,6 +551,8 @@ By default jobs are retried 25 times - using an exponential backoff - before bei
 
 Note that the number of retries set on your Cloud Task queue should be many times higher than the number of retries configured in Cloudtasker because Cloud Task also includes failures to connect to your application. Ideally set the number of retries to `unlimited` in Cloud Tasks.
 
+**Note**: The `X-CloudTasks-TaskExecutionCount` header sent by Google Cloud Tasks, which reports the number of retries excluding `HTTP 503` (instance not reachable) failures, is currently bugged and remains at `0` at all times. Starting with `0.10.rc3`, Cloudtasker uses the `X-CloudTasks-TaskRetryCount` header to detect the number of retries. This header includes `HTTP 503` errors, which means that if your application is down at some point, jobs will fail and these failures will be counted toward the maximum number of retries. A [bug report](https://issuetracker.google.com/issues/154532072) has been raised with GCP to address this issue. Once fixed we will revert to using `X-CloudTasks-TaskExecutionCount` to avoid counting `HTTP 503` errors as job failures.
+
 E.g. Set max number of retries globally via the cloudtasker initializer.
 ```ruby
 # config/initializers/cloudtasker.rb
@@ -565,7 +587,6 @@ end
 ```
 
 
-
 ## Best practices building workers
 
 Below are recommendations and notes about creating workers.
@@ -635,7 +656,32 @@ If you enqueue this worker by omitting the second argument `MyWorker.perform_asy
 - The `time_at` argument will be ignored by the `unique-job` extension, meaning that job uniqueness will be only based on the `user_id` argument.
 
 ### Handling big job payloads
-
+Google Cloud Tasks enforces a limit of 100 KB on job payloads. Taking into account Cloudtasker authentication headers and meta information, this leaves ~85 KB of free space for JSONified job arguments.
+
+Any excessive job payload (> 100 KB) will raise a `Cloudtasker::MaxTaskSizeExceededError`, both in production and development mode.
+
+#### Option 1: Use Cloudtasker's optional support for payload storage in Redis
+**Supported since**: `0.10.rc1`
+
+Cloudtasker provides optional support for storing argument payloads in Redis instead of sending them to Google Cloud Tasks.
+
+To enable it simply put the following in your Cloudtasker initializer:
+```ruby
+# config/initializers/cloudtasker.rb

+
+Cloudtasker.configure do |config|
+  # Enable Redis support. Specify your redis connection
+  config.redis = { url: 'redis://localhost:6379/5' }
+
+  # Store all job payloads in Redis:
+  config.store_payloads_in_redis = true
+
+  # OR: store only job payloads exceeding 50 KB in Redis:
+  # config.store_payloads_in_redis = 50
+end
+```
+
+#### Option 2: Do it yourself solution
 
 If you feel that a job payload is going to get big, prefer to store the payload using a datastore (e.g. Redis) and pass a reference to the job to retrieve the payload inside your job `perform` method.
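To make the "do it yourself" option above concrete, here is a minimal sketch. The worker name, Redis key layout and TTL are illustrative assumptions, not part of the gem; only `include Cloudtasker::Worker` and `perform_async` come from the documented API.

```ruby
require 'json'
require 'redis'
require 'securerandom'

class BigPayloadWorker
  include Cloudtasker::Worker

  # Store the heavy payload in Redis and enqueue only a small reference.
  def self.enqueue_with_payload(payload)
    payload_id = SecureRandom.uuid
    redis.set("big_payload_worker/#{payload_id}", payload.to_json, ex: 24 * 3600)
    perform_async(payload_id)
  end

  # Fetch the payload back using the reference, then clean it up.
  def perform(payload_id)
    key = "big_payload_worker/#{payload_id}"
    data = JSON.parse(self.class.redis.get(key) || '{}')
    # ... do the actual work with `data` ...
  ensure
    self.class.redis.del(key)
  end

  def self.redis
    @redis ||= Redis.new # defaults to redis://127.0.0.1:6379/0
  end
end
```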
data/app/controllers/cloudtasker/worker_controller.rb
CHANGED
@@ -16,9 +16,6 @@ module Cloudtasker
     # Run a worker from a Cloud Task payload
     #
     def run
-      # Build payload
-      payload = JSON.parse(request.body.read).merge(job_retries: job_retries)
-
       # Process payload
       WorkerHandler.execute_from_payload!(payload)
       head :no_content
@@ -37,6 +34,27 @@ module Cloudtasker
 
     private
 
+    #
+    # Parse the request body and return the actual job
+    # payload.
+    #
+    # @return [Hash] The job payload
+    #
+    def payload
+      @payload ||= begin
+        # Get raw body
+        content = request.body.read
+
+        # Decode content if the body is Base64 encoded
+        if request.headers[Cloudtasker::Config::ENCODING_HEADER].to_s.downcase == 'base64'
+          content = Base64.decode64(content)
+        end
+
+        # Return content parsed as JSON and add job retries count
+        JSON.parse(content).merge(job_retries: job_retries)
+      end
+    end
+
     #
     # Extract the number of times this task failed at runtime.
     #
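For non-Rails setups (the changelog above mentions Sinatra applications updating their Cloudtasker controller), the same decoding logic can be sketched as follows. The route path and framework wiring are assumptions for illustration; only the `Content-Transfer-Encoding` header check and the Base64/JSON steps mirror the controller change above.

```ruby
# Hypothetical Sinatra-style endpoint mirroring the Rails controller change.
# Authentication checks are omitted for brevity.
require 'base64'
require 'json'
require 'sinatra'

post '/cloudtasker/run' do
  content = request.body.read

  # Cloud Task payloads created by Cloudtasker >= 0.9 are Base64 encoded
  # and flagged via the Content-Transfer-Encoding header.
  if request.env['HTTP_CONTENT_TRANSFER_ENCODING'].to_s.casecmp('base64').zero?
    content = Base64.decode64(content)
  end

  payload = JSON.parse(content)
  Cloudtasker::WorkerHandler.execute_from_payload!(payload)
  halt 204
end
```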
data/cloudtasker.gemspec
CHANGED
@@ -39,10 +39,10 @@ Gem::Specification.new do |spec|
   spec.add_development_dependency 'appraisal'
   spec.add_development_dependency 'bundler', '~> 2.0'
   spec.add_development_dependency 'github_changelog_generator'
-  spec.add_development_dependency 'rake', '
+  spec.add_development_dependency 'rake', '>= 12.3.3'
   spec.add_development_dependency 'rspec', '~> 3.0'
   spec.add_development_dependency 'rubocop', '0.76.0'
-  spec.add_development_dependency 'rubocop-rspec'
+  spec.add_development_dependency 'rubocop-rspec', '1.37.0'
   spec.add_development_dependency 'timecop'
   spec.add_development_dependency 'webmock'
 
data/gemfiles/rails_5.2.gemfile
CHANGED
data/gemfiles/rails_6.0.gemfile
CHANGED
data/lib/cloudtasker.rb
CHANGED
@@ -8,6 +8,7 @@ require 'cloudtasker/config'
 require 'cloudtasker/authentication_error'
 require 'cloudtasker/dead_worker_error'
 require 'cloudtasker/invalid_worker_error'
+require 'cloudtasker/max_task_size_exceeded_error'
 
 require 'cloudtasker/middleware/chain'
 require 'cloudtasker/authenticator'
data/lib/cloudtasker/backend/google_cloud_task.rb
CHANGED
@@ -82,6 +82,29 @@ module Cloudtasker
       Google::Protobuf::Timestamp.new.tap { |e| e.seconds = schedule_time.to_i }
     end
 
+    #
+    # Format the job payload sent to Cloud Tasks.
+    #
+    # @param [Hash] payload The worker payload.
+    #
+    # @return [Hash] The Cloud Task payload.
+    #
+    def self.format_task_payload(payload)
+      payload = JSON.parse(payload.to_json, symbolize_names: true) # deep dup
+
+      # Format schedule time to Google Protobuf timestamp
+      payload[:schedule_time] = format_schedule_time(payload[:schedule_time])
+
+      # Encode job content to support UTF-8. Google Cloud Task
+      # expects content to be ASCII-8BIT compatible (binary)
+      payload[:http_request][:headers] ||= {}
+      payload[:http_request][:headers][Cloudtasker::Config::CONTENT_TYPE_HEADER] = 'text/json'
+      payload[:http_request][:headers][Cloudtasker::Config::ENCODING_HEADER] = 'Base64'
+      payload[:http_request][:body] = Base64.encode64(payload[:http_request][:body])
+
+      payload
+    end
+
     #
     # Find a task by id.
     #
@@ -104,10 +127,7 @@ module Cloudtasker
     # @return [Cloudtasker::Backend::GoogleCloudTask, nil] The created task.
     #
     def self.create(payload)
-
-      payload = payload.merge(
-        schedule_time: format_schedule_time(payload[:schedule_time])
-      ).compact
+      payload = format_task_payload(payload)
 
       # Extract relative queue name
       relative_queue = payload.delete(:queue)
@@ -126,7 +146,7 @@ module Cloudtasker
     #
     def self.delete(id)
       client.delete_task(id)
-    rescue Google::Gax::RetryError
+    rescue Google::Gax::NotFoundError, Google::Gax::RetryError, GRPC::NotFound, Google::Gax::PermissionDeniedError
       nil
     end
 
data/lib/cloudtasker/backend/memory_task.rb
CHANGED
@@ -7,6 +7,7 @@ module Cloudtasker
     # Manage local tasks pushed to memory.
     # Used for testing.
     class MemoryTask
+      attr_accessor :job_retries
       attr_reader :id, :http_request, :schedule_time, :queue
 
       #
@@ -18,17 +19,6 @@ module Cloudtasker
         @queue ||= []
       end
 
-      #
-      # Return the workers currently in the queue.
-      #
-      # @param [String] worker_class_name Filter jobs on worker class name.
-      #
-      # @return [Array<Cloudtasker::Worker] The list of workers
-      #
-      def self.jobs(worker_class_name = nil)
-        all(worker_class_name).map(&:worker)
-      end
-
       #
       # Run all Tasks in the queue. Optionally filter which tasks to run based
       # on the worker class name.
@@ -116,11 +106,12 @@ module Cloudtasker
       # @param [Hash] http_request The HTTP request content.
       # @param [Integer] schedule_time When to run the task (Unix timestamp)
       #
-      def initialize(id:, http_request:, schedule_time: nil, queue: nil)
+      def initialize(id:, http_request:, schedule_time: nil, queue: nil, job_retries: 0)
         @id = id
         @http_request = http_request
         @schedule_time = Time.at(schedule_time || 0)
         @queue = queue
+        @job_retries = job_retries || 0
       end
 
       #
@@ -155,26 +146,20 @@ module Cloudtasker
         }
       end
 
-      #
-      # Return the worker attached to this task.
-      #
-      # @return [Cloudtasker::Worker] The task worker.
-      #
-      def worker
-        @worker ||= Worker.from_hash(payload)
-      end
-
       #
       # Execute the task.
       #
       # @return [Any] The return value of the worker perform method.
       #
       def execute
-
+        # Execute worker
+        resp = WorkerHandler.with_worker_handling(payload, &:execute)
+
+        # Delete task
         self.class.delete(id)
         resp
       rescue StandardError
-
+        self.job_retries += 1
       end
 
       #
data/lib/cloudtasker/backend/redis_task.rb
CHANGED
@@ -248,7 +248,7 @@ module Cloudtasker
         req = Net::HTTP::Post.new(uri.path, http_request[:headers])
 
         # Add retries header
-        req[
+        req[Cloudtasker::Config::RETRY_HEADER] = retries
 
         # Set job payload
         req.body = http_request[:body]
data/lib/cloudtasker/cloud_task.rb
CHANGED
@@ -48,6 +48,8 @@ module Cloudtasker
     # @return [Cloudtasker::CloudTask] The created task.
     #
     def self.create(payload)
+      raise MaxTaskSizeExceededError if payload.to_json.bytesize > Config::MAX_TASK_SIZE
+
       resp = backend.create(payload)&.to_h
       resp ? new(resp) : nil
     end
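Given the new size guard in `CloudTask.create` above, enqueuing code can anticipate the error. A short hedged sketch (the worker name and the `store_and_reference` helper are illustrative assumptions, not part of the gem):

```ruby
# Safely enqueue a job whose arguments may exceed the 100 KB Cloud Tasks limit.
def enqueue_safely(args)
  MyWorker.perform_async(args)
rescue Cloudtasker::MaxTaskSizeExceededError
  # Payload above 100 KB: either enable config.store_payloads_in_redis
  # or pass a reference to the data instead of the data itself.
  MyWorker.perform_async(store_and_reference(args))
end
```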
data/lib/cloudtasker/config.rb
CHANGED
@@ -5,12 +5,34 @@ require 'logger'
 module Cloudtasker
   # Holds cloudtasker configuration. See Cloudtasker#configure
   class Config
-    attr_accessor :redis
+    attr_accessor :redis, :store_payloads_in_redis
     attr_writer :secret, :gcp_location_id, :gcp_project_id,
                 :gcp_queue_prefix, :processor_path, :logger, :mode, :max_retries
 
+    # Max Cloud Task size in bytes
+    MAX_TASK_SIZE = 100 * 1024 # 100 KB
+
     # Retry header in Cloud Task responses
-
+    #
+    # TODO: use 'X-CloudTasks-TaskExecutionCount' instead of 'X-CloudTasks-TaskRetryCount'
+    # 'X-CloudTasks-TaskExecutionCount' is currently bugged and remains at 0 even on retries.
+    #
+    # See bug: https://issuetracker.google.com/issues/154532072
+    #
+    # Definitions:
+    # X-CloudTasks-TaskRetryCount: total number of retries (including 504 "instance unreachable")
+    # X-CloudTasks-TaskExecutionCount: number of non-503 retries (= actual number of job failures)
+    #
+    RETRY_HEADER = 'X-CloudTasks-TaskRetryCount'
+
+    # Content-Transfer-Encoding header in Cloud Task responses
+    ENCODING_HEADER = 'Content-Transfer-Encoding'
+
+    # Content Type
+    CONTENT_TYPE_HEADER = 'Content-Type'
+
+    # Authorization header
+    AUTHORIZATION_HEADER = 'Authorization'
 
     # Default values
     DEFAULT_LOCATION_ID = 'us-east1'
@@ -21,7 +43,15 @@ module Cloudtasker
     DEFAULT_QUEUE_CONCURRENCY = 10
     DEFAULT_QUEUE_RETRIES = -1 # unlimited
 
-    # The number of times jobs will be attempted before declaring them dead
+    # The number of times jobs will be attempted before declaring them dead.
+    #
+    # With the default retry configuration (maxDoublings = 16 and minBackoff = 0.100s)
+    # it means that jobs will be declared dead after 20h of consecutive failing.
+    #
+    # Note that this configuration parameter is internal to Cloudtasker and does not
+    # affect the Cloud Task queue configuration. The number of retries configured
+    # on the Cloud Task queue should be higher than the number below to also cover
+    # failures due to the instance being unreachable.
     DEFAULT_MAX_RETRY_ATTEMPTS = 25
 
     PROCESSOR_HOST_MISSING = <<~DOC
@@ -42,6 +72,21 @@ module Cloudtasker
       Please specify a secret in the cloudtasker initializer or add Rails secret_key_base in your credentials
     DOC
 
+    #
+    # Return the threshold above which job arguments must be stored
+    # in Redis instead of being sent to the backend as part of the job
+    # payload.
+    #
+    # Return nil if redis payload storage is disabled.
+    #
+    # @return [Integer, nil] The threshold above which payloads will be stored in Redis.
+    #
+    def redis_payload_storage_threshold
+      return nil unless store_payloads_in_redis
+
+      store_payloads_in_redis.respond_to?(:to_i) ? store_payloads_in_redis.to_i : 0
+    end
+
     #
     # The number of times jobs will be retried. This number of
     # retries does not include failures due to the application being unreachable.
@@ -102,7 +147,10 @@ module Cloudtasker
       @processor_host = val
 
       # Check if Rails supports host filtering
-      return unless val &&
+      return unless val &&
+                    defined?(Rails) &&
+                    Rails.application.config.respond_to?(:hosts) &&
+                    Rails.application.config.hosts&.any?
 
       # Add processor host to the list of authorized hosts
       Rails.application.config.hosts << val.gsub(%r{https?://}, '')
data/lib/cloudtasker/local_server.rb
CHANGED
@@ -12,6 +12,9 @@ module Cloudtasker
     # Default number of threads to allocate to process a specific queue
     QUEUE_CONCURRENCY = 1
 
+    # Job Polling. How frequently to poll jobs in redis.
+    JOB_POLLING_FREQUENCY = 0.5 # seconds
+
     #
     # Stop the local server.
     #
@@ -46,7 +49,7 @@ module Cloudtasker
       @start ||= Thread.new do
         until @done
           queues.each { |(n, c)| process_jobs(n, c) }
-          sleep
+          sleep JOB_POLLING_FREQUENCY
         end
         Cloudtasker.logger.info('[Cloudtasker/Server] Local server exiting...')
       end
@@ -82,7 +85,7 @@ module Cloudtasker
         # Deliver task
         begin
           Thread.current['task'].deliver
-        rescue Errno::ECONNREFUSED => e
+        rescue Errno::EBADF, Errno::ECONNREFUSED => e
           raise(e) unless Thread.current['attempts'] < 3
 
           # Retry on connection error, in case the web server is not
data/lib/cloudtasker/max_task_size_exceeded_error.rb
ADDED
@@ -0,0 +1,14 @@
+# frozen_string_literal: true
+
+module Cloudtasker
+  # Handle Cloud Task size quota
+  # See: https://cloud.google.com/appengine/quotas#Task_Queue
+  #
+  class MaxTaskSizeExceededError < StandardError
+    MSG = 'The size of Cloud Tasks must not exceed 100KB'
+
+    def initialize(msg = MSG)
+      super
+    end
+  end
+end
data/lib/cloudtasker/redis_client.rb
CHANGED
@@ -7,6 +7,12 @@ module Cloudtasker
   class RedisClient
     # Suffix added to cache keys when locking them
     LOCK_KEY_PREFIX = 'cloudtasker/lock'
+    LOCK_DURATION = 2 # seconds
+    LOCK_WAIT_DURATION = 0.03 # seconds
+
+    def self.client
+      @client ||= Redis.new(Cloudtasker.config.redis || {})
+    end
 
     #
     # Return the underlying redis client.
@@ -14,7 +20,7 @@ module Cloudtasker
     # @return [Redis] The redis client.
     #
     def client
-      @client ||=
+      @client ||= self.class.client
     end
 
     #
@@ -47,6 +53,9 @@ module Cloudtasker
     #
     # Acquire a lock on a cache entry.
     #
+    # Locks are enforced to be short-lived (2s).
+    # The yielded block should limit its logic to short operations (e.g. redis get/set).
+    #
     # @example
     #   redis = RedisClient.new
     #   redis.with_lock('foo')
@@ -61,7 +70,7 @@ module Cloudtasker
 
       # Wait to acquire lock
       lock_key = [LOCK_KEY_PREFIX, cache_key].join('/')
-
+      sleep(LOCK_WAIT_DURATION) until client.set(lock_key, true, nx: true, ex: LOCK_DURATION)
 
       # yield content
       yield
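Since the lock above now auto-expires after `LOCK_DURATION` (2 s), blocks passed to `with_lock` should stay short. A usage sketch based on the `@example` shown in the diff; the cache key and counter logic are illustrative, while `with_lock`, `fetch` and `write` are the client methods visible in this release:

```ruby
redis = Cloudtasker::RedisClient.new

# Keep the critical section short: the lock auto-expires after 2 seconds.
redis.with_lock('cloudtasker/my-counter') do
  current = redis.fetch('cloudtasker/my-counter').to_i
  redis.write('cloudtasker/my-counter', current + 1)
end
```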
data/lib/cloudtasker/testing.rb
CHANGED
@@ -114,10 +114,10 @@ module Cloudtasker
       #
       # Return all jobs related to this worker class.
       #
-      # @return [Array<Cloudtasker::
+      # @return [Array<Cloudtasker::Backend::MemoryTask>] The list of tasks
       #
       def jobs
-        Backend::MemoryTask.
+        Backend::MemoryTask.all(to_s)
      end
 
       #
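A hedged test sketch for the `jobs` helper shown above. It assumes the gem's fake test mode (`Cloudtasker::Testing.fake!`, as described in the project README) and an illustrative worker class; note that `jobs` now returns `Backend::MemoryTask` records rather than worker instances:

```ruby
# spec/workers/big_payload_worker_spec.rb (illustrative)
require 'cloudtasker/testing'

RSpec.describe BigPayloadWorker do
  # Assumed helper from the gem's testing docs: jobs are queued in memory
  # instead of being pushed to Google Cloud Tasks.
  before { Cloudtasker::Testing.fake! }

  it 'enqueues one job' do
    expect { described_class.perform_async(123) }
      .to change { described_class.jobs.size }.by(1)
  end
end
```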
data/lib/cloudtasker/version.rb
CHANGED
data/lib/cloudtasker/worker.rb
CHANGED
@@ -7,7 +7,8 @@ module Cloudtasker
     def self.included(base)
       base.extend(ClassMethods)
       base.attr_writer :job_queue
-      base.attr_accessor :job_args, :job_id, :job_meta, :job_reenqueued, :job_retries
+      base.attr_accessor :job_args, :job_id, :job_meta, :job_reenqueued, :job_retries,
+                         :perform_started_at, :perform_ended_at
     end
 
     #
@@ -181,21 +182,19 @@ module Cloudtasker
     #
     def execute
       logger.info('Starting job...')
-      resp = Cloudtasker.config.server_middleware.invoke(self) do
-        begin
-          perform(*job_args)
-        rescue StandardError => e
-          try(:on_error, e)
-          return raise(e) unless job_dead?
 
-
-
-
-
-
-      end
-      logger.info('Job done')
+      # Perform job logic
+      resp = execute_middleware_chain
+
+      # Log job completion and return result
+      logger.info("Job done after #{job_duration}s") { { duration: job_duration } }
       resp
+    rescue DeadWorkerError => e
+      logger.info("Job dead after #{job_duration}s and #{job_retries} retries") { { duration: job_duration } }
+      raise(e)
+    rescue StandardError => e
+      logger.info("Job failed after #{job_duration}s") { { duration: job_duration } }
+      raise(e)
     end
 
     #
@@ -286,5 +285,46 @@ module Cloudtasker
     def job_dead?
       job_retries >= Cloudtasker.config.max_retries
     end
+
+    #
+    # Return the time taken (in seconds) to perform the job. This duration
+    # includes the middlewares and the actual perform method.
+    #
+    # @return [Float] The time taken in seconds as a floating point number.
+    #
+    def job_duration
+      return 0.0 unless perform_ended_at && perform_started_at
+
+      (perform_ended_at - perform_started_at).ceil(3)
+    end
+
+    #=============================
+    # Private
+    #=============================
+    private
+
+    #
+    # Execute the worker perform method through the middleware chain.
+    #
+    # @return [Any] The result of the perform method.
+    #
+    def execute_middleware_chain
+      self.perform_started_at = Time.now
+
+      Cloudtasker.config.server_middleware.invoke(self) do
+        begin
+          perform(*job_args)
+        rescue StandardError => e
+          try(:on_error, e)
+          return raise(e) unless job_dead?
+
+          # Flag job as dead
+          try(:on_dead, e)
+          raise(DeadWorkerError, e)
+        end
+      end
+    ensure
+      self.perform_ended_at = Time.now
+    end
   end
 end
data/lib/cloudtasker/worker_handler.rb
CHANGED
@@ -10,16 +10,113 @@ module Cloudtasker
     # Algorithm used to sign the verification token
     JWT_ALG = 'HS256'
 
+    # Sub-namespace to use for redis keys when storing
+    # payloads in Redis
+    REDIS_PAYLOAD_NAMESPACE = 'payload'
+
+    # Arg payload cache keys get expired instead of deleted
+    # in case jobs are re-processed due to connection interruption
+    # (job is successful but Cloud Task considers it as failed due
+    # to network interruption)
+    ARGS_PAYLOAD_CLEANUP_TTL = 3600 # 1 hour
+
+    #
+    # Return a namespaced key
+    #
+    # @param [String, Symbol] val The key to namespace
+    #
+    # @return [String] The namespaced key.
+    #
+    def self.key(val)
+      return nil if val.nil?
+
+      [to_s.underscore, val.to_s].join('/')
+    end
+
+    #
+    # Return the cloudtasker redis client
+    #
+    # @return [Cloudtasker::RedisClient] The cloudtasker redis client.
+    #
+    def self.redis
+      @redis ||= begin
+        require 'cloudtasker/redis_client'
+        RedisClient.new
+      end
+    end
+
     #
     # Execute a task worker from a task payload
     #
-    # @param [Hash]
+    # @param [Hash] input_payload The Cloud Task payload.
     #
     # @return [Any] The return value of the worker perform method.
    #
-    def self.execute_from_payload!(
+    def self.execute_from_payload!(input_payload)
+      with_worker_handling(input_payload, &:execute)
+    end
+
+    # TODO: do not delete redis payload if job has been re-enqueued
+    # worker.job_reenqueued
+    #
+    # Idea: change with_worker_handling to with_worker_handling and build the worker
+    # inside the with_worker_handling block.
+    #
+    # Local middleware used to retrieve the job arg payload from cache
+    # if an arg payload reference is present.
+    #
+    # @param [Hash] payload The full job payload
+    #
+    # @yield [Hash] The actual payload to use to process the job.
+    #
+    # @return [Any] The block result
+    #
+    def self.with_worker_handling(input_payload)
+      # Extract payload information
+      extracted_payload = extract_payload(input_payload)
+      payload = extracted_payload[:payload]
+      args_payload_key = extracted_payload[:args_payload_key]
+
+      # Build worker
       worker = Cloudtasker::Worker.from_hash(payload) || raise(InvalidWorkerError)
-
+
+      # Yield worker
+      resp = yield(worker)
+
+      # Schedule args payload deletion after job has been successfully processed
+      # Note: we expire the key instead of deleting it immediately in case the job
+      # succeeds but is considered as failed by Cloud Task due to network interruption.
+      # In such case the job is likely to be re-processed soon after.
+      redis.expire(args_payload_key, ARGS_PAYLOAD_CLEANUP_TTL) if args_payload_key && !worker.job_reenqueued
+
+      resp
+    rescue DeadWorkerError => e
+      # Delete stored args payload if job is dead
+      redis.expire(args_payload_key, ARGS_PAYLOAD_CLEANUP_TTL) if args_payload_key
+      raise(e)
+    end
+
+    #
+    # Return the argument payload key (if present) along with the actual worker payload.
+    #
+    # If the payload was stored in Redis then retrieve it.
+    #
+    # @return [Hash] Hash
+    #
+    def self.extract_payload(input_payload)
+      # Get references
+      payload = JSON.parse(input_payload.to_json, symbolize_names: true)
+      args_payload_id = payload.delete(:job_args_payload_id)
+      args_payload_key = args_payload_id ? key([REDIS_PAYLOAD_NAMESPACE, args_payload_id].join('/')) : nil
+
+      # Retrieve the actual worker args payload
+      args_payload = args_payload_key ? redis.fetch(args_payload_key) : payload[:job_args]
+
+      # Return the payload
+      {
+        args_payload_key: args_payload_key,
+        payload: payload.merge(job_args: args_payload)
+      }
     end
 
     #
@@ -42,8 +139,8 @@ module Cloudtasker
           http_method: 'POST',
           url: Cloudtasker.config.processor_url,
           headers: {
-
-
+            Cloudtasker::Config::CONTENT_TYPE_HEADER => 'application/json',
+            Cloudtasker::Config::AUTHORIZATION_HEADER => "Bearer #{Authenticator.verification_token}"
           },
           body: worker_payload.to_json
         },
@@ -51,6 +148,47 @@ module Cloudtasker
       }
     end
 
+    #
+    # Return true if the worker args must be stored in Redis.
+    #
+    # @return [Boolean] True if the payload must be stored in redis.
+    #
+    def store_payload_in_redis?
+      Cloudtasker.config.redis_payload_storage_threshold &&
+        worker.job_args.to_json.bytesize > (Cloudtasker.config.redis_payload_storage_threshold * 1024)
+    end
+
+    #
+    # Return the payload to use for job arguments. This payload
+    # is merged inside the #worker_payload.
+    #
+    # If the argument payload must be stored in Redis then returns:
+    # `{ job_args_payload_id: <worker_id> }`
+    #
+    # If the argument payload must be natively handled by the backend
+    # then returns:
+    # `{ job_args: [...] }`
+    #
+    # @return [Hash] The worker args payload.
+    #
+    def worker_args_payload
+      @worker_args_payload ||= begin
+        if store_payload_in_redis?
+          # Store payload in Redis
+          self.class.redis.write(
+            self.class.key([REDIS_PAYLOAD_NAMESPACE, worker.job_id].join('/')),
+            worker.job_args
+          )
+
+          # Return reference to args payload
+          { job_args_payload_id: worker.job_id }
+        else
+          # Return regular job args payload
+          { job_args: worker.job_args }
+        end
+      end
+    end
+
     #
     # Return the task payload that Google Task will eventually
     # send to the job processor.
@@ -68,9 +206,8 @@ module Cloudtasker
         worker: worker.job_class_name,
         job_queue: worker.job_queue,
         job_id: worker.job_id,
-        job_args: worker.job_args,
         job_meta: worker.job_meta.to_h
-      }
+      }.merge(worker_args_payload)
     end
 
     #
data/lib/cloudtasker/worker_logger.rb
CHANGED
@@ -59,7 +59,7 @@ module Cloudtasker
     # @return [String] The formatted log message
     #
     def formatted_message(msg)
-      "[Cloudtasker][#{worker.job_id}] #{msg}"
+      "[Cloudtasker][#{worker.class}][#{worker.job_id}] #{msg}"
     end
 
     #
@@ -141,7 +141,8 @@ module Cloudtasker
     # @param [Proc] &block Optional context block.
     #
     def log_message(level, msg, &block)
-
+      # Merge log-specific context into worker-specific context
+      payload_block = -> { log_block.call.merge(block&.call || {}) }
 
      # ActiveSupport::Logger does not support passing a payload through a block on top
      # of a message.
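Tying the worker and logger changes above together: the `on_error` / `on_dead` hooks invoked by `execute_middleware_chain` and the block-based log payloads merged by `log_message` can be used like this. The worker body is an illustrative sketch; the hook names, `job_retries` accessor and `logger` helper are the ones visible in this release.

```ruby
class SyncUserWorker
  include Cloudtasker::Worker

  def perform(user_id)
    logger.info('Syncing user') { { user_id: user_id } } # block payload merged into the log context
    # ... sync logic ...
  end

  # Invoked on each failed attempt (see execute_middleware_chain above)
  def on_error(error)
    logger.info("Attempt #{job_retries} failed: #{error.class}")
  end

  # Invoked once the job exceeds max_retries and is declared dead
  def on_dead(error)
    logger.error("Giving up on user sync: #{error.message}")
  end
end
```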
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: cloudtasker
 version: !ruby/object:Gem::Version
-  version: 0.
+  version: 0.10.rc4
 platform: ruby
 authors:
 - Arnaud Lachaume
 autorequire:
 bindir: exe
 cert_chain: []
-date:
+date: 2020-05-02 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: activesupport
@@ -126,16 +126,16 @@ dependencies:
   name: rake
   requirement: !ruby/object:Gem::Requirement
     requirements:
-    - - "
+    - - ">="
      - !ruby/object:Gem::Version
-       version:
+       version: 12.3.3
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
-    - - "
+    - - ">="
      - !ruby/object:Gem::Version
-       version:
+       version: 12.3.3
 - !ruby/object:Gem::Dependency
   name: rspec
   requirement: !ruby/object:Gem::Requirement
@@ -168,16 +168,16 @@ dependencies:
   name: rubocop-rspec
   requirement: !ruby/object:Gem::Requirement
     requirements:
-    - -
+    - - '='
      - !ruby/object:Gem::Version
-       version:
+       version: 1.37.0
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
-    - -
+    - - '='
      - !ruby/object:Gem::Version
-       version:
+       version: 1.37.0
 - !ruby/object:Gem::Dependency
   name: timecop
   requirement: !ruby/object:Gem::Requirement
@@ -256,10 +256,10 @@ executables:
 extensions: []
 extra_rdoc_files: []
 files:
+- ".github/workflows/test.yml"
 - ".gitignore"
 - ".rspec"
 - ".rubocop.yml"
-- ".travis.yml"
 - Appraisals
 - CHANGELOG.md
 - CODE_OF_CONDUCT.md
@@ -320,6 +320,7 @@ files:
 - lib/cloudtasker/engine.rb
 - lib/cloudtasker/invalid_worker_error.rb
 - lib/cloudtasker/local_server.rb
+- lib/cloudtasker/max_task_size_exceeded_error.rb
 - lib/cloudtasker/meta_store.rb
 - lib/cloudtasker/middleware/chain.rb
 - lib/cloudtasker/redis_client.rb
@@ -363,12 +364,11 @@ required_ruby_version: !ruby/object:Gem::Requirement
     version: '0'
 required_rubygems_version: !ruby/object:Gem::Requirement
   requirements:
-  - - "
+  - - ">"
    - !ruby/object:Gem::Version
-     version:
+     version: 1.3.1
 requirements: []
-
-rubygems_version: 2.7.9
+rubygems_version: 3.0.0
 signing_key:
 specification_version: 4
 summary: Background jobs for Ruby using Google Cloud Tasks (beta)
data/.travis.yml
DELETED
@@ -1,16 +0,0 @@
|
|
1
|
-
---
|
2
|
-
language: ruby
|
3
|
-
cache: bundler
|
4
|
-
rvm:
|
5
|
-
- 2.5.5
|
6
|
-
services:
|
7
|
-
- redis-server
|
8
|
-
before_install: gem install bundler -v 2.0.2
|
9
|
-
before_script: bundle exec rubocop
|
10
|
-
gemfile:
|
11
|
-
- gemfiles/google_cloud_tasks_1.0.gemfile
|
12
|
-
- gemfiles/google_cloud_tasks_1.1.gemfile
|
13
|
-
- gemfiles/google_cloud_tasks_1.2.gemfile
|
14
|
-
- gemfiles/google_cloud_tasks_1.3.gemfile
|
15
|
-
- gemfiles/rails_5.2.gemfile
|
16
|
-
- gemfiles/rails_6.0.gemfile
|