pallets 0.7.0 → 0.8.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.travis.yml +4 -4
- data/CHANGELOG.md +8 -0
- data/examples/config_savvy.rb +3 -0
- data/lib/pallets.rb +1 -8
- data/lib/pallets/backends/base.rb +3 -3
- data/lib/pallets/backends/redis.rb +6 -4
- data/lib/pallets/backends/scripts/run_workflow.lua +2 -3
- data/lib/pallets/backends/scripts/save.lua +7 -10
- data/lib/pallets/cli.rb +2 -0
- data/lib/pallets/configuration.rb +10 -0
- data/lib/pallets/graph.rb +5 -12
- data/lib/pallets/version.rb +1 -1
- data/lib/pallets/worker.rb +1 -1
- data/lib/pallets/workflow.rb +15 -5
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 46a631b6e48f2f1c4efe5bce09c5d74fa2cb064a78d5e546492c6c8ce1dec843
+  data.tar.gz: 61f6414e16941de41defb30ac8dc9121703f75251e47422239c311a8b7749787
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 4b6f77bd93576dc7dc2c5cf945d52c425a63178740c12fa958f0c18c4ca870da7b02d303c7b8e871504d84a13461f79edc52da2869069322f5a81943bdbd79df
+  data.tar.gz: a2a71f4a2343927a94871523b7475f587bb64181e9c6a4654a40111453e2fc2f20e82a02da44adbc32865f68c53a95dd3a9c6f0b641cfd85c7939f1e06d3521f
data/.travis.yml
CHANGED
data/CHANGELOG.md
CHANGED
@@ -6,6 +6,14 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ## [Unreleased]
 
+## [0.8.0] - 2020-06-09
+### Added
+- sync output in CLI (#49)
+- support for configuring custom loggers (#50)
+
+### Changed
+- improve job scheduling using jobmasks (#52)
+
 ## [0.7.0] - 2020-01-19
 ### Added
 - support for Ruby 2.7 (#46)
data/examples/config_savvy.rb
CHANGED
@@ -1,3 +1,4 @@
+require 'logger'
 require 'pallets'
 
 class AnnounceProcessing
@@ -31,6 +32,8 @@ Pallets.configure do |c|
   # given up. Retry times are exponential and happen after: 7, 22, 87, 262, ...
   c.max_failures = 5
 
+  # Custom loggers can be used too
+  c.logger = Logger.new(STDOUT)
   # Job execution can be wrapped with middleware to provide custom logic.
   # Anything that responds to `call` would do
   c.middleware << AnnounceProcessing
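The example above sets `c.logger = Logger.new(STDOUT)`, but any Logger-compatible object should work. A minimal sketch; the log path and level below are invented for illustration, not gem defaults:

```ruby
# Sketch only: route Pallets output to a file-backed logger at WARN level.
require 'logger'
require 'pallets'

Pallets.configure do |c|
  c.logger = Logger.new('log/pallets.log', level: Logger::WARN)
end
```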
data/lib/pallets.rb
CHANGED
@@ -57,13 +57,6 @@ module Pallets
   end
 
   def self.logger
-    @logger ||= Pallets::Logger.new(STDOUT,
-      level: Pallets::Logger::INFO,
-      formatter: Pallets::Logger::Formatters::Pretty.new
-    )
-  end
-
-  def self.logger=(logger)
-    @logger = logger
+    @logger ||= configuration.logger
   end
 end
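After this change `Pallets.logger` simply memoizes whatever `configuration.logger` returns, so a logger set via `Pallets.configure` is the one used everywhere. A quick sanity check, assuming configuration happens before the first `Pallets.logger` call:

```ruby
require 'logger'
require 'pallets'

custom = Logger.new($stderr)
Pallets.configure { |c| c.logger = custom }

# Pallets.logger caches configuration.logger on first access,
# so configure the logger before anything logs.
Pallets.logger.equal?(custom) # => true
```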
data/lib/pallets/backends/base.rb
CHANGED
@@ -6,12 +6,12 @@ module Pallets
       raise NotImplementedError
     end
 
-    def get_context(
+    def get_context(wfid)
       raise NotImplementedError
     end
 
     # Saves a job after successfully processing it
-    def save(
+    def save(wfid, jid, job, context_buffer)
       raise NotImplementedError
     end
 
@@ -29,7 +29,7 @@ module Pallets
       raise NotImplementedError
     end
 
-    def run_workflow(
+    def run_workflow(wfid, jobs, jobmasks, context)
       raise NotImplementedError
     end
   end
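These are the backend methods a custom adapter has to implement in 0.8.0. A hypothetical no-op subclass, assuming the `Pallets::Backends::Base` constant implied by the file path:

```ruby
require 'pallets'

module Pallets
  module Backends
    # Hypothetical stub, not part of the gem: it only mirrors the 0.8.0 signatures.
    class NullBackend < Base
      def get_context(wfid)
        {}
      end

      # jid is new in 0.8.0: it identifies the finished job whose jobmask should be applied
      def save(wfid, jid, job, context_buffer)
      end

      # jobs and jobmasks come from Workflow#prepare_jobs (see workflow.rb below)
      def run_workflow(wfid, jobs, jobmasks, context)
      end
    end
  end
end
```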
data/lib/pallets/backends/redis.rb
CHANGED
@@ -9,6 +9,7 @@ module Pallets
     RETRY_SET_KEY = 'retry-set'
     GIVEN_UP_SET_KEY = 'given-up-set'
     WORKFLOW_QUEUE_KEY = 'workflow-queue:%s'
+    JOBMASK_KEY = 'jobmask:%s'
     CONTEXT_KEY = 'context:%s'
     REMAINING_KEY = 'remaining:%s'
 
@@ -41,11 +42,11 @@ module Pallets
       end
     end
 
-    def save(wfid, job, context_buffer)
+    def save(wfid, jid, job, context_buffer)
       @pool.execute do |client|
         client.evalsha(
           @scripts['save'],
-          [WORKFLOW_QUEUE_KEY % wfid, QUEUE_KEY, RELIABILITY_QUEUE_KEY, RELIABILITY_SET_KEY, CONTEXT_KEY % wfid, REMAINING_KEY % wfid],
+          [WORKFLOW_QUEUE_KEY % wfid, QUEUE_KEY, RELIABILITY_QUEUE_KEY, RELIABILITY_SET_KEY, CONTEXT_KEY % wfid, REMAINING_KEY % wfid, JOBMASK_KEY % jid],
           context_buffer.to_a << job
         )
       end
@@ -81,13 +82,14 @@ module Pallets
       end
     end
 
-    def run_workflow(wfid,
+    def run_workflow(wfid, jobs, jobmasks, context_buffer)
       @pool.execute do |client|
         client.multi do
+          jobmasks.each { |jid, jobmask| client.zadd(JOBMASK_KEY % jid, jobmask) }
          client.evalsha(
            @scripts['run_workflow'],
            [WORKFLOW_QUEUE_KEY % wfid, QUEUE_KEY, REMAINING_KEY % wfid],
-
+           jobs
          )
          client.hmset(CONTEXT_KEY % wfid, *context_buffer.to_a) unless context_buffer.empty?
        end
data/lib/pallets/backends/scripts/run_workflow.lua
CHANGED
@@ -6,9 +6,8 @@ redis.call("SET", KEYS[3], eta)
 
 -- Queue jobs that are ready to be processed (their score is 0) and
 -- remove queued jobs from the sorted set
-local
-if
-  local work = redis.call("ZRANGEBYSCORE", KEYS[1], 0, 0)
+local work = redis.call("ZRANGEBYSCORE", KEYS[1], 0, 0)
+if #work > 0 then
   redis.call("LPUSH", KEYS[2], unpack(work))
   redis.call("ZREM", KEYS[1], unpack(work))
 end
data/lib/pallets/backends/scripts/save.lua
CHANGED
@@ -10,24 +10,21 @@ if #ARGV > 0 then
   redis.call("HMSET", KEYS[5], unpack(ARGV))
 end
 
--- Decrement
-
-
-  redis.call("ZINCRBY", KEYS[1], -1, task)
-end
+-- Decrement jobs from the sorted set by applying a jobmask
+redis.call("ZUNIONSTORE", KEYS[1], 2, KEYS[1], KEYS[7])
+redis.call("DEL", KEYS[7])
 
 -- Queue jobs that are ready to be processed (their score is 0) and
 -- remove queued jobs from sorted set
-local
-if
-  local work = redis.call("ZRANGEBYSCORE", KEYS[1], 0, 0)
+local work = redis.call("ZRANGEBYSCORE", KEYS[1], 0, 0)
+if #work > 0 then
   redis.call("LPUSH", KEYS[2], unpack(work))
   redis.call("ZREM", KEYS[1], unpack(work))
 end
 
 -- Decrement ETA and remove it together with the context if all tasks have
 -- been processed (ETA is 0)
-redis.call("DECR", KEYS[6])
-if
+local remaining = redis.call("DECR", KEYS[6])
+if remaining == 0 then
   redis.call("DEL", KEYS[5], KEYS[6])
 end
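In plain Redis terms: `run_workflow` stores, under `jobmask:<jid>`, a sorted set listing every dependent job with score -1; when that job is saved, `save.lua` folds the mask into the workflow's sorted set with ZUNIONSTORE (which sums scores by default), decrementing each dependent's remaining-dependency count in a single call. An illustrative sketch using the redis gem; the key names and payloads are made up:

```ruby
require 'redis'

r = Redis.new
r.del('wq', 'jobmask:J1')

# Workflow sorted set after job J1 was queued: score = unfinished dependencies
r.zadd('wq', [[1, 'job-b'], [2, 'job-c']])
# Jobmask written by run_workflow for J1: -1 for every job that depends on it
r.zadd('jobmask:J1', [[-1, 'job-b'], [-1, 'job-c']])

# What save.lua does once J1 completes
r.zunionstore('wq', ['wq', 'jobmask:J1'])
r.del('jobmask:J1')

r.zrangebyscore('wq', 0, 0) # => ["job-b"]  (ready); job-c still waits on job-b
```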
data/lib/pallets/cli.rb
CHANGED
data/lib/pallets/configuration.rb
CHANGED
@@ -20,6 +20,9 @@ module Pallets
     # period, it is considered failed, and scheduled to be processed again
     attr_accessor :job_timeout
 
+    # Custom logger used throughout Pallets
+    attr_writer :logger
+
     # Maximum number of failures allowed per job. Can also be configured on a
     # per task basis
     attr_accessor :max_failures
@@ -51,6 +54,13 @@ module Pallets
       @middleware = default_middleware
     end
 
+    def logger
+      @logger || Pallets::Logger.new(STDOUT,
+        level: Pallets::Logger::INFO,
+        formatter: Pallets::Logger::Formatters::Pretty.new
+      )
+    end
+
     def pool_size
       @pool_size || @concurrency + 1
     end
data/lib/pallets/graph.rb
CHANGED
@@ -24,18 +24,11 @@ module Pallets
       nodes.empty?
     end
 
-
-
-
-
-
-
-      # Assign order to each node
-      i = 0
-      groups.flat_map do |group|
-        group_with_order = group.product([i])
-        i += group.size
-        group_with_order
+    def each
+      return enum_for(__method__) unless block_given?
+
+      tsort_each do |node|
+        yield(node, parents(node))
       end
     end
 
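`Graph#each` now yields every node together with its parents, in topological order via `tsort_each` (which suggests the class mixes in Ruby's TSort). A standalone sketch of the same pattern, not the gem's class:

```ruby
require 'tsort'

class TinyGraph
  include TSort

  # edges maps a node to the nodes it depends on (its parents)
  def initialize(edges)
    @edges = edges
  end

  def parents(node)
    @edges[node]
  end

  def tsort_each_node(&block)
    @edges.each_key(&block)
  end

  def tsort_each_child(node, &block)
    @edges[node].each(&block)
  end

  # Same shape as the new Graph#each: yield each node with its parents,
  # dependencies first
  def each
    return enum_for(__method__) unless block_given?

    tsort_each do |node|
      yield(node, parents(node))
    end
  end
end

TinyGraph.new({ a: [], b: [:a], c: [:a, :b] }).each.to_a
# => [[:a, []], [:b, [:a]], [:c, [:a, :b]]]
```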
data/lib/pallets/version.rb
CHANGED
data/lib/pallets/worker.rb
CHANGED
@@ -116,7 +116,7 @@ module Pallets
     end
 
     def handle_job_success(context, job, job_hash)
-      backend.save(job_hash['wfid'], job, serializer.dump_context(context.buffer))
+      backend.save(job_hash['wfid'], job_hash['jid'], job, serializer.dump_context(context.buffer))
     end
 
     def backoff_in_seconds(count)
data/lib/pallets/workflow.rb
CHANGED
@@ -20,7 +20,7 @@ module Pallets
       raise WorkflowError, "#{self.class.name} has no tasks. Workflows "\
                            "must contain at least one task" if self.class.graph.empty?
 
-      backend.run_workflow(id,
+      backend.run_workflow(id, *prepare_jobs, serializer.dump_context(context.buffer))
       id
     end
 
@@ -30,11 +30,21 @@ module Pallets
 
     private
 
-    def
-
-
-
+    def prepare_jobs
+      jobs = []
+      jobmasks = Hash.new { |h, k| h[k] = [] }
+      acc = {}
+
+      self.class.graph.each do |task_alias, dependencies|
+        job_hash = construct_job(task_alias)
+        acc[task_alias] = job_hash['jid']
+        job = serializer.dump(job_hash)
+
+        jobs << [dependencies.size, job]
+        dependencies.each { |d| jobmasks[acc[d]] << [-1, job] }
       end
+
+      [jobs, jobmasks]
     end
 
     def construct_job(task_alias)
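To make the new return value concrete: for a hypothetical workflow in which SendReport depends on FetchData, `prepare_jobs` would produce shapes like these (the jids and serialized payloads are invented placeholders):

```ruby
fetch_job  = 'serialized FetchData job (jid J1)'
report_job = 'serialized SendReport job (jid J2)'

jobs = [
  [0, fetch_job],   # no dependencies: queued as soon as the workflow starts
  [1, report_job]   # one unfinished dependency (FetchData)
]

jobmasks = {
  'J1' => [[-1, report_job]]  # finishing J1 decrements SendReport's score
}

# run_workflow ZADDs `jobs` into the workflow sorted set (score = dependency
# count) and each jobmask under jobmask:<jid>; save.lua later applies the mask
# with ZUNIONSTORE, dropping report_job's score to 0 so it gets queued.
```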
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: pallets
 version: !ruby/object:Gem::Version
-  version: 0.7.0
+  version: 0.8.0
 platform: ruby
 authors:
 - Andrei Horak
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2020-
+date: 2020-06-09 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: redis
|