fairway 0.2.7 → 0.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +13 -5
- data/Gemfile.lock +8 -7
- data/go/channeled_connection_test.go +22 -0
- data/go/config.go +1 -1
- data/go/config_test.go +2 -2
- data/go/connection.go +5 -0
- data/go/connection_test.go +1 -0
- data/go/fairway_ack.go +60 -0
- data/go/fairway_deliver.go +32 -27
- data/go/fairway_destroy.go +11 -6
- data/go/fairway_inflight.go +15 -0
- data/go/fairway_peek.go +1 -3
- data/go/fairway_ping.go +20 -0
- data/go/fairway_priority.go +2 -3
- data/go/fairway_pull.go +67 -61
- data/go/message.go +3 -2
- data/go/queue.go +31 -2
- data/go/queue_test.go +467 -9
- data/go/scripts.go +82 -4
- data/lib/fairway/queue.rb +13 -1
- data/lib/fairway/scripts.rb +20 -0
- data/lib/fairway/version.rb +1 -1
- data/redis/fairway_deliver.lua +32 -26
- data/redis/fairway_priority.lua +1 -1
- data/redis/fairway_pull.lua +66 -59
- data/spec/lib/fairway/channeled_connection_spec.rb +1 -0
- data/spec/lib/fairway/connection_spec.rb +1 -0
- data/spec/lib/fairway/queue_spec.rb +3 -7
- data/spec/lib/fairway/scripts_spec.rb +2 -2
- metadata +16 -13
checksums.yaml
CHANGED
@@ -1,7 +1,15 @@
 ---
-
-  metadata.gz:
-
+!binary "U0hBMQ==":
+  metadata.gz: !binary |-
+    OGE1MjlkNzEzY2FiZDc0MWMzMGNkZDhlYjkxODg4MzY3ZmJhNGE0NA==
+  data.tar.gz: !binary |-
+    ZjBiMzdjNmQ3YWI4Mjg1OTE5MDVkNTU2OTUwMTZiNTIwODhjZDg5OQ==
 SHA512:
-  metadata.gz:
-
+  metadata.gz: !binary |-
+    ODAyMTVhODhiZWRmY2NhMzgyYTkyMzliZDIyNDUyOGM2NGJkMWY3Nzg1MmVm
+    NmZhM2Q5OWI4NWZkNGQzZWIxOWQyMGJmZGM2NWUxOTY5ZjI0MmUzNmZiMTcx
+    MTkwYzlhYzJkNWQyMzVmMjdhYThhNzAzMzI2YzMxNmFiZWY2Y2Y=
+  data.tar.gz: !binary |-
+    YWRjM2RhYWI3NzJjNDljZmQwYjgxYzIzZGMwNGI4MDliZmY2NTFkZGY0ZDlm
+    Y2U3MGU0YTUyZjU4ODNhNzA3N2RkOTI4ODYwYWY2NDM1YzYzMDk4NzQ5NDM1
+    MjRiZGM1MjdkYjU1N2M1NTI0YjgzYjYwMGE0MzJlNjA5NjExMjA=
data/Gemfile.lock
CHANGED
@@ -1,8 +1,8 @@
 PATH
   remote: .
   specs:
-    fairway (0.2.
-      activesupport
+    fairway (0.2.7)
+      activesupport (= 4.0.2)
       connection_pool
       redis
       redis-namespace (>= 1.3.0)
@@ -16,14 +16,13 @@ GEM
       multi_json (~> 1.3)
       thread_safe (~> 0.1)
       tzinfo (~> 0.3.37)
-    atomic (1.1.14)
     celluloid (0.12.4)
       facter (>= 1.6.12)
       timers (>= 1.0.0)
     connection_pool (1.0.0)
     diff-lcs (1.1.3)
     facter (1.6.17)
-    i18n (0.
+    i18n (0.7.0)
     minitest (4.7.5)
     multi_json (1.5.0)
     rake (10.1.0)
@@ -44,10 +43,9 @@ GEM
       multi_json (~> 1)
       redis (~> 3)
       redis-namespace
-    thread_safe (0.
-      atomic
+    thread_safe (0.3.5)
     timers (1.1.0)
-    tzinfo (0.3.
+    tzinfo (0.3.46)
 
 PLATFORMS
   ruby
@@ -57,3 +55,6 @@ DEPENDENCIES
   rake
   rspec
   sidekiq
+
+BUNDLED WITH
+   1.10.6
data/go/channeled_connection_test.go
CHANGED
@@ -2,6 +2,7 @@ package fairway
 
 import (
     "fmt"
+
     "github.com/customerio/gospec"
     . "github.com/customerio/gospec"
     "github.com/customerio/redigo/redis"
@@ -37,4 +38,25 @@ func ChanneledConnectionSpec(c gospec.Context) {
             c.Expect(count, Equals, 0)
         })
     })
+
+    c.Specify("DeliverBytes", func() {
+        c.Specify("only queues up message for matching queues", func() {
+            r := config.Pool.Get()
+            defer r.Close()
+
+            count, _ := redis.Int(r.Do("llen", "fairway:myqueue:default"))
+            c.Expect(count, Equals, 0)
+            count, _ = redis.Int(r.Do("llen", "fairway:myqueue2:default"))
+            c.Expect(count, Equals, 0)
+
+            msg, _ := NewMsg(map[string]string{"type": "a"})
+
+            conn.DeliverBytes("channel:typea:channel", "default", []byte(msg.json()))
+
+            count, _ = redis.Int(r.Do("llen", "fairway:myqueue:default"))
+            c.Expect(count, Equals, 1)
+            count, _ = redis.Int(r.Do("llen", "fairway:myqueue2:default"))
+            c.Expect(count, Equals, 0)
+        })
+    })
 }
data/go/config.go
CHANGED
@@ -32,7 +32,7 @@ func NewConfig(server string, db string, poolSize int) *Config {
         []*QueueDefinition{},
         &redis.Pool{
             MaxIdle:     poolSize,
-            MaxActive:
+            MaxActive:   0,
             IdleTimeout: 240 * time.Second,
             Dial: func() (redis.Conn, error) {
                 c, err := redis.Dial("tcp", server)
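In redigo's redis.Pool, a MaxActive of 0 means there is no cap on concurrently checked-out connections; only MaxIdle bounds how many idle connections are retained. A minimal sketch of a pool configured the same way, assuming the same redis and time imports config.go already uses (the helper name and address handling are illustrative, not part of this package):

// newPool mirrors the pool settings shown in the diff above.
// MaxActive: 0 disables the cap on active connections.
func newPool(server string, poolSize int) *redis.Pool {
    return &redis.Pool{
        MaxIdle:     poolSize,
        MaxActive:   0,
        IdleTimeout: 240 * time.Second,
        Dial: func() (redis.Conn, error) {
            return redis.Dial("tcp", server)
        },
    }
}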
data/go/config_test.go
CHANGED
@@ -25,10 +25,10 @@ func ConfigSpec(c gospec.Context) {
 
     c.Specify("sets redis pool size", func() {
         c.Expect(config.Pool.MaxIdle, Equals, 10)
-        c.Expect(config.Pool.MaxActive, Equals,
+        c.Expect(config.Pool.MaxActive, Equals, 0)
         config = NewConfig("localhost:6379", "15", 20)
         c.Expect(config.Pool.MaxIdle, Equals, 20)
-        c.Expect(config.Pool.MaxActive, Equals,
+        c.Expect(config.Pool.MaxActive, Equals, 0)
     })
 
     c.Specify("can specify custom namespace", func() {
data/go/connection.go
CHANGED
@@ -5,6 +5,7 @@ type Connection interface {
     Queues() []*Queue
     Channel(*Msg) string
     Deliver(*Msg) error
+    DeliverBytes(channel, facet string, bytes []byte) error
     Configuration() *Config
 }
 
@@ -40,6 +41,10 @@ func (c *conn) Deliver(msg *Msg) error {
     return c.scripts.deliver(channel, facet, msg)
 }
 
+func (c *conn) DeliverBytes(channel, facet string, msg []byte) error {
+    return c.scripts.deliverBytes(channel, facet, msg)
+}
+
 func (c *conn) Configuration() *Config {
     return c.config
 }
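The new DeliverBytes skips building a *Msg and hands an already-serialized payload straight to the deliver script. A hedged sketch of a caller, assuming it lives in the same fairway package; the channel and facet strings come from the test added above, and the helper name is illustrative:

// deliverRaw shows the new DeliverBytes entry point: the payload is delivered
// verbatim to every queue whose subscription matches the channel.
func deliverRaw(conn Connection) error {
    payload := []byte(`{"type":"a"}`)
    return conn.DeliverBytes("channel:typea:channel", "default", payload)
}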
data/go/connection_test.go
CHANGED
data/go/fairway_ack.go
ADDED
@@ -0,0 +1,60 @@
+package fairway
+
+func FairwayAck() string {
+    return `
+local namespace = KEYS[1];
+
+local k = function (queue, subkey)
+  return namespace .. queue .. ':' .. subkey;
+end
+
+local queue = ARGV[1];
+local facet = ARGV[2];
+local message = ARGV[3];
+
+local active_facets = k(queue, 'active_facets');
+local round_robin = k(queue, 'facet_queue');
+local facet_pool = k(queue, 'facet_pool');
+local inflight = k(queue, 'inflight');
+local messages = k(queue, facet);
+local inflight_total = k(queue, facet .. ':inflight');
+local inflight_limit = k(queue, 'limit');
+local priorities = k(queue, 'priorities');
+
+local removed = tonumber(redis.call('zrem', inflight, message))
+
+if removed > 0 then
+  -- Manage facet queue and active facets
+  local current = tonumber(redis.call('hget', facet_pool, facet)) or 0;
+  local priority = tonumber(redis.call('hget', priorities, facet)) or 1;
+  local length = redis.call('llen', messages);
+  local inflight_cur = tonumber(redis.call('decr', inflight_total)) or 0;
+  local inflight_max = tonumber(redis.call('get', inflight_limit)) or 0;
+
+  local n = 0
+
+  -- redis.log(redis.LOG_WARNING, current.."/"..length.."/"..priority.."/"..inflight_max.."/"..inflight_cur)
+
+  if inflight_max > 0 then
+    n = math.min(length, priority, inflight_max - inflight_cur);
+  else
+    n = math.min(length, priority);
+  end
+
+  -- redis.log(redis.LOG_WARNING, "ACK: "..current.."/"..n);
+
+  if n > current then
+    -- redis.log(redis.LOG_WARNING, "growing");
+    redis.call('lpush', round_robin, facet);
+    redis.call('hset', facet_pool, facet, current + 1);
+  end
+
+  if (current == 0 and length == 0 and inflight_cur == 0 and n == 0) then
+    redis.call('del', inflight_total);
+    redis.call('srem', active_facets, facet);
+  end
+end
+
+return removed
+`
+}
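The script expects the key namespace as KEYS[1] and the queue name, facet, and original message body as ARGV; it returns the number of entries removed from the inflight sorted set. The package's real wrapper lives in scripts.go (changed in this release but not shown in this section); a hypothetical direct invocation with redigo might look roughly like this, assuming it sits alongside the script in the fairway package:

// ackRaw is an illustrative sketch only; the package's own wrapper in
// scripts.go is the real entry point. It EVALs the ack script with the
// namespace as the single key and queue/facet/message as arguments, and
// returns how many inflight entries were removed (0 or 1).
func ackRaw(c redis.Conn, namespace, queue, facet, message string) (int, error) {
    return redis.Int(c.Do("EVAL", FairwayAck(), 1, namespace, queue, facet, message))
}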
data/go/fairway_deliver.go
CHANGED
@@ -1,7 +1,7 @@
 package fairway
 
 func FairwayDeliver() string {
-
+    return `
 local namespace = KEYS[1];
 local topic = ARGV[1];
 local facet = ARGV[2];
@@ -23,45 +23,50 @@ for i = 1, #registered_queues, 2 do
   -- If the message topic matches the queue topic,
   -- we deliver the message to the queue.
   if string.find(topic, queue_topic) then
-    local priorities
-    local active_facets
-    local round_robin
-    local facet_pool
+    local priorities = k(queue, 'priorities');
+    local active_facets = k(queue, 'active_facets');
+    local round_robin = k(queue, 'facet_queue');
+    local facet_pool = k(queue, 'facet_pool');
+    local inflight_total = k(queue, facet .. ':inflight');
+    local inflight_limit = k(queue, 'limit');
 
     -- Delivering the message to a queue is as simple as
     -- pushing it onto the facet's message list, and
     -- incrementing the length of the queue itself.
-    redis.call('lpush', k(queue, facet), message)
+    local length = redis.call('lpush', k(queue, facet), message)
     redis.call('incr', k(queue, 'length'));
 
-    -- If the facet just became active, we need to add
-    -- the facet to the round-robin queue, so it's
-    -- messages will be processed.
-    if redis.call('sadd', active_facets, facet) == 1 then
-      local priority = tonumber(redis.call('hget', priorities, facet)) or 1
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    -- Manage facet queue and active facets
+    local current = tonumber(redis.call('hget', facet_pool, facet)) or 0;
+    local priority = tonumber(redis.call('hget', priorities, facet)) or 1;
+    local inflight_cur = tonumber(redis.call('get', inflight_total)) or 0;
+    local inflight_max = tonumber(redis.call('get', inflight_limit)) or 0;
+
+    local n = 0
+
+    -- redis.log(redis.LOG_WARNING, current.."/"..length.."/"..priority.."/"..inflight_max.."/"..inflight_cur);
+
+    if inflight_max > 0 then
+      n = math.min(length, priority, inflight_max - inflight_cur);
+    else
+      n = math.min(length, priority);
+    end
+
+    -- redis.log(redis.LOG_WARNING, "PUSH: "..current.."/"..n);
+
+    if n > current then
+      -- redis.log(redis.LOG_WARNING, "growing");
+      redis.call('lpush', round_robin, facet);
+      redis.call('hset', facet_pool, facet, current + 1);
     end
+
+    redis.call('sadd', active_facets, facet)
   end
 end
 
 -- For any clients listening over pub/sub,
 -- we should publish the message.
 redis.call('publish', namespace .. topic, message);
-
 `
 }
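Deliver, pull, and ack now share one slot calculation: a facet may hold at most min(queued messages, priority, remaining inflight budget) entries in the round-robin queue, with the budget term only applying when an inflight limit is set. A small Go helper, written purely for illustration, mirrors that arithmetic:

// slots mirrors the Lua expression
// n = math.min(length, priority, inflight_max - inflight_cur)
// used by the deliver/pull/ack scripts above.
func slots(length, priority, inflightMax, inflightCur int) int {
    n := length
    if priority < n {
        n = priority
    }
    if inflightMax > 0 && inflightMax-inflightCur < n {
        n = inflightMax - inflightCur
    }
    return n
}

For example, slots(3, 2, 5, 4) is 1: even with three queued messages and a priority of 2, only one slot remains under an inflight limit of 5 with 4 messages already inflight.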
data/go/fairway_destroy.go
CHANGED
@@ -1,7 +1,7 @@
 package fairway
 
 func FairwayDestroy() string {
-
+    return `
 local namespace = KEYS[1];
 
 local k = function (queue, subkey)
@@ -13,15 +13,18 @@ end
 -- provided queues, and delete related keys
 -- for each queue.
 for i, queue in ipairs(ARGV) do
-  local priorities
-  local active_facets
-  local round_robin
-  local facet_pool
-  local
+  local priorities = k(queue, 'priorities');
+  local active_facets = k(queue, 'active_facets');
+  local round_robin = k(queue, 'facet_queue');
+  local facet_pool = k(queue, 'facet_pool');
+  local inflight = k(queue, 'inflight');
+  local inflight_limit = k(queue, 'limit');
+  local length = k(queue, 'length');
 
   local facets = redis.call('smembers', active_facets);
 
   for i = 1, #facets, 1 do
+    redis.call('del', k(queue, facets[i] .. ':inflight'));
     redis.call('del', k(queue, facets[i]));
   end
 
@@ -29,6 +32,8 @@ for i, queue in ipairs(ARGV) do
   redis.call('del', active_facets);
   redis.call('del', round_robin);
   redis.call('del', facet_pool);
+  redis.call('del', inflight);
+  redis.call('del', inflight_limit);
   redis.call('del', length);
 end
 
data/go/fairway_inflight.go
ADDED
@@ -0,0 +1,15 @@
+package fairway
+
+func FairwayInflight() string {
+    return `
+local namespace = KEYS[1];
+
+local k = function (queue, subkey)
+  return namespace .. queue .. ':' .. subkey;
+end
+
+for i, queue in ipairs(ARGV) do
+  return redis.call('zrange', k(queue, 'inflight'), 0, -1);
+end
+`
+}
data/go/fairway_peek.go
CHANGED
@@ -1,11 +1,10 @@
 package fairway
 
 func FairwayPeek() string {
-
+    return `
 local namespace = KEYS[1];
 
 for index, queue_name in ipairs(ARGV) do
-  local active_facets = namespace .. queue_name .. ':active_facets';
   local facet_queue = namespace .. queue_name .. ':facet_queue';
 
   local facet = redis.call('lrange', facet_queue, -1, -1)[1];
@@ -17,6 +16,5 @@ for index, queue_name in ipairs(ARGV) do
     return {queue_name, message};
   end
 end
-
 `
 }
data/go/fairway_ping.go
ADDED
@@ -0,0 +1,20 @@
+package fairway
+
+func FairwayPing() string {
+    return `
+local namespace = KEYS[1];
+local timestamp = tonumber(KEYS[2]);
+local wait = tonumber(KEYS[3]);
+
+local k = function (queue, subkey)
+  return namespace .. queue .. ':' .. subkey;
+end
+
+local queue = ARGV[1];
+local message = ARGV[2];
+
+local inflight = k(queue, 'inflight');
+
+redis.call('zadd', inflight, timestamp + wait, message);
+`
+}
data/go/fairway_priority.go
CHANGED
@@ -1,7 +1,7 @@
 package fairway
 
 func FairwayPriority() string {
-
+    return `
 local namespace = KEYS[1];
 local queue = ARGV[1];
 local facet = ARGV[2];
@@ -17,7 +17,7 @@ local facet_pool = k(queue, 'facet_pool');
 
 -- Find the current state of the facet for the queue
 local priority = tonumber(redis.call('hget', priorities, facet)) or 1;
-local current = tonumber(redis.call('hget', facet_pool, facet));
+local current = tonumber(redis.call('hget', facet_pool, facet)) or 0;
 
 -- If priority is currently zero, we need to jump
 -- start the facet by adding it to the round-robin
@@ -31,6 +31,5 @@
 -- set the new priority, and the real priority
 -- will update lazily on pull.
 redis.call('hset', priorities, facet, new_priority);
-
 `
 }
data/go/fairway_pull.go
CHANGED
@@ -1,8 +1,10 @@
 package fairway
 
 func FairwayPull() string {
-
+    return `
 local namespace = KEYS[1];
+local timestamp = tonumber(KEYS[2]);
+local wait = tonumber(KEYS[3]);
 
 local k = function (queue, subkey)
   return namespace .. queue .. ':' .. subkey;
@@ -13,10 +15,28 @@ end
 -- provided queues, and return a message
 -- from the first one that isn't empty.
 for i, queue in ipairs(ARGV) do
-  local
-  local
-  local
-  local
+  local active_facets = k(queue, 'active_facets');
+  local round_robin = k(queue, 'facet_queue');
+  local inflight = k(queue, 'inflight');
+  local inflight_limit = k(queue, 'limit');
+  local priorities = k(queue, 'priorities');
+  local facet_pool = k(queue, 'facet_pool');
+
+  if wait ~= -1 then
+    -- Check if any current inflight messages
+    -- have been inflight for a long time.
+    local inflightmessage = redis.call('zrange', inflight, 0, 0, 'WITHSCORES');
+
+    -- If we have an inflight message and it's score
+    -- is less than the current pull timestamp, reset
+    -- the inflight score for the the message and resend.
+    if #inflightmessage > 0 then
+      if tonumber(inflightmessage[2]) <= timestamp then
+        redis.call('zadd', inflight, timestamp + wait, inflightmessage[1]);
+        return {queue, inflightmessage[1]}
+      end
+    end
+  end
 
   -- Pull a facet from the round-robin list.
   -- This list guarantees each active facet will have a
@@ -27,73 +47,59 @@ for i, queue in ipairs(ARGV) do
   -- If we found an active facet, we know the facet
   -- has at least one message available to be pulled
   -- from it's message queue.
-  local messages
-  local
+  local messages = k(queue, facet);
+  local inflight_total = k(queue, facet .. ':inflight');
+
+  local message = redis.call('rpop', messages);
 
   if message then
+    if wait ~= -1 then
+      redis.call('zadd', inflight, timestamp + wait, message);
+      redis.call('incr', inflight_total);
+    end
+
     redis.call('decr', k(queue, 'length'));
   end
 
-
+  -- Manage facet queue and active facets
+  local current = tonumber(redis.call('hget', facet_pool, facet)) or 0;
+  local priority = tonumber(redis.call('hget', priorities, facet)) or 1;
+  local length = redis.call('llen', messages);
+  local inflight_cur = tonumber(redis.call('get', inflight_total)) or 0;
+  local inflight_max = tonumber(redis.call('get', inflight_limit)) or 0;
 
-
-
-  --
-
-
-
-  -- round-robin queue.
-    redis.call('srem', active_facets, facet);
-
-  -- If the facet still has messages to process,
-  -- it remains in the active facet set, and is
-  -- pushed back on the round-robin queue.
-  --
-  -- Additionally, the priority of the facet may
-  -- have changed, so we'll check and update the
-  -- current facet's priority if needed.
+  local n = 0
+
+  -- redis.log(redis.LOG_WARNING, current.."/"..length.."/"..priority.."/"..inflight_max.."/"..inflight_cur);
+
+  if inflight_max > 0 then
+    n = math.min(length, priority, inflight_max - inflight_cur);
   else
-
-
-
-
-
-
-  --
-
-
-  --
-
-
-
-
-
-
-
-
-
-
-
-  -- Note: Also decrement priority if there aren't
-  -- enough messages for the current priority. This
-  -- ensures priority (entries in the round-robin queue)
-  -- never exceeds the number of messages for a given
-  -- facet.
-  elseif current > priority or current > length then
-    redis.call('hset', facet_pool, facet, current - 1);
-
-  -- If the current priority is equals the
-  -- desired priority, let's maintain the current priority
-  -- by pushing the current facet on the round-robin
-  -- queue once.
-  else
-    redis.call('lpush', round_robin, facet);
-  end
+    n = math.min(length, priority);
+  end
+
+  -- redis.log(redis.LOG_WARNING, "PULL: "..current.."/"..n);
+
+  if n < current then
+    -- redis.log(redis.LOG_WARNING, "shrinking");
+    redis.call('hset', facet_pool, facet, current - 1);
+  elseif n > current then
+    -- redis.log(redis.LOG_WARNING, "growing");
+    redis.call('lpush', round_robin, facet);
+    redis.call('lpush', round_robin, facet);
+    redis.call('hset', facet_pool, facet, current + 1);
+  else
+    -- redis.log(redis.LOG_WARNING, "maintaining");
+    redis.call('lpush', round_robin, facet);
+  end
+
+  if (current == 1 and length == 0 and inflight_cur == 0 and n == 0) then
+    redis.call('del', inflight_total);
+    redis.call('srem', active_facets, facet);
   end
 
   return {queue, message};
 end
-
 `
 }
data/go/message.go
CHANGED
@@ -6,6 +6,7 @@ import (
 )
 
 type Msg struct {
+    Original string
     *simplejson.Json
 }
 
@@ -20,7 +21,7 @@ func NewMsg(body interface{}) (*Msg, error) {
         return nil, err
     }
 
-    return &Msg{simplej}, nil
+    return &Msg{string(bytes), simplej}, nil
 }
 
 func NewMsgFromString(body string) (*Msg, error) {
@@ -29,7 +30,7 @@ func NewMsgFromString(body string) (*Msg, error) {
         return nil, err
     }
 
-    return &Msg{simplej}, nil
+    return &Msg{body, simplej}, nil
 }
 
 func (m *Msg) json() string {
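The new Original field keeps the exact payload the Msg was parsed from, likely so callers can match the verbatim bytes stored by the inflight scripts rather than a re-serialization of the parsed JSON (which could reorder keys). A small hedged sketch, assuming it sits in the same package; the helper name and payload are illustrative:

// roundTrip shows that Original is the verbatim input string, not a
// re-serialization of the parsed JSON.
func roundTrip() (string, error) {
    msg, err := NewMsgFromString(`{"type":"a","id":1}`)
    if err != nil {
        return "", err
    }
    return msg.Original, nil
}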
data/go/queue.go
CHANGED
@@ -9,6 +9,35 @@ func NewQueue(conn Connection, name string) *Queue {
     return &Queue{conn, name}
 }
 
-func (q *Queue)
-    return q.
+func (q *Queue) Name() string {
+    return q.name
+}
+
+func (q *Queue) Length() (int, error) {
+    return q.conn.Configuration().scripts().length(q.name)
+}
+
+func (q *Queue) Pull(resendTimeframe int) (string, *Msg) {
+    return q.conn.Configuration().scripts().pull(q.name, resendTimeframe)
+}
+
+func (q *Queue) Inflight() []string {
+    return q.conn.Configuration().scripts().inflight(q.name)
+}
+
+func (q *Queue) InflightLimit() (int, error) {
+    return q.conn.Configuration().scripts().inflightLimit(q.name)
+}
+
+func (q *Queue) SetInflightLimit(limit int) error {
+    return q.conn.Configuration().scripts().setInflightLimit(q.name, limit)
+}
+
+func (q *Queue) Ping(message *Msg, resendTimeframe int) error {
+    return q.conn.Configuration().scripts().ping(q.name, message, resendTimeframe)
+}
+
+func (q *Queue) Ack(message *Msg) error {
+    facet := q.conn.Configuration().Facet(message)
+    return q.conn.Configuration().scripts().ack(q.name, facet, message)
 }