rbczmq 0.1
This diff shows the content of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- data/.gitignore +23 -0
- data/.travis.yml +19 -0
- data/Gemfile +5 -0
- data/Gemfile.lock +19 -0
- data/MIT-LICENSE +20 -0
- data/README.rdoc +247 -0
- data/Rakefile +67 -0
- data/examples/loop.rb +109 -0
- data/examples/poller.rb +37 -0
- data/examples/pub_sub.rb +101 -0
- data/examples/push_pull.rb +104 -0
- data/examples/req_rep.rb +100 -0
- data/ext/czmq.tar.gz +0 -0
- data/ext/rbczmq/context.c +280 -0
- data/ext/rbczmq/context.h +26 -0
- data/ext/rbczmq/extconf.rb +138 -0
- data/ext/rbczmq/frame.c +401 -0
- data/ext/rbczmq/frame.h +24 -0
- data/ext/rbczmq/jruby.h +22 -0
- data/ext/rbczmq/loop.c +413 -0
- data/ext/rbczmq/loop.h +24 -0
- data/ext/rbczmq/message.c +620 -0
- data/ext/rbczmq/message.h +24 -0
- data/ext/rbczmq/poller.c +308 -0
- data/ext/rbczmq/poller.h +29 -0
- data/ext/rbczmq/pollitem.c +251 -0
- data/ext/rbczmq/pollitem.h +25 -0
- data/ext/rbczmq/rbczmq_ext.c +198 -0
- data/ext/rbczmq/rbczmq_ext.h +94 -0
- data/ext/rbczmq/rbczmq_prelude.h +22 -0
- data/ext/rbczmq/rubinius.h +24 -0
- data/ext/rbczmq/ruby18.h +43 -0
- data/ext/rbczmq/ruby19.h +15 -0
- data/ext/rbczmq/socket.c +1570 -0
- data/ext/rbczmq/socket.h +136 -0
- data/ext/rbczmq/timer.c +110 -0
- data/ext/rbczmq/timer.h +23 -0
- data/ext/zeromq.tar.gz +0 -0
- data/lib/rbczmq.rb +3 -0
- data/lib/zmq.rb +77 -0
- data/lib/zmq/context.rb +50 -0
- data/lib/zmq/default_handler.rb +16 -0
- data/lib/zmq/frame.rb +11 -0
- data/lib/zmq/handler.rb +76 -0
- data/lib/zmq/loop.rb +131 -0
- data/lib/zmq/message.rb +9 -0
- data/lib/zmq/poller.rb +22 -0
- data/lib/zmq/pollitem.rb +31 -0
- data/lib/zmq/socket.rb +125 -0
- data/lib/zmq/socket/dealer.rb +33 -0
- data/lib/zmq/socket/pair.rb +39 -0
- data/lib/zmq/socket/pub.rb +30 -0
- data/lib/zmq/socket/pull.rb +29 -0
- data/lib/zmq/socket/push.rb +32 -0
- data/lib/zmq/socket/rep.rb +37 -0
- data/lib/zmq/socket/req.rb +37 -0
- data/lib/zmq/socket/router.rb +38 -0
- data/lib/zmq/socket/sub.rb +27 -0
- data/lib/zmq/timer.rb +12 -0
- data/lib/zmq/version.rb +5 -0
- data/perf/pair.rb +7 -0
- data/perf/pair/local.rb +22 -0
- data/perf/pair/remote.rb +25 -0
- data/perf/pub_sub.rb +7 -0
- data/perf/pub_sub/local.rb +22 -0
- data/perf/pub_sub/remote.rb +25 -0
- data/perf/push_pull.rb +7 -0
- data/perf/push_pull/local.rb +21 -0
- data/perf/push_pull/remote.rb +25 -0
- data/perf/req_rep.rb +7 -0
- data/perf/req_rep/local.rb +35 -0
- data/perf/req_rep/remote.rb +28 -0
- data/perf/runner.rb +142 -0
- data/rbczmq.gemspec +22 -0
- data/test/helper.rb +21 -0
- data/test/socket/test_dealer_socket.rb +14 -0
- data/test/socket/test_pair_socket.rb +24 -0
- data/test/socket/test_pair_sockets.rb +74 -0
- data/test/socket/test_pub_socket.rb +17 -0
- data/test/socket/test_pub_sub_sockets.rb +87 -0
- data/test/socket/test_pull_socket.rb +17 -0
- data/test/socket/test_push_pull_sockets.rb +81 -0
- data/test/socket/test_push_socket.rb +17 -0
- data/test/socket/test_rep_socket.rb +25 -0
- data/test/socket/test_req_rep_sockets.rb +42 -0
- data/test/socket/test_req_socket.rb +27 -0
- data/test/socket/test_router_socket.rb +14 -0
- data/test/socket/test_routing.rb +66 -0
- data/test/socket/test_sub_socket.rb +17 -0
- data/test/test_context.rb +86 -0
- data/test/test_frame.rb +78 -0
- data/test/test_handler.rb +28 -0
- data/test/test_loop.rb +252 -0
- data/test/test_message.rb +201 -0
- data/test/test_poller.rb +154 -0
- data/test/test_pollitem.rb +78 -0
- data/test/test_socket.rb +403 -0
- data/test/test_threading.rb +34 -0
- data/test/test_timer.rb +37 -0
- data/test/test_zmq.rb +62 -0
- metadata +208 -0
data/examples/poller.rb
ADDED
@@ -0,0 +1,37 @@
+# encoding: utf-8
+
+$:.unshift('.')
+$:.unshift(File.expand_path(File.dirname(__FILE__)) + '/../lib')
+require 'zmq'
+
+ctx = ZMQ::Context.new
+pub = ctx.bind(:PUB, 'inproc://example.poller')
+
+subscribers = []
+
+poller = ZMQ::Poller.new
+
+5.times do
+  sub = ctx.connect(:SUB, 'inproc://example.poller')
+  sub.subscribe("")
+  subscribers << sub
+  poller.register_readable(sub)
+end
+
+puts "[#{subscribers.size}] subscribers registered with the poller"
+p subscribers
+
+puts "publisher sends 'test'"
+pub.send("test")
+
+puts "poll, timeout 1s"
+poller.poll(1)
+puts "readable sockets ..."
+p poller.readables
+puts "writable sockets ..."
+p poller.writables
+
+puts "Subscriber sockets can receive without blocking ..."
+p subscribers.map{|s| s.recv_nonblock }
+
+ctx.destroy
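The example above walks the whole ZMQ::Poller surface in one pass; the pattern it reduces to is register, poll, then drain the readable sockets. A minimal sketch of that cycle, reusing only calls that already appear in examples/poller.rb (the endpoint name here is illustrative):

    # Minimal register / poll / drain cycle (same API as examples/poller.rb).
    require 'zmq'

    ctx = ZMQ::Context.new
    pub = ctx.bind(:PUB, 'inproc://sketch.poller')
    sub = ctx.connect(:SUB, 'inproc://sketch.poller')
    sub.subscribe("")

    poller = ZMQ::Poller.new
    poller.register_readable(sub)

    pub.send("hello")
    poller.poll(1)                                   # wait up to 1s for activity
    poller.readables.each { |s| p s.recv_nonblock }  # drain without blocking
    ctx.destroy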
data/examples/pub_sub.rb
ADDED
@@ -0,0 +1,101 @@
+# encoding: utf-8
+
+$:.unshift('.')
+$:.unshift(File.expand_path(File.dirname(__FILE__)) + '/../lib')
+require 'zmq'
+require 'pp'
+
+# PUB / SUB topology
+
+Thread.abort_on_exception = true
+
+class Consumer
+  attr_reader :thread
+  def initialize(ctx, endpoint, topic = "")
+    @thread = nil
+    @connect = Proc.new do
+      @socket = ctx.socket(:SUB)
+      @socket.subscribe("")
+      # verbose output
+      @socket.verbose = true
+      @socket.subscribe(topic)
+      @socket.connect(endpoint)
+      @socket.linger = 1
+    end
+    @jobs, @working = 0, 0.0
+  end
+
+  def start
+    @thread = Thread.new do
+      @connect.call
+      loop do
+        break if Thread.current[:interrupted]
+        perform(@socket.recv)
+      end
+    end
+    self
+  end
+
+  def stop
+    return unless @thread
+    @thread[:interrupted] = true
+    @thread.join(0.1)
+    stats
+  end
+
+  def perform(work)
+    # Random hot loop to simulate CPU intensive work
+    start = Time.now
+    work.to_i.times{}
+    @jobs += 1
+    @working += (Time.now - start).to_f
+  end
+
+  private
+  def stats
+    puts "Processed #{@jobs} jobs in %.4f seconds" % @working
+    $stdout.flush
+  end
+end
+
+class Producer
+  def initialize(ctx, endpoint, topic = "")
+    @ctx, @endpoint, @topic, @consumers = ctx, endpoint, topic, []
+    @socket = ctx.socket(:PUB)
+    # verbose output
+    @socket.verbose = true
+    @socket.bind(endpoint)
+    @socket.linger = 1
+    @interrupted = false
+  end
+
+  def spawn_consumers(count = 10)
+    count.times do
+      @consumers << Consumer.new(@ctx, @endpoint, @topic).start
+      sleep 0.01 # give each thread time to spin up
+    end
+  end
+
+  def start(messages = 100)
+    messages.to_i.times do
+      # Tasks are hot loops with random 0 to 100k iterations
+      work = "#{@topic}#{rand(100_000).to_s}"
+      @socket.send(work)
+      break if @interrupted
+    end
+    @consumers.each{|c| c.stop }
+    @ctx.destroy
+  end
+
+  def stop
+    @interrupted = true
+  end
+end
+
+ctx = ZMQ::Context.new
+producer = Producer.new(ctx, 'inproc://example.pub_sub')
+producer.spawn_consumers
+trap(:INT) do
+  producer.stop
+end
+producer.start(ENV['MESSAGES'] || 1000)
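In the Consumer above, subscribe("") delivers every message and subscribe(topic) adds an optional prefix filter; ZeroMQ SUB sockets filter on message prefixes. A reduced sketch of prefix filtering using the same socket calls as this example (the topic strings and the short sleep for subscription propagation are illustrative, not taken from the package):

    require 'zmq'

    ctx = ZMQ::Context.new
    pub = ctx.socket(:PUB)
    pub.bind('inproc://sketch.pub_sub')

    sub = ctx.socket(:SUB)
    sub.connect('inproc://sketch.pub_sub')
    sub.subscribe("weather.")        # only messages starting with "weather." arrive

    sleep 0.01                       # give the subscription time to take effect
    pub.send("weather.london 18C")
    pub.send("sports.score 2-1")     # dropped by the prefix filter

    p sub.recv                       # => "weather.london 18C"
    ctx.destroy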
data/examples/push_pull.rb
ADDED
@@ -0,0 +1,104 @@
+# encoding: utf-8
+
+$:.unshift('.')
+$:.unshift(File.expand_path(File.dirname(__FILE__)) + '/../lib')
+require 'zmq'
+require 'pp'
+
+# PUSH / PULL topology that demonstrates work being distributed in round robin fashion to a pool of worker threads.
+# This pattern is a good start where little operational complexity is crucial and can scale out to multiple processes
+# and boxes by just swapping out the transport.
+
+Thread.abort_on_exception = true
+
+class Worker
+  attr_reader :thread
+  def initialize(ctx, endpoint, watermark = 1000)
+    @thread = nil
+    @connect = Proc.new do
+      @socket = ctx.socket(:PULL)
+      # verbose output
+      @socket.verbose = true
+      @socket.connect(endpoint)
+      # Limit the amount of work queued for this worker. When the high water mark ceiling hits, a particular worker
+      # is ignored during round robin distribution
+      @socket.hwm = watermark
+      @socket.linger = 0
+    end
+    @jobs, @working = 0, 0.0
+  end
+
+  def start
+    @thread = Thread.new do
+      @connect.call
+      loop do
+        break if Thread.current[:interrupted]
+        perform(@socket.recv)
+      end
+    end
+    self
+  end
+
+  def stop
+    return unless @thread
+    @thread[:interrupted] = true
+    @thread.join(0.1)
+    stats
+  end
+
+  def perform(work)
+    # Random hot loop to simulate CPU intensive work
+    start = Time.now
+    work.to_i.times{}
+    @jobs += 1
+    @working += (Time.now - start).to_f
+  end
+
+  private
+  def stats
+    puts "Processed #{@jobs} jobs in %.4f seconds" % @working
+    $stdout.flush
+  end
+end
+
+class Master
+  def initialize(ctx, endpoint)
+    @ctx, @endpoint, @workers = ctx, endpoint, []
+    @socket = ctx.socket(:PUSH)
+    # verbose output
+    @socket.verbose = true
+    @socket.bind(endpoint)
+    @socket.linger = 0
+    @interrupted = false
+  end
+
+  def spawn_workers(count = 10)
+    count.times do
+      @workers << Worker.new(@ctx, @endpoint).start
+      sleep 0.01 # give each thread time to spin up
+    end
+  end
+
+  def start(messages = 100)
+    messages.to_i.times do
+      # Tasks are hot loops with random 0 to 100k iterations
+      work = rand(100_000).to_s
+      @socket.send(work)
+      break if @interrupted
+    end
+    @workers.each{|w| w.stop }
+    @ctx.destroy
+  end
+
+  def stop
+    @interrupted = true
+  end
+end
+
+ctx = ZMQ::Context.new
+master = Master.new(ctx, 'inproc://example.push_pull')
+master.spawn_workers
+trap(:INT) do
+  master.stop
+end
+master.start(ENV['MESSAGES'] || 1000)
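The Worker above caps its backlog with @socket.hwm = watermark so that a saturated PULL socket is skipped during round-robin distribution; everything else is the plain connect/recv loop. A reduced single-pair sketch using the same socket options (the endpoint and watermark values are illustrative, not taken from the package):

    require 'zmq'

    ctx  = ZMQ::Context.new
    push = ctx.socket(:PUSH)
    push.bind('inproc://sketch.push_pull')

    pull = ctx.socket(:PULL)
    pull.hwm = 100                   # cap the work queued for this consumer
    pull.linger = 0
    pull.connect('inproc://sketch.push_pull')

    5.times { |i| push.send(i.to_s) }
    5.times { p pull.recv }          # a single puller drains in FIFO order
    ctx.destroy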
data/examples/req_rep.rb
ADDED
@@ -0,0 +1,100 @@
+# encoding: utf-8
+
+$:.unshift('.')
+$:.unshift(File.expand_path(File.dirname(__FILE__)) + '/../lib')
+require 'zmq'
+require 'pp'
+
+# REQ / REP topology
+
+Thread.abort_on_exception = true
+
+class Server
+  def initialize(ctx, endpoint)
+    @thread = nil
+    @connect = Proc.new do
+      @socket = ctx.socket(:REP)
+      # verbose output
+      @socket.verbose = true
+      @socket.bind(endpoint)
+      @socket.linger = 1
+    end
+    @jobs, @working = 0, 0.0
+  end
+
+  def start
+    @thread = Thread.new do
+      @connect.call
+      loop do
+        perform(@socket.recv)
+        break if Thread.current[:interrupted]
+      end
+    end
+    self
+  end
+
+  def stop
+    return unless @thread
+    @thread[:interrupted] = true
+    @thread.join(0.1) unless @thread.stop?
+    stats
+  end
+
+  def perform(work)
+    # Random hot loop to simulate CPU intensive work
+    start = Time.now
+    work.to_i.times{}
+    @jobs += 1
+    @working += (Time.now - start).to_f
+    @socket.send "done"
+  end
+
+  private
+  def stats
+    puts "Processed #{@jobs} jobs in %.4f seconds" % @working
+    $stdout.flush
+  end
+end
+
+class Client
+  def initialize(ctx, endpoint)
+    @ctx, @endpoint, @server, @interrupted = ctx, endpoint, nil, false
+    @socket = ctx.socket(:REQ)
+    # verbose output
+    @socket.verbose = true
+  end
+
+  def spawn_server
+    @server = Server.new(@ctx, @endpoint).start
+    sleep 0.01 # give each thread time to spin up
+    connect
+  end
+
+  def start(messages = 100)
+    messages.to_i.times do
+      request = "#{@topic}#{rand(100_000).to_s}"
+      @socket.send(request)
+      response = @socket.recv
+      break if @interrupted
+    end
+    @server.stop
+    @ctx.destroy
+  end
+
+  def stop
+    @interrupted = true
+  end
+  private
+  def connect
+    @socket.connect(@endpoint)
+    @socket.linger = 1
+  end
+end
+
+ctx = ZMQ::Context.new
+client = Client.new(ctx, 'inproc://example.req_rep')
+client.spawn_server
+trap(:INT) do
+  client.stop
+end
+client.start(ENV['MESSAGES'] || 1000)
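REQ/REP sockets enforce a strict send/receive lockstep, which is why the Server above replies with "done" inside perform before looping back to recv. A minimal single-threaded sketch of that lockstep over inproc, assuming only the socket calls already used in this example (endpoint name is illustrative):

    require 'zmq'

    ctx = ZMQ::Context.new
    rep = ctx.socket(:REP)
    rep.bind('inproc://sketch.req_rep')

    req = ctx.socket(:REQ)
    req.connect('inproc://sketch.req_rep')

    req.send("42")       # REQ must send first ...
    work = rep.recv      # ... REP receives the request
    rep.send("done")     # ... and must reply before it can recv again
    p req.recv           # => "done"
    ctx.destroy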
data/ext/czmq.tar.gz
ADDED
Binary file
data/ext/rbczmq/context.c
ADDED
@@ -0,0 +1,280 @@
+#include <rbczmq_ext.h>
+
+static VALUE intern_zctx_process;
+
+static VALUE rb_czmq_ctx_set_iothreads(VALUE context, VALUE io_threads);
+
+static VALUE get_pid()
+{
+    rb_secure(2);
+    return INT2NUM(getpid());
+}
+
+/*
+ * :nodoc:
+ *  Destroy the context while the GIL is released - zctx_destroy also closes it's list of sockets and thus may block
+ *  depending on socket linger values.
+ *
+ */
+static VALUE rb_czmq_nogvl_zctx_destroy(void *ptr)
+{
+    errno = 0;
+    zmq_ctx_wrapper *ctx = ptr;
+    zctx_destroy(&ctx->ctx);
+    ctx->flags |= ZMQ_CONTEXT_DESTROYED;
+    return Qnil;
+}
+
+/*
+ * :nodoc:
+ *  Free all resources for a context - invoked by the lower level ZMQ::Context#destroy as well as the GC callback
+ *
+ */
+static void rb_czmq_free_ctx(zmq_ctx_wrapper *ctx)
+{
+    VALUE ctx_map;
+    ctx_map = rb_ivar_get(rb_mZmq, intern_zctx_process);
+    rb_thread_blocking_region(rb_czmq_nogvl_zctx_destroy, ctx, RUBY_UBF_IO, 0);
+    ctx->ctx = NULL;
+    rb_hash_aset(ctx_map, get_pid(), Qnil);
+}
+
+/*
+ * :nodoc:
+ *  GC free callback
+ *
+ */
+static void rb_czmq_free_ctx_gc(void *ptr)
+{
+    zmq_ctx_wrapper *ctx = (zmq_ctx_wrapper *)ptr;
+    if (ctx) {
+        if (ctx->ctx != NULL && !(ctx->flags & ZMQ_CONTEXT_DESTROYED)) rb_czmq_free_ctx(ctx);
+        xfree(ctx);
+    }
+}
+
+/*
+ * :nodoc:
+ *  Creates a new context while the GIL is released.
+ *
+ */
+static VALUE rb_czmq_nogvl_zctx_new(ZMQ_UNUSED void *ptr)
+{
+    errno = 0;
+    zctx_t *ctx = NULL;
+    ctx = zctx_new();
+    zctx_set_linger(ctx, 1);
+    return (VALUE)ctx;
+}
+
+/*
+ *  call-seq:
+ *     ZMQ::Context.new    =>  ZMQ::Context
+ *     ZMQ::Context.new(1)    =>  ZMQ::Context
+ *
+ *  Returns a handle to a new ZMQ context. A single context per process is supported in order to guarantee stability across
+ *  all Ruby implementations. A context should be passed as an argument to any Ruby threads. Optionally a context can be
+ *  initialized with an I/O threads value (default: 1) - there should be no need to fiddle with this.
+ *
+ * === Examples
+ *     ZMQ::Context.new    =>  ZMQ::Context
+ *     ZMQ::Context.new(1)    =>  ZMQ::Context
+ *
+ */
+
+static VALUE rb_czmq_ctx_s_new(int argc, VALUE *argv, VALUE context)
+{
+    VALUE ctx_map;
+    VALUE io_threads;
+    zmq_ctx_wrapper *ctx = NULL;
+    rb_scan_args(argc, argv, "01", &io_threads);
+    ctx_map = rb_ivar_get(rb_mZmq, intern_zctx_process);
+    if (!NIL_P(rb_hash_aref(ctx_map, get_pid()))) rb_raise(rb_eZmqError, "single ZMQ context per process allowed");
+    context = Data_Make_Struct(rb_cZmqContext, zmq_ctx_wrapper, 0, rb_czmq_free_ctx_gc, ctx);
+    ctx->ctx = (zctx_t*)rb_thread_blocking_region(rb_czmq_nogvl_zctx_new, NULL, RUBY_UBF_IO, 0);
+    ZmqAssertObjOnAlloc(ctx->ctx, ctx);
+    ctx->flags = 0;
+    rb_obj_call_init(context, 0, NULL);
+    rb_hash_aset(ctx_map, get_pid(), context);
+    if (!NIL_P(io_threads)) rb_czmq_ctx_set_iothreads(context, io_threads);
+    return context;
+}
+
+/*
+ *  call-seq:
+ *     ctx.destroy    =>  nil
+ *
+ *  Destroy a ZMQ context and all sockets in it. Useful for manual memory management, otherwise the GC
+ *  will take the same action if a context object is not reachable anymore on the next GC cycle. This is
+ *  a lower level API.
+ *
+ * === Examples
+ *     ctx = ZMQ::Context.new
+ *     ctx.destroy     =>   nil
+ *
+ */
+
+static VALUE rb_czmq_ctx_destroy(VALUE obj)
+{
+    ZmqGetContext(obj);
+    rb_czmq_free_ctx(ctx);
+    return Qnil;
+}
+
+/*
+ *  call-seq:
+ *     ctx.iothreads = 2    =>  nil
+ *
+ *  Raises default I/O threads from 1 - there should be no need to fiddle with this.
+ *
+ * === Examples
+ *     ctx = ZMQ::Context.new
+ *     ctx.iothreads = 2    =>  nil
+ *
+ */
+
+static VALUE rb_czmq_ctx_set_iothreads(VALUE obj, VALUE threads)
+{
+    int iothreads;
+    errno = 0;
+    ZmqGetContext(obj);
+    Check_Type(threads, T_FIXNUM);
+    iothreads = FIX2INT(threads);
+    if (iothreads > 1) rb_warn("You probably don't want to spawn more than 1 I/O thread per ZMQ context.");
+    if (iothreads < 0) rb_raise(rb_eZmqError, "negative I/O threads count is not supported.");
+    zctx_set_iothreads(ctx->ctx, iothreads);
+    if (zmq_errno() == EINVAL) ZmqRaiseSysError();
+    return Qnil;
+}
+
+/*
+ *  call-seq:
+ *     ctx.linger = 100    =>  nil
+ *
+ *  Set msecs to flush sockets when closing them. A high value may block / pause the application on socket close. This
+ *  binding defaults to a linger value of 1 msec set for all sockets, which is important for the reactor implementation
+ *  in ZMQ::Loop to avoid stalling the event loop.
+ *
+ * === Examples
+ *     ctx = ZMQ::Context.new
+ *     ctx.linger = 100    =>  nil
+ *
+ */
+
+static VALUE rb_czmq_ctx_set_linger(VALUE obj, VALUE linger)
+{
+    errno = 0;
+    int msecs;
+    ZmqGetContext(obj);
+    Check_Type(linger, T_FIXNUM);
+    msecs = FIX2INT(linger);
+    if (msecs < 0) rb_raise(rb_eZmqError, "negative linger / timeout values is not supported.");
+    zctx_set_linger(ctx->ctx, msecs);
+    return Qnil;
+}
+
+/*
+ * :nodoc:
+ *  Creates a new socket while the GIL is released.
+ *
+ */
+VALUE rb_czmq_nogvl_socket_new(void *ptr)
+{
+    errno = 0;
+    struct nogvl_socket_args *args = ptr;
+    return (VALUE)zsocket_new(args->ctx, args->type);
+}
+
+/*
+ * :nodoc:
+ *  Maps a Ruby class to a ZMQ socket type.
+ *
+ */
+static inline VALUE rb_czmq_ctx_socket_klass(int socket_type)
+{
+    switch (socket_type) {
+        case ZMQ_PUB: return rb_cZmqPubSocket;
+            break;
+        case ZMQ_SUB: return rb_cZmqSubSocket;
+            break;
+        case ZMQ_PUSH: return rb_cZmqPushSocket;
+            break;
+        case ZMQ_PULL: return rb_cZmqPullSocket;
+            break;
+        case ZMQ_PAIR: return rb_cZmqPairSocket;
+            break;
+        case ZMQ_REQ: return rb_cZmqReqSocket;
+            break;
+        case ZMQ_REP: return rb_cZmqRepSocket;
+            break;
+        case ZMQ_ROUTER: return rb_cZmqRouterSocket;
+            break;
+        case ZMQ_DEALER: return rb_cZmqDealerSocket;
+            break;
+        default: rb_raise(rb_eZmqError, "ZMQ socket type %d not supported!", socket_type);
+            break;
+    }
+}
+
+/*
+ *  call-seq:
+ *     ctx.socket(:PUSH)    =>  ZMQ::Socket
+ *     ctx.socket(ZMQ::PUSH)    =>  ZMQ::Socket
+ *
+ *  Creates a socket within this ZMQ context. This is the only API exposed for creating sockets - they're always spawned off
+ *  a context. Sockets also track state of the current Ruby thread they're created in to ensure they always only ever do work
+ *  on the thread they were spawned on.
+ *
+ * === Examples
+ *     ctx = ZMQ::Context.new
+ *     ctx.socket(:PUSH)    =>  ZMQ::Socket
+ *     ctx.socket(ZMQ::PUSH)    =>  ZMQ::Socket
+ *
+ */
+
+static VALUE rb_czmq_ctx_socket(VALUE obj, VALUE type)
+{
+    VALUE socket;
+    int socket_type;
+    struct nogvl_socket_args args;
+    zmq_sock_wrapper *sock = NULL;
+    errno = 0;
+    ZmqGetContext(obj);
+    if (TYPE(type) != T_FIXNUM && TYPE(type) != T_SYMBOL) rb_raise(rb_eTypeError, "wrong socket type %s (expected Fixnum or Symbol)", RSTRING_PTR(rb_obj_as_string(type)));
+    socket_type = FIX2INT((SYMBOL_P(type)) ? rb_const_get_at(rb_mZmq, rb_to_id(type)) : type);
+
+    socket = Data_Make_Struct(rb_czmq_ctx_socket_klass(socket_type), zmq_sock_wrapper, rb_czmq_mark_sock, rb_czmq_free_sock_gc, sock);
+    args.ctx = ctx->ctx;
+    args.type = socket_type;
+    sock->socket = (void*)rb_thread_blocking_region(rb_czmq_nogvl_socket_new, (void *)&args, RUBY_UBF_IO, 0);
+    ZmqAssertObjOnAlloc(sock->socket, sock);
+#ifndef HAVE_RB_THREAD_BLOCKING_REGION
+    sock->str_buffer = zlist_new();
+    sock->frame_buffer = zlist_new();
+    sock->msg_buffer = zlist_new();
+#endif
+    sock->flags = 0;
+    sock->ctx = ctx->ctx;
+    sock->verbose = FALSE;
+    sock->state = ZMQ_SOCKET_PENDING;
+    sock->endpoint = Qnil;
+    sock->thread = rb_thread_current();
+    sock->recv_timeout = ZMQ_SOCKET_DEFAULT_TIMEOUT;
+    sock->send_timeout = ZMQ_SOCKET_DEFAULT_TIMEOUT;
+    rb_obj_call_init(socket, 0, NULL);
+    return socket;
+}
+
+void _init_rb_czmq_context()
+{
+    intern_zctx_process = rb_intern("@__zmq_ctx_process");
+    rb_ivar_set(rb_mZmq, intern_zctx_process, rb_hash_new());
+
+    rb_cZmqContext = rb_define_class_under(rb_mZmq, "Context", rb_cObject);
+
+    rb_define_singleton_method(rb_cZmqContext, "new", rb_czmq_ctx_s_new, -1);
+    rb_define_method(rb_cZmqContext, "destroy", rb_czmq_ctx_destroy, 0);
+    rb_define_method(rb_cZmqContext, "iothreads=", rb_czmq_ctx_set_iothreads, 1);
+    rb_define_method(rb_cZmqContext, "linger=", rb_czmq_ctx_set_linger, 1);
+    rb_define_method(rb_cZmqContext, "socket", rb_czmq_ctx_socket, 1);
+}
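Taken together, the call-seq comments above document the small Ruby surface this file exposes: a single context per process, an optional I/O thread count, a linger window applied to all sockets, and socket creation by Symbol or ZMQ constant. A short sketch of that surface, based only on the documented call-seqs rather than behaviour verified against the built extension:

    require 'zmq'

    ctx = ZMQ::Context.new(1)        # optional I/O thread count, default 1
    ctx.linger = 100                 # msecs to flush sockets when they close

    push = ctx.socket(:PUSH)         # socket type as a Symbol ...
    pull = ctx.socket(ZMQ::PULL)     # ... or as a ZMQ constant

    ctx.destroy                      # closes the context and all of its sockets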