rbczmq 1.6.2 → 1.6.4
Sign up to get free protection for your applications and to get access to all the features.
- data/.gitignore +4 -3
- data/.gitmodules +6 -0
- data/.travis.yml +5 -1
- data/CHANGELOG.rdoc +15 -0
- data/Gemfile.lock +2 -2
- data/README.rdoc +5 -2
- data/Rakefile +8 -3
- data/ext/czmq/.gitignore +52 -0
- data/ext/czmq/.travis.yml +18 -0
- data/ext/czmq/AUTHORS +9 -0
- data/ext/czmq/COPYING +674 -0
- data/ext/czmq/COPYING.LESSER +178 -0
- data/ext/czmq/ChangeLog +0 -0
- data/ext/czmq/Makefile.am +22 -0
- data/ext/czmq/NEWS +263 -0
- data/ext/czmq/README +0 -0
- data/ext/czmq/README.md +1122 -0
- data/ext/czmq/README.txt +327 -0
- data/ext/czmq/autogen.sh +46 -0
- data/ext/czmq/builds/android/Android.mk +35 -0
- data/ext/czmq/builds/android/Application.mk +1 -0
- data/ext/czmq/builds/android/build.sh +59 -0
- data/ext/czmq/builds/android/clean.sh +26 -0
- data/ext/czmq/builds/mingw32/Makefile.mingw32 +38 -0
- data/ext/czmq/builds/mingw32/platform.h +0 -0
- data/ext/czmq/builds/msvc/.gitignore +18 -0
- data/ext/czmq/builds/msvc/README.txt +17 -0
- data/ext/czmq/builds/msvc/czmq.sln +69 -0
- data/ext/czmq/builds/msvc/czmq.vcproj +2246 -0
- data/ext/czmq/builds/msvc/czmq.vcxproj +329 -0
- data/ext/czmq/builds/msvc/czmq.vcxproj.filters +117 -0
- data/ext/czmq/builds/msvc/czmq11.sln +36 -0
- data/ext/czmq/builds/msvc/czmq_selftest.vcproj +840 -0
- data/ext/czmq/builds/msvc/czmq_selftest.vcxproj +189 -0
- data/ext/czmq/builds/msvc/czmq_selftest.vcxproj.filters +14 -0
- data/ext/czmq/c +520 -0
- data/ext/czmq/configure.ac +229 -0
- data/ext/czmq/doc/Makefile.am +49 -0
- data/ext/czmq/doc/asciidoc.conf +57 -0
- data/ext/czmq/doc/czmq.txt +334 -0
- data/ext/czmq/doc/mkman +100 -0
- data/ext/czmq/doc/mksite +65 -0
- data/ext/czmq/doc/wdput +43 -0
- data/ext/czmq/doc/xml2wd.pl +242 -0
- data/ext/czmq/doc/zbeacon.txt +173 -0
- data/ext/czmq/doc/zclock.txt +51 -0
- data/ext/czmq/doc/zconfig.txt +92 -0
- data/ext/czmq/doc/zctx.txt +111 -0
- data/ext/czmq/doc/zfile.txt +77 -0
- data/ext/czmq/doc/zframe.txt +222 -0
- data/ext/czmq/doc/zhash.txt +225 -0
- data/ext/czmq/doc/zlist.txt +176 -0
- data/ext/czmq/doc/zloop.txt +106 -0
- data/ext/czmq/doc/zmsg.txt +315 -0
- data/ext/czmq/doc/zmutex.txt +54 -0
- data/ext/czmq/doc/zsocket.txt +110 -0
- data/ext/czmq/doc/zsockopt.txt +528 -0
- data/ext/czmq/doc/zstr.txt +80 -0
- data/ext/czmq/doc/zsys.txt +44 -0
- data/ext/czmq/doc/zthread.txt +126 -0
- data/ext/czmq/doc/ztree.txt +236 -0
- data/ext/czmq/images/README_1.png +0 -0
- data/ext/czmq/images/README_2.png +0 -0
- data/ext/czmq/include/czmq.h +64 -0
- data/ext/czmq/include/czmq_prelude.h +504 -0
- data/ext/czmq/include/zbeacon.h +91 -0
- data/ext/czmq/include/zclock.h +56 -0
- data/ext/czmq/include/zconfig.h +117 -0
- data/ext/czmq/include/zctx.h +96 -0
- data/ext/czmq/include/zfile.h +82 -0
- data/ext/czmq/include/zframe.h +145 -0
- data/ext/czmq/include/zhash.h +127 -0
- data/ext/czmq/include/zlist.h +113 -0
- data/ext/czmq/include/zloop.h +98 -0
- data/ext/czmq/include/zmsg.h +165 -0
- data/ext/czmq/include/zmutex.h +62 -0
- data/ext/czmq/include/zsocket.h +104 -0
- data/ext/czmq/include/zsockopt.h +249 -0
- data/ext/czmq/include/zstr.h +69 -0
- data/ext/czmq/include/zsys.h +66 -0
- data/ext/czmq/include/zthread.h +62 -0
- data/ext/czmq/include/ztree.h +133 -0
- data/ext/czmq/mkdoc +14 -0
- data/ext/czmq/model/generate +2 -0
- data/ext/czmq/model/sockopts.xml +101 -0
- data/ext/czmq/notes.txt +21 -0
- data/ext/czmq/scripts/sockopts.gsl +325 -0
- data/ext/czmq/src/Makefile.am +61 -0
- data/ext/czmq/src/czmq_selftest.c +60 -0
- data/ext/czmq/src/libczmq.pc.in +11 -0
- data/ext/czmq/src/selftest +7 -0
- data/ext/czmq/src/selftest.cfg +5 -0
- data/ext/czmq/src/valgrind.supp +14 -0
- data/ext/czmq/src/vg +2 -0
- data/ext/czmq/src/zbeacon.c +787 -0
- data/ext/czmq/src/zclock.c +143 -0
- data/ext/czmq/src/zconfig.c +691 -0
- data/ext/czmq/src/zctx.c +287 -0
- data/ext/czmq/src/zfile.c +237 -0
- data/ext/czmq/src/zframe.c +551 -0
- data/ext/czmq/src/zhash.c +664 -0
- data/ext/czmq/src/zlist.c +459 -0
- data/ext/czmq/src/zloop.c +496 -0
- data/ext/czmq/src/zmsg.c +854 -0
- data/ext/czmq/src/zmutex.c +134 -0
- data/ext/czmq/src/zsocket.c +313 -0
- data/ext/czmq/src/zsockopt.c +1756 -0
- data/ext/czmq/src/zstr.c +297 -0
- data/ext/czmq/src/zsys.c +136 -0
- data/ext/czmq/src/zthread.c +269 -0
- data/ext/czmq/src/ztree.c +888 -0
- data/ext/czmq/version.sh +21 -0
- data/ext/rbczmq/extconf.rb +1 -18
- data/ext/rbczmq/poller.c +4 -1
- data/ext/rbczmq/socket.c +28 -5
- data/ext/rbczmq/socket.h +1 -0
- data/ext/zeromq/AUTHORS +110 -0
- data/ext/zeromq/CMakeLists.txt +392 -0
- data/ext/zeromq/COPYING +674 -0
- data/ext/zeromq/COPYING.LESSER +179 -0
- data/ext/zeromq/INSTALL +246 -0
- data/ext/zeromq/MAINTAINERS +56 -0
- data/ext/zeromq/Makefile.am +40 -0
- data/ext/zeromq/NEWS +333 -0
- data/ext/zeromq/README +39 -0
- data/ext/zeromq/acinclude.m4 +930 -0
- data/ext/zeromq/autogen.sh +45 -0
- data/ext/zeromq/branding.bmp +0 -0
- data/ext/zeromq/builds/msvc/Makefile.am +33 -0
- data/ext/zeromq/builds/msvc/c_local_lat/c_local_lat.vcproj +176 -0
- data/ext/zeromq/builds/msvc/c_local_lat/c_local_lat.vcxproj +87 -0
- data/ext/zeromq/builds/msvc/c_local_thr/c_local_thr.vcproj +176 -0
- data/ext/zeromq/builds/msvc/c_local_thr/c_local_thr.vcxproj +87 -0
- data/ext/zeromq/builds/msvc/c_remote_lat/c_remote_lat.vcproj +176 -0
- data/ext/zeromq/builds/msvc/c_remote_lat/c_remote_lat.vcxproj +87 -0
- data/ext/zeromq/builds/msvc/c_remote_thr/c_remote_thr.vcproj +176 -0
- data/ext/zeromq/builds/msvc/c_remote_thr/c_remote_thr.vcxproj +87 -0
- data/ext/zeromq/builds/msvc/errno.cpp +32 -0
- data/ext/zeromq/builds/msvc/errno.hpp +56 -0
- data/ext/zeromq/builds/msvc/inproc_lat/inproc_lat.vcproj +174 -0
- data/ext/zeromq/builds/msvc/inproc_lat/inproc_lat.vcxproj +86 -0
- data/ext/zeromq/builds/msvc/inproc_thr/inproc_thr.vcproj +174 -0
- data/ext/zeromq/builds/msvc/inproc_thr/inproc_thr.vcxproj +86 -0
- data/ext/zeromq/builds/msvc/libzmq/libzmq.vcproj +804 -0
- data/ext/zeromq/builds/msvc/libzmq/libzmq.vcxproj +252 -0
- data/ext/zeromq/builds/msvc/libzmq/libzmq.vcxproj.filters +431 -0
- data/ext/zeromq/builds/msvc/msvc.sln +89 -0
- data/ext/zeromq/builds/msvc/msvc10.sln +116 -0
- data/ext/zeromq/builds/msvc/platform.hpp +32 -0
- data/ext/zeromq/builds/msvc/properties/Common.props +21 -0
- data/ext/zeromq/builds/msvc/properties/Debug.props +19 -0
- data/ext/zeromq/builds/msvc/properties/Dynamic.props +20 -0
- data/ext/zeromq/builds/msvc/properties/Executable.props +19 -0
- data/ext/zeromq/builds/msvc/properties/Precompiled.props +14 -0
- data/ext/zeromq/builds/msvc/properties/Release.props +22 -0
- data/ext/zeromq/builds/msvc/properties/Win32.props +12 -0
- data/ext/zeromq/builds/msvc/properties/Win32_Release.props +17 -0
- data/ext/zeromq/builds/msvc/properties/WithOpenPGM.props +12 -0
- data/ext/zeromq/builds/msvc/properties/ZeroMQ.props +23 -0
- data/ext/zeromq/builds/msvc/properties/x64.props +12 -0
- data/ext/zeromq/builds/redhat/zeromq.spec.in +160 -0
- data/ext/zeromq/builds/valgrind/valgrind.supp +14 -0
- data/ext/zeromq/builds/valgrind/vg +1 -0
- data/ext/zeromq/cmake/Modules/TestZMQVersion.cmake +35 -0
- data/ext/zeromq/cmake/Modules/zmq_version.cpp +31 -0
- data/ext/zeromq/cmake/NSIS.template32.in +952 -0
- data/ext/zeromq/cmake/NSIS.template64.in +960 -0
- data/ext/zeromq/configure.in +428 -0
- data/ext/zeromq/doc/Makefile.am +51 -0
- data/ext/zeromq/doc/asciidoc.conf +56 -0
- data/ext/zeromq/doc/zmq.txt +233 -0
- data/ext/zeromq/doc/zmq_bind.txt +102 -0
- data/ext/zeromq/doc/zmq_close.txt +52 -0
- data/ext/zeromq/doc/zmq_connect.txt +98 -0
- data/ext/zeromq/doc/zmq_ctx_destroy.txt +66 -0
- data/ext/zeromq/doc/zmq_ctx_get.txt +67 -0
- data/ext/zeromq/doc/zmq_ctx_new.txt +49 -0
- data/ext/zeromq/doc/zmq_ctx_set.txt +75 -0
- data/ext/zeromq/doc/zmq_disconnect.txt +67 -0
- data/ext/zeromq/doc/zmq_epgm.txt +162 -0
- data/ext/zeromq/doc/zmq_errno.txt +50 -0
- data/ext/zeromq/doc/zmq_getsockopt.txt +516 -0
- data/ext/zeromq/doc/zmq_init.txt +52 -0
- data/ext/zeromq/doc/zmq_inproc.txt +85 -0
- data/ext/zeromq/doc/zmq_ipc.txt +85 -0
- data/ext/zeromq/doc/zmq_msg_close.txt +55 -0
- data/ext/zeromq/doc/zmq_msg_copy.txt +57 -0
- data/ext/zeromq/doc/zmq_msg_data.txt +48 -0
- data/ext/zeromq/doc/zmq_msg_get.txt +72 -0
- data/ext/zeromq/doc/zmq_msg_init.txt +65 -0
- data/ext/zeromq/doc/zmq_msg_init_data.txt +85 -0
- data/ext/zeromq/doc/zmq_msg_init_size.txt +58 -0
- data/ext/zeromq/doc/zmq_msg_more.txt +63 -0
- data/ext/zeromq/doc/zmq_msg_move.txt +52 -0
- data/ext/zeromq/doc/zmq_msg_recv.txt +125 -0
- data/ext/zeromq/doc/zmq_msg_send.txt +122 -0
- data/ext/zeromq/doc/zmq_msg_set.txt +45 -0
- data/ext/zeromq/doc/zmq_msg_size.txt +48 -0
- data/ext/zeromq/doc/zmq_pgm.txt +162 -0
- data/ext/zeromq/doc/zmq_poll.txt +132 -0
- data/ext/zeromq/doc/zmq_proxy.txt +97 -0
- data/ext/zeromq/doc/zmq_recv.txt +93 -0
- data/ext/zeromq/doc/zmq_recvmsg.txt +123 -0
- data/ext/zeromq/doc/zmq_send.txt +100 -0
- data/ext/zeromq/doc/zmq_sendmsg.txt +119 -0
- data/ext/zeromq/doc/zmq_setsockopt.txt +523 -0
- data/ext/zeromq/doc/zmq_socket.txt +369 -0
- data/ext/zeromq/doc/zmq_socket_monitor.txt +288 -0
- data/ext/zeromq/doc/zmq_strerror.txt +55 -0
- data/ext/zeromq/doc/zmq_tcp.txt +101 -0
- data/ext/zeromq/doc/zmq_term.txt +66 -0
- data/ext/zeromq/doc/zmq_unbind.txt +65 -0
- data/ext/zeromq/doc/zmq_version.txt +53 -0
- data/ext/zeromq/foreign/openpgm/Makefile.am +8 -0
- data/ext/zeromq/foreign/openpgm/libpgm-5.1.118~dfsg.tar.gz +0 -0
- data/ext/zeromq/include/zmq.h +402 -0
- data/ext/zeromq/include/zmq_utils.h +64 -0
- data/ext/zeromq/installer.ico +0 -0
- data/ext/zeromq/perf/Makefile.am +22 -0
- data/ext/zeromq/perf/inproc_lat.cpp +233 -0
- data/ext/zeromq/perf/inproc_thr.cpp +241 -0
- data/ext/zeromq/perf/local_lat.cpp +109 -0
- data/ext/zeromq/perf/local_thr.cpp +133 -0
- data/ext/zeromq/perf/remote_lat.cpp +122 -0
- data/ext/zeromq/perf/remote_thr.cpp +105 -0
- data/ext/zeromq/src/Makefile.am +171 -0
- data/ext/zeromq/src/address.cpp +78 -0
- data/ext/zeromq/src/address.hpp +52 -0
- data/ext/zeromq/src/array.hpp +155 -0
- data/ext/zeromq/src/atomic_counter.hpp +197 -0
- data/ext/zeromq/src/atomic_ptr.hpp +196 -0
- data/ext/zeromq/src/blob.hpp +129 -0
- data/ext/zeromq/src/clock.cpp +147 -0
- data/ext/zeromq/src/clock.hpp +60 -0
- data/ext/zeromq/src/command.hpp +154 -0
- data/ext/zeromq/src/config.hpp +89 -0
- data/ext/zeromq/src/ctx.cpp +352 -0
- data/ext/zeromq/src/ctx.hpp +173 -0
- data/ext/zeromq/src/dealer.cpp +133 -0
- data/ext/zeromq/src/dealer.hpp +92 -0
- data/ext/zeromq/src/decoder.cpp +166 -0
- data/ext/zeromq/src/decoder.hpp +248 -0
- data/ext/zeromq/src/devpoll.cpp +190 -0
- data/ext/zeromq/src/devpoll.hpp +105 -0
- data/ext/zeromq/src/dist.cpp +194 -0
- data/ext/zeromq/src/dist.hpp +105 -0
- data/ext/zeromq/src/encoder.cpp +102 -0
- data/ext/zeromq/src/encoder.hpp +200 -0
- data/ext/zeromq/src/epoll.cpp +178 -0
- data/ext/zeromq/src/epoll.hpp +101 -0
- data/ext/zeromq/src/err.cpp +291 -0
- data/ext/zeromq/src/err.hpp +155 -0
- data/ext/zeromq/src/fd.hpp +45 -0
- data/ext/zeromq/src/fq.cpp +141 -0
- data/ext/zeromq/src/fq.hpp +74 -0
- data/ext/zeromq/src/i_decoder.hpp +49 -0
- data/ext/zeromq/src/i_encoder.hpp +55 -0
- data/ext/zeromq/src/i_engine.hpp +55 -0
- data/ext/zeromq/src/i_msg_sink.hpp +43 -0
- data/ext/zeromq/src/i_msg_source.hpp +44 -0
- data/ext/zeromq/src/i_poll_events.hpp +47 -0
- data/ext/zeromq/src/io_object.cpp +108 -0
- data/ext/zeromq/src/io_object.hpp +81 -0
- data/ext/zeromq/src/io_thread.cpp +104 -0
- data/ext/zeromq/src/io_thread.hpp +91 -0
- data/ext/zeromq/src/ip.cpp +109 -0
- data/ext/zeromq/src/ip.hpp +41 -0
- data/ext/zeromq/src/ipc_address.cpp +84 -0
- data/ext/zeromq/src/ipc_address.hpp +67 -0
- data/ext/zeromq/src/ipc_connecter.cpp +265 -0
- data/ext/zeromq/src/ipc_connecter.hpp +128 -0
- data/ext/zeromq/src/ipc_listener.cpp +206 -0
- data/ext/zeromq/src/ipc_listener.hpp +99 -0
- data/ext/zeromq/src/kqueue.cpp +201 -0
- data/ext/zeromq/src/kqueue.hpp +107 -0
- data/ext/zeromq/src/lb.cpp +148 -0
- data/ext/zeromq/src/lb.hpp +73 -0
- data/ext/zeromq/src/libzmq.pc.in +10 -0
- data/ext/zeromq/src/likely.hpp +33 -0
- data/ext/zeromq/src/mailbox.cpp +87 -0
- data/ext/zeromq/src/mailbox.hpp +75 -0
- data/ext/zeromq/src/msg.cpp +299 -0
- data/ext/zeromq/src/msg.hpp +148 -0
- data/ext/zeromq/src/mtrie.cpp +428 -0
- data/ext/zeromq/src/mtrie.hpp +93 -0
- data/ext/zeromq/src/mutex.hpp +118 -0
- data/ext/zeromq/src/object.cpp +393 -0
- data/ext/zeromq/src/object.hpp +134 -0
- data/ext/zeromq/src/options.cpp +562 -0
- data/ext/zeromq/src/options.hpp +135 -0
- data/ext/zeromq/src/own.cpp +206 -0
- data/ext/zeromq/src/own.hpp +145 -0
- data/ext/zeromq/src/pair.cpp +136 -0
- data/ext/zeromq/src/pair.hpp +79 -0
- data/ext/zeromq/src/pgm_receiver.cpp +283 -0
- data/ext/zeromq/src/pgm_receiver.hpp +141 -0
- data/ext/zeromq/src/pgm_sender.cpp +218 -0
- data/ext/zeromq/src/pgm_sender.hpp +113 -0
- data/ext/zeromq/src/pgm_socket.cpp +706 -0
- data/ext/zeromq/src/pgm_socket.hpp +124 -0
- data/ext/zeromq/src/pipe.cpp +447 -0
- data/ext/zeromq/src/pipe.hpp +207 -0
- data/ext/zeromq/src/poll.cpp +176 -0
- data/ext/zeromq/src/poll.hpp +105 -0
- data/ext/zeromq/src/poller.hpp +82 -0
- data/ext/zeromq/src/poller_base.cpp +99 -0
- data/ext/zeromq/src/poller_base.hpp +86 -0
- data/ext/zeromq/src/precompiled.cpp +21 -0
- data/ext/zeromq/src/precompiled.hpp +47 -0
- data/ext/zeromq/src/proxy.cpp +150 -0
- data/ext/zeromq/src/proxy.hpp +32 -0
- data/ext/zeromq/src/pub.cpp +57 -0
- data/ext/zeromq/src/pub.hpp +69 -0
- data/ext/zeromq/src/pull.cpp +79 -0
- data/ext/zeromq/src/pull.hpp +81 -0
- data/ext/zeromq/src/push.cpp +76 -0
- data/ext/zeromq/src/push.hpp +80 -0
- data/ext/zeromq/src/random.cpp +52 -0
- data/ext/zeromq/src/random.hpp +37 -0
- data/ext/zeromq/src/reaper.cpp +117 -0
- data/ext/zeromq/src/reaper.hpp +80 -0
- data/ext/zeromq/src/rep.cpp +137 -0
- data/ext/zeromq/src/rep.hpp +80 -0
- data/ext/zeromq/src/req.cpp +185 -0
- data/ext/zeromq/src/req.hpp +91 -0
- data/ext/zeromq/src/router.cpp +364 -0
- data/ext/zeromq/src/router.hpp +138 -0
- data/ext/zeromq/src/select.cpp +216 -0
- data/ext/zeromq/src/select.hpp +126 -0
- data/ext/zeromq/src/session_base.cpp +503 -0
- data/ext/zeromq/src/session_base.hpp +156 -0
- data/ext/zeromq/src/signaler.cpp +406 -0
- data/ext/zeromq/src/signaler.hpp +63 -0
- data/ext/zeromq/src/socket_base.cpp +1236 -0
- data/ext/zeromq/src/socket_base.hpp +255 -0
- data/ext/zeromq/src/stdint.hpp +63 -0
- data/ext/zeromq/src/stream_engine.cpp +594 -0
- data/ext/zeromq/src/stream_engine.hpp +149 -0
- data/ext/zeromq/src/sub.cpp +93 -0
- data/ext/zeromq/src/sub.hpp +71 -0
- data/ext/zeromq/src/tcp.cpp +131 -0
- data/ext/zeromq/src/tcp.hpp +38 -0
- data/ext/zeromq/src/tcp_address.cpp +613 -0
- data/ext/zeromq/src/tcp_address.hpp +100 -0
- data/ext/zeromq/src/tcp_connecter.cpp +319 -0
- data/ext/zeromq/src/tcp_connecter.hpp +123 -0
- data/ext/zeromq/src/tcp_listener.cpp +293 -0
- data/ext/zeromq/src/tcp_listener.hpp +91 -0
- data/ext/zeromq/src/thread.cpp +107 -0
- data/ext/zeromq/src/thread.hpp +79 -0
- data/ext/zeromq/src/trie.cpp +337 -0
- data/ext/zeromq/src/trie.hpp +79 -0
- data/ext/zeromq/src/v1_decoder.cpp +162 -0
- data/ext/zeromq/src/v1_decoder.hpp +68 -0
- data/ext/zeromq/src/v1_encoder.cpp +103 -0
- data/ext/zeromq/src/v1_encoder.hpp +60 -0
- data/ext/zeromq/src/v1_protocol.hpp +43 -0
- data/ext/zeromq/src/version.rc.in +93 -0
- data/ext/zeromq/src/windows.hpp +181 -0
- data/ext/zeromq/src/wire.hpp +99 -0
- data/ext/zeromq/src/xpub.cpp +200 -0
- data/ext/zeromq/src/xpub.hpp +110 -0
- data/ext/zeromq/src/xsub.cpp +242 -0
- data/ext/zeromq/src/xsub.hpp +108 -0
- data/ext/zeromq/src/ypipe.hpp +210 -0
- data/ext/zeromq/src/yqueue.hpp +199 -0
- data/ext/zeromq/src/zmq.cpp +1058 -0
- data/ext/zeromq/src/zmq_utils.cpp +61 -0
- data/ext/zeromq/tests/Makefile.am +55 -0
- data/ext/zeromq/tests/test_connect_delay.cpp +260 -0
- data/ext/zeromq/tests/test_connect_resolve.cpp +54 -0
- data/ext/zeromq/tests/test_disconnect_inproc.cpp +120 -0
- data/ext/zeromq/tests/test_hwm.cpp +83 -0
- data/ext/zeromq/tests/test_invalid_rep.cpp +92 -0
- data/ext/zeromq/tests/test_last_endpoint.cpp +60 -0
- data/ext/zeromq/tests/test_monitor.cpp +289 -0
- data/ext/zeromq/tests/test_msg_flags.cpp +78 -0
- data/ext/zeromq/tests/test_pair_inproc.cpp +53 -0
- data/ext/zeromq/tests/test_pair_ipc.cpp +53 -0
- data/ext/zeromq/tests/test_pair_tcp.cpp +54 -0
- data/ext/zeromq/tests/test_reqrep_device.cpp +143 -0
- data/ext/zeromq/tests/test_reqrep_inproc.cpp +53 -0
- data/ext/zeromq/tests/test_reqrep_ipc.cpp +53 -0
- data/ext/zeromq/tests/test_reqrep_tcp.cpp +54 -0
- data/ext/zeromq/tests/test_router_mandatory.cpp +62 -0
- data/ext/zeromq/tests/test_shutdown_stress.cpp +93 -0
- data/ext/zeromq/tests/test_sub_forward.cpp +99 -0
- data/ext/zeromq/tests/test_term_endpoint.cpp +118 -0
- data/ext/zeromq/tests/test_timeo.cpp +119 -0
- data/ext/zeromq/tests/testutil.hpp +77 -0
- data/ext/zeromq/version.sh +21 -0
- data/lib/zmq/version.rb +1 -1
- data/rbczmq.gemspec +16 -3
- data/test/test_socket.rb +13 -1
- metadata +398 -9
- checksums.yaml +0 -15
- data/ext/czmq-1.4.1.tar.gz +0 -0
- data/ext/zeromq-3.2.3.tar.gz +0 -0
@@ -0,0 +1,63 @@
|
|
1
|
+
/*
|
2
|
+
Copyright (c) 2010-2011 250bpm s.r.o.
|
3
|
+
Copyright (c) 2010-2011 Other contributors as noted in the AUTHORS file
|
4
|
+
|
5
|
+
This file is part of 0MQ.
|
6
|
+
|
7
|
+
0MQ is free software; you can redistribute it and/or modify it under
|
8
|
+
the terms of the GNU Lesser General Public License as published by
|
9
|
+
the Free Software Foundation; either version 3 of the License, or
|
10
|
+
(at your option) any later version.
|
11
|
+
|
12
|
+
0MQ is distributed in the hope that it will be useful,
|
13
|
+
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
14
|
+
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
15
|
+
GNU Lesser General Public License for more details.
|
16
|
+
|
17
|
+
You should have received a copy of the GNU Lesser General Public License
|
18
|
+
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
19
|
+
*/
|
20
|
+
|
21
|
+
#ifndef __ZMQ_SIGNALER_HPP_INCLUDED__
|
22
|
+
#define __ZMQ_SIGNALER_HPP_INCLUDED__
|
23
|
+
|
24
|
+
#include "fd.hpp"
|
25
|
+
|
26
|
+
namespace zmq
|
27
|
+
{
|
28
|
+
|
29
|
+
// This is a cross-platform equivalent to signal_fd. However, as opposed
|
30
|
+
// to signal_fd there can be at most one signal in the signaler at any
|
31
|
+
// given moment. Attempt to send a signal before receiving the previous
|
32
|
+
// one will result in undefined behaviour.
|
33
|
+
|
34
|
+
class signaler_t
|
35
|
+
{
|
36
|
+
public:
|
37
|
+
|
38
|
+
signaler_t ();
|
39
|
+
~signaler_t ();
|
40
|
+
|
41
|
+
fd_t get_fd ();
|
42
|
+
void send ();
|
43
|
+
int wait (int timeout_);
|
44
|
+
void recv ();
|
45
|
+
|
46
|
+
private:
|
47
|
+
|
48
|
+
// Creates a pair of filedescriptors that will be used
|
49
|
+
// to pass the signals.
|
50
|
+
static int make_fdpair (fd_t *r_, fd_t *w_);
|
51
|
+
|
52
|
+
// Underlying write & read file descriptor.
|
53
|
+
fd_t w;
|
54
|
+
fd_t r;
|
55
|
+
|
56
|
+
// Disable copying of signaler_t object.
|
57
|
+
signaler_t (const signaler_t&);
|
58
|
+
const signaler_t &operator = (const signaler_t&);
|
59
|
+
};
|
60
|
+
|
61
|
+
}
|
62
|
+
|
63
|
+
#endif
|
@@ -0,0 +1,1236 @@
|
|
1
|
+
/*
|
2
|
+
Copyright (c) 2009-2011 250bpm s.r.o.
|
3
|
+
Copyright (c) 2007-2009 iMatix Corporation
|
4
|
+
Copyright (c) 2011 VMware, Inc.
|
5
|
+
Copyright (c) 2007-2011 Other contributors as noted in the AUTHORS file
|
6
|
+
|
7
|
+
This file is part of 0MQ.
|
8
|
+
|
9
|
+
0MQ is free software; you can redistribute it and/or modify it under
|
10
|
+
the terms of the GNU Lesser General Public License as published by
|
11
|
+
the Free Software Foundation; either version 3 of the License, or
|
12
|
+
(at your option) any later version.
|
13
|
+
|
14
|
+
0MQ is distributed in the hope that it will be useful,
|
15
|
+
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
16
|
+
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
17
|
+
GNU Lesser General Public License for more details.
|
18
|
+
|
19
|
+
You should have received a copy of the GNU Lesser General Public License
|
20
|
+
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
21
|
+
*/
|
22
|
+
|
23
|
+
#include <new>
|
24
|
+
#include <string>
|
25
|
+
#include <algorithm>
|
26
|
+
|
27
|
+
#include "platform.hpp"
|
28
|
+
|
29
|
+
#if defined ZMQ_HAVE_WINDOWS
|
30
|
+
#include "windows.hpp"
|
31
|
+
#if defined _MSC_VER
|
32
|
+
#if defined WINCE
|
33
|
+
#include <cmnintrin.h>
|
34
|
+
#else
|
35
|
+
#include <intrin.h>
|
36
|
+
#endif
|
37
|
+
#endif
|
38
|
+
#else
|
39
|
+
#include <unistd.h>
|
40
|
+
#endif
|
41
|
+
|
42
|
+
#include "socket_base.hpp"
|
43
|
+
#include "tcp_listener.hpp"
|
44
|
+
#include "ipc_listener.hpp"
|
45
|
+
#include "tcp_connecter.hpp"
|
46
|
+
#include "io_thread.hpp"
|
47
|
+
#include "session_base.hpp"
|
48
|
+
#include "config.hpp"
|
49
|
+
#include "pipe.hpp"
|
50
|
+
#include "err.hpp"
|
51
|
+
#include "ctx.hpp"
|
52
|
+
#include "platform.hpp"
|
53
|
+
#include "likely.hpp"
|
54
|
+
#include "msg.hpp"
|
55
|
+
#include "address.hpp"
|
56
|
+
#include "ipc_address.hpp"
|
57
|
+
#include "tcp_address.hpp"
|
58
|
+
#ifdef ZMQ_HAVE_OPENPGM
|
59
|
+
#include "pgm_socket.hpp"
|
60
|
+
#endif
|
61
|
+
|
62
|
+
#include "pair.hpp"
|
63
|
+
#include "pub.hpp"
|
64
|
+
#include "sub.hpp"
|
65
|
+
#include "req.hpp"
|
66
|
+
#include "rep.hpp"
|
67
|
+
#include "pull.hpp"
|
68
|
+
#include "push.hpp"
|
69
|
+
#include "dealer.hpp"
|
70
|
+
#include "router.hpp"
|
71
|
+
#include "xpub.hpp"
|
72
|
+
#include "xsub.hpp"
|
73
|
+
|
74
|
+
bool zmq::socket_base_t::check_tag ()
|
75
|
+
{
|
76
|
+
return tag == 0xbaddecaf;
|
77
|
+
}
|
78
|
+
|
79
|
+
zmq::socket_base_t *zmq::socket_base_t::create (int type_, class ctx_t *parent_,
|
80
|
+
uint32_t tid_, int sid_)
|
81
|
+
{
|
82
|
+
socket_base_t *s = NULL;
|
83
|
+
switch (type_) {
|
84
|
+
|
85
|
+
case ZMQ_PAIR:
|
86
|
+
s = new (std::nothrow) pair_t (parent_, tid_, sid_);
|
87
|
+
break;
|
88
|
+
case ZMQ_PUB:
|
89
|
+
s = new (std::nothrow) pub_t (parent_, tid_, sid_);
|
90
|
+
break;
|
91
|
+
case ZMQ_SUB:
|
92
|
+
s = new (std::nothrow) sub_t (parent_, tid_, sid_);
|
93
|
+
break;
|
94
|
+
case ZMQ_REQ:
|
95
|
+
s = new (std::nothrow) req_t (parent_, tid_, sid_);
|
96
|
+
break;
|
97
|
+
case ZMQ_REP:
|
98
|
+
s = new (std::nothrow) rep_t (parent_, tid_, sid_);
|
99
|
+
break;
|
100
|
+
case ZMQ_DEALER:
|
101
|
+
s = new (std::nothrow) dealer_t (parent_, tid_, sid_);
|
102
|
+
break;
|
103
|
+
case ZMQ_ROUTER:
|
104
|
+
s = new (std::nothrow) router_t (parent_, tid_, sid_);
|
105
|
+
break;
|
106
|
+
case ZMQ_PULL:
|
107
|
+
s = new (std::nothrow) pull_t (parent_, tid_, sid_);
|
108
|
+
break;
|
109
|
+
case ZMQ_PUSH:
|
110
|
+
s = new (std::nothrow) push_t (parent_, tid_, sid_);
|
111
|
+
break;
|
112
|
+
case ZMQ_XPUB:
|
113
|
+
s = new (std::nothrow) xpub_t (parent_, tid_, sid_);
|
114
|
+
break;
|
115
|
+
case ZMQ_XSUB:
|
116
|
+
s = new (std::nothrow) xsub_t (parent_, tid_, sid_);
|
117
|
+
break;
|
118
|
+
default:
|
119
|
+
errno = EINVAL;
|
120
|
+
return NULL;
|
121
|
+
}
|
122
|
+
alloc_assert (s);
|
123
|
+
return s;
|
124
|
+
}
|
125
|
+
|
126
|
+
zmq::socket_base_t::socket_base_t (ctx_t *parent_, uint32_t tid_, int sid_) :
|
127
|
+
own_t (parent_, tid_),
|
128
|
+
tag (0xbaddecaf),
|
129
|
+
ctx_terminated (false),
|
130
|
+
destroyed (false),
|
131
|
+
last_tsc (0),
|
132
|
+
ticks (0),
|
133
|
+
rcvmore (false),
|
134
|
+
monitor_socket (NULL),
|
135
|
+
monitor_events (0)
|
136
|
+
{
|
137
|
+
options.socket_id = sid_;
|
138
|
+
}
|
139
|
+
|
140
|
+
zmq::socket_base_t::~socket_base_t ()
|
141
|
+
{
|
142
|
+
stop_monitor ();
|
143
|
+
zmq_assert (destroyed);
|
144
|
+
}
|
145
|
+
|
146
|
+
zmq::mailbox_t *zmq::socket_base_t::get_mailbox ()
|
147
|
+
{
|
148
|
+
return &mailbox;
|
149
|
+
}
|
150
|
+
|
151
|
+
void zmq::socket_base_t::stop ()
|
152
|
+
{
|
153
|
+
// Called by ctx when it is terminated (zmq_term).
|
154
|
+
// 'stop' command is sent from the threads that called zmq_term to
|
155
|
+
// the thread owning the socket. This way, blocking call in the
|
156
|
+
// owner thread can be interrupted.
|
157
|
+
send_stop ();
|
158
|
+
}
|
159
|
+
|
160
|
+
int zmq::socket_base_t::parse_uri (const char *uri_,
|
161
|
+
std::string &protocol_, std::string &address_)
|
162
|
+
{
|
163
|
+
zmq_assert (uri_ != NULL);
|
164
|
+
|
165
|
+
std::string uri (uri_);
|
166
|
+
std::string::size_type pos = uri.find ("://");
|
167
|
+
if (pos == std::string::npos) {
|
168
|
+
errno = EINVAL;
|
169
|
+
return -1;
|
170
|
+
}
|
171
|
+
protocol_ = uri.substr (0, pos);
|
172
|
+
address_ = uri.substr (pos + 3);
|
173
|
+
|
174
|
+
if (protocol_.empty () || address_.empty ()) {
|
175
|
+
errno = EINVAL;
|
176
|
+
return -1;
|
177
|
+
}
|
178
|
+
return 0;
|
179
|
+
}
|
180
|
+
|
181
|
+
int zmq::socket_base_t::check_protocol (const std::string &protocol_)
|
182
|
+
{
|
183
|
+
// First check out whether the protcol is something we are aware of.
|
184
|
+
if (protocol_ != "inproc" && protocol_ != "ipc" && protocol_ != "tcp" &&
|
185
|
+
protocol_ != "pgm" && protocol_ != "epgm") {
|
186
|
+
errno = EPROTONOSUPPORT;
|
187
|
+
return -1;
|
188
|
+
}
|
189
|
+
|
190
|
+
// If 0MQ is not compiled with OpenPGM, pgm and epgm transports
|
191
|
+
// are not avaialble.
|
192
|
+
#if !defined ZMQ_HAVE_OPENPGM
|
193
|
+
if (protocol_ == "pgm" || protocol_ == "epgm") {
|
194
|
+
errno = EPROTONOSUPPORT;
|
195
|
+
return -1;
|
196
|
+
}
|
197
|
+
#endif
|
198
|
+
|
199
|
+
// IPC transport is not available on Windows and OpenVMS.
|
200
|
+
#if defined ZMQ_HAVE_WINDOWS || defined ZMQ_HAVE_OPENVMS
|
201
|
+
if (protocol_ == "ipc") {
|
202
|
+
// Unknown protocol.
|
203
|
+
errno = EPROTONOSUPPORT;
|
204
|
+
return -1;
|
205
|
+
}
|
206
|
+
#endif
|
207
|
+
|
208
|
+
// Check whether socket type and transport protocol match.
|
209
|
+
// Specifically, multicast protocols can't be combined with
|
210
|
+
// bi-directional messaging patterns (socket types).
|
211
|
+
if ((protocol_ == "pgm" || protocol_ == "epgm") &&
|
212
|
+
options.type != ZMQ_PUB && options.type != ZMQ_SUB &&
|
213
|
+
options.type != ZMQ_XPUB && options.type != ZMQ_XSUB) {
|
214
|
+
errno = ENOCOMPATPROTO;
|
215
|
+
return -1;
|
216
|
+
}
|
217
|
+
|
218
|
+
// Protocol is available.
|
219
|
+
return 0;
|
220
|
+
}
|
221
|
+
|
222
|
+
void zmq::socket_base_t::attach_pipe (pipe_t *pipe_, bool icanhasall_)
|
223
|
+
{
|
224
|
+
// First, register the pipe so that we can terminate it later on.
|
225
|
+
pipe_->set_event_sink (this);
|
226
|
+
pipes.push_back (pipe_);
|
227
|
+
|
228
|
+
// Let the derived socket type know about new pipe.
|
229
|
+
xattach_pipe (pipe_, icanhasall_);
|
230
|
+
|
231
|
+
// If the socket is already being closed, ask any new pipes to terminate
|
232
|
+
// straight away.
|
233
|
+
if (is_terminating ()) {
|
234
|
+
register_term_acks (1);
|
235
|
+
pipe_->terminate (false);
|
236
|
+
}
|
237
|
+
}
|
238
|
+
|
239
|
+
int zmq::socket_base_t::setsockopt (int option_, const void *optval_,
|
240
|
+
size_t optvallen_)
|
241
|
+
{
|
242
|
+
if (unlikely (ctx_terminated)) {
|
243
|
+
errno = ETERM;
|
244
|
+
return -1;
|
245
|
+
}
|
246
|
+
|
247
|
+
// First, check whether specific socket type overloads the option.
|
248
|
+
int rc = xsetsockopt (option_, optval_, optvallen_);
|
249
|
+
if (rc == 0 || errno != EINVAL)
|
250
|
+
return rc;
|
251
|
+
|
252
|
+
// If the socket type doesn't support the option, pass it to
|
253
|
+
// the generic option parser.
|
254
|
+
return options.setsockopt (option_, optval_, optvallen_);
|
255
|
+
}
|
256
|
+
|
257
|
+
int zmq::socket_base_t::getsockopt (int option_, void *optval_,
|
258
|
+
size_t *optvallen_)
|
259
|
+
{
|
260
|
+
if (unlikely (ctx_terminated)) {
|
261
|
+
errno = ETERM;
|
262
|
+
return -1;
|
263
|
+
}
|
264
|
+
|
265
|
+
if (option_ == ZMQ_RCVMORE) {
|
266
|
+
if (*optvallen_ < sizeof (int)) {
|
267
|
+
errno = EINVAL;
|
268
|
+
return -1;
|
269
|
+
}
|
270
|
+
*((int*) optval_) = rcvmore ? 1 : 0;
|
271
|
+
*optvallen_ = sizeof (int);
|
272
|
+
return 0;
|
273
|
+
}
|
274
|
+
|
275
|
+
if (option_ == ZMQ_FD) {
|
276
|
+
if (*optvallen_ < sizeof (fd_t)) {
|
277
|
+
errno = EINVAL;
|
278
|
+
return -1;
|
279
|
+
}
|
280
|
+
*((fd_t*) optval_) = mailbox.get_fd ();
|
281
|
+
*optvallen_ = sizeof (fd_t);
|
282
|
+
return 0;
|
283
|
+
}
|
284
|
+
|
285
|
+
if (option_ == ZMQ_EVENTS) {
|
286
|
+
if (*optvallen_ < sizeof (int)) {
|
287
|
+
errno = EINVAL;
|
288
|
+
return -1;
|
289
|
+
}
|
290
|
+
int rc = process_commands (0, false);
|
291
|
+
if (rc != 0 && (errno == EINTR || errno == ETERM))
|
292
|
+
return -1;
|
293
|
+
errno_assert (rc == 0);
|
294
|
+
*((int*) optval_) = 0;
|
295
|
+
if (has_out ())
|
296
|
+
*((int*) optval_) |= ZMQ_POLLOUT;
|
297
|
+
if (has_in ())
|
298
|
+
*((int*) optval_) |= ZMQ_POLLIN;
|
299
|
+
*optvallen_ = sizeof (int);
|
300
|
+
return 0;
|
301
|
+
}
|
302
|
+
|
303
|
+
return options.getsockopt (option_, optval_, optvallen_);
|
304
|
+
}
|
305
|
+
|
306
|
+
int zmq::socket_base_t::bind (const char *addr_)
|
307
|
+
{
|
308
|
+
if (unlikely (ctx_terminated)) {
|
309
|
+
errno = ETERM;
|
310
|
+
return -1;
|
311
|
+
}
|
312
|
+
|
313
|
+
// Process pending commands, if any.
|
314
|
+
int rc = process_commands (0, false);
|
315
|
+
if (unlikely (rc != 0))
|
316
|
+
return -1;
|
317
|
+
|
318
|
+
// Parse addr_ string.
|
319
|
+
std::string protocol;
|
320
|
+
std::string address;
|
321
|
+
rc = parse_uri (addr_, protocol, address);
|
322
|
+
if (rc != 0)
|
323
|
+
return -1;
|
324
|
+
|
325
|
+
rc = check_protocol (protocol);
|
326
|
+
if (rc != 0)
|
327
|
+
return -1;
|
328
|
+
|
329
|
+
if (protocol == "inproc") {
|
330
|
+
endpoint_t endpoint = {this, options};
|
331
|
+
int rc = register_endpoint (addr_, endpoint);
|
332
|
+
if (rc == 0) {
|
333
|
+
// Save last endpoint URI
|
334
|
+
options.last_endpoint.assign (addr_);
|
335
|
+
}
|
336
|
+
return rc;
|
337
|
+
}
|
338
|
+
|
339
|
+
if (protocol == "pgm" || protocol == "epgm") {
|
340
|
+
// For convenience's sake, bind can be used interchageable with
|
341
|
+
// connect for PGM and EPGM transports.
|
342
|
+
return connect (addr_);
|
343
|
+
}
|
344
|
+
|
345
|
+
// Remaining trasnports require to be run in an I/O thread, so at this
|
346
|
+
// point we'll choose one.
|
347
|
+
io_thread_t *io_thread = choose_io_thread (options.affinity);
|
348
|
+
if (!io_thread) {
|
349
|
+
errno = EMTHREAD;
|
350
|
+
return -1;
|
351
|
+
}
|
352
|
+
|
353
|
+
if (protocol == "tcp") {
|
354
|
+
tcp_listener_t *listener = new (std::nothrow) tcp_listener_t (
|
355
|
+
io_thread, this, options);
|
356
|
+
alloc_assert (listener);
|
357
|
+
int rc = listener->set_address (address.c_str ());
|
358
|
+
if (rc != 0) {
|
359
|
+
delete listener;
|
360
|
+
event_bind_failed (address, zmq_errno());
|
361
|
+
return -1;
|
362
|
+
}
|
363
|
+
|
364
|
+
// Save last endpoint URI
|
365
|
+
listener->get_address (options.last_endpoint);
|
366
|
+
|
367
|
+
add_endpoint (addr_, (own_t *) listener);
|
368
|
+
return 0;
|
369
|
+
}
|
370
|
+
|
371
|
+
#if !defined ZMQ_HAVE_WINDOWS && !defined ZMQ_HAVE_OPENVMS
|
372
|
+
if (protocol == "ipc") {
|
373
|
+
ipc_listener_t *listener = new (std::nothrow) ipc_listener_t (
|
374
|
+
io_thread, this, options);
|
375
|
+
alloc_assert (listener);
|
376
|
+
int rc = listener->set_address (address.c_str ());
|
377
|
+
if (rc != 0) {
|
378
|
+
delete listener;
|
379
|
+
event_bind_failed (address, zmq_errno());
|
380
|
+
return -1;
|
381
|
+
}
|
382
|
+
|
383
|
+
// Save last endpoint URI
|
384
|
+
listener->get_address (options.last_endpoint);
|
385
|
+
|
386
|
+
add_endpoint (addr_, (own_t *) listener);
|
387
|
+
return 0;
|
388
|
+
}
|
389
|
+
#endif
|
390
|
+
|
391
|
+
zmq_assert (false);
|
392
|
+
return -1;
|
393
|
+
}
|
394
|
+
|
395
|
+
int zmq::socket_base_t::connect (const char *addr_)
|
396
|
+
{
|
397
|
+
if (unlikely (ctx_terminated)) {
|
398
|
+
errno = ETERM;
|
399
|
+
return -1;
|
400
|
+
}
|
401
|
+
|
402
|
+
// Process pending commands, if any.
|
403
|
+
int rc = process_commands (0, false);
|
404
|
+
if (unlikely (rc != 0))
|
405
|
+
return -1;
|
406
|
+
|
407
|
+
// Parse addr_ string.
|
408
|
+
std::string protocol;
|
409
|
+
std::string address;
|
410
|
+
rc = parse_uri (addr_, protocol, address);
|
411
|
+
if (rc != 0)
|
412
|
+
return -1;
|
413
|
+
|
414
|
+
rc = check_protocol (protocol);
|
415
|
+
if (rc != 0)
|
416
|
+
return -1;
|
417
|
+
|
418
|
+
if (protocol == "inproc") {
|
419
|
+
|
420
|
+
// TODO: inproc connect is specific with respect to creating pipes
|
421
|
+
// as there's no 'reconnect' functionality implemented. Once that
|
422
|
+
// is in place we should follow generic pipe creation algorithm.
|
423
|
+
|
424
|
+
// Find the peer endpoint.
|
425
|
+
endpoint_t peer = find_endpoint (addr_);
|
426
|
+
if (!peer.socket)
|
427
|
+
return -1;
|
428
|
+
|
429
|
+
// The total HWM for an inproc connection should be the sum of
|
430
|
+
// the binder's HWM and the connector's HWM.
|
431
|
+
int sndhwm = 0;
|
432
|
+
if (options.sndhwm != 0 && peer.options.rcvhwm != 0)
|
433
|
+
sndhwm = options.sndhwm + peer.options.rcvhwm;
|
434
|
+
int rcvhwm = 0;
|
435
|
+
if (options.rcvhwm != 0 && peer.options.sndhwm != 0)
|
436
|
+
rcvhwm = options.rcvhwm + peer.options.sndhwm;
|
437
|
+
|
438
|
+
// Create a bi-directional pipe to connect the peers.
|
439
|
+
object_t *parents [2] = {this, peer.socket};
|
440
|
+
pipe_t *pipes [2] = {NULL, NULL};
|
441
|
+
int hwms [2] = {sndhwm, rcvhwm};
|
442
|
+
bool delays [2] = {options.delay_on_disconnect, options.delay_on_close};
|
443
|
+
int rc = pipepair (parents, pipes, hwms, delays);
|
444
|
+
errno_assert (rc == 0);
|
445
|
+
|
446
|
+
// Attach local end of the pipe to this socket object.
|
447
|
+
attach_pipe (pipes [0]);
|
448
|
+
|
449
|
+
// If required, send the identity of the local socket to the peer.
|
450
|
+
if (peer.options.recv_identity) {
|
451
|
+
msg_t id;
|
452
|
+
rc = id.init_size (options.identity_size);
|
453
|
+
errno_assert (rc == 0);
|
454
|
+
memcpy (id.data (), options.identity, options.identity_size);
|
455
|
+
id.set_flags (msg_t::identity);
|
456
|
+
bool written = pipes [0]->write (&id);
|
457
|
+
zmq_assert (written);
|
458
|
+
pipes [0]->flush ();
|
459
|
+
}
|
460
|
+
|
461
|
+
// If required, send the identity of the peer to the local socket.
|
462
|
+
if (options.recv_identity) {
|
463
|
+
msg_t id;
|
464
|
+
rc = id.init_size (peer.options.identity_size);
|
465
|
+
errno_assert (rc == 0);
|
466
|
+
memcpy (id.data (), peer.options.identity, peer.options.identity_size);
|
467
|
+
id.set_flags (msg_t::identity);
|
468
|
+
bool written = pipes [1]->write (&id);
|
469
|
+
zmq_assert (written);
|
470
|
+
pipes [1]->flush ();
|
471
|
+
}
|
472
|
+
|
473
|
+
// Attach remote end of the pipe to the peer socket. Note that peer's
|
474
|
+
// seqnum was incremented in find_endpoint function. We don't need it
|
475
|
+
// increased here.
|
476
|
+
send_bind (peer.socket, pipes [1], false);
|
477
|
+
|
478
|
+
// Save last endpoint URI
|
479
|
+
options.last_endpoint.assign (addr_);
|
480
|
+
|
481
|
+
// remember inproc connections for disconnect
|
482
|
+
inprocs.insert (inprocs_t::value_type (std::string (addr_), pipes[0]));
|
483
|
+
|
484
|
+
return 0;
|
485
|
+
}
|
486
|
+
|
487
|
+
// Choose the I/O thread to run the session in.
|
488
|
+
io_thread_t *io_thread = choose_io_thread (options.affinity);
|
489
|
+
if (!io_thread) {
|
490
|
+
errno = EMTHREAD;
|
491
|
+
return -1;
|
492
|
+
}
|
493
|
+
|
494
|
+
address_t *paddr = new (std::nothrow) address_t (protocol, address);
|
495
|
+
alloc_assert (paddr);
|
496
|
+
|
497
|
+
// Resolve address (if needed by the protocol)
|
498
|
+
if (protocol == "tcp") {
|
499
|
+
paddr->resolved.tcp_addr = new (std::nothrow) tcp_address_t ();
|
500
|
+
alloc_assert (paddr->resolved.tcp_addr);
|
501
|
+
int rc = paddr->resolved.tcp_addr->resolve (
|
502
|
+
address.c_str (), false, options.ipv4only ? true : false);
|
503
|
+
if (rc != 0) {
|
504
|
+
delete paddr;
|
505
|
+
return -1;
|
506
|
+
}
|
507
|
+
}
|
508
|
+
#if !defined ZMQ_HAVE_WINDOWS && !defined ZMQ_HAVE_OPENVMS
|
509
|
+
else
|
510
|
+
if (protocol == "ipc") {
|
511
|
+
paddr->resolved.ipc_addr = new (std::nothrow) ipc_address_t ();
|
512
|
+
alloc_assert (paddr->resolved.ipc_addr);
|
513
|
+
int rc = paddr->resolved.ipc_addr->resolve (address.c_str ());
|
514
|
+
if (rc != 0) {
|
515
|
+
delete paddr;
|
516
|
+
return -1;
|
517
|
+
}
|
518
|
+
}
|
519
|
+
#endif
|
520
|
+
#ifdef ZMQ_HAVE_OPENPGM
|
521
|
+
if (protocol == "pgm" || protocol == "epgm") {
|
522
|
+
struct pgm_addrinfo_t *res = NULL;
|
523
|
+
uint16_t port_number = 0;
|
524
|
+
int rc = pgm_socket_t::init_address(address.c_str(), &res, &port_number);
|
525
|
+
if (res != NULL)
|
526
|
+
pgm_freeaddrinfo (res);
|
527
|
+
if (rc != 0 || port_number == 0)
|
528
|
+
return -1;
|
529
|
+
}
|
530
|
+
#endif
|
531
|
+
// Create session.
|
532
|
+
session_base_t *session = session_base_t::create (io_thread, true, this,
|
533
|
+
options, paddr);
|
534
|
+
errno_assert (session);
|
535
|
+
|
536
|
+
// PGM does not support subscription forwarding; ask for all data to be
|
537
|
+
// sent to this pipe.
|
538
|
+
bool icanhasall = protocol == "pgm" || protocol == "epgm";
|
539
|
+
|
540
|
+
if (options.delay_attach_on_connect != 1 || icanhasall) {
|
541
|
+
// Create a bi-directional pipe.
|
542
|
+
object_t *parents [2] = {this, session};
|
543
|
+
pipe_t *pipes [2] = {NULL, NULL};
|
544
|
+
int hwms [2] = {options.sndhwm, options.rcvhwm};
|
545
|
+
bool delays [2] = {options.delay_on_disconnect, options.delay_on_close};
|
546
|
+
rc = pipepair (parents, pipes, hwms, delays);
|
547
|
+
errno_assert (rc == 0);
|
548
|
+
|
549
|
+
// Attach local end of the pipe to the socket object.
|
550
|
+
attach_pipe (pipes [0], icanhasall);
|
551
|
+
|
552
|
+
// Attach remote end of the pipe to the session object later on.
|
553
|
+
session->attach_pipe (pipes [1]);
|
554
|
+
}
|
555
|
+
|
556
|
+
// Save last endpoint URI
|
557
|
+
paddr->to_string (options.last_endpoint);
|
558
|
+
|
559
|
+
add_endpoint (addr_, (own_t *) session);
|
560
|
+
return 0;
|
561
|
+
}
|
562
|
+
|
563
|
+
void zmq::socket_base_t::add_endpoint (const char *addr_, own_t *endpoint_)
|
564
|
+
{
|
565
|
+
// Activate the session. Make it a child of this socket.
|
566
|
+
launch_child (endpoint_);
|
567
|
+
endpoints.insert (endpoints_t::value_type (std::string (addr_), endpoint_));
|
568
|
+
}
|
569
|
+
|
570
|
+
//  Tear down the endpoint previously established with the same addr_ string
//  (zmq_unbind / zmq_disconnect).  Returns 0 on success, -1 with errno set
//  to ETERM, EINVAL or ENOENT on failure.
int zmq::socket_base_t::term_endpoint (const char *addr_)
{
    //  Check whether the library haven't been shut down yet.
    if (unlikely (ctx_terminated)) {
        errno = ETERM;
        return -1;
    }

    //  Check whether endpoint address passed to the function is valid.
    if (unlikely (!addr_)) {
        errno = EINVAL;
        return -1;
    }

    //  Process pending commands, if any, since there could be pending unprocessed process_own()'s
    //  (from launch_child() for example) we're asked to terminate now.
    int rc = process_commands (0, false);
    if (unlikely (rc != 0))
        return -1;

    //  Parse addr_ string.
    std::string protocol;
    std::string address;
    rc = parse_uri (addr_, protocol, address);
    if (rc != 0)
        return -1;

    rc = check_protocol (protocol);
    if (rc != 0)
        return -1;

    //  Disconnect an inproc socket
    if (protocol == "inproc") {
        //  Several pipes may be registered under the same URI; terminate
        //  all of them and drop the bookkeeping entries.
        std::pair <inprocs_t::iterator, inprocs_t::iterator> range = inprocs.equal_range (std::string (addr_));
        if (range.first == range.second) {
            errno = ENOENT;
            return -1;
        }

        for (inprocs_t::iterator it = range.first; it != range.second; ++it)
            it->second->terminate(true);
        inprocs.erase (range.first, range.second);
        return 0;
    }


    //  Find the endpoints range (if any) corresponding to the addr_ string.
    std::pair <endpoints_t::iterator, endpoints_t::iterator> range = endpoints.equal_range (std::string (addr_));
    if (range.first == range.second) {
        errno = ENOENT;
        return -1;
    }

    //  Ask every owned endpoint object registered under this URI to shut
    //  down, then forget about them.
    for (endpoints_t::iterator it = range.first; it != range.second; ++it)
        term_child (it->second);
    endpoints.erase (range.first, range.second);
    return 0;
}
|
628
|
+
|
629
|
+
//  Send a message on the socket.  Honours the ZMQ_SNDMORE / ZMQ_DONTWAIT
//  flags and the ZMQ_SNDTIMEO option.  Returns 0 on success, -1 with errno
//  set (ETERM, EFAULT, EAGAIN, ...) on failure.
int zmq::socket_base_t::send (msg_t *msg_, int flags_)
{
    //  Check whether the library haven't been shut down yet.
    if (unlikely (ctx_terminated)) {
        errno = ETERM;
        return -1;
    }

    //  Check whether message passed to the function is valid.
    if (unlikely (!msg_ || !msg_->check ())) {
        errno = EFAULT;
        return -1;
    }

    //  Process pending commands, if any.
    int rc = process_commands (0, true);
    if (unlikely (rc != 0))
        return -1;

    //  Clear any user-visible flags that are set on the message.
    msg_->reset_flags (msg_t::more);

    //  At this point we impose the flags on the message.
    if (flags_ & ZMQ_SNDMORE)
        msg_->set_flags (msg_t::more);

    //  Try to send the message.
    rc = xsend (msg_, flags_);
    if (rc == 0)
        return 0;
    if (unlikely (errno != EAGAIN))
        return -1;

    //  In case of non-blocking send we'll simply propagate
    //  the error - including EAGAIN - up the stack.
    if (flags_ & ZMQ_DONTWAIT || options.sndtimeo == 0)
        return -1;

    //  Compute the time when the timeout should occur.
    //  If the timeout is infinite, don't care.
    int timeout = options.sndtimeo;
    uint64_t end = timeout < 0 ? 0 : (clock.now_ms () + timeout);

    //  Oops, we couldn't send the message. Wait for the next
    //  command, process it and try to send the message again.
    //  If timeout is reached in the meantime, return EAGAIN.
    while (true) {
        if (unlikely (process_commands (timeout, false) != 0))
            return -1;
        rc = xsend (msg_, flags_);
        if (rc == 0)
            break;
        if (unlikely (errno != EAGAIN))
            return -1;
        //  Recompute the remaining time budget; give up once it is spent.
        //  (timeout < 0 means wait forever, so it is left untouched.)
        if (timeout > 0) {
            timeout = (int) (end - clock.now_ms ());
            if (timeout <= 0) {
                errno = EAGAIN;
                return -1;
            }
        }
    }
    return 0;
}
|
693
|
+
|
694
|
+
//  Receive a message from the socket.  Honours ZMQ_DONTWAIT and the
//  ZMQ_RCVTIMEO option.  Returns 0 on success, -1 with errno set
//  (ETERM, EFAULT, EAGAIN, ...) on failure.
int zmq::socket_base_t::recv (msg_t *msg_, int flags_)
{
    //  Check whether the library haven't been shut down yet.
    if (unlikely (ctx_terminated)) {
        errno = ETERM;
        return -1;
    }

    //  Check whether message passed to the function is valid.
    if (unlikely (!msg_ || !msg_->check ())) {
        errno = EFAULT;
        return -1;
    }

    //  Once every inbound_poll_rate messages check for signals and process
    //  incoming commands. This happens only if we are not polling altogether
    //  because there are messages available all the time. If poll occurs,
    //  ticks is set to zero and thus we avoid this code.
    //
    //  Note that 'recv' uses different command throttling algorithm (the one
    //  described above) from the one used by 'send'. This is because counting
    //  ticks is more efficient than doing RDTSC all the time.
    if (++ticks == inbound_poll_rate) {
        if (unlikely (process_commands (0, false) != 0))
            return -1;
        ticks = 0;
    }

    //  Get the message.
    int rc = xrecv (msg_, flags_);
    if (unlikely (rc != 0 && errno != EAGAIN))
        return -1;

    //  If we have the message, return immediately.
    if (rc == 0) {
        extract_flags (msg_);
        return 0;
    }

    //  If the message cannot be fetched immediately, there are two scenarios.
    //  For non-blocking recv, commands are processed in case there's an
    //  activate_reader command already waiting in a command pipe.
    //  If it's not, return EAGAIN.
    if (flags_ & ZMQ_DONTWAIT || options.rcvtimeo == 0) {
        if (unlikely (process_commands (0, false) != 0))
            return -1;
        ticks = 0;

        rc = xrecv (msg_, flags_);
        if (rc < 0)
            return rc;
        extract_flags (msg_);
        return 0;
    }

    //  Compute the time when the timeout should occur.
    //  If the timeout is infinite, don't care.
    int timeout = options.rcvtimeo;
    uint64_t end = timeout < 0 ? 0 : (clock.now_ms () + timeout);

    //  In blocking scenario, commands are processed over and over again until
    //  we are able to fetch a message.
    bool block = (ticks != 0);
    while (true) {
        if (unlikely (process_commands (block ? timeout : 0, false) != 0))
            return -1;
        rc = xrecv (msg_, flags_);
        if (rc == 0) {
            ticks = 0;
            break;
        }
        if (unlikely (errno != EAGAIN))
            return -1;
        block = true;
        //  Recompute the remaining time budget; give up once it is spent.
        //  (timeout < 0 means wait forever, so it is left untouched.)
        if (timeout > 0) {
            timeout = (int) (end - clock.now_ms ());
            if (timeout <= 0) {
                errno = EAGAIN;
                return -1;
            }
        }
    }

    extract_flags (msg_);
    return 0;
}
|
780
|
+
|
781
|
+
int zmq::socket_base_t::close ()
|
782
|
+
{
|
783
|
+
// Mark the socket as dead
|
784
|
+
tag = 0xdeadbeef;
|
785
|
+
|
786
|
+
// Transfer the ownership of the socket from this application thread
|
787
|
+
// to the reaper thread which will take care of the rest of shutdown
|
788
|
+
// process.
|
789
|
+
send_reap (this);
|
790
|
+
|
791
|
+
return 0;
|
792
|
+
}
|
793
|
+
|
794
|
+
bool zmq::socket_base_t::has_in ()
|
795
|
+
{
|
796
|
+
return xhas_in ();
|
797
|
+
}
|
798
|
+
|
799
|
+
bool zmq::socket_base_t::has_out ()
|
800
|
+
{
|
801
|
+
return xhas_out ();
|
802
|
+
}
|
803
|
+
|
804
|
+
void zmq::socket_base_t::start_reaping (poller_t *poller_)
{
    //  Hook this socket's mailbox into the reaper's poller so commands
    //  keep being serviced while the socket is being shut down.
    poller = poller_;
    handle = poller->add_fd (mailbox.get_fd (), this);
    poller->set_pollin (handle);

    //  Kick off termination and see whether the socket can already
    //  be deallocated.
    terminate ();
    check_destroy ();
}
|
816
|
+
|
817
|
+
//  Drain and dispatch commands queued on this socket's mailbox.
//  timeout_ == 0 polls without waiting; a nonzero timeout_ waits on the
//  mailbox.  When throttle_ is set, the non-waiting check may be skipped
//  entirely if commands were processed recently (cheap TSC rate limit).
//  Returns 0 on success, -1 with errno set to EINTR or ETERM.
int zmq::socket_base_t::process_commands (int timeout_, bool throttle_)
{
    int rc;
    command_t cmd;
    if (timeout_ != 0) {

        //  If we are asked to wait, simply ask mailbox to wait.
        rc = mailbox.recv (&cmd, timeout_);
    }
    else {

        //  If we are asked not to wait, check whether we haven't processed
        //  commands recently, so that we can throttle the new commands.

        //  Get the CPU's tick counter. If 0, the counter is not available.
        uint64_t tsc = zmq::clock_t::rdtsc ();

        //  Optimised version of command processing - it doesn't have to check
        //  for incoming commands each time. It does so only if certain time
        //  elapsed since last command processing. Command delay varies
        //  depending on CPU speed: It's ~1ms on 3GHz CPU, ~2ms on 1.5GHz CPU
        //  etc. The optimisation makes sense only on platforms where getting
        //  a timestamp is a very cheap operation (tens of nanoseconds).
        if (tsc && throttle_) {

            //  Check whether TSC haven't jumped backwards (in case of migration
            //  between CPU cores) and whether certain time have elapsed since
            //  last command processing. If it didn't do nothing.
            if (tsc >= last_tsc && tsc - last_tsc <= max_command_delay)
                return 0;
            last_tsc = tsc;
        }

        //  Check whether there are any commands pending for this thread.
        rc = mailbox.recv (&cmd, 0);
    }

    //  Process all available commands.
    while (rc == 0) {
        cmd.destination->process_command (cmd);
        rc = mailbox.recv (&cmd, 0);
    }

    //  The loop above exits with rc != 0; mailbox.recv is expected to have
    //  set errno to EINTR (interrupted) or EAGAIN (queue drained).
    if (errno == EINTR)
        return -1;

    zmq_assert (errno == EAGAIN);

    if (ctx_terminated) {
        errno = ETERM;
        return -1;
    }

    return 0;
}
|
872
|
+
|
873
|
+
void zmq::socket_base_t::process_stop ()
|
874
|
+
{
|
875
|
+
// Here, someone have called zmq_term while the socket was still alive.
|
876
|
+
// We'll remember the fact so that any blocking call is interrupted and any
|
877
|
+
// further attempt to use the socket will return ETERM. The user is still
|
878
|
+
// responsible for calling zmq_close on the socket though!
|
879
|
+
stop_monitor ();
|
880
|
+
ctx_terminated = true;
|
881
|
+
}
|
882
|
+
|
883
|
+
void zmq::socket_base_t::process_bind (pipe_t *pipe_)
{
    //  A peer has created a pipe for us; start using it.
    attach_pipe (pipe_);
}
|
887
|
+
|
888
|
+
//  Owner-initiated stage of socket termination.  Cuts inproc visibility,
//  asks all attached pipes to shut down, then lets the base class proceed.
void zmq::socket_base_t::process_term (int linger_)
{
    //  Unregister all inproc endpoints associated with this socket.
    //  Doing this we make sure that no new pipes from other sockets (inproc)
    //  will be initiated.
    unregister_endpoints (this);

    //  Ask all attached pipes to terminate.
    for (pipes_t::size_type i = 0; i != pipes.size (); ++i)
        pipes [i]->terminate (false);
    //  One termination ack is expected per attached pipe.
    register_term_acks ((int) pipes.size ());

    //  Continue the termination process immediately.
    own_t::process_term (linger_);
}
|
903
|
+
|
904
|
+
void zmq::socket_base_t::process_destroy ()
|
905
|
+
{
|
906
|
+
destroyed = true;
|
907
|
+
}
|
908
|
+
|
909
|
+
int zmq::socket_base_t::xsetsockopt (int, const void *, size_t)
|
910
|
+
{
|
911
|
+
errno = EINVAL;
|
912
|
+
return -1;
|
913
|
+
}
|
914
|
+
|
915
|
+
bool zmq::socket_base_t::xhas_out ()
{
    //  Default implementation: the socket type cannot send.
    return false;
}
|
919
|
+
|
920
|
+
int zmq::socket_base_t::xsend (msg_t *, int)
{
    //  Default implementation: sending is not supported by this type.
    errno = ENOTSUP;
    return -1;
}
|
925
|
+
|
926
|
+
bool zmq::socket_base_t::xhas_in ()
{
    //  Default implementation: the socket type cannot receive.
    return false;
}
|
930
|
+
|
931
|
+
int zmq::socket_base_t::xrecv (msg_t *, int)
{
    //  Default implementation: receiving is not supported by this type.
    errno = ENOTSUP;
    return -1;
}
|
936
|
+
|
937
|
+
void zmq::socket_base_t::xread_activated (pipe_t *)
{
    //  Socket types that attach pipes must override this.
    zmq_assert (false);
}
|
941
|
+
void zmq::socket_base_t::xwrite_activated (pipe_t *)
{
    //  Socket types that attach pipes must override this.
    zmq_assert (false);
}
|
945
|
+
|
946
|
+
void zmq::socket_base_t::xhiccuped (pipe_t *)
{
    //  Socket types that attach pipes must override this.
    zmq_assert (false);
}
|
950
|
+
|
951
|
+
void zmq::socket_base_t::in_event ()
|
952
|
+
{
|
953
|
+
// This function is invoked only once the socket is running in the context
|
954
|
+
// of the reaper thread. Process any commands from other threads/sockets
|
955
|
+
// that may be available at the moment. Ultimately, the socket will
|
956
|
+
// be destroyed.
|
957
|
+
process_commands (0, false);
|
958
|
+
check_destroy ();
|
959
|
+
}
|
960
|
+
|
961
|
+
void zmq::socket_base_t::out_event ()
|
962
|
+
{
|
963
|
+
zmq_assert (false);
|
964
|
+
}
|
965
|
+
|
966
|
+
void zmq::socket_base_t::timer_event (int)
|
967
|
+
{
|
968
|
+
zmq_assert (false);
|
969
|
+
}
|
970
|
+
|
971
|
+
//  Complete deallocation of the socket once process_destroy () has marked
//  it as destroyed; otherwise a no-op.
void zmq::socket_base_t::check_destroy ()
{
    //  If the object was already marked as destroyed, finish the deallocation.
    if (destroyed) {

        //  Remove the socket from the reaper's poller.
        poller->rm_fd (handle);

        //  Remove the socket from the context.
        destroy_socket (this);

        //  Notify the reaper about the fact.
        send_reaped ();

        //  Deallocate.
        own_t::process_destroy ();
    }
}
|
989
|
+
|
990
|
+
void zmq::socket_base_t::read_activated (pipe_t *pipe_)
{
    //  Forward the notification to the concrete socket type.
    xread_activated (pipe_);
}
|
994
|
+
|
995
|
+
void zmq::socket_base_t::write_activated (pipe_t *pipe_)
{
    //  Forward the notification to the concrete socket type.
    xwrite_activated (pipe_);
}
|
999
|
+
|
1000
|
+
void zmq::socket_base_t::hiccuped (pipe_t *pipe_)
{
    if (options.delay_attach_on_connect != 1)
        //  Notify derived sockets of the hiccup.
        xhiccuped (pipe_);
    else
        //  With delayed attach the hiccuped pipe is simply dropped.
        pipe_->terminate (false);
}
|
1008
|
+
|
1009
|
+
//  Called when an attached pipe has terminated: detach it from this socket
//  and, if we are shutting down, acknowledge the termination.
void zmq::socket_base_t::terminated (pipe_t *pipe_)
{
    //  Notify the specific socket type about the pipe termination.
    xterminated (pipe_);

    //  Remove pipe from inproc pipes
    //  (stop at the first match; erasing invalidates the iterator).
    for (inprocs_t::iterator it = inprocs.begin(); it != inprocs.end(); ++it) {
        if (it->second == pipe_) {
            inprocs.erase(it);
            break;
        }
    }

    //  Remove the pipe from the list of attached pipes and confirm its
    //  termination if we are already shutting down.
    pipes.erase (pipe_);
    if (is_terminating ())
        unregister_term_ack ();
}
|
1028
|
+
|
1029
|
+
void zmq::socket_base_t::extract_flags (msg_t *msg_)
{
    //  The IDENTITY flag may only arrive on socket types that opted in.
    if (unlikely (msg_->flags () & msg_t::identity))
        zmq_assert (options.recv_identity);

    //  Surface the MORE flag (read back via ZMQ_RCVMORE).
    rcvmore = (msg_->flags () & msg_t::more) != 0;
}
|
1038
|
+
|
1039
|
+
int zmq::socket_base_t::monitor (const char *addr_, int events_)
|
1040
|
+
{
|
1041
|
+
int rc;
|
1042
|
+
if (unlikely (ctx_terminated)) {
|
1043
|
+
errno = ETERM;
|
1044
|
+
return -1;
|
1045
|
+
}
|
1046
|
+
|
1047
|
+
// Support deregistering monitoring endpoints as well
|
1048
|
+
if (addr_ == NULL) {
|
1049
|
+
stop_monitor ();
|
1050
|
+
return 0;
|
1051
|
+
}
|
1052
|
+
|
1053
|
+
// Parse addr_ string.
|
1054
|
+
std::string protocol;
|
1055
|
+
std::string address;
|
1056
|
+
rc = parse_uri (addr_, protocol, address);
|
1057
|
+
if (rc != 0)
|
1058
|
+
return -1;
|
1059
|
+
|
1060
|
+
rc = check_protocol (protocol);
|
1061
|
+
if (rc != 0)
|
1062
|
+
return -1;
|
1063
|
+
|
1064
|
+
// Event notification only supported over inproc://
|
1065
|
+
if (protocol != "inproc") {
|
1066
|
+
errno = EPROTONOSUPPORT;
|
1067
|
+
return -1;
|
1068
|
+
}
|
1069
|
+
|
1070
|
+
// Register events to monitor
|
1071
|
+
monitor_events = events_;
|
1072
|
+
monitor_socket = zmq_socket (get_ctx (), ZMQ_PAIR);
|
1073
|
+
if (monitor_socket == NULL)
|
1074
|
+
return -1;
|
1075
|
+
|
1076
|
+
// Never block context termination on pending event messages
|
1077
|
+
int linger = 0;
|
1078
|
+
rc = zmq_setsockopt (monitor_socket, ZMQ_LINGER, &linger, sizeof (linger));
|
1079
|
+
if (rc == -1)
|
1080
|
+
stop_monitor ();
|
1081
|
+
|
1082
|
+
// Spawn the monitor socket endpoint
|
1083
|
+
rc = zmq_bind (monitor_socket, addr_);
|
1084
|
+
if (rc == -1)
|
1085
|
+
stop_monitor ();
|
1086
|
+
return rc;
|
1087
|
+
}
|
1088
|
+
|
1089
|
+
void zmq::socket_base_t::event_connected (std::string &addr_, int fd_)
|
1090
|
+
{
|
1091
|
+
if (monitor_events & ZMQ_EVENT_CONNECTED) {
|
1092
|
+
zmq_event_t event;
|
1093
|
+
event.event = ZMQ_EVENT_CONNECTED;
|
1094
|
+
event.data.connected.addr = (char *) malloc (addr_.size () + 1);
|
1095
|
+
copy_monitor_address (event.data.connected.addr, addr_);
|
1096
|
+
event.data.connected.fd = fd_;
|
1097
|
+
monitor_event (event);
|
1098
|
+
}
|
1099
|
+
}
|
1100
|
+
|
1101
|
+
void zmq::socket_base_t::event_connect_delayed (std::string &addr_, int err_)
|
1102
|
+
{
|
1103
|
+
if (monitor_events & ZMQ_EVENT_CONNECT_DELAYED) {
|
1104
|
+
zmq_event_t event;
|
1105
|
+
event.event = ZMQ_EVENT_CONNECT_DELAYED;
|
1106
|
+
event.data.connect_delayed.addr = (char *) malloc (addr_.size () + 1);
|
1107
|
+
copy_monitor_address (event.data.connect_delayed.addr, addr_);
|
1108
|
+
event.data.connect_delayed.err = err_;
|
1109
|
+
monitor_event (event);
|
1110
|
+
}
|
1111
|
+
}
|
1112
|
+
|
1113
|
+
void zmq::socket_base_t::event_connect_retried (std::string &addr_, int interval_)
|
1114
|
+
{
|
1115
|
+
if (monitor_events & ZMQ_EVENT_CONNECT_RETRIED) {
|
1116
|
+
zmq_event_t event;
|
1117
|
+
event.event = ZMQ_EVENT_CONNECT_RETRIED;
|
1118
|
+
event.data.connect_retried.addr = (char *) malloc (addr_.size () + 1);
|
1119
|
+
copy_monitor_address (event.data.connect_retried.addr, addr_);
|
1120
|
+
event.data.connect_retried.interval = interval_;
|
1121
|
+
monitor_event (event);
|
1122
|
+
}
|
1123
|
+
}
|
1124
|
+
|
1125
|
+
void zmq::socket_base_t::event_listening (std::string &addr_, int fd_)
|
1126
|
+
{
|
1127
|
+
if (monitor_events & ZMQ_EVENT_LISTENING) {
|
1128
|
+
zmq_event_t event;
|
1129
|
+
event.event = ZMQ_EVENT_LISTENING;
|
1130
|
+
event.data.listening.addr = (char *) malloc (addr_.size () + 1);
|
1131
|
+
copy_monitor_address (event.data.listening.addr, addr_);
|
1132
|
+
event.data.listening.fd = fd_;
|
1133
|
+
monitor_event (event);
|
1134
|
+
}
|
1135
|
+
}
|
1136
|
+
|
1137
|
+
void zmq::socket_base_t::event_bind_failed (std::string &addr_, int err_)
|
1138
|
+
{
|
1139
|
+
if (monitor_events & ZMQ_EVENT_BIND_FAILED) {
|
1140
|
+
zmq_event_t event;
|
1141
|
+
event.event = ZMQ_EVENT_BIND_FAILED;
|
1142
|
+
event.data.bind_failed.addr = (char *) malloc (addr_.size () + 1);
|
1143
|
+
copy_monitor_address (event.data.bind_failed.addr, addr_);
|
1144
|
+
event.data.bind_failed.err = err_;
|
1145
|
+
monitor_event (event);
|
1146
|
+
}
|
1147
|
+
}
|
1148
|
+
|
1149
|
+
void zmq::socket_base_t::event_accepted (std::string &addr_, int fd_)
|
1150
|
+
{
|
1151
|
+
if (monitor_events & ZMQ_EVENT_ACCEPTED) {
|
1152
|
+
zmq_event_t event;
|
1153
|
+
event.event = ZMQ_EVENT_ACCEPTED;
|
1154
|
+
event.data.accepted.addr = (char *) malloc (addr_.size () + 1);
|
1155
|
+
copy_monitor_address (event.data.accepted.addr, addr_);
|
1156
|
+
event.data.accepted.fd = fd_;
|
1157
|
+
monitor_event (event);
|
1158
|
+
}
|
1159
|
+
}
|
1160
|
+
|
1161
|
+
void zmq::socket_base_t::event_accept_failed (std::string &addr_, int err_)
|
1162
|
+
{
|
1163
|
+
if (monitor_events & ZMQ_EVENT_ACCEPT_FAILED) {
|
1164
|
+
zmq_event_t event;
|
1165
|
+
event.event = ZMQ_EVENT_ACCEPT_FAILED;
|
1166
|
+
event.data.accept_failed.addr = (char *) malloc (addr_.size () + 1);
|
1167
|
+
copy_monitor_address (event.data.accept_failed.addr, addr_);
|
1168
|
+
event.data.accept_failed.err= err_;
|
1169
|
+
monitor_event (event);
|
1170
|
+
}
|
1171
|
+
}
|
1172
|
+
|
1173
|
+
void zmq::socket_base_t::event_closed (std::string &addr_, int fd_)
|
1174
|
+
{
|
1175
|
+
if (monitor_events & ZMQ_EVENT_CLOSED) {
|
1176
|
+
zmq_event_t event;
|
1177
|
+
event.event = ZMQ_EVENT_CLOSED;
|
1178
|
+
event.data.closed.addr = (char *) malloc (addr_.size () + 1);
|
1179
|
+
copy_monitor_address (event.data.closed.addr, addr_);
|
1180
|
+
event.data.closed.fd = fd_;
|
1181
|
+
monitor_event (event);
|
1182
|
+
}
|
1183
|
+
}
|
1184
|
+
|
1185
|
+
void zmq::socket_base_t::event_close_failed (std::string &addr_, int err_)
|
1186
|
+
{
|
1187
|
+
if (monitor_events & ZMQ_EVENT_CLOSE_FAILED) {
|
1188
|
+
zmq_event_t event;
|
1189
|
+
event.event = ZMQ_EVENT_CLOSE_FAILED;
|
1190
|
+
event.data.close_failed.addr = (char *) malloc (addr_.size () + 1);
|
1191
|
+
copy_monitor_address (event.data.close_failed.addr, addr_);
|
1192
|
+
event.data.close_failed.err = err_;
|
1193
|
+
monitor_event (event);
|
1194
|
+
}
|
1195
|
+
}
|
1196
|
+
|
1197
|
+
void zmq::socket_base_t::event_disconnected (std::string &addr_, int fd_)
|
1198
|
+
{
|
1199
|
+
if (monitor_events & ZMQ_EVENT_DISCONNECTED) {
|
1200
|
+
zmq_event_t event;
|
1201
|
+
event.event = ZMQ_EVENT_DISCONNECTED;
|
1202
|
+
event.data.disconnected.addr = (char *) malloc (addr_.size () + 1);
|
1203
|
+
copy_monitor_address (event.data.disconnected.addr, addr_);
|
1204
|
+
event.data.disconnected.fd = fd_;
|
1205
|
+
monitor_event (event);
|
1206
|
+
}
|
1207
|
+
}
|
1208
|
+
|
1209
|
+
void zmq::socket_base_t::copy_monitor_address (char *dest_, std::string &src_)
|
1210
|
+
{
|
1211
|
+
alloc_assert (dest_);
|
1212
|
+
dest_[src_.size ()] = 0;
|
1213
|
+
memcpy (dest_, src_.c_str (), src_.size ());
|
1214
|
+
}
|
1215
|
+
|
1216
|
+
void zmq::socket_base_t::monitor_event (zmq_event_t event_)
{
    //  No-op unless a monitor endpoint is active.
    if (!monitor_socket)
        return;

    //  Copy the event into a heap buffer owned by the message and push it
    //  out over the monitor PAIR socket.
    void *payload = malloc (sizeof (event_));
    alloc_assert (payload);
    memcpy (payload, &event_, sizeof (event_));
    zmq_msg_t msg;
    zmq_msg_init_data (&msg, payload, sizeof (event_), zmq_free_event, NULL);
    zmq_sendmsg (monitor_socket, &msg, 0);
    zmq_msg_close (&msg);
}
|
1228
|
+
|
1229
|
+
void zmq::socket_base_t::stop_monitor()
|
1230
|
+
{
|
1231
|
+
if (monitor_socket) {
|
1232
|
+
zmq_close (monitor_socket);
|
1233
|
+
monitor_socket = NULL;
|
1234
|
+
monitor_events = 0;
|
1235
|
+
}
|
1236
|
+
}
|