Dataset columns:
code: string, lengths 0 – 56.1M
repo_name: string, 515 distinct values
path: string, lengths 2 – 147
language: string, 447 distinct values
license: string, 7 distinct values
size: int64, 0 – 56.8M
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __SOCKS_CONNECTER_HPP_INCLUDED__ #define __SOCKS_CONNECTER_HPP_INCLUDED__ #include "fd.hpp" #include "stream_connecter_base.hpp" #include "stdint.hpp" #include "socks.hpp" namespace zmq { class io_thread_t; class session_base_t; struct address_t; class socks_connecter_t ZMQ_FINAL : public stream_connecter_base_t { public: // If 'delayed_start' is true connecter first waits for a while, // then starts connection process. socks_connecter_t (zmq::io_thread_t *io_thread_, zmq::session_base_t *session_, const options_t &options_, address_t *addr_, address_t *proxy_addr_, bool delayed_start_); ~socks_connecter_t (); void set_auth_method_basic (const std::string &username, const std::string &password); void set_auth_method_none (); private: enum { unplugged, waiting_for_reconnect_time, waiting_for_proxy_connection, sending_greeting, waiting_for_choice, sending_basic_auth_request, waiting_for_auth_response, sending_request, waiting_for_response }; // Method ID enum { socks_no_auth_required = 0x00, socks_basic_auth = 0x02, socks_no_acceptable_method = 0xff }; // Handlers for I/O events. void in_event (); void out_event (); // Internal function to start the actual connection establishment. void start_connecting (); static int process_server_response (const socks_choice_t &response_); static int process_server_response (const socks_response_t &response_); static int process_server_response (const socks_auth_response_t &response_); static int parse_address (const std::string &address_, std::string &hostname_, uint16_t &port_); int connect_to_proxy (); void error (); // Open TCP connecting socket. Returns -1 in case of error, // 0 if connect was successful immediately. Returns -1 with // EAGAIN errno if async connect was launched. int open (); // Get the file descriptor of newly created connection. Returns // retired_fd if the connection was unsuccessful. zmq::fd_t check_proxy_connection () const; socks_greeting_encoder_t _greeting_encoder; socks_choice_decoder_t _choice_decoder; socks_basic_auth_request_encoder_t _basic_auth_request_encoder; socks_auth_response_decoder_t _auth_response_decoder; socks_request_encoder_t _request_encoder; socks_response_decoder_t _response_decoder; // SOCKS address; owned by this connecter. address_t *_proxy_addr; // User defined authentication method int _auth_method; // Credentials for basic authentication std::string _auth_username; std::string _auth_password; int _status; ZMQ_NON_COPYABLE_NOR_MOVABLE (socks_connecter_t) }; } #endif
sophomore_public/libzmq
src/socks_connecter.hpp
C++
gpl-3.0
3,071
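For context, a minimal sketch of how this connecter is normally exercised from the public API: socks_connecter_t is engaged when the ZMQ_SOCKS_PROXY option is set on a connecting socket. The proxy address and endpoint below are placeholders, not values from the dumped sources.

#include <zmq.h>
#include <cassert>
#include <cstring>

int main ()
{
    void *ctx = zmq_ctx_new ();
    void *req = zmq_socket (ctx, ZMQ_REQ);

    //  Route the outgoing TCP connection through a SOCKS5 proxy
    //  (placeholder address); the connecter performs the greeting,
    //  method choice and CONNECT request shown in the header above.
    const char proxy[] = "127.0.0.1:1080";
    int rc = zmq_setsockopt (req, ZMQ_SOCKS_PROXY, proxy, strlen (proxy));
    assert (rc == 0);

    rc = zmq_connect (req, "tcp://example.com:5555");
    assert (rc == 0);

    zmq_close (req);
    zmq_ctx_term (ctx);
    return 0;
}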
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __ZMQ_STDINT_HPP_INCLUDED__ #define __ZMQ_STDINT_HPP_INCLUDED__ #if defined ZMQ_HAVE_SOLARIS || defined ZMQ_HAVE_OPENVMS #include <inttypes.h> #elif defined _MSC_VER && _MSC_VER < 1600 #ifndef int8_t typedef __int8 int8_t; #endif #ifndef int16_t typedef __int16 int16_t; #endif #ifndef int32_t typedef __int32 int32_t; #endif #ifndef int64_t typedef __int64 int64_t; #endif #ifndef uint8_t typedef unsigned __int8 uint8_t; #endif #ifndef uint16_t typedef unsigned __int16 uint16_t; #endif #ifndef uint32_t typedef unsigned __int32 uint32_t; #endif #ifndef uint64_t typedef unsigned __int64 uint64_t; #endif #ifndef UINT16_MAX #define UINT16_MAX _UI16_MAX #endif #ifndef UINT32_MAX #define UINT32_MAX _UI32_MAX #endif #else #include <stdint.h> #endif #ifndef UINT8_MAX #define UINT8_MAX 0xFF #endif #endif
sophomore_public/libzmq
src/stdint.hpp
C++
gpl-3.0
862
/* SPDX-License-Identifier: MPL-2.0 */ #include "precompiled.hpp" #include "macros.hpp" #include "stream.hpp" #include "pipe.hpp" #include "wire.hpp" #include "random.hpp" #include "likely.hpp" #include "err.hpp" zmq::stream_t::stream_t (class ctx_t *parent_, uint32_t tid_, int sid_) : routing_socket_base_t (parent_, tid_, sid_), _prefetched (false), _routing_id_sent (false), _current_out (NULL), _more_out (false), _next_integral_routing_id (generate_random ()) { options.type = ZMQ_STREAM; options.raw_socket = true; _prefetched_routing_id.init (); _prefetched_msg.init (); } zmq::stream_t::~stream_t () { _prefetched_routing_id.close (); _prefetched_msg.close (); } void zmq::stream_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_, bool locally_initiated_) { LIBZMQ_UNUSED (subscribe_to_all_); zmq_assert (pipe_); identify_peer (pipe_, locally_initiated_); _fq.attach (pipe_); } void zmq::stream_t::xpipe_terminated (pipe_t *pipe_) { erase_out_pipe (pipe_); _fq.pipe_terminated (pipe_); // TODO router_t calls pipe_->rollback() here; should this be done here as // well? then xpipe_terminated could be pulled up to routing_socket_base_t if (pipe_ == _current_out) _current_out = NULL; } void zmq::stream_t::xread_activated (pipe_t *pipe_) { _fq.activated (pipe_); } int zmq::stream_t::xsend (msg_t *msg_) { // If this is the first part of the message it's the ID of the // peer to send the message to. if (!_more_out) { zmq_assert (!_current_out); // If we have malformed message (prefix with no subsequent message) // then just silently ignore it. // TODO: The connections should be killed instead. if (msg_->flags () & msg_t::more) { // Find the pipe associated with the routing id stored in the prefix. // If there's no such pipe return an error out_pipe_t *out_pipe = lookup_out_pipe ( blob_t (static_cast<unsigned char *> (msg_->data ()), msg_->size (), reference_tag_t ())); if (out_pipe) { _current_out = out_pipe->pipe; if (!_current_out->check_write ()) { out_pipe->active = false; _current_out = NULL; errno = EAGAIN; return -1; } } else { errno = EHOSTUNREACH; return -1; } } // Expect one more message frame. _more_out = true; int rc = msg_->close (); errno_assert (rc == 0); rc = msg_->init (); errno_assert (rc == 0); return 0; } // Ignore the MORE flag msg_->reset_flags (msg_t::more); // This is the last part of the message. _more_out = false; // Push the message into the pipe. If there's no out pipe, just drop it. if (_current_out) { // Close the remote connection if user has asked to do so // by sending zero length message. // Pending messages in the pipe will be dropped (on receiving term- ack) if (msg_->size () == 0) { _current_out->terminate (false); int rc = msg_->close (); errno_assert (rc == 0); rc = msg_->init (); errno_assert (rc == 0); _current_out = NULL; return 0; } const bool ok = _current_out->write (msg_); if (likely (ok)) _current_out->flush (); _current_out = NULL; } else { const int rc = msg_->close (); errno_assert (rc == 0); } // Detach the message from the data buffer. 
const int rc = msg_->init (); errno_assert (rc == 0); return 0; } int zmq::stream_t::xsetsockopt (int option_, const void *optval_, size_t optvallen_) { switch (option_) { case ZMQ_STREAM_NOTIFY: return do_setsockopt_int_as_bool_strict (optval_, optvallen_, &options.raw_notify); default: return routing_socket_base_t::xsetsockopt (option_, optval_, optvallen_); } } int zmq::stream_t::xrecv (msg_t *msg_) { if (_prefetched) { if (!_routing_id_sent) { const int rc = msg_->move (_prefetched_routing_id); errno_assert (rc == 0); _routing_id_sent = true; } else { const int rc = msg_->move (_prefetched_msg); errno_assert (rc == 0); _prefetched = false; } return 0; } pipe_t *pipe = NULL; int rc = _fq.recvpipe (&_prefetched_msg, &pipe); if (rc != 0) return -1; zmq_assert (pipe != NULL); zmq_assert ((_prefetched_msg.flags () & msg_t::more) == 0); // We have received a frame with TCP data. // Rather than sending this frame, we keep it in prefetched // buffer and send a frame with peer's ID. const blob_t &routing_id = pipe->get_routing_id (); rc = msg_->close (); errno_assert (rc == 0); rc = msg_->init_size (routing_id.size ()); errno_assert (rc == 0); // forward metadata (if any) metadata_t *metadata = _prefetched_msg.metadata (); if (metadata) msg_->set_metadata (metadata); memcpy (msg_->data (), routing_id.data (), routing_id.size ()); msg_->set_flags (msg_t::more); _prefetched = true; _routing_id_sent = true; return 0; } bool zmq::stream_t::xhas_in () { // We may already have a message pre-fetched. if (_prefetched) return true; // Try to read the next message. // The message, if read, is kept in the pre-fetch buffer. pipe_t *pipe = NULL; int rc = _fq.recvpipe (&_prefetched_msg, &pipe); if (rc != 0) return false; zmq_assert (pipe != NULL); zmq_assert ((_prefetched_msg.flags () & msg_t::more) == 0); const blob_t &routing_id = pipe->get_routing_id (); rc = _prefetched_routing_id.init_size (routing_id.size ()); errno_assert (rc == 0); // forward metadata (if any) metadata_t *metadata = _prefetched_msg.metadata (); if (metadata) _prefetched_routing_id.set_metadata (metadata); memcpy (_prefetched_routing_id.data (), routing_id.data (), routing_id.size ()); _prefetched_routing_id.set_flags (msg_t::more); _prefetched = true; _routing_id_sent = false; return true; } bool zmq::stream_t::xhas_out () { // In theory, STREAM socket is always ready for writing. Whether actual // attempt to write succeeds depends on which pipe the message is going // to be routed to. return true; } void zmq::stream_t::identify_peer (pipe_t *pipe_, bool locally_initiated_) { // Always assign routing id for raw-socket unsigned char buffer[5]; buffer[0] = 0; blob_t routing_id; if (locally_initiated_ && connect_routing_id_is_set ()) { const std::string connect_routing_id = extract_connect_routing_id (); routing_id.set ( reinterpret_cast<const unsigned char *> (connect_routing_id.c_str ()), connect_routing_id.length ()); // Not allowed to duplicate an existing rid zmq_assert (!has_out_pipe (routing_id)); } else { put_uint32 (buffer + 1, _next_integral_routing_id++); routing_id.set (buffer, sizeof buffer); memcpy (options.routing_id, routing_id.data (), routing_id.size ()); options.routing_id_size = static_cast<unsigned char> (routing_id.size ()); } pipe_->set_router_socket_routing_id (routing_id); add_out_pipe (ZMQ_MOVE (routing_id), pipe_); }
sophomore_public/libzmq
src/stream.cpp
C++
gpl-3.0
7,925
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __ZMQ_STREAM_HPP_INCLUDED__ #define __ZMQ_STREAM_HPP_INCLUDED__ #include <map> #include "router.hpp" namespace zmq { class ctx_t; class pipe_t; class stream_t ZMQ_FINAL : public routing_socket_base_t { public: stream_t (zmq::ctx_t *parent_, uint32_t tid_, int sid_); ~stream_t (); // Overrides of functions from socket_base_t. void xattach_pipe (zmq::pipe_t *pipe_, bool subscribe_to_all_, bool locally_initiated_); int xsend (zmq::msg_t *msg_); int xrecv (zmq::msg_t *msg_); bool xhas_in (); bool xhas_out (); void xread_activated (zmq::pipe_t *pipe_); void xpipe_terminated (zmq::pipe_t *pipe_); int xsetsockopt (int option_, const void *optval_, size_t optvallen_); private: // Generate peer's id and update lookup map void identify_peer (pipe_t *pipe_, bool locally_initiated_); // Fair queueing object for inbound pipes. fq_t _fq; // True iff there is a message held in the pre-fetch buffer. bool _prefetched; // If true, the receiver got the message part with // the peer's identity. bool _routing_id_sent; // Holds the prefetched identity. msg_t _prefetched_routing_id; // Holds the prefetched message. msg_t _prefetched_msg; // The pipe we are currently writing to. zmq::pipe_t *_current_out; // If true, more outgoing message parts are expected. bool _more_out; // Routing IDs are generated. It's a simple increment and wrap-over // algorithm. This value is the next ID to use (if not used already). uint32_t _next_integral_routing_id; ZMQ_NON_COPYABLE_NOR_MOVABLE (stream_t) }; } #endif
sophomore_public/libzmq
src/stream.hpp
C++
gpl-3.0
1,751
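A minimal sketch of the framing contract that stream_t::xrecv and xsend implement, seen from the public API: the first frame of every exchange carries the peer routing id, the raw TCP payload follows, and a zero-length payload asks the engine to close that connection. The endpoint and buffer sizes are placeholders.

#include <zmq.h>
#include <cassert>

int main ()
{
    void *ctx = zmq_ctx_new ();
    void *stream = zmq_socket (ctx, ZMQ_STREAM);
    int rc = zmq_bind (stream, "tcp://127.0.0.1:8080");
    assert (rc == 0);

    unsigned char id[256];
    char buf[1024];

    //  First frame: peer routing id (prefetched by xrecv), second frame:
    //  raw TCP data (may be empty for connect/disconnect notifications).
    const int id_size = zmq_recv (stream, id, sizeof id, 0);
    const int data_size = zmq_recv (stream, buf, sizeof buf, 0);
    assert (id_size > 0 && data_size >= 0);

    //  Reply: routing id frame first (xsend routes on it), then the payload.
    zmq_send (stream, id, id_size, ZMQ_SNDMORE);
    zmq_send (stream, buf, data_size, 0);

    //  A zero-length payload after the routing id closes that TCP connection.
    zmq_send (stream, id, id_size, ZMQ_SNDMORE);
    zmq_send (stream, "", 0, 0);

    zmq_close (stream);
    zmq_ctx_term (ctx);
    return 0;
}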
/* SPDX-License-Identifier: MPL-2.0 */ #include "precompiled.hpp" #include "stream_connecter_base.hpp" #include "session_base.hpp" #include "address.hpp" #include "random.hpp" #include "zmtp_engine.hpp" #include "raw_engine.hpp" #ifndef ZMQ_HAVE_WINDOWS #include <unistd.h> #else #include <winsock2.h> #endif #include <limits> zmq::stream_connecter_base_t::stream_connecter_base_t ( zmq::io_thread_t *io_thread_, zmq::session_base_t *session_, const zmq::options_t &options_, zmq::address_t *addr_, bool delayed_start_) : own_t (io_thread_, options_), io_object_t (io_thread_), _addr (addr_), _s (retired_fd), _handle (static_cast<handle_t> (NULL)), _socket (session_->get_socket ()), _delayed_start (delayed_start_), _reconnect_timer_started (false), _current_reconnect_ivl (-1), _session (session_) { zmq_assert (_addr); _addr->to_string (_endpoint); // TODO the return value is unused! what if it fails? if this is impossible // or does not matter, change such that endpoint in initialized using an // initializer, and make endpoint const } zmq::stream_connecter_base_t::~stream_connecter_base_t () { zmq_assert (!_reconnect_timer_started); zmq_assert (!_handle); zmq_assert (_s == retired_fd); } void zmq::stream_connecter_base_t::process_plug () { if (_delayed_start) add_reconnect_timer (); else start_connecting (); } void zmq::stream_connecter_base_t::process_term (int linger_) { if (_reconnect_timer_started) { cancel_timer (reconnect_timer_id); _reconnect_timer_started = false; } if (_handle) { rm_handle (); } if (_s != retired_fd) close (); own_t::process_term (linger_); } void zmq::stream_connecter_base_t::add_reconnect_timer () { if (options.reconnect_ivl > 0) { const int interval = get_new_reconnect_ivl (); add_timer (interval, reconnect_timer_id); _socket->event_connect_retried ( make_unconnected_connect_endpoint_pair (_endpoint), interval); _reconnect_timer_started = true; } } int zmq::stream_connecter_base_t::get_new_reconnect_ivl () { if (options.reconnect_ivl_max > 0) { int candidate_interval = 0; if (_current_reconnect_ivl == -1) candidate_interval = options.reconnect_ivl; else if (_current_reconnect_ivl > std::numeric_limits<int>::max () / 2) candidate_interval = std::numeric_limits<int>::max (); else candidate_interval = _current_reconnect_ivl * 2; if (candidate_interval > options.reconnect_ivl_max) _current_reconnect_ivl = options.reconnect_ivl_max; else _current_reconnect_ivl = candidate_interval; return _current_reconnect_ivl; } else { if (_current_reconnect_ivl == -1) _current_reconnect_ivl = options.reconnect_ivl; // The new interval is the base interval + random value. const int random_jitter = generate_random () % options.reconnect_ivl; const int interval = _current_reconnect_ivl < std::numeric_limits<int>::max () - random_jitter ? 
_current_reconnect_ivl + random_jitter : std::numeric_limits<int>::max (); return interval; } } void zmq::stream_connecter_base_t::rm_handle () { rm_fd (_handle); _handle = static_cast<handle_t> (NULL); } void zmq::stream_connecter_base_t::close () { // TODO before, this was an assertion for _s != retired_fd, but this does not match usage of close if (_s != retired_fd) { #ifdef ZMQ_HAVE_WINDOWS const int rc = closesocket (_s); wsa_assert (rc != SOCKET_ERROR); #else const int rc = ::close (_s); errno_assert (rc == 0); #endif _socket->event_closed ( make_unconnected_connect_endpoint_pair (_endpoint), _s); _s = retired_fd; } } void zmq::stream_connecter_base_t::in_event () { // We are not polling for incoming data, so we are actually called // because of error here. However, we can get error on out event as well // on some platforms, so we'll simply handle both events in the same way. out_event (); } void zmq::stream_connecter_base_t::create_engine ( fd_t fd_, const std::string &local_address_) { const endpoint_uri_pair_t endpoint_pair (local_address_, _endpoint, endpoint_type_connect); // Create the engine object for this connection. i_engine *engine; if (options.raw_socket) engine = new (std::nothrow) raw_engine_t (fd_, options, endpoint_pair); else engine = new (std::nothrow) zmtp_engine_t (fd_, options, endpoint_pair); alloc_assert (engine); // Attach the engine to the corresponding session object. send_attach (_session, engine); // Shut the connecter down. terminate (); _socket->event_connected (endpoint_pair, fd_); } void zmq::stream_connecter_base_t::timer_event (int id_) { zmq_assert (id_ == reconnect_timer_id); _reconnect_timer_started = false; start_connecting (); }
sophomore_public/libzmq
src/stream_connecter_base.cpp
C++
gpl-3.0
5,172
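get_new_reconnect_ivl () above is driven by the ZMQ_RECONNECT_IVL and ZMQ_RECONNECT_IVL_MAX socket options: with a positive maximum the interval doubles on each failed attempt up to that cap, otherwise random jitter in [0, ZMQ_RECONNECT_IVL) is added to the base interval. A small sketch with placeholder values:

#include <zmq.h>
#include <cassert>

int main ()
{
    void *ctx = zmq_ctx_new ();
    void *sock = zmq_socket (ctx, ZMQ_PUSH);

    //  Base reconnect interval of 100 ms; the first attempt uses this value.
    int ivl = 100;
    int rc = zmq_setsockopt (sock, ZMQ_RECONNECT_IVL, &ivl, sizeof ivl);
    assert (rc == 0);

    //  Cap the exponential backoff at 5 s; leaving this at 0 (the default)
    //  selects the base-plus-jitter branch instead.
    int ivl_max = 5000;
    rc = zmq_setsockopt (sock, ZMQ_RECONNECT_IVL_MAX, &ivl_max, sizeof ivl_max);
    assert (rc == 0);

    rc = zmq_connect (sock, "tcp://127.0.0.1:6000");
    assert (rc == 0);

    zmq_close (sock);
    zmq_ctx_term (ctx);
    return 0;
}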
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __STREAM_CONNECTER_BASE_HPP_INCLUDED__ #define __STREAM_CONNECTER_BASE_HPP_INCLUDED__ #include "fd.hpp" #include "own.hpp" #include "io_object.hpp" namespace zmq { class io_thread_t; class session_base_t; struct address_t; class stream_connecter_base_t : public own_t, public io_object_t { public: // If 'delayed_start' is true connecter first waits for a while, // then starts connection process. stream_connecter_base_t (zmq::io_thread_t *io_thread_, zmq::session_base_t *session_, const options_t &options_, address_t *addr_, bool delayed_start_); ~stream_connecter_base_t () ZMQ_OVERRIDE; protected: // Handlers for incoming commands. void process_plug () ZMQ_FINAL; void process_term (int linger_) ZMQ_OVERRIDE; // Handlers for I/O events. void in_event () ZMQ_OVERRIDE; void timer_event (int id_) ZMQ_OVERRIDE; // Internal function to create the engine after connection was established. virtual void create_engine (fd_t fd, const std::string &local_address_); // Internal function to add a reconnect timer void add_reconnect_timer (); // Removes the handle from the poller. void rm_handle (); // Close the connecting socket. void close (); // Address to connect to. Owned by session_base_t. // It is non-const since some parts may change during opening. address_t *const _addr; // Underlying socket. fd_t _s; // Handle corresponding to the listening socket, if file descriptor is // registered with the poller, or NULL. handle_t _handle; // String representation of endpoint to connect to std::string _endpoint; // Socket zmq::socket_base_t *const _socket; private: // ID of the timer used to delay the reconnection. enum { reconnect_timer_id = 1 }; // Internal function to return a reconnect backoff delay. // Will modify the current_reconnect_ivl used for next call // Returns the currently used interval int get_new_reconnect_ivl (); virtual void start_connecting () = 0; // If true, connecter is waiting a while before trying to connect. const bool _delayed_start; // True iff a timer has been started. bool _reconnect_timer_started; // Current reconnect ivl, updated for backoff strategy int _current_reconnect_ivl; ZMQ_NON_COPYABLE_NOR_MOVABLE (stream_connecter_base_t) protected: // Reference to the session we belong to. zmq::session_base_t *const _session; }; } #endif
sophomore_public/libzmq
src/stream_connecter_base.hpp
C++
gpl-3.0
2,701
/* SPDX-License-Identifier: MPL-2.0 */ #include "precompiled.hpp" #include "macros.hpp" #include <limits.h> #include <string.h> #ifndef ZMQ_HAVE_WINDOWS #include <unistd.h> #endif #include <new> #include <sstream> #include "stream_engine_base.hpp" #include "io_thread.hpp" #include "session_base.hpp" #include "v1_encoder.hpp" #include "v1_decoder.hpp" #include "v2_encoder.hpp" #include "v2_decoder.hpp" #include "null_mechanism.hpp" #include "plain_client.hpp" #include "plain_server.hpp" #include "gssapi_client.hpp" #include "gssapi_server.hpp" #include "curve_client.hpp" #include "curve_server.hpp" #include "raw_decoder.hpp" #include "raw_encoder.hpp" #include "config.hpp" #include "err.hpp" #include "ip.hpp" #include "tcp.hpp" #include "likely.hpp" #include "wire.hpp" static std::string get_peer_address (zmq::fd_t s_) { std::string peer_address; const int family = zmq::get_peer_ip_address (s_, peer_address); if (family == 0) peer_address.clear (); #if defined ZMQ_HAVE_SO_PEERCRED else if (family == PF_UNIX) { struct ucred cred; socklen_t size = sizeof (cred); if (!getsockopt (s_, SOL_SOCKET, SO_PEERCRED, &cred, &size)) { std::ostringstream buf; buf << ":" << cred.uid << ":" << cred.gid << ":" << cred.pid; peer_address += buf.str (); } } #elif defined ZMQ_HAVE_LOCAL_PEERCRED else if (family == PF_UNIX) { struct xucred cred; socklen_t size = sizeof (cred); if (!getsockopt (s_, 0, LOCAL_PEERCRED, &cred, &size) && cred.cr_version == XUCRED_VERSION) { std::ostringstream buf; buf << ":" << cred.cr_uid << ":"; if (cred.cr_ngroups > 0) buf << cred.cr_groups[0]; buf << ":"; peer_address += buf.str (); } } #endif return peer_address; } zmq::stream_engine_base_t::stream_engine_base_t ( fd_t fd_, const options_t &options_, const endpoint_uri_pair_t &endpoint_uri_pair_, bool has_handshake_stage_) : _options (options_), _inpos (NULL), _insize (0), _decoder (NULL), _outpos (NULL), _outsize (0), _encoder (NULL), _mechanism (NULL), _next_msg (NULL), _process_msg (NULL), _metadata (NULL), _input_stopped (false), _output_stopped (false), _endpoint_uri_pair (endpoint_uri_pair_), _has_handshake_timer (false), _has_ttl_timer (false), _has_timeout_timer (false), _has_heartbeat_timer (false), _peer_address (get_peer_address (fd_)), _s (fd_), _handle (static_cast<handle_t> (NULL)), _plugged (false), _handshaking (true), _io_error (false), _session (NULL), _socket (NULL), _has_handshake_stage (has_handshake_stage_) { const int rc = _tx_msg.init (); errno_assert (rc == 0); // Put the socket into non-blocking mode. unblock_socket (_s); } zmq::stream_engine_base_t::~stream_engine_base_t () { zmq_assert (!_plugged); if (_s != retired_fd) { #ifdef ZMQ_HAVE_WINDOWS const int rc = closesocket (_s); wsa_assert (rc != SOCKET_ERROR); #else int rc = close (_s); #if defined(__FreeBSD_kernel__) || defined(__FreeBSD__) // FreeBSD may return ECONNRESET on close() under load but this is not // an error. if (rc == -1 && errno == ECONNRESET) rc = 0; #endif errno_assert (rc == 0); #endif _s = retired_fd; } const int rc = _tx_msg.close (); errno_assert (rc == 0); // Drop reference to metadata and destroy it if we are // the only user. if (_metadata != NULL) { if (_metadata->drop_ref ()) { LIBZMQ_DELETE (_metadata); } } LIBZMQ_DELETE (_encoder); LIBZMQ_DELETE (_decoder); LIBZMQ_DELETE (_mechanism); } void zmq::stream_engine_base_t::plug (io_thread_t *io_thread_, session_base_t *session_) { zmq_assert (!_plugged); _plugged = true; // Connect to session object. 
zmq_assert (!_session); zmq_assert (session_); _session = session_; _socket = _session->get_socket (); // Connect to I/O threads poller object. io_object_t::plug (io_thread_); _handle = add_fd (_s); _io_error = false; plug_internal (); } void zmq::stream_engine_base_t::unplug () { zmq_assert (_plugged); _plugged = false; // Cancel all timers. if (_has_handshake_timer) { cancel_timer (handshake_timer_id); _has_handshake_timer = false; } if (_has_ttl_timer) { cancel_timer (heartbeat_ttl_timer_id); _has_ttl_timer = false; } if (_has_timeout_timer) { cancel_timer (heartbeat_timeout_timer_id); _has_timeout_timer = false; } if (_has_heartbeat_timer) { cancel_timer (heartbeat_ivl_timer_id); _has_heartbeat_timer = false; } // Cancel all fd subscriptions. if (!_io_error) rm_fd (_handle); // Disconnect from I/O threads poller object. io_object_t::unplug (); _session = NULL; } void zmq::stream_engine_base_t::terminate () { unplug (); delete this; } void zmq::stream_engine_base_t::in_event () { // ignore errors const bool res = in_event_internal (); LIBZMQ_UNUSED (res); } bool zmq::stream_engine_base_t::in_event_internal () { zmq_assert (!_io_error); // If still handshaking, receive and process the greeting message. if (unlikely (_handshaking)) { if (handshake ()) { // Handshaking was successful. // Switch into the normal message flow. _handshaking = false; if (_mechanism == NULL && _has_handshake_stage) { _session->engine_ready (); if (_has_handshake_timer) { cancel_timer (handshake_timer_id); _has_handshake_timer = false; } } } else return false; } zmq_assert (_decoder); // If there has been an I/O error, stop polling. if (_input_stopped) { rm_fd (_handle); _io_error = true; return true; // TODO or return false in this case too? } // If there's no data to process in the buffer... if (!_insize) { // Retrieve the buffer and read as much data as possible. // Note that buffer can be arbitrarily large. However, we assume // the underlying TCP layer has fixed buffer size and thus the // number of bytes read will be always limited. size_t bufsize = 0; _decoder->get_buffer (&_inpos, &bufsize); const int rc = read (_inpos, bufsize); if (rc == -1) { if (errno != EAGAIN) { error (connection_error); return false; } return true; } // Adjust input size _insize = static_cast<size_t> (rc); // Adjust buffer size to received bytes _decoder->resize_buffer (_insize); } int rc = 0; size_t processed = 0; while (_insize > 0) { rc = _decoder->decode (_inpos, _insize, processed); zmq_assert (processed <= _insize); _inpos += processed; _insize -= processed; if (rc == 0 || rc == -1) break; rc = (this->*_process_msg) (_decoder->msg ()); if (rc == -1) break; } // Tear down the connection if we have failed to decode input data // or the session has rejected the message. if (rc == -1) { if (errno != EAGAIN) { // In cases where the src/dst have the same IP and the dst uses an ephemeral port, reconnection // eventually results in the src and dest IP and port clashing (google tcp self connection) // While this is a protocol_error (you have the single zmq socket handshaking with itself) // we do not want to to stop reconnection from happening if (!_endpoint_uri_pair.clash ()) { error (protocol_error); return false; } } _input_stopped = true; reset_pollin (_handle); } _session->flush (); return true; } void zmq::stream_engine_base_t::out_event () { zmq_assert (!_io_error); // If write buffer is empty, try to read new data from the encoder. 
if (!_outsize) { // Even when we stop polling as soon as there is no // data to send, the poller may invoke out_event one // more time due to 'speculative write' optimisation. if (unlikely (_encoder == NULL)) { zmq_assert (_handshaking); return; } _outpos = NULL; _outsize = _encoder->encode (&_outpos, 0); while (_outsize < static_cast<size_t> (_options.out_batch_size)) { if ((this->*_next_msg) (&_tx_msg) == -1) { // ws_engine can cause an engine error and delete it, so // bail out immediately to avoid use-after-free if (errno == ECONNRESET) return; else break; } _encoder->load_msg (&_tx_msg); unsigned char *bufptr = _outpos + _outsize; const size_t n = _encoder->encode (&bufptr, _options.out_batch_size - _outsize); zmq_assert (n > 0); if (_outpos == NULL) _outpos = bufptr; _outsize += n; } // If there is no data to send, stop polling for output. if (_outsize == 0) { _output_stopped = true; reset_pollout (); return; } } // If there are any data to write in write buffer, write as much as // possible to the socket. Note that amount of data to write can be // arbitrarily large. However, we assume that underlying TCP layer has // limited transmission buffer and thus the actual number of bytes // written should be reasonably modest. const int nbytes = write (_outpos, _outsize); // IO error has occurred. We stop waiting for output events. // The engine is not terminated until we detect input error; // this is necessary to prevent losing incoming messages. if (nbytes == -1) { reset_pollout (); return; } _outpos += nbytes; _outsize -= nbytes; // If we are still handshaking and there are no data // to send, stop polling for output. if (unlikely (_handshaking)) if (_outsize == 0) reset_pollout (); } void zmq::stream_engine_base_t::restart_output () { if (unlikely (_io_error)) return; if (likely (_output_stopped)) { set_pollout (); _output_stopped = false; } // Speculative write: The assumption is that at the moment new message // was sent by the user the socket is probably available for writing. // Thus we try to write the data to socket avoiding polling for POLLOUT. // Consequently, the latency should be better in request/reply scenarios. out_event (); } bool zmq::stream_engine_base_t::restart_input () { zmq_assert (_input_stopped); zmq_assert (_session != NULL); zmq_assert (_decoder != NULL); int rc = (this->*_process_msg) (_decoder->msg ()); if (rc == -1) { if (errno == EAGAIN) _session->flush (); else { error (protocol_error); return false; } return true; } while (_insize > 0) { size_t processed = 0; rc = _decoder->decode (_inpos, _insize, processed); zmq_assert (processed <= _insize); _inpos += processed; _insize -= processed; if (rc == 0 || rc == -1) break; rc = (this->*_process_msg) (_decoder->msg ()); if (rc == -1) break; } if (rc == -1 && errno == EAGAIN) _session->flush (); else if (_io_error) { error (connection_error); return false; } else if (rc == -1) { error (protocol_error); return false; } else { _input_stopped = false; set_pollin (); _session->flush (); // Speculative read. 
if (!in_event_internal ()) return false; } return true; } int zmq::stream_engine_base_t::next_handshake_command (msg_t *msg_) { zmq_assert (_mechanism != NULL); if (_mechanism->status () == mechanism_t::ready) { mechanism_ready (); return pull_and_encode (msg_); } if (_mechanism->status () == mechanism_t::error) { errno = EPROTO; return -1; } const int rc = _mechanism->next_handshake_command (msg_); if (rc == 0) msg_->set_flags (msg_t::command); return rc; } int zmq::stream_engine_base_t::process_handshake_command (msg_t *msg_) { zmq_assert (_mechanism != NULL); const int rc = _mechanism->process_handshake_command (msg_); if (rc == 0) { if (_mechanism->status () == mechanism_t::ready) mechanism_ready (); else if (_mechanism->status () == mechanism_t::error) { errno = EPROTO; return -1; } if (_output_stopped) restart_output (); } return rc; } void zmq::stream_engine_base_t::zap_msg_available () { zmq_assert (_mechanism != NULL); const int rc = _mechanism->zap_msg_available (); if (rc == -1) { error (protocol_error); return; } if (_input_stopped) if (!restart_input ()) return; if (_output_stopped) restart_output (); } const zmq::endpoint_uri_pair_t &zmq::stream_engine_base_t::get_endpoint () const { return _endpoint_uri_pair; } void zmq::stream_engine_base_t::mechanism_ready () { if (_options.heartbeat_interval > 0 && !_has_heartbeat_timer) { add_timer (_options.heartbeat_interval, heartbeat_ivl_timer_id); _has_heartbeat_timer = true; } if (_has_handshake_stage) _session->engine_ready (); bool flush_session = false; if (_options.recv_routing_id) { msg_t routing_id; _mechanism->peer_routing_id (&routing_id); const int rc = _session->push_msg (&routing_id); if (rc == -1 && errno == EAGAIN) { // If the write is failing at this stage with // an EAGAIN the pipe must be being shut down, // so we can just bail out of the routing id set. return; } errno_assert (rc == 0); flush_session = true; } if (_options.router_notify & ZMQ_NOTIFY_CONNECT) { msg_t connect_notification; connect_notification.init (); const int rc = _session->push_msg (&connect_notification); if (rc == -1 && errno == EAGAIN) { // If the write is failing at this stage with // an EAGAIN the pipe must be being shut down, // so we can just bail out of the notification. return; } errno_assert (rc == 0); flush_session = true; } if (flush_session) _session->flush (); _next_msg = &stream_engine_base_t::pull_and_encode; _process_msg = &stream_engine_base_t::write_credential; // Compile metadata. properties_t properties; init_properties (properties); // Add ZAP properties. const properties_t &zap_properties = _mechanism->get_zap_properties (); properties.insert (zap_properties.begin (), zap_properties.end ()); // Add ZMTP properties. 
const properties_t &zmtp_properties = _mechanism->get_zmtp_properties (); properties.insert (zmtp_properties.begin (), zmtp_properties.end ()); zmq_assert (_metadata == NULL); if (!properties.empty ()) { _metadata = new (std::nothrow) metadata_t (properties); alloc_assert (_metadata); } if (_has_handshake_timer) { cancel_timer (handshake_timer_id); _has_handshake_timer = false; } _socket->event_handshake_succeeded (_endpoint_uri_pair, 0); } int zmq::stream_engine_base_t::write_credential (msg_t *msg_) { zmq_assert (_mechanism != NULL); zmq_assert (_session != NULL); const blob_t &credential = _mechanism->get_user_id (); if (credential.size () > 0) { msg_t msg; int rc = msg.init_size (credential.size ()); zmq_assert (rc == 0); memcpy (msg.data (), credential.data (), credential.size ()); msg.set_flags (msg_t::credential); rc = _session->push_msg (&msg); if (rc == -1) { rc = msg.close (); errno_assert (rc == 0); return -1; } } _process_msg = &stream_engine_base_t::decode_and_push; return decode_and_push (msg_); } int zmq::stream_engine_base_t::pull_and_encode (msg_t *msg_) { zmq_assert (_mechanism != NULL); if (_session->pull_msg (msg_) == -1) return -1; if (_mechanism->encode (msg_) == -1) return -1; return 0; } int zmq::stream_engine_base_t::decode_and_push (msg_t *msg_) { zmq_assert (_mechanism != NULL); if (_mechanism->decode (msg_) == -1) return -1; if (_has_timeout_timer) { _has_timeout_timer = false; cancel_timer (heartbeat_timeout_timer_id); } if (_has_ttl_timer) { _has_ttl_timer = false; cancel_timer (heartbeat_ttl_timer_id); } if (msg_->flags () & msg_t::command) { process_command_message (msg_); } if (_metadata) msg_->set_metadata (_metadata); if (_session->push_msg (msg_) == -1) { if (errno == EAGAIN) _process_msg = &stream_engine_base_t::push_one_then_decode_and_push; return -1; } return 0; } int zmq::stream_engine_base_t::push_one_then_decode_and_push (msg_t *msg_) { const int rc = _session->push_msg (msg_); if (rc == 0) _process_msg = &stream_engine_base_t::decode_and_push; return rc; } int zmq::stream_engine_base_t::pull_msg_from_session (msg_t *msg_) { return _session->pull_msg (msg_); } int zmq::stream_engine_base_t::push_msg_to_session (msg_t *msg_) { return _session->push_msg (msg_); } void zmq::stream_engine_base_t::error (error_reason_t reason_) { zmq_assert (_session); if ((_options.router_notify & ZMQ_NOTIFY_DISCONNECT) && !_handshaking) { // For router sockets with disconnect notification, rollback // any incomplete message in the pipe, and push the disconnect // notification message. _session->rollback (); msg_t disconnect_notification; disconnect_notification.init (); _session->push_msg (&disconnect_notification); } // protocol errors have been signaled already at the point where they occurred if (reason_ != protocol_error && (_mechanism == NULL || _mechanism->status () == mechanism_t::handshaking)) { const int err = errno; _socket->event_handshake_failed_no_detail (_endpoint_uri_pair, err); // special case: connecting to non-ZMTP process which immediately drops connection, // or which never responds with greeting, should be treated as a protocol error // (i.e. 
stop reconnect) if (((reason_ == connection_error) || (reason_ == timeout_error)) && (_options.reconnect_stop & ZMQ_RECONNECT_STOP_HANDSHAKE_FAILED)) { reason_ = protocol_error; } } _socket->event_disconnected (_endpoint_uri_pair, _s); _session->flush (); _session->engine_error ( !_handshaking && (_mechanism == NULL || _mechanism->status () != mechanism_t::handshaking), reason_); unplug (); delete this; } void zmq::stream_engine_base_t::set_handshake_timer () { zmq_assert (!_has_handshake_timer); if (_options.handshake_ivl > 0) { add_timer (_options.handshake_ivl, handshake_timer_id); _has_handshake_timer = true; } } bool zmq::stream_engine_base_t::init_properties (properties_t &properties_) { if (_peer_address.empty ()) return false; properties_.ZMQ_MAP_INSERT_OR_EMPLACE ( std::string (ZMQ_MSG_PROPERTY_PEER_ADDRESS), _peer_address); // Private property to support deprecated SRCFD std::ostringstream stream; stream << static_cast<int> (_s); std::string fd_string = stream.str (); properties_.ZMQ_MAP_INSERT_OR_EMPLACE (std::string ("__fd"), ZMQ_MOVE (fd_string)); return true; } void zmq::stream_engine_base_t::timer_event (int id_) { if (id_ == handshake_timer_id) { _has_handshake_timer = false; // handshake timer expired before handshake completed, so engine fail error (timeout_error); } else if (id_ == heartbeat_ivl_timer_id) { _next_msg = &stream_engine_base_t::produce_ping_message; out_event (); add_timer (_options.heartbeat_interval, heartbeat_ivl_timer_id); } else if (id_ == heartbeat_ttl_timer_id) { _has_ttl_timer = false; error (timeout_error); } else if (id_ == heartbeat_timeout_timer_id) { _has_timeout_timer = false; error (timeout_error); } else // There are no other valid timer ids! assert (false); } int zmq::stream_engine_base_t::read (void *data_, size_t size_) { const int rc = zmq::tcp_read (_s, data_, size_); if (rc == 0) { // connection closed by peer errno = EPIPE; return -1; } return rc; } int zmq::stream_engine_base_t::write (const void *data_, size_t size_) { return zmq::tcp_write (_s, data_, size_); }
sophomore_public/libzmq
src/stream_engine_base.cpp
C++
gpl-3.0
21,814
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __ZMQ_STREAM_ENGINE_BASE_HPP_INCLUDED__ #define __ZMQ_STREAM_ENGINE_BASE_HPP_INCLUDED__ #include <stddef.h> #include "fd.hpp" #include "i_engine.hpp" #include "io_object.hpp" #include "i_encoder.hpp" #include "i_decoder.hpp" #include "options.hpp" #include "socket_base.hpp" #include "metadata.hpp" #include "msg.hpp" #include "tcp.hpp" namespace zmq { class io_thread_t; class session_base_t; class mechanism_t; // This engine handles any socket with SOCK_STREAM semantics, // e.g. TCP socket or an UNIX domain socket. class stream_engine_base_t : public io_object_t, public i_engine { public: stream_engine_base_t (fd_t fd_, const options_t &options_, const endpoint_uri_pair_t &endpoint_uri_pair_, bool has_handshake_stage_); ~stream_engine_base_t () ZMQ_OVERRIDE; // i_engine interface implementation. bool has_handshake_stage () ZMQ_FINAL { return _has_handshake_stage; }; void plug (zmq::io_thread_t *io_thread_, zmq::session_base_t *session_) ZMQ_FINAL; void terminate () ZMQ_FINAL; bool restart_input () ZMQ_FINAL; void restart_output () ZMQ_FINAL; void zap_msg_available () ZMQ_FINAL; const endpoint_uri_pair_t &get_endpoint () const ZMQ_FINAL; // i_poll_events interface implementation. void in_event () ZMQ_FINAL; void out_event () ZMQ_OVERRIDE; void timer_event (int id_) ZMQ_FINAL; protected: typedef metadata_t::dict_t properties_t; bool init_properties (properties_t &properties_); // Function to handle network disconnections. virtual void error (error_reason_t reason_); int next_handshake_command (msg_t *msg_); int process_handshake_command (msg_t *msg_); int pull_msg_from_session (msg_t *msg_); int push_msg_to_session (msg_t *msg_); int pull_and_encode (msg_t *msg_); virtual int decode_and_push (msg_t *msg_); int push_one_then_decode_and_push (msg_t *msg_); void set_handshake_timer (); virtual bool handshake () { return true; }; virtual void plug_internal () {}; virtual int process_command_message (msg_t *msg_) { LIBZMQ_UNUSED (msg_); return -1; }; virtual int produce_ping_message (msg_t *msg_) { LIBZMQ_UNUSED (msg_); return -1; }; virtual int process_heartbeat_message (msg_t *msg_) { LIBZMQ_UNUSED (msg_); return -1; }; virtual int produce_pong_message (msg_t *msg_) { LIBZMQ_UNUSED (msg_); return -1; }; virtual int read (void *data, size_t size_); virtual int write (const void *data_, size_t size_); void reset_pollout () { io_object_t::reset_pollout (_handle); } void set_pollout () { io_object_t::set_pollout (_handle); } void set_pollin () { io_object_t::set_pollin (_handle); } session_base_t *session () { return _session; } socket_base_t *socket () { return _socket; } const options_t _options; unsigned char *_inpos; size_t _insize; i_decoder *_decoder; unsigned char *_outpos; size_t _outsize; i_encoder *_encoder; mechanism_t *_mechanism; int (stream_engine_base_t::*_next_msg) (msg_t *msg_); int (stream_engine_base_t::*_process_msg) (msg_t *msg_); // Metadata to be attached to received messages. May be NULL. metadata_t *_metadata; // True iff the engine couldn't consume the last decoded message. bool _input_stopped; // True iff the engine doesn't have any message to encode. bool _output_stopped; // Representation of the connected endpoints. const endpoint_uri_pair_t _endpoint_uri_pair; // ID of the handshake timer enum { handshake_timer_id = 0x40 }; // True is linger timer is running. 
bool _has_handshake_timer; // Heartbeat stuff enum { heartbeat_ivl_timer_id = 0x80, heartbeat_timeout_timer_id = 0x81, heartbeat_ttl_timer_id = 0x82 }; bool _has_ttl_timer; bool _has_timeout_timer; bool _has_heartbeat_timer; const std::string _peer_address; private: bool in_event_internal (); // Unplug the engine from the session. void unplug (); int write_credential (msg_t *msg_); void mechanism_ready (); // Underlying socket. fd_t _s; handle_t _handle; bool _plugged; // When true, we are still trying to determine whether // the peer is using versioned protocol, and if so, which // version. When false, normal message flow has started. bool _handshaking; msg_t _tx_msg; bool _io_error; // The session this engine is attached to. zmq::session_base_t *_session; // Socket zmq::socket_base_t *_socket; // Indicate if engine has an handshake stage, if it does, engine must call session.engine_ready // when handshake is completed. bool _has_handshake_stage; ZMQ_NON_COPYABLE_NOR_MOVABLE (stream_engine_base_t) }; } #endif
sophomore_public/libzmq
src/stream_engine_base.hpp
C++
gpl-3.0
5,085
/* SPDX-License-Identifier: MPL-2.0 */ #include "precompiled.hpp" #include "stream_listener_base.hpp" #include "session_base.hpp" #include "socket_base.hpp" #include "zmtp_engine.hpp" #include "raw_engine.hpp" #ifndef ZMQ_HAVE_WINDOWS #include <unistd.h> #else #include <winsock2.h> #endif zmq::stream_listener_base_t::stream_listener_base_t ( zmq::io_thread_t *io_thread_, zmq::socket_base_t *socket_, const zmq::options_t &options_) : own_t (io_thread_, options_), io_object_t (io_thread_), _s (retired_fd), _handle (static_cast<handle_t> (NULL)), _socket (socket_) { } zmq::stream_listener_base_t::~stream_listener_base_t () { zmq_assert (_s == retired_fd); zmq_assert (!_handle); } int zmq::stream_listener_base_t::get_local_address (std::string &addr_) const { addr_ = get_socket_name (_s, socket_end_local); return addr_.empty () ? -1 : 0; } void zmq::stream_listener_base_t::process_plug () { // Start polling for incoming connections. _handle = add_fd (_s); set_pollin (_handle); } void zmq::stream_listener_base_t::process_term (int linger_) { rm_fd (_handle); _handle = static_cast<handle_t> (NULL); close (); own_t::process_term (linger_); } int zmq::stream_listener_base_t::close () { // TODO this is identical to stream_connector_base_t::close zmq_assert (_s != retired_fd); #ifdef ZMQ_HAVE_WINDOWS const int rc = closesocket (_s); wsa_assert (rc != SOCKET_ERROR); #else const int rc = ::close (_s); errno_assert (rc == 0); #endif _socket->event_closed (make_unconnected_bind_endpoint_pair (_endpoint), _s); _s = retired_fd; return 0; } void zmq::stream_listener_base_t::create_engine (fd_t fd_) { const endpoint_uri_pair_t endpoint_pair ( get_socket_name (fd_, socket_end_local), get_socket_name (fd_, socket_end_remote), endpoint_type_bind); i_engine *engine; if (options.raw_socket) engine = new (std::nothrow) raw_engine_t (fd_, options, endpoint_pair); else engine = new (std::nothrow) zmtp_engine_t (fd_, options, endpoint_pair); alloc_assert (engine); // Choose I/O thread to run connecter in. Given that we are already // running in an I/O thread, there must be at least one available. io_thread_t *io_thread = choose_io_thread (options.affinity); zmq_assert (io_thread); // Create and launch a session object. session_base_t *session = session_base_t::create (io_thread, false, _socket, options, NULL); errno_assert (session); session->inc_seqnum (); launch_child (session); send_attach (session, engine, false); _socket->event_accepted (endpoint_pair, fd_); }
sophomore_public/libzmq
src/stream_listener_base.cpp
C++
gpl-3.0
2,706
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __ZMQ_STREAM_LISTENER_BASE_HPP_INCLUDED__ #define __ZMQ_STREAM_LISTENER_BASE_HPP_INCLUDED__ #include <string> #include "fd.hpp" #include "own.hpp" #include "stdint.hpp" #include "io_object.hpp" #include "address.hpp" namespace zmq { class io_thread_t; class socket_base_t; class stream_listener_base_t : public own_t, public io_object_t { public: stream_listener_base_t (zmq::io_thread_t *io_thread_, zmq::socket_base_t *socket_, const options_t &options_); ~stream_listener_base_t () ZMQ_OVERRIDE; // Get the bound address for use with wildcards int get_local_address (std::string &addr_) const; protected: virtual std::string get_socket_name (fd_t fd_, socket_end_t socket_end_) const = 0; private: // Handlers for incoming commands. void process_plug () ZMQ_FINAL; void process_term (int linger_) ZMQ_FINAL; protected: // Close the listening socket. virtual int close (); virtual void create_engine (fd_t fd); // Underlying socket. fd_t _s; // Handle corresponding to the listening socket. handle_t _handle; // Socket the listener belongs to. zmq::socket_base_t *_socket; // String representation of endpoint to bind to std::string _endpoint; ZMQ_NON_COPYABLE_NOR_MOVABLE (stream_listener_base_t) }; } #endif
sophomore_public/libzmq
src/stream_listener_base.hpp
C++
gpl-3.0
1,465
/* SPDX-License-Identifier: MPL-2.0 */ #include "precompiled.hpp" #include "sub.hpp" #include "msg.hpp" zmq::sub_t::sub_t (class ctx_t *parent_, uint32_t tid_, int sid_) : xsub_t (parent_, tid_, sid_) { options.type = ZMQ_SUB; // Switch filtering messages on (as opposed to XSUB which where the // filtering is off). options.filter = true; } zmq::sub_t::~sub_t () { } int zmq::sub_t::xsetsockopt (int option_, const void *optval_, size_t optvallen_) { if (option_ != ZMQ_SUBSCRIBE && option_ != ZMQ_UNSUBSCRIBE) { errno = EINVAL; return -1; } // Create the subscription message. msg_t msg; int rc; const unsigned char *data = static_cast<const unsigned char *> (optval_); if (option_ == ZMQ_SUBSCRIBE) { rc = msg.init_subscribe (optvallen_, data); } else { rc = msg.init_cancel (optvallen_, data); } errno_assert (rc == 0); // Pass it further on in the stack. rc = xsub_t::xsend (&msg); return close_and_return (&msg, rc); } int zmq::sub_t::xsend (msg_t *) { // Override the XSUB's send. errno = ENOTSUP; return -1; } bool zmq::sub_t::xhas_out () { // Override the XSUB's send. return false; }
sophomore_public/libzmq
src/sub.cpp
C++
gpl-3.0
1,291
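sub_t::xsetsockopt above converts ZMQ_SUBSCRIBE and ZMQ_UNSUBSCRIBE into subscription messages pushed through xsub_t::xsend. From the application side that looks like the following sketch; the endpoint and topic are placeholders.

#include <zmq.h>
#include <cassert>

int main ()
{
    void *ctx = zmq_ctx_new ();
    void *sub = zmq_socket (ctx, ZMQ_SUB);
    int rc = zmq_connect (sub, "tcp://127.0.0.1:5556");
    assert (rc == 0);

    //  Handled by sub_t::xsetsockopt: becomes a subscription message
    //  sent upstream to the publisher.
    rc = zmq_setsockopt (sub, ZMQ_SUBSCRIBE, "weather", 7);
    assert (rc == 0);

    //  An empty prefix subscribes to every message.
    rc = zmq_setsockopt (sub, ZMQ_SUBSCRIBE, "", 0);
    assert (rc == 0);

    char buf[256];
    const int n = zmq_recv (sub, buf, sizeof buf, 0);
    assert (n >= 0);

    zmq_close (sub);
    zmq_ctx_term (ctx);
    return 0;
}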
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __ZMQ_SUB_HPP_INCLUDED__ #define __ZMQ_SUB_HPP_INCLUDED__ #include "xsub.hpp" namespace zmq { class ctx_t; class msg_t; class io_thread_t; class socket_base_t; class sub_t ZMQ_FINAL : public xsub_t { public: sub_t (zmq::ctx_t *parent_, uint32_t tid_, int sid_); ~sub_t (); protected: int xsetsockopt (int option_, const void *optval_, size_t optvallen_); int xsend (zmq::msg_t *msg_); bool xhas_out (); ZMQ_NON_COPYABLE_NOR_MOVABLE (sub_t) }; } #endif
sophomore_public/libzmq
src/sub.hpp
C++
gpl-3.0
534
/* SPDX-License-Identifier: MPL-2.0 */ #include "precompiled.hpp" #include "macros.hpp" #include "ip.hpp" #include "tcp.hpp" #include "err.hpp" #include "options.hpp" #if !defined ZMQ_HAVE_WINDOWS #include <fcntl.h> #include <sys/types.h> #include <sys/socket.h> #include <netinet/in.h> #include <netinet/tcp.h> #include <unistd.h> #ifdef ZMQ_HAVE_VXWORKS #include <sockLib.h> #endif #endif #if defined ZMQ_HAVE_OPENVMS #include <ioctl.h> #endif #ifdef __APPLE__ #include <TargetConditionals.h> #endif int zmq::tune_tcp_socket (fd_t s_) { // Disable Nagle's algorithm. We are doing data batching on 0MQ level, // so using Nagle wouldn't improve throughput in anyway, but it would // hurt latency. int nodelay = 1; const int rc = setsockopt (s_, IPPROTO_TCP, TCP_NODELAY, reinterpret_cast<char *> (&nodelay), sizeof (int)); assert_success_or_recoverable (s_, rc); if (rc != 0) return rc; #ifdef ZMQ_HAVE_OPENVMS // Disable delayed acknowledgements as they hurt latency significantly. int nodelack = 1; rc = setsockopt (s_, IPPROTO_TCP, TCP_NODELACK, (char *) &nodelack, sizeof (int)); assert_success_or_recoverable (s_, rc); #endif return rc; } int zmq::set_tcp_send_buffer (fd_t sockfd_, int bufsize_) { const int rc = setsockopt (sockfd_, SOL_SOCKET, SO_SNDBUF, reinterpret_cast<char *> (&bufsize_), sizeof bufsize_); assert_success_or_recoverable (sockfd_, rc); return rc; } int zmq::set_tcp_receive_buffer (fd_t sockfd_, int bufsize_) { const int rc = setsockopt (sockfd_, SOL_SOCKET, SO_RCVBUF, reinterpret_cast<char *> (&bufsize_), sizeof bufsize_); assert_success_or_recoverable (sockfd_, rc); return rc; } int zmq::tune_tcp_keepalives (fd_t s_, int keepalive_, int keepalive_cnt_, int keepalive_idle_, int keepalive_intvl_) { // These options are used only under certain #ifdefs below. LIBZMQ_UNUSED (keepalive_); LIBZMQ_UNUSED (keepalive_cnt_); LIBZMQ_UNUSED (keepalive_idle_); LIBZMQ_UNUSED (keepalive_intvl_); // If none of the #ifdefs apply, then s_ is unused. LIBZMQ_UNUSED (s_); // Tuning TCP keep-alives if platform allows it // All values = -1 means skip and leave it for OS #ifdef ZMQ_HAVE_WINDOWS if (keepalive_ != -1) { tcp_keepalive keepalive_opts; keepalive_opts.onoff = keepalive_; keepalive_opts.keepalivetime = keepalive_idle_ != -1 ? keepalive_idle_ * 1000 : 7200000; keepalive_opts.keepaliveinterval = keepalive_intvl_ != -1 ? 
keepalive_intvl_ * 1000 : 1000; DWORD num_bytes_returned; const int rc = WSAIoctl (s_, SIO_KEEPALIVE_VALS, &keepalive_opts, sizeof (keepalive_opts), NULL, 0, &num_bytes_returned, NULL, NULL); assert_success_or_recoverable (s_, rc); if (rc == SOCKET_ERROR) return rc; } #else #ifdef ZMQ_HAVE_SO_KEEPALIVE if (keepalive_ != -1) { int rc = setsockopt (s_, SOL_SOCKET, SO_KEEPALIVE, reinterpret_cast<char *> (&keepalive_), sizeof (int)); assert_success_or_recoverable (s_, rc); if (rc != 0) return rc; #ifdef ZMQ_HAVE_TCP_KEEPCNT if (keepalive_cnt_ != -1) { int rc = setsockopt (s_, IPPROTO_TCP, TCP_KEEPCNT, &keepalive_cnt_, sizeof (int)); assert_success_or_recoverable (s_, rc); if (rc != 0) return rc; } #endif // ZMQ_HAVE_TCP_KEEPCNT #ifdef ZMQ_HAVE_TCP_KEEPIDLE if (keepalive_idle_ != -1) { int rc = setsockopt (s_, IPPROTO_TCP, TCP_KEEPIDLE, &keepalive_idle_, sizeof (int)); assert_success_or_recoverable (s_, rc); if (rc != 0) return rc; } #else // ZMQ_HAVE_TCP_KEEPIDLE #ifdef ZMQ_HAVE_TCP_KEEPALIVE if (keepalive_idle_ != -1) { int rc = setsockopt (s_, IPPROTO_TCP, TCP_KEEPALIVE, &keepalive_idle_, sizeof (int)); assert_success_or_recoverable (s_, rc); if (rc != 0) return rc; } #endif // ZMQ_HAVE_TCP_KEEPALIVE #endif // ZMQ_HAVE_TCP_KEEPIDLE #ifdef ZMQ_HAVE_TCP_KEEPINTVL if (keepalive_intvl_ != -1) { int rc = setsockopt (s_, IPPROTO_TCP, TCP_KEEPINTVL, &keepalive_intvl_, sizeof (int)); assert_success_or_recoverable (s_, rc); if (rc != 0) return rc; } #endif // ZMQ_HAVE_TCP_KEEPINTVL } #endif // ZMQ_HAVE_SO_KEEPALIVE #endif // ZMQ_HAVE_WINDOWS return 0; } int zmq::tune_tcp_maxrt (fd_t sockfd_, int timeout_) { if (timeout_ <= 0) return 0; LIBZMQ_UNUSED (sockfd_); #if defined(ZMQ_HAVE_WINDOWS) && defined(TCP_MAXRT) // msdn says it's supported in >= Vista, >= Windows Server 2003 timeout_ /= 1000; // in seconds const int rc = setsockopt (sockfd_, IPPROTO_TCP, TCP_MAXRT, reinterpret_cast<char *> (&timeout_), sizeof (timeout_)); assert_success_or_recoverable (sockfd_, rc); return rc; // FIXME: should be ZMQ_HAVE_TCP_USER_TIMEOUT #elif defined(TCP_USER_TIMEOUT) int rc = setsockopt (sockfd_, IPPROTO_TCP, TCP_USER_TIMEOUT, &timeout_, sizeof (timeout_)); assert_success_or_recoverable (sockfd_, rc); return rc; #else return 0; #endif } int zmq::tcp_write (fd_t s_, const void *data_, size_t size_) { #ifdef ZMQ_HAVE_WINDOWS const int nbytes = send (s_, (char *) data_, static_cast<int> (size_), 0); // If not a single byte can be written to the socket in non-blocking mode // we'll get an error (this may happen during the speculative write). const int last_error = WSAGetLastError (); if (nbytes == SOCKET_ERROR && last_error == WSAEWOULDBLOCK) return 0; // Signalise peer failure. if (nbytes == SOCKET_ERROR && (last_error == WSAENETDOWN || last_error == WSAENETRESET || last_error == WSAEHOSTUNREACH || last_error == WSAECONNABORTED || last_error == WSAETIMEDOUT || last_error == WSAECONNRESET)) return -1; // Circumvent a Windows bug: // See https://support.microsoft.com/en-us/kb/201213 // See https://zeromq.jira.com/browse/LIBZMQ-195 if (nbytes == SOCKET_ERROR && last_error == WSAENOBUFS) return 0; wsa_assert (nbytes != SOCKET_ERROR); return nbytes; #else ssize_t nbytes = send (s_, static_cast<const char *> (data_), size_, 0); // Several errors are OK. When speculative write is being done we may not // be able to write a single byte from the socket. Also, SIGSTOP issued // by a debugging tool can result in EINTR error. 
if (nbytes == -1 && (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR)) return 0; // Signalise peer failure. if (nbytes == -1) { #if !defined(TARGET_OS_IPHONE) || !TARGET_OS_IPHONE errno_assert (errno != EACCES && errno != EBADF && errno != EDESTADDRREQ && errno != EFAULT && errno != EISCONN && errno != EMSGSIZE && errno != ENOMEM && errno != ENOTSOCK && errno != EOPNOTSUPP); #else errno_assert (errno != EACCES && errno != EDESTADDRREQ && errno != EFAULT && errno != EISCONN && errno != EMSGSIZE && errno != ENOMEM && errno != ENOTSOCK && errno != EOPNOTSUPP); #endif return -1; } return static_cast<int> (nbytes); #endif } int zmq::tcp_read (fd_t s_, void *data_, size_t size_) { #ifdef ZMQ_HAVE_WINDOWS const int rc = recv (s_, static_cast<char *> (data_), static_cast<int> (size_), 0); // If not a single byte can be read from the socket in non-blocking mode // we'll get an error (this may happen during the speculative read). if (rc == SOCKET_ERROR) { const int last_error = WSAGetLastError (); if (last_error == WSAEWOULDBLOCK) { errno = EAGAIN; } else { wsa_assert ( last_error == WSAENETDOWN || last_error == WSAENETRESET || last_error == WSAECONNABORTED || last_error == WSAETIMEDOUT || last_error == WSAECONNRESET || last_error == WSAECONNREFUSED || last_error == WSAENOTCONN || last_error == WSAENOBUFS); errno = wsa_error_to_errno (last_error); } } return rc == SOCKET_ERROR ? -1 : rc; #else const ssize_t rc = recv (s_, static_cast<char *> (data_), size_, 0); // Several errors are OK. When speculative read is being done we may not // be able to read a single byte from the socket. Also, SIGSTOP issued // by a debugging tool can result in EINTR error. if (rc == -1) { #if !defined(TARGET_OS_IPHONE) || !TARGET_OS_IPHONE errno_assert (errno != EBADF && errno != EFAULT && errno != ENOMEM && errno != ENOTSOCK); #else errno_assert (errno != EFAULT && errno != ENOMEM && errno != ENOTSOCK); #endif if (errno == EWOULDBLOCK || errno == EINTR) errno = EAGAIN; } return static_cast<int> (rc); #endif } void zmq::tcp_tune_loopback_fast_path (const fd_t socket_) { #if defined ZMQ_HAVE_WINDOWS && defined SIO_LOOPBACK_FAST_PATH int sio_loopback_fastpath = 1; DWORD number_of_bytes_returned = 0; const int rc = WSAIoctl ( socket_, SIO_LOOPBACK_FAST_PATH, &sio_loopback_fastpath, sizeof sio_loopback_fastpath, NULL, 0, &number_of_bytes_returned, 0, 0); if (SOCKET_ERROR == rc) { const DWORD last_error = ::WSAGetLastError (); if (WSAEOPNOTSUPP == last_error) { // This system is not Windows 8 or Server 2012, and the call is not supported. } else { wsa_assert (false); } } #else LIBZMQ_UNUSED (socket_); #endif } void zmq::tune_tcp_busy_poll (fd_t socket_, int busy_poll_) { #if defined(ZMQ_HAVE_BUSY_POLL) if (busy_poll_ > 0) { const int rc = setsockopt (socket_, SOL_SOCKET, SO_BUSY_POLL, reinterpret_cast<char *> (&busy_poll_), sizeof (int)); assert_success_or_recoverable (socket_, rc); } #else LIBZMQ_UNUSED (socket_); LIBZMQ_UNUSED (busy_poll_); #endif } zmq::fd_t zmq::tcp_open_socket (const char *address_, const zmq::options_t &options_, bool local_, bool fallback_to_ipv4_, zmq::tcp_address_t *out_tcp_addr_) { // Convert the textual address into address structure. int rc = out_tcp_addr_->resolve (address_, local_, options_.ipv6); if (rc != 0) return retired_fd; // Create the socket. fd_t s = open_socket (out_tcp_addr_->family (), SOCK_STREAM, IPPROTO_TCP); // IPv6 address family not supported, try automatic downgrade to IPv4. 
if (s == retired_fd && fallback_to_ipv4_ && out_tcp_addr_->family () == AF_INET6 && errno == EAFNOSUPPORT && options_.ipv6) { rc = out_tcp_addr_->resolve (address_, local_, false); if (rc != 0) { return retired_fd; } s = open_socket (AF_INET, SOCK_STREAM, IPPROTO_TCP); } if (s == retired_fd) { return retired_fd; } // On some systems, IPv4 mapping in IPv6 sockets is disabled by default. // Switch it on in such cases. if (out_tcp_addr_->family () == AF_INET6) enable_ipv4_mapping (s); // Set the IP Type-Of-Service priority for this socket if (options_.tos != 0) set_ip_type_of_service (s, options_.tos); // Set the protocol-defined priority for this socket if (options_.priority != 0) set_socket_priority (s, options_.priority); // Set the socket to loopback fastpath if configured. if (options_.loopback_fastpath) tcp_tune_loopback_fast_path (s); // Bind the socket to a device if applicable if (!options_.bound_device.empty ()) if (bind_to_device (s, options_.bound_device) == -1) goto setsockopt_error; // Set the socket buffer limits for the underlying socket. if (options_.sndbuf >= 0) set_tcp_send_buffer (s, options_.sndbuf); if (options_.rcvbuf >= 0) set_tcp_receive_buffer (s, options_.rcvbuf); // This option removes several delays caused by scheduling, interrupts and context switching. if (options_.busy_poll) tune_tcp_busy_poll (s, options_.busy_poll); return s; setsockopt_error: #ifdef ZMQ_HAVE_WINDOWS rc = closesocket (s); wsa_assert (rc != SOCKET_ERROR); #else rc = ::close (s); errno_assert (rc == 0); #endif return retired_fd; }
sophomore_public/libzmq
src/tcp.cpp
C++
gpl-3.0
13,101
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __ZMQ_TCP_HPP_INCLUDED__ #define __ZMQ_TCP_HPP_INCLUDED__ #include "fd.hpp" namespace zmq { class tcp_address_t; struct options_t; // Tunes the supplied TCP socket for the best latency. int tune_tcp_socket (fd_t s_); // Sets the socket send buffer size. int set_tcp_send_buffer (fd_t sockfd_, int bufsize_); // Sets the socket receive buffer size. int set_tcp_receive_buffer (fd_t sockfd_, int bufsize_); // Tunes TCP keep-alives int tune_tcp_keepalives (fd_t s_, int keepalive_, int keepalive_cnt_, int keepalive_idle_, int keepalive_intvl_); // Tunes TCP max retransmit timeout int tune_tcp_maxrt (fd_t sockfd_, int timeout_); // Writes data to the socket. Returns the number of bytes actually // written (even zero is to be considered to be a success). In case // of error or orderly shutdown by the other peer -1 is returned. int tcp_write (fd_t s_, const void *data_, size_t size_); // Reads data from the socket (up to 'size' bytes). // Returns the number of bytes actually read or -1 on error. // Zero indicates the peer has closed the connection. int tcp_read (fd_t s_, void *data_, size_t size_); void tcp_tune_loopback_fast_path (fd_t socket_); void tune_tcp_busy_poll (fd_t socket_, int busy_poll_); // Resolves the given address_ string, opens a socket and sets socket options // according to the passed options_. On success, returns the socket // descriptor and assigns the resolved address to out_tcp_addr_. In case of // an error, retired_fd is returned, and the value of out_tcp_addr_ is undefined. // errno is set to an error code describing the cause of the error. fd_t tcp_open_socket (const char *address_, const options_t &options_, bool local_, bool fallback_to_ipv4_, tcp_address_t *out_tcp_addr_); } #endif
sophomore_public/libzmq
src/tcp.hpp
C++
gpl-3.0
1,996
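// Illustrative usage sketch (not part of the upstream sources): how a caller
// is expected to interpret the tcp_read()/tcp_write() return values that
// tcp.hpp documents above. It assumes the internal headers are on the include
// path and that `fd` is an already-connected, non-blocking TCP socket; the
// helper echo_once() itself is hypothetical.
#include "tcp.hpp"

#include <cerrno>

// Reads once and echoes the data back. Returns false on peer shutdown or on a
// hard error; returns true when the caller should simply try again later.
static bool echo_once (zmq::fd_t fd)
{
    char buf[256];

    const int n = zmq::tcp_read (fd, buf, sizeof buf);
    if (n == 0)
        return false; // orderly shutdown by the peer
    if (n == -1)
        return errno == EAGAIN; // EAGAIN: nothing readable yet, retry later

    int written = 0;
    while (written < n) {
        const int rc = zmq::tcp_write (fd, buf + written, n - written);
        if (rc == -1)
            return false; // peer failure
        if (rc == 0)
            break; // socket buffer full; the remainder must be retried later
        written += rc;
    }
    return true;
}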
/* SPDX-License-Identifier: MPL-2.0 */ #include "precompiled.hpp" #include <string> #include "macros.hpp" #include "tcp_address.hpp" #include "stdint.hpp" #include "err.hpp" #include "ip.hpp" #ifndef ZMQ_HAVE_WINDOWS #include <sys/types.h> #include <arpa/inet.h> #include <netinet/tcp.h> #include <net/if.h> #include <netdb.h> #include <ctype.h> #include <unistd.h> #include <stdlib.h> #endif #include <limits.h> zmq::tcp_address_t::tcp_address_t () : _has_src_addr (false) { memset (&_address, 0, sizeof (_address)); memset (&_source_address, 0, sizeof (_source_address)); } zmq::tcp_address_t::tcp_address_t (const sockaddr *sa_, socklen_t sa_len_) : _has_src_addr (false) { zmq_assert (sa_ && sa_len_ > 0); memset (&_address, 0, sizeof (_address)); memset (&_source_address, 0, sizeof (_source_address)); if (sa_->sa_family == AF_INET && sa_len_ >= static_cast<socklen_t> (sizeof (_address.ipv4))) memcpy (&_address.ipv4, sa_, sizeof (_address.ipv4)); else if (sa_->sa_family == AF_INET6 && sa_len_ >= static_cast<socklen_t> (sizeof (_address.ipv6))) memcpy (&_address.ipv6, sa_, sizeof (_address.ipv6)); } int zmq::tcp_address_t::resolve (const char *name_, bool local_, bool ipv6_) { // Test the ';' to know if we have a source address in name_ const char *src_delimiter = strrchr (name_, ';'); if (src_delimiter) { const std::string src_name (name_, src_delimiter - name_); ip_resolver_options_t src_resolver_opts; src_resolver_opts .bindable (true) // Restrict hostname/service to literals to avoid any DNS // lookups or service-name irregularity due to // indeterminate socktype. .allow_dns (false) .allow_nic_name (true) .ipv6 (ipv6_) .expect_port (true); ip_resolver_t src_resolver (src_resolver_opts); const int rc = src_resolver.resolve (&_source_address, src_name.c_str ()); if (rc != 0) return -1; name_ = src_delimiter + 1; _has_src_addr = true; } ip_resolver_options_t resolver_opts; resolver_opts.bindable (local_) .allow_dns (true) .allow_nic_name (local_) .ipv6 (ipv6_) .expect_port (true); ip_resolver_t resolver (resolver_opts); return resolver.resolve (&_address, name_); } template <size_t N1, size_t N2> static std::string make_address_string (const char *hbuf_, uint16_t port_, const char (&ipv6_prefix_)[N1], const char (&ipv6_suffix_)[N2]) { const size_t max_port_str_length = 5; char buf[NI_MAXHOST + sizeof ipv6_prefix_ + sizeof ipv6_suffix_ + max_port_str_length]; char *pos = buf; memcpy (pos, ipv6_prefix_, sizeof ipv6_prefix_ - 1); pos += sizeof ipv6_prefix_ - 1; const size_t hbuf_len = strlen (hbuf_); memcpy (pos, hbuf_, hbuf_len); pos += hbuf_len; memcpy (pos, ipv6_suffix_, sizeof ipv6_suffix_ - 1); pos += sizeof ipv6_suffix_ - 1; int res = snprintf (pos, max_port_str_length + 1, "%d", ntohs (port_)); zmq_assert (res > 0 && res < (int) (max_port_str_length + 1)); pos += res; return std::string (buf, pos - buf); } int zmq::tcp_address_t::to_string (std::string &addr_) const { if (_address.family () != AF_INET && _address.family () != AF_INET6) { addr_.clear (); return -1; } // Not using service resolving because of // https://github.com/zeromq/libzmq/commit/1824574f9b5a8ce786853320e3ea09fe1f822bc4 char hbuf[NI_MAXHOST]; const int rc = getnameinfo (addr (), addrlen (), hbuf, sizeof (hbuf), NULL, 0, NI_NUMERICHOST); if (rc != 0) { addr_.clear (); return rc; } const char ipv4_prefix[] = "tcp://"; const char ipv4_suffix[] = ":"; const char ipv6_prefix[] = "tcp://["; const char ipv6_suffix[] = "]:"; if (_address.family () == AF_INET6) { addr_ = make_address_string (hbuf, _address.ipv6.sin6_port, 
ipv6_prefix, ipv6_suffix); } else { addr_ = make_address_string (hbuf, _address.ipv4.sin_port, ipv4_prefix, ipv4_suffix); } return 0; } const sockaddr *zmq::tcp_address_t::addr () const { return _address.as_sockaddr (); } socklen_t zmq::tcp_address_t::addrlen () const { return _address.sockaddr_len (); } const sockaddr *zmq::tcp_address_t::src_addr () const { return _source_address.as_sockaddr (); } socklen_t zmq::tcp_address_t::src_addrlen () const { return _source_address.sockaddr_len (); } bool zmq::tcp_address_t::has_src_addr () const { return _has_src_addr; } #if defined ZMQ_HAVE_WINDOWS unsigned short zmq::tcp_address_t::family () const #else sa_family_t zmq::tcp_address_t::family () const #endif { return _address.family (); } zmq::tcp_address_mask_t::tcp_address_mask_t () : _address_mask (-1) { memset (&_network_address, 0, sizeof (_network_address)); } int zmq::tcp_address_mask_t::resolve (const char *name_, bool ipv6_) { // Find '/' at the end that separates address from the cidr mask number. // Allow empty mask clause and treat it like '/32' for ipv4 or '/128' for ipv6. std::string addr_str, mask_str; const char *delimiter = strrchr (name_, '/'); if (delimiter != NULL) { addr_str.assign (name_, delimiter - name_); mask_str.assign (delimiter + 1); if (mask_str.empty ()) { errno = EINVAL; return -1; } } else addr_str.assign (name_); // Parse address part using standard routines. ip_resolver_options_t resolver_opts; resolver_opts.bindable (false) .allow_dns (false) .allow_nic_name (false) .ipv6 (ipv6_) .expect_port (false); ip_resolver_t resolver (resolver_opts); const int rc = resolver.resolve (&_network_address, addr_str.c_str ()); if (rc != 0) return rc; // Parse the cidr mask number. const int full_mask_ipv4 = sizeof (_network_address.ipv4.sin_addr) * CHAR_BIT; const int full_mask_ipv6 = sizeof (_network_address.ipv6.sin6_addr) * CHAR_BIT; if (mask_str.empty ()) { _address_mask = _network_address.family () == AF_INET6 ? 
full_mask_ipv6 : full_mask_ipv4; } else if (mask_str == "0") _address_mask = 0; else { const long mask = strtol (mask_str.c_str (), NULL, 10); if ((mask < 1) || (_network_address.family () == AF_INET6 && mask > full_mask_ipv6) || (_network_address.family () != AF_INET6 && mask > full_mask_ipv4)) { errno = EINVAL; return -1; } _address_mask = static_cast<int> (mask); } return 0; } bool zmq::tcp_address_mask_t::match_address (const struct sockaddr *ss_, const socklen_t ss_len_) const { zmq_assert (_address_mask != -1 && ss_ != NULL && ss_len_ >= static_cast<socklen_t> (sizeof (struct sockaddr))); if (ss_->sa_family != _network_address.generic.sa_family) return false; if (_address_mask > 0) { int mask; const uint8_t *our_bytes, *their_bytes; if (ss_->sa_family == AF_INET6) { zmq_assert (ss_len_ == sizeof (struct sockaddr_in6)); their_bytes = reinterpret_cast<const uint8_t *> ( &((reinterpret_cast<const struct sockaddr_in6 *> (ss_)) ->sin6_addr)); our_bytes = reinterpret_cast<const uint8_t *> ( &_network_address.ipv6.sin6_addr); mask = sizeof (struct in6_addr) * 8; } else { zmq_assert (ss_len_ == sizeof (struct sockaddr_in)); their_bytes = reinterpret_cast<const uint8_t *> (&( (reinterpret_cast<const struct sockaddr_in *> (ss_))->sin_addr)); our_bytes = reinterpret_cast<const uint8_t *> ( &_network_address.ipv4.sin_addr); mask = sizeof (struct in_addr) * 8; } if (_address_mask < mask) mask = _address_mask; const size_t full_bytes = mask / 8; if (memcmp (our_bytes, their_bytes, full_bytes) != 0) return false; const uint8_t last_byte_bits = 0xffU << (8 - mask % 8); if (last_byte_bits) { if ((their_bytes[full_bytes] & last_byte_bits) != (our_bytes[full_bytes] & last_byte_bits)) return false; } } return true; }
sophomore_public/libzmq
src/tcp_address.cpp
C++
gpl-3.0
8,814
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __ZMQ_TCP_ADDRESS_HPP_INCLUDED__ #define __ZMQ_TCP_ADDRESS_HPP_INCLUDED__ #if !defined ZMQ_HAVE_WINDOWS #include <sys/socket.h> #include <netinet/in.h> #endif #include "ip_resolver.hpp" namespace zmq { class tcp_address_t { public: tcp_address_t (); tcp_address_t (const sockaddr *sa_, socklen_t sa_len_); // This function translates textual TCP address into an address // structure. If 'local' is true, names are resolved as local interface // names. If it is false, names are resolved as remote hostnames. // If 'ipv6' is true, the name may resolve to IPv6 address. int resolve (const char *name_, bool local_, bool ipv6_); // The opposite to resolve() int to_string (std::string &addr_) const; #if defined ZMQ_HAVE_WINDOWS unsigned short family () const; #else sa_family_t family () const; #endif const sockaddr *addr () const; socklen_t addrlen () const; const sockaddr *src_addr () const; socklen_t src_addrlen () const; bool has_src_addr () const; private: ip_addr_t _address; ip_addr_t _source_address; bool _has_src_addr; }; class tcp_address_mask_t { public: tcp_address_mask_t (); // This function enhances tcp_address_t::resolve() with ability to parse // additional cidr-like(/xx) mask value at the end of the name string. // Works only with remote hostnames. int resolve (const char *name_, bool ipv6_); bool match_address (const struct sockaddr *ss_, socklen_t ss_len_) const; private: ip_addr_t _network_address; int _address_mask; }; } #endif
sophomore_public/libzmq
src/tcp_address.hpp
C++
gpl-3.0
1,641
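// Illustrative usage sketch (not part of the upstream sources): resolving a
// textual endpoint with tcp_address_t and matching peers against a CIDR mask
// with tcp_address_mask_t, following the contracts documented in
// tcp_address.hpp above. The endpoint strings and helper names are
// assumptions made for the example only.
#include "tcp_address.hpp"

#include <cstdio>
#include <string>

static void resolve_and_print ()
{
    zmq::tcp_address_t addr;

    // local_ = false resolves the name as a remote host; ipv6_ = true allows
    // an IPv6 result.
    if (addr.resolve ("127.0.0.1:5555", false, true) != 0) {
        std::printf ("resolve failed\n");
        return;
    }

    std::string text;
    if (addr.to_string (text) == 0)
        std::printf ("resolved to %s\n", text.c_str ());
}

static bool is_loopback_peer (const struct sockaddr *sa, socklen_t sa_len)
{
    zmq::tcp_address_mask_t mask;

    // CIDR-style filter; an empty mask clause would mean /32 (or /128 for IPv6).
    if (mask.resolve ("127.0.0.0/8", false) != 0)
        return false;
    return mask.match_address (sa, sa_len);
}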
/* SPDX-License-Identifier: MPL-2.0 */ #include "precompiled.hpp" #include <new> #include <string> #include "macros.hpp" #include "tcp_connecter.hpp" #include "io_thread.hpp" #include "err.hpp" #include "ip.hpp" #include "tcp.hpp" #include "address.hpp" #include "tcp_address.hpp" #include "session_base.hpp" #if !defined ZMQ_HAVE_WINDOWS #include <unistd.h> #include <sys/types.h> #include <sys/socket.h> #include <arpa/inet.h> #include <netinet/tcp.h> #include <netinet/in.h> #include <netdb.h> #include <fcntl.h> #ifdef ZMQ_HAVE_VXWORKS #include <sockLib.h> #endif #ifdef ZMQ_HAVE_OPENVMS #include <ioctl.h> #endif #endif #ifdef __APPLE__ #include <TargetConditionals.h> #endif zmq::tcp_connecter_t::tcp_connecter_t (class io_thread_t *io_thread_, class session_base_t *session_, const options_t &options_, address_t *addr_, bool delayed_start_) : stream_connecter_base_t ( io_thread_, session_, options_, addr_, delayed_start_), _connect_timer_started (false) { zmq_assert (_addr->protocol == protocol_name::tcp); } zmq::tcp_connecter_t::~tcp_connecter_t () { zmq_assert (!_connect_timer_started); } void zmq::tcp_connecter_t::process_term (int linger_) { if (_connect_timer_started) { cancel_timer (connect_timer_id); _connect_timer_started = false; } stream_connecter_base_t::process_term (linger_); } void zmq::tcp_connecter_t::out_event () { if (_connect_timer_started) { cancel_timer (connect_timer_id); _connect_timer_started = false; } // TODO this is still very similar to (t)ipc_connecter_t, maybe the // differences can be factored out rm_handle (); const fd_t fd = connect (); if (fd == retired_fd && ((options.reconnect_stop & ZMQ_RECONNECT_STOP_CONN_REFUSED) && errno == ECONNREFUSED)) { send_conn_failed (_session); close (); terminate (); return; } // Handle the error condition by attempt to reconnect. if (fd == retired_fd || !tune_socket (fd)) { close (); add_reconnect_timer (); return; } create_engine (fd, get_socket_name<tcp_address_t> (fd, socket_end_local)); } void zmq::tcp_connecter_t::timer_event (int id_) { if (id_ == connect_timer_id) { _connect_timer_started = false; rm_handle (); close (); add_reconnect_timer (); } else stream_connecter_base_t::timer_event (id_); } void zmq::tcp_connecter_t::start_connecting () { // Open the connecting socket. const int rc = open (); // Connect may succeed in synchronous manner. if (rc == 0) { _handle = add_fd (_s); out_event (); } // Connection establishment may be delayed. Poll for its completion. else if (rc == -1 && errno == EINPROGRESS) { _handle = add_fd (_s); set_pollout (_handle); _socket->event_connect_delayed ( make_unconnected_connect_endpoint_pair (_endpoint), zmq_errno ()); // add userspace connect timeout add_connect_timer (); } // Handle any other error condition by eventual reconnect. else { if (_s != retired_fd) close (); add_reconnect_timer (); } } void zmq::tcp_connecter_t::add_connect_timer () { if (options.connect_timeout > 0) { add_timer (options.connect_timeout, connect_timer_id); _connect_timer_started = true; } } int zmq::tcp_connecter_t::open () { zmq_assert (_s == retired_fd); // Resolve the address if (_addr->resolved.tcp_addr != NULL) { LIBZMQ_DELETE (_addr->resolved.tcp_addr); } _addr->resolved.tcp_addr = new (std::nothrow) tcp_address_t (); alloc_assert (_addr->resolved.tcp_addr); _s = tcp_open_socket (_addr->address.c_str (), options, false, true, _addr->resolved.tcp_addr); if (_s == retired_fd) { // TODO we should emit some event in this case! 
LIBZMQ_DELETE (_addr->resolved.tcp_addr); return -1; } zmq_assert (_addr->resolved.tcp_addr != NULL); // Set the socket to non-blocking mode so that we get async connect(). unblock_socket (_s); const tcp_address_t *const tcp_addr = _addr->resolved.tcp_addr; int rc; // Set a source address for conversations if (tcp_addr->has_src_addr ()) { // Allow reusing of the address, to connect to different servers // using the same source port on the client. int flag = 1; #ifdef ZMQ_HAVE_WINDOWS rc = setsockopt (_s, SOL_SOCKET, SO_REUSEADDR, reinterpret_cast<const char *> (&flag), sizeof (int)); wsa_assert (rc != SOCKET_ERROR); #elif defined ZMQ_HAVE_VXWORKS rc = setsockopt (_s, SOL_SOCKET, SO_REUSEADDR, (char *) &flag, sizeof (int)); errno_assert (rc == 0); #else rc = setsockopt (_s, SOL_SOCKET, SO_REUSEADDR, &flag, sizeof (int)); errno_assert (rc == 0); #endif #if defined ZMQ_HAVE_VXWORKS rc = ::bind (_s, (sockaddr *) tcp_addr->src_addr (), tcp_addr->src_addrlen ()); #else rc = ::bind (_s, tcp_addr->src_addr (), tcp_addr->src_addrlen ()); #endif if (rc == -1) return -1; } // Connect to the remote peer. #if defined ZMQ_HAVE_VXWORKS rc = ::connect (_s, (sockaddr *) tcp_addr->addr (), tcp_addr->addrlen ()); #else rc = ::connect (_s, tcp_addr->addr (), tcp_addr->addrlen ()); #endif // Connect was successful immediately. if (rc == 0) { return 0; } // Translate error codes indicating asynchronous connect has been // launched to a uniform EINPROGRESS. #ifdef ZMQ_HAVE_WINDOWS const int last_error = WSAGetLastError (); if (last_error == WSAEINPROGRESS || last_error == WSAEWOULDBLOCK) errno = EINPROGRESS; else errno = wsa_error_to_errno (last_error); #else if (errno == EINTR) errno = EINPROGRESS; #endif return -1; } zmq::fd_t zmq::tcp_connecter_t::connect () { // Async connect has finished. Check whether an error occurred int err = 0; #if defined ZMQ_HAVE_HPUX || defined ZMQ_HAVE_VXWORKS int len = sizeof err; #else socklen_t len = sizeof err; #endif const int rc = getsockopt (_s, SOL_SOCKET, SO_ERROR, reinterpret_cast<char *> (&err), &len); // Assert if the error was caused by 0MQ bug. // Networking problems are OK. No need to assert. #ifdef ZMQ_HAVE_WINDOWS zmq_assert (rc == 0); if (err != 0) { if (err == WSAEBADF || err == WSAENOPROTOOPT || err == WSAENOTSOCK || err == WSAENOBUFS) { wsa_assert_no (err); } errno = wsa_error_to_errno (err); return retired_fd; } #else // Following code should handle both Berkeley-derived socket // implementations and Solaris. if (rc == -1) err = errno; if (err != 0) { errno = err; #if !defined(TARGET_OS_IPHONE) || !TARGET_OS_IPHONE errno_assert (errno != EBADF && errno != ENOPROTOOPT && errno != ENOTSOCK && errno != ENOBUFS); #else errno_assert (errno != ENOPROTOOPT && errno != ENOTSOCK && errno != ENOBUFS); #endif return retired_fd; } #endif // Return the newly connected socket. const fd_t result = _s; _s = retired_fd; return result; } bool zmq::tcp_connecter_t::tune_socket (const fd_t fd_) { const int rc = tune_tcp_socket (fd_) | tune_tcp_keepalives ( fd_, options.tcp_keepalive, options.tcp_keepalive_cnt, options.tcp_keepalive_idle, options.tcp_keepalive_intvl) | tune_tcp_maxrt (fd_, options.tcp_maxrt); return rc == 0; }
sophomore_public/libzmq
src/tcp_connecter.cpp
C++
gpl-3.0
8,007
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __TCP_CONNECTER_HPP_INCLUDED__ #define __TCP_CONNECTER_HPP_INCLUDED__ #include "fd.hpp" #include "stdint.hpp" #include "stream_connecter_base.hpp" namespace zmq { class tcp_connecter_t ZMQ_FINAL : public stream_connecter_base_t { public: // If 'delayed_start' is true connecter first waits for a while, // then starts connection process. tcp_connecter_t (zmq::io_thread_t *io_thread_, zmq::session_base_t *session_, const options_t &options_, address_t *addr_, bool delayed_start_); ~tcp_connecter_t (); private: // ID of the timer used to check the connect timeout, must be different from stream_connecter_base_t::reconnect_timer_id. enum { connect_timer_id = 2 }; // Handlers for incoming commands. void process_term (int linger_); // Handlers for I/O events. void out_event (); void timer_event (int id_); // Internal function to start the actual connection establishment. void start_connecting (); // Internal function to add a connect timer void add_connect_timer (); // Open TCP connecting socket. Returns -1 in case of error, // 0 if connect was successful immediately. Returns -1 with // EAGAIN errno if async connect was launched. int open (); // Get the file descriptor of newly created connection. Returns // retired_fd if the connection was unsuccessful. fd_t connect (); // Tunes a connected socket. bool tune_socket (fd_t fd_); // True iff a timer has been started. bool _connect_timer_started; ZMQ_NON_COPYABLE_NOR_MOVABLE (tcp_connecter_t) }; } #endif
sophomore_public/libzmq
src/tcp_connecter.hpp
C++
gpl-3.0
1,755
/* SPDX-License-Identifier: MPL-2.0 */ #include "precompiled.hpp" #include <new> #include <string> #include <stdio.h> #include "tcp_listener.hpp" #include "io_thread.hpp" #include "config.hpp" #include "err.hpp" #include "ip.hpp" #include "tcp.hpp" #include "socket_base.hpp" #include "address.hpp" #ifndef ZMQ_HAVE_WINDOWS #include <unistd.h> #include <sys/socket.h> #include <arpa/inet.h> #include <netinet/tcp.h> #include <netinet/in.h> #include <netdb.h> #include <fcntl.h> #ifdef ZMQ_HAVE_VXWORKS #include <sockLib.h> #endif #endif #ifdef ZMQ_HAVE_OPENVMS #include <ioctl.h> #endif zmq::tcp_listener_t::tcp_listener_t (io_thread_t *io_thread_, socket_base_t *socket_, const options_t &options_) : stream_listener_base_t (io_thread_, socket_, options_) { } void zmq::tcp_listener_t::in_event () { const fd_t fd = accept (); // If connection was reset by the peer in the meantime, just ignore it. // TODO: Handle specific errors like ENFILE/EMFILE etc. if (fd == retired_fd) { _socket->event_accept_failed ( make_unconnected_bind_endpoint_pair (_endpoint), zmq_errno ()); return; } int rc = tune_tcp_socket (fd); rc = rc | tune_tcp_keepalives ( fd, options.tcp_keepalive, options.tcp_keepalive_cnt, options.tcp_keepalive_idle, options.tcp_keepalive_intvl); rc = rc | tune_tcp_maxrt (fd, options.tcp_maxrt); if (rc != 0) { _socket->event_accept_failed ( make_unconnected_bind_endpoint_pair (_endpoint), zmq_errno ()); return; } // Create the engine object for this connection. create_engine (fd); } std::string zmq::tcp_listener_t::get_socket_name (zmq::fd_t fd_, socket_end_t socket_end_) const { return zmq::get_socket_name<tcp_address_t> (fd_, socket_end_); } int zmq::tcp_listener_t::create_socket (const char *addr_) { _s = tcp_open_socket (addr_, options, true, true, &_address); if (_s == retired_fd) { return -1; } // TODO why is this only done for the listener? make_socket_noninheritable (_s); // Allow reusing of the address. int flag = 1; int rc; #ifdef ZMQ_HAVE_WINDOWS // TODO this was changed for Windows from SO_REUSEADDRE to // SE_EXCLUSIVEADDRUSE by 0ab65324195ad70205514d465b03d851a6de051c, // so the comment above is no longer correct; also, now the settings are // different between listener and connecter with a src address. // is this intentional? rc = setsockopt (_s, SOL_SOCKET, SO_EXCLUSIVEADDRUSE, reinterpret_cast<const char *> (&flag), sizeof (int)); wsa_assert (rc != SOCKET_ERROR); #elif defined ZMQ_HAVE_VXWORKS rc = setsockopt (_s, SOL_SOCKET, SO_REUSEADDR, (char *) &flag, sizeof (int)); errno_assert (rc == 0); #else rc = setsockopt (_s, SOL_SOCKET, SO_REUSEADDR, &flag, sizeof (int)); errno_assert (rc == 0); #endif // Bind the socket to the network interface and port. #if defined ZMQ_HAVE_VXWORKS rc = bind (_s, (sockaddr *) _address.addr (), _address.addrlen ()); #else rc = bind (_s, _address.addr (), _address.addrlen ()); #endif #ifdef ZMQ_HAVE_WINDOWS if (rc == SOCKET_ERROR) { errno = wsa_error_to_errno (WSAGetLastError ()); goto error; } #else if (rc != 0) goto error; #endif // Listen for incoming connections. 
rc = listen (_s, options.backlog); #ifdef ZMQ_HAVE_WINDOWS if (rc == SOCKET_ERROR) { errno = wsa_error_to_errno (WSAGetLastError ()); goto error; } #else if (rc != 0) goto error; #endif return 0; error: const int err = errno; close (); errno = err; return -1; } int zmq::tcp_listener_t::set_local_address (const char *addr_) { if (options.use_fd != -1) { // in this case, the addr_ passed is not used and ignored, since the // socket was already created by the application _s = options.use_fd; } else { if (create_socket (addr_) == -1) return -1; } _endpoint = get_socket_name (_s, socket_end_local); _socket->event_listening (make_unconnected_bind_endpoint_pair (_endpoint), _s); return 0; } zmq::fd_t zmq::tcp_listener_t::accept () { // The situation where connection cannot be accepted due to insufficient // resources is considered valid and treated by ignoring the connection. // Accept one connection and deal with different failure modes. zmq_assert (_s != retired_fd); struct sockaddr_storage ss; memset (&ss, 0, sizeof (ss)); #if defined ZMQ_HAVE_HPUX || defined ZMQ_HAVE_VXWORKS int ss_len = sizeof (ss); #else socklen_t ss_len = sizeof (ss); #endif #if defined ZMQ_HAVE_SOCK_CLOEXEC && defined HAVE_ACCEPT4 fd_t sock = ::accept4 (_s, reinterpret_cast<struct sockaddr *> (&ss), &ss_len, SOCK_CLOEXEC); #else const fd_t sock = ::accept (_s, reinterpret_cast<struct sockaddr *> (&ss), &ss_len); #endif if (sock == retired_fd) { #if defined ZMQ_HAVE_WINDOWS const int last_error = WSAGetLastError (); wsa_assert (last_error == WSAEWOULDBLOCK || last_error == WSAECONNRESET || last_error == WSAEMFILE || last_error == WSAENOBUFS); #elif defined ZMQ_HAVE_ANDROID errno_assert (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR || errno == ECONNABORTED || errno == EPROTO || errno == ENOBUFS || errno == ENOMEM || errno == EMFILE || errno == ENFILE || errno == EINVAL); #else errno_assert (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR || errno == ECONNABORTED || errno == EPROTO || errno == ENOBUFS || errno == ENOMEM || errno == EMFILE || errno == ENFILE); #endif return retired_fd; } make_socket_noninheritable (sock); if (!options.tcp_accept_filters.empty ()) { bool matched = false; for (options_t::tcp_accept_filters_t::size_type i = 0, size = options.tcp_accept_filters.size (); i != size; ++i) { if (options.tcp_accept_filters[i].match_address ( reinterpret_cast<struct sockaddr *> (&ss), ss_len)) { matched = true; break; } } if (!matched) { #ifdef ZMQ_HAVE_WINDOWS const int rc = closesocket (sock); wsa_assert (rc != SOCKET_ERROR); #else int rc = ::close (sock); errno_assert (rc == 0); #endif return retired_fd; } } if (zmq::set_nosigpipe (sock)) { #ifdef ZMQ_HAVE_WINDOWS const int rc = closesocket (sock); wsa_assert (rc != SOCKET_ERROR); #else int rc = ::close (sock); errno_assert (rc == 0); #endif return retired_fd; } // Set the IP Type-Of-Service priority for this client socket if (options.tos != 0) set_ip_type_of_service (sock, options.tos); // Set the protocol-defined priority for this client socket if (options.priority != 0) set_socket_priority (sock, options.priority); return sock; }
sophomore_public/libzmq
src/tcp_listener.cpp
C++
gpl-3.0
7,401
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __ZMQ_TCP_LISTENER_HPP_INCLUDED__ #define __ZMQ_TCP_LISTENER_HPP_INCLUDED__ #include "fd.hpp" #include "tcp_address.hpp" #include "stream_listener_base.hpp" namespace zmq { class tcp_listener_t ZMQ_FINAL : public stream_listener_base_t { public: tcp_listener_t (zmq::io_thread_t *io_thread_, zmq::socket_base_t *socket_, const options_t &options_); // Set address to listen on. int set_local_address (const char *addr_); protected: std::string get_socket_name (fd_t fd_, socket_end_t socket_end_) const; private: // Handlers for I/O events. void in_event (); // Accept the new connection. Returns the file descriptor of the // newly created connection. The function may return retired_fd // if the connection was dropped while waiting in the listen backlog // or was denied because of accept filters. fd_t accept (); int create_socket (const char *addr_); // Address to listen on. tcp_address_t _address; ZMQ_NON_COPYABLE_NOR_MOVABLE (tcp_listener_t) }; } #endif
sophomore_public/libzmq
src/tcp_listener.hpp
C++
gpl-3.0
1,134
/* SPDX-License-Identifier: MPL-2.0 */ #include "precompiled.hpp" #include "macros.hpp" #include "thread.hpp" #include "err.hpp" #ifdef ZMQ_HAVE_WINDOWS #include <winnt.h> #endif #ifdef __MINGW32__ #include "pthread.h" #endif bool zmq::thread_t::get_started () const { return _started; } #ifdef ZMQ_HAVE_WINDOWS extern "C" { #if defined _WIN32_WCE static DWORD thread_routine (LPVOID arg_) #else static unsigned int __stdcall thread_routine (void *arg_) #endif { zmq::thread_t *self = static_cast<zmq::thread_t *> (arg_); self->applyThreadName (); self->_tfn (self->_arg); return 0; } } void zmq::thread_t::start (thread_fn *tfn_, void *arg_, const char *name_) { _tfn = tfn_; _arg = arg_; if (name_) strncpy (_name, name_, sizeof (_name) - 1); // set default stack size to 4MB to avoid std::map stack overflow on x64 unsigned int stack = 0; #if defined _WIN64 stack = 0x400000; #endif #if defined _WIN32_WCE _descriptor = (HANDLE) CreateThread (NULL, stack, &::thread_routine, this, 0, &_thread_id); #else _descriptor = (HANDLE) _beginthreadex (NULL, stack, &::thread_routine, this, 0, &_thread_id); #endif win_assert (_descriptor != NULL); _started = true; } bool zmq::thread_t::is_current_thread () const { return GetCurrentThreadId () == _thread_id; } void zmq::thread_t::stop () { if (_started) { const DWORD rc = WaitForSingleObject (_descriptor, INFINITE); win_assert (rc != WAIT_FAILED); const BOOL rc2 = CloseHandle (_descriptor); win_assert (rc2 != 0); } } void zmq::thread_t::setSchedulingParameters ( int priority_, int scheduling_policy_, const std::set<int> &affinity_cpus_) { // not implemented LIBZMQ_UNUSED (priority_); LIBZMQ_UNUSED (scheduling_policy_); LIBZMQ_UNUSED (affinity_cpus_); } void zmq::thread_t:: applySchedulingParameters () // to be called in secondary thread context { // not implemented } #ifdef _MSC_VER namespace { #pragma pack(push, 8) struct thread_info_t { DWORD _type; LPCSTR _name; DWORD _thread_id; DWORD _flags; }; #pragma pack(pop) } #endif void zmq::thread_t:: applyThreadName () // to be called in secondary thread context { if (!_name[0] || !IsDebuggerPresent ()) return; #ifdef _MSC_VER thread_info_t thread_info; thread_info._type = 0x1000; thread_info._name = _name; thread_info._thread_id = -1; thread_info._flags = 0; __try { const DWORD MS_VC_EXCEPTION = 0x406D1388; RaiseException (MS_VC_EXCEPTION, 0, sizeof (thread_info) / sizeof (ULONG_PTR), (ULONG_PTR *) &thread_info); } __except (EXCEPTION_CONTINUE_EXECUTION) { } #elif defined(__MINGW32__) int rc = pthread_setname_np (pthread_self (), _name); if (rc) return; #else // not implemented #endif } #elif defined ZMQ_HAVE_VXWORKS extern "C" { static void *thread_routine (void *arg_) { zmq::thread_t *self = (zmq::thread_t *) arg_; self->applySchedulingParameters (); self->_tfn (self->_arg); return NULL; } } void zmq::thread_t::start (thread_fn *tfn_, void *arg_, const char *name_) { LIBZMQ_UNUSED (name_); _tfn = tfn_; _arg = arg_; _descriptor = taskSpawn (NULL, DEFAULT_PRIORITY, DEFAULT_OPTIONS, DEFAULT_STACK_SIZE, (FUNCPTR) thread_routine, (int) this, 0, 0, 0, 0, 0, 0, 0, 0, 0); if (_descriptor != NULL || _descriptor > 0) _started = true; } void zmq::thread_t::stop () { if (_started) while ((_descriptor != NULL || _descriptor > 0) && taskIdVerify (_descriptor) == 0) { } } bool zmq::thread_t::is_current_thread () const { return taskIdSelf () == _descriptor; } void zmq::thread_t::setSchedulingParameters ( int priority_, int schedulingPolicy_, const std::set<int> &affinity_cpus_) { _thread_priority = priority_; _thread_sched_policy 
= schedulingPolicy_; _thread_affinity_cpus = affinity_cpus_; } void zmq::thread_t:: applySchedulingParameters () // to be called in secondary thread context { int priority = (_thread_priority >= 0 ? _thread_priority : DEFAULT_PRIORITY); priority = (priority < UCHAR_MAX ? priority : DEFAULT_PRIORITY); if (_descriptor != NULL || _descriptor > 0) { taskPrioritySet (_descriptor, priority); } } void zmq::thread_t:: applyThreadName () // to be called in secondary thread context { // not implemented } #else #include <signal.h> #include <unistd.h> #include <sys/time.h> #include <sys/resource.h> extern "C" { static void *thread_routine (void *arg_) { #if !defined ZMQ_HAVE_OPENVMS && !defined ZMQ_HAVE_ANDROID // Following code will guarantee more predictable latencies as it'll // disallow any signal handling in the I/O thread. sigset_t signal_set; int rc = sigfillset (&signal_set); errno_assert (rc == 0); rc = pthread_sigmask (SIG_BLOCK, &signal_set, NULL); posix_assert (rc); #endif zmq::thread_t *self = (zmq::thread_t *) arg_; self->applySchedulingParameters (); self->applyThreadName (); self->_tfn (self->_arg); return NULL; } } void zmq::thread_t::start (thread_fn *tfn_, void *arg_, const char *name_) { _tfn = tfn_; _arg = arg_; if (name_) strncpy (_name, name_, sizeof (_name) - 1); int rc = pthread_create (&_descriptor, NULL, thread_routine, this); posix_assert (rc); _started = true; } void zmq::thread_t::stop () { if (_started) { int rc = pthread_join (_descriptor, NULL); posix_assert (rc); } } bool zmq::thread_t::is_current_thread () const { return bool (pthread_equal (pthread_self (), _descriptor)); } void zmq::thread_t::setSchedulingParameters ( int priority_, int scheduling_policy_, const std::set<int> &affinity_cpus_) { _thread_priority = priority_; _thread_sched_policy = scheduling_policy_; _thread_affinity_cpus = affinity_cpus_; } void zmq::thread_t:: applySchedulingParameters () // to be called in secondary thread context { #if defined _POSIX_THREAD_PRIORITY_SCHEDULING \ && _POSIX_THREAD_PRIORITY_SCHEDULING >= 0 int policy = 0; struct sched_param param; #if _POSIX_THREAD_PRIORITY_SCHEDULING == 0 \ && defined _SC_THREAD_PRIORITY_SCHEDULING if (sysconf (_SC_THREAD_PRIORITY_SCHEDULING) < 0) { return; } #endif int rc = pthread_getschedparam (pthread_self (), &policy, &param); posix_assert (rc); if (_thread_sched_policy != ZMQ_THREAD_SCHED_POLICY_DFLT) { policy = _thread_sched_policy; } /* Quoting docs: "Linux allows the static priority range 1 to 99 for the SCHED_FIFO and SCHED_RR policies, and the priority 0 for the remaining policies." Other policies may use the "nice value" in place of the priority: */ bool use_nice_instead_priority = (policy != SCHED_FIFO) && (policy != SCHED_RR); if (use_nice_instead_priority) param.sched_priority = 0; // this is the only supported priority for most scheduling policies else if (_thread_priority != ZMQ_THREAD_PRIORITY_DFLT) param.sched_priority = _thread_priority; // user should provide a value between 1 and 99 #ifdef __NetBSD__ if (policy == SCHED_OTHER) param.sched_priority = -1; #endif rc = pthread_setschedparam (pthread_self (), policy, &param); #if defined(__FreeBSD_kernel__) || defined(__FreeBSD__) // If this feature is unavailable at run-time, don't abort. 
if (rc == ENOSYS) return; #endif posix_assert (rc); #if !defined ZMQ_HAVE_VXWORKS if (use_nice_instead_priority && _thread_priority != ZMQ_THREAD_PRIORITY_DFLT && _thread_priority > 0) { // assume the user wants to decrease the thread's nice value // i.e., increase the chance of this thread being scheduled: try setting that to // maximum priority. rc = nice (-20); errno_assert (rc != -1); // IMPORTANT: EPERM is typically returned for unprivileged processes: that's because // CAP_SYS_NICE capability is required or RLIMIT_NICE resource limit should be changed to avoid EPERM! } #endif #ifdef ZMQ_HAVE_PTHREAD_SET_AFFINITY if (!_thread_affinity_cpus.empty ()) { cpu_set_t cpuset; CPU_ZERO (&cpuset); for (std::set<int>::const_iterator it = _thread_affinity_cpus.begin (), end = _thread_affinity_cpus.end (); it != end; it++) { CPU_SET ((int) (*it), &cpuset); } rc = pthread_setaffinity_np (pthread_self (), sizeof (cpu_set_t), &cpuset); posix_assert (rc); } #endif #endif } void zmq::thread_t:: applyThreadName () // to be called in secondary thread context { /* The thread name is a cosmetic string, added to ease debugging of * multi-threaded applications. It is not a big issue if this value * can not be set for any reason (such as Permission denied in some * cases where the application changes its EUID, etc.) The value of * "int rc" is retained where available, to help debuggers stepping * through code to see its value - but otherwise it is ignored. */ if (!_name[0]) return; /* Fails with permission denied on Android 5/6 */ #if defined(ZMQ_HAVE_ANDROID) return; #endif #if defined(ZMQ_HAVE_PTHREAD_SETNAME_1) int rc = pthread_setname_np (_name); if (rc) return; #elif defined(ZMQ_HAVE_PTHREAD_SETNAME_2) int rc = pthread_setname_np (pthread_self (), _name); if (rc) return; #elif defined(ZMQ_HAVE_PTHREAD_SETNAME_3) int rc = pthread_setname_np (pthread_self (), _name, NULL); if (rc) return; #elif defined(ZMQ_HAVE_PTHREAD_SET_NAME) pthread_set_name_np (pthread_self (), _name); #endif } #endif
sophomore_public/libzmq
src/thread.cpp
C++
gpl-3.0
10,124
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __ZMQ_THREAD_HPP_INCLUDED__ #define __ZMQ_THREAD_HPP_INCLUDED__ #if defined ZMQ_HAVE_VXWORKS #include <vxWorks.h> #include <taskLib.h> #elif !defined ZMQ_HAVE_WINDOWS #include <pthread.h> #endif #include <set> #include <cstring> namespace zmq { typedef void (thread_fn) (void *); // Class encapsulating OS thread. Thread initiation/termination is done // using special functions rather than in constructor/destructor so that // thread isn't created during object construction by accident, causing // newly created thread to access half-initialised object. Same applies // to the destruction process: Thread should be terminated before object // destruction begins, otherwise it can access half-destructed object. class thread_t { public: thread_t () : _tfn (NULL), _arg (NULL), _started (false), _thread_priority (ZMQ_THREAD_PRIORITY_DFLT), _thread_sched_policy (ZMQ_THREAD_SCHED_POLICY_DFLT) { memset (_name, 0, sizeof (_name)); } #ifdef ZMQ_HAVE_VXWORKS ~thread_t () { if (descriptor != NULL || descriptor > 0) { taskDelete (descriptor); } } #endif // Creates OS thread. 'tfn' is main thread function. It'll be passed // 'arg' as an argument. // Name is 16 characters max including terminating NUL. Thread naming is // implemented only for pthread, and windows when a debugger is attached. void start (thread_fn *tfn_, void *arg_, const char *name_); // Returns whether the thread was started, i.e. start was called. bool get_started () const; // Returns whether the executing thread is the thread represented by the // thread object. bool is_current_thread () const; // Waits for thread termination. void stop (); // Sets the thread scheduling parameters. Only implemented for // pthread. Has no effect on other platforms. void setSchedulingParameters (int priority_, int scheduling_policy_, const std::set<int> &affinity_cpus_); // These are internal members. They should be private, however then // they would not be accessible from the main C routine of the thread. void applySchedulingParameters (); void applyThreadName (); thread_fn *_tfn; void *_arg; char _name[16]; private: bool _started; #ifdef ZMQ_HAVE_WINDOWS HANDLE _descriptor; #if defined _WIN32_WCE DWORD _thread_id; #else unsigned int _thread_id; #endif #elif defined ZMQ_HAVE_VXWORKS int _descriptor; enum { DEFAULT_PRIORITY = 100, DEFAULT_OPTIONS = 0, DEFAULT_STACK_SIZE = 4000 }; #else pthread_t _descriptor; #endif // Thread scheduling parameters. int _thread_priority; int _thread_sched_policy; std::set<int> _thread_affinity_cpus; ZMQ_NON_COPYABLE_NOR_MOVABLE (thread_t) }; } #endif
sophomore_public/libzmq
src/thread.hpp
C++
gpl-3.0
2,980
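// Illustrative usage sketch (not part of the upstream sources): the intended
// start()/stop() lifecycle of zmq::thread_t as documented in thread.hpp above.
// Assumes the internal header is available; worker() and run_worker() are
// hypothetical names used only for this example.
#include "thread.hpp"

#include <cstdio>

static void worker (void *arg_)
{
    // The argument handed to start() arrives here unchanged.
    std::printf ("worker got %d\n", *static_cast<int *> (arg_));
}

static void run_worker ()
{
    int value = 42;
    zmq::thread_t t;

    // The name is cosmetic and limited to 16 characters including the NUL.
    t.start (worker, &value, "example");

    // stop() joins the thread, so `value` outlives the worker.
    t.stop ();
}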
/* SPDX-License-Identifier: MPL-2.0 */ #include "precompiled.hpp" #include "timers.hpp" #include "err.hpp" #include <algorithm> zmq::timers_t::timers_t () : _tag (0xCAFEDADA), _next_timer_id (0) { } zmq::timers_t::~timers_t () { // Mark the timers as dead _tag = 0xdeadbeef; } bool zmq::timers_t::check_tag () const { return _tag == 0xCAFEDADA; } int zmq::timers_t::add (size_t interval_, timers_timer_fn handler_, void *arg_) { if (handler_ == NULL) { errno = EFAULT; return -1; } uint64_t when = _clock.now_ms () + interval_; timer_t timer = {++_next_timer_id, interval_, handler_, arg_}; _timers.insert (timersmap_t::value_type (when, timer)); return timer.timer_id; } struct zmq::timers_t::match_by_id { match_by_id (int timer_id_) : _timer_id (timer_id_) {} bool operator() (timersmap_t::value_type const &entry_) const { return entry_.second.timer_id == _timer_id; } private: int _timer_id; }; int zmq::timers_t::cancel (int timer_id_) { // check first if timer exists at all if (_timers.end () == std::find_if (_timers.begin (), _timers.end (), match_by_id (timer_id_))) { errno = EINVAL; return -1; } // check if timer was already canceled if (_cancelled_timers.count (timer_id_)) { errno = EINVAL; return -1; } _cancelled_timers.insert (timer_id_); return 0; } int zmq::timers_t::set_interval (int timer_id_, size_t interval_) { const timersmap_t::iterator end = _timers.end (); const timersmap_t::iterator it = std::find_if (_timers.begin (), end, match_by_id (timer_id_)); if (it != end) { timer_t timer = it->second; timer.interval = interval_; uint64_t when = _clock.now_ms () + interval_; _timers.erase (it); _timers.insert (timersmap_t::value_type (when, timer)); return 0; } errno = EINVAL; return -1; } int zmq::timers_t::reset (int timer_id_) { const timersmap_t::iterator end = _timers.end (); const timersmap_t::iterator it = std::find_if (_timers.begin (), end, match_by_id (timer_id_)); if (it != end) { timer_t timer = it->second; uint64_t when = _clock.now_ms () + timer.interval; _timers.erase (it); _timers.insert (timersmap_t::value_type (when, timer)); return 0; } errno = EINVAL; return -1; } long zmq::timers_t::timeout () { const uint64_t now = _clock.now_ms (); long res = -1; const timersmap_t::iterator begin = _timers.begin (); const timersmap_t::iterator end = _timers.end (); timersmap_t::iterator it = begin; for (; it != end; ++it) { if (0 == _cancelled_timers.erase (it->second.timer_id)) { // Live timer, lets return the timeout res = std::max (static_cast<long> (it->first - now), 0l); break; } } // Remove timed-out timers _timers.erase (begin, it); return res; } int zmq::timers_t::execute () { const uint64_t now = _clock.now_ms (); const timersmap_t::iterator begin = _timers.begin (); const timersmap_t::iterator end = _timers.end (); timersmap_t::iterator it = _timers.begin (); for (; it != end; ++it) { if (0 == _cancelled_timers.erase (it->second.timer_id)) { // Timer is not cancelled // Map is ordered, if we have to wait for current timer we can stop. if (it->first > now) break; const timer_t &timer = it->second; timer.handler (timer.timer_id, timer.arg); _timers.insert ( timersmap_t::value_type (now + timer.interval, timer)); } } _timers.erase (begin, it); return 0; }
sophomore_public/libzmq
src/timers.cpp
C++
gpl-3.0
3,834
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __ZMQ_TIMERS_HPP_INCLUDED__ #define __ZMQ_TIMERS_HPP_INCLUDED__ #include <stddef.h> #include <map> #include <set> #include "clock.hpp" namespace zmq { typedef void (timers_timer_fn) (int timer_id_, void *arg_); class timers_t { public: timers_t (); ~timers_t (); // Add timer to the set, timer repeats forever, or until cancel is called. // Returns a timer_id that is used to cancel the timer. // Returns -1 if there was an error. int add (size_t interval_, timers_timer_fn handler_, void *arg_); // Set the interval of the timer. // This method is slow, cancelling exsting and adding a new timer yield better performance. // Returns 0 on success and -1 on error. int set_interval (int timer_id_, size_t interval_); // Reset the timer. // This method is slow, cancelling exsting and adding a new timer yield better performance. // Returns 0 on success and -1 on error. int reset (int timer_id_); // Cancel a timer. // Returns 0 on success and -1 on error. int cancel (int timer_id_); // Returns the time in millisecond until the next timer. // Returns -1 if no timer is due. long timeout (); // Execute timers. // Return 0 if all succeed and -1 if error. int execute (); // Return false if object is not a timers class. bool check_tag () const; private: // Used to check whether the object is a timers class. uint32_t _tag; int _next_timer_id; // Clock instance. clock_t _clock; typedef struct timer_t { int timer_id; size_t interval; timers_timer_fn *handler; void *arg; } timer_t; typedef std::multimap<uint64_t, timer_t> timersmap_t; timersmap_t _timers; typedef std::set<int> cancelled_timers_t; cancelled_timers_t _cancelled_timers; struct match_by_id; ZMQ_NON_COPYABLE_NOR_MOVABLE (timers_t) }; } #endif
sophomore_public/libzmq
src/timers.hpp
C++
gpl-3.0
1,988
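// Illustrative usage sketch (not part of the upstream sources): driving
// zmq::timers_t by hand through the add()/timeout()/execute() contract
// documented in timers.hpp above. The handler, the loop bound and the sleep
// calls are assumptions made for the example only.
#include "timers.hpp"

#include <cstdio>
#ifdef _WIN32
#include <windows.h>
#else
#include <unistd.h>
#endif

static void on_timer (int timer_id_, void *arg_)
{
    std::printf ("timer %d fired, counter=%d\n", timer_id_,
                 ++*static_cast<int *> (arg_));
}

static void run_timers ()
{
    zmq::timers_t timers;
    int counter = 0;

    // A repeating 100 ms timer; the returned id is needed for cancel ().
    const int id = timers.add (100, on_timer, &counter);

    while (counter < 5) {
        const long wait_ms = timers.timeout (); // -1 means no timer is pending
        if (wait_ms < 0)
            break;
        if (wait_ms > 0) {
#ifdef _WIN32
            Sleep (static_cast<DWORD> (wait_ms));
#else
            usleep (static_cast<useconds_t> (wait_ms) * 1000);
#endif
        }
        timers.execute (); // runs every timer that is due by now
    }

    timers.cancel (id);
}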
/* SPDX-License-Identifier: MPL-2.0 */ #include "precompiled.hpp" #include "tipc_address.hpp" #if defined ZMQ_HAVE_TIPC #include "err.hpp" #include <string> #include <sstream> zmq::tipc_address_t::tipc_address_t () { memset (&address, 0, sizeof address); _random = false; } zmq::tipc_address_t::tipc_address_t (const sockaddr *sa_, socklen_t sa_len_) { zmq_assert (sa_ && sa_len_ > 0); memset (&address, 0, sizeof address); if (sa_->sa_family == AF_TIPC) memcpy (&address, sa_, sa_len_); _random = false; } void zmq::tipc_address_t::set_random () { _random = true; } bool zmq::tipc_address_t::is_random () const { return _random; } bool zmq::tipc_address_t::is_service () const { if (address.addrtype == TIPC_ADDR_ID) return false; return true; } int zmq::tipc_address_t::resolve (const char *name_) { unsigned int type = 0; unsigned int lower = 0; unsigned int upper = 0; unsigned int ref = 0; unsigned int z = 1, c = 0, n = 0; char eof; const char *domain; int res; if (strncmp (name_, "<*>", 3) == 0) { set_random (); address.family = AF_TIPC; address.addrtype = TIPC_ADDR_ID; address.addr.id.node = 0; address.addr.id.ref = 0; address.scope = 0; return 0; } res = sscanf (name_, "{%u,%u,%u}", &type, &lower, &upper); /* Fetch optional domain suffix. */ if ((domain = strchr (name_, '@'))) { if (sscanf (domain, "@%u.%u.%u%c", &z, &c, &n, &eof) != 3) return EINVAL; } if (res == 3) { if (type < TIPC_RESERVED_TYPES || upper < lower) return EINVAL; address.family = AF_TIPC; address.addrtype = TIPC_ADDR_NAMESEQ; address.addr.nameseq.type = type; address.addr.nameseq.lower = lower; address.addr.nameseq.upper = upper; address.scope = TIPC_ZONE_SCOPE; return 0; } if (res == 2 && type > TIPC_RESERVED_TYPES) { address.family = AF_TIPC; address.addrtype = TIPC_ADDR_NAME; address.addr.name.name.type = type; address.addr.name.name.instance = lower; address.addr.name.domain = tipc_addr (z, c, n); address.scope = 0; return 0; } else if (res == 0) { res = sscanf (name_, "<%u.%u.%u:%u>", &z, &c, &n, &ref); if (res == 4) { address.family = AF_TIPC; address.addrtype = TIPC_ADDR_ID; address.addr.id.node = tipc_addr (z, c, n); address.addr.id.ref = ref; address.scope = 0; return 0; } } return EINVAL; } int zmq::tipc_address_t::to_string (std::string &addr_) const { if (address.family != AF_TIPC) { addr_.clear (); return -1; } std::stringstream s; if (address.addrtype == TIPC_ADDR_NAMESEQ || address.addrtype == TIPC_ADDR_NAME) { s << "tipc://" << "{" << address.addr.nameseq.type; s << ", " << address.addr.nameseq.lower; s << ", " << address.addr.nameseq.upper << "}"; addr_ = s.str (); } else if (address.addrtype == TIPC_ADDR_ID || is_random ()) { s << "tipc://" << "<" << tipc_zone (address.addr.id.node); s << "." << tipc_cluster (address.addr.id.node); s << "." << tipc_node (address.addr.id.node); s << ":" << address.addr.id.ref << ">"; addr_ = s.str (); } else { addr_.clear (); return -1; } return 0; } const sockaddr *zmq::tipc_address_t::addr () const { return (sockaddr *) &address; } socklen_t zmq::tipc_address_t::addrlen () const { return static_cast<socklen_t> (sizeof address); } #endif
sophomore_public/libzmq
src/tipc_address.cpp
C++
gpl-3.0
3,719
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __ZMQ_TIPC_ADDRESS_HPP_INCLUDED__ #define __ZMQ_TIPC_ADDRESS_HPP_INCLUDED__ #include <string> #include "platform.hpp" #if defined ZMQ_HAVE_TIPC #include <sys/socket.h> #if defined ZMQ_HAVE_VXWORKS #include <tipc/tipc.h> #else #include <linux/tipc.h> #endif namespace zmq { class tipc_address_t { public: tipc_address_t (); tipc_address_t (const sockaddr *sa, socklen_t sa_len); // This function sets up the address "{type, lower, upper}" for TIPC transport int resolve (const char *name); // The opposite to resolve() int to_string (std::string &addr_) const; // Handling different TIPC address types bool is_service () const; bool is_random () const; void set_random (); const sockaddr *addr () const; socklen_t addrlen () const; private: bool _random; struct sockaddr_tipc address; }; } #endif #endif
sophomore_public/libzmq
src/tipc_address.hpp
C++
gpl-3.0
925
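// Illustrative usage sketch (not part of the upstream sources): the TIPC
// address forms accepted by tipc_address_t::resolve() as described above.
// Only meaningful on builds where ZMQ_HAVE_TIPC is defined; the service type
// 5560 and the helper name are assumptions made for the example only.
#include "tipc_address.hpp"

#if defined ZMQ_HAVE_TIPC
#include <cstdio>
#include <string>

static void show_tipc_forms ()
{
    zmq::tipc_address_t service;

    // Service range form: {type, lower, upper}; resolve() returns 0 on success.
    if (service.resolve ("{5560,0,100}") == 0) {
        std::string text;
        service.to_string (text);
        std::printf ("service range: %s (is_service=%d)\n", text.c_str (),
                     static_cast<int> (service.is_service ()));
    }

    zmq::tipc_address_t random_id;

    // "<*>" asks for a random port identity, which is only valid for binding.
    if (random_id.resolve ("<*>") == 0)
        std::printf ("random identity requested: %d\n",
                     static_cast<int> (random_id.is_random ()));
}
#endif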
/* SPDX-License-Identifier: MPL-2.0 */ #include "precompiled.hpp" #include "tipc_connecter.hpp" #if defined ZMQ_HAVE_TIPC #include <new> #include <string> #include "io_thread.hpp" #include "platform.hpp" #include "random.hpp" #include "err.hpp" #include "ip.hpp" #include "address.hpp" #include "tipc_address.hpp" #include "session_base.hpp" #include <unistd.h> #include <sys/types.h> #include <sys/socket.h> #ifdef ZMQ_HAVE_VXWORKS #include <sockLib.h> #endif zmq::tipc_connecter_t::tipc_connecter_t (class io_thread_t *io_thread_, class session_base_t *session_, const options_t &options_, address_t *addr_, bool delayed_start_) : stream_connecter_base_t ( io_thread_, session_, options_, addr_, delayed_start_) { zmq_assert (_addr->protocol == "tipc"); } void zmq::tipc_connecter_t::out_event () { fd_t fd = connect (); rm_handle (); // Handle the error condition by attempt to reconnect. if (fd == retired_fd) { close (); add_reconnect_timer (); return; } create_engine (fd, get_socket_name<tipc_address_t> (fd, socket_end_local)); } void zmq::tipc_connecter_t::start_connecting () { // Open the connecting socket. int rc = open (); // Connect may succeed in synchronous manner. if (rc == 0) { _handle = add_fd (_s); out_event (); } // Connection establishment may be delayed. Poll for its completion. else if (rc == -1 && errno == EINPROGRESS) { _handle = add_fd (_s); set_pollout (_handle); _socket->event_connect_delayed ( make_unconnected_connect_endpoint_pair (_endpoint), zmq_errno ()); } // Handle any other error condition by eventual reconnect. else { if (_s != retired_fd) close (); add_reconnect_timer (); } } int zmq::tipc_connecter_t::open () { zmq_assert (_s == retired_fd); // Cannot connect to random tipc addresses if (_addr->resolved.tipc_addr->is_random ()) { errno = EINVAL; return -1; } // Create the socket. _s = open_socket (AF_TIPC, SOCK_STREAM, 0); if (_s == retired_fd) return -1; // Set the non-blocking flag. unblock_socket (_s); // Connect to the remote peer. #ifdef ZMQ_HAVE_VXWORKS int rc = ::connect (s, (sockaddr *) addr->resolved.tipc_addr->addr (), addr->resolved.tipc_addr->addrlen ()); #else int rc = ::connect (_s, _addr->resolved.tipc_addr->addr (), _addr->resolved.tipc_addr->addrlen ()); #endif // Connect was successful immediately. if (rc == 0) return 0; // Translate other error codes indicating asynchronous connect has been // launched to a uniform EINPROGRESS. if (rc == -1 && errno == EINTR) { errno = EINPROGRESS; return -1; } // Forward the error. return -1; } zmq::fd_t zmq::tipc_connecter_t::connect () { // Following code should handle both Berkeley-derived socket // implementations and Solaris. int err = 0; #ifdef ZMQ_HAVE_VXWORKS int len = sizeof (err); #else socklen_t len = sizeof (err); #endif int rc = getsockopt (_s, SOL_SOCKET, SO_ERROR, reinterpret_cast<char *> (&err), &len); if (rc == -1) err = errno; if (err != 0) { // Assert if the error was caused by 0MQ bug. // Networking problems are OK. No need to assert. errno = err; errno_assert (errno == ECONNREFUSED || errno == ECONNRESET || errno == ETIMEDOUT || errno == EHOSTUNREACH || errno == ENETUNREACH || errno == ENETDOWN); return retired_fd; } fd_t result = _s; _s = retired_fd; return result; } #endif
sophomore_public/libzmq
src/tipc_connecter.cpp
C++
gpl-3.0
3,956
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __TIPC_CONNECTER_HPP_INCLUDED__ #define __TIPC_CONNECTER_HPP_INCLUDED__ #include "platform.hpp" #if defined ZMQ_HAVE_TIPC #include "fd.hpp" #include "stream_connecter_base.hpp" namespace zmq { class tipc_connecter_t ZMQ_FINAL : public stream_connecter_base_t { public: // If 'delayed_start' is true connecter first waits for a while, // then starts connection process. tipc_connecter_t (zmq::io_thread_t *io_thread_, zmq::session_base_t *session_, const options_t &options_, address_t *addr_, bool delayed_start_); private: // Handlers for I/O events. void out_event () ZMQ_FINAL; // Internal function to start the actual connection establishment. void start_connecting () ZMQ_FINAL; // Get the file descriptor of newly created connection. Returns // retired_fd if the connection was unsuccessful. fd_t connect (); // Open IPC connecting socket. Returns -1 in case of error, // 0 if connect was successful immediately. Returns -1 with // EAGAIN errno if async connect was launched. int open (); ZMQ_NON_COPYABLE_NOR_MOVABLE (tipc_connecter_t) }; } #endif #endif
sophomore_public/libzmq
src/tipc_connecter.hpp
C++
gpl-3.0
1,282
/* SPDX-License-Identifier: MPL-2.0 */ #include "precompiled.hpp" #include "tipc_listener.hpp" #if defined ZMQ_HAVE_TIPC #include <new> #include <string.h> #include "tipc_address.hpp" #include "io_thread.hpp" #include "config.hpp" #include "err.hpp" #include "ip.hpp" #include "socket_base.hpp" #include "address.hpp" #include <unistd.h> #include <sys/socket.h> #include <fcntl.h> #if defined ZMQ_HAVE_VXWORKS #include <sockLib.h> #include <tipc/tipc.h> #else #include <linux/tipc.h> #endif zmq::tipc_listener_t::tipc_listener_t (io_thread_t *io_thread_, socket_base_t *socket_, const options_t &options_) : stream_listener_base_t (io_thread_, socket_, options_) { } void zmq::tipc_listener_t::in_event () { fd_t fd = accept (); // If connection was reset by the peer in the meantime, just ignore it. // TODO: Handle specific errors like ENFILE/EMFILE etc. if (fd == retired_fd) { _socket->event_accept_failed ( make_unconnected_bind_endpoint_pair (_endpoint), zmq_errno ()); return; } // Create the engine object for this connection. create_engine (fd); } std::string zmq::tipc_listener_t::get_socket_name (zmq::fd_t fd_, socket_end_t socket_end_) const { return zmq::get_socket_name<tipc_address_t> (fd_, socket_end_); } int zmq::tipc_listener_t::set_local_address (const char *addr_) { // Convert str to address struct int rc = _address.resolve (addr_); if (rc != 0) return -1; // Cannot bind non-random Port Identity const sockaddr_tipc *const a = reinterpret_cast<const sockaddr_tipc *> (_address.addr ()); if (!_address.is_random () && a->addrtype == TIPC_ADDR_ID) { errno = EINVAL; return -1; } // Create a listening socket. _s = open_socket (AF_TIPC, SOCK_STREAM, 0); if (_s == retired_fd) return -1; // If random Port Identity, update address object to reflect the assigned address if (_address.is_random ()) { struct sockaddr_storage ss; const zmq_socklen_t sl = get_socket_address (_s, socket_end_local, &ss); if (sl == 0) goto error; _address = tipc_address_t (reinterpret_cast<struct sockaddr *> (&ss), sl); } _address.to_string (_endpoint); // Bind the socket to tipc name if (_address.is_service ()) { #ifdef ZMQ_HAVE_VXWORKS rc = bind (_s, (sockaddr *) address.addr (), address.addrlen ()); #else rc = bind (_s, _address.addr (), _address.addrlen ()); #endif if (rc != 0) goto error; } // Listen for incoming connections. rc = listen (_s, options.backlog); if (rc != 0) goto error; _socket->event_listening (make_unconnected_bind_endpoint_pair (_endpoint), _s); return 0; error: int err = errno; close (); errno = err; return -1; } zmq::fd_t zmq::tipc_listener_t::accept () { // Accept one connection and deal with different failure modes. // The situation where connection cannot be accepted due to insufficient // resources is considered valid and treated by ignoring the connection. struct sockaddr_storage ss = {}; socklen_t ss_len = sizeof (ss); zmq_assert (_s != retired_fd); #ifdef ZMQ_HAVE_VXWORKS fd_t sock = ::accept (_s, (struct sockaddr *) &ss, (int *) &ss_len); #else fd_t sock = ::accept (_s, reinterpret_cast<struct sockaddr *> (&ss), &ss_len); #endif if (sock == -1) { errno_assert (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS || errno == EINTR || errno == ECONNABORTED || errno == EPROTO || errno == EMFILE || errno == ENFILE); return retired_fd; } /*FIXME Accept filters?*/ return sock; } #endif
sophomore_public/libzmq
src/tipc_listener.cpp
C++
gpl-3.0
3,966
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __ZMQ_TIPC_LISTENER_HPP_INCLUDED__ #define __ZMQ_TIPC_LISTENER_HPP_INCLUDED__ #include "platform.hpp" #if defined ZMQ_HAVE_TIPC #include <string> #include "fd.hpp" #include "stream_listener_base.hpp" #include "tipc_address.hpp" namespace zmq { class tipc_listener_t ZMQ_FINAL : public stream_listener_base_t { public: tipc_listener_t (zmq::io_thread_t *io_thread_, zmq::socket_base_t *socket_, const options_t &options_); // Set address to listen on. int set_local_address (const char *addr_); protected: std::string get_socket_name (fd_t fd_, socket_end_t socket_end_) const ZMQ_FINAL; private: // Handlers for I/O events. void in_event () ZMQ_FINAL; // Accept the new connection. Returns the file descriptor of the // newly created connection. The function may return retired_fd // if the connection was dropped while waiting in the listen backlog. fd_t accept (); // Address to listen on tipc_address_t _address; ZMQ_NON_COPYABLE_NOR_MOVABLE (tipc_listener_t) }; } #endif #endif
sophomore_public/libzmq
src/tipc_listener.hpp
C++
gpl-3.0
1,181
/* SPDX-License-Identifier: MPL-2.0 */ #include "precompiled.hpp" #include "macros.hpp" #include "err.hpp" #include "trie.hpp" #include <stdlib.h> #include <new> #include <algorithm> zmq::trie_t::trie_t () : _refcnt (0), _min (0), _count (0), _live_nodes (0) { } zmq::trie_t::~trie_t () { if (_count == 1) { zmq_assert (_next.node); LIBZMQ_DELETE (_next.node); } else if (_count > 1) { for (unsigned short i = 0; i != _count; ++i) { LIBZMQ_DELETE (_next.table[i]); } free (_next.table); } } bool zmq::trie_t::add (unsigned char *prefix_, size_t size_) { // We are at the node corresponding to the prefix. We are done. if (!size_) { ++_refcnt; return _refcnt == 1; } const unsigned char c = *prefix_; if (c < _min || c >= _min + _count) { // The character is out of range of currently handled // characters. We have to extend the table. if (!_count) { _min = c; _count = 1; _next.node = NULL; } else if (_count == 1) { const unsigned char oldc = _min; trie_t *oldp = _next.node; _count = (_min < c ? c - _min : _min - c) + 1; _next.table = static_cast<trie_t **> (malloc (sizeof (trie_t *) * _count)); alloc_assert (_next.table); for (unsigned short i = 0; i != _count; ++i) _next.table[i] = 0; _min = std::min (_min, c); _next.table[oldc - _min] = oldp; } else if (_min < c) { // The new character is above the current character range. const unsigned short old_count = _count; _count = c - _min + 1; _next.table = static_cast<trie_t **> ( realloc (_next.table, sizeof (trie_t *) * _count)); zmq_assert (_next.table); for (unsigned short i = old_count; i != _count; i++) _next.table[i] = NULL; } else { // The new character is below the current character range. const unsigned short old_count = _count; _count = (_min + old_count) - c; _next.table = static_cast<trie_t **> ( realloc (_next.table, sizeof (trie_t *) * _count)); zmq_assert (_next.table); memmove (_next.table + _min - c, _next.table, old_count * sizeof (trie_t *)); for (unsigned short i = 0; i != _min - c; i++) _next.table[i] = NULL; _min = c; } } // If next node does not exist, create one. if (_count == 1) { if (!_next.node) { _next.node = new (std::nothrow) trie_t; alloc_assert (_next.node); ++_live_nodes; zmq_assert (_live_nodes == 1); } return _next.node->add (prefix_ + 1, size_ - 1); } if (!_next.table[c - _min]) { _next.table[c - _min] = new (std::nothrow) trie_t; alloc_assert (_next.table[c - _min]); ++_live_nodes; zmq_assert (_live_nodes > 1); } return _next.table[c - _min]->add (prefix_ + 1, size_ - 1); } bool zmq::trie_t::rm (unsigned char *prefix_, size_t size_) { // TODO: Shouldn't an error be reported if the key does not exist? if (!size_) { if (!_refcnt) return false; _refcnt--; return _refcnt == 0; } const unsigned char c = *prefix_; if (!_count || c < _min || c >= _min + _count) return false; trie_t *next_node = _count == 1 ? 
_next.node : _next.table[c - _min]; if (!next_node) return false; const bool ret = next_node->rm (prefix_ + 1, size_ - 1); // Prune redundant nodes if (next_node->is_redundant ()) { LIBZMQ_DELETE (next_node); zmq_assert (_count > 0); if (_count == 1) { // The just pruned node is was the only live node _next.node = 0; _count = 0; --_live_nodes; zmq_assert (_live_nodes == 0); } else { _next.table[c - _min] = 0; zmq_assert (_live_nodes > 1); --_live_nodes; // Compact the table if possible if (_live_nodes == 1) { // We can switch to using the more compact single-node // representation since the table only contains one live node trie_t *node = 0; // Since we always compact the table the pruned node must // either be the left-most or right-most ptr in the node // table if (c == _min) { // The pruned node is the left-most node ptr in the // node table => keep the right-most node node = _next.table[_count - 1]; _min += _count - 1; } else if (c == _min + _count - 1) { // The pruned node is the right-most node ptr in the // node table => keep the left-most node node = _next.table[0]; } zmq_assert (node); free (_next.table); _next.node = node; _count = 1; } else if (c == _min) { // We can compact the table "from the left". // Find the left-most non-null node ptr, which we'll use as // our new min unsigned char new_min = _min; for (unsigned short i = 1; i < _count; ++i) { if (_next.table[i]) { new_min = i + _min; break; } } zmq_assert (new_min != _min); trie_t **old_table = _next.table; zmq_assert (new_min > _min); zmq_assert (_count > new_min - _min); _count = _count - (new_min - _min); _next.table = static_cast<trie_t **> (malloc (sizeof (trie_t *) * _count)); alloc_assert (_next.table); memmove (_next.table, old_table + (new_min - _min), sizeof (trie_t *) * _count); free (old_table); _min = new_min; } else if (c == _min + _count - 1) { // We can compact the table "from the right". // Find the right-most non-null node ptr, which we'll use to // determine the new table size unsigned short new_count = _count; for (unsigned short i = 1; i < _count; ++i) { if (_next.table[_count - 1 - i]) { new_count = _count - i; break; } } zmq_assert (new_count != _count); _count = new_count; trie_t **old_table = _next.table; _next.table = static_cast<trie_t **> (malloc (sizeof (trie_t *) * _count)); alloc_assert (_next.table); memmove (_next.table, old_table, sizeof (trie_t *) * _count); free (old_table); } } } return ret; } bool zmq::trie_t::check (const unsigned char *data_, size_t size_) const { // This function is on critical path. It deliberately doesn't use // recursion to get a bit better performance. const trie_t *current = this; while (true) { // We've found a corresponding subscription! if (current->_refcnt) return true; // We've checked all the data and haven't found matching subscription. if (!size_) return false; // If there's no corresponding slot for the first character // of the prefix, the message does not match. const unsigned char c = *data_; if (c < current->_min || c >= current->_min + current->_count) return false; // Move to the next character. 
if (current->_count == 1) current = current->_next.node; else { current = current->_next.table[c - current->_min]; if (!current) return false; } data_++; size_--; } } void zmq::trie_t::apply ( void (*func_) (unsigned char *data_, size_t size_, void *arg_), void *arg_) { unsigned char *buff = NULL; apply_helper (&buff, 0, 0, func_, arg_); free (buff); } void zmq::trie_t::apply_helper (unsigned char **buff_, size_t buffsize_, size_t maxbuffsize_, void (*func_) (unsigned char *data_, size_t size_, void *arg_), void *arg_) const { // If this node is a subscription, apply the function. if (_refcnt) func_ (*buff_, buffsize_, arg_); // Adjust the buffer. if (buffsize_ >= maxbuffsize_) { maxbuffsize_ = buffsize_ + 256; *buff_ = static_cast<unsigned char *> (realloc (*buff_, maxbuffsize_)); zmq_assert (*buff_); } // If there are no subnodes in the trie, return. if (_count == 0) return; // If there's one subnode (optimisation). if (_count == 1) { (*buff_)[buffsize_] = _min; buffsize_++; _next.node->apply_helper (buff_, buffsize_, maxbuffsize_, func_, arg_); return; } // If there are multiple subnodes. for (unsigned short c = 0; c != _count; c++) { (*buff_)[buffsize_] = _min + c; if (_next.table[c]) _next.table[c]->apply_helper (buff_, buffsize_ + 1, maxbuffsize_, func_, arg_); } } bool zmq::trie_t::is_redundant () const { return _refcnt == 0 && _live_nodes == 0; }
sophomore_public/libzmq
src/trie.cpp
C++
gpl-3.0
10,064
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __ZMQ_TRIE_HPP_INCLUDED__ #define __ZMQ_TRIE_HPP_INCLUDED__ #include <stddef.h> #include "macros.hpp" #include "stdint.hpp" #include "atomic_counter.hpp" namespace zmq { class trie_t { public: trie_t (); ~trie_t (); // Add key to the trie. Returns true if this is a new item in the trie // rather than a duplicate. bool add (unsigned char *prefix_, size_t size_); // Remove key from the trie. Returns true if the item is actually // removed from the trie. bool rm (unsigned char *prefix_, size_t size_); // Check whether particular key is in the trie. bool check (const unsigned char *data_, size_t size_) const; // Apply the function supplied to each subscription in the trie. void apply (void (*func_) (unsigned char *data_, size_t size_, void *arg_), void *arg_); private: void apply_helper (unsigned char **buff_, size_t buffsize_, size_t maxbuffsize_, void (*func_) (unsigned char *data_, size_t size_, void *arg_), void *arg_) const; bool is_redundant () const; uint32_t _refcnt; unsigned char _min; unsigned short _count; unsigned short _live_nodes; union { class trie_t *node; class trie_t **table; } _next; ZMQ_NON_COPYABLE_NOR_MOVABLE (trie_t) }; // lightweight wrapper around trie_t adding tracking of total number of prefixes class trie_with_size_t { public: trie_with_size_t () {} ~trie_with_size_t () {} bool add (unsigned char *prefix_, size_t size_) { if (_trie.add (prefix_, size_)) { _num_prefixes.add (1); return true; } else return false; } bool rm (unsigned char *prefix_, size_t size_) { if (_trie.rm (prefix_, size_)) { _num_prefixes.sub (1); return true; } else return false; } bool check (const unsigned char *data_, size_t size_) const { return _trie.check (data_, size_); } void apply (void (*func_) (unsigned char *data_, size_t size_, void *arg_), void *arg_) { _trie.apply (func_, arg_); } // Retrieve the number of prefixes stored in this trie (added - removed) // Note this is a multithread safe function. uint32_t num_prefixes () const { return _num_prefixes.get (); } private: atomic_counter_t _num_prefixes; trie_t _trie; }; } #endif
sophomore_public/libzmq
src/trie.hpp
C++
gpl-3.0
2,653
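The trie above backs PUB/SUB prefix matching: add () and rm () maintain per-prefix reference counts while check () walks a message byte by byte until it hits a subscribed node. A minimal usage sketch follows; it assumes compilation inside the libzmq source tree so that "trie.hpp" and its internal dependencies are available, and the prefix/message strings are purely illustrative.

#include "trie.hpp"

#include <cassert>
#include <cstddef>

int main ()
{
    zmq::trie_t trie;
    unsigned char prefix[] = "animals.cats.";
    const std::size_t prefix_len = sizeof (prefix) - 1;

    // add () returns true only when the prefix is new to the trie.
    assert (trie.add (prefix, prefix_len));
    assert (!trie.add (prefix, prefix_len));

    // check () matches any message that begins with a stored prefix.
    const unsigned char msg[] = "animals.cats.tabby";
    assert (trie.check (msg, sizeof (msg) - 1));
    const unsigned char other[] = "plants.ferns";
    assert (!trie.check (other, sizeof (other) - 1));

    // rm () reports true once the last reference to the prefix is removed.
    assert (!trie.rm (prefix, prefix_len)); // refcount 2 -> 1
    assert (trie.rm (prefix, prefix_len));  // refcount 1 -> 0
    return 0;
}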
/* SPDX-License-Identifier: MPL-2.0 */ #include "precompiled.hpp" #include <string> #include <sstream> #include "macros.hpp" #include "udp_address.hpp" #include "stdint.hpp" #include "err.hpp" #include "ip.hpp" #ifndef ZMQ_HAVE_WINDOWS #include <sys/types.h> #include <arpa/inet.h> #include <netdb.h> #include <net/if.h> #include <ctype.h> #endif zmq::udp_address_t::udp_address_t () : _bind_interface (-1), _is_multicast (false) { _bind_address = ip_addr_t::any (AF_INET); _target_address = ip_addr_t::any (AF_INET); } zmq::udp_address_t::~udp_address_t () { } int zmq::udp_address_t::resolve (const char *name_, bool bind_, bool ipv6_) { // No IPv6 support yet bool has_interface = false; _address = name_; // If we have a semicolon then we should have an interface specifier in the // URL const char *src_delimiter = strrchr (name_, ';'); if (src_delimiter) { const std::string src_name (name_, src_delimiter - name_); ip_resolver_options_t src_resolver_opts; src_resolver_opts .bindable (true) // Restrict hostname/service to literals to avoid any DNS // lookups or service-name irregularity due to // indeterminate socktype. .allow_dns (false) .allow_nic_name (true) .ipv6 (ipv6_) .expect_port (false); ip_resolver_t src_resolver (src_resolver_opts); const int rc = src_resolver.resolve (&_bind_address, src_name.c_str ()); if (rc != 0) { return -1; } if (_bind_address.is_multicast ()) { // It doesn't make sense to have a multicast address as a source errno = EINVAL; return -1; } // This is a hack because we need the interface index when binding // multicast IPv6, we can't do it by address. Unfortunately for the // time being we don't have a generic platform-independent function to // resolve an interface index from an address, so we only support it // when an actual interface name is provided. if (src_name == "*") { _bind_interface = 0; } else { #ifdef HAVE_IF_NAMETOINDEX _bind_interface = if_nametoindex (src_name.c_str ()); if (_bind_interface == 0) { // Error, probably not an interface name. _bind_interface = -1; } #endif } has_interface = true; name_ = src_delimiter + 1; } ip_resolver_options_t resolver_opts; resolver_opts.bindable (bind_) .allow_dns (true) .allow_nic_name (bind_) .expect_port (true) .ipv6 (ipv6_); ip_resolver_t resolver (resolver_opts); const int rc = resolver.resolve (&_target_address, name_); if (rc != 0) { return -1; } _is_multicast = _target_address.is_multicast (); const uint16_t port = _target_address.port (); if (has_interface) { // If we have an interface specifier then the target address must be a // multicast address if (!_is_multicast) { errno = EINVAL; return -1; } _bind_address.set_port (port); } else { // If we don't have an explicit interface specifier then the URL is // ambiguous: if the target address is multicast then it's the // destination address and the bind address is ANY, if it's unicast // then it's the bind address when 'bind_' is true and the destination // otherwise if (_is_multicast || !bind_) { _bind_address = ip_addr_t::any (_target_address.family ()); _bind_address.set_port (port); _bind_interface = 0; } else { // If we were asked for a bind socket and the address // provided was not multicast then it was really meant as // a bind address and the target_address is useless. _bind_address = _target_address; } } if (_bind_address.family () != _target_address.family ()) { errno = EINVAL; return -1; } // For IPv6 multicast we *must* have an interface index since we can't // bind by address. 
if (ipv6_ && _is_multicast && _bind_interface < 0) { errno = ENODEV; return -1; } return 0; } int zmq::udp_address_t::family () const { return _bind_address.family (); } bool zmq::udp_address_t::is_mcast () const { return _is_multicast; } const zmq::ip_addr_t *zmq::udp_address_t::bind_addr () const { return &_bind_address; } int zmq::udp_address_t::bind_if () const { return _bind_interface; } const zmq::ip_addr_t *zmq::udp_address_t::target_addr () const { return &_target_address; } int zmq::udp_address_t::to_string (std::string &addr_) { // XXX what do (factor TCP code?) addr_ = _address; return 0; }
sophomore_public/libzmq
src/udp_address.cpp
C++
gpl-3.0
4,936
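resolve () above accepts the "[interface;]address:port" form used by udp:// endpoints, where the optional interface part is only meaningful for multicast targets. A hypothetical sketch of the accepted forms follows; it assumes compilation inside the libzmq source tree and that a network interface named eth0 exists on the host, so it is illustrative rather than a test.

#include "udp_address.hpp"

#include <cassert>

int main ()
{
    // Multicast group with an explicit bind interface ("eth0" is assumed to exist).
    zmq::udp_address_t mcast;
    int rc = mcast.resolve ("eth0;239.192.1.1:5555", true /*bind_*/, false /*ipv6_*/);
    assert (rc == 0 && mcast.is_mcast ());

    // A unicast address with bind_ == true is taken as the local bind address.
    zmq::udp_address_t local;
    rc = local.resolve ("127.0.0.1:5555", true /*bind_*/, false /*ipv6_*/);
    assert (rc == 0 && !local.is_mcast ());
    return 0;
}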
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __ZMQ_UDP_ADDRESS_HPP_INCLUDED__ #define __ZMQ_UDP_ADDRESS_HPP_INCLUDED__ #if !defined ZMQ_HAVE_WINDOWS #include <sys/socket.h> #include <netinet/in.h> #endif #include <string> #include "ip_resolver.hpp" namespace zmq { class udp_address_t { public: udp_address_t (); virtual ~udp_address_t (); int resolve (const char *name_, bool bind_, bool ipv6_); // The opposite to resolve() virtual int to_string (std::string &addr_); int family () const; bool is_mcast () const; const ip_addr_t *bind_addr () const; int bind_if () const; const ip_addr_t *target_addr () const; private: ip_addr_t _bind_address; int _bind_interface; ip_addr_t _target_address; bool _is_multicast; std::string _address; }; } #endif
sophomore_public/libzmq
src/udp_address.hpp
C++
gpl-3.0
829
/* SPDX-License-Identifier: MPL-2.0 */ #include "precompiled.hpp" #if !defined ZMQ_HAVE_WINDOWS #include <sys/types.h> #include <unistd.h> #include <sys/socket.h> #include <netinet/in.h> #include <arpa/inet.h> #ifdef ZMQ_HAVE_VXWORKS #include <sockLib.h> #endif #endif #include "udp_address.hpp" #include "udp_engine.hpp" #include "session_base.hpp" #include "err.hpp" #include "ip.hpp" // OSX uses a different name for this socket option #ifndef IPV6_ADD_MEMBERSHIP #define IPV6_ADD_MEMBERSHIP IPV6_JOIN_GROUP #endif #ifdef __APPLE__ #include <TargetConditionals.h> #endif zmq::udp_engine_t::udp_engine_t (const options_t &options_) : _plugged (false), _fd (-1), _session (NULL), _handle (static_cast<handle_t> (NULL)), _address (NULL), _options (options_), _send_enabled (false), _recv_enabled (false) { } zmq::udp_engine_t::~udp_engine_t () { zmq_assert (!_plugged); if (_fd != retired_fd) { #ifdef ZMQ_HAVE_WINDOWS const int rc = closesocket (_fd); wsa_assert (rc != SOCKET_ERROR); #else int rc = close (_fd); errno_assert (rc == 0); #endif _fd = retired_fd; } } int zmq::udp_engine_t::init (address_t *address_, bool send_, bool recv_) { zmq_assert (address_); zmq_assert (send_ || recv_); _send_enabled = send_; _recv_enabled = recv_; _address = address_; _fd = open_socket (_address->resolved.udp_addr->family (), SOCK_DGRAM, IPPROTO_UDP); if (_fd == retired_fd) return -1; unblock_socket (_fd); return 0; } void zmq::udp_engine_t::plug (io_thread_t *io_thread_, session_base_t *session_) { zmq_assert (!_plugged); _plugged = true; zmq_assert (!_session); zmq_assert (session_); _session = session_; // Connect to I/O threads poller object. io_object_t::plug (io_thread_); _handle = add_fd (_fd); const udp_address_t *const udp_addr = _address->resolved.udp_addr; int rc = 0; // Bind the socket to a device if applicable if (!_options.bound_device.empty ()) { rc = rc | bind_to_device (_fd, _options.bound_device); if (rc != 0) { assert_success_or_recoverable (_fd, rc); error (connection_error); return; } } if (_send_enabled) { if (!_options.raw_socket) { const ip_addr_t *out = udp_addr->target_addr (); _out_address = out->as_sockaddr (); _out_address_len = out->sockaddr_len (); if (out->is_multicast ()) { const bool is_ipv6 = (out->family () == AF_INET6); rc = rc | set_udp_multicast_loop (_fd, is_ipv6, _options.multicast_loop); if (_options.multicast_hops > 0) { rc = rc | set_udp_multicast_ttl (_fd, is_ipv6, _options.multicast_hops); } rc = rc | set_udp_multicast_iface (_fd, is_ipv6, udp_addr); } } else { /// XXX fixme ? 
_out_address = reinterpret_cast<sockaddr *> (&_raw_address); _out_address_len = static_cast<zmq_socklen_t> (sizeof (sockaddr_in)); } } if (_recv_enabled) { rc = rc | set_udp_reuse_address (_fd, true); const ip_addr_t *bind_addr = udp_addr->bind_addr (); ip_addr_t any = ip_addr_t::any (bind_addr->family ()); const ip_addr_t *real_bind_addr; const bool multicast = udp_addr->is_mcast (); if (multicast) { // Multicast addresses should be allowed to bind to more than // one port as all ports should receive the message rc = rc | set_udp_reuse_port (_fd, true); // In multicast we should bind ANY and use the mreq struct to // specify the interface any.set_port (bind_addr->port ()); real_bind_addr = &any; } else { real_bind_addr = bind_addr; } if (rc != 0) { error (protocol_error); return; } #ifdef ZMQ_HAVE_VXWORKS rc = rc | bind (_fd, (sockaddr *) real_bind_addr->as_sockaddr (), real_bind_addr->sockaddr_len ()); #else rc = rc | bind (_fd, real_bind_addr->as_sockaddr (), real_bind_addr->sockaddr_len ()); #endif if (rc != 0) { assert_success_or_recoverable (_fd, rc); error (protocol_error); return; } if (multicast) { rc = rc | add_membership (_fd, udp_addr); } } if (rc != 0) { error (protocol_error); } else { if (_send_enabled) { set_pollout (_handle); } if (_recv_enabled) { set_pollin (_handle); // Call restart output to drop all join/leave commands restart_output (); } } } int zmq::udp_engine_t::set_udp_multicast_loop (fd_t s_, bool is_ipv6_, bool loop_) { int level; int optname; if (is_ipv6_) { level = IPPROTO_IPV6; optname = IPV6_MULTICAST_LOOP; } else { level = IPPROTO_IP; optname = IP_MULTICAST_LOOP; } int loop = loop_ ? 1 : 0; const int rc = setsockopt (s_, level, optname, reinterpret_cast<char *> (&loop), sizeof (loop)); assert_success_or_recoverable (s_, rc); return rc; } int zmq::udp_engine_t::set_udp_multicast_ttl (fd_t s_, bool is_ipv6_, int hops_) { int level; if (is_ipv6_) { level = IPPROTO_IPV6; } else { level = IPPROTO_IP; } const int rc = setsockopt (s_, level, IP_MULTICAST_TTL, reinterpret_cast<char *> (&hops_), sizeof (hops_)); assert_success_or_recoverable (s_, rc); return rc; } int zmq::udp_engine_t::set_udp_multicast_iface (fd_t s_, bool is_ipv6_, const udp_address_t *addr_) { int rc = 0; if (is_ipv6_) { int bind_if = addr_->bind_if (); if (bind_if > 0) { // If a bind interface is provided we tell the // kernel to use it to send multicast packets rc = setsockopt (s_, IPPROTO_IPV6, IPV6_MULTICAST_IF, reinterpret_cast<char *> (&bind_if), sizeof (bind_if)); } } else { struct in_addr bind_addr = addr_->bind_addr ()->ipv4.sin_addr; if (bind_addr.s_addr != INADDR_ANY) { rc = setsockopt (s_, IPPROTO_IP, IP_MULTICAST_IF, reinterpret_cast<char *> (&bind_addr), sizeof (bind_addr)); } } assert_success_or_recoverable (s_, rc); return rc; } int zmq::udp_engine_t::set_udp_reuse_address (fd_t s_, bool on_) { int on = on_ ? 1 : 0; const int rc = setsockopt (s_, SOL_SOCKET, SO_REUSEADDR, reinterpret_cast<char *> (&on), sizeof (on)); assert_success_or_recoverable (s_, rc); return rc; } int zmq::udp_engine_t::set_udp_reuse_port (fd_t s_, bool on_) { #ifndef SO_REUSEPORT return 0; #else int on = on_ ? 
1 : 0; int rc = setsockopt (s_, SOL_SOCKET, SO_REUSEPORT, reinterpret_cast<char *> (&on), sizeof (on)); assert_success_or_recoverable (s_, rc); return rc; #endif } int zmq::udp_engine_t::add_membership (fd_t s_, const udp_address_t *addr_) { const ip_addr_t *mcast_addr = addr_->target_addr (); int rc = 0; if (mcast_addr->family () == AF_INET) { struct ip_mreq mreq; mreq.imr_multiaddr = mcast_addr->ipv4.sin_addr; mreq.imr_interface = addr_->bind_addr ()->ipv4.sin_addr; rc = setsockopt (s_, IPPROTO_IP, IP_ADD_MEMBERSHIP, reinterpret_cast<char *> (&mreq), sizeof (mreq)); } else if (mcast_addr->family () == AF_INET6) { struct ipv6_mreq mreq; const int iface = addr_->bind_if (); zmq_assert (iface >= -1); mreq.ipv6mr_multiaddr = mcast_addr->ipv6.sin6_addr; mreq.ipv6mr_interface = iface; rc = setsockopt (s_, IPPROTO_IPV6, IPV6_ADD_MEMBERSHIP, reinterpret_cast<char *> (&mreq), sizeof (mreq)); } assert_success_or_recoverable (s_, rc); return rc; } void zmq::udp_engine_t::error (error_reason_t reason_) { zmq_assert (_session); _session->engine_error (false, reason_); terminate (); } void zmq::udp_engine_t::terminate () { zmq_assert (_plugged); _plugged = false; rm_fd (_handle); // Disconnect from I/O threads poller object. io_object_t::unplug (); delete this; } void zmq::udp_engine_t::sockaddr_to_msg (zmq::msg_t *msg_, const sockaddr_in *addr_) { const char *const name = inet_ntoa (addr_->sin_addr); char port[6]; const int port_len = snprintf (port, 6, "%d", static_cast<int> (ntohs (addr_->sin_port))); zmq_assert (port_len > 0 && port_len < 6); const size_t name_len = strlen (name); const int size = static_cast<int> (name_len) + 1 /* colon */ + port_len + 1; // terminating NUL const int rc = msg_->init_size (size); errno_assert (rc == 0); msg_->set_flags (msg_t::more); // use memcpy instead of strcpy/strcat, since this is more efficient when // we already know the lengths, which we calculated above char *address = static_cast<char *> (msg_->data ()); memcpy (address, name, name_len); address += name_len; *address++ = ':'; memcpy (address, port, static_cast<size_t> (port_len)); address += port_len; *address = 0; } int zmq::udp_engine_t::resolve_raw_address (const char *name_, size_t length_) { memset (&_raw_address, 0, sizeof _raw_address); const char *delimiter = NULL; // Find delimiter, cannot use memrchr as it is not supported on windows if (length_ != 0) { int chars_left = static_cast<int> (length_); const char *current_char = name_ + length_; do { if (*(--current_char) == ':') { delimiter = current_char; break; } } while (--chars_left != 0); } if (!delimiter) { errno = EINVAL; return -1; } const std::string addr_str (name_, delimiter - name_); const std::string port_str (delimiter + 1, name_ + length_ - delimiter - 1); // Parse the port number (0 is not a valid port). 
const uint16_t port = static_cast<uint16_t> (atoi (port_str.c_str ())); if (port == 0) { errno = EINVAL; return -1; } _raw_address.sin_family = AF_INET; _raw_address.sin_port = htons (port); _raw_address.sin_addr.s_addr = inet_addr (addr_str.c_str ()); if (_raw_address.sin_addr.s_addr == INADDR_NONE) { errno = EINVAL; return -1; } return 0; } void zmq::udp_engine_t::out_event () { msg_t group_msg; int rc = _session->pull_msg (&group_msg); errno_assert (rc == 0 || (rc == -1 && errno == EAGAIN)); if (rc == 0) { msg_t body_msg; rc = _session->pull_msg (&body_msg); // If there's a group, there should also be a body errno_assert (rc == 0); const size_t group_size = group_msg.size (); const size_t body_size = body_msg.size (); size_t size; if (_options.raw_socket) { rc = resolve_raw_address (static_cast<char *> (group_msg.data ()), group_size); // We discard the message if address is not valid if (rc != 0) { rc = group_msg.close (); errno_assert (rc == 0); rc = body_msg.close (); errno_assert (rc == 0); return; } size = body_size; memcpy (_out_buffer, body_msg.data (), body_size); } else { size = group_size + body_size + 1; // TODO: check if larger than maximum size _out_buffer[0] = static_cast<unsigned char> (group_size); memcpy (_out_buffer + 1, group_msg.data (), group_size); memcpy (_out_buffer + 1 + group_size, body_msg.data (), body_size); } rc = group_msg.close (); errno_assert (rc == 0); body_msg.close (); errno_assert (rc == 0); #ifdef ZMQ_HAVE_WINDOWS rc = sendto (_fd, _out_buffer, static_cast<int> (size), 0, _out_address, _out_address_len); #elif defined ZMQ_HAVE_VXWORKS rc = sendto (_fd, reinterpret_cast<caddr_t> (_out_buffer), size, 0, (sockaddr *) _out_address, _out_address_len); #else rc = sendto (_fd, _out_buffer, size, 0, _out_address, _out_address_len); #endif if (rc < 0) { #ifdef ZMQ_HAVE_WINDOWS if (WSAGetLastError () != WSAEWOULDBLOCK) { assert_success_or_recoverable (_fd, rc); error (connection_error); } #else if (rc != EWOULDBLOCK) { assert_success_or_recoverable (_fd, rc); error (connection_error); } #endif } } else { reset_pollout (_handle); } } const zmq::endpoint_uri_pair_t &zmq::udp_engine_t::get_endpoint () const { return _empty_endpoint; } void zmq::udp_engine_t::restart_output () { // If we don't support send we just drop all messages if (!_send_enabled) { msg_t msg; while (_session->pull_msg (&msg) == 0) msg.close (); } else { set_pollout (_handle); out_event (); } } void zmq::udp_engine_t::in_event () { sockaddr_storage in_address; zmq_socklen_t in_addrlen = static_cast<zmq_socklen_t> (sizeof (sockaddr_storage)); const int nbytes = recvfrom (_fd, _in_buffer, MAX_UDP_MSG, 0, reinterpret_cast<sockaddr *> (&in_address), &in_addrlen); if (nbytes < 0) { #ifdef ZMQ_HAVE_WINDOWS if (WSAGetLastError () != WSAEWOULDBLOCK) { assert_success_or_recoverable (_fd, nbytes); error (connection_error); } #else if (nbytes != EWOULDBLOCK) { assert_success_or_recoverable (_fd, nbytes); error (connection_error); } #endif return; } int rc; int body_size; int body_offset; msg_t msg; if (_options.raw_socket) { zmq_assert (in_address.ss_family == AF_INET); sockaddr_to_msg (&msg, reinterpret_cast<sockaddr_in *> (&in_address)); body_size = nbytes; body_offset = 0; } else { // TODO in out_event, the group size is an *unsigned* char. what is // the maximum value? 
const char *group_buffer = _in_buffer + 1; const int group_size = _in_buffer[0]; rc = msg.init_size (group_size); errno_assert (rc == 0); msg.set_flags (msg_t::more); memcpy (msg.data (), group_buffer, group_size); // This doesn't fit, just ignore if (nbytes - 1 < group_size) return; body_size = nbytes - 1 - group_size; body_offset = 1 + group_size; } // Push group description to session rc = _session->push_msg (&msg); errno_assert (rc == 0 || (rc == -1 && errno == EAGAIN)); // Group description message doesn't fit in the pipe, drop if (rc != 0) { rc = msg.close (); errno_assert (rc == 0); reset_pollin (_handle); return; } rc = msg.close (); errno_assert (rc == 0); rc = msg.init_size (body_size); errno_assert (rc == 0); memcpy (msg.data (), _in_buffer + body_offset, body_size); // Push message body to session rc = _session->push_msg (&msg); // Message body doesn't fit in the pipe, drop and reset session state if (rc != 0) { rc = msg.close (); errno_assert (rc == 0); _session->reset (); reset_pollin (_handle); return; } rc = msg.close (); errno_assert (rc == 0); _session->flush (); } bool zmq::udp_engine_t::restart_input () { if (_recv_enabled) { set_pollin (_handle); in_event (); } return true; }
sophomore_public/libzmq
src/udp_engine.cpp
C++
gpl-3.0
16,665
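For non-raw sockets the engine above frames each datagram as a one-byte group length, the group name, and then the message body (see out_event () and in_event ()). A standalone sketch of that layout follows; the helper name encode_dgram is illustrative and not part of libzmq.

#include <cassert>
#include <cstring>
#include <string>
#include <vector>

static std::vector<unsigned char> encode_dgram (const std::string &group,
                                                const std::string &body)
{
    assert (group.size () <= 255); // the group length must fit in one byte
    std::vector<unsigned char> out;
    out.push_back (static_cast<unsigned char> (group.size ()));
    out.insert (out.end (), group.begin (), group.end ());
    out.insert (out.end (), body.begin (), body.end ());
    return out;
}

int main ()
{
    const std::vector<unsigned char> dgram = encode_dgram ("weather", "rainy");
    assert (dgram.size () == 1 + 7 + 5);
    assert (dgram[0] == 7);                              // group length
    assert (std::memcmp (&dgram[1], "weather", 7) == 0); // group name
    assert (std::memcmp (&dgram[8], "rainy", 5) == 0);   // message body
    return 0;
}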
#ifndef __ZMQ_UDP_ENGINE_HPP_INCLUDED__ #define __ZMQ_UDP_ENGINE_HPP_INCLUDED__ #include "io_object.hpp" #include "i_engine.hpp" #include "address.hpp" #include "msg.hpp" #define MAX_UDP_MSG 8192 namespace zmq { class io_thread_t; class session_base_t; class udp_engine_t ZMQ_FINAL : public io_object_t, public i_engine { public: udp_engine_t (const options_t &options_); ~udp_engine_t (); int init (address_t *address_, bool send_, bool recv_); bool has_handshake_stage () ZMQ_FINAL { return false; }; // i_engine interface implementation. // Plug the engine to the session. void plug (zmq::io_thread_t *io_thread_, class session_base_t *session_); // Terminate and deallocate the engine. Note that 'detached' // events are not fired on termination. void terminate (); // This method is called by the session to signalise that more // messages can be written to the pipe. bool restart_input (); // This method is called by the session to signalise that there // are messages to send available. void restart_output (); void zap_msg_available () {}; void in_event (); void out_event (); const endpoint_uri_pair_t &get_endpoint () const; private: int resolve_raw_address (const char *name_, size_t length_); static void sockaddr_to_msg (zmq::msg_t *msg_, const sockaddr_in *addr_); static int set_udp_reuse_address (fd_t s_, bool on_); static int set_udp_reuse_port (fd_t s_, bool on_); // Indicate, if the multicast data being sent should be looped back static int set_udp_multicast_loop (fd_t s_, bool is_ipv6_, bool loop_); // Set multicast TTL static int set_udp_multicast_ttl (fd_t s_, bool is_ipv6_, int hops_); // Set multicast address/interface int set_udp_multicast_iface (fd_t s_, bool is_ipv6_, const udp_address_t *addr_); // Join a multicast group int add_membership (fd_t s_, const udp_address_t *addr_); // Function to handle network issues. void error (error_reason_t reason_); const endpoint_uri_pair_t _empty_endpoint; bool _plugged; fd_t _fd; session_base_t *_session; handle_t _handle; address_t *_address; options_t _options; sockaddr_in _raw_address; const struct sockaddr *_out_address; zmq_socklen_t _out_address_len; char _out_buffer[MAX_UDP_MSG]; char _in_buffer[MAX_UDP_MSG]; bool _send_enabled; bool _recv_enabled; }; } #endif
sophomore_public/libzmq
src/udp_engine.hpp
C++
gpl-3.0
2,548
/* SPDX-License-Identifier: MPL-2.0 */ #include "precompiled.hpp" #include <stdlib.h> #include <string.h> #include <limits> #include <limits.h> #include "decoder.hpp" #include "v1_decoder.hpp" #include "likely.hpp" #include "wire.hpp" #include "err.hpp" zmq::v1_decoder_t::v1_decoder_t (size_t bufsize_, int64_t maxmsgsize_) : decoder_base_t<v1_decoder_t> (bufsize_), _max_msg_size (maxmsgsize_) { int rc = _in_progress.init (); errno_assert (rc == 0); // At the beginning, read one byte and go to one_byte_size_ready state. next_step (_tmpbuf, 1, &v1_decoder_t::one_byte_size_ready); } zmq::v1_decoder_t::~v1_decoder_t () { const int rc = _in_progress.close (); errno_assert (rc == 0); } int zmq::v1_decoder_t::one_byte_size_ready (unsigned char const *) { // First byte of size is read. If it is UCHAR_MAX (0xff) read 8-byte size. // Otherwise allocate the buffer for message data and read the // message data into it. if (*_tmpbuf == UCHAR_MAX) next_step (_tmpbuf, 8, &v1_decoder_t::eight_byte_size_ready); else { // There has to be at least one byte (the flags) in the message). if (!*_tmpbuf) { errno = EPROTO; return -1; } if (_max_msg_size >= 0 && static_cast<int64_t> (*_tmpbuf - 1) > _max_msg_size) { errno = EMSGSIZE; return -1; } int rc = _in_progress.close (); assert (rc == 0); rc = _in_progress.init_size (*_tmpbuf - 1); if (rc != 0) { errno_assert (errno == ENOMEM); rc = _in_progress.init (); errno_assert (rc == 0); errno = ENOMEM; return -1; } next_step (_tmpbuf, 1, &v1_decoder_t::flags_ready); } return 0; } int zmq::v1_decoder_t::eight_byte_size_ready (unsigned char const *) { // 8-byte payload length is read. Allocate the buffer // for message body and read the message data into it. const uint64_t payload_length = get_uint64 (_tmpbuf); // There has to be at least one byte (the flags) in the message). if (payload_length == 0) { errno = EPROTO; return -1; } // Message size must not exceed the maximum allowed size. if (_max_msg_size >= 0 && payload_length - 1 > static_cast<uint64_t> (_max_msg_size)) { errno = EMSGSIZE; return -1; } #ifndef __aarch64__ // Message size must fit within range of size_t data type. if (payload_length - 1 > std::numeric_limits<size_t>::max ()) { errno = EMSGSIZE; return -1; } #endif const size_t msg_size = static_cast<size_t> (payload_length - 1); int rc = _in_progress.close (); assert (rc == 0); rc = _in_progress.init_size (msg_size); if (rc != 0) { errno_assert (errno == ENOMEM); rc = _in_progress.init (); errno_assert (rc == 0); errno = ENOMEM; return -1; } next_step (_tmpbuf, 1, &v1_decoder_t::flags_ready); return 0; } int zmq::v1_decoder_t::flags_ready (unsigned char const *) { // Store the flags from the wire into the message structure. _in_progress.set_flags (_tmpbuf[0] & msg_t::more); next_step (_in_progress.data (), _in_progress.size (), &v1_decoder_t::message_ready); return 0; } int zmq::v1_decoder_t::message_ready (unsigned char const *) { // Message is completely read. Push it further and start reading // new message. (in_progress is a 0-byte message after this point.) next_step (_tmpbuf, 1, &v1_decoder_t::one_byte_size_ready); return 1; }
sophomore_public/libzmq
src/v1_decoder.cpp
C++
gpl-3.0
3,656
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __ZMQ_V1_DECODER_HPP_INCLUDED__ #define __ZMQ_V1_DECODER_HPP_INCLUDED__ #include "decoder.hpp" namespace zmq { // Decoder for ZMTP/1.0 protocol. Converts data batches into messages. class v1_decoder_t ZMQ_FINAL : public decoder_base_t<v1_decoder_t> { public: v1_decoder_t (size_t bufsize_, int64_t maxmsgsize_); ~v1_decoder_t (); msg_t *msg () { return &_in_progress; } private: int one_byte_size_ready (unsigned char const *); int eight_byte_size_ready (unsigned char const *); int flags_ready (unsigned char const *); int message_ready (unsigned char const *); unsigned char _tmpbuf[8]; msg_t _in_progress; const int64_t _max_msg_size; ZMQ_NON_COPYABLE_NOR_MOVABLE (v1_decoder_t) }; } #endif
sophomore_public/libzmq
src/v1_decoder.hpp
C++
gpl-3.0
800
/* SPDX-License-Identifier: MPL-2.0 */ #include "precompiled.hpp" #include "encoder.hpp" #include "v1_encoder.hpp" #include "msg.hpp" #include "wire.hpp" #include <limits.h> zmq::v1_encoder_t::v1_encoder_t (size_t bufsize_) : encoder_base_t<v1_encoder_t> (bufsize_) { // Write 0 bytes to the batch and go to message_ready state. next_step (NULL, 0, &v1_encoder_t::message_ready, true); } zmq::v1_encoder_t::~v1_encoder_t () { } void zmq::v1_encoder_t::size_ready () { // Write message body into the buffer. next_step (in_progress ()->data (), in_progress ()->size (), &v1_encoder_t::message_ready, true); } void zmq::v1_encoder_t::message_ready () { size_t header_size = 2; // flags byte + size byte // Get the message size. size_t size = in_progress ()->size (); // Account for the 'flags' byte. size++; // Account for the subscribe/cancel byte. if (in_progress ()->is_subscribe () || in_progress ()->is_cancel ()) size++; // For messages less than 255 bytes long, write one byte of message size. // For longer messages write 0xff escape character followed by 8-byte // message size. In both cases 'flags' field follows. if (size < UCHAR_MAX) { _tmpbuf[0] = static_cast<unsigned char> (size); _tmpbuf[1] = (in_progress ()->flags () & msg_t::more); } else { _tmpbuf[0] = UCHAR_MAX; put_uint64 (_tmpbuf + 1, size); _tmpbuf[9] = (in_progress ()->flags () & msg_t::more); header_size = 10; } // Encode the subscribe/cancel byte. This is done in the encoder as // opposed to when the subscribe message is created to allow different // protocol behaviour on the wire in the v3.1 and legacy encoders. // It results in the work being done multiple times in case the sub // is sending the subscription/cancel to multiple pubs, but it cannot // be avoided. This processing can be moved to xsub once support for // ZMTP < 3.1 is dropped. if (in_progress ()->is_subscribe ()) _tmpbuf[header_size++] = 1; else if (in_progress ()->is_cancel ()) _tmpbuf[header_size++] = 0; next_step (_tmpbuf, header_size, &v1_encoder_t::size_ready, false); }
sophomore_public/libzmq
src/v1_encoder.cpp
C++
gpl-3.0
2,255
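The ZMTP/1.0 framing produced by the encoder above is: a one-byte length (or the 0xff escape followed by a 64-bit network-order length), then a flags byte, then the body, where the length counts the flags byte. A standalone sketch with an illustrative helper name follows.

#include <cassert>
#include <cstddef>
#include <cstring>
#include <string>
#include <vector>

static std::vector<unsigned char> v1_frame (const std::string &body, bool more)
{
    std::vector<unsigned char> out;
    const std::size_t size = body.size () + 1; // +1 for the flags byte
    if (size < 255) {
        out.push_back (static_cast<unsigned char> (size));
    } else {
        out.push_back (0xff); // escape: 8-byte length follows
        const unsigned long long wire = size;
        for (int i = 7; i >= 0; i--)
            out.push_back (static_cast<unsigned char> ((wire >> (i * 8)) & 0xff));
    }
    out.push_back (more ? 0x01 : 0x00); // flags byte: bit 0 = MORE
    out.insert (out.end (), body.begin (), body.end ());
    return out;
}

int main ()
{
    const std::vector<unsigned char> frame = v1_frame ("hello", false);
    assert (frame.size () == 7);
    assert (frame[0] == 6); // 5 body bytes + 1 flags byte
    assert (frame[1] == 0); // MORE not set
    assert (std::memcmp (&frame[2], "hello", 5) == 0);
    return 0;
}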
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __ZMQ_V1_ENCODER_HPP_INCLUDED__ #define __ZMQ_V1_ENCODER_HPP_INCLUDED__ #include "encoder.hpp" namespace zmq { // Encoder for ZMTP/1.0 protocol. Converts messages into data batches. class v1_encoder_t ZMQ_FINAL : public encoder_base_t<v1_encoder_t> { public: v1_encoder_t (size_t bufsize_); ~v1_encoder_t (); private: void size_ready (); void message_ready (); unsigned char _tmpbuf[11]; ZMQ_NON_COPYABLE_NOR_MOVABLE (v1_encoder_t) }; } #endif
sophomore_public/libzmq
src/v1_encoder.hpp
C++
gpl-3.0
528
/* SPDX-License-Identifier: MPL-2.0 */ #include "precompiled.hpp" #include <stdlib.h> #include <string.h> #include <cmath> #include "v2_protocol.hpp" #include "v2_decoder.hpp" #include "likely.hpp" #include "wire.hpp" #include "err.hpp" zmq::v2_decoder_t::v2_decoder_t (size_t bufsize_, int64_t maxmsgsize_, bool zero_copy_) : decoder_base_t<v2_decoder_t, shared_message_memory_allocator> (bufsize_), _msg_flags (0), _zero_copy (zero_copy_), _max_msg_size (maxmsgsize_) { int rc = _in_progress.init (); errno_assert (rc == 0); // At the beginning, read one byte and go to flags_ready state. next_step (_tmpbuf, 1, &v2_decoder_t::flags_ready); } zmq::v2_decoder_t::~v2_decoder_t () { const int rc = _in_progress.close (); errno_assert (rc == 0); } int zmq::v2_decoder_t::flags_ready (unsigned char const *) { _msg_flags = 0; if (_tmpbuf[0] & v2_protocol_t::more_flag) _msg_flags |= msg_t::more; if (_tmpbuf[0] & v2_protocol_t::command_flag) _msg_flags |= msg_t::command; // The payload length is either one or eight bytes, // depending on whether the 'large' bit is set. if (_tmpbuf[0] & v2_protocol_t::large_flag) next_step (_tmpbuf, 8, &v2_decoder_t::eight_byte_size_ready); else next_step (_tmpbuf, 1, &v2_decoder_t::one_byte_size_ready); return 0; } int zmq::v2_decoder_t::one_byte_size_ready (unsigned char const *read_from_) { return size_ready (_tmpbuf[0], read_from_); } int zmq::v2_decoder_t::eight_byte_size_ready (unsigned char const *read_from_) { // The payload size is encoded as 64-bit unsigned integer. // The most significant byte comes first. const uint64_t msg_size = get_uint64 (_tmpbuf); return size_ready (msg_size, read_from_); } int zmq::v2_decoder_t::size_ready (uint64_t msg_size_, unsigned char const *read_pos_) { // Message size must not exceed the maximum allowed size. if (_max_msg_size >= 0) if (unlikely (msg_size_ > static_cast<uint64_t> (_max_msg_size))) { errno = EMSGSIZE; return -1; } // Message size must fit into size_t data type. if (unlikely (msg_size_ != static_cast<size_t> (msg_size_))) { errno = EMSGSIZE; return -1; } int rc = _in_progress.close (); assert (rc == 0); // the current message can exceed the current buffer. We have to copy the buffer // data into a new message and complete it in the next receive. 
shared_message_memory_allocator &allocator = get_allocator (); if (unlikely (!_zero_copy || msg_size_ > static_cast<size_t> ( allocator.data () + allocator.size () - read_pos_))) { // a new message has started, but the size would exceed the pre-allocated arena // this happens every time when a message does not fit completely into the buffer rc = _in_progress.init_size (static_cast<size_t> (msg_size_)); } else { // construct message using n bytes from the buffer as storage // increase buffer ref count // if the message will be a large message, pass a valid refcnt memory location as well rc = _in_progress.init (const_cast<unsigned char *> (read_pos_), static_cast<size_t> (msg_size_), shared_message_memory_allocator::call_dec_ref, allocator.buffer (), allocator.provide_content ()); // For small messages, data has been copied and refcount does not have to be increased if (_in_progress.is_zcmsg ()) { allocator.advance_content (); allocator.inc_ref (); } } if (unlikely (rc)) { errno_assert (errno == ENOMEM); rc = _in_progress.init (); errno_assert (rc == 0); errno = ENOMEM; return -1; } _in_progress.set_flags (_msg_flags); // this sets read_pos to // the message data address if the data needs to be copied // for small message / messages exceeding the current buffer // or // to the current start address in the buffer because the message // was constructed to use n bytes from the address passed as argument next_step (_in_progress.data (), _in_progress.size (), &v2_decoder_t::message_ready); return 0; } int zmq::v2_decoder_t::message_ready (unsigned char const *) { // Message is completely read. Signal this to the caller // and prepare to decode next message. next_step (_tmpbuf, 1, &v2_decoder_t::flags_ready); return 1; }
sophomore_public/libzmq
src/v2_decoder.cpp
C++
gpl-3.0
4,722
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __ZMQ_V2_DECODER_HPP_INCLUDED__ #define __ZMQ_V2_DECODER_HPP_INCLUDED__ #include "decoder.hpp" #include "decoder_allocators.hpp" namespace zmq { // Decoder for ZMTP/2.x framing protocol. Converts data stream into messages. // The class has to inherit from shared_message_memory_allocator because // the base class calls allocate in its constructor. class v2_decoder_t ZMQ_FINAL : public decoder_base_t<v2_decoder_t, shared_message_memory_allocator> { public: v2_decoder_t (size_t bufsize_, int64_t maxmsgsize_, bool zero_copy_); ~v2_decoder_t (); // i_decoder interface. msg_t *msg () { return &_in_progress; } private: int flags_ready (unsigned char const *); int one_byte_size_ready (unsigned char const *); int eight_byte_size_ready (unsigned char const *); int message_ready (unsigned char const *); int size_ready (uint64_t size_, unsigned char const *); unsigned char _tmpbuf[8]; unsigned char _msg_flags; msg_t _in_progress; const bool _zero_copy; const int64_t _max_msg_size; ZMQ_NON_COPYABLE_NOR_MOVABLE (v2_decoder_t) }; } #endif
sophomore_public/libzmq
src/v2_decoder.hpp
C++
gpl-3.0
1,169
/* SPDX-License-Identifier: MPL-2.0 */ #include "precompiled.hpp" #include "v2_protocol.hpp" #include "v2_encoder.hpp" #include "msg.hpp" #include "likely.hpp" #include "wire.hpp" #include <limits.h> zmq::v2_encoder_t::v2_encoder_t (size_t bufsize_) : encoder_base_t<v2_encoder_t> (bufsize_) { // Write 0 bytes to the batch and go to message_ready state. next_step (NULL, 0, &v2_encoder_t::message_ready, true); } zmq::v2_encoder_t::~v2_encoder_t () { } void zmq::v2_encoder_t::message_ready () { // Encode flags. size_t size = in_progress ()->size (); size_t header_size = 2; // flags byte + size byte unsigned char &protocol_flags = _tmp_buf[0]; protocol_flags = 0; if (in_progress ()->flags () & msg_t::more) protocol_flags |= v2_protocol_t::more_flag; if (in_progress ()->size () > UCHAR_MAX) protocol_flags |= v2_protocol_t::large_flag; if (in_progress ()->flags () & msg_t::command) protocol_flags |= v2_protocol_t::command_flag; if (in_progress ()->is_subscribe () || in_progress ()->is_cancel ()) ++size; // Encode the message length. For messages less then 256 bytes, // the length is encoded as 8-bit unsigned integer. For larger // messages, 64-bit unsigned integer in network byte order is used. if (unlikely (size > UCHAR_MAX)) { put_uint64 (_tmp_buf + 1, size); header_size = 9; // flags byte + size 8 bytes } else { _tmp_buf[1] = static_cast<uint8_t> (size); } // Encode the subscribe/cancel byte. This is done in the encoder as // opposed to when the subscribe message is created to allow different // protocol behaviour on the wire in the v3.1 and legacy encoders. // It results in the work being done multiple times in case the sub // is sending the subscription/cancel to multiple pubs, but it cannot // be avoided. This processing can be moved to xsub once support for // ZMTP < 3.1 is dropped. if (in_progress ()->is_subscribe ()) _tmp_buf[header_size++] = 1; else if (in_progress ()->is_cancel ()) _tmp_buf[header_size++] = 0; next_step (_tmp_buf, header_size, &v2_encoder_t::size_ready, false); } void zmq::v2_encoder_t::size_ready () { // Write message body into the buffer. next_step (in_progress ()->data (), in_progress ()->size (), &v2_encoder_t::message_ready, true); }
sophomore_public/libzmq
src/v2_encoder.cpp
C++
gpl-3.0
2,428
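ZMTP/2.x framing, as produced above, starts with a flags byte (more_flag = 1, large_flag = 2, command_flag = 4), followed by a one-byte size or, when the large flag is set, an eight-byte network-order size, and then the body; unlike ZMTP/1.0 the size does not include the flags byte. A standalone sketch with an illustrative helper name follows.

#include <cassert>
#include <cstring>
#include <string>
#include <vector>

static std::vector<unsigned char> v2_frame (const std::string &body, bool more)
{
    std::vector<unsigned char> out;
    unsigned char flags = 0;
    if (more)
        flags |= 1; // more_flag
    if (body.size () > 255)
        flags |= 2; // large_flag
    out.push_back (flags);
    if (body.size () > 255) {
        const unsigned long long wire = body.size ();
        for (int i = 7; i >= 0; i--)
            out.push_back (static_cast<unsigned char> ((wire >> (i * 8)) & 0xff));
    } else {
        out.push_back (static_cast<unsigned char> (body.size ()));
    }
    out.insert (out.end (), body.begin (), body.end ());
    return out;
}

int main ()
{
    const std::vector<unsigned char> frame = v2_frame ("hello", true);
    assert (frame[0] == 1); // MORE set, neither LARGE nor COMMAND
    assert (frame[1] == 5); // size excludes the flags byte
    assert (std::memcmp (&frame[2], "hello", 5) == 0);
    return 0;
}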
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __ZMQ_V2_ENCODER_HPP_INCLUDED__ #define __ZMQ_V2_ENCODER_HPP_INCLUDED__ #include "encoder.hpp" namespace zmq { // Encoder for 0MQ framing protocol. Converts messages into data stream. class v2_encoder_t ZMQ_FINAL : public encoder_base_t<v2_encoder_t> { public: v2_encoder_t (size_t bufsize_); ~v2_encoder_t (); private: void size_ready (); void message_ready (); // flags byte + size byte (or 8 bytes) + sub/cancel byte unsigned char _tmp_buf[10]; ZMQ_NON_COPYABLE_NOR_MOVABLE (v2_encoder_t) }; } #endif
sophomore_public/libzmq
src/v2_encoder.hpp
C++
gpl-3.0
593
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __ZMQ_V2_PROTOCOL_HPP_INCLUDED__ #define __ZMQ_V2_PROTOCOL_HPP_INCLUDED__ namespace zmq { // Definition of constants for ZMTP/2.0 transport protocol. class v2_protocol_t { public: // Message flags. enum { more_flag = 1, large_flag = 2, command_flag = 4 }; }; } #endif
sophomore_public/libzmq
src/v2_protocol.hpp
C++
gpl-3.0
362
/* SPDX-License-Identifier: MPL-2.0 */ #include "precompiled.hpp" #include "v2_protocol.hpp" #include "v3_1_encoder.hpp" #include "msg.hpp" #include "likely.hpp" #include "wire.hpp" #include <limits.h> zmq::v3_1_encoder_t::v3_1_encoder_t (size_t bufsize_) : encoder_base_t<v3_1_encoder_t> (bufsize_) { // Write 0 bytes to the batch and go to message_ready state. next_step (NULL, 0, &v3_1_encoder_t::message_ready, true); } zmq::v3_1_encoder_t::~v3_1_encoder_t () { } void zmq::v3_1_encoder_t::message_ready () { // Encode flags. size_t size = in_progress ()->size (); size_t header_size = 2; // flags byte + size byte unsigned char &protocol_flags = _tmp_buf[0]; protocol_flags = 0; if (in_progress ()->flags () & msg_t::more) protocol_flags |= v2_protocol_t::more_flag; if (in_progress ()->flags () & msg_t::command || in_progress ()->is_subscribe () || in_progress ()->is_cancel ()) { protocol_flags |= v2_protocol_t::command_flag; if (in_progress ()->is_subscribe ()) size += zmq::msg_t::sub_cmd_name_size; else if (in_progress ()->is_cancel ()) size += zmq::msg_t::cancel_cmd_name_size; } // Calculate large_flag after command_flag. Subscribe or cancel commands // increase the message size. if (size > UCHAR_MAX) protocol_flags |= v2_protocol_t::large_flag; // Encode the message length. For messages less then 256 bytes, // the length is encoded as 8-bit unsigned integer. For larger // messages, 64-bit unsigned integer in network byte order is used. if (unlikely (size > UCHAR_MAX)) { put_uint64 (_tmp_buf + 1, size); header_size = 9; // flags byte + size 8 bytes } else { _tmp_buf[1] = static_cast<uint8_t> (size); } // Encode the sub/cancel command string. This is done in the encoder as // opposed to when the subscribe message is created to allow different // protocol behaviour on the wire in the v3.1 and legacy encoders. // It results in the work being done multiple times in case the sub // is sending the subscription/cancel to multiple pubs, but it cannot // be avoided. This processing can be moved to xsub once support for // ZMTP < 3.1 is dropped. if (in_progress ()->is_subscribe ()) { memcpy (_tmp_buf + header_size, zmq::sub_cmd_name, zmq::msg_t::sub_cmd_name_size); header_size += zmq::msg_t::sub_cmd_name_size; } else if (in_progress ()->is_cancel ()) { memcpy (_tmp_buf + header_size, zmq::cancel_cmd_name, zmq::msg_t::cancel_cmd_name_size); header_size += zmq::msg_t::cancel_cmd_name_size; } next_step (_tmp_buf, header_size, &v3_1_encoder_t::size_ready, false); } void zmq::v3_1_encoder_t::size_ready () { // Write message body into the buffer. next_step (in_progress ()->data (), in_progress ()->size (), &v3_1_encoder_t::message_ready, true); }
sophomore_public/libzmq
src/v3_1_encoder.cpp
C++
gpl-3.0
3,006
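The practical difference from the legacy encoders above is how subscriptions travel on the wire: ZMTP < 3.1 sends a data frame whose first body byte is 1 (subscribe) or 0 (cancel), while ZMTP 3.1 sends a command frame whose body starts with the length-prefixed command name. The arithmetic sketch below assumes the 10-byte SUBSCRIBE and 7-byte CANCEL command-name sizes referenced above as msg_t::sub_cmd_name_size and msg_t::cancel_cmd_name_size.

#include <cassert>
#include <cstddef>

int main ()
{
    const std::size_t topic_len = 1; // e.g. subscribing to "A"

    // Legacy (ZMTP < 3.1): body = [1][topic] -> one extra byte.
    const std::size_t legacy_body = 1 + topic_len;

    // ZMTP 3.1: body = [9]["SUBSCRIBE"][topic], command flag set on the frame.
    const std::size_t sub_cmd_name_size = 10; // assumption, see msg_t
    const std::size_t v3_1_body = sub_cmd_name_size + topic_len;

    assert (legacy_body == 2);
    assert (v3_1_body == 11);
    return 0;
}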
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __ZMQ_V3_1_ENCODER_HPP_INCLUDED__ #define __ZMQ_V3_1_ENCODER_HPP_INCLUDED__ #include "encoder.hpp" #include "msg.hpp" namespace zmq { // Encoder for 0MQ framing protocol. Converts messages into data stream. class v3_1_encoder_t ZMQ_FINAL : public encoder_base_t<v3_1_encoder_t> { public: v3_1_encoder_t (size_t bufsize_); ~v3_1_encoder_t () ZMQ_FINAL; private: void size_ready (); void message_ready (); unsigned char _tmp_buf[9 + zmq::msg_t::sub_cmd_name_size]; ZMQ_NON_COPYABLE_NOR_MOVABLE (v3_1_encoder_t) }; } #endif
sophomore_public/libzmq
src/v3_1_encoder.hpp
C++
gpl-3.0
605
///////////////////////////////////////////////////////////////////////////// // // VERSIONINFO resource // // http://msdn.microsoft.com/en-us/library/windows/desktop/aa381058(v=vs.85).aspx // @MAJOR@,@MINOR@,@BUILD@,@PATCH@ #define VER_FILEVERSION @ZMQ_VERSION_MAJOR@,@ZMQ_VERSION_MINOR@,@ZMQ_VERSION_PATCH@,0 #define VER_FILEVERSION_STR "@ZMQ_VERSION_MAJOR@.@ZMQ_VERSION_MINOR@.@ZMQ_VERSION_PATCH@.0\0" #define VER_PRODUCTVERSION VER_FILEVERSION #define VER_PRODUCTVERSION_STR VER_FILEVERSION_STR // versionID // Version-information resource identifier. This value must be 1. 1 VERSIONINFO //// fixed-info // Binary version number for the file. FILEVERSION VER_FILEVERSION // Binary version number for the product with which the file is distributed. PRODUCTVERSION VER_PRODUCTVERSION // Bits in the FILEFLAGS statement are valid. FILEFLAGSMASK 0x17L // Attributes of the file. // VS_FF_DEBUG = 1 : File contains debugging information or is compiled with debugging features enabled. // VS_FF_PATCHED = 4 : File has been modified and is not identical to the original shipping file of the // same version number. // VS_FF_PRERELEASE = 2 : File is a development version, not a commercially released product. // VS_FF_PRIVATEBUILD = 8 : File was not built using standard release procedures. // VS_FF_SPECIALBUILD = 20 : File was built by the original company using standard release procedures but is a // : variation of the standard file of the same version number. #ifdef _DEBUG FILEFLAGS 0x1L #else FILEFLAGS 0x2L #endif // Operating system for which this file was designed. // VOS_DOS = 0x10000 : File was designed for MS-DOS. // VOS_NT = 0x40000 : File was designed for 32-bit Windows. // VOS_WINDOWS16 = 0x1 : File was designed for 16-bit Windows. // VOS_WINDOWS32 = 0x4 : File was designed for 32-bit Windows. // VOS_DOS_WINDOWS16 = 0x10001 : File was designed for 16-bit Windows running with MS-DOS. // VOS_DOS_WINDOWS32 = 0x10004 : File was designed for 32-bit Windows running with MS-DOS. // VOS_NT_WINDOWS32 = 0x40004 : File was designed for 32-bit Windows. // NB: appears obsolete, nothing for x64. FILEOS 0x4L // General type of file. // VFT_APP = 0x1 : File contains an application. // VFT_DLL = 0x2 : File contains a dynamic-link library (DLL). // VFT_DRV = 0x3 : File contains a device driver. // VFT_FONT = 0x4 : File contains a font. // VFT_VXD = 0x5 : File contains a virtual device. // VFT_STATIC_LIB = 0x7 : File contains a static-link library. FILETYPE 0x2L // Function of the file. FILESUBTYPE 0x0L BEGIN BLOCK "StringFileInfo" BEGIN BLOCK "080904b0" BEGIN VALUE "CompanyName", "iMatix Corporation" VALUE "FileDescription", "ZeroMQ lightweight messaging kernel" VALUE "FileVersion", VER_FILEVERSION_STR VALUE "InternalName", "zeromq" VALUE "LegalCopyright", "Copyright (c) 2012 The ZeroMQ Authors." VALUE "OriginalFilename", "libzmq.dll" VALUE "ProductName", "ZeroMQ" VALUE "ProductVersion", VER_PRODUCTVERSION_STR END END BLOCK "VarFileInfo" BEGIN // langID, one of the following language codes. // 0x409 : U.S. English // 0x809 : U.K. English // charsetID, one of the following character-set identifiers. // 1200 : Unicode VALUE "Translation", 0x809, 1200 END END // end of file.
sophomore_public/libzmq
src/version.rc.in
in
gpl-3.0
3,521
/* SPDX-License-Identifier: MPL-2.0 */ #include "precompiled.hpp" #include "ip.hpp" #include "vmci.hpp" #include "vmci_address.hpp" #if defined ZMQ_HAVE_VMCI #include <cassert> #include <vmci_sockets.h> void zmq::tune_vmci_buffer_size (ctx_t *context_, fd_t sockfd_, uint64_t default_size_, uint64_t min_size_, uint64_t max_size_) { int family = context_->get_vmci_socket_family (); assert (family != -1); if (default_size_ != 0) { int rc = setsockopt (sockfd_, family, SO_VMCI_BUFFER_SIZE, (char *) &default_size_, sizeof default_size_); #if defined ZMQ_HAVE_WINDOWS wsa_assert (rc != SOCKET_ERROR); #else errno_assert (rc == 0); #endif } if (min_size_ != 0) { int rc = setsockopt (sockfd_, family, SO_VMCI_BUFFER_SIZE, (char *) &min_size_, sizeof min_size_); #if defined ZMQ_HAVE_WINDOWS wsa_assert (rc != SOCKET_ERROR); #else errno_assert (rc == 0); #endif } if (max_size_ != 0) { int rc = setsockopt (sockfd_, family, SO_VMCI_BUFFER_SIZE, (char *) &max_size_, sizeof max_size_); #if defined ZMQ_HAVE_WINDOWS wsa_assert (rc != SOCKET_ERROR); #else errno_assert (rc == 0); #endif } } #if defined ZMQ_HAVE_WINDOWS void zmq::tune_vmci_connect_timeout (ctx_t *context_, fd_t sockfd_, DWORD timeout_) #else void zmq::tune_vmci_connect_timeout (ctx_t *context_, fd_t sockfd_, struct timeval timeout_) #endif { int family = context_->get_vmci_socket_family (); assert (family != -1); int rc = setsockopt (sockfd_, family, SO_VMCI_CONNECT_TIMEOUT, (char *) &timeout_, sizeof timeout_); #if defined ZMQ_HAVE_WINDOWS wsa_assert (rc != SOCKET_ERROR); #else errno_assert (rc == 0); #endif } zmq::fd_t zmq::vmci_open_socket (const char *address_, const zmq::options_t &options_, zmq::vmci_address_t *out_vmci_addr_) { // Convert the textual address into address structure. int rc = out_vmci_addr_->resolve (address_); if (rc != 0) return retired_fd; // Create the socket. fd_t s = open_socket (out_vmci_addr_->family (), SOCK_STREAM, 0); if (s == retired_fd) { return retired_fd; } return s; } #endif
sophomore_public/libzmq
src/vmci.cpp
C++
gpl-3.0
2,637
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __ZMQ_VMCI_HPP_INCLUDED__ #define __ZMQ_VMCI_HPP_INCLUDED__ #include <string> #include "platform.hpp" #include "fd.hpp" #include "ctx.hpp" #if defined ZMQ_HAVE_VMCI #if defined ZMQ_HAVE_WINDOWS #include "windows.hpp" #else #include <sys/time.h> #endif namespace zmq { void tune_vmci_buffer_size (ctx_t *context_, fd_t sockfd_, uint64_t default_size_, uint64_t min_size_, uint64_t max_size_); #if defined ZMQ_HAVE_WINDOWS void tune_vmci_connect_timeout (ctx_t *context_, fd_t sockfd_, DWORD timeout_); #else void tune_vmci_connect_timeout (ctx_t *context_, fd_t sockfd_, struct timeval timeout_); #endif fd_t vmci_open_socket (const char *address_, const options_t &options_, vmci_address_t *out_vmci_addr_); } #endif #endif
sophomore_public/libzmq
src/vmci.hpp
C++
gpl-3.0
1,003
/* SPDX-License-Identifier: MPL-2.0 */ #include "precompiled.hpp" #include "vmci_address.hpp" #if defined(ZMQ_HAVE_VMCI) #include <climits> #include <string> #include <sstream> #include "err.hpp" zmq::vmci_address_t::vmci_address_t () { memset (&address, 0, sizeof address); } zmq::vmci_address_t::vmci_address_t (ctx_t *parent_) : parent (parent_) { memset (&address, 0, sizeof address); } zmq::vmci_address_t::vmci_address_t (const sockaddr *sa, socklen_t sa_len, ctx_t *parent_) : parent (parent_) { zmq_assert (sa && sa_len > 0); memset (&address, 0, sizeof address); if (sa->sa_family == parent->get_vmci_socket_family ()) memcpy (&address, sa, sa_len); } int zmq::vmci_address_t::resolve (const char *path_) { // Find the ':' at end that separates address from the port number. const char *delimiter = strrchr (path_, ':'); if (!delimiter) { errno = EINVAL; return -1; } // Separate the address/port. std::string addr_str (path_, delimiter - path_); std::string port_str (delimiter + 1); unsigned int cid = VMADDR_CID_ANY; unsigned int port = VMADDR_PORT_ANY; if (!addr_str.length ()) { errno = EINVAL; return -1; } else if (addr_str == "@") { cid = VMCISock_GetLocalCID (); if (cid == VMADDR_CID_ANY) { errno = ENODEV; return -1; } } else if (addr_str != "*" && addr_str != "-1") { const char *begin = addr_str.c_str (); char *end = NULL; unsigned long l = strtoul (begin, &end, 10); if ((l == 0 && end == begin) || (l == ULONG_MAX && errno == ERANGE) || l > UINT_MAX) { errno = EINVAL; return -1; } cid = static_cast<unsigned int> (l); } if (!port_str.length ()) { errno = EINVAL; return -1; } else if (port_str != "*" && port_str != "-1") { const char *begin = port_str.c_str (); char *end = NULL; unsigned long l = strtoul (begin, &end, 10); if ((l == 0 && end == begin) || (l == ULONG_MAX && errno == ERANGE) || l > UINT_MAX) { errno = EINVAL; return -1; } port = static_cast<unsigned int> (l); } address.svm_family = static_cast<sa_family_t> (parent->get_vmci_socket_family ()); address.svm_cid = cid; address.svm_port = port; return 0; } int zmq::vmci_address_t::to_string (std::string &addr_) const { if (address.svm_family != parent->get_vmci_socket_family ()) { addr_.clear (); return -1; } std::stringstream s; s << "vmci://"; if (address.svm_cid == VMADDR_CID_ANY) { s << "*"; } else { s << address.svm_cid; } s << ":"; if (address.svm_port == VMADDR_PORT_ANY) { s << "*"; } else { s << address.svm_port; } addr_ = s.str (); return 0; } const sockaddr *zmq::vmci_address_t::addr () const { return reinterpret_cast<const sockaddr *> (&address); } socklen_t zmq::vmci_address_t::addrlen () const { return static_cast<socklen_t> (sizeof address); } #if defined ZMQ_HAVE_WINDOWS unsigned short zmq::vmci_address_t::family () const #else sa_family_t zmq::vmci_address_t::family () const #endif { return parent->get_vmci_socket_family (); } #endif
sophomore_public/libzmq
src/vmci_address.cpp
C++
gpl-3.0
3,462
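resolve () above accepts a "cid:port" pair where either side may be a number or the "*"/"-1" wildcard, and the CID may also be "@" for the local context ID. A hypothetical sketch follows; it assumes a build with ZMQ_HAVE_VMCI and a ctx_t whose VMCI socket family has been initialised, so it is illustrative only.

#include "vmci_address.hpp"

#include <cassert>

void resolve_examples (zmq::ctx_t *ctx)
{
    zmq::vmci_address_t addr (ctx);

    // Explicit context ID and port.
    assert (addr.resolve ("2:5555") == 0);

    // "*" (or "-1") maps to VMADDR_CID_ANY / VMADDR_PORT_ANY.
    assert (addr.resolve ("*:*") == 0);

    // "@" asks VMCISock_GetLocalCID () for the local CID; this only succeeds
    // on a VMCI-capable host, so the return value is not asserted here.
    addr.resolve ("@:5555");
}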
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __ZMQ_VMCI_ADDRESS_HPP_INCLUDED__ #define __ZMQ_VMCI_ADDRESS_HPP_INCLUDED__ #include <string> #include "platform.hpp" #include "ctx.hpp" #if defined(ZMQ_HAVE_VMCI) #include <vmci_sockets.h> namespace zmq { class vmci_address_t { public: vmci_address_t (); vmci_address_t (ctx_t *parent_); vmci_address_t (const sockaddr *sa, socklen_t sa_len, ctx_t *parent_); // This function sets up the address for VMCI transport. int resolve (const char *path_); // The opposite to resolve() int to_string (std::string &addr_) const; #if defined ZMQ_HAVE_WINDOWS unsigned short family () const; #else sa_family_t family () const; #endif const sockaddr *addr () const; socklen_t addrlen () const; private: struct sockaddr_vm address; ctx_t *parent; ZMQ_NON_COPYABLE_NOR_MOVABLE (vmci_address_t) }; } #endif #endif
sophomore_public/libzmq
src/vmci_address.hpp
C++
gpl-3.0
921
/* SPDX-License-Identifier: MPL-2.0 */ #include "precompiled.hpp" #include "vmci_connecter.hpp" #if defined ZMQ_HAVE_VMCI #include <new> #include "io_thread.hpp" #include "platform.hpp" #include "random.hpp" #include "err.hpp" #include "ip.hpp" #include "address.hpp" #include "vmci_address.hpp" #include "vmci.hpp" #include "session_base.hpp" zmq::vmci_connecter_t::vmci_connecter_t (class io_thread_t *io_thread_, class session_base_t *session_, const options_t &options_, address_t *addr_, bool delayed_start_) : stream_connecter_base_t ( io_thread_, session_, options_, addr_, delayed_start_), _connect_timer_started (false) { zmq_assert (_addr->protocol == protocol_name::vmci); } zmq::vmci_connecter_t::~vmci_connecter_t () { zmq_assert (!_connect_timer_started); } void zmq::vmci_connecter_t::process_term (int linger_) { if (_connect_timer_started) { cancel_timer (connect_timer_id); _connect_timer_started = false; } stream_connecter_base_t::process_term (linger_); } void zmq::vmci_connecter_t::in_event () { // We are not polling for incoming data, so we are actually called // because of error here. However, we can get error on out event as well // on some platforms, so we'll simply handle both events in the same way. out_event (); } void zmq::vmci_connecter_t::out_event () { if (_connect_timer_started) { cancel_timer (connect_timer_id); _connect_timer_started = false; } // TODO this is still very similar to (t)ipc_connecter_t, maybe the // differences can be factored out rm_handle (); const fd_t fd = connect (); if (fd == retired_fd && ((options.reconnect_stop & ZMQ_RECONNECT_STOP_CONN_REFUSED) && errno == ECONNREFUSED)) { send_conn_failed (_session); close (); terminate (); return; } // Handle the error condition by attempt to reconnect. if (fd == retired_fd) { close (); add_reconnect_timer (); return; } tune_vmci_buffer_size (this->get_ctx (), fd, options.vmci_buffer_size, options.vmci_buffer_min_size, options.vmci_buffer_max_size); if (options.vmci_connect_timeout > 0) { #if defined ZMQ_HAVE_WINDOWS tune_vmci_connect_timeout (this->get_ctx (), fd, options.vmci_connect_timeout); #else struct timeval timeout = {0, options.vmci_connect_timeout * 1000}; tune_vmci_connect_timeout (this->get_ctx (), fd, timeout); #endif } create_engine ( fd, zmq::vmci_connecter_t::get_socket_name (fd, socket_end_local)); } std::string zmq::vmci_connecter_t::get_socket_name (zmq::fd_t fd_, socket_end_t socket_end_) const { struct sockaddr_storage ss; const zmq_socklen_t sl = get_socket_address (fd_, socket_end_, &ss); if (sl == 0) { return std::string (); } const vmci_address_t addr (reinterpret_cast<struct sockaddr *> (&ss), sl, this->get_ctx ()); std::string address_string; addr.to_string (address_string); return address_string; } void zmq::vmci_connecter_t::timer_event (int id_) { if (id_ == connect_timer_id) { _connect_timer_started = false; rm_handle (); close (); add_reconnect_timer (); } else stream_connecter_base_t::timer_event (id_); } void zmq::vmci_connecter_t::start_connecting () { // Open the connecting socket. const int rc = open (); // Connect may succeed in synchronous manner. if (rc == 0) { _handle = add_fd (_s); out_event (); } // Connection establishment may be delayed. Poll for its completion. else if (rc == -1 && errno == EINPROGRESS) { _handle = add_fd (_s); set_pollout (_handle); _socket->event_connect_delayed ( make_unconnected_connect_endpoint_pair (_endpoint), zmq_errno ()); // add userspace connect timeout add_connect_timer (); } // Handle any other error condition by eventual reconnect. 
else { if (_s != retired_fd) close (); add_reconnect_timer (); } } void zmq::vmci_connecter_t::add_connect_timer () { if (options.connect_timeout > 0) { add_timer (options.connect_timeout, connect_timer_id); _connect_timer_started = true; } } int zmq::vmci_connecter_t::open () { zmq_assert (_s == retired_fd); // Resolve the address if (_addr->resolved.vmci_addr != NULL) { LIBZMQ_DELETE (_addr->resolved.vmci_addr); } _addr->resolved.vmci_addr = new (std::nothrow) vmci_address_t (this->get_ctx ()); alloc_assert (_addr->resolved.vmci_addr); _s = vmci_open_socket (_addr->address.c_str (), options, _addr->resolved.vmci_addr); if (_s == retired_fd) { // TODO we should emit some event in this case! LIBZMQ_DELETE (_addr->resolved.vmci_addr); return -1; } zmq_assert (_addr->resolved.vmci_addr != NULL); // Set the socket to non-blocking mode so that we get async connect(). unblock_socket (_s); const vmci_address_t *const vmci_addr = _addr->resolved.vmci_addr; int rc; // Connect to the remote peer. #if defined ZMQ_HAVE_VXWORKS rc = ::connect (_s, (sockaddr *) vmci_addr->addr (), vmci_addr->addrlen ()); #else rc = ::connect (_s, vmci_addr->addr (), vmci_addr->addrlen ()); #endif // Connect was successful immediately. if (rc == 0) { return 0; } // Translate error codes indicating asynchronous connect has been // launched to a uniform EINPROGRESS. #ifdef ZMQ_HAVE_WINDOWS const int last_error = WSAGetLastError (); if (last_error == WSAEINPROGRESS || last_error == WSAEWOULDBLOCK) errno = EINPROGRESS; else errno = wsa_error_to_errno (last_error); #else if (errno == EINTR) errno = EINPROGRESS; #endif return -1; } zmq::fd_t zmq::vmci_connecter_t::connect () { // Async connect has finished. Check whether an error occurred int err = 0; #if defined ZMQ_HAVE_HPUX || defined ZMQ_HAVE_VXWORKS int len = sizeof err; #else socklen_t len = sizeof err; #endif const int rc = getsockopt (_s, SOL_SOCKET, SO_ERROR, reinterpret_cast<char *> (&err), &len); // Assert if the error was caused by 0MQ bug. // Networking problems are OK. No need to assert. #ifdef ZMQ_HAVE_WINDOWS zmq_assert (rc == 0); if (err != 0) { if (err == WSAEBADF || err == WSAENOPROTOOPT || err == WSAENOTSOCK || err == WSAENOBUFS) { wsa_assert_no (err); } errno = wsa_error_to_errno (err); return retired_fd; } #else // Following code should handle both Berkeley-derived socket // implementations and Solaris. if (rc == -1) err = errno; if (err != 0) { errno = err; #if !defined(TARGET_OS_IPHONE) || !TARGET_OS_IPHONE errno_assert (errno != EBADF && errno != ENOPROTOOPT && errno != ENOTSOCK && errno != ENOBUFS); #else errno_assert (errno != ENOPROTOOPT && errno != ENOTSOCK && errno != ENOBUFS); #endif return retired_fd; } #endif // Return the newly connected socket. const fd_t result = _s; _s = retired_fd; return result; } #endif
sophomore_public/libzmq
src/vmci_connecter.cpp
C++
gpl-3.0
7,701
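/*  --- Illustrative sketch, not part of libzmq ---
    The open()/connect() pair above implements the standard non-blocking
    connect pattern: put the socket into non-blocking mode, treat EINPROGRESS
    as "still connecting", wait for writability, then read SO_ERROR to learn
    the real outcome.  The helper below shows the same sequence on a plain
    POSIX TCP socket; the function name, address family and timeout value are
    assumptions made only for this example.  */
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>
#include <cerrno>
#include <cstring>

static int async_connect_sketch (const char *ip_, unsigned short port_)
{
    const int fd = socket (AF_INET, SOCK_STREAM, 0);
    if (fd == -1)
        return -1;

    //  Non-blocking mode makes connect () return immediately.
    fcntl (fd, F_SETFL, fcntl (fd, F_GETFL, 0) | O_NONBLOCK);

    sockaddr_in addr;
    memset (&addr, 0, sizeof addr);
    addr.sin_family = AF_INET;
    addr.sin_port = htons (port_);
    inet_pton (AF_INET, ip_, &addr.sin_addr);

    const int rc =
      ::connect (fd, reinterpret_cast<sockaddr *> (&addr), sizeof addr);
    if (rc == 0)
        return fd; //  connected synchronously, like the rc == 0 branch above
    if (errno != EINPROGRESS) {
        close (fd);
        return -1; //  immediate failure -> the "eventual reconnect" branch
    }

    //  Wait for writability; connection errors are also reported this way,
    //  which is why in_event () above simply forwards to out_event ().
    pollfd pfd;
    pfd.fd = fd;
    pfd.events = POLLOUT;
    pfd.revents = 0;
    if (poll (&pfd, 1, 5000) <= 0) {
        close (fd);
        return -1; //  timeout, the role played by the connect timer above
    }

    //  SO_ERROR tells us whether the asynchronous connect really succeeded.
    int err = 0;
    socklen_t len = sizeof err;
    getsockopt (fd, SOL_SOCKET, SO_ERROR, &err, &len);
    if (err != 0) {
        errno = err;
        close (fd);
        return -1;
    }
    return fd;
}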
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __ZMQ_VMCI_CONNECTER_HPP_INCLUDED__ #define __ZMQ_VMCI_CONNECTER_HPP_INCLUDED__ #include "platform.hpp" #if defined ZMQ_HAVE_VMCI #include "fd.hpp" #include "own.hpp" #include "stdint.hpp" #include "io_object.hpp" #include "stream_connecter_base.hpp" namespace zmq { class io_thread_t; class session_base_t; struct address_t; class vmci_connecter_t ZMQ_FINAL : public stream_connecter_base_t { public: // If 'delayed_start' is true connecter first waits for a while, // then starts connection process. vmci_connecter_t (zmq::io_thread_t *io_thread_, zmq::session_base_t *session_, const options_t &options_, address_t *addr_, bool delayed_start_); ~vmci_connecter_t (); protected: std::string get_socket_name (fd_t fd_, socket_end_t socket_end_) const; private: // ID of the timer used to check the connect timeout, must be different from stream_connecter_base_t::reconnect_timer_id. enum { connect_timer_id = 2 }; // Handlers for incoming commands. void process_term (int linger_); // Handlers for I/O events. void in_event (); void out_event (); void timer_event (int id_); // Internal function to start the actual connection establishment. void start_connecting (); // Internal function to add a connect timer void add_connect_timer (); // Internal function to return a reconnect backoff delay. // Will modify the current_reconnect_ivl used for next call // Returns the currently used interval int get_new_reconnect_ivl (); // Open VMCI connecting socket. Returns -1 in case of error, // 0 if connect was successful immediately. Returns -1 with // EAGAIN errno if async connect was launched. int open (); // Get the file descriptor of newly created connection. Returns // retired_fd if the connection was unsuccessful. fd_t connect (); // True iff a timer has been started. bool _connect_timer_started; ZMQ_NON_COPYABLE_NOR_MOVABLE (vmci_connecter_t) }; } #endif #endif
sophomore_public/libzmq
src/vmci_connecter.hpp
C++
gpl-3.0
2,189
/* SPDX-License-Identifier: MPL-2.0 */ #include "precompiled.hpp" #include "vmci_listener.hpp" #if defined ZMQ_HAVE_VMCI #include <new> //#include "stream_engine.hpp" #include "vmci_address.hpp" #include "io_thread.hpp" #include "session_base.hpp" #include "config.hpp" #include "err.hpp" #include "ip.hpp" #include "socket_base.hpp" #include "vmci.hpp" #if defined ZMQ_HAVE_WINDOWS #include "windows.hpp" #else #include <unistd.h> #include <fcntl.h> #endif zmq::vmci_listener_t::vmci_listener_t (io_thread_t *io_thread_, socket_base_t *socket_, const options_t &options_) : stream_listener_base_t (io_thread_, socket_, options_) { } void zmq::vmci_listener_t::in_event () { fd_t fd = accept (); // If connection was reset by the peer in the meantime, just ignore it. if (fd == retired_fd) { _socket->event_accept_failed ( make_unconnected_bind_endpoint_pair (_endpoint), zmq_errno ()); return; } tune_vmci_buffer_size (this->get_ctx (), fd, options.vmci_buffer_size, options.vmci_buffer_min_size, options.vmci_buffer_max_size); if (options.vmci_connect_timeout > 0) { #if defined ZMQ_HAVE_WINDOWS tune_vmci_connect_timeout (this->get_ctx (), fd, options.vmci_connect_timeout); #else struct timeval timeout = {0, options.vmci_connect_timeout * 1000}; tune_vmci_connect_timeout (this->get_ctx (), fd, timeout); #endif } // Create the engine object for this connection. create_engine (fd); } std::string zmq::vmci_listener_t::get_socket_name (zmq::fd_t fd_, socket_end_t socket_end_) const { struct sockaddr_storage ss; const zmq_socklen_t sl = get_socket_address (fd_, socket_end_, &ss); if (sl == 0) { return std::string (); } const vmci_address_t addr (reinterpret_cast<struct sockaddr *> (&ss), sl, this->get_ctx ()); std::string address_string; addr.to_string (address_string); return address_string; } int zmq::vmci_listener_t::set_local_address (const char *addr_) { // Create addr on stack for auto-cleanup std::string addr (addr_); // Initialise the address structure. vmci_address_t address (this->get_ctx ()); int rc = address.resolve (addr.c_str ()); if (rc != 0) return -1; // Create a listening socket. _s = open_socket (this->get_ctx ()->get_vmci_socket_family (), SOCK_STREAM, 0); #ifdef ZMQ_HAVE_WINDOWS if (s == INVALID_SOCKET) { errno = wsa_error_to_errno (WSAGetLastError ()); return -1; } #if !defined _WIN32_WCE // On Windows, preventing sockets to be inherited by child processes. BOOL brc = SetHandleInformation ((HANDLE) _s, HANDLE_FLAG_INHERIT, 0); win_assert (brc); #endif #else if (_s == -1) return -1; #endif address.to_string (_endpoint); // Bind the socket. rc = bind (_s, address.addr (), address.addrlen ()); #ifdef ZMQ_HAVE_WINDOWS if (rc == SOCKET_ERROR) { errno = wsa_error_to_errno (WSAGetLastError ()); goto error; } #else if (rc != 0) goto error; #endif // Listen for incoming connections. rc = listen (_s, options.backlog); #ifdef ZMQ_HAVE_WINDOWS if (rc == SOCKET_ERROR) { errno = wsa_error_to_errno (WSAGetLastError ()); goto error; } #else if (rc != 0) goto error; #endif _socket->event_listening (make_unconnected_bind_endpoint_pair (_endpoint), _s); return 0; error: int err = errno; close (); errno = err; return -1; } zmq::fd_t zmq::vmci_listener_t::accept () { // Accept one connection and deal with different failure modes. // The situation where connection cannot be accepted due to insufficient // resources is considered valid and treated by ignoring the connection. 
zmq_assert (_s != retired_fd); fd_t sock = ::accept (_s, NULL, NULL); #ifdef ZMQ_HAVE_WINDOWS if (sock == INVALID_SOCKET) { wsa_assert (WSAGetLastError () == WSAEWOULDBLOCK || WSAGetLastError () == WSAECONNRESET || WSAGetLastError () == WSAEMFILE || WSAGetLastError () == WSAENOBUFS); return retired_fd; } #if !defined _WIN32_WCE // On Windows, preventing sockets to be inherited by child processes. BOOL brc = SetHandleInformation ((HANDLE) sock, HANDLE_FLAG_INHERIT, 0); win_assert (brc); #endif #else if (sock == -1) { errno_assert (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR || errno == ECONNABORTED || errno == EPROTO || errno == ENOBUFS || errno == ENOMEM || errno == EMFILE || errno == ENFILE); return retired_fd; } #endif // Race condition can cause socket not to be closed (if fork happens // between accept and this point). #ifdef FD_CLOEXEC int rc = fcntl (sock, F_SETFD, FD_CLOEXEC); errno_assert (rc != -1); #endif return sock; } #endif
sophomore_public/libzmq
src/vmci_listener.cpp
C++
gpl-3.0
5,256
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __ZMQ_VMCI_LISTENER_HPP_INCLUDED__ #define __ZMQ_VMCI_LISTENER_HPP_INCLUDED__ #include "platform.hpp" #if defined ZMQ_HAVE_VMCI #include <string> #include "fd.hpp" #include "vmci_address.hpp" #include "stream_listener_base.hpp" namespace zmq { class vmci_listener_t ZMQ_FINAL : public stream_listener_base_t { public: vmci_listener_t (zmq::io_thread_t *io_thread_, zmq::socket_base_t *socket_, const options_t &options_); // Set address to listen on. int set_local_address (const char *addr_); protected: std::string get_socket_name (fd_t fd_, socket_end_t socket_end_) const; private: // Handlers for I/O events. void in_event (); // Accept the new connection. Returns the file descriptor of the // newly created connection. The function may return retired_fd // if the connection was dropped while waiting in the listen backlog. fd_t accept (); int create_socket (const char *addr_); // Address to listen on. vmci_address_t _address; ZMQ_NON_COPYABLE_NOR_MOVABLE (vmci_listener_t) }; } #endif #endif
sophomore_public/libzmq
src/vmci_listener.hpp
C++
gpl-3.0
1,174
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __ZMQ_WINDOWS_HPP_INCLUDED__ #define __ZMQ_WINDOWS_HPP_INCLUDED__ #ifndef _CRT_SECURE_NO_WARNINGS #define _CRT_SECURE_NO_WARNINGS #endif #ifndef NOMINMAX #define NOMINMAX // Macros min(a,b) and max(a,b) #endif // Set target version to Windows Server 2008, Windows Vista or higher. // Windows XP (0x0501) is supported but without client & server socket types. #if !defined _WIN32_WINNT && !defined ZMQ_HAVE_WINDOWS_UWP #define _WIN32_WINNT 0x0600 #endif #ifdef __MINGW32__ // Require Windows XP or higher with MinGW for getaddrinfo(). #if (_WIN32_WINNT >= 0x0501) #else #error You need at least Windows XP target #endif #endif #include <winsock2.h> #include <windows.h> #include <mswsock.h> #include <iphlpapi.h> #include <string> #include <vector> #if !defined __MINGW32__ #include <mstcpip.h> #endif // Workaround missing mstcpip.h in mingw32 (MinGW64 provides this) // __MINGW64_VERSION_MAJOR is only defined when using in mingw-w64 #if defined __MINGW32__ && !defined SIO_KEEPALIVE_VALS \ && !defined __MINGW64_VERSION_MAJOR struct tcp_keepalive { u_long onoff; u_long keepalivetime; u_long keepaliveinterval; }; #define SIO_KEEPALIVE_VALS _WSAIOW (IOC_VENDOR, 4) #endif #include <ws2tcpip.h> #include <ipexport.h> #if !defined _WIN32_WCE #include <process.h> #endif #if defined ZMQ_IOTHREAD_POLLER_USE_POLL || defined ZMQ_POLL_BASED_ON_POLL static inline int poll (struct pollfd *pfd, unsigned long nfds, int timeout) { return WSAPoll (pfd, nfds, timeout); } #endif // In MinGW environment AI_NUMERICSERV is not defined. #ifndef AI_NUMERICSERV #define AI_NUMERICSERV 0x0400 #endif // Need unlink() and rmdir() functions that take utf-8 encoded file path. static inline std::wstring utf8_to_utf16 (const char *utf8_string) { std::wstring retVal; if (utf8_string && *utf8_string) { const int utf16_length = ::MultiByteToWideChar ( CP_UTF8, MB_ERR_INVALID_CHARS, utf8_string, -1, // assume the input string is null-terminated NULL, 0); if (utf16_length > 0) { retVal.resize (utf16_length); const int conversion_result = ::MultiByteToWideChar ( CP_UTF8, MB_ERR_INVALID_CHARS, utf8_string, -1, // assume the input string is null-terminated &retVal[0], static_cast<int> (retVal.size ())); if (conversion_result == 0) retVal.clear (); } } return retVal; } static inline int unlink_utf8 (const char *filename) { return _wunlink (utf8_to_utf16 (filename).c_str ()); } static inline int rmdir_utf8 (const char *filename) { return _wrmdir (utf8_to_utf16 (filename).c_str ()); } // In MSVC prior to v14, snprintf is not available // The closest implementation is the _snprintf_s function #if defined(_MSC_VER) && _MSC_VER < 1900 #define snprintf(buffer_, count_, format_, ...) \ _snprintf_s (buffer_, count_, _TRUNCATE, format_, __VA_ARGS__) #endif // Workaround missing struct sockaddr_un in afunix.h. // Fix #3949. #if defined(ZMQ_HAVE_IPC) && !defined(ZMQ_HAVE_STRUCT_SOCKADDR_UN) struct sockaddr_un { ADDRESS_FAMILY sun_family; /* AF_UNIX */ char sun_path[108]; /* pathname */ }; #endif #endif
sophomore_public/libzmq
src/windows.hpp
C++
gpl-3.0
3,333
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __ZMQ_WIRE_HPP_INCLUDED__ #define __ZMQ_WIRE_HPP_INCLUDED__ #include "stdint.hpp" namespace zmq { // Helper functions to convert different integer types to/from network // byte order. inline void put_uint8 (unsigned char *buffer_, uint8_t value_) { *buffer_ = value_; } inline uint8_t get_uint8 (const unsigned char *buffer_) { return *buffer_; } inline void put_uint16 (unsigned char *buffer_, uint16_t value_) { buffer_[0] = static_cast<unsigned char> (((value_) >> 8) & 0xff); buffer_[1] = static_cast<unsigned char> (value_ & 0xff); } inline uint16_t get_uint16 (const unsigned char *buffer_) { return ((static_cast<uint16_t> (buffer_[0])) << 8) | (static_cast<uint16_t> (buffer_[1])); } inline void put_uint32 (unsigned char *buffer_, uint32_t value_) { buffer_[0] = static_cast<unsigned char> (((value_) >> 24) & 0xff); buffer_[1] = static_cast<unsigned char> (((value_) >> 16) & 0xff); buffer_[2] = static_cast<unsigned char> (((value_) >> 8) & 0xff); buffer_[3] = static_cast<unsigned char> (value_ & 0xff); } inline uint32_t get_uint32 (const unsigned char *buffer_) { return ((static_cast<uint32_t> (buffer_[0])) << 24) | ((static_cast<uint32_t> (buffer_[1])) << 16) | ((static_cast<uint32_t> (buffer_[2])) << 8) | (static_cast<uint32_t> (buffer_[3])); } inline void put_uint64 (unsigned char *buffer_, uint64_t value_) { buffer_[0] = static_cast<unsigned char> (((value_) >> 56) & 0xff); buffer_[1] = static_cast<unsigned char> (((value_) >> 48) & 0xff); buffer_[2] = static_cast<unsigned char> (((value_) >> 40) & 0xff); buffer_[3] = static_cast<unsigned char> (((value_) >> 32) & 0xff); buffer_[4] = static_cast<unsigned char> (((value_) >> 24) & 0xff); buffer_[5] = static_cast<unsigned char> (((value_) >> 16) & 0xff); buffer_[6] = static_cast<unsigned char> (((value_) >> 8) & 0xff); buffer_[7] = static_cast<unsigned char> (value_ & 0xff); } inline uint64_t get_uint64 (const unsigned char *buffer_) { return ((static_cast<uint64_t> (buffer_[0])) << 56) | ((static_cast<uint64_t> (buffer_[1])) << 48) | ((static_cast<uint64_t> (buffer_[2])) << 40) | ((static_cast<uint64_t> (buffer_[3])) << 32) | ((static_cast<uint64_t> (buffer_[4])) << 24) | ((static_cast<uint64_t> (buffer_[5])) << 16) | ((static_cast<uint64_t> (buffer_[6])) << 8) | (static_cast<uint64_t> (buffer_[7])); } } #endif
sophomore_public/libzmq
src/wire.hpp
C++
gpl-3.0
2,561
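/*  --- Illustrative usage sketch, not part of libzmq ---
    The helpers above serialise integers in network byte order (big-endian:
    most significant byte first), independently of host endianness.  The
    small self-check below assumes wire.hpp is on the include path.  */
#include <cassert>
#include <cstdint>
#include <cstdio>
#include "wire.hpp"

int main ()
{
    unsigned char buf[8];

    zmq::put_uint32 (buf, 0x12345678u);
    //  Most significant byte lands in buf[0].
    assert (buf[0] == 0x12 && buf[1] == 0x34 && buf[2] == 0x56
            && buf[3] == 0x78);
    assert (zmq::get_uint32 (buf) == 0x12345678u);

    zmq::put_uint64 (buf, UINT64_C (0x0102030405060708));
    assert (zmq::get_uint64 (buf) == UINT64_C (0x0102030405060708));

    printf ("wire round-trips ok\n");
    return 0;
}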
/* SPDX-License-Identifier: MPL-2.0 */ #include "precompiled.hpp" #include <string> #include <sstream> #include "macros.hpp" #include "ws_address.hpp" #include "stdint.hpp" #include "err.hpp" #include "ip.hpp" #ifndef ZMQ_HAVE_WINDOWS #include <sys/types.h> #include <arpa/inet.h> #include <netinet/tcp.h> #include <net/if.h> #include <netdb.h> #include <ctype.h> #include <unistd.h> #include <stdlib.h> #endif #include <limits.h> zmq::ws_address_t::ws_address_t () { memset (&_address, 0, sizeof (_address)); } zmq::ws_address_t::ws_address_t (const sockaddr *sa_, socklen_t sa_len_) { zmq_assert (sa_ && sa_len_ > 0); memset (&_address, 0, sizeof (_address)); if (sa_->sa_family == AF_INET && sa_len_ >= static_cast<socklen_t> (sizeof (_address.ipv4))) memcpy (&_address.ipv4, sa_, sizeof (_address.ipv4)); else if (sa_->sa_family == AF_INET6 && sa_len_ >= static_cast<socklen_t> (sizeof (_address.ipv6))) memcpy (&_address.ipv6, sa_, sizeof (_address.ipv6)); _path = std::string (""); char hbuf[NI_MAXHOST]; const int rc = getnameinfo (addr (), addrlen (), hbuf, sizeof (hbuf), NULL, 0, NI_NUMERICHOST); if (rc != 0) { _host = std::string ("localhost"); return; } std::ostringstream os; if (_address.family () == AF_INET6) os << std::string ("["); os << std::string (hbuf); if (_address.family () == AF_INET6) os << std::string ("]"); _host = os.str (); } int zmq::ws_address_t::resolve (const char *name_, bool local_, bool ipv6_) { // find the host part, It's important to use str*r*chr to only get // the latest colon since IPv6 addresses use colons as delemiters. const char *delim = strrchr (name_, ':'); if (delim == NULL) { errno = EINVAL; return -1; } _host = std::string (name_, delim - name_); // find the path part, which is optional delim = strrchr (name_, '/'); std::string host_name; if (delim) { _path = std::string (delim); // remove the path, otherwise resolving the port will fail with wildcard host_name = std::string (name_, delim - name_); } else { _path = std::string ("/"); host_name = name_; } ip_resolver_options_t resolver_opts; resolver_opts.bindable (local_) .allow_dns (!local_) .allow_nic_name (local_) .ipv6 (ipv6_) .allow_path (true) .expect_port (true); ip_resolver_t resolver (resolver_opts); return resolver.resolve (&_address, host_name.c_str ()); } int zmq::ws_address_t::to_string (std::string &addr_) const { std::ostringstream os; os << std::string ("ws://") << host () << std::string (":") << _address.port () << _path; addr_ = os.str (); return 0; } const sockaddr *zmq::ws_address_t::addr () const { return _address.as_sockaddr (); } socklen_t zmq::ws_address_t::addrlen () const { return _address.sockaddr_len (); } const char *zmq::ws_address_t::host () const { return _host.c_str (); } const char *zmq::ws_address_t::path () const { return _path.c_str (); } #if defined ZMQ_HAVE_WINDOWS unsigned short zmq::ws_address_t::family () const #else sa_family_t zmq::ws_address_t::family () const #endif { return _address.family (); }
sophomore_public/libzmq
src/ws_address.cpp
C++
gpl-3.0
3,359
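/*  --- Illustrative sketch, not part of libzmq ---
    ws_address_t::resolve () above splits an endpoint such as
    "example.com:8080/chat" or "[::1]:5555" into host, port and path: the
    *last* ':' delimits the port (IPv6 literals contain ':' themselves), the
    optional path starts at the last '/' and defaults to "/", and the path is
    stripped before the host:port part is handed to the IP resolver.  The
    helper name and signature below are invented for this example and only
    approximate that logic.  */
#include <cstdio>
#include <string>

static bool split_ws_endpoint (const std::string &name_,
                               std::string &host_,
                               std::string &host_port_,
                               std::string &path_)
{
    //  No ':' at all means no port was given - resolve () fails with EINVAL.
    const size_t colon = name_.rfind (':');
    if (colon == std::string::npos)
        return false;
    host_ = name_.substr (0, colon);

    //  Optional path; what is resolved is host:port without the path.
    const size_t slash = name_.rfind ('/');
    if (slash == std::string::npos) {
        path_ = "/";
        host_port_ = name_;
    } else {
        path_ = name_.substr (slash);
        host_port_ = name_.substr (0, slash);
    }
    return true;
}

int main ()
{
    std::string host, host_port, path;
    if (split_ws_endpoint ("example.com:8080/chat", host, host_port, path))
        printf ("host=%s  resolver input=%s  path=%s\n", host.c_str (),
                host_port.c_str (), path.c_str ());
    //  Prints: host=example.com  resolver input=example.com:8080  path=/chat
    return 0;
}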
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __ZMQ_WS_ADDRESS_HPP_INCLUDED__ #define __ZMQ_WS_ADDRESS_HPP_INCLUDED__ #if !defined ZMQ_HAVE_WINDOWS #include <sys/socket.h> #include <netinet/in.h> #endif #include "ip_resolver.hpp" namespace zmq { class ws_address_t { public: ws_address_t (); ws_address_t (const sockaddr *sa_, socklen_t sa_len_); // This function translates textual WS address into an address // structure. If 'local' is true, names are resolved as local interface // names. If it is false, names are resolved as remote hostnames. // If 'ipv6' is true, the name may resolve to IPv6 address. int resolve (const char *name_, bool local_, bool ipv6_); // The opposite to resolve() int to_string (std::string &addr_) const; #if defined ZMQ_HAVE_WINDOWS unsigned short family () const; #else sa_family_t family () const; #endif const sockaddr *addr () const; socklen_t addrlen () const; const char *host () const; const char *path () const; protected: ip_addr_t _address; private: std::string _host; std::string _path; }; } #endif
sophomore_public/libzmq
src/ws_address.hpp
C++
gpl-3.0
1,138
/* SPDX-License-Identifier: MPL-2.0 */ #include "precompiled.hpp" #include <new> #include <string> #include "macros.hpp" #include "ws_connecter.hpp" #include "io_thread.hpp" #include "err.hpp" #include "ip.hpp" #include "tcp.hpp" #include "address.hpp" #include "ws_address.hpp" #include "ws_engine.hpp" #include "session_base.hpp" #ifdef ZMQ_HAVE_WSS #include "wss_engine.hpp" #include "wss_address.hpp" #endif #if !defined ZMQ_HAVE_WINDOWS #include <unistd.h> #include <sys/types.h> #include <sys/socket.h> #include <arpa/inet.h> #include <netinet/tcp.h> #include <netinet/in.h> #include <netdb.h> #include <fcntl.h> #ifdef ZMQ_HAVE_VXWORKS #include <sockLib.h> #endif #ifdef ZMQ_HAVE_OPENVMS #include <ioctl.h> #endif #endif #ifdef __APPLE__ #include <TargetConditionals.h> #endif zmq::ws_connecter_t::ws_connecter_t (class io_thread_t *io_thread_, class session_base_t *session_, const options_t &options_, address_t *addr_, bool delayed_start_, bool wss_, const std::string &tls_hostname_) : stream_connecter_base_t ( io_thread_, session_, options_, addr_, delayed_start_), _connect_timer_started (false), _wss (wss_), _hostname (tls_hostname_) { } zmq::ws_connecter_t::~ws_connecter_t () { zmq_assert (!_connect_timer_started); } void zmq::ws_connecter_t::process_term (int linger_) { if (_connect_timer_started) { cancel_timer (connect_timer_id); _connect_timer_started = false; } stream_connecter_base_t::process_term (linger_); } void zmq::ws_connecter_t::out_event () { if (_connect_timer_started) { cancel_timer (connect_timer_id); _connect_timer_started = false; } // TODO this is still very similar to (t)ipc_connecter_t, maybe the // differences can be factored out rm_handle (); const fd_t fd = connect (); // Handle the error condition by attempt to reconnect. if (fd == retired_fd || !tune_socket (fd)) { close (); add_reconnect_timer (); return; } if (_wss) #ifdef ZMQ_HAVE_WSS create_engine (fd, get_socket_name<wss_address_t> (fd, socket_end_local)); #else assert (false); #endif else create_engine (fd, get_socket_name<ws_address_t> (fd, socket_end_local)); } void zmq::ws_connecter_t::timer_event (int id_) { if (id_ == connect_timer_id) { _connect_timer_started = false; rm_handle (); close (); add_reconnect_timer (); } else stream_connecter_base_t::timer_event (id_); } void zmq::ws_connecter_t::start_connecting () { // Open the connecting socket. const int rc = open (); // Connect may succeed in synchronous manner. if (rc == 0) { _handle = add_fd (_s); out_event (); } // Connection establishment may be delayed. Poll for its completion. else if (rc == -1 && errno == EINPROGRESS) { _handle = add_fd (_s); set_pollout (_handle); _socket->event_connect_delayed ( make_unconnected_connect_endpoint_pair (_endpoint), zmq_errno ()); // add userspace connect timeout add_connect_timer (); } // Handle any other error condition by eventual reconnect. else { if (_s != retired_fd) close (); add_reconnect_timer (); } } void zmq::ws_connecter_t::add_connect_timer () { if (options.connect_timeout > 0) { add_timer (options.connect_timeout, connect_timer_id); _connect_timer_started = true; } } int zmq::ws_connecter_t::open () { zmq_assert (_s == retired_fd); tcp_address_t tcp_addr; _s = tcp_open_socket (_addr->address.c_str (), options, false, true, &tcp_addr); if (_s == retired_fd) return -1; // Set the socket to non-blocking mode so that we get async connect(). unblock_socket (_s); // Connect to the remote peer. 
#ifdef ZMQ_HAVE_VXWORKS int rc = ::connect (_s, (sockaddr *) tcp_addr.addr (), tcp_addr.addrlen ()); #else const int rc = ::connect (_s, tcp_addr.addr (), tcp_addr.addrlen ()); #endif // Connect was successful immediately. if (rc == 0) { return 0; } // Translate error codes indicating asynchronous connect has been // launched to a uniform EINPROGRESS. #ifdef ZMQ_HAVE_WINDOWS const int last_error = WSAGetLastError (); if (last_error == WSAEINPROGRESS || last_error == WSAEWOULDBLOCK) errno = EINPROGRESS; else errno = wsa_error_to_errno (last_error); #else if (errno == EINTR) errno = EINPROGRESS; #endif return -1; } zmq::fd_t zmq::ws_connecter_t::connect () { // Async connect has finished. Check whether an error occurred int err = 0; #if defined ZMQ_HAVE_HPUX || defined ZMQ_HAVE_VXWORKS int len = sizeof err; #else socklen_t len = sizeof err; #endif const int rc = getsockopt (_s, SOL_SOCKET, SO_ERROR, reinterpret_cast<char *> (&err), &len); // Assert if the error was caused by 0MQ bug. // Networking problems are OK. No need to assert. #ifdef ZMQ_HAVE_WINDOWS zmq_assert (rc == 0); if (err != 0) { if (err == WSAEBADF || err == WSAENOPROTOOPT || err == WSAENOTSOCK || err == WSAENOBUFS) { wsa_assert_no (err); } return retired_fd; } #else // Following code should handle both Berkeley-derived socket // implementations and Solaris. if (rc == -1) err = errno; if (err != 0) { errno = err; #if !defined(TARGET_OS_IPHONE) || !TARGET_OS_IPHONE errno_assert (errno != EBADF && errno != ENOPROTOOPT && errno != ENOTSOCK && errno != ENOBUFS); #else errno_assert (errno != ENOPROTOOPT && errno != ENOTSOCK && errno != ENOBUFS); #endif return retired_fd; } #endif // Return the newly connected socket. const fd_t result = _s; _s = retired_fd; return result; } bool zmq::ws_connecter_t::tune_socket (const fd_t fd_) { const int rc = tune_tcp_socket (fd_) | tune_tcp_maxrt (fd_, options.tcp_maxrt); return rc == 0; } void zmq::ws_connecter_t::create_engine (fd_t fd_, const std::string &local_address_) { const endpoint_uri_pair_t endpoint_pair (local_address_, _endpoint, endpoint_type_connect); // Create the engine object for this connection. i_engine *engine = NULL; if (_wss) { #ifdef ZMQ_HAVE_WSS engine = new (std::nothrow) wss_engine_t (fd_, options, endpoint_pair, *_addr->resolved.ws_addr, true, NULL, _hostname); #else LIBZMQ_UNUSED (_hostname); assert (false); #endif } else engine = new (std::nothrow) ws_engine_t ( fd_, options, endpoint_pair, *_addr->resolved.ws_addr, true); alloc_assert (engine); // Attach the engine to the corresponding session object. send_attach (_session, engine); // Shut the connecter down. terminate (); _socket->event_connected (endpoint_pair, fd_); }
sophomore_public/libzmq
src/ws_connecter.cpp
C++
gpl-3.0
7,436
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __WS_CONNECTER_HPP_INCLUDED__ #define __WS_CONNECTER_HPP_INCLUDED__ #include "fd.hpp" #include "stdint.hpp" #include "stream_connecter_base.hpp" namespace zmq { class ws_connecter_t ZMQ_FINAL : public stream_connecter_base_t { public: // If 'delayed_start' is true connecter first waits for a while, // then starts connection process. ws_connecter_t (zmq::io_thread_t *io_thread_, zmq::session_base_t *session_, const options_t &options_, address_t *addr_, bool delayed_start_, bool wss_, const std::string &tls_hostname_); ~ws_connecter_t (); protected: void create_engine (fd_t fd, const std::string &local_address_); private: // ID of the timer used to check the connect timeout, must be different from stream_connecter_base_t::reconnect_timer_id. enum { connect_timer_id = 2 }; // Handlers for incoming commands. void process_term (int linger_); // Handlers for I/O events. void out_event (); void timer_event (int id_); // Internal function to start the actual connection establishment. void start_connecting (); // Internal function to add a connect timer void add_connect_timer (); // Open TCP connecting socket. Returns -1 in case of error, // 0 if connect was successful immediately. Returns -1 with // EAGAIN errno if async connect was launched. int open (); // Get the file descriptor of newly created connection. Returns // retired_fd if the connection was unsuccessful. fd_t connect (); // Tunes a connected socket. bool tune_socket (fd_t fd_); // True iff a timer has been started. bool _connect_timer_started; bool _wss; const std::string &_hostname; ZMQ_NON_COPYABLE_NOR_MOVABLE (ws_connecter_t) }; } #endif
sophomore_public/libzmq
src/ws_connecter.hpp
C++
gpl-3.0
1,963
/* SPDX-License-Identifier: MPL-2.0 */ #include "precompiled.hpp" #include <stdlib.h> #include <string.h> #include <cmath> #include "ws_protocol.hpp" #include "ws_decoder.hpp" #include "likely.hpp" #include "wire.hpp" #include "err.hpp" zmq::ws_decoder_t::ws_decoder_t (size_t bufsize_, int64_t maxmsgsize_, bool zero_copy_, bool must_mask_) : decoder_base_t<ws_decoder_t, shared_message_memory_allocator> (bufsize_), _msg_flags (0), _zero_copy (zero_copy_), _max_msg_size (maxmsgsize_), _must_mask (must_mask_), _size (0) { memset (_tmpbuf, 0, sizeof (_tmpbuf)); int rc = _in_progress.init (); errno_assert (rc == 0); // At the beginning, read one byte and go to opcode_ready state. next_step (_tmpbuf, 1, &ws_decoder_t::opcode_ready); } zmq::ws_decoder_t::~ws_decoder_t () { const int rc = _in_progress.close (); errno_assert (rc == 0); } int zmq::ws_decoder_t::opcode_ready (unsigned char const *) { const bool final = (_tmpbuf[0] & 0x80) != 0; // final bit if (!final) return -1; // non final messages are not supported _opcode = static_cast<zmq::ws_protocol_t::opcode_t> (_tmpbuf[0] & 0xF); _msg_flags = 0; switch (_opcode) { case zmq::ws_protocol_t::opcode_binary: break; case zmq::ws_protocol_t::opcode_close: _msg_flags = msg_t::command | msg_t::close_cmd; break; case zmq::ws_protocol_t::opcode_ping: _msg_flags = msg_t::ping | msg_t::command; break; case zmq::ws_protocol_t::opcode_pong: _msg_flags = msg_t::pong | msg_t::command; break; default: return -1; } next_step (_tmpbuf, 1, &ws_decoder_t::size_first_byte_ready); return 0; } int zmq::ws_decoder_t::size_first_byte_ready (unsigned char const *read_from_) { const bool is_masked = (_tmpbuf[0] & 0x80) != 0; if (is_masked != _must_mask) // wrong mask value return -1; _size = static_cast<uint64_t> (_tmpbuf[0] & 0x7F); if (_size < 126) { if (_must_mask) next_step (_tmpbuf, 4, &ws_decoder_t::mask_ready); else if (_opcode == ws_protocol_t::opcode_binary) { if (_size == 0) return -1; next_step (_tmpbuf, 1, &ws_decoder_t::flags_ready); } else return size_ready (read_from_); } else if (_size == 126) next_step (_tmpbuf, 2, &ws_decoder_t::short_size_ready); else next_step (_tmpbuf, 8, &ws_decoder_t::long_size_ready); return 0; } int zmq::ws_decoder_t::short_size_ready (unsigned char const *read_from_) { _size = (_tmpbuf[0] << 8) | _tmpbuf[1]; if (_must_mask) next_step (_tmpbuf, 4, &ws_decoder_t::mask_ready); else if (_opcode == ws_protocol_t::opcode_binary) { if (_size == 0) return -1; next_step (_tmpbuf, 1, &ws_decoder_t::flags_ready); } else return size_ready (read_from_); return 0; } int zmq::ws_decoder_t::long_size_ready (unsigned char const *read_from_) { // The payload size is encoded as 64-bit unsigned integer. // The most significant byte comes first. 
_size = get_uint64 (_tmpbuf); if (_must_mask) next_step (_tmpbuf, 4, &ws_decoder_t::mask_ready); else if (_opcode == ws_protocol_t::opcode_binary) { if (_size == 0) return -1; next_step (_tmpbuf, 1, &ws_decoder_t::flags_ready); } else return size_ready (read_from_); return 0; } int zmq::ws_decoder_t::mask_ready (unsigned char const *read_from_) { memcpy (_mask, _tmpbuf, 4); if (_opcode == ws_protocol_t::opcode_binary) { if (_size == 0) return -1; next_step (_tmpbuf, 1, &ws_decoder_t::flags_ready); } else return size_ready (read_from_); return 0; } int zmq::ws_decoder_t::flags_ready (unsigned char const *read_from_) { unsigned char flags; if (_must_mask) flags = _tmpbuf[0] ^ _mask[0]; else flags = _tmpbuf[0]; if (flags & ws_protocol_t::more_flag) _msg_flags |= msg_t::more; if (flags & ws_protocol_t::command_flag) _msg_flags |= msg_t::command; _size--; return size_ready (read_from_); } int zmq::ws_decoder_t::size_ready (unsigned char const *read_pos_) { // Message size must not exceed the maximum allowed size. if (_max_msg_size >= 0) if (unlikely (_size > static_cast<uint64_t> (_max_msg_size))) { errno = EMSGSIZE; return -1; } // Message size must fit into size_t data type. if (unlikely (_size != static_cast<size_t> (_size))) { errno = EMSGSIZE; return -1; } int rc = _in_progress.close (); assert (rc == 0); // the current message can exceed the current buffer. We have to copy the buffer // data into a new message and complete it in the next receive. shared_message_memory_allocator &allocator = get_allocator (); if (unlikely (!_zero_copy || allocator.data () > read_pos_ || static_cast<size_t> (read_pos_ - allocator.data ()) > allocator.size () || _size > static_cast<size_t> ( allocator.data () + allocator.size () - read_pos_))) { // a new message has started, but the size would exceed the pre-allocated arena // (or read_pos_ is in the initial handshake buffer) // this happens every time when a message does not fit completely into the buffer rc = _in_progress.init_size (static_cast<size_t> (_size)); } else { // construct message using n bytes from the buffer as storage // increase buffer ref count // if the message will be a large message, pass a valid refcnt memory location as well rc = _in_progress.init ( const_cast<unsigned char *> (read_pos_), static_cast<size_t> (_size), shared_message_memory_allocator::call_dec_ref, allocator.buffer (), allocator.provide_content ()); // For small messages, data has been copied and refcount does not have to be increased if (_in_progress.is_zcmsg ()) { allocator.advance_content (); allocator.inc_ref (); } } if (unlikely (rc)) { errno_assert (errno == ENOMEM); rc = _in_progress.init (); errno_assert (rc == 0); errno = ENOMEM; return -1; } _in_progress.set_flags (_msg_flags); // this sets read_pos to // the message data address if the data needs to be copied // for small message / messages exceeding the current buffer // or // to the current start address in the buffer because the message // was constructed to use n bytes from the address passed as argument next_step (_in_progress.data (), _in_progress.size (), &ws_decoder_t::message_ready); return 0; } int zmq::ws_decoder_t::message_ready (unsigned char const *) { if (_must_mask) { int mask_index = _opcode == ws_protocol_t::opcode_binary ? 1 : 0; unsigned char *data = static_cast<unsigned char *> (_in_progress.data ()); for (size_t i = 0; i < _size; ++i, mask_index++) data[i] = data[i] ^ _mask[mask_index % 4]; } // Message is completely read. Signal this to the caller // and prepare to decode next message. 
next_step (_tmpbuf, 1, &ws_decoder_t::opcode_ready); return 1; }
sophomore_public/libzmq
src/ws_decoder.cpp
C++
gpl-3.0
7,624
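/*  --- Illustrative sketch, not part of libzmq ---
    ws_decoder_t above walks the RFC 6455 frame header one state at a time:
    FIN bit and opcode, mask bit and 7-bit length (126 -> 16-bit extended
    length, 127 -> 64-bit extended length), then an optional 4-byte mask.
    The helper below parses the same header from a contiguous buffer in one
    pass; the struct and function names are assumptions made for this
    example.  */
#include <cstddef>
#include <cstdint>
#include <cstring>

struct ws_frame_header
{
    bool final;              //  FIN bit
    uint8_t opcode;          //  0x2 = binary, 0x8/0x9/0xA = close/ping/pong
    bool masked;             //  client-to-server frames must be masked
    uint64_t payload_len;
    unsigned char mask[4];
    size_t header_size;      //  bytes consumed from the buffer
};

//  Returns false if the buffer is too short to hold the full header.
static bool parse_ws_header (const unsigned char *buf_,
                             size_t len_,
                             ws_frame_header &out_)
{
    if (len_ < 2)
        return false;
    out_.final = (buf_[0] & 0x80) != 0;
    out_.opcode = buf_[0] & 0x0F;
    out_.masked = (buf_[1] & 0x80) != 0;
    uint64_t size = buf_[1] & 0x7F;
    size_t pos = 2;
    if (size == 126) {
        if (len_ < pos + 2)
            return false;
        size = (uint64_t (buf_[pos]) << 8) | buf_[pos + 1];
        pos += 2;
    } else if (size == 127) {
        if (len_ < pos + 8)
            return false;
        size = 0;
        for (int i = 0; i < 8; i++)
            size = (size << 8) | buf_[pos + i];
        pos += 8;
    }
    if (out_.masked) {
        if (len_ < pos + 4)
            return false;
        memcpy (out_.mask, buf_ + pos, 4);
        pos += 4;
    }
    out_.payload_len = size;
    out_.header_size = pos;
    //  The payload that follows is XORed byte-by-byte with mask[i % 4],
    //  exactly as ws_decoder_t::message_ready () does for masked frames.
    return true;
}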
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __ZMQ_WS_DECODER_HPP_INCLUDED__ #define __ZMQ_WS_DECODER_HPP_INCLUDED__ #include "decoder.hpp" #include "decoder_allocators.hpp" #include "ws_protocol.hpp" namespace zmq { // Decoder for Web socket framing protocol. Converts data stream into messages. // The class has to inherit from shared_message_memory_allocator because // the base class calls allocate in its constructor. class ws_decoder_t ZMQ_FINAL : public decoder_base_t<ws_decoder_t, shared_message_memory_allocator> { public: ws_decoder_t (size_t bufsize_, int64_t maxmsgsize_, bool zero_copy_, bool must_mask_); ~ws_decoder_t (); // i_decoder interface. msg_t *msg () { return &_in_progress; } private: int opcode_ready (unsigned char const *); int size_first_byte_ready (unsigned char const *); int short_size_ready (unsigned char const *); int long_size_ready (unsigned char const *); int mask_ready (unsigned char const *); int flags_ready (unsigned char const *); int message_ready (unsigned char const *); int size_ready (unsigned char const *); unsigned char _tmpbuf[8]; unsigned char _msg_flags; msg_t _in_progress; const bool _zero_copy; const int64_t _max_msg_size; const bool _must_mask; uint64_t _size; zmq::ws_protocol_t::opcode_t _opcode; unsigned char _mask[4]; ZMQ_NON_COPYABLE_NOR_MOVABLE (ws_decoder_t) }; } #endif
sophomore_public/libzmq
src/ws_decoder.hpp
C++
gpl-3.0
1,506
/* SPDX-License-Identifier: MPL-2.0 */ #include "precompiled.hpp" #include "ws_protocol.hpp" #include "ws_encoder.hpp" #include "msg.hpp" #include "likely.hpp" #include "wire.hpp" #include "random.hpp" #include <limits.h> zmq::ws_encoder_t::ws_encoder_t (size_t bufsize_, bool must_mask_) : encoder_base_t<ws_encoder_t> (bufsize_), _must_mask (must_mask_) { // Write 0 bytes to the batch and go to message_ready state. next_step (NULL, 0, &ws_encoder_t::message_ready, true); _masked_msg.init (); } zmq::ws_encoder_t::~ws_encoder_t () { _masked_msg.close (); } void zmq::ws_encoder_t::message_ready () { int offset = 0; _is_binary = false; if (in_progress ()->is_ping ()) _tmp_buf[offset++] = 0x80 | zmq::ws_protocol_t::opcode_ping; else if (in_progress ()->is_pong ()) _tmp_buf[offset++] = 0x80 | zmq::ws_protocol_t::opcode_pong; else if (in_progress ()->is_close_cmd ()) _tmp_buf[offset++] = 0x80 | zmq::ws_protocol_t::opcode_close; else { _tmp_buf[offset++] = 0x82; // Final | binary _is_binary = true; } _tmp_buf[offset] = _must_mask ? 0x80 : 0x00; size_t size = in_progress ()->size (); if (_is_binary) size++; // TODO: create an opcode for subscribe/cancel if (in_progress ()->is_subscribe () || in_progress ()->is_cancel ()) size++; if (size <= 125) _tmp_buf[offset++] |= static_cast<unsigned char> (size & 127); else if (size <= 0xFFFF) { _tmp_buf[offset++] |= 126; _tmp_buf[offset++] = static_cast<unsigned char> ((size >> 8) & 0xFF); _tmp_buf[offset++] = static_cast<unsigned char> (size & 0xFF); } else { _tmp_buf[offset++] |= 127; put_uint64 (_tmp_buf + offset, size); offset += 8; } if (_must_mask) { const uint32_t random = generate_random (); put_uint32 (_tmp_buf + offset, random); put_uint32 (_mask, random); offset += 4; } int mask_index = 0; if (_is_binary) { // Encode flags. unsigned char protocol_flags = 0; if (in_progress ()->flags () & msg_t::more) protocol_flags |= ws_protocol_t::more_flag; if (in_progress ()->flags () & msg_t::command) protocol_flags |= ws_protocol_t::command_flag; _tmp_buf[offset++] = _must_mask ? protocol_flags ^ _mask[mask_index++] : protocol_flags; } // Encode the subscribe/cancel byte. // TODO: remove once there is an opcode for subscribe/cancel if (in_progress ()->is_subscribe ()) _tmp_buf[offset++] = _must_mask ? 1 ^ _mask[mask_index++] : 1; else if (in_progress ()->is_cancel ()) _tmp_buf[offset++] = _must_mask ? 0 ^ _mask[mask_index++] : 0; next_step (_tmp_buf, offset, &ws_encoder_t::size_ready, false); } void zmq::ws_encoder_t::size_ready () { if (_must_mask) { assert (in_progress () != &_masked_msg); const size_t size = in_progress ()->size (); unsigned char *src = static_cast<unsigned char *> (in_progress ()->data ()); unsigned char *dest = src; // If msg is shared or data is constant we cannot mask in-place, allocate a new msg for it if (in_progress ()->flags () & msg_t::shared || in_progress ()->is_cmsg ()) { _masked_msg.close (); _masked_msg.init_size (size); dest = static_cast<unsigned char *> (_masked_msg.data ()); } int mask_index = 0; if (_is_binary) ++mask_index; // TODO: remove once there is an opcode for subscribe/cancel if (in_progress ()->is_subscribe () || in_progress ()->is_cancel ()) ++mask_index; for (size_t i = 0; i < size; ++i, mask_index++) dest[i] = src[i] ^ _mask[mask_index % 4]; next_step (dest, size, &ws_encoder_t::message_ready, true); } else { next_step (in_progress ()->data (), in_progress ()->size (), &ws_encoder_t::message_ready, true); } }
sophomore_public/libzmq
src/ws_encoder.cpp
C++
gpl-3.0
4,076
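/*  --- Illustrative sketch, not part of libzmq ---
    ws_encoder_t::size_ready () above masks client-to-server payloads by
    XORing every byte with one byte of the 4-byte mask, cycling through the
    mask; applying the same operation twice restores the original data.  The
    helper name, signature and the mask_offset_ parameter (the encoder starts
    the cycle after the flags and subscribe/cancel bytes it already masked)
    are assumptions made for this example.  */
#include <cstddef>

static void ws_apply_mask_sketch (unsigned char *data_,
                                  size_t size_,
                                  const unsigned char mask_[4],
                                  size_t mask_offset_)
{
    for (size_t i = 0; i < size_; i++)
        data_[i] ^= mask_[(mask_offset_ + i) % 4];
}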
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __ZMQ_WS_ENCODER_HPP_INCLUDED__ #define __ZMQ_WS_ENCODER_HPP_INCLUDED__ #include "encoder.hpp" namespace zmq { // Encoder for web socket framing protocol. Converts messages into data stream. class ws_encoder_t ZMQ_FINAL : public encoder_base_t<ws_encoder_t> { public: ws_encoder_t (size_t bufsize_, bool must_mask_); ~ws_encoder_t (); private: void size_ready (); void message_ready (); unsigned char _tmp_buf[16]; bool _must_mask; unsigned char _mask[4]; msg_t _masked_msg; bool _is_binary; ZMQ_NON_COPYABLE_NOR_MOVABLE (ws_encoder_t) }; } #endif
sophomore_public/libzmq
src/ws_encoder.hpp
C++
gpl-3.0
648
/* SPDX-License-Identifier: MPL-2.0 */ #include "precompiled.hpp" #ifdef ZMQ_USE_NSS #include <secoid.h> #include <sechash.h> #define SHA_DIGEST_LENGTH 20 #elif defined ZMQ_USE_BUILTIN_SHA1 #include "../external/sha1/sha1.h" #elif defined ZMQ_USE_GNUTLS #define SHA_DIGEST_LENGTH 20 #include <gnutls/gnutls.h> #include <gnutls/crypto.h> #endif #if !defined ZMQ_HAVE_WINDOWS #include <sys/types.h> #include <unistd.h> #include <sys/socket.h> #include <netinet/in.h> #include <arpa/inet.h> #ifdef ZMQ_HAVE_VXWORKS #include <sockLib.h> #endif #endif #include <cstring> #include "compat.hpp" #include "tcp.hpp" #include "ws_engine.hpp" #include "session_base.hpp" #include "err.hpp" #include "ip.hpp" #include "random.hpp" #include "ws_decoder.hpp" #include "ws_encoder.hpp" #include "null_mechanism.hpp" #include "plain_server.hpp" #include "plain_client.hpp" #ifdef ZMQ_HAVE_CURVE #include "curve_client.hpp" #include "curve_server.hpp" #endif // OSX uses a different name for this socket option #ifndef IPV6_ADD_MEMBERSHIP #define IPV6_ADD_MEMBERSHIP IPV6_JOIN_GROUP #endif #ifdef __APPLE__ #include <TargetConditionals.h> #endif static int encode_base64 (const unsigned char *in_, int in_len_, char *out_, int out_len_); static void compute_accept_key (char *key_, unsigned char hash_[SHA_DIGEST_LENGTH]); zmq::ws_engine_t::ws_engine_t (fd_t fd_, const options_t &options_, const endpoint_uri_pair_t &endpoint_uri_pair_, const ws_address_t &address_, bool client_) : stream_engine_base_t (fd_, options_, endpoint_uri_pair_, true), _client (client_), _address (address_), _client_handshake_state (client_handshake_initial), _server_handshake_state (handshake_initial), _header_name_position (0), _header_value_position (0), _header_upgrade_websocket (false), _header_connection_upgrade (false), _heartbeat_timeout (0) { memset (_websocket_key, 0, MAX_HEADER_VALUE_LENGTH + 1); memset (_websocket_accept, 0, MAX_HEADER_VALUE_LENGTH + 1); memset (_websocket_protocol, 0, 256); _next_msg = &ws_engine_t::next_handshake_command; _process_msg = &ws_engine_t::process_handshake_command; _close_msg.init (); if (_options.heartbeat_interval > 0) { _heartbeat_timeout = _options.heartbeat_timeout; if (_heartbeat_timeout == -1) _heartbeat_timeout = _options.heartbeat_interval; } } zmq::ws_engine_t::~ws_engine_t () { _close_msg.close (); } void zmq::ws_engine_t::start_ws_handshake () { if (_client) { const char *protocol; if (_options.mechanism == ZMQ_NULL) protocol = "ZWS2.0/NULL,ZWS2.0"; else if (_options.mechanism == ZMQ_PLAIN) protocol = "ZWS2.0/PLAIN"; #ifdef ZMQ_HAVE_CURVE else if (_options.mechanism == ZMQ_CURVE) protocol = "ZWS2.0/CURVE"; #endif else { // Avoid uninitialized variable error breaking UWP build protocol = ""; assert (false); } unsigned char nonce[16]; int *p = reinterpret_cast<int *> (nonce); // The nonce doesn't have to be secure one, it is just use to avoid proxy cache *p = zmq::generate_random (); *(p + 1) = zmq::generate_random (); *(p + 2) = zmq::generate_random (); *(p + 3) = zmq::generate_random (); int size = encode_base64 (nonce, 16, _websocket_key, MAX_HEADER_VALUE_LENGTH); assert (size > 0); size = snprintf ( reinterpret_cast<char *> (_write_buffer), WS_BUFFER_SIZE, "GET %s HTTP/1.1\r\n" "Host: %s\r\n" "Upgrade: websocket\r\n" "Connection: Upgrade\r\n" "Sec-WebSocket-Key: %s\r\n" "Sec-WebSocket-Protocol: %s\r\n" "Sec-WebSocket-Version: 13\r\n\r\n", _address.path (), _address.host (), _websocket_key, protocol); assert (size > 0 && size < WS_BUFFER_SIZE); _outpos = _write_buffer; _outsize = size; set_pollout 
(); } } void zmq::ws_engine_t::plug_internal () { start_ws_handshake (); set_pollin (); in_event (); } int zmq::ws_engine_t::routing_id_msg (msg_t *msg_) { const int rc = msg_->init_size (_options.routing_id_size); errno_assert (rc == 0); if (_options.routing_id_size > 0) memcpy (msg_->data (), _options.routing_id, _options.routing_id_size); _next_msg = &ws_engine_t::pull_msg_from_session; return 0; } int zmq::ws_engine_t::process_routing_id_msg (msg_t *msg_) { if (_options.recv_routing_id) { msg_->set_flags (msg_t::routing_id); const int rc = session ()->push_msg (msg_); errno_assert (rc == 0); } else { int rc = msg_->close (); errno_assert (rc == 0); rc = msg_->init (); errno_assert (rc == 0); } _process_msg = &ws_engine_t::push_msg_to_session; return 0; } bool zmq::ws_engine_t::select_protocol (const char *protocol_) { if (_options.mechanism == ZMQ_NULL && (strcmp ("ZWS2.0", protocol_) == 0)) { _next_msg = static_cast<int (stream_engine_base_t::*) (msg_t *)> ( &ws_engine_t::routing_id_msg); _process_msg = static_cast<int (stream_engine_base_t::*) (msg_t *)> ( &ws_engine_t::process_routing_id_msg); // No mechanism in place, enabling heartbeat if (_options.heartbeat_interval > 0 && !_has_heartbeat_timer) { add_timer (_options.heartbeat_interval, heartbeat_ivl_timer_id); _has_heartbeat_timer = true; } return true; } if (_options.mechanism == ZMQ_NULL && strcmp ("ZWS2.0/NULL", protocol_) == 0) { _mechanism = new (std::nothrow) null_mechanism_t (session (), _peer_address, _options); alloc_assert (_mechanism); return true; } else if (_options.mechanism == ZMQ_PLAIN && strcmp ("ZWS2.0/PLAIN", protocol_) == 0) { if (_options.as_server) _mechanism = new (std::nothrow) plain_server_t (session (), _peer_address, _options); else _mechanism = new (std::nothrow) plain_client_t (session (), _options); alloc_assert (_mechanism); return true; } #ifdef ZMQ_HAVE_CURVE else if (_options.mechanism == ZMQ_CURVE && strcmp ("ZWS2.0/CURVE", protocol_) == 0) { if (_options.as_server) _mechanism = new (std::nothrow) curve_server_t (session (), _peer_address, _options, false); else _mechanism = new (std::nothrow) curve_client_t (session (), _options, false); alloc_assert (_mechanism); return true; } #endif return false; } bool zmq::ws_engine_t::handshake () { bool complete; if (_client) complete = client_handshake (); else complete = server_handshake (); if (complete) { _encoder = new (std::nothrow) ws_encoder_t (_options.out_batch_size, _client); alloc_assert (_encoder); _decoder = new (std::nothrow) ws_decoder_t (_options.in_batch_size, _options.maxmsgsize, _options.zero_copy, !_client); alloc_assert (_decoder); socket ()->event_handshake_succeeded (_endpoint_uri_pair, 0); set_pollout (); } return complete; } bool zmq::ws_engine_t::server_handshake () { const int nbytes = read (_read_buffer, WS_BUFFER_SIZE); if (nbytes == -1) { if (errno != EAGAIN) error (zmq::i_engine::connection_error); return false; } _inpos = _read_buffer; _insize = nbytes; while (_insize > 0) { const char c = static_cast<char> (*_inpos); switch (_server_handshake_state) { case handshake_initial: if (c == 'G') _server_handshake_state = request_line_G; else _server_handshake_state = handshake_error; break; case request_line_G: if (c == 'E') _server_handshake_state = request_line_GE; else _server_handshake_state = handshake_error; break; case request_line_GE: if (c == 'T') _server_handshake_state = request_line_GET; else _server_handshake_state = handshake_error; break; case request_line_GET: if (c == ' ') _server_handshake_state = 
request_line_GET_space; else _server_handshake_state = handshake_error; break; case request_line_GET_space: if (c == '\r' || c == '\n') _server_handshake_state = handshake_error; // TODO: instead of check what is not allowed check what is allowed if (c != ' ') _server_handshake_state = request_line_resource; else _server_handshake_state = request_line_GET_space; break; case request_line_resource: if (c == '\r' || c == '\n') _server_handshake_state = handshake_error; else if (c == ' ') _server_handshake_state = request_line_resource_space; else _server_handshake_state = request_line_resource; break; case request_line_resource_space: if (c == 'H') _server_handshake_state = request_line_H; else _server_handshake_state = handshake_error; break; case request_line_H: if (c == 'T') _server_handshake_state = request_line_HT; else _server_handshake_state = handshake_error; break; case request_line_HT: if (c == 'T') _server_handshake_state = request_line_HTT; else _server_handshake_state = handshake_error; break; case request_line_HTT: if (c == 'P') _server_handshake_state = request_line_HTTP; else _server_handshake_state = handshake_error; break; case request_line_HTTP: if (c == '/') _server_handshake_state = request_line_HTTP_slash; else _server_handshake_state = handshake_error; break; case request_line_HTTP_slash: if (c == '1') _server_handshake_state = request_line_HTTP_slash_1; else _server_handshake_state = handshake_error; break; case request_line_HTTP_slash_1: if (c == '.') _server_handshake_state = request_line_HTTP_slash_1_dot; else _server_handshake_state = handshake_error; break; case request_line_HTTP_slash_1_dot: if (c == '1') _server_handshake_state = request_line_HTTP_slash_1_dot_1; else _server_handshake_state = handshake_error; break; case request_line_HTTP_slash_1_dot_1: if (c == '\r') _server_handshake_state = request_line_cr; else _server_handshake_state = handshake_error; break; case request_line_cr: if (c == '\n') _server_handshake_state = header_field_begin_name; else _server_handshake_state = handshake_error; break; case header_field_begin_name: switch (c) { case '\r': _server_handshake_state = handshake_end_line_cr; break; case '\n': _server_handshake_state = handshake_error; break; default: _header_name[0] = c; _header_name_position = 1; _server_handshake_state = header_field_name; break; } break; case header_field_name: if (c == '\r' || c == '\n') _server_handshake_state = handshake_error; else if (c == ':') { _header_name[_header_name_position] = '\0'; _server_handshake_state = header_field_colon; } else if (_header_name_position + 1 > MAX_HEADER_NAME_LENGTH) _server_handshake_state = handshake_error; else { _header_name[_header_name_position] = c; _header_name_position++; _server_handshake_state = header_field_name; } break; case header_field_colon: case header_field_value_trailing_space: if (c == '\n') _server_handshake_state = handshake_error; else if (c == '\r') _server_handshake_state = header_field_cr; else if (c == ' ') _server_handshake_state = header_field_value_trailing_space; else { _header_value[0] = c; _header_value_position = 1; _server_handshake_state = header_field_value; } break; case header_field_value: if (c == '\n') _server_handshake_state = handshake_error; else if (c == '\r') { _header_value[_header_value_position] = '\0'; if (strcasecmp ("upgrade", _header_name) == 0) _header_upgrade_websocket = strcasecmp ("websocket", _header_value) == 0; else if (strcasecmp ("connection", _header_name) == 0) { char *rest = NULL; char *element = strtok_r 
(_header_value, ",", &rest); while (element != NULL) { while (*element == ' ') element++; if (strcasecmp ("upgrade", element) == 0) { _header_connection_upgrade = true; break; } element = strtok_r (NULL, ",", &rest); } } else if (strcasecmp ("Sec-WebSocket-Key", _header_name) == 0) strcpy_s (_websocket_key, _header_value); else if (strcasecmp ("Sec-WebSocket-Protocol", _header_name) == 0) { // Currently only the ZWS2.0 is supported // Sec-WebSocket-Protocol can appear multiple times or be a comma separated list // if _websocket_protocol is already set we skip the check if (_websocket_protocol[0] == '\0') { char *rest = NULL; char *p = strtok_r (_header_value, ",", &rest); while (p != NULL) { if (*p == ' ') p++; if (select_protocol (p)) { strcpy_s (_websocket_protocol, p); break; } p = strtok_r (NULL, ",", &rest); } } } _server_handshake_state = header_field_cr; } else if (_header_value_position + 1 > MAX_HEADER_VALUE_LENGTH) _server_handshake_state = handshake_error; else { _header_value[_header_value_position] = c; _header_value_position++; _server_handshake_state = header_field_value; } break; case header_field_cr: if (c == '\n') _server_handshake_state = header_field_begin_name; else _server_handshake_state = handshake_error; break; case handshake_end_line_cr: if (c == '\n') { if (_header_connection_upgrade && _header_upgrade_websocket && _websocket_protocol[0] != '\0' && _websocket_key[0] != '\0') { _server_handshake_state = handshake_complete; unsigned char hash[SHA_DIGEST_LENGTH]; compute_accept_key (_websocket_key, hash); const int accept_key_len = encode_base64 ( hash, SHA_DIGEST_LENGTH, _websocket_accept, MAX_HEADER_VALUE_LENGTH); assert (accept_key_len > 0); _websocket_accept[accept_key_len] = '\0'; const int written = snprintf (reinterpret_cast<char *> (_write_buffer), WS_BUFFER_SIZE, "HTTP/1.1 101 Switching Protocols\r\n" "Upgrade: websocket\r\n" "Connection: Upgrade\r\n" "Sec-WebSocket-Accept: %s\r\n" "Sec-WebSocket-Protocol: %s\r\n" "\r\n", _websocket_accept, _websocket_protocol); assert (written >= 0 && written < WS_BUFFER_SIZE); _outpos = _write_buffer; _outsize = written; _inpos++; _insize--; return true; } _server_handshake_state = handshake_error; } else _server_handshake_state = handshake_error; break; default: assert (false); } _inpos++; _insize--; if (_server_handshake_state == handshake_error) { // TODO: send bad request socket ()->event_handshake_failed_protocol ( _endpoint_uri_pair, ZMQ_PROTOCOL_ERROR_WS_UNSPECIFIED); error (zmq::i_engine::protocol_error); return false; } } return false; } bool zmq::ws_engine_t::client_handshake () { const int nbytes = read (_read_buffer, WS_BUFFER_SIZE); if (nbytes == -1) { if (errno != EAGAIN) error (zmq::i_engine::connection_error); return false; } _inpos = _read_buffer; _insize = nbytes; while (_insize > 0) { const char c = static_cast<char> (*_inpos); switch (_client_handshake_state) { case client_handshake_initial: if (c == 'H') _client_handshake_state = response_line_H; else _client_handshake_state = client_handshake_error; break; case response_line_H: if (c == 'T') _client_handshake_state = response_line_HT; else _client_handshake_state = client_handshake_error; break; case response_line_HT: if (c == 'T') _client_handshake_state = response_line_HTT; else _client_handshake_state = client_handshake_error; break; case response_line_HTT: if (c == 'P') _client_handshake_state = response_line_HTTP; else _client_handshake_state = client_handshake_error; break; case response_line_HTTP: if (c == '/') _client_handshake_state = 
response_line_HTTP_slash; else _client_handshake_state = client_handshake_error; break; case response_line_HTTP_slash: if (c == '1') _client_handshake_state = response_line_HTTP_slash_1; else _client_handshake_state = client_handshake_error; break; case response_line_HTTP_slash_1: if (c == '.') _client_handshake_state = response_line_HTTP_slash_1_dot; else _client_handshake_state = client_handshake_error; break; case response_line_HTTP_slash_1_dot: if (c == '1') _client_handshake_state = response_line_HTTP_slash_1_dot_1; else _client_handshake_state = client_handshake_error; break; case response_line_HTTP_slash_1_dot_1: if (c == ' ') _client_handshake_state = response_line_HTTP_slash_1_dot_1_space; else _client_handshake_state = client_handshake_error; break; case response_line_HTTP_slash_1_dot_1_space: if (c == ' ') _client_handshake_state = response_line_HTTP_slash_1_dot_1_space; else if (c == '1') _client_handshake_state = response_line_status_1; else _client_handshake_state = client_handshake_error; break; case response_line_status_1: if (c == '0') _client_handshake_state = response_line_status_10; else _client_handshake_state = client_handshake_error; break; case response_line_status_10: if (c == '1') _client_handshake_state = response_line_status_101; else _client_handshake_state = client_handshake_error; break; case response_line_status_101: if (c == ' ') _client_handshake_state = response_line_status_101_space; else _client_handshake_state = client_handshake_error; break; case response_line_status_101_space: if (c == ' ') _client_handshake_state = response_line_status_101_space; else if (c == 'S') _client_handshake_state = response_line_s; else _client_handshake_state = client_handshake_error; break; case response_line_s: if (c == 'w') _client_handshake_state = response_line_sw; else _client_handshake_state = client_handshake_error; break; case response_line_sw: if (c == 'i') _client_handshake_state = response_line_swi; else _client_handshake_state = client_handshake_error; break; case response_line_swi: if (c == 't') _client_handshake_state = response_line_swit; else _client_handshake_state = client_handshake_error; break; case response_line_swit: if (c == 'c') _client_handshake_state = response_line_switc; else _client_handshake_state = client_handshake_error; break; case response_line_switc: if (c == 'h') _client_handshake_state = response_line_switch; else _client_handshake_state = client_handshake_error; break; case response_line_switch: if (c == 'i') _client_handshake_state = response_line_switchi; else _client_handshake_state = client_handshake_error; break; case response_line_switchi: if (c == 'n') _client_handshake_state = response_line_switchin; else _client_handshake_state = client_handshake_error; break; case response_line_switchin: if (c == 'g') _client_handshake_state = response_line_switching; else _client_handshake_state = client_handshake_error; break; case response_line_switching: if (c == ' ') _client_handshake_state = response_line_switching_space; else _client_handshake_state = client_handshake_error; break; case response_line_switching_space: if (c == 'P') _client_handshake_state = response_line_p; else _client_handshake_state = client_handshake_error; break; case response_line_p: if (c == 'r') _client_handshake_state = response_line_pr; else _client_handshake_state = client_handshake_error; break; case response_line_pr: if (c == 'o') _client_handshake_state = response_line_pro; else _client_handshake_state = client_handshake_error; break; case 
response_line_pro: if (c == 't') _client_handshake_state = response_line_prot; else _client_handshake_state = client_handshake_error; break; case response_line_prot: if (c == 'o') _client_handshake_state = response_line_proto; else _client_handshake_state = client_handshake_error; break; case response_line_proto: if (c == 'c') _client_handshake_state = response_line_protoc; else _client_handshake_state = client_handshake_error; break; case response_line_protoc: if (c == 'o') _client_handshake_state = response_line_protoco; else _client_handshake_state = client_handshake_error; break; case response_line_protoco: if (c == 'l') _client_handshake_state = response_line_protocol; else _client_handshake_state = client_handshake_error; break; case response_line_protocol: if (c == 's') _client_handshake_state = response_line_protocols; else _client_handshake_state = client_handshake_error; break; case response_line_protocols: if (c == '\r') _client_handshake_state = response_line_cr; else _client_handshake_state = client_handshake_error; break; case response_line_cr: if (c == '\n') _client_handshake_state = client_header_field_begin_name; else _client_handshake_state = client_handshake_error; break; case client_header_field_begin_name: switch (c) { case '\r': _client_handshake_state = client_handshake_end_line_cr; break; case '\n': _client_handshake_state = client_handshake_error; break; default: _header_name[0] = c; _header_name_position = 1; _client_handshake_state = client_header_field_name; break; } break; case client_header_field_name: if (c == '\r' || c == '\n') _client_handshake_state = client_handshake_error; else if (c == ':') { _header_name[_header_name_position] = '\0'; _client_handshake_state = client_header_field_colon; } else if (_header_name_position + 1 > MAX_HEADER_NAME_LENGTH) _client_handshake_state = client_handshake_error; else { _header_name[_header_name_position] = c; _header_name_position++; _client_handshake_state = client_header_field_name; } break; case client_header_field_colon: case client_header_field_value_trailing_space: if (c == '\n') _client_handshake_state = client_handshake_error; else if (c == '\r') _client_handshake_state = client_header_field_cr; else if (c == ' ') _client_handshake_state = client_header_field_value_trailing_space; else { _header_value[0] = c; _header_value_position = 1; _client_handshake_state = client_header_field_value; } break; case client_header_field_value: if (c == '\n') _client_handshake_state = client_handshake_error; else if (c == '\r') { _header_value[_header_value_position] = '\0'; if (strcasecmp ("upgrade", _header_name) == 0) _header_upgrade_websocket = strcasecmp ("websocket", _header_value) == 0; else if (strcasecmp ("connection", _header_name) == 0) _header_connection_upgrade = strcasecmp ("upgrade", _header_value) == 0; else if (strcasecmp ("Sec-WebSocket-Accept", _header_name) == 0) strcpy_s (_websocket_accept, _header_value); else if (strcasecmp ("Sec-WebSocket-Protocol", _header_name) == 0) { if (_mechanism) { _client_handshake_state = client_handshake_error; break; } if (select_protocol (_header_value)) strcpy_s (_websocket_protocol, _header_value); } _client_handshake_state = client_header_field_cr; } else if (_header_value_position + 1 > MAX_HEADER_VALUE_LENGTH) _client_handshake_state = client_handshake_error; else { _header_value[_header_value_position] = c; _header_value_position++; _client_handshake_state = client_header_field_value; } break; case client_header_field_cr: if (c == '\n') _client_handshake_state = 
client_header_field_begin_name; else _client_handshake_state = client_handshake_error; break; case client_handshake_end_line_cr: if (c == '\n') { if (_header_connection_upgrade && _header_upgrade_websocket && _websocket_protocol[0] != '\0' && _websocket_accept[0] != '\0') { _client_handshake_state = client_handshake_complete; // TODO: validate accept key _inpos++; _insize--; return true; } _client_handshake_state = client_handshake_error; } else _client_handshake_state = client_handshake_error; break; default: assert (false); } _inpos++; _insize--; if (_client_handshake_state == client_handshake_error) { socket ()->event_handshake_failed_protocol ( _endpoint_uri_pair, ZMQ_PROTOCOL_ERROR_WS_UNSPECIFIED); error (zmq::i_engine::protocol_error); return false; } } return false; } int zmq::ws_engine_t::decode_and_push (msg_t *msg_) { zmq_assert (_mechanism != NULL); // with WS engine, ping and pong commands are control messages and should not go through any mechanism if (msg_->is_ping () || msg_->is_pong () || msg_->is_close_cmd ()) { if (process_command_message (msg_) == -1) return -1; } else if (_mechanism->decode (msg_) == -1) return -1; if (_has_timeout_timer) { _has_timeout_timer = false; cancel_timer (heartbeat_timeout_timer_id); } if (msg_->flags () & msg_t::command && !msg_->is_ping () && !msg_->is_pong () && !msg_->is_close_cmd ()) process_command_message (msg_); if (_metadata) msg_->set_metadata (_metadata); if (session ()->push_msg (msg_) == -1) { if (errno == EAGAIN) _process_msg = &ws_engine_t::push_one_then_decode_and_push; return -1; } return 0; } int zmq::ws_engine_t::produce_close_message (msg_t *msg_) { int rc = msg_->move (_close_msg); errno_assert (rc == 0); _next_msg = static_cast<int (stream_engine_base_t::*) (msg_t *)> ( &ws_engine_t::produce_no_msg_after_close); return rc; } int zmq::ws_engine_t::produce_no_msg_after_close (msg_t *msg_) { LIBZMQ_UNUSED (msg_); _next_msg = static_cast<int (stream_engine_base_t::*) (msg_t *)> ( &ws_engine_t::close_connection_after_close); errno = EAGAIN; return -1; } int zmq::ws_engine_t::close_connection_after_close (msg_t *msg_) { LIBZMQ_UNUSED (msg_); error (connection_error); errno = ECONNRESET; return -1; } int zmq::ws_engine_t::produce_ping_message (msg_t *msg_) { int rc = msg_->init (); errno_assert (rc == 0); msg_->set_flags (msg_t::command | msg_t::ping); _next_msg = &ws_engine_t::pull_and_encode; if (!_has_timeout_timer && _heartbeat_timeout > 0) { add_timer (_heartbeat_timeout, heartbeat_timeout_timer_id); _has_timeout_timer = true; } return rc; } int zmq::ws_engine_t::produce_pong_message (msg_t *msg_) { int rc = msg_->init (); errno_assert (rc == 0); msg_->set_flags (msg_t::command | msg_t::pong); _next_msg = &ws_engine_t::pull_and_encode; return rc; } int zmq::ws_engine_t::process_command_message (msg_t *msg_) { if (msg_->is_ping ()) { _next_msg = static_cast<int (stream_engine_base_t::*) (msg_t *)> ( &ws_engine_t::produce_pong_message); out_event (); } else if (msg_->is_close_cmd ()) { int rc = _close_msg.copy (*msg_); errno_assert (rc == 0); _next_msg = static_cast<int (stream_engine_base_t::*) (msg_t *)> ( &ws_engine_t::produce_close_message); out_event (); } return 0; } static int encode_base64 (const unsigned char *in_, int in_len_, char *out_, int out_len_) { static const unsigned char base64enc_tab[65] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; int io = 0; uint32_t v = 0; int rem = 0; for (int ii = 0; ii < in_len_; ii++) { unsigned char ch; ch = in_[ii]; v = (v << 8) | ch; rem += 8; while (rem 
>= 6) { rem -= 6; if (io >= out_len_) return -1; /* truncation is failure */ out_[io++] = base64enc_tab[(v >> rem) & 63]; } } if (rem) { v <<= (6 - rem); if (io >= out_len_) return -1; /* truncation is failure */ out_[io++] = base64enc_tab[v & 63]; } while (io & 3) { if (io >= out_len_) return -1; /* truncation is failure */ out_[io++] = '='; } if (io >= out_len_) return -1; /* no room for null terminator */ out_[io] = 0; return io; } static void compute_accept_key (char *key_, unsigned char *hash_) { const char *magic_string = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"; #ifdef ZMQ_USE_NSS unsigned int len; HASH_HashType type = HASH_GetHashTypeByOidTag (SEC_OID_SHA1); HASHContext *ctx = HASH_Create (type); assert (ctx); HASH_Begin (ctx); HASH_Update (ctx, (unsigned char *) key_, (unsigned int) strlen (key_)); HASH_Update (ctx, (unsigned char *) magic_string, (unsigned int) strlen (magic_string)); HASH_End (ctx, hash_, &len, SHA_DIGEST_LENGTH); HASH_Destroy (ctx); #elif defined ZMQ_USE_BUILTIN_SHA1 sha1_ctxt ctx; SHA1_Init (&ctx); SHA1_Update (&ctx, (unsigned char *) key_, strlen (key_)); SHA1_Update (&ctx, (unsigned char *) magic_string, strlen (magic_string)); SHA1_Final (hash_, &ctx); #elif defined ZMQ_USE_GNUTLS gnutls_hash_hd_t hd; gnutls_hash_init (&hd, GNUTLS_DIG_SHA1); gnutls_hash (hd, key_, strlen (key_)); gnutls_hash (hd, magic_string, strlen (magic_string)); gnutls_hash_deinit (hd, hash_); #else #error "No sha1 implementation set" #endif }
sophomore_public/libzmq
src/ws_engine.cpp
C++
gpl-3.0
38,824
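For context on the Sec-WebSocket-Accept computation above: RFC 6455 derives it by hashing the client's key plus a fixed GUID with SHA-1 and base64-encoding the digest. Below is a minimal, self-contained sketch of the same bit-packing base64 step that encode_base64 in ws_engine.cpp performs; the helper name ws_base64_sketch and the "Man" test vector are illustrative and not part of libzmq.

#include <cassert>
#include <string>

//  Standalone restatement of the 6-bit-group base64 encoder used for the
//  Sec-WebSocket-Accept header; returns the encoded string.
static std::string ws_base64_sketch (const unsigned char *in, int in_len)
{
    static const char tab[] =
      "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
    std::string out;
    unsigned int v = 0;
    int rem = 0;
    for (int i = 0; i < in_len; i++) {
        v = (v << 8) | in[i];
        rem += 8;
        while (rem >= 6) { //  emit complete 6-bit groups
            rem -= 6;
            out += tab[(v >> rem) & 63];
        }
    }
    if (rem) { //  leftover bits, left-aligned into one more character
        v <<= (6 - rem);
        out += tab[v & 63];
    }
    while (out.size () % 4) //  pad to a multiple of four characters
        out += '=';
    return out;
}

int main ()
{
    //  "Man" -> "TWFu" is the canonical base64 check vector.
    const unsigned char sample[] = {'M', 'a', 'n'};
    assert (ws_base64_sketch (sample, 3) == "TWFu");
    return 0;
}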
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __ZMQ_WS_ENGINE_HPP_INCLUDED__ #define __ZMQ_WS_ENGINE_HPP_INCLUDED__ #include "io_object.hpp" #include "address.hpp" #include "msg.hpp" #include "stream_engine_base.hpp" #include "ws_address.hpp" #define WS_BUFFER_SIZE 8192 #define MAX_HEADER_NAME_LENGTH 1024 #define MAX_HEADER_VALUE_LENGTH 2048 namespace zmq { class io_thread_t; class session_base_t; typedef enum { handshake_initial = 0, request_line_G, request_line_GE, request_line_GET, request_line_GET_space, request_line_resource, request_line_resource_space, request_line_H, request_line_HT, request_line_HTT, request_line_HTTP, request_line_HTTP_slash, request_line_HTTP_slash_1, request_line_HTTP_slash_1_dot, request_line_HTTP_slash_1_dot_1, request_line_cr, header_field_begin_name, header_field_name, header_field_colon, header_field_value_trailing_space, header_field_value, header_field_cr, handshake_end_line_cr, handshake_complete, handshake_error = -1 } ws_server_handshake_state_t; typedef enum { client_handshake_initial = 0, response_line_H, response_line_HT, response_line_HTT, response_line_HTTP, response_line_HTTP_slash, response_line_HTTP_slash_1, response_line_HTTP_slash_1_dot, response_line_HTTP_slash_1_dot_1, response_line_HTTP_slash_1_dot_1_space, response_line_status_1, response_line_status_10, response_line_status_101, response_line_status_101_space, response_line_s, response_line_sw, response_line_swi, response_line_swit, response_line_switc, response_line_switch, response_line_switchi, response_line_switchin, response_line_switching, response_line_switching_space, response_line_p, response_line_pr, response_line_pro, response_line_prot, response_line_proto, response_line_protoc, response_line_protoco, response_line_protocol, response_line_protocols, response_line_cr, client_header_field_begin_name, client_header_field_name, client_header_field_colon, client_header_field_value_trailing_space, client_header_field_value, client_header_field_cr, client_handshake_end_line_cr, client_handshake_complete, client_handshake_error = -1 } ws_client_handshake_state_t; class ws_engine_t : public stream_engine_base_t { public: ws_engine_t (fd_t fd_, const options_t &options_, const endpoint_uri_pair_t &endpoint_uri_pair_, const ws_address_t &address_, bool client_); ~ws_engine_t (); protected: int decode_and_push (msg_t *msg_); int process_command_message (msg_t *msg_); int produce_pong_message (msg_t *msg_); int produce_ping_message (msg_t *msg_); bool handshake (); void plug_internal (); void start_ws_handshake (); private: int routing_id_msg (msg_t *msg_); int process_routing_id_msg (msg_t *msg_); int produce_close_message (msg_t *msg_); int produce_no_msg_after_close (msg_t *msg_); int close_connection_after_close (msg_t *msg_); bool select_protocol (const char *protocol); bool client_handshake (); bool server_handshake (); bool _client; ws_address_t _address; ws_client_handshake_state_t _client_handshake_state; ws_server_handshake_state_t _server_handshake_state; unsigned char _read_buffer[WS_BUFFER_SIZE]; unsigned char _write_buffer[WS_BUFFER_SIZE]; char _header_name[MAX_HEADER_NAME_LENGTH + 1]; int _header_name_position; char _header_value[MAX_HEADER_VALUE_LENGTH + 1]; int _header_value_position; bool _header_upgrade_websocket; bool _header_connection_upgrade; char _websocket_protocol[256]; char _websocket_key[MAX_HEADER_VALUE_LENGTH + 1]; char _websocket_accept[MAX_HEADER_VALUE_LENGTH + 1]; int _heartbeat_timeout; msg_t _close_msg; }; } #endif
sophomore_public/libzmq
src/ws_engine.hpp
C++
gpl-3.0
4,019
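ws_engine.hpp declares the request_line_* and header_field_* states parsed on the server side. As a rough illustration only, this is the shape of the HTTP/1.1 upgrade request those states accept; the resource path, host, and Sec-WebSocket-Key value are placeholders, while "ZWS2.0" matches the subprotocol the engine's comments say is currently supported.

#include <cstdio>

//  Shape of the upgrade request the server handshake state machine expects.
//  All field values below are illustrative.
int main ()
{
    char request[512];
    std::snprintf (request, sizeof (request),
                   "GET /chat HTTP/1.1\r\n"
                   "Host: example.com:80\r\n"
                   "Upgrade: websocket\r\n"
                   "Connection: Upgrade\r\n"
                   "Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==\r\n"
                   "Sec-WebSocket-Protocol: ZWS2.0\r\n"
                   "Sec-WebSocket-Version: 13\r\n"
                   "\r\n");
    std::fputs (request, stdout);
    return 0;
}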
/* SPDX-License-Identifier: MPL-2.0 */ #include "precompiled.hpp" #include <new> #include <string> #include <stdio.h> #include "ws_listener.hpp" #include "io_thread.hpp" #include "config.hpp" #include "err.hpp" #include "ip.hpp" #include "tcp.hpp" #include "socket_base.hpp" #include "address.hpp" #include "ws_engine.hpp" #include "session_base.hpp" #ifdef ZMQ_HAVE_WSS #include "wss_engine.hpp" #include "wss_address.hpp" #endif #ifndef ZMQ_HAVE_WINDOWS #include <unistd.h> #include <sys/socket.h> #include <arpa/inet.h> #include <netinet/tcp.h> #include <netinet/in.h> #include <netdb.h> #include <fcntl.h> #ifdef ZMQ_HAVE_VXWORKS #include <sockLib.h> #endif #endif #ifdef ZMQ_HAVE_OPENVMS #include <ioctl.h> #endif zmq::ws_listener_t::ws_listener_t (io_thread_t *io_thread_, socket_base_t *socket_, const options_t &options_, bool wss_) : stream_listener_base_t (io_thread_, socket_, options_), _wss (wss_) { #ifdef ZMQ_HAVE_WSS if (_wss) { int rc = gnutls_certificate_allocate_credentials (&_tls_cred); zmq_assert (rc == GNUTLS_E_SUCCESS); gnutls_datum_t cert = {(unsigned char *) options_.wss_cert_pem.c_str (), (unsigned int) options_.wss_cert_pem.length ()}; gnutls_datum_t key = {(unsigned char *) options_.wss_key_pem.c_str (), (unsigned int) options_.wss_key_pem.length ()}; rc = gnutls_certificate_set_x509_key_mem (_tls_cred, &cert, &key, GNUTLS_X509_FMT_PEM); zmq_assert (rc == GNUTLS_E_SUCCESS); } #endif } zmq::ws_listener_t::~ws_listener_t () { #ifdef ZMQ_HAVE_WSS if (_wss) gnutls_certificate_free_credentials (_tls_cred); #endif } void zmq::ws_listener_t::in_event () { const fd_t fd = accept (); // If connection was reset by the peer in the meantime, just ignore it. // TODO: Handle specific errors like ENFILE/EMFILE etc. if (fd == retired_fd) { _socket->event_accept_failed ( make_unconnected_bind_endpoint_pair (_endpoint), zmq_errno ()); return; } int rc = tune_tcp_socket (fd); rc = rc | tune_tcp_maxrt (fd, options.tcp_maxrt); if (rc != 0) { _socket->event_accept_failed ( make_unconnected_bind_endpoint_pair (_endpoint), zmq_errno ()); return; } // Create the engine object for this connection. create_engine (fd); } std::string zmq::ws_listener_t::get_socket_name (zmq::fd_t fd_, socket_end_t socket_end_) const { std::string socket_name; #ifdef ZMQ_HAVE_WSS if (_wss) socket_name = zmq::get_socket_name<wss_address_t> (fd_, socket_end_); else #endif socket_name = zmq::get_socket_name<ws_address_t> (fd_, socket_end_); return socket_name + _address.path (); } int zmq::ws_listener_t::create_socket (const char *addr_) { tcp_address_t address; _s = tcp_open_socket (addr_, options, true, true, &address); if (_s == retired_fd) { return -1; } // TODO why is this only done for the listener? make_socket_noninheritable (_s); // Allow reusing of the address. int flag = 1; int rc; #ifdef ZMQ_HAVE_WINDOWS // TODO this was changed for Windows from SO_REUSEADDRE to // SE_EXCLUSIVEADDRUSE by 0ab65324195ad70205514d465b03d851a6de051c, // so the comment above is no longer correct; also, now the settings are // different between listener and connecter with a src address. // is this intentional? rc = setsockopt (_s, SOL_SOCKET, SO_EXCLUSIVEADDRUSE, reinterpret_cast<const char *> (&flag), sizeof (int)); wsa_assert (rc != SOCKET_ERROR); #elif defined ZMQ_HAVE_VXWORKS rc = setsockopt (_s, SOL_SOCKET, SO_REUSEADDR, (char *) &flag, sizeof (int)); errno_assert (rc == 0); #else rc = setsockopt (_s, SOL_SOCKET, SO_REUSEADDR, &flag, sizeof (int)); errno_assert (rc == 0); #endif // Bind the socket to the network interface and port. 
#if defined ZMQ_HAVE_VXWORKS rc = bind (_s, (sockaddr *) _address.addr (), _address.addrlen ()); #else rc = bind (_s, address.addr (), address.addrlen ()); #endif #ifdef ZMQ_HAVE_WINDOWS if (rc == SOCKET_ERROR) { errno = wsa_error_to_errno (WSAGetLastError ()); goto error; } #else if (rc != 0) goto error; #endif // Listen for incoming connections. rc = listen (_s, options.backlog); #ifdef ZMQ_HAVE_WINDOWS if (rc == SOCKET_ERROR) { errno = wsa_error_to_errno (WSAGetLastError ()); goto error; } #else if (rc != 0) goto error; #endif return 0; error: const int err = errno; close (); errno = err; return -1; } int zmq::ws_listener_t::set_local_address (const char *addr_) { if (options.use_fd != -1) { // in this case, the addr_ passed is not used and ignored, since the // socket was already created by the application _s = options.use_fd; } else { const int rc = _address.resolve (addr_, true, options.ipv6); if (rc != 0) return -1; // remove the path, otherwise resolving the port will fail with wildcard const char *delim = strrchr (addr_, '/'); std::string host_address; if (delim) { host_address = std::string (addr_, delim - addr_); } else { host_address = addr_; } if (create_socket (host_address.c_str ()) == -1) return -1; } _endpoint = get_socket_name (_s, socket_end_local); _socket->event_listening (make_unconnected_bind_endpoint_pair (_endpoint), _s); return 0; } zmq::fd_t zmq::ws_listener_t::accept () { // The situation where connection cannot be accepted due to insufficient // resources is considered valid and treated by ignoring the connection. // Accept one connection and deal with different failure modes. zmq_assert (_s != retired_fd); struct sockaddr_storage ss; memset (&ss, 0, sizeof (ss)); #if defined ZMQ_HAVE_HPUX || defined ZMQ_HAVE_VXWORKS int ss_len = sizeof (ss); #else socklen_t ss_len = sizeof (ss); #endif #if defined ZMQ_HAVE_SOCK_CLOEXEC && defined HAVE_ACCEPT4 fd_t sock = ::accept4 (_s, reinterpret_cast<struct sockaddr *> (&ss), &ss_len, SOCK_CLOEXEC); #else const fd_t sock = ::accept (_s, reinterpret_cast<struct sockaddr *> (&ss), &ss_len); #endif if (sock == retired_fd) { #if defined ZMQ_HAVE_WINDOWS const int last_error = WSAGetLastError (); wsa_assert (last_error == WSAEWOULDBLOCK || last_error == WSAECONNRESET || last_error == WSAEMFILE || last_error == WSAENOBUFS); #elif defined ZMQ_HAVE_ANDROID errno_assert (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR || errno == ECONNABORTED || errno == EPROTO || errno == ENOBUFS || errno == ENOMEM || errno == EMFILE || errno == ENFILE || errno == EINVAL); #else errno_assert (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR || errno == ECONNABORTED || errno == EPROTO || errno == ENOBUFS || errno == ENOMEM || errno == EMFILE || errno == ENFILE); #endif return retired_fd; } make_socket_noninheritable (sock); if (zmq::set_nosigpipe (sock)) { #ifdef ZMQ_HAVE_WINDOWS const int rc = closesocket (sock); wsa_assert (rc != SOCKET_ERROR); #else int rc = ::close (sock); errno_assert (rc == 0); #endif return retired_fd; } // Set the IP Type-Of-Service priority for this client socket if (options.tos != 0) set_ip_type_of_service (sock, options.tos); // Set the protocol-defined priority for this client socket if (options.priority != 0) set_socket_priority (sock, options.priority); return sock; } void zmq::ws_listener_t::create_engine (fd_t fd_) { const endpoint_uri_pair_t endpoint_pair ( get_socket_name (fd_, socket_end_local), get_socket_name (fd_, socket_end_remote), endpoint_type_bind); i_engine *engine = NULL; if (_wss) 
#ifdef ZMQ_HAVE_WSS engine = new (std::nothrow) wss_engine_t (fd_, options, endpoint_pair, _address, false, _tls_cred, std::string ()); #else zmq_assert (false); #endif else engine = new (std::nothrow) ws_engine_t (fd_, options, endpoint_pair, _address, false); alloc_assert (engine); // Choose I/O thread to run connecter in. Given that we are already // running in an I/O thread, there must be at least one available. io_thread_t *io_thread = choose_io_thread (options.affinity); zmq_assert (io_thread); // Create and launch a session object. session_base_t *session = session_base_t::create (io_thread, false, _socket, options, NULL); errno_assert (session); session->inc_seqnum (); launch_child (session); send_attach (session, engine, false); _socket->event_accepted (endpoint_pair, fd_); }
sophomore_public/libzmq
src/ws_listener.cpp
C++
gpl-3.0
9,364
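From the application side, ws_listener_t is reached simply by binding a ws:// endpoint on an ordinary socket. A minimal sketch, assuming a libzmq build with the WebSocket transport enabled; the port number is arbitrary.

#include <zmq.h>
#include <assert.h>

//  Binding a ws:// endpoint on a regular libzmq socket creates a
//  ws_listener_t under the hood.
int main ()
{
    void *ctx = zmq_ctx_new ();
    void *pull = zmq_socket (ctx, ZMQ_PULL);

    int rc = zmq_bind (pull, "ws://*:8080");
    assert (rc == 0);

    //  From here, frames sent by WebSocket peers arrive through zmq_recv ()
    //  exactly as they would on a tcp:// endpoint.

    zmq_close (pull);
    zmq_ctx_term (ctx);
    return 0;
}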
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __ZMQ_WS_LISTENER_HPP_INCLUDED__ #define __ZMQ_WS_LISTENER_HPP_INCLUDED__ #include "fd.hpp" #include "ws_address.hpp" #include "stream_listener_base.hpp" #ifdef ZMQ_USE_GNUTLS #include <gnutls/gnutls.h> #endif namespace zmq { class ws_listener_t ZMQ_FINAL : public stream_listener_base_t { public: ws_listener_t (zmq::io_thread_t *io_thread_, zmq::socket_base_t *socket_, const options_t &options_, bool wss_); ~ws_listener_t (); // Set address to listen on. int set_local_address (const char *addr_); protected: std::string get_socket_name (fd_t fd_, socket_end_t socket_end_) const; void create_engine (fd_t fd); private: // Handlers for I/O events. void in_event (); // Accept the new connection. Returns the file descriptor of the // newly created connection. The function may return retired_fd // if the connection was dropped while waiting in the listen backlog // or was denied because of accept filters. fd_t accept (); int create_socket (const char *addr_); // Address to listen on. ws_address_t _address; bool _wss; #ifdef ZMQ_HAVE_WSS gnutls_certificate_credentials_t _tls_cred; #endif ZMQ_NON_COPYABLE_NOR_MOVABLE (ws_listener_t) }; } #endif
sophomore_public/libzmq
src/ws_listener.hpp
C++
gpl-3.0
1,361
/* SPDX-License-Identifier: MPL-2.0 */

#ifndef __ZMQ_WS_PROTOCOL_HPP_INCLUDED__
#define __ZMQ_WS_PROTOCOL_HPP_INCLUDED__

namespace zmq
{
//  Definition of constants for WS transport protocol.
class ws_protocol_t
{
  public:
    //  Message flags.
    enum opcode_t
    {
        opcode_continuation = 0,
        opcode_text = 0x01,
        opcode_binary = 0x02,
        opcode_close = 0x08,
        opcode_ping = 0x09,
        opcode_pong = 0xA
    };

    enum
    {
        more_flag = 1,
        command_flag = 2
    };
};
}

#endif
sophomore_public/libzmq
src/ws_protocol.hpp
C++
gpl-3.0
538
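The opcode_t constants above occupy the low nibble of the first byte of an RFC 6455 frame, with 0x80 as the FIN bit. A small self-contained sketch of that composition follows; the local ws_opcode enum restates the relevant values so the snippet compiles on its own.

#include <cassert>
#include <cstdint>

//  Restated opcode values; in libzmq they live in zmq::ws_protocol_t.
enum ws_opcode { ws_binary = 0x02, ws_ping = 0x09, ws_pong = 0x0A };

//  Compose the first byte of a WebSocket frame: FIN bit plus opcode nibble.
static uint8_t make_first_byte (ws_opcode op, bool fin)
{
    return static_cast<uint8_t> ((fin ? 0x80 : 0x00) | op);
}

int main ()
{
    assert (make_first_byte (ws_binary, true) == 0x82); //  final binary frame
    assert ((make_first_byte (ws_ping, true) & 0x0F) == 0x09); //  opcode nibble
    return 0;
}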
/* SPDX-License-Identifier: MPL-2.0 */

#include "precompiled.hpp"
#include <string>
#include <sstream>

#include "wss_address.hpp"

zmq::wss_address_t::wss_address_t () : ws_address_t ()
{
}

zmq::wss_address_t::wss_address_t (const sockaddr *sa_, socklen_t sa_len_) :
    ws_address_t (sa_, sa_len_)
{
}

int zmq::wss_address_t::to_string (std::string &addr_) const
{
    std::ostringstream os;
    os << std::string ("wss://") << host () << std::string (":")
       << _address.port () << path ();
    addr_ = os.str ();
    return 0;
}
sophomore_public/libzmq
src/wss_address.cpp
C++
gpl-3.0
541
/* SPDX-License-Identifier: MPL-2.0 */

#ifndef __ZMQ_WSS_ADDRESS_HPP_INCLUDED__
#define __ZMQ_WSS_ADDRESS_HPP_INCLUDED__

#include "ws_address.hpp"

namespace zmq
{
class wss_address_t : public ws_address_t
{
  public:
    wss_address_t ();
    wss_address_t (const sockaddr *sa_, socklen_t sa_len_);

    //  The opposite to resolve()
    int to_string (std::string &addr_) const;
};
}

#endif
sophomore_public/libzmq
src/wss_address.hpp
C++
gpl-3.0
395
/* SPDX-License-Identifier: MPL-2.0 */ #include "precompiled.hpp" #include "wss_engine.hpp" static int verify_certificate_callback (gnutls_session_t session) { unsigned int status; const char *hostname; // read hostname hostname = (const char *) gnutls_session_get_ptr (session); int rc = gnutls_certificate_verify_peers3 (session, hostname, &status); zmq_assert (rc >= 0); if (status != 0) { // TODO: somehow log the error // Certificate is not trusted return GNUTLS_E_CERTIFICATE_ERROR; } // notify gnutls to continue handshake normally return 0; } zmq::wss_engine_t::wss_engine_t (fd_t fd_, const options_t &options_, const endpoint_uri_pair_t &endpoint_uri_pair_, ws_address_t &address_, bool client_, void *tls_server_cred_, const std::string &hostname_) : ws_engine_t (fd_, options_, endpoint_uri_pair_, address_, client_), _established (false), _tls_client_cred (NULL) { int rc = 0; if (client_) { // TODO: move to session_base, to allow changing the socket options between connect calls rc = gnutls_certificate_allocate_credentials (&_tls_client_cred); zmq_assert (rc == 0); if (options_.wss_trust_system) gnutls_certificate_set_x509_system_trust (_tls_client_cred); if (options_.wss_trust_pem.length () > 0) { gnutls_datum_t trust = { (unsigned char *) options_.wss_trust_pem.c_str (), (unsigned int) options_.wss_trust_pem.length ()}; rc = gnutls_certificate_set_x509_trust_mem ( _tls_client_cred, &trust, GNUTLS_X509_FMT_PEM); zmq_assert (rc >= 0); } gnutls_certificate_set_verify_function (_tls_client_cred, verify_certificate_callback); rc = gnutls_init (&_tls_session, GNUTLS_CLIENT | GNUTLS_NONBLOCK); zmq_assert (rc == GNUTLS_E_SUCCESS); if (!hostname_.empty ()) gnutls_server_name_set (_tls_session, GNUTLS_NAME_DNS, hostname_.c_str (), hostname_.size ()); gnutls_session_set_ptr ( _tls_session, hostname_.empty () ? 
NULL : const_cast<char *> (hostname_.c_str ())); rc = gnutls_credentials_set (_tls_session, GNUTLS_CRD_CERTIFICATE, _tls_client_cred); zmq_assert (rc == GNUTLS_E_SUCCESS); } else { zmq_assert (tls_server_cred_); rc = gnutls_init (&_tls_session, GNUTLS_SERVER | GNUTLS_NONBLOCK); zmq_assert (rc == GNUTLS_E_SUCCESS); rc = gnutls_credentials_set (_tls_session, GNUTLS_CRD_CERTIFICATE, tls_server_cred_); zmq_assert (rc == GNUTLS_E_SUCCESS); } gnutls_set_default_priority (_tls_session); gnutls_transport_set_int (_tls_session, fd_); } zmq::wss_engine_t::~wss_engine_t () { gnutls_deinit (_tls_session); if (_tls_client_cred) gnutls_certificate_free_credentials (_tls_client_cred); } void zmq::wss_engine_t::plug_internal () { set_pollin (); in_event (); } void zmq::wss_engine_t::out_event () { if (_established) return ws_engine_t::out_event (); do_handshake (); } bool zmq::wss_engine_t::do_handshake () { int rc = gnutls_handshake (_tls_session); reset_pollout (); if (rc == GNUTLS_E_SUCCESS) { start_ws_handshake (); _established = true; return false; } else if (rc == GNUTLS_E_AGAIN) { int direction = gnutls_record_get_direction (_tls_session); if (direction == 1) set_pollout (); return false; } else if (rc == GNUTLS_E_INTERRUPTED || rc == GNUTLS_E_WARNING_ALERT_RECEIVED) { return false; } else { error (zmq::i_engine::connection_error); return false; } return true; } bool zmq::wss_engine_t::handshake () { if (!_established) { if (!do_handshake ()) { return false; } } return ws_engine_t::handshake (); } int zmq::wss_engine_t::read (void *data_, size_t size_) { ssize_t rc = gnutls_record_recv (_tls_session, data_, size_); if (rc == GNUTLS_E_REHANDSHAKE) { gnutls_alert_send (_tls_session, GNUTLS_AL_WARNING, GNUTLS_A_NO_RENEGOTIATION); return 0; } if (rc == GNUTLS_E_INTERRUPTED) { errno = EINTR; return -1; } if (rc == GNUTLS_E_AGAIN) { errno = EAGAIN; return -1; } if (rc == 0) { errno = EPIPE; return -1; } if (rc < 0) { errno = EINVAL; return -1; } // TODO: change return type to ssize_t (signed) return rc; } int zmq::wss_engine_t::write (const void *data_, size_t size_) { ssize_t rc = gnutls_record_send (_tls_session, data_, size_); if (rc == GNUTLS_E_INTERRUPTED || rc == GNUTLS_E_AGAIN) { return 0; } if (rc < 0) { errno = EINVAL; return -1; } // TODO: change return type to ssize_t (signed) return rc; }
sophomore_public/libzmq
src/wss_engine.cpp
C++
gpl-3.0
5,347
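On the client side, wss_engine_t is selected when connecting to a wss:// endpoint. A hedged sketch follows, assuming a draft-API build in which the ZMQ_WSS_TRUST_SYSTEM option (backing the wss_trust_system setting used above) is available; the endpoint is a placeholder.

#include <zmq.h>
#include <assert.h>

//  Connecting over TLS using the system trust store.  ZMQ_WSS_TRUST_SYSTEM
//  and the wss:// transport are draft API, so ZMQ_BUILD_DRAFT_API is assumed.
int main ()
{
    void *ctx = zmq_ctx_new ();
    void *push = zmq_socket (ctx, ZMQ_PUSH);

    int trust_system = 1;
    int rc = zmq_setsockopt (push, ZMQ_WSS_TRUST_SYSTEM, &trust_system,
                             sizeof (trust_system));
    assert (rc == 0);

    rc = zmq_connect (push, "wss://example.com:443"); //  placeholder endpoint
    assert (rc == 0);

    zmq_close (push);
    zmq_ctx_term (ctx);
    return 0;
}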
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __ZMQ_WSS_ENGINE_HPP_INCLUDED__ #define __ZMQ_WSS_ENGINE_HPP_INCLUDED__ #include <gnutls/gnutls.h> #include "ws_engine.hpp" #define WSS_BUFFER_SIZE 8192 namespace zmq { class wss_engine_t : public ws_engine_t { public: wss_engine_t (fd_t fd_, const options_t &options_, const endpoint_uri_pair_t &endpoint_uri_pair_, ws_address_t &address_, bool client_, void *tls_server_cred_, const std::string &hostname_); ~wss_engine_t (); void out_event (); protected: bool handshake (); void plug_internal (); int read (void *data, size_t size_); int write (const void *data_, size_t size_); private: bool do_handshake (); bool _established; gnutls_certificate_credentials_t _tls_client_cred; gnutls_session_t _tls_session; }; } #endif
sophomore_public/libzmq
src/wss_engine.hpp
C++
gpl-3.0
940
/* SPDX-License-Identifier: MPL-2.0 */ #include "precompiled.hpp" #include <string.h> #include "xpub.hpp" #include "pipe.hpp" #include "err.hpp" #include "msg.hpp" #include "macros.hpp" #include "generic_mtrie_impl.hpp" zmq::xpub_t::xpub_t (class ctx_t *parent_, uint32_t tid_, int sid_) : socket_base_t (parent_, tid_, sid_), _verbose_subs (false), _verbose_unsubs (false), _more_send (false), _more_recv (false), _process_subscribe (false), _only_first_subscribe (false), _lossy (true), _manual (false), _send_last_pipe (false), _pending_pipes (), _welcome_msg () { _last_pipe = NULL; options.type = ZMQ_XPUB; _welcome_msg.init (); } zmq::xpub_t::~xpub_t () { _welcome_msg.close (); for (std::deque<metadata_t *>::iterator it = _pending_metadata.begin (), end = _pending_metadata.end (); it != end; ++it) if (*it && (*it)->drop_ref ()) LIBZMQ_DELETE (*it); } void zmq::xpub_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_, bool locally_initiated_) { LIBZMQ_UNUSED (locally_initiated_); zmq_assert (pipe_); _dist.attach (pipe_); // If subscribe_to_all_ is specified, the caller would like to subscribe // to all data on this pipe, implicitly. if (subscribe_to_all_) _subscriptions.add (NULL, 0, pipe_); // if welcome message exists, send a copy of it if (_welcome_msg.size () > 0) { msg_t copy; copy.init (); const int rc = copy.copy (_welcome_msg); errno_assert (rc == 0); const bool ok = pipe_->write (&copy); zmq_assert (ok); pipe_->flush (); } // The pipe is active when attached. Let's read the subscriptions from // it, if any. xread_activated (pipe_); } void zmq::xpub_t::xread_activated (pipe_t *pipe_) { // There are some subscriptions waiting. Let's process them. msg_t msg; while (pipe_->read (&msg)) { metadata_t *metadata = msg.metadata (); unsigned char *msg_data = static_cast<unsigned char *> (msg.data ()), *data = NULL; size_t size = 0; bool subscribe = false; bool is_subscribe_or_cancel = false; bool notify = false; const bool first_part = !_more_recv; _more_recv = (msg.flags () & msg_t::more) != 0; if (first_part || _process_subscribe) { // Apply the subscription to the trie if (msg.is_subscribe () || msg.is_cancel ()) { data = static_cast<unsigned char *> (msg.command_body ()); size = msg.command_body_size (); subscribe = msg.is_subscribe (); is_subscribe_or_cancel = true; } else if (msg.size () > 0 && (*msg_data == 0 || *msg_data == 1)) { data = msg_data + 1; size = msg.size () - 1; subscribe = *msg_data == 1; is_subscribe_or_cancel = true; } } if (first_part) _process_subscribe = !_only_first_subscribe || is_subscribe_or_cancel; if (is_subscribe_or_cancel) { if (_manual) { // Store manual subscription to use on termination if (!subscribe) _manual_subscriptions.rm (data, size, pipe_); else _manual_subscriptions.add (data, size, pipe_); _pending_pipes.push_back (pipe_); } else { if (!subscribe) { const mtrie_t::rm_result rm_result = _subscriptions.rm (data, size, pipe_); // TODO reconsider what to do if rm_result == mtrie_t::not_found notify = rm_result != mtrie_t::values_remain || _verbose_unsubs; } else { const bool first_added = _subscriptions.add (data, size, pipe_); notify = first_added || _verbose_subs; } } // If the request was a new subscription, or the subscription // was removed, or verbose mode or manual mode are enabled, store it // so that it can be passed to the user on next recv call. 
if (_manual || (options.type == ZMQ_XPUB && notify)) { // ZMTP 3.1 hack: we need to support sub/cancel commands, but // we can't give them back to userspace as it would be an API // breakage since the payload of the message is completely // different. Manually craft an old-style message instead. // Although with other transports it would be possible to simply // reuse the same buffer and prefix a 0/1 byte to the topic, with // inproc the subscribe/cancel command string is not present in // the message, so this optimization is not possible. // The pushback makes a copy of the data array anyway, so the // number of buffer copies does not change. blob_t notification (size + 1); if (subscribe) *notification.data () = 1; else *notification.data () = 0; memcpy (notification.data () + 1, data, size); _pending_data.push_back (ZMQ_MOVE (notification)); if (metadata) metadata->add_ref (); _pending_metadata.push_back (metadata); _pending_flags.push_back (0); } } else if (options.type != ZMQ_PUB) { // Process user message coming upstream from xsub socket, // but not if the type is PUB, which never processes user // messages _pending_data.push_back (blob_t (msg_data, msg.size ())); if (metadata) metadata->add_ref (); _pending_metadata.push_back (metadata); _pending_flags.push_back (msg.flags ()); } msg.close (); } } void zmq::xpub_t::xwrite_activated (pipe_t *pipe_) { _dist.activated (pipe_); } int zmq::xpub_t::xsetsockopt (int option_, const void *optval_, size_t optvallen_) { if (option_ == ZMQ_XPUB_VERBOSE || option_ == ZMQ_XPUB_VERBOSER || option_ == ZMQ_XPUB_MANUAL_LAST_VALUE || option_ == ZMQ_XPUB_NODROP || option_ == ZMQ_XPUB_MANUAL || option_ == ZMQ_ONLY_FIRST_SUBSCRIBE) { if (optvallen_ != sizeof (int) || *static_cast<const int *> (optval_) < 0) { errno = EINVAL; return -1; } if (option_ == ZMQ_XPUB_VERBOSE) { _verbose_subs = (*static_cast<const int *> (optval_) != 0); _verbose_unsubs = false; } else if (option_ == ZMQ_XPUB_VERBOSER) { _verbose_subs = (*static_cast<const int *> (optval_) != 0); _verbose_unsubs = _verbose_subs; } else if (option_ == ZMQ_XPUB_MANUAL_LAST_VALUE) { _manual = (*static_cast<const int *> (optval_) != 0); _send_last_pipe = _manual; } else if (option_ == ZMQ_XPUB_NODROP) _lossy = (*static_cast<const int *> (optval_) == 0); else if (option_ == ZMQ_XPUB_MANUAL) _manual = (*static_cast<const int *> (optval_) != 0); else if (option_ == ZMQ_ONLY_FIRST_SUBSCRIBE) _only_first_subscribe = (*static_cast<const int *> (optval_) != 0); } else if (option_ == ZMQ_SUBSCRIBE && _manual) { if (_last_pipe != NULL) _subscriptions.add ((unsigned char *) optval_, optvallen_, _last_pipe); } else if (option_ == ZMQ_UNSUBSCRIBE && _manual) { if (_last_pipe != NULL) _subscriptions.rm ((unsigned char *) optval_, optvallen_, _last_pipe); } else if (option_ == ZMQ_XPUB_WELCOME_MSG) { _welcome_msg.close (); if (optvallen_ > 0) { const int rc = _welcome_msg.init_size (optvallen_); errno_assert (rc == 0); unsigned char *data = static_cast<unsigned char *> (_welcome_msg.data ()); memcpy (data, optval_, optvallen_); } else _welcome_msg.init (); } else { errno = EINVAL; return -1; } return 0; } int zmq::xpub_t::xgetsockopt (int option_, void *optval_, size_t *optvallen_) { if (option_ == ZMQ_TOPICS_COUNT) { // make sure to use a multi-thread safe function to avoid race conditions with I/O threads // where subscriptions are processed: return do_getsockopt<int> (optval_, optvallen_, (int) _subscriptions.num_prefixes ()); } // room for future options here errno = EINVAL; return -1; } static void stub 
(zmq::mtrie_t::prefix_t data_, size_t size_, void *arg_) { LIBZMQ_UNUSED (data_); LIBZMQ_UNUSED (size_); LIBZMQ_UNUSED (arg_); } void zmq::xpub_t::xpipe_terminated (pipe_t *pipe_) { if (_manual) { // Remove the pipe from the trie and send corresponding manual // unsubscriptions upstream. _manual_subscriptions.rm (pipe_, send_unsubscription, this, false); // Remove pipe without actually sending the message as it was taken // care of by the manual call above. subscriptions is the real mtrie, // so the pipe must be removed from there or it will be left over. _subscriptions.rm (pipe_, stub, static_cast<void *> (NULL), false); // In case the pipe is currently set as last we must clear it to prevent // subscriptions from being re-added. if (pipe_ == _last_pipe) { _last_pipe = NULL; } } else { // Remove the pipe from the trie. If there are topics that nobody // is interested in anymore, send corresponding unsubscriptions // upstream. _subscriptions.rm (pipe_, send_unsubscription, this, !_verbose_unsubs); } _dist.pipe_terminated (pipe_); } void zmq::xpub_t::mark_as_matching (pipe_t *pipe_, xpub_t *self_) { self_->_dist.match (pipe_); } void zmq::xpub_t::mark_last_pipe_as_matching (pipe_t *pipe_, xpub_t *self_) { if (self_->_last_pipe == pipe_) self_->_dist.match (pipe_); } int zmq::xpub_t::xsend (msg_t *msg_) { const bool msg_more = (msg_->flags () & msg_t::more) != 0; // For the first part of multi-part message, find the matching pipes. if (!_more_send) { // Ensure nothing from previous failed attempt to send is left matched _dist.unmatch (); if (unlikely (_manual && _last_pipe && _send_last_pipe)) { _subscriptions.match (static_cast<unsigned char *> (msg_->data ()), msg_->size (), mark_last_pipe_as_matching, this); _last_pipe = NULL; } else _subscriptions.match (static_cast<unsigned char *> (msg_->data ()), msg_->size (), mark_as_matching, this); // If inverted matching is used, reverse the selection now if (options.invert_matching) { _dist.reverse_match (); } } int rc = -1; // Assume we fail if (_lossy || _dist.check_hwm ()) { if (_dist.send_to_matching (msg_) == 0) { // If we are at the end of multi-part message we can mark // all the pipes as non-matching. if (!msg_more) _dist.unmatch (); _more_send = msg_more; rc = 0; // Yay, sent successfully } } else errno = EAGAIN; return rc; } bool zmq::xpub_t::xhas_out () { return _dist.has_out (); } int zmq::xpub_t::xrecv (msg_t *msg_) { // If there is at least one if (_pending_data.empty ()) { errno = EAGAIN; return -1; } // User is reading a message, set last_pipe and remove it from the deque if (_manual && !_pending_pipes.empty ()) { _last_pipe = _pending_pipes.front (); _pending_pipes.pop_front (); // If the distributor doesn't know about this pipe it must have already // been terminated and thus we can't allow manual subscriptions. 
if (_last_pipe != NULL && !_dist.has_pipe (_last_pipe)) { _last_pipe = NULL; } } int rc = msg_->close (); errno_assert (rc == 0); rc = msg_->init_size (_pending_data.front ().size ()); errno_assert (rc == 0); memcpy (msg_->data (), _pending_data.front ().data (), _pending_data.front ().size ()); // set metadata only if there is some if (metadata_t *metadata = _pending_metadata.front ()) { msg_->set_metadata (metadata); // Remove ref corresponding to vector placement metadata->drop_ref (); } msg_->set_flags (_pending_flags.front ()); _pending_data.pop_front (); _pending_metadata.pop_front (); _pending_flags.pop_front (); return 0; } bool zmq::xpub_t::xhas_in () { return !_pending_data.empty (); } void zmq::xpub_t::send_unsubscription (zmq::mtrie_t::prefix_t data_, size_t size_, xpub_t *self_) { if (self_->options.type != ZMQ_PUB) { // Place the unsubscription to the queue of pending (un)subscriptions // to be retrieved by the user later on. blob_t unsub (size_ + 1); *unsub.data () = 0; if (size_ > 0) memcpy (unsub.data () + 1, data_, size_); self_->_pending_data.ZMQ_PUSH_OR_EMPLACE_BACK (ZMQ_MOVE (unsub)); self_->_pending_metadata.push_back (NULL); self_->_pending_flags.push_back (0); if (self_->_manual) { self_->_last_pipe = NULL; self_->_pending_pipes.push_back (NULL); } } }
sophomore_public/libzmq
src/xpub.cpp
C++
gpl-3.0
14,177
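The pending_data queue built above is what an application sees when it reads from an XPUB socket: a one-byte subscribe (1) or unsubscribe (0) flag followed by the topic. A minimal sketch over inproc; the endpoint name and topic are arbitrary.

#include <zmq.h>
#include <assert.h>
#include <string.h>

//  Receiving the old-style subscription notification on an XPUB socket.
int main ()
{
    void *ctx = zmq_ctx_new ();
    void *xpub = zmq_socket (ctx, ZMQ_XPUB);
    void *sub = zmq_socket (ctx, ZMQ_SUB);

    int rc = zmq_bind (xpub, "inproc://demo");
    assert (rc == 0);
    rc = zmq_connect (sub, "inproc://demo");
    assert (rc == 0);
    rc = zmq_setsockopt (sub, ZMQ_SUBSCRIBE, "topic", 5);
    assert (rc == 0);

    char buf[64];
    const int n = zmq_recv (xpub, buf, sizeof (buf), 0); //  subscription message
    assert (n == 6 && buf[0] == 1 && memcmp (buf + 1, "topic", 5) == 0);

    zmq_close (sub);
    zmq_close (xpub);
    zmq_ctx_term (ctx);
    return 0;
}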
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __ZMQ_XPUB_HPP_INCLUDED__ #define __ZMQ_XPUB_HPP_INCLUDED__ #include <deque> #include "socket_base.hpp" #include "session_base.hpp" #include "mtrie.hpp" #include "dist.hpp" namespace zmq { class ctx_t; class msg_t; class pipe_t; class io_thread_t; class xpub_t : public socket_base_t { public: xpub_t (zmq::ctx_t *parent_, uint32_t tid_, int sid_); ~xpub_t () ZMQ_OVERRIDE; // Implementations of virtual functions from socket_base_t. void xattach_pipe (zmq::pipe_t *pipe_, bool subscribe_to_all_ = false, bool locally_initiated_ = false) ZMQ_OVERRIDE; int xsend (zmq::msg_t *msg_) ZMQ_FINAL; bool xhas_out () ZMQ_FINAL; int xrecv (zmq::msg_t *msg_) ZMQ_OVERRIDE; bool xhas_in () ZMQ_OVERRIDE; void xread_activated (zmq::pipe_t *pipe_) ZMQ_FINAL; void xwrite_activated (zmq::pipe_t *pipe_) ZMQ_FINAL; int xsetsockopt (int option_, const void *optval_, size_t optvallen_) ZMQ_FINAL; int xgetsockopt (int option_, void *optval_, size_t *optvallen_) ZMQ_FINAL; void xpipe_terminated (zmq::pipe_t *pipe_) ZMQ_FINAL; private: // Function to be applied to the trie to send all the subscriptions // upstream. static void send_unsubscription (zmq::mtrie_t::prefix_t data_, size_t size_, xpub_t *self_); // Function to be applied to each matching pipes. static void mark_as_matching (zmq::pipe_t *pipe_, xpub_t *self_); // List of all subscriptions mapped to corresponding pipes. mtrie_t _subscriptions; // List of manual subscriptions mapped to corresponding pipes. mtrie_t _manual_subscriptions; // Distributor of messages holding the list of outbound pipes. dist_t _dist; // If true, send all subscription messages upstream, not just // unique ones bool _verbose_subs; // If true, send all unsubscription messages upstream, not just // unique ones bool _verbose_unsubs; // True if we are in the middle of sending a multi-part message. bool _more_send; // True if we are in the middle of receiving a multi-part message. bool _more_recv; // If true, subscribe and cancel messages are processed for the rest // of multipart message. bool _process_subscribe; // This option is enabled with ZMQ_ONLY_FIRST_SUBSCRIBE. // If true, messages following subscribe/unsubscribe in a multipart // message are treated as user data regardless of the first byte. bool _only_first_subscribe; // Drop messages if HWM reached, otherwise return with EAGAIN bool _lossy; // Subscriptions will not bed added automatically, only after calling set option with ZMQ_SUBSCRIBE or ZMQ_UNSUBSCRIBE bool _manual; // Send message to the last pipe, only used if xpub is on manual and after calling set option with ZMQ_SUBSCRIBE bool _send_last_pipe; // Function to be applied to match the last pipe. static void mark_last_pipe_as_matching (zmq::pipe_t *pipe_, xpub_t *self_); // Last pipe that sent subscription message, only used if xpub is on manual pipe_t *_last_pipe; // Pipes that sent subscriptions messages that have not yet been processed, only used if xpub is on manual std::deque<pipe_t *> _pending_pipes; // Welcome message to send to pipe when attached msg_t _welcome_msg; // List of pending (un)subscriptions, ie. those that were already // applied to the trie, but not yet received by the user. std::deque<blob_t> _pending_data; std::deque<metadata_t *> _pending_metadata; std::deque<unsigned char> _pending_flags; ZMQ_NON_COPYABLE_NOR_MOVABLE (xpub_t) }; } #endif
sophomore_public/libzmq
src/xpub.hpp
C++
gpl-3.0
3,807
/* SPDX-License-Identifier: MPL-2.0 */ #include "precompiled.hpp" #include <string.h> #include "macros.hpp" #include "xsub.hpp" #include "err.hpp" zmq::xsub_t::xsub_t (class ctx_t *parent_, uint32_t tid_, int sid_) : socket_base_t (parent_, tid_, sid_), _verbose_unsubs (false), _has_message (false), _more_send (false), _more_recv (false), _process_subscribe (false), _only_first_subscribe (false) { options.type = ZMQ_XSUB; // When socket is being closed down we don't want to wait till pending // subscription commands are sent to the wire. options.linger.store (0); const int rc = _message.init (); errno_assert (rc == 0); } zmq::xsub_t::~xsub_t () { const int rc = _message.close (); errno_assert (rc == 0); } void zmq::xsub_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_, bool locally_initiated_) { LIBZMQ_UNUSED (subscribe_to_all_); LIBZMQ_UNUSED (locally_initiated_); zmq_assert (pipe_); _fq.attach (pipe_); _dist.attach (pipe_); // Send all the cached subscriptions to the new upstream peer. _subscriptions.apply (send_subscription, pipe_); pipe_->flush (); } void zmq::xsub_t::xread_activated (pipe_t *pipe_) { _fq.activated (pipe_); } void zmq::xsub_t::xwrite_activated (pipe_t *pipe_) { _dist.activated (pipe_); } void zmq::xsub_t::xpipe_terminated (pipe_t *pipe_) { _fq.pipe_terminated (pipe_); _dist.pipe_terminated (pipe_); } void zmq::xsub_t::xhiccuped (pipe_t *pipe_) { // Send all the cached subscriptions to the hiccuped pipe. _subscriptions.apply (send_subscription, pipe_); pipe_->flush (); } int zmq::xsub_t::xsetsockopt (int option_, const void *optval_, size_t optvallen_) { if (option_ == ZMQ_ONLY_FIRST_SUBSCRIBE) { if (optvallen_ != sizeof (int) || *static_cast<const int *> (optval_) < 0) { errno = EINVAL; return -1; } _only_first_subscribe = (*static_cast<const int *> (optval_) != 0); return 0; } #ifdef ZMQ_BUILD_DRAFT_API else if (option_ == ZMQ_XSUB_VERBOSE_UNSUBSCRIBE) { _verbose_unsubs = (*static_cast<const int *> (optval_) != 0); return 0; } #endif errno = EINVAL; return -1; } int zmq::xsub_t::xgetsockopt (int option_, void *optval_, size_t *optvallen_) { if (option_ == ZMQ_TOPICS_COUNT) { // make sure to use a multi-thread safe function to avoid race conditions with I/O threads // where subscriptions are processed: #ifdef ZMQ_USE_RADIX_TREE uint64_t num_subscriptions = _subscriptions.size (); #else uint64_t num_subscriptions = _subscriptions.num_prefixes (); #endif return do_getsockopt<int> (optval_, optvallen_, (int) num_subscriptions); } // room for future options here errno = EINVAL; return -1; } int zmq::xsub_t::xsend (msg_t *msg_) { size_t size = msg_->size (); unsigned char *data = static_cast<unsigned char *> (msg_->data ()); const bool first_part = !_more_send; _more_send = (msg_->flags () & msg_t::more) != 0; if (first_part) { _process_subscribe = !_only_first_subscribe; } else if (!_process_subscribe) { // User message sent upstream to XPUB socket return _dist.send_to_all (msg_); } if (msg_->is_subscribe () || (size > 0 && *data == 1)) { // Process subscribe message // This used to filter out duplicate subscriptions, // however this is already done on the XPUB side and // doing it here as well breaks ZMQ_XPUB_VERBOSE // when there are forwarding devices involved. 
if (!msg_->is_subscribe ()) { data = data + 1; size = size - 1; } _subscriptions.add (data, size); _process_subscribe = true; return _dist.send_to_all (msg_); } if (msg_->is_cancel () || (size > 0 && *data == 0)) { // Process unsubscribe message if (!msg_->is_cancel ()) { data = data + 1; size = size - 1; } _process_subscribe = true; const bool rm_result = _subscriptions.rm (data, size); if (rm_result || _verbose_unsubs) return _dist.send_to_all (msg_); } else // User message sent upstream to XPUB socket return _dist.send_to_all (msg_); int rc = msg_->close (); errno_assert (rc == 0); rc = msg_->init (); errno_assert (rc == 0); return 0; } bool zmq::xsub_t::xhas_out () { // Subscription can be added/removed anytime. return true; } int zmq::xsub_t::xrecv (msg_t *msg_) { // If there's already a message prepared by a previous call to zmq_poll, // return it straight ahead. if (_has_message) { const int rc = msg_->move (_message); errno_assert (rc == 0); _has_message = false; _more_recv = (msg_->flags () & msg_t::more) != 0; return 0; } // TODO: This can result in infinite loop in the case of continuous // stream of non-matching messages which breaks the non-blocking recv // semantics. while (true) { // Get a message using fair queueing algorithm. int rc = _fq.recv (msg_); // If there's no message available, return immediately. // The same when error occurs. if (rc != 0) return -1; // Check whether the message matches at least one subscription. // Non-initial parts of the message are passed if (_more_recv || !options.filter || match (msg_)) { _more_recv = (msg_->flags () & msg_t::more) != 0; return 0; } // Message doesn't match. Pop any remaining parts of the message // from the pipe. while (msg_->flags () & msg_t::more) { rc = _fq.recv (msg_); errno_assert (rc == 0); } } } bool zmq::xsub_t::xhas_in () { // There are subsequent parts of the partly-read message available. if (_more_recv) return true; // If there's already a message prepared by a previous call to zmq_poll, // return straight ahead. if (_has_message) return true; // TODO: This can result in infinite loop in the case of continuous // stream of non-matching messages. while (true) { // Get a message using fair queueing algorithm. int rc = _fq.recv (&_message); // If there's no message available, return immediately. // The same when error occurs. if (rc != 0) { errno_assert (errno == EAGAIN); return false; } // Check whether the message matches at least one subscription. if (!options.filter || match (&_message)) { _has_message = true; return true; } // Message doesn't match. Pop any remaining parts of the message // from the pipe. while (_message.flags () & msg_t::more) { rc = _fq.recv (&_message); errno_assert (rc == 0); } } } bool zmq::xsub_t::match (msg_t *msg_) { const bool matching = _subscriptions.check ( static_cast<unsigned char *> (msg_->data ()), msg_->size ()); return matching ^ options.invert_matching; } void zmq::xsub_t::send_subscription (unsigned char *data_, size_t size_, void *arg_) { pipe_t *pipe = static_cast<pipe_t *> (arg_); // Create the subscription message. msg_t msg; const int rc = msg.init_subscribe (size_, data_); errno_assert (rc == 0); // Send it to the pipe. const bool sent = pipe->write (&msg); // If we reached the SNDHWM, and thus cannot send the subscription, drop // the subscription message instead. This matches the behaviour of // zmq_setsockopt(ZMQ_SUBSCRIBE, ...), which also drops subscriptions // when the SNDHWM is reached. if (!sent) msg.close (); }
sophomore_public/libzmq
src/xsub.cpp
C++
gpl-3.0
8,195
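xsub_t::xsend accepts the same old-style framing in the other direction: a frame whose first byte is 0x01 subscribes and 0x00 unsubscribes, with the remainder as the topic. A small sketch; the endpoint is arbitrary and need not be reachable for the sends to queue.

#include <zmq.h>
#include <assert.h>

//  Driving subscriptions by hand on an XSUB socket.
int main ()
{
    void *ctx = zmq_ctx_new ();
    void *xsub = zmq_socket (ctx, ZMQ_XSUB);
    int rc = zmq_connect (xsub, "tcp://127.0.0.1:5556");
    assert (rc == 0);

    //  Subscribe to "topic" by sending a frame prefixed with 0x01.
    rc = zmq_send (xsub, "\x01" "topic", 6, 0);
    assert (rc == 6);

    //  Later, cancel the same subscription with a 0x00 prefix.
    rc = zmq_send (xsub, "\x00" "topic", 6, 0);
    assert (rc == 6);

    zmq_close (xsub);
    zmq_ctx_term (ctx);
    return 0;
}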
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __ZMQ_XSUB_HPP_INCLUDED__ #define __ZMQ_XSUB_HPP_INCLUDED__ #include "socket_base.hpp" #include "session_base.hpp" #include "dist.hpp" #include "fq.hpp" #ifdef ZMQ_USE_RADIX_TREE #include "radix_tree.hpp" #else #include "trie.hpp" #endif namespace zmq { class ctx_t; class pipe_t; class io_thread_t; class xsub_t : public socket_base_t { public: xsub_t (zmq::ctx_t *parent_, uint32_t tid_, int sid_); ~xsub_t () ZMQ_OVERRIDE; protected: // Overrides of functions from socket_base_t. void xattach_pipe (zmq::pipe_t *pipe_, bool subscribe_to_all_, bool locally_initiated_) ZMQ_FINAL; int xsetsockopt (int option_, const void *optval_, size_t optvallen_) ZMQ_OVERRIDE; int xgetsockopt (int option_, void *optval_, size_t *optvallen_) ZMQ_FINAL; int xsend (zmq::msg_t *msg_) ZMQ_OVERRIDE; bool xhas_out () ZMQ_OVERRIDE; int xrecv (zmq::msg_t *msg_) ZMQ_FINAL; bool xhas_in () ZMQ_FINAL; void xread_activated (zmq::pipe_t *pipe_) ZMQ_FINAL; void xwrite_activated (zmq::pipe_t *pipe_) ZMQ_FINAL; void xhiccuped (pipe_t *pipe_) ZMQ_FINAL; void xpipe_terminated (zmq::pipe_t *pipe_) ZMQ_FINAL; private: // Check whether the message matches at least one subscription. bool match (zmq::msg_t *msg_); // Function to be applied to the trie to send all the subsciptions // upstream. static void send_subscription (unsigned char *data_, size_t size_, void *arg_); // Fair queueing object for inbound pipes. fq_t _fq; // Object for distributing the subscriptions upstream. dist_t _dist; // The repository of subscriptions. #ifdef ZMQ_USE_RADIX_TREE radix_tree_t _subscriptions; #else trie_with_size_t _subscriptions; #endif // If true, send all unsubscription messages upstream, not just // unique ones bool _verbose_unsubs; // If true, 'message' contains a matching message to return on the // next recv call. bool _has_message; msg_t _message; // If true, part of a multipart message was already sent, but // there are following parts still waiting. bool _more_send; // If true, part of a multipart message was already received, but // there are following parts still waiting. bool _more_recv; // If true, subscribe and cancel messages are processed for the rest // of multipart message. bool _process_subscribe; // This option is enabled with ZMQ_ONLY_FIRST_SUBSCRIBE. // If true, messages following subscribe/unsubscribe in a multipart // message are treated as user data regardless of the first byte. bool _only_first_subscribe; ZMQ_NON_COPYABLE_NOR_MOVABLE (xsub_t) }; } #endif
sophomore_public/libzmq
src/xsub.hpp
C++
gpl-3.0
2,838
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __ZMQ_YPIPE_HPP_INCLUDED__ #define __ZMQ_YPIPE_HPP_INCLUDED__ #include "atomic_ptr.hpp" #include "yqueue.hpp" #include "ypipe_base.hpp" namespace zmq { // Lock-free queue implementation. // Only a single thread can read from the pipe at any specific moment. // Only a single thread can write to the pipe at any specific moment. // T is the type of the object in the queue. // N is granularity of the pipe, i.e. how many items are needed to // perform next memory allocation. template <typename T, int N> class ypipe_t ZMQ_FINAL : public ypipe_base_t<T> { public: // Initialises the pipe. ypipe_t () { // Insert terminator element into the queue. _queue.push (); // Let all the pointers to point to the terminator. // (unless pipe is dead, in which case c is set to NULL). _r = _w = _f = &_queue.back (); _c.set (&_queue.back ()); } // Following function (write) deliberately copies uninitialised data // when used with zmq_msg. Initialising the VSM body for // non-VSM messages won't be good for performance. #ifdef ZMQ_HAVE_OPENVMS #pragma message save #pragma message disable(UNINIT) #endif // Write an item to the pipe. Don't flush it yet. If incomplete is // set to true the item is assumed to be continued by items // subsequently written to the pipe. Incomplete items are never // flushed down the stream. void write (const T &value_, bool incomplete_) { // Place the value to the queue, add new terminator element. _queue.back () = value_; _queue.push (); // Move the "flush up to here" pointer. if (!incomplete_) _f = &_queue.back (); } #ifdef ZMQ_HAVE_OPENVMS #pragma message restore #endif // Pop an incomplete item from the pipe. Returns true if such // item exists, false otherwise. bool unwrite (T *value_) { if (_f == &_queue.back ()) return false; _queue.unpush (); *value_ = _queue.back (); return true; } // Flush all the completed items into the pipe. Returns false if // the reader thread is sleeping. In that case, caller is obliged to // wake the reader up before using the pipe again. bool flush () { // If there are no un-flushed items, do nothing. if (_w == _f) return true; // Try to set 'c' to 'f'. if (_c.cas (_w, _f) != _w) { // Compare-and-swap was unsuccessful because 'c' is NULL. // This means that the reader is asleep. Therefore we don't // care about thread-safeness and update c in non-atomic // manner. We'll return false to let the caller know // that reader is sleeping. _c.set (_f); _w = _f; return false; } // Reader is alive. Nothing special to do now. Just move // the 'first un-flushed item' pointer to 'f'. _w = _f; return true; } // Check whether item is available for reading. bool check_read () { // Was the value prefetched already? If so, return. if (&_queue.front () != _r && _r) return true; // There's no prefetched value, so let us prefetch more values. // Prefetching is to simply retrieve the // pointer from c in atomic fashion. If there are no // items to prefetch, set c to NULL (using compare-and-swap). _r = _c.cas (&_queue.front (), NULL); // If there are no elements prefetched, exit. // During pipe's lifetime r should never be NULL, however, // it can happen during pipe shutdown when items // are being deallocated. if (&_queue.front () == _r || !_r) return false; // There was at least one value prefetched. return true; } // Reads an item from the pipe. Returns false if there is no value. // available. bool read (T *value_) { // Try to prefetch a value. if (!check_read ()) return false; // There was at least one value prefetched. 
// Return it to the caller. *value_ = _queue.front (); _queue.pop (); return true; } // Applies the function fn to the first element in the pipe // and returns the value returned by the fn. // The pipe mustn't be empty or the function crashes. bool probe (bool (*fn_) (const T &)) { const bool rc = check_read (); zmq_assert (rc); return (*fn_) (_queue.front ()); } protected: // Allocation-efficient queue to store pipe items. // Front of the queue points to the first prefetched item, back of // the pipe points to last un-flushed item. Front is used only by // reader thread, while back is used only by writer thread. yqueue_t<T, N> _queue; // Points to the first un-flushed item. This variable is used // exclusively by writer thread. T *_w; // Points to the first un-prefetched item. This variable is used // exclusively by reader thread. T *_r; // Points to the first item to be flushed in the future. T *_f; // The single point of contention between writer and reader thread. // Points past the last flushed item. If it is NULL, // reader is asleep. This pointer should be always accessed using // atomic operations. atomic_ptr_t<T> _c; ZMQ_NON_COPYABLE_NOR_MOVABLE (ypipe_t) }; } #endif
sophomore_public/libzmq
src/ypipe.hpp
C++
gpl-3.0
5,631
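To make ypipe_t's write/flush/read protocol concrete, here is a single-threaded sketch of the call sequence; in libzmq the writer and reader are distinct threads, and the include only resolves when compiled inside libzmq's own source tree.

#include "ypipe.hpp" //  internal header; builds only within libzmq's src tree
#include <cassert>

//  Walk through the write/flush/read protocol of ypipe_t in one thread,
//  purely to show the call sequence.
int main ()
{
    zmq::ypipe_t<int, 16> pipe;

    pipe.write (42, false);              //  enqueue a complete item
    const bool awake = pipe.flush ();    //  make it visible to the reader
    (void) awake;                        //  false would mean "wake the reader"

    int value = 0;
    const bool ok = pipe.read (&value);
    assert (ok && value == 42);
    return 0;
}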
/* SPDX-License-Identifier: MPL-2.0 */

#ifndef __ZMQ_YPIPE_BASE_HPP_INCLUDED__
#define __ZMQ_YPIPE_BASE_HPP_INCLUDED__

#include "macros.hpp"

namespace zmq
{
//  ypipe_base abstracts the ypipe and ypipe_conflate specific
//  classes, one of which is selected according to the conflate
//  socket option
template <typename T> class ypipe_base_t
{
  public:
    virtual ~ypipe_base_t () ZMQ_DEFAULT;
    virtual void write (const T &value_, bool incomplete_) = 0;
    virtual bool unwrite (T *value_) = 0;
    virtual bool flush () = 0;
    virtual bool check_read () = 0;
    virtual bool read (T *value_) = 0;
    virtual bool probe (bool (*fn_) (const T &)) = 0;
};
}

#endif
sophomore_public/libzmq
src/ypipe_base.hpp
C++
gpl-3.0
667
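// --------------------------------------------------------------------------
// Editor's note: illustrative sketch only, not part of the libzmq sources.
// It shows the kind of selection the comment above describes: the concrete
// pipe class is chosen behind the ypipe_base_t interface depending on the
// conflate option. The granularity value 256 is an arbitrary assumption.
// --------------------------------------------------------------------------
#include "ypipe.hpp"
#include "ypipe_conflate.hpp"

template <typename T>
zmq::ypipe_base_t<T> *make_pipe_for_example (bool conflate_)
{
    if (conflate_)
        return new zmq::ypipe_conflate_t<T> ();
    return new zmq::ypipe_t<T, 256> ();
}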
/* SPDX-License-Identifier: MPL-2.0 */

#ifndef __ZMQ_YPIPE_CONFLATE_HPP_INCLUDED__
#define __ZMQ_YPIPE_CONFLATE_HPP_INCLUDED__

#include "platform.hpp"
#include "dbuffer.hpp"
#include "ypipe_base.hpp"

namespace zmq
{
// Adapter for dbuffer, to plug it in instead of a queue for the sake
// of implementing the conflate socket option, which, if set, makes
// the receiving side discard all incoming messages but the last one.
//
// reader_awake flag is needed here to mimic ypipe's delicate behaviour
// around the reader being asleep (see 'c' pointer being NULL in ypipe.hpp)

template <typename T> class ypipe_conflate_t ZMQ_FINAL : public ypipe_base_t<T>
{
  public:
    // Initialises the pipe.
    ypipe_conflate_t () : reader_awake (false) {}

    // Following function (write) deliberately copies uninitialised data
    // when used with zmq_msg. Initialising the VSM body for
    // non-VSM messages won't be good for performance.

#ifdef ZMQ_HAVE_OPENVMS
#pragma message save
#pragma message disable(UNINIT)
#endif
    void write (const T &value_, bool incomplete_)
    {
        (void) incomplete_;

        dbuffer.write (value_);
    }

#ifdef ZMQ_HAVE_OPENVMS
#pragma message restore
#endif

    // There are no incomplete items for conflate ypipe
    bool unwrite (T *) { return false; }

    // Flush is a no-op for conflate ypipe; reader-asleep behaviour
    // is the same as in the usual ypipe.
    // Returns false if the reader thread is sleeping. In that case,
    // caller is obliged to wake the reader up before using the pipe again.
    bool flush () { return reader_awake; }

    // Check whether item is available for reading.
    bool check_read ()
    {
        const bool res = dbuffer.check_read ();
        if (!res)
            reader_awake = false;

        return res;
    }

    // Reads an item from the pipe. Returns false if there is no value
    // available.
    bool read (T *value_)
    {
        if (!check_read ())
            return false;

        return dbuffer.read (value_);
    }

    // Applies the function fn to the first element in the pipe
    // and returns the value returned by the fn.
    // The pipe mustn't be empty or the function crashes.
    bool probe (bool (*fn_) (const T &)) { return dbuffer.probe (fn_); }

  protected:
    dbuffer_t<T> dbuffer;
    bool reader_awake;

    ZMQ_NON_COPYABLE_NOR_MOVABLE (ypipe_conflate_t)
};
}

#endif
sophomore_public/libzmq
src/ypipe_conflate.hpp
C++
gpl-3.0
2,416
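// --------------------------------------------------------------------------
// Editor's note: illustrative sketch only, not part of the libzmq sources.
// ypipe_conflate_t is the internal machinery behind the public ZMQ_CONFLATE
// socket option; the snippet below shows how an application requests
// "keep only the last message" behaviour through the public C API. The
// endpoint and socket type are arbitrary assumptions, and error checking
// is omitted for brevity.
// --------------------------------------------------------------------------
#include <zmq.h>

void conflate_subscriber_example (void *ctx)
{
    void *sub = zmq_socket (ctx, ZMQ_SUB);

    // With ZMQ_CONFLATE set (before connecting), the inbound pipe keeps
    // only the most recent message instead of queueing everything up to
    // the high-water mark.
    const int conflate = 1;
    zmq_setsockopt (sub, ZMQ_CONFLATE, &conflate, sizeof (conflate));

    zmq_setsockopt (sub, ZMQ_SUBSCRIBE, "", 0);
    zmq_connect (sub, "tcp://127.0.0.1:5556");

    char buf[64];
    zmq_recv (sub, buf, sizeof (buf), 0); // always the latest value

    zmq_close (sub);
}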
/* SPDX-License-Identifier: MPL-2.0 */

#ifndef __ZMQ_YQUEUE_HPP_INCLUDED__
#define __ZMQ_YQUEUE_HPP_INCLUDED__

#include <stdlib.h>
#include <stddef.h>

#include "err.hpp"
#include "atomic_ptr.hpp"
#include "platform.hpp"

namespace zmq
{
// yqueue is an efficient queue implementation. The main goal is
// to minimise the number of allocations/deallocations needed. Thus yqueue
// allocates/deallocates elements in batches of N.
//
// yqueue allows one thread to use the push/back functions and another one
// to use the pop/front functions. However, the user must ensure that there's
// no pop on an empty queue and that both threads don't access the same
// element in an unsynchronised manner.
//
// T is the type of the object in the queue.
// N is granularity of the queue (how many pushes have to be done till
// actual memory allocation is required).
#if defined HAVE_POSIX_MEMALIGN
// ALIGN is the memory alignment size to use in the case where we have
// posix_memalign available. Default value is 64; this alignment will
// prevent two queue chunks from occupying the same CPU cache line on
// architectures where cache lines are <= 64 bytes (e.g. most things
// except POWER). It is detected at build time to try to account for other
// platforms like POWER and s390x.
template <typename T, int N, size_t ALIGN = ZMQ_CACHELINE_SIZE> class yqueue_t
#else
template <typename T, int N> class yqueue_t
#endif
{
  public:
    // Create the queue.
    inline yqueue_t ()
    {
        _begin_chunk = allocate_chunk ();
        alloc_assert (_begin_chunk);
        _begin_pos = 0;
        _back_chunk = NULL;
        _back_pos = 0;
        _end_chunk = _begin_chunk;
        _end_pos = 0;
    }

    // Destroy the queue.
    inline ~yqueue_t ()
    {
        while (true) {
            if (_begin_chunk == _end_chunk) {
                free (_begin_chunk);
                break;
            }
            chunk_t *o = _begin_chunk;
            _begin_chunk = _begin_chunk->next;
            free (o);
        }

        chunk_t *sc = _spare_chunk.xchg (NULL);
        free (sc);
    }

    // Returns reference to the front element of the queue.
    // If the queue is empty, behaviour is undefined.
    inline T &front () { return _begin_chunk->values[_begin_pos]; }

    // Returns reference to the back element of the queue.
    // If the queue is empty, behaviour is undefined.
    inline T &back () { return _back_chunk->values[_back_pos]; }

    // Adds an element to the back end of the queue.
    inline void push ()
    {
        _back_chunk = _end_chunk;
        _back_pos = _end_pos;

        if (++_end_pos != N)
            return;

        chunk_t *sc = _spare_chunk.xchg (NULL);
        if (sc) {
            _end_chunk->next = sc;
            sc->prev = _end_chunk;
        } else {
            _end_chunk->next = allocate_chunk ();
            alloc_assert (_end_chunk->next);
            _end_chunk->next->prev = _end_chunk;
        }
        _end_chunk = _end_chunk->next;
        _end_pos = 0;
    }

    // Removes element from the back end of the queue. In other words
    // it rolls back the last push to the queue. Take care: the caller is
    // responsible for destroying the object being unpushed.
    // The caller must also guarantee that the queue isn't empty when
    // unpush is called. It cannot be done automatically as the read
    // side of the queue can be managed by a different, completely
    // unsynchronised thread.
    inline void unpush ()
    {
        // First, move 'back' one position backwards.
        if (_back_pos)
            --_back_pos;
        else {
            _back_pos = N - 1;
            _back_chunk = _back_chunk->prev;
        }

        // Now, move 'end' position backwards. Note that obsolete end chunk
        // is not used as a spare chunk. The analysis shows that doing so
        // would require a free and an atomic operation per chunk deallocated
        // instead of a simple free.
        if (_end_pos)
            --_end_pos;
        else {
            _end_pos = N - 1;
            _end_chunk = _end_chunk->prev;
            free (_end_chunk->next);
            _end_chunk->next = NULL;
        }
    }

    // Removes an element from the front end of the queue.
    inline void pop ()
    {
        if (++_begin_pos == N) {
            chunk_t *o = _begin_chunk;
            _begin_chunk = _begin_chunk->next;
            _begin_chunk->prev = NULL;
            _begin_pos = 0;

            // 'o' has been more recently used than _spare_chunk,
            // so for cache reasons we'll get rid of the spare and
            // use 'o' as the spare.
            chunk_t *cs = _spare_chunk.xchg (o);
            free (cs);
        }
    }

  private:
    // Individual memory chunk to hold N elements.
    struct chunk_t
    {
        T values[N];
        chunk_t *prev;
        chunk_t *next;
    };

    static inline chunk_t *allocate_chunk ()
    {
#if defined HAVE_POSIX_MEMALIGN
        void *pv;
        if (posix_memalign (&pv, ALIGN, sizeof (chunk_t)) == 0)
            return (chunk_t *) pv;
        return NULL;
#else
        return static_cast<chunk_t *> (malloc (sizeof (chunk_t)));
#endif
    }

    // Back position may point to invalid memory if the queue is empty,
    // while begin & end positions are always valid. Begin position is
    // accessed exclusively by the queue reader (front/pop), while back and
    // end positions are accessed exclusively by the queue writer (back/push).
    chunk_t *_begin_chunk;
    int _begin_pos;
    chunk_t *_back_chunk;
    int _back_pos;
    chunk_t *_end_chunk;
    int _end_pos;

    // People are likely to produce and consume at similar rates. In
    // this scenario holding onto the most recently freed chunk saves
    // us from having to call malloc/free.
    atomic_ptr_t<chunk_t> _spare_chunk;

    ZMQ_NON_COPYABLE_NOR_MOVABLE (yqueue_t)
};
}

#endif
sophomore_public/libzmq
src/yqueue.hpp
C++
gpl-3.0
5,913
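// --------------------------------------------------------------------------
// Editor's note: illustrative sketch only, not part of the libzmq sources.
// It shows the push()/back() producer side and front()/pop() consumer side
// of yqueue_t described above, driven from a single thread for clarity:
// push() first creates the new back slot, and back() then refers to it.
// The granularity of 4 is an arbitrary assumption chosen so that a chunk
// boundary (and hence a batched allocation) is actually crossed.
// --------------------------------------------------------------------------
#include "yqueue.hpp"

void yqueue_example ()
{
    zmq::yqueue_t<int, 4> q;

    // Producer side: push () to create a back slot, then fill it in.
    // Memory is allocated at most once per 4 pushes.
    for (int i = 0; i != 10; i++) {
        q.push ();
        q.back () = i;
    }

    // Consumer side: read from front (), then pop () to advance.
    for (int i = 0; i != 10; i++) {
        const int value = q.front ();
        (void) value;
        q.pop ();
    }
}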
/* SPDX-License-Identifier: MPL-2.0 */ #include "precompiled.hpp" #include "zap_client.hpp" #include "msg.hpp" #include "session_base.hpp" namespace zmq { const char zap_version[] = "1.0"; const size_t zap_version_len = sizeof (zap_version) - 1; const char id[] = "1"; const size_t id_len = sizeof (id) - 1; zap_client_t::zap_client_t (session_base_t *const session_, const std::string &peer_address_, const options_t &options_) : mechanism_base_t (session_, options_), peer_address (peer_address_) { } void zap_client_t::send_zap_request (const char *mechanism_, size_t mechanism_length_, const uint8_t *credentials_, size_t credentials_size_) { send_zap_request (mechanism_, mechanism_length_, &credentials_, &credentials_size_, 1); } void zap_client_t::send_zap_request (const char *mechanism_, size_t mechanism_length_, const uint8_t **credentials_, size_t *credentials_sizes_, size_t credentials_count_) { // write_zap_msg cannot fail. It could only fail if the HWM was exceeded, // but on the ZAP socket, the HWM is disabled. int rc; msg_t msg; // Address delimiter frame rc = msg.init (); errno_assert (rc == 0); msg.set_flags (msg_t::more); rc = session->write_zap_msg (&msg); errno_assert (rc == 0); // Version frame rc = msg.init_size (zap_version_len); errno_assert (rc == 0); memcpy (msg.data (), zap_version, zap_version_len); msg.set_flags (msg_t::more); rc = session->write_zap_msg (&msg); errno_assert (rc == 0); // Request ID frame rc = msg.init_size (id_len); errno_assert (rc == 0); memcpy (msg.data (), id, id_len); msg.set_flags (msg_t::more); rc = session->write_zap_msg (&msg); errno_assert (rc == 0); // Domain frame rc = msg.init_size (options.zap_domain.length ()); errno_assert (rc == 0); memcpy (msg.data (), options.zap_domain.c_str (), options.zap_domain.length ()); msg.set_flags (msg_t::more); rc = session->write_zap_msg (&msg); errno_assert (rc == 0); // Address frame rc = msg.init_size (peer_address.length ()); errno_assert (rc == 0); memcpy (msg.data (), peer_address.c_str (), peer_address.length ()); msg.set_flags (msg_t::more); rc = session->write_zap_msg (&msg); errno_assert (rc == 0); // Routing id frame rc = msg.init_size (options.routing_id_size); errno_assert (rc == 0); memcpy (msg.data (), options.routing_id, options.routing_id_size); msg.set_flags (msg_t::more); rc = session->write_zap_msg (&msg); errno_assert (rc == 0); // Mechanism frame rc = msg.init_size (mechanism_length_); errno_assert (rc == 0); memcpy (msg.data (), mechanism_, mechanism_length_); if (credentials_count_) msg.set_flags (msg_t::more); rc = session->write_zap_msg (&msg); errno_assert (rc == 0); // Credentials frames for (size_t i = 0; i < credentials_count_; ++i) { rc = msg.init_size (credentials_sizes_[i]); errno_assert (rc == 0); if (i < credentials_count_ - 1) msg.set_flags (msg_t::more); memcpy (msg.data (), credentials_[i], credentials_sizes_[i]); rc = session->write_zap_msg (&msg); errno_assert (rc == 0); } } int zap_client_t::receive_and_process_zap_reply () { int rc = 0; const size_t zap_reply_frame_count = 7; msg_t msg[zap_reply_frame_count]; // Initialize all reply frames for (size_t i = 0; i < zap_reply_frame_count; i++) { rc = msg[i].init (); errno_assert (rc == 0); } for (size_t i = 0; i < zap_reply_frame_count; i++) { rc = session->read_zap_msg (&msg[i]); if (rc == -1) { if (errno == EAGAIN) { return 1; } return close_and_return (msg, -1); } if ((msg[i].flags () & msg_t::more) == (i < zap_reply_frame_count - 1 ? 
0 : msg_t::more)) { session->get_socket ()->event_handshake_failed_protocol ( session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZAP_MALFORMED_REPLY); errno = EPROTO; return close_and_return (msg, -1); } } // Address delimiter frame if (msg[0].size () > 0) { // TODO can a ZAP handler produce such a message at all? session->get_socket ()->event_handshake_failed_protocol ( session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZAP_UNSPECIFIED); errno = EPROTO; return close_and_return (msg, -1); } // Version frame if (msg[1].size () != zap_version_len || memcmp (msg[1].data (), zap_version, zap_version_len)) { session->get_socket ()->event_handshake_failed_protocol ( session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZAP_BAD_VERSION); errno = EPROTO; return close_and_return (msg, -1); } // Request id frame if (msg[2].size () != id_len || memcmp (msg[2].data (), id, id_len)) { session->get_socket ()->event_handshake_failed_protocol ( session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZAP_BAD_REQUEST_ID); errno = EPROTO; return close_and_return (msg, -1); } // Status code frame, only 200, 300, 400 and 500 are valid status codes const char *status_code_data = static_cast<const char *> (msg[3].data ()); if (msg[3].size () != 3 || status_code_data[0] < '2' || status_code_data[0] > '5' || status_code_data[1] != '0' || status_code_data[2] != '0') { session->get_socket ()->event_handshake_failed_protocol ( session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZAP_INVALID_STATUS_CODE); errno = EPROTO; return close_and_return (msg, -1); } // Save status code status_code.assign (static_cast<char *> (msg[3].data ()), 3); // Save user id set_user_id (msg[5].data (), msg[5].size ()); // Process metadata frame rc = parse_metadata (static_cast<const unsigned char *> (msg[6].data ()), msg[6].size (), true); if (rc != 0) { session->get_socket ()->event_handshake_failed_protocol ( session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZAP_INVALID_METADATA); errno = EPROTO; return close_and_return (msg, -1); } // Close all reply frames for (size_t i = 0; i < zap_reply_frame_count; i++) { const int rc2 = msg[i].close (); errno_assert (rc2 == 0); } handle_zap_status_code (); return 0; } void zap_client_t::handle_zap_status_code () { // we can assume here that status_code is a valid ZAP status code, // i.e. 200, 300, 400 or 500 int status_code_numeric = 0; switch (status_code[0]) { case '2': return; case '3': status_code_numeric = 300; break; case '4': status_code_numeric = 400; break; case '5': status_code_numeric = 500; break; } session->get_socket ()->event_handshake_failed_auth ( session->get_endpoint (), status_code_numeric); } zap_client_common_handshake_t::zap_client_common_handshake_t ( session_base_t *const session_, const std::string &peer_address_, const options_t &options_, state_t zap_reply_ok_state_) : mechanism_base_t (session_, options_), zap_client_t (session_, peer_address_, options_), state (waiting_for_hello), _zap_reply_ok_state (zap_reply_ok_state_) { } zmq::mechanism_t::status_t zap_client_common_handshake_t::status () const { if (state == ready) return mechanism_t::ready; if (state == error_sent) return mechanism_t::error; return mechanism_t::handshaking; } int zap_client_common_handshake_t::zap_msg_available () { zmq_assert (state == waiting_for_zap_reply); return receive_and_process_zap_reply () == -1 ? -1 : 0; } void zap_client_common_handshake_t::handle_zap_status_code () { zap_client_t::handle_zap_status_code (); // we can assume here that status_code is a valid ZAP status code, // i.e. 
200, 300, 400 or 500 switch (status_code[0]) { case '2': state = _zap_reply_ok_state; break; case '3': // a 300 error code (temporary failure) // should NOT result in an ERROR message, but instead the // client should be silently disconnected (see CURVEZMQ RFC) // therefore, go immediately to state error_sent state = error_sent; break; default: state = sending_error; } } int zap_client_common_handshake_t::receive_and_process_zap_reply () { zmq_assert (state == waiting_for_zap_reply); return zap_client_t::receive_and_process_zap_reply (); } }
sophomore_public/libzmq
src/zap_client.cpp
C++
gpl-3.0
9,154
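// --------------------------------------------------------------------------
// Editor's note: illustrative sketch only, not part of the libzmq sources.
// zap_client_t above talks to a ZAP handler over the well-known endpoint
// "inproc://zeromq.zap.01". Below is a deliberately minimal handler that
// accepts every request with status "200"; a real handler would inspect the
// domain, address, mechanism and credential frames before deciding. The
// frame layout mirrors the request and reply frames read and written by
// zap_client_t (the REP socket handles the leading address delimiter).
// --------------------------------------------------------------------------
#include <zmq.h>

void zap_handler_example (void *ctx)
{
    void *handler = zmq_socket (ctx, ZMQ_REP);
    zmq_bind (handler, "inproc://zeromq.zap.01");

    while (1) {
        // Request: version, request id, domain, address, routing id,
        // mechanism, zero or more credential frames.
        char version[16], request_id[256];
        const int version_len =
          zmq_recv (handler, version, sizeof (version), 0);
        int id_len = zmq_recv (handler, request_id, sizeof (request_id), 0);
        if (version_len < 0 || id_len < 0)
            break;
        if (id_len > (int) sizeof (request_id))
            id_len = (int) sizeof (request_id);

        // Drain the remaining request frames without inspecting them.
        int more = 0;
        size_t more_size = sizeof (more);
        while (zmq_getsockopt (handler, ZMQ_RCVMORE, &more, &more_size) == 0
               && more) {
            char frame[256];
            zmq_recv (handler, frame, sizeof (frame), 0);
        }

        // Reply: version, request id, status code, status text,
        // user id, metadata.
        zmq_send (handler, "1.0", 3, ZMQ_SNDMORE);
        zmq_send (handler, request_id, id_len, ZMQ_SNDMORE);
        zmq_send (handler, "200", 3, ZMQ_SNDMORE);
        zmq_send (handler, "OK", 2, ZMQ_SNDMORE);
        zmq_send (handler, "anonymous", 9, ZMQ_SNDMORE);
        zmq_send (handler, "", 0, 0);
    }
    zmq_close (handler);
}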
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __ZMQ_ZAP_CLIENT_HPP_INCLUDED__ #define __ZMQ_ZAP_CLIENT_HPP_INCLUDED__ #include "mechanism_base.hpp" namespace zmq { class zap_client_t : public virtual mechanism_base_t { public: zap_client_t (session_base_t *session_, const std::string &peer_address_, const options_t &options_); void send_zap_request (const char *mechanism_, size_t mechanism_length_, const uint8_t *credentials_, size_t credentials_size_); void send_zap_request (const char *mechanism_, size_t mechanism_length_, const uint8_t **credentials_, size_t *credentials_sizes_, size_t credentials_count_); virtual int receive_and_process_zap_reply (); virtual void handle_zap_status_code (); protected: const std::string peer_address; // Status code as received from ZAP handler std::string status_code; }; class zap_client_common_handshake_t : public zap_client_t { protected: enum state_t { waiting_for_hello, sending_welcome, waiting_for_initiate, waiting_for_zap_reply, sending_ready, sending_error, error_sent, ready }; zap_client_common_handshake_t (session_base_t *session_, const std::string &peer_address_, const options_t &options_, state_t zap_reply_ok_state_); // methods from mechanism_t status_t status () const ZMQ_FINAL; int zap_msg_available () ZMQ_FINAL; // zap_client_t methods int receive_and_process_zap_reply () ZMQ_FINAL; void handle_zap_status_code () ZMQ_FINAL; // Current FSM state state_t state; private: const state_t _zap_reply_ok_state; }; } #endif
sophomore_public/libzmq
src/zap_client.hpp
C++
gpl-3.0
1,996
/* SPDX-License-Identifier: MPL-2.0 */ // "Tell them I was a writer. // A maker of software. // A humanist. A father. // And many things. // But above all, a writer. // Thank You. :)" // - Pieter Hintjens #include "precompiled.hpp" #define ZMQ_TYPE_UNSAFE #include "macros.hpp" #include "poller.hpp" #include "peer.hpp" #if !defined ZMQ_HAVE_POLLER // On AIX platform, poll.h has to be included first to get consistent // definition of pollfd structure (AIX uses 'reqevents' and 'retnevents' // instead of 'events' and 'revents' and defines macros to map from POSIX-y // names to AIX-specific names). #if defined ZMQ_POLL_BASED_ON_POLL && !defined ZMQ_HAVE_WINDOWS #include <poll.h> #endif #include "polling_util.hpp" #endif // TODO: determine if this is an issue, since zmq.h is being loaded from pch. // zmq.h must be included *after* poll.h for AIX to build properly //#include "../include/zmq.h" #if !defined ZMQ_HAVE_WINDOWS #include <unistd.h> #ifdef ZMQ_HAVE_VXWORKS #include <strings.h> #endif #endif // XSI vector I/O #if defined ZMQ_HAVE_UIO #include <sys/uio.h> #else struct iovec { void *iov_base; size_t iov_len; }; #endif #include <string.h> #include <stdlib.h> #include <new> #include <climits> #include "proxy.hpp" #include "socket_base.hpp" #include "stdint.hpp" #include "config.hpp" #include "likely.hpp" #include "clock.hpp" #include "ctx.hpp" #include "err.hpp" #include "msg.hpp" #include "fd.hpp" #include "metadata.hpp" #include "socket_poller.hpp" #include "timers.hpp" #include "ip.hpp" #include "address.hpp" #ifdef ZMQ_HAVE_PPOLL #include "polling_util.hpp" #include <sys/select.h> #endif #if defined ZMQ_HAVE_OPENPGM #define __PGM_WININT_H__ #include <pgm/pgm.h> #endif // Compile time check whether msg_t fits into zmq_msg_t. typedef char check_msg_t_size[sizeof (zmq::msg_t) == sizeof (zmq_msg_t) ? 1 : -1]; void zmq_version (int *major_, int *minor_, int *patch_) { *major_ = ZMQ_VERSION_MAJOR; *minor_ = ZMQ_VERSION_MINOR; *patch_ = ZMQ_VERSION_PATCH; } const char *zmq_strerror (int errnum_) { return zmq::errno_to_string (errnum_); } int zmq_errno (void) { return errno; } // New context API void *zmq_ctx_new (void) { // We do this before the ctx constructor since its embedded mailbox_t // object needs the network to be up and running (at least on Windows). if (!zmq::initialize_network ()) { return NULL; } // Create 0MQ context. zmq::ctx_t *ctx = new (std::nothrow) zmq::ctx_t; if (ctx) { if (!ctx->valid ()) { delete ctx; return NULL; } } return ctx; } int zmq_ctx_term (void *ctx_) { if (!ctx_ || !(static_cast<zmq::ctx_t *> (ctx_))->check_tag ()) { errno = EFAULT; return -1; } const int rc = (static_cast<zmq::ctx_t *> (ctx_))->terminate (); const int en = errno; // Shut down only if termination was not interrupted by a signal. 
if (!rc || en != EINTR) { zmq::shutdown_network (); } errno = en; return rc; } int zmq_ctx_shutdown (void *ctx_) { if (!ctx_ || !(static_cast<zmq::ctx_t *> (ctx_))->check_tag ()) { errno = EFAULT; return -1; } return (static_cast<zmq::ctx_t *> (ctx_))->shutdown (); } int zmq_ctx_set (void *ctx_, int option_, int optval_) { return zmq_ctx_set_ext (ctx_, option_, &optval_, sizeof (int)); } int zmq_ctx_set_ext (void *ctx_, int option_, const void *optval_, size_t optvallen_) { if (!ctx_ || !(static_cast<zmq::ctx_t *> (ctx_))->check_tag ()) { errno = EFAULT; return -1; } return (static_cast<zmq::ctx_t *> (ctx_)) ->set (option_, optval_, optvallen_); } int zmq_ctx_get (void *ctx_, int option_) { if (!ctx_ || !(static_cast<zmq::ctx_t *> (ctx_))->check_tag ()) { errno = EFAULT; return -1; } return (static_cast<zmq::ctx_t *> (ctx_))->get (option_); } int zmq_ctx_get_ext (void *ctx_, int option_, void *optval_, size_t *optvallen_) { if (!ctx_ || !(static_cast<zmq::ctx_t *> (ctx_))->check_tag ()) { errno = EFAULT; return -1; } return (static_cast<zmq::ctx_t *> (ctx_)) ->get (option_, optval_, optvallen_); } // Stable/legacy context API void *zmq_init (int io_threads_) { if (io_threads_ >= 0) { void *ctx = zmq_ctx_new (); zmq_ctx_set (ctx, ZMQ_IO_THREADS, io_threads_); return ctx; } errno = EINVAL; return NULL; } int zmq_term (void *ctx_) { return zmq_ctx_term (ctx_); } int zmq_ctx_destroy (void *ctx_) { return zmq_ctx_term (ctx_); } // Sockets static zmq::socket_base_t *as_socket_base_t (void *s_) { zmq::socket_base_t *s = static_cast<zmq::socket_base_t *> (s_); if (!s_ || !s->check_tag ()) { errno = ENOTSOCK; return NULL; } return s; } void *zmq_socket (void *ctx_, int type_) { if (!ctx_ || !(static_cast<zmq::ctx_t *> (ctx_))->check_tag ()) { errno = EFAULT; return NULL; } zmq::ctx_t *ctx = static_cast<zmq::ctx_t *> (ctx_); zmq::socket_base_t *s = ctx->create_socket (type_); return static_cast<void *> (s); } int zmq_close (void *s_) { zmq::socket_base_t *s = as_socket_base_t (s_); if (!s) return -1; s->close (); return 0; } int zmq_setsockopt (void *s_, int option_, const void *optval_, size_t optvallen_) { zmq::socket_base_t *s = as_socket_base_t (s_); if (!s) return -1; return s->setsockopt (option_, optval_, optvallen_); } int zmq_getsockopt (void *s_, int option_, void *optval_, size_t *optvallen_) { zmq::socket_base_t *s = as_socket_base_t (s_); if (!s) return -1; return s->getsockopt (option_, optval_, optvallen_); } int zmq_socket_monitor_versioned ( void *s_, const char *addr_, uint64_t events_, int event_version_, int type_) { zmq::socket_base_t *s = as_socket_base_t (s_); if (!s) return -1; return s->monitor (addr_, events_, event_version_, type_); } int zmq_socket_monitor (void *s_, const char *addr_, int events_) { return zmq_socket_monitor_versioned (s_, addr_, events_, 1, ZMQ_PAIR); } int zmq_join (void *s_, const char *group_) { zmq::socket_base_t *s = as_socket_base_t (s_); if (!s) return -1; return s->join (group_); } int zmq_leave (void *s_, const char *group_) { zmq::socket_base_t *s = as_socket_base_t (s_); if (!s) return -1; return s->leave (group_); } int zmq_bind (void *s_, const char *addr_) { zmq::socket_base_t *s = as_socket_base_t (s_); if (!s) return -1; return s->bind (addr_); } int zmq_connect (void *s_, const char *addr_) { zmq::socket_base_t *s = as_socket_base_t (s_); if (!s) return -1; return s->connect (addr_); } uint32_t zmq_connect_peer (void *s_, const char *addr_) { zmq::peer_t *s = static_cast<zmq::peer_t *> (s_); if (!s_ || !s->check_tag ()) { errno = 
ENOTSOCK; return 0; } int socket_type; size_t socket_type_size = sizeof (socket_type); if (s->getsockopt (ZMQ_TYPE, &socket_type, &socket_type_size) != 0) return 0; if (socket_type != ZMQ_PEER) { errno = ENOTSUP; return 0; } return s->connect_peer (addr_); } int zmq_unbind (void *s_, const char *addr_) { zmq::socket_base_t *s = as_socket_base_t (s_); if (!s) return -1; return s->term_endpoint (addr_); } int zmq_disconnect (void *s_, const char *addr_) { zmq::socket_base_t *s = as_socket_base_t (s_); if (!s) return -1; return s->term_endpoint (addr_); } // Sending functions. static inline int s_sendmsg (zmq::socket_base_t *s_, zmq_msg_t *msg_, int flags_) { size_t sz = zmq_msg_size (msg_); const int rc = s_->send (reinterpret_cast<zmq::msg_t *> (msg_), flags_); if (unlikely (rc < 0)) return -1; // This is what I'd like to do, my C++ fu is too weak -- PH 2016/02/09 // int max_msgsz = s_->parent->get (ZMQ_MAX_MSGSZ); size_t max_msgsz = INT_MAX; // Truncate returned size to INT_MAX to avoid overflow to negative values return static_cast<int> (sz < max_msgsz ? sz : max_msgsz); } /* To be deprecated once zmq_msg_send() is stable */ int zmq_sendmsg (void *s_, zmq_msg_t *msg_, int flags_) { return zmq_msg_send (msg_, s_, flags_); } int zmq_send (void *s_, const void *buf_, size_t len_, int flags_) { zmq::socket_base_t *s = as_socket_base_t (s_); if (!s) return -1; zmq_msg_t msg; int rc = zmq_msg_init_buffer (&msg, buf_, len_); if (unlikely (rc < 0)) return -1; rc = s_sendmsg (s, &msg, flags_); if (unlikely (rc < 0)) { const int err = errno; const int rc2 = zmq_msg_close (&msg); errno_assert (rc2 == 0); errno = err; return -1; } // Note the optimisation here. We don't close the msg object as it is // empty anyway. This may change when implementation of zmq_msg_t changes. return rc; } int zmq_send_const (void *s_, const void *buf_, size_t len_, int flags_) { zmq::socket_base_t *s = as_socket_base_t (s_); if (!s) return -1; zmq_msg_t msg; int rc = zmq_msg_init_data (&msg, const_cast<void *> (buf_), len_, NULL, NULL); if (rc != 0) return -1; rc = s_sendmsg (s, &msg, flags_); if (unlikely (rc < 0)) { const int err = errno; const int rc2 = zmq_msg_close (&msg); errno_assert (rc2 == 0); errno = err; return -1; } // Note the optimisation here. We don't close the msg object as it is // empty anyway. This may change when implementation of zmq_msg_t changes. return rc; } // Send multiple messages. // TODO: this function has no man page // // If flag bit ZMQ_SNDMORE is set the vector is treated as // a single multi-part message, i.e. the last message has // ZMQ_SNDMORE bit switched off. // int zmq_sendiov (void *s_, iovec *a_, size_t count_, int flags_) { zmq::socket_base_t *s = as_socket_base_t (s_); if (!s) return -1; if (unlikely (count_ <= 0 || !a_)) { errno = EINVAL; return -1; } int rc = 0; zmq_msg_t msg; for (size_t i = 0; i < count_; ++i) { rc = zmq_msg_init_size (&msg, a_[i].iov_len); if (rc != 0) { rc = -1; break; } memcpy (zmq_msg_data (&msg), a_[i].iov_base, a_[i].iov_len); if (i == count_ - 1) flags_ = flags_ & ~ZMQ_SNDMORE; rc = s_sendmsg (s, &msg, flags_); if (unlikely (rc < 0)) { const int err = errno; const int rc2 = zmq_msg_close (&msg); errno_assert (rc2 == 0); errno = err; rc = -1; break; } } return rc; } // Receiving functions. 
static int s_recvmsg (zmq::socket_base_t *s_, zmq_msg_t *msg_, int flags_) { const int rc = s_->recv (reinterpret_cast<zmq::msg_t *> (msg_), flags_); if (unlikely (rc < 0)) return -1; // Truncate returned size to INT_MAX to avoid overflow to negative values const size_t sz = zmq_msg_size (msg_); return static_cast<int> (sz < INT_MAX ? sz : INT_MAX); } /* To be deprecated once zmq_msg_recv() is stable */ int zmq_recvmsg (void *s_, zmq_msg_t *msg_, int flags_) { return zmq_msg_recv (msg_, s_, flags_); } int zmq_recv (void *s_, void *buf_, size_t len_, int flags_) { zmq::socket_base_t *s = as_socket_base_t (s_); if (!s) return -1; zmq_msg_t msg; int rc = zmq_msg_init (&msg); errno_assert (rc == 0); const int nbytes = s_recvmsg (s, &msg, flags_); if (unlikely (nbytes < 0)) { const int err = errno; rc = zmq_msg_close (&msg); errno_assert (rc == 0); errno = err; return -1; } // An oversized message is silently truncated. const size_t to_copy = size_t (nbytes) < len_ ? size_t (nbytes) : len_; // We explicitly allow a null buffer argument if len is zero if (to_copy) { assert (buf_); memcpy (buf_, zmq_msg_data (&msg), to_copy); } rc = zmq_msg_close (&msg); errno_assert (rc == 0); return nbytes; } // Receive a multi-part message // // Receives up to *count_ parts of a multi-part message. // Sets *count_ to the actual number of parts read. // ZMQ_RCVMORE is set to indicate if a complete multi-part message was read. // Returns number of message parts read, or -1 on error. // // Note: even if -1 is returned, some parts of the message // may have been read. Therefore the client must consult // *count_ to retrieve message parts successfully read, // even if -1 is returned. // // The iov_base* buffers of each iovec *a_ filled in by this // function may be freed using free(). // TODO: this function has no man page // int zmq_recviov (void *s_, iovec *a_, size_t *count_, int flags_) { zmq::socket_base_t *s = as_socket_base_t (s_); if (!s) return -1; if (unlikely (!count_ || *count_ <= 0 || !a_)) { errno = EINVAL; return -1; } const size_t count = *count_; int nread = 0; bool recvmore = true; *count_ = 0; for (size_t i = 0; recvmore && i < count; ++i) { zmq_msg_t msg; int rc = zmq_msg_init (&msg); errno_assert (rc == 0); const int nbytes = s_recvmsg (s, &msg, flags_); if (unlikely (nbytes < 0)) { const int err = errno; rc = zmq_msg_close (&msg); errno_assert (rc == 0); errno = err; nread = -1; break; } a_[i].iov_len = zmq_msg_size (&msg); a_[i].iov_base = static_cast<char *> (malloc (a_[i].iov_len)); if (unlikely (!a_[i].iov_base)) { errno = ENOMEM; return -1; } memcpy (a_[i].iov_base, static_cast<char *> (zmq_msg_data (&msg)), a_[i].iov_len); // Assume zmq_socket ZMQ_RVCMORE is properly set. const zmq::msg_t *p_msg = reinterpret_cast<const zmq::msg_t *> (&msg); recvmore = p_msg->flags () & zmq::msg_t::more; rc = zmq_msg_close (&msg); errno_assert (rc == 0); ++*count_; ++nread; } return nread; } // Message manipulators. 
int zmq_msg_init (zmq_msg_t *msg_) { return (reinterpret_cast<zmq::msg_t *> (msg_))->init (); } int zmq_msg_init_size (zmq_msg_t *msg_, size_t size_) { return (reinterpret_cast<zmq::msg_t *> (msg_))->init_size (size_); } int zmq_msg_init_buffer (zmq_msg_t *msg_, const void *buf_, size_t size_) { return (reinterpret_cast<zmq::msg_t *> (msg_))->init_buffer (buf_, size_); } int zmq_msg_init_data ( zmq_msg_t *msg_, void *data_, size_t size_, zmq_free_fn *ffn_, void *hint_) { return (reinterpret_cast<zmq::msg_t *> (msg_)) ->init_data (data_, size_, ffn_, hint_); } int zmq_msg_send (zmq_msg_t *msg_, void *s_, int flags_) { zmq::socket_base_t *s = as_socket_base_t (s_); if (!s) return -1; return s_sendmsg (s, msg_, flags_); } int zmq_msg_recv (zmq_msg_t *msg_, void *s_, int flags_) { zmq::socket_base_t *s = as_socket_base_t (s_); if (!s) return -1; return s_recvmsg (s, msg_, flags_); } int zmq_msg_close (zmq_msg_t *msg_) { return (reinterpret_cast<zmq::msg_t *> (msg_))->close (); } int zmq_msg_move (zmq_msg_t *dest_, zmq_msg_t *src_) { return (reinterpret_cast<zmq::msg_t *> (dest_)) ->move (*reinterpret_cast<zmq::msg_t *> (src_)); } int zmq_msg_copy (zmq_msg_t *dest_, zmq_msg_t *src_) { return (reinterpret_cast<zmq::msg_t *> (dest_)) ->copy (*reinterpret_cast<zmq::msg_t *> (src_)); } void *zmq_msg_data (zmq_msg_t *msg_) { return (reinterpret_cast<zmq::msg_t *> (msg_))->data (); } size_t zmq_msg_size (const zmq_msg_t *msg_) { return ((zmq::msg_t *) msg_)->size (); } int zmq_msg_more (const zmq_msg_t *msg_) { return zmq_msg_get (msg_, ZMQ_MORE); } int zmq_msg_get (const zmq_msg_t *msg_, int property_) { const char *fd_string; switch (property_) { case ZMQ_MORE: return (((zmq::msg_t *) msg_)->flags () & zmq::msg_t::more) ? 1 : 0; case ZMQ_SRCFD: fd_string = zmq_msg_gets (msg_, "__fd"); if (fd_string == NULL) return -1; return atoi (fd_string); case ZMQ_SHARED: return (((zmq::msg_t *) msg_)->is_cmsg ()) || (((zmq::msg_t *) msg_)->flags () & zmq::msg_t::shared) ? 1 : 0; default: errno = EINVAL; return -1; } } int zmq_msg_set (zmq_msg_t *, int, int) { // No properties supported at present errno = EINVAL; return -1; } int zmq_msg_set_routing_id (zmq_msg_t *msg_, uint32_t routing_id_) { return (reinterpret_cast<zmq::msg_t *> (msg_)) ->set_routing_id (routing_id_); } uint32_t zmq_msg_routing_id (zmq_msg_t *msg_) { return (reinterpret_cast<zmq::msg_t *> (msg_))->get_routing_id (); } int zmq_msg_set_group (zmq_msg_t *msg_, const char *group_) { return (reinterpret_cast<zmq::msg_t *> (msg_))->set_group (group_); } const char *zmq_msg_group (zmq_msg_t *msg_) { return (reinterpret_cast<zmq::msg_t *> (msg_))->group (); } // Get message metadata string const char *zmq_msg_gets (const zmq_msg_t *msg_, const char *property_) { const zmq::metadata_t *metadata = reinterpret_cast<const zmq::msg_t *> (msg_)->metadata (); const char *value = NULL; if (metadata) value = metadata->get (std::string (property_)); if (value) return value; errno = EINVAL; return NULL; } // Polling. #if defined ZMQ_HAVE_POLLER static int zmq_poller_poll (zmq_pollitem_t *items_, int nitems_, long timeout_) { // implement zmq_poll on top of zmq_poller int rc; zmq_poller_event_t *events; zmq::socket_poller_t poller; events = new (std::nothrow) zmq_poller_event_t[nitems_]; alloc_assert (events); bool repeat_items = false; // Register sockets with poller for (int i = 0; i < nitems_; i++) { items_[i].revents = 0; bool modify = false; short e = items_[i].events; if (items_[i].socket) { // Poll item is a 0MQ socket. 
for (int j = 0; j < i; ++j) { // Check for repeat entries if (items_[j].socket == items_[i].socket) { repeat_items = true; modify = true; e |= items_[j].events; } } if (modify) { rc = zmq_poller_modify (&poller, items_[i].socket, e); } else { rc = zmq_poller_add (&poller, items_[i].socket, NULL, e); } if (rc < 0) { delete[] events; return rc; } } else { // Poll item is a raw file descriptor. for (int j = 0; j < i; ++j) { // Check for repeat entries if (!items_[j].socket && items_[j].fd == items_[i].fd) { repeat_items = true; modify = true; e |= items_[j].events; } } if (modify) { rc = zmq_poller_modify_fd (&poller, items_[i].fd, e); } else { rc = zmq_poller_add_fd (&poller, items_[i].fd, NULL, e); } if (rc < 0) { delete[] events; return rc; } } } // Wait for events rc = zmq_poller_wait_all (&poller, events, nitems_, timeout_); if (rc < 0) { delete[] events; if (zmq_errno () == EAGAIN) { return 0; } return rc; } // Transform poller events into zmq_pollitem events. // items_ contains all items, while events only contains fired events. // If no sockets are repeated (likely), the two are still co-ordered, so step through the items // checking for matches only on the first event. // If there are repeat items, they cannot be assumed to be co-ordered, // so each pollitem must check fired events from the beginning. int j_start = 0, found_events = rc; for (int i = 0; i < nitems_; i++) { for (int j = j_start; j < found_events; ++j) { if ((items_[i].socket && items_[i].socket == events[j].socket) || (!(items_[i].socket || events[j].socket) && items_[i].fd == events[j].fd)) { items_[i].revents = events[j].events & items_[i].events; if (!repeat_items) { // no repeats, we can ignore events we've already seen j_start++; } break; } if (!repeat_items) { // no repeats, never have to look at j > j_start break; } } } // Cleanup delete[] events; return rc; } #endif // ZMQ_HAVE_POLLER int zmq_poll (zmq_pollitem_t *items_, int nitems_, long timeout_) { #if defined ZMQ_HAVE_POLLER // if poller is present, use that if there is at least 1 thread-safe socket, // otherwise fall back to the previous implementation as it's faster. for (int i = 0; i != nitems_; i++) { if (items_[i].socket) { zmq::socket_base_t *s = as_socket_base_t (items_[i].socket); if (s) { if (s->is_thread_safe ()) return zmq_poller_poll (items_, nitems_, timeout_); } else { //as_socket_base_t returned NULL : socket is invalid return -1; } } } #endif // ZMQ_HAVE_POLLER #if defined ZMQ_POLL_BASED_ON_POLL || defined ZMQ_POLL_BASED_ON_SELECT if (unlikely (nitems_ < 0)) { errno = EINVAL; return -1; } if (unlikely (nitems_ == 0)) { if (timeout_ == 0) return 0; #if defined ZMQ_HAVE_WINDOWS Sleep (timeout_ > 0 ? timeout_ : INFINITE); return 0; #elif defined ZMQ_HAVE_VXWORKS struct timespec ns_; ns_.tv_sec = timeout_ / 1000; ns_.tv_nsec = timeout_ % 1000 * 1000000; return nanosleep (&ns_, 0); #else return usleep (timeout_ * 1000); #endif } if (!items_) { errno = EFAULT; return -1; } zmq::clock_t clock; uint64_t now = 0; uint64_t end = 0; #if defined ZMQ_POLL_BASED_ON_POLL zmq::fast_vector_t<pollfd, ZMQ_POLLITEMS_DFLT> pollfds (nitems_); // Build pollset for poll () system call. for (int i = 0; i != nitems_; i++) { // If the poll item is a 0MQ socket, we poll on the file descriptor // retrieved by the ZMQ_FD socket option. if (items_[i].socket) { size_t zmq_fd_size = sizeof (zmq::fd_t); if (zmq_getsockopt (items_[i].socket, ZMQ_FD, &pollfds[i].fd, &zmq_fd_size) == -1) { return -1; } pollfds[i].events = items_[i].events ? 
POLLIN : 0; } // Else, the poll item is a raw file descriptor. Just convert the // events to normal POLLIN/POLLOUT for poll (). else { pollfds[i].fd = items_[i].fd; pollfds[i].events = (items_[i].events & ZMQ_POLLIN ? POLLIN : 0) | (items_[i].events & ZMQ_POLLOUT ? POLLOUT : 0) | (items_[i].events & ZMQ_POLLPRI ? POLLPRI : 0); } } #else // Ensure we do not attempt to select () on more than FD_SETSIZE // file descriptors. // TODO since this function is called by a client, we could return errno EINVAL/ENOMEM/... here zmq_assert (nitems_ <= FD_SETSIZE); zmq::optimized_fd_set_t pollset_in (nitems_); FD_ZERO (pollset_in.get ()); zmq::optimized_fd_set_t pollset_out (nitems_); FD_ZERO (pollset_out.get ()); zmq::optimized_fd_set_t pollset_err (nitems_); FD_ZERO (pollset_err.get ()); zmq::fd_t maxfd = 0; // Build the fd_sets for passing to select (). for (int i = 0; i != nitems_; i++) { // If the poll item is a 0MQ socket we are interested in input on the // notification file descriptor retrieved by the ZMQ_FD socket option. if (items_[i].socket) { size_t zmq_fd_size = sizeof (zmq::fd_t); zmq::fd_t notify_fd; if (zmq_getsockopt (items_[i].socket, ZMQ_FD, &notify_fd, &zmq_fd_size) == -1) return -1; if (items_[i].events) { FD_SET (notify_fd, pollset_in.get ()); if (maxfd < notify_fd) maxfd = notify_fd; } } // Else, the poll item is a raw file descriptor. Convert the poll item // events to the appropriate fd_sets. else { if (items_[i].events & ZMQ_POLLIN) FD_SET (items_[i].fd, pollset_in.get ()); if (items_[i].events & ZMQ_POLLOUT) FD_SET (items_[i].fd, pollset_out.get ()); if (items_[i].events & ZMQ_POLLERR) FD_SET (items_[i].fd, pollset_err.get ()); if (maxfd < items_[i].fd) maxfd = items_[i].fd; } } zmq::optimized_fd_set_t inset (nitems_); zmq::optimized_fd_set_t outset (nitems_); zmq::optimized_fd_set_t errset (nitems_); #endif bool first_pass = true; int nevents = 0; while (true) { #if defined ZMQ_POLL_BASED_ON_POLL // Compute the timeout for the subsequent poll. const zmq::timeout_t timeout = zmq::compute_timeout (first_pass, timeout_, now, end); // Wait for events. { const int rc = poll (&pollfds[0], nitems_, timeout); if (rc == -1 && errno == EINTR) { return -1; } errno_assert (rc >= 0); } // Check for the events. for (int i = 0; i != nitems_; i++) { items_[i].revents = 0; // The poll item is a 0MQ socket. Retrieve pending events // using the ZMQ_EVENTS socket option. if (items_[i].socket) { size_t zmq_events_size = sizeof (uint32_t); uint32_t zmq_events; if (zmq_getsockopt (items_[i].socket, ZMQ_EVENTS, &zmq_events, &zmq_events_size) == -1) { return -1; } if ((items_[i].events & ZMQ_POLLOUT) && (zmq_events & ZMQ_POLLOUT)) items_[i].revents |= ZMQ_POLLOUT; if ((items_[i].events & ZMQ_POLLIN) && (zmq_events & ZMQ_POLLIN)) items_[i].revents |= ZMQ_POLLIN; } // Else, the poll item is a raw file descriptor, simply convert // the events to zmq_pollitem_t-style format. else { if (pollfds[i].revents & POLLIN) items_[i].revents |= ZMQ_POLLIN; if (pollfds[i].revents & POLLOUT) items_[i].revents |= ZMQ_POLLOUT; if (pollfds[i].revents & POLLPRI) items_[i].revents |= ZMQ_POLLPRI; if (pollfds[i].revents & ~(POLLIN | POLLOUT | POLLPRI)) items_[i].revents |= ZMQ_POLLERR; } if (items_[i].revents) nevents++; } #else // Compute the timeout for the subsequent poll. 
timeval timeout; timeval *ptimeout; if (first_pass) { timeout.tv_sec = 0; timeout.tv_usec = 0; ptimeout = &timeout; } else if (timeout_ < 0) ptimeout = NULL; else { timeout.tv_sec = static_cast<long> ((end - now) / 1000); timeout.tv_usec = static_cast<long> ((end - now) % 1000 * 1000); ptimeout = &timeout; } // Wait for events. Ignore interrupts if there's infinite timeout. while (true) { memcpy (inset.get (), pollset_in.get (), zmq::valid_pollset_bytes (*pollset_in.get ())); memcpy (outset.get (), pollset_out.get (), zmq::valid_pollset_bytes (*pollset_out.get ())); memcpy (errset.get (), pollset_err.get (), zmq::valid_pollset_bytes (*pollset_err.get ())); #if defined ZMQ_HAVE_WINDOWS int rc = select (0, inset.get (), outset.get (), errset.get (), ptimeout); if (unlikely (rc == SOCKET_ERROR)) { errno = zmq::wsa_error_to_errno (WSAGetLastError ()); wsa_assert (errno == ENOTSOCK); return -1; } #else int rc = select (maxfd + 1, inset.get (), outset.get (), errset.get (), ptimeout); if (unlikely (rc == -1)) { errno_assert (errno == EINTR || errno == EBADF); return -1; } #endif break; } // Check for the events. for (int i = 0; i != nitems_; i++) { items_[i].revents = 0; // The poll item is a 0MQ socket. Retrieve pending events // using the ZMQ_EVENTS socket option. if (items_[i].socket) { size_t zmq_events_size = sizeof (uint32_t); uint32_t zmq_events; if (zmq_getsockopt (items_[i].socket, ZMQ_EVENTS, &zmq_events, &zmq_events_size) == -1) return -1; if ((items_[i].events & ZMQ_POLLOUT) && (zmq_events & ZMQ_POLLOUT)) items_[i].revents |= ZMQ_POLLOUT; if ((items_[i].events & ZMQ_POLLIN) && (zmq_events & ZMQ_POLLIN)) items_[i].revents |= ZMQ_POLLIN; } // Else, the poll item is a raw file descriptor, simply convert // the events to zmq_pollitem_t-style format. else { if (FD_ISSET (items_[i].fd, inset.get ())) items_[i].revents |= ZMQ_POLLIN; if (FD_ISSET (items_[i].fd, outset.get ())) items_[i].revents |= ZMQ_POLLOUT; if (FD_ISSET (items_[i].fd, errset.get ())) items_[i].revents |= ZMQ_POLLERR; } if (items_[i].revents) nevents++; } #endif // If timeout is zero, exit immediately whether there are events or not. if (timeout_ == 0) break; // If there are events to return, we can exit immediately. if (nevents) break; // At this point we are meant to wait for events but there are none. // If timeout is infinite we can just loop until we get some events. if (timeout_ < 0) { if (first_pass) first_pass = false; continue; } // The timeout is finite and there are no events. In the first pass // we get a timestamp of when the polling have begun. (We assume that // first pass have taken negligible time). We also compute the time // when the polling should time out. if (first_pass) { now = clock.now_ms (); end = now + timeout_; if (now == end) break; first_pass = false; continue; } // Find out whether timeout have expired. now = clock.now_ms (); if (now >= end) break; } return nevents; #else // Exotic platforms that support neither poll() nor select(). errno = ENOTSUP; return -1; #endif } #ifdef ZMQ_HAVE_PPOLL // return values of 0 or -1 should be returned from zmq_poll; return value 1 means items passed checks int zmq_poll_check_items_ (zmq_pollitem_t *items_, int nitems_, long timeout_) { if (unlikely (nitems_ < 0)) { errno = EINVAL; return -1; } if (unlikely (nitems_ == 0)) { if (timeout_ == 0) return 0; #if defined ZMQ_HAVE_WINDOWS Sleep (timeout_ > 0 ? 
timeout_ : INFINITE); return 0; #elif defined ZMQ_HAVE_VXWORKS struct timespec ns_; ns_.tv_sec = timeout_ / 1000; ns_.tv_nsec = timeout_ % 1000 * 1000000; return nanosleep (&ns_, 0); #else return usleep (timeout_ * 1000); #endif } if (!items_) { errno = EFAULT; return -1; } return 1; } struct zmq_poll_select_fds_t_ { explicit zmq_poll_select_fds_t_ (int nitems_) : pollset_in (nitems_), pollset_out (nitems_), pollset_err (nitems_), inset (nitems_), outset (nitems_), errset (nitems_), maxfd (0) { FD_ZERO (pollset_in.get ()); FD_ZERO (pollset_out.get ()); FD_ZERO (pollset_err.get ()); } zmq::optimized_fd_set_t pollset_in; zmq::optimized_fd_set_t pollset_out; zmq::optimized_fd_set_t pollset_err; zmq::optimized_fd_set_t inset; zmq::optimized_fd_set_t outset; zmq::optimized_fd_set_t errset; zmq::fd_t maxfd; }; zmq_poll_select_fds_t_ zmq_poll_build_select_fds_ (zmq_pollitem_t *items_, int nitems_, int &rc) { // Ensure we do not attempt to select () on more than FD_SETSIZE // file descriptors. // TODO since this function is called by a client, we could return errno EINVAL/ENOMEM/... here zmq_assert (nitems_ <= FD_SETSIZE); zmq_poll_select_fds_t_ fds (nitems_); // Build the fd_sets for passing to select (). for (int i = 0; i != nitems_; i++) { // If the poll item is a 0MQ socket we are interested in input on the // notification file descriptor retrieved by the ZMQ_FD socket option. if (items_[i].socket) { size_t zmq_fd_size = sizeof (zmq::fd_t); zmq::fd_t notify_fd; if (zmq_getsockopt (items_[i].socket, ZMQ_FD, &notify_fd, &zmq_fd_size) == -1) { rc = -1; return fds; } if (items_[i].events) { FD_SET (notify_fd, fds.pollset_in.get ()); if (fds.maxfd < notify_fd) fds.maxfd = notify_fd; } } // Else, the poll item is a raw file descriptor. Convert the poll item // events to the appropriate fd_sets. else { if (items_[i].events & ZMQ_POLLIN) FD_SET (items_[i].fd, fds.pollset_in.get ()); if (items_[i].events & ZMQ_POLLOUT) FD_SET (items_[i].fd, fds.pollset_out.get ()); if (items_[i].events & ZMQ_POLLERR) FD_SET (items_[i].fd, fds.pollset_err.get ()); if (fds.maxfd < items_[i].fd) fds.maxfd = items_[i].fd; } } rc = 0; return fds; } timeval *zmq_poll_select_set_timeout_ ( long timeout_, bool first_pass, uint64_t now, uint64_t end, timeval &timeout) { timeval *ptimeout; if (first_pass) { timeout.tv_sec = 0; timeout.tv_usec = 0; ptimeout = &timeout; } else if (timeout_ < 0) ptimeout = NULL; else { timeout.tv_sec = static_cast<long> ((end - now) / 1000); timeout.tv_usec = static_cast<long> ((end - now) % 1000 * 1000); ptimeout = &timeout; } return ptimeout; } timespec *zmq_poll_select_set_timeout_ ( long timeout_, bool first_pass, uint64_t now, uint64_t end, timespec &timeout) { timespec *ptimeout; if (first_pass) { timeout.tv_sec = 0; timeout.tv_nsec = 0; ptimeout = &timeout; } else if (timeout_ < 0) ptimeout = NULL; else { timeout.tv_sec = static_cast<long> ((end - now) / 1000); timeout.tv_nsec = static_cast<long> ((end - now) % 1000 * 1000000); ptimeout = &timeout; } return ptimeout; } int zmq_poll_select_check_events_ (zmq_pollitem_t *items_, int nitems_, zmq_poll_select_fds_t_ &fds, int &nevents) { // Check for the events. for (int i = 0; i != nitems_; i++) { items_[i].revents = 0; // The poll item is a 0MQ socket. Retrieve pending events // using the ZMQ_EVENTS socket option. 
if (items_[i].socket) { size_t zmq_events_size = sizeof (uint32_t); uint32_t zmq_events; if (zmq_getsockopt (items_[i].socket, ZMQ_EVENTS, &zmq_events, &zmq_events_size) == -1) return -1; if ((items_[i].events & ZMQ_POLLOUT) && (zmq_events & ZMQ_POLLOUT)) items_[i].revents |= ZMQ_POLLOUT; if ((items_[i].events & ZMQ_POLLIN) && (zmq_events & ZMQ_POLLIN)) items_[i].revents |= ZMQ_POLLIN; } // Else, the poll item is a raw file descriptor, simply convert // the events to zmq_pollitem_t-style format. else { if (FD_ISSET (items_[i].fd, fds.inset.get ())) items_[i].revents |= ZMQ_POLLIN; if (FD_ISSET (items_[i].fd, fds.outset.get ())) items_[i].revents |= ZMQ_POLLOUT; if (FD_ISSET (items_[i].fd, fds.errset.get ())) items_[i].revents |= ZMQ_POLLERR; } if (items_[i].revents) nevents++; } return 0; } bool zmq_poll_must_break_loop_ (long timeout_, int nevents, bool &first_pass, zmq::clock_t &clock, uint64_t &now, uint64_t &end) { // If timeout is zero, exit immediately whether there are events or not. if (timeout_ == 0) return true; // If there are events to return, we can exit immediately. if (nevents) return true; // At this point we are meant to wait for events but there are none. // If timeout is infinite we can just loop until we get some events. if (timeout_ < 0) { if (first_pass) first_pass = false; return false; } // The timeout is finite and there are no events. In the first pass // we get a timestamp of when the polling have begun. (We assume that // first pass have taken negligible time). We also compute the time // when the polling should time out. if (first_pass) { now = clock.now_ms (); end = now + timeout_; if (now == end) return true; first_pass = false; return false; } // Find out whether timeout have expired. now = clock.now_ms (); if (now >= end) return true; // finally, in all other cases, we just continue return false; } #endif // ZMQ_HAVE_PPOLL #if !defined _WIN32 int zmq_ppoll (zmq_pollitem_t *items_, int nitems_, long timeout_, const sigset_t *sigmask_) #else // Windows has no sigset_t int zmq_ppoll (zmq_pollitem_t *items_, int nitems_, long timeout_, const void *sigmask_) #endif { #ifdef ZMQ_HAVE_PPOLL int rc = zmq_poll_check_items_ (items_, nitems_, timeout_); if (rc <= 0) { return rc; } zmq::clock_t clock; uint64_t now = 0; uint64_t end = 0; zmq_poll_select_fds_t_ fds = zmq_poll_build_select_fds_ (items_, nitems_, rc); if (rc == -1) { return -1; } bool first_pass = true; int nevents = 0; while (true) { // Compute the timeout for the subsequent poll. timespec timeout; timespec *ptimeout = zmq_poll_select_set_timeout_ (timeout_, first_pass, now, end, timeout); // Wait for events. Ignore interrupts if there's infinite timeout. 
while (true) { memcpy (fds.inset.get (), fds.pollset_in.get (), zmq::valid_pollset_bytes (*fds.pollset_in.get ())); memcpy (fds.outset.get (), fds.pollset_out.get (), zmq::valid_pollset_bytes (*fds.pollset_out.get ())); memcpy (fds.errset.get (), fds.pollset_err.get (), zmq::valid_pollset_bytes (*fds.pollset_err.get ())); int rc = pselect (fds.maxfd + 1, fds.inset.get (), fds.outset.get (), fds.errset.get (), ptimeout, sigmask_); if (unlikely (rc == -1)) { errno_assert (errno == EINTR || errno == EBADF); return -1; } break; } rc = zmq_poll_select_check_events_ (items_, nitems_, fds, nevents); if (rc < 0) { return rc; } if (zmq_poll_must_break_loop_ (timeout_, nevents, first_pass, clock, now, end)) { break; } } return nevents; #else errno = ENOTSUP; return -1; #endif // ZMQ_HAVE_PPOLL } // The poller functionality void *zmq_poller_new (void) { zmq::socket_poller_t *poller = new (std::nothrow) zmq::socket_poller_t; if (!poller) { errno = ENOMEM; } return poller; } int zmq_poller_destroy (void **poller_p_) { if (poller_p_) { const zmq::socket_poller_t *const poller = static_cast<const zmq::socket_poller_t *> (*poller_p_); if (poller && poller->check_tag ()) { delete poller; *poller_p_ = NULL; return 0; } } errno = EFAULT; return -1; } static int check_poller (void *const poller_) { if (!poller_ || !(static_cast<zmq::socket_poller_t *> (poller_))->check_tag ()) { errno = EFAULT; return -1; } return 0; } static int check_events (const short events_) { if (events_ & ~(ZMQ_POLLIN | ZMQ_POLLOUT | ZMQ_POLLERR | ZMQ_POLLPRI)) { errno = EINVAL; return -1; } return 0; } static int check_poller_registration_args (void *const poller_, void *const s_) { if (-1 == check_poller (poller_)) return -1; if (!s_ || !(static_cast<zmq::socket_base_t *> (s_))->check_tag ()) { errno = ENOTSOCK; return -1; } return 0; } static int check_poller_fd_registration_args (void *const poller_, const zmq::fd_t fd_) { if (-1 == check_poller (poller_)) return -1; if (fd_ == zmq::retired_fd) { errno = EBADF; return -1; } return 0; } int zmq_poller_size (void *poller_) { if (-1 == check_poller (poller_)) return -1; return (static_cast<zmq::socket_poller_t *> (poller_))->size (); } int zmq_poller_add (void *poller_, void *s_, void *user_data_, short events_) { if (-1 == check_poller_registration_args (poller_, s_) || -1 == check_events (events_)) return -1; zmq::socket_base_t *socket = static_cast<zmq::socket_base_t *> (s_); return (static_cast<zmq::socket_poller_t *> (poller_)) ->add (socket, user_data_, events_); } int zmq_poller_add_fd (void *poller_, zmq::fd_t fd_, void *user_data_, short events_) { if (-1 == check_poller_fd_registration_args (poller_, fd_) || -1 == check_events (events_)) return -1; return (static_cast<zmq::socket_poller_t *> (poller_)) ->add_fd (fd_, user_data_, events_); } int zmq_poller_modify (void *poller_, void *s_, short events_) { if (-1 == check_poller_registration_args (poller_, s_) || -1 == check_events (events_)) return -1; const zmq::socket_base_t *const socket = static_cast<const zmq::socket_base_t *> (s_); return (static_cast<zmq::socket_poller_t *> (poller_)) ->modify (socket, events_); } int zmq_poller_modify_fd (void *poller_, zmq::fd_t fd_, short events_) { if (-1 == check_poller_fd_registration_args (poller_, fd_) || -1 == check_events (events_)) return -1; return (static_cast<zmq::socket_poller_t *> (poller_)) ->modify_fd (fd_, events_); } int zmq_poller_remove (void *poller_, void *s_) { if (-1 == check_poller_registration_args (poller_, s_)) return -1; zmq::socket_base_t *socket = 
static_cast<zmq::socket_base_t *> (s_); return (static_cast<zmq::socket_poller_t *> (poller_))->remove (socket); } int zmq_poller_remove_fd (void *poller_, zmq::fd_t fd_) { if (-1 == check_poller_fd_registration_args (poller_, fd_)) return -1; return (static_cast<zmq::socket_poller_t *> (poller_))->remove_fd (fd_); } int zmq_poller_wait (void *poller_, zmq_poller_event_t *event_, long timeout_) { const int rc = zmq_poller_wait_all (poller_, event_, 1, timeout_); if (rc < 0 && event_) { event_->socket = NULL; event_->fd = zmq::retired_fd; event_->user_data = NULL; event_->events = 0; } // wait_all returns number of events, but we return 0 for any success return rc >= 0 ? 0 : rc; } int zmq_poller_wait_all (void *poller_, zmq_poller_event_t *events_, int n_events_, long timeout_) { if (-1 == check_poller (poller_)) return -1; if (!events_) { errno = EFAULT; return -1; } if (n_events_ < 0) { errno = EINVAL; return -1; } const int rc = (static_cast<zmq::socket_poller_t *> (poller_)) ->wait (reinterpret_cast<zmq::socket_poller_t::event_t *> (events_), n_events_, timeout_); return rc; } int zmq_poller_fd (void *poller_, zmq_fd_t *fd_) { if (!poller_ || !(static_cast<zmq::socket_poller_t *> (poller_)->check_tag ())) { errno = EFAULT; return -1; } return static_cast<zmq::socket_poller_t *> (poller_)->signaler_fd (fd_); } // Peer-specific state int zmq_socket_get_peer_state (void *s_, const void *routing_id_, size_t routing_id_size_) { const zmq::socket_base_t *const s = as_socket_base_t (s_); if (!s) return -1; return s->get_peer_state (routing_id_, routing_id_size_); } // Timers void *zmq_timers_new (void) { zmq::timers_t *timers = new (std::nothrow) zmq::timers_t; alloc_assert (timers); return timers; } int zmq_timers_destroy (void **timers_p_) { void *timers = *timers_p_; if (!timers || !(static_cast<zmq::timers_t *> (timers))->check_tag ()) { errno = EFAULT; return -1; } delete (static_cast<zmq::timers_t *> (timers)); *timers_p_ = NULL; return 0; } int zmq_timers_add (void *timers_, size_t interval_, zmq_timer_fn handler_, void *arg_) { if (!timers_ || !(static_cast<zmq::timers_t *> (timers_))->check_tag ()) { errno = EFAULT; return -1; } return (static_cast<zmq::timers_t *> (timers_)) ->add (interval_, handler_, arg_); } int zmq_timers_cancel (void *timers_, int timer_id_) { if (!timers_ || !(static_cast<zmq::timers_t *> (timers_))->check_tag ()) { errno = EFAULT; return -1; } return (static_cast<zmq::timers_t *> (timers_))->cancel (timer_id_); } int zmq_timers_set_interval (void *timers_, int timer_id_, size_t interval_) { if (!timers_ || !(static_cast<zmq::timers_t *> (timers_))->check_tag ()) { errno = EFAULT; return -1; } return (static_cast<zmq::timers_t *> (timers_)) ->set_interval (timer_id_, interval_); } int zmq_timers_reset (void *timers_, int timer_id_) { if (!timers_ || !(static_cast<zmq::timers_t *> (timers_))->check_tag ()) { errno = EFAULT; return -1; } return (static_cast<zmq::timers_t *> (timers_))->reset (timer_id_); } long zmq_timers_timeout (void *timers_) { if (!timers_ || !(static_cast<zmq::timers_t *> (timers_))->check_tag ()) { errno = EFAULT; return -1; } return (static_cast<zmq::timers_t *> (timers_))->timeout (); } int zmq_timers_execute (void *timers_) { if (!timers_ || !(static_cast<zmq::timers_t *> (timers_))->check_tag ()) { errno = EFAULT; return -1; } return (static_cast<zmq::timers_t *> (timers_))->execute (); } // The proxy functionality int zmq_proxy (void *frontend_, void *backend_, void *capture_) { if (!frontend_ || !backend_) { errno = EFAULT; return -1; 
} // Runs zmq::proxy_steerable with a NULL control_. return zmq::proxy (static_cast<zmq::socket_base_t *> (frontend_), static_cast<zmq::socket_base_t *> (backend_), static_cast<zmq::socket_base_t *> (capture_)); } int zmq_proxy_steerable (void *frontend_, void *backend_, void *capture_, void *control_) { if (!frontend_ || !backend_) { errno = EFAULT; return -1; } return zmq::proxy_steerable (static_cast<zmq::socket_base_t *> (frontend_), static_cast<zmq::socket_base_t *> (backend_), static_cast<zmq::socket_base_t *> (capture_), static_cast<zmq::socket_base_t *> (control_)); } // The deprecated device functionality int zmq_device (int /* type */, void *frontend_, void *backend_) { return zmq::proxy (static_cast<zmq::socket_base_t *> (frontend_), static_cast<zmq::socket_base_t *> (backend_), NULL); } // Probe library capabilities; for now, reports on transport and security int zmq_has (const char *capability_) { #if defined(ZMQ_HAVE_IPC) if (strcmp (capability_, zmq::protocol_name::ipc) == 0) return true; #endif #if defined(ZMQ_HAVE_OPENPGM) if (strcmp (capability_, zmq::protocol_name::pgm) == 0) return true; #endif #if defined(ZMQ_HAVE_TIPC) if (strcmp (capability_, zmq::protocol_name::tipc) == 0) return true; #endif #if defined(ZMQ_HAVE_NORM) if (strcmp (capability_, zmq::protocol_name::norm) == 0) return true; #endif #if defined(ZMQ_HAVE_CURVE) if (strcmp (capability_, "curve") == 0) return true; #endif #if defined(HAVE_LIBGSSAPI_KRB5) if (strcmp (capability_, "gssapi") == 0) return true; #endif #if defined(ZMQ_HAVE_VMCI) if (strcmp (capability_, zmq::protocol_name::vmci) == 0) return true; #endif #if defined(ZMQ_BUILD_DRAFT_API) if (strcmp (capability_, "draft") == 0) return true; #endif #if defined(ZMQ_HAVE_WS) if (strcmp (capability_, "WS") == 0) return true; #endif #if defined(ZMQ_HAVE_WSS) if (strcmp (capability_, "WSS") == 0) return true; #endif // Whatever the application asked for, we don't have return false; } int zmq_socket_monitor_pipes_stats (void *s_) { zmq::socket_base_t *s = as_socket_base_t (s_); if (!s) return -1; return s->query_pipes_stats (); }
sophomore_public/libzmq
src/zmq.cpp
C++
gpl-3.0
51,872
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __ZMQ_DRAFT_H_INCLUDED__ #define __ZMQ_DRAFT_H_INCLUDED__ /******************************************************************************/ /* These functions are DRAFT and disabled in stable releases, and subject to */ /* change at ANY time until declared stable. */ /******************************************************************************/ #ifndef ZMQ_BUILD_DRAFT_API /* DRAFT Socket types. */ #define ZMQ_SERVER 12 #define ZMQ_CLIENT 13 #define ZMQ_RADIO 14 #define ZMQ_DISH 15 #define ZMQ_GATHER 16 #define ZMQ_SCATTER 17 #define ZMQ_DGRAM 18 #define ZMQ_PEER 19 #define ZMQ_CHANNEL 20 /* DRAFT Socket options. */ #define ZMQ_ZAP_ENFORCE_DOMAIN 93 #define ZMQ_LOOPBACK_FASTPATH 94 #define ZMQ_METADATA 95 #define ZMQ_MULTICAST_LOOP 96 #define ZMQ_ROUTER_NOTIFY 97 #define ZMQ_XPUB_MANUAL_LAST_VALUE 98 #define ZMQ_SOCKS_USERNAME 99 #define ZMQ_SOCKS_PASSWORD 100 #define ZMQ_IN_BATCH_SIZE 101 #define ZMQ_OUT_BATCH_SIZE 102 #define ZMQ_WSS_KEY_PEM 103 #define ZMQ_WSS_CERT_PEM 104 #define ZMQ_WSS_TRUST_PEM 105 #define ZMQ_WSS_HOSTNAME 106 #define ZMQ_WSS_TRUST_SYSTEM 107 #define ZMQ_ONLY_FIRST_SUBSCRIBE 108 #define ZMQ_RECONNECT_STOP 109 #define ZMQ_HELLO_MSG 110 #define ZMQ_DISCONNECT_MSG 111 #define ZMQ_PRIORITY 112 #define ZMQ_BUSY_POLL 113 #define ZMQ_HICCUP_MSG 114 #define ZMQ_XSUB_VERBOSE_UNSUBSCRIBE 115 #define ZMQ_TOPICS_COUNT 116 #define ZMQ_NORM_MODE 117 #define ZMQ_NORM_UNICAST_NACK 118 #define ZMQ_NORM_BUFFER_SIZE 119 #define ZMQ_NORM_SEGMENT_SIZE 120 #define ZMQ_NORM_BLOCK_SIZE 121 #define ZMQ_NORM_NUM_PARITY 122 #define ZMQ_NORM_NUM_AUTOPARITY 123 #define ZMQ_NORM_PUSH 124 /* DRAFT ZMQ_NORM_MODE options */ #define ZMQ_NORM_FIXED 0 #define ZMQ_NORM_CC 1 #define ZMQ_NORM_CCL 2 #define ZMQ_NORM_CCE 3 #define ZMQ_NORM_CCE_ECNONLY 4 /* DRAFT ZMQ_RECONNECT_STOP options */ #define ZMQ_RECONNECT_STOP_CONN_REFUSED 0x1 #define ZMQ_RECONNECT_STOP_HANDSHAKE_FAILED 0x2 #define ZMQ_RECONNECT_STOP_AFTER_DISCONNECT 0x4 /* DRAFT Context options */ #define ZMQ_ZERO_COPY_RECV 10 /* DRAFT Context methods. */ int zmq_ctx_set_ext (void *context_, int option_, const void *optval_, size_t optvallen_); int zmq_ctx_get_ext (void *context_, int option_, void *optval_, size_t *optvallen_); /* DRAFT Socket methods. */ int zmq_join (void *s_, const char *group_); int zmq_leave (void *s_, const char *group_); /* DRAFT Msg methods. */ int zmq_msg_set_routing_id (zmq_msg_t *msg_, uint32_t routing_id_); uint32_t zmq_msg_routing_id (zmq_msg_t *msg_); int zmq_msg_set_group (zmq_msg_t *msg_, const char *group_); const char *zmq_msg_group (zmq_msg_t *msg_); int zmq_msg_init_buffer (zmq_msg_t *msg_, const void *buf_, size_t size_); /* DRAFT Msg property names. 
*/ #define ZMQ_MSG_PROPERTY_ROUTING_ID "Routing-Id" #define ZMQ_MSG_PROPERTY_SOCKET_TYPE "Socket-Type" #define ZMQ_MSG_PROPERTY_USER_ID "User-Id" #define ZMQ_MSG_PROPERTY_PEER_ADDRESS "Peer-Address" /* Router notify options */ #define ZMQ_NOTIFY_CONNECT 1 #define ZMQ_NOTIFY_DISCONNECT 2 /******************************************************************************/ /* Poller polling on sockets,fd and thread-safe sockets */ /******************************************************************************/ #if defined _WIN32 typedef SOCKET zmq_fd_t; #else typedef int zmq_fd_t; #endif typedef struct zmq_poller_event_t { void *socket; zmq_fd_t fd; void *user_data; short events; } zmq_poller_event_t; void *zmq_poller_new (void); int zmq_poller_destroy (void **poller_p_); int zmq_poller_size (void *poller_); int zmq_poller_add (void *poller_, void *socket_, void *user_data_, short events_); int zmq_poller_modify (void *poller_, void *socket_, short events_); int zmq_poller_remove (void *poller_, void *socket_); int zmq_poller_wait (void *poller_, zmq_poller_event_t *event_, long timeout_); int zmq_poller_wait_all (void *poller_, zmq_poller_event_t *events_, int n_events_, long timeout_); zmq_fd_t zmq_poller_fd (void *poller_); int zmq_poller_add_fd (void *poller_, zmq_fd_t fd_, void *user_data_, short events_); int zmq_poller_modify_fd (void *poller_, zmq_fd_t fd_, short events_); int zmq_poller_remove_fd (void *poller_, zmq_fd_t fd_); int zmq_socket_get_peer_state (void *socket_, const void *routing_id_, size_t routing_id_size_); /* DRAFT Socket monitoring events */ #define ZMQ_EVENT_PIPES_STATS 0x10000 #define ZMQ_CURRENT_EVENT_VERSION 1 #define ZMQ_CURRENT_EVENT_VERSION_DRAFT 2 #define ZMQ_EVENT_ALL_V1 ZMQ_EVENT_ALL #define ZMQ_EVENT_ALL_V2 ZMQ_EVENT_ALL_V1 | ZMQ_EVENT_PIPES_STATS int zmq_socket_monitor_versioned ( void *s_, const char *addr_, uint64_t events_, int event_version_, int type_); int zmq_socket_monitor_pipes_stats (void *s_); #if !defined _WIN32 int zmq_ppoll (zmq_pollitem_t *items_, int nitems_, long timeout_, const sigset_t *sigmask_); #else // Windows has no sigset_t int zmq_ppoll (zmq_pollitem_t *items_, int nitems_, long timeout_, const void *sigmask_); #endif #endif // ZMQ_BUILD_DRAFT_API #endif //ifndef __ZMQ_DRAFT_H_INCLUDED__
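An editor's sketch, not part of zmq_draft.h: one way a program built against the DRAFT API might use the poller declared above. The helper name and the choice of ZMQ_POLLIN are illustrative assumptions.

#define ZMQ_BUILD_DRAFT_API
#include <zmq.h>

//  Wait up to timeout_ms for input on 'socket'. Returns 0 if an event was
//  delivered into 'event', -1 otherwise (for example when the timeout
//  expires before any event arrives).
int wait_for_input (void *socket, long timeout_ms)
{
    void *poller = zmq_poller_new ();
    if (!poller)
        return -1;
    if (zmq_poller_add (poller, socket, NULL, ZMQ_POLLIN) != 0) {
        zmq_poller_destroy (&poller);
        return -1;
    }
    zmq_poller_event_t event;
    const int rc = zmq_poller_wait (poller, &event, timeout_ms);
    zmq_poller_destroy (&poller);
    return rc;
}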
sophomore_public/libzmq
src/zmq_draft.h
C++
gpl-3.0
6,149
/* SPDX-License-Identifier: MPL-2.0 */ #include "precompiled.hpp" #include "macros.hpp" #include "clock.hpp" #include "err.hpp" #include "thread.hpp" #include "atomic_counter.hpp" #include "atomic_ptr.hpp" #include "random.hpp" #include <assert.h> #include <new> #if !defined ZMQ_HAVE_WINDOWS #include <unistd.h> #endif #if defined(ZMQ_USE_LIBSODIUM) #include "sodium.h" #endif void zmq_sleep (int seconds_) { #if defined ZMQ_HAVE_WINDOWS Sleep (seconds_ * 1000); #else sleep (seconds_); #endif } void *zmq_stopwatch_start () { uint64_t *watch = static_cast<uint64_t *> (malloc (sizeof (uint64_t))); alloc_assert (watch); *watch = zmq::clock_t::now_us (); return static_cast<void *> (watch); } unsigned long zmq_stopwatch_intermediate (void *watch_) { const uint64_t end = zmq::clock_t::now_us (); const uint64_t start = *static_cast<uint64_t *> (watch_); return static_cast<unsigned long> (end - start); } unsigned long zmq_stopwatch_stop (void *watch_) { const unsigned long res = zmq_stopwatch_intermediate (watch_); free (watch_); return res; } void *zmq_threadstart (zmq_thread_fn *func_, void *arg_) { zmq::thread_t *thread = new (std::nothrow) zmq::thread_t; alloc_assert (thread); thread->start (func_, arg_, "ZMQapp"); return thread; } void zmq_threadclose (void *thread_) { zmq::thread_t *p_thread = static_cast<zmq::thread_t *> (thread_); p_thread->stop (); LIBZMQ_DELETE (p_thread); } // Z85 codec, taken from 0MQ RFC project, implements RFC32 Z85 encoding // Maps base 256 to base 85 static char encoder[85 + 1] = {"0123456789" "abcdefghij" "klmnopqrst" "uvwxyzABCD" "EFGHIJKLMN" "OPQRSTUVWX" "YZ.-:+=^!/" "*?&<>()[]{" "}@%$#"}; // Maps base 85 to base 256 // We chop off lower 32 and higher 128 ranges // 0xFF denotes invalid characters within this range static uint8_t decoder[96] = { 0xFF, 0x44, 0xFF, 0x54, 0x53, 0x52, 0x48, 0xFF, 0x4B, 0x4C, 0x46, 0x41, 0xFF, 0x3F, 0x3E, 0x45, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x40, 0xFF, 0x49, 0x42, 0x4A, 0x47, 0x51, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x4D, 0xFF, 0x4E, 0x43, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x4F, 0xFF, 0x50, 0xFF, 0xFF}; // -------------------------------------------------------------------------- // Encode a binary frame as a string; destination string MUST be at least // size * 5 / 4 bytes long plus 1 byte for the null terminator. Returns // dest. Size must be a multiple of 4. // Returns NULL and sets errno = EINVAL for invalid input. char *zmq_z85_encode (char *dest_, const uint8_t *data_, size_t size_) { if (size_ % 4 != 0) { errno = EINVAL; return NULL; } unsigned int char_nbr = 0; unsigned int byte_nbr = 0; uint32_t value = 0; while (byte_nbr < size_) { // Accumulate value in base 256 (binary) value = value * 256 + data_[byte_nbr++]; if (byte_nbr % 4 == 0) { // Output value in base 85 unsigned int divisor = 85 * 85 * 85 * 85; while (divisor) { dest_[char_nbr++] = encoder[value / divisor % 85]; divisor /= 85; } value = 0; } } assert (char_nbr == size_ * 5 / 4); dest_[char_nbr] = 0; return dest_; } // -------------------------------------------------------------------------- // Decode an encoded string into a binary frame; dest must be at least // strlen (string) * 4 / 5 bytes long. Returns dest. strlen (string) // must be a multiple of 5. 
// Returns NULL and sets errno = EINVAL for invalid input. uint8_t *zmq_z85_decode (uint8_t *dest_, const char *string_) { unsigned int byte_nbr = 0; unsigned int char_nbr = 0; uint32_t value = 0; size_t src_len = strlen (string_); if (src_len < 5 || src_len % 5 != 0) goto error_inval; while (string_[char_nbr]) { // Accumulate value in base 85 if (UINT32_MAX / 85 < value) { // Invalid z85 encoding, represented value exceeds 0xffffffff goto error_inval; } value *= 85; const uint8_t index = string_[char_nbr++] - 32; if (index >= sizeof (decoder)) { // Invalid z85 encoding, character outside range goto error_inval; } const uint32_t summand = decoder[index]; if (summand == 0xFF || summand > (UINT32_MAX - value)) { // Invalid z85 encoding, invalid character or represented value exceeds 0xffffffff goto error_inval; } value += summand; if (char_nbr % 5 == 0) { // Output value in base 256 unsigned int divisor = 256 * 256 * 256; while (divisor) { dest_[byte_nbr++] = value / divisor % 256; divisor /= 256; } value = 0; } } if (char_nbr % 5 != 0) { goto error_inval; } assert (byte_nbr == strlen (string_) * 4 / 5); return dest_; error_inval: errno = EINVAL; return NULL; } // -------------------------------------------------------------------------- // Generate a public/private keypair with libsodium. // Generated keys will be 40 byte z85-encoded strings. // Returns 0 on success, -1 on failure, setting errno. // Sets errno = ENOTSUP in the absence of a CURVE library. int zmq_curve_keypair (char *z85_public_key_, char *z85_secret_key_) { #if defined(ZMQ_HAVE_CURVE) #if crypto_box_PUBLICKEYBYTES != 32 || crypto_box_SECRETKEYBYTES != 32 #error "CURVE encryption library not built correctly" #endif uint8_t public_key[32]; uint8_t secret_key[32]; zmq::random_open (); const int res = crypto_box_keypair (public_key, secret_key); zmq_z85_encode (z85_public_key_, public_key, 32); zmq_z85_encode (z85_secret_key_, secret_key, 32); zmq::random_close (); return res; #else (void) z85_public_key_, (void) z85_secret_key_; errno = ENOTSUP; return -1; #endif } // -------------------------------------------------------------------------- // Derive the public key from a private key using libsodium. // Derived key will be 40 byte z85-encoded string. // Returns 0 on success, -1 on failure, setting errno. // Sets errno = ENOTSUP in the absence of a CURVE library. int zmq_curve_public (char *z85_public_key_, const char *z85_secret_key_) { #if defined(ZMQ_HAVE_CURVE) #if crypto_box_PUBLICKEYBYTES != 32 || crypto_box_SECRETKEYBYTES != 32 #error "CURVE encryption library not built correctly" #endif uint8_t public_key[32]; uint8_t secret_key[32]; zmq::random_open (); if (zmq_z85_decode (secret_key, z85_secret_key_) == NULL) return -1; // Return codes are suppressed as none of these can actually fail. 
crypto_scalarmult_base (public_key, secret_key); zmq_z85_encode (z85_public_key_, public_key, 32); zmq::random_close (); return 0; #else (void) z85_public_key_, (void) z85_secret_key_; errno = ENOTSUP; return -1; #endif } // -------------------------------------------------------------------------- // Initialize a new atomic counter, which is set to zero void *zmq_atomic_counter_new (void) { zmq::atomic_counter_t *counter = new (std::nothrow) zmq::atomic_counter_t; alloc_assert (counter); return counter; } // Set the value of the atomic counter void zmq_atomic_counter_set (void *counter_, int value_) { (static_cast<zmq::atomic_counter_t *> (counter_))->set (value_); } // Increment the atomic counter, and return the old value int zmq_atomic_counter_inc (void *counter_) { return (static_cast<zmq::atomic_counter_t *> (counter_))->add (1); } // Decrement the atomic counter and return 1 (if counter >= 1), or // 0 if counter hit zero. int zmq_atomic_counter_dec (void *counter_) { return (static_cast<zmq::atomic_counter_t *> (counter_))->sub (1) ? 1 : 0; } // Return actual value of atomic counter int zmq_atomic_counter_value (void *counter_) { return (static_cast<zmq::atomic_counter_t *> (counter_))->get (); } // Destroy atomic counter, and set reference to NULL void zmq_atomic_counter_destroy (void **counter_p_) { delete (static_cast<zmq::atomic_counter_t *> (*counter_p_)); *counter_p_ = NULL; }
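An editor's sketch, separate from zmq_utils.cpp: it exercises the Z85 and CURVE helpers defined above. It assumes libzmq was built with a CURVE library; otherwise zmq_curve_keypair () fails with ENOTSUP, as implemented above.

#include <zmq.h>
#include <stdint.h>
#include <stdio.h>

int main (void)
{
    char z85_public[41]; //  32 binary bytes -> 40 Z85 characters + terminator
    char z85_secret[41];
    if (zmq_curve_keypair (z85_public, z85_secret) != 0) {
        printf ("CURVE support not available\n");
        return 1;
    }

    //  Round-trip the public key through the Z85 codec; the re-encoded
    //  string must equal the original.
    uint8_t raw_public[32];
    if (zmq_z85_decode (raw_public, z85_public) == NULL)
        return 1;

    char re_encoded[41];
    zmq_z85_encode (re_encoded, raw_public, sizeof raw_public);
    printf ("public: %s\nagain:  %s\n", z85_public, re_encoded);
    return 0;
}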
sophomore_public/libzmq
src/zmq_utils.cpp
C++
gpl-3.0
8,824
/* SPDX-License-Identifier: MPL-2.0 */ #include "precompiled.hpp" #include "macros.hpp" #include <limits.h> #include <string.h> #ifndef ZMQ_HAVE_WINDOWS #include <unistd.h> #endif #include <new> #include <sstream> #include "zmtp_engine.hpp" #include "io_thread.hpp" #include "session_base.hpp" #include "v1_encoder.hpp" #include "v1_decoder.hpp" #include "v2_encoder.hpp" #include "v2_decoder.hpp" #include "v3_1_encoder.hpp" #include "null_mechanism.hpp" #include "plain_client.hpp" #include "plain_server.hpp" #include "gssapi_client.hpp" #include "gssapi_server.hpp" #include "curve_client.hpp" #include "curve_server.hpp" #include "raw_decoder.hpp" #include "raw_encoder.hpp" #include "config.hpp" #include "err.hpp" #include "ip.hpp" #include "likely.hpp" #include "wire.hpp" zmq::zmtp_engine_t::zmtp_engine_t ( fd_t fd_, const options_t &options_, const endpoint_uri_pair_t &endpoint_uri_pair_) : stream_engine_base_t (fd_, options_, endpoint_uri_pair_, true), _greeting_size (v2_greeting_size), _greeting_bytes_read (0), _subscription_required (false), _heartbeat_timeout (0) { _next_msg = static_cast<int (stream_engine_base_t::*) (msg_t *)> ( &zmtp_engine_t::routing_id_msg); _process_msg = static_cast<int (stream_engine_base_t::*) (msg_t *)> ( &zmtp_engine_t::process_routing_id_msg); int rc = _pong_msg.init (); errno_assert (rc == 0); rc = _routing_id_msg.init (); errno_assert (rc == 0); if (_options.heartbeat_interval > 0) { _heartbeat_timeout = _options.heartbeat_timeout; if (_heartbeat_timeout == -1) _heartbeat_timeout = _options.heartbeat_interval; } } zmq::zmtp_engine_t::~zmtp_engine_t () { const int rc = _routing_id_msg.close (); errno_assert (rc == 0); } void zmq::zmtp_engine_t::plug_internal () { // start optional timer, to prevent handshake hanging on no input set_handshake_timer (); // Send the 'length' and 'flags' fields of the routing id message. // The 'length' field is encoded in the long format. _outpos = _greeting_send; _outpos[_outsize++] = UCHAR_MAX; put_uint64 (&_outpos[_outsize], _options.routing_id_size + 1); _outsize += 8; _outpos[_outsize++] = 0x7f; set_pollin (); set_pollout (); // Flush all the data that may have been already received downstream. in_event (); } // Position of the revision and minor fields in the greeting. const size_t revision_pos = 10; const size_t minor_pos = 11; bool zmq::zmtp_engine_t::handshake () { zmq_assert (_greeting_bytes_read < _greeting_size); // Receive the greeting. const int rc = receive_greeting (); if (rc == -1) return false; const bool unversioned = rc != 0; if (!(this ->*select_handshake_fun (unversioned, _greeting_recv[revision_pos], _greeting_recv[minor_pos])) ()) return false; // Start polling for output if necessary. if (_outsize == 0) set_pollout (); return true; } int zmq::zmtp_engine_t::receive_greeting () { bool unversioned = false; while (_greeting_bytes_read < _greeting_size) { const int n = read (_greeting_recv + _greeting_bytes_read, _greeting_size - _greeting_bytes_read); if (n == -1) { if (errno != EAGAIN) error (connection_error); return -1; } _greeting_bytes_read += n; // We have received at least one byte from the peer. // If the first byte is not 0xff, we know that the // peer is using unversioned protocol. if (_greeting_recv[0] != 0xff) { unversioned = true; break; } if (_greeting_bytes_read < signature_size) continue; // Inspect the right-most bit of the 10th byte (which coincides // with the 'flags' field if a regular message was sent). // Zero indicates this is a header of a routing id message // (i.e. 
the peer is using the unversioned protocol). if (!(_greeting_recv[9] & 0x01)) { unversioned = true; break; } // The peer is using versioned protocol. receive_greeting_versioned (); } return unversioned ? 1 : 0; } void zmq::zmtp_engine_t::receive_greeting_versioned () { // Send the major version number. if (_outpos + _outsize == _greeting_send + signature_size) { if (_outsize == 0) set_pollout (); _outpos[_outsize++] = 3; // Major version number } if (_greeting_bytes_read > signature_size) { if (_outpos + _outsize == _greeting_send + signature_size + 1) { if (_outsize == 0) set_pollout (); // Use ZMTP/2.0 to talk to older peers. if (_greeting_recv[revision_pos] == ZMTP_1_0 || _greeting_recv[revision_pos] == ZMTP_2_0) _outpos[_outsize++] = _options.type; else { _outpos[_outsize++] = 1; // Minor version number memset (_outpos + _outsize, 0, 20); zmq_assert (_options.mechanism == ZMQ_NULL || _options.mechanism == ZMQ_PLAIN || _options.mechanism == ZMQ_CURVE || _options.mechanism == ZMQ_GSSAPI); if (_options.mechanism == ZMQ_NULL) memcpy (_outpos + _outsize, "NULL", 4); else if (_options.mechanism == ZMQ_PLAIN) memcpy (_outpos + _outsize, "PLAIN", 5); else if (_options.mechanism == ZMQ_GSSAPI) memcpy (_outpos + _outsize, "GSSAPI", 6); else if (_options.mechanism == ZMQ_CURVE) memcpy (_outpos + _outsize, "CURVE", 5); _outsize += 20; memset (_outpos + _outsize, 0, 32); _outsize += 32; _greeting_size = v3_greeting_size; } } } } zmq::zmtp_engine_t::handshake_fun_t zmq::zmtp_engine_t::select_handshake_fun ( bool unversioned_, unsigned char revision_, unsigned char minor_) { // Is the peer using ZMTP/1.0 with no revision number? if (unversioned_) { return &zmtp_engine_t::handshake_v1_0_unversioned; } switch (revision_) { case ZMTP_1_0: return &zmtp_engine_t::handshake_v1_0; case ZMTP_2_0: return &zmtp_engine_t::handshake_v2_0; case ZMTP_3_x: switch (minor_) { case 0: return &zmtp_engine_t::handshake_v3_0; default: return &zmtp_engine_t::handshake_v3_1; } default: return &zmtp_engine_t::handshake_v3_1; } } bool zmq::zmtp_engine_t::handshake_v1_0_unversioned () { // We send and receive rest of routing id message if (session ()->zap_enabled ()) { // reject ZMTP 1.0 connections if ZAP is enabled error (protocol_error); return false; } _encoder = new (std::nothrow) v1_encoder_t (_options.out_batch_size); alloc_assert (_encoder); _decoder = new (std::nothrow) v1_decoder_t (_options.in_batch_size, _options.maxmsgsize); alloc_assert (_decoder); // We have already sent the message header. // Since there is no way to tell the encoder to // skip the message header, we simply throw that // header data away. const size_t header_size = _options.routing_id_size + 1 >= UCHAR_MAX ? 10 : 2; unsigned char tmp[10], *bufferp = tmp; // Prepare the routing id message and load it into encoder. // Then consume bytes we have already sent to the peer. int rc = _routing_id_msg.close (); zmq_assert (rc == 0); rc = _routing_id_msg.init_size (_options.routing_id_size); zmq_assert (rc == 0); memcpy (_routing_id_msg.data (), _options.routing_id, _options.routing_id_size); _encoder->load_msg (&_routing_id_msg); const size_t buffer_size = _encoder->encode (&bufferp, header_size); zmq_assert (buffer_size == header_size); // Make sure the decoder sees the data we have already received. _inpos = _greeting_recv; _insize = _greeting_bytes_read; // To allow for interoperability with peers that do not forward // their subscriptions, we inject a phantom subscription message // message into the incoming message stream. 
if (_options.type == ZMQ_PUB || _options.type == ZMQ_XPUB) _subscription_required = true; // We are sending our routing id now and the next message // will come from the socket. _next_msg = &zmtp_engine_t::pull_msg_from_session; // We are expecting routing id message. _process_msg = static_cast<int (stream_engine_base_t::*) (msg_t *)> ( &zmtp_engine_t::process_routing_id_msg); return true; } bool zmq::zmtp_engine_t::handshake_v1_0 () { if (session ()->zap_enabled ()) { // reject ZMTP 1.0 connections if ZAP is enabled error (protocol_error); return false; } _encoder = new (std::nothrow) v1_encoder_t (_options.out_batch_size); alloc_assert (_encoder); _decoder = new (std::nothrow) v1_decoder_t (_options.in_batch_size, _options.maxmsgsize); alloc_assert (_decoder); return true; } bool zmq::zmtp_engine_t::handshake_v2_0 () { if (session ()->zap_enabled ()) { // reject ZMTP 2.0 connections if ZAP is enabled error (protocol_error); return false; } _encoder = new (std::nothrow) v2_encoder_t (_options.out_batch_size); alloc_assert (_encoder); _decoder = new (std::nothrow) v2_decoder_t ( _options.in_batch_size, _options.maxmsgsize, _options.zero_copy); alloc_assert (_decoder); return true; } bool zmq::zmtp_engine_t::handshake_v3_x (const bool downgrade_sub_) { if (_options.mechanism == ZMQ_NULL && memcmp (_greeting_recv + 12, "NULL\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0", 20) == 0) { _mechanism = new (std::nothrow) null_mechanism_t (session (), _peer_address, _options); alloc_assert (_mechanism); } else if (_options.mechanism == ZMQ_PLAIN && memcmp (_greeting_recv + 12, "PLAIN\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0", 20) == 0) { if (_options.as_server) _mechanism = new (std::nothrow) plain_server_t (session (), _peer_address, _options); else _mechanism = new (std::nothrow) plain_client_t (session (), _options); alloc_assert (_mechanism); } #ifdef ZMQ_HAVE_CURVE else if (_options.mechanism == ZMQ_CURVE && memcmp (_greeting_recv + 12, "CURVE\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0", 20) == 0) { if (_options.as_server) _mechanism = new (std::nothrow) curve_server_t ( session (), _peer_address, _options, downgrade_sub_); else _mechanism = new (std::nothrow) curve_client_t (session (), _options, downgrade_sub_); alloc_assert (_mechanism); } #endif #ifdef HAVE_LIBGSSAPI_KRB5 else if (_options.mechanism == ZMQ_GSSAPI && memcmp (_greeting_recv + 12, "GSSAPI\0\0\0\0\0\0\0\0\0\0\0\0\0\0", 20) == 0) { if (_options.as_server) _mechanism = new (std::nothrow) gssapi_server_t (session (), _peer_address, _options); else _mechanism = new (std::nothrow) gssapi_client_t (session (), _options); alloc_assert (_mechanism); } #endif else { socket ()->event_handshake_failed_protocol ( session ()->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_MECHANISM_MISMATCH); error (protocol_error); return false; } #ifndef ZMQ_HAVE_CURVE LIBZMQ_UNUSED (downgrade_sub_); #endif _next_msg = &zmtp_engine_t::next_handshake_command; _process_msg = &zmtp_engine_t::process_handshake_command; return true; } bool zmq::zmtp_engine_t::handshake_v3_0 () { _encoder = new (std::nothrow) v2_encoder_t (_options.out_batch_size); alloc_assert (_encoder); _decoder = new (std::nothrow) v2_decoder_t ( _options.in_batch_size, _options.maxmsgsize, _options.zero_copy); alloc_assert (_decoder); return zmq::zmtp_engine_t::handshake_v3_x (true); } bool zmq::zmtp_engine_t::handshake_v3_1 () { _encoder = new (std::nothrow) v3_1_encoder_t (_options.out_batch_size); alloc_assert (_encoder); _decoder = new (std::nothrow) v2_decoder_t ( _options.in_batch_size, _options.maxmsgsize, 
_options.zero_copy); alloc_assert (_decoder); return zmq::zmtp_engine_t::handshake_v3_x (false); } int zmq::zmtp_engine_t::routing_id_msg (msg_t *msg_) { const int rc = msg_->init_size (_options.routing_id_size); errno_assert (rc == 0); if (_options.routing_id_size > 0) memcpy (msg_->data (), _options.routing_id, _options.routing_id_size); _next_msg = &zmtp_engine_t::pull_msg_from_session; return 0; } int zmq::zmtp_engine_t::process_routing_id_msg (msg_t *msg_) { if (_options.recv_routing_id) { msg_->set_flags (msg_t::routing_id); const int rc = session ()->push_msg (msg_); errno_assert (rc == 0); } else { int rc = msg_->close (); errno_assert (rc == 0); rc = msg_->init (); errno_assert (rc == 0); } if (_subscription_required) { msg_t subscription; // Inject the subscription message, so that also // ZMQ 2.x peers receive published messages. int rc = subscription.init_size (1); errno_assert (rc == 0); *static_cast<unsigned char *> (subscription.data ()) = 1; rc = session ()->push_msg (&subscription); errno_assert (rc == 0); } _process_msg = &zmtp_engine_t::push_msg_to_session; return 0; } int zmq::zmtp_engine_t::produce_ping_message (msg_t *msg_) { // 16-bit TTL + \4PING == 7 const size_t ping_ttl_len = msg_t::ping_cmd_name_size + 2; zmq_assert (_mechanism != NULL); int rc = msg_->init_size (ping_ttl_len); errno_assert (rc == 0); msg_->set_flags (msg_t::command); // Copy in the command message memcpy (msg_->data (), "\4PING", msg_t::ping_cmd_name_size); uint16_t ttl_val = htons (_options.heartbeat_ttl); memcpy (static_cast<uint8_t *> (msg_->data ()) + msg_t::ping_cmd_name_size, &ttl_val, sizeof (ttl_val)); rc = _mechanism->encode (msg_); _next_msg = &zmtp_engine_t::pull_and_encode; if (!_has_timeout_timer && _heartbeat_timeout > 0) { add_timer (_heartbeat_timeout, heartbeat_timeout_timer_id); _has_timeout_timer = true; } return rc; } int zmq::zmtp_engine_t::produce_pong_message (msg_t *msg_) { zmq_assert (_mechanism != NULL); int rc = msg_->move (_pong_msg); errno_assert (rc == 0); rc = _mechanism->encode (msg_); _next_msg = &zmtp_engine_t::pull_and_encode; return rc; } int zmq::zmtp_engine_t::process_heartbeat_message (msg_t *msg_) { if (msg_->is_ping ()) { // 16-bit TTL + \4PING == 7 const size_t ping_ttl_len = msg_t::ping_cmd_name_size + 2; const size_t ping_max_ctx_len = 16; uint16_t remote_heartbeat_ttl; // Get the remote heartbeat TTL to setup the timer memcpy (&remote_heartbeat_ttl, static_cast<uint8_t *> (msg_->data ()) + msg_t::ping_cmd_name_size, ping_ttl_len - msg_t::ping_cmd_name_size); remote_heartbeat_ttl = ntohs (remote_heartbeat_ttl); // The remote heartbeat is in 10ths of a second // so we multiply it by 100 to get the timer interval in ms. remote_heartbeat_ttl *= 100; if (!_has_ttl_timer && remote_heartbeat_ttl > 0) { add_timer (remote_heartbeat_ttl, heartbeat_ttl_timer_id); _has_ttl_timer = true; } // As per ZMTP 3.1 the PING command might contain an up to 16 bytes // context which needs to be PONGed back, so build the pong message // here and store it. Truncate it if it's too long. // Given the engine goes straight to out_event, sequential PINGs will // not be a problem. 
const size_t context_len = std::min (msg_->size () - ping_ttl_len, ping_max_ctx_len); const int rc = _pong_msg.init_size (msg_t::ping_cmd_name_size + context_len); errno_assert (rc == 0); _pong_msg.set_flags (msg_t::command); memcpy (_pong_msg.data (), "\4PONG", msg_t::ping_cmd_name_size); if (context_len > 0) memcpy (static_cast<uint8_t *> (_pong_msg.data ()) + msg_t::ping_cmd_name_size, static_cast<uint8_t *> (msg_->data ()) + ping_ttl_len, context_len); _next_msg = static_cast<int (stream_engine_base_t::*) (msg_t *)> ( &zmtp_engine_t::produce_pong_message); out_event (); } return 0; } int zmq::zmtp_engine_t::process_command_message (msg_t *msg_) { const uint8_t cmd_name_size = *(static_cast<const uint8_t *> (msg_->data ())); const size_t ping_name_size = msg_t::ping_cmd_name_size - 1; const size_t sub_name_size = msg_t::sub_cmd_name_size - 1; const size_t cancel_name_size = msg_t::cancel_cmd_name_size - 1; // Malformed command if (unlikely (msg_->size () < cmd_name_size + sizeof (cmd_name_size))) return -1; const uint8_t *const cmd_name = static_cast<const uint8_t *> (msg_->data ()) + 1; if (cmd_name_size == ping_name_size && memcmp (cmd_name, "PING", cmd_name_size) == 0) msg_->set_flags (zmq::msg_t::ping); if (cmd_name_size == ping_name_size && memcmp (cmd_name, "PONG", cmd_name_size) == 0) msg_->set_flags (zmq::msg_t::pong); if (cmd_name_size == sub_name_size && memcmp (cmd_name, "SUBSCRIBE", cmd_name_size) == 0) msg_->set_flags (zmq::msg_t::subscribe); if (cmd_name_size == cancel_name_size && memcmp (cmd_name, "CANCEL", cmd_name_size) == 0) msg_->set_flags (zmq::msg_t::cancel); if (msg_->is_ping () || msg_->is_pong ()) return process_heartbeat_message (msg_); return 0; }
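An editor's sketch, not part of zmtp_engine.cpp: the public socket options that drive the PING/PONG logic above. The values are illustrative; all three options take milliseconds, and the TTL travels on the wire in tenths of a second, which is why the code above multiplies the received value by 100.

#include <zmq.h>

//  Enable ZMTP heartbeats on a socket (values are examples only).
void enable_heartbeats (void *socket)
{
    int ivl = 2000;     //  send a PING after 2 s without traffic
    int timeout = 5000; //  give up on the connection 5 s after an unanswered PING
    int ttl = 10000;    //  TTL advertised to the peer (100 deciseconds on the wire)

    zmq_setsockopt (socket, ZMQ_HEARTBEAT_IVL, &ivl, sizeof ivl);
    zmq_setsockopt (socket, ZMQ_HEARTBEAT_TIMEOUT, &timeout, sizeof timeout);
    zmq_setsockopt (socket, ZMQ_HEARTBEAT_TTL, &ttl, sizeof ttl);
}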
sophomore_public/libzmq
src/zmtp_engine.cpp
C++
gpl-3.0
18,596
/* SPDX-License-Identifier: MPL-2.0 */ #ifndef __ZMQ_ZMTP_ENGINE_HPP_INCLUDED__ #define __ZMQ_ZMTP_ENGINE_HPP_INCLUDED__ #include <stddef.h> #include "fd.hpp" #include "i_engine.hpp" #include "io_object.hpp" #include "i_encoder.hpp" #include "i_decoder.hpp" #include "options.hpp" #include "socket_base.hpp" #include "metadata.hpp" #include "msg.hpp" #include "stream_engine_base.hpp" namespace zmq { // Protocol revisions enum { ZMTP_1_0 = 0, ZMTP_2_0 = 1, ZMTP_3_x = 3 }; class io_thread_t; class session_base_t; class mechanism_t; // This engine handles any socket with SOCK_STREAM semantics, // e.g. TCP socket or an UNIX domain socket. class zmtp_engine_t ZMQ_FINAL : public stream_engine_base_t { public: zmtp_engine_t (fd_t fd_, const options_t &options_, const endpoint_uri_pair_t &endpoint_uri_pair_); ~zmtp_engine_t (); protected: // Detects the protocol used by the peer. bool handshake (); void plug_internal (); int process_command_message (msg_t *msg_); int produce_ping_message (msg_t *msg_); int process_heartbeat_message (msg_t *msg_); int produce_pong_message (msg_t *msg_); private: // Receive the greeting from the peer. int receive_greeting (); void receive_greeting_versioned (); typedef bool (zmtp_engine_t::*handshake_fun_t) (); static handshake_fun_t select_handshake_fun (bool unversioned, unsigned char revision, unsigned char minor); bool handshake_v1_0_unversioned (); bool handshake_v1_0 (); bool handshake_v2_0 (); bool handshake_v3_x (bool downgrade_sub); bool handshake_v3_0 (); bool handshake_v3_1 (); int routing_id_msg (msg_t *msg_); int process_routing_id_msg (msg_t *msg_); msg_t _routing_id_msg; // Need to store PING payload for PONG msg_t _pong_msg; static const size_t signature_size = 10; // Size of ZMTP/1.0 and ZMTP/2.0 greeting message static const size_t v2_greeting_size = 12; // Size of ZMTP/3.0 greeting message static const size_t v3_greeting_size = 64; // Expected greeting size. size_t _greeting_size; // Greeting received from, and sent to peer unsigned char _greeting_recv[v3_greeting_size]; unsigned char _greeting_send[v3_greeting_size]; // Size of greeting received so far unsigned int _greeting_bytes_read; // Indicates whether the engine is to inject a phantom // subscription message into the incoming stream. // Needed to support old peers. bool _subscription_required; int _heartbeat_timeout; ZMQ_NON_COPYABLE_NOR_MOVABLE (zmtp_engine_t) }; } #endif
sophomore_public/libzmq
src/zmtp_engine.hpp
C++
gpl-3.0
2,779
# CMake build script for ZeroMQ tests cmake_minimum_required(VERSION 2.8.1...3.31) # On Windows: solution file will be called tests.sln project(tests) set(tests test_ancillaries test_system test_pair_inproc test_pair_tcp test_reqrep_inproc test_reqrep_tcp test_hwm test_hwm_pubsub test_reqrep_device test_sub_forward test_invalid_rep test_msg_flags test_msg_ffn test_connect_resolve test_immediate test_last_endpoint test_term_endpoint test_router_mandatory test_probe_router test_stream test_stream_empty test_stream_disconnect test_disconnect_inproc test_unbind_wildcard test_ctx_options test_ctx_destroy test_security_no_zap_handler test_security_null test_security_plain test_security_zap test_iov test_spec_req test_spec_rep test_spec_dealer test_spec_router test_spec_pushpull test_req_correlate test_req_relaxed test_conflate test_inproc_connect test_issue_566 test_shutdown_stress test_timeo test_many_sockets test_diffserv test_connect_rid test_xpub_nodrop test_pub_invert_matching test_setsockopt test_sockopt_hwm test_heartbeats test_atomics test_bind_src_address test_capabilities test_metadata test_router_handover test_srcfd test_stream_timeout test_xpub_manual test_xpub_topic test_xpub_welcome_msg test_xpub_verbose test_base85 test_bind_after_connect_tcp test_sodium test_monitor test_socket_null test_reconnect_ivl test_reconnect_options test_tcp_accept_filter test_mock_pub_sub) if(NOT WIN32) list(APPEND tests test_security_gssapi test_socks test_connect_null_fuzzer test_bind_null_fuzzer test_connect_fuzzer test_bind_fuzzer) endif() if(ZMQ_HAVE_CURVE) # TODO: always fails running under Github Actions via CMake if(NOT CMAKE_SYSTEM_NAME MATCHES "Linux") list(APPEND tests test_security_curve) endif() if(NOT WIN32) list(APPEND tests test_connect_curve_fuzzer test_bind_curve_fuzzer test_z85_decode_fuzzer) endif() endif() option(ENABLE_CAPSH "Run tests that require sudo and capsh (for cap_net_admin)" OFF) if(ENABLE_CAPSH) find_program(CAPSH_PROGRAM NAMES capsh) if(CAPSH_PROGRAM) list(APPEND tests test_pair_tcp_cap_net_admin) else() message(STATUS "capsh not found, skipping tests that require CAP_NET_ADMIN") endif() endif() if(ZMQ_HAVE_IPC) list(APPEND tests test_ipc_wildcard test_pair_ipc test_reqrep_ipc test_rebind_ipc) endif() if(NOT WIN32) list( APPEND tests test_proxy test_proxy_hwm test_proxy_single_socket test_proxy_steerable test_proxy_terminate test_getsockopt_memset test_filter_ipc test_stream_exceeds_buffer test_router_mandatory_hwm test_use_fd test_zmq_poll_fd) if(HAVE_FORK) list(APPEND tests test_fork) endif() if(CMAKE_SYSTEM_NAME MATCHES "Linux") list(APPEND tests test_abstract_ipc) if(ZMQ_HAVE_TIPC) list( APPEND tests test_address_tipc test_pair_tipc test_reqrep_device_tipc test_reqrep_tipc test_router_mandatory_tipc test_sub_forward_tipc test_connect_delay_tipc test_shutdown_stress_tipc test_term_endpoint_tipc) endif() endif() endif() if(WITH_VMCI) list(APPEND tests test_pair_vmci test_reqrep_vmci) endif() if(ENABLE_DRAFTS) list( APPEND tests test_poller test_thread_safe test_client_server test_timers test_radio_dish test_scatter_gather test_dgram test_app_meta test_router_notify test_xpub_manual_last_value test_peer test_msg_init test_channel test_hello_msg test_disconnect_msg test_hiccup_msg test_zmq_ppoll_fd test_xsub_verbose test_pubsub_topics_count ) if(HAVE_FORK) list(APPEND tests test_zmq_ppoll_signals) endif() if(ZMQ_HAVE_BUSY_POLL) list(APPEND tests test_busy_poll) endif() endif() if(ZMQ_HAVE_WS) list(APPEND tests test_ws_transport) if(ZMQ_HAVE_WSS) list(APPEND tests 
test_wss_transport) endif() endif() # add location of platform.hpp for Windows builds if(WIN32) add_definitions(-DZMQ_CUSTOM_PLATFORM_HPP) add_definitions(-D_WINSOCK_DEPRECATED_NO_WARNINGS) # Same name on 64bit systems link_libraries(ws2_32.lib) endif() add_library( unity STATIC "${CMAKE_CURRENT_LIST_DIR}/../external/unity/unity.c" "${CMAKE_CURRENT_LIST_DIR}/../external/unity/unity.h" "${CMAKE_CURRENT_LIST_DIR}/../external/unity/unity_internals.h") set_target_properties(unity PROPERTIES PUBLIC_HEADER "${CMAKE_CURRENT_LIST_DIR}/../external/unity/unity.h") target_compile_definitions(unity PUBLIC "UNITY_USE_COMMAND_LINE_ARGS" "UNITY_EXCLUDE_FLOAT") target_include_directories(unity PUBLIC "${CMAKE_CURRENT_LIST_DIR}/../external/unity") set(TESTUTIL_SOURCES testutil.cpp testutil.hpp testutil_monitoring.cpp testutil_monitoring.hpp testutil_security.cpp testutil_security.hpp testutil_unity.cpp testutil_unity.hpp) if(BUILD_STATIC) add_library(testutil-static STATIC ${TESTUTIL_SOURCES}) target_link_libraries(testutil-static libzmq-static ${OPTIONAL_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT} unity) endif() if(BUILD_SHARED) add_library(testutil STATIC ${TESTUTIL_SOURCES}) target_link_libraries(testutil libzmq ${OPTIONAL_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT} unity) endif() if(BUILD_STATIC AND NOT BUILD_SHARED) # use testutil-static for both tests and unit tests set(TESTUTIL_LIB testutil-static) else() # use testutil for tests and testutil-static for unit tests set(TESTUTIL_LIB testutil) endif() if(MSVC_VERSION LESS 1700) set_source_files_properties("${CMAKE_CURRENT_LIST_DIR}/../external/unity/unity.c" PROPERTIES LANGUAGE CXX) endif() if(MSVC_VERSION LESS 1600) target_compile_definitions(unity PUBLIC "UNITY_EXCLUDE_STDINT_H") endif() # add include dirs for all targets include_directories("${ZeroMQ_SOURCE_DIR}/../include" "${ZeroMQ_BINARY_DIR}") if(WIN32) add_definitions(-D_CRT_NONSTDC_NO_DEPRECATE) endif() # Does not work, times out every time if(WIN32) list(REMOVE_ITEM tests test_many_sockets) endif() foreach(test ${tests}) # target_sources not supported before CMake 3.1 if(ZMQ_HAVE_CURVE AND ${test} MATCHES test_security_curve) add_executable(${test} ${test}.cpp "../src/err.cpp" "../src/random.cpp" "../src/clock.cpp") else() add_executable(${test} ${test}.cpp) endif() target_link_libraries(${test} ${TESTUTIL_LIB}) if(WIN32) # This is the output for Debug dynamic builds on Visual Studio 6.0 You should provide the correct directory, don't # know how to do it automatically find_path(LIBZMQ_PATH "libzmq.lib" PATHS "../bin/Win32/Debug/v120/dynamic") if(NOT ${LIBZMQ_PATH} STREQUAL "LIBZMQ_PATH-NOTFOUND") set_target_properties(${test} PROPERTIES LINK_FLAGS "/LIBPATH:${LIBZMQ_PATH}") endif() else() # per-test directories not generated on OS X / Darwin if(NOT APPLE) link_directories(${test} PRIVATE "${ZeroMQ_SOURCE_DIR}/../lib") endif() endif() if(RT_LIBRARY) target_link_libraries(${test} ${RT_LIBRARY}) endif() if (WITH_GSSAPI_KRB5) target_link_libraries(${test} ${GSSAPI_KRB5_LIBRARIES}) endif() if(CMAKE_SYSTEM_NAME MATCHES "QNX") target_link_libraries(${test} socket) target_link_libraries(${test} m) endif() if(WIN32) add_test( NAME ${test} WORKING_DIRECTORY ${LIBRARY_OUTPUT_PATH} COMMAND ${test}) else() if(${test} MATCHES "_cap_net_admin") add_test(NAME ${test} COMMAND sh -c "sudo ${CAPSH_PROGRAM} --caps=cap_net_admin+eip -- -c $<TARGET_FILE:${test}>") else() add_test(NAME ${test} COMMAND ${test}) endif() endif() set_tests_properties(${test} PROPERTIES TIMEOUT 10) set_tests_properties(${test} PROPERTIES 
SKIP_RETURN_CODE 77) if(QNX) install(TARGETS ${test} RUNTIME DESTINATION bin/) endif() endforeach() # override timeout for these tests set_tests_properties(test_heartbeats PROPERTIES TIMEOUT 60) if(WIN32 AND ENABLE_DRAFTS) set_tests_properties(test_radio_dish PROPERTIES TIMEOUT 30) endif() if(NOT CMAKE_SYSTEM_NAME MATCHES "Linux") if(ZMQ_HAVE_CURVE) set_tests_properties(test_security_curve PROPERTIES TIMEOUT 60) endif() endif() set_tests_properties(test_security_zap PROPERTIES TIMEOUT 60) set_tests_properties(test_reconnect_ivl PROPERTIES TIMEOUT 15) # Check whether all tests in the current folder are present file(READ "${CMAKE_CURRENT_LIST_FILE}" CURRENT_LIST_FILE_CONTENT) file(GLOB ALL_TEST_SOURCES "test_*.cpp") foreach(TEST_SOURCE ${ALL_TEST_SOURCES}) get_filename_component(TESTNAME "${TEST_SOURCE}" NAME_WE) string(REGEX MATCH "${TESTNAME}" MATCH_TESTNAME "${CURRENT_LIST_FILE_CONTENT}") if(NOT MATCH_TESTNAME) message(AUTHOR_WARNING "Test '${TESTNAME}' is not known to CTest.") endif() endforeach()
sophomore_public/libzmq
tests/CMakeLists.txt
Text
gpl-3.0
8,855
# Guidelines for tests

Write your test case as if you were writing clean application code. It should be safe to compile on all platforms.

Normally, you should only include the header files from the tests directory, e.g. `testutil.hpp`. Do not include files from src. Do not use the internal libzmq API; tests for that should be placed in unittests instead.

If you must write non-portable code, wrap it in #ifdefs to ensure it will compile and run on all systems.

Note that testutil.hpp includes platform.h. Do not include it yourself, as its location changes depending on the build system and OS.

All sources must contain the correct copyright header. Please copy from test_system.cpp if you're not certain.

Write new tests using the unity test framework. For an example, see test_sockopt_hwm.

Please use only ANSI C99 in test cases, no C++. This is to make the code more reusable.

On many slower environments, like embedded systems, VMs or CI systems, tests might fail because it takes time for sockets to settle after a connect. If you need to add a sleep, please be consistent with all the other tests and use:

    msleep (SETTLE_TIME);

# Ensure proper cleanup

If a test program uses unity, it will execute test cases individually and will continue to run further test cases if an assertion in one test case fails; however, the test case that had the assertion failure will be aborted. To ensure that the resources of a test case are properly cleaned up, use appropriate setUp and tearDown functions. These are run by unity before each test case starts and after it ends (whether successfully or not). The same setUp and tearDown functions are used for all test cases in a test program.

For many test cases, the following setUp and tearDown functions will be appropriate:

    void setUp ()
    {
        setup_test_context ();
    }

    void tearDown ()
    {
        teardown_test_context ();
    }

Within the tests, do not use zmq_socket and zmq_close, but test_context_socket and test_context_socket_close instead. These functions register/unregister sockets with the test_context. All sockets not closed when tearDown is executed will forcibly be closed with linger=0 before terminating the context. Note that it is a misuse not to close sockets during successful test execution, and a warning will be output.

# Building tests in Windows

The tests are only built via cmake, not when using the checked-in Visual Studio .sln files.
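As a sketch only (the test name and the inproc endpoint are placeholders, not an existing test), a complete test program following these guidelines and the conventions used throughout this directory looks roughly like this:

    /* SPDX-License-Identifier: MPL-2.0 */

    #include "testutil.hpp"
    #include "testutil_unity.hpp"

    SETUP_TEARDOWN_TESTCONTEXT

    void test_roundtrip ()
    {
        void *sb = test_context_socket (ZMQ_PAIR);
        TEST_ASSERT_SUCCESS_ERRNO (zmq_bind (sb, "inproc://example"));

        void *sc = test_context_socket (ZMQ_PAIR);
        TEST_ASSERT_SUCCESS_ERRNO (zmq_connect (sc, "inproc://example"));

        bounce (sb, sc);

        test_context_socket_close (sc);
        test_context_socket_close (sb);
    }

    int main (void)
    {
        setup_test_environment ();
        UNITY_BEGIN ();
        RUN_TEST (test_roundtrip);
        return UNITY_END ();
    }

SETUP_TEARDOWN_TESTCONTEXT is the shorthand the existing tests use for the setUp/tearDown pair shown above.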
sophomore_public/libzmq
tests/README.md
Markdown
gpl-3.0
2,442
/* SPDX-License-Identifier: MPL-2.0 */ #include "testutil.hpp" #include "testutil_unity.hpp" #include <string.h> SETUP_TEARDOWN_TESTCONTEXT static const char test_endpoint[] = "ipc://@tmp-tester"; static const char test_endpoint_empty[] = "ipc://@"; void test_roundtrip () { void *sb = test_context_socket (ZMQ_DEALER); TEST_ASSERT_SUCCESS_ERRNO (zmq_bind (sb, test_endpoint)); char endpoint[MAX_SOCKET_STRING]; size_t size = sizeof (endpoint); TEST_ASSERT_SUCCESS_ERRNO ( zmq_getsockopt (sb, ZMQ_LAST_ENDPOINT, endpoint, &size)); TEST_ASSERT_EQUAL_INT (0, strncmp (endpoint, test_endpoint, size)); void *sc = test_context_socket (ZMQ_DEALER); TEST_ASSERT_SUCCESS_ERRNO (zmq_connect (sc, test_endpoint)); bounce (sb, sc); test_context_socket_close (sc); test_context_socket_close (sb); } void test_empty_abstract_name () { void *sb = test_context_socket (ZMQ_DEALER); TEST_ASSERT_FAILURE_ERRNO (EINVAL, zmq_bind (sb, test_endpoint_empty)); test_context_socket_close (sb); } int main (void) { setup_test_environment (); UNITY_BEGIN (); RUN_TEST (test_roundtrip); RUN_TEST (test_empty_abstract_name); return UNITY_END (); }
sophomore_public/libzmq
tests/test_abstract_ipc.cpp
C++
gpl-3.0
1,217
/* SPDX-License-Identifier: MPL-2.0 */ #include "testutil.hpp" #include "testutil_unity.hpp" SETUP_TEARDOWN_TESTCONTEXT void test_tipc_port_name_and_domain () { // test Port Name addressing void *sb = test_context_socket (ZMQ_REP); TEST_ASSERT_SUCCESS_ERRNO (zmq_bind (sb, "tipc://{5560,0,0}")); void *sc = test_context_socket (ZMQ_REQ); TEST_ASSERT_SUCCESS_ERRNO (zmq_connect (sc, "tipc://{5560,0}@0.0.0")); bounce (sb, sc); test_context_socket_close (sc); test_context_socket_close (sb); } void test_tipc_port_identity () { char endpoint[256]; unsigned int z, c, n, ref; void *sb = test_context_socket (ZMQ_REP); void *sc = test_context_socket (ZMQ_REQ); // Test binding to random Port Identity and // test resolving assigned address, should return a properly formatted string bind_loopback_tipc (sb, endpoint, sizeof endpoint); int rc = sscanf (&endpoint[0], "tipc://<%u.%u.%u:%u>", &z, &c, &n, &ref); TEST_ASSERT_EQUAL_INT (4, rc); TEST_ASSERT_NOT_EQUAL_MESSAGE ( 0, ref, "tipc port number must not be 0 after random assignment"); TEST_ASSERT_SUCCESS_ERRNO (zmq_connect (sc, endpoint)); bounce (sb, sc); test_context_socket_close (sc); test_context_socket_close (sb); } void test_tipc_bad_addresses () { // Test Port Name addressing void *sb = test_context_socket (ZMQ_REP); // Test binding to a fixed address, should fail TEST_ASSERT_FAILURE_ERRNO (EINVAL, zmq_bind (sb, "tipc://<1.2.3:123123>")); // Test connecting to random identity, should fail TEST_ASSERT_FAILURE_ERRNO (EINVAL, zmq_connect (sb, "tipc://<*>")); // Clean up test_context_socket_close (sb); } int main () { setup_test_environment (); if (!is_tipc_available ()) { printf ("TIPC environment unavailable, skipping test\n"); return 77; } UNITY_BEGIN (); RUN_TEST (test_tipc_port_name_and_domain); RUN_TEST (test_tipc_port_identity); RUN_TEST (test_tipc_bad_addresses); return UNITY_END (); }
sophomore_public/libzmq
tests/test_address_tipc.cpp
C++
gpl-3.0
2,060
/* SPDX-License-Identifier: MPL-2.0 */ /* * File for adding tests for ancillary API methods and other miscellaneous * API internals. Please ensure that when adding such tests into this file, * that they are short-lived so they do not trigger timeouts in the * CI build environments. */ #include "testutil.hpp" #include "testutil_unity.hpp" void setUp () { } void tearDown () { } void test_version () { int major, minor, patch; zmq_version (&major, &minor, &patch); TEST_ASSERT_EQUAL_INT (ZMQ_VERSION_MAJOR, major); TEST_ASSERT_EQUAL_INT (ZMQ_VERSION_MINOR, minor); TEST_ASSERT_EQUAL_INT (ZMQ_VERSION_PATCH, patch); } void test_strerrror () { TEST_ASSERT_NOT_NULL (zmq_strerror (EINVAL)); } int main () { setup_test_environment (); UNITY_BEGIN (); RUN_TEST (test_version); RUN_TEST (test_strerrror); return UNITY_END (); }
sophomore_public/libzmq
tests/test_ancillaries.cpp
C++
gpl-3.0
880
/* SPDX-License-Identifier: MPL-2.0 */ #include "testutil.hpp" #include "testutil_unity.hpp" #include <unity.h> #include <string.h> void setUp () { } void tearDown () { } void test_app_meta_reqrep () { void *ctx; zmq_msg_t msg; void *rep_sock, *req_sock; char connect_address[MAX_SOCKET_STRING]; const char *req_hello = "X-hello:hello"; const char *req_connection = "X-connection:primary"; const char *req_z85 = "X-bin:009c6"; const char *rep_hello = "X-hello:world"; const char *rep_connection = "X-connection:backup"; const char *bad_strings[] = { ":", "key:", ":value", "keyvalue", "", "X-" "KeyTooLongKeyTooLongKeyTooLongKeyTooLongKeyTooLongKeyTooLongKeyTooLongKe" "yTooLongKeyTooLongKeyTooLongKeyTooLongKeyTooLongKeyTooLongKeyTooLongKeyT" "ooLongKeyTooLongKeyTooLongKeyTooLongKeyTooLongKeyTooLongKeyTooLongKeyToo" "LongKeyTooLongKeyTooLongKeyTooLongKeyTooLong:value"}; ctx = zmq_ctx_new (); rep_sock = zmq_socket (ctx, ZMQ_REP); TEST_ASSERT_NOT_NULL (rep_sock); req_sock = zmq_socket (ctx, ZMQ_REQ); TEST_ASSERT_NOT_NULL (req_sock); int rc = zmq_setsockopt (rep_sock, ZMQ_METADATA, rep_hello, strlen (rep_hello)); TEST_ASSERT_EQUAL_INT (0, rc); int l = 0; rc = zmq_setsockopt (rep_sock, ZMQ_LINGER, &l, sizeof (l)); TEST_ASSERT_EQUAL_INT (0, rc); rc = zmq_setsockopt (rep_sock, ZMQ_METADATA, rep_connection, strlen (rep_connection)); TEST_ASSERT_EQUAL_INT (0, rc); for (int i = 0; i < 6; i++) { rc = zmq_setsockopt (rep_sock, ZMQ_METADATA, bad_strings[i], strlen (bad_strings[i])); TEST_ASSERT_EQUAL_INT (-1, rc); } bind_loopback_ipv4 (rep_sock, connect_address, sizeof connect_address); l = 0; rc = zmq_setsockopt (req_sock, ZMQ_LINGER, &l, sizeof (l)); TEST_ASSERT_EQUAL_INT (0, rc); rc = zmq_setsockopt (req_sock, ZMQ_METADATA, req_hello, strlen (req_hello)); TEST_ASSERT_EQUAL_INT (0, rc); rc = zmq_setsockopt (req_sock, ZMQ_METADATA, req_connection, strlen (req_connection)); TEST_ASSERT_EQUAL_INT (0, rc); rc = zmq_setsockopt (req_sock, ZMQ_METADATA, req_z85, strlen (req_z85)); TEST_ASSERT_EQUAL_INT (0, rc); rc = zmq_connect (req_sock, connect_address); TEST_ASSERT_EQUAL_INT (0, rc); rc = zmq_msg_init_size (&msg, 1); TEST_ASSERT_EQUAL_INT (0, rc); char *data = static_cast<char *> (zmq_msg_data (&msg)); data[0] = 1; rc = zmq_msg_send (&msg, req_sock, 0); TEST_ASSERT_EQUAL_INT (1, rc); rc = zmq_msg_init (&msg); TEST_ASSERT_EQUAL_INT (0, rc); rc = zmq_msg_recv (&msg, rep_sock, 0); TEST_ASSERT_EQUAL_INT (1, rc); TEST_ASSERT_EQUAL_STRING ("hello", zmq_msg_gets (&msg, "X-hello")); TEST_ASSERT_EQUAL_STRING ("primary", zmq_msg_gets (&msg, "X-connection")); const char *const bindata = zmq_msg_gets (&msg, "X-bin"); TEST_ASSERT_NOT_NULL (bindata); uint8_t rawdata[4]; const uint8_t *const ret = zmq_z85_decode (rawdata, bindata); TEST_ASSERT_NOT_NULL (ret); TEST_ASSERT_EQUAL_UINT8 (0, rawdata[0]); TEST_ASSERT_EQUAL_UINT8 (1, rawdata[1]); TEST_ASSERT_EQUAL_UINT8 (2, rawdata[2]); TEST_ASSERT_EQUAL_UINT8 (3, rawdata[3]); TEST_ASSERT_NULL (zmq_msg_gets (&msg, "X-foobar")); TEST_ASSERT_NULL (zmq_msg_gets (&msg, "foobar")); rc = zmq_msg_send (&msg, rep_sock, 0); TEST_ASSERT_EQUAL_INT (1, rc); rc = zmq_msg_recv (&msg, req_sock, 0); TEST_ASSERT_EQUAL_INT (1, rc); TEST_ASSERT_EQUAL_STRING ("world", zmq_msg_gets (&msg, "X-hello")); TEST_ASSERT_EQUAL_STRING ("backup", zmq_msg_gets (&msg, "X-connection")); rc = zmq_msg_close (&msg); TEST_ASSERT_EQUAL_INT (0, rc); rc = zmq_close (req_sock); TEST_ASSERT_EQUAL_INT (0, rc); rc = zmq_close (rep_sock); TEST_ASSERT_EQUAL_INT (0, rc); zmq_ctx_term (ctx); } int main () { setup_test_environment 
(); UNITY_BEGIN (); RUN_TEST (test_app_meta_reqrep); return UNITY_END (); }
sophomore_public/libzmq
tests/test_app_meta.cpp
C++
gpl-3.0
4,127
/* SPDX-License-Identifier: MPL-2.0 */ #include "testutil.hpp" #include "testutil_unity.hpp" #include <unity.h> void setUp () { } void tearDown () { } void test () { void *counter = zmq_atomic_counter_new (); TEST_ASSERT_EQUAL_INT (0, zmq_atomic_counter_value (counter)); TEST_ASSERT_EQUAL_INT (0, zmq_atomic_counter_inc (counter)); TEST_ASSERT_EQUAL_INT (1, zmq_atomic_counter_inc (counter)); TEST_ASSERT_EQUAL_INT (2, zmq_atomic_counter_inc (counter)); TEST_ASSERT_EQUAL_INT (3, zmq_atomic_counter_value (counter)); TEST_ASSERT_EQUAL_INT (1, zmq_atomic_counter_dec (counter)); TEST_ASSERT_EQUAL_INT (1, zmq_atomic_counter_dec (counter)); TEST_ASSERT_EQUAL_INT (0, zmq_atomic_counter_dec (counter)); zmq_atomic_counter_set (counter, 2); TEST_ASSERT_EQUAL_INT (1, zmq_atomic_counter_dec (counter)); TEST_ASSERT_EQUAL_INT (0, zmq_atomic_counter_dec (counter)); zmq_atomic_counter_destroy (&counter); } int main () { UNITY_BEGIN (); RUN_TEST (test); return UNITY_END (); }
sophomore_public/libzmq
tests/test_atomics.cpp
C++
gpl-3.0
1,041
/* SPDX-License-Identifier: MPL-2.0 */ #include "testutil.hpp" #include "testutil_unity.hpp" void setUp () { } void tearDown () { } // Test vector: rfc.zeromq.org/spec:32/Z85 void test__zmq_z85_encode__valid__success () { static const size_t size = 8; static const size_t length = size * 5 / 4; static const uint8_t decoded[size] = {0x86, 0x4F, 0xD2, 0x6F, 0xB5, 0x59, 0xF7, 0x5B}; static const char expected[length + 1] = "HelloWorld"; char out_encoded[length + 1] = {0}; errno = 0; TEST_ASSERT_NOT_NULL (zmq_z85_encode (out_encoded, decoded, size)); TEST_ASSERT_EQUAL_STRING (expected, out_encoded); TEST_ASSERT_EQUAL_INT (0, zmq_errno ()); } // Buffer length must be evenly divisible by 4 or must fail with EINVAL. void test__zmq_z85_encode__invalid__failure (size_t size_) { errno = 0; TEST_ASSERT_NULL (zmq_z85_encode (NULL, NULL, size_)); TEST_ASSERT_EQUAL_INT (EINVAL, zmq_errno ()); } // Test vector: rfc.zeromq.org/spec:32/Z85 void test__zmq_z85_decode__valid__success () { static const size_t size = 10 * 4 / 5; static const uint8_t expected[size] = {0x86, 0x4F, 0xD2, 0x6F, 0xB5, 0x59, 0xF7, 0x5B}; static const char *encoded = "HelloWorld"; uint8_t out_decoded[size] = {0}; errno = 0; TEST_ASSERT_NOT_NULL (zmq_z85_decode (out_decoded, encoded)); TEST_ASSERT_EQUAL_INT (0, zmq_errno ()); TEST_ASSERT_EQUAL_UINT8_ARRAY (expected, out_decoded, size); } // Invalid input data must fail with EINVAL. template <size_t SIZE> void test__zmq_z85_decode__invalid__failure (const char (&encoded_)[SIZE]) { uint8_t decoded[SIZE * 4 / 5 + 1]; errno = 0; TEST_ASSERT_NULL (zmq_z85_decode (decoded, encoded_)); TEST_ASSERT_EQUAL_INT (EINVAL, zmq_errno ()); } // call zmq_z85_encode, then zmq_z85_decode, and compare the results with the original template <size_t SIZE> void test__zmq_z85_encode__zmq_z85_decode__roundtrip ( const uint8_t (&test_data_)[SIZE]) { char test_data_z85[SIZE * 5 / 4 + 1]; const char *const res1 = zmq_z85_encode (test_data_z85, test_data_, SIZE); TEST_ASSERT_NOT_NULL (res1); uint8_t test_data_decoded[SIZE]; const uint8_t *const res2 = zmq_z85_decode (test_data_decoded, test_data_z85); TEST_ASSERT_NOT_NULL (res2); TEST_ASSERT_EQUAL_UINT8_ARRAY (test_data_, test_data_decoded, SIZE); } // call zmq_z85_encode, then zmq_z85_decode, and compare the results with the original template <size_t SIZE> void test__zmq_z85_decode__zmq_z85_encode__roundtrip ( const char (&test_data_)[SIZE]) { const size_t decoded_size = (SIZE - 1) * 4 / 5; uint8_t test_data_decoded[decoded_size]; const uint8_t *const res1 = zmq_z85_decode (test_data_decoded, test_data_); TEST_ASSERT_NOT_NULL (res1); char test_data_z85[SIZE]; const char *const res2 = zmq_z85_encode (test_data_z85, test_data_decoded, decoded_size); TEST_ASSERT_NOT_NULL (res2); TEST_ASSERT_EQUAL_UINT8_ARRAY (test_data_, test_data_z85, SIZE); } #define def_test__zmq_z85_basename(basename, name, param) \ void test__zmq_z85_##basename##_##name () \ { \ test__zmq_z85_##basename (param); \ } #define def_test__zmq_z85_encode__invalid__failure(name, param) \ def_test__zmq_z85_basename (encode__invalid__failure, name, param) def_test__zmq_z85_encode__invalid__failure (1, 1) def_test__zmq_z85_encode__invalid__failure (42, 42) #define def_test__zmq_z85_decode__invalid__failure(name, param) \ def_test__zmq_z85_basename (decode__invalid__failure, name, param) // String length must be evenly divisible by 5 or must fail with EINVAL. 
def_test__zmq_z85_decode__invalid__failure (indivisble_by_5_multiple_chars, "01234567") def_test__zmq_z85_decode__invalid__failure (indivisble_by_5_one_char, "0") // decode invalid data with the maximum representable value def_test__zmq_z85_decode__invalid__failure (max, "#####") // decode invalid data with the minimum value beyond the limit // "%nSc0" is 0xffffffff def_test__zmq_z85_decode__invalid__failure (above_limit, "%nSc1") // decode invalid data with an invalid character in the range of valid // characters def_test__zmq_z85_decode__invalid__failure (char_within, "####\0047") // decode invalid data with an invalid character just below the range of valid // characters def_test__zmq_z85_decode__invalid__failure (char_adjacent_below, "####\0200") // decode invalid data with an invalid character just above the range of valid // characters def_test__zmq_z85_decode__invalid__failure (char_adjacent_above, "####\0037") #define def_test__encode__zmq_z85_decode__roundtrip(name, param) \ def_test__zmq_z85_basename (encode__zmq_z85_decode__roundtrip, name, param) const uint8_t test_data_min[] = {0x00, 0x00, 0x00, 0x00}; const uint8_t test_data_max[] = {0xff, 0xff, 0xff, 0xff}; def_test__encode__zmq_z85_decode__roundtrip (min, test_data_min) def_test__encode__zmq_z85_decode__roundtrip (max, test_data_max) #define def_test__decode__zmq_z85_encode__roundtrip(name, param) \ def_test__zmq_z85_basename (decode__zmq_z85_encode__roundtrip, name, param) const char test_data_regular[] = "r^/rM9M=rMToK)63O8dCvd9D<PY<7iGlC+{BiSnG"; def_test__decode__zmq_z85_encode__roundtrip (regular, test_data_regular) int main () { UNITY_BEGIN (); RUN_TEST (test__zmq_z85_encode__valid__success); RUN_TEST (test__zmq_z85_encode__invalid__failure_1); RUN_TEST (test__zmq_z85_encode__invalid__failure_42); RUN_TEST (test__zmq_z85_decode__valid__success); RUN_TEST ( test__zmq_z85_decode__invalid__failure_indivisble_by_5_multiple_chars); RUN_TEST (test__zmq_z85_decode__invalid__failure_indivisble_by_5_one_char); RUN_TEST (test__zmq_z85_decode__invalid__failure_max); RUN_TEST (test__zmq_z85_decode__invalid__failure_above_limit); RUN_TEST (test__zmq_z85_decode__invalid__failure_char_within); RUN_TEST (test__zmq_z85_decode__invalid__failure_char_adjacent_below); RUN_TEST (test__zmq_z85_decode__invalid__failure_char_adjacent_above); RUN_TEST (test__zmq_z85_encode__zmq_z85_decode__roundtrip_min); RUN_TEST (test__zmq_z85_encode__zmq_z85_decode__roundtrip_max); RUN_TEST (test__zmq_z85_decode__zmq_z85_encode__roundtrip_regular); return UNITY_END (); }
sophomore_public/libzmq
tests/test_base85.cpp
C++
gpl-3.0
6,673
/* SPDX-License-Identifier: MPL-2.0 */ #include "testutil.hpp" #include "testutil_unity.hpp" SETUP_TEARDOWN_TESTCONTEXT void test_x () { void *sb = test_context_socket (ZMQ_DEALER); void *sc = test_context_socket (ZMQ_DEALER); TEST_ASSERT_SUCCESS_ERRNO (zmq_connect (sc, ENDPOINT_3)); send_string_expect_success (sc, "foobar", 0); send_string_expect_success (sc, "baz", 0); send_string_expect_success (sc, "buzz", 0); TEST_ASSERT_SUCCESS_ERRNO (zmq_bind (sb, ENDPOINT_3)); recv_string_expect_success (sb, "foobar", 0); recv_string_expect_success (sb, "baz", 0); recv_string_expect_success (sb, "buzz", 0); test_context_socket_close (sc); test_context_socket_close (sb); } int main (void) { setup_test_environment (); UNITY_BEGIN (); RUN_TEST (test_x); return UNITY_END (); }
sophomore_public/libzmq
tests/test_bind_after_connect_tcp.cpp
C++
gpl-3.0
849
/* SPDX-License-Identifier: MPL-2.0 */ #ifdef ZMQ_USE_FUZZING_ENGINE #include <fuzzer/FuzzedDataProvider.h> #endif #include <string.h> #include "testutil.hpp" #include "testutil_security.hpp" // Test that the ZMTP engine handles invalid handshake when binding // https://rfc.zeromq.org/spec/37/ extern "C" int LLVMFuzzerTestOneInput (const uint8_t *data, size_t size) { const char *fixed_client_public = "{{k*81)yMWEF{/BxdMd[5RL^qRFxBgoL<8m.D^KD"; const char *fixed_client_secret = "N?Gmik8R[2ACw{b7*[-$S6[4}aO#?DB?#=<OQPc7"; const char *fixed_server_public = "3.9-xXwy{g*w72TP*3iB9IJJRxlBH<ufTAvPd2>C"; const char *fixed_server_secret = "T}t5GLq%&Qm1)y3ywu-}pY3KEA//{^Ut!M1ut+B4"; void *handler; void *zap_thread; void *server; void *server_mon; char my_endpoint[MAX_SOCKET_STRING]; setup_test_context (); memcpy (valid_client_public, fixed_client_public, 41); setup_context_and_server_side ( &handler, &zap_thread, &server, &server_mon, my_endpoint, &zap_handler, &socket_config_curve_server, (void *) fixed_server_secret); fd_t client = connect_socket (my_endpoint); // If there is not enough data for a full greeting, just send what we can // Otherwise send greeting first, as expected by the protocol uint8_t buf[512]; if (size >= 64) { send (client, (void *) data, 64, MSG_NOSIGNAL); data += 64; size -= 64; } recv (client, buf, 64, 0); // Then send HELLO and expect WELCOME if there's enough data if (size >= 202) { send (client, (void *) data, 202, MSG_NOSIGNAL); data += 202; size -= 202; recv (client, buf, 170, MSG_DONTWAIT); } // Then send READY and expect INITIATE if there's enough data if (size >= 301) { send (client, (void *) data, 301, MSG_NOSIGNAL); data += 301; size -= 301; recv (client, buf, 512, MSG_DONTWAIT); } msleep (250); for (ssize_t sent = 0; size > 0 && (sent != -1 || errno == EINTR); size -= sent > 0 ? sent : 0, data += sent > 0 ? sent : 0) sent = send (client, (const char *) data, size, MSG_NOSIGNAL); msleep (250); // Drain the queue, if any zmq_msg_t msg; zmq_msg_init (&msg); while (-1 != zmq_msg_recv (&msg, server, ZMQ_DONTWAIT)) { zmq_msg_close (&msg); zmq_msg_init (&msg); } // A well-behaved client should work while the malformed data from the other // is being received curve_client_data_t curve_client_data = { fixed_server_public, fixed_client_public, fixed_client_secret}; void *client_mon; void *client_good = create_and_connect_client ( my_endpoint, socket_config_curve_client, &curve_client_data, &client_mon); bounce (server, client_good); close (client); test_context_socket_close_zero_linger (client_good); test_context_socket_close_zero_linger (client_mon); shutdown_context_and_server_side (zap_thread, server, server_mon, handler); teardown_test_context (); return 0; } #ifndef ZMQ_USE_FUZZING_ENGINE void test_bind_curve_fuzzer () { uint8_t **data; size_t *len, num_cases = 0; if (fuzzer_corpus_encode ( "tests/libzmq-fuzz-corpora/test_bind_curve_fuzzer_seed_corpus", &data, &len, &num_cases) != 0) exit (77); while (num_cases-- > 0) { TEST_ASSERT_SUCCESS_ERRNO ( LLVMFuzzerTestOneInput (data[num_cases], len[num_cases])); free (data[num_cases]); } free (data); free (len); } int main (int argc, char **argv) { setup_test_environment (); UNITY_BEGIN (); RUN_TEST (test_bind_curve_fuzzer); return UNITY_END (); } #endif
sophomore_public/libzmq
tests/test_bind_curve_fuzzer.cpp
C++
gpl-3.0
3,755