| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 0–56.1M | stringclasses 515 values | stringlengths 2–147 | stringclasses 447 values | stringclasses 7 values | int64 0–56.8M |
/******************************************************************************/
/* 0MQ Internal Use */
/******************************************************************************/
#define LIBZMQ_UNUSED(object) (void) object
#define LIBZMQ_DELETE(p_object) \
{ \
delete p_object; \
p_object = 0; \
}
/******************************************************************************/
#if !defined ZMQ_NOEXCEPT
#if defined ZMQ_HAVE_NOEXCEPT
#define ZMQ_NOEXCEPT noexcept
#else
#define ZMQ_NOEXCEPT
#endif
#endif
#if !defined ZMQ_OVERRIDE
#if defined ZMQ_HAVE_NOEXCEPT
#define ZMQ_OVERRIDE override
#else
#define ZMQ_OVERRIDE
#endif
#endif
#if !defined ZMQ_FINAL
#if defined ZMQ_HAVE_NOEXCEPT
#define ZMQ_FINAL final
#else
#define ZMQ_FINAL
#endif
#endif
#if !defined ZMQ_DEFAULT
#if defined ZMQ_HAVE_NOEXCEPT
#define ZMQ_DEFAULT = default;
#else
#define ZMQ_DEFAULT \
{ \
}
#endif
#endif
#if !defined ZMQ_NON_COPYABLE_NOR_MOVABLE
#if defined ZMQ_HAVE_NOEXCEPT
#define ZMQ_NON_COPYABLE_NOR_MOVABLE(classname) \
public: \
classname (const classname &) = delete; \
classname &operator= (const classname &) = delete; \
classname (classname &&) = delete; \
classname &operator= (classname &&) = delete;
#else
#define ZMQ_NON_COPYABLE_NOR_MOVABLE(classname) \
private: \
classname (const classname &); \
classname &operator= (const classname &);
#endif
#endif
| sophomore_public/libzmq | src/macros.hpp | C++ | gpl-3.0 | 2,185 |
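// Illustrative sketch (not part of libzmq): how the helpers from src/macros.hpp
// above are typically applied. The class is hypothetical; only the macros come
// from the header, and the sketch assumes "macros.hpp" is on the include path.
#include "macros.hpp"

class noncopyable_sketch_t
{
  public:
    noncopyable_sketch_t () : _data (new int (42)) {}
    ~noncopyable_sketch_t ()
    {
        //  LIBZMQ_DELETE frees the object and resets the pointer to 0, so an
        //  accidental second delete becomes a harmless no-op.
        LIBZMQ_DELETE (_data);
    }

  private:
    int *_data;

    //  Expands to deleted copy/move members under C++11 (ZMQ_HAVE_NOEXCEPT),
    //  or to private, unimplemented copy declarations on older compilers.
    ZMQ_NON_COPYABLE_NOR_MOVABLE (noncopyable_sketch_t)
};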
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include "mailbox.hpp"
#include "err.hpp"
zmq::mailbox_t::mailbox_t ()
{
// Get the pipe into passive state. That way, if the user starts by
// polling on the associated file descriptor it will get woken up when
// a new command is posted.
const bool ok = _cpipe.check_read ();
zmq_assert (!ok);
_active = false;
}
zmq::mailbox_t::~mailbox_t ()
{
// TODO: Retrieve and deallocate commands inside the _cpipe.
// Work around problem that other threads might still be in our
// send() method, by waiting on the mutex before disappearing.
_sync.lock ();
_sync.unlock ();
}
zmq::fd_t zmq::mailbox_t::get_fd () const
{
return _signaler.get_fd ();
}
void zmq::mailbox_t::send (const command_t &cmd_)
{
_sync.lock ();
_cpipe.write (cmd_, false);
const bool ok = _cpipe.flush ();
_sync.unlock ();
if (!ok)
_signaler.send ();
}
int zmq::mailbox_t::recv (command_t *cmd_, int timeout_)
{
// Try to get the command straight away.
if (_active) {
if (_cpipe.read (cmd_))
return 0;
// If there are no more commands available, switch into passive state.
_active = false;
}
// Wait for signal from the command sender.
int rc = _signaler.wait (timeout_);
if (rc == -1) {
errno_assert (errno == EAGAIN || errno == EINTR);
return -1;
}
// Receive the signal.
rc = _signaler.recv_failable ();
if (rc == -1) {
errno_assert (errno == EAGAIN);
return -1;
}
// Switch into active state.
_active = true;
// Get a command.
const bool ok = _cpipe.read (cmd_);
zmq_assert (ok);
return 0;
}
bool zmq::mailbox_t::valid () const
{
return _signaler.valid ();
}
| sophomore_public/libzmq | src/mailbox.cpp | C++ | gpl-3.0 | 1,832 |
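// Illustrative sketch (not part of libzmq): the reader-side pattern the mailbox
// above is designed for. A single reader drains commands with recv() while any
// number of writer threads call send(). The function name is hypothetical, and
// the sketch assumes the internal headers are available.
#include "mailbox.hpp"
#include "command.hpp"

static void drain_mailbox_sketch (zmq::mailbox_t &mailbox_)
{
    zmq::command_t cmd;
    //  Non-blocking drain: recv() returns 0 while commands are queued and
    //  -1 with errno == EAGAIN once the pipe is empty again (passive state).
    while (mailbox_.recv (&cmd, 0) == 0) {
        //  A real reader would dispatch the command to its destination
        //  object here.
    }
}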
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_MAILBOX_HPP_INCLUDED__
#define __ZMQ_MAILBOX_HPP_INCLUDED__
#include <stddef.h>
#include "signaler.hpp"
#include "fd.hpp"
#include "config.hpp"
#include "command.hpp"
#include "ypipe.hpp"
#include "mutex.hpp"
#include "i_mailbox.hpp"
namespace zmq
{
class mailbox_t ZMQ_FINAL : public i_mailbox
{
public:
mailbox_t ();
~mailbox_t ();
fd_t get_fd () const;
void send (const command_t &cmd_);
int recv (command_t *cmd_, int timeout_);
bool valid () const;
#ifdef HAVE_FORK
// Close the file descriptors in the signaller. This is used in a forked
// child process so that they do not interfere with the context in the
// parent process.
void forked () ZMQ_FINAL { _signaler.forked (); }
#endif
private:
// The pipe to store actual commands.
typedef ypipe_t<command_t, command_pipe_granularity> cpipe_t;
cpipe_t _cpipe;
// Signaler to pass signals from writer thread to reader thread.
signaler_t _signaler;
// There's only one thread receiving from the mailbox, but there
// is arbitrary number of threads sending. Given that ypipe requires
// synchronised access on both of its endpoints, we have to synchronise
// the sending side.
mutex_t _sync;
// True if the underlying pipe is active, ie. when we are allowed to
// read commands from it.
bool _active;
ZMQ_NON_COPYABLE_NOR_MOVABLE (mailbox_t)
};
}
#endif
| sophomore_public/libzmq | src/mailbox.hpp | C++ | gpl-3.0 | 1,511 |
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include "mailbox_safe.hpp"
#include "clock.hpp"
#include "err.hpp"
#include <algorithm>
zmq::mailbox_safe_t::mailbox_safe_t (mutex_t *sync_) : _sync (sync_)
{
// Get the pipe into passive state. That way, if the user starts by
// polling on the associated file descriptor it will get woken up when
// a new command is posted.
const bool ok = _cpipe.check_read ();
zmq_assert (!ok);
}
zmq::mailbox_safe_t::~mailbox_safe_t ()
{
// TODO: Retrieve and deallocate commands inside the cpipe.
// Work around problem that other threads might still be in our
// send() method, by waiting on the mutex before disappearing.
_sync->lock ();
_sync->unlock ();
}
void zmq::mailbox_safe_t::add_signaler (signaler_t *signaler_)
{
_signalers.push_back (signaler_);
}
void zmq::mailbox_safe_t::remove_signaler (signaler_t *signaler_)
{
// TODO: make a copy of array and signal outside the lock
const std::vector<zmq::signaler_t *>::iterator end = _signalers.end ();
const std::vector<signaler_t *>::iterator it =
std::find (_signalers.begin (), end, signaler_);
if (it != end)
_signalers.erase (it);
}
void zmq::mailbox_safe_t::clear_signalers ()
{
_signalers.clear ();
}
void zmq::mailbox_safe_t::send (const command_t &cmd_)
{
_sync->lock ();
_cpipe.write (cmd_, false);
const bool ok = _cpipe.flush ();
if (!ok) {
_cond_var.broadcast ();
for (std::vector<signaler_t *>::iterator it = _signalers.begin (),
end = _signalers.end ();
it != end; ++it) {
(*it)->send ();
}
}
_sync->unlock ();
}
int zmq::mailbox_safe_t::recv (command_t *cmd_, int timeout_)
{
// Try to get the command straight away.
if (_cpipe.read (cmd_))
return 0;
// If the timeout is zero it is quicker to release the lock, giving others a
// chance to send a command, and immediately relock it.
if (timeout_ == 0) {
_sync->unlock ();
_sync->lock ();
} else {
// Wait for signal from the command sender.
const int rc = _cond_var.wait (_sync, timeout_);
if (rc == -1) {
errno_assert (errno == EAGAIN || errno == EINTR);
return -1;
}
}
// Another thread may have already fetched the command.
const bool ok = _cpipe.read (cmd_);
if (!ok) {
errno = EAGAIN;
return -1;
}
return 0;
}
| sophomore_public/libzmq | src/mailbox_safe.cpp | C++ | gpl-3.0 | 2,561 |
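// Illustrative sketch (not part of libzmq): the intended calling pattern for
// mailbox_safe_t above. Unlike mailbox_t it shares an external mutex with its
// owner and blocks on a condition variable rather than a file descriptor. The
// names are hypothetical and the sketch assumes the internal headers.
#include "mailbox_safe.hpp"
#include "mutex.hpp"

static int recv_with_deadline_sketch (zmq::mailbox_safe_t &mailbox_,
                                      zmq::mutex_t &sync_,
                                      zmq::command_t *cmd_,
                                      int timeout_ms_)
{
    //  recv() must be called with the shared mutex held; it releases the
    //  mutex while waiting on the condition variable and reacquires it
    //  before returning.
    sync_.lock ();
    const int rc = mailbox_.recv (cmd_, timeout_ms_);
    sync_.unlock ();
    return rc; //  0 on success, -1 with errno == EAGAIN on timeout
}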
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_MAILBOX_SAFE_HPP_INCLUDED__
#define __ZMQ_MAILBOX_SAFE_HPP_INCLUDED__
#include <vector>
#include <stddef.h>
#include "signaler.hpp"
#include "fd.hpp"
#include "config.hpp"
#include "command.hpp"
#include "ypipe.hpp"
#include "mutex.hpp"
#include "i_mailbox.hpp"
#include "condition_variable.hpp"
namespace zmq
{
class mailbox_safe_t ZMQ_FINAL : public i_mailbox
{
public:
mailbox_safe_t (mutex_t *sync_);
~mailbox_safe_t ();
void send (const command_t &cmd_);
int recv (command_t *cmd_, int timeout_);
// Add signaler to mailbox which will be called when a message is ready
void add_signaler (signaler_t *signaler_);
void remove_signaler (signaler_t *signaler_);
void clear_signalers ();
#ifdef HAVE_FORK
// Close the file descriptors in the signaller. This is used in a forked
// child process so that they do not interfere with the context in the
// parent process.
void forked () ZMQ_FINAL
{
// TODO: call fork on the condition variable
}
#endif
private:
// The pipe to store actual commands.
typedef ypipe_t<command_t, command_pipe_granularity> cpipe_t;
cpipe_t _cpipe;
// Condition variable to pass signals from writer thread to reader thread.
condition_variable_t _cond_var;
// Synchronize access to the mailbox from receivers and senders
mutex_t *const _sync;
std::vector<zmq::signaler_t *> _signalers;
ZMQ_NON_COPYABLE_NOR_MOVABLE (mailbox_safe_t)
};
}
#endif
| sophomore_public/libzmq | src/mailbox_safe.hpp | C++ | gpl-3.0 | 1,571 |
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include <string.h>
#include <limits.h>
#include "mechanism.hpp"
#include "options.hpp"
#include "msg.hpp"
#include "err.hpp"
#include "wire.hpp"
#include "session_base.hpp"
zmq::mechanism_t::mechanism_t (const options_t &options_) : options (options_)
{
}
zmq::mechanism_t::~mechanism_t ()
{
}
void zmq::mechanism_t::set_peer_routing_id (const void *id_ptr_,
size_t id_size_)
{
_routing_id.set (static_cast<const unsigned char *> (id_ptr_), id_size_);
}
void zmq::mechanism_t::peer_routing_id (msg_t *msg_)
{
const int rc = msg_->init_size (_routing_id.size ());
errno_assert (rc == 0);
memcpy (msg_->data (), _routing_id.data (), _routing_id.size ());
msg_->set_flags (msg_t::routing_id);
}
void zmq::mechanism_t::set_user_id (const void *user_id_, size_t size_)
{
_user_id.set (static_cast<const unsigned char *> (user_id_), size_);
_zap_properties.ZMQ_MAP_INSERT_OR_EMPLACE (
std::string (ZMQ_MSG_PROPERTY_USER_ID),
std::string (reinterpret_cast<const char *> (user_id_), size_));
}
const zmq::blob_t &zmq::mechanism_t::get_user_id () const
{
return _user_id;
}
const char socket_type_pair[] = "PAIR";
const char socket_type_pub[] = "PUB";
const char socket_type_sub[] = "SUB";
const char socket_type_req[] = "REQ";
const char socket_type_rep[] = "REP";
const char socket_type_dealer[] = "DEALER";
const char socket_type_router[] = "ROUTER";
const char socket_type_pull[] = "PULL";
const char socket_type_push[] = "PUSH";
const char socket_type_xpub[] = "XPUB";
const char socket_type_xsub[] = "XSUB";
const char socket_type_stream[] = "STREAM";
#ifdef ZMQ_BUILD_DRAFT_API
const char socket_type_server[] = "SERVER";
const char socket_type_client[] = "CLIENT";
const char socket_type_radio[] = "RADIO";
const char socket_type_dish[] = "DISH";
const char socket_type_gather[] = "GATHER";
const char socket_type_scatter[] = "SCATTER";
const char socket_type_dgram[] = "DGRAM";
const char socket_type_peer[] = "PEER";
const char socket_type_channel[] = "CHANNEL";
#endif
const char *zmq::mechanism_t::socket_type_string (int socket_type_)
{
// TODO: the order of the names must correspond to the order of the ZMQ_* socket type definitions in zmq.h!
static const char *names[] = {socket_type_pair, socket_type_pub,
socket_type_sub, socket_type_req,
socket_type_rep, socket_type_dealer,
socket_type_router, socket_type_pull,
socket_type_push, socket_type_xpub,
socket_type_xsub, socket_type_stream,
#ifdef ZMQ_BUILD_DRAFT_API
socket_type_server, socket_type_client,
socket_type_radio, socket_type_dish,
socket_type_gather, socket_type_scatter,
socket_type_dgram, socket_type_peer,
socket_type_channel
#endif
};
static const size_t names_count = sizeof (names) / sizeof (names[0]);
zmq_assert (socket_type_ >= 0
&& socket_type_ < static_cast<int> (names_count));
return names[socket_type_];
}
const size_t name_len_size = sizeof (unsigned char);
const size_t value_len_size = sizeof (uint32_t);
static size_t property_len (size_t name_len_, size_t value_len_)
{
return name_len_size + name_len_ + value_len_size + value_len_;
}
static size_t name_len (const char *name_)
{
const size_t name_len = strlen (name_);
zmq_assert (name_len <= UCHAR_MAX);
return name_len;
}
size_t zmq::mechanism_t::add_property (unsigned char *ptr_,
size_t ptr_capacity_,
const char *name_,
const void *value_,
size_t value_len_)
{
const size_t name_len = ::name_len (name_);
const size_t total_len = ::property_len (name_len, value_len_);
zmq_assert (total_len <= ptr_capacity_);
*ptr_ = static_cast<unsigned char> (name_len);
ptr_ += name_len_size;
memcpy (ptr_, name_, name_len);
ptr_ += name_len;
zmq_assert (value_len_ <= 0x7FFFFFFF);
put_uint32 (ptr_, static_cast<uint32_t> (value_len_));
ptr_ += value_len_size;
memcpy (ptr_, value_, value_len_);
return total_len;
}
size_t zmq::mechanism_t::property_len (const char *name_, size_t value_len_)
{
return ::property_len (name_len (name_), value_len_);
}
#define ZMTP_PROPERTY_SOCKET_TYPE "Socket-Type"
#define ZMTP_PROPERTY_IDENTITY "Identity"
size_t zmq::mechanism_t::add_basic_properties (unsigned char *ptr_,
size_t ptr_capacity_) const
{
unsigned char *ptr = ptr_;
// Add socket type property
const char *socket_type = socket_type_string (options.type);
ptr += add_property (ptr, ptr_capacity_, ZMTP_PROPERTY_SOCKET_TYPE,
socket_type, strlen (socket_type));
// Add identity (aka routing id) property
if (options.type == ZMQ_REQ || options.type == ZMQ_DEALER
|| options.type == ZMQ_ROUTER) {
ptr += add_property (ptr, ptr_capacity_ - (ptr - ptr_),
ZMTP_PROPERTY_IDENTITY, options.routing_id,
options.routing_id_size);
}
for (std::map<std::string, std::string>::const_iterator
it = options.app_metadata.begin (),
end = options.app_metadata.end ();
it != end; ++it) {
ptr +=
add_property (ptr, ptr_capacity_ - (ptr - ptr_), it->first.c_str (),
it->second.c_str (), strlen (it->second.c_str ()));
}
return ptr - ptr_;
}
size_t zmq::mechanism_t::basic_properties_len () const
{
const char *socket_type = socket_type_string (options.type);
size_t meta_len = 0;
for (std::map<std::string, std::string>::const_iterator
it = options.app_metadata.begin (),
end = options.app_metadata.end ();
it != end; ++it) {
meta_len +=
property_len (it->first.c_str (), strlen (it->second.c_str ()));
}
return property_len (ZMTP_PROPERTY_SOCKET_TYPE, strlen (socket_type))
+ meta_len
+ ((options.type == ZMQ_REQ || options.type == ZMQ_DEALER
|| options.type == ZMQ_ROUTER)
? property_len (ZMTP_PROPERTY_IDENTITY, options.routing_id_size)
: 0);
}
void zmq::mechanism_t::make_command_with_basic_properties (
msg_t *msg_, const char *prefix_, size_t prefix_len_) const
{
const size_t command_size = prefix_len_ + basic_properties_len ();
const int rc = msg_->init_size (command_size);
errno_assert (rc == 0);
unsigned char *ptr = static_cast<unsigned char *> (msg_->data ());
// Add prefix
memcpy (ptr, prefix_, prefix_len_);
ptr += prefix_len_;
add_basic_properties (
ptr, command_size - (ptr - static_cast<unsigned char *> (msg_->data ())));
}
int zmq::mechanism_t::parse_metadata (const unsigned char *ptr_,
size_t length_,
bool zap_flag_)
{
size_t bytes_left = length_;
while (bytes_left > 1) {
const size_t name_length = static_cast<size_t> (*ptr_);
ptr_ += name_len_size;
bytes_left -= name_len_size;
if (bytes_left < name_length)
break;
const std::string name =
std::string (reinterpret_cast<const char *> (ptr_), name_length);
ptr_ += name_length;
bytes_left -= name_length;
if (bytes_left < value_len_size)
break;
const size_t value_length = static_cast<size_t> (get_uint32 (ptr_));
ptr_ += value_len_size;
bytes_left -= value_len_size;
if (bytes_left < value_length)
break;
const uint8_t *value = ptr_;
ptr_ += value_length;
bytes_left -= value_length;
if (name == ZMTP_PROPERTY_IDENTITY && options.recv_routing_id)
set_peer_routing_id (value, value_length);
else if (name == ZMTP_PROPERTY_SOCKET_TYPE) {
if (!check_socket_type (reinterpret_cast<const char *> (value),
value_length)) {
errno = EINVAL;
return -1;
}
} else {
const int rc = property (name, value, value_length);
if (rc == -1)
return -1;
}
(zap_flag_ ? _zap_properties : _zmtp_properties)
.ZMQ_MAP_INSERT_OR_EMPLACE (
name,
std::string (reinterpret_cast<const char *> (value), value_length));
}
if (bytes_left > 0) {
errno = EPROTO;
return -1;
}
return 0;
}
int zmq::mechanism_t::property (const std::string & /* name_ */,
const void * /* value_ */,
size_t /* length_ */)
{
// Default implementation does not check
// property values and returns 0 to signal success.
return 0;
}
template <size_t N>
static bool strequals (const char *actual_type_,
const size_t actual_len_,
const char (&expected_type_)[N])
{
return actual_len_ == N - 1
&& memcmp (actual_type_, expected_type_, N - 1) == 0;
}
bool zmq::mechanism_t::check_socket_type (const char *type_,
const size_t len_) const
{
switch (options.type) {
case ZMQ_REQ:
return strequals (type_, len_, socket_type_rep)
|| strequals (type_, len_, socket_type_router);
case ZMQ_REP:
return strequals (type_, len_, socket_type_req)
|| strequals (type_, len_, socket_type_dealer);
case ZMQ_DEALER:
return strequals (type_, len_, socket_type_rep)
|| strequals (type_, len_, socket_type_dealer)
|| strequals (type_, len_, socket_type_router);
case ZMQ_ROUTER:
return strequals (type_, len_, socket_type_req)
|| strequals (type_, len_, socket_type_dealer)
|| strequals (type_, len_, socket_type_router);
case ZMQ_PUSH:
return strequals (type_, len_, socket_type_pull);
case ZMQ_PULL:
return strequals (type_, len_, socket_type_push);
case ZMQ_PUB:
return strequals (type_, len_, socket_type_sub)
|| strequals (type_, len_, socket_type_xsub);
case ZMQ_SUB:
return strequals (type_, len_, socket_type_pub)
|| strequals (type_, len_, socket_type_xpub);
case ZMQ_XPUB:
return strequals (type_, len_, socket_type_sub)
|| strequals (type_, len_, socket_type_xsub);
case ZMQ_XSUB:
return strequals (type_, len_, socket_type_pub)
|| strequals (type_, len_, socket_type_xpub);
case ZMQ_PAIR:
return strequals (type_, len_, socket_type_pair);
#ifdef ZMQ_BUILD_DRAFT_API
case ZMQ_SERVER:
return strequals (type_, len_, socket_type_client);
case ZMQ_CLIENT:
return strequals (type_, len_, socket_type_server);
case ZMQ_RADIO:
return strequals (type_, len_, socket_type_dish);
case ZMQ_DISH:
return strequals (type_, len_, socket_type_radio);
case ZMQ_GATHER:
return strequals (type_, len_, socket_type_scatter);
case ZMQ_SCATTER:
return strequals (type_, len_, socket_type_gather);
case ZMQ_DGRAM:
return strequals (type_, len_, socket_type_dgram);
case ZMQ_PEER:
return strequals (type_, len_, socket_type_peer);
case ZMQ_CHANNEL:
return strequals (type_, len_, socket_type_channel);
#endif
default:
break;
}
return false;
}
| sophomore_public/libzmq | src/mechanism.cpp | C++ | gpl-3.0 | 12,206 |
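// Illustrative sketch (not part of libzmq): a standalone re-implementation of
// the ZMTP metadata property layout produced by add_property() above, handy
// for seeing the bytes on the wire. The function name is hypothetical; the
// real encoder is the protected static member above. Assumes the 4-byte value
// length is written in network byte order, as libzmq's put_uint32 does.
#include <stdint.h>
#include <string>
#include <vector>

static std::vector<unsigned char>
encode_property_sketch (const std::string &name_, const std::string &value_)
{
    //  Layout: 1-byte name length | name | 4-byte value length | value
    std::vector<unsigned char> out;
    out.push_back (static_cast<unsigned char> (name_.size ()));
    out.insert (out.end (), name_.begin (), name_.end ());
    const uint32_t len = static_cast<uint32_t> (value_.size ());
    out.push_back (static_cast<unsigned char> (len >> 24));
    out.push_back (static_cast<unsigned char> (len >> 16));
    out.push_back (static_cast<unsigned char> (len >> 8));
    out.push_back (static_cast<unsigned char> (len));
    out.insert (out.end (), value_.begin (), value_.end ());
    return out;
}
//  encode_property_sketch ("Socket-Type", "DEALER") yields
//  0x0B "Socket-Type" 0x00 0x00 0x00 0x06 "DEALER" - 22 bytes in total,
//  matching property_len: 1 + 11 + 4 + 6.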
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_MECHANISM_HPP_INCLUDED__
#define __ZMQ_MECHANISM_HPP_INCLUDED__
#include "stdint.hpp"
#include "options.hpp"
#include "blob.hpp"
#include "metadata.hpp"
namespace zmq
{
class msg_t;
class session_base_t;
// Abstract class representing security mechanism.
// Different mechanism extends this class.
class mechanism_t
{
public:
enum status_t
{
handshaking,
ready,
error
};
mechanism_t (const options_t &options_);
virtual ~mechanism_t ();
// Prepare next handshake command that is to be sent to the peer.
virtual int next_handshake_command (msg_t *msg_) = 0;
// Process the handshake command received from the peer.
virtual int process_handshake_command (msg_t *msg_) = 0;
virtual int encode (msg_t *) { return 0; }
virtual int decode (msg_t *) { return 0; }
// Notifies mechanism about availability of ZAP message.
virtual int zap_msg_available () { return 0; }
// Returns the status of this mechanism.
virtual status_t status () const = 0;
void set_peer_routing_id (const void *id_ptr_, size_t id_size_);
void peer_routing_id (msg_t *msg_);
void set_user_id (const void *user_id_, size_t size_);
const blob_t &get_user_id () const;
const metadata_t::dict_t &get_zmtp_properties () const
{
return _zmtp_properties;
}
const metadata_t::dict_t &get_zap_properties () const
{
return _zap_properties;
}
protected:
// Only used to identify the socket for the Socket-Type
// property in the wire protocol.
static const char *socket_type_string (int socket_type_);
static size_t add_property (unsigned char *ptr_,
size_t ptr_capacity_,
const char *name_,
const void *value_,
size_t value_len_);
static size_t property_len (const char *name_, size_t value_len_);
size_t add_basic_properties (unsigned char *ptr_,
size_t ptr_capacity_) const;
size_t basic_properties_len () const;
void make_command_with_basic_properties (msg_t *msg_,
const char *prefix_,
size_t prefix_len_) const;
// Parses a metadata.
// Metadata consists of a list of properties consisting of
// name and value as size-specified strings.
// Returns 0 on success and -1 on error, in which case errno is set.
int parse_metadata (const unsigned char *ptr_,
size_t length_,
bool zap_flag_ = false);
// This is called by parse_property method whenever it
// parses a new property. The function should return 0
// on success and -1 on error, in which case it should
// set errno. Signaling error prevents parser from
// parsing remaining data.
// Derived classes are supposed to override this
// method to handle custom processing.
virtual int
property (const std::string &name_, const void *value_, size_t length_);
const options_t options;
private:
// Properties received from ZMTP peer.
metadata_t::dict_t _zmtp_properties;
// Properties received from ZAP server.
metadata_t::dict_t _zap_properties;
blob_t _routing_id;
blob_t _user_id;
// Returns true iff socket associated with the mechanism
// is compatible with a given socket type 'type_'.
bool check_socket_type (const char *type_, size_t len_) const;
};
}
#endif
| sophomore_public/libzmq | src/mechanism.hpp | C++ | gpl-3.0 | 3,659 |
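// Illustrative sketch (not part of libzmq): the minimum a concrete security
// mechanism must provide on top of the abstract mechanism_t above. The class
// is hypothetical (libzmq's real minimal mechanism is the ZMTP NULL mechanism)
// and performs no actual handshaking; it only shows which virtuals are pure.
#include <errno.h>
#include "mechanism.hpp"

class noop_mechanism_sketch_t : public zmq::mechanism_t
{
  public:
    noop_mechanism_sketch_t (const zmq::options_t &options_) :
        zmq::mechanism_t (options_)
    {
    }

    //  Nothing to send: report that no handshake command is pending.
    int next_handshake_command (zmq::msg_t *)
    {
        errno = EAGAIN;
        return -1;
    }

    //  Nothing is expected from the peer either.
    int process_handshake_command (zmq::msg_t *)
    {
        errno = EPROTO;
        return -1;
    }

    //  The handshake is considered complete from the start.
    status_t status () const { return ready; }
};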
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include "mechanism_base.hpp"
#include "session_base.hpp"
zmq::mechanism_base_t::mechanism_base_t (session_base_t *const session_,
const options_t &options_) :
mechanism_t (options_), session (session_)
{
}
int zmq::mechanism_base_t::check_basic_command_structure (msg_t *msg_) const
{
if (msg_->size () <= 1
|| msg_->size () <= (static_cast<uint8_t *> (msg_->data ()))[0]) {
session->get_socket ()->event_handshake_failed_protocol (
session->get_endpoint (),
ZMQ_PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_UNSPECIFIED);
errno = EPROTO;
return -1;
}
return 0;
}
void zmq::mechanism_base_t::handle_error_reason (const char *error_reason_,
size_t error_reason_len_)
{
const size_t status_code_len = 3;
const char zero_digit = '0';
const size_t significant_digit_index = 0;
const size_t first_zero_digit_index = 1;
const size_t second_zero_digit_index = 2;
const int factor = 100;
if (error_reason_len_ == status_code_len
&& error_reason_[first_zero_digit_index] == zero_digit
&& error_reason_[second_zero_digit_index] == zero_digit
&& error_reason_[significant_digit_index] >= '3'
&& error_reason_[significant_digit_index] <= '5') {
// it is a ZAP error status code (300, 400 or 500), so emit an authentication failure event
session->get_socket ()->event_handshake_failed_auth (
session->get_endpoint (),
(error_reason_[significant_digit_index] - zero_digit) * factor);
} else {
// this is a violation of the ZAP protocol
// TODO zmq_assert in this case?
}
}
bool zmq::mechanism_base_t::zap_required () const
{
return !options.zap_domain.empty ();
}
| sophomore_public/libzmq | src/mechanism_base.cpp | C++ | gpl-3.0 | 1,897 |
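// Illustrative sketch (not part of libzmq): the status-code arithmetic used by
// handle_error_reason() above, extracted into a standalone helper so the
// mapping is easy to check. Only the reasons "300", "400" and "500" are
// treated as ZAP status codes.
#include <stddef.h>

static int zap_status_code_sketch (const char *reason_, size_t len_)
{
    if (len_ == 3 && reason_[1] == '0' && reason_[2] == '0'
        && reason_[0] >= '3' && reason_[0] <= '5')
        return (reason_[0] - '0') * 100; //  '4' - '0' == 4, so "400" -> 400
    return -1;                           //  not a ZAP status code
}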
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_MECHANISM_BASE_HPP_INCLUDED__
#define __ZMQ_MECHANISM_BASE_HPP_INCLUDED__
#include "mechanism.hpp"
namespace zmq
{
class msg_t;
class mechanism_base_t : public mechanism_t
{
protected:
mechanism_base_t (session_base_t *session_, const options_t &options_);
session_base_t *const session;
int check_basic_command_structure (msg_t *msg_) const;
void handle_error_reason (const char *error_reason_,
size_t error_reason_len_);
bool zap_required () const;
};
}
#endif
| sophomore_public/libzmq | src/mechanism_base.hpp | C++ | gpl-3.0 | 577 |
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include "metadata.hpp"
zmq::metadata_t::metadata_t (const dict_t &dict_) : _ref_cnt (1), _dict (dict_)
{
}
const char *zmq::metadata_t::get (const std::string &property_) const
{
const dict_t::const_iterator it = _dict.find (property_);
if (it == _dict.end ()) {
/** \todo remove this when support for the deprecated name "Identity" is dropped */
if (property_ == "Identity")
return get (ZMQ_MSG_PROPERTY_ROUTING_ID);
return NULL;
}
return it->second.c_str ();
}
void zmq::metadata_t::add_ref ()
{
_ref_cnt.add (1);
}
bool zmq::metadata_t::drop_ref ()
{
return !_ref_cnt.sub (1);
}
| sophomore_public/libzmq | src/metadata.cpp | C++ | gpl-3.0 | 715 |
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_METADATA_HPP_INCLUDED__
#define __ZMQ_METADATA_HPP_INCLUDED__
#include <map>
#include <string>
#include "atomic_counter.hpp"
namespace zmq
{
class metadata_t
{
public:
typedef std::map<std::string, std::string> dict_t;
metadata_t (const dict_t &dict_);
// Returns pointer to property value or NULL if
// property is not found.
const char *get (const std::string &property_) const;
void add_ref ();
// Drop reference. Returns true iff the reference
// counter drops to zero.
bool drop_ref ();
private:
// Reference counter.
atomic_counter_t _ref_cnt;
// Dictionary holding metadata.
const dict_t _dict;
ZMQ_NON_COPYABLE_NOR_MOVABLE (metadata_t)
};
}
#endif
| sophomore_public/libzmq | src/metadata.hpp | C++ | gpl-3.0 | 787 |
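// Illustrative sketch (not part of libzmq): the reference-counting contract of
// metadata_t above. The creator holds the initial reference; every additional
// holder pairs add_ref() with drop_ref(), and whoever sees drop_ref() return
// true deletes the object. Function name is hypothetical; assumes the internal
// headers are available.
#include "macros.hpp"
#include "metadata.hpp"

static void metadata_refcount_sketch ()
{
    zmq::metadata_t::dict_t dict;
    dict["Socket-Type"] = "PUB";

    //  The object starts with a reference count of 1, held by this scope.
    zmq::metadata_t *md = new zmq::metadata_t (dict);

    md->add_ref ();         //  a second holder appears...
    md->drop_ref ();        //  ...and releases; the count is back to 1

    if (md->drop_ref ())    //  true only when the last reference is dropped,
        LIBZMQ_DELETE (md); //  so the object is deleted exactly once
}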
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include "compat.hpp"
#include "macros.hpp"
#include "msg.hpp"
#include <string.h>
#include <stdlib.h>
#include <new>
#include "stdint.hpp"
#include "likely.hpp"
#include "metadata.hpp"
#include "err.hpp"
// Check whether the sizes of public representation of the message (zmq_msg_t)
// and private representation of the message (zmq::msg_t) match.
typedef char
zmq_msg_size_check[2 * ((sizeof (zmq::msg_t) == sizeof (zmq_msg_t)) != 0)
- 1];
bool zmq::msg_t::check () const
{
return _u.base.type >= type_min && _u.base.type <= type_max;
}
int zmq::msg_t::init (void *data_,
size_t size_,
msg_free_fn *ffn_,
void *hint_,
content_t *content_)
{
if (size_ <= max_vsm_size) {
const int rc = init_size (size_);
if (rc != -1) {
memcpy (data (), data_, size_);
return 0;
}
return -1;
}
if (content_) {
return init_external_storage (content_, data_, size_, ffn_, hint_);
}
return init_data (data_, size_, ffn_, hint_);
}
int zmq::msg_t::init ()
{
_u.vsm.metadata = NULL;
_u.vsm.type = type_vsm;
_u.vsm.flags = 0;
_u.vsm.size = 0;
_u.vsm.group.sgroup.group[0] = '\0';
_u.vsm.group.type = group_type_short;
_u.vsm.routing_id = 0;
return 0;
}
int zmq::msg_t::init_size (size_t size_)
{
if (size_ <= max_vsm_size) {
_u.vsm.metadata = NULL;
_u.vsm.type = type_vsm;
_u.vsm.flags = 0;
_u.vsm.size = static_cast<unsigned char> (size_);
_u.vsm.group.sgroup.group[0] = '\0';
_u.vsm.group.type = group_type_short;
_u.vsm.routing_id = 0;
} else {
_u.lmsg.metadata = NULL;
_u.lmsg.type = type_lmsg;
_u.lmsg.flags = 0;
_u.lmsg.group.sgroup.group[0] = '\0';
_u.lmsg.group.type = group_type_short;
_u.lmsg.routing_id = 0;
_u.lmsg.content = NULL;
if (sizeof (content_t) + size_ > size_)
_u.lmsg.content =
static_cast<content_t *> (malloc (sizeof (content_t) + size_));
if (unlikely (!_u.lmsg.content)) {
errno = ENOMEM;
return -1;
}
_u.lmsg.content->data = _u.lmsg.content + 1;
_u.lmsg.content->size = size_;
_u.lmsg.content->ffn = NULL;
_u.lmsg.content->hint = NULL;
new (&_u.lmsg.content->refcnt) zmq::atomic_counter_t ();
}
return 0;
}
int zmq::msg_t::init_buffer (const void *buf_, size_t size_)
{
const int rc = init_size (size_);
if (unlikely (rc < 0)) {
return -1;
}
if (size_) {
// NULL and zero size is allowed
assert (NULL != buf_);
memcpy (data (), buf_, size_);
}
return 0;
}
int zmq::msg_t::init_external_storage (content_t *content_,
void *data_,
size_t size_,
msg_free_fn *ffn_,
void *hint_)
{
zmq_assert (NULL != data_);
zmq_assert (NULL != content_);
_u.zclmsg.metadata = NULL;
_u.zclmsg.type = type_zclmsg;
_u.zclmsg.flags = 0;
_u.zclmsg.group.sgroup.group[0] = '\0';
_u.zclmsg.group.type = group_type_short;
_u.zclmsg.routing_id = 0;
_u.zclmsg.content = content_;
_u.zclmsg.content->data = data_;
_u.zclmsg.content->size = size_;
_u.zclmsg.content->ffn = ffn_;
_u.zclmsg.content->hint = hint_;
new (&_u.zclmsg.content->refcnt) zmq::atomic_counter_t ();
return 0;
}
int zmq::msg_t::init_data (void *data_,
size_t size_,
msg_free_fn *ffn_,
void *hint_)
{
// If data is NULL and size is not 0, a segfault
// would occur once the data is accessed
zmq_assert (data_ != NULL || size_ == 0);
// Initialize constant message if there's no need to deallocate
if (ffn_ == NULL) {
_u.cmsg.metadata = NULL;
_u.cmsg.type = type_cmsg;
_u.cmsg.flags = 0;
_u.cmsg.data = data_;
_u.cmsg.size = size_;
_u.cmsg.group.sgroup.group[0] = '\0';
_u.cmsg.group.type = group_type_short;
_u.cmsg.routing_id = 0;
} else {
_u.lmsg.metadata = NULL;
_u.lmsg.type = type_lmsg;
_u.lmsg.flags = 0;
_u.lmsg.group.sgroup.group[0] = '\0';
_u.lmsg.group.type = group_type_short;
_u.lmsg.routing_id = 0;
_u.lmsg.content =
static_cast<content_t *> (malloc (sizeof (content_t)));
if (!_u.lmsg.content) {
errno = ENOMEM;
return -1;
}
_u.lmsg.content->data = data_;
_u.lmsg.content->size = size_;
_u.lmsg.content->ffn = ffn_;
_u.lmsg.content->hint = hint_;
new (&_u.lmsg.content->refcnt) zmq::atomic_counter_t ();
}
return 0;
}
int zmq::msg_t::init_delimiter ()
{
_u.delimiter.metadata = NULL;
_u.delimiter.type = type_delimiter;
_u.delimiter.flags = 0;
_u.delimiter.group.sgroup.group[0] = '\0';
_u.delimiter.group.type = group_type_short;
_u.delimiter.routing_id = 0;
return 0;
}
int zmq::msg_t::init_join ()
{
_u.base.metadata = NULL;
_u.base.type = type_join;
_u.base.flags = 0;
_u.base.group.sgroup.group[0] = '\0';
_u.base.group.type = group_type_short;
_u.base.routing_id = 0;
return 0;
}
int zmq::msg_t::init_leave ()
{
_u.base.metadata = NULL;
_u.base.type = type_leave;
_u.base.flags = 0;
_u.base.group.sgroup.group[0] = '\0';
_u.base.group.type = group_type_short;
_u.base.routing_id = 0;
return 0;
}
int zmq::msg_t::init_subscribe (const size_t size_, const unsigned char *topic_)
{
int rc = init_size (size_);
if (rc == 0) {
set_flags (zmq::msg_t::subscribe);
// We explicitly allow a NULL subscription with size zero
if (size_) {
assert (topic_);
memcpy (data (), topic_, size_);
}
}
return rc;
}
int zmq::msg_t::init_cancel (const size_t size_, const unsigned char *topic_)
{
int rc = init_size (size_);
if (rc == 0) {
set_flags (zmq::msg_t::cancel);
// We explicitly allow a NULL subscription with size zero
if (size_) {
assert (topic_);
memcpy (data (), topic_, size_);
}
}
return rc;
}
int zmq::msg_t::close ()
{
// Check the validity of the message.
if (unlikely (!check ())) {
errno = EFAULT;
return -1;
}
if (_u.base.type == type_lmsg) {
// If the content is not shared, or if it is shared and the reference
// count has dropped to zero, deallocate it.
if (!(_u.lmsg.flags & msg_t::shared)
|| !_u.lmsg.content->refcnt.sub (1)) {
// We used "placement new" operator to initialize the reference
// counter so we call the destructor explicitly now.
_u.lmsg.content->refcnt.~atomic_counter_t ();
if (_u.lmsg.content->ffn)
_u.lmsg.content->ffn (_u.lmsg.content->data,
_u.lmsg.content->hint);
free (_u.lmsg.content);
}
}
if (is_zcmsg ()) {
zmq_assert (_u.zclmsg.content->ffn);
// If the content is not shared, or if it is shared and the reference
// count has dropped to zero, deallocate it.
if (!(_u.zclmsg.flags & msg_t::shared)
|| !_u.zclmsg.content->refcnt.sub (1)) {
// We used "placement new" operator to initialize the reference
// counter so we call the destructor explicitly now.
_u.zclmsg.content->refcnt.~atomic_counter_t ();
_u.zclmsg.content->ffn (_u.zclmsg.content->data,
_u.zclmsg.content->hint);
}
}
if (_u.base.metadata != NULL) {
if (_u.base.metadata->drop_ref ()) {
LIBZMQ_DELETE (_u.base.metadata);
}
_u.base.metadata = NULL;
}
if (_u.base.group.type == group_type_long) {
if (!_u.base.group.lgroup.content->refcnt.sub (1)) {
// We used "placement new" operator to initialize the reference
// counter so we call the destructor explicitly now.
_u.base.group.lgroup.content->refcnt.~atomic_counter_t ();
free (_u.base.group.lgroup.content);
}
}
// Make the message invalid.
_u.base.type = 0;
return 0;
}
int zmq::msg_t::move (msg_t &src_)
{
// Check the validity of the source.
if (unlikely (!src_.check ())) {
errno = EFAULT;
return -1;
}
int rc = close ();
if (unlikely (rc < 0))
return rc;
*this = src_;
rc = src_.init ();
if (unlikely (rc < 0))
return rc;
return 0;
}
int zmq::msg_t::copy (msg_t &src_)
{
// Check the validity of the source.
if (unlikely (!src_.check ())) {
errno = EFAULT;
return -1;
}
const int rc = close ();
if (unlikely (rc < 0))
return rc;
// The initial reference count, when a non-shared message is initially
// shared (between the original and the copy we create here).
const atomic_counter_t::integer_t initial_shared_refcnt = 2;
if (src_.is_lmsg () || src_.is_zcmsg ()) {
// One reference is added to shared messages. Non-shared messages
// are turned into shared messages.
if (src_.flags () & msg_t::shared)
src_.refcnt ()->add (1);
else {
src_.set_flags (msg_t::shared);
src_.refcnt ()->set (initial_shared_refcnt);
}
}
if (src_._u.base.metadata != NULL)
src_._u.base.metadata->add_ref ();
if (src_._u.base.group.type == group_type_long)
src_._u.base.group.lgroup.content->refcnt.add (1);
*this = src_;
return 0;
}
void *zmq::msg_t::data ()
{
// Check the validity of the message.
zmq_assert (check ());
switch (_u.base.type) {
case type_vsm:
return _u.vsm.data;
case type_lmsg:
return _u.lmsg.content->data;
case type_cmsg:
return _u.cmsg.data;
case type_zclmsg:
return _u.zclmsg.content->data;
default:
zmq_assert (false);
return NULL;
}
}
size_t zmq::msg_t::size () const
{
// Check the validity of the message.
zmq_assert (check ());
switch (_u.base.type) {
case type_vsm:
return _u.vsm.size;
case type_lmsg:
return _u.lmsg.content->size;
case type_zclmsg:
return _u.zclmsg.content->size;
case type_cmsg:
return _u.cmsg.size;
default:
zmq_assert (false);
return 0;
}
}
void zmq::msg_t::shrink (size_t new_size_)
{
// Check the validity of the message.
zmq_assert (check ());
zmq_assert (new_size_ <= size ());
switch (_u.base.type) {
case type_vsm:
_u.vsm.size = static_cast<unsigned char> (new_size_);
break;
case type_lmsg:
_u.lmsg.content->size = new_size_;
break;
case type_zclmsg:
_u.zclmsg.content->size = new_size_;
break;
case type_cmsg:
_u.cmsg.size = new_size_;
break;
default:
zmq_assert (false);
}
}
unsigned char zmq::msg_t::flags () const
{
return _u.base.flags;
}
void zmq::msg_t::set_flags (unsigned char flags_)
{
_u.base.flags |= flags_;
}
void zmq::msg_t::reset_flags (unsigned char flags_)
{
_u.base.flags &= ~flags_;
}
zmq::metadata_t *zmq::msg_t::metadata () const
{
return _u.base.metadata;
}
void zmq::msg_t::set_metadata (zmq::metadata_t *metadata_)
{
assert (metadata_ != NULL);
assert (_u.base.metadata == NULL);
metadata_->add_ref ();
_u.base.metadata = metadata_;
}
void zmq::msg_t::reset_metadata ()
{
if (_u.base.metadata) {
if (_u.base.metadata->drop_ref ()) {
LIBZMQ_DELETE (_u.base.metadata);
}
_u.base.metadata = NULL;
}
}
bool zmq::msg_t::is_routing_id () const
{
return (_u.base.flags & routing_id) == routing_id;
}
bool zmq::msg_t::is_credential () const
{
return (_u.base.flags & credential) == credential;
}
bool zmq::msg_t::is_delimiter () const
{
return _u.base.type == type_delimiter;
}
bool zmq::msg_t::is_vsm () const
{
return _u.base.type == type_vsm;
}
bool zmq::msg_t::is_cmsg () const
{
return _u.base.type == type_cmsg;
}
bool zmq::msg_t::is_lmsg () const
{
return _u.base.type == type_lmsg;
}
bool zmq::msg_t::is_zcmsg () const
{
return _u.base.type == type_zclmsg;
}
bool zmq::msg_t::is_join () const
{
return _u.base.type == type_join;
}
bool zmq::msg_t::is_leave () const
{
return _u.base.type == type_leave;
}
bool zmq::msg_t::is_ping () const
{
return (_u.base.flags & CMD_TYPE_MASK) == ping;
}
bool zmq::msg_t::is_pong () const
{
return (_u.base.flags & CMD_TYPE_MASK) == pong;
}
bool zmq::msg_t::is_close_cmd () const
{
return (_u.base.flags & CMD_TYPE_MASK) == close_cmd;
}
size_t zmq::msg_t::command_body_size () const
{
if (this->is_ping () || this->is_pong ())
return this->size () - ping_cmd_name_size;
else if (!(this->flags () & msg_t::command)
&& (this->is_subscribe () || this->is_cancel ()))
return this->size ();
else if (this->is_subscribe ())
return this->size () - sub_cmd_name_size;
else if (this->is_cancel ())
return this->size () - cancel_cmd_name_size;
return 0;
}
void *zmq::msg_t::command_body ()
{
unsigned char *data = NULL;
if (this->is_ping () || this->is_pong ())
data =
static_cast<unsigned char *> (this->data ()) + ping_cmd_name_size;
// With inproc, command flag is not set for sub/cancel
else if (!(this->flags () & msg_t::command)
&& (this->is_subscribe () || this->is_cancel ()))
data = static_cast<unsigned char *> (this->data ());
else if (this->is_subscribe ())
data = static_cast<unsigned char *> (this->data ()) + sub_cmd_name_size;
else if (this->is_cancel ())
data =
static_cast<unsigned char *> (this->data ()) + cancel_cmd_name_size;
return data;
}
void zmq::msg_t::add_refs (int refs_)
{
zmq_assert (refs_ >= 0);
// Operation not supported for messages with metadata.
zmq_assert (_u.base.metadata == NULL);
// No copies required.
if (!refs_)
return;
// VSMs, CMSGs and delimiters can be copied straight away. The only
// message types that need special care are long (and zero-copy) messages.
if (_u.base.type == type_lmsg || is_zcmsg ()) {
if (_u.base.flags & msg_t::shared)
refcnt ()->add (refs_);
else {
refcnt ()->set (refs_ + 1);
_u.base.flags |= msg_t::shared;
}
}
}
bool zmq::msg_t::rm_refs (int refs_)
{
zmq_assert (refs_ >= 0);
// Operation not supported for messages with metadata.
zmq_assert (_u.base.metadata == NULL);
// No copies required.
if (!refs_)
return true;
// If there's only one reference close the message.
if ((_u.base.type != type_zclmsg && _u.base.type != type_lmsg)
|| !(_u.base.flags & msg_t::shared)) {
close ();
return false;
}
// The only message types that need special care are long and zero-copy messages.
if (_u.base.type == type_lmsg && !_u.lmsg.content->refcnt.sub (refs_)) {
// We used "placement new" operator to initialize the reference
// counter so we call the destructor explicitly now.
_u.lmsg.content->refcnt.~atomic_counter_t ();
if (_u.lmsg.content->ffn)
_u.lmsg.content->ffn (_u.lmsg.content->data, _u.lmsg.content->hint);
free (_u.lmsg.content);
return false;
}
if (is_zcmsg () && !_u.zclmsg.content->refcnt.sub (refs_)) {
// storage for refcnt is provided externally
if (_u.zclmsg.content->ffn) {
_u.zclmsg.content->ffn (_u.zclmsg.content->data,
_u.zclmsg.content->hint);
}
return false;
}
return true;
}
uint32_t zmq::msg_t::get_routing_id () const
{
return _u.base.routing_id;
}
int zmq::msg_t::set_routing_id (uint32_t routing_id_)
{
if (routing_id_) {
_u.base.routing_id = routing_id_;
return 0;
}
errno = EINVAL;
return -1;
}
int zmq::msg_t::reset_routing_id ()
{
_u.base.routing_id = 0;
return 0;
}
const char *zmq::msg_t::group () const
{
if (_u.base.group.type == group_type_long)
return _u.base.group.lgroup.content->group;
return _u.base.group.sgroup.group;
}
int zmq::msg_t::set_group (const char *group_)
{
size_t length = strnlen (group_, ZMQ_GROUP_MAX_LENGTH);
return set_group (group_, length);
}
int zmq::msg_t::set_group (const char *group_, size_t length_)
{
if (length_ > ZMQ_GROUP_MAX_LENGTH) {
errno = EINVAL;
return -1;
}
if (length_ > 14) {
_u.base.group.lgroup.type = group_type_long;
_u.base.group.lgroup.content =
(long_group_t *) malloc (sizeof (long_group_t));
assert (_u.base.group.lgroup.content);
new (&_u.base.group.lgroup.content->refcnt) zmq::atomic_counter_t ();
_u.base.group.lgroup.content->refcnt.set (1);
strncpy (_u.base.group.lgroup.content->group, group_, length_);
_u.base.group.lgroup.content->group[length_] = '\0';
} else {
strncpy (_u.base.group.sgroup.group, group_, length_);
_u.base.group.sgroup.group[length_] = '\0';
}
return 0;
}
zmq::atomic_counter_t *zmq::msg_t::refcnt ()
{
switch (_u.base.type) {
case type_lmsg:
return &_u.lmsg.content->refcnt;
case type_zclmsg:
return &_u.zclmsg.content->refcnt;
default:
zmq_assert (false);
return NULL;
}
}
| sophomore_public/libzmq | src/msg.cpp | C++ | gpl-3.0 | 18,270 |
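// Illustrative sketch (not part of libzmq): the lifecycle of the internal
// msg_t above. Payloads up to max_vsm_size bytes live inline in the 64-byte
// structure (VSM); larger ones get a malloc-ed, reference-counted content_t,
// and copy() shares that buffer instead of duplicating it. Error handling is
// omitted for brevity; the sketch assumes the internal headers are available.
#include <string.h>
#include "msg.hpp"

static void msg_lifecycle_sketch ()
{
    zmq::msg_t small, large, shared;

    //  Small payloads (33 bytes or less on a typical 64-bit build) stay inline.
    small.init_size (5);
    memcpy (small.data (), "hello", 5);

    //  Larger payloads become an lmsg backed by a refcounted content_t.
    large.init_size (1024);

    //  copy() bumps the reference count; both messages now share one buffer.
    shared.init ();
    shared.copy (large);

    //  Every successful init_* must be balanced by close().
    small.close ();
    large.close ();
    shared.close ();
}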
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_MSG_HPP_INCLUDE__
#define __ZMQ_MSG_HPP_INCLUDE__
#include <stddef.h>
#include <stdio.h>
#include "config.hpp"
#include "err.hpp"
#include "fd.hpp"
#include "atomic_counter.hpp"
#include "metadata.hpp"
// bits 2-5
#define CMD_TYPE_MASK 0x1c
// Signature for free function to deallocate the message content.
// Note that it has to be declared as "C" so that it is the same as
// zmq_free_fn defined in zmq.h.
extern "C" {
typedef void (msg_free_fn) (void *data_, void *hint_);
}
namespace zmq
{
// Note that this structure needs to be explicitly constructed
// (init functions) and destructed (close function).
static const char cancel_cmd_name[] = "\6CANCEL";
static const char sub_cmd_name[] = "\x9SUBSCRIBE";
class msg_t
{
public:
// Shared message buffer. Message data are either allocated in one
// continuous block along with this structure - thus avoiding one
// malloc/free pair or they are stored in user-supplied memory.
// In the latter case, ffn member stores pointer to the function to be
// used to deallocate the data. If the buffer is actually shared (there
// are at least 2 references to it) refcount member contains number of
// references.
struct content_t
{
void *data;
size_t size;
msg_free_fn *ffn;
void *hint;
zmq::atomic_counter_t refcnt;
};
// Message flags.
enum
{
more = 1, // Followed by more parts
command = 2, // Command frame (see ZMTP spec)
// Command types, use only bits 2-5 and compare with ==, not bitwise,
// a command can never be of more than one type at the same time
ping = 4,
pong = 8,
subscribe = 12,
cancel = 16,
close_cmd = 20,
credential = 32,
routing_id = 64,
shared = 128
};
bool check () const;
int init ();
int init (void *data_,
size_t size_,
msg_free_fn *ffn_,
void *hint_,
content_t *content_ = NULL);
int init_size (size_t size_);
int init_buffer (const void *buf_, size_t size_);
int init_data (void *data_, size_t size_, msg_free_fn *ffn_, void *hint_);
int init_external_storage (content_t *content_,
void *data_,
size_t size_,
msg_free_fn *ffn_,
void *hint_);
int init_delimiter ();
int init_join ();
int init_leave ();
int init_subscribe (const size_t size_, const unsigned char *topic);
int init_cancel (const size_t size_, const unsigned char *topic);
int close ();
int move (msg_t &src_);
int copy (msg_t &src_);
void *data ();
size_t size () const;
unsigned char flags () const;
void set_flags (unsigned char flags_);
void reset_flags (unsigned char flags_);
metadata_t *metadata () const;
void set_metadata (metadata_t *metadata_);
void reset_metadata ();
bool is_routing_id () const;
bool is_credential () const;
bool is_delimiter () const;
bool is_join () const;
bool is_leave () const;
bool is_ping () const;
bool is_pong () const;
bool is_close_cmd () const;
// These are called on each message received by the session_base class,
// so get them inlined to avoid the overhead of 2 function calls per msg
bool is_subscribe () const
{
return (_u.base.flags & CMD_TYPE_MASK) == subscribe;
}
bool is_cancel () const
{
return (_u.base.flags & CMD_TYPE_MASK) == cancel;
}
size_t command_body_size () const;
void *command_body ();
bool is_vsm () const;
bool is_cmsg () const;
bool is_lmsg () const;
bool is_zcmsg () const;
uint32_t get_routing_id () const;
int set_routing_id (uint32_t routing_id_);
int reset_routing_id ();
const char *group () const;
int set_group (const char *group_);
int set_group (const char *, size_t length_);
// After calling this function you can copy the message in POD-style
// refs_ times. No need to call copy.
void add_refs (int refs_);
// Removes references previously added by add_refs. If the number of
// references drops to 0, the message is closed and false is returned.
bool rm_refs (int refs_);
void shrink (size_t new_size_);
// Size in bytes of the largest message that is still copied around
// rather than being reference-counted.
enum
{
msg_t_size = 64
};
enum
{
max_vsm_size =
msg_t_size - (sizeof (metadata_t *) + 3 + 16 + sizeof (uint32_t))
};
enum
{
ping_cmd_name_size = 5, // 4PING
cancel_cmd_name_size = 7, // 6CANCEL
sub_cmd_name_size = 10 // 9SUBSCRIBE
};
private:
zmq::atomic_counter_t *refcnt ();
// Different message types.
enum type_t
{
type_min = 101,
// VSM messages store the content in the message itself
type_vsm = 101,
// LMSG messages store the content in malloc-ed memory
type_lmsg = 102,
// Delimiter messages are used in envelopes
type_delimiter = 103,
// CMSG messages point to constant data
type_cmsg = 104,
// zero-copy LMSG message for v2_decoder
type_zclmsg = 105,
// Join message for radio_dish
type_join = 106,
// Leave message for radio_dish
type_leave = 107,
type_max = 107
};
enum group_type_t
{
group_type_short,
group_type_long
};
struct long_group_t
{
char group[ZMQ_GROUP_MAX_LENGTH + 1];
atomic_counter_t refcnt;
};
union group_t
{
unsigned char type;
struct
{
unsigned char type;
char group[15];
} sgroup;
struct
{
unsigned char type;
long_group_t *content;
} lgroup;
};
// Note that fields shared between different message types are not
// moved to the parent class (msg_t). This way we get tighter packing
// of the data. Shared fields can be accessed via 'base' member of
// the union.
union
{
struct
{
metadata_t *metadata;
unsigned char unused[msg_t_size
- (sizeof (metadata_t *) + 2
+ sizeof (uint32_t) + sizeof (group_t))];
unsigned char type;
unsigned char flags;
uint32_t routing_id;
group_t group;
} base;
struct
{
metadata_t *metadata;
unsigned char data[max_vsm_size];
unsigned char size;
unsigned char type;
unsigned char flags;
uint32_t routing_id;
group_t group;
} vsm;
struct
{
metadata_t *metadata;
content_t *content;
unsigned char
unused[msg_t_size
- (sizeof (metadata_t *) + sizeof (content_t *) + 2
+ sizeof (uint32_t) + sizeof (group_t))];
unsigned char type;
unsigned char flags;
uint32_t routing_id;
group_t group;
} lmsg;
struct
{
metadata_t *metadata;
content_t *content;
unsigned char
unused[msg_t_size
- (sizeof (metadata_t *) + sizeof (content_t *) + 2
+ sizeof (uint32_t) + sizeof (group_t))];
unsigned char type;
unsigned char flags;
uint32_t routing_id;
group_t group;
} zclmsg;
struct
{
metadata_t *metadata;
void *data;
size_t size;
unsigned char unused[msg_t_size
- (sizeof (metadata_t *) + sizeof (void *)
+ sizeof (size_t) + 2 + sizeof (uint32_t)
+ sizeof (group_t))];
unsigned char type;
unsigned char flags;
uint32_t routing_id;
group_t group;
} cmsg;
struct
{
metadata_t *metadata;
unsigned char unused[msg_t_size
- (sizeof (metadata_t *) + 2
+ sizeof (uint32_t) + sizeof (group_t))];
unsigned char type;
unsigned char flags;
uint32_t routing_id;
group_t group;
} delimiter;
} _u;
};
inline int close_and_return (zmq::msg_t *msg_, int echo_)
{
// Since we abort on close failure we preserve errno for success case.
const int err = errno;
const int rc = msg_->close ();
errno_assert (rc == 0);
errno = err;
return echo_;
}
inline int close_and_return (zmq::msg_t msg_[], int count_, int echo_)
{
for (int i = 0; i < count_; i++)
close_and_return (&msg_[i], 0);
return echo_;
}
}
#endif
| sophomore_public/libzmq | src/msg.hpp | C++ | gpl-3.0 | 9,202 |
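// Illustrative sketch (not part of libzmq): the size arithmetic behind the
// msg_t layout above, using the same typedef trick the implementation uses for
// its own compile-time check. On a typical LP64 build the inline (VSM) payload
// is 64 - (8 [metadata ptr] + 3 [size+type+flags] + 16 [group_t]
// + 4 [routing id]) = 33 bytes.
#include <stddef.h>
#include "msg.hpp"

//  The union members are padded so the whole message is exactly msg_t_size
//  (64) bytes; if that ever breaks, this typedef fails to compile.
typedef char msg_layout_check_sketch
  [2 * (sizeof (zmq::msg_t) == static_cast<size_t> (zmq::msg_t::msg_t_size))
   - 1];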
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include "mtrie.hpp"
#include "generic_mtrie_impl.hpp"
namespace zmq
{
template class generic_mtrie_t<pipe_t>;
}
| sophomore_public/libzmq | src/mtrie.cpp | C++ | gpl-3.0 | 181 |
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_MTRIE_HPP_INCLUDED__
#define __ZMQ_MTRIE_HPP_INCLUDED__
#include "generic_mtrie.hpp"
#if __cplusplus >= 201103L || (defined(_MSC_VER) && _MSC_VER > 1600)
#define ZMQ_HAS_EXTERN_TEMPLATE 1
#else
#define ZMQ_HAS_EXTERN_TEMPLATE 0
#endif
namespace zmq
{
class pipe_t;
#if ZMQ_HAS_EXTERN_TEMPLATE
extern template class generic_mtrie_t<pipe_t>;
#endif
typedef generic_mtrie_t<pipe_t> mtrie_t;
}
#endif
| sophomore_public/libzmq | src/mtrie.hpp | C++ | gpl-3.0 | 457 |
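// Illustrative sketch (not part of libzmq): the extern-template idiom that
// mtrie.hpp / mtrie.cpp above use for generic_mtrie_t<pipe_t>, reduced to a
// self-contained example with hypothetical names. One translation unit owns
// the instantiation; every other includer only sees the declaration and skips
// re-instantiating the template, which saves compile time.

//  --- would live in a header ---
template <typename T> struct box_sketch_t
{
    T value;
    T get () const { return value; }
};
#if __cplusplus >= 201103L
extern template struct box_sketch_t<int>; //  declared here for all includers
#endif

//  --- would live in exactly one .cpp file ---
template struct box_sketch_t<int>; //  instantiated once, here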
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_MUTEX_HPP_INCLUDED__
#define __ZMQ_MUTEX_HPP_INCLUDED__
#include "err.hpp"
#include "macros.hpp"
// Mutex class encapsulates OS mutex in a platform-independent way.
#if defined(ZMQ_HAVE_WINDOWS) && !defined(ZMQ_USE_CV_IMPL_PTHREADS)
#include "windows.hpp"
namespace zmq
{
class mutex_t
{
public:
mutex_t () { InitializeCriticalSection (&_cs); }
~mutex_t () { DeleteCriticalSection (&_cs); }
void lock () { EnterCriticalSection (&_cs); }
bool try_lock () { return (TryEnterCriticalSection (&_cs)) ? true : false; }
void unlock () { LeaveCriticalSection (&_cs); }
CRITICAL_SECTION *get_cs () { return &_cs; }
private:
CRITICAL_SECTION _cs;
ZMQ_NON_COPYABLE_NOR_MOVABLE (mutex_t)
};
}
#elif defined ZMQ_HAVE_VXWORKS
#include <vxWorks.h>
#include <semLib.h>
namespace zmq
{
class mutex_t
{
public:
inline mutex_t ()
{
_semId =
semMCreate (SEM_Q_PRIORITY | SEM_INVERSION_SAFE | SEM_DELETE_SAFE);
}
inline ~mutex_t () { semDelete (_semId); }
inline void lock () { semTake (_semId, WAIT_FOREVER); }
inline bool try_lock ()
{
if (semTake (_semId, NO_WAIT) == OK) {
return true;
}
return false;
}
inline void unlock () { semGive (_semId); }
private:
SEM_ID _semId;
ZMQ_NON_COPYABLE_NOR_MOVABLE (mutex_t)
};
}
#else
#include <pthread.h>
namespace zmq
{
class mutex_t
{
public:
inline mutex_t ()
{
int rc = pthread_mutexattr_init (&_attr);
posix_assert (rc);
rc = pthread_mutexattr_settype (&_attr, PTHREAD_MUTEX_RECURSIVE);
posix_assert (rc);
rc = pthread_mutex_init (&_mutex, &_attr);
posix_assert (rc);
}
inline ~mutex_t ()
{
int rc = pthread_mutex_destroy (&_mutex);
posix_assert (rc);
rc = pthread_mutexattr_destroy (&_attr);
posix_assert (rc);
}
inline void lock ()
{
int rc = pthread_mutex_lock (&_mutex);
posix_assert (rc);
}
inline bool try_lock ()
{
int rc = pthread_mutex_trylock (&_mutex);
if (rc == EBUSY)
return false;
posix_assert (rc);
return true;
}
inline void unlock ()
{
int rc = pthread_mutex_unlock (&_mutex);
posix_assert (rc);
}
inline pthread_mutex_t *get_mutex () { return &_mutex; }
private:
pthread_mutex_t _mutex;
pthread_mutexattr_t _attr;
ZMQ_NON_COPYABLE_NOR_MOVABLE (mutex_t)
};
}
#endif
namespace zmq
{
struct scoped_lock_t
{
scoped_lock_t (mutex_t &mutex_) : _mutex (mutex_) { _mutex.lock (); }
~scoped_lock_t () { _mutex.unlock (); }
private:
mutex_t &_mutex;
ZMQ_NON_COPYABLE_NOR_MOVABLE (scoped_lock_t)
};
struct scoped_optional_lock_t
{
scoped_optional_lock_t (mutex_t *mutex_) : _mutex (mutex_)
{
if (_mutex != NULL)
_mutex->lock ();
}
~scoped_optional_lock_t ()
{
if (_mutex != NULL)
_mutex->unlock ();
}
private:
mutex_t *_mutex;
ZMQ_NON_COPYABLE_NOR_MOVABLE (scoped_optional_lock_t)
};
}
#endif
| sophomore_public/libzmq | src/mutex.hpp | C++ | gpl-3.0 | 3,195 |
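// Illustrative sketch (not part of libzmq): the RAII usage pattern the
// scoped_lock_t / scoped_optional_lock_t helpers above are meant for. The
// counter and function names are hypothetical; the sketch assumes the internal
// header is on the include path.
#include "mutex.hpp"

static int s_counter_sketch = 0;

static void bump_counter_sketch (zmq::mutex_t &sync_)
{
    //  Locks in the constructor, unlocks in the destructor, so every return
    //  path releases the mutex.
    zmq::scoped_lock_t locker (sync_);
    ++s_counter_sketch;
}

static void maybe_bump_counter_sketch (zmq::mutex_t *sync_or_null_)
{
    //  Same idea, but tolerates a NULL mutex for the single-threaded case.
    zmq::scoped_optional_lock_t locker (sync_or_null_);
    ++s_counter_sketch;
}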
#include "precompiled.hpp"
#include "platform.hpp"
#if defined ZMQ_HAVE_NORM
#include "norm_engine.hpp"
#ifdef ZMQ_USE_NORM_SOCKET_WRAPPER
#include "ip.hpp"
#endif
#include "session_base.hpp"
#include "v2_protocol.hpp"
#ifdef ZMQ_USE_NORM_SOCKET_WRAPPER
struct norm_wrapper_thread_args_t
{
NormDescriptor norm_descriptor;
SOCKET wrapper_write_fd;
NormInstanceHandle norm_instance_handle;
};
DWORD WINAPI normWrapperThread (LPVOID lpParam);
#endif
zmq::norm_engine_t::norm_engine_t (io_thread_t *parent_,
const options_t &options_) :
io_object_t (parent_),
zmq_session (NULL),
options (options_),
norm_instance (NORM_INSTANCE_INVALID),
norm_session (NORM_SESSION_INVALID),
is_sender (false),
is_receiver (false),
zmq_encoder (0),
norm_tx_stream (NORM_OBJECT_INVALID),
tx_first_msg (true),
tx_more_bit (false),
zmq_output_ready (false),
norm_tx_ready (false),
tx_index (0),
tx_len (0),
zmq_input_ready (false)
{
int rc = tx_msg.init ();
errno_assert (0 == rc);
}
zmq::norm_engine_t::~norm_engine_t ()
{
shutdown (); // in case it was not already called
}
int zmq::norm_engine_t::init (const char *network_, bool send, bool recv)
{
// Parse the "network_" address int "iface", "addr", and "port"
// norm endpoint format: [id,][<iface>;]<addr>:<port>
// First, look for optional local NormNodeId
// (default NORM_NODE_ANY causes NORM to use host IP addr for NormNodeId)
NormNodeId localId = NORM_NODE_ANY;
const char *ifacePtr = strchr (network_, ',');
if (NULL != ifacePtr) {
size_t idLen = ifacePtr - network_;
if (idLen > 31)
idLen = 31;
char idText[32];
strncpy (idText, network_, idLen);
idText[idLen] = '\0';
localId = (NormNodeId) atoi (idText);
ifacePtr++;
} else {
ifacePtr = network_;
}
// Second, look for optional multicast ifaceName
char ifaceName[256];
const char *addrPtr = strchr (ifacePtr, ';');
if (NULL != addrPtr) {
size_t ifaceLen = addrPtr - ifacePtr;
if (ifaceLen > 255)
ifaceLen = 255; // return error instead?
strncpy (ifaceName, ifacePtr, ifaceLen);
ifaceName[ifaceLen] = '\0';
ifacePtr = ifaceName;
addrPtr++;
} else {
addrPtr = ifacePtr;
ifacePtr = NULL;
}
// Finally, parse IP address and port number
const char *portPtr = strrchr (addrPtr, ':');
if (NULL == portPtr) {
errno = EINVAL;
return -1;
}
char addr[256];
size_t addrLen = portPtr - addrPtr;
if (addrLen > 255)
addrLen = 255;
strncpy (addr, addrPtr, addrLen);
addr[addrLen] = '\0';
portPtr++;
unsigned short portNumber = atoi (portPtr);
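//  Worked example (illustrative, not from the original source): how the code
//  above splits a hypothetical endpoint "2,eth0;224.1.2.3:5556":
//      localId    = 2             (optional NormNodeId before ',')
//      ifacePtr   = "eth0"        (optional multicast interface before ';')
//      addr       = "224.1.2.3"   (everything up to the last ':')
//      portNumber = 5556
//  For a plain "127.0.0.1:5556" the node id stays NORM_NODE_ANY and no
//  multicast interface is selected.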
if (NORM_INSTANCE_INVALID == norm_instance) {
if (NORM_INSTANCE_INVALID == (norm_instance = NormCreateInstance ())) {
// errno set by whatever caused NormCreateInstance() to fail
return -1;
}
}
// TBD - What do we use for our local NormNodeId?
// (for now we use automatic, IP addr based assignment or passed in 'id')
// a) Use ZMQ Identity somehow?
// b) Add function to use iface addr
// c) Randomize and implement a NORM session layer
// conflict detection/resolution protocol
norm_session = NormCreateSession (norm_instance, addr, portNumber, localId);
if (NORM_SESSION_INVALID == norm_session) {
int savedErrno = errno;
NormDestroyInstance (norm_instance);
norm_instance = NORM_INSTANCE_INVALID;
errno = savedErrno;
return -1;
}
// There are many other useful NORM options that could be applied here
if (!NormIsUnicastAddress (addr)) {
// These only apply for multicast sessions
NormSetTTL (norm_session, options.multicast_hops);
NormSetRxPortReuse (
norm_session,
true); // port reuse doesn't work for non-connected unicast
NormSetLoopback (norm_session,
true); // needed when multicast users on same machine
if (NULL != ifacePtr) {
// Note a bad interface may not be caught until sender or receiver start
// (Since sender/receiver is not yet started, this always succeeds here)
NormSetMulticastInterface (norm_session, ifacePtr);
}
}
if (NormIsUnicastAddress (addr) || options.norm_unicast_nacks) {
NormSetDefaultUnicastNack (norm_session, true);
}
// Set TOS but check TOS ECN bit for CCE modes
if ((options.norm_mode == ZMQ_NORM_CCE
|| options.norm_mode == ZMQ_NORM_CCE_ECNONLY)
&& (options.tos % 4 == 0)) {
// ECN Capable Transport not set, so set it
NormSetTOS (norm_session, options.tos + 1);
} else if ((options.norm_mode == ZMQ_NORM_CCE
|| options.norm_mode == ZMQ_NORM_CCE_ECNONLY)
&& (options.tos % 4 == 3)) {
// Congestion Experienced is an invalid setting, remove one of the bits
NormSetTOS (norm_session, options.tos - 1);
} else {
NormSetTOS (norm_session, options.tos);
}
if (recv) {
// The alternative NORM_SYNC_CURRENT here would provide "instant"
// receiver sync to the sender's _current_ message transmission.
// NORM_SYNC_STREAM tries to get everything the sender has cached/buffered
NormSetDefaultSyncPolicy (norm_session, NORM_SYNC_STREAM);
if (!NormStartReceiver (
norm_session, (unsigned long) options.norm_buffer_size * 1024)) {
// errno set by whatever failed
int savedErrno = errno;
NormDestroyInstance (norm_instance); // session gets closed, too
norm_session = NORM_SESSION_INVALID;
norm_instance = NORM_INSTANCE_INVALID;
errno = savedErrno;
return -1;
}
is_receiver = true;
}
if (send) {
// Handle invalid settings -- num_parity must be >= num_autoparity (which has a default of 0)
unsigned char numparity =
(options.norm_num_parity >= options.norm_num_autoparity
? options.norm_num_parity
: options.norm_num_autoparity);
// Handle invalid settings -- block size must be > effective num_parity (which is <255)
unsigned char blocksize =
(options.norm_block_size > numparity ? options.norm_block_size
: numparity + 1);
// Pick a random sender instance id (aka norm sender session id)
NormSessionId instanceId = NormGetRandomSessionId ();
// TBD - provide "options" for some NORM sender parameters
if (!NormStartSender (norm_session, instanceId,
(unsigned long) options.norm_buffer_size * 1024,
options.norm_segment_size, blocksize,
numparity)) {
// errno set by whatever failed
int savedErrno = errno;
NormDestroyInstance (norm_instance); // session gets closed, too
norm_session = NORM_SESSION_INVALID;
norm_instance = NORM_INSTANCE_INVALID;
errno = savedErrno;
return -1;
}
// Handle NORM mode
if (options.norm_mode == ZMQ_NORM_FIXED) {
NormSetTxRate (norm_session, (double) options.rate * 1000);
} else {
NormSetCongestionControl (norm_session, true);
if (options.norm_mode != ZMQ_NORM_CC) {
NormSetEcnSupport (
norm_session,
((options.norm_mode == ZMQ_NORM_CCE)
|| (options.norm_mode == ZMQ_NORM_CCE_ECNONLY)),
options.norm_mode == ZMQ_NORM_CCE_ECNONLY,
options.norm_mode == ZMQ_NORM_CCL);
}
}
if (options.norm_num_autoparity > 0) {
NormSetAutoParity (norm_session, options.norm_num_autoparity);
}
norm_tx_ready = true;
is_sender = true;
if (NORM_OBJECT_INVALID
== (norm_tx_stream = NormStreamOpen (
norm_session,
(unsigned long) options.norm_buffer_size * 1024))) {
// errno set by whatever failed
int savedErrno = errno;
NormDestroyInstance (norm_instance); // session gets closed, too
norm_session = NORM_SESSION_INVALID;
norm_instance = NORM_INSTANCE_INVALID;
errno = savedErrno;
return -1;
}
// NORM Stream options
NormStreamSetPushEnable (norm_tx_stream, options.norm_push_enable);
}
//NormSetMessageTrace(norm_session, true);
//NormSetDebugLevel(3);
//NormOpenDebugLog(norm_instance, "normLog.txt");
return 0; // no error
} // end zmq::norm_engine_t::init()
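// Example (illustrative sketch, not part of this file's build): how an
// application typically reaches init() above. A PUB socket connected to a
// "norm://" endpoint creates this engine with send=true; a SUB socket with
// recv=true. ZMQ_RATE and ZMQ_TOS are stable socket options consumed above;
// the ZMQ_NORM_* names belong to the draft API, and the multicast address
// and port below are placeholders, not values required by this code.
//
//   void *ctx = zmq_ctx_new ();
//   void *pub = zmq_socket (ctx, ZMQ_PUB);
//
//   int mode = ZMQ_NORM_FIXED; // fixed-rate sender, handled in init() above
//   zmq_setsockopt (pub, ZMQ_NORM_MODE, &mode, sizeof mode);
//   int rate = 1000; // in kbps; becomes NormSetTxRate (rate * 1000)
//   zmq_setsockopt (pub, ZMQ_RATE, &rate, sizeof rate);
//   int tos = 0x20; // DSCP/ECN byte, applied via NormSetTOS () above
//   zmq_setsockopt (pub, ZMQ_TOS, &tos, sizeof tos);
//
//   zmq_connect (pub, "norm://224.1.2.3:5556");
//   zmq_send (pub, "hello", 5, 0);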
void zmq::norm_engine_t::shutdown ()
{
// TBD - implement a more graceful shutdown option
if (is_receiver) {
NormStopReceiver (norm_session);
// delete any active NormRxStreamState
rx_pending_list.Destroy ();
rx_ready_list.Destroy ();
msg_ready_list.Destroy ();
is_receiver = false;
}
if (is_sender) {
NormStopSender (norm_session);
is_sender = false;
}
if (NORM_SESSION_INVALID != norm_session) {
NormDestroySession (norm_session);
norm_session = NORM_SESSION_INVALID;
}
if (NORM_INSTANCE_INVALID != norm_instance) {
NormStopInstance (norm_instance);
NormDestroyInstance (norm_instance);
norm_instance = NORM_INSTANCE_INVALID;
}
} // end zmq::norm_engine_t::shutdown()
void zmq::norm_engine_t::plug (io_thread_t *io_thread_,
session_base_t *session_)
{
#ifdef ZMQ_USE_NORM_SOCKET_WRAPPER
norm_wrapper_thread_args_t *threadArgs = new norm_wrapper_thread_args_t;
int rc = make_fdpair (&wrapper_read_fd, &threadArgs->wrapper_write_fd);
errno_assert (rc != -1);
threadArgs->norm_descriptor = NormGetDescriptor (norm_instance);
threadArgs->norm_instance_handle = norm_instance;
norm_descriptor_handle = add_fd (wrapper_read_fd);
#else
fd_t normDescriptor = NormGetDescriptor (norm_instance);
norm_descriptor_handle = add_fd (normDescriptor);
#endif
// Set POLLIN for notification of pending NormEvents
set_pollin (norm_descriptor_handle);
// TBD - we may assign the NORM engine to an io_thread in the future???
zmq_session = session_;
if (is_sender)
zmq_output_ready = true;
if (is_receiver)
zmq_input_ready = true;
if (is_sender)
send_data ();
#ifdef ZMQ_USE_NORM_SOCKET_WRAPPER
wrapper_thread_handle = CreateThread (NULL, 0, normWrapperThread,
threadArgs, 0, &wrapper_thread_id);
#endif
} // end zmq::norm_engine_t::plug()
void zmq::norm_engine_t::unplug ()
{
rm_fd (norm_descriptor_handle);
#ifdef ZMQ_USE_NORM_SOCKET_WRAPPER
PostThreadMessage (wrapper_thread_id, WM_QUIT, (WPARAM) NULL,
(LPARAM) NULL);
WaitForSingleObject (wrapper_thread_handle, INFINITE);
DWORD exitCode;
GetExitCodeThread (wrapper_thread_handle, &exitCode);
zmq_assert (exitCode != -1);
int rc = closesocket (wrapper_read_fd);
errno_assert (rc != -1);
#endif
zmq_session = NULL;
} // end zmq::norm_engine_t::unplug()
void zmq::norm_engine_t::terminate ()
{
unplug ();
shutdown ();
delete this;
}
void zmq::norm_engine_t::restart_output ()
{
// There's new message data available from the session
zmq_output_ready = true;
if (norm_tx_ready)
send_data ();
} // end zmq::norm_engine_t::restart_output()
void zmq::norm_engine_t::send_data ()
{
    // Here we write as much as is available or as much as we can
while (zmq_output_ready && norm_tx_ready) {
if (0 == tx_len) {
// Our tx_buffer needs data to send
// Get more data from encoder
size_t space = BUFFER_SIZE;
unsigned char *bufPtr = (unsigned char *) tx_buffer;
tx_len = zmq_encoder.encode (&bufPtr, space);
if (0 == tx_len) {
if (tx_first_msg) {
// We don't need to mark eom/flush until a message is sent
tx_first_msg = false;
} else {
// A prior message was completely written to stream, so
// mark end-of-message and possibly flush (to force packet transmission,
// even if it's not a full segment so message gets delivered quickly)
// NormStreamMarkEom(norm_tx_stream); // the flush below marks eom
// Note NORM_FLUSH_ACTIVE makes NORM fairly chatty for low duty cycle messaging
// but makes sure content is delivered quickly. Positive acknowledgements
// with flush override would make NORM more succinct here
NormStreamFlush (norm_tx_stream, true, NORM_FLUSH_ACTIVE);
}
// Need to pull and load a new message to send
if (-1 == zmq_session->pull_msg (&tx_msg)) {
// We need to wait for "restart_output()" to be called by ZMQ
zmq_output_ready = false;
break;
}
zmq_encoder.load_msg (&tx_msg);
// Should we write message size header for NORM to use? Or expect NORM
// receiver to decode ZMQ message framing format(s)?
// OK - we need to use a byte to denote when the ZMQ frame is the _first_
// frame of a message so it can be decoded properly when a receiver
                // 'syncs' mid-stream. We key off the state of the 'more_flag';
                // i.e., if more_flag _was_ false previously, this is the first
// frame of a ZMQ message.
if (tx_more_bit)
tx_buffer[0] =
(char) 0xff; // this is not first frame of message
else
tx_buffer[0] = 0x00; // this is first frame of message
tx_more_bit = (0 != (tx_msg.flags () & msg_t::more));
                // Go ahead and get a first chunk of the message
bufPtr++;
space--;
tx_len = 1 + zmq_encoder.encode (&bufPtr, space);
tx_index = 0;
}
}
// Do we have data in our tx_buffer pending
if (tx_index < tx_len) {
// We have data in our tx_buffer to send, so write it to the stream
tx_index += NormStreamWrite (norm_tx_stream, tx_buffer + tx_index,
tx_len - tx_index);
if (tx_index < tx_len) {
// NORM stream buffer full, wait for NORM_TX_QUEUE_VACANCY
norm_tx_ready = false;
break;
}
tx_len = 0; // all buffered data was written
}
} // end while (zmq_output_ready && norm_tx_ready)
} // end zmq::norm_engine_t::send_data()
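// Wire layout produced by send_data() above (illustrative): each v2-encoded
// ZMQ frame is preceded by one flag byte so that a receiver syncing
// mid-stream (via NormStreamSeekMsgStart) can tell whether the next frame
// starts a new ZMQ message. A two-frame message followed by a single-frame
// message therefore appears on the NORM stream, conceptually, as
//
//   0x00 | frame 1 (MORE set) | 0xff | frame 2 | 0x00 | frame 3
//
// where 0x00 marks "first frame of a message" and 0xff a continuation
// frame; recv_data()/Decode() below strip this byte again (skip_norm_sync).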
void zmq::norm_engine_t::in_event ()
{
// This means a NormEvent is pending, so call NormGetNextEvent() and handle
NormEvent event;
#ifdef ZMQ_USE_NORM_SOCKET_WRAPPER
int rc = recv (wrapper_read_fd, reinterpret_cast<char *> (&event),
sizeof (event), 0);
errno_assert (rc == sizeof (event));
#else
if (!NormGetNextEvent (norm_instance, &event)) {
// NORM has died before we unplugged?!
zmq_assert (false);
return;
}
#endif
switch (event.type) {
case NORM_TX_QUEUE_VACANCY:
case NORM_TX_QUEUE_EMPTY:
if (!norm_tx_ready) {
norm_tx_ready = true;
send_data ();
}
break;
case NORM_RX_OBJECT_NEW:
//break;
case NORM_RX_OBJECT_UPDATED:
recv_data (event.object);
break;
case NORM_RX_OBJECT_ABORTED: {
NormRxStreamState *rxState =
(NormRxStreamState *) NormObjectGetUserData (event.object);
if (NULL != rxState) {
// Remove the state from the list it's in
// This is now unnecessary since deletion takes care of list removal
// but in the interest of being clear ...
NormRxStreamState::List *list = rxState->AccessList ();
if (NULL != list)
list->Remove (*rxState);
}
delete rxState;
break;
}
case NORM_REMOTE_SENDER_INACTIVE:
// Here we free resources used for this formerly active sender.
// Note w/ NORM_SYNC_STREAM, if sender reactivates, we may
// get some messages delivered twice. NORM_SYNC_CURRENT would
// mitigate that but might miss data at startup. Always tradeoffs.
// Instead of immediately deleting, we could instead initiate a
// user configurable timeout here to wait some amount of time
// after this event to declare the remote sender truly dead
// and delete its state???
NormNodeDelete (event.sender);
break;
default:
// We ignore some NORM events
break;
}
} // zmq::norm_engine_t::in_event()
bool zmq::norm_engine_t::restart_input ()
{
// TBD - should we check/assert that zmq_input_ready was false???
zmq_input_ready = true;
// Process any pending received messages
if (!msg_ready_list.IsEmpty ())
recv_data (NORM_OBJECT_INVALID);
return true;
} // end zmq::norm_engine_t::restart_input()
void zmq::norm_engine_t::recv_data (NormObjectHandle object)
{
if (NORM_OBJECT_INVALID != object) {
// Call result of NORM_RX_OBJECT_UPDATED notification
// This is a rx_ready indication for a new or existing rx stream
// First, determine if this is a stream we already know
zmq_assert (NORM_OBJECT_STREAM == NormObjectGetType (object));
// Since there can be multiple senders (publishers), we keep
// state for each separate rx stream.
NormRxStreamState *rxState =
(NormRxStreamState *) NormObjectGetUserData (object);
if (NULL == rxState) {
// This is a new stream, so create rxState with zmq decoder, etc
rxState = new (std::nothrow)
NormRxStreamState (object, options.maxmsgsize, options.zero_copy,
options.in_batch_size);
errno_assert (rxState);
if (!rxState->Init ()) {
errno_assert (false);
delete rxState;
return;
}
NormObjectSetUserData (object, rxState);
} else if (!rxState->IsRxReady ()) {
// Existing non-ready stream, so remove from pending
// list to be promoted to rx_ready_list ...
rx_pending_list.Remove (*rxState);
}
if (!rxState->IsRxReady ()) {
// TBD - prepend up front for immediate service?
rxState->SetRxReady (true);
rx_ready_list.Append (*rxState);
}
}
// This loop repeats until we've read all data available from "rx ready" inbound streams
// and pushed any accumulated messages we can up to the zmq session.
while (!rx_ready_list.IsEmpty ()
|| (zmq_input_ready && !msg_ready_list.IsEmpty ())) {
// Iterate through our rx_ready streams, reading data into the decoder
// (This services incoming "rx ready" streams in a round-robin fashion)
NormRxStreamState::List::Iterator iterator (rx_ready_list);
NormRxStreamState *rxState;
while (NULL != (rxState = iterator.GetNextItem ())) {
switch (rxState->Decode ()) {
case 1: // msg completed
// Complete message decoded, move this stream to msg_ready_list
// to push the message up to the session below. Note the stream
// will be returned to the "rx_ready_list" after that's done
rx_ready_list.Remove (*rxState);
msg_ready_list.Append (*rxState);
continue;
case -1: // decoding error (shouldn't happen w/ NORM, but ...)
// We need to re-sync this stream (decoder buffer was reset)
rxState->SetSync (false);
break;
default: // 0 - need more data
break;
}
// Get more data from this stream
NormObjectHandle stream = rxState->GetStreamHandle ();
// First, make sure we're in sync ...
while (!rxState->InSync ()) {
// seek NORM message start
if (!NormStreamSeekMsgStart (stream)) {
// Need to wait for more data
break;
}
                // read the message 'flag' byte to see if this frame starts a new message
char syncFlag;
unsigned int numBytes = 1;
if (!NormStreamRead (stream, &syncFlag, &numBytes)) {
// broken stream (can happen on late-joining subscriber)
continue;
}
if (0 == numBytes) {
// This probably shouldn't happen either since we found msg start
// Need to wait for more data
break;
}
if (0 == syncFlag)
rxState->SetSync (true);
// else keep seeking ...
} // end while(!rxState->InSync())
if (!rxState->InSync ()) {
// Need more data for this stream, so remove from "rx ready"
// list and iterate to next "rx ready" stream
rxState->SetRxReady (false);
// Move from rx_ready_list to rx_pending_list
rx_ready_list.Remove (*rxState);
rx_pending_list.Append (*rxState);
continue;
}
// Now we're actually ready to read data from the NORM stream to the zmq_decoder
// the underlying zmq_decoder->get_buffer() call sets how much is needed.
unsigned int numBytes = rxState->GetBytesNeeded ();
if (!NormStreamRead (stream, rxState->AccessBuffer (), &numBytes)) {
// broken NORM stream, so re-sync
rxState->Init (); // TBD - check result
// This will retry syncing, and getting data from this stream
// since we don't increment the "it" iterator
continue;
}
rxState->IncrementBufferCount (numBytes);
if (0 == numBytes) {
// All the data available has been read
// Need to wait for NORM_RX_OBJECT_UPDATED for this stream
rxState->SetRxReady (false);
// Move from rx_ready_list to rx_pending_list
rx_ready_list.Remove (*rxState);
rx_pending_list.Append (*rxState);
}
} // end while(NULL != (rxState = iterator.GetNextItem()))
if (zmq_input_ready) {
// At this point, we've made a pass through the "rx_ready" stream list
            // Now make a pass through the "msg_ready" list (if the zmq session
            // is ready for more input). This may possibly return streams back to
// the "rx ready" stream list after their pending message is handled
NormRxStreamState::List::Iterator iterator (msg_ready_list);
NormRxStreamState *rxState;
while (NULL != (rxState = iterator.GetNextItem ())) {
msg_t *msg = rxState->AccessMsg ();
int rc = zmq_session->push_msg (msg);
if (-1 == rc) {
if (EAGAIN == errno) {
// need to wait until session calls "restart_input()"
zmq_input_ready = false;
break;
} else {
// session rejected message?
// TBD - handle this better
zmq_assert (false);
}
}
// else message was accepted.
msg_ready_list.Remove (*rxState);
            // Move back to "rx_ready" list to read more data
            if (rxState->IsRxReady ())
                rx_ready_list.Append (*rxState);
            else // Move back to "rx_pending" list until NORM_RX_OBJECT_UPDATED
                rx_pending_list.Append (*rxState);
} // end while(NULL != (rxState = iterator.GetNextItem()))
} // end if (zmq_input_ready)
    } // end while (!rx_ready_list.IsEmpty () || (zmq_input_ready && !msg_ready_list.IsEmpty ()))
// Alert zmq of the messages we have pushed up
zmq_session->flush ();
} // end zmq::norm_engine_t::recv_data()
zmq::norm_engine_t::NormRxStreamState::NormRxStreamState (
NormObjectHandle normStream,
int64_t maxMsgSize,
bool zeroCopy,
int inBatchSize) :
norm_stream (normStream),
max_msg_size (maxMsgSize),
zero_copy (zeroCopy),
in_batch_size (inBatchSize),
in_sync (false),
rx_ready (false),
zmq_decoder (NULL),
skip_norm_sync (false),
buffer_ptr (NULL),
buffer_size (0),
buffer_count (0),
prev (NULL),
next (NULL),
list (NULL)
{
}
zmq::norm_engine_t::NormRxStreamState::~NormRxStreamState ()
{
if (NULL != zmq_decoder) {
delete zmq_decoder;
zmq_decoder = NULL;
}
if (NULL != list) {
list->Remove (*this);
list = NULL;
}
}
bool zmq::norm_engine_t::NormRxStreamState::Init ()
{
in_sync = false;
skip_norm_sync = false;
if (NULL != zmq_decoder)
delete zmq_decoder;
zmq_decoder =
new (std::nothrow) v2_decoder_t (in_batch_size, max_msg_size, zero_copy);
alloc_assert (zmq_decoder);
if (NULL != zmq_decoder) {
buffer_count = 0;
buffer_size = 0;
zmq_decoder->get_buffer (&buffer_ptr, &buffer_size);
return true;
} else {
return false;
}
} // end zmq::norm_engine_t::NormRxStreamState::Init()
// This decodes any pending data sitting in our stream decoder buffer
// It returns 1 upon message completion, -1 on error, 0 if more data is needed
int zmq::norm_engine_t::NormRxStreamState::Decode ()
{
// If we have pending bytes to decode, process those first
while (buffer_count > 0) {
// There's pending data for the decoder to decode
size_t processed = 0;
        // This is a bit of a kludgy approach used to weed
// out the NORM ZMQ message transport "syncFlag" byte
// from the ZMQ message stream being decoded (but it works!)
if (skip_norm_sync) {
buffer_ptr++;
buffer_count--;
skip_norm_sync = false;
}
int rc = zmq_decoder->decode (buffer_ptr, buffer_count, processed);
buffer_ptr += processed;
buffer_count -= processed;
switch (rc) {
case 1:
// msg completed
if (0 == buffer_count) {
buffer_size = 0;
zmq_decoder->get_buffer (&buffer_ptr, &buffer_size);
}
skip_norm_sync = true;
return 1;
case -1:
// decoder error (reset decoder and state variables)
in_sync = false;
skip_norm_sync = false; // will get consumed by norm sync check
Init ();
break;
case 0:
// need more data, keep decoding until buffer exhausted
break;
}
}
// Reset buffer pointer/count for next read
buffer_count = 0;
buffer_size = 0;
zmq_decoder->get_buffer (&buffer_ptr, &buffer_size);
return 0; // need more data
} // end zmq::norm_engine_t::NormRxStreamState::Decode()
zmq::norm_engine_t::NormRxStreamState::List::List () : head (NULL), tail (NULL)
{
}
zmq::norm_engine_t::NormRxStreamState::List::~List ()
{
Destroy ();
}
void zmq::norm_engine_t::NormRxStreamState::List::Destroy ()
{
NormRxStreamState *item = head;
while (NULL != item) {
Remove (*item);
delete item;
item = head;
}
} // end zmq::norm_engine_t::NormRxStreamState::List::Destroy()
void zmq::norm_engine_t::NormRxStreamState::List::Append (
NormRxStreamState &item)
{
item.prev = tail;
if (NULL != tail)
tail->next = &item;
else
head = &item;
item.next = NULL;
tail = &item;
item.list = this;
} // end zmq::norm_engine_t::NormRxStreamState::List::Append()
void zmq::norm_engine_t::NormRxStreamState::List::Remove (
NormRxStreamState &item)
{
if (NULL != item.prev)
item.prev->next = item.next;
else
head = item.next;
if (NULL != item.next)
item.next->prev = item.prev;
else
tail = item.prev;
item.prev = item.next = NULL;
item.list = NULL;
} // end zmq::norm_engine_t::NormRxStreamState::List::Remove()
zmq::norm_engine_t::NormRxStreamState::List::Iterator::Iterator (
const List &list) :
next_item (list.head)
{
}
zmq::norm_engine_t::NormRxStreamState *
zmq::norm_engine_t::NormRxStreamState::List::Iterator::GetNextItem ()
{
NormRxStreamState *nextItem = next_item;
if (NULL != nextItem)
next_item = nextItem->next;
return nextItem;
} // end zmq::norm_engine_t::NormRxStreamState::List::Iterator::GetNextItem()
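// Usage sketch for the intrusive list above (illustrative, not compiled):
// items carry their own prev/next/list pointers, so Append()/Remove() never
// allocate, and Remove() may safely be applied to the item just returned by
// the iterator because GetNextItem() has already cached its successor --
// exactly what recv_data() relies on when it shuffles streams between
// rx_ready_list, rx_pending_list and msg_ready_list.
//
//   NormRxStreamState::List list;
//   list.Append (*state_a);
//   list.Append (*state_b);
//   NormRxStreamState::List::Iterator it (list);
//   NormRxStreamState *s;
//   while (NULL != (s = it.GetNextItem ()))
//       if (is_done (s))      // 'is_done' is a hypothetical predicate
//           list.Remove (*s); // safe: the iterator already points past *s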
const zmq::endpoint_uri_pair_t &zmq::norm_engine_t::get_endpoint () const
{
return _empty_endpoint;
}
#ifdef ZMQ_USE_NORM_SOCKET_WRAPPER
#include <iostream>
DWORD WINAPI normWrapperThread (LPVOID lpParam)
{
norm_wrapper_thread_args_t *norm_wrapper_thread_args =
(norm_wrapper_thread_args_t *) lpParam;
NormEvent message;
DWORD waitRc;
DWORD exitCode = 0;
int rc;
for (;;) {
// wait for norm event or message
waitRc = MsgWaitForMultipleObjectsEx (
1, &norm_wrapper_thread_args->norm_descriptor, INFINITE,
QS_ALLPOSTMESSAGE, 0);
// Check if norm event
if (waitRc == WAIT_OBJECT_0) {
// Process norm event
if (!NormGetNextEvent (
norm_wrapper_thread_args->norm_instance_handle, &message)) {
exitCode = -1;
break;
}
rc =
send (norm_wrapper_thread_args->wrapper_write_fd,
reinterpret_cast<char *> (&message), sizeof (message), 0);
errno_assert (rc != -1);
// Check if message
} else if (waitRc == WAIT_OBJECT_0 + 1) {
// Exit if WM_QUIT is received otherwise do nothing
MSG message;
GetMessage (&message, 0, 0, 0);
if (message.message == WM_QUIT) {
break;
} else {
// do nothing
}
// Otherwise an error occurred
} else {
exitCode = -1;
break;
}
}
// Free resources
rc = closesocket (norm_wrapper_thread_args->wrapper_write_fd);
free (norm_wrapper_thread_args);
errno_assert (rc != -1);
return exitCode;
}
#endif
#endif // ZMQ_HAVE_NORM
|
sophomore_public/libzmq
|
src/norm_engine.cpp
|
C++
|
gpl-3.0
| 31,759 |
#ifndef __ZMQ_NORM_ENGINE_HPP_INCLUDED__
#define __ZMQ_NORM_ENGINE_HPP_INCLUDED__
#if defined ZMQ_HAVE_NORM
#if defined(ZMQ_HAVE_WINDOWS) && defined(ZMQ_IOTHREAD_POLLER_USE_EPOLL)
#define ZMQ_USE_NORM_SOCKET_WRAPPER
#endif
#include "io_object.hpp"
#include "i_engine.hpp"
#include "options.hpp"
#include "v2_decoder.hpp"
#include "v2_encoder.hpp"
#include <normApi.h>
namespace zmq
{
class io_thread_t;
class msg_t;
class session_base_t;
class norm_engine_t ZMQ_FINAL : public io_object_t, public i_engine
{
public:
norm_engine_t (zmq::io_thread_t *parent_, const options_t &options_);
~norm_engine_t () ZMQ_FINAL;
// create NORM instance, session, etc
int init (const char *network_, bool send, bool recv);
void shutdown ();
bool has_handshake_stage () ZMQ_FINAL { return false; };
// i_engine interface implementation.
// Plug the engine to the session.
void plug (zmq::io_thread_t *io_thread_,
class session_base_t *session_) ZMQ_FINAL;
// Terminate and deallocate the engine. Note that 'detached'
// events are not fired on termination.
void terminate () ZMQ_FINAL;
// This method is called by the session to signalise that more
// messages can be written to the pipe.
bool restart_input () ZMQ_FINAL;
// This method is called by the session to signalise that there
// are messages to send available.
void restart_output () ZMQ_FINAL;
void zap_msg_available () ZMQ_FINAL {}
const endpoint_uri_pair_t &get_endpoint () const ZMQ_FINAL;
// i_poll_events interface implementation.
// (we only need in_event() for NormEvent notification)
// (i.e., don't have any output events or timers (yet))
void in_event ();
private:
void unplug ();
void send_data ();
void recv_data (NormObjectHandle stream);
enum
{
BUFFER_SIZE = 2048
};
// Used to keep track of streams from multiple senders
class NormRxStreamState
{
public:
NormRxStreamState (NormObjectHandle normStream,
int64_t maxMsgSize,
bool zeroCopy,
int inBatchSize);
~NormRxStreamState ();
NormObjectHandle GetStreamHandle () const { return norm_stream; }
bool Init ();
void SetRxReady (bool state) { rx_ready = state; }
bool IsRxReady () const { return rx_ready; }
void SetSync (bool state) { in_sync = state; }
bool InSync () const { return in_sync; }
// These are used to feed data to decoder
// and its underlying "msg" buffer
char *AccessBuffer () { return (char *) (buffer_ptr + buffer_count); }
size_t GetBytesNeeded () const { return buffer_size - buffer_count; }
void IncrementBufferCount (size_t count) { buffer_count += count; }
msg_t *AccessMsg () { return zmq_decoder->msg (); }
// This invokes the decoder "decode" method
// returning 0 if more data is needed,
        // 1 if the message is complete. If an error
        // occurs, the 'sync' is dropped and the
        // decoder is re-initialized.
int Decode ();
class List
{
public:
List ();
~List ();
void Append (NormRxStreamState &item);
void Remove (NormRxStreamState &item);
bool IsEmpty () const { return NULL == head; }
void Destroy ();
class Iterator
{
public:
Iterator (const List &list);
NormRxStreamState *GetNextItem ();
private:
NormRxStreamState *next_item;
};
friend class Iterator;
private:
NormRxStreamState *head;
NormRxStreamState *tail;
}; // end class zmq::norm_engine_t::NormRxStreamState::List
friend class List;
List *AccessList () { return list; }
private:
NormObjectHandle norm_stream;
int64_t max_msg_size;
bool zero_copy;
int in_batch_size;
bool in_sync;
bool rx_ready;
v2_decoder_t *zmq_decoder;
bool skip_norm_sync;
unsigned char *buffer_ptr;
size_t buffer_size;
size_t buffer_count;
NormRxStreamState *prev;
NormRxStreamState *next;
NormRxStreamState::List *list;
}; // end class zmq::norm_engine_t::NormRxStreamState
const endpoint_uri_pair_t _empty_endpoint;
session_base_t *zmq_session;
options_t options;
NormInstanceHandle norm_instance;
handle_t norm_descriptor_handle;
NormSessionHandle norm_session;
bool is_sender;
bool is_receiver;
// Sender state
msg_t tx_msg;
v2_encoder_t zmq_encoder; // for tx messages (we use v2 for now)
NormObjectHandle norm_tx_stream;
bool tx_first_msg;
bool tx_more_bit;
bool zmq_output_ready; // zmq has msg(s) to send
bool norm_tx_ready; // norm has tx queue vacancy
// TBD - maybe don't need buffer if can access zmq message buffer directly?
char tx_buffer[BUFFER_SIZE];
unsigned int tx_index;
unsigned int tx_len;
// Receiver state
// Lists of norm rx streams from remote senders
bool zmq_input_ready; // zmq ready to receive msg(s)
NormRxStreamState::List
rx_pending_list; // rx streams waiting for data reception
NormRxStreamState::List
rx_ready_list; // rx streams ready for NormStreamRead()
NormRxStreamState::List
msg_ready_list; // rx streams w/ msg ready for push to zmq
#ifdef ZMQ_USE_NORM_SOCKET_WRAPPER
fd_t
wrapper_read_fd; // filedescriptor used to read norm events through the wrapper
DWORD wrapper_thread_id;
HANDLE wrapper_thread_handle;
#endif
}; // end class norm_engine_t
}
#endif // ZMQ_HAVE_NORM
#endif // !__ZMQ_NORM_ENGINE_HPP_INCLUDED__
|
sophomore_public/libzmq
|
src/norm_engine.hpp
|
C++
|
gpl-3.0
| 5,925 |
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include <stddef.h>
#include <string.h>
#include <stdlib.h>
#include "err.hpp"
#include "msg.hpp"
#include "session_base.hpp"
#include "null_mechanism.hpp"
const char error_command_name[] = "\5ERROR";
const size_t error_command_name_len = sizeof (error_command_name) - 1;
const size_t error_reason_len_size = 1;
const char ready_command_name[] = "\5READY";
const size_t ready_command_name_len = sizeof (ready_command_name) - 1;
zmq::null_mechanism_t::null_mechanism_t (session_base_t *session_,
const std::string &peer_address_,
const options_t &options_) :
mechanism_base_t (session_, options_),
zap_client_t (session_, peer_address_, options_),
_ready_command_sent (false),
_error_command_sent (false),
_ready_command_received (false),
_error_command_received (false),
_zap_request_sent (false),
_zap_reply_received (false)
{
}
zmq::null_mechanism_t::~null_mechanism_t ()
{
}
int zmq::null_mechanism_t::next_handshake_command (msg_t *msg_)
{
if (_ready_command_sent || _error_command_sent) {
errno = EAGAIN;
return -1;
}
if (zap_required () && !_zap_reply_received) {
if (_zap_request_sent) {
errno = EAGAIN;
return -1;
}
// Given this is a backward-incompatible change, it's behind a socket
// option disabled by default.
int rc = session->zap_connect ();
if (rc == -1 && options.zap_enforce_domain) {
session->get_socket ()->event_handshake_failed_no_detail (
session->get_endpoint (), EFAULT);
return -1;
}
if (rc == 0) {
send_zap_request ();
_zap_request_sent = true;
// TODO actually, it is quite unlikely that we can read the ZAP
// reply already, but removing this has some strange side-effect
// (probably because the pipe's in_active flag is true until a read
// is attempted)
rc = receive_and_process_zap_reply ();
if (rc != 0)
return -1;
_zap_reply_received = true;
}
}
if (_zap_reply_received && status_code != "200") {
_error_command_sent = true;
if (status_code != "300") {
const size_t status_code_len = 3;
const int rc = msg_->init_size (
error_command_name_len + error_reason_len_size + status_code_len);
zmq_assert (rc == 0);
unsigned char *msg_data =
static_cast<unsigned char *> (msg_->data ());
memcpy (msg_data, error_command_name, error_command_name_len);
msg_data += error_command_name_len;
*msg_data = status_code_len;
msg_data += error_reason_len_size;
memcpy (msg_data, status_code.c_str (), status_code_len);
return 0;
}
errno = EAGAIN;
return -1;
}
make_command_with_basic_properties (msg_, ready_command_name,
ready_command_name_len);
_ready_command_sent = true;
return 0;
}
int zmq::null_mechanism_t::process_handshake_command (msg_t *msg_)
{
if (_ready_command_received || _error_command_received) {
session->get_socket ()->event_handshake_failed_protocol (
session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND);
errno = EPROTO;
return -1;
}
const unsigned char *cmd_data =
static_cast<unsigned char *> (msg_->data ());
const size_t data_size = msg_->size ();
int rc = 0;
if (data_size >= ready_command_name_len
&& !memcmp (cmd_data, ready_command_name, ready_command_name_len))
rc = process_ready_command (cmd_data, data_size);
else if (data_size >= error_command_name_len
&& !memcmp (cmd_data, error_command_name, error_command_name_len))
rc = process_error_command (cmd_data, data_size);
else {
session->get_socket ()->event_handshake_failed_protocol (
session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND);
errno = EPROTO;
rc = -1;
}
if (rc == 0) {
rc = msg_->close ();
errno_assert (rc == 0);
rc = msg_->init ();
errno_assert (rc == 0);
}
return rc;
}
int zmq::null_mechanism_t::process_ready_command (
const unsigned char *cmd_data_, size_t data_size_)
{
_ready_command_received = true;
return parse_metadata (cmd_data_ + ready_command_name_len,
data_size_ - ready_command_name_len);
}
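// For reference (illustrative; the actual decoding is done by the
// mechanism's parse_metadata() helper): a NULL READY command carrying a
// single "Socket-Type: DEALER" property is laid out as
//
//   \x05READY                 1-byte length + command name
//   \x0bSocket-Type           1-byte length + property name
//   \x00\x00\x00\x06DEALER    4-byte big-endian length + property value
//
// i.e. everything after ready_command_name_len is exactly the byte range
// that process_ready_command() above hands to parse_metadata().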
int zmq::null_mechanism_t::process_error_command (
const unsigned char *cmd_data_, size_t data_size_)
{
const size_t fixed_prefix_size =
error_command_name_len + error_reason_len_size;
if (data_size_ < fixed_prefix_size) {
session->get_socket ()->event_handshake_failed_protocol (
session->get_endpoint (),
ZMQ_PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_ERROR);
errno = EPROTO;
return -1;
}
const size_t error_reason_len =
static_cast<size_t> (cmd_data_[error_command_name_len]);
if (error_reason_len > data_size_ - fixed_prefix_size) {
session->get_socket ()->event_handshake_failed_protocol (
session->get_endpoint (),
ZMQ_PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_ERROR);
errno = EPROTO;
return -1;
}
const char *error_reason =
reinterpret_cast<const char *> (cmd_data_) + fixed_prefix_size;
handle_error_reason (error_reason, error_reason_len);
_error_command_received = true;
return 0;
}
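// Matching layout for the ERROR command parsed above (illustrative): an
// ERROR carrying the ZAP status code "400" is simply
//
//   \x05ERROR\x03400
//
// i.e. the length-prefixed command name, one reason-length octet, then the
// reason bytes -- the same shape next_handshake_command() builds when the
// ZAP handler rejects a connection.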
int zmq::null_mechanism_t::zap_msg_available ()
{
if (_zap_reply_received) {
errno = EFSM;
return -1;
}
const int rc = receive_and_process_zap_reply ();
if (rc == 0)
_zap_reply_received = true;
return rc == -1 ? -1 : 0;
}
zmq::mechanism_t::status_t zmq::null_mechanism_t::status () const
{
if (_ready_command_sent && _ready_command_received)
return ready;
const bool command_sent = _ready_command_sent || _error_command_sent;
const bool command_received =
_ready_command_received || _error_command_received;
return command_sent && command_received ? error : handshaking;
}
void zmq::null_mechanism_t::send_zap_request ()
{
zap_client_t::send_zap_request ("NULL", 4, NULL, NULL, 0);
}
|
sophomore_public/libzmq
|
src/null_mechanism.cpp
|
C++
|
gpl-3.0
| 6,531 |
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_NULL_MECHANISM_HPP_INCLUDED__
#define __ZMQ_NULL_MECHANISM_HPP_INCLUDED__
#include "mechanism.hpp"
#include "options.hpp"
#include "zap_client.hpp"
namespace zmq
{
class msg_t;
class session_base_t;
class null_mechanism_t ZMQ_FINAL : public zap_client_t
{
public:
null_mechanism_t (session_base_t *session_,
const std::string &peer_address_,
const options_t &options_);
~null_mechanism_t ();
// mechanism implementation
int next_handshake_command (msg_t *msg_);
int process_handshake_command (msg_t *msg_);
int zap_msg_available ();
status_t status () const;
private:
bool _ready_command_sent;
bool _error_command_sent;
bool _ready_command_received;
bool _error_command_received;
bool _zap_request_sent;
bool _zap_reply_received;
int process_ready_command (const unsigned char *cmd_data_,
size_t data_size_);
int process_error_command (const unsigned char *cmd_data_,
size_t data_size_);
void send_zap_request ();
};
}
#endif
|
sophomore_public/libzmq
|
src/null_mechanism.hpp
|
C++
|
gpl-3.0
| 1,162 |
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include <string.h>
#include <stdarg.h>
#include "object.hpp"
#include "ctx.hpp"
#include "err.hpp"
#include "pipe.hpp"
#include "io_thread.hpp"
#include "session_base.hpp"
#include "socket_base.hpp"
zmq::object_t::object_t (ctx_t *ctx_, uint32_t tid_) : _ctx (ctx_), _tid (tid_)
{
}
zmq::object_t::object_t (object_t *parent_) :
_ctx (parent_->_ctx), _tid (parent_->_tid)
{
}
zmq::object_t::~object_t ()
{
}
uint32_t zmq::object_t::get_tid () const
{
return _tid;
}
void zmq::object_t::set_tid (uint32_t id_)
{
_tid = id_;
}
zmq::ctx_t *zmq::object_t::get_ctx () const
{
return _ctx;
}
void zmq::object_t::process_command (const command_t &cmd_)
{
switch (cmd_.type) {
case command_t::activate_read:
process_activate_read ();
break;
case command_t::activate_write:
process_activate_write (cmd_.args.activate_write.msgs_read);
break;
case command_t::stop:
process_stop ();
break;
case command_t::plug:
process_plug ();
process_seqnum ();
break;
case command_t::own:
process_own (cmd_.args.own.object);
process_seqnum ();
break;
case command_t::attach:
process_attach (cmd_.args.attach.engine);
process_seqnum ();
break;
case command_t::bind:
process_bind (cmd_.args.bind.pipe);
process_seqnum ();
break;
case command_t::hiccup:
process_hiccup (cmd_.args.hiccup.pipe);
break;
case command_t::pipe_peer_stats:
process_pipe_peer_stats (cmd_.args.pipe_peer_stats.queue_count,
cmd_.args.pipe_peer_stats.socket_base,
cmd_.args.pipe_peer_stats.endpoint_pair);
break;
case command_t::pipe_stats_publish:
process_pipe_stats_publish (
cmd_.args.pipe_stats_publish.outbound_queue_count,
cmd_.args.pipe_stats_publish.inbound_queue_count,
cmd_.args.pipe_stats_publish.endpoint_pair);
break;
case command_t::pipe_term:
process_pipe_term ();
break;
case command_t::pipe_term_ack:
process_pipe_term_ack ();
break;
case command_t::pipe_hwm:
process_pipe_hwm (cmd_.args.pipe_hwm.inhwm,
cmd_.args.pipe_hwm.outhwm);
break;
case command_t::term_req:
process_term_req (cmd_.args.term_req.object);
break;
case command_t::term:
process_term (cmd_.args.term.linger);
break;
case command_t::term_ack:
process_term_ack ();
break;
case command_t::term_endpoint:
process_term_endpoint (cmd_.args.term_endpoint.endpoint);
break;
case command_t::reap:
process_reap (cmd_.args.reap.socket);
break;
case command_t::reaped:
process_reaped ();
break;
case command_t::inproc_connected:
process_seqnum ();
break;
case command_t::conn_failed:
process_conn_failed ();
break;
case command_t::done:
default:
zmq_assert (false);
}
}
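// Round trip of a typical command (illustrative): a thread that wants a
// peer pipe to start reading calls send_activate_read (pipe) below; that
// fills in a command_t and posts it, via ctx_t::send_command(), to the
// mailbox of the thread owning 'pipe'. When that thread drains its mailbox
// it calls cmd.destination->process_command (cmd), which lands in the
// switch above and invokes the pipe's process_activate_read() override.
//
//   command_t cmd;
//   cmd.destination = pipe; // an object_t owned by thread T
//   cmd.type = command_t::activate_read;
//   _ctx->send_command (pipe->get_tid (), cmd); // wakes up thread T
//   // ... later, in thread T's command loop:
//   cmd.destination->process_command (cmd); // -> process_activate_read ()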
int zmq::object_t::register_endpoint (const char *addr_,
const endpoint_t &endpoint_)
{
return _ctx->register_endpoint (addr_, endpoint_);
}
int zmq::object_t::unregister_endpoint (const std::string &addr_,
socket_base_t *socket_)
{
return _ctx->unregister_endpoint (addr_, socket_);
}
void zmq::object_t::unregister_endpoints (socket_base_t *socket_)
{
return _ctx->unregister_endpoints (socket_);
}
zmq::endpoint_t zmq::object_t::find_endpoint (const char *addr_) const
{
return _ctx->find_endpoint (addr_);
}
void zmq::object_t::pend_connection (const std::string &addr_,
const endpoint_t &endpoint_,
pipe_t **pipes_)
{
_ctx->pend_connection (addr_, endpoint_, pipes_);
}
void zmq::object_t::connect_pending (const char *addr_,
zmq::socket_base_t *bind_socket_)
{
return _ctx->connect_pending (addr_, bind_socket_);
}
void zmq::object_t::destroy_socket (socket_base_t *socket_)
{
_ctx->destroy_socket (socket_);
}
zmq::io_thread_t *zmq::object_t::choose_io_thread (uint64_t affinity_) const
{
return _ctx->choose_io_thread (affinity_);
}
void zmq::object_t::send_stop ()
{
    // The 'stop' command always goes from the administrative thread to
// the current object.
command_t cmd;
cmd.destination = this;
cmd.type = command_t::stop;
_ctx->send_command (_tid, cmd);
}
void zmq::object_t::send_plug (own_t *destination_, bool inc_seqnum_)
{
if (inc_seqnum_)
destination_->inc_seqnum ();
command_t cmd;
cmd.destination = destination_;
cmd.type = command_t::plug;
send_command (cmd);
}
void zmq::object_t::send_own (own_t *destination_, own_t *object_)
{
destination_->inc_seqnum ();
command_t cmd;
cmd.destination = destination_;
cmd.type = command_t::own;
cmd.args.own.object = object_;
send_command (cmd);
}
void zmq::object_t::send_attach (session_base_t *destination_,
i_engine *engine_,
bool inc_seqnum_)
{
if (inc_seqnum_)
destination_->inc_seqnum ();
command_t cmd;
cmd.destination = destination_;
cmd.type = command_t::attach;
cmd.args.attach.engine = engine_;
send_command (cmd);
}
void zmq::object_t::send_conn_failed (session_base_t *destination_)
{
command_t cmd;
cmd.destination = destination_;
cmd.type = command_t::conn_failed;
send_command (cmd);
}
void zmq::object_t::send_bind (own_t *destination_,
pipe_t *pipe_,
bool inc_seqnum_)
{
if (inc_seqnum_)
destination_->inc_seqnum ();
command_t cmd;
cmd.destination = destination_;
cmd.type = command_t::bind;
cmd.args.bind.pipe = pipe_;
send_command (cmd);
}
void zmq::object_t::send_activate_read (pipe_t *destination_)
{
command_t cmd;
cmd.destination = destination_;
cmd.type = command_t::activate_read;
send_command (cmd);
}
void zmq::object_t::send_activate_write (pipe_t *destination_,
uint64_t msgs_read_)
{
command_t cmd;
cmd.destination = destination_;
cmd.type = command_t::activate_write;
cmd.args.activate_write.msgs_read = msgs_read_;
send_command (cmd);
}
void zmq::object_t::send_hiccup (pipe_t *destination_, void *pipe_)
{
command_t cmd;
cmd.destination = destination_;
cmd.type = command_t::hiccup;
cmd.args.hiccup.pipe = pipe_;
send_command (cmd);
}
void zmq::object_t::send_pipe_peer_stats (pipe_t *destination_,
uint64_t queue_count_,
own_t *socket_base_,
endpoint_uri_pair_t *endpoint_pair_)
{
command_t cmd;
cmd.destination = destination_;
cmd.type = command_t::pipe_peer_stats;
cmd.args.pipe_peer_stats.queue_count = queue_count_;
cmd.args.pipe_peer_stats.socket_base = socket_base_;
cmd.args.pipe_peer_stats.endpoint_pair = endpoint_pair_;
send_command (cmd);
}
void zmq::object_t::send_pipe_stats_publish (
own_t *destination_,
uint64_t outbound_queue_count_,
uint64_t inbound_queue_count_,
endpoint_uri_pair_t *endpoint_pair_)
{
command_t cmd;
cmd.destination = destination_;
cmd.type = command_t::pipe_stats_publish;
cmd.args.pipe_stats_publish.outbound_queue_count = outbound_queue_count_;
cmd.args.pipe_stats_publish.inbound_queue_count = inbound_queue_count_;
cmd.args.pipe_stats_publish.endpoint_pair = endpoint_pair_;
send_command (cmd);
}
void zmq::object_t::send_pipe_term (pipe_t *destination_)
{
command_t cmd;
cmd.destination = destination_;
cmd.type = command_t::pipe_term;
send_command (cmd);
}
void zmq::object_t::send_pipe_term_ack (pipe_t *destination_)
{
command_t cmd;
cmd.destination = destination_;
cmd.type = command_t::pipe_term_ack;
send_command (cmd);
}
void zmq::object_t::send_pipe_hwm (pipe_t *destination_,
int inhwm_,
int outhwm_)
{
command_t cmd;
cmd.destination = destination_;
cmd.type = command_t::pipe_hwm;
cmd.args.pipe_hwm.inhwm = inhwm_;
cmd.args.pipe_hwm.outhwm = outhwm_;
send_command (cmd);
}
void zmq::object_t::send_term_req (own_t *destination_, own_t *object_)
{
command_t cmd;
cmd.destination = destination_;
cmd.type = command_t::term_req;
cmd.args.term_req.object = object_;
send_command (cmd);
}
void zmq::object_t::send_term (own_t *destination_, int linger_)
{
command_t cmd;
cmd.destination = destination_;
cmd.type = command_t::term;
cmd.args.term.linger = linger_;
send_command (cmd);
}
void zmq::object_t::send_term_ack (own_t *destination_)
{
command_t cmd;
cmd.destination = destination_;
cmd.type = command_t::term_ack;
send_command (cmd);
}
void zmq::object_t::send_term_endpoint (own_t *destination_,
std::string *endpoint_)
{
command_t cmd;
cmd.destination = destination_;
cmd.type = command_t::term_endpoint;
cmd.args.term_endpoint.endpoint = endpoint_;
send_command (cmd);
}
void zmq::object_t::send_reap (class socket_base_t *socket_)
{
command_t cmd;
cmd.destination = _ctx->get_reaper ();
cmd.type = command_t::reap;
cmd.args.reap.socket = socket_;
send_command (cmd);
}
void zmq::object_t::send_reaped ()
{
command_t cmd;
cmd.destination = _ctx->get_reaper ();
cmd.type = command_t::reaped;
send_command (cmd);
}
void zmq::object_t::send_inproc_connected (zmq::socket_base_t *socket_)
{
command_t cmd;
cmd.destination = socket_;
cmd.type = command_t::inproc_connected;
send_command (cmd);
}
void zmq::object_t::send_done ()
{
command_t cmd;
cmd.destination = NULL;
cmd.type = command_t::done;
_ctx->send_command (ctx_t::term_tid, cmd);
}
void zmq::object_t::process_stop ()
{
zmq_assert (false);
}
void zmq::object_t::process_plug ()
{
zmq_assert (false);
}
void zmq::object_t::process_own (own_t *)
{
zmq_assert (false);
}
void zmq::object_t::process_attach (i_engine *)
{
zmq_assert (false);
}
void zmq::object_t::process_bind (pipe_t *)
{
zmq_assert (false);
}
void zmq::object_t::process_activate_read ()
{
zmq_assert (false);
}
void zmq::object_t::process_activate_write (uint64_t)
{
zmq_assert (false);
}
void zmq::object_t::process_hiccup (void *)
{
zmq_assert (false);
}
void zmq::object_t::process_pipe_peer_stats (uint64_t,
own_t *,
endpoint_uri_pair_t *)
{
zmq_assert (false);
}
void zmq::object_t::process_pipe_stats_publish (uint64_t,
uint64_t,
endpoint_uri_pair_t *)
{
zmq_assert (false);
}
void zmq::object_t::process_pipe_term ()
{
zmq_assert (false);
}
void zmq::object_t::process_pipe_term_ack ()
{
zmq_assert (false);
}
void zmq::object_t::process_pipe_hwm (int, int)
{
zmq_assert (false);
}
void zmq::object_t::process_term_req (own_t *)
{
zmq_assert (false);
}
void zmq::object_t::process_term (int)
{
zmq_assert (false);
}
void zmq::object_t::process_term_ack ()
{
zmq_assert (false);
}
void zmq::object_t::process_term_endpoint (std::string *)
{
zmq_assert (false);
}
void zmq::object_t::process_reap (class socket_base_t *)
{
zmq_assert (false);
}
void zmq::object_t::process_reaped ()
{
zmq_assert (false);
}
void zmq::object_t::process_seqnum ()
{
zmq_assert (false);
}
void zmq::object_t::process_conn_failed ()
{
zmq_assert (false);
}
void zmq::object_t::send_command (const command_t &cmd_)
{
_ctx->send_command (cmd_.destination->get_tid (), cmd_);
}
|
sophomore_public/libzmq
|
src/object.cpp
|
C++
|
gpl-3.0
| 12,566 |
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_OBJECT_HPP_INCLUDED__
#define __ZMQ_OBJECT_HPP_INCLUDED__
#include <string>
#include "endpoint.hpp"
#include "macros.hpp"
#include "stdint.hpp"
namespace zmq
{
struct i_engine;
struct endpoint_t;
struct pending_connection_t;
struct command_t;
class ctx_t;
class pipe_t;
class socket_base_t;
class session_base_t;
class io_thread_t;
class own_t;
// Base class for all objects that participate in inter-thread
// communication.
class object_t
{
public:
object_t (zmq::ctx_t *ctx_, uint32_t tid_);
object_t (object_t *parent_);
virtual ~object_t ();
uint32_t get_tid () const;
void set_tid (uint32_t id_);
ctx_t *get_ctx () const;
void process_command (const zmq::command_t &cmd_);
void send_inproc_connected (zmq::socket_base_t *socket_);
void send_bind (zmq::own_t *destination_,
zmq::pipe_t *pipe_,
bool inc_seqnum_ = true);
protected:
// Using following function, socket is able to access global
// repository of inproc endpoints.
int register_endpoint (const char *addr_, const zmq::endpoint_t &endpoint_);
int unregister_endpoint (const std::string &addr_, socket_base_t *socket_);
void unregister_endpoints (zmq::socket_base_t *socket_);
zmq::endpoint_t find_endpoint (const char *addr_) const;
void pend_connection (const std::string &addr_,
const endpoint_t &endpoint_,
pipe_t **pipes_);
void connect_pending (const char *addr_, zmq::socket_base_t *bind_socket_);
void destroy_socket (zmq::socket_base_t *socket_);
    // Logs a message.
void log (const char *format_, ...);
// Chooses least loaded I/O thread.
zmq::io_thread_t *choose_io_thread (uint64_t affinity_) const;
// Derived object can use these functions to send commands
// to other objects.
void send_stop ();
void send_plug (zmq::own_t *destination_, bool inc_seqnum_ = true);
void send_own (zmq::own_t *destination_, zmq::own_t *object_);
void send_attach (zmq::session_base_t *destination_,
zmq::i_engine *engine_,
bool inc_seqnum_ = true);
void send_activate_read (zmq::pipe_t *destination_);
void send_activate_write (zmq::pipe_t *destination_, uint64_t msgs_read_);
void send_hiccup (zmq::pipe_t *destination_, void *pipe_);
void send_pipe_peer_stats (zmq::pipe_t *destination_,
uint64_t queue_count_,
zmq::own_t *socket_base,
endpoint_uri_pair_t *endpoint_pair_);
void send_pipe_stats_publish (zmq::own_t *destination_,
uint64_t outbound_queue_count_,
uint64_t inbound_queue_count_,
endpoint_uri_pair_t *endpoint_pair_);
void send_pipe_term (zmq::pipe_t *destination_);
void send_pipe_term_ack (zmq::pipe_t *destination_);
void send_pipe_hwm (zmq::pipe_t *destination_, int inhwm_, int outhwm_);
void send_term_req (zmq::own_t *destination_, zmq::own_t *object_);
void send_term (zmq::own_t *destination_, int linger_);
void send_term_ack (zmq::own_t *destination_);
void send_term_endpoint (own_t *destination_, std::string *endpoint_);
void send_reap (zmq::socket_base_t *socket_);
void send_reaped ();
void send_done ();
void send_conn_failed (zmq::session_base_t *destination_);
// These handlers can be overridden by the derived objects. They are
// called when command arrives from another thread.
virtual void process_stop ();
virtual void process_plug ();
virtual void process_own (zmq::own_t *object_);
virtual void process_attach (zmq::i_engine *engine_);
virtual void process_bind (zmq::pipe_t *pipe_);
virtual void process_activate_read ();
virtual void process_activate_write (uint64_t msgs_read_);
virtual void process_hiccup (void *pipe_);
virtual void process_pipe_peer_stats (uint64_t queue_count_,
zmq::own_t *socket_base_,
endpoint_uri_pair_t *endpoint_pair_);
virtual void
process_pipe_stats_publish (uint64_t outbound_queue_count_,
uint64_t inbound_queue_count_,
endpoint_uri_pair_t *endpoint_pair_);
virtual void process_pipe_term ();
virtual void process_pipe_term_ack ();
virtual void process_pipe_hwm (int inhwm_, int outhwm_);
virtual void process_term_req (zmq::own_t *object_);
virtual void process_term (int linger_);
virtual void process_term_ack ();
virtual void process_term_endpoint (std::string *endpoint_);
virtual void process_reap (zmq::socket_base_t *socket_);
virtual void process_reaped ();
virtual void process_conn_failed ();
// Special handler called after a command that requires a seqnum
// was processed. The implementation should catch up with its counter
// of processed commands here.
virtual void process_seqnum ();
private:
// Context provides access to the global state.
zmq::ctx_t *const _ctx;
// Thread ID of the thread the object belongs to.
uint32_t _tid;
void send_command (const command_t &cmd_);
ZMQ_NON_COPYABLE_NOR_MOVABLE (object_t)
};
}
#endif
|
sophomore_public/libzmq
|
src/object.hpp
|
C++
|
gpl-3.0
| 5,462 |
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include <string.h>
#include <limits.h>
#include <set>
#include "options.hpp"
#include "err.hpp"
#include "macros.hpp"
#ifndef ZMQ_HAVE_WINDOWS
#include <net/if.h>
#endif
#if defined IFNAMSIZ
#define BINDDEVSIZ IFNAMSIZ
#else
#define BINDDEVSIZ 16
#endif
static int sockopt_invalid ()
{
#if defined(ZMQ_ACT_MILITANT)
zmq_assert (false);
#endif
errno = EINVAL;
return -1;
}
int zmq::do_getsockopt (void *const optval_,
size_t *const optvallen_,
const std::string &value_)
{
return do_getsockopt (optval_, optvallen_, value_.c_str (),
value_.size () + 1);
}
int zmq::do_getsockopt (void *const optval_,
size_t *const optvallen_,
const void *value_,
const size_t value_len_)
{
// TODO behaviour is inconsistent with options_t::getsockopt; there, an
// *exact* length match is required except for string-like (but not the
// CURVE keys!) (and therefore null-ing remaining memory is a no-op, see
// comment below)
if (*optvallen_ < value_len_) {
return sockopt_invalid ();
}
memcpy (optval_, value_, value_len_);
// TODO why is the remaining memory null-ed?
memset (static_cast<char *> (optval_) + value_len_, 0,
*optvallen_ - value_len_);
*optvallen_ = value_len_;
return 0;
}
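// Usage sketch (illustrative): the getters in options_t::getsockopt() later
// in this file forward the caller's buffer/length pair to these helpers,
// along the lines of
//
//   case ZMQ_ZAP_DOMAIN:
//       return do_getsockopt (optval_, optvallen_, zap_domain);
//
// The helper rejects buffers shorter than the value, copies the value,
// zero-fills the rest of the caller's buffer and shrinks *optvallen_ to the
// number of bytes actually copied.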
#ifdef ZMQ_HAVE_CURVE
static int do_getsockopt_curve_key (void *const optval_,
const size_t *const optvallen_,
const uint8_t (&curve_key_)[CURVE_KEYSIZE])
{
if (*optvallen_ == CURVE_KEYSIZE) {
memcpy (optval_, curve_key_, CURVE_KEYSIZE);
return 0;
}
if (*optvallen_ == CURVE_KEYSIZE_Z85 + 1) {
zmq_z85_encode (static_cast<char *> (optval_), curve_key_,
CURVE_KEYSIZE);
return 0;
}
return sockopt_invalid ();
}
#endif
template <typename T>
static int do_setsockopt (const void *const optval_,
const size_t optvallen_,
T *const out_value_)
{
if (optvallen_ == sizeof (T)) {
memcpy (out_value_, optval_, sizeof (T));
return 0;
}
return sockopt_invalid ();
}
int zmq::do_setsockopt_int_as_bool_strict (const void *const optval_,
const size_t optvallen_,
bool *const out_value_)
{
// TODO handling of values other than 0 or 1 is not consistent,
// here it is disallowed, but for other options such as
// ZMQ_ROUTER_RAW any positive value is accepted
int value = -1;
if (do_setsockopt (optval_, optvallen_, &value) == -1)
return -1;
if (value == 0 || value == 1) {
*out_value_ = (value != 0);
return 0;
}
return sockopt_invalid ();
}
int zmq::do_setsockopt_int_as_bool_relaxed (const void *const optval_,
const size_t optvallen_,
bool *const out_value_)
{
int value = -1;
if (do_setsockopt (optval_, optvallen_, &value) == -1)
return -1;
*out_value_ = (value != 0);
return 0;
}
static int
do_setsockopt_string_allow_empty_strict (const void *const optval_,
const size_t optvallen_,
std::string *const out_value_,
const size_t max_len_)
{
// TODO why is optval_ != NULL not allowed in case of optvallen_== 0?
// TODO why are empty strings allowed for some socket options, but not for others?
if (optval_ == NULL && optvallen_ == 0) {
out_value_->clear ();
return 0;
}
if (optval_ != NULL && optvallen_ > 0 && optvallen_ <= max_len_) {
out_value_->assign (static_cast<const char *> (optval_), optvallen_);
return 0;
}
return sockopt_invalid ();
}
static int
do_setsockopt_string_allow_empty_relaxed (const void *const optval_,
const size_t optvallen_,
std::string *const out_value_,
const size_t max_len_)
{
// TODO use either do_setsockopt_string_allow_empty_relaxed or
// do_setsockopt_string_allow_empty_strict everywhere
if (optvallen_ > 0 && optvallen_ <= max_len_) {
out_value_->assign (static_cast<const char *> (optval_), optvallen_);
return 0;
}
return sockopt_invalid ();
}
template <typename T>
static int do_setsockopt_set (const void *const optval_,
const size_t optvallen_,
std::set<T> *const set_)
{
if (optvallen_ == 0 && optval_ == NULL) {
set_->clear ();
return 0;
}
if (optvallen_ == sizeof (T) && optval_ != NULL) {
set_->insert (*(static_cast<const T *> (optval_)));
return 0;
}
return sockopt_invalid ();
}
// TODO why is 1000 a sensible default?
const int default_hwm = 1000;
zmq::options_t::options_t () :
sndhwm (default_hwm),
rcvhwm (default_hwm),
affinity (0),
routing_id_size (0),
rate (100),
recovery_ivl (10000),
multicast_hops (1),
multicast_maxtpdu (1500),
sndbuf (-1),
rcvbuf (-1),
tos (0),
priority (0),
type (-1),
linger (-1),
connect_timeout (0),
tcp_maxrt (0),
reconnect_stop (0),
reconnect_ivl (100),
reconnect_ivl_max (0),
backlog (100),
maxmsgsize (-1),
rcvtimeo (-1),
sndtimeo (-1),
ipv6 (false),
immediate (0),
filter (false),
invert_matching (false),
recv_routing_id (false),
raw_socket (false),
raw_notify (true),
tcp_keepalive (-1),
tcp_keepalive_cnt (-1),
tcp_keepalive_idle (-1),
tcp_keepalive_intvl (-1),
mechanism (ZMQ_NULL),
as_server (0),
gss_principal_nt (ZMQ_GSSAPI_NT_HOSTBASED),
gss_service_principal_nt (ZMQ_GSSAPI_NT_HOSTBASED),
gss_plaintext (false),
socket_id (0),
conflate (false),
handshake_ivl (30000),
connected (false),
heartbeat_ttl (0),
heartbeat_interval (0),
heartbeat_timeout (-1),
use_fd (-1),
zap_enforce_domain (false),
loopback_fastpath (false),
multicast_loop (true),
in_batch_size (8192),
out_batch_size (8192),
zero_copy (true),
router_notify (0),
monitor_event_version (1),
wss_trust_system (false),
hello_msg (),
can_send_hello_msg (false),
disconnect_msg (),
can_recv_disconnect_msg (false),
hiccup_msg (),
can_recv_hiccup_msg (false),
norm_mode (ZMQ_NORM_CC),
norm_unicast_nacks (false),
norm_buffer_size (2048),
norm_segment_size (1400),
norm_block_size (16),
norm_num_parity (4),
norm_num_autoparity (0),
norm_push_enable (false),
busy_poll (0)
{
memset (curve_public_key, 0, CURVE_KEYSIZE);
memset (curve_secret_key, 0, CURVE_KEYSIZE);
memset (curve_server_key, 0, CURVE_KEYSIZE);
#if defined ZMQ_HAVE_VMCI
vmci_buffer_size = 0;
vmci_buffer_min_size = 0;
vmci_buffer_max_size = 0;
vmci_connect_timeout = -1;
#endif
}
int zmq::options_t::set_curve_key (uint8_t *destination_,
const void *optval_,
size_t optvallen_)
{
switch (optvallen_) {
case CURVE_KEYSIZE:
memcpy (destination_, optval_, optvallen_);
mechanism = ZMQ_CURVE;
return 0;
case CURVE_KEYSIZE_Z85 + 1: {
const std::string s (static_cast<const char *> (optval_),
optvallen_);
if (zmq_z85_decode (destination_, s.c_str ())) {
mechanism = ZMQ_CURVE;
return 0;
}
break;
}
case CURVE_KEYSIZE_Z85:
char z85_key[CURVE_KEYSIZE_Z85 + 1];
memcpy (z85_key, reinterpret_cast<const char *> (optval_),
optvallen_);
z85_key[CURVE_KEYSIZE_Z85] = 0;
if (zmq_z85_decode (destination_, z85_key)) {
mechanism = ZMQ_CURVE;
return 0;
}
break;
default:
break;
}
return -1;
}
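// The three encodings accepted above (illustrative sketch; 'sock' and the
// generated key pair are placeholders): callers may pass the 32-byte binary
// key, the 40-character Z85 text, or the Z85 text including its terminating
// NUL, and all three select the CURVE mechanism.
//
//   char z85_public[41], z85_secret[41];
//   zmq_curve_keypair (z85_public, z85_secret); // yields Z85 text keys
//   uint8_t binary[32];
//   zmq_z85_decode (binary, z85_public); // raw 32-byte form
//
//   zmq_setsockopt (sock, ZMQ_CURVE_SERVERKEY, binary, 32);     // binary
//   zmq_setsockopt (sock, ZMQ_CURVE_SERVERKEY, z85_public, 40); // Z85
//   zmq_setsockopt (sock, ZMQ_CURVE_SERVERKEY, z85_public, 41); // Z85 + NUL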
const int deciseconds_per_millisecond = 100;
int zmq::options_t::setsockopt (int option_,
const void *optval_,
size_t optvallen_)
{
const bool is_int = (optvallen_ == sizeof (int));
int value = 0;
if (is_int)
memcpy (&value, optval_, sizeof (int));
#if defined(ZMQ_ACT_MILITANT)
bool malformed = true; // Did caller pass a bad option value?
#endif
switch (option_) {
case ZMQ_SNDHWM:
if (is_int && value >= 0) {
sndhwm = value;
return 0;
}
break;
case ZMQ_RCVHWM:
if (is_int && value >= 0) {
rcvhwm = value;
return 0;
}
break;
case ZMQ_AFFINITY:
return do_setsockopt (optval_, optvallen_, &affinity);
case ZMQ_ROUTING_ID:
// Routing id is any binary string from 1 to 255 octets
if (optvallen_ > 0 && optvallen_ <= UCHAR_MAX) {
routing_id_size = static_cast<unsigned char> (optvallen_);
memcpy (routing_id, optval_, routing_id_size);
return 0;
}
break;
case ZMQ_RATE:
if (is_int && value > 0) {
rate = value;
return 0;
}
break;
case ZMQ_RECOVERY_IVL:
if (is_int && value >= 0) {
recovery_ivl = value;
return 0;
}
break;
case ZMQ_SNDBUF:
if (is_int && value >= -1) {
sndbuf = value;
return 0;
}
break;
case ZMQ_RCVBUF:
if (is_int && value >= -1) {
rcvbuf = value;
return 0;
}
break;
case ZMQ_TOS:
if (is_int && value >= 0) {
tos = value;
return 0;
}
break;
case ZMQ_LINGER:
if (is_int && value >= -1) {
linger.store (value);
return 0;
}
break;
case ZMQ_CONNECT_TIMEOUT:
if (is_int && value >= 0) {
connect_timeout = value;
return 0;
}
break;
case ZMQ_TCP_MAXRT:
if (is_int && value >= 0) {
tcp_maxrt = value;
return 0;
}
break;
case ZMQ_RECONNECT_STOP:
if (is_int) {
reconnect_stop = value;
return 0;
}
break;
case ZMQ_RECONNECT_IVL:
if (is_int && value >= -1) {
reconnect_ivl = value;
return 0;
}
break;
case ZMQ_RECONNECT_IVL_MAX:
if (is_int && value >= 0) {
reconnect_ivl_max = value;
return 0;
}
break;
case ZMQ_BACKLOG:
if (is_int && value >= 0) {
backlog = value;
return 0;
}
break;
case ZMQ_MAXMSGSIZE:
return do_setsockopt (optval_, optvallen_, &maxmsgsize);
case ZMQ_MULTICAST_HOPS:
if (is_int && value > 0) {
multicast_hops = value;
return 0;
}
break;
case ZMQ_MULTICAST_MAXTPDU:
if (is_int && value > 0) {
multicast_maxtpdu = value;
return 0;
}
break;
case ZMQ_RCVTIMEO:
if (is_int && value >= -1) {
rcvtimeo = value;
return 0;
}
break;
case ZMQ_SNDTIMEO:
if (is_int && value >= -1) {
sndtimeo = value;
return 0;
}
break;
/* Deprecated in favor of ZMQ_IPV6 */
case ZMQ_IPV4ONLY: {
bool value;
const int rc =
do_setsockopt_int_as_bool_strict (optval_, optvallen_, &value);
if (rc == 0)
ipv6 = !value;
return rc;
}
/* To replace the somewhat surprising IPV4ONLY */
case ZMQ_IPV6:
return do_setsockopt_int_as_bool_strict (optval_, optvallen_,
&ipv6);
case ZMQ_SOCKS_PROXY:
return do_setsockopt_string_allow_empty_strict (
optval_, optvallen_, &socks_proxy_address, SIZE_MAX);
case ZMQ_SOCKS_USERNAME:
/* Make empty string or NULL equivalent. */
if (optval_ == NULL || optvallen_ == 0) {
socks_proxy_username.clear ();
return 0;
} else {
return do_setsockopt_string_allow_empty_strict (
optval_, optvallen_, &socks_proxy_username, 255);
}
case ZMQ_SOCKS_PASSWORD:
/* Make empty string or NULL equivalent. */
if (optval_ == NULL || optvallen_ == 0) {
socks_proxy_password.clear ();
return 0;
} else {
return do_setsockopt_string_allow_empty_strict (
optval_, optvallen_, &socks_proxy_password, 255);
}
case ZMQ_TCP_KEEPALIVE:
if (is_int && (value == -1 || value == 0 || value == 1)) {
tcp_keepalive = value;
return 0;
}
break;
case ZMQ_TCP_KEEPALIVE_CNT:
if (is_int && (value == -1 || value >= 0)) {
tcp_keepalive_cnt = value;
return 0;
}
break;
case ZMQ_TCP_KEEPALIVE_IDLE:
if (is_int && (value == -1 || value >= 0)) {
tcp_keepalive_idle = value;
return 0;
}
break;
case ZMQ_TCP_KEEPALIVE_INTVL:
if (is_int && (value == -1 || value >= 0)) {
tcp_keepalive_intvl = value;
return 0;
}
break;
case ZMQ_IMMEDIATE:
// TODO why is immediate not bool (and called non_immediate, as its meaning appears to be reversed)
if (is_int && (value == 0 || value == 1)) {
immediate = value;
return 0;
}
break;
case ZMQ_TCP_ACCEPT_FILTER: {
std::string filter_str;
int rc = do_setsockopt_string_allow_empty_strict (
optval_, optvallen_, &filter_str, UCHAR_MAX);
if (rc == 0) {
if (filter_str.empty ()) {
tcp_accept_filters.clear ();
} else {
tcp_address_mask_t mask;
rc = mask.resolve (filter_str.c_str (), ipv6);
if (rc == 0) {
tcp_accept_filters.push_back (mask);
}
}
}
return rc;
}
#if defined ZMQ_HAVE_SO_PEERCRED || defined ZMQ_HAVE_LOCAL_PEERCRED
case ZMQ_IPC_FILTER_UID:
return do_setsockopt_set (optval_, optvallen_,
&ipc_uid_accept_filters);
case ZMQ_IPC_FILTER_GID:
return do_setsockopt_set (optval_, optvallen_,
&ipc_gid_accept_filters);
#endif
#if defined ZMQ_HAVE_SO_PEERCRED
case ZMQ_IPC_FILTER_PID:
return do_setsockopt_set (optval_, optvallen_,
&ipc_pid_accept_filters);
#endif
case ZMQ_PLAIN_SERVER:
if (is_int && (value == 0 || value == 1)) {
as_server = value;
mechanism = value ? ZMQ_PLAIN : ZMQ_NULL;
return 0;
}
break;
case ZMQ_PLAIN_USERNAME:
if (optvallen_ == 0 && optval_ == NULL) {
mechanism = ZMQ_NULL;
return 0;
} else if (optvallen_ > 0 && optvallen_ <= UCHAR_MAX
&& optval_ != NULL) {
plain_username.assign (static_cast<const char *> (optval_),
optvallen_);
as_server = 0;
mechanism = ZMQ_PLAIN;
return 0;
}
break;
case ZMQ_PLAIN_PASSWORD:
if (optvallen_ == 0 && optval_ == NULL) {
mechanism = ZMQ_NULL;
return 0;
} else if (optvallen_ > 0 && optvallen_ <= UCHAR_MAX
&& optval_ != NULL) {
plain_password.assign (static_cast<const char *> (optval_),
optvallen_);
as_server = 0;
mechanism = ZMQ_PLAIN;
return 0;
}
break;
case ZMQ_ZAP_DOMAIN:
return do_setsockopt_string_allow_empty_relaxed (
optval_, optvallen_, &zap_domain, UCHAR_MAX);
// If curve encryption isn't built, these options provoke EINVAL
#ifdef ZMQ_HAVE_CURVE
case ZMQ_CURVE_SERVER:
if (is_int && (value == 0 || value == 1)) {
as_server = value;
mechanism = value ? ZMQ_CURVE : ZMQ_NULL;
return 0;
}
break;
case ZMQ_CURVE_PUBLICKEY:
if (0 == set_curve_key (curve_public_key, optval_, optvallen_)) {
return 0;
}
break;
case ZMQ_CURVE_SECRETKEY:
if (0 == set_curve_key (curve_secret_key, optval_, optvallen_)) {
return 0;
}
break;
case ZMQ_CURVE_SERVERKEY:
if (0 == set_curve_key (curve_server_key, optval_, optvallen_)) {
as_server = 0;
return 0;
}
break;
#endif
case ZMQ_CONFLATE:
return do_setsockopt_int_as_bool_strict (optval_, optvallen_,
&conflate);
// If libgssapi isn't installed, these options provoke EINVAL
#ifdef HAVE_LIBGSSAPI_KRB5
case ZMQ_GSSAPI_SERVER:
if (is_int && (value == 0 || value == 1)) {
as_server = value;
mechanism = ZMQ_GSSAPI;
return 0;
}
break;
case ZMQ_GSSAPI_PRINCIPAL:
if (optvallen_ > 0 && optvallen_ <= UCHAR_MAX && optval_ != NULL) {
gss_principal.assign ((const char *) optval_, optvallen_);
mechanism = ZMQ_GSSAPI;
return 0;
}
break;
case ZMQ_GSSAPI_SERVICE_PRINCIPAL:
if (optvallen_ > 0 && optvallen_ <= UCHAR_MAX && optval_ != NULL) {
gss_service_principal.assign ((const char *) optval_,
optvallen_);
mechanism = ZMQ_GSSAPI;
as_server = 0;
return 0;
}
break;
case ZMQ_GSSAPI_PLAINTEXT:
return do_setsockopt_int_as_bool_strict (optval_, optvallen_,
&gss_plaintext);
case ZMQ_GSSAPI_PRINCIPAL_NAMETYPE:
if (is_int
&& (value == ZMQ_GSSAPI_NT_HOSTBASED
|| value == ZMQ_GSSAPI_NT_USER_NAME
|| value == ZMQ_GSSAPI_NT_KRB5_PRINCIPAL)) {
gss_principal_nt = value;
return 0;
}
break;
case ZMQ_GSSAPI_SERVICE_PRINCIPAL_NAMETYPE:
if (is_int
&& (value == ZMQ_GSSAPI_NT_HOSTBASED
|| value == ZMQ_GSSAPI_NT_USER_NAME
|| value == ZMQ_GSSAPI_NT_KRB5_PRINCIPAL)) {
gss_service_principal_nt = value;
return 0;
}
break;
#endif
case ZMQ_HANDSHAKE_IVL:
if (is_int && value >= 0) {
handshake_ivl = value;
return 0;
}
break;
case ZMQ_INVERT_MATCHING:
return do_setsockopt_int_as_bool_relaxed (optval_, optvallen_,
&invert_matching);
case ZMQ_HEARTBEAT_IVL:
if (is_int && value >= 0) {
heartbeat_interval = value;
return 0;
}
break;
case ZMQ_HEARTBEAT_TTL:
// Convert this to deciseconds from milliseconds
value = value / deciseconds_per_millisecond;
if (is_int && value >= 0 && value <= UINT16_MAX) {
heartbeat_ttl = static_cast<uint16_t> (value);
return 0;
}
break;
case ZMQ_HEARTBEAT_TIMEOUT:
if (is_int && value >= 0) {
heartbeat_timeout = value;
return 0;
}
break;
#ifdef ZMQ_HAVE_VMCI
case ZMQ_VMCI_BUFFER_SIZE:
return do_setsockopt (optval_, optvallen_, &vmci_buffer_size);
case ZMQ_VMCI_BUFFER_MIN_SIZE:
return do_setsockopt (optval_, optvallen_, &vmci_buffer_min_size);
case ZMQ_VMCI_BUFFER_MAX_SIZE:
return do_setsockopt (optval_, optvallen_, &vmci_buffer_max_size);
case ZMQ_VMCI_CONNECT_TIMEOUT:
return do_setsockopt (optval_, optvallen_, &vmci_connect_timeout);
#endif
case ZMQ_USE_FD:
if (is_int && value >= -1) {
use_fd = value;
return 0;
}
break;
case ZMQ_BINDTODEVICE:
return do_setsockopt_string_allow_empty_strict (
optval_, optvallen_, &bound_device, BINDDEVSIZ);
case ZMQ_ZAP_ENFORCE_DOMAIN:
return do_setsockopt_int_as_bool_relaxed (optval_, optvallen_,
&zap_enforce_domain);
case ZMQ_LOOPBACK_FASTPATH:
return do_setsockopt_int_as_bool_relaxed (optval_, optvallen_,
&loopback_fastpath);
case ZMQ_METADATA:
if (optvallen_ > 0 && !is_int) {
const std::string s (static_cast<const char *> (optval_),
optvallen_);
const size_t pos = s.find (':');
if (pos != std::string::npos && pos != 0
&& pos != s.length () - 1) {
const std::string key = s.substr (0, pos);
if (key.compare (0, 2, "X-") == 0
&& key.length () <= UCHAR_MAX) {
std::string val = s.substr (pos + 1, s.length ());
app_metadata.insert (
std::pair<std::string, std::string> (key, val));
return 0;
}
}
}
errno = EINVAL;
return -1;
case ZMQ_MULTICAST_LOOP:
return do_setsockopt_int_as_bool_relaxed (optval_, optvallen_,
&multicast_loop);
#ifdef ZMQ_BUILD_DRAFT_API
case ZMQ_IN_BATCH_SIZE:
if (is_int && value > 0) {
in_batch_size = value;
return 0;
}
break;
case ZMQ_OUT_BATCH_SIZE:
if (is_int && value > 0) {
out_batch_size = value;
return 0;
}
break;
case ZMQ_BUSY_POLL:
if (is_int) {
busy_poll = value;
return 0;
}
break;
#ifdef ZMQ_HAVE_WSS
case ZMQ_WSS_KEY_PEM:
// TODO: check if valid certificate
wss_key_pem = std::string ((char *) optval_, optvallen_);
return 0;
case ZMQ_WSS_CERT_PEM:
// TODO: check if valid certificate
wss_cert_pem = std::string ((char *) optval_, optvallen_);
return 0;
case ZMQ_WSS_TRUST_PEM:
// TODO: check if valid certificate
wss_trust_pem = std::string ((char *) optval_, optvallen_);
return 0;
case ZMQ_WSS_HOSTNAME:
wss_hostname = std::string ((char *) optval_, optvallen_);
return 0;
case ZMQ_WSS_TRUST_SYSTEM:
return do_setsockopt_int_as_bool_strict (optval_, optvallen_,
&wss_trust_system);
#endif
#ifdef ZMQ_HAVE_NORM
case ZMQ_NORM_MODE:
if (is_int && value >= 0 && value <= 4) {
norm_mode = value;
return 0;
}
break;
case ZMQ_NORM_UNICAST_NACK:
return do_setsockopt_int_as_bool_strict (optval_, optvallen_,
&norm_unicast_nacks);
case ZMQ_NORM_BUFFER_SIZE:
if (is_int && value > 0) {
norm_buffer_size = value;
return 0;
}
break;
case ZMQ_NORM_SEGMENT_SIZE:
if (is_int && value > 0) {
norm_segment_size = value;
return 0;
}
break;
case ZMQ_NORM_BLOCK_SIZE:
if (is_int && value > 0 && value <= 255) {
norm_block_size = value;
return 0;
}
break;
case ZMQ_NORM_NUM_PARITY:
if (is_int && value >= 0 && value < 255) {
norm_num_parity = value;
return 0;
}
break;
case ZMQ_NORM_NUM_AUTOPARITY:
if (is_int && value >= 0 && value < 255) {
norm_num_autoparity = value;
return 0;
}
break;
case ZMQ_NORM_PUSH:
return do_setsockopt_int_as_bool_strict (optval_, optvallen_,
&norm_push_enable);
#endif //ZMQ_HAVE_NORM
case ZMQ_HELLO_MSG:
if (optvallen_ > 0) {
unsigned char *bytes = (unsigned char *) optval_;
hello_msg =
std::vector<unsigned char> (bytes, bytes + optvallen_);
} else {
hello_msg = std::vector<unsigned char> ();
}
return 0;
case ZMQ_DISCONNECT_MSG:
if (optvallen_ > 0) {
unsigned char *bytes = (unsigned char *) optval_;
disconnect_msg =
std::vector<unsigned char> (bytes, bytes + optvallen_);
} else {
disconnect_msg = std::vector<unsigned char> ();
}
return 0;
case ZMQ_PRIORITY:
if (is_int && value >= 0) {
priority = value;
return 0;
}
break;
case ZMQ_HICCUP_MSG:
if (optvallen_ > 0) {
unsigned char *bytes = (unsigned char *) optval_;
hiccup_msg =
std::vector<unsigned char> (bytes, bytes + optvallen_);
} else {
hiccup_msg = std::vector<unsigned char> ();
}
return 0;
#endif
default:
#if defined(ZMQ_ACT_MILITANT)
// There are valid scenarios for probing with unknown socket option
// values, e.g. to check if security is enabled or not. This will not
// provoke a militant assert. However, passing bad values to a valid
// socket option will, if ZMQ_ACT_MILITANT is defined.
malformed = false;
#endif
break;
}
// TODO mechanism should either be set explicitly, or determined when
// connecting. currently, it depends on the order of setsockopt calls
// if there is some inconsistency, which is confusing. in addition,
// the assumed or set mechanism should be queryable (as a socket option)
#if defined(ZMQ_ACT_MILITANT)
// There is no valid use case for passing an error back to the application
// when it sent malformed arguments to a socket option. Use ./configure
// --with-militant to enable this checking.
if (malformed)
zmq_assert (false);
#endif
errno = EINVAL;
return -1;
}
int zmq::options_t::getsockopt (int option_,
void *optval_,
size_t *optvallen_) const
{
const bool is_int = (*optvallen_ == sizeof (int));
int *value = static_cast<int *> (optval_);
#if defined(ZMQ_ACT_MILITANT)
bool malformed = true; // Did caller pass a bad option value?
#endif
switch (option_) {
case ZMQ_SNDHWM:
if (is_int) {
*value = sndhwm;
return 0;
}
break;
case ZMQ_RCVHWM:
if (is_int) {
*value = rcvhwm;
return 0;
}
break;
case ZMQ_AFFINITY:
if (*optvallen_ == sizeof (uint64_t)) {
*(static_cast<uint64_t *> (optval_)) = affinity;
return 0;
}
break;
case ZMQ_ROUTING_ID:
return do_getsockopt (optval_, optvallen_, routing_id,
routing_id_size);
case ZMQ_RATE:
if (is_int) {
*value = rate;
return 0;
}
break;
case ZMQ_RECOVERY_IVL:
if (is_int) {
*value = recovery_ivl;
return 0;
}
break;
case ZMQ_SNDBUF:
if (is_int) {
*value = sndbuf;
return 0;
}
break;
case ZMQ_RCVBUF:
if (is_int) {
*value = rcvbuf;
return 0;
}
break;
case ZMQ_TOS:
if (is_int) {
*value = tos;
return 0;
}
break;
case ZMQ_TYPE:
if (is_int) {
*value = type;
return 0;
}
break;
case ZMQ_LINGER:
if (is_int) {
*value = linger.load ();
return 0;
}
break;
case ZMQ_CONNECT_TIMEOUT:
if (is_int) {
*value = connect_timeout;
return 0;
}
break;
case ZMQ_TCP_MAXRT:
if (is_int) {
*value = tcp_maxrt;
return 0;
}
break;
case ZMQ_RECONNECT_STOP:
if (is_int) {
*value = reconnect_stop;
return 0;
}
break;
case ZMQ_RECONNECT_IVL:
if (is_int) {
*value = reconnect_ivl;
return 0;
}
break;
case ZMQ_RECONNECT_IVL_MAX:
if (is_int) {
*value = reconnect_ivl_max;
return 0;
}
break;
case ZMQ_BACKLOG:
if (is_int) {
*value = backlog;
return 0;
}
break;
case ZMQ_MAXMSGSIZE:
if (*optvallen_ == sizeof (int64_t)) {
*(static_cast<int64_t *> (optval_)) = maxmsgsize;
*optvallen_ = sizeof (int64_t);
return 0;
}
break;
case ZMQ_MULTICAST_HOPS:
if (is_int) {
*value = multicast_hops;
return 0;
}
break;
case ZMQ_MULTICAST_MAXTPDU:
if (is_int) {
*value = multicast_maxtpdu;
return 0;
}
break;
case ZMQ_RCVTIMEO:
if (is_int) {
*value = rcvtimeo;
return 0;
}
break;
case ZMQ_SNDTIMEO:
if (is_int) {
*value = sndtimeo;
return 0;
}
break;
case ZMQ_IPV4ONLY:
if (is_int) {
*value = 1 - ipv6;
return 0;
}
break;
case ZMQ_IPV6:
if (is_int) {
*value = ipv6;
return 0;
}
break;
case ZMQ_IMMEDIATE:
if (is_int) {
*value = immediate;
return 0;
}
break;
case ZMQ_SOCKS_PROXY:
return do_getsockopt (optval_, optvallen_, socks_proxy_address);
case ZMQ_SOCKS_USERNAME:
return do_getsockopt (optval_, optvallen_, socks_proxy_username);
case ZMQ_SOCKS_PASSWORD:
return do_getsockopt (optval_, optvallen_, socks_proxy_password);
case ZMQ_TCP_KEEPALIVE:
if (is_int) {
*value = tcp_keepalive;
return 0;
}
break;
case ZMQ_TCP_KEEPALIVE_CNT:
if (is_int) {
*value = tcp_keepalive_cnt;
return 0;
}
break;
case ZMQ_TCP_KEEPALIVE_IDLE:
if (is_int) {
*value = tcp_keepalive_idle;
return 0;
}
break;
case ZMQ_TCP_KEEPALIVE_INTVL:
if (is_int) {
*value = tcp_keepalive_intvl;
return 0;
}
break;
case ZMQ_MECHANISM:
if (is_int) {
*value = mechanism;
return 0;
}
break;
case ZMQ_PLAIN_SERVER:
if (is_int) {
*value = as_server && mechanism == ZMQ_PLAIN;
return 0;
}
break;
case ZMQ_PLAIN_USERNAME:
return do_getsockopt (optval_, optvallen_, plain_username);
case ZMQ_PLAIN_PASSWORD:
return do_getsockopt (optval_, optvallen_, plain_password);
case ZMQ_ZAP_DOMAIN:
return do_getsockopt (optval_, optvallen_, zap_domain);
// If curve encryption isn't built, these options provoke EINVAL
#ifdef ZMQ_HAVE_CURVE
case ZMQ_CURVE_SERVER:
if (is_int) {
*value = as_server && mechanism == ZMQ_CURVE;
return 0;
}
break;
case ZMQ_CURVE_PUBLICKEY:
return do_getsockopt_curve_key (optval_, optvallen_,
curve_public_key);
case ZMQ_CURVE_SECRETKEY:
return do_getsockopt_curve_key (optval_, optvallen_,
curve_secret_key);
case ZMQ_CURVE_SERVERKEY:
return do_getsockopt_curve_key (optval_, optvallen_,
curve_server_key);
#endif
case ZMQ_CONFLATE:
if (is_int) {
*value = conflate;
return 0;
}
break;
// If libgssapi isn't installed, these options provoke EINVAL
#ifdef HAVE_LIBGSSAPI_KRB5
case ZMQ_GSSAPI_SERVER:
if (is_int) {
*value = as_server && mechanism == ZMQ_GSSAPI;
return 0;
}
break;
case ZMQ_GSSAPI_PRINCIPAL:
return do_getsockopt (optval_, optvallen_, gss_principal);
case ZMQ_GSSAPI_SERVICE_PRINCIPAL:
return do_getsockopt (optval_, optvallen_, gss_service_principal);
case ZMQ_GSSAPI_PLAINTEXT:
if (is_int) {
*value = gss_plaintext;
return 0;
}
break;
case ZMQ_GSSAPI_PRINCIPAL_NAMETYPE:
if (is_int) {
*value = gss_principal_nt;
return 0;
}
break;
case ZMQ_GSSAPI_SERVICE_PRINCIPAL_NAMETYPE:
if (is_int) {
*value = gss_service_principal_nt;
return 0;
}
break;
#endif
case ZMQ_HANDSHAKE_IVL:
if (is_int) {
*value = handshake_ivl;
return 0;
}
break;
case ZMQ_INVERT_MATCHING:
if (is_int) {
*value = invert_matching;
return 0;
}
break;
case ZMQ_HEARTBEAT_IVL:
if (is_int) {
*value = heartbeat_interval;
return 0;
}
break;
case ZMQ_HEARTBEAT_TTL:
if (is_int) {
// Convert the internal deciseconds value to milliseconds
*value = heartbeat_ttl * 100;
return 0;
}
break;
case ZMQ_HEARTBEAT_TIMEOUT:
if (is_int) {
*value = heartbeat_timeout;
return 0;
}
break;
case ZMQ_USE_FD:
if (is_int) {
*value = use_fd;
return 0;
}
break;
case ZMQ_BINDTODEVICE:
return do_getsockopt (optval_, optvallen_, bound_device);
case ZMQ_ZAP_ENFORCE_DOMAIN:
if (is_int) {
*value = zap_enforce_domain;
return 0;
}
break;
case ZMQ_LOOPBACK_FASTPATH:
if (is_int) {
*value = loopback_fastpath;
return 0;
}
break;
case ZMQ_MULTICAST_LOOP:
if (is_int) {
*value = multicast_loop;
return 0;
}
break;
#ifdef ZMQ_BUILD_DRAFT_API
case ZMQ_ROUTER_NOTIFY:
if (is_int) {
*value = router_notify;
return 0;
}
break;
case ZMQ_IN_BATCH_SIZE:
if (is_int) {
*value = in_batch_size;
return 0;
}
break;
case ZMQ_OUT_BATCH_SIZE:
if (is_int) {
*value = out_batch_size;
return 0;
}
break;
case ZMQ_PRIORITY:
if (is_int) {
*value = priority;
return 0;
}
break;
        case ZMQ_BUSY_POLL:
            if (is_int) {
                *value = busy_poll;
                return 0;
            }
            break;
#ifdef ZMQ_HAVE_NORM
case ZMQ_NORM_MODE:
if (is_int) {
*value = norm_mode;
return 0;
}
break;
case ZMQ_NORM_UNICAST_NACK:
if (is_int) {
*value = norm_unicast_nacks;
return 0;
}
break;
case ZMQ_NORM_BUFFER_SIZE:
if (is_int) {
*value = norm_buffer_size;
return 0;
}
break;
case ZMQ_NORM_SEGMENT_SIZE:
if (is_int) {
*value = norm_segment_size;
return 0;
}
break;
case ZMQ_NORM_BLOCK_SIZE:
if (is_int) {
*value = norm_block_size;
return 0;
}
break;
case ZMQ_NORM_NUM_PARITY:
if (is_int) {
*value = norm_num_parity;
return 0;
}
break;
case ZMQ_NORM_NUM_AUTOPARITY:
if (is_int) {
*value = norm_num_autoparity;
return 0;
}
break;
case ZMQ_NORM_PUSH:
if (is_int) {
*value = norm_push_enable;
return 0;
}
break;
#endif //ZMQ_HAVE_NORM
#endif
default:
#if defined(ZMQ_ACT_MILITANT)
malformed = false;
#endif
break;
}
#if defined(ZMQ_ACT_MILITANT)
if (malformed)
zmq_assert (false);
#endif
errno = EINVAL;
return -1;
}
|
sophomore_public/libzmq
|
src/options.cpp
|
C++
|
gpl-3.0
| 40,854 |
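// Usage sketch (not part of the library): exercising a few of the options
// handled by options_t::setsockopt/getsockopt above through the public C API
// declared in zmq.h. Error handling is reduced to asserts.
#include <zmq.h>
#include <assert.h>

int main (void)
{
    void *ctx = zmq_ctx_new ();
    void *sock = zmq_socket (ctx, ZMQ_DEALER);

    int sndhwm = 2000;
    assert (zmq_setsockopt (sock, ZMQ_SNDHWM, &sndhwm, sizeof sndhwm) == 0);

    int ipv6 = 1; //  boolean options are passed as int 0/1
    assert (zmq_setsockopt (sock, ZMQ_IPV6, &ipv6, sizeof ipv6) == 0);

    int heartbeat_ivl = 5000; //  milliseconds between PING messages
    assert (zmq_setsockopt (sock, ZMQ_HEARTBEAT_IVL, &heartbeat_ivl,
                            sizeof heartbeat_ivl) == 0);

    int type = 0;
    size_t type_len = sizeof type;
    assert (zmq_getsockopt (sock, ZMQ_TYPE, &type, &type_len) == 0);
    assert (type == ZMQ_DEALER);

    //  A mismatched option length fails the is_int check above: -1/EINVAL.
    short bad = 1;
    assert (zmq_setsockopt (sock, ZMQ_SNDHWM, &bad, sizeof bad) == -1);

    zmq_close (sock);
    zmq_ctx_term (ctx);
    return 0;
}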
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_OPTIONS_HPP_INCLUDED__
#define __ZMQ_OPTIONS_HPP_INCLUDED__
#include <string>
#include <vector>
#include <map>
#include "atomic_ptr.hpp"
#include "stddef.h"
#include "stdint.hpp"
#include "tcp_address.hpp"
#if defined ZMQ_HAVE_SO_PEERCRED || defined ZMQ_HAVE_LOCAL_PEERCRED
#include <set>
#include <sys/types.h>
#endif
#ifdef ZMQ_HAVE_LOCAL_PEERCRED
#include <sys/ucred.h>
#endif
#if __cplusplus >= 201103L || (defined _MSC_VER && _MSC_VER >= 1700)
#include <type_traits>
#endif
// Normal base 256 key is 32 bytes
#define CURVE_KEYSIZE 32
// Key encoded using Z85 is 40 bytes
#define CURVE_KEYSIZE_Z85 40
namespace zmq
{
struct options_t
{
options_t ();
int set_curve_key (uint8_t *destination_,
const void *optval_,
size_t optvallen_);
int setsockopt (int option_, const void *optval_, size_t optvallen_);
int getsockopt (int option_, void *optval_, size_t *optvallen_) const;
// High-water marks for message pipes.
int sndhwm;
int rcvhwm;
// I/O thread affinity.
uint64_t affinity;
// Socket routing id.
unsigned char routing_id_size;
unsigned char routing_id[256];
// Maximum transfer rate [kb/s]. Default 100kb/s.
int rate;
// Reliability time interval [ms]. Default 10 seconds.
int recovery_ivl;
// Sets the time-to-live field in every multicast packet sent.
int multicast_hops;
// Sets the maximum transport data unit size in every multicast
// packet sent.
int multicast_maxtpdu;
// SO_SNDBUF and SO_RCVBUF to be passed to underlying transport sockets.
int sndbuf;
int rcvbuf;
// Type of service (containing DSCP and ECN socket options)
int tos;
// Protocol-defined priority
int priority;
// Socket type.
int8_t type;
// Linger time, in milliseconds.
atomic_value_t linger;
// Maximum interval in milliseconds beyond which userspace will
// timeout connect().
// Default 0 (unused)
int connect_timeout;
// Maximum interval in milliseconds beyond which TCP will timeout
// retransmitted packets.
// Default 0 (unused)
int tcp_maxrt;
// Disable reconnect under certain conditions
// Default 0
int reconnect_stop;
// Minimum interval between attempts to reconnect, in milliseconds.
// Default 100ms
int reconnect_ivl;
// Maximum interval between attempts to reconnect, in milliseconds.
// Default 0ms (meaning maximum interval is disabled)
int reconnect_ivl_max;
// Maximum backlog for pending connections.
int backlog;
// Maximal size of message to handle.
int64_t maxmsgsize;
// The timeout for send/recv operations for this socket, in milliseconds.
int rcvtimeo;
int sndtimeo;
// If true, IPv6 is enabled (as well as IPv4)
bool ipv6;
// If 1, connecting pipes are not attached immediately, meaning a send()
// on a socket with only connecting pipes would block
int immediate;
// If 1, (X)SUB socket should filter the messages. If 0, it should not.
bool filter;
// If true, the subscription matching on (X)PUB and (X)SUB sockets
// is reversed. Messages are sent to and received by non-matching
// sockets.
bool invert_matching;
// If true, the routing id message is forwarded to the socket.
bool recv_routing_id;
// if true, router socket accepts non-zmq tcp connections
bool raw_socket;
bool raw_notify; // Provide connect notifications
// Address of SOCKS proxy
std::string socks_proxy_address;
// Credentials for SOCKS proxy.
// Connection method will be basic auth if username
// is not empty, no auth otherwise.
std::string socks_proxy_username;
std::string socks_proxy_password;
// TCP keep-alive settings.
// Defaults to -1 = do not change socket options
int tcp_keepalive;
int tcp_keepalive_cnt;
int tcp_keepalive_idle;
int tcp_keepalive_intvl;
// TCP accept() filters
typedef std::vector<tcp_address_mask_t> tcp_accept_filters_t;
tcp_accept_filters_t tcp_accept_filters;
// IPC accept() filters
#if defined ZMQ_HAVE_SO_PEERCRED || defined ZMQ_HAVE_LOCAL_PEERCRED
typedef std::set<uid_t> ipc_uid_accept_filters_t;
ipc_uid_accept_filters_t ipc_uid_accept_filters;
typedef std::set<gid_t> ipc_gid_accept_filters_t;
ipc_gid_accept_filters_t ipc_gid_accept_filters;
#endif
#if defined ZMQ_HAVE_SO_PEERCRED
typedef std::set<pid_t> ipc_pid_accept_filters_t;
ipc_pid_accept_filters_t ipc_pid_accept_filters;
#endif
// Security mechanism for all connections on this socket
int mechanism;
// If peer is acting as server for PLAIN or CURVE mechanisms
int as_server;
// ZAP authentication domain
std::string zap_domain;
// Security credentials for PLAIN mechanism
std::string plain_username;
std::string plain_password;
// Security credentials for CURVE mechanism
uint8_t curve_public_key[CURVE_KEYSIZE];
uint8_t curve_secret_key[CURVE_KEYSIZE];
uint8_t curve_server_key[CURVE_KEYSIZE];
// Principals for GSSAPI mechanism
std::string gss_principal;
std::string gss_service_principal;
// Name types GSSAPI principals
int gss_principal_nt;
int gss_service_principal_nt;
// If true, gss encryption will be disabled
bool gss_plaintext;
// ID of the socket.
int socket_id;
// If true, socket conflates outgoing/incoming messages.
// Applicable to dealer, push/pull, pub/sub socket types.
// Cannot receive multi-part messages.
// Ignores hwm
bool conflate;
// If connection handshake is not done after this many milliseconds,
// close socket. Default is 30 secs. 0 means no handshake timeout.
int handshake_ivl;
bool connected;
// If remote peer receives a PING message and doesn't receive another
// message within the ttl value, it should close the connection
// (measured in tenths of a second)
uint16_t heartbeat_ttl;
// Time in milliseconds between sending heartbeat PING messages.
int heartbeat_interval;
// Time in milliseconds to wait for a PING response before disconnecting
int heartbeat_timeout;
#if defined ZMQ_HAVE_VMCI
uint64_t vmci_buffer_size;
uint64_t vmci_buffer_min_size;
uint64_t vmci_buffer_max_size;
int vmci_connect_timeout;
#endif
// When creating a new ZMQ socket, if this option is set the value
// will be used as the File Descriptor instead of allocating a new
// one via the socket () system call.
int use_fd;
    // Device to bind the underlying socket to, e.g. VRF or interface
std::string bound_device;
// Enforce a non-empty ZAP domain requirement for PLAIN auth
bool zap_enforce_domain;
// Use of loopback fastpath.
bool loopback_fastpath;
// Loop sent multicast packets to local sockets
bool multicast_loop;
// Maximal batching size for engines with receiving functionality.
// So, if there are 10 messages that fit into the batch size, all of
// them may be read by a single 'recv' system call, thus avoiding
// unnecessary network stack traversals.
int in_batch_size;
// Maximal batching size for engines with sending functionality.
// So, if there are 10 messages that fit into the batch size, all of
// them may be written by a single 'send' system call, thus avoiding
// unnecessary network stack traversals.
int out_batch_size;
// Use zero copy strategy for storing message content when decoding.
bool zero_copy;
// Router socket ZMQ_NOTIFY_CONNECT/ZMQ_NOTIFY_DISCONNECT notifications
int router_notify;
// Application metadata
std::map<std::string, std::string> app_metadata;
// Version of monitor events to emit
int monitor_event_version;
// WSS Keys
std::string wss_key_pem;
std::string wss_cert_pem;
std::string wss_trust_pem;
std::string wss_hostname;
bool wss_trust_system;
// Hello msg
std::vector<unsigned char> hello_msg;
bool can_send_hello_msg;
// Disconnect msg
std::vector<unsigned char> disconnect_msg;
bool can_recv_disconnect_msg;
// Hiccup msg
std::vector<unsigned char> hiccup_msg;
bool can_recv_hiccup_msg;
// NORM Options
int norm_mode;
bool norm_unicast_nacks;
int norm_buffer_size;
int norm_segment_size;
int norm_block_size;
int norm_num_parity;
int norm_num_autoparity;
bool norm_push_enable;
// This option removes several delays caused by scheduling, interrupts and context switching.
int busy_poll;
};
inline bool get_effective_conflate_option (const options_t &options)
{
// conflate is only effective for some socket types
return options.conflate
&& (options.type == ZMQ_DEALER || options.type == ZMQ_PULL
|| options.type == ZMQ_PUSH || options.type == ZMQ_PUB
|| options.type == ZMQ_SUB);
}
int do_getsockopt (void *optval_,
size_t *optvallen_,
const void *value_,
size_t value_len_);
template <typename T>
int do_getsockopt (void *const optval_, size_t *const optvallen_, T value_)
{
#if __cplusplus >= 201103L && (!defined(__GNUC__) || __GNUC__ > 5)
static_assert (std::is_trivially_copyable<T>::value,
"invalid use of do_getsockopt");
#endif
return do_getsockopt (optval_, optvallen_, &value_, sizeof (T));
}
int do_getsockopt (void *optval_,
size_t *optvallen_,
const std::string &value_);
int do_setsockopt_int_as_bool_strict (const void *optval_,
size_t optvallen_,
bool *out_value_);
int do_setsockopt_int_as_bool_relaxed (const void *optval_,
size_t optvallen_,
bool *out_value_);
}
#endif
|
sophomore_public/libzmq
|
src/options.hpp
|
C++
|
gpl-3.0
| 10,118 |
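// Illustrative sketch (not library code): the int-as-bool convention behind
// the do_setsockopt_int_as_bool_{strict,relaxed} helpers declared above.
// Callers pass an int; "strict" call sites accept only 0/1, while "relaxed"
// ones treat any non-zero value as true. The mock below only mirrors that
// contract as implied by the names and call sites in options.cpp -- it is a
// stand-in, not the library's implementation.
#include <cassert>
#include <cerrno>
#include <cstring>

static int mock_int_as_bool (const void *optval_,
                             size_t optvallen_,
                             bool *out_,
                             bool strict_)
{
    if (optvallen_ == sizeof (int)) {
        int v;
        memcpy (&v, optval_, sizeof (int));
        if (!strict_ || v == 0 || v == 1) {
            *out_ = (v != 0);
            return 0;
        }
    }
    errno = EINVAL;
    return -1;
}

int main ()
{
    bool flag = false;
    const int on = 1, odd = 42;
    //  Strict: only 0 or 1 is accepted (used e.g. for ZMQ_CONFLATE).
    assert (mock_int_as_bool (&on, sizeof on, &flag, true) == 0 && flag);
    assert (mock_int_as_bool (&odd, sizeof odd, &flag, true) == -1);
    //  Relaxed: any non-zero int enables the option (e.g. ZMQ_ZAP_ENFORCE_DOMAIN).
    assert (mock_int_as_bool (&odd, sizeof odd, &flag, false) == 0 && flag);
    return 0;
}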
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include "own.hpp"
#include "err.hpp"
#include "io_thread.hpp"
zmq::own_t::own_t (class ctx_t *parent_, uint32_t tid_) :
object_t (parent_, tid_),
_terminating (false),
_sent_seqnum (0),
_processed_seqnum (0),
_owner (NULL),
_term_acks (0)
{
}
zmq::own_t::own_t (io_thread_t *io_thread_, const options_t &options_) :
object_t (io_thread_),
options (options_),
_terminating (false),
_sent_seqnum (0),
_processed_seqnum (0),
_owner (NULL),
_term_acks (0)
{
}
zmq::own_t::~own_t ()
{
}
void zmq::own_t::set_owner (own_t *owner_)
{
zmq_assert (!_owner);
_owner = owner_;
}
void zmq::own_t::inc_seqnum ()
{
// This function may be called from a different thread!
_sent_seqnum.add (1);
}
void zmq::own_t::process_seqnum ()
{
// Catch up with counter of processed commands.
_processed_seqnum++;
// We may have caught up and still have pending terms acks.
check_term_acks ();
}
void zmq::own_t::launch_child (own_t *object_)
{
// Specify the owner of the object.
object_->set_owner (this);
// Plug the object into the I/O thread.
send_plug (object_);
// Take ownership of the object.
send_own (this, object_);
}
void zmq::own_t::term_child (own_t *object_)
{
process_term_req (object_);
}
void zmq::own_t::process_term_req (own_t *object_)
{
// When shutting down we can ignore termination requests from owned
// objects. The termination request was already sent to the object.
if (_terminating)
return;
// If not found, we assume that termination request was already sent to
// the object so we can safely ignore the request.
if (0 == _owned.erase (object_))
return;
// If I/O object is well and alive let's ask it to terminate.
register_term_acks (1);
    // Note that this object is the root of the (partial) shutdown, thus its
    // value of linger is used rather than the values stored by the children.
send_term (object_, options.linger.load ());
}
void zmq::own_t::process_own (own_t *object_)
{
// If the object is already being shut down, new owned objects are
// immediately asked to terminate. Note that linger is set to zero.
if (_terminating) {
register_term_acks (1);
send_term (object_, 0);
return;
}
// Store the reference to the owned object.
_owned.insert (object_);
}
void zmq::own_t::terminate ()
{
// If termination is already underway, there's no point
// in starting it anew.
if (_terminating)
return;
// As for the root of the ownership tree, there's no one to terminate it,
// so it has to terminate itself.
if (!_owner) {
process_term (options.linger.load ());
return;
}
// If I am an owned object, I'll ask my owner to terminate me.
send_term_req (_owner, this);
}
bool zmq::own_t::is_terminating () const
{
return _terminating;
}
void zmq::own_t::process_term (int linger_)
{
// Double termination should never happen.
zmq_assert (!_terminating);
// Send termination request to all owned objects.
for (owned_t::iterator it = _owned.begin (), end = _owned.end (); it != end;
++it)
send_term (*it, linger_);
register_term_acks (static_cast<int> (_owned.size ()));
_owned.clear ();
    // Start the termination process and check whether, by chance, we can
    // already terminate.
_terminating = true;
check_term_acks ();
}
void zmq::own_t::register_term_acks (int count_)
{
_term_acks += count_;
}
void zmq::own_t::unregister_term_ack ()
{
zmq_assert (_term_acks > 0);
_term_acks--;
    // This may be the last ack we are waiting for before termination...
check_term_acks ();
}
void zmq::own_t::process_term_ack ()
{
unregister_term_ack ();
}
void zmq::own_t::check_term_acks ()
{
if (_terminating && _processed_seqnum == _sent_seqnum.get ()
&& _term_acks == 0) {
// Sanity check. There should be no active children at this point.
zmq_assert (_owned.empty ());
// The root object has nobody to confirm the termination to.
// Other nodes will confirm the termination to the owner.
if (_owner)
send_term_ack (_owner);
// Deallocate the resources.
process_destroy ();
}
}
void zmq::own_t::process_destroy ()
{
delete this;
}
|
sophomore_public/libzmq
|
src/own.cpp
|
C++
|
gpl-3.0
| 4,503 |
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_OWN_HPP_INCLUDED__
#define __ZMQ_OWN_HPP_INCLUDED__
#include <set>
#include "object.hpp"
#include "options.hpp"
#include "atomic_counter.hpp"
#include "stdint.hpp"
namespace zmq
{
class ctx_t;
class io_thread_t;
// Base class for objects forming a part of ownership hierarchy.
// It handles initialisation and destruction of such objects.
class own_t : public object_t
{
public:
// Note that the owner is unspecified in the constructor.
// It'll be supplied later on when the object is plugged in.
    // The object is not living within an I/O thread. It has its own
    // thread outside of 0MQ infrastructure.
own_t (zmq::ctx_t *parent_, uint32_t tid_);
// The object is living within I/O thread.
own_t (zmq::io_thread_t *io_thread_, const options_t &options_);
// When another owned object wants to send command to this object
// it calls this function to let it know it should not shut down
// before the command is delivered.
void inc_seqnum ();
    // Use the following two functions to wait for arbitrary events before
    // terminating. Just add the number of events to wait for using
    // register_term_acks. When an event occurs, call
    // unregister_term_ack. When the number of pending acks reaches zero
    // the object will be deallocated.
void register_term_acks (int count_);
void unregister_term_ack ();
protected:
// Launch the supplied object and become its owner.
void launch_child (own_t *object_);
// Terminate owned object
void term_child (own_t *object_);
    // Ask the owner object to terminate this object. It may take a while
    // before the actual termination is started. This function should not be
    // called more than once.
void terminate ();
// Returns true if the object is in process of termination.
bool is_terminating () const;
// Derived object destroys own_t. There's no point in allowing
// others to invoke the destructor. At the same time, it has to be
// virtual so that generic own_t deallocation mechanism destroys
// specific type of the owned object correctly.
~own_t () ZMQ_OVERRIDE;
// Term handler is protected rather than private so that it can
// be intercepted by the derived class. This is useful to add custom
// steps to the beginning of the termination process.
void process_term (int linger_) ZMQ_OVERRIDE;
// A place to hook in when physical destruction of the object
// is to be delayed.
virtual void process_destroy ();
// Socket options associated with this object.
options_t options;
private:
// Set owner of the object
void set_owner (own_t *owner_);
// Handlers for incoming commands.
void process_own (own_t *object_) ZMQ_OVERRIDE;
void process_term_req (own_t *object_) ZMQ_OVERRIDE;
void process_term_ack () ZMQ_OVERRIDE;
void process_seqnum () ZMQ_OVERRIDE;
// Check whether all the pending term acks were delivered.
// If so, deallocate this object.
void check_term_acks ();
// True if termination was already initiated. If so, we can destroy
// the object if there are no more child objects or pending term acks.
bool _terminating;
// Sequence number of the last command sent to this object.
atomic_counter_t _sent_seqnum;
// Sequence number of the last command processed by this object.
uint64_t _processed_seqnum;
// Socket owning this object. It's responsible for shutting down
// this object.
own_t *_owner;
// List of all objects owned by this socket. We are responsible
// for deallocating them before we quit.
typedef std::set<own_t *> owned_t;
owned_t _owned;
// Number of events we have to get before we can destroy the object.
int _term_acks;
ZMQ_NON_COPYABLE_NOR_MOVABLE (own_t)
};
}
#endif
|
sophomore_public/libzmq
|
src/own.hpp
|
C++
|
gpl-3.0
| 3,930 |
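// Illustrative model (not library code): the shutdown bookkeeping own_t
// implements. A terminating parent registers one pending ack per owned child,
// asks each child to terminate, and destroys itself only once every ack has
// arrived. The toy below is synchronous and self-contained, whereas the real
// protocol runs over inter-thread commands (send_term, send_term_ack); it is
// a simplified stand-in, not the zmq::own_t API.
#include <cassert>
#include <set>

struct toy_node
{
    std::set<toy_node *> owned;
    toy_node *owner = nullptr;
    int term_acks = 0;
    bool terminating = false;
    bool destroyed = false;

    void terminate ()
    {
        terminating = true;
        term_acks += static_cast<int> (owned.size ());
        for (toy_node *child : owned)
            child->terminate (); //  stands in for send_term ()
        owned.clear ();
        check_acks ();
    }
    void on_term_ack () //  stands in for process_term_ack ()
    {
        assert (term_acks > 0);
        --term_acks;
        check_acks ();
    }
    void check_acks ()
    {
        if (terminating && term_acks == 0 && !destroyed) {
            if (owner)
                owner->on_term_ack (); //  stands in for send_term_ack ()
            destroyed = true;          //  stands in for process_destroy ()
        }
    }
};

int main ()
{
    toy_node root, child1, child2;
    child1.owner = &root;
    child2.owner = &root;
    root.owned.insert (&child1);
    root.owned.insert (&child2);
    root.terminate ();
    assert (child1.destroyed && child2.destroyed && root.destroyed);
    return 0;
}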
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include "macros.hpp"
#include "pair.hpp"
#include "err.hpp"
#include "pipe.hpp"
#include "msg.hpp"
zmq::pair_t::pair_t (class ctx_t *parent_, uint32_t tid_, int sid_) :
socket_base_t (parent_, tid_, sid_), _pipe (NULL)
{
options.type = ZMQ_PAIR;
}
zmq::pair_t::~pair_t ()
{
zmq_assert (!_pipe);
}
void zmq::pair_t::xattach_pipe (pipe_t *pipe_,
bool subscribe_to_all_,
bool locally_initiated_)
{
LIBZMQ_UNUSED (subscribe_to_all_);
LIBZMQ_UNUSED (locally_initiated_);
zmq_assert (pipe_ != NULL);
// ZMQ_PAIR socket can only be connected to a single peer.
// The socket rejects any further connection requests.
if (_pipe == NULL)
_pipe = pipe_;
else
pipe_->terminate (false);
}
void zmq::pair_t::xpipe_terminated (pipe_t *pipe_)
{
if (pipe_ == _pipe) {
_pipe = NULL;
}
}
void zmq::pair_t::xread_activated (pipe_t *)
{
// There's just one pipe. No lists of active and inactive pipes.
// There's nothing to do here.
}
void zmq::pair_t::xwrite_activated (pipe_t *)
{
// There's just one pipe. No lists of active and inactive pipes.
// There's nothing to do here.
}
int zmq::pair_t::xsend (msg_t *msg_)
{
if (!_pipe || !_pipe->write (msg_)) {
errno = EAGAIN;
return -1;
}
if (!(msg_->flags () & msg_t::more))
_pipe->flush ();
// Detach the original message from the data buffer.
const int rc = msg_->init ();
errno_assert (rc == 0);
return 0;
}
int zmq::pair_t::xrecv (msg_t *msg_)
{
// Deallocate old content of the message.
int rc = msg_->close ();
errno_assert (rc == 0);
if (!_pipe || !_pipe->read (msg_)) {
// Initialise the output parameter to be a 0-byte message.
rc = msg_->init ();
errno_assert (rc == 0);
errno = EAGAIN;
return -1;
}
return 0;
}
bool zmq::pair_t::xhas_in ()
{
if (!_pipe)
return false;
return _pipe->check_read ();
}
bool zmq::pair_t::xhas_out ()
{
if (!_pipe)
return false;
return _pipe->check_write ();
}
|
sophomore_public/libzmq
|
src/pair.cpp
|
C++
|
gpl-3.0
| 2,223 |
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_PAIR_HPP_INCLUDED__
#define __ZMQ_PAIR_HPP_INCLUDED__
#include "blob.hpp"
#include "socket_base.hpp"
#include "session_base.hpp"
namespace zmq
{
class ctx_t;
class msg_t;
class pipe_t;
class io_thread_t;
class pair_t ZMQ_FINAL : public socket_base_t
{
public:
pair_t (zmq::ctx_t *parent_, uint32_t tid_, int sid_);
~pair_t ();
// Overrides of functions from socket_base_t.
void xattach_pipe (zmq::pipe_t *pipe_,
bool subscribe_to_all_,
bool locally_initiated_);
int xsend (zmq::msg_t *msg_);
int xrecv (zmq::msg_t *msg_);
bool xhas_in ();
bool xhas_out ();
void xread_activated (zmq::pipe_t *pipe_);
void xwrite_activated (zmq::pipe_t *pipe_);
void xpipe_terminated (zmq::pipe_t *pipe_);
private:
zmq::pipe_t *_pipe;
ZMQ_NON_COPYABLE_NOR_MOVABLE (pair_t)
};
}
#endif
|
sophomore_public/libzmq
|
src/pair.hpp
|
C++
|
gpl-3.0
| 931 |
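// Usage sketch (not part of the library): a ZMQ_PAIR socket talks to exactly
// one peer, which matches the single-pipe logic in pair.cpp above. Uses the
// public C API over inproc; error handling is reduced to asserts.
#include <zmq.h>
#include <assert.h>
#include <string.h>

int main (void)
{
    void *ctx = zmq_ctx_new ();
    void *a = zmq_socket (ctx, ZMQ_PAIR);
    void *b = zmq_socket (ctx, ZMQ_PAIR);
    assert (zmq_bind (a, "inproc://pair-demo") == 0);
    assert (zmq_connect (b, "inproc://pair-demo") == 0);

    assert (zmq_send (b, "ping", 4, 0) == 4);

    char buf[16];
    const int n = zmq_recv (a, buf, sizeof buf, 0);
    assert (n == 4 && memcmp (buf, "ping", 4) == 0);

    zmq_close (a);
    zmq_close (b);
    zmq_ctx_term (ctx);
    return 0;
}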
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include "macros.hpp"
#include "peer.hpp"
#include "pipe.hpp"
#include "wire.hpp"
#include "random.hpp"
#include "likely.hpp"
#include "err.hpp"
zmq::peer_t::peer_t (class ctx_t *parent_, uint32_t tid_, int sid_) :
server_t (parent_, tid_, sid_)
{
options.type = ZMQ_PEER;
options.can_send_hello_msg = true;
options.can_recv_disconnect_msg = true;
options.can_recv_hiccup_msg = true;
}
uint32_t zmq::peer_t::connect_peer (const char *endpoint_uri_)
{
scoped_optional_lock_t sync_lock (&_sync);
// connect_peer cannot work with immediate enabled
if (options.immediate == 1) {
errno = EFAULT;
return 0;
}
int rc = socket_base_t::connect_internal (endpoint_uri_);
if (rc != 0)
return 0;
return _peer_last_routing_id;
}
void zmq::peer_t::xattach_pipe (pipe_t *pipe_,
bool subscribe_to_all_,
bool locally_initiated_)
{
server_t::xattach_pipe (pipe_, subscribe_to_all_, locally_initiated_);
_peer_last_routing_id = pipe_->get_server_socket_routing_id ();
}
|
sophomore_public/libzmq
|
src/peer.cpp
|
C++
|
gpl-3.0
| 1,168 |
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_PEER_HPP_INCLUDED__
#define __ZMQ_PEER_HPP_INCLUDED__
#include <map>
#include "socket_base.hpp"
#include "server.hpp"
#include "session_base.hpp"
#include "stdint.hpp"
#include "blob.hpp"
#include "fq.hpp"
namespace zmq
{
class ctx_t;
class msg_t;
class pipe_t;
class peer_t ZMQ_FINAL : public server_t
{
public:
peer_t (zmq::ctx_t *parent_, uint32_t tid_, int sid_);
// Overrides of functions from socket_base_t.
void xattach_pipe (zmq::pipe_t *pipe_,
bool subscribe_to_all_,
bool locally_initiated_);
uint32_t connect_peer (const char *endpoint_uri_);
private:
uint32_t _peer_last_routing_id;
ZMQ_NON_COPYABLE_NOR_MOVABLE (peer_t)
};
}
#endif
|
sophomore_public/libzmq
|
src/peer.hpp
|
C++
|
gpl-3.0
| 783 |
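// Usage sketch (not part of the library): connect_peer () above backs the
// draft zmq_connect_peer () call, which returns a routing id that is then
// stamped onto outgoing messages. Requires libzmq built with the draft API
// (ZMQ_BUILD_DRAFT_API); error handling is reduced to asserts.
#include <zmq.h>
#include <assert.h>
#include <stdint.h>
#include <string.h>

int main (void)
{
    void *ctx = zmq_ctx_new ();
    void *server = zmq_socket (ctx, ZMQ_PEER);
    void *client = zmq_socket (ctx, ZMQ_PEER);
    assert (zmq_bind (server, "inproc://peer-demo") == 0);

    //  Returns 0 on failure, e.g. when ZMQ_IMMEDIATE is enabled (see above).
    const uint32_t rid = zmq_connect_peer (client, "inproc://peer-demo");
    assert (rid != 0);

    zmq_msg_t msg;
    assert (zmq_msg_init_size (&msg, 5) == 0);
    memcpy (zmq_msg_data (&msg), "hello", 5);
    assert (zmq_msg_set_routing_id (&msg, rid) == 0);
    assert (zmq_msg_send (&msg, client, 0) == 5);

    zmq_close (client);
    zmq_close (server);
    zmq_ctx_term (ctx);
    return 0;
}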
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include "macros.hpp"
#if defined ZMQ_HAVE_OPENPGM
#include <new>
#include "pgm_receiver.hpp"
#include "session_base.hpp"
#include "v1_decoder.hpp"
#include "stdint.hpp"
#include "wire.hpp"
#include "err.hpp"
zmq::pgm_receiver_t::pgm_receiver_t (class io_thread_t *parent_,
const options_t &options_) :
io_object_t (parent_),
has_rx_timer (false),
pgm_socket (true, options_),
options (options_),
session (NULL),
active_tsi (NULL),
insize (0)
{
}
zmq::pgm_receiver_t::~pgm_receiver_t ()
{
// Destructor should not be called before unplug.
zmq_assert (peers.empty ());
}
int zmq::pgm_receiver_t::init (bool udp_encapsulation_, const char *network_)
{
return pgm_socket.init (udp_encapsulation_, network_);
}
void zmq::pgm_receiver_t::plug (io_thread_t *io_thread_,
session_base_t *session_)
{
LIBZMQ_UNUSED (io_thread_);
// Retrieve PGM fds and start polling.
fd_t socket_fd = retired_fd;
fd_t waiting_pipe_fd = retired_fd;
pgm_socket.get_receiver_fds (&socket_fd, &waiting_pipe_fd);
socket_handle = add_fd (socket_fd);
pipe_handle = add_fd (waiting_pipe_fd);
set_pollin (pipe_handle);
set_pollin (socket_handle);
session = session_;
// If there are any subscriptions already queued in the session, drop them.
drop_subscriptions ();
}
void zmq::pgm_receiver_t::unplug ()
{
// Delete decoders.
for (peers_t::iterator it = peers.begin (), end = peers.end (); it != end;
++it) {
if (it->second.decoder != NULL) {
LIBZMQ_DELETE (it->second.decoder);
}
}
peers.clear ();
active_tsi = NULL;
if (has_rx_timer) {
cancel_timer (rx_timer_id);
has_rx_timer = false;
}
rm_fd (socket_handle);
rm_fd (pipe_handle);
session = NULL;
}
void zmq::pgm_receiver_t::terminate ()
{
unplug ();
delete this;
}
void zmq::pgm_receiver_t::restart_output ()
{
drop_subscriptions ();
}
bool zmq::pgm_receiver_t::restart_input ()
{
zmq_assert (session != NULL);
zmq_assert (active_tsi != NULL);
const peers_t::iterator it = peers.find (*active_tsi);
zmq_assert (it != peers.end ());
zmq_assert (it->second.joined);
// Push the pending message into the session.
int rc = session->push_msg (it->second.decoder->msg ());
errno_assert (rc == 0);
if (insize > 0) {
rc = process_input (it->second.decoder);
if (rc == -1) {
// HWM reached; we will try later.
if (errno == EAGAIN) {
session->flush ();
return true;
}
// Data error. Delete message decoder, mark the
// peer as not joined and drop remaining data.
it->second.joined = false;
LIBZMQ_DELETE (it->second.decoder);
insize = 0;
}
}
// Resume polling.
set_pollin (pipe_handle);
set_pollin (socket_handle);
active_tsi = NULL;
in_event ();
return true;
}
const zmq::endpoint_uri_pair_t &zmq::pgm_receiver_t::get_endpoint () const
{
return _empty_endpoint;
}
void zmq::pgm_receiver_t::in_event ()
{
    // If active_tsi is not null, there is a pending restart_input.
    // Keep the internal state as is so that restart_input will process the right data.
if (active_tsi) {
return;
}
// Read data from the underlying pgm_socket.
const pgm_tsi_t *tsi = NULL;
if (has_rx_timer) {
cancel_timer (rx_timer_id);
has_rx_timer = false;
}
// TODO: This loop can effectively block other engines in the same I/O
// thread in the case of high load.
while (true) {
// Get new batch of data.
// Note the workaround made not to break strict-aliasing rules.
insize = 0;
void *tmp = NULL;
ssize_t received = pgm_socket.receive (&tmp, &tsi);
        // No data to process. This may happen if the packet received is
        // neither ODATA nor RDATA.
if (received == 0) {
if (errno == ENOMEM || errno == EBUSY) {
const long timeout = pgm_socket.get_rx_timeout ();
add_timer (timeout, rx_timer_id);
has_rx_timer = true;
}
break;
}
// Find the peer based on its TSI.
peers_t::iterator it = peers.find (*tsi);
// Data loss. Delete decoder and mark the peer as disjoint.
if (received == -1) {
if (it != peers.end ()) {
it->second.joined = false;
if (it->second.decoder != NULL) {
LIBZMQ_DELETE (it->second.decoder);
}
}
break;
}
        // New peer. Add it to the list of known but not-yet-joined peers.
if (it == peers.end ()) {
peer_info_t peer_info = {false, NULL};
it = peers.ZMQ_MAP_INSERT_OR_EMPLACE (*tsi, peer_info).first;
}
insize = static_cast<size_t> (received);
inpos = (unsigned char *) tmp;
        // Read the offset of the first message in the current packet.
zmq_assert (insize >= sizeof (uint16_t));
uint16_t offset = get_uint16 (inpos);
inpos += sizeof (uint16_t);
insize -= sizeof (uint16_t);
// Join the stream if needed.
if (!it->second.joined) {
// There is no beginning of the message in current packet.
// Ignore the data.
if (offset == 0xffff)
continue;
zmq_assert (offset <= insize);
zmq_assert (it->second.decoder == NULL);
// We have to move data to the beginning of the first message.
inpos += offset;
insize -= offset;
// Mark the stream as joined.
it->second.joined = true;
// Create and connect decoder for the peer.
it->second.decoder =
new (std::nothrow) v1_decoder_t (0, options.maxmsgsize);
alloc_assert (it->second.decoder);
}
int rc = process_input (it->second.decoder);
if (rc == -1) {
if (errno == EAGAIN) {
active_tsi = tsi;
// Stop polling.
reset_pollin (pipe_handle);
reset_pollin (socket_handle);
break;
}
it->second.joined = false;
LIBZMQ_DELETE (it->second.decoder);
insize = 0;
}
}
// Flush any messages decoder may have produced.
session->flush ();
}
int zmq::pgm_receiver_t::process_input (v1_decoder_t *decoder)
{
zmq_assert (session != NULL);
while (insize > 0) {
size_t n = 0;
int rc = decoder->decode (inpos, insize, n);
if (rc == -1)
return -1;
inpos += n;
insize -= n;
if (rc == 0)
break;
rc = session->push_msg (decoder->msg ());
if (rc == -1) {
errno_assert (errno == EAGAIN);
return -1;
}
}
return 0;
}
void zmq::pgm_receiver_t::timer_event (int token)
{
zmq_assert (token == rx_timer_id);
// Timer cancels on return by poller_base.
has_rx_timer = false;
in_event ();
}
void zmq::pgm_receiver_t::drop_subscriptions ()
{
msg_t msg;
msg.init ();
while (session->pull_msg (&msg) == 0)
msg.close ();
}
#endif
|
sophomore_public/libzmq
|
src/pgm_receiver.cpp
|
C++
|
gpl-3.0
| 7,565 |
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_PGM_RECEIVER_HPP_INCLUDED__
#define __ZMQ_PGM_RECEIVER_HPP_INCLUDED__
#if defined ZMQ_HAVE_OPENPGM
#include <map>
#include <algorithm>
#include "io_object.hpp"
#include "i_engine.hpp"
#include "options.hpp"
#include "v1_decoder.hpp"
#include "pgm_socket.hpp"
namespace zmq
{
class io_thread_t;
class session_base_t;
class pgm_receiver_t ZMQ_FINAL : public io_object_t, public i_engine
{
public:
pgm_receiver_t (zmq::io_thread_t *parent_, const options_t &options_);
~pgm_receiver_t ();
int init (bool udp_encapsulation_, const char *network_);
// i_engine interface implementation.
bool has_handshake_stage () { return false; };
void plug (zmq::io_thread_t *io_thread_, zmq::session_base_t *session_);
void terminate ();
bool restart_input ();
void restart_output ();
void zap_msg_available () {}
const endpoint_uri_pair_t &get_endpoint () const;
// i_poll_events interface implementation.
void in_event ();
void timer_event (int token);
private:
// Unplug the engine from the session.
void unplug ();
// Decode received data (inpos, insize) and forward decoded
// messages to the session.
int process_input (v1_decoder_t *decoder);
// PGM is not able to move subscriptions upstream. Thus, drop all
// the pending subscriptions.
void drop_subscriptions ();
// RX timeout timer ID.
enum
{
rx_timer_id = 0xa1
};
const endpoint_uri_pair_t _empty_endpoint;
// RX timer is running.
bool has_rx_timer;
    // If joined is true we are already getting messages from the peer.
    // If it's false, we are getting data but haven't yet seen the
    // beginning of a message.
struct peer_info_t
{
bool joined;
v1_decoder_t *decoder;
};
struct tsi_comp
{
        bool operator() (const pgm_tsi_t &ltsi, const pgm_tsi_t &rtsi) const
{
uint32_t ll[2], rl[2];
            memcpy (ll, &ltsi, sizeof (ll));
memcpy (rl, &rtsi, sizeof (rl));
return (ll[0] < rl[0]) || (ll[0] == rl[0] && ll[1] < rl[1]);
}
};
typedef std::map<pgm_tsi_t, peer_info_t, tsi_comp> peers_t;
peers_t peers;
// PGM socket.
pgm_socket_t pgm_socket;
// Socket options.
options_t options;
// Associated session.
zmq::session_base_t *session;
const pgm_tsi_t *active_tsi;
// Number of bytes not consumed by the decoder due to pipe overflow.
size_t insize;
// Pointer to data still waiting to be processed by the decoder.
const unsigned char *inpos;
// Poll handle associated with PGM socket.
handle_t socket_handle;
// Poll handle associated with engine PGM waiting pipe.
handle_t pipe_handle;
ZMQ_NON_COPYABLE_NOR_MOVABLE (pgm_receiver_t)
};
}
#endif
#endif
|
sophomore_public/libzmq
|
src/pgm_receiver.hpp
|
C++
|
gpl-3.0
| 2,906 |
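// Illustrative sketch (not library code): tsi_comp above only has to give
// std::map a strict weak ordering over the opaque pgm_tsi_t key, so it
// compares the first eight bytes as two 32-bit words; the order carries no
// further meaning. The toy key type below is an assumption standing in for
// pgm_tsi_t.
#include <cassert>
#include <cstdint>
#include <cstring>
#include <map>

struct toy_tsi
{
    unsigned char bytes[8];
};

struct toy_tsi_comp
{
    bool operator() (const toy_tsi &ltsi, const toy_tsi &rtsi) const
    {
        uint32_t ll[2], rl[2];
        memcpy (ll, &ltsi, sizeof (ll));
        memcpy (rl, &rtsi, sizeof (rl));
        return (ll[0] < rl[0]) || (ll[0] == rl[0] && ll[1] < rl[1]);
    }
};

int main ()
{
    std::map<toy_tsi, int, toy_tsi_comp> peers;
    const toy_tsi a = {{1, 0, 0, 0, 0, 0, 0, 0}};
    const toy_tsi b = {{2, 0, 0, 0, 0, 0, 0, 0}};
    peers[a] = 1;
    peers[b] = 2;
    assert (peers.size () == 2 && peers[a] == 1 && peers[b] == 2);
    return 0;
}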
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#if defined ZMQ_HAVE_OPENPGM
#include <stdlib.h>
#include "io_thread.hpp"
#include "pgm_sender.hpp"
#include "session_base.hpp"
#include "err.hpp"
#include "wire.hpp"
#include "stdint.hpp"
#include "macros.hpp"
zmq::pgm_sender_t::pgm_sender_t (io_thread_t *parent_,
const options_t &options_) :
io_object_t (parent_),
has_tx_timer (false),
has_rx_timer (false),
session (NULL),
encoder (0),
more_flag (false),
pgm_socket (false, options_),
options (options_),
handle (static_cast<handle_t> (NULL)),
uplink_handle (static_cast<handle_t> (NULL)),
rdata_notify_handle (static_cast<handle_t> (NULL)),
pending_notify_handle (static_cast<handle_t> (NULL)),
out_buffer (NULL),
out_buffer_size (0),
write_size (0)
{
int rc = msg.init ();
errno_assert (rc == 0);
}
int zmq::pgm_sender_t::init (bool udp_encapsulation_, const char *network_)
{
int rc = pgm_socket.init (udp_encapsulation_, network_);
if (rc != 0)
return rc;
out_buffer_size = pgm_socket.get_max_tsdu_size ();
out_buffer = (unsigned char *) malloc (out_buffer_size);
alloc_assert (out_buffer);
return rc;
}
void zmq::pgm_sender_t::plug (io_thread_t *io_thread_, session_base_t *session_)
{
LIBZMQ_UNUSED (io_thread_);
// Allocate 2 fds for PGM socket.
fd_t downlink_socket_fd = retired_fd;
fd_t uplink_socket_fd = retired_fd;
fd_t rdata_notify_fd = retired_fd;
fd_t pending_notify_fd = retired_fd;
session = session_;
// Fill fds from PGM transport and add them to the poller.
pgm_socket.get_sender_fds (&downlink_socket_fd, &uplink_socket_fd,
&rdata_notify_fd, &pending_notify_fd);
handle = add_fd (downlink_socket_fd);
uplink_handle = add_fd (uplink_socket_fd);
rdata_notify_handle = add_fd (rdata_notify_fd);
pending_notify_handle = add_fd (pending_notify_fd);
    // Set POLLIN. We will never want to stop polling for uplink, i.e. we
    // never want to stop processing NAKs.
set_pollin (uplink_handle);
set_pollin (rdata_notify_handle);
set_pollin (pending_notify_handle);
// Set POLLOUT for downlink_socket_handle.
set_pollout (handle);
}
void zmq::pgm_sender_t::unplug ()
{
if (has_rx_timer) {
cancel_timer (rx_timer_id);
has_rx_timer = false;
}
if (has_tx_timer) {
cancel_timer (tx_timer_id);
has_tx_timer = false;
}
rm_fd (handle);
rm_fd (uplink_handle);
rm_fd (rdata_notify_handle);
rm_fd (pending_notify_handle);
session = NULL;
}
void zmq::pgm_sender_t::terminate ()
{
unplug ();
delete this;
}
void zmq::pgm_sender_t::restart_output ()
{
set_pollout (handle);
out_event ();
}
bool zmq::pgm_sender_t::restart_input ()
{
zmq_assert (false);
return true;
}
const zmq::endpoint_uri_pair_t &zmq::pgm_sender_t::get_endpoint () const
{
return _empty_endpoint;
}
zmq::pgm_sender_t::~pgm_sender_t ()
{
int rc = msg.close ();
errno_assert (rc == 0);
if (out_buffer) {
free (out_buffer);
out_buffer = NULL;
}
}
void zmq::pgm_sender_t::in_event ()
{
if (has_rx_timer) {
cancel_timer (rx_timer_id);
has_rx_timer = false;
}
    // An in-event on the sender side means receiving a NAK or SPMR from some peer.
pgm_socket.process_upstream ();
if (errno == ENOMEM || errno == EBUSY) {
const long timeout = pgm_socket.get_rx_timeout ();
add_timer (timeout, rx_timer_id);
has_rx_timer = true;
}
}
void zmq::pgm_sender_t::out_event ()
{
// POLLOUT event from send socket. If write buffer is empty,
// try to read new data from the encoder.
if (write_size == 0) {
// First two bytes (sizeof uint16_t) are used to store message
// offset in following steps. Note that by passing our buffer to
// the get data function we prevent it from returning its own buffer.
unsigned char *bf = out_buffer + sizeof (uint16_t);
size_t bfsz = out_buffer_size - sizeof (uint16_t);
uint16_t offset = 0xffff;
size_t bytes = encoder.encode (&bf, bfsz);
while (bytes < bfsz) {
if (!more_flag && offset == 0xffff)
offset = static_cast<uint16_t> (bytes);
int rc = session->pull_msg (&msg);
if (rc == -1)
break;
more_flag = msg.flags () & msg_t::more;
encoder.load_msg (&msg);
bf = out_buffer + sizeof (uint16_t) + bytes;
bytes += encoder.encode (&bf, bfsz - bytes);
}
        // If there is no data to write, stop polling for output.
if (bytes == 0) {
reset_pollout (handle);
return;
}
write_size = sizeof (uint16_t) + bytes;
// Put offset information in the buffer.
put_uint16 (out_buffer, offset);
}
if (has_tx_timer) {
cancel_timer (tx_timer_id);
set_pollout (handle);
has_tx_timer = false;
}
// Send the data.
size_t nbytes = pgm_socket.send (out_buffer, write_size);
    // We can write either all the data or nothing, meaning the rate limit was reached.
if (nbytes == write_size)
write_size = 0;
else {
zmq_assert (nbytes == 0);
if (errno == ENOMEM) {
// Stop polling handle and wait for tx timeout
const long timeout = pgm_socket.get_tx_timeout ();
add_timer (timeout, tx_timer_id);
reset_pollout (handle);
has_tx_timer = true;
} else
errno_assert (errno == EBUSY);
}
}
void zmq::pgm_sender_t::timer_event (int token)
{
// Timer cancels on return by poller_base.
if (token == rx_timer_id) {
has_rx_timer = false;
in_event ();
} else if (token == tx_timer_id) {
// Restart polling handle and retry sending
has_tx_timer = false;
set_pollout (handle);
out_event ();
} else
zmq_assert (false);
}
#endif
|
sophomore_public/libzmq
|
src/pgm_sender.cpp
|
C++
|
gpl-3.0
| 6,142 |
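// Illustrative sketch (not library code): the framing shared by
// pgm_sender_t::out_event and pgm_receiver_t::in_event above. The first two
// bytes of every packet hold the offset of the first message that starts in
// that packet (0xffff when none does), written in network byte order by
// put_uint16; a receiver that has not yet joined the stream uses it to skip
// the tail of an earlier message. The helper and packet bytes below are made
// up for the example.
#include <cassert>
#include <cstddef>
#include <cstdint>

//  Returns a pointer to the first byte a freshly joined peer may feed to the
//  decoder, or NULL when no message starts in this packet.
static const unsigned char *first_message (const unsigned char *pkt,
                                           size_t len)
{
    assert (len >= 2);
    const uint16_t offset = (uint16_t) ((pkt[0] << 8) | pkt[1]);
    if (offset == 0xffff)
        return NULL; //  packet carries only the middle of an earlier message
    assert ((size_t) offset <= len - 2);
    return pkt + 2 + offset; //  skip the header and the partial tail
}

int main ()
{
    //  Header says the first message starts 3 bytes into the payload.
    const unsigned char pkt[] = {0x00, 0x03, 'x', 'y', 'z', 'A', 'B'};
    assert (first_message (pkt, sizeof pkt) == pkt + 5);

    //  0xffff header: no message boundary here; an unjoined peer ignores it.
    const unsigned char tail[] = {0xff, 0xff, 'q'};
    assert (first_message (tail, sizeof tail) == NULL);
    return 0;
}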
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_PGM_SENDER_HPP_INCLUDED__
#define __ZMQ_PGM_SENDER_HPP_INCLUDED__
#if defined ZMQ_HAVE_OPENPGM
#include "stdint.hpp"
#include "io_object.hpp"
#include "i_engine.hpp"
#include "options.hpp"
#include "pgm_socket.hpp"
#include "v1_encoder.hpp"
#include "msg.hpp"
namespace zmq
{
class io_thread_t;
class session_base_t;
class pgm_sender_t ZMQ_FINAL : public io_object_t, public i_engine
{
public:
pgm_sender_t (zmq::io_thread_t *parent_, const options_t &options_);
~pgm_sender_t ();
int init (bool udp_encapsulation_, const char *network_);
// i_engine interface implementation.
bool has_handshake_stage () { return false; };
void plug (zmq::io_thread_t *io_thread_, zmq::session_base_t *session_);
void terminate ();
bool restart_input ();
void restart_output ();
void zap_msg_available () {}
const endpoint_uri_pair_t &get_endpoint () const;
// i_poll_events interface implementation.
void in_event ();
void out_event ();
void timer_event (int token);
private:
// Unplug the engine from the session.
void unplug ();
// TX and RX timeout timer ID's.
enum
{
tx_timer_id = 0xa0,
rx_timer_id = 0xa1
};
const endpoint_uri_pair_t _empty_endpoint;
// Timers are running.
bool has_tx_timer;
bool has_rx_timer;
session_base_t *session;
// Message encoder.
v1_encoder_t encoder;
msg_t msg;
// Keeps track of message boundaries.
bool more_flag;
// PGM socket.
pgm_socket_t pgm_socket;
// Socket options.
options_t options;
// Poll handle associated with PGM socket.
handle_t handle;
handle_t uplink_handle;
handle_t rdata_notify_handle;
handle_t pending_notify_handle;
// Output buffer from pgm_socket.
unsigned char *out_buffer;
// Output buffer size.
size_t out_buffer_size;
// Number of bytes in the buffer to be written to the socket.
// If zero, there are no data to be sent.
size_t write_size;
ZMQ_NON_COPYABLE_NOR_MOVABLE (pgm_sender_t)
};
}
#endif
#endif
|
sophomore_public/libzmq
|
src/pgm_sender.hpp
|
C++
|
gpl-3.0
| 2,155 |
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#ifdef ZMQ_HAVE_OPENPGM
#ifdef ZMQ_HAVE_LINUX
#include <poll.h>
#endif
#include <stdlib.h>
#include <string.h>
#include <string>
#include "options.hpp"
#include "pgm_socket.hpp"
#include "config.hpp"
#include "err.hpp"
#include "random.hpp"
#include "stdint.hpp"
#ifndef MSG_ERRQUEUE
#define MSG_ERRQUEUE 0x2000
#endif
zmq::pgm_socket_t::pgm_socket_t (bool receiver_, const options_t &options_) :
sock (NULL),
options (options_),
receiver (receiver_),
pgm_msgv (NULL),
pgm_msgv_len (0),
nbytes_rec (0),
nbytes_processed (0),
pgm_msgv_processed (0)
{
}
// Resolve PGM socket address.
// network_ of the form <interface & multicast group decls>:<IP port>
// e.g. eth0;239.192.0.1:7500
// link-local;224.250.0.1,224.250.0.2;224.250.0.3:8000
// ;[fe80::1%en0]:7500
int zmq::pgm_socket_t::init_address (const char *network_,
struct pgm_addrinfo_t **res,
uint16_t *port_number)
{
// Parse port number, start from end for IPv6
const char *port_delim = strrchr (network_, ':');
if (!port_delim) {
errno = EINVAL;
return -1;
}
*port_number = atoi (port_delim + 1);
char network[256];
if (port_delim - network_ >= (int) sizeof (network) - 1) {
errno = EINVAL;
return -1;
}
memset (network, '\0', sizeof (network));
memcpy (network, network_, port_delim - network_);
pgm_error_t *pgm_error = NULL;
struct pgm_addrinfo_t hints;
memset (&hints, 0, sizeof (hints));
hints.ai_family = AF_UNSPEC;
if (!pgm_getaddrinfo (network, NULL, res, &pgm_error)) {
// Invalid parameters don't set pgm_error_t.
zmq_assert (pgm_error != NULL);
if (pgm_error->domain == PGM_ERROR_DOMAIN_IF &&
// NB: cannot catch EAI_BADFLAGS.
(pgm_error->code != PGM_ERROR_SERVICE
&& pgm_error->code != PGM_ERROR_SOCKTNOSUPPORT)) {
// User, host, or network configuration or transient error.
pgm_error_free (pgm_error);
errno = EINVAL;
return -1;
}
// Fatal OpenPGM internal error.
zmq_assert (false);
}
return 0;
}
// Create, bind and connect PGM socket.
int zmq::pgm_socket_t::init (bool udp_encapsulation_, const char *network_)
{
    // Cannot open a transport before destroying the old one.
zmq_assert (sock == NULL);
zmq_assert (options.rate > 0);
// Zero counter used in msgrecv.
nbytes_rec = 0;
nbytes_processed = 0;
pgm_msgv_processed = 0;
uint16_t port_number;
struct pgm_addrinfo_t *res = NULL;
sa_family_t sa_family;
pgm_error_t *pgm_error = NULL;
if (init_address (network_, &res, &port_number) < 0) {
goto err_abort;
}
zmq_assert (res != NULL);
// Pick up detected IP family.
sa_family = res->ai_send_addrs[0].gsr_group.ss_family;
// Create IP/PGM or UDP/PGM socket.
if (udp_encapsulation_) {
if (!pgm_socket (&sock, sa_family, SOCK_SEQPACKET, IPPROTO_UDP,
&pgm_error)) {
// Invalid parameters don't set pgm_error_t.
zmq_assert (pgm_error != NULL);
if (pgm_error->domain == PGM_ERROR_DOMAIN_SOCKET
&& (pgm_error->code != PGM_ERROR_BADF
&& pgm_error->code != PGM_ERROR_FAULT
&& pgm_error->code != PGM_ERROR_NOPROTOOPT
&& pgm_error->code != PGM_ERROR_FAILED))
// User, host, or network configuration or transient error.
goto err_abort;
// Fatal OpenPGM internal error.
zmq_assert (false);
}
// All options are of data type int
const int encapsulation_port = port_number;
if (!pgm_setsockopt (sock, IPPROTO_PGM, PGM_UDP_ENCAP_UCAST_PORT,
&encapsulation_port, sizeof (encapsulation_port)))
goto err_abort;
if (!pgm_setsockopt (sock, IPPROTO_PGM, PGM_UDP_ENCAP_MCAST_PORT,
&encapsulation_port, sizeof (encapsulation_port)))
goto err_abort;
} else {
if (!pgm_socket (&sock, sa_family, SOCK_SEQPACKET, IPPROTO_PGM,
&pgm_error)) {
// Invalid parameters don't set pgm_error_t.
zmq_assert (pgm_error != NULL);
if (pgm_error->domain == PGM_ERROR_DOMAIN_SOCKET
&& (pgm_error->code != PGM_ERROR_BADF
&& pgm_error->code != PGM_ERROR_FAULT
&& pgm_error->code != PGM_ERROR_NOPROTOOPT
&& pgm_error->code != PGM_ERROR_FAILED))
// User, host, or network configuration or transient error.
goto err_abort;
// Fatal OpenPGM internal error.
zmq_assert (false);
}
}
{
const int rcvbuf = (int) options.rcvbuf;
if (rcvbuf >= 0) {
if (!pgm_setsockopt (sock, SOL_SOCKET, SO_RCVBUF, &rcvbuf,
sizeof (rcvbuf)))
goto err_abort;
}
const int sndbuf = (int) options.sndbuf;
if (sndbuf >= 0) {
if (!pgm_setsockopt (sock, SOL_SOCKET, SO_SNDBUF, &sndbuf,
sizeof (sndbuf)))
goto err_abort;
}
const int max_tpdu = (int) options.multicast_maxtpdu;
if (!pgm_setsockopt (sock, IPPROTO_PGM, PGM_MTU, &max_tpdu,
sizeof (max_tpdu)))
goto err_abort;
}
if (receiver) {
const int recv_only = 1, rxw_max_tpdu = (int) options.multicast_maxtpdu,
rxw_sqns = compute_sqns (rxw_max_tpdu),
peer_expiry = pgm_secs (300), spmr_expiry = pgm_msecs (25),
nak_bo_ivl = pgm_msecs (50), nak_rpt_ivl = pgm_msecs (200),
nak_rdata_ivl = pgm_msecs (200), nak_data_retries = 50,
nak_ncf_retries = 50;
if (!pgm_setsockopt (sock, IPPROTO_PGM, PGM_RECV_ONLY, &recv_only,
sizeof (recv_only))
|| !pgm_setsockopt (sock, IPPROTO_PGM, PGM_RXW_SQNS, &rxw_sqns,
sizeof (rxw_sqns))
|| !pgm_setsockopt (sock, IPPROTO_PGM, PGM_PEER_EXPIRY,
&peer_expiry, sizeof (peer_expiry))
|| !pgm_setsockopt (sock, IPPROTO_PGM, PGM_SPMR_EXPIRY,
&spmr_expiry, sizeof (spmr_expiry))
|| !pgm_setsockopt (sock, IPPROTO_PGM, PGM_NAK_BO_IVL, &nak_bo_ivl,
sizeof (nak_bo_ivl))
|| !pgm_setsockopt (sock, IPPROTO_PGM, PGM_NAK_RPT_IVL,
&nak_rpt_ivl, sizeof (nak_rpt_ivl))
|| !pgm_setsockopt (sock, IPPROTO_PGM, PGM_NAK_RDATA_IVL,
&nak_rdata_ivl, sizeof (nak_rdata_ivl))
|| !pgm_setsockopt (sock, IPPROTO_PGM, PGM_NAK_DATA_RETRIES,
&nak_data_retries, sizeof (nak_data_retries))
|| !pgm_setsockopt (sock, IPPROTO_PGM, PGM_NAK_NCF_RETRIES,
&nak_ncf_retries, sizeof (nak_ncf_retries)))
goto err_abort;
} else {
const int send_only = 1, max_rte = (int) ((options.rate * 1000) / 8),
txw_max_tpdu = (int) options.multicast_maxtpdu,
txw_sqns = compute_sqns (txw_max_tpdu),
ambient_spm = pgm_secs (30),
heartbeat_spm[] = {
pgm_msecs (100), pgm_msecs (100), pgm_msecs (100),
pgm_msecs (100), pgm_msecs (1300), pgm_secs (7),
pgm_secs (16), pgm_secs (25), pgm_secs (30)};
if (!pgm_setsockopt (sock, IPPROTO_PGM, PGM_SEND_ONLY, &send_only,
sizeof (send_only))
|| !pgm_setsockopt (sock, IPPROTO_PGM, PGM_ODATA_MAX_RTE, &max_rte,
sizeof (max_rte))
|| !pgm_setsockopt (sock, IPPROTO_PGM, PGM_TXW_SQNS, &txw_sqns,
sizeof (txw_sqns))
|| !pgm_setsockopt (sock, IPPROTO_PGM, PGM_AMBIENT_SPM,
&ambient_spm, sizeof (ambient_spm))
|| !pgm_setsockopt (sock, IPPROTO_PGM, PGM_HEARTBEAT_SPM,
&heartbeat_spm, sizeof (heartbeat_spm)))
goto err_abort;
}
// PGM transport GSI.
struct pgm_sockaddr_t addr;
memset (&addr, 0, sizeof (addr));
addr.sa_port = port_number;
addr.sa_addr.sport = DEFAULT_DATA_SOURCE_PORT;
// Create random GSI.
uint32_t buf[2];
buf[0] = generate_random ();
buf[1] = generate_random ();
if (!pgm_gsi_create_from_data (&addr.sa_addr.gsi, (uint8_t *) buf, 8))
goto err_abort;
// Bind a transport to the specified network devices.
struct pgm_interface_req_t if_req;
memset (&if_req, 0, sizeof (if_req));
if_req.ir_interface = res->ai_recv_addrs[0].gsr_interface;
if_req.ir_scope_id = 0;
if (AF_INET6 == sa_family) {
struct sockaddr_in6 sa6;
memcpy (&sa6, &res->ai_recv_addrs[0].gsr_group, sizeof (sa6));
if_req.ir_scope_id = sa6.sin6_scope_id;
}
if (!pgm_bind3 (sock, &addr, sizeof (addr), &if_req, sizeof (if_req),
&if_req, sizeof (if_req), &pgm_error)) {
// Invalid parameters don't set pgm_error_t.
zmq_assert (pgm_error != NULL);
if ((pgm_error->domain == PGM_ERROR_DOMAIN_SOCKET
|| pgm_error->domain == PGM_ERROR_DOMAIN_IF)
&& (pgm_error->code != PGM_ERROR_INVAL
&& pgm_error->code != PGM_ERROR_BADF
&& pgm_error->code != PGM_ERROR_FAULT))
// User, host, or network configuration or transient error.
goto err_abort;
// Fatal OpenPGM internal error.
zmq_assert (false);
}
// Join IP multicast groups.
for (unsigned i = 0; i < res->ai_recv_addrs_len; i++) {
if (!pgm_setsockopt (sock, IPPROTO_PGM, PGM_JOIN_GROUP,
&res->ai_recv_addrs[i], sizeof (struct group_req)))
goto err_abort;
}
if (!pgm_setsockopt (sock, IPPROTO_PGM, PGM_SEND_GROUP,
&res->ai_send_addrs[0], sizeof (struct group_req)))
goto err_abort;
pgm_freeaddrinfo (res);
res = NULL;
// Set IP level parameters.
{
// Multicast loopback disabled by default
const int multicast_loop = 0;
if (!pgm_setsockopt (sock, IPPROTO_PGM, PGM_MULTICAST_LOOP,
&multicast_loop, sizeof (multicast_loop)))
goto err_abort;
const int multicast_hops = options.multicast_hops;
if (!pgm_setsockopt (sock, IPPROTO_PGM, PGM_MULTICAST_HOPS,
&multicast_hops, sizeof (multicast_hops)))
goto err_abort;
// Expedited Forwarding PHB for network elements, no ECN.
// Ignore return value due to varied runtime support.
const int dscp = 0x2e << 2;
if (AF_INET6 != sa_family)
pgm_setsockopt (sock, IPPROTO_PGM, PGM_TOS, &dscp, sizeof (dscp));
const int nonblocking = 1;
if (!pgm_setsockopt (sock, IPPROTO_PGM, PGM_NOBLOCK, &nonblocking,
sizeof (nonblocking)))
goto err_abort;
}
// Connect PGM transport to start state machine.
if (!pgm_connect (sock, &pgm_error)) {
// Invalid parameters don't set pgm_error_t.
zmq_assert (pgm_error != NULL);
goto err_abort;
}
// For receiver transport preallocate pgm_msgv array.
if (receiver) {
zmq_assert (options.in_batch_size > 0);
size_t max_tsdu_size = get_max_tsdu_size ();
pgm_msgv_len = (int) options.in_batch_size / max_tsdu_size;
if ((int) options.in_batch_size % max_tsdu_size)
pgm_msgv_len++;
zmq_assert (pgm_msgv_len);
pgm_msgv = (pgm_msgv_t *) malloc (sizeof (pgm_msgv_t) * pgm_msgv_len);
alloc_assert (pgm_msgv);
}
return 0;
err_abort:
if (sock != NULL) {
pgm_close (sock, FALSE);
sock = NULL;
}
if (res != NULL) {
pgm_freeaddrinfo (res);
res = NULL;
}
if (pgm_error != NULL) {
pgm_error_free (pgm_error);
pgm_error = NULL;
}
errno = EINVAL;
return -1;
}
zmq::pgm_socket_t::~pgm_socket_t ()
{
if (pgm_msgv)
free (pgm_msgv);
if (sock)
pgm_close (sock, TRUE);
}
// Get receiver fds. receive_fd_ is signaled for incoming packets,
// waiting_pipe_fd_ is signaled for state driven events and data.
void zmq::pgm_socket_t::get_receiver_fds (fd_t *receive_fd_,
fd_t *waiting_pipe_fd_)
{
socklen_t socklen;
bool rc;
zmq_assert (receive_fd_);
zmq_assert (waiting_pipe_fd_);
socklen = sizeof (*receive_fd_);
rc =
pgm_getsockopt (sock, IPPROTO_PGM, PGM_RECV_SOCK, receive_fd_, &socklen);
zmq_assert (rc);
zmq_assert (socklen == sizeof (*receive_fd_));
socklen = sizeof (*waiting_pipe_fd_);
rc = pgm_getsockopt (sock, IPPROTO_PGM, PGM_PENDING_SOCK, waiting_pipe_fd_,
&socklen);
zmq_assert (rc);
zmq_assert (socklen == sizeof (*waiting_pipe_fd_));
}
// Get fds and store them into user allocated memory.
// send_fd is for non-blocking send wire notifications.
// receive_fd_ is for incoming back-channel protocol packets.
// rdata_notify_fd_ is raised for waiting repair transmissions.
// pending_notify_fd_ is for state driven events.
void zmq::pgm_socket_t::get_sender_fds (fd_t *send_fd_,
fd_t *receive_fd_,
fd_t *rdata_notify_fd_,
fd_t *pending_notify_fd_)
{
socklen_t socklen;
bool rc;
zmq_assert (send_fd_);
zmq_assert (receive_fd_);
zmq_assert (rdata_notify_fd_);
zmq_assert (pending_notify_fd_);
socklen = sizeof (*send_fd_);
rc = pgm_getsockopt (sock, IPPROTO_PGM, PGM_SEND_SOCK, send_fd_, &socklen);
zmq_assert (rc);
    zmq_assert (socklen == sizeof (*send_fd_));
socklen = sizeof (*receive_fd_);
rc =
pgm_getsockopt (sock, IPPROTO_PGM, PGM_RECV_SOCK, receive_fd_, &socklen);
zmq_assert (rc);
zmq_assert (socklen == sizeof (*receive_fd_));
socklen = sizeof (*rdata_notify_fd_);
rc = pgm_getsockopt (sock, IPPROTO_PGM, PGM_REPAIR_SOCK, rdata_notify_fd_,
&socklen);
zmq_assert (rc);
zmq_assert (socklen == sizeof (*rdata_notify_fd_));
socklen = sizeof (*pending_notify_fd_);
rc = pgm_getsockopt (sock, IPPROTO_PGM, PGM_PENDING_SOCK,
pending_notify_fd_, &socklen);
zmq_assert (rc);
zmq_assert (socklen == sizeof (*pending_notify_fd_));
}
// Send one APDU, transmit window owned memory.
// data_len_ must be less than one TPDU.
size_t zmq::pgm_socket_t::send (unsigned char *data_, size_t data_len_)
{
size_t nbytes = 0;
const int status = pgm_send (sock, data_, data_len_, &nbytes);
// We have to write all data as one packet.
if (nbytes > 0) {
zmq_assert (status == PGM_IO_STATUS_NORMAL);
zmq_assert (nbytes == data_len_);
} else {
zmq_assert (status == PGM_IO_STATUS_RATE_LIMITED
|| status == PGM_IO_STATUS_WOULD_BLOCK);
if (status == PGM_IO_STATUS_RATE_LIMITED)
errno = ENOMEM;
else
errno = EBUSY;
}
// Save return value.
last_tx_status = status;
return nbytes;
}
long zmq::pgm_socket_t::get_rx_timeout ()
{
if (last_rx_status != PGM_IO_STATUS_RATE_LIMITED
&& last_rx_status != PGM_IO_STATUS_TIMER_PENDING)
return -1;
struct timeval tv;
socklen_t optlen = sizeof (tv);
const bool rc = pgm_getsockopt (sock, IPPROTO_PGM,
last_rx_status == PGM_IO_STATUS_RATE_LIMITED
? PGM_RATE_REMAIN
: PGM_TIME_REMAIN,
&tv, &optlen);
zmq_assert (rc);
const long timeout = (tv.tv_sec * 1000) + (tv.tv_usec / 1000);
return timeout;
}
long zmq::pgm_socket_t::get_tx_timeout ()
{
if (last_tx_status != PGM_IO_STATUS_RATE_LIMITED)
return -1;
struct timeval tv;
socklen_t optlen = sizeof (tv);
const bool rc =
pgm_getsockopt (sock, IPPROTO_PGM, PGM_RATE_REMAIN, &tv, &optlen);
zmq_assert (rc);
const long timeout = (tv.tv_sec * 1000) + (tv.tv_usec / 1000);
return timeout;
}
// Return max TSDU size without fragmentation from current PGM transport.
size_t zmq::pgm_socket_t::get_max_tsdu_size ()
{
int max_tsdu = 0;
socklen_t optlen = sizeof (max_tsdu);
bool rc = pgm_getsockopt (sock, IPPROTO_PGM, PGM_MSS, &max_tsdu, &optlen);
zmq_assert (rc);
zmq_assert (optlen == sizeof (max_tsdu));
return (size_t) max_tsdu;
}
// pgm_recvmsgv is called to fill the pgm_msgv array up to pgm_msgv_len.
// In subsequent calls, data from the previously filled pgm_msgv structures
// are returned.
ssize_t zmq::pgm_socket_t::receive (void **raw_data_, const pgm_tsi_t **tsi_)
{
size_t raw_data_len = 0;
    // We have already passed all the data from pgm_recvmsgv up the stack
    // and have to return 0 so that another engine in this thread gets scheduled.
if (nbytes_rec == nbytes_processed && nbytes_rec > 0) {
// Reset all the counters.
nbytes_rec = 0;
nbytes_processed = 0;
pgm_msgv_processed = 0;
errno = EAGAIN;
return 0;
}
    // If this is the first pass, or if we have processed all the pgm_msgv_t
    // structures previously read from the pgm socket.
if (nbytes_rec == nbytes_processed) {
// Check program flow.
zmq_assert (pgm_msgv_processed == 0);
zmq_assert (nbytes_processed == 0);
zmq_assert (nbytes_rec == 0);
// Receive a vector of Application Protocol Domain Unit's (APDUs)
// from the transport.
pgm_error_t *pgm_error = NULL;
const int status = pgm_recvmsgv (sock, pgm_msgv, pgm_msgv_len,
MSG_ERRQUEUE, &nbytes_rec, &pgm_error);
// Invalid parameters.
zmq_assert (status != PGM_IO_STATUS_ERROR);
last_rx_status = status;
        // In the case when no ODATA/RDATA fired the POLLIN event (e.g. SPM),
        // pgm_recvmsgv returns PGM_IO_STATUS_TIMER_PENDING.
if (status == PGM_IO_STATUS_TIMER_PENDING) {
zmq_assert (nbytes_rec == 0);
            // If no RDATA/ODATA caused the POLLIN event, 0 is returned.
nbytes_rec = 0;
errno = EBUSY;
return 0;
}
// Send SPMR, NAK, ACK is rate limited.
if (status == PGM_IO_STATUS_RATE_LIMITED) {
zmq_assert (nbytes_rec == 0);
            // If no RDATA/ODATA caused the POLLIN event, 0 is returned.
nbytes_rec = 0;
errno = ENOMEM;
return 0;
}
// No peers and hence no incoming packets.
if (status == PGM_IO_STATUS_WOULD_BLOCK) {
zmq_assert (nbytes_rec == 0);
            // If no RDATA/ODATA caused the POLLIN event, 0 is returned.
nbytes_rec = 0;
errno = EAGAIN;
return 0;
}
// Data loss.
if (status == PGM_IO_STATUS_RESET) {
struct pgm_sk_buff_t *skb = pgm_msgv[0].msgv_skb[0];
// Save lost data TSI.
*tsi_ = &skb->tsi;
nbytes_rec = 0;
            // In case of data loss, -1 is returned.
errno = EINVAL;
pgm_free_skb (skb);
return -1;
}
zmq_assert (status == PGM_IO_STATUS_NORMAL);
} else {
zmq_assert (pgm_msgv_processed <= pgm_msgv_len);
}
// Zero byte payloads are valid in PGM, but not 0MQ protocol.
zmq_assert (nbytes_rec > 0);
// Only one APDU per pgm_msgv_t structure is allowed.
zmq_assert (pgm_msgv[pgm_msgv_processed].msgv_len == 1);
struct pgm_sk_buff_t *skb = pgm_msgv[pgm_msgv_processed].msgv_skb[0];
// Take pointers from pgm_msgv_t structure.
*raw_data_ = skb->data;
raw_data_len = skb->len;
// Save current TSI.
*tsi_ = &skb->tsi;
    // Move to the next pgm_msgv_t structure.
pgm_msgv_processed++;
zmq_assert (pgm_msgv_processed <= pgm_msgv_len);
nbytes_processed += raw_data_len;
return raw_data_len;
}
void zmq::pgm_socket_t::process_upstream ()
{
pgm_msgv_t dummy_msg;
size_t dummy_bytes = 0;
pgm_error_t *pgm_error = NULL;
const int status = pgm_recvmsgv (sock, &dummy_msg, 1, MSG_ERRQUEUE,
&dummy_bytes, &pgm_error);
// Invalid parameters.
zmq_assert (status != PGM_IO_STATUS_ERROR);
// No data should be returned.
zmq_assert (dummy_bytes == 0
&& (status == PGM_IO_STATUS_TIMER_PENDING
|| status == PGM_IO_STATUS_RATE_LIMITED
|| status == PGM_IO_STATUS_WOULD_BLOCK));
last_rx_status = status;
if (status == PGM_IO_STATUS_TIMER_PENDING)
errno = EBUSY;
else if (status == PGM_IO_STATUS_RATE_LIMITED)
errno = ENOMEM;
else
errno = EAGAIN;
}
int zmq::pgm_socket_t::compute_sqns (int tpdu_)
{
// Convert rate into B/ms.
uint64_t rate = uint64_t (options.rate) / 8;
// Compute the size of the buffer in bytes.
uint64_t size = uint64_t (options.recovery_ivl) * rate;
// Translate the size into number of packets.
uint64_t sqns = size / tpdu_;
// Buffer should be able to hold at least one packet.
if (sqns == 0)
sqns = 1;
return (int) sqns;
}
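// Worked example of the arithmetic above (hypothetical option values):
// with options.rate = 1000 kbit/s, options.recovery_ivl = 10000 ms and
// tpdu_ = 1500 bytes,
//   rate = 1000 / 8        = 125 B/ms
//   size = 10000 * 125     = 1250000 bytes
//   sqns = 1250000 / 1500  = 833 sequence numbers,
// i.e. the window is sized to hold roughly one recovery interval of traffic.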
#endif
| sophomore_public/libzmq | src/pgm_socket.cpp | C++ | gpl-3.0 | 22,084 |
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __PGM_SOCKET_HPP_INCLUDED__
#define __PGM_SOCKET_HPP_INCLUDED__
#if defined ZMQ_HAVE_OPENPGM
#ifdef ZMQ_HAVE_WINDOWS
#define __PGM_WININT_H__
#endif
#include <pgm/pgm.h>
#if defined(ZMQ_HAVE_OSX) || defined(ZMQ_HAVE_NETBSD)
#include <pgm/in.h>
#endif
#include "fd.hpp"
#include "options.hpp"
namespace zmq
{
// Encapsulates PGM socket.
class pgm_socket_t
{
public:
// If receiver_ is true PGM transport is not generating SPM packets.
pgm_socket_t (bool receiver_, const options_t &options_);
// Closes the transport.
~pgm_socket_t ();
// Initialize PGM network structures (GSI, GSRs).
int init (bool udp_encapsulation_, const char *network_);
// Resolve PGM socket address.
static int init_address (const char *network_,
struct pgm_addrinfo_t **addr,
uint16_t *port_number);
// Get receiver fds and store them into user allocated memory.
void get_receiver_fds (fd_t *receive_fd_, fd_t *waiting_pipe_fd_);
// Get sender and receiver fds and store it to user allocated
// memory. Receive fd is used to process NAKs from peers.
void get_sender_fds (fd_t *send_fd_,
fd_t *receive_fd_,
fd_t *rdata_notify_fd_,
fd_t *pending_notify_fd_);
// Send data as one APDU, transmit window owned memory.
size_t send (unsigned char *data_, size_t data_len_);
// Returns max tsdu size without fragmentation.
size_t get_max_tsdu_size ();
// Receive data from pgm socket.
ssize_t receive (void **data_, const pgm_tsi_t **tsi_);
long get_rx_timeout ();
long get_tx_timeout ();
// POLLIN on sender side should mean NAK or SPMR receiving.
// process_upstream function is used to handle such a situation.
void process_upstream ();
private:
// Compute size of the buffer based on rate and recovery interval.
int compute_sqns (int tpdu_);
// OpenPGM transport.
pgm_sock_t *sock;
int last_rx_status, last_tx_status;
// Associated socket options.
options_t options;
// true when pgm_socket should create receiving side.
bool receiver;
// Array of pgm_msgv_t structures to store received data
// from the socket (pgm_transport_recvmsgv).
pgm_msgv_t *pgm_msgv;
// Size of pgm_msgv array.
size_t pgm_msgv_len;
// How many bytes were read from pgm socket.
size_t nbytes_rec;
// How many bytes were processed from last pgm socket read.
size_t nbytes_processed;
// How many messages from pgm_msgv were already sent up.
size_t pgm_msgv_processed;
};
}
#endif
#endif
| sophomore_public/libzmq | src/pgm_socket.hpp | C++ | gpl-3.0 | 2,744 |
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include <new>
#include <stddef.h>
#include "macros.hpp"
#include "pipe.hpp"
#include "err.hpp"
#include "ypipe.hpp"
#include "ypipe_conflate.hpp"
int zmq::pipepair (object_t *parents_[2],
pipe_t *pipes_[2],
const int hwms_[2],
const bool conflate_[2])
{
// Creates two pipe objects. These objects are connected by two ypipes,
// each to pass messages in one direction.
typedef ypipe_t<msg_t, message_pipe_granularity> upipe_normal_t;
typedef ypipe_conflate_t<msg_t> upipe_conflate_t;
pipe_t::upipe_t *upipe1;
if (conflate_[0])
upipe1 = new (std::nothrow) upipe_conflate_t ();
else
upipe1 = new (std::nothrow) upipe_normal_t ();
alloc_assert (upipe1);
pipe_t::upipe_t *upipe2;
if (conflate_[1])
upipe2 = new (std::nothrow) upipe_conflate_t ();
else
upipe2 = new (std::nothrow) upipe_normal_t ();
alloc_assert (upipe2);
pipes_[0] = new (std::nothrow)
pipe_t (parents_[0], upipe1, upipe2, hwms_[1], hwms_[0], conflate_[0]);
alloc_assert (pipes_[0]);
pipes_[1] = new (std::nothrow)
pipe_t (parents_[1], upipe2, upipe1, hwms_[0], hwms_[1], conflate_[1]);
alloc_assert (pipes_[1]);
pipes_[0]->set_peer (pipes_[1]);
pipes_[1]->set_peer (pipes_[0]);
return 0;
}
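// A minimal usage sketch (hypothetical caller code; in the real code base
// callers such as socket_base_t set this up when a connection is created):
//
//   object_t *parents[2] = {local_object, peer_object};
//   pipe_t *pipes[2] = {NULL, NULL};
//   int hwms[2] = {sndhwm, rcvhwm};
//   bool conflates[2] = {false, false};
//   int rc = pipepair (parents, pipes, hwms, conflates);
//   errno_assert (rc == 0);
//   // pipes[0] belongs to local_object, pipes[1] to peer_object; each side
//   // then calls set_event_sink () before reading from or writing to it.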
void zmq::send_routing_id (pipe_t *pipe_, const options_t &options_)
{
zmq::msg_t id;
const int rc = id.init_size (options_.routing_id_size);
errno_assert (rc == 0);
memcpy (id.data (), options_.routing_id, options_.routing_id_size);
id.set_flags (zmq::msg_t::routing_id);
const bool written = pipe_->write (&id);
zmq_assert (written);
pipe_->flush ();
}
void zmq::send_hello_msg (pipe_t *pipe_, const options_t &options_)
{
zmq::msg_t hello;
const int rc =
hello.init_buffer (&options_.hello_msg[0], options_.hello_msg.size ());
errno_assert (rc == 0);
const bool written = pipe_->write (&hello);
zmq_assert (written);
pipe_->flush ();
}
zmq::pipe_t::pipe_t (object_t *parent_,
upipe_t *inpipe_,
upipe_t *outpipe_,
int inhwm_,
int outhwm_,
bool conflate_) :
object_t (parent_),
_in_pipe (inpipe_),
_out_pipe (outpipe_),
_in_active (true),
_out_active (true),
_hwm (outhwm_),
_lwm (compute_lwm (inhwm_)),
_in_hwm_boost (-1),
_out_hwm_boost (-1),
_msgs_read (0),
_msgs_written (0),
_peers_msgs_read (0),
_peer (NULL),
_sink (NULL),
_state (active),
_delay (true),
_server_socket_routing_id (0),
_conflate (conflate_)
{
_disconnect_msg.init ();
}
zmq::pipe_t::~pipe_t ()
{
_disconnect_msg.close ();
}
void zmq::pipe_t::set_peer (pipe_t *peer_)
{
// Peer can be set once only.
zmq_assert (!_peer);
_peer = peer_;
}
void zmq::pipe_t::set_event_sink (i_pipe_events *sink_)
{
// Sink can be set once only.
zmq_assert (!_sink);
_sink = sink_;
}
void zmq::pipe_t::set_server_socket_routing_id (
uint32_t server_socket_routing_id_)
{
_server_socket_routing_id = server_socket_routing_id_;
}
uint32_t zmq::pipe_t::get_server_socket_routing_id () const
{
return _server_socket_routing_id;
}
void zmq::pipe_t::set_router_socket_routing_id (
const blob_t &router_socket_routing_id_)
{
_router_socket_routing_id.set_deep_copy (router_socket_routing_id_);
}
const zmq::blob_t &zmq::pipe_t::get_routing_id () const
{
return _router_socket_routing_id;
}
bool zmq::pipe_t::check_read ()
{
if (unlikely (!_in_active))
return false;
if (unlikely (_state != active && _state != waiting_for_delimiter))
return false;
// Check if there's an item in the pipe.
if (!_in_pipe->check_read ()) {
_in_active = false;
return false;
}
// If the next item in the pipe is message delimiter,
// initiate termination process.
if (_in_pipe->probe (is_delimiter)) {
msg_t msg;
const bool ok = _in_pipe->read (&msg);
zmq_assert (ok);
process_delimiter ();
return false;
}
return true;
}
bool zmq::pipe_t::read (msg_t *msg_)
{
if (unlikely (!_in_active))
return false;
if (unlikely (_state != active && _state != waiting_for_delimiter))
return false;
while (true) {
if (!_in_pipe->read (msg_)) {
_in_active = false;
return false;
}
// If this is a credential, ignore it and receive next message.
if (unlikely (msg_->is_credential ())) {
const int rc = msg_->close ();
zmq_assert (rc == 0);
} else {
break;
}
}
// If delimiter was read, start termination process of the pipe.
if (msg_->is_delimiter ()) {
process_delimiter ();
return false;
}
if (!(msg_->flags () & msg_t::more) && !msg_->is_routing_id ())
_msgs_read++;
if (_lwm > 0 && _msgs_read % _lwm == 0)
send_activate_write (_peer, _msgs_read);
return true;
}
bool zmq::pipe_t::check_write ()
{
if (unlikely (!_out_active || _state != active))
return false;
const bool full = !check_hwm ();
if (unlikely (full)) {
_out_active = false;
return false;
}
return true;
}
bool zmq::pipe_t::write (const msg_t *msg_)
{
if (unlikely (!check_write ()))
return false;
const bool more = (msg_->flags () & msg_t::more) != 0;
const bool is_routing_id = msg_->is_routing_id ();
_out_pipe->write (*msg_, more);
if (!more && !is_routing_id)
_msgs_written++;
return true;
}
void zmq::pipe_t::rollback () const
{
// Remove incomplete message from the outbound pipe.
msg_t msg;
if (_out_pipe) {
while (_out_pipe->unwrite (&msg)) {
zmq_assert (msg.flags () & msg_t::more);
const int rc = msg.close ();
errno_assert (rc == 0);
}
}
}
void zmq::pipe_t::flush ()
{
// The peer does not exist anymore at this point.
if (_state == term_ack_sent)
return;
if (_out_pipe && !_out_pipe->flush ())
send_activate_read (_peer);
}
void zmq::pipe_t::process_activate_read ()
{
if (!_in_active && (_state == active || _state == waiting_for_delimiter)) {
_in_active = true;
_sink->read_activated (this);
}
}
void zmq::pipe_t::process_activate_write (uint64_t msgs_read_)
{
// Remember the peer's message sequence number.
_peers_msgs_read = msgs_read_;
if (!_out_active && _state == active) {
_out_active = true;
_sink->write_activated (this);
}
}
void zmq::pipe_t::process_hiccup (void *pipe_)
{
// Destroy old outpipe. Note that the read end of the pipe was already
// migrated to this thread.
zmq_assert (_out_pipe);
_out_pipe->flush ();
msg_t msg;
while (_out_pipe->read (&msg)) {
if (!(msg.flags () & msg_t::more))
_msgs_written--;
const int rc = msg.close ();
errno_assert (rc == 0);
}
LIBZMQ_DELETE (_out_pipe);
// Plug in the new outpipe.
zmq_assert (pipe_);
_out_pipe = static_cast<upipe_t *> (pipe_);
_out_active = true;
// If appropriate, notify the user about the hiccup.
if (_state == active)
_sink->hiccuped (this);
}
void zmq::pipe_t::process_pipe_term ()
{
zmq_assert (_state == active || _state == delimiter_received
|| _state == term_req_sent1);
// This is the simple case of peer-induced termination. If there are no
// more pending messages to read, or if the pipe was configured to drop
// pending messages, we can move directly to the term_ack_sent state.
// Otherwise we'll hang up in waiting_for_delimiter state till all
// pending messages are read.
if (_state == active) {
if (_delay)
_state = waiting_for_delimiter;
else {
_state = term_ack_sent;
_out_pipe = NULL;
send_pipe_term_ack (_peer);
}
}
// Delimiter happened to arrive before the term command. Now we have the
// term command as well, so we can move straight to term_ack_sent state.
else if (_state == delimiter_received) {
_state = term_ack_sent;
_out_pipe = NULL;
send_pipe_term_ack (_peer);
}
// This is the case where both ends of the pipe are closed in parallel.
// We simply reply to the request by ack and continue waiting for our
// own ack.
else if (_state == term_req_sent1) {
_state = term_req_sent2;
_out_pipe = NULL;
send_pipe_term_ack (_peer);
}
}
void zmq::pipe_t::process_pipe_term_ack ()
{
// Notify the user that all the references to the pipe should be dropped.
zmq_assert (_sink);
_sink->pipe_terminated (this);
// In term_ack_sent and term_req_sent2 states there's nothing to do.
// Simply deallocate the pipe. In term_req_sent1 state we have to ack
// the peer before deallocating this side of the pipe.
// All the other states are invalid.
if (_state == term_req_sent1) {
_out_pipe = NULL;
send_pipe_term_ack (_peer);
} else
zmq_assert (_state == term_ack_sent || _state == term_req_sent2);
// We'll deallocate the inbound pipe, the peer will deallocate the outbound
// pipe (which is an inbound pipe from its point of view).
// First, delete all the unread messages in the pipe. We have to do it by
// hand because msg_t doesn't have automatic destructor. Then deallocate
// the ypipe itself.
if (!_conflate) {
msg_t msg;
while (_in_pipe->read (&msg)) {
const int rc = msg.close ();
errno_assert (rc == 0);
}
}
LIBZMQ_DELETE (_in_pipe);
// Deallocate the pipe object
delete this;
}
void zmq::pipe_t::process_pipe_hwm (int inhwm_, int outhwm_)
{
set_hwms (inhwm_, outhwm_);
}
void zmq::pipe_t::set_nodelay ()
{
this->_delay = false;
}
void zmq::pipe_t::terminate (bool delay_)
{
// Overload the value specified at pipe creation.
_delay = delay_;
// If terminate was already called, we can ignore the duplicate invocation.
if (_state == term_req_sent1 || _state == term_req_sent2) {
return;
}
    // If the pipe is in the final phase of async termination, it's going to be
    // closed anyway. No need to do anything special here.
if (_state == term_ack_sent) {
return;
}
// The simple sync termination case. Ask the peer to terminate and wait
// for the ack.
if (_state == active) {
send_pipe_term (_peer);
_state = term_req_sent1;
}
// There are still pending messages available, but the user calls
// 'terminate'. We can act as if all the pending messages were read.
else if (_state == waiting_for_delimiter && !_delay) {
// Drop any unfinished outbound messages.
rollback ();
_out_pipe = NULL;
send_pipe_term_ack (_peer);
_state = term_ack_sent;
}
// If there are pending messages still available, do nothing.
else if (_state == waiting_for_delimiter) {
}
    // We've already got the delimiter, but not the term command yet. We can
    // ignore the delimiter and synchronously terminate as if we were in the
    // active state.
else if (_state == delimiter_received) {
send_pipe_term (_peer);
_state = term_req_sent1;
}
// There are no other states.
else {
zmq_assert (false);
}
// Stop outbound flow of messages.
_out_active = false;
if (_out_pipe) {
// Drop any unfinished outbound messages.
rollback ();
// Write the delimiter into the pipe. Note that watermarks are not
// checked; thus the delimiter can be written even when the pipe is full.
msg_t msg;
msg.init_delimiter ();
_out_pipe->write (msg, false);
flush ();
}
}
bool zmq::pipe_t::is_delimiter (const msg_t &msg_)
{
return msg_.is_delimiter ();
}
int zmq::pipe_t::compute_lwm (int hwm_)
{
    // Compute the low water mark. The following points should be taken
    // into consideration:
//
// 1. LWM has to be less than HWM.
// 2. LWM cannot be set to very low value (such as zero) as after filling
// the queue it would start to refill only after all the messages are
// read from it and thus unnecessarily hold the progress back.
// 3. LWM cannot be set to very high value (such as HWM-1) as it would
// result in lock-step filling of the queue - if a single message is
// read from a full queue, writer thread is resumed to write exactly one
// message to the queue and go back to sleep immediately. This would
// result in low performance.
//
    // Given point 3, it would be good to keep HWM and LWM as far apart as
// possible to reduce the thread switching overhead to almost zero.
// Let's make LWM 1/2 of HWM.
const int result = (hwm_ + 1) / 2;
return result;
}
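// Worked examples of the formula above (illustrative values):
//   hwm_ = 1000  ->  lwm = (1000 + 1) / 2 = 500
//   hwm_ = 1     ->  lwm = 1  (the writer is reactivated after every read)
//   hwm_ = 0     ->  lwm = 0  (infinite HWM; read () skips the activation
//                              check because it requires _lwm > 0)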
void zmq::pipe_t::process_delimiter ()
{
zmq_assert (_state == active || _state == waiting_for_delimiter);
if (_state == active)
_state = delimiter_received;
else {
rollback ();
_out_pipe = NULL;
send_pipe_term_ack (_peer);
_state = term_ack_sent;
}
}
void zmq::pipe_t::hiccup ()
{
// If termination is already under way do nothing.
if (_state != active)
return;
// We'll drop the pointer to the inpipe. From now on, the peer is
// responsible for deallocating it.
// Create new inpipe.
_in_pipe =
_conflate
? static_cast<upipe_t *> (new (std::nothrow) ypipe_conflate_t<msg_t> ())
: new (std::nothrow) ypipe_t<msg_t, message_pipe_granularity> ();
alloc_assert (_in_pipe);
_in_active = true;
// Notify the peer about the hiccup.
send_hiccup (_peer, _in_pipe);
}
void zmq::pipe_t::set_hwms (int inhwm_, int outhwm_)
{
int in = inhwm_ + std::max (_in_hwm_boost, 0);
int out = outhwm_ + std::max (_out_hwm_boost, 0);
    // A hwm <= 0 on either the send or the recv side means infinite, so the
    // corresponding watermark is set to 0 (unlimited).
if (inhwm_ <= 0 || _in_hwm_boost == 0)
in = 0;
if (outhwm_ <= 0 || _out_hwm_boost == 0)
out = 0;
_lwm = compute_lwm (in);
_hwm = out;
}
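// Illustrative examples (hypothetical values):
//   inhwm_ = 100, _in_hwm_boost = -1 (no boost)  ->  in = 100, _lwm = 50
//   inhwm_ = 100, _in_hwm_boost = 50             ->  in = 150, _lwm = 75
//   inhwm_ = 100, _in_hwm_boost = 0              ->  in = 0 (infinite)
//   outhwm_ <= 0                                 ->  _hwm = 0 (infinite)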
void zmq::pipe_t::set_hwms_boost (int inhwmboost_, int outhwmboost_)
{
_in_hwm_boost = inhwmboost_;
_out_hwm_boost = outhwmboost_;
}
bool zmq::pipe_t::check_hwm () const
{
const bool full =
_hwm > 0 && _msgs_written - _peers_msgs_read >= uint64_t (_hwm);
return !full;
}
void zmq::pipe_t::send_hwms_to_peer (int inhwm_, int outhwm_)
{
if (_state == active)
send_pipe_hwm (_peer, inhwm_, outhwm_);
}
void zmq::pipe_t::set_endpoint_pair (zmq::endpoint_uri_pair_t endpoint_pair_)
{
_endpoint_pair = ZMQ_MOVE (endpoint_pair_);
}
const zmq::endpoint_uri_pair_t &zmq::pipe_t::get_endpoint_pair () const
{
return _endpoint_pair;
}
void zmq::pipe_t::send_stats_to_peer (own_t *socket_base_)
{
if (_state == active) {
endpoint_uri_pair_t *ep =
new (std::nothrow) endpoint_uri_pair_t (_endpoint_pair);
send_pipe_peer_stats (_peer, _msgs_written - _peers_msgs_read,
socket_base_, ep);
}
}
void zmq::pipe_t::process_pipe_peer_stats (uint64_t queue_count_,
own_t *socket_base_,
endpoint_uri_pair_t *endpoint_pair_)
{
send_pipe_stats_publish (socket_base_, queue_count_,
_msgs_written - _peers_msgs_read, endpoint_pair_);
}
void zmq::pipe_t::send_disconnect_msg ()
{
if (_disconnect_msg.size () > 0 && _out_pipe) {
// Rollback any incomplete message in the pipe, and push the disconnect message.
rollback ();
_out_pipe->write (_disconnect_msg, false);
flush ();
_disconnect_msg.init ();
}
}
void zmq::pipe_t::set_disconnect_msg (
const std::vector<unsigned char> &disconnect_)
{
_disconnect_msg.close ();
const int rc =
_disconnect_msg.init_buffer (&disconnect_[0], disconnect_.size ());
errno_assert (rc == 0);
}
void zmq::pipe_t::send_hiccup_msg (const std::vector<unsigned char> &hiccup_)
{
if (!hiccup_.empty () && _out_pipe) {
msg_t msg;
const int rc = msg.init_buffer (&hiccup_[0], hiccup_.size ());
errno_assert (rc == 0);
_out_pipe->write (msg, false);
flush ();
}
}
| sophomore_public/libzmq | src/pipe.cpp | C++ | gpl-3.0 | 16,837 |
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_PIPE_HPP_INCLUDED__
#define __ZMQ_PIPE_HPP_INCLUDED__
#include "ypipe_base.hpp"
#include "config.hpp"
#include "object.hpp"
#include "stdint.hpp"
#include "array.hpp"
#include "blob.hpp"
#include "options.hpp"
#include "endpoint.hpp"
#include "msg.hpp"
namespace zmq
{
class pipe_t;
// Create a pipepair for bi-directional transfer of messages.
// First HWM is for messages passed from first pipe to the second pipe.
// Second HWM is for messages passed from second pipe to the first pipe.
// Delay specifies how the pipe behaves when the peer terminates. If true
// pipe receives all the pending messages before terminating, otherwise it
// terminates straight away.
// If conflate is true, only the most recently arrived message could be
// read (older messages are discarded)
int pipepair (zmq::object_t *parents_[2],
zmq::pipe_t *pipes_[2],
const int hwms_[2],
const bool conflate_[2]);
struct i_pipe_events
{
virtual ~i_pipe_events () ZMQ_DEFAULT;
virtual void read_activated (zmq::pipe_t *pipe_) = 0;
virtual void write_activated (zmq::pipe_t *pipe_) = 0;
virtual void hiccuped (zmq::pipe_t *pipe_) = 0;
virtual void pipe_terminated (zmq::pipe_t *pipe_) = 0;
};
// Note that pipe can be stored in three different arrays.
// The array of inbound pipes (1), the array of outbound pipes (2) and
// the generic array of pipes to be deallocated (3).
class pipe_t ZMQ_FINAL : public object_t,
public array_item_t<1>,
public array_item_t<2>,
public array_item_t<3>
{
// This allows pipepair to create pipe objects.
friend int pipepair (zmq::object_t *parents_[2],
zmq::pipe_t *pipes_[2],
const int hwms_[2],
const bool conflate_[2]);
public:
// Specifies the object to send events to.
void set_event_sink (i_pipe_events *sink_);
    // Pipe endpoint can store a routing ID to be used by its clients.
void set_server_socket_routing_id (uint32_t server_socket_routing_id_);
uint32_t get_server_socket_routing_id () const;
// Pipe endpoint can store an opaque ID to be used by its clients.
void set_router_socket_routing_id (const blob_t &router_socket_routing_id_);
const blob_t &get_routing_id () const;
// Returns true if there is at least one message to read in the pipe.
bool check_read ();
    // Reads a message from the underlying pipe.
bool read (msg_t *msg_);
    // Checks whether messages can be written to the pipe. If the pipe is
    // closed or if writing the message would exceed the high watermark, the
    // function returns false.
bool check_write ();
// Writes a message to the underlying pipe. Returns false if the
// message does not pass check_write. If false, the message object
// retains ownership of its message buffer.
bool write (const msg_t *msg_);
// Remove unfinished parts of the outbound message from the pipe.
void rollback () const;
// Flush the messages downstream.
void flush ();
// Temporarily disconnects the inbound message stream and drops
// all the messages on the fly. Causes 'hiccuped' event to be generated
// in the peer.
void hiccup ();
// Ensure the pipe won't block on receiving pipe_term.
void set_nodelay ();
// Ask pipe to terminate. The termination will happen asynchronously
// and user will be notified about actual deallocation by 'terminated'
// event. If delay is true, the pending messages will be processed
// before actual shutdown.
void terminate (bool delay_);
// Set the high water marks.
void set_hwms (int inhwm_, int outhwm_);
    // Set the boost to the high water marks; used by inproc sockets so that the
    // total hwm is the sum of the connect and bind sockets' watermarks.
void set_hwms_boost (int inhwmboost_, int outhwmboost_);
    // Send a command to the peer to notify it of the hwm change.
void send_hwms_to_peer (int inhwm_, int outhwm_);
// Returns true if HWM is not reached
bool check_hwm () const;
void set_endpoint_pair (endpoint_uri_pair_t endpoint_pair_);
const endpoint_uri_pair_t &get_endpoint_pair () const;
void send_stats_to_peer (own_t *socket_base_);
void send_disconnect_msg ();
void set_disconnect_msg (const std::vector<unsigned char> &disconnect_);
void send_hiccup_msg (const std::vector<unsigned char> &hiccup_);
private:
// Type of the underlying lock-free pipe.
typedef ypipe_base_t<msg_t> upipe_t;
// Command handlers.
void process_activate_read () ZMQ_OVERRIDE;
void process_activate_write (uint64_t msgs_read_) ZMQ_OVERRIDE;
void process_hiccup (void *pipe_) ZMQ_OVERRIDE;
void
process_pipe_peer_stats (uint64_t queue_count_,
own_t *socket_base_,
endpoint_uri_pair_t *endpoint_pair_) ZMQ_OVERRIDE;
void process_pipe_term () ZMQ_OVERRIDE;
void process_pipe_term_ack () ZMQ_OVERRIDE;
void process_pipe_hwm (int inhwm_, int outhwm_) ZMQ_OVERRIDE;
// Handler for delimiter read from the pipe.
void process_delimiter ();
// Constructor is private. Pipe can only be created using
// pipepair function.
pipe_t (object_t *parent_,
upipe_t *inpipe_,
upipe_t *outpipe_,
int inhwm_,
int outhwm_,
bool conflate_);
// Pipepair uses this function to let us know about
// the peer pipe object.
void set_peer (pipe_t *peer_);
// Destructor is private. Pipe objects destroy themselves.
~pipe_t () ZMQ_OVERRIDE;
// Underlying pipes for both directions.
upipe_t *_in_pipe;
upipe_t *_out_pipe;
// Can the pipe be read from / written to?
bool _in_active;
bool _out_active;
// High watermark for the outbound pipe.
int _hwm;
// Low watermark for the inbound pipe.
int _lwm;
    // Boosts for the high and low watermarks; used with inproc sockets so the
    // hwm is the sum of the send and recv hwms on each side of the pipe.
int _in_hwm_boost;
int _out_hwm_boost;
// Number of messages read and written so far.
uint64_t _msgs_read;
uint64_t _msgs_written;
// Last received peer's msgs_read. The actual number in the peer
// can be higher at the moment.
uint64_t _peers_msgs_read;
// The pipe object on the other side of the pipepair.
pipe_t *_peer;
// Sink to send events to.
i_pipe_events *_sink;
// States of the pipe endpoint:
// active: common state before any termination begins,
// delimiter_received: delimiter was read from pipe before
// term command was received,
// waiting_for_delimiter: term command was already received
// from the peer but there are still pending messages to read,
// term_ack_sent: all pending messages were already read and
// all we are waiting for is ack from the peer,
// term_req_sent1: 'terminate' was explicitly called by the user,
// term_req_sent2: user called 'terminate' and then we've got
// term command from the peer as well.
enum
{
active,
delimiter_received,
waiting_for_delimiter,
term_ack_sent,
term_req_sent1,
term_req_sent2
} _state;
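    // Illustrative peer-induced termination sequence (pending messages left
    // to read, _delay == true):
    //   active -> waiting_for_delimiter  (pipe_term received from the peer)
    //          -> term_ack_sent          (delimiter read, ack sent to peer)
    //          -> deallocation           (pipe_term_ack received from peer)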
// If true, we receive all the pending inbound messages before
// terminating. If false, we terminate immediately when the peer
// asks us to.
bool _delay;
// Routing id of the writer. Used uniquely by the reader side.
blob_t _router_socket_routing_id;
    // Server socket routing id of the writer. Used uniquely by the reader side.
int _server_socket_routing_id;
// Returns true if the message is delimiter; false otherwise.
static bool is_delimiter (const msg_t &msg_);
// Computes appropriate low watermark from the given high watermark.
static int compute_lwm (int hwm_);
const bool _conflate;
// The endpoints of this pipe.
endpoint_uri_pair_t _endpoint_pair;
// Disconnect msg
msg_t _disconnect_msg;
ZMQ_NON_COPYABLE_NOR_MOVABLE (pipe_t)
};
void send_routing_id (pipe_t *pipe_, const options_t &options_);
void send_hello_msg (pipe_t *pipe_, const options_t &options_);
}
#endif
| sophomore_public/libzmq | src/pipe.hpp | C++ | gpl-3.0 | 8,424 |
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include "macros.hpp"
#include <string>
#include <limits.h>
#include "msg.hpp"
#include "err.hpp"
#include "plain_client.hpp"
#include "session_base.hpp"
#include "plain_common.hpp"
zmq::plain_client_t::plain_client_t (session_base_t *const session_,
const options_t &options_) :
mechanism_base_t (session_, options_), _state (sending_hello)
{
}
zmq::plain_client_t::~plain_client_t ()
{
}
int zmq::plain_client_t::next_handshake_command (msg_t *msg_)
{
int rc = 0;
switch (_state) {
case sending_hello:
produce_hello (msg_);
_state = waiting_for_welcome;
break;
case sending_initiate:
produce_initiate (msg_);
_state = waiting_for_ready;
break;
default:
errno = EAGAIN;
rc = -1;
}
return rc;
}
int zmq::plain_client_t::process_handshake_command (msg_t *msg_)
{
const unsigned char *cmd_data =
static_cast<unsigned char *> (msg_->data ());
const size_t data_size = msg_->size ();
int rc = 0;
if (data_size >= welcome_prefix_len
&& !memcmp (cmd_data, welcome_prefix, welcome_prefix_len))
rc = process_welcome (cmd_data, data_size);
else if (data_size >= ready_prefix_len
&& !memcmp (cmd_data, ready_prefix, ready_prefix_len))
rc = process_ready (cmd_data, data_size);
else if (data_size >= error_prefix_len
&& !memcmp (cmd_data, error_prefix, error_prefix_len))
rc = process_error (cmd_data, data_size);
else {
session->get_socket ()->event_handshake_failed_protocol (
session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND);
errno = EPROTO;
rc = -1;
}
if (rc == 0) {
rc = msg_->close ();
errno_assert (rc == 0);
rc = msg_->init ();
errno_assert (rc == 0);
}
return rc;
}
zmq::mechanism_t::status_t zmq::plain_client_t::status () const
{
switch (_state) {
case ready:
return mechanism_t::ready;
case error_command_received:
return mechanism_t::error;
default:
return mechanism_t::handshaking;
}
}
void zmq::plain_client_t::produce_hello (msg_t *msg_) const
{
const std::string username = options.plain_username;
zmq_assert (username.length () <= UCHAR_MAX);
const std::string password = options.plain_password;
zmq_assert (password.length () <= UCHAR_MAX);
const size_t command_size = hello_prefix_len + brief_len_size
+ username.length () + brief_len_size
+ password.length ();
const int rc = msg_->init_size (command_size);
errno_assert (rc == 0);
unsigned char *ptr = static_cast<unsigned char *> (msg_->data ());
memcpy (ptr, hello_prefix, hello_prefix_len);
ptr += hello_prefix_len;
*ptr++ = static_cast<unsigned char> (username.length ());
memcpy (ptr, username.c_str (), username.length ());
ptr += username.length ();
*ptr++ = static_cast<unsigned char> (password.length ());
memcpy (ptr, password.c_str (), password.length ());
}
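// For illustration, with a hypothetical username "admin" and password
// "secret" the HELLO command built above is laid out as:
//   \x05HELLO   hello_prefix (6 octets)
//   \x05        username length
//   admin       username
//   \x06        password length
//   secret      password
// giving command_size = 6 + 1 + 5 + 1 + 6 = 19 octets.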
int zmq::plain_client_t::process_welcome (const unsigned char *cmd_data_,
size_t data_size_)
{
LIBZMQ_UNUSED (cmd_data_);
if (_state != waiting_for_welcome) {
session->get_socket ()->event_handshake_failed_protocol (
session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND);
errno = EPROTO;
return -1;
}
if (data_size_ != welcome_prefix_len) {
session->get_socket ()->event_handshake_failed_protocol (
session->get_endpoint (),
ZMQ_PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_WELCOME);
errno = EPROTO;
return -1;
}
_state = sending_initiate;
return 0;
}
void zmq::plain_client_t::produce_initiate (msg_t *msg_) const
{
make_command_with_basic_properties (msg_, initiate_prefix,
initiate_prefix_len);
}
int zmq::plain_client_t::process_ready (const unsigned char *cmd_data_,
size_t data_size_)
{
if (_state != waiting_for_ready) {
session->get_socket ()->event_handshake_failed_protocol (
session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND);
errno = EPROTO;
return -1;
}
const int rc = parse_metadata (cmd_data_ + ready_prefix_len,
data_size_ - ready_prefix_len);
if (rc == 0)
_state = ready;
else
session->get_socket ()->event_handshake_failed_protocol (
session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_INVALID_METADATA);
return rc;
}
int zmq::plain_client_t::process_error (const unsigned char *cmd_data_,
size_t data_size_)
{
if (_state != waiting_for_welcome && _state != waiting_for_ready) {
session->get_socket ()->event_handshake_failed_protocol (
session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND);
errno = EPROTO;
return -1;
}
const size_t start_of_error_reason = error_prefix_len + brief_len_size;
if (data_size_ < start_of_error_reason) {
session->get_socket ()->event_handshake_failed_protocol (
session->get_endpoint (),
ZMQ_PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_ERROR);
errno = EPROTO;
return -1;
}
const size_t error_reason_len =
static_cast<size_t> (cmd_data_[error_prefix_len]);
if (error_reason_len > data_size_ - start_of_error_reason) {
session->get_socket ()->event_handshake_failed_protocol (
session->get_endpoint (),
ZMQ_PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_ERROR);
errno = EPROTO;
return -1;
}
const char *error_reason =
reinterpret_cast<const char *> (cmd_data_) + start_of_error_reason;
handle_error_reason (error_reason, error_reason_len);
_state = error_command_received;
return 0;
}
| sophomore_public/libzmq | src/plain_client.cpp | C++ | gpl-3.0 | 6,218 |
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_PLAIN_CLIENT_HPP_INCLUDED__
#define __ZMQ_PLAIN_CLIENT_HPP_INCLUDED__
#include "mechanism_base.hpp"
#include "options.hpp"
namespace zmq
{
class msg_t;
class plain_client_t ZMQ_FINAL : public mechanism_base_t
{
public:
plain_client_t (session_base_t *session_, const options_t &options_);
~plain_client_t ();
// mechanism implementation
int next_handshake_command (msg_t *msg_);
int process_handshake_command (msg_t *msg_);
status_t status () const;
private:
enum state_t
{
sending_hello,
waiting_for_welcome,
sending_initiate,
waiting_for_ready,
error_command_received,
ready
};
state_t _state;
void produce_hello (msg_t *msg_) const;
void produce_initiate (msg_t *msg_) const;
int process_welcome (const unsigned char *cmd_data_, size_t data_size_);
int process_ready (const unsigned char *cmd_data_, size_t data_size_);
int process_error (const unsigned char *cmd_data_, size_t data_size_);
};
}
#endif
| sophomore_public/libzmq | src/plain_client.hpp | C++ | gpl-3.0 | 1,081 |
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_PLAIN_COMMON_HPP_INCLUDED__
#define __ZMQ_PLAIN_COMMON_HPP_INCLUDED__
namespace zmq
{
const char hello_prefix[] = "\x05HELLO";
const size_t hello_prefix_len = sizeof (hello_prefix) - 1;
const char welcome_prefix[] = "\x07WELCOME";
const size_t welcome_prefix_len = sizeof (welcome_prefix) - 1;
const char initiate_prefix[] = "\x08INITIATE";
const size_t initiate_prefix_len = sizeof (initiate_prefix) - 1;
const char ready_prefix[] = "\x05READY";
const size_t ready_prefix_len = sizeof (ready_prefix) - 1;
const char error_prefix[] = "\x05ERROR";
const size_t error_prefix_len = sizeof (error_prefix) - 1;
const size_t brief_len_size = sizeof (char);
}
#endif
| sophomore_public/libzmq | src/plain_common.hpp | C++ | gpl-3.0 | 721 |
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include <string>
#include "msg.hpp"
#include "session_base.hpp"
#include "err.hpp"
#include "plain_server.hpp"
#include "wire.hpp"
#include "plain_common.hpp"
zmq::plain_server_t::plain_server_t (session_base_t *session_,
const std::string &peer_address_,
const options_t &options_) :
mechanism_base_t (session_, options_),
zap_client_common_handshake_t (
session_, peer_address_, options_, sending_welcome)
{
// Note that there is no point to PLAIN if ZAP is not set up to handle the
// username and password, so if ZAP is not configured it is considered a
// failure.
// Given this is a backward-incompatible change, it's behind a socket
// option disabled by default.
if (options.zap_enforce_domain)
zmq_assert (zap_required ());
}
zmq::plain_server_t::~plain_server_t ()
{
}
int zmq::plain_server_t::next_handshake_command (msg_t *msg_)
{
int rc = 0;
switch (state) {
case sending_welcome:
produce_welcome (msg_);
state = waiting_for_initiate;
break;
case sending_ready:
produce_ready (msg_);
state = ready;
break;
case sending_error:
produce_error (msg_);
state = error_sent;
break;
default:
errno = EAGAIN;
rc = -1;
}
return rc;
}
int zmq::plain_server_t::process_handshake_command (msg_t *msg_)
{
int rc = 0;
switch (state) {
case waiting_for_hello:
rc = process_hello (msg_);
break;
case waiting_for_initiate:
rc = process_initiate (msg_);
break;
default:
// TODO see comment in curve_server_t::process_handshake_command
session->get_socket ()->event_handshake_failed_protocol (
session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_UNSPECIFIED);
errno = EPROTO;
rc = -1;
break;
}
if (rc == 0) {
rc = msg_->close ();
errno_assert (rc == 0);
rc = msg_->init ();
errno_assert (rc == 0);
}
return rc;
}
int zmq::plain_server_t::process_hello (msg_t *msg_)
{
int rc = check_basic_command_structure (msg_);
if (rc == -1)
return -1;
const char *ptr = static_cast<char *> (msg_->data ());
size_t bytes_left = msg_->size ();
if (bytes_left < hello_prefix_len
|| memcmp (ptr, hello_prefix, hello_prefix_len) != 0) {
session->get_socket ()->event_handshake_failed_protocol (
session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND);
errno = EPROTO;
return -1;
}
ptr += hello_prefix_len;
bytes_left -= hello_prefix_len;
if (bytes_left < 1) {
// PLAIN I: invalid PLAIN client, did not send username
session->get_socket ()->event_handshake_failed_protocol (
session->get_endpoint (),
ZMQ_PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_HELLO);
errno = EPROTO;
return -1;
}
const uint8_t username_length = *ptr++;
bytes_left -= sizeof (username_length);
if (bytes_left < username_length) {
// PLAIN I: invalid PLAIN client, sent malformed username
session->get_socket ()->event_handshake_failed_protocol (
session->get_endpoint (),
ZMQ_PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_HELLO);
errno = EPROTO;
return -1;
}
const std::string username = std::string (ptr, username_length);
ptr += username_length;
bytes_left -= username_length;
if (bytes_left < 1) {
// PLAIN I: invalid PLAIN client, did not send password
session->get_socket ()->event_handshake_failed_protocol (
session->get_endpoint (),
ZMQ_PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_HELLO);
errno = EPROTO;
return -1;
}
const uint8_t password_length = *ptr++;
bytes_left -= sizeof (password_length);
if (bytes_left != password_length) {
// PLAIN I: invalid PLAIN client, sent malformed password or
// extraneous data
session->get_socket ()->event_handshake_failed_protocol (
session->get_endpoint (),
ZMQ_PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_HELLO);
errno = EPROTO;
return -1;
}
const std::string password = std::string (ptr, password_length);
// Use ZAP protocol (RFC 27) to authenticate the user.
rc = session->zap_connect ();
if (rc != 0) {
session->get_socket ()->event_handshake_failed_no_detail (
session->get_endpoint (), EFAULT);
return -1;
}
send_zap_request (username, password);
state = waiting_for_zap_reply;
// TODO actually, it is quite unlikely that we can read the ZAP
// reply already, but removing this has some strange side-effect
// (probably because the pipe's in_active flag is true until a read
// is attempted)
return receive_and_process_zap_reply () == -1 ? -1 : 0;
}
void zmq::plain_server_t::produce_welcome (msg_t *msg_)
{
const int rc = msg_->init_size (welcome_prefix_len);
errno_assert (rc == 0);
memcpy (msg_->data (), welcome_prefix, welcome_prefix_len);
}
int zmq::plain_server_t::process_initiate (msg_t *msg_)
{
const unsigned char *ptr = static_cast<unsigned char *> (msg_->data ());
const size_t bytes_left = msg_->size ();
if (bytes_left < initiate_prefix_len
|| memcmp (ptr, initiate_prefix, initiate_prefix_len) != 0) {
session->get_socket ()->event_handshake_failed_protocol (
session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND);
errno = EPROTO;
return -1;
}
const int rc = parse_metadata (ptr + initiate_prefix_len,
bytes_left - initiate_prefix_len);
if (rc == 0)
state = sending_ready;
return rc;
}
void zmq::plain_server_t::produce_ready (msg_t *msg_) const
{
make_command_with_basic_properties (msg_, ready_prefix, ready_prefix_len);
}
void zmq::plain_server_t::produce_error (msg_t *msg_) const
{
const char expected_status_code_len = 3;
zmq_assert (status_code.length ()
== static_cast<size_t> (expected_status_code_len));
const size_t status_code_len_size = sizeof (expected_status_code_len);
const int rc = msg_->init_size (error_prefix_len + status_code_len_size
+ expected_status_code_len);
zmq_assert (rc == 0);
char *msg_data = static_cast<char *> (msg_->data ());
memcpy (msg_data, error_prefix, error_prefix_len);
msg_data[error_prefix_len] = expected_status_code_len;
memcpy (msg_data + error_prefix_len + status_code_len_size,
status_code.c_str (), status_code.length ());
}
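// For illustration, with a hypothetical status_code of "300" the ERROR
// command built above is laid out as:
//   \x05ERROR   error_prefix (6 octets)
//   \x03        status code length
//   300         status code
// giving a total of 6 + 1 + 3 = 10 octets.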
void zmq::plain_server_t::send_zap_request (const std::string &username_,
const std::string &password_)
{
const uint8_t *credentials[] = {
reinterpret_cast<const uint8_t *> (username_.c_str ()),
reinterpret_cast<const uint8_t *> (password_.c_str ())};
size_t credentials_sizes[] = {username_.size (), password_.size ()};
const char plain_mechanism_name[] = "PLAIN";
zap_client_t::send_zap_request (
plain_mechanism_name, sizeof (plain_mechanism_name) - 1, credentials,
credentials_sizes, sizeof (credentials) / sizeof (credentials[0]));
}
| sophomore_public/libzmq | src/plain_server.cpp | C++ | gpl-3.0 | 7,591 |
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_PLAIN_SERVER_HPP_INCLUDED__
#define __ZMQ_PLAIN_SERVER_HPP_INCLUDED__
#include "options.hpp"
#include "zap_client.hpp"
namespace zmq
{
class msg_t;
class session_base_t;
class plain_server_t ZMQ_FINAL : public zap_client_common_handshake_t
{
public:
plain_server_t (session_base_t *session_,
const std::string &peer_address_,
const options_t &options_);
~plain_server_t ();
// mechanism implementation
int next_handshake_command (msg_t *msg_);
int process_handshake_command (msg_t *msg_);
private:
static void produce_welcome (msg_t *msg_);
void produce_ready (msg_t *msg_) const;
void produce_error (msg_t *msg_) const;
int process_hello (msg_t *msg_);
int process_initiate (msg_t *msg_);
void send_zap_request (const std::string &username_,
const std::string &password_);
};
}
#endif
| sophomore_public/libzmq | src/plain_server.hpp | C++ | gpl-3.0 | 963 |
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include "poll.hpp"
#if defined ZMQ_IOTHREAD_POLLER_USE_POLL
#include <sys/types.h>
#include <sys/time.h>
#include <poll.h>
#include <algorithm>
#include "poll.hpp"
#include "err.hpp"
#include "config.hpp"
#include "i_poll_events.hpp"
zmq::poll_t::poll_t (const zmq::thread_ctx_t &ctx_) :
worker_poller_base_t (ctx_), retired (false)
{
}
zmq::poll_t::~poll_t ()
{
stop_worker ();
}
zmq::poll_t::handle_t zmq::poll_t::add_fd (fd_t fd_, i_poll_events *events_)
{
check_thread ();
zmq_assert (fd_ != retired_fd);
// If the file descriptor table is too small expand it.
fd_table_t::size_type sz = fd_table.size ();
if (sz <= (fd_table_t::size_type) fd_) {
fd_table.resize (fd_ + 1);
while (sz != (fd_table_t::size_type) (fd_ + 1)) {
fd_table[sz].index = retired_fd;
++sz;
}
}
pollfd pfd = {fd_, 0, 0};
pollset.push_back (pfd);
zmq_assert (fd_table[fd_].index == retired_fd);
fd_table[fd_].index = pollset.size () - 1;
fd_table[fd_].events = events_;
// Increase the load metric of the thread.
adjust_load (1);
return fd_;
}
void zmq::poll_t::rm_fd (handle_t handle_)
{
check_thread ();
fd_t index = fd_table[handle_].index;
zmq_assert (index != retired_fd);
// Mark the fd as unused.
pollset[index].fd = retired_fd;
fd_table[handle_].index = retired_fd;
retired = true;
// Decrease the load metric of the thread.
adjust_load (-1);
}
void zmq::poll_t::set_pollin (handle_t handle_)
{
check_thread ();
fd_t index = fd_table[handle_].index;
pollset[index].events |= POLLIN;
}
void zmq::poll_t::reset_pollin (handle_t handle_)
{
check_thread ();
fd_t index = fd_table[handle_].index;
pollset[index].events &= ~((short) POLLIN);
}
void zmq::poll_t::set_pollout (handle_t handle_)
{
check_thread ();
fd_t index = fd_table[handle_].index;
pollset[index].events |= POLLOUT;
}
void zmq::poll_t::reset_pollout (handle_t handle_)
{
check_thread ();
fd_t index = fd_table[handle_].index;
pollset[index].events &= ~((short) POLLOUT);
}
void zmq::poll_t::stop ()
{
check_thread ();
// no-op... thread is stopped when no more fds or timers are registered
}
int zmq::poll_t::max_fds ()
{
return -1;
}
void zmq::poll_t::loop ()
{
while (true) {
// Execute any due timers.
int timeout = (int) execute_timers ();
cleanup_retired ();
if (pollset.empty ()) {
zmq_assert (get_load () == 0);
if (timeout == 0)
break;
// TODO sleep for timeout
continue;
}
// Wait for events.
int rc = poll (&pollset[0], static_cast<nfds_t> (pollset.size ()),
timeout ? timeout : -1);
if (rc == -1) {
errno_assert (errno == EINTR);
continue;
}
// If there are no events (i.e. it's a timeout) there's no point
// in checking the pollset.
if (rc == 0)
continue;
for (pollset_t::size_type i = 0; i != pollset.size (); i++) {
zmq_assert (!(pollset[i].revents & POLLNVAL));
if (pollset[i].fd == retired_fd)
continue;
if (pollset[i].revents & (POLLERR | POLLHUP))
fd_table[pollset[i].fd].events->in_event ();
if (pollset[i].fd == retired_fd)
continue;
if (pollset[i].revents & POLLOUT)
fd_table[pollset[i].fd].events->out_event ();
if (pollset[i].fd == retired_fd)
continue;
if (pollset[i].revents & POLLIN)
fd_table[pollset[i].fd].events->in_event ();
}
}
}
void zmq::poll_t::cleanup_retired ()
{
// Clean up the pollset and update the fd_table accordingly.
if (retired) {
pollset_t::size_type i = 0;
while (i < pollset.size ()) {
if (pollset[i].fd == retired_fd)
pollset.erase (pollset.begin () + i);
else {
fd_table[pollset[i].fd].index = i;
i++;
}
}
retired = false;
}
}
#endif
| sophomore_public/libzmq | src/poll.cpp | C++ | gpl-3.0 | 4,297 |
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_POLL_HPP_INCLUDED__
#define __ZMQ_POLL_HPP_INCLUDED__
// poller.hpp decides which polling mechanism to use.
#include "poller.hpp"
#if defined ZMQ_IOTHREAD_POLLER_USE_POLL
#if defined ZMQ_HAVE_WINDOWS
#error \
"poll is broken on Windows for the purpose of the I/O thread poller, use select instead; "\
"see https://github.com/zeromq/libzmq/issues/3107"
#endif
#include <poll.h>
#include <stddef.h>
#include <vector>
#include "ctx.hpp"
#include "fd.hpp"
#include "thread.hpp"
#include "poller_base.hpp"
namespace zmq
{
struct i_poll_events;
// Implements socket polling mechanism using the POSIX.1-2001
// poll() system call.
class poll_t ZMQ_FINAL : public worker_poller_base_t
{
public:
typedef fd_t handle_t;
poll_t (const thread_ctx_t &ctx_);
~poll_t ();
// "poller" concept.
// These methods may only be called from an event callback; add_fd may also be called before start.
handle_t add_fd (fd_t fd_, zmq::i_poll_events *events_);
void rm_fd (handle_t handle_);
void set_pollin (handle_t handle_);
void reset_pollin (handle_t handle_);
void set_pollout (handle_t handle_);
void reset_pollout (handle_t handle_);
void stop ();
static int max_fds ();
private:
// Main event loop.
void loop () ZMQ_FINAL;
void cleanup_retired ();
struct fd_entry_t
{
fd_t index;
zmq::i_poll_events *events;
};
// This table stores data for registered descriptors.
typedef std::vector<fd_entry_t> fd_table_t;
fd_table_t fd_table;
// Pollset to pass to the poll function.
typedef std::vector<pollfd> pollset_t;
pollset_t pollset;
// If true, there's at least one retired event source.
bool retired;
ZMQ_NON_COPYABLE_NOR_MOVABLE (poll_t)
};
typedef poll_t poller_t;
}
#endif
#endif
| sophomore_public/libzmq | src/poll.hpp | C++ | gpl-3.0 | 1,954 |
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_POLLER_HPP_INCLUDED__
#define __ZMQ_POLLER_HPP_INCLUDED__
#if defined ZMQ_IOTHREAD_POLLER_USE_KQUEUE \
+ defined ZMQ_IOTHREAD_POLLER_USE_EPOLL \
+ defined ZMQ_IOTHREAD_POLLER_USE_DEVPOLL \
+ defined ZMQ_IOTHREAD_POLLER_USE_POLLSET \
+ defined ZMQ_IOTHREAD_POLLER_USE_POLL \
+ defined ZMQ_IOTHREAD_POLLER_USE_SELECT \
> 1
#error More than one of the ZMQ_IOTHREAD_POLLER_USE_* macros defined
#endif
#if defined ZMQ_IOTHREAD_POLLER_USE_KQUEUE
#include "kqueue.hpp"
#elif defined ZMQ_IOTHREAD_POLLER_USE_EPOLL
#include "epoll.hpp"
#elif defined ZMQ_IOTHREAD_POLLER_USE_DEVPOLL
#include "devpoll.hpp"
#elif defined ZMQ_IOTHREAD_POLLER_USE_POLLSET
#include "pollset.hpp"
#elif defined ZMQ_IOTHREAD_POLLER_USE_POLL
#include "poll.hpp"
#elif defined ZMQ_IOTHREAD_POLLER_USE_SELECT
#include "select.hpp"
#elif defined ZMQ_HAVE_GNU
#define ZMQ_IOTHREAD_POLLER_USE_POLL
#include "poll.hpp"
#else
#error None of the ZMQ_IOTHREAD_POLLER_USE_* macros defined
#endif
#if (defined ZMQ_POLL_BASED_ON_SELECT + defined ZMQ_POLL_BASED_ON_POLL) > 1
#error More than one of the ZMQ_POLL_BASED_ON_* macros defined
#elif (defined ZMQ_POLL_BASED_ON_SELECT + defined ZMQ_POLL_BASED_ON_POLL) == 0
#error None of the ZMQ_POLL_BASED_ON_* macros defined
#endif
#endif
| sophomore_public/libzmq | src/poller.hpp | C++ | gpl-3.0 | 1,525 |
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include "poller_base.hpp"
#include "i_poll_events.hpp"
#include "err.hpp"
zmq::poller_base_t::~poller_base_t ()
{
// Make sure there is no more load on the shutdown.
zmq_assert (get_load () == 0);
}
int zmq::poller_base_t::get_load () const
{
return _load.get ();
}
void zmq::poller_base_t::adjust_load (int amount_)
{
if (amount_ > 0)
_load.add (amount_);
else if (amount_ < 0)
_load.sub (-amount_);
}
void zmq::poller_base_t::add_timer (int timeout_, i_poll_events *sink_, int id_)
{
uint64_t expiration = _clock.now_ms () + timeout_;
timer_info_t info = {sink_, id_};
_timers.insert (timers_t::value_type (expiration, info));
}
void zmq::poller_base_t::cancel_timer (i_poll_events *sink_, int id_)
{
// Complexity of this operation is O(n). We assume it is rarely used.
for (timers_t::iterator it = _timers.begin (), end = _timers.end ();
it != end; ++it)
if (it->second.sink == sink_ && it->second.id == id_) {
_timers.erase (it);
return;
}
// We should generally never get here. Calling 'cancel_timer ()' on
// an already expired or canceled timer (or even worse - on a timer which
// never existed, supplying bad sink_ and/or id_ values) does not make any
// sense.
// But in some edge cases this might happen. As described in issue #3645
// `timer_event ()` call from `execute_timers ()` might call `cancel_timer ()`
// on already canceled (deleted) timer.
// As soon as that is resolved an 'assert (false)' should be put here.
}
uint64_t zmq::poller_base_t::execute_timers ()
{
// Fast track.
if (_timers.empty ())
return 0;
// Get the current time.
const uint64_t current = _clock.now_ms ();
// Execute the timers that are already due.
uint64_t res = 0;
timer_info_t timer_temp;
timers_t::iterator it;
do {
it = _timers.begin ();
// If we have to wait to execute the item, same will be true for
// all the following items because multimap is sorted. Thus we can
// stop checking the subsequent timers.
if (it->first > current) {
res = it->first - current;
break;
}
// Save and remove the timer because timer_event() call might delete
// exactly this timer and then the iterator will be invalid.
timer_temp = it->second;
_timers.erase (it);
// Trigger the timer.
timer_temp.sink->timer_event (timer_temp.id);
} while (!_timers.empty ());
// Return the time to wait for the next timer (at least 1ms), or 0, if
// there are no more timers.
return res;
}
zmq::worker_poller_base_t::worker_poller_base_t (const thread_ctx_t &ctx_) :
_ctx (ctx_)
{
}
void zmq::worker_poller_base_t::stop_worker ()
{
_worker.stop ();
}
void zmq::worker_poller_base_t::start (const char *name_)
{
zmq_assert (get_load () > 0);
_ctx.start_thread (_worker, worker_routine, this, name_);
}
void zmq::worker_poller_base_t::check_thread () const
{
#ifndef NDEBUG
zmq_assert (!_worker.get_started () || _worker.is_current_thread ());
#endif
}
void zmq::worker_poller_base_t::worker_routine (void *arg_)
{
(static_cast<worker_poller_base_t *> (arg_))->loop ();
}
| sophomore_public/libzmq | src/poller_base.cpp | C++ | gpl-3.0 | 3,390 |
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_POLLER_BASE_HPP_INCLUDED__
#define __ZMQ_POLLER_BASE_HPP_INCLUDED__
#include <map>
#include "clock.hpp"
#include "atomic_counter.hpp"
#include "ctx.hpp"
namespace zmq
{
struct i_poll_events;
// A build of libzmq must provide an implementation of the poller_t concept. By
// convention, this is done via a typedef.
//
// At the time of writing, the following implementations of the poller_t
// concept exist: zmq::devpoll_t, zmq::epoll_t, zmq::kqueue_t, zmq::poll_t,
// zmq::pollset_t, zmq::select_t
//
// An implementation of the poller_t concept must provide the following public
// methods:
// Returns load of the poller.
// int get_load() const;
//
// Add a timeout to expire in timeout_ milliseconds. After the
// expiration, timer_event on sink_ object will be called with
// argument set to id_.
// void add_timer(int timeout_, zmq::i_poll_events *sink_, int id_);
//
// Cancel the timer created by sink_ object with ID equal to id_.
// void cancel_timer(zmq::i_poll_events *sink_, int id_);
//
// Adds a fd to the poller. Initially, no events are activated. These must
// be activated by the set_* methods using the returned handle_.
// handle_t add_fd(fd_t fd_, zmq::i_poll_events *events_);
//
// Deactivates any events that may be active for the given handle_, and
// removes the fd associated with the given handle_.
// void rm_fd(handle_t handle_);
//
// The set_* and reset_* methods activate resp. deactivate polling for
// input/output readiness on the respective handle_, such that the
// in_event/out_event methods on the associated zmq::i_poll_events object
// will be called.
// Note: while handle_t and fd_t may be the same type, and may even have the
// same values for some implementation, this may not be assumed in general.
// The methods may only be called with the handle returned by add_fd.
// void set_pollin(handle_t handle_);
// void reset_pollin(handle_t handle_);
// void set_pollout(handle_t handle_);
// void reset_pollout(handle_t handle_);
//
// Starts operation of the poller. See below for details.
// void start();
//
// Request termination of the poller.
// TODO: might be removed in the future, as it has no effect.
// void stop();
//
// Returns the maximum number of fds that can be added to an instance of the
// poller at the same time, or -1 if there is no such fixed limit.
// static int max_fds();
//
// Most of the methods may only be called from a zmq::i_poll_events callback
// function when invoked by the poller (and, therefore, typically from the
// poller's worker thread), with the following exceptions:
// - get_load may be called from outside
// - add_fd and add_timer may be called from outside before start
// - start may be called from outside once
//
// After a poller is started, it waits for the registered events (input/output
// readiness, timeout) to happen, and calls the respective functions on the
// zmq::i_poll_events object. It terminates when no further registrations (fds
// or timers) exist.
//
// Before start, add_fd must have been called at least once. Behavior may be
// undefined otherwise.
//
// If the poller is implemented by a single worker thread (the
// worker_poller_base_t base class may be used to implement such a poller),
// no synchronization is required for the data structures modified by
// add_fd, rm_fd, add_timer, cancel_timer, (re)set_poll(in|out). However,
// reentrancy must be considered, e.g. when one of the functions modifies
// a container that is being iterated by the poller.
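//
// A minimal usage sketch (illustrative only; 'thread_ctx', 'my_fd' and
// 'my_events_t' are hypothetical names, not part of libzmq). Per the rules
// above, add_fd and add_timer may be called before start; set_pollin /
// set_pollout would then be called from the i_poll_events callbacks:
//
//   zmq::poller_t poller (thread_ctx);    // poller_t is the configured typedef
//   my_events_t events;                   // implements zmq::i_poll_events
//   zmq::poller_t::handle_t h = poller.add_fd (my_fd, &events);
//   poller.add_timer (100, &events, 1);   // timer_event (1) after ~100 ms
//   poller.start ("example");             // spawns the worker thread;
//                                         // set_pollin (h) would follow from
//                                         // inside the event callbacks.
//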
// A class that can be used as a base class for implementations of the poller
// concept.
//
// For documentation of the public methods, see the description of the poller_t
// concept.
class poller_base_t
{
public:
poller_base_t () ZMQ_DEFAULT;
virtual ~poller_base_t ();
// Methods from the poller concept.
int get_load () const;
void add_timer (int timeout_, zmq::i_poll_events *sink_, int id_);
void cancel_timer (zmq::i_poll_events *sink_, int id_);
protected:
// Called by individual poller implementations to manage the load.
void adjust_load (int amount_);
// Executes any timers that are due. Returns number of milliseconds
// to wait to match the next timer or 0 meaning "no timers".
uint64_t execute_timers ();
private:
// Clock instance private to this I/O thread.
clock_t _clock;
// List of active timers.
struct timer_info_t
{
zmq::i_poll_events *sink;
int id;
};
typedef std::multimap<uint64_t, timer_info_t> timers_t;
timers_t _timers;
// Load of the poller. Currently the number of file descriptors
// registered.
atomic_counter_t _load;
ZMQ_NON_COPYABLE_NOR_MOVABLE (poller_base_t)
};
// Base class for a poller with a single worker thread.
class worker_poller_base_t : public poller_base_t
{
public:
worker_poller_base_t (const thread_ctx_t &ctx_);
// Methods from the poller concept.
void start (const char *name = NULL);
protected:
// Checks whether the currently executing thread is the worker thread
// via an assertion.
// Should be called by the add_fd, rm_fd, set_*, reset_* functions
// to ensure correct usage.
void check_thread () const;
// Stops the worker thread. Should be called from the destructor of the
// leaf class.
void stop_worker ();
private:
// Main worker thread routine.
static void worker_routine (void *arg_);
virtual void loop () = 0;
// Reference to ZMQ context.
const thread_ctx_t &_ctx;
// Handle of the physical thread doing the I/O work.
thread_t _worker;
};
}
#endif
| sophomore_public/libzmq | src/poller_base.hpp | C++ | gpl-3.0 | 5,736 |
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include "polling_util.hpp"
#if defined ZMQ_POLL_BASED_ON_POLL
#include <limits.h>
#include <algorithm>
zmq::timeout_t zmq::compute_timeout (const bool first_pass_,
const long timeout_,
const uint64_t now_,
const uint64_t end_)
{
if (first_pass_)
return 0;
if (timeout_ < 0)
return -1;
return static_cast<zmq::timeout_t> (
std::min<uint64_t> (end_ - now_, INT_MAX));
}
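// For illustration (not part of the original source): with timeout_ == 250,
// now_ == 1000 and end_ == 1250, the first pass returns 0 (poll without
// blocking); a later pass at now_ == 1100 returns 150, i.e. the time
// remaining until the deadline, clamped to INT_MAX.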
#endif
| sophomore_public/libzmq | src/polling_util.cpp | C++ | gpl-3.0 | 592 |
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_SOCKET_POLLING_UTIL_HPP_INCLUDED__
#define __ZMQ_SOCKET_POLLING_UTIL_HPP_INCLUDED__
#include <stdlib.h>
#include <vector>
#if defined ZMQ_HAVE_WINDOWS
#include <winsock.h>
#else
#include <sys/select.h>
#endif
#include "macros.hpp"
#include "stdint.hpp"
#include "platform.hpp"
#include "err.hpp"
namespace zmq
{
template <typename T, size_t S> class fast_vector_t
{
public:
explicit fast_vector_t (const size_t nitems_)
{
if (nitems_ > S) {
_buf = new (std::nothrow) T[nitems_];
// TODO since this function is called by a client, we could return errno == ENOMEM here
alloc_assert (_buf);
} else {
_buf = _static_buf;
}
}
T &operator[] (const size_t i) { return _buf[i]; }
~fast_vector_t ()
{
if (_buf != _static_buf)
delete[] _buf;
}
private:
T _static_buf[S];
T *_buf;
ZMQ_NON_COPYABLE_NOR_MOVABLE (fast_vector_t)
};
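// Usage sketch (illustrative only; 'nitems' and 'my_pollfd' are hypothetical):
// a pollfd array that stays in the static buffer for up to ZMQ_POLLITEMS_DFLT
// entries and falls back to the heap for larger counts:
//
//   zmq::fast_vector_t<pollfd, ZMQ_POLLITEMS_DFLT> pollfds (nitems);
//   pollfds[0] = my_pollfd;   // indexable like a plain array
//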
template <typename T, size_t S> class resizable_fast_vector_t
{
public:
resizable_fast_vector_t () : _dynamic_buf (NULL) {}
void resize (const size_t nitems_)
{
if (_dynamic_buf) {
_dynamic_buf->resize (nitems_);
} else if (nitems_ > S) {
_dynamic_buf = new (std::nothrow) std::vector<T> (nitems_);
// TODO since this function is called by a client, we could return errno == ENOMEM here
alloc_assert (_dynamic_buf);
memcpy (&(*_dynamic_buf)[0], _static_buf, sizeof _static_buf);
}
}
T *get_buf ()
{
// e.g. MSVC 2008 does not have std::vector::data, so we use &...[0]
return _dynamic_buf ? &(*_dynamic_buf)[0] : _static_buf;
}
T &operator[] (const size_t i) { return get_buf ()[i]; }
~resizable_fast_vector_t () { delete _dynamic_buf; }
private:
T _static_buf[S];
std::vector<T> *_dynamic_buf;
ZMQ_NON_COPYABLE_NOR_MOVABLE (resizable_fast_vector_t)
};
#if defined ZMQ_POLL_BASED_ON_POLL
typedef int timeout_t;
timeout_t
compute_timeout (bool first_pass_, long timeout_, uint64_t now_, uint64_t end_);
#endif
#if (!defined ZMQ_POLL_BASED_ON_POLL && defined ZMQ_POLL_BASED_ON_SELECT) \
|| defined ZMQ_HAVE_PPOLL
#if defined ZMQ_HAVE_WINDOWS
inline size_t valid_pollset_bytes (const fd_set &pollset_)
{
// On Windows we don't need to copy the whole fd_set.
// SOCKETS are continuous from the beginning of fd_array in fd_set.
// We just need to copy fd_count elements of fd_array.
// We gain huge memcpy() improvement if number of used SOCKETs is much lower than FD_SETSIZE.
return reinterpret_cast<const char *> (
&pollset_.fd_array[pollset_.fd_count])
- reinterpret_cast<const char *> (&pollset_);
}
#else
inline size_t valid_pollset_bytes (const fd_set & /*pollset_*/)
{
return sizeof (fd_set);
}
#endif
#if defined ZMQ_HAVE_WINDOWS
// struct fd_set {
// u_int fd_count;
// SOCKET fd_array[1];
// };
// NOTE: offsetof(fd_set, fd_array)==sizeof(SOCKET) on both x86 and x64
// due to alignment bytes for the latter.
class optimized_fd_set_t
{
public:
explicit optimized_fd_set_t (size_t nevents_) : _fd_set (1 + nevents_) {}
fd_set *get () { return reinterpret_cast<fd_set *> (&_fd_set[0]); }
private:
fast_vector_t<SOCKET, 1 + ZMQ_POLLITEMS_DFLT> _fd_set;
};
class resizable_optimized_fd_set_t
{
public:
void resize (size_t nevents_) { _fd_set.resize (1 + nevents_); }
fd_set *get () { return reinterpret_cast<fd_set *> (&_fd_set[0]); }
private:
resizable_fast_vector_t<SOCKET, 1 + ZMQ_POLLITEMS_DFLT> _fd_set;
};
#else
class optimized_fd_set_t
{
public:
explicit optimized_fd_set_t (size_t /*nevents_*/) {}
fd_set *get () { return &_fd_set; }
private:
fd_set _fd_set;
};
class resizable_optimized_fd_set_t : public optimized_fd_set_t
{
public:
resizable_optimized_fd_set_t () : optimized_fd_set_t (0) {}
void resize (size_t /*nevents_*/) {}
};
#endif
#endif
}
#endif
| sophomore_public/libzmq | src/polling_util.hpp | C++ | gpl-3.0 | 4,081 |
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include "pollset.hpp"
#if defined ZMQ_IOTHREAD_POLLER_USE_POLLSET
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <algorithm>
#include <new>
#include "macros.hpp"
#include "err.hpp"
#include "config.hpp"
#include "i_poll_events.hpp"
zmq::pollset_t::pollset_t (const zmq::thread_ctx_t &ctx_) :
ctx (ctx_), stopping (false)
{
pollset_fd = pollset_create (-1);
errno_assert (pollset_fd != -1);
}
zmq::pollset_t::~pollset_t ()
{
// Wait till the worker thread exits.
worker.stop ();
pollset_destroy (pollset_fd);
for (retired_t::iterator it = retired.begin (); it != retired.end (); ++it)
LIBZMQ_DELETE (*it);
}
zmq::pollset_t::handle_t zmq::pollset_t::add_fd (fd_t fd_,
i_poll_events *events_)
{
poll_entry_t *pe = new (std::nothrow) poll_entry_t;
alloc_assert (pe);
pe->fd = fd_;
pe->flag_pollin = false;
pe->flag_pollout = false;
pe->events = events_;
struct poll_ctl pc;
pc.fd = fd_;
pc.cmd = PS_ADD;
pc.events = 0;
int rc = pollset_ctl (pollset_fd, &pc, 1);
errno_assert (rc != -1);
// Increase the load metric of the thread.
adjust_load (1);
if (fd_ >= fd_table.size ()) {
fd_table.resize (fd_ + 1, NULL);
}
fd_table[fd_] = pe;
return pe;
}
void zmq::pollset_t::rm_fd (handle_t handle_)
{
poll_entry_t *pe = (poll_entry_t *) handle_;
struct poll_ctl pc;
pc.fd = pe->fd;
pc.cmd = PS_DELETE;
pc.events = 0;
pollset_ctl (pollset_fd, &pc, 1);
fd_table[pe->fd] = NULL;
pe->fd = retired_fd;
retired.push_back (pe);
// Decrease the load metric of the thread.
adjust_load (-1);
}
void zmq::pollset_t::set_pollin (handle_t handle_)
{
poll_entry_t *pe = (poll_entry_t *) handle_;
if (likely (!pe->flag_pollin)) {
struct poll_ctl pc;
pc.fd = pe->fd;
pc.cmd = PS_MOD;
pc.events = POLLIN;
const int rc = pollset_ctl (pollset_fd, &pc, 1);
errno_assert (rc != -1);
pe->flag_pollin = true;
}
}
void zmq::pollset_t::reset_pollin (handle_t handle_)
{
poll_entry_t *pe = (poll_entry_t *) handle_;
if (unlikely (!pe->flag_pollin)) {
return;
}
struct poll_ctl pc;
pc.fd = pe->fd;
pc.events = 0;
pc.cmd = PS_DELETE;
int rc = pollset_ctl (pollset_fd, &pc, 1);
if (pe->flag_pollout) {
pc.events = POLLOUT;
pc.cmd = PS_MOD;
rc = pollset_ctl (pollset_fd, &pc, 1);
errno_assert (rc != -1);
}
pe->flag_pollin = false;
}
void zmq::pollset_t::set_pollout (handle_t handle_)
{
poll_entry_t *pe = (poll_entry_t *) handle_;
if (likely (!pe->flag_pollout)) {
struct poll_ctl pc;
pc.fd = pe->fd;
pc.cmd = PS_MOD;
pc.events = POLLOUT;
const int rc = pollset_ctl (pollset_fd, &pc, 1);
errno_assert (rc != -1);
pe->flag_pollout = true;
}
}
void zmq::pollset_t::reset_pollout (handle_t handle_)
{
poll_entry_t *pe = (poll_entry_t *) handle_;
if (unlikely (!pe->flag_pollout)) {
return;
}
struct poll_ctl pc;
pc.fd = pe->fd;
pc.events = 0;
pc.cmd = PS_DELETE;
int rc = pollset_ctl (pollset_fd, &pc, 1);
errno_assert (rc != -1);
if (pe->flag_pollin) {
pc.cmd = PS_MOD;
pc.events = POLLIN;
rc = pollset_ctl (pollset_fd, &pc, 1);
errno_assert (rc != -1);
}
pe->flag_pollout = false;
}
void zmq::pollset_t::start ()
{
ctx.start_thread (worker, worker_routine, this);
}
void zmq::pollset_t::stop ()
{
stopping = true;
}
int zmq::pollset_t::max_fds ()
{
return -1;
}
void zmq::pollset_t::loop ()
{
struct pollfd polldata_array[max_io_events];
while (!stopping) {
// Execute any due timers.
int timeout = (int) execute_timers ();
// Wait for events.
int n = pollset_poll (pollset_fd, polldata_array, max_io_events,
timeout ? timeout : -1);
if (n == -1) {
errno_assert (errno == EINTR);
continue;
}
for (int i = 0; i < n; i++) {
poll_entry_t *pe = fd_table[polldata_array[i].fd];
if (!pe)
continue;
if (pe->fd == retired_fd)
continue;
if (polldata_array[i].revents & (POLLERR | POLLHUP))
pe->events->in_event ();
if (pe->fd == retired_fd)
continue;
if (polldata_array[i].revents & POLLOUT)
pe->events->out_event ();
if (pe->fd == retired_fd)
continue;
if (polldata_array[i].revents & POLLIN)
pe->events->in_event ();
}
// Destroy retired event sources.
for (retired_t::iterator it = retired.begin (); it != retired.end ();
++it)
LIBZMQ_DELETE (*it);
retired.clear ();
}
}
void zmq::pollset_t::worker_routine (void *arg_)
{
((pollset_t *) arg_)->loop ();
}
#endif
| sophomore_public/libzmq | src/pollset.cpp | C++ | gpl-3.0 | 5,174 |
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_POLLSET_HPP_INCLUDED__
#define __ZMQ_POLLSET_HPP_INCLUDED__
// poller.hpp decides which polling mechanism to use.
#include "poller.hpp"
#if defined ZMQ_IOTHREAD_POLLER_USE_POLLSET
#include <sys/poll.h>
#include <sys/pollset.h>
#include <vector>
#include "ctx.hpp"
#include "fd.hpp"
#include "thread.hpp"
#include "poller_base.hpp"
namespace zmq
{
struct i_poll_events;
// This class implements socket polling mechanism using the AIX-specific
// pollset mechanism.
class pollset_t ZMQ_FINAL : public poller_base_t
{
public:
typedef void *handle_t;
pollset_t (const thread_ctx_t &ctx_);
~pollset_t () ZMQ_FINAL;
// "poller" concept.
handle_t add_fd (fd_t fd_, zmq::i_poll_events *events_);
void rm_fd (handle_t handle_);
void set_pollin (handle_t handle_);
void reset_pollin (handle_t handle_);
void set_pollout (handle_t handle_);
void reset_pollout (handle_t handle_);
void start ();
void stop ();
static int max_fds ();
private:
// Main worker thread routine.
static void worker_routine (void *arg_);
// Main event loop.
void loop () ZMQ_FINAL;
// Reference to ZMQ context.
const thread_ctx_t &ctx;
// Main pollset file descriptor
::pollset_t pollset_fd;
struct poll_entry_t
{
fd_t fd;
bool flag_pollin;
bool flag_pollout;
zmq::i_poll_events *events;
};
// List of retired event sources.
typedef std::vector<poll_entry_t *> retired_t;
retired_t retired;
// This table stores data for registered descriptors.
typedef std::vector<poll_entry_t *> fd_table_t;
fd_table_t fd_table;
// If true, thread is in the process of shutting down.
bool stopping;
// Handle of the physical thread doing the I/O work.
thread_t worker;
ZMQ_NON_COPYABLE_NOR_MOVABLE (pollset_t)
};
typedef pollset_t poller_t;
}
#endif
#endif
| sophomore_public/libzmq | src/pollset.hpp | C++ | gpl-3.0 | 1,969 |
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
| sophomore_public/libzmq | src/precompiled.cpp | C++ | gpl-3.0 | 67 |
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_PRECOMPILED_HPP_INCLUDED__
#define __ZMQ_PRECOMPILED_HPP_INCLUDED__
// On AIX platform, poll.h has to be included first to get consistent
// definition of pollfd structure (AIX uses 'reqevents' and 'retnevents'
// instead of 'events' and 'revents' and defines macros to map from POSIX-y
// names to AIX-specific names).
// zmq.h must be included *after* poll.h for AIX to build properly.
// precompiled.hpp includes include/zmq.h
#if defined ZMQ_POLL_BASED_ON_POLL && defined ZMQ_HAVE_AIX
#include <poll.h>
#endif
#include "platform.hpp"
#define __STDC_LIMIT_MACROS
// This must be included before any windows headers are compiled.
#if defined ZMQ_HAVE_WINDOWS
#include "windows.hpp"
#endif
#if defined ZMQ_HAVE_OPENBSD
#define ucred sockpeercred
#endif
// 0MQ definitions and exported functions
#include "../include/zmq.h"
// 0MQ DRAFT definitions and exported functions
#include "zmq_draft.h"
// TODO: expand pch implementation to non-windows builds.
#ifdef _MSC_VER
// standard C headers
#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <io.h>
#include <ipexport.h>
#include <iphlpapi.h>
#include <limits.h>
#include <mstcpip.h>
#include <mswsock.h>
#include <process.h>
#include <rpc.h>
#include <signal.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <time.h>
#include <winsock2.h>
#include <ws2tcpip.h>
// standard C++ headers
#include <algorithm>
#include <climits>
#include <cmath>
#include <cstddef>
#include <cstdlib>
#include <cstring>
#include <deque>
#include <limits>
#include <map>
#include <new>
#include <set>
#include <sstream>
#include <string>
#include <vector>
#if _MSC_VER >= 1800
#include <inttypes.h>
#endif
#if _MSC_VER >= 1700
#include <atomic>
#endif
#if defined _WIN32_WCE
#include <cmnintrin.h>
#else
#include <intrin.h>
#endif
#if defined HAVE_LIBGSSAPI_KRB5
#include "err.hpp"
#include "msg.hpp"
#include "mechanism.hpp"
#include "session_base.hpp"
#include "gssapi_server.hpp"
#include "wire.hpp"
#include <gssapi/gssapi.h>
#include <gssapi/gssapi_krb5.h>
#endif
#include "options.hpp"
#endif // _MSC_VER
#endif //ifndef __ZMQ_PRECOMPILED_HPP_INCLUDED__
| sophomore_public/libzmq | src/precompiled.hpp | C++ | gpl-3.0 | 2,289 |
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include <stddef.h>
#include "poller.hpp"
#include "proxy.hpp"
#include "likely.hpp"
#include "msg.hpp"
#if defined ZMQ_POLL_BASED_ON_POLL && !defined ZMQ_HAVE_WINDOWS \
&& !defined ZMQ_HAVE_AIX
#include <poll.h>
#endif
// These headers end up pulling in zmq.h somewhere in their include
// dependency chain
#include "socket_base.hpp"
#include "err.hpp"
int zmq::proxy (class socket_base_t *frontend_,
class socket_base_t *backend_,
class socket_base_t *capture_)
{
return zmq::proxy_steerable (frontend_, backend_, capture_, NULL);
}
#ifdef ZMQ_HAVE_POLLER
#include "socket_poller.hpp"
// Macros for repetitive code.
// PROXY_CLEANUP() must not be used before these variables are initialized.
#define PROXY_CLEANUP() \
do { \
delete poller_all; \
delete poller_in; \
delete poller_receive_blocked; \
delete poller_send_blocked; \
delete poller_both_blocked; \
delete poller_frontend_only; \
delete poller_backend_only; \
} while (false)
#define CHECK_RC_EXIT_ON_FAILURE() \
do { \
if (rc < 0) { \
PROXY_CLEANUP (); \
return close_and_return (&msg, -1); \
} \
} while (false)
#endif // ZMQ_HAVE_POLLER
static int
capture (class zmq::socket_base_t *capture_, zmq::msg_t *msg_, int more_ = 0)
{
// Copy message to capture socket if any
if (capture_) {
zmq::msg_t ctrl;
int rc = ctrl.init ();
if (unlikely (rc < 0))
return -1;
rc = ctrl.copy (*msg_);
if (unlikely (rc < 0))
return -1;
rc = capture_->send (&ctrl, more_ ? ZMQ_SNDMORE : 0);
if (unlikely (rc < 0))
return -1;
}
return 0;
}
struct stats_socket
{
uint64_t count, bytes;
};
struct stats_endpoint
{
stats_socket send, recv;
};
struct stats_proxy
{
stats_endpoint frontend, backend;
};
static int forward (class zmq::socket_base_t *from_,
class zmq::socket_base_t *to_,
class zmq::socket_base_t *capture_,
zmq::msg_t *msg_,
stats_socket &recving,
stats_socket &sending)
{
// Forward a burst of messages
for (unsigned int i = 0; i < zmq::proxy_burst_size; i++) {
int more;
size_t moresz;
// Forward all the parts of one message
while (true) {
int rc = from_->recv (msg_, ZMQ_DONTWAIT);
if (rc < 0) {
if (likely (errno == EAGAIN && i > 0))
return 0; // End of burst
return -1;
}
size_t nbytes = msg_->size ();
recving.count += 1;
recving.bytes += nbytes;
moresz = sizeof more;
rc = from_->getsockopt (ZMQ_RCVMORE, &more, &moresz);
if (unlikely (rc < 0))
return -1;
// Copy message to capture socket if any
rc = capture (capture_, msg_, more);
if (unlikely (rc < 0))
return -1;
rc = to_->send (msg_, more ? ZMQ_SNDMORE : 0);
if (unlikely (rc < 0))
return -1;
sending.count += 1;
sending.bytes += nbytes;
if (more == 0)
break;
}
}
return 0;
}
enum proxy_state_t
{
active,
paused,
terminated
};
// Handle control request [5]PAUSE, [6]RESUME, [9]TERMINATE,
// [10]STATISTICS. Only STATISTICS produces a multipart reply; the other
// commands are answered with an empty frame only when the control socket is ZMQ_REP.
static int handle_control (class zmq::socket_base_t *control_,
proxy_state_t &state,
const stats_proxy &stats)
{
zmq::msg_t cmsg;
int rc = cmsg.init ();
if (rc != 0) {
return -1;
}
rc = control_->recv (&cmsg, ZMQ_DONTWAIT);
if (rc < 0) {
return -1;
}
uint8_t *const command = static_cast<uint8_t *> (cmsg.data ());
const size_t msiz = cmsg.size ();
if (msiz == 10 && 0 == memcmp (command, "STATISTICS", 10)) {
// The stats are a cross product:
//
// (Front,Back) X (Recv,Sent) X (Number,Bytes).
//
// that is flattened into sequence of 8 message parts according to the
// zmq_proxy_steerable(3) documentation as:
//
// (frn, frb, fsn, fsb, brn, brb, bsn, bsb)
//
// f=front/b=back, r=recv/s=send, n=number/b=bytes.
const uint64_t stat_vals[8] = {
stats.frontend.recv.count, stats.frontend.recv.bytes,
stats.frontend.send.count, stats.frontend.send.bytes,
stats.backend.recv.count, stats.backend.recv.bytes,
stats.backend.send.count, stats.backend.send.bytes};
for (size_t ind = 0; ind < 8; ++ind) {
cmsg.init_size (sizeof (uint64_t));
memcpy (cmsg.data (), stat_vals + ind, sizeof (uint64_t));
rc = control_->send (&cmsg, ind < 7 ? ZMQ_SNDMORE : 0);
if (unlikely (rc < 0)) {
return -1;
}
}
return 0;
}
if (msiz == 5 && 0 == memcmp (command, "PAUSE", 5)) {
state = paused;
} else if (msiz == 6 && 0 == memcmp (command, "RESUME", 6)) {
state = active;
} else if (msiz == 9 && 0 == memcmp (command, "TERMINATE", 9)) {
state = terminated;
}
int type;
size_t sz = sizeof (type);
zmq_getsockopt (control_, ZMQ_TYPE, &type, &sz);
if (type == ZMQ_REP) {
// satisfy REP duty and reply no matter what.
cmsg.init_size (0);
rc = control_->send (&cmsg, 0);
if (unlikely (rc < 0)) {
return -1;
}
}
return 0;
}
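// Client-side sketch (illustrative only; 'control' and 'buf' are hypothetical,
// e.g. a ZMQ_REQ socket connected to the proxy's ZMQ_REP control endpoint).
// PAUSE/RESUME/TERMINATE are acknowledged with an empty reply; STATISTICS
// instead returns the 8 uint64_t parts described above:
//
//   zmq_send (control, "PAUSE", 5, 0);
//   zmq_recv (control, buf, sizeof buf, 0);   // empty acknowledgement
//   zmq_send (control, "STATISTICS", 10, 0);
//   // ...receive 8 message parts, each holding one uint64_t counter...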
#ifdef ZMQ_HAVE_POLLER
int zmq::proxy_steerable (class socket_base_t *frontend_,
class socket_base_t *backend_,
class socket_base_t *capture_,
class socket_base_t *control_)
{
msg_t msg;
int rc = msg.init ();
if (rc != 0)
return -1;
// The algorithm below assumes ratio of requests and replies processed
// under full load to be 1:1.
// Proxy can be in these three states
proxy_state_t state = active;
bool frontend_equal_to_backend;
bool frontend_in = false;
bool frontend_out = false;
bool backend_in = false;
bool backend_out = false;
zmq::socket_poller_t::event_t events[4];
int nevents = 3; // increase to 4 if we have control_
stats_proxy stats = {{{0, 0}, {0, 0}}, {{0, 0}, {0, 0}}};
// Don't allocate these pollers from stack because they will take more than 900 kB of stack!
// On Windows this blows up default stack of 1 MB and aborts the program.
// I wanted to use std::shared_ptr here as the best solution but that requires C++11...
zmq::socket_poller_t *poller_all =
new (std::nothrow) zmq::socket_poller_t; // Poll for everything.
zmq::socket_poller_t *poller_in = new (std::nothrow) zmq::
socket_poller_t; // Poll only 'ZMQ_POLLIN' on all sockets. Initial blocking poll in loop.
zmq::socket_poller_t *poller_receive_blocked = new (std::nothrow)
zmq::socket_poller_t; // All except 'ZMQ_POLLIN' on 'frontend_'.
// If frontend_==backend_ 'poller_send_blocked' and 'poller_receive_blocked' are the same, 'ZMQ_POLLIN' is ignored.
// In that case 'poller_send_blocked' is not used. We need only 'poller_receive_blocked'.
// We also don't need 'poller_both_blocked', 'poller_frontend_only' nor 'poller_backend_only', so there is no need to initialize them.
// We save some RAM and time for initialization.
zmq::socket_poller_t *poller_send_blocked =
NULL; // All except 'ZMQ_POLLIN' on 'backend_'.
zmq::socket_poller_t *poller_both_blocked =
NULL; // All except 'ZMQ_POLLIN' on both 'frontend_' and 'backend_'.
zmq::socket_poller_t *poller_frontend_only =
NULL; // Only 'ZMQ_POLLIN' and 'ZMQ_POLLOUT' on 'frontend_'.
zmq::socket_poller_t *poller_backend_only =
NULL; // Only 'ZMQ_POLLIN' and 'ZMQ_POLLOUT' on 'backend_'.
if (frontend_ != backend_) {
poller_send_blocked = new (std::nothrow)
zmq::socket_poller_t; // All except 'ZMQ_POLLIN' on 'backend_'.
poller_both_blocked = new (std::nothrow) zmq::
socket_poller_t; // All except 'ZMQ_POLLIN' on both 'frontend_' and 'backend_'.
poller_frontend_only = new (std::nothrow) zmq::
socket_poller_t; // Only 'ZMQ_POLLIN' and 'ZMQ_POLLOUT' on 'frontend_'.
poller_backend_only = new (std::nothrow) zmq::
socket_poller_t; // Only 'ZMQ_POLLIN' and 'ZMQ_POLLOUT' on 'backend_'.
frontend_equal_to_backend = false;
} else
frontend_equal_to_backend = true;
if (poller_all == NULL || poller_in == NULL
|| poller_receive_blocked == NULL
|| ((poller_send_blocked == NULL || poller_both_blocked == NULL)
&& !frontend_equal_to_backend)) {
PROXY_CLEANUP ();
return close_and_return (&msg, -1);
}
zmq::socket_poller_t *poller_wait =
poller_in; // Poller for blocking wait, initially all 'ZMQ_POLLIN'.
// Register 'frontend_' and 'backend_' with pollers.
rc = poller_all->add (frontend_, NULL,
ZMQ_POLLIN | ZMQ_POLLOUT); // Everything.
CHECK_RC_EXIT_ON_FAILURE ();
rc = poller_in->add (frontend_, NULL, ZMQ_POLLIN); // All 'ZMQ_POLLIN's.
CHECK_RC_EXIT_ON_FAILURE ();
if (frontend_equal_to_backend) {
// If frontend_==backend_ 'poller_send_blocked' and 'poller_receive_blocked' are the same,
// so we don't need 'poller_send_blocked'. We need only 'poller_receive_blocked'.
// We also don't need 'poller_both_blocked', no need to initialize it.
rc = poller_receive_blocked->add (frontend_, NULL, ZMQ_POLLOUT);
CHECK_RC_EXIT_ON_FAILURE ();
} else {
rc = poller_all->add (backend_, NULL,
ZMQ_POLLIN | ZMQ_POLLOUT); // Everything.
CHECK_RC_EXIT_ON_FAILURE ();
rc = poller_in->add (backend_, NULL, ZMQ_POLLIN); // All 'ZMQ_POLLIN's.
CHECK_RC_EXIT_ON_FAILURE ();
rc = poller_both_blocked->add (
frontend_, NULL, ZMQ_POLLOUT); // Waiting only for 'ZMQ_POLLOUT'.
CHECK_RC_EXIT_ON_FAILURE ();
rc = poller_both_blocked->add (
backend_, NULL, ZMQ_POLLOUT); // Waiting only for 'ZMQ_POLLOUT'.
CHECK_RC_EXIT_ON_FAILURE ();
rc = poller_send_blocked->add (
backend_, NULL,
ZMQ_POLLOUT); // All except 'ZMQ_POLLIN' on 'backend_'.
CHECK_RC_EXIT_ON_FAILURE ();
rc = poller_send_blocked->add (
frontend_, NULL,
ZMQ_POLLIN | ZMQ_POLLOUT); // All except 'ZMQ_POLLIN' on 'backend_'.
CHECK_RC_EXIT_ON_FAILURE ();
rc = poller_receive_blocked->add (
frontend_, NULL,
ZMQ_POLLOUT); // All except 'ZMQ_POLLIN' on 'frontend_'.
CHECK_RC_EXIT_ON_FAILURE ();
rc = poller_receive_blocked->add (
backend_, NULL,
ZMQ_POLLIN | ZMQ_POLLOUT); // All except 'ZMQ_POLLIN' on 'frontend_'.
CHECK_RC_EXIT_ON_FAILURE ();
rc =
poller_frontend_only->add (frontend_, NULL, ZMQ_POLLIN | ZMQ_POLLOUT);
CHECK_RC_EXIT_ON_FAILURE ();
rc =
poller_backend_only->add (backend_, NULL, ZMQ_POLLIN | ZMQ_POLLOUT);
CHECK_RC_EXIT_ON_FAILURE ();
}
if (control_) {
++nevents;
// wherever you go, there you are.
rc = poller_all->add (control_, NULL, ZMQ_POLLIN);
CHECK_RC_EXIT_ON_FAILURE ();
rc = poller_in->add (control_, NULL, ZMQ_POLLIN);
CHECK_RC_EXIT_ON_FAILURE ();
rc = poller_receive_blocked->add (control_, NULL, ZMQ_POLLIN);
CHECK_RC_EXIT_ON_FAILURE ();
rc = poller_send_blocked->add (control_, NULL, ZMQ_POLLIN);
CHECK_RC_EXIT_ON_FAILURE ();
rc = poller_both_blocked->add (control_, NULL, ZMQ_POLLIN);
CHECK_RC_EXIT_ON_FAILURE ();
rc = poller_frontend_only->add (control_, NULL, ZMQ_POLLIN);
CHECK_RC_EXIT_ON_FAILURE ();
rc = poller_backend_only->add (control_, NULL, ZMQ_POLLIN);
CHECK_RC_EXIT_ON_FAILURE ();
}
bool request_processed = false, reply_processed = false;
while (state != terminated) {
// Blocking wait initially only for 'ZMQ_POLLIN' - 'poller_wait' points to 'poller_in'.
// If one of receiving end's queue is full ('ZMQ_POLLOUT' not available),
// 'poller_wait' is pointed to 'poller_receive_blocked', 'poller_send_blocked' or 'poller_both_blocked'.
rc = poller_wait->wait (events, nevents, -1);
if (rc < 0 && errno == EAGAIN)
rc = 0;
CHECK_RC_EXIT_ON_FAILURE ();
// Some of events waited for by 'poller_wait' have arrived, now poll for everything without blocking.
rc = poller_all->wait (events, nevents, 0);
if (rc < 0 && errno == EAGAIN)
rc = 0;
CHECK_RC_EXIT_ON_FAILURE ();
// Process events.
for (int i = 0; i < rc; i++) {
if (control_ && events[i].socket == control_) {
rc = handle_control (control_, state, stats);
CHECK_RC_EXIT_ON_FAILURE ();
continue;
}
if (events[i].socket == frontend_) {
frontend_in = (events[i].events & ZMQ_POLLIN) != 0;
frontend_out = (events[i].events & ZMQ_POLLOUT) != 0;
} else
// This 'if' needs to be after check for 'frontend_' in order never
// to be reached in case frontend_==backend_, so we ensure backend_in=false in that case.
if (events[i].socket == backend_) {
backend_in = (events[i].events & ZMQ_POLLIN) != 0;
backend_out = (events[i].events & ZMQ_POLLOUT) != 0;
}
}
if (state == active) {
// Process a request, 'ZMQ_POLLIN' on 'frontend_' and 'ZMQ_POLLOUT' on 'backend_'.
// In case of frontend_==backend_ there's no 'ZMQ_POLLOUT' event.
if (frontend_in && (backend_out || frontend_equal_to_backend)) {
rc = forward (frontend_, backend_, capture_, &msg,
stats.frontend.recv, stats.backend.send);
CHECK_RC_EXIT_ON_FAILURE ();
request_processed = true;
frontend_in = backend_out = false;
} else
request_processed = false;
// Process a reply, 'ZMQ_POLLIN' on 'backend_' and 'ZMQ_POLLOUT' on 'frontend_'.
// If 'frontend_' and 'backend_' are the same this is not needed because previous processing
// covers all of the cases. 'backend_in' is always false if frontend_==backend_ due to
// design in 'for' event processing loop.
if (backend_in && frontend_out) {
rc = forward (backend_, frontend_, capture_, &msg,
stats.backend.recv, stats.frontend.send);
CHECK_RC_EXIT_ON_FAILURE ();
reply_processed = true;
backend_in = frontend_out = false;
} else
reply_processed = false;
if (request_processed || reply_processed) {
// If request/reply is processed that means we had at least one 'ZMQ_POLLOUT' event.
// Enable corresponding 'ZMQ_POLLIN' for blocking wait if any was disabled.
if (poller_wait != poller_in) {
if (request_processed) { // 'frontend_' -> 'backend_'
if (poller_wait == poller_both_blocked)
poller_wait = poller_send_blocked;
else if (poller_wait == poller_receive_blocked
|| poller_wait == poller_frontend_only)
poller_wait = poller_in;
}
if (reply_processed) { // 'backend_' -> 'frontend_'
if (poller_wait == poller_both_blocked)
poller_wait = poller_receive_blocked;
else if (poller_wait == poller_send_blocked
|| poller_wait == poller_backend_only)
poller_wait = poller_in;
}
}
} else {
// No requests have been processed, there were no 'ZMQ_POLLIN' with corresponding 'ZMQ_POLLOUT' events.
// That means the out queue(s) are full, or one out queue is full and the other has no messages to process.
// Disable receiving 'ZMQ_POLLIN' for sockets for which there's no 'ZMQ_POLLOUT',
// or wait only on both 'backend_''s or 'frontend_''s 'ZMQ_POLLIN' and 'ZMQ_POLLOUT'.
if (frontend_in) {
if (frontend_out)
// If frontend_in and frontend_out are true, obviously backend_in and backend_out are both false.
// In that case we need to wait for both 'ZMQ_POLLIN' and 'ZMQ_POLLOUT' only on 'backend_'.
// We'll never get here in case of frontend_==backend_ because then frontend_out will always be false.
poller_wait = poller_backend_only;
else {
if (poller_wait == poller_send_blocked)
poller_wait = poller_both_blocked;
else if (poller_wait == poller_in)
poller_wait = poller_receive_blocked;
}
}
if (backend_in) {
// Will never be reached if frontend_==backend_, 'backend_in' will
// always be false due to design in 'for' event processing loop.
if (backend_out)
// If backend_in and backend_out are true, obviously frontend_in and frontend_out are both false.
// In that case we need to wait for both 'ZMQ_POLLIN' and 'ZMQ_POLLOUT' only on 'frontend_'.
poller_wait = poller_frontend_only;
else {
if (poller_wait == poller_receive_blocked)
poller_wait = poller_both_blocked;
else if (poller_wait == poller_in)
poller_wait = poller_send_blocked;
}
}
}
}
}
PROXY_CLEANUP ();
return close_and_return (&msg, 0);
}
#else // ZMQ_HAVE_POLLER
int zmq::proxy_steerable (class socket_base_t *frontend_,
class socket_base_t *backend_,
class socket_base_t *capture_,
class socket_base_t *control_)
{
msg_t msg;
int rc = msg.init ();
if (rc != 0)
return -1;
// The algorithm below assumes ratio of requests and replies processed
// under full load to be 1:1.
zmq_pollitem_t items[] = {{frontend_, 0, ZMQ_POLLIN, 0},
{backend_, 0, ZMQ_POLLIN, 0},
{control_, 0, ZMQ_POLLIN, 0}};
const int qt_poll_items = control_ ? 3 : 2;
zmq_pollitem_t itemsout[] = {{frontend_, 0, ZMQ_POLLOUT, 0},
{backend_, 0, ZMQ_POLLOUT, 0}};
stats_proxy stats = {0};
// Proxy can be in these three states
proxy_state_t state = active;
while (state != terminated) {
// Wait while there are either requests or replies to process.
rc = zmq_poll (&items[0], qt_poll_items, -1);
if (unlikely (rc < 0))
return close_and_return (&msg, -1);
if (control_ && items[2].revents & ZMQ_POLLIN) {
rc = handle_control (control_, state, stats);
if (unlikely (rc < 0))
return close_and_return (&msg, -1);
}
// Poll for POLLOUT separately: combining it with POLLIN would max out the CPU,
// because POLLOUT is ready most of the time and the combined poll would return immediately.
// POLLOUT is only checked when frontend and backend sockets are not the same.
if (frontend_ != backend_) {
rc = zmq_poll (&itemsout[0], 2, 0);
if (unlikely (rc < 0)) {
return close_and_return (&msg, -1);
}
}
if (state == active && items[0].revents & ZMQ_POLLIN
&& (frontend_ == backend_ || itemsout[1].revents & ZMQ_POLLOUT)) {
rc = forward (frontend_, backend_, capture_, &msg,
stats.frontend.recv, stats.backend.send);
if (unlikely (rc < 0))
return close_and_return (&msg, -1);
}
// Process a reply
if (state == active && frontend_ != backend_
&& items[1].revents & ZMQ_POLLIN
&& itemsout[0].revents & ZMQ_POLLOUT) {
rc = forward (backend_, frontend_, capture_, &msg,
stats.backend.recv, stats.frontend.send);
if (unlikely (rc < 0))
return close_and_return (&msg, -1);
}
}
return close_and_return (&msg, 0);
}
#endif // ZMQ_HAVE_POLLER
| sophomore_public/libzmq | src/proxy.cpp | C++ | gpl-3.0 | 22,275 |
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_PROXY_HPP_INCLUDED__
#define __ZMQ_PROXY_HPP_INCLUDED__
namespace zmq
{
int proxy (class socket_base_t *frontend_,
class socket_base_t *backend_,
class socket_base_t *capture_);
int proxy_steerable (class socket_base_t *frontend_,
class socket_base_t *backend_,
class socket_base_t *capture_,
class socket_base_t *control_);
}
#endif
| sophomore_public/libzmq | src/proxy.hpp | C++ | gpl-3.0 | 476 |
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include "pub.hpp"
#include "pipe.hpp"
#include "err.hpp"
#include "msg.hpp"
zmq::pub_t::pub_t (class ctx_t *parent_, uint32_t tid_, int sid_) :
xpub_t (parent_, tid_, sid_)
{
options.type = ZMQ_PUB;
}
zmq::pub_t::~pub_t ()
{
}
void zmq::pub_t::xattach_pipe (pipe_t *pipe_,
bool subscribe_to_all_,
bool locally_initiated_)
{
zmq_assert (pipe_);
// Don't delay pipe termination as there is no one
// to receive the delimiter.
pipe_->set_nodelay ();
xpub_t::xattach_pipe (pipe_, subscribe_to_all_, locally_initiated_);
}
int zmq::pub_t::xrecv (class msg_t *)
{
// Messages cannot be received from PUB socket.
errno = ENOTSUP;
return -1;
}
bool zmq::pub_t::xhas_in ()
{
return false;
}
| sophomore_public/libzmq | src/pub.cpp | C++ | gpl-3.0 | 866 |
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_PUB_HPP_INCLUDED__
#define __ZMQ_PUB_HPP_INCLUDED__
#include "xpub.hpp"
namespace zmq
{
class ctx_t;
class io_thread_t;
class socket_base_t;
class msg_t;
class pub_t ZMQ_FINAL : public xpub_t
{
public:
pub_t (zmq::ctx_t *parent_, uint32_t tid_, int sid_);
~pub_t ();
// Implementations of virtual functions from socket_base_t.
void xattach_pipe (zmq::pipe_t *pipe_,
bool subscribe_to_all_ = false,
bool locally_initiated_ = false);
int xrecv (zmq::msg_t *msg_);
bool xhas_in ();
ZMQ_NON_COPYABLE_NOR_MOVABLE (pub_t)
};
}
#endif
| sophomore_public/libzmq | src/pub.hpp | C++ | gpl-3.0 | 665 |
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include "macros.hpp"
#include "pull.hpp"
#include "err.hpp"
#include "msg.hpp"
#include "pipe.hpp"
zmq::pull_t::pull_t (class ctx_t *parent_, uint32_t tid_, int sid_) :
socket_base_t (parent_, tid_, sid_)
{
options.type = ZMQ_PULL;
}
zmq::pull_t::~pull_t ()
{
}
void zmq::pull_t::xattach_pipe (pipe_t *pipe_,
bool subscribe_to_all_,
bool locally_initiated_)
{
LIBZMQ_UNUSED (subscribe_to_all_);
LIBZMQ_UNUSED (locally_initiated_);
zmq_assert (pipe_);
_fq.attach (pipe_);
}
void zmq::pull_t::xread_activated (pipe_t *pipe_)
{
_fq.activated (pipe_);
}
void zmq::pull_t::xpipe_terminated (pipe_t *pipe_)
{
_fq.pipe_terminated (pipe_);
}
int zmq::pull_t::xrecv (msg_t *msg_)
{
return _fq.recv (msg_);
}
bool zmq::pull_t::xhas_in ()
{
return _fq.has_in ();
}
| sophomore_public/libzmq | src/pull.cpp | C++ | gpl-3.0 | 934 |
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_PULL_HPP_INCLUDED__
#define __ZMQ_PULL_HPP_INCLUDED__
#include "socket_base.hpp"
#include "session_base.hpp"
#include "fq.hpp"
namespace zmq
{
class ctx_t;
class pipe_t;
class msg_t;
class io_thread_t;
class pull_t ZMQ_FINAL : public socket_base_t
{
public:
pull_t (zmq::ctx_t *parent_, uint32_t tid_, int sid_);
~pull_t ();
protected:
// Overrides of functions from socket_base_t.
void xattach_pipe (zmq::pipe_t *pipe_,
bool subscribe_to_all_,
bool locally_initiated_);
int xrecv (zmq::msg_t *msg_);
bool xhas_in ();
void xread_activated (zmq::pipe_t *pipe_);
void xpipe_terminated (zmq::pipe_t *pipe_);
private:
// Fair queueing object for inbound pipes.
fq_t _fq;
ZMQ_NON_COPYABLE_NOR_MOVABLE (pull_t)
};
}
#endif
| sophomore_public/libzmq | src/pull.hpp | C++ | gpl-3.0 | 876 |
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include "macros.hpp"
#include "push.hpp"
#include "pipe.hpp"
#include "err.hpp"
#include "msg.hpp"
zmq::push_t::push_t (class ctx_t *parent_, uint32_t tid_, int sid_) :
socket_base_t (parent_, tid_, sid_)
{
options.type = ZMQ_PUSH;
}
zmq::push_t::~push_t ()
{
}
void zmq::push_t::xattach_pipe (pipe_t *pipe_,
bool subscribe_to_all_,
bool locally_initiated_)
{
LIBZMQ_UNUSED (subscribe_to_all_);
LIBZMQ_UNUSED (locally_initiated_);
// Don't delay pipe termination as there is no one
// to receive the delimiter.
pipe_->set_nodelay ();
zmq_assert (pipe_);
_lb.attach (pipe_);
}
void zmq::push_t::xwrite_activated (pipe_t *pipe_)
{
_lb.activated (pipe_);
}
void zmq::push_t::xpipe_terminated (pipe_t *pipe_)
{
_lb.pipe_terminated (pipe_);
}
int zmq::push_t::xsend (msg_t *msg_)
{
return _lb.send (msg_);
}
bool zmq::push_t::xhas_out ()
{
return _lb.has_out ();
}
| sophomore_public/libzmq | src/push.cpp | C++ | gpl-3.0 | 1,055 |
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_PUSH_HPP_INCLUDED__
#define __ZMQ_PUSH_HPP_INCLUDED__
#include "socket_base.hpp"
#include "session_base.hpp"
#include "lb.hpp"
namespace zmq
{
class ctx_t;
class pipe_t;
class msg_t;
class io_thread_t;
class push_t ZMQ_FINAL : public socket_base_t
{
public:
push_t (zmq::ctx_t *parent_, uint32_t tid_, int sid_);
~push_t ();
protected:
// Overrides of functions from socket_base_t.
void xattach_pipe (zmq::pipe_t *pipe_,
bool subscribe_to_all_,
bool locally_initiated_);
int xsend (zmq::msg_t *msg_);
bool xhas_out ();
void xwrite_activated (zmq::pipe_t *pipe_);
void xpipe_terminated (zmq::pipe_t *pipe_);
private:
// Load balancer managing the outbound pipes.
lb_t _lb;
ZMQ_NON_COPYABLE_NOR_MOVABLE (push_t)
};
}
#endif
| sophomore_public/libzmq | src/push.hpp | C++ | gpl-3.0 | 881 |
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include <string.h>
#include "radio.hpp"
#include "macros.hpp"
#include "pipe.hpp"
#include "err.hpp"
#include "msg.hpp"
zmq::radio_t::radio_t (class ctx_t *parent_, uint32_t tid_, int sid_) :
socket_base_t (parent_, tid_, sid_, true), _lossy (true)
{
options.type = ZMQ_RADIO;
}
zmq::radio_t::~radio_t ()
{
}
void zmq::radio_t::xattach_pipe (pipe_t *pipe_,
bool subscribe_to_all_,
bool locally_initiated_)
{
LIBZMQ_UNUSED (locally_initiated_);
zmq_assert (pipe_);
// Don't delay pipe termination as there is no one
// to receive the delimiter.
pipe_->set_nodelay ();
_dist.attach (pipe_);
if (subscribe_to_all_)
_udp_pipes.push_back (pipe_);
// The pipe is active when attached. Let's read the subscriptions from
// it, if any.
else
xread_activated (pipe_);
}
void zmq::radio_t::xread_activated (pipe_t *pipe_)
{
// There are some subscriptions waiting. Let's process them.
msg_t msg;
while (pipe_->read (&msg)) {
// Apply the subscription to the trie
if (msg.is_join () || msg.is_leave ()) {
std::string group = std::string (msg.group ());
if (msg.is_join ())
_subscriptions.ZMQ_MAP_INSERT_OR_EMPLACE (ZMQ_MOVE (group),
pipe_);
else {
std::pair<subscriptions_t::iterator, subscriptions_t::iterator>
range = _subscriptions.equal_range (group);
for (subscriptions_t::iterator it = range.first;
it != range.second; ++it) {
if (it->second == pipe_) {
_subscriptions.erase (it);
break;
}
}
}
}
msg.close ();
}
}
void zmq::radio_t::xwrite_activated (pipe_t *pipe_)
{
_dist.activated (pipe_);
}
int zmq::radio_t::xsetsockopt (int option_,
const void *optval_,
size_t optvallen_)
{
if (optvallen_ != sizeof (int) || *static_cast<const int *> (optval_) < 0) {
errno = EINVAL;
return -1;
}
if (option_ == ZMQ_XPUB_NODROP)
_lossy = (*static_cast<const int *> (optval_) == 0);
else {
errno = EINVAL;
return -1;
}
return 0;
}
void zmq::radio_t::xpipe_terminated (pipe_t *pipe_)
{
for (subscriptions_t::iterator it = _subscriptions.begin (),
end = _subscriptions.end ();
it != end;) {
if (it->second == pipe_) {
#if __cplusplus >= 201103L || (defined _MSC_VER && _MSC_VER >= 1700)
it = _subscriptions.erase (it);
#else
_subscriptions.erase (it++);
#endif
} else {
++it;
}
}
{
const udp_pipes_t::iterator end = _udp_pipes.end ();
const udp_pipes_t::iterator it =
std::find (_udp_pipes.begin (), end, pipe_);
if (it != end)
_udp_pipes.erase (it);
}
_dist.pipe_terminated (pipe_);
}
int zmq::radio_t::xsend (msg_t *msg_)
{
// Radio sockets do not allow multipart data (ZMQ_SNDMORE)
if (msg_->flags () & msg_t::more) {
errno = EINVAL;
return -1;
}
_dist.unmatch ();
const std::pair<subscriptions_t::iterator, subscriptions_t::iterator>
range = _subscriptions.equal_range (std::string (msg_->group ()));
for (subscriptions_t::iterator it = range.first; it != range.second; ++it)
_dist.match (it->second);
for (udp_pipes_t::iterator it = _udp_pipes.begin (),
end = _udp_pipes.end ();
it != end; ++it)
_dist.match (*it);
int rc = -1;
if (_lossy || _dist.check_hwm ()) {
if (_dist.send_to_matching (msg_) == 0) {
rc = 0; // Yay, sent successfully
}
} else
errno = EAGAIN;
return rc;
}
bool zmq::radio_t::xhas_out ()
{
return _dist.has_out ();
}
int zmq::radio_t::xrecv (msg_t *msg_)
{
// Messages cannot be received from RADIO socket.
LIBZMQ_UNUSED (msg_);
errno = ENOTSUP;
return -1;
}
bool zmq::radio_t::xhas_in ()
{
return false;
}
zmq::radio_session_t::radio_session_t (io_thread_t *io_thread_,
bool connect_,
socket_base_t *socket_,
const options_t &options_,
address_t *addr_) :
session_base_t (io_thread_, connect_, socket_, options_, addr_),
_state (group)
{
}
zmq::radio_session_t::~radio_session_t ()
{
}
int zmq::radio_session_t::push_msg (msg_t *msg_)
{
if (msg_->flags () & msg_t::command) {
char *command_data = static_cast<char *> (msg_->data ());
const size_t data_size = msg_->size ();
int group_length;
const char *group;
msg_t join_leave_msg;
int rc;
// Set the msg type to either JOIN or LEAVE
if (data_size >= 5 && memcmp (command_data, "\4JOIN", 5) == 0) {
group_length = static_cast<int> (data_size) - 5;
group = command_data + 5;
rc = join_leave_msg.init_join ();
} else if (data_size >= 6 && memcmp (command_data, "\5LEAVE", 6) == 0) {
group_length = static_cast<int> (data_size) - 6;
group = command_data + 6;
rc = join_leave_msg.init_leave ();
}
// If it is not a JOIN or LEAVE just push the message
else
return session_base_t::push_msg (msg_);
errno_assert (rc == 0);
// Set the group
rc = join_leave_msg.set_group (group, group_length);
errno_assert (rc == 0);
// Close the current command
rc = msg_->close ();
errno_assert (rc == 0);
// Push the join or leave command
*msg_ = join_leave_msg;
return session_base_t::push_msg (msg_);
}
return session_base_t::push_msg (msg_);
}
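// For reference (a sketch derived from the parsing above): the command
// frames recognised by push_msg () carry a length-prefixed command name
// followed by the group:
//
//   "\4JOIN"  <group bytes>   -> converted into a join message for <group>
//   "\5LEAVE" <group bytes>   -> converted into a leave message for <group>
//
// Anything else is pushed to the session unchanged.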
int zmq::radio_session_t::pull_msg (msg_t *msg_)
{
if (_state == group) {
int rc = session_base_t::pull_msg (&_pending_msg);
if (rc != 0)
return rc;
const char *group = _pending_msg.group ();
const int length = static_cast<int> (strlen (group));
// First frame is the group
rc = msg_->init_size (length);
errno_assert (rc == 0);
msg_->set_flags (msg_t::more);
memcpy (msg_->data (), group, length);
// Next status is the body
_state = body;
return 0;
}
*msg_ = _pending_msg;
_state = group;
return 0;
}
void zmq::radio_session_t::reset ()
{
session_base_t::reset ();
_state = group;
}
| sophomore_public/libzmq | src/radio.cpp | C++ | gpl-3.0 | 6,960 |
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_RADIO_HPP_INCLUDED__
#define __ZMQ_RADIO_HPP_INCLUDED__
#include <map>
#include <string>
#include <vector>
#include "socket_base.hpp"
#include "session_base.hpp"
#include "dist.hpp"
#include "msg.hpp"
namespace zmq
{
class ctx_t;
class pipe_t;
class io_thread_t;
class radio_t ZMQ_FINAL : public socket_base_t
{
public:
radio_t (zmq::ctx_t *parent_, uint32_t tid_, int sid_);
~radio_t ();
// Implementations of virtual functions from socket_base_t.
void xattach_pipe (zmq::pipe_t *pipe_,
bool subscribe_to_all_ = false,
bool locally_initiated_ = false);
int xsend (zmq::msg_t *msg_);
bool xhas_out ();
int xrecv (zmq::msg_t *msg_);
bool xhas_in ();
void xread_activated (zmq::pipe_t *pipe_);
void xwrite_activated (zmq::pipe_t *pipe_);
int xsetsockopt (int option_, const void *optval_, size_t optvallen_);
void xpipe_terminated (zmq::pipe_t *pipe_);
private:
// List of all subscriptions mapped to corresponding pipes.
typedef std::multimap<std::string, pipe_t *> subscriptions_t;
subscriptions_t _subscriptions;
// List of udp pipes
typedef std::vector<pipe_t *> udp_pipes_t;
udp_pipes_t _udp_pipes;
// Distributor of messages holding the list of outbound pipes.
dist_t _dist;
// Drop messages if HWM reached, otherwise return with EAGAIN
bool _lossy;
ZMQ_NON_COPYABLE_NOR_MOVABLE (radio_t)
};
class radio_session_t ZMQ_FINAL : public session_base_t
{
public:
radio_session_t (zmq::io_thread_t *io_thread_,
bool connect_,
zmq::socket_base_t *socket_,
const options_t &options_,
address_t *addr_);
~radio_session_t ();
// Overrides of the functions from session_base_t.
int push_msg (msg_t *msg_);
int pull_msg (msg_t *msg_);
void reset ();
private:
enum
{
group,
body
} _state;
msg_t _pending_msg;
ZMQ_NON_COPYABLE_NOR_MOVABLE (radio_session_t)
};
}
#endif
| sophomore_public/libzmq | src/radio.hpp | C++ | gpl-3.0 | 2,125 |
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include "macros.hpp"
#include "err.hpp"
#include "radix_tree.hpp"
#include <stdlib.h>
#include <string.h>
#include <iterator>
#include <vector>
node_t::node_t (unsigned char *data_) : _data (data_)
{
}
uint32_t node_t::refcount ()
{
uint32_t u32;
memcpy (&u32, _data, sizeof (u32));
return u32;
}
void node_t::set_refcount (uint32_t value_)
{
memcpy (_data, &value_, sizeof (value_));
}
uint32_t node_t::prefix_length ()
{
uint32_t u32;
memcpy (&u32, _data + sizeof (uint32_t), sizeof (u32));
return u32;
}
void node_t::set_prefix_length (uint32_t value_)
{
memcpy (_data + sizeof (value_), &value_, sizeof (value_));
}
uint32_t node_t::edgecount ()
{
uint32_t u32;
memcpy (&u32, _data + 2 * sizeof (uint32_t), sizeof (u32));
return u32;
}
void node_t::set_edgecount (uint32_t value_)
{
memcpy (_data + 2 * sizeof (value_), &value_, sizeof (value_));
}
unsigned char *node_t::prefix ()
{
return _data + 3 * sizeof (uint32_t);
}
void node_t::set_prefix (const unsigned char *bytes_)
{
memcpy (prefix (), bytes_, prefix_length ());
}
unsigned char *node_t::first_bytes ()
{
return prefix () + prefix_length ();
}
void node_t::set_first_bytes (const unsigned char *bytes_)
{
memcpy (first_bytes (), bytes_, edgecount ());
}
unsigned char node_t::first_byte_at (size_t index_)
{
zmq_assert (index_ < edgecount ());
return first_bytes ()[index_];
}
void node_t::set_first_byte_at (size_t index_, unsigned char byte_)
{
zmq_assert (index_ < edgecount ());
first_bytes ()[index_] = byte_;
}
unsigned char *node_t::node_pointers ()
{
return prefix () + prefix_length () + edgecount ();
}
void node_t::set_node_pointers (const unsigned char *pointers_)
{
memcpy (node_pointers (), pointers_, edgecount () * sizeof (void *));
}
node_t node_t::node_at (size_t index_)
{
zmq_assert (index_ < edgecount ());
unsigned char *data;
memcpy (&data, node_pointers () + index_ * sizeof (void *), sizeof (data));
return node_t (data);
}
void node_t::set_node_at (size_t index_, node_t node_)
{
zmq_assert (index_ < edgecount ());
memcpy (node_pointers () + index_ * sizeof (void *), &node_._data,
sizeof (node_._data));
}
void node_t::set_edge_at (size_t index_,
unsigned char first_byte_,
node_t node_)
{
set_first_byte_at (index_, first_byte_);
set_node_at (index_, node_);
}
bool node_t::operator== (node_t other_) const
{
return _data == other_._data;
}
bool node_t::operator!= (node_t other_) const
{
return !(*this == other_);
}
void node_t::resize (size_t prefix_length_, size_t edgecount_)
{
const size_t node_size = 3 * sizeof (uint32_t) + prefix_length_
+ edgecount_ * (1 + sizeof (void *));
unsigned char *new_data =
static_cast<unsigned char *> (realloc (_data, node_size));
zmq_assert (new_data);
_data = new_data;
set_prefix_length (static_cast<uint32_t> (prefix_length_));
set_edgecount (static_cast<uint32_t> (edgecount_));
}
node_t make_node (size_t refcount_, size_t prefix_length_, size_t edgecount_)
{
const size_t node_size = 3 * sizeof (uint32_t) + prefix_length_
+ edgecount_ * (1 + sizeof (void *));
unsigned char *data = static_cast<unsigned char *> (malloc (node_size));
zmq_assert (data);
node_t node (data);
node.set_refcount (static_cast<uint32_t> (refcount_));
node.set_prefix_length (static_cast<uint32_t> (prefix_length_));
node.set_edgecount (static_cast<uint32_t> (edgecount_));
return node;
}
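// For reference (a sketch derived from the accessors above), each node is a
// single flat allocation laid out as:
//
//   uint32_t refcount | uint32_t prefix_length | uint32_t edgecount
//   | prefix_length bytes of prefix | edgecount first bytes (one per edge)
//   | edgecount child pointers (sizeof (void *) each)
//
// which is why make_node () and node_t::resize () compute the size as
// 3 * sizeof (uint32_t) + prefix_length + edgecount * (1 + sizeof (void *)).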
// ----------------------------------------------------------------------
zmq::radix_tree_t::radix_tree_t () : _root (make_node (0, 0, 0)), _size (0)
{
}
static void free_nodes (node_t node_)
{
for (size_t i = 0, count = node_.edgecount (); i < count; ++i)
free_nodes (node_.node_at (i));
free (node_._data);
}
zmq::radix_tree_t::~radix_tree_t ()
{
free_nodes (_root);
}
match_result_t::match_result_t (size_t key_bytes_matched_,
size_t prefix_bytes_matched_,
size_t edge_index_,
size_t parent_edge_index_,
node_t current_,
node_t parent_,
node_t grandparent_) :
_key_bytes_matched (key_bytes_matched_),
_prefix_bytes_matched (prefix_bytes_matched_),
_edge_index (edge_index_),
_parent_edge_index (parent_edge_index_),
_current_node (current_),
_parent_node (parent_),
_grandparent_node (grandparent_)
{
}
match_result_t zmq::radix_tree_t::match (const unsigned char *key_,
size_t key_size_,
bool is_lookup_ = false) const
{
zmq_assert (key_);
// Node we're currently at in the traversal and its predecessors.
node_t current_node = _root;
node_t parent_node = current_node;
node_t grandparent_node = current_node;
// Index of the next byte to match in the key.
size_t key_byte_index = 0;
// Index of the next byte to match in the current node's prefix.
size_t prefix_byte_index = 0;
// Index of the edge from parent to current node.
size_t edge_index = 0;
// Index of the edge from grandparent to parent.
size_t parent_edge_index = 0;
while (current_node.prefix_length () > 0 || current_node.edgecount () > 0) {
const unsigned char *const prefix = current_node.prefix ();
const size_t prefix_length = current_node.prefix_length ();
for (prefix_byte_index = 0;
prefix_byte_index < prefix_length && key_byte_index < key_size_;
++prefix_byte_index, ++key_byte_index) {
if (prefix[prefix_byte_index] != key_[key_byte_index])
break;
}
// Even if a prefix of the key matches and we're doing a
// lookup, this means we've found a matching subscription.
if (is_lookup_ && prefix_byte_index == prefix_length
&& current_node.refcount () > 0) {
key_byte_index = key_size_;
break;
}
// There was a mismatch or we've matched the whole key, so
// there's nothing more to do.
if (prefix_byte_index != prefix_length || key_byte_index == key_size_)
break;
// We need to match the rest of the key. Check if there's an
// outgoing edge from this node.
node_t next_node = current_node;
for (size_t i = 0, edgecount = current_node.edgecount (); i < edgecount;
++i) {
if (current_node.first_byte_at (i) == key_[key_byte_index]) {
parent_edge_index = edge_index;
edge_index = i;
next_node = current_node.node_at (i);
break;
}
}
if (next_node == current_node)
break; // No outgoing edge.
grandparent_node = parent_node;
parent_node = current_node;
current_node = next_node;
}
return match_result_t (key_byte_index, prefix_byte_index, edge_index,
parent_edge_index, current_node, parent_node,
grandparent_node);
}
bool zmq::radix_tree_t::add (const unsigned char *key_, size_t key_size_)
{
const match_result_t match_result = match (key_, key_size_);
const size_t key_bytes_matched = match_result._key_bytes_matched;
const size_t prefix_bytes_matched = match_result._prefix_bytes_matched;
const size_t edge_index = match_result._edge_index;
node_t current_node = match_result._current_node;
node_t parent_node = match_result._parent_node;
if (key_bytes_matched != key_size_) {
// Not all characters match, we might have to split the node.
if (prefix_bytes_matched == current_node.prefix_length ()) {
// The mismatch is at one of the outgoing edges, so we
// create an edge from the current node to a new leaf node
// that has the rest of the key as the prefix.
node_t key_node = make_node (1, key_size_ - key_bytes_matched, 0);
key_node.set_prefix (key_ + key_bytes_matched);
// Reallocate for one more edge.
current_node.resize (current_node.prefix_length (),
current_node.edgecount () + 1);
// Make room for the new edge. We need to shift the chunk
// of node pointers one byte to the right. Since resize()
// increments the edgecount by 1, node_pointers() tells us the
// destination address. The chunk of node pointers starts
// at one byte to the left of this destination.
//
// Since the regions can overlap, we use memmove.
memmove (current_node.node_pointers (),
current_node.node_pointers () - 1,
(current_node.edgecount () - 1) * sizeof (void *));
// Add an edge to the new node.
current_node.set_edge_at (current_node.edgecount () - 1,
key_[key_bytes_matched], key_node);
// We need to update all pointers to the current node
// after the call to resize().
if (current_node.prefix_length () == 0)
_root._data = current_node._data;
else
parent_node.set_node_at (edge_index, current_node);
_size.add (1);
return true;
}
// There was a mismatch, so we need to split this node.
//
// Create two nodes that will be reachable from the parent.
// One node will have the rest of the characters from the key,
// and the other node will have the rest of the characters
// from the current node's prefix.
node_t key_node = make_node (1, key_size_ - key_bytes_matched, 0);
node_t split_node =
make_node (current_node.refcount (),
current_node.prefix_length () - prefix_bytes_matched,
current_node.edgecount ());
// Copy the prefix chunks to the new nodes.
key_node.set_prefix (key_ + key_bytes_matched);
split_node.set_prefix (current_node.prefix () + prefix_bytes_matched);
// Copy the current node's edges to the new node.
split_node.set_first_bytes (current_node.first_bytes ());
split_node.set_node_pointers (current_node.node_pointers ());
// Resize the current node to accommodate a prefix comprising
// the matched characters and 2 outgoing edges to the above
// nodes. Set the refcount to 0 since this node doesn't hold a
// key.
current_node.resize (prefix_bytes_matched, 2);
current_node.set_refcount (0);
// Add links to the new nodes. We don't need to copy the
// prefix since resize() retains it in the current node.
current_node.set_edge_at (0, key_node.prefix ()[0], key_node);
current_node.set_edge_at (1, split_node.prefix ()[0], split_node);
_size.add (1);
parent_node.set_node_at (edge_index, current_node);
return true;
}
// All characters in the key match, but we still might need to split.
if (prefix_bytes_matched != current_node.prefix_length ()) {
// All characters in the key match, but not all characters
// from the current node's prefix match.
// Create a node that contains the rest of the characters from
// the current node's prefix and the outgoing edges from the
// current node.
node_t split_node =
make_node (current_node.refcount (),
current_node.prefix_length () - prefix_bytes_matched,
current_node.edgecount ());
split_node.set_prefix (current_node.prefix () + prefix_bytes_matched);
split_node.set_first_bytes (current_node.first_bytes ());
split_node.set_node_pointers (current_node.node_pointers ());
// Resize the current node to hold only the matched characters
// from its prefix and one edge to the new node.
current_node.resize (prefix_bytes_matched, 1);
// Add an edge to the split node and set the refcount to 1
// since this key wasn't inserted earlier. We don't need to
// set the prefix because the first `prefix_bytes_matched` bytes
// in the prefix are preserved by resize().
current_node.set_edge_at (0, split_node.prefix ()[0], split_node);
current_node.set_refcount (1);
_size.add (1);
parent_node.set_node_at (edge_index, current_node);
return true;
}
zmq_assert (key_bytes_matched == key_size_);
zmq_assert (prefix_bytes_matched == current_node.prefix_length ());
_size.add (1);
current_node.set_refcount (current_node.refcount () + 1);
return current_node.refcount () == 1;
}
bool zmq::radix_tree_t::rm (const unsigned char *key_, size_t key_size_)
{
const match_result_t match_result = match (key_, key_size_);
const size_t key_bytes_matched = match_result._key_bytes_matched;
const size_t prefix_bytes_matched = match_result._prefix_bytes_matched;
const size_t edge_index = match_result._edge_index;
const size_t parent_edge_index = match_result._parent_edge_index;
node_t current_node = match_result._current_node;
node_t parent_node = match_result._parent_node;
node_t grandparent_node = match_result._grandparent_node;
if (key_bytes_matched != key_size_
|| prefix_bytes_matched != current_node.prefix_length ()
|| current_node.refcount () == 0)
return false;
current_node.set_refcount (current_node.refcount () - 1);
_size.sub (1);
if (current_node.refcount () > 0)
return false;
// Don't delete the root node.
if (current_node == _root)
return true;
const size_t outgoing_edges = current_node.edgecount ();
if (outgoing_edges > 1)
// This node can't be merged with any other node, so there's
// nothing more to do.
return true;
if (outgoing_edges == 1) {
// Merge this node with the single child node.
node_t child = current_node.node_at (0);
// Make room for the child node's prefix and edges. We need to
// keep the old prefix length since resize() will overwrite
// it.
const uint32_t old_prefix_length = current_node.prefix_length ();
current_node.resize (old_prefix_length + child.prefix_length (),
child.edgecount ());
// Append the child node's prefix to the current node.
memcpy (current_node.prefix () + old_prefix_length, child.prefix (),
child.prefix_length ());
// Copy the rest of child node's data to the current node.
current_node.set_first_bytes (child.first_bytes ());
current_node.set_node_pointers (child.node_pointers ());
current_node.set_refcount (child.refcount ());
free (child._data);
parent_node.set_node_at (edge_index, current_node);
return true;
}
if (parent_node.edgecount () == 2 && parent_node.refcount () == 0
&& parent_node != _root) {
// Removing this node leaves the parent with one child.
// If the parent doesn't hold a key or if it isn't the root,
// we can merge it with its single child node.
zmq_assert (edge_index < 2);
node_t other_child = parent_node.node_at (!edge_index);
// Make room for the child node's prefix and edges. We need to
// keep the old prefix length since resize() will overwrite
// it.
const uint32_t old_prefix_length = parent_node.prefix_length ();
parent_node.resize (old_prefix_length + other_child.prefix_length (),
other_child.edgecount ());
// Append the child node's prefix to the current node.
memcpy (parent_node.prefix () + old_prefix_length,
other_child.prefix (), other_child.prefix_length ());
// Copy the rest of child node's data to the current node.
parent_node.set_first_bytes (other_child.first_bytes ());
parent_node.set_node_pointers (other_child.node_pointers ());
parent_node.set_refcount (other_child.refcount ());
free (current_node._data);
free (other_child._data);
grandparent_node.set_node_at (parent_edge_index, parent_node);
return true;
}
// This is a leaf node that doesn't leave its parent with one
// outgoing edge. Remove the outgoing edge to this node from the
// parent.
zmq_assert (outgoing_edges == 0);
// Replace the edge to the current node with the last edge. An
// edge consists of a byte and a pointer to the next node. First
// replace the byte.
const size_t last_index = parent_node.edgecount () - 1;
const unsigned char last_byte = parent_node.first_byte_at (last_index);
const node_t last_node = parent_node.node_at (last_index);
parent_node.set_edge_at (edge_index, last_byte, last_node);
// Move the chunk of pointers one byte to the left, effectively
// deleting the last byte in the region of first bytes by
// overwriting it.
memmove (parent_node.node_pointers () - 1, parent_node.node_pointers (),
parent_node.edgecount () * sizeof (void *));
// Shrink the parent node to the new size, which "deletes" the
// last pointer in the chunk of node pointers.
parent_node.resize (parent_node.prefix_length (),
parent_node.edgecount () - 1);
// Nothing points to this node now, so we can reclaim it.
free (current_node._data);
if (parent_node.prefix_length () == 0)
_root._data = parent_node._data;
else
grandparent_node.set_node_at (parent_edge_index, parent_node);
return true;
}
bool zmq::radix_tree_t::check (const unsigned char *key_, size_t key_size_)
{
if (_root.refcount () > 0)
return true;
match_result_t match_result = match (key_, key_size_, true);
return match_result._key_bytes_matched == key_size_
&& match_result._prefix_bytes_matched
== match_result._current_node.prefix_length ()
&& match_result._current_node.refcount () > 0;
}
static void
visit_keys (node_t node_,
std::vector<unsigned char> &buffer_,
void (*func_) (unsigned char *data_, size_t size_, void *arg_),
void *arg_)
{
const size_t prefix_length = node_.prefix_length ();
buffer_.reserve (buffer_.size () + prefix_length);
std::copy (node_.prefix (), node_.prefix () + prefix_length,
std::back_inserter (buffer_));
if (node_.refcount () > 0) {
zmq_assert (!buffer_.empty ());
func_ (&buffer_[0], buffer_.size (), arg_);
}
for (size_t i = 0, edgecount = node_.edgecount (); i < edgecount; ++i) {
visit_keys (node_.node_at (i), buffer_, func_, arg_);
}
buffer_.resize (static_cast<uint32_t> (buffer_.size () - prefix_length));
}
void zmq::radix_tree_t::apply (
void (*func_) (unsigned char *data_, size_t size_, void *arg_), void *arg_)
{
if (_root.refcount () > 0)
func_ (NULL, 0, arg_); // Root node is always empty.
std::vector<unsigned char> buffer;
for (size_t i = 0; i < _root.edgecount (); ++i)
visit_keys (_root.node_at (i), buffer, func_, arg_);
}
size_t zmq::radix_tree_t::size () const
{
return _size.get ();
}
|
sophomore_public/libzmq
|
src/radix_tree.cpp
|
C++
|
gpl-3.0
| 19,811 |
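The size arithmetic in make_node () and resize () above is easier to see in isolation. The following standalone sketch is not part of libzmq (node_size is a hypothetical helper); it simply reproduces the layout documented in radix_tree.hpp: a 12-byte header of three uint32_t fields, followed by the prefix bytes, one first-byte per edge and one child pointer per edge.

#include <cstddef>
#include <cstdint>
#include <cstdio>

static size_t node_size (size_t prefix_length, size_t edgecount)
{
    return 3 * sizeof (uint32_t)                  // refcount, prefix length, edge count
           + prefix_length                        // prefix bytes
           + edgecount * (1 + sizeof (void *));   // first bytes + child pointers
}

int main ()
{
    // A node with prefix "topic/" and two outgoing edges on a 64-bit build:
    // 12 + 6 + 2 * (1 + 8) = 36 bytes.
    std::printf ("%zu\n", node_size (6, 2));
    return 0;
}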
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef RADIX_TREE_HPP
#define RADIX_TREE_HPP
#include <stddef.h>
#include "stdint.hpp"
#include "atomic_counter.hpp"
// Wrapper type for a node's data layout.
//
// There are 3 32-bit unsigned integers that act as a header. These
// integers represent the following values in this order:
//
// (1) The reference count of the key held by the node. This is 0 if
// the node doesn't hold a key.
//
// (2) The number of characters in the node's prefix. The prefix is a
// part of one or more keys in the tree, e.g. the prefix of each node
// in a trie consists of a single character.
//
// (3) The number of outgoing edges from this node.
//
// The rest of the layout consists of 3 chunks in this order:
//
// (1) The node's prefix as a sequence of one or more bytes. The root
// node always has an empty prefix, unlike other nodes in the tree.
//
// (2) The first byte of the prefix of each of this node's children.
//
// (3) The pointer to each child node.
//
// The link to each child is looked up using its index, e.g. the child
// with index 0 will have its first byte and node pointer at the start
// of the chunk of first bytes and node pointers respectively.
struct node_t
{
explicit node_t (unsigned char *data_);
bool operator== (node_t other_) const;
bool operator!= (node_t other_) const;
uint32_t refcount ();
uint32_t prefix_length ();
uint32_t edgecount ();
unsigned char *prefix ();
unsigned char *first_bytes ();
unsigned char first_byte_at (size_t index_);
unsigned char *node_pointers ();
node_t node_at (size_t index_);
void set_refcount (uint32_t value_);
void set_prefix_length (uint32_t value_);
void set_edgecount (uint32_t value_);
void set_prefix (const unsigned char *bytes_);
void set_first_bytes (const unsigned char *bytes_);
void set_first_byte_at (size_t index_, unsigned char byte_);
void set_node_pointers (const unsigned char *pointers_);
void set_node_at (size_t index_, node_t node_);
void set_edge_at (size_t index_, unsigned char first_byte_, node_t node_);
void resize (size_t prefix_length_, size_t edgecount_);
unsigned char *_data;
};
node_t make_node (size_t refcount_, size_t prefix_length_, size_t edgecount_);
struct match_result_t
{
match_result_t (size_t key_bytes_matched_,
size_t prefix_bytes_matched_,
size_t edge_index_,
size_t parent_edge_index_,
node_t current_,
node_t parent_,
node_t grandparent_);
size_t _key_bytes_matched;
size_t _prefix_bytes_matched;
size_t _edge_index;
size_t _parent_edge_index;
node_t _current_node;
node_t _parent_node;
node_t _grandparent_node;
};
namespace zmq
{
class radix_tree_t
{
public:
radix_tree_t ();
~radix_tree_t ();
// Add key to the tree. Returns true if this was a new key rather
// than a duplicate.
bool add (const unsigned char *key_, size_t key_size_);
// Remove key from the tree. Returns true if the item is actually
// removed from the tree.
bool rm (const unsigned char *key_, size_t key_size_);
// Check whether particular key is in the tree.
bool check (const unsigned char *key_, size_t key_size_);
// Apply the function supplied to each key in the tree.
void apply (void (*func_) (unsigned char *data, size_t size, void *arg),
void *arg_);
// Retrieve the size of the radix tree. Note that this function is thread-safe.
size_t size () const;
private:
match_result_t
match (const unsigned char *key_, size_t key_size_, bool is_lookup_) const;
node_t _root;
atomic_counter_t _size;
};
}
#endif
|
sophomore_public/libzmq
|
src/radix_tree.hpp
|
C++
|
gpl-3.0
| 3,793 |
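As a rough illustration of the API declared above, the sketch below exercises add (), check (), rm () and apply () on a single key. It is illustrative only: radix_tree_t is an internal, unexported class, so this would have to be built inside the libzmq source tree (for example as a unit test), and the key and callback names are made up.

#include "radix_tree.hpp"

#include <cstdio>

static void print_key (unsigned char *data_, size_t size_, void *)
{
    std::printf ("%.*s\n", static_cast<int> (size_), data_);
}

int main ()
{
    zmq::radix_tree_t tree;
    const unsigned char key[] = "topic/a";
    const size_t key_len = sizeof (key) - 1;

    bool fresh = tree.add (key, key_len); // true: first reference to this key
    fresh = tree.add (key, key_len);      // false: refcount is now 2
    (void) fresh;

    // check () succeeds because a stored key is a prefix of the argument.
    const unsigned char topic[] = "topic/a/update";
    const bool matched = tree.check (topic, sizeof (topic) - 1); // true
    (void) matched;

    tree.apply (print_key, NULL); // visits "topic/a"

    tree.rm (key, key_len); // false: one reference is still held
    tree.rm (key, key_len); // true: the key is actually removed
    return 0;
}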
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include <stdlib.h>
#if !defined ZMQ_HAVE_WINDOWS
#include <unistd.h>
#endif
#include "random.hpp"
#include "stdint.hpp"
#include "clock.hpp"
#include "mutex.hpp"
#include "macros.hpp"
#if defined(ZMQ_USE_LIBSODIUM)
#include "sodium.h"
#endif
void zmq::seed_random ()
{
#if defined ZMQ_HAVE_WINDOWS
const int pid = static_cast<int> (GetCurrentProcessId ());
#else
int pid = static_cast<int> (getpid ());
#endif
srand (static_cast<unsigned int> (clock_t::now_us () + pid));
}
uint32_t zmq::generate_random ()
{
// Compensate for the fact that rand() returns signed integer.
const uint32_t low = static_cast<uint32_t> (rand ());
uint32_t high = static_cast<uint32_t> (rand ());
high <<= (sizeof (int) * 8 - 1);
return high | low;
}
static void manage_random (bool init_)
{
#if defined(ZMQ_USE_LIBSODIUM)
if (init_) {
// sodium_init() is now documented as thread-safe in recent versions
int rc = sodium_init ();
zmq_assert (rc != -1);
#if defined(ZMQ_LIBSODIUM_RANDOMBYTES_CLOSE)
} else {
// randombytes_close is either a no-op or not thread-safe;
// doing this without refcounting can cause crashes
// if called while a context is active.
randombytes_close ();
#endif
}
#else
LIBZMQ_UNUSED (init_);
#endif
}
void zmq::random_open ()
{
manage_random (true);
}
void zmq::random_close ()
{
manage_random (false);
}
|
sophomore_public/libzmq
|
src/random.cpp
|
C++
|
gpl-3.0
| 1,493 |
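The bit manipulation in generate_random () above is worth a standalone sketch: rand () returns a non-negative int, so a single call yields at most 31 random bits and a second call supplies the missing top bit. The helper below (random_u32 is a hypothetical name) assumes RAND_MAX is at least 2^31 - 1, which the C standard does not guarantee but which holds on common platforms.

#include <cstdint>
#include <cstdio>
#include <cstdlib>

static uint32_t random_u32 ()
{
    const uint32_t low = static_cast<uint32_t> (rand ()); // bits 0..30
    uint32_t high = static_cast<uint32_t> (rand ());
    high <<= 31; // only the lowest bit survives, becoming bit 31
    return high | low;
}

int main ()
{
    srand (42); // stands in for seed_random ()
    std::printf ("0x%08x\n", random_u32 ());
    return 0;
}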
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_RANDOM_HPP_INCLUDED__
#define __ZMQ_RANDOM_HPP_INCLUDED__
#include "stdint.hpp"
namespace zmq
{
// Seeds the random number generator.
void seed_random ();
// Generates random value.
uint32_t generate_random ();
// [De-]Initialise crypto library, if needed.
// Serialised and refcounted, so that it can be called
// from multiple threads, each with its own context, and from
// the various zmq_utils curve functions safely.
void random_open ();
void random_close ();
}
#endif
|
sophomore_public/libzmq
|
src/random.hpp
|
C++
|
gpl-3.0
| 540 |
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include <stdlib.h>
#include <string.h>
#include "raw_decoder.hpp"
#include "err.hpp"
zmq::raw_decoder_t::raw_decoder_t (size_t bufsize_) : _allocator (bufsize_, 1)
{
const int rc = _in_progress.init ();
errno_assert (rc == 0);
}
zmq::raw_decoder_t::~raw_decoder_t ()
{
const int rc = _in_progress.close ();
errno_assert (rc == 0);
}
void zmq::raw_decoder_t::get_buffer (unsigned char **data_, size_t *size_)
{
*data_ = _allocator.allocate ();
*size_ = _allocator.size ();
}
int zmq::raw_decoder_t::decode (const uint8_t *data_,
size_t size_,
size_t &bytes_used_)
{
const int rc =
_in_progress.init (const_cast<unsigned char *> (data_), size_,
shared_message_memory_allocator::call_dec_ref,
_allocator.buffer (), _allocator.provide_content ());
// if the buffer serves as memory for a zero-copy message, release it
// and allocate a new buffer in get_buffer for the next decode
if (_in_progress.is_zcmsg ()) {
_allocator.advance_content ();
_allocator.release ();
}
errno_assert (rc != -1);
bytes_used_ = size_;
return 1;
}
|
sophomore_public/libzmq
|
src/raw_decoder.cpp
|
C++
|
gpl-3.0
| 1,290 |
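The decoder above follows the i_decoder contract: borrow a buffer with get_buffer (), fill it from the wire, call decode (), and then msg () holds the received bytes as a single message. The sketch below shows that flow; it only compiles inside the libzmq source tree because these are internal classes, and feed_bytes () and wire_data are illustrative names.

#include "raw_decoder.hpp"

#include <cstring>

void feed_bytes (zmq::raw_decoder_t &decoder,
                 const unsigned char *wire_data,
                 size_t wire_size)
{
    unsigned char *buf;
    size_t buf_size;
    decoder.get_buffer (&buf, &buf_size); // borrow the decoder's read buffer

    const size_t n = wire_size < buf_size ? wire_size : buf_size;
    memcpy (buf, wire_data, n); // stands in for a read () from the socket

    size_t bytes_used = 0;
    const int rc = decoder.decode (buf, n, bytes_used); // 1 == message ready
    if (rc == 1) {
        // For the raw decoder, every chunk of bytes becomes one message.
        zmq::msg_t *msg = decoder.msg ();
        (void) msg; // an engine would push this to the session here
    }
}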
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_RAW_DECODER_HPP_INCLUDED__
#define __ZMQ_RAW_DECODER_HPP_INCLUDED__
#include "msg.hpp"
#include "i_decoder.hpp"
#include "stdint.hpp"
#include "decoder_allocators.hpp"
namespace zmq
{
// Decoder for 0MQ v1 framing protocol. Converts data stream into messages.
class raw_decoder_t ZMQ_FINAL : public i_decoder
{
public:
raw_decoder_t (size_t bufsize_);
~raw_decoder_t ();
// i_decoder interface.
void get_buffer (unsigned char **data_, size_t *size_);
int decode (const unsigned char *data_, size_t size_, size_t &bytes_used_);
msg_t *msg () { return &_in_progress; }
void resize_buffer (size_t) {}
private:
msg_t _in_progress;
shared_message_memory_allocator _allocator;
ZMQ_NON_COPYABLE_NOR_MOVABLE (raw_decoder_t)
};
}
#endif
|
sophomore_public/libzmq
|
src/raw_decoder.hpp
|
C++
|
gpl-3.0
| 840 |
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include "encoder.hpp"
#include "raw_encoder.hpp"
#include "msg.hpp"
zmq::raw_encoder_t::raw_encoder_t (size_t bufsize_) :
encoder_base_t<raw_encoder_t> (bufsize_)
{
// Write 0 bytes to the batch and go to message_ready state.
next_step (NULL, 0, &raw_encoder_t::raw_message_ready, true);
}
zmq::raw_encoder_t::~raw_encoder_t ()
{
}
void zmq::raw_encoder_t::raw_message_ready ()
{
next_step (in_progress ()->data (), in_progress ()->size (),
&raw_encoder_t::raw_message_ready, true);
}
|
sophomore_public/libzmq
|
src/raw_encoder.cpp
|
C++
|
gpl-3.0
| 588 |
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_RAW_ENCODER_HPP_INCLUDED__
#define __ZMQ_RAW_ENCODER_HPP_INCLUDED__
#include <stddef.h>
#include <string.h>
#include <stdlib.h>
#include "encoder.hpp"
namespace zmq
{
// Encoder for 0MQ framing protocol. Converts messages into data batches.
class raw_encoder_t ZMQ_FINAL : public encoder_base_t<raw_encoder_t>
{
public:
raw_encoder_t (size_t bufsize_);
~raw_encoder_t ();
private:
void raw_message_ready ();
ZMQ_NON_COPYABLE_NOR_MOVABLE (raw_encoder_t)
};
}
#endif
|
sophomore_public/libzmq
|
src/raw_encoder.hpp
|
C++
|
gpl-3.0
| 547 |
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include "macros.hpp"
#include <limits.h>
#include <string.h>
#ifndef ZMQ_HAVE_WINDOWS
#include <unistd.h>
#endif
#include <new>
#include <sstream>
#include "raw_engine.hpp"
#include "io_thread.hpp"
#include "session_base.hpp"
#include "v1_encoder.hpp"
#include "v1_decoder.hpp"
#include "v2_encoder.hpp"
#include "v2_decoder.hpp"
#include "null_mechanism.hpp"
#include "plain_client.hpp"
#include "plain_server.hpp"
#include "gssapi_client.hpp"
#include "gssapi_server.hpp"
#include "curve_client.hpp"
#include "curve_server.hpp"
#include "raw_decoder.hpp"
#include "raw_encoder.hpp"
#include "config.hpp"
#include "err.hpp"
#include "ip.hpp"
#include "tcp.hpp"
#include "likely.hpp"
#include "wire.hpp"
zmq::raw_engine_t::raw_engine_t (
fd_t fd_,
const options_t &options_,
const endpoint_uri_pair_t &endpoint_uri_pair_) :
stream_engine_base_t (fd_, options_, endpoint_uri_pair_, false)
{
}
zmq::raw_engine_t::~raw_engine_t ()
{
}
void zmq::raw_engine_t::plug_internal ()
{
// No handshaking for raw sockets; instantiate the raw encoder and decoder.
_encoder = new (std::nothrow) raw_encoder_t (_options.out_batch_size);
alloc_assert (_encoder);
_decoder = new (std::nothrow) raw_decoder_t (_options.in_batch_size);
alloc_assert (_decoder);
_next_msg = &raw_engine_t::pull_msg_from_session;
_process_msg = static_cast<int (stream_engine_base_t::*) (msg_t *)> (
&raw_engine_t::push_raw_msg_to_session);
properties_t properties;
if (init_properties (properties)) {
// Compile metadata.
zmq_assert (_metadata == NULL);
_metadata = new (std::nothrow) metadata_t (properties);
alloc_assert (_metadata);
}
if (_options.raw_notify) {
// For raw sockets, send an initial 0-length message to the
// application so that it knows a peer has connected.
msg_t connector;
connector.init ();
push_raw_msg_to_session (&connector);
connector.close ();
session ()->flush ();
}
set_pollin ();
set_pollout ();
// Flush all the data that may have been already received downstream.
in_event ();
}
bool zmq::raw_engine_t::handshake ()
{
return true;
}
void zmq::raw_engine_t::error (error_reason_t reason_)
{
if (_options.raw_socket && _options.raw_notify) {
// For raw sockets, send a final 0-length message to the application
// so that it knows the peer has been disconnected.
msg_t terminator;
terminator.init ();
push_raw_msg_to_session (&terminator);
terminator.close ();
}
stream_engine_base_t::error (reason_);
}
int zmq::raw_engine_t::push_raw_msg_to_session (msg_t *msg_)
{
if (_metadata && _metadata != msg_->metadata ())
msg_->set_metadata (_metadata);
return push_msg_to_session (msg_);
}
|
sophomore_public/libzmq
|
src/raw_engine.cpp
|
C++
|
gpl-3.0
| 2,909 |
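From the public API, the raw_notify behaviour above is what a ZMQ_STREAM socket exposes: a zero-length message, prefixed by the peer's routing-id frame, is delivered when a TCP peer connects or disconnects. A minimal sketch with an illustrative endpoint and no error handling; both receives block until a client actually connects.

#include <zmq.h>

int main ()
{
    void *ctx = zmq_ctx_new ();
    void *stream = zmq_socket (ctx, ZMQ_STREAM);
    zmq_bind (stream, "tcp://127.0.0.1:5555");

    unsigned char routing_id[256];
    unsigned char body[256];
    // Frame 1: the routing id identifying the newly connected peer.
    const int id_size = zmq_recv (stream, routing_id, sizeof routing_id, 0);
    // Frame 2: zero bytes -- the connect notification itself.
    const int body_size = zmq_recv (stream, body, sizeof body, 0);
    (void) id_size;
    (void) body_size;

    zmq_close (stream);
    zmq_ctx_term (ctx);
    return 0;
}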
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_RAW_ENGINE_HPP_INCLUDED__
#define __ZMQ_RAW_ENGINE_HPP_INCLUDED__
#include <stddef.h>
#include "fd.hpp"
#include "i_engine.hpp"
#include "io_object.hpp"
#include "i_encoder.hpp"
#include "i_decoder.hpp"
#include "options.hpp"
#include "socket_base.hpp"
#include "metadata.hpp"
#include "msg.hpp"
#include "stream_engine_base.hpp"
namespace zmq
{
// Protocol revisions
class io_thread_t;
class session_base_t;
class mechanism_t;
// This engine handles any socket with SOCK_STREAM semantics,
// e.g. a TCP socket or a UNIX domain socket.
class raw_engine_t ZMQ_FINAL : public stream_engine_base_t
{
public:
raw_engine_t (fd_t fd_,
const options_t &options_,
const endpoint_uri_pair_t &endpoint_uri_pair_);
~raw_engine_t ();
protected:
void error (error_reason_t reason_);
void plug_internal ();
bool handshake ();
private:
int push_raw_msg_to_session (msg_t *msg_);
ZMQ_NON_COPYABLE_NOR_MOVABLE (raw_engine_t)
};
}
#endif
|
sophomore_public/libzmq
|
src/raw_engine.hpp
|
C++
|
gpl-3.0
| 1,056 |
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include "macros.hpp"
#include "reaper.hpp"
#include "socket_base.hpp"
#include "err.hpp"
zmq::reaper_t::reaper_t (class ctx_t *ctx_, uint32_t tid_) :
object_t (ctx_, tid_),
_mailbox_handle (static_cast<poller_t::handle_t> (NULL)),
_poller (NULL),
_sockets (0),
_terminating (false)
{
if (!_mailbox.valid ())
return;
_poller = new (std::nothrow) poller_t (*ctx_);
alloc_assert (_poller);
if (_mailbox.get_fd () != retired_fd) {
_mailbox_handle = _poller->add_fd (_mailbox.get_fd (), this);
_poller->set_pollin (_mailbox_handle);
}
#ifdef HAVE_FORK
_pid = getpid ();
#endif
}
zmq::reaper_t::~reaper_t ()
{
LIBZMQ_DELETE (_poller);
}
zmq::mailbox_t *zmq::reaper_t::get_mailbox ()
{
return &_mailbox;
}
void zmq::reaper_t::start ()
{
zmq_assert (_mailbox.valid ());
// Start the thread.
_poller->start ("Reaper");
}
void zmq::reaper_t::stop ()
{
if (get_mailbox ()->valid ()) {
send_stop ();
}
}
void zmq::reaper_t::in_event ()
{
while (true) {
#ifdef HAVE_FORK
if (unlikely (_pid != getpid ())) {
//printf("zmq::reaper_t::in_event return in child process %d\n", (int)getpid());
return;
}
#endif
// Get the next command. If there is none, exit.
command_t cmd;
const int rc = _mailbox.recv (&cmd, 0);
if (rc != 0 && errno == EINTR)
continue;
if (rc != 0 && errno == EAGAIN)
break;
errno_assert (rc == 0);
// Process the command.
cmd.destination->process_command (cmd);
}
}
void zmq::reaper_t::out_event ()
{
zmq_assert (false);
}
void zmq::reaper_t::timer_event (int)
{
zmq_assert (false);
}
void zmq::reaper_t::process_stop ()
{
_terminating = true;
// If there are no sockets being reaped finish immediately.
if (!_sockets) {
send_done ();
_poller->rm_fd (_mailbox_handle);
_poller->stop ();
}
}
void zmq::reaper_t::process_reap (socket_base_t *socket_)
{
// Add the socket to the poller.
socket_->start_reaping (_poller);
++_sockets;
}
void zmq::reaper_t::process_reaped ()
{
--_sockets;
// If the reaper was already asked to terminate and there are no more sockets,
// finish immediately.
if (!_sockets && _terminating) {
send_done ();
_poller->rm_fd (_mailbox_handle);
_poller->stop ();
}
}
|
sophomore_public/libzmq
|
src/reaper.cpp
|
C++
|
gpl-3.0
| 2,526 |
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_REAPER_HPP_INCLUDED__
#define __ZMQ_REAPER_HPP_INCLUDED__
#include "object.hpp"
#include "mailbox.hpp"
#include "poller.hpp"
#include "i_poll_events.hpp"
namespace zmq
{
class ctx_t;
class socket_base_t;
class reaper_t ZMQ_FINAL : public object_t, public i_poll_events
{
public:
reaper_t (zmq::ctx_t *ctx_, uint32_t tid_);
~reaper_t ();
mailbox_t *get_mailbox ();
void start ();
void stop ();
// i_poll_events implementation.
void in_event ();
void out_event ();
void timer_event (int id_);
private:
// Command handlers.
void process_stop ();
void process_reap (zmq::socket_base_t *socket_);
void process_reaped ();
// Reaper thread accesses incoming commands via this mailbox.
mailbox_t _mailbox;
// Handle associated with the mailbox's file descriptor.
poller_t::handle_t _mailbox_handle;
// I/O multiplexing is performed using a poller object.
poller_t *_poller;
// Number of sockets being reaped at the moment.
int _sockets;
// If true, we were already asked to terminate.
bool _terminating;
#ifdef HAVE_FORK
// the process that created this context. Used to detect forking.
pid_t _pid;
#endif
ZMQ_NON_COPYABLE_NOR_MOVABLE (reaper_t)
};
}
#endif
|
sophomore_public/libzmq
|
src/reaper.hpp
|
C++
|
gpl-3.0
| 1,329 |
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include "rep.hpp"
#include "err.hpp"
#include "msg.hpp"
zmq::rep_t::rep_t (class ctx_t *parent_, uint32_t tid_, int sid_) :
router_t (parent_, tid_, sid_),
_sending_reply (false),
_request_begins (true)
{
options.type = ZMQ_REP;
}
zmq::rep_t::~rep_t ()
{
}
int zmq::rep_t::xsend (msg_t *msg_)
{
// If we are in the middle of receiving a request, we cannot send reply.
if (!_sending_reply) {
errno = EFSM;
return -1;
}
const bool more = (msg_->flags () & msg_t::more) != 0;
// Push message to the reply pipe.
const int rc = router_t::xsend (msg_);
if (rc != 0)
return rc;
// If the reply is complete flip the FSM back to request receiving state.
if (!more)
_sending_reply = false;
return 0;
}
int zmq::rep_t::xrecv (msg_t *msg_)
{
// If we are in middle of sending a reply, we cannot receive next request.
if (_sending_reply) {
errno = EFSM;
return -1;
}
// First thing to do when receiving a request is to copy all the labels
// to the reply pipe.
if (_request_begins) {
while (true) {
int rc = router_t::xrecv (msg_);
if (rc != 0)
return rc;
if ((msg_->flags () & msg_t::more)) {
// Empty message part delimits the traceback stack.
const bool bottom = (msg_->size () == 0);
// Push it to the reply pipe.
rc = router_t::xsend (msg_);
errno_assert (rc == 0);
if (bottom)
break;
} else {
// If the traceback stack is malformed, discard anything
// already sent to pipe (we're at end of invalid message).
rc = router_t::rollback ();
errno_assert (rc == 0);
}
}
_request_begins = false;
}
// Get next message part to return to the user.
const int rc = router_t::xrecv (msg_);
if (rc != 0)
return rc;
// If whole request is read, flip the FSM to reply-sending state.
if (!(msg_->flags () & msg_t::more)) {
_sending_reply = true;
_request_begins = true;
}
return 0;
}
bool zmq::rep_t::xhas_in ()
{
if (_sending_reply)
return false;
return router_t::xhas_in ();
}
bool zmq::rep_t::xhas_out ()
{
if (!_sending_reply)
return false;
return router_t::xhas_out ();
}
|
sophomore_public/libzmq
|
src/rep.cpp
|
C++
|
gpl-3.0
| 2,550 |
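Seen from the public API, the _sending_reply flag above is the REP state machine: a reply may only be sent after a request has been received, otherwise the call fails with EFSM. A minimal self-contained sketch over inproc (the endpoint name is illustrative):

#include <zmq.h>

#include <cassert>

int main ()
{
    void *ctx = zmq_ctx_new ();
    void *rep = zmq_socket (ctx, ZMQ_REP);
    void *req = zmq_socket (ctx, ZMQ_REQ);
    zmq_bind (rep, "inproc://rep-demo");
    zmq_connect (req, "inproc://rep-demo");

    // Sending on REP before any request was received violates the FSM.
    int rc = zmq_send (rep, "reply", 5, ZMQ_DONTWAIT);
    assert (rc == -1 && zmq_errno () == EFSM);

    char buf[16];
    zmq_send (req, "ping", 4, 0);            // REQ issues a request
    rc = zmq_recv (rep, buf, sizeof buf, 0); // REP flips to reply-sending state
    rc = zmq_send (rep, "pong", 4, 0);       // now a reply is legal
    rc = zmq_recv (req, buf, sizeof buf, 0);

    zmq_close (req);
    zmq_close (rep);
    zmq_ctx_term (ctx);
    return 0;
}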
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_REP_HPP_INCLUDED__
#define __ZMQ_REP_HPP_INCLUDED__
#include "router.hpp"
namespace zmq
{
class ctx_t;
class msg_t;
class io_thread_t;
class socket_base_t;
class rep_t ZMQ_FINAL : public router_t
{
public:
rep_t (zmq::ctx_t *parent_, uint32_t tid_, int sid_);
~rep_t ();
// Overrides of functions from socket_base_t.
int xsend (zmq::msg_t *msg_);
int xrecv (zmq::msg_t *msg_);
bool xhas_in ();
bool xhas_out ();
private:
// If true, we are in process of sending the reply. If false we are
// in process of receiving a request.
bool _sending_reply;
// If true, we are starting to receive a request. The beginning
// of the request is the backtrace stack.
bool _request_begins;
ZMQ_NON_COPYABLE_NOR_MOVABLE (rep_t)
};
}
#endif
|
sophomore_public/libzmq
|
src/rep.hpp
|
C++
|
gpl-3.0
| 852 |
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include "macros.hpp"
#include "req.hpp"
#include "err.hpp"
#include "msg.hpp"
#include "wire.hpp"
#include "random.hpp"
#include "likely.hpp"
zmq::req_t::req_t (class ctx_t *parent_, uint32_t tid_, int sid_) :
dealer_t (parent_, tid_, sid_),
_receiving_reply (false),
_message_begins (true),
_reply_pipe (NULL),
_request_id_frames_enabled (false),
_request_id (generate_random ()),
_strict (true)
{
options.type = ZMQ_REQ;
}
zmq::req_t::~req_t ()
{
}
int zmq::req_t::xsend (msg_t *msg_)
{
// If we've sent a request and we still haven't got the reply,
// we can't send another request unless the strict option is disabled.
if (_receiving_reply) {
if (_strict) {
errno = EFSM;
return -1;
}
_receiving_reply = false;
_message_begins = true;
}
// First part of the request is the request routing id.
if (_message_begins) {
_reply_pipe = NULL;
if (_request_id_frames_enabled) {
_request_id++;
msg_t id;
int rc = id.init_size (sizeof (uint32_t));
errno_assert (rc == 0);
memcpy (id.data (), &_request_id, sizeof (uint32_t));
id.set_flags (msg_t::more);
rc = dealer_t::sendpipe (&id, &_reply_pipe);
if (rc != 0) {
return -1;
}
}
msg_t bottom;
int rc = bottom.init ();
errno_assert (rc == 0);
bottom.set_flags (msg_t::more);
rc = dealer_t::sendpipe (&bottom, &_reply_pipe);
if (rc != 0)
return -1;
zmq_assert (_reply_pipe);
_message_begins = false;
// Eat all currently available messages before the request is fully
// sent. This is done to avoid:
// REQ sends request to A, A replies, B replies too.
// A's reply was first and matches, that is used.
// An hour later REQ sends a request to B. B's old reply is used.
msg_t drop;
while (true) {
rc = drop.init ();
errno_assert (rc == 0);
rc = dealer_t::xrecv (&drop);
if (rc != 0)
break;
drop.close ();
}
}
bool more = (msg_->flags () & msg_t::more) != 0;
int rc = dealer_t::xsend (msg_);
if (rc != 0)
return rc;
// If the request was fully sent, flip the FSM into reply-receiving state.
if (!more) {
_receiving_reply = true;
_message_begins = true;
}
return 0;
}
int zmq::req_t::xrecv (msg_t *msg_)
{
// If the request wasn't sent, we can't wait for a reply.
if (!_receiving_reply) {
errno = EFSM;
return -1;
}
// Skip messages until one with the right first frames is found.
while (_message_begins) {
// If enabled, the first frame must have the correct request_id.
if (_request_id_frames_enabled) {
int rc = recv_reply_pipe (msg_);
if (rc != 0)
return rc;
if (unlikely (!(msg_->flags () & msg_t::more)
|| msg_->size () != sizeof (_request_id)
|| *static_cast<uint32_t *> (msg_->data ())
!= _request_id)) {
// Skip the remaining frames and try the next message
while (msg_->flags () & msg_t::more) {
rc = recv_reply_pipe (msg_);
errno_assert (rc == 0);
}
continue;
}
}
// The next frame must be 0.
// TODO: Failing this check should also close the connection with the peer!
int rc = recv_reply_pipe (msg_);
if (rc != 0)
return rc;
if (unlikely (!(msg_->flags () & msg_t::more) || msg_->size () != 0)) {
// Skip the remaining frames and try the next message
while (msg_->flags () & msg_t::more) {
rc = recv_reply_pipe (msg_);
errno_assert (rc == 0);
}
continue;
}
_message_begins = false;
}
const int rc = recv_reply_pipe (msg_);
if (rc != 0)
return rc;
// If the reply is fully received, flip the FSM into request-sending state.
if (!(msg_->flags () & msg_t::more)) {
_receiving_reply = false;
_message_begins = true;
}
return 0;
}
bool zmq::req_t::xhas_in ()
{
// TODO: Duplicates should be removed here.
if (!_receiving_reply)
return false;
return dealer_t::xhas_in ();
}
bool zmq::req_t::xhas_out ()
{
if (_receiving_reply && _strict)
return false;
return dealer_t::xhas_out ();
}
int zmq::req_t::xsetsockopt (int option_,
const void *optval_,
size_t optvallen_)
{
const bool is_int = (optvallen_ == sizeof (int));
int value = 0;
if (is_int)
memcpy (&value, optval_, sizeof (int));
switch (option_) {
case ZMQ_REQ_CORRELATE:
if (is_int && value >= 0) {
_request_id_frames_enabled = (value != 0);
return 0;
}
break;
case ZMQ_REQ_RELAXED:
if (is_int && value >= 0) {
_strict = (value == 0);
return 0;
}
break;
default:
break;
}
return dealer_t::xsetsockopt (option_, optval_, optvallen_);
}
void zmq::req_t::xpipe_terminated (pipe_t *pipe_)
{
if (_reply_pipe == pipe_)
_reply_pipe = NULL;
dealer_t::xpipe_terminated (pipe_);
}
int zmq::req_t::recv_reply_pipe (msg_t *msg_)
{
while (true) {
pipe_t *pipe = NULL;
const int rc = dealer_t::recvpipe (msg_, &pipe);
if (rc != 0)
return rc;
if (!_reply_pipe || pipe == _reply_pipe)
return 0;
}
}
zmq::req_session_t::req_session_t (io_thread_t *io_thread_,
bool connect_,
socket_base_t *socket_,
const options_t &options_,
address_t *addr_) :
session_base_t (io_thread_, connect_, socket_, options_, addr_),
_state (bottom)
{
}
zmq::req_session_t::~req_session_t ()
{
}
int zmq::req_session_t::push_msg (msg_t *msg_)
{
// Ignore commands, they are processed by the engine and should not
// affect the state machine.
if (unlikely (msg_->flags () & msg_t::command))
return 0;
switch (_state) {
case bottom:
if (msg_->flags () == msg_t::more) {
// In case option ZMQ_REQ_CORRELATE is on, allow the request id to be
// transferred as the first frame (it would be too cumbersome to check
// whether the option is actually on or not).
if (msg_->size () == sizeof (uint32_t)) {
_state = request_id;
return session_base_t::push_msg (msg_);
}
if (msg_->size () == 0) {
_state = body;
return session_base_t::push_msg (msg_);
}
}
break;
case request_id:
if (msg_->flags () == msg_t::more && msg_->size () == 0) {
_state = body;
return session_base_t::push_msg (msg_);
}
break;
case body:
if (msg_->flags () == msg_t::more)
return session_base_t::push_msg (msg_);
if (msg_->flags () == 0) {
_state = bottom;
return session_base_t::push_msg (msg_);
}
break;
}
errno = EFAULT;
return -1;
}
void zmq::req_session_t::reset ()
{
session_base_t::reset ();
_state = bottom;
}
|
sophomore_public/libzmq
|
src/req.cpp
|
C++
|
gpl-3.0
| 7,986 |
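The two options handled in xsetsockopt () above are exposed as ZMQ_REQ_RELAXED (allow a new request without waiting for the previous reply) and ZMQ_REQ_CORRELATE (prefix each request with the id frame so stale replies can be discarded). A minimal usage sketch; the endpoint is illustrative and error handling is omitted.

#include <zmq.h>

int main ()
{
    void *ctx = zmq_ctx_new ();
    void *req = zmq_socket (ctx, ZMQ_REQ);

    const int on = 1;
    zmq_setsockopt (req, ZMQ_REQ_RELAXED, &on, sizeof on);
    zmq_setsockopt (req, ZMQ_REQ_CORRELATE, &on, sizeof on);

    zmq_connect (req, "tcp://127.0.0.1:5561");

    // With RELAXED set, a second send no longer fails with EFSM when the
    // first reply never arrived; with CORRELATE set, a late reply to the
    // first request is dropped instead of being mistaken for the second.
    zmq_send (req, "ping", 4, 0);
    zmq_send (req, "ping", 4, 0);

    zmq_close (req);
    zmq_ctx_term (ctx);
    return 0;
}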
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_REQ_HPP_INCLUDED__
#define __ZMQ_REQ_HPP_INCLUDED__
#include "dealer.hpp"
#include "stdint.hpp"
namespace zmq
{
class ctx_t;
class msg_t;
class io_thread_t;
class socket_base_t;
class req_t ZMQ_FINAL : public dealer_t
{
public:
req_t (zmq::ctx_t *parent_, uint32_t tid_, int sid_);
~req_t ();
// Overrides of functions from socket_base_t.
int xsend (zmq::msg_t *msg_);
int xrecv (zmq::msg_t *msg_);
bool xhas_in ();
bool xhas_out ();
int xsetsockopt (int option_, const void *optval_, size_t optvallen_);
void xpipe_terminated (zmq::pipe_t *pipe_);
protected:
// Receive only from the pipe the request was sent to, discarding
// frames from other pipes.
int recv_reply_pipe (zmq::msg_t *msg_);
private:
// If true, request was already sent and reply wasn't received yet or
// was received partially.
bool _receiving_reply;
// If true, we are starting to send/recv a message. The first part
// of the message must be empty message part (backtrace stack bottom).
bool _message_begins;
// The pipe the request was sent to and where the reply is expected.
zmq::pipe_t *_reply_pipe;
// Whether request id frames shall be sent and expected.
bool _request_id_frames_enabled;
// The current request id. It is incremented every time before a new
// request is sent.
uint32_t _request_id;
// If false, send() will reset its internal state and terminate the
// reply_pipe's connection instead of failing if a previous request is
// still pending.
bool _strict;
ZMQ_NON_COPYABLE_NOR_MOVABLE (req_t)
};
class req_session_t ZMQ_FINAL : public session_base_t
{
public:
req_session_t (zmq::io_thread_t *io_thread_,
bool connect_,
zmq::socket_base_t *socket_,
const options_t &options_,
address_t *addr_);
~req_session_t ();
// Overrides of the functions from session_base_t.
int push_msg (msg_t *msg_);
void reset ();
private:
enum
{
bottom,
request_id,
body
} _state;
ZMQ_NON_COPYABLE_NOR_MOVABLE (req_session_t)
};
}
#endif
|
sophomore_public/libzmq
|
src/req.hpp
|
C++
|
gpl-3.0
| 2,262 |
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include "macros.hpp"
#include "router.hpp"
#include "pipe.hpp"
#include "wire.hpp"
#include "random.hpp"
#include "likely.hpp"
#include "err.hpp"
zmq::router_t::router_t (class ctx_t *parent_, uint32_t tid_, int sid_) :
routing_socket_base_t (parent_, tid_, sid_),
_prefetched (false),
_routing_id_sent (false),
_current_in (NULL),
_terminate_current_in (false),
_more_in (false),
_current_out (NULL),
_more_out (false),
_next_integral_routing_id (generate_random ()),
_mandatory (false),
// raw_socket functionality in ROUTER is deprecated
_raw_socket (false),
_probe_router (false),
_handover (false)
{
options.type = ZMQ_ROUTER;
options.recv_routing_id = true;
options.raw_socket = false;
options.can_send_hello_msg = true;
options.can_recv_disconnect_msg = true;
_prefetched_id.init ();
_prefetched_msg.init ();
}
zmq::router_t::~router_t ()
{
zmq_assert (_anonymous_pipes.empty ());
_prefetched_id.close ();
_prefetched_msg.close ();
}
void zmq::router_t::xattach_pipe (pipe_t *pipe_,
bool subscribe_to_all_,
bool locally_initiated_)
{
LIBZMQ_UNUSED (subscribe_to_all_);
zmq_assert (pipe_);
if (_probe_router) {
msg_t probe_msg;
int rc = probe_msg.init ();
errno_assert (rc == 0);
rc = pipe_->write (&probe_msg);
// zmq_assert (rc) is not applicable here, since it is not a bug.
LIBZMQ_UNUSED (rc);
pipe_->flush ();
rc = probe_msg.close ();
errno_assert (rc == 0);
}
const bool routing_id_ok = identify_peer (pipe_, locally_initiated_);
if (routing_id_ok)
_fq.attach (pipe_);
else
_anonymous_pipes.insert (pipe_);
}
int zmq::router_t::xsetsockopt (int option_,
const void *optval_,
size_t optvallen_)
{
const bool is_int = (optvallen_ == sizeof (int));
int value = 0;
if (is_int)
memcpy (&value, optval_, sizeof (int));
switch (option_) {
case ZMQ_ROUTER_RAW:
if (is_int && value >= 0) {
_raw_socket = (value != 0);
if (_raw_socket) {
options.recv_routing_id = false;
options.raw_socket = true;
}
return 0;
}
break;
case ZMQ_ROUTER_MANDATORY:
if (is_int && value >= 0) {
_mandatory = (value != 0);
return 0;
}
break;
case ZMQ_PROBE_ROUTER:
if (is_int && value >= 0) {
_probe_router = (value != 0);
return 0;
}
break;
case ZMQ_ROUTER_HANDOVER:
if (is_int && value >= 0) {
_handover = (value != 0);
return 0;
}
break;
#ifdef ZMQ_BUILD_DRAFT_API
case ZMQ_ROUTER_NOTIFY:
if (is_int && value >= 0
&& value <= (ZMQ_NOTIFY_CONNECT | ZMQ_NOTIFY_DISCONNECT)) {
options.router_notify = value;
return 0;
}
break;
#endif
default:
return routing_socket_base_t::xsetsockopt (option_, optval_,
optvallen_);
}
errno = EINVAL;
return -1;
}
void zmq::router_t::xpipe_terminated (pipe_t *pipe_)
{
if (0 == _anonymous_pipes.erase (pipe_)) {
erase_out_pipe (pipe_);
_fq.pipe_terminated (pipe_);
pipe_->rollback ();
if (pipe_ == _current_out)
_current_out = NULL;
}
}
void zmq::router_t::xread_activated (pipe_t *pipe_)
{
const std::set<pipe_t *>::iterator it = _anonymous_pipes.find (pipe_);
if (it == _anonymous_pipes.end ())
_fq.activated (pipe_);
else {
const bool routing_id_ok = identify_peer (pipe_, false);
if (routing_id_ok) {
_anonymous_pipes.erase (it);
_fq.attach (pipe_);
}
}
}
int zmq::router_t::xsend (msg_t *msg_)
{
// If this is the first part of the message it's the ID of the
// peer to send the message to.
if (!_more_out) {
zmq_assert (!_current_out);
// If we have a malformed message (a prefix with no subsequent message)
// then just silently ignore it.
// TODO: The connections should be killed instead.
if (msg_->flags () & msg_t::more) {
_more_out = true;
// Find the pipe associated with the routing id stored in the prefix.
// If there's no such pipe just silently ignore the message, unless
// router_mandatory is set.
out_pipe_t *out_pipe = lookup_out_pipe (
blob_t (static_cast<unsigned char *> (msg_->data ()),
msg_->size (), zmq::reference_tag_t ()));
if (out_pipe) {
_current_out = out_pipe->pipe;
// Check whether pipe is closed or not
if (!_current_out->check_write ()) {
// Check whether pipe is full or not
const bool pipe_full = !_current_out->check_hwm ();
out_pipe->active = false;
_current_out = NULL;
if (_mandatory) {
_more_out = false;
if (pipe_full)
errno = EAGAIN;
else
errno = EHOSTUNREACH;
return -1;
}
}
} else if (_mandatory) {
_more_out = false;
errno = EHOSTUNREACH;
return -1;
}
}
int rc = msg_->close ();
errno_assert (rc == 0);
rc = msg_->init ();
errno_assert (rc == 0);
return 0;
}
// Ignore the MORE flag for raw-sock or assert?
if (options.raw_socket)
msg_->reset_flags (msg_t::more);
// Check whether this is the last part of the message.
_more_out = (msg_->flags () & msg_t::more) != 0;
// Push the message into the pipe. If there's no out pipe, just drop it.
if (_current_out) {
// Close the remote connection if user has asked to do so
// by sending zero length message.
// Pending messages in the pipe will be dropped (on receiving term- ack)
if (_raw_socket && msg_->size () == 0) {
_current_out->terminate (false);
int rc = msg_->close ();
errno_assert (rc == 0);
rc = msg_->init ();
errno_assert (rc == 0);
_current_out = NULL;
return 0;
}
const bool ok = _current_out->write (msg_);
if (unlikely (!ok)) {
// Message failed to send - we must close it ourselves.
const int rc = msg_->close ();
errno_assert (rc == 0);
// HWM was checked before, so the pipe must be gone. Roll back
// messages that were piped, for example REP labels.
_current_out->rollback ();
_current_out = NULL;
} else {
if (!_more_out) {
_current_out->flush ();
_current_out = NULL;
}
}
} else {
const int rc = msg_->close ();
errno_assert (rc == 0);
}
// Detach the message from the data buffer.
const int rc = msg_->init ();
errno_assert (rc == 0);
return 0;
}
int zmq::router_t::xrecv (msg_t *msg_)
{
if (_prefetched) {
if (!_routing_id_sent) {
const int rc = msg_->move (_prefetched_id);
errno_assert (rc == 0);
_routing_id_sent = true;
} else {
const int rc = msg_->move (_prefetched_msg);
errno_assert (rc == 0);
_prefetched = false;
}
_more_in = (msg_->flags () & msg_t::more) != 0;
if (!_more_in) {
if (_terminate_current_in) {
_current_in->terminate (true);
_terminate_current_in = false;
}
_current_in = NULL;
}
return 0;
}
pipe_t *pipe = NULL;
int rc = _fq.recvpipe (msg_, &pipe);
// It's possible that we receive the peer's routing id. That happens
// after reconnection. The current implementation assumes that
// the peer always uses the same routing id.
while (rc == 0 && msg_->is_routing_id ())
rc = _fq.recvpipe (msg_, &pipe);
if (rc != 0)
return -1;
zmq_assert (pipe != NULL);
// If we are in the middle of reading a message, just return the next part.
if (_more_in) {
_more_in = (msg_->flags () & msg_t::more) != 0;
if (!_more_in) {
if (_terminate_current_in) {
_current_in->terminate (true);
_terminate_current_in = false;
}
_current_in = NULL;
}
} else {
// We are at the beginning of a message.
// Keep the message part we have in the prefetch buffer
// and return the ID of the peer instead.
rc = _prefetched_msg.move (*msg_);
errno_assert (rc == 0);
_prefetched = true;
_current_in = pipe;
const blob_t &routing_id = pipe->get_routing_id ();
rc = msg_->init_size (routing_id.size ());
errno_assert (rc == 0);
memcpy (msg_->data (), routing_id.data (), routing_id.size ());
msg_->set_flags (msg_t::more);
if (_prefetched_msg.metadata ())
msg_->set_metadata (_prefetched_msg.metadata ());
_routing_id_sent = true;
}
return 0;
}
int zmq::router_t::rollback ()
{
if (_current_out) {
_current_out->rollback ();
_current_out = NULL;
_more_out = false;
}
return 0;
}
bool zmq::router_t::xhas_in ()
{
// If we are in the middle of reading the messages, there are
// definitely more parts available.
if (_more_in)
return true;
// We may already have a message pre-fetched.
if (_prefetched)
return true;
// Try to read the next message.
// The message, if read, is kept in the pre-fetch buffer.
pipe_t *pipe = NULL;
int rc = _fq.recvpipe (&_prefetched_msg, &pipe);
// It's possible that we receive the peer's routing id. That happens
// after reconnection. The current implementation assumes that
// the peer always uses the same routing id.
// TODO: handle the situation when the peer changes its routing id.
while (rc == 0 && _prefetched_msg.is_routing_id ())
rc = _fq.recvpipe (&_prefetched_msg, &pipe);
if (rc != 0)
return false;
zmq_assert (pipe != NULL);
const blob_t &routing_id = pipe->get_routing_id ();
rc = _prefetched_id.init_size (routing_id.size ());
errno_assert (rc == 0);
memcpy (_prefetched_id.data (), routing_id.data (), routing_id.size ());
_prefetched_id.set_flags (msg_t::more);
if (_prefetched_msg.metadata ())
_prefetched_id.set_metadata (_prefetched_msg.metadata ());
_prefetched = true;
_routing_id_sent = false;
_current_in = pipe;
return true;
}
static bool check_pipe_hwm (const zmq::pipe_t &pipe_)
{
return pipe_.check_hwm ();
}
bool zmq::router_t::xhas_out ()
{
// In theory, ROUTER socket is always ready for writing (except when
// MANDATORY is set). Whether actual attempt to write succeeds depends
// on which pipe the message is going to be routed to.
if (!_mandatory)
return true;
return any_of_out_pipes (check_pipe_hwm);
}
int zmq::router_t::get_peer_state (const void *routing_id_,
size_t routing_id_size_) const
{
int res = 0;
// TODO remove the const_cast, see comment in lookup_out_pipe
const blob_t routing_id_blob (
static_cast<unsigned char *> (const_cast<void *> (routing_id_)),
routing_id_size_, reference_tag_t ());
const out_pipe_t *out_pipe = lookup_out_pipe (routing_id_blob);
if (!out_pipe) {
errno = EHOSTUNREACH;
return -1;
}
if (out_pipe->pipe->check_hwm ())
res |= ZMQ_POLLOUT;
/** \todo does it make any sense to check the inpipe as well? */
return res;
}
bool zmq::router_t::identify_peer (pipe_t *pipe_, bool locally_initiated_)
{
msg_t msg;
blob_t routing_id;
if (locally_initiated_ && connect_routing_id_is_set ()) {
const std::string connect_routing_id = extract_connect_routing_id ();
routing_id.set (
reinterpret_cast<const unsigned char *> (connect_routing_id.c_str ()),
connect_routing_id.length ());
// Not allowed to duplicate an existing rid
zmq_assert (!has_out_pipe (routing_id));
} else if (
options
.raw_socket) { // Always assign an integral routing id for raw-socket
unsigned char buf[5];
buf[0] = 0;
put_uint32 (buf + 1, _next_integral_routing_id++);
routing_id.set (buf, sizeof buf);
} else if (!options.raw_socket) {
// Pick up handshake cases and also case where next integral routing id is set
msg.init ();
const bool ok = pipe_->read (&msg);
if (!ok)
return false;
if (msg.size () == 0) {
// Fall back on the auto-generation
unsigned char buf[5];
buf[0] = 0;
put_uint32 (buf + 1, _next_integral_routing_id++);
routing_id.set (buf, sizeof buf);
msg.close ();
} else {
routing_id.set (static_cast<unsigned char *> (msg.data ()),
msg.size ());
msg.close ();
// Try to remove an existing routing id entry to allow the new
// connection to take the routing id.
const out_pipe_t *const existing_outpipe =
lookup_out_pipe (routing_id);
if (existing_outpipe) {
if (!_handover)
// Ignore peers with duplicate ID
return false;
// We will allow the new connection to take over this
// routing id. Temporarily assign a new routing id to the
// existing pipe so we can terminate it asynchronously.
unsigned char buf[5];
buf[0] = 0;
put_uint32 (buf + 1, _next_integral_routing_id++);
blob_t new_routing_id (buf, sizeof buf);
pipe_t *const old_pipe = existing_outpipe->pipe;
erase_out_pipe (old_pipe);
old_pipe->set_router_socket_routing_id (new_routing_id);
add_out_pipe (ZMQ_MOVE (new_routing_id), old_pipe);
if (old_pipe == _current_in)
_terminate_current_in = true;
else
old_pipe->terminate (true);
}
}
}
pipe_->set_router_socket_routing_id (routing_id);
add_out_pipe (ZMQ_MOVE (routing_id), pipe_);
return true;
}
|
sophomore_public/libzmq
|
src/router.cpp
|
C++
|
gpl-3.0
| 15,340 |
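The _mandatory flag above corresponds to the public ZMQ_ROUTER_MANDATORY option: routing to an unknown peer then fails with EHOSTUNREACH (and a peer over its high-water mark reports EAGAIN) instead of the message being dropped silently. A minimal sketch with an illustrative routing id and endpoint:

#include <zmq.h>

#include <cassert>

int main ()
{
    void *ctx = zmq_ctx_new ();
    void *router = zmq_socket (ctx, ZMQ_ROUTER);

    const int mandatory = 1;
    zmq_setsockopt (router, ZMQ_ROUTER_MANDATORY, &mandatory,
                    sizeof mandatory);
    zmq_bind (router, "tcp://127.0.0.1:5562");

    // The first frame addresses the peer; no peer named "unknown" exists.
    const int rc = zmq_send (router, "unknown", 7, ZMQ_SNDMORE | ZMQ_DONTWAIT);
    assert (rc == -1 && zmq_errno () == EHOSTUNREACH);

    zmq_close (router);
    zmq_ctx_term (ctx);
    return 0;
}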
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_ROUTER_HPP_INCLUDED__
#define __ZMQ_ROUTER_HPP_INCLUDED__
#include <map>
#include "socket_base.hpp"
#include "session_base.hpp"
#include "stdint.hpp"
#include "blob.hpp"
#include "msg.hpp"
#include "fq.hpp"
namespace zmq
{
class ctx_t;
class pipe_t;
// TODO: This class uses O(n) scheduling. Rewrite it to use O(1) algorithm.
class router_t : public routing_socket_base_t
{
public:
router_t (zmq::ctx_t *parent_, uint32_t tid_, int sid_);
~router_t () ZMQ_OVERRIDE;
// Overrides of functions from socket_base_t.
void xattach_pipe (zmq::pipe_t *pipe_,
bool subscribe_to_all_,
bool locally_initiated_) ZMQ_FINAL;
int
xsetsockopt (int option_, const void *optval_, size_t optvallen_) ZMQ_FINAL;
int xsend (zmq::msg_t *msg_) ZMQ_OVERRIDE;
int xrecv (zmq::msg_t *msg_) ZMQ_OVERRIDE;
bool xhas_in () ZMQ_OVERRIDE;
bool xhas_out () ZMQ_OVERRIDE;
void xread_activated (zmq::pipe_t *pipe_) ZMQ_FINAL;
void xpipe_terminated (zmq::pipe_t *pipe_) ZMQ_FINAL;
int get_peer_state (const void *routing_id_,
size_t routing_id_size_) const ZMQ_FINAL;
protected:
// Rollback any message parts that were sent but not yet flushed.
int rollback ();
private:
// Receive peer id and update lookup map
bool identify_peer (pipe_t *pipe_, bool locally_initiated_);
// Fair queueing object for inbound pipes.
fq_t _fq;
// True iff there is a message held in the pre-fetch buffer.
bool _prefetched;
// If true, the receiver got the message part with
// the peer's identity.
bool _routing_id_sent;
// Holds the prefetched identity.
msg_t _prefetched_id;
// Holds the prefetched message.
msg_t _prefetched_msg;
// The pipe we are currently reading from
zmq::pipe_t *_current_in;
// Should _current_in be terminated after all parts have been received?
bool _terminate_current_in;
// If true, more incoming message parts are expected.
bool _more_in;
// We keep a set of pipes that have not been identified yet.
std::set<pipe_t *> _anonymous_pipes;
// The pipe we are currently writing to.
zmq::pipe_t *_current_out;
// If true, more outgoing message parts are expected.
bool _more_out;
// Routing IDs are generated. It's a simple increment and wrap-over
// algorithm. This value is the next ID to use (if not used already).
uint32_t _next_integral_routing_id;
// If true, report EAGAIN to the caller instead of silently dropping
// the message targeting an unknown peer.
bool _mandatory;
bool _raw_socket;
// if true, send an empty message to every connected router peer
bool _probe_router;
// If true, the router will reassign an identity upon encountering a
// name collision. The new pipe will take the identity, the old pipe
// will be terminated.
bool _handover;
ZMQ_NON_COPYABLE_NOR_MOVABLE (router_t)
};
}
#endif
|
sophomore_public/libzmq
|
src/router.hpp
|
C++
|
gpl-3.0
| 3,070 |
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include "macros.hpp"
#include "scatter.hpp"
#include "pipe.hpp"
#include "err.hpp"
#include "msg.hpp"
zmq::scatter_t::scatter_t (class ctx_t *parent_, uint32_t tid_, int sid_) :
socket_base_t (parent_, tid_, sid_, true)
{
options.type = ZMQ_SCATTER;
}
zmq::scatter_t::~scatter_t ()
{
}
void zmq::scatter_t::xattach_pipe (pipe_t *pipe_,
bool subscribe_to_all_,
bool locally_initiated_)
{
LIBZMQ_UNUSED (subscribe_to_all_);
LIBZMQ_UNUSED (locally_initiated_);
// Don't delay pipe termination as there is no one
// to receive the delimiter.
pipe_->set_nodelay ();
zmq_assert (pipe_);
_lb.attach (pipe_);
}
void zmq::scatter_t::xwrite_activated (pipe_t *pipe_)
{
_lb.activated (pipe_);
}
void zmq::scatter_t::xpipe_terminated (pipe_t *pipe_)
{
_lb.pipe_terminated (pipe_);
}
int zmq::scatter_t::xsend (msg_t *msg_)
{
// SCATTER sockets do not allow multipart data (ZMQ_SNDMORE)
if (msg_->flags () & msg_t::more) {
errno = EINVAL;
return -1;
}
return _lb.send (msg_);
}
bool zmq::scatter_t::xhas_out ()
{
return _lb.has_out ();
}
|
sophomore_public/libzmq
|
src/scatter.cpp
|
C++
|
gpl-3.0
| 1,256 |
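From the public API, the single-part rule in xsend () above means a ZMQ_SCATTER socket rejects ZMQ_SNDMORE with EINVAL. ZMQ_SCATTER is a DRAFT socket type, so the sketch below assumes a libzmq build with draft APIs enabled; the endpoint is illustrative.

#define ZMQ_BUILD_DRAFT_API // the library must also be built with draft APIs
#include <zmq.h>

#include <cassert>

int main ()
{
    void *ctx = zmq_ctx_new ();
    void *scatter = zmq_socket (ctx, ZMQ_SCATTER);
    zmq_bind (scatter, "inproc://jobs");

    // Multipart sends are rejected outright...
    int rc = zmq_send (scatter, "part", 4, ZMQ_SNDMORE);
    assert (rc == -1 && zmq_errno () == EINVAL);

    // ...single-part messages are load-balanced over connected GATHER peers
    // (this returns EAGAIN here, since no peer is connected yet).
    rc = zmq_send (scatter, "job", 3, ZMQ_DONTWAIT);

    zmq_close (scatter);
    zmq_ctx_term (ctx);
    return 0;
}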
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_SCATTER_HPP_INCLUDED__
#define __ZMQ_SCATTER_HPP_INCLUDED__
#include "socket_base.hpp"
#include "session_base.hpp"
#include "lb.hpp"
namespace zmq
{
class ctx_t;
class pipe_t;
class msg_t;
class io_thread_t;
class scatter_t ZMQ_FINAL : public socket_base_t
{
public:
scatter_t (zmq::ctx_t *parent_, uint32_t tid_, int sid_);
~scatter_t ();
protected:
// Overrides of functions from socket_base_t.
void xattach_pipe (zmq::pipe_t *pipe_,
bool subscribe_to_all_,
bool locally_initiated_);
int xsend (zmq::msg_t *msg_);
bool xhas_out ();
void xwrite_activated (zmq::pipe_t *pipe_);
void xpipe_terminated (zmq::pipe_t *pipe_);
private:
// Load balancer managing the outbound pipes.
lb_t _lb;
ZMQ_NON_COPYABLE_NOR_MOVABLE (scatter_t)
};
}
#endif
|
sophomore_public/libzmq
|
src/scatter.hpp
|
C++
|
gpl-3.0
| 899 |
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_SECURE_ALLOCATOR_HPP_INCLUDED__
#define __ZMQ_SECURE_ALLOCATOR_HPP_INCLUDED__
#include "platform.hpp"
#include "macros.hpp"
#ifdef ZMQ_HAVE_CURVE
#if defined(ZMQ_USE_LIBSODIUM)
#include "sodium.h"
#endif
#include <memory>
namespace zmq
{
#if defined(ZMQ_USE_LIBSODIUM)
template <class T> struct secure_allocator_t
{
typedef T value_type;
secure_allocator_t () ZMQ_DEFAULT;
template <class U>
secure_allocator_t (const secure_allocator_t<U> &) ZMQ_NOEXCEPT
{
}
T *allocate (std::size_t n) ZMQ_NOEXCEPT
{
T *res = static_cast<T *> (sodium_allocarray (sizeof (T), n));
alloc_assert (res);
return res;
}
void deallocate (T *p, std::size_t) ZMQ_NOEXCEPT
{
if (p)
sodium_free (p);
}
// the following is only required with C++98
// TODO maybe make this conditionally compiled
typedef T *pointer;
typedef const T *const_pointer;
typedef T &reference;
typedef const T &const_reference;
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
template <class U> struct rebind
{
typedef secure_allocator_t<U> other;
};
void construct (pointer p, const_reference val)
{
new ((void *) p) value_type (val);
}
void destroy (pointer p) { p->~value_type (); }
size_type max_size () const { return SIZE_MAX; }
};
template <class T, class U>
bool operator== (const secure_allocator_t<T> &, const secure_allocator_t<U> &)
{
return true;
}
template <class T, class U>
bool operator!= (const secure_allocator_t<T> &, const secure_allocator_t<U> &)
{
return false;
}
#else
template <typename T> struct secure_allocator_t : std::allocator<T>
{
secure_allocator_t () ZMQ_DEFAULT;
template <class U>
secure_allocator_t (const secure_allocator_t<U> &) ZMQ_NOEXCEPT
{
}
template <class U> struct rebind
{
typedef secure_allocator_t<U> other;
};
};
#endif
}
#endif
#endif
|
sophomore_public/libzmq
|
src/secure_allocator.hpp
|
C++
|
gpl-3.0
| 2,045 |
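A rough sketch of using the allocator above for key material: with libsodium (ZMQ_USE_LIBSODIUM) the storage comes from sodium_allocarray (), so it sits in guarded pages and is wiped on deallocation; otherwise it degrades to a plain std::allocator. This assumes the libzmq source tree include path and a CURVE-enabled build; handle_secret_key () is an illustrative name.

#include "platform.hpp"
#include "secure_allocator.hpp"

#ifdef ZMQ_HAVE_CURVE
#include <vector>

void handle_secret_key ()
{
    // A 32-byte CURVE secret key kept in protected memory while in scope.
    std::vector<unsigned char, zmq::secure_allocator_t<unsigned char> >
      secret_key (32);
    // ... fill secret_key and use it ...
}
#endif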
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include "select.hpp"
#if defined ZMQ_IOTHREAD_POLLER_USE_SELECT
#if defined ZMQ_HAVE_WINDOWS
#elif defined ZMQ_HAVE_HPUX
#include <sys/param.h>
#include <sys/types.h>
#include <sys/time.h>
#elif defined ZMQ_HAVE_OPENVMS
#include <sys/types.h>
#include <sys/time.h>
#elif defined ZMQ_HAVE_VXWORKS
#include <sys/types.h>
#include <sys/time.h>
#include <strings.h>
#else
#include <sys/select.h>
#endif
#include "err.hpp"
#include "config.hpp"
#include "i_poll_events.hpp"
#include <algorithm>
#include <limits>
#include <climits>
zmq::select_t::select_t (const zmq::thread_ctx_t &ctx_) :
worker_poller_base_t (ctx_),
#if defined ZMQ_HAVE_WINDOWS
// Fine as long as map is not cleared.
_current_family_entry_it (_family_entries.end ())
#else
_max_fd (retired_fd)
#endif
{
#if defined ZMQ_HAVE_WINDOWS
for (size_t i = 0; i < fd_family_cache_size; ++i)
_fd_family_cache[i] = std::make_pair (retired_fd, 0);
#endif
}
zmq::select_t::~select_t ()
{
stop_worker ();
}
zmq::select_t::handle_t zmq::select_t::add_fd (fd_t fd_, i_poll_events *events_)
{
check_thread ();
zmq_assert (fd_ != retired_fd);
fd_entry_t fd_entry;
fd_entry.fd = fd_;
fd_entry.events = events_;
#if defined ZMQ_HAVE_WINDOWS
u_short family = get_fd_family (fd_);
wsa_assert (family != AF_UNSPEC);
family_entry_t &family_entry = _family_entries[family];
#else
family_entry_t &family_entry = _family_entry;
#endif
family_entry.fd_entries.push_back (fd_entry);
FD_SET (fd_, &family_entry.fds_set.error);
#if !defined ZMQ_HAVE_WINDOWS
if (fd_ > _max_fd)
_max_fd = fd_;
#endif
adjust_load (1);
return fd_;
}
zmq::select_t::fd_entries_t::iterator
zmq::select_t::find_fd_entry_by_handle (fd_entries_t &fd_entries_,
handle_t handle_)
{
fd_entries_t::iterator fd_entry_it;
for (fd_entry_it = fd_entries_.begin (); fd_entry_it != fd_entries_.end ();
++fd_entry_it)
if (fd_entry_it->fd == handle_)
break;
return fd_entry_it;
}
void zmq::select_t::trigger_events (const fd_entries_t &fd_entries_,
const fds_set_t &local_fds_set_,
int event_count_)
{
// Size is cached to avoid iteration through recently added descriptors.
for (fd_entries_t::size_type i = 0, size = fd_entries_.size ();
i < size && event_count_ > 0; ++i) {
// fd_entries_[i] may not be stored, since calls to
// in_event/out_event may reallocate the vector
if (is_retired_fd (fd_entries_[i]))
continue;
if (FD_ISSET (fd_entries_[i].fd, &local_fds_set_.read)) {
fd_entries_[i].events->in_event ();
--event_count_;
}
// TODO: can the is_retired_fd be true at this point? if it
// was retired before, we would already have continued, and I
// don't see where it might have been modified
// And if rc == 0, we can break instead of continuing
if (is_retired_fd (fd_entries_[i]) || event_count_ == 0)
continue;
if (FD_ISSET (fd_entries_[i].fd, &local_fds_set_.write)) {
fd_entries_[i].events->out_event ();
--event_count_;
}
// TODO: same as above
if (is_retired_fd (fd_entries_[i]) || event_count_ == 0)
continue;
if (FD_ISSET (fd_entries_[i].fd, &local_fds_set_.error)) {
fd_entries_[i].events->in_event ();
--event_count_;
}
}
}
#if defined ZMQ_HAVE_WINDOWS
int zmq::select_t::try_retire_fd_entry (
family_entries_t::iterator family_entry_it_, zmq::fd_t &handle_)
{
family_entry_t &family_entry = family_entry_it_->second;
fd_entries_t::iterator fd_entry_it =
find_fd_entry_by_handle (family_entry.fd_entries, handle_);
if (fd_entry_it == family_entry.fd_entries.end ())
return 0;
fd_entry_t &fd_entry = *fd_entry_it;
zmq_assert (fd_entry.fd != retired_fd);
if (family_entry_it_ != _current_family_entry_it) {
// Family is not currently being iterated and can be safely
// modified in-place. So later it can be skipped without
// re-verifying its content.
family_entry.fd_entries.erase (fd_entry_it);
} else {
// Otherwise mark removed entries as retired. It will be cleaned up
// at the end of the iteration. See zmq::select_t::loop
fd_entry.fd = retired_fd;
family_entry.has_retired = true;
}
family_entry.fds_set.remove_fd (handle_);
return 1;
}
#endif
void zmq::select_t::rm_fd (handle_t handle_)
{
check_thread ();
int retired = 0;
#if defined ZMQ_HAVE_WINDOWS
u_short family = get_fd_family (handle_);
if (family != AF_UNSPEC) {
family_entries_t::iterator family_entry_it =
_family_entries.find (family);
retired += try_retire_fd_entry (family_entry_it, handle_);
} else {
// get_fd_family may fail and return AF_UNSPEC if the socket was not
// successfully connected. In that case, we need to look for the
// socket in all family_entries.
family_entries_t::iterator end = _family_entries.end ();
for (family_entries_t::iterator family_entry_it =
_family_entries.begin ();
family_entry_it != end; ++family_entry_it) {
if (retired += try_retire_fd_entry (family_entry_it, handle_)) {
break;
}
}
}
#else
fd_entries_t::iterator fd_entry_it =
find_fd_entry_by_handle (_family_entry.fd_entries, handle_);
assert (fd_entry_it != _family_entry.fd_entries.end ());
zmq_assert (fd_entry_it->fd != retired_fd);
fd_entry_it->fd = retired_fd;
_family_entry.fds_set.remove_fd (handle_);
++retired;
if (handle_ == _max_fd) {
_max_fd = retired_fd;
for (fd_entry_it = _family_entry.fd_entries.begin ();
fd_entry_it != _family_entry.fd_entries.end (); ++fd_entry_it)
if (fd_entry_it->fd > _max_fd)
_max_fd = fd_entry_it->fd;
}
_family_entry.has_retired = true;
#endif
zmq_assert (retired == 1);
adjust_load (-1);
}
void zmq::select_t::set_pollin (handle_t handle_)
{
check_thread ();
#if defined ZMQ_HAVE_WINDOWS
u_short family = get_fd_family (handle_);
wsa_assert (family != AF_UNSPEC);
family_entry_t &family_entry = _family_entries[family];
#else
family_entry_t &family_entry = _family_entry;
#endif
FD_SET (handle_, &family_entry.fds_set.read);
}
void zmq::select_t::reset_pollin (handle_t handle_)
{
check_thread ();
#if defined ZMQ_HAVE_WINDOWS
u_short family = get_fd_family (handle_);
wsa_assert (family != AF_UNSPEC);
family_entry_t &family_entry = _family_entries[family];
#else
family_entry_t &family_entry = _family_entry;
#endif
FD_CLR (handle_, &family_entry.fds_set.read);
}
void zmq::select_t::set_pollout (handle_t handle_)
{
check_thread ();
#if defined ZMQ_HAVE_WINDOWS
u_short family = get_fd_family (handle_);
wsa_assert (family != AF_UNSPEC);
family_entry_t &family_entry = _family_entries[family];
#else
family_entry_t &family_entry = _family_entry;
#endif
FD_SET (handle_, &family_entry.fds_set.write);
}
void zmq::select_t::reset_pollout (handle_t handle_)
{
check_thread ();
#if defined ZMQ_HAVE_WINDOWS
u_short family = get_fd_family (handle_);
wsa_assert (family != AF_UNSPEC);
family_entry_t &family_entry = _family_entries[family];
#else
family_entry_t &family_entry = _family_entry;
#endif
FD_CLR (handle_, &family_entry.fds_set.write);
}
void zmq::select_t::stop ()
{
check_thread ();
// no-op... thread is stopped when no more fds or timers are registered
}
int zmq::select_t::max_fds ()
{
return FD_SETSIZE;
}
void zmq::select_t::loop ()
{
while (true) {
// Execute any due timers.
int timeout = static_cast<int> (execute_timers ());
cleanup_retired ();
#ifdef _WIN32
if (_family_entries.empty ()) {
#else
if (_family_entry.fd_entries.empty ()) {
#endif
zmq_assert (get_load () == 0);
if (timeout == 0)
break;
// TODO sleep for timeout
continue;
}
#if defined ZMQ_HAVE_OSX
struct timeval tv = {(long) (timeout / 1000), timeout % 1000 * 1000};
#else
struct timeval tv = {static_cast<long> (timeout / 1000),
static_cast<long> (timeout % 1000 * 1000)};
#endif
#if defined ZMQ_HAVE_WINDOWS
/*
    On Windows, select does not allow mixing descriptors from different
service providers. It seems to work for AF_INET and AF_INET6,
but fails for AF_INET and VMCI. The workaround is to use
WSAEventSelect and WSAWaitForMultipleEvents to wait, then use
select to find out what actually changed. WSAWaitForMultipleEvents
cannot be used alone, because it does not support more than 64 events
which is not enough.
    To reduce unnecessary overhead, WSA is only used when there is more
    than one family. Moreover, AF_INET and AF_INET6 are considered the same
family because Windows seems to handle them properly.
See get_fd_family for details.
*/
// If there is just one family, there is no reason to use WSA events.
int rc = 0;
const bool use_wsa_events = _family_entries.size () > 1;
if (use_wsa_events) {
// TODO: I don't really understand why we are doing this. If any of
// the events was signaled, we will call select for each fd_family
// afterwards. The only benefit is if none of the events was
// signaled, then we continue early.
// IMHO, either WSAEventSelect/WSAWaitForMultipleEvents or select
// should be used, but not both
wsa_events_t wsa_events;
for (family_entries_t::iterator family_entry_it =
_family_entries.begin ();
family_entry_it != _family_entries.end (); ++family_entry_it) {
family_entry_t &family_entry = family_entry_it->second;
for (fd_entries_t::iterator fd_entry_it =
family_entry.fd_entries.begin ();
fd_entry_it != family_entry.fd_entries.end ();
++fd_entry_it) {
fd_t fd = fd_entry_it->fd;
// http://stackoverflow.com/q/35043420/188530
if (FD_ISSET (fd, &family_entry.fds_set.read)
&& FD_ISSET (fd, &family_entry.fds_set.write))
rc = WSAEventSelect (fd, wsa_events.events[3],
FD_READ | FD_ACCEPT | FD_CLOSE
| FD_WRITE | FD_CONNECT);
else if (FD_ISSET (fd, &family_entry.fds_set.read))
rc = WSAEventSelect (fd, wsa_events.events[0],
FD_READ | FD_ACCEPT | FD_CLOSE);
else if (FD_ISSET (fd, &family_entry.fds_set.write))
rc = WSAEventSelect (fd, wsa_events.events[1],
FD_WRITE | FD_CONNECT);
else
rc = 0;
wsa_assert (rc != SOCKET_ERROR);
}
}
rc = WSAWaitForMultipleEvents (4, wsa_events.events, FALSE,
timeout ? timeout : INFINITE, FALSE);
wsa_assert (rc != (int) WSA_WAIT_FAILED);
zmq_assert (rc != WSA_WAIT_IO_COMPLETION);
if (rc == WSA_WAIT_TIMEOUT)
continue;
}
for (_current_family_entry_it = _family_entries.begin ();
_current_family_entry_it != _family_entries.end ();
++_current_family_entry_it) {
family_entry_t &family_entry = _current_family_entry_it->second;
if (use_wsa_events) {
// There is no reason to wait again after WSAWaitForMultipleEvents.
// Simply collect what is ready.
struct timeval tv_nodelay = {0, 0};
select_family_entry (family_entry, 0, true, tv_nodelay);
} else {
select_family_entry (family_entry, 0, timeout > 0, tv);
}
}
#else
select_family_entry (_family_entry, _max_fd + 1, timeout > 0, tv);
#endif
}
}
void zmq::select_t::select_family_entry (family_entry_t &family_entry_,
const int max_fd_,
const bool use_timeout_,
struct timeval &tv_)
{
// select will fail when run with empty sets.
fd_entries_t &fd_entries = family_entry_.fd_entries;
if (fd_entries.empty ())
return;
fds_set_t local_fds_set = family_entry_.fds_set;
int rc = select (max_fd_, &local_fds_set.read, &local_fds_set.write,
&local_fds_set.error, use_timeout_ ? &tv_ : NULL);
#if defined ZMQ_HAVE_WINDOWS
wsa_assert (rc != SOCKET_ERROR);
#else
if (rc == -1) {
errno_assert (errno == EINTR);
return;
}
#endif
trigger_events (fd_entries, local_fds_set, rc);
cleanup_retired (family_entry_);
}
zmq::select_t::fds_set_t::fds_set_t ()
{
FD_ZERO (&read);
FD_ZERO (&write);
FD_ZERO (&error);
}
zmq::select_t::fds_set_t::fds_set_t (const fds_set_t &other_)
{
#if defined ZMQ_HAVE_WINDOWS
// On Windows we don't need to copy the whole fd_set.
    //  SOCKETs are stored contiguously from the beginning of fd_array in fd_set,
    //  so we only need to copy fd_count elements of fd_array.
    //  This greatly reduces the amount copied when the number of used SOCKETs is much lower than FD_SETSIZE.
memcpy (&read, &other_.read,
(char *) (other_.read.fd_array + other_.read.fd_count)
- (char *) &other_.read);
memcpy (&write, &other_.write,
(char *) (other_.write.fd_array + other_.write.fd_count)
- (char *) &other_.write);
memcpy (&error, &other_.error,
(char *) (other_.error.fd_array + other_.error.fd_count)
- (char *) &other_.error);
#else
memcpy (&read, &other_.read, sizeof other_.read);
memcpy (&write, &other_.write, sizeof other_.write);
memcpy (&error, &other_.error, sizeof other_.error);
#endif
}
zmq::select_t::fds_set_t &
zmq::select_t::fds_set_t::operator= (const fds_set_t &other_)
{
#if defined ZMQ_HAVE_WINDOWS
// On Windows we don't need to copy the whole fd_set.
    //  SOCKETs are stored contiguously from the beginning of fd_array in fd_set,
    //  so we only need to copy fd_count elements of fd_array.
    //  This greatly reduces the amount copied when the number of used SOCKETs is much lower than FD_SETSIZE.
memcpy (&read, &other_.read,
(char *) (other_.read.fd_array + other_.read.fd_count)
- (char *) &other_.read);
memcpy (&write, &other_.write,
(char *) (other_.write.fd_array + other_.write.fd_count)
- (char *) &other_.write);
memcpy (&error, &other_.error,
(char *) (other_.error.fd_array + other_.error.fd_count)
- (char *) &other_.error);
#else
memcpy (&read, &other_.read, sizeof other_.read);
memcpy (&write, &other_.write, sizeof other_.write);
memcpy (&error, &other_.error, sizeof other_.error);
#endif
return *this;
}
void zmq::select_t::fds_set_t::remove_fd (const fd_t &fd_)
{
FD_CLR (fd_, &read);
FD_CLR (fd_, &write);
FD_CLR (fd_, &error);
}
bool zmq::select_t::cleanup_retired (family_entry_t &family_entry_)
{
if (family_entry_.has_retired) {
family_entry_.has_retired = false;
family_entry_.fd_entries.erase (
std::remove_if (family_entry_.fd_entries.begin (),
family_entry_.fd_entries.end (), is_retired_fd),
family_entry_.fd_entries.end ());
}
return family_entry_.fd_entries.empty ();
}
void zmq::select_t::cleanup_retired ()
{
#ifdef _WIN32
for (family_entries_t::iterator it = _family_entries.begin ();
it != _family_entries.end ();) {
if (cleanup_retired (it->second))
it = _family_entries.erase (it);
else
++it;
}
#else
cleanup_retired (_family_entry);
#endif
}
bool zmq::select_t::is_retired_fd (const fd_entry_t &entry_)
{
return entry_.fd == retired_fd;
}
zmq::select_t::family_entry_t::family_entry_t () : has_retired (false)
{
}
#if defined ZMQ_HAVE_WINDOWS
u_short zmq::select_t::get_fd_family (fd_t fd_)
{
// cache the results of determine_fd_family, as this is frequently called
// for the same sockets, and determine_fd_family is expensive
size_t i;
for (i = 0; i < fd_family_cache_size; ++i) {
const std::pair<fd_t, u_short> &entry = _fd_family_cache[i];
if (entry.first == fd_) {
return entry.second;
}
if (entry.first == retired_fd)
break;
}
std::pair<fd_t, u_short> res =
std::make_pair (fd_, determine_fd_family (fd_));
if (i < fd_family_cache_size) {
_fd_family_cache[i] = res;
} else {
// just overwrite a random entry
// could be optimized by some LRU strategy
_fd_family_cache[rand () % fd_family_cache_size] = res;
}
return res.second;
}
u_short zmq::select_t::determine_fd_family (fd_t fd_)
{
// Use sockaddr_storage instead of sockaddr to accommodate different structure sizes
sockaddr_storage addr = {0};
int addr_size = sizeof addr;
int type;
int type_length = sizeof (int);
int rc = getsockopt (fd_, SOL_SOCKET, SO_TYPE,
reinterpret_cast<char *> (&type), &type_length);
if (rc == 0) {
if (type == SOCK_DGRAM)
return AF_INET;
rc =
getsockname (fd_, reinterpret_cast<sockaddr *> (&addr), &addr_size);
// AF_INET and AF_INET6 can be mixed in select
// TODO: If proven otherwise, should simply return addr.sa_family
if (rc != SOCKET_ERROR)
return addr.ss_family == AF_INET6 ? AF_INET : addr.ss_family;
}
return AF_UNSPEC;
}
zmq::select_t::wsa_events_t::wsa_events_t ()
{
events[0] = WSACreateEvent ();
wsa_assert (events[0] != WSA_INVALID_EVENT);
events[1] = WSACreateEvent ();
wsa_assert (events[1] != WSA_INVALID_EVENT);
events[2] = WSACreateEvent ();
wsa_assert (events[2] != WSA_INVALID_EVENT);
events[3] = WSACreateEvent ();
wsa_assert (events[3] != WSA_INVALID_EVENT);
}
zmq::select_t::wsa_events_t::~wsa_events_t ()
{
wsa_assert (WSACloseEvent (events[0]));
wsa_assert (WSACloseEvent (events[1]));
wsa_assert (WSACloseEvent (events[2]));
wsa_assert (WSACloseEvent (events[3]));
}
#endif
#endif
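A standalone POSIX sketch (not part of the library) of the central pattern in select_family_entry above: the registered fd_set is kept intact and a copy is handed to select (), because select () overwrites its arguments in place, and EINTR is treated as "no events" just as the code above does. The helper name and single-descriptor scope are illustrative only.

#include <stddef.h>
#include <errno.h>
#include <sys/select.h>

//  Returns 1 if fd_ became readable, 0 on timeout or EINTR, -1 on error.
static int wait_readable (int fd_, int timeout_ms_)
{
    fd_set registered;
    FD_ZERO (&registered);
    FD_SET (fd_, &registered);

    //  Work on a local copy so the registered set stays reusable.
    fd_set local = registered;

    struct timeval tv = {timeout_ms_ / 1000, timeout_ms_ % 1000 * 1000};
    const int rc =
      select (fd_ + 1, &local, NULL, NULL, timeout_ms_ >= 0 ? &tv : NULL);
    if (rc == -1 && errno == EINTR)
        return 0;
    return rc;
}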
|
sophomore_public/libzmq
|
src/select.cpp
|
C++
|
gpl-3.0
| 19,196 |
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_SELECT_HPP_INCLUDED__
#define __ZMQ_SELECT_HPP_INCLUDED__
// poller.hpp decides which polling mechanism to use.
#include "poller.hpp"
#if defined ZMQ_IOTHREAD_POLLER_USE_SELECT
#include <stddef.h>
#include <vector>
#include <map>
#if defined ZMQ_HAVE_WINDOWS
#elif defined ZMQ_HAVE_OPENVMS
#include <sys/types.h>
#include <sys/time.h>
#else
#include <sys/select.h>
#endif
#include "ctx.hpp"
#include "fd.hpp"
#include "poller_base.hpp"
namespace zmq
{
struct i_poll_events;
// Implements socket polling mechanism using POSIX.1-2001 select()
// function.
class select_t ZMQ_FINAL : public worker_poller_base_t
{
public:
typedef fd_t handle_t;
select_t (const thread_ctx_t &ctx_);
~select_t () ZMQ_FINAL;
// "poller" concept.
handle_t add_fd (fd_t fd_, zmq::i_poll_events *events_);
void rm_fd (handle_t handle_);
void set_pollin (handle_t handle_);
void reset_pollin (handle_t handle_);
void set_pollout (handle_t handle_);
void reset_pollout (handle_t handle_);
void stop ();
static int max_fds ();
private:
// Main event loop.
void loop () ZMQ_FINAL;
// Internal state.
struct fds_set_t
{
fds_set_t ();
fds_set_t (const fds_set_t &other_);
fds_set_t &operator= (const fds_set_t &other_);
    //  Convenience method to remove a descriptor from all sets.
void remove_fd (const fd_t &fd_);
fd_set read;
fd_set write;
fd_set error;
};
struct fd_entry_t
{
fd_t fd;
zmq::i_poll_events *events;
};
typedef std::vector<fd_entry_t> fd_entries_t;
void trigger_events (const fd_entries_t &fd_entries_,
const fds_set_t &local_fds_set_,
int event_count_);
struct family_entry_t
{
family_entry_t ();
fd_entries_t fd_entries;
fds_set_t fds_set;
bool has_retired;
};
void select_family_entry (family_entry_t &family_entry_,
int max_fd_,
bool use_timeout_,
struct timeval &tv_);
#if defined ZMQ_HAVE_WINDOWS
typedef std::map<u_short, family_entry_t> family_entries_t;
struct wsa_events_t
{
wsa_events_t ();
~wsa_events_t ();
// read, write, error and readwrite
WSAEVENT events[4];
};
family_entries_t _family_entries;
// See loop for details.
family_entries_t::iterator _current_family_entry_it;
int try_retire_fd_entry (family_entries_t::iterator family_entry_it_,
zmq::fd_t &handle_);
static const size_t fd_family_cache_size = 8;
std::pair<fd_t, u_short> _fd_family_cache[fd_family_cache_size];
u_short get_fd_family (fd_t fd_);
// Socket's family or AF_UNSPEC on error.
static u_short determine_fd_family (fd_t fd_);
#else
// on non-Windows, we can treat all fds as one family
family_entry_t _family_entry;
fd_t _max_fd;
#endif
void cleanup_retired ();
bool cleanup_retired (family_entry_t &family_entry_);
// Checks if an fd_entry_t is retired.
static bool is_retired_fd (const fd_entry_t &entry_);
static fd_entries_t::iterator
find_fd_entry_by_handle (fd_entries_t &fd_entries_, handle_t handle_);
ZMQ_NON_COPYABLE_NOR_MOVABLE (select_t)
};
typedef select_t poller_t;
}
#endif
#endif
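A minimal sketch of the handler side of the poller concept declared above, assuming the zmq::i_poll_events interface from i_poll_events.hpp (in_event, out_event and timer_event, as used by trigger_events and session_base_t); the class name and echo logic are illustrative only.

#include "i_poll_events.hpp"

#include <unistd.h>

class echo_handler_t : public zmq::i_poll_events
{
  public:
    explicit echo_handler_t (int fd_) : _fd (fd_) {}

    //  Called by the poller when _fd is readable.
    void in_event ()
    {
        char buf[256];
        const ssize_t n = read (_fd, buf, sizeof buf);
        if (n > 0)
            (void) write (_fd, buf, static_cast<size_t> (n));
    }

    //  Called when _fd is writable; nothing is buffered in this sketch.
    void out_event () {}

    //  Called when a timer registered with the poller expires.
    void timer_event (int id_) { (void) id_; }

  private:
    int _fd;
};

//  A handler like this is what add_fd () registers; readability interest is
//  then armed with set_pollin () and dropped again with reset_pollin ().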
|
sophomore_public/libzmq
|
src/select.hpp
|
C++
|
gpl-3.0
| 3,476 |
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include "macros.hpp"
#include "server.hpp"
#include "pipe.hpp"
#include "wire.hpp"
#include "random.hpp"
#include "likely.hpp"
#include "err.hpp"
zmq::server_t::server_t (class ctx_t *parent_, uint32_t tid_, int sid_) :
socket_base_t (parent_, tid_, sid_, true),
_next_routing_id (generate_random ())
{
options.type = ZMQ_SERVER;
options.can_send_hello_msg = true;
options.can_recv_disconnect_msg = true;
}
zmq::server_t::~server_t ()
{
zmq_assert (_out_pipes.empty ());
}
void zmq::server_t::xattach_pipe (pipe_t *pipe_,
bool subscribe_to_all_,
bool locally_initiated_)
{
LIBZMQ_UNUSED (subscribe_to_all_);
LIBZMQ_UNUSED (locally_initiated_);
zmq_assert (pipe_);
uint32_t routing_id = _next_routing_id++;
if (!routing_id)
routing_id = _next_routing_id++; // Never use Routing ID zero
pipe_->set_server_socket_routing_id (routing_id);
// Add the record into output pipes lookup table
outpipe_t outpipe = {pipe_, true};
const bool ok =
_out_pipes.ZMQ_MAP_INSERT_OR_EMPLACE (routing_id, outpipe).second;
zmq_assert (ok);
_fq.attach (pipe_);
}
void zmq::server_t::xpipe_terminated (pipe_t *pipe_)
{
const out_pipes_t::iterator it =
_out_pipes.find (pipe_->get_server_socket_routing_id ());
zmq_assert (it != _out_pipes.end ());
_out_pipes.erase (it);
_fq.pipe_terminated (pipe_);
}
void zmq::server_t::xread_activated (pipe_t *pipe_)
{
_fq.activated (pipe_);
}
void zmq::server_t::xwrite_activated (pipe_t *pipe_)
{
const out_pipes_t::iterator end = _out_pipes.end ();
out_pipes_t::iterator it;
for (it = _out_pipes.begin (); it != end; ++it)
if (it->second.pipe == pipe_)
break;
zmq_assert (it != _out_pipes.end ());
zmq_assert (!it->second.active);
it->second.active = true;
}
int zmq::server_t::xsend (msg_t *msg_)
{
// SERVER sockets do not allow multipart data (ZMQ_SNDMORE)
if (msg_->flags () & msg_t::more) {
errno = EINVAL;
return -1;
}
// Find the pipe associated with the routing stored in the message.
const uint32_t routing_id = msg_->get_routing_id ();
out_pipes_t::iterator it = _out_pipes.find (routing_id);
if (it != _out_pipes.end ()) {
if (!it->second.pipe->check_write ()) {
it->second.active = false;
errno = EAGAIN;
return -1;
}
} else {
errno = EHOSTUNREACH;
return -1;
}
// Message might be delivered over inproc, so we reset routing id
int rc = msg_->reset_routing_id ();
errno_assert (rc == 0);
const bool ok = it->second.pipe->write (msg_);
if (unlikely (!ok)) {
// Message failed to send - we must close it ourselves.
rc = msg_->close ();
errno_assert (rc == 0);
} else
it->second.pipe->flush ();
// Detach the message from the data buffer.
rc = msg_->init ();
errno_assert (rc == 0);
return 0;
}
int zmq::server_t::xrecv (msg_t *msg_)
{
pipe_t *pipe = NULL;
int rc = _fq.recvpipe (msg_, &pipe);
// Drop any messages with more flag
while (rc == 0 && msg_->flags () & msg_t::more) {
// drop all frames of the current multi-frame message
rc = _fq.recvpipe (msg_, NULL);
while (rc == 0 && msg_->flags () & msg_t::more)
rc = _fq.recvpipe (msg_, NULL);
// get the new message
if (rc == 0)
rc = _fq.recvpipe (msg_, &pipe);
}
if (rc != 0)
return rc;
zmq_assert (pipe != NULL);
const uint32_t routing_id = pipe->get_server_socket_routing_id ();
msg_->set_routing_id (routing_id);
return 0;
}
bool zmq::server_t::xhas_in ()
{
return _fq.has_in ();
}
bool zmq::server_t::xhas_out ()
{
// In theory, SERVER socket is always ready for writing. Whether actual
// attempt to write succeeds depends on which pipe the message is going
// to be routed to.
return true;
}
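A minimal sketch of the request/reply flow handled by the code above, assuming a libzmq build with the draft API enabled (ZMQ_SERVER, ZMQ_CLIENT, zmq_msg_routing_id and zmq_msg_set_routing_id are draft symbols); the endpoint name and payloads are illustrative. The routing id read from the request is the per-pipe value assigned in xattach_pipe, and setting it on the reply is what xsend uses to pick the outbound pipe.

#include <zmq.h>
#include <assert.h>
#include <stdint.h>
#include <string.h>

int main ()
{
    void *ctx = zmq_ctx_new ();
    void *server = zmq_socket (ctx, ZMQ_SERVER);
    void *client = zmq_socket (ctx, ZMQ_CLIENT);
    assert (zmq_bind (server, "inproc://server-demo") == 0);
    assert (zmq_connect (client, "inproc://server-demo") == 0);

    assert (zmq_send (client, "ping", 4, 0) == 4);

    zmq_msg_t request;
    zmq_msg_init (&request);
    assert (zmq_msg_recv (&request, server, 0) == 4);
    const uint32_t routing_id = zmq_msg_routing_id (&request);
    assert (routing_id != 0); //  zero is never assigned, see xattach_pipe
    zmq_msg_close (&request);

    zmq_msg_t reply;
    zmq_msg_init_size (&reply, 4);
    memcpy (zmq_msg_data (&reply), "pong", 4);
    assert (zmq_msg_set_routing_id (&reply, routing_id) == 0);
    assert (zmq_msg_send (&reply, server, 0) == 4);

    char buf[8];
    assert (zmq_recv (client, buf, sizeof buf, 0) == 4);

    zmq_close (client);
    zmq_close (server);
    zmq_ctx_term (ctx);
    return 0;
}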
|
sophomore_public/libzmq
|
src/server.cpp
|
C++
|
gpl-3.0
| 4,126 |
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_SERVER_HPP_INCLUDED__
#define __ZMQ_SERVER_HPP_INCLUDED__
#include <map>
#include "socket_base.hpp"
#include "session_base.hpp"
#include "stdint.hpp"
#include "blob.hpp"
#include "fq.hpp"
namespace zmq
{
class ctx_t;
class msg_t;
class pipe_t;
// TODO: This class uses O(n) scheduling. Rewrite it to use an O(1) algorithm.
class server_t : public socket_base_t
{
public:
server_t (zmq::ctx_t *parent_, uint32_t tid_, int sid_);
~server_t ();
// Overrides of functions from socket_base_t.
void xattach_pipe (zmq::pipe_t *pipe_,
bool subscribe_to_all_,
bool locally_initiated_);
int xsend (zmq::msg_t *msg_);
int xrecv (zmq::msg_t *msg_);
bool xhas_in ();
bool xhas_out ();
void xread_activated (zmq::pipe_t *pipe_);
void xwrite_activated (zmq::pipe_t *pipe_);
void xpipe_terminated (zmq::pipe_t *pipe_);
private:
// Fair queueing object for inbound pipes.
fq_t _fq;
struct outpipe_t
{
zmq::pipe_t *pipe;
bool active;
};
// Outbound pipes indexed by the peer IDs.
typedef std::map<uint32_t, outpipe_t> out_pipes_t;
out_pipes_t _out_pipes;
// Routing IDs are generated. It's a simple increment and wrap-over
// algorithm. This value is the next ID to use (if not used already).
uint32_t _next_routing_id;
ZMQ_NON_COPYABLE_NOR_MOVABLE (server_t)
};
}
#endif
|
sophomore_public/libzmq
|
src/server.hpp
|
C++
|
gpl-3.0
| 1,480 |
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include "macros.hpp"
#include "session_base.hpp"
#include "i_engine.hpp"
#include "err.hpp"
#include "pipe.hpp"
#include "likely.hpp"
#include "tcp_connecter.hpp"
#include "ws_connecter.hpp"
#include "ipc_connecter.hpp"
#include "tipc_connecter.hpp"
#include "socks_connecter.hpp"
#include "vmci_connecter.hpp"
#include "pgm_sender.hpp"
#include "pgm_receiver.hpp"
#include "address.hpp"
#include "norm_engine.hpp"
#include "udp_engine.hpp"
#include "ctx.hpp"
#include "req.hpp"
#include "radio.hpp"
#include "dish.hpp"
zmq::session_base_t *zmq::session_base_t::create (class io_thread_t *io_thread_,
bool active_,
class socket_base_t *socket_,
const options_t &options_,
address_t *addr_)
{
session_base_t *s = NULL;
switch (options_.type) {
case ZMQ_REQ:
s = new (std::nothrow)
req_session_t (io_thread_, active_, socket_, options_, addr_);
break;
case ZMQ_RADIO:
s = new (std::nothrow)
radio_session_t (io_thread_, active_, socket_, options_, addr_);
break;
case ZMQ_DISH:
s = new (std::nothrow)
dish_session_t (io_thread_, active_, socket_, options_, addr_);
break;
case ZMQ_DEALER:
case ZMQ_REP:
case ZMQ_ROUTER:
case ZMQ_PUB:
case ZMQ_XPUB:
case ZMQ_SUB:
case ZMQ_XSUB:
case ZMQ_PUSH:
case ZMQ_PULL:
case ZMQ_PAIR:
case ZMQ_STREAM:
case ZMQ_SERVER:
case ZMQ_CLIENT:
case ZMQ_GATHER:
case ZMQ_SCATTER:
case ZMQ_DGRAM:
case ZMQ_PEER:
case ZMQ_CHANNEL:
#ifdef ZMQ_BUILD_DRAFT_API
if (options_.can_send_hello_msg && options_.hello_msg.size () > 0)
s = new (std::nothrow) hello_msg_session_t (
io_thread_, active_, socket_, options_, addr_);
else
s = new (std::nothrow) session_base_t (
io_thread_, active_, socket_, options_, addr_);
break;
#else
s = new (std::nothrow)
session_base_t (io_thread_, active_, socket_, options_, addr_);
break;
#endif
default:
errno = EINVAL;
return NULL;
}
alloc_assert (s);
return s;
}
zmq::session_base_t::session_base_t (class io_thread_t *io_thread_,
bool active_,
class socket_base_t *socket_,
const options_t &options_,
address_t *addr_) :
own_t (io_thread_, options_),
io_object_t (io_thread_),
_active (active_),
_pipe (NULL),
_zap_pipe (NULL),
_incomplete_in (false),
_pending (false),
_engine (NULL),
_socket (socket_),
_io_thread (io_thread_),
_has_linger_timer (false),
_addr (addr_)
#ifdef ZMQ_HAVE_WSS
,
_wss_hostname (options_.wss_hostname)
#endif
{
}
const zmq::endpoint_uri_pair_t &zmq::session_base_t::get_endpoint () const
{
return _engine->get_endpoint ();
}
zmq::session_base_t::~session_base_t ()
{
zmq_assert (!_pipe);
zmq_assert (!_zap_pipe);
// If there's still a pending linger timer, remove it.
if (_has_linger_timer) {
cancel_timer (linger_timer_id);
_has_linger_timer = false;
}
// Close the engine.
if (_engine)
_engine->terminate ();
LIBZMQ_DELETE (_addr);
}
void zmq::session_base_t::attach_pipe (pipe_t *pipe_)
{
zmq_assert (!is_terminating ());
zmq_assert (!_pipe);
zmq_assert (pipe_);
_pipe = pipe_;
_pipe->set_event_sink (this);
}
int zmq::session_base_t::pull_msg (msg_t *msg_)
{
if (!_pipe || !_pipe->read (msg_)) {
errno = EAGAIN;
return -1;
}
_incomplete_in = (msg_->flags () & msg_t::more) != 0;
return 0;
}
int zmq::session_base_t::push_msg (msg_t *msg_)
{
// pass subscribe/cancel to the sockets
if ((msg_->flags () & msg_t::command) && !msg_->is_subscribe ()
&& !msg_->is_cancel ())
return 0;
if (_pipe && _pipe->write (msg_)) {
const int rc = msg_->init ();
errno_assert (rc == 0);
return 0;
}
errno = EAGAIN;
return -1;
}
int zmq::session_base_t::read_zap_msg (msg_t *msg_)
{
if (_zap_pipe == NULL) {
errno = ENOTCONN;
return -1;
}
if (!_zap_pipe->read (msg_)) {
errno = EAGAIN;
return -1;
}
return 0;
}
int zmq::session_base_t::write_zap_msg (msg_t *msg_)
{
if (_zap_pipe == NULL || !_zap_pipe->write (msg_)) {
errno = ENOTCONN;
return -1;
}
if ((msg_->flags () & msg_t::more) == 0)
_zap_pipe->flush ();
const int rc = msg_->init ();
errno_assert (rc == 0);
return 0;
}
void zmq::session_base_t::reset ()
{
}
void zmq::session_base_t::flush ()
{
if (_pipe)
_pipe->flush ();
}
void zmq::session_base_t::rollback ()
{
if (_pipe)
_pipe->rollback ();
}
void zmq::session_base_t::clean_pipes ()
{
zmq_assert (_pipe != NULL);
// Get rid of half-processed messages in the out pipe. Flush any
// unflushed messages upstream.
_pipe->rollback ();
_pipe->flush ();
// Remove any half-read message from the in pipe.
while (_incomplete_in) {
msg_t msg;
int rc = msg.init ();
errno_assert (rc == 0);
rc = pull_msg (&msg);
errno_assert (rc == 0);
rc = msg.close ();
errno_assert (rc == 0);
}
}
void zmq::session_base_t::pipe_terminated (pipe_t *pipe_)
{
// Drop the reference to the deallocated pipe if required.
zmq_assert (pipe_ == _pipe || pipe_ == _zap_pipe
|| _terminating_pipes.count (pipe_) == 1);
if (pipe_ == _pipe) {
// If this is our current pipe, remove it
_pipe = NULL;
if (_has_linger_timer) {
cancel_timer (linger_timer_id);
_has_linger_timer = false;
}
} else if (pipe_ == _zap_pipe)
_zap_pipe = NULL;
else
// Remove the pipe from the detached pipes set
_terminating_pipes.erase (pipe_);
if (!is_terminating () && options.raw_socket) {
if (_engine) {
_engine->terminate ();
_engine = NULL;
}
terminate ();
}
// If we are waiting for pending messages to be sent, at this point
// we are sure that there will be no more messages and we can proceed
// with termination safely.
if (_pending && !_pipe && !_zap_pipe && _terminating_pipes.empty ()) {
_pending = false;
own_t::process_term (0);
}
}
void zmq::session_base_t::read_activated (pipe_t *pipe_)
{
// Skip activating if we're detaching this pipe
if (unlikely (pipe_ != _pipe && pipe_ != _zap_pipe)) {
zmq_assert (_terminating_pipes.count (pipe_) == 1);
return;
}
if (unlikely (_engine == NULL)) {
if (_pipe)
_pipe->check_read ();
return;
}
if (likely (pipe_ == _pipe))
_engine->restart_output ();
else {
// i.e. pipe_ == zap_pipe
_engine->zap_msg_available ();
}
}
void zmq::session_base_t::write_activated (pipe_t *pipe_)
{
// Skip activating if we're detaching this pipe
if (_pipe != pipe_) {
zmq_assert (_terminating_pipes.count (pipe_) == 1);
return;
}
if (_engine)
_engine->restart_input ();
}
void zmq::session_base_t::hiccuped (pipe_t *)
{
// Hiccups are always sent from session to socket, not the other
// way round.
zmq_assert (false);
}
zmq::socket_base_t *zmq::session_base_t::get_socket () const
{
return _socket;
}
void zmq::session_base_t::process_plug ()
{
if (_active)
start_connecting (false);
}
// This function can return 0 on success, or -1 with errno=ECONNREFUSED if ZAP
// is not set up (i.e. inproc://zeromq.zap.01 does not exist in the same
// context); it aborts on any other error. In other words, either ZAP is not
// configured, or, if it is configured, it MUST be configured correctly and it
// MUST work; otherwise authentication cannot be guaranteed and that would be
// a security flaw.
int zmq::session_base_t::zap_connect ()
{
if (_zap_pipe != NULL)
return 0;
endpoint_t peer = find_endpoint ("inproc://zeromq.zap.01");
if (peer.socket == NULL) {
errno = ECONNREFUSED;
return -1;
}
zmq_assert (peer.options.type == ZMQ_REP || peer.options.type == ZMQ_ROUTER
|| peer.options.type == ZMQ_SERVER);
// Create a bi-directional pipe that will connect
// session with zap socket.
object_t *parents[2] = {this, peer.socket};
pipe_t *new_pipes[2] = {NULL, NULL};
int hwms[2] = {0, 0};
bool conflates[2] = {false, false};
int rc = pipepair (parents, new_pipes, hwms, conflates);
errno_assert (rc == 0);
// Attach local end of the pipe to this socket object.
_zap_pipe = new_pipes[0];
_zap_pipe->set_nodelay ();
_zap_pipe->set_event_sink (this);
send_bind (peer.socket, new_pipes[1], false);
// Send empty routing id if required by the peer.
if (peer.options.recv_routing_id) {
msg_t id;
rc = id.init ();
errno_assert (rc == 0);
id.set_flags (msg_t::routing_id);
bool ok = _zap_pipe->write (&id);
zmq_assert (ok);
_zap_pipe->flush ();
}
return 0;
}
bool zmq::session_base_t::zap_enabled () const
{
return (options.mechanism != ZMQ_NULL || !options.zap_domain.empty ());
}
void zmq::session_base_t::process_attach (i_engine *engine_)
{
zmq_assert (engine_ != NULL);
zmq_assert (!_engine);
_engine = engine_;
if (!engine_->has_handshake_stage ())
engine_ready ();
// Plug in the engine.
_engine->plug (_io_thread, this);
}
void zmq::session_base_t::engine_ready ()
{
// Create the pipe if it does not exist yet.
if (!_pipe && !is_terminating ()) {
object_t *parents[2] = {this, _socket};
pipe_t *pipes[2] = {NULL, NULL};
const bool conflate = get_effective_conflate_option (options);
int hwms[2] = {conflate ? -1 : options.rcvhwm,
conflate ? -1 : options.sndhwm};
bool conflates[2] = {conflate, conflate};
const int rc = pipepair (parents, pipes, hwms, conflates);
errno_assert (rc == 0);
// Plug the local end of the pipe.
pipes[0]->set_event_sink (this);
// Remember the local end of the pipe.
zmq_assert (!_pipe);
_pipe = pipes[0];
// The endpoints strings are not set on bind, set them here so that
// events can use them.
pipes[0]->set_endpoint_pair (_engine->get_endpoint ());
pipes[1]->set_endpoint_pair (_engine->get_endpoint ());
// Ask socket to plug into the remote end of the pipe.
send_bind (_socket, pipes[1]);
}
}
void zmq::session_base_t::engine_error (bool handshaked_,
zmq::i_engine::error_reason_t reason_)
{
// Engine is dead. Let's forget about it.
_engine = NULL;
// Remove any half-done messages from the pipes.
if (_pipe) {
clean_pipes ();
// Only send disconnect message if socket was accepted and handshake was completed
if (!_active && handshaked_ && options.can_recv_disconnect_msg
&& !options.disconnect_msg.empty ()) {
_pipe->set_disconnect_msg (options.disconnect_msg);
_pipe->send_disconnect_msg ();
}
// Only send hiccup message if socket was connected and handshake was completed
if (_active && handshaked_ && options.can_recv_hiccup_msg
&& !options.hiccup_msg.empty ()) {
_pipe->send_hiccup_msg (options.hiccup_msg);
}
}
zmq_assert (reason_ == i_engine::connection_error
|| reason_ == i_engine::timeout_error
|| reason_ == i_engine::protocol_error);
switch (reason_) {
case i_engine::timeout_error:
/* FALLTHROUGH */
case i_engine::connection_error:
if (_active) {
reconnect ();
break;
}
case i_engine::protocol_error:
if (_pending) {
if (_pipe)
_pipe->terminate (false);
if (_zap_pipe)
_zap_pipe->terminate (false);
} else {
terminate ();
}
break;
}
// Just in case there's only a delimiter in the pipe.
if (_pipe)
_pipe->check_read ();
if (_zap_pipe)
_zap_pipe->check_read ();
}
void zmq::session_base_t::process_term (int linger_)
{
zmq_assert (!_pending);
// If the termination of the pipe happens before the term command is
// delivered there's nothing much to do. We can proceed with the
// standard termination immediately.
if (!_pipe && !_zap_pipe && _terminating_pipes.empty ()) {
own_t::process_term (0);
return;
}
_pending = true;
if (_pipe != NULL) {
// If there's finite linger value, delay the termination.
// If linger is infinite (negative) we don't even have to set
// the timer.
if (linger_ > 0) {
zmq_assert (!_has_linger_timer);
add_timer (linger_, linger_timer_id);
_has_linger_timer = true;
}
// Start pipe termination process. Delay the termination till all messages
// are processed in case the linger time is non-zero.
_pipe->terminate (linger_ != 0);
// TODO: Should this go into pipe_t::terminate ?
// In case there's no engine and there's only delimiter in the
// pipe it wouldn't be ever read. Thus we check for it explicitly.
if (!_engine)
_pipe->check_read ();
}
if (_zap_pipe != NULL)
_zap_pipe->terminate (false);
}
void zmq::session_base_t::timer_event (int id_)
{
// Linger period expired. We can proceed with termination even though
// there are still pending messages to be sent.
zmq_assert (id_ == linger_timer_id);
_has_linger_timer = false;
// Ask pipe to terminate even though there may be pending messages in it.
zmq_assert (_pipe);
_pipe->terminate (false);
}
void zmq::session_base_t::process_conn_failed ()
{
std::string *ep = new (std::string);
_addr->to_string (*ep);
send_term_endpoint (_socket, ep);
}
void zmq::session_base_t::reconnect ()
{
// For delayed connect situations, terminate the pipe
// and reestablish later on
if (_pipe && options.immediate == 1
#ifdef ZMQ_HAVE_OPENPGM
&& _addr->protocol != protocol_name::pgm
&& _addr->protocol != protocol_name::epgm
#endif
#ifdef ZMQ_HAVE_NORM
&& _addr->protocol != protocol_name::norm
#endif
&& _addr->protocol != protocol_name::udp) {
_pipe->hiccup ();
_pipe->terminate (false);
_terminating_pipes.insert (_pipe);
_pipe = NULL;
if (_has_linger_timer) {
cancel_timer (linger_timer_id);
_has_linger_timer = false;
}
}
reset ();
// Reconnect.
if (options.reconnect_ivl > 0)
start_connecting (true);
else {
std::string *ep = new (std::string);
_addr->to_string (*ep);
send_term_endpoint (_socket, ep);
}
// For subscriber sockets we hiccup the inbound pipe, which will cause
// the socket object to resend all the subscriptions.
if (_pipe
&& (options.type == ZMQ_SUB || options.type == ZMQ_XSUB
|| options.type == ZMQ_DISH))
_pipe->hiccup ();
}
void zmq::session_base_t::start_connecting (bool wait_)
{
zmq_assert (_active);
// Choose I/O thread to run connecter in. Given that we are already
// running in an I/O thread, there must be at least one available.
io_thread_t *io_thread = choose_io_thread (options.affinity);
zmq_assert (io_thread);
// Create the connecter object.
own_t *connecter = NULL;
if (_addr->protocol == protocol_name::tcp) {
if (!options.socks_proxy_address.empty ()) {
address_t *proxy_address = new (std::nothrow)
address_t (protocol_name::tcp, options.socks_proxy_address,
this->get_ctx ());
alloc_assert (proxy_address);
connecter = new (std::nothrow) socks_connecter_t (
io_thread, this, options, _addr, proxy_address, wait_);
alloc_assert (connecter);
if (!options.socks_proxy_username.empty ()) {
reinterpret_cast<socks_connecter_t *> (connecter)
->set_auth_method_basic (options.socks_proxy_username,
options.socks_proxy_password);
}
} else {
connecter = new (std::nothrow)
tcp_connecter_t (io_thread, this, options, _addr, wait_);
}
}
#if defined ZMQ_HAVE_IPC
else if (_addr->protocol == protocol_name::ipc) {
connecter = new (std::nothrow)
ipc_connecter_t (io_thread, this, options, _addr, wait_);
}
#endif
#if defined ZMQ_HAVE_TIPC
else if (_addr->protocol == protocol_name::tipc) {
connecter = new (std::nothrow)
tipc_connecter_t (io_thread, this, options, _addr, wait_);
}
#endif
#if defined ZMQ_HAVE_VMCI
else if (_addr->protocol == protocol_name::vmci) {
connecter = new (std::nothrow)
vmci_connecter_t (io_thread, this, options, _addr, wait_);
}
#endif
#if defined ZMQ_HAVE_WS
else if (_addr->protocol == protocol_name::ws) {
connecter = new (std::nothrow) ws_connecter_t (
io_thread, this, options, _addr, wait_, false, std::string ());
}
#endif
#if defined ZMQ_HAVE_WSS
else if (_addr->protocol == protocol_name::wss) {
connecter = new (std::nothrow) ws_connecter_t (
io_thread, this, options, _addr, wait_, true, _wss_hostname);
}
#endif
if (connecter != NULL) {
alloc_assert (connecter);
launch_child (connecter);
return;
}
if (_addr->protocol == protocol_name::udp) {
zmq_assert (options.type == ZMQ_DISH || options.type == ZMQ_RADIO
|| options.type == ZMQ_DGRAM);
udp_engine_t *engine = new (std::nothrow) udp_engine_t (options);
alloc_assert (engine);
bool recv = false;
bool send = false;
if (options.type == ZMQ_RADIO) {
send = true;
recv = false;
} else if (options.type == ZMQ_DISH) {
send = false;
recv = true;
} else if (options.type == ZMQ_DGRAM) {
send = true;
recv = true;
}
int rc = engine->init (_addr, send, recv);
errno_assert (rc == 0);
send_attach (this, engine);
return;
}
#ifdef ZMQ_HAVE_OPENPGM
// Both PGM and EPGM transports are using the same infrastructure.
if (_addr->protocol == "pgm" || _addr->protocol == "epgm") {
zmq_assert (options.type == ZMQ_PUB || options.type == ZMQ_XPUB
|| options.type == ZMQ_SUB || options.type == ZMQ_XSUB);
        //  For the EPGM transport, UDP encapsulation of PGM is used.
bool const udp_encapsulation = _addr->protocol == "epgm";
// At this point we'll create message pipes to the session straight
// away. There's no point in delaying it as no concept of 'connect'
// exists with PGM anyway.
if (options.type == ZMQ_PUB || options.type == ZMQ_XPUB) {
// PGM sender.
pgm_sender_t *pgm_sender =
new (std::nothrow) pgm_sender_t (io_thread, options);
alloc_assert (pgm_sender);
int rc =
pgm_sender->init (udp_encapsulation, _addr->address.c_str ());
errno_assert (rc == 0);
send_attach (this, pgm_sender);
} else {
// PGM receiver.
pgm_receiver_t *pgm_receiver =
new (std::nothrow) pgm_receiver_t (io_thread, options);
alloc_assert (pgm_receiver);
int rc =
pgm_receiver->init (udp_encapsulation, _addr->address.c_str ());
errno_assert (rc == 0);
send_attach (this, pgm_receiver);
}
return;
}
#endif
#ifdef ZMQ_HAVE_NORM
if (_addr->protocol == "norm") {
// At this point we'll create message pipes to the session straight
// away. There's no point in delaying it as no concept of 'connect'
// exists with NORM anyway.
if (options.type == ZMQ_PUB || options.type == ZMQ_XPUB) {
// NORM sender.
norm_engine_t *norm_sender =
new (std::nothrow) norm_engine_t (io_thread, options);
alloc_assert (norm_sender);
int rc = norm_sender->init (_addr->address.c_str (), true, false);
errno_assert (rc == 0);
send_attach (this, norm_sender);
} else { // ZMQ_SUB or ZMQ_XSUB
// NORM receiver.
norm_engine_t *norm_receiver =
new (std::nothrow) norm_engine_t (io_thread, options);
alloc_assert (norm_receiver);
int rc = norm_receiver->init (_addr->address.c_str (), false, true);
errno_assert (rc == 0);
send_attach (this, norm_receiver);
}
return;
}
#endif // ZMQ_HAVE_NORM
zmq_assert (false);
}
zmq::hello_msg_session_t::hello_msg_session_t (io_thread_t *io_thread_,
bool connect_,
socket_base_t *socket_,
const options_t &options_,
address_t *addr_) :
session_base_t (io_thread_, connect_, socket_, options_, addr_),
_new_pipe (true)
{
}
zmq::hello_msg_session_t::~hello_msg_session_t ()
{
}
int zmq::hello_msg_session_t::pull_msg (msg_t *msg_)
{
if (_new_pipe) {
_new_pipe = false;
const int rc =
msg_->init_buffer (&options.hello_msg[0], options.hello_msg.size ());
errno_assert (rc == 0);
return 0;
}
return session_base_t::pull_msg (msg_);
}
void zmq::hello_msg_session_t::reset ()
{
session_base_t::reset ();
_new_pipe = true;
}
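A minimal sketch of the peer that zap_connect () above looks up at inproc://zeromq.zap.01, written as a REP socket (one of the socket types the assertion in zap_connect accepts) that approves a single request. The frame layout follows the ZAP protocol as commonly documented: version, request id, domain, address, routing id, mechanism and credentials in; version, request id, status code, status text, user id and metadata out. Treat it as an illustration, not a normative handler.

#include <zmq.h>
#include <assert.h>
#include <string>
#include <vector>

static void zap_handler_once (void *ctx_)
{
    void *handler = zmq_socket (ctx_, ZMQ_REP);
    assert (zmq_bind (handler, "inproc://zeromq.zap.01") == 0);

    //  Read all frames of one request.
    std::vector<std::string> request;
    int more = 1;
    while (more) {
        zmq_msg_t frame;
        zmq_msg_init (&frame);
        assert (zmq_msg_recv (&frame, handler, 0) >= 0);
        request.push_back (std::string (
          static_cast<char *> (zmq_msg_data (&frame)), zmq_msg_size (&frame)));
        more = zmq_msg_more (&frame);
        zmq_msg_close (&frame);
    }
    assert (request.size () >= 6 && request[0] == "1.0");

    //  Reply: allow the connection, echoing the request id back.
    zmq_send (handler, "1.0", 3, ZMQ_SNDMORE);
    zmq_send (handler, request[1].data (), request[1].size (), ZMQ_SNDMORE);
    zmq_send (handler, "200", 3, ZMQ_SNDMORE);
    zmq_send (handler, "OK", 2, ZMQ_SNDMORE);
    zmq_send (handler, "", 0, ZMQ_SNDMORE); //  user id
    zmq_send (handler, "", 0, 0);           //  metadata
    zmq_close (handler);
}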
|
sophomore_public/libzmq
|
src/session_base.cpp
|
C++
|
gpl-3.0
| 22,892 |
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_SESSION_BASE_HPP_INCLUDED__
#define __ZMQ_SESSION_BASE_HPP_INCLUDED__
#include <stdarg.h>
#include "own.hpp"
#include "io_object.hpp"
#include "pipe.hpp"
#include "socket_base.hpp"
#include "i_engine.hpp"
#include "msg.hpp"
namespace zmq
{
class io_thread_t;
struct i_engine;
struct address_t;
class session_base_t : public own_t, public io_object_t, public i_pipe_events
{
public:
// Create a session of the particular type.
static session_base_t *create (zmq::io_thread_t *io_thread_,
bool active_,
zmq::socket_base_t *socket_,
const options_t &options_,
address_t *addr_);
// To be used once only, when creating the session.
void attach_pipe (zmq::pipe_t *pipe_);
// Following functions are the interface exposed towards the engine.
virtual void reset ();
void flush ();
void rollback ();
void engine_error (bool handshaked_, zmq::i_engine::error_reason_t reason_);
void engine_ready ();
// i_pipe_events interface implementation.
void read_activated (zmq::pipe_t *pipe_) ZMQ_FINAL;
void write_activated (zmq::pipe_t *pipe_) ZMQ_FINAL;
void hiccuped (zmq::pipe_t *pipe_) ZMQ_FINAL;
void pipe_terminated (zmq::pipe_t *pipe_) ZMQ_FINAL;
// Delivers a message. Returns 0 if successful; -1 otherwise.
// The function takes ownership of the message.
virtual int push_msg (msg_t *msg_);
int zap_connect ();
bool zap_enabled () const;
// Fetches a message. Returns 0 if successful; -1 otherwise.
// The caller is responsible for freeing the message when no
// longer used.
virtual int pull_msg (msg_t *msg_);
// Receives message from ZAP socket.
// Returns 0 on success; -1 otherwise.
// The caller is responsible for freeing the message.
int read_zap_msg (msg_t *msg_);
// Sends message to ZAP socket.
// Returns 0 on success; -1 otherwise.
// The function takes ownership of the message.
int write_zap_msg (msg_t *msg_);
socket_base_t *get_socket () const;
const endpoint_uri_pair_t &get_endpoint () const;
protected:
session_base_t (zmq::io_thread_t *io_thread_,
bool active_,
zmq::socket_base_t *socket_,
const options_t &options_,
address_t *addr_);
~session_base_t () ZMQ_OVERRIDE;
private:
void start_connecting (bool wait_);
void reconnect ();
// Handlers for incoming commands.
void process_plug () ZMQ_FINAL;
void process_attach (zmq::i_engine *engine_) ZMQ_FINAL;
void process_term (int linger_) ZMQ_FINAL;
void process_conn_failed () ZMQ_OVERRIDE;
// i_poll_events handlers.
void timer_event (int id_) ZMQ_FINAL;
// Remove any half processed messages. Flush unflushed messages.
// Call this function when engine disconnect to get rid of leftovers.
void clean_pipes ();
// If true, this session (re)connects to the peer. Otherwise, it's
// a transient session created by the listener.
const bool _active;
// Pipe connecting the session to its socket.
zmq::pipe_t *_pipe;
// Pipe used to exchange messages with ZAP socket.
zmq::pipe_t *_zap_pipe;
    //  Pipes that we are disconnecting, but whose termination has not yet
    //  completed.
std::set<pipe_t *> _terminating_pipes;
// This flag is true if the remainder of the message being processed
// is still in the in pipe.
bool _incomplete_in;
    //  True if termination has been suspended to push the pending
// messages to the network.
bool _pending;
// The protocol I/O engine connected to the session.
zmq::i_engine *_engine;
// The socket the session belongs to.
zmq::socket_base_t *_socket;
// I/O thread the session is living in. It will be used to plug in
// the engines into the same thread.
zmq::io_thread_t *_io_thread;
// ID of the linger timer
enum
{
linger_timer_id = 0x20
};
    //  True if the linger timer is running.
bool _has_linger_timer;
// Protocol and address to use when connecting.
address_t *_addr;
#ifdef ZMQ_HAVE_WSS
    //  Hostname used for the TLS (WSS) handshake. We take a copy when the
    //  session is created, so that the value at creation time is preserved.
const std::string _wss_hostname;
#endif
ZMQ_NON_COPYABLE_NOR_MOVABLE (session_base_t)
};
class hello_msg_session_t ZMQ_FINAL : public session_base_t
{
public:
hello_msg_session_t (zmq::io_thread_t *io_thread_,
bool connect_,
zmq::socket_base_t *socket_,
const options_t &options_,
address_t *addr_);
~hello_msg_session_t ();
// Overrides of the functions from session_base_t.
int pull_msg (msg_t *msg_);
void reset ();
private:
bool _new_pipe;
ZMQ_NON_COPYABLE_NOR_MOVABLE (hello_msg_session_t)
};
}
#endif
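A small usage sketch related to the hello_msg_session_t declared above: the message it injects on every (re)connect comes from a socket option set by the application. This assumes a libzmq build with the draft API, where the ZMQ_HELLO_MSG option is available, and it assumes a socket type that supports hello messages (ZMQ_DEALER is used here as an illustration); the endpoint and payload are arbitrary.

#include <zmq.h>
#include <assert.h>

static void configure_hello (void *ctx_)
{
    void *s = zmq_socket (ctx_, ZMQ_DEALER);
    const char hello[] = "HELLO";
    //  Stored in options.hello_msg; hello_msg_session_t::pull_msg delivers it
    //  as the first message each time a connection is (re)established.
    int rc = zmq_setsockopt (s, ZMQ_HELLO_MSG, hello, sizeof hello - 1);
    assert (rc == 0);
    rc = zmq_connect (s, "tcp://127.0.0.1:5555");
    assert (rc == 0);
    zmq_close (s);
}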
|
sophomore_public/libzmq
|
src/session_base.hpp
|
C++
|
gpl-3.0
| 5,138 |
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include "poller.hpp"
#include "polling_util.hpp"
#if defined ZMQ_POLL_BASED_ON_POLL
#if !defined ZMQ_HAVE_WINDOWS && !defined ZMQ_HAVE_AIX
#include <poll.h>
#endif
#elif defined ZMQ_POLL_BASED_ON_SELECT
#if defined ZMQ_HAVE_WINDOWS
#elif defined ZMQ_HAVE_HPUX
#include <sys/param.h>
#include <sys/types.h>
#include <sys/time.h>
#elif defined ZMQ_HAVE_OPENVMS
#include <sys/types.h>
#include <sys/time.h>
#elif defined ZMQ_HAVE_VXWORKS
#include <sys/types.h>
#include <sys/time.h>
#include <sockLib.h>
#include <strings.h>
#else
#include <sys/select.h>
#endif
#endif
#include "signaler.hpp"
#include "likely.hpp"
#include "stdint.hpp"
#include "config.hpp"
#include "err.hpp"
#include "fd.hpp"
#include "ip.hpp"
#include "tcp.hpp"
#if !defined ZMQ_HAVE_WINDOWS
#include <unistd.h>
#include <netinet/tcp.h>
#include <sys/types.h>
#include <sys/socket.h>
#endif
#if !defined(ZMQ_HAVE_WINDOWS)
// Helper to sleep for specific number of milliseconds (or until signal)
//
static int sleep_ms (unsigned int ms_)
{
if (ms_ == 0)
return 0;
#if defined ZMQ_HAVE_ANDROID
usleep (ms_ * 1000);
return 0;
#elif defined ZMQ_HAVE_VXWORKS
struct timespec ns_;
ns_.tv_sec = ms_ / 1000;
ns_.tv_nsec = ms_ % 1000 * 1000000;
return nanosleep (&ns_, 0);
#else
return usleep (ms_ * 1000);
#endif
}
// Helper to wait on close(), for non-blocking sockets, until it completes
// If EAGAIN is received, will sleep briefly (1-100ms) then try again, until
// the overall timeout is reached.
//
static int close_wait_ms (int fd_, unsigned int max_ms_ = 2000)
{
unsigned int ms_so_far = 0;
const unsigned int min_step_ms = 1;
const unsigned int max_step_ms = 100;
const unsigned int step_ms =
std::min (std::max (min_step_ms, max_ms_ / 10), max_step_ms);
int rc = 0; // do not sleep on first attempt
do {
if (rc == -1 && errno == EAGAIN) {
sleep_ms (step_ms);
ms_so_far += step_ms;
}
rc = close (fd_);
} while (ms_so_far < max_ms_ && rc == -1 && errno == EAGAIN);
return rc;
}
#endif
zmq::signaler_t::signaler_t ()
{
// Create the socketpair for signaling.
if (make_fdpair (&_r, &_w) == 0) {
unblock_socket (_w);
unblock_socket (_r);
}
#ifdef HAVE_FORK
pid = getpid ();
#endif
}
// This might get run after some part of construction failed, leaving one or
// both of _r and _w retired_fd.
zmq::signaler_t::~signaler_t ()
{
#if defined ZMQ_HAVE_EVENTFD
if (_r == retired_fd)
return;
int rc = close_wait_ms (_r);
errno_assert (rc == 0);
#elif defined ZMQ_HAVE_WINDOWS
if (_w != retired_fd) {
const struct linger so_linger = {1, 0};
int rc = setsockopt (_w, SOL_SOCKET, SO_LINGER,
reinterpret_cast<const char *> (&so_linger),
sizeof so_linger);
// Only check shutdown if WSASTARTUP was previously done
if (rc == 0 || WSAGetLastError () != WSANOTINITIALISED) {
wsa_assert (rc != SOCKET_ERROR);
rc = closesocket (_w);
wsa_assert (rc != SOCKET_ERROR);
if (_r == retired_fd)
return;
rc = closesocket (_r);
wsa_assert (rc != SOCKET_ERROR);
}
}
#else
if (_w != retired_fd) {
int rc = close_wait_ms (_w);
errno_assert (rc == 0);
}
if (_r != retired_fd) {
int rc = close_wait_ms (_r);
errno_assert (rc == 0);
}
#endif
}
zmq::fd_t zmq::signaler_t::get_fd () const
{
return _r;
}
void zmq::signaler_t::send ()
{
#if defined HAVE_FORK
if (unlikely (pid != getpid ())) {
//printf("Child process %d signaler_t::send returning without sending #1\n", getpid());
return; // do not send anything in forked child context
}
#endif
#if defined ZMQ_HAVE_EVENTFD
const uint64_t inc = 1;
ssize_t sz = write (_w, &inc, sizeof (inc));
errno_assert (sz == sizeof (inc));
#elif defined ZMQ_HAVE_WINDOWS
const char dummy = 0;
int nbytes;
do {
nbytes = ::send (_w, &dummy, sizeof (dummy), 0);
wsa_assert (nbytes != SOCKET_ERROR);
// wsa_assert does not abort on WSAEWOULDBLOCK. If we get this, we retry.
} while (nbytes == SOCKET_ERROR);
// Given the small size of dummy (should be 1) expect that send was able to send everything.
zmq_assert (nbytes == sizeof (dummy));
#elif defined ZMQ_HAVE_VXWORKS
unsigned char dummy = 0;
while (true) {
ssize_t nbytes = ::send (_w, (char *) &dummy, sizeof (dummy), 0);
if (unlikely (nbytes == -1 && errno == EINTR))
continue;
#if defined(HAVE_FORK)
if (unlikely (pid != getpid ())) {
//printf("Child process %d signaler_t::send returning without sending #2\n", getpid());
errno = EINTR;
break;
}
#endif
zmq_assert (nbytes == sizeof dummy);
break;
}
#else
unsigned char dummy = 0;
while (true) {
ssize_t nbytes = ::send (_w, &dummy, sizeof (dummy), 0);
if (unlikely (nbytes == -1 && errno == EINTR))
continue;
#if defined(HAVE_FORK)
if (unlikely (pid != getpid ())) {
//printf("Child process %d signaler_t::send returning without sending #2\n", getpid());
errno = EINTR;
break;
}
#endif
zmq_assert (nbytes == sizeof dummy);
break;
}
#endif
}
int zmq::signaler_t::wait (int timeout_) const
{
#ifdef HAVE_FORK
if (unlikely (pid != getpid ())) {
// we have forked and the file descriptor is closed. Emulate an interrupt
// response.
//printf("Child process %d signaler_t::wait returning simulating interrupt #1\n", getpid());
errno = EINTR;
return -1;
}
#endif
#ifdef ZMQ_POLL_BASED_ON_POLL
struct pollfd pfd;
pfd.fd = _r;
pfd.events = POLLIN;
const int rc = poll (&pfd, 1, timeout_);
if (unlikely (rc < 0)) {
errno_assert (errno == EINTR);
return -1;
}
if (unlikely (rc == 0)) {
errno = EAGAIN;
return -1;
}
#ifdef HAVE_FORK
if (unlikely (pid != getpid ())) {
// we have forked and the file descriptor is closed. Emulate an interrupt
// response.
//printf("Child process %d signaler_t::wait returning simulating interrupt #2\n", getpid());
errno = EINTR;
return -1;
}
#endif
zmq_assert (rc == 1);
zmq_assert (pfd.revents & POLLIN);
return 0;
#elif defined ZMQ_POLL_BASED_ON_SELECT
optimized_fd_set_t fds (1);
FD_ZERO (fds.get ());
FD_SET (_r, fds.get ());
struct timeval timeout;
if (timeout_ >= 0) {
timeout.tv_sec = timeout_ / 1000;
timeout.tv_usec = timeout_ % 1000 * 1000;
}
#ifdef ZMQ_HAVE_WINDOWS
int rc =
select (0, fds.get (), NULL, NULL, timeout_ >= 0 ? &timeout : NULL);
wsa_assert (rc != SOCKET_ERROR);
#else
int rc =
select (_r + 1, fds.get (), NULL, NULL, timeout_ >= 0 ? &timeout : NULL);
if (unlikely (rc < 0)) {
errno_assert (errno == EINTR);
return -1;
}
#endif
if (unlikely (rc == 0)) {
errno = EAGAIN;
return -1;
}
zmq_assert (rc == 1);
return 0;
#else
#error
#endif
}
void zmq::signaler_t::recv ()
{
// Attempt to read a signal.
#if defined ZMQ_HAVE_EVENTFD
uint64_t dummy;
ssize_t sz = read (_r, &dummy, sizeof (dummy));
errno_assert (sz == sizeof (dummy));
// If we accidentally grabbed the next signal(s) along with the current
// one, return it back to the eventfd object.
if (unlikely (dummy > 1)) {
const uint64_t inc = dummy - 1;
ssize_t sz2 = write (_w, &inc, sizeof (inc));
errno_assert (sz2 == sizeof (inc));
return;
}
zmq_assert (dummy == 1);
#else
unsigned char dummy;
#if defined ZMQ_HAVE_WINDOWS
const int nbytes =
::recv (_r, reinterpret_cast<char *> (&dummy), sizeof (dummy), 0);
wsa_assert (nbytes != SOCKET_ERROR);
#elif defined ZMQ_HAVE_VXWORKS
ssize_t nbytes = ::recv (_r, (char *) &dummy, sizeof (dummy), 0);
errno_assert (nbytes >= 0);
#else
ssize_t nbytes = ::recv (_r, &dummy, sizeof (dummy), 0);
errno_assert (nbytes >= 0);
#endif
zmq_assert (nbytes == sizeof (dummy));
zmq_assert (dummy == 0);
#endif
}
int zmq::signaler_t::recv_failable ()
{
// Attempt to read a signal.
#if defined ZMQ_HAVE_EVENTFD
uint64_t dummy;
ssize_t sz = read (_r, &dummy, sizeof (dummy));
if (sz == -1) {
errno_assert (errno == EAGAIN);
return -1;
}
errno_assert (sz == sizeof (dummy));
// If we accidentally grabbed the next signal(s) along with the current
// one, return it back to the eventfd object.
if (unlikely (dummy > 1)) {
const uint64_t inc = dummy - 1;
ssize_t sz2 = write (_w, &inc, sizeof (inc));
errno_assert (sz2 == sizeof (inc));
return 0;
}
zmq_assert (dummy == 1);
#else
unsigned char dummy;
#if defined ZMQ_HAVE_WINDOWS
const int nbytes =
::recv (_r, reinterpret_cast<char *> (&dummy), sizeof (dummy), 0);
if (nbytes == SOCKET_ERROR) {
const int last_error = WSAGetLastError ();
if (last_error == WSAEWOULDBLOCK) {
errno = EAGAIN;
return -1;
}
wsa_assert (last_error == WSAEWOULDBLOCK);
}
#elif defined ZMQ_HAVE_VXWORKS
ssize_t nbytes = ::recv (_r, (char *) &dummy, sizeof (dummy), 0);
if (nbytes == -1) {
if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR) {
errno = EAGAIN;
return -1;
}
errno_assert (errno == EAGAIN || errno == EWOULDBLOCK
|| errno == EINTR);
}
#else
ssize_t nbytes = ::recv (_r, &dummy, sizeof (dummy), 0);
if (nbytes == -1) {
if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR) {
errno = EAGAIN;
return -1;
}
errno_assert (errno == EAGAIN || errno == EWOULDBLOCK
|| errno == EINTR);
}
#endif
zmq_assert (nbytes == sizeof (dummy));
zmq_assert (dummy == 0);
#endif
return 0;
}
bool zmq::signaler_t::valid () const
{
return _w != retired_fd;
}
#ifdef HAVE_FORK
void zmq::signaler_t::forked ()
{
// Close file descriptors created in the parent and create new pair
close (_r);
close (_w);
make_fdpair (&_r, &_w);
}
#endif
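A standalone POSIX sketch (not part of the library) of the socketpair-based signaling pattern implemented above for the non-eventfd case: send () writes a single byte to wake the peer, wait () polls the read end, and recv () consumes the byte. Error handling is reduced to asserts, and the fork and Windows handling of the real implementation is omitted.

#include <assert.h>
#include <poll.h>
#include <sys/socket.h>
#include <unistd.h>

struct mini_signaler_t
{
    int r, w;

    mini_signaler_t ()
    {
        int sv[2];
        assert (socketpair (AF_UNIX, SOCK_STREAM, 0, sv) == 0);
        r = sv[0];
        w = sv[1];
    }
    ~mini_signaler_t ()
    {
        close (r);
        close (w);
    }
    void send () { assert (write (w, "", 1) == 1); }
    //  Returns 1 when signaled, 0 on timeout, -1 on error.
    int wait (int timeout_ms_)
    {
        struct pollfd pfd = {r, POLLIN, 0};
        return poll (&pfd, 1, timeout_ms_);
    }
    void recv ()
    {
        char dummy;
        assert (read (r, &dummy, 1) == 1);
    }
};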
|
sophomore_public/libzmq
|
src/signaler.cpp
|
C++
|
gpl-3.0
| 10,561 |
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_SIGNALER_HPP_INCLUDED__
#define __ZMQ_SIGNALER_HPP_INCLUDED__
#ifdef HAVE_FORK
#include <unistd.h>
#endif
#include "fd.hpp"
#include "macros.hpp"
namespace zmq
{
// This is a cross-platform equivalent to signal_fd. However, as opposed
// to signal_fd there can be at most one signal in the signaler at any
// given moment. Attempt to send a signal before receiving the previous
// one will result in undefined behaviour.
class signaler_t
{
public:
signaler_t ();
~signaler_t ();
// Returns the socket/file descriptor
// May return retired_fd if the signaler could not be initialized.
fd_t get_fd () const;
void send ();
int wait (int timeout_) const;
void recv ();
int recv_failable ();
bool valid () const;
#ifdef HAVE_FORK
// close the file descriptors in a forked child process so that they
// do not interfere with the context in the parent process.
void forked ();
#endif
private:
    //  Underlying write & read file descriptors.
    //  They will be retired_fd if an error occurred during initialization,
    //  e.g. we exceeded the number of available handles.
fd_t _w;
fd_t _r;
#ifdef HAVE_FORK
// the process that created this context. Used to detect forking.
pid_t pid;
// idempotent close of file descriptors that is safe to use by destructor
// and forked().
void close_internal ();
#endif
ZMQ_NON_COPYABLE_NOR_MOVABLE (signaler_t)
};
}
#endif
|
sophomore_public/libzmq
|
src/signaler.hpp
|
C++
|
gpl-3.0
| 1,499 |
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include <new>
#include <string>
#include <algorithm>
#include <limits>
#include "macros.hpp"
#if defined ZMQ_HAVE_WINDOWS
#if defined _MSC_VER
#if defined _WIN32_WCE
#include <cmnintrin.h>
#else
#include <intrin.h>
#endif
#endif
#else
#include <unistd.h>
#include <ctype.h>
#endif
#include "socket_base.hpp"
#include "tcp_listener.hpp"
#include "ws_listener.hpp"
#include "ipc_listener.hpp"
#include "tipc_listener.hpp"
#include "tcp_connecter.hpp"
#ifdef ZMQ_HAVE_WS
#include "ws_address.hpp"
#endif
#include "io_thread.hpp"
#include "session_base.hpp"
#include "config.hpp"
#include "pipe.hpp"
#include "err.hpp"
#include "ctx.hpp"
#include "likely.hpp"
#include "msg.hpp"
#include "address.hpp"
#include "ipc_address.hpp"
#include "tcp_address.hpp"
#include "udp_address.hpp"
#include "tipc_address.hpp"
#include "mailbox.hpp"
#include "mailbox_safe.hpp"
#ifdef ZMQ_HAVE_WSS
#include "wss_address.hpp"
#endif
#if defined ZMQ_HAVE_VMCI
#include "vmci_address.hpp"
#include "vmci_listener.hpp"
#endif
#ifdef ZMQ_HAVE_OPENPGM
#include "pgm_socket.hpp"
#endif
#include "pair.hpp"
#include "pub.hpp"
#include "sub.hpp"
#include "req.hpp"
#include "rep.hpp"
#include "pull.hpp"
#include "push.hpp"
#include "dealer.hpp"
#include "router.hpp"
#include "xpub.hpp"
#include "xsub.hpp"
#include "stream.hpp"
#include "server.hpp"
#include "client.hpp"
#include "radio.hpp"
#include "dish.hpp"
#include "gather.hpp"
#include "scatter.hpp"
#include "dgram.hpp"
#include "peer.hpp"
#include "channel.hpp"
void zmq::socket_base_t::inprocs_t::emplace (const char *endpoint_uri_,
pipe_t *pipe_)
{
_inprocs.ZMQ_MAP_INSERT_OR_EMPLACE (std::string (endpoint_uri_), pipe_);
}
int zmq::socket_base_t::inprocs_t::erase_pipes (
const std::string &endpoint_uri_str_)
{
const std::pair<map_t::iterator, map_t::iterator> range =
_inprocs.equal_range (endpoint_uri_str_);
if (range.first == range.second) {
errno = ENOENT;
return -1;
}
for (map_t::iterator it = range.first; it != range.second; ++it) {
it->second->send_disconnect_msg ();
it->second->terminate (true);
}
_inprocs.erase (range.first, range.second);
return 0;
}
void zmq::socket_base_t::inprocs_t::erase_pipe (const pipe_t *pipe_)
{
for (map_t::iterator it = _inprocs.begin (), end = _inprocs.end ();
it != end; ++it)
if (it->second == pipe_) {
_inprocs.erase (it);
break;
}
}
bool zmq::socket_base_t::check_tag () const
{
return _tag == 0xbaddecaf;
}
bool zmq::socket_base_t::is_thread_safe () const
{
return _thread_safe;
}
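// Factory for the concrete socket classes. In the public API this is
// reached through zmq_socket (), which asks the context to create a
// socket of the requested type.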
zmq::socket_base_t *zmq::socket_base_t::create (int type_,
class ctx_t *parent_,
uint32_t tid_,
int sid_)
{
socket_base_t *s = NULL;
switch (type_) {
case ZMQ_PAIR:
s = new (std::nothrow) pair_t (parent_, tid_, sid_);
break;
case ZMQ_PUB:
s = new (std::nothrow) pub_t (parent_, tid_, sid_);
break;
case ZMQ_SUB:
s = new (std::nothrow) sub_t (parent_, tid_, sid_);
break;
case ZMQ_REQ:
s = new (std::nothrow) req_t (parent_, tid_, sid_);
break;
case ZMQ_REP:
s = new (std::nothrow) rep_t (parent_, tid_, sid_);
break;
case ZMQ_DEALER:
s = new (std::nothrow) dealer_t (parent_, tid_, sid_);
break;
case ZMQ_ROUTER:
s = new (std::nothrow) router_t (parent_, tid_, sid_);
break;
case ZMQ_PULL:
s = new (std::nothrow) pull_t (parent_, tid_, sid_);
break;
case ZMQ_PUSH:
s = new (std::nothrow) push_t (parent_, tid_, sid_);
break;
case ZMQ_XPUB:
s = new (std::nothrow) xpub_t (parent_, tid_, sid_);
break;
case ZMQ_XSUB:
s = new (std::nothrow) xsub_t (parent_, tid_, sid_);
break;
case ZMQ_STREAM:
s = new (std::nothrow) stream_t (parent_, tid_, sid_);
break;
case ZMQ_SERVER:
s = new (std::nothrow) server_t (parent_, tid_, sid_);
break;
case ZMQ_CLIENT:
s = new (std::nothrow) client_t (parent_, tid_, sid_);
break;
case ZMQ_RADIO:
s = new (std::nothrow) radio_t (parent_, tid_, sid_);
break;
case ZMQ_DISH:
s = new (std::nothrow) dish_t (parent_, tid_, sid_);
break;
case ZMQ_GATHER:
s = new (std::nothrow) gather_t (parent_, tid_, sid_);
break;
case ZMQ_SCATTER:
s = new (std::nothrow) scatter_t (parent_, tid_, sid_);
break;
case ZMQ_DGRAM:
s = new (std::nothrow) dgram_t (parent_, tid_, sid_);
break;
case ZMQ_PEER:
s = new (std::nothrow) peer_t (parent_, tid_, sid_);
break;
case ZMQ_CHANNEL:
s = new (std::nothrow) channel_t (parent_, tid_, sid_);
break;
default:
errno = EINVAL;
return NULL;
}
alloc_assert (s);
if (s->_mailbox == NULL) {
s->_destroyed = true;
LIBZMQ_DELETE (s);
return NULL;
}
return s;
}
zmq::socket_base_t::socket_base_t (ctx_t *parent_,
uint32_t tid_,
int sid_,
bool thread_safe_) :
own_t (parent_, tid_),
_sync (),
_tag (0xbaddecaf),
_ctx_terminated (false),
_destroyed (false),
_poller (NULL),
_handle (static_cast<poller_t::handle_t> (NULL)),
_last_tsc (0),
_ticks (0),
_rcvmore (false),
_monitor_socket (NULL),
_monitor_events (0),
_thread_safe (thread_safe_),
_reaper_signaler (NULL),
_monitor_sync (),
_disconnected (false)
{
options.socket_id = sid_;
options.ipv6 = (parent_->get (ZMQ_IPV6) != 0);
options.linger.store (parent_->get (ZMQ_BLOCKY) ? -1 : 0);
options.zero_copy = parent_->get (ZMQ_ZERO_COPY_RECV) != 0;
if (_thread_safe) {
_mailbox = new (std::nothrow) mailbox_safe_t (&_sync);
zmq_assert (_mailbox);
} else {
mailbox_t *m = new (std::nothrow) mailbox_t ();
zmq_assert (m);
if (m->get_fd () != retired_fd)
_mailbox = m;
else {
LIBZMQ_DELETE (m);
_mailbox = NULL;
}
}
}
int zmq::socket_base_t::get_peer_state (const void *routing_id_,
size_t routing_id_size_) const
{
LIBZMQ_UNUSED (routing_id_);
LIBZMQ_UNUSED (routing_id_size_);
// Only ROUTER sockets support this
errno = ENOTSUP;
return -1;
}
zmq::socket_base_t::~socket_base_t ()
{
if (_mailbox)
LIBZMQ_DELETE (_mailbox);
if (_reaper_signaler)
LIBZMQ_DELETE (_reaper_signaler);
scoped_lock_t lock (_monitor_sync);
stop_monitor ();
zmq_assert (_destroyed);
}
zmq::i_mailbox *zmq::socket_base_t::get_mailbox () const
{
return _mailbox;
}
void zmq::socket_base_t::stop ()
{
// Called by ctx when it is terminated (zmq_ctx_term).
// 'stop' command is sent from the threads that called zmq_ctx_term to
// the thread owning the socket. This way, blocking call in the
// owner thread can be interrupted.
send_stop ();
}
// TODO consider renaming protocol_ to scheme_ in conformance with RFC 3986
// terminology, but this requires extensive changes to be consistent
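// For illustration: "tcp://127.0.0.1:5555" is split into
// protocol_ == "tcp" and path_ == "127.0.0.1:5555"; a URI without "://"
// or with an empty protocol or path is rejected with EINVAL.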
int zmq::socket_base_t::parse_uri (const char *uri_,
std::string &protocol_,
std::string &path_)
{
zmq_assert (uri_ != NULL);
const std::string uri (uri_);
const std::string::size_type pos = uri.find ("://");
if (pos == std::string::npos) {
errno = EINVAL;
return -1;
}
protocol_ = uri.substr (0, pos);
path_ = uri.substr (pos + 3);
if (protocol_.empty () || path_.empty ()) {
errno = EINVAL;
return -1;
}
return 0;
}
int zmq::socket_base_t::check_protocol (const std::string &protocol_) const
{
// First check out whether the protocol is something we are aware of.
if (protocol_ != protocol_name::inproc
#if defined ZMQ_HAVE_IPC
&& protocol_ != protocol_name::ipc
#endif
&& protocol_ != protocol_name::tcp
#ifdef ZMQ_HAVE_WS
&& protocol_ != protocol_name::ws
#endif
#ifdef ZMQ_HAVE_WSS
&& protocol_ != protocol_name::wss
#endif
#if defined ZMQ_HAVE_OPENPGM
// pgm/epgm transports only available if 0MQ is compiled with OpenPGM.
&& protocol_ != protocol_name::pgm
&& protocol_ != protocol_name::epgm
#endif
#if defined ZMQ_HAVE_TIPC
// TIPC transport is only available on Linux.
&& protocol_ != protocol_name::tipc
#endif
#if defined ZMQ_HAVE_NORM
&& protocol_ != protocol_name::norm
#endif
#if defined ZMQ_HAVE_VMCI
&& protocol_ != protocol_name::vmci
#endif
&& protocol_ != protocol_name::udp) {
errno = EPROTONOSUPPORT;
return -1;
}
// Check whether socket type and transport protocol match.
// Specifically, multicast protocols can't be combined with
// bi-directional messaging patterns (socket types).
#if defined ZMQ_HAVE_OPENPGM || defined ZMQ_HAVE_NORM
#if defined ZMQ_HAVE_OPENPGM && defined ZMQ_HAVE_NORM
if ((protocol_ == protocol_name::pgm || protocol_ == protocol_name::epgm
|| protocol_ == protocol_name::norm)
#elif defined ZMQ_HAVE_OPENPGM
if ((protocol_ == protocol_name::pgm || protocol_ == protocol_name::epgm)
#else // defined ZMQ_HAVE_NORM
if (protocol_ == protocol_name::norm
#endif
&& options.type != ZMQ_PUB && options.type != ZMQ_SUB
&& options.type != ZMQ_XPUB && options.type != ZMQ_XSUB) {
errno = ENOCOMPATPROTO;
return -1;
}
#endif
if (protocol_ == protocol_name::udp
&& (options.type != ZMQ_DISH && options.type != ZMQ_RADIO
&& options.type != ZMQ_DGRAM)) {
errno = ENOCOMPATPROTO;
return -1;
}
// Protocol is available.
return 0;
}
void zmq::socket_base_t::attach_pipe (pipe_t *pipe_,
bool subscribe_to_all_,
bool locally_initiated_)
{
// First, register the pipe so that we can terminate it later on.
pipe_->set_event_sink (this);
_pipes.push_back (pipe_);
// Let the derived socket type know about new pipe.
xattach_pipe (pipe_, subscribe_to_all_, locally_initiated_);
// If the socket is already being closed, ask any new pipes to terminate
// straight away.
if (is_terminating ()) {
register_term_acks (1);
pipe_->terminate (false);
}
}
int zmq::socket_base_t::setsockopt (int option_,
const void *optval_,
size_t optvallen_)
{
scoped_optional_lock_t sync_lock (_thread_safe ? &_sync : NULL);
if (unlikely (_ctx_terminated)) {
errno = ETERM;
return -1;
}
// First, check whether specific socket type overloads the option.
int rc = xsetsockopt (option_, optval_, optvallen_);
if (rc == 0 || errno != EINVAL) {
return rc;
}
// If the socket type doesn't support the option, pass it to
// the generic option parser.
rc = options.setsockopt (option_, optval_, optvallen_);
update_pipe_options (option_);
return rc;
}
int zmq::socket_base_t::getsockopt (int option_,
void *optval_,
size_t *optvallen_)
{
scoped_optional_lock_t sync_lock (_thread_safe ? &_sync : NULL);
if (unlikely (_ctx_terminated)) {
errno = ETERM;
return -1;
}
// First, check whether specific socket type overloads the option.
int rc = xgetsockopt (option_, optval_, optvallen_);
if (rc == 0 || errno != EINVAL) {
return rc;
}
if (option_ == ZMQ_RCVMORE) {
return do_getsockopt<int> (optval_, optvallen_, _rcvmore ? 1 : 0);
}
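    // Note: callers typically combine the ZMQ_FD and ZMQ_EVENTS options
    // handled below - poll the mailbox fd for readability and, once it
    // fires, query ZMQ_EVENTS to see whether the socket is actually
    // readable or writable. The fd only signals pending commands, not
    // message availability, and behaves edge-triggered.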
if (option_ == ZMQ_FD) {
if (_thread_safe) {
            // A thread-safe socket doesn't provide a file descriptor.
errno = EINVAL;
return -1;
}
return do_getsockopt<fd_t> (
optval_, optvallen_,
(static_cast<mailbox_t *> (_mailbox))->get_fd ());
}
if (option_ == ZMQ_EVENTS) {
const int rc = process_commands (0, false);
if (rc != 0 && (errno == EINTR || errno == ETERM)) {
return -1;
}
errno_assert (rc == 0);
return do_getsockopt<int> (optval_, optvallen_,
(has_out () ? ZMQ_POLLOUT : 0)
| (has_in () ? ZMQ_POLLIN : 0));
}
if (option_ == ZMQ_LAST_ENDPOINT) {
return do_getsockopt (optval_, optvallen_, _last_endpoint);
}
if (option_ == ZMQ_THREAD_SAFE) {
return do_getsockopt<int> (optval_, optvallen_, _thread_safe ? 1 : 0);
}
return options.getsockopt (option_, optval_, optvallen_);
}
int zmq::socket_base_t::join (const char *group_)
{
scoped_optional_lock_t sync_lock (_thread_safe ? &_sync : NULL);
return xjoin (group_);
}
int zmq::socket_base_t::leave (const char *group_)
{
scoped_optional_lock_t sync_lock (_thread_safe ? &_sync : NULL);
return xleave (group_);
}
void zmq::socket_base_t::add_signaler (signaler_t *s_)
{
zmq_assert (_thread_safe);
scoped_lock_t sync_lock (_sync);
(static_cast<mailbox_safe_t *> (_mailbox))->add_signaler (s_);
}
void zmq::socket_base_t::remove_signaler (signaler_t *s_)
{
zmq_assert (_thread_safe);
scoped_lock_t sync_lock (_sync);
(static_cast<mailbox_safe_t *> (_mailbox))->remove_signaler (s_);
}
int zmq::socket_base_t::bind (const char *endpoint_uri_)
{
scoped_optional_lock_t sync_lock (_thread_safe ? &_sync : NULL);
if (unlikely (_ctx_terminated)) {
errno = ETERM;
return -1;
}
// Process pending commands, if any.
int rc = process_commands (0, false);
if (unlikely (rc != 0)) {
return -1;
}
// Parse endpoint_uri_ string.
std::string protocol;
std::string address;
if (parse_uri (endpoint_uri_, protocol, address)
|| check_protocol (protocol)) {
return -1;
}
if (protocol == protocol_name::inproc) {
const endpoint_t endpoint = {this, options};
rc = register_endpoint (endpoint_uri_, endpoint);
if (rc == 0) {
connect_pending (endpoint_uri_, this);
_last_endpoint.assign (endpoint_uri_);
options.connected = true;
}
return rc;
}
#if defined ZMQ_HAVE_OPENPGM || defined ZMQ_HAVE_NORM
#if defined ZMQ_HAVE_OPENPGM && defined ZMQ_HAVE_NORM
if (protocol == protocol_name::pgm || protocol == protocol_name::epgm
|| protocol == protocol_name::norm) {
#elif defined ZMQ_HAVE_OPENPGM
if (protocol == protocol_name::pgm || protocol == protocol_name::epgm) {
#else // defined ZMQ_HAVE_NORM
if (protocol == protocol_name::norm) {
#endif
        // For convenience's sake, bind can be used interchangeably with
        // connect for the PGM, EPGM and NORM transports.
rc = connect (endpoint_uri_);
if (rc != -1)
options.connected = true;
return rc;
}
#endif
if (protocol == protocol_name::udp) {
if (!(options.type == ZMQ_DGRAM || options.type == ZMQ_DISH)) {
errno = ENOCOMPATPROTO;
return -1;
}
// Choose the I/O thread to run the session in.
io_thread_t *io_thread = choose_io_thread (options.affinity);
if (!io_thread) {
errno = EMTHREAD;
return -1;
}
address_t *paddr =
new (std::nothrow) address_t (protocol, address, this->get_ctx ());
alloc_assert (paddr);
paddr->resolved.udp_addr = new (std::nothrow) udp_address_t ();
alloc_assert (paddr->resolved.udp_addr);
rc = paddr->resolved.udp_addr->resolve (address.c_str (), true,
options.ipv6);
if (rc != 0) {
LIBZMQ_DELETE (paddr);
return -1;
}
session_base_t *session =
session_base_t::create (io_thread, true, this, options, paddr);
errno_assert (session);
// Create a bi-directional pipe.
object_t *parents[2] = {this, session};
pipe_t *new_pipes[2] = {NULL, NULL};
int hwms[2] = {options.sndhwm, options.rcvhwm};
bool conflates[2] = {false, false};
rc = pipepair (parents, new_pipes, hwms, conflates);
errno_assert (rc == 0);
// Attach local end of the pipe to the socket object.
attach_pipe (new_pipes[0], true, true);
pipe_t *const newpipe = new_pipes[0];
// Attach remote end of the pipe to the session object later on.
session->attach_pipe (new_pipes[1]);
// Save last endpoint URI
paddr->to_string (_last_endpoint);
        // TODO shouldn't this use _last_endpoint instead of endpoint_uri_, as in the other cases?
add_endpoint (endpoint_uri_pair_t (endpoint_uri_, std::string (),
endpoint_type_none),
static_cast<own_t *> (session), newpipe);
return 0;
}
// Remaining transports require to be run in an I/O thread, so at this
// point we'll choose one.
io_thread_t *io_thread = choose_io_thread (options.affinity);
if (!io_thread) {
errno = EMTHREAD;
return -1;
}
if (protocol == protocol_name::tcp) {
tcp_listener_t *listener =
new (std::nothrow) tcp_listener_t (io_thread, this, options);
alloc_assert (listener);
rc = listener->set_local_address (address.c_str ());
if (rc != 0) {
LIBZMQ_DELETE (listener);
event_bind_failed (make_unconnected_bind_endpoint_pair (address),
zmq_errno ());
return -1;
}
// Save last endpoint URI
listener->get_local_address (_last_endpoint);
add_endpoint (make_unconnected_bind_endpoint_pair (_last_endpoint),
static_cast<own_t *> (listener), NULL);
options.connected = true;
return 0;
}
#ifdef ZMQ_HAVE_WS
#ifdef ZMQ_HAVE_WSS
if (protocol == protocol_name::ws || protocol == protocol_name::wss) {
ws_listener_t *listener = new (std::nothrow) ws_listener_t (
io_thread, this, options, protocol == protocol_name::wss);
#else
if (protocol == protocol_name::ws) {
ws_listener_t *listener =
new (std::nothrow) ws_listener_t (io_thread, this, options, false);
#endif
alloc_assert (listener);
rc = listener->set_local_address (address.c_str ());
if (rc != 0) {
LIBZMQ_DELETE (listener);
event_bind_failed (make_unconnected_bind_endpoint_pair (address),
zmq_errno ());
return -1;
}
// Save last endpoint URI
listener->get_local_address (_last_endpoint);
add_endpoint (make_unconnected_bind_endpoint_pair (_last_endpoint),
static_cast<own_t *> (listener), NULL);
options.connected = true;
return 0;
}
#endif
#if defined ZMQ_HAVE_IPC
if (protocol == protocol_name::ipc) {
ipc_listener_t *listener =
new (std::nothrow) ipc_listener_t (io_thread, this, options);
alloc_assert (listener);
int rc = listener->set_local_address (address.c_str ());
if (rc != 0) {
LIBZMQ_DELETE (listener);
event_bind_failed (make_unconnected_bind_endpoint_pair (address),
zmq_errno ());
return -1;
}
// Save last endpoint URI
listener->get_local_address (_last_endpoint);
add_endpoint (make_unconnected_bind_endpoint_pair (_last_endpoint),
static_cast<own_t *> (listener), NULL);
options.connected = true;
return 0;
}
#endif
#if defined ZMQ_HAVE_TIPC
if (protocol == protocol_name::tipc) {
tipc_listener_t *listener =
new (std::nothrow) tipc_listener_t (io_thread, this, options);
alloc_assert (listener);
int rc = listener->set_local_address (address.c_str ());
if (rc != 0) {
LIBZMQ_DELETE (listener);
event_bind_failed (make_unconnected_bind_endpoint_pair (address),
zmq_errno ());
return -1;
}
// Save last endpoint URI
listener->get_local_address (_last_endpoint);
// TODO shouldn't this use _last_endpoint as in the other cases?
add_endpoint (make_unconnected_bind_endpoint_pair (endpoint_uri_),
static_cast<own_t *> (listener), NULL);
options.connected = true;
return 0;
}
#endif
#if defined ZMQ_HAVE_VMCI
if (protocol == protocol_name::vmci) {
vmci_listener_t *listener =
new (std::nothrow) vmci_listener_t (io_thread, this, options);
alloc_assert (listener);
int rc = listener->set_local_address (address.c_str ());
if (rc != 0) {
LIBZMQ_DELETE (listener);
event_bind_failed (make_unconnected_bind_endpoint_pair (address),
zmq_errno ());
return -1;
}
listener->get_local_address (_last_endpoint);
add_endpoint (make_unconnected_bind_endpoint_pair (_last_endpoint),
static_cast<own_t *> (listener), NULL);
options.connected = true;
return 0;
}
#endif
zmq_assert (false);
return -1;
}
int zmq::socket_base_t::connect (const char *endpoint_uri_)
{
scoped_optional_lock_t sync_lock (_thread_safe ? &_sync : NULL);
return connect_internal (endpoint_uri_);
}
int zmq::socket_base_t::connect_internal (const char *endpoint_uri_)
{
if (unlikely (_ctx_terminated)) {
errno = ETERM;
return -1;
}
// Process pending commands, if any.
int rc = process_commands (0, false);
if (unlikely (rc != 0)) {
return -1;
}
// Parse endpoint_uri_ string.
std::string protocol;
std::string address;
if (parse_uri (endpoint_uri_, protocol, address)
|| check_protocol (protocol)) {
return -1;
}
if (protocol == protocol_name::inproc) {
// TODO: inproc connect is specific with respect to creating pipes
// as there's no 'reconnect' functionality implemented. Once that
// is in place we should follow generic pipe creation algorithm.
// Find the peer endpoint.
const endpoint_t peer = find_endpoint (endpoint_uri_);
// The total HWM for an inproc connection should be the sum of
// the binder's HWM and the connector's HWM.
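        // (For example, sndhwm == 1000 with a peer rcvhwm == 1000 yields
        // an effective outbound HWM of 2000; a value of 0 on either side
        // means "unlimited", so the combined HWM is 0 in that case.)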
const int sndhwm = peer.socket == NULL ? options.sndhwm
: options.sndhwm != 0 && peer.options.rcvhwm != 0
? options.sndhwm + peer.options.rcvhwm
: 0;
const int rcvhwm = peer.socket == NULL ? options.rcvhwm
: options.rcvhwm != 0 && peer.options.sndhwm != 0
? options.rcvhwm + peer.options.sndhwm
: 0;
// Create a bi-directional pipe to connect the peers.
object_t *parents[2] = {this, peer.socket == NULL ? this : peer.socket};
pipe_t *new_pipes[2] = {NULL, NULL};
const bool conflate = get_effective_conflate_option (options);
int hwms[2] = {conflate ? -1 : sndhwm, conflate ? -1 : rcvhwm};
bool conflates[2] = {conflate, conflate};
rc = pipepair (parents, new_pipes, hwms, conflates);
if (!conflate) {
new_pipes[0]->set_hwms_boost (peer.options.sndhwm,
peer.options.rcvhwm);
new_pipes[1]->set_hwms_boost (options.sndhwm, options.rcvhwm);
}
errno_assert (rc == 0);
if (!peer.socket) {
// The peer doesn't exist yet so we don't know whether
// to send the routing id message or not. To resolve this,
// we always send our routing id and drop it later if
// the peer doesn't expect it.
send_routing_id (new_pipes[0], options);
#ifdef ZMQ_BUILD_DRAFT_API
// If set, send the hello msg of the local socket to the peer.
if (options.can_send_hello_msg && options.hello_msg.size () > 0) {
send_hello_msg (new_pipes[0], options);
}
#endif
const endpoint_t endpoint = {this, options};
pend_connection (std::string (endpoint_uri_), endpoint, new_pipes);
} else {
// If required, send the routing id of the local socket to the peer.
if (peer.options.recv_routing_id) {
send_routing_id (new_pipes[0], options);
}
// If required, send the routing id of the peer to the local socket.
if (options.recv_routing_id) {
send_routing_id (new_pipes[1], peer.options);
}
#ifdef ZMQ_BUILD_DRAFT_API
// If set, send the hello msg of the local socket to the peer.
if (options.can_send_hello_msg && options.hello_msg.size () > 0) {
send_hello_msg (new_pipes[0], options);
}
// If set, send the hello msg of the peer to the local socket.
if (peer.options.can_send_hello_msg
&& peer.options.hello_msg.size () > 0) {
send_hello_msg (new_pipes[1], peer.options);
}
if (peer.options.can_recv_disconnect_msg
&& peer.options.disconnect_msg.size () > 0)
new_pipes[0]->set_disconnect_msg (peer.options.disconnect_msg);
#endif
// Attach remote end of the pipe to the peer socket. Note that peer's
// seqnum was incremented in find_endpoint function. We don't need it
// increased here.
send_bind (peer.socket, new_pipes[1], false);
}
// Attach local end of the pipe to this socket object.
attach_pipe (new_pipes[0], false, true);
// Save last endpoint URI
_last_endpoint.assign (endpoint_uri_);
// remember inproc connections for disconnect
_inprocs.emplace (endpoint_uri_, new_pipes[0]);
options.connected = true;
return 0;
}
const bool is_single_connect =
(options.type == ZMQ_DEALER || options.type == ZMQ_SUB
|| options.type == ZMQ_PUB || options.type == ZMQ_REQ);
if (unlikely (is_single_connect)) {
if (0 != _endpoints.count (endpoint_uri_)) {
            // There is no valid use for multiple connects for SUB-PUB,
            // DEALER-ROUTER or REQ-REP; multiple connects produce
            // nonsensical results.
return 0;
}
}
// Choose the I/O thread to run the session in.
io_thread_t *io_thread = choose_io_thread (options.affinity);
if (!io_thread) {
errno = EMTHREAD;
return -1;
}
address_t *paddr =
new (std::nothrow) address_t (protocol, address, this->get_ctx ());
alloc_assert (paddr);
// Resolve address (if needed by the protocol)
if (protocol == protocol_name::tcp) {
// Do some basic sanity checks on tcp:// address syntax
// - hostname starts with digit or letter, with embedded '-' or '.'
// - IPv6 address may contain hex chars and colons.
// - IPv6 link local address may contain % followed by interface name / zone_id
// (Reference: https://tools.ietf.org/html/rfc4007)
// - IPv4 address may contain decimal digits and dots.
// - Address must end in ":port" where port is *, or numeric
// - Address may contain two parts separated by ':'
// Following code is quick and dirty check to catch obvious errors,
// without trying to be fully accurate.
const char *check = address.c_str ();
if (isalnum (*check) || isxdigit (*check) || *check == '['
|| *check == ':') {
check++;
while (isalnum (*check) || isxdigit (*check) || *check == '.'
|| *check == '-' || *check == ':' || *check == '%'
|| *check == ';' || *check == '[' || *check == ']'
|| *check == '_' || *check == '*') {
check++;
}
}
// Assume the worst, now look for success
rc = -1;
// Did we reach the end of the address safely?
if (*check == 0) {
            // Do we have a valid port string? (it cannot be '*' in connect)
check = strrchr (address.c_str (), ':');
if (check) {
check++;
if (*check && (isdigit (*check)))
rc = 0; // Valid
}
}
if (rc == -1) {
errno = EINVAL;
LIBZMQ_DELETE (paddr);
return -1;
}
// Defer resolution until a socket is opened
paddr->resolved.tcp_addr = NULL;
}
#ifdef ZMQ_HAVE_WS
#ifdef ZMQ_HAVE_WSS
else if (protocol == protocol_name::ws || protocol == protocol_name::wss) {
if (protocol == protocol_name::wss) {
paddr->resolved.wss_addr = new (std::nothrow) wss_address_t ();
alloc_assert (paddr->resolved.wss_addr);
rc = paddr->resolved.wss_addr->resolve (address.c_str (), false,
options.ipv6);
} else
#else
else if (protocol == protocol_name::ws) {
#endif
{
paddr->resolved.ws_addr = new (std::nothrow) ws_address_t ();
alloc_assert (paddr->resolved.ws_addr);
rc = paddr->resolved.ws_addr->resolve (address.c_str (), false,
options.ipv6);
}
if (rc != 0) {
LIBZMQ_DELETE (paddr);
return -1;
}
}
#endif
#if defined ZMQ_HAVE_IPC
else if (protocol == protocol_name::ipc) {
paddr->resolved.ipc_addr = new (std::nothrow) ipc_address_t ();
alloc_assert (paddr->resolved.ipc_addr);
int rc = paddr->resolved.ipc_addr->resolve (address.c_str ());
if (rc != 0) {
LIBZMQ_DELETE (paddr);
return -1;
}
}
#endif
if (protocol == protocol_name::udp) {
if (options.type != ZMQ_RADIO) {
errno = ENOCOMPATPROTO;
LIBZMQ_DELETE (paddr);
return -1;
}
paddr->resolved.udp_addr = new (std::nothrow) udp_address_t ();
alloc_assert (paddr->resolved.udp_addr);
rc = paddr->resolved.udp_addr->resolve (address.c_str (), false,
options.ipv6);
if (rc != 0) {
LIBZMQ_DELETE (paddr);
return -1;
}
}
// TBD - Should we check address for ZMQ_HAVE_NORM???
#ifdef ZMQ_HAVE_OPENPGM
if (protocol == protocol_name::pgm || protocol == protocol_name::epgm) {
struct pgm_addrinfo_t *res = NULL;
uint16_t port_number = 0;
int rc =
pgm_socket_t::init_address (address.c_str (), &res, &port_number);
if (res != NULL)
pgm_freeaddrinfo (res);
if (rc != 0 || port_number == 0) {
return -1;
}
}
#endif
#if defined ZMQ_HAVE_TIPC
else if (protocol == protocol_name::tipc) {
paddr->resolved.tipc_addr = new (std::nothrow) tipc_address_t ();
alloc_assert (paddr->resolved.tipc_addr);
int rc = paddr->resolved.tipc_addr->resolve (address.c_str ());
if (rc != 0) {
LIBZMQ_DELETE (paddr);
return -1;
}
const sockaddr_tipc *const saddr =
reinterpret_cast<const sockaddr_tipc *> (
paddr->resolved.tipc_addr->addr ());
// Cannot connect to random Port Identity
if (saddr->addrtype == TIPC_ADDR_ID
&& paddr->resolved.tipc_addr->is_random ()) {
LIBZMQ_DELETE (paddr);
errno = EINVAL;
return -1;
}
}
#endif
#if defined ZMQ_HAVE_VMCI
else if (protocol == protocol_name::vmci) {
paddr->resolved.vmci_addr =
new (std::nothrow) vmci_address_t (this->get_ctx ());
alloc_assert (paddr->resolved.vmci_addr);
int rc = paddr->resolved.vmci_addr->resolve (address.c_str ());
if (rc != 0) {
LIBZMQ_DELETE (paddr);
return -1;
}
}
#endif
// Create session.
session_base_t *session =
session_base_t::create (io_thread, true, this, options, paddr);
errno_assert (session);
// PGM does not support subscription forwarding; ask for all data to be
// sent to this pipe. (same for NORM, currently?)
#if defined ZMQ_HAVE_OPENPGM && defined ZMQ_HAVE_NORM
const bool subscribe_to_all =
protocol == protocol_name::pgm || protocol == protocol_name::epgm
|| protocol == protocol_name::norm || protocol == protocol_name::udp;
#elif defined ZMQ_HAVE_OPENPGM
const bool subscribe_to_all = protocol == protocol_name::pgm
|| protocol == protocol_name::epgm
|| protocol == protocol_name::udp;
#elif defined ZMQ_HAVE_NORM
const bool subscribe_to_all =
protocol == protocol_name::norm || protocol == protocol_name::udp;
#else
const bool subscribe_to_all = protocol == protocol_name::udp;
#endif
pipe_t *newpipe = NULL;
if (options.immediate != 1 || subscribe_to_all) {
// Create a bi-directional pipe.
object_t *parents[2] = {this, session};
pipe_t *new_pipes[2] = {NULL, NULL};
const bool conflate = get_effective_conflate_option (options);
int hwms[2] = {conflate ? -1 : options.sndhwm,
conflate ? -1 : options.rcvhwm};
bool conflates[2] = {conflate, conflate};
rc = pipepair (parents, new_pipes, hwms, conflates);
errno_assert (rc == 0);
// Attach local end of the pipe to the socket object.
attach_pipe (new_pipes[0], subscribe_to_all, true);
newpipe = new_pipes[0];
// Attach remote end of the pipe to the session object later on.
session->attach_pipe (new_pipes[1]);
}
// Save last endpoint URI
paddr->to_string (_last_endpoint);
add_endpoint (make_unconnected_connect_endpoint_pair (endpoint_uri_),
static_cast<own_t *> (session), newpipe);
return 0;
}
std::string
zmq::socket_base_t::resolve_tcp_addr (std::string endpoint_uri_pair_,
const char *tcp_address_)
{
// The resolved last_endpoint is used as a key in the endpoints map.
// The address passed by the user might not match in the TCP case due to
    // IPv4-in-IPv6 mapping (e.g. tcp://[::ffff:127.0.0.1]:9999), so try to
// resolve before giving up. Given at this stage we don't know whether a
// socket is connected or bound, try with both.
if (_endpoints.find (endpoint_uri_pair_) == _endpoints.end ()) {
tcp_address_t *tcp_addr = new (std::nothrow) tcp_address_t ();
alloc_assert (tcp_addr);
int rc = tcp_addr->resolve (tcp_address_, false, options.ipv6);
if (rc == 0) {
tcp_addr->to_string (endpoint_uri_pair_);
if (_endpoints.find (endpoint_uri_pair_) == _endpoints.end ()) {
rc = tcp_addr->resolve (tcp_address_, true, options.ipv6);
if (rc == 0) {
tcp_addr->to_string (endpoint_uri_pair_);
}
}
}
LIBZMQ_DELETE (tcp_addr);
}
return endpoint_uri_pair_;
}
void zmq::socket_base_t::add_endpoint (
const endpoint_uri_pair_t &endpoint_pair_, own_t *endpoint_, pipe_t *pipe_)
{
// Activate the session. Make it a child of this socket.
launch_child (endpoint_);
_endpoints.ZMQ_MAP_INSERT_OR_EMPLACE (endpoint_pair_.identifier (),
endpoint_pipe_t (endpoint_, pipe_));
if (pipe_ != NULL)
pipe_->set_endpoint_pair (endpoint_pair_);
}
int zmq::socket_base_t::term_endpoint (const char *endpoint_uri_)
{
scoped_optional_lock_t sync_lock (_thread_safe ? &_sync : NULL);
// Check whether the context hasn't been shut down yet.
if (unlikely (_ctx_terminated)) {
errno = ETERM;
return -1;
}
// Check whether endpoint address passed to the function is valid.
if (unlikely (!endpoint_uri_)) {
errno = EINVAL;
return -1;
}
    // Process pending commands, if any, since there could be pending unprocessed process_own()'s
    // (from launch_child (), for example) for the endpoint we are asked to terminate now.
const int rc = process_commands (0, false);
if (unlikely (rc != 0)) {
return -1;
}
// Parse endpoint_uri_ string.
std::string uri_protocol;
std::string uri_path;
if (parse_uri (endpoint_uri_, uri_protocol, uri_path)
|| check_protocol (uri_protocol)) {
return -1;
}
const std::string endpoint_uri_str = std::string (endpoint_uri_);
// Disconnect an inproc socket
if (uri_protocol == protocol_name::inproc) {
return unregister_endpoint (endpoint_uri_str, this) == 0
? 0
: _inprocs.erase_pipes (endpoint_uri_str);
}
const std::string resolved_endpoint_uri =
uri_protocol == protocol_name::tcp
? resolve_tcp_addr (endpoint_uri_str, uri_path.c_str ())
: endpoint_uri_str;
// Find the endpoints range (if any) corresponding to the endpoint_uri_pair_ string.
const std::pair<endpoints_t::iterator, endpoints_t::iterator> range =
_endpoints.equal_range (resolved_endpoint_uri);
if (range.first == range.second) {
errno = ENOENT;
return -1;
}
for (endpoints_t::iterator it = range.first; it != range.second; ++it) {
// If we have an associated pipe, terminate it.
if (it->second.second != NULL)
it->second.second->terminate (false);
term_child (it->second.first);
}
_endpoints.erase (range.first, range.second);
if (options.reconnect_stop & ZMQ_RECONNECT_STOP_AFTER_DISCONNECT) {
_disconnected = true;
}
return 0;
}
int zmq::socket_base_t::send (msg_t *msg_, int flags_)
{
scoped_optional_lock_t sync_lock (_thread_safe ? &_sync : NULL);
// Check whether the context hasn't been shut down yet.
if (unlikely (_ctx_terminated)) {
errno = ETERM;
return -1;
}
// Check whether message passed to the function is valid.
if (unlikely (!msg_ || !msg_->check ())) {
errno = EFAULT;
return -1;
}
// Process pending commands, if any.
int rc = process_commands (0, true);
if (unlikely (rc != 0)) {
return -1;
}
// Clear any user-visible flags that are set on the message.
msg_->reset_flags (msg_t::more);
// At this point we impose the flags on the message.
if (flags_ & ZMQ_SNDMORE)
msg_->set_flags (msg_t::more);
msg_->reset_metadata ();
// Try to send the message using method in each socket class
rc = xsend (msg_);
if (rc == 0) {
return 0;
}
// Special case for ZMQ_PUSH: -2 means pipe is dead while a
// multi-part send is in progress and can't be recovered, so drop
// silently when in blocking mode to keep backward compatibility.
if (unlikely (rc == -2)) {
if (!((flags_ & ZMQ_DONTWAIT) || options.sndtimeo == 0)) {
rc = msg_->close ();
errno_assert (rc == 0);
rc = msg_->init ();
errno_assert (rc == 0);
return 0;
}
}
if (unlikely (errno != EAGAIN)) {
return -1;
}
// In case of non-blocking send we'll simply propagate
// the error - including EAGAIN - up the stack.
if ((flags_ & ZMQ_DONTWAIT) || options.sndtimeo == 0) {
return -1;
}
// Compute the time when the timeout should occur.
// If the timeout is infinite, don't care.
int timeout = options.sndtimeo;
const uint64_t end = timeout < 0 ? 0 : (_clock.now_ms () + timeout);
// Oops, we couldn't send the message. Wait for the next
// command, process it and try to send the message again.
// If timeout is reached in the meantime, return EAGAIN.
while (true) {
if (unlikely (process_commands (timeout, false) != 0)) {
return -1;
}
rc = xsend (msg_);
if (rc == 0)
break;
if (unlikely (errno != EAGAIN)) {
return -1;
}
if (timeout > 0) {
timeout = static_cast<int> (end - _clock.now_ms ());
if (timeout <= 0) {
errno = EAGAIN;
return -1;
}
}
}
return 0;
}
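// Illustrative caller-side multipart receive loop built on top of recv ()
// (public API; a sketch assuming 'sock' is a valid socket handle):
//   int more;
//   size_t more_size = sizeof (more);
//   do {
//       zmq_msg_t part;
//       zmq_msg_init (&part);
//       zmq_msg_recv (&part, sock, 0);
//       zmq_getsockopt (sock, ZMQ_RCVMORE, &more, &more_size);
//       zmq_msg_close (&part);
//   } while (more);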
int zmq::socket_base_t::recv (msg_t *msg_, int flags_)
{
scoped_optional_lock_t sync_lock (_thread_safe ? &_sync : NULL);
// Check whether the context hasn't been shut down yet.
if (unlikely (_ctx_terminated)) {
errno = ETERM;
return -1;
}
// Check whether message passed to the function is valid.
if (unlikely (!msg_ || !msg_->check ())) {
errno = EFAULT;
return -1;
}
// Once every inbound_poll_rate messages check for signals and process
// incoming commands. This happens only if we are not polling altogether
// because there are messages available all the time. If poll occurs,
// ticks is set to zero and thus we avoid this code.
//
    // Note that 'recv' uses a different command throttling algorithm (the
    // one described above) from the one used by 'send'. This is because
    // counting ticks is more efficient than doing RDTSC all the time.
if (++_ticks == inbound_poll_rate) {
if (unlikely (process_commands (0, false) != 0)) {
return -1;
}
_ticks = 0;
}
// Get the message.
int rc = xrecv (msg_);
if (unlikely (rc != 0 && errno != EAGAIN)) {
return -1;
}
// If we have the message, return immediately.
if (rc == 0) {
extract_flags (msg_);
return 0;
}
// If the message cannot be fetched immediately, there are two scenarios.
// For non-blocking recv, commands are processed in case there's an
// activate_reader command already waiting in a command pipe.
    // If there is none, return EAGAIN.
if ((flags_ & ZMQ_DONTWAIT) || options.rcvtimeo == 0) {
if (unlikely (process_commands (0, false) != 0)) {
return -1;
}
_ticks = 0;
rc = xrecv (msg_);
if (rc < 0) {
return rc;
}
extract_flags (msg_);
return 0;
}
// Compute the time when the timeout should occur.
// If the timeout is infinite, don't care.
int timeout = options.rcvtimeo;
const uint64_t end = timeout < 0 ? 0 : (_clock.now_ms () + timeout);
// In blocking scenario, commands are processed over and over again until
// we are able to fetch a message.
bool block = (_ticks != 0);
while (true) {
if (unlikely (process_commands (block ? timeout : 0, false) != 0)) {
return -1;
}
rc = xrecv (msg_);
if (rc == 0) {
_ticks = 0;
break;
}
if (unlikely (errno != EAGAIN)) {
return -1;
}
block = true;
if (timeout > 0) {
timeout = static_cast<int> (end - _clock.now_ms ());
if (timeout <= 0) {
errno = EAGAIN;
return -1;
}
}
}
extract_flags (msg_);
return 0;
}
int zmq::socket_base_t::close ()
{
scoped_optional_lock_t sync_lock (_thread_safe ? &_sync : NULL);
// Remove all existing signalers for thread safe sockets
if (_thread_safe)
(static_cast<mailbox_safe_t *> (_mailbox))->clear_signalers ();
// Mark the socket as dead
_tag = 0xdeadbeef;
// Transfer the ownership of the socket from this application thread
// to the reaper thread which will take care of the rest of shutdown
// process.
send_reap (this);
return 0;
}
bool zmq::socket_base_t::has_in ()
{
return xhas_in ();
}
bool zmq::socket_base_t::has_out ()
{
return xhas_out ();
}
void zmq::socket_base_t::start_reaping (poller_t *poller_)
{
// Plug the socket to the reaper thread.
_poller = poller_;
fd_t fd;
if (!_thread_safe)
fd = (static_cast<mailbox_t *> (_mailbox))->get_fd ();
else {
scoped_optional_lock_t sync_lock (_thread_safe ? &_sync : NULL);
_reaper_signaler = new (std::nothrow) signaler_t ();
zmq_assert (_reaper_signaler);
// Add signaler to the safe mailbox
fd = _reaper_signaler->get_fd ();
(static_cast<mailbox_safe_t *> (_mailbox))
->add_signaler (_reaper_signaler);
        // Send a signal to make sure the reaper handles existing commands
_reaper_signaler->send ();
}
_handle = _poller->add_fd (fd, this);
_poller->set_pollin (_handle);
// Initialise the termination and check whether it can be deallocated
// immediately.
terminate ();
check_destroy ();
}
int zmq::socket_base_t::process_commands (int timeout_, bool throttle_)
{
if (timeout_ == 0) {
// If we are asked not to wait, check whether we haven't processed
// commands recently, so that we can throttle the new commands.
// Get the CPU's tick counter. If 0, the counter is not available.
const uint64_t tsc = zmq::clock_t::rdtsc ();
        // Optimised version of command processing - it doesn't have to check
        // for incoming commands each time. It does so only if a certain time
        // has elapsed since the last command processing. The command delay
        // varies depending on CPU speed: it's ~1ms on a 3GHz CPU, ~2ms on a
        // 1.5GHz CPU, etc. The optimisation makes sense only on platforms
        // where getting a timestamp is a very cheap operation (tens of
        // nanoseconds).
if (tsc && throttle_) {
            // Check whether the TSC hasn't jumped backwards (in case of
            // migration between CPU cores) and whether enough time has
            // elapsed since the last command processing. If the counter is
            // monotonic but the delay hasn't passed yet, do nothing.
if (tsc >= _last_tsc && tsc - _last_tsc <= max_command_delay)
return 0;
_last_tsc = tsc;
}
}
// Check whether there are any commands pending for this thread.
command_t cmd;
int rc = _mailbox->recv (&cmd, timeout_);
if (rc != 0 && errno == EINTR)
return -1;
// Process all available commands.
while (rc == 0 || errno == EINTR) {
if (rc == 0) {
cmd.destination->process_command (cmd);
}
rc = _mailbox->recv (&cmd, 0);
}
zmq_assert (errno == EAGAIN);
if (_ctx_terminated) {
errno = ETERM;
return -1;
}
return 0;
}
void zmq::socket_base_t::process_stop ()
{
    // Here, someone has called zmq_ctx_term while the socket was still alive.
// We'll remember the fact so that any blocking call is interrupted and any
// further attempt to use the socket will return ETERM. The user is still
// responsible for calling zmq_close on the socket though!
scoped_lock_t lock (_monitor_sync);
stop_monitor ();
_ctx_terminated = true;
}
void zmq::socket_base_t::process_bind (pipe_t *pipe_)
{
attach_pipe (pipe_);
}
void zmq::socket_base_t::process_term (int linger_)
{
// Unregister all inproc endpoints associated with this socket.
// Doing this we make sure that no new pipes from other sockets (inproc)
// will be initiated.
unregister_endpoints (this);
// Ask all attached pipes to terminate.
for (pipes_t::size_type i = 0, size = _pipes.size (); i != size; ++i) {
// Only inprocs might have a disconnect message set
_pipes[i]->send_disconnect_msg ();
_pipes[i]->terminate (false);
}
register_term_acks (static_cast<int> (_pipes.size ()));
// Continue the termination process immediately.
own_t::process_term (linger_);
}
void zmq::socket_base_t::process_term_endpoint (std::string *endpoint_)
{
term_endpoint (endpoint_->c_str ());
delete endpoint_;
}
void zmq::socket_base_t::process_pipe_stats_publish (
uint64_t outbound_queue_count_,
uint64_t inbound_queue_count_,
endpoint_uri_pair_t *endpoint_pair_)
{
uint64_t values[2] = {outbound_queue_count_, inbound_queue_count_};
event (*endpoint_pair_, values, 2, ZMQ_EVENT_PIPES_STATS);
delete endpoint_pair_;
}
/*
* There are 2 pipes per connection, and the inbound one _must_ be queried from
* the I/O thread. So ask the outbound pipe, in the application thread, to send
* a message (pipe_peer_stats) to its peer. The message will carry the outbound
* pipe stats and endpoint, and the reference to the socket object.
* The inbound pipe on the I/O thread will then add its own stats and endpoint,
* and write back a message to the socket object (pipe_stats_publish) which
* will raise an event with the data.
*/
int zmq::socket_base_t::query_pipes_stats ()
{
{
scoped_lock_t lock (_monitor_sync);
if (!(_monitor_events & ZMQ_EVENT_PIPES_STATS)) {
errno = EINVAL;
return -1;
}
}
if (_pipes.size () == 0) {
errno = EAGAIN;
return -1;
}
for (pipes_t::size_type i = 0, size = _pipes.size (); i != size; ++i) {
_pipes[i]->send_stats_to_peer (this);
}
return 0;
}
void zmq::socket_base_t::update_pipe_options (int option_)
{
if (option_ == ZMQ_SNDHWM || option_ == ZMQ_RCVHWM) {
for (pipes_t::size_type i = 0, size = _pipes.size (); i != size; ++i) {
_pipes[i]->set_hwms (options.rcvhwm, options.sndhwm);
_pipes[i]->send_hwms_to_peer (options.sndhwm, options.rcvhwm);
}
}
}
void zmq::socket_base_t::process_destroy ()
{
_destroyed = true;
}
int zmq::socket_base_t::xsetsockopt (int, const void *, size_t)
{
errno = EINVAL;
return -1;
}
int zmq::socket_base_t::xgetsockopt (int, void *, size_t *)
{
errno = EINVAL;
return -1;
}
bool zmq::socket_base_t::xhas_out ()
{
return false;
}
int zmq::socket_base_t::xsend (msg_t *)
{
errno = ENOTSUP;
return -1;
}
bool zmq::socket_base_t::xhas_in ()
{
return false;
}
int zmq::socket_base_t::xjoin (const char *group_)
{
LIBZMQ_UNUSED (group_);
errno = ENOTSUP;
return -1;
}
int zmq::socket_base_t::xleave (const char *group_)
{
LIBZMQ_UNUSED (group_);
errno = ENOTSUP;
return -1;
}
int zmq::socket_base_t::xrecv (msg_t *)
{
errno = ENOTSUP;
return -1;
}
void zmq::socket_base_t::xread_activated (pipe_t *)
{
zmq_assert (false);
}
void zmq::socket_base_t::xwrite_activated (pipe_t *)
{
zmq_assert (false);
}
void zmq::socket_base_t::xhiccuped (pipe_t *)
{
zmq_assert (false);
}
void zmq::socket_base_t::in_event ()
{
// This function is invoked only once the socket is running in the context
// of the reaper thread. Process any commands from other threads/sockets
// that may be available at the moment. Ultimately, the socket will
// be destroyed.
{
scoped_optional_lock_t sync_lock (_thread_safe ? &_sync : NULL);
// If the socket is thread safe we need to unsignal the reaper signaler
if (_thread_safe)
_reaper_signaler->recv ();
process_commands (0, false);
}
check_destroy ();
}
void zmq::socket_base_t::out_event ()
{
zmq_assert (false);
}
void zmq::socket_base_t::timer_event (int)
{
zmq_assert (false);
}
void zmq::socket_base_t::check_destroy ()
{
// If the object was already marked as destroyed, finish the deallocation.
if (_destroyed) {
// Remove the socket from the reaper's poller.
_poller->rm_fd (_handle);
// Remove the socket from the context.
destroy_socket (this);
// Notify the reaper about the fact.
send_reaped ();
// Deallocate.
own_t::process_destroy ();
}
}
void zmq::socket_base_t::read_activated (pipe_t *pipe_)
{
xread_activated (pipe_);
}
void zmq::socket_base_t::write_activated (pipe_t *pipe_)
{
xwrite_activated (pipe_);
}
void zmq::socket_base_t::hiccuped (pipe_t *pipe_)
{
if (options.immediate == 1)
pipe_->terminate (false);
else
// Notify derived sockets of the hiccup
xhiccuped (pipe_);
}
void zmq::socket_base_t::pipe_terminated (pipe_t *pipe_)
{
// Notify the specific socket type about the pipe termination.
xpipe_terminated (pipe_);
// Remove pipe from inproc pipes
_inprocs.erase_pipe (pipe_);
// Remove the pipe from the list of attached pipes and confirm its
// termination if we are already shutting down.
_pipes.erase (pipe_);
// Remove the pipe from _endpoints (set it to NULL).
const std::string &identifier = pipe_->get_endpoint_pair ().identifier ();
if (!identifier.empty ()) {
std::pair<endpoints_t::iterator, endpoints_t::iterator> range;
range = _endpoints.equal_range (identifier);
for (endpoints_t::iterator it = range.first; it != range.second; ++it) {
if (it->second.second == pipe_) {
it->second.second = NULL;
break;
}
}
}
if (is_terminating ())
unregister_term_ack ();
}
void zmq::socket_base_t::extract_flags (const msg_t *msg_)
{
// Test whether routing_id flag is valid for this socket type.
if (unlikely (msg_->flags () & msg_t::routing_id))
zmq_assert (options.recv_routing_id);
// Remove MORE flag.
_rcvmore = (msg_->flags () & msg_t::more) != 0;
}
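// Illustrative use of the monitor through the public API (the endpoint
// name below is an arbitrary example, not mandated by the library):
//   zmq_socket_monitor (sock, "inproc://monitor.sock", ZMQ_EVENT_ALL);
//   void *mon = zmq_socket (ctx, ZMQ_PAIR);
//   zmq_connect (mon, "inproc://monitor.sock");
//   // ...then read event frames from 'mon' in the format produced by
//   // monitor_event () below.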
int zmq::socket_base_t::monitor (const char *endpoint_,
uint64_t events_,
int event_version_,
int type_)
{
scoped_lock_t lock (_monitor_sync);
if (unlikely (_ctx_terminated)) {
errno = ETERM;
return -1;
}
// Event version 1 supports only first 16 events.
if (unlikely (event_version_ == 1 && events_ >> 16 != 0)) {
errno = EINVAL;
return -1;
}
// Support deregistering monitoring endpoints as well
if (endpoint_ == NULL) {
stop_monitor ();
return 0;
}
// Parse endpoint_uri_ string.
std::string protocol;
std::string address;
if (parse_uri (endpoint_, protocol, address) || check_protocol (protocol))
return -1;
// Event notification only supported over inproc://
if (protocol != protocol_name::inproc) {
errno = EPROTONOSUPPORT;
return -1;
}
    // Already monitoring. Stop the previous monitor before starting a new one.
if (_monitor_socket != NULL) {
stop_monitor (true);
}
    // Check if the specified socket type is supported. It must be one of
    // the one-way socket types that support the SNDMORE flag.
switch (type_) {
case ZMQ_PAIR:
break;
case ZMQ_PUB:
break;
case ZMQ_PUSH:
break;
default:
errno = EINVAL;
return -1;
}
// Register events to monitor
_monitor_events = events_;
options.monitor_event_version = event_version_;
// Create a monitor socket of the specified type.
_monitor_socket = zmq_socket (get_ctx (), type_);
if (_monitor_socket == NULL)
return -1;
// Never block context termination on pending event messages
int linger = 0;
int rc =
zmq_setsockopt (_monitor_socket, ZMQ_LINGER, &linger, sizeof (linger));
if (rc == -1)
stop_monitor (false);
// Spawn the monitor socket endpoint
rc = zmq_bind (_monitor_socket, endpoint_);
if (rc == -1)
stop_monitor (false);
return rc;
}
void zmq::socket_base_t::event_connected (
const endpoint_uri_pair_t &endpoint_uri_pair_, zmq::fd_t fd_)
{
uint64_t values[1] = {static_cast<uint64_t> (fd_)};
event (endpoint_uri_pair_, values, 1, ZMQ_EVENT_CONNECTED);
}
void zmq::socket_base_t::event_connect_delayed (
const endpoint_uri_pair_t &endpoint_uri_pair_, int err_)
{
uint64_t values[1] = {static_cast<uint64_t> (err_)};
event (endpoint_uri_pair_, values, 1, ZMQ_EVENT_CONNECT_DELAYED);
}
void zmq::socket_base_t::event_connect_retried (
const endpoint_uri_pair_t &endpoint_uri_pair_, int interval_)
{
uint64_t values[1] = {static_cast<uint64_t> (interval_)};
event (endpoint_uri_pair_, values, 1, ZMQ_EVENT_CONNECT_RETRIED);
}
void zmq::socket_base_t::event_listening (
const endpoint_uri_pair_t &endpoint_uri_pair_, zmq::fd_t fd_)
{
uint64_t values[1] = {static_cast<uint64_t> (fd_)};
event (endpoint_uri_pair_, values, 1, ZMQ_EVENT_LISTENING);
}
void zmq::socket_base_t::event_bind_failed (
const endpoint_uri_pair_t &endpoint_uri_pair_, int err_)
{
uint64_t values[1] = {static_cast<uint64_t> (err_)};
event (endpoint_uri_pair_, values, 1, ZMQ_EVENT_BIND_FAILED);
}
void zmq::socket_base_t::event_accepted (
const endpoint_uri_pair_t &endpoint_uri_pair_, zmq::fd_t fd_)
{
uint64_t values[1] = {static_cast<uint64_t> (fd_)};
event (endpoint_uri_pair_, values, 1, ZMQ_EVENT_ACCEPTED);
}
void zmq::socket_base_t::event_accept_failed (
const endpoint_uri_pair_t &endpoint_uri_pair_, int err_)
{
uint64_t values[1] = {static_cast<uint64_t> (err_)};
event (endpoint_uri_pair_, values, 1, ZMQ_EVENT_ACCEPT_FAILED);
}
void zmq::socket_base_t::event_closed (
const endpoint_uri_pair_t &endpoint_uri_pair_, zmq::fd_t fd_)
{
uint64_t values[1] = {static_cast<uint64_t> (fd_)};
event (endpoint_uri_pair_, values, 1, ZMQ_EVENT_CLOSED);
}
void zmq::socket_base_t::event_close_failed (
const endpoint_uri_pair_t &endpoint_uri_pair_, int err_)
{
uint64_t values[1] = {static_cast<uint64_t> (err_)};
event (endpoint_uri_pair_, values, 1, ZMQ_EVENT_CLOSE_FAILED);
}
void zmq::socket_base_t::event_disconnected (
const endpoint_uri_pair_t &endpoint_uri_pair_, zmq::fd_t fd_)
{
uint64_t values[1] = {static_cast<uint64_t> (fd_)};
event (endpoint_uri_pair_, values, 1, ZMQ_EVENT_DISCONNECTED);
}
void zmq::socket_base_t::event_handshake_failed_no_detail (
const endpoint_uri_pair_t &endpoint_uri_pair_, int err_)
{
uint64_t values[1] = {static_cast<uint64_t> (err_)};
event (endpoint_uri_pair_, values, 1, ZMQ_EVENT_HANDSHAKE_FAILED_NO_DETAIL);
}
void zmq::socket_base_t::event_handshake_failed_protocol (
const endpoint_uri_pair_t &endpoint_uri_pair_, int err_)
{
uint64_t values[1] = {static_cast<uint64_t> (err_)};
event (endpoint_uri_pair_, values, 1, ZMQ_EVENT_HANDSHAKE_FAILED_PROTOCOL);
}
void zmq::socket_base_t::event_handshake_failed_auth (
const endpoint_uri_pair_t &endpoint_uri_pair_, int err_)
{
uint64_t values[1] = {static_cast<uint64_t> (err_)};
event (endpoint_uri_pair_, values, 1, ZMQ_EVENT_HANDSHAKE_FAILED_AUTH);
}
void zmq::socket_base_t::event_handshake_succeeded (
const endpoint_uri_pair_t &endpoint_uri_pair_, int err_)
{
uint64_t values[1] = {static_cast<uint64_t> (err_)};
event (endpoint_uri_pair_, values, 1, ZMQ_EVENT_HANDSHAKE_SUCCEEDED);
}
void zmq::socket_base_t::event (const endpoint_uri_pair_t &endpoint_uri_pair_,
uint64_t values_[],
uint64_t values_count_,
uint64_t type_)
{
scoped_lock_t lock (_monitor_sync);
if (_monitor_events & type_) {
monitor_event (type_, values_, values_count_, endpoint_uri_pair_);
}
}
// Send a monitor event
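// Frame layout, as assembled below:
//   v1: [ event (16-bit) | value (32-bit) ] [ endpoint URI ]
//   v2: [ event (64-bit) ] [ value count (64-bit) ] [ value (64-bit) ]...
//       [ local endpoint URI ] [ remote endpoint URI ]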
void zmq::socket_base_t::monitor_event (
uint64_t event_,
const uint64_t values_[],
uint64_t values_count_,
const endpoint_uri_pair_t &endpoint_uri_pair_) const
{
// this is a private method which is only called from
// contexts where the _monitor_sync mutex has been locked before
if (_monitor_socket) {
zmq_msg_t msg;
switch (options.monitor_event_version) {
case 1: {
                // The API should not allow activating unsupported events
zmq_assert (event_ <= std::numeric_limits<uint16_t>::max ());
// v1 only allows one value
zmq_assert (values_count_ == 1);
zmq_assert (values_[0]
<= std::numeric_limits<uint32_t>::max ());
// Send event and value in first frame
const uint16_t event = static_cast<uint16_t> (event_);
const uint32_t value = static_cast<uint32_t> (values_[0]);
zmq_msg_init_size (&msg, sizeof (event) + sizeof (value));
uint8_t *data = static_cast<uint8_t *> (zmq_msg_data (&msg));
// Avoid dereferencing uint32_t on unaligned address
memcpy (data + 0, &event, sizeof (event));
memcpy (data + sizeof (event), &value, sizeof (value));
zmq_msg_send (&msg, _monitor_socket, ZMQ_SNDMORE);
const std::string &endpoint_uri =
endpoint_uri_pair_.identifier ();
// Send address in second frame
zmq_msg_init_size (&msg, endpoint_uri.size ());
memcpy (zmq_msg_data (&msg), endpoint_uri.c_str (),
endpoint_uri.size ());
zmq_msg_send (&msg, _monitor_socket, 0);
} break;
case 2: {
// Send event in first frame (64bit unsigned)
zmq_msg_init_size (&msg, sizeof (event_));
memcpy (zmq_msg_data (&msg), &event_, sizeof (event_));
zmq_msg_send (&msg, _monitor_socket, ZMQ_SNDMORE);
// Send number of values that will follow in second frame
zmq_msg_init_size (&msg, sizeof (values_count_));
memcpy (zmq_msg_data (&msg), &values_count_,
sizeof (values_count_));
zmq_msg_send (&msg, _monitor_socket, ZMQ_SNDMORE);
// Send values in third-Nth frames (64bit unsigned)
for (uint64_t i = 0; i < values_count_; ++i) {
zmq_msg_init_size (&msg, sizeof (values_[i]));
memcpy (zmq_msg_data (&msg), &values_[i],
sizeof (values_[i]));
zmq_msg_send (&msg, _monitor_socket, ZMQ_SNDMORE);
}
// Send local endpoint URI in second-to-last frame (string)
zmq_msg_init_size (&msg, endpoint_uri_pair_.local.size ());
memcpy (zmq_msg_data (&msg), endpoint_uri_pair_.local.c_str (),
endpoint_uri_pair_.local.size ());
zmq_msg_send (&msg, _monitor_socket, ZMQ_SNDMORE);
// Send remote endpoint URI in last frame (string)
zmq_msg_init_size (&msg, endpoint_uri_pair_.remote.size ());
memcpy (zmq_msg_data (&msg), endpoint_uri_pair_.remote.c_str (),
endpoint_uri_pair_.remote.size ());
zmq_msg_send (&msg, _monitor_socket, 0);
} break;
}
}
}
void zmq::socket_base_t::stop_monitor (bool send_monitor_stopped_event_)
{
// this is a private method which is only called from
// contexts where the _monitor_sync mutex has been locked before
if (_monitor_socket) {
if ((_monitor_events & ZMQ_EVENT_MONITOR_STOPPED)
&& send_monitor_stopped_event_) {
uint64_t values[1] = {0};
monitor_event (ZMQ_EVENT_MONITOR_STOPPED, values, 1,
endpoint_uri_pair_t ());
}
zmq_close (_monitor_socket);
_monitor_socket = NULL;
_monitor_events = 0;
}
}
bool zmq::socket_base_t::is_disconnected () const
{
return _disconnected;
}
zmq::routing_socket_base_t::routing_socket_base_t (class ctx_t *parent_,
uint32_t tid_,
int sid_) :
socket_base_t (parent_, tid_, sid_)
{
}
zmq::routing_socket_base_t::~routing_socket_base_t ()
{
zmq_assert (_out_pipes.empty ());
}
int zmq::routing_socket_base_t::xsetsockopt (int option_,
const void *optval_,
size_t optvallen_)
{
switch (option_) {
case ZMQ_CONNECT_ROUTING_ID:
// TODO why isn't it possible to set an empty connect_routing_id
// (which is the default value)
if (optval_ && optvallen_) {
_connect_routing_id.assign (static_cast<const char *> (optval_),
optvallen_);
return 0;
}
break;
}
errno = EINVAL;
return -1;
}
void zmq::routing_socket_base_t::xwrite_activated (pipe_t *pipe_)
{
const out_pipes_t::iterator end = _out_pipes.end ();
out_pipes_t::iterator it;
for (it = _out_pipes.begin (); it != end; ++it)
if (it->second.pipe == pipe_)
break;
zmq_assert (it != end);
zmq_assert (!it->second.active);
it->second.active = true;
}
std::string zmq::routing_socket_base_t::extract_connect_routing_id ()
{
std::string res = ZMQ_MOVE (_connect_routing_id);
_connect_routing_id.clear ();
return res;
}
bool zmq::routing_socket_base_t::connect_routing_id_is_set () const
{
return !_connect_routing_id.empty ();
}
void zmq::routing_socket_base_t::add_out_pipe (blob_t routing_id_,
pipe_t *pipe_)
{
// Add the record into output pipes lookup table
const out_pipe_t outpipe = {pipe_, true};
const bool ok =
_out_pipes.ZMQ_MAP_INSERT_OR_EMPLACE (ZMQ_MOVE (routing_id_), outpipe)
.second;
zmq_assert (ok);
}
bool zmq::routing_socket_base_t::has_out_pipe (const blob_t &routing_id_) const
{
return 0 != _out_pipes.count (routing_id_);
}
zmq::routing_socket_base_t::out_pipe_t *
zmq::routing_socket_base_t::lookup_out_pipe (const blob_t &routing_id_)
{
    // TODO we could probably avoid constructing a temporary blob_t to call this function
out_pipes_t::iterator it = _out_pipes.find (routing_id_);
return it == _out_pipes.end () ? NULL : &it->second;
}
const zmq::routing_socket_base_t::out_pipe_t *
zmq::routing_socket_base_t::lookup_out_pipe (const blob_t &routing_id_) const
{
    // TODO we could probably avoid constructing a temporary blob_t to call this function
const out_pipes_t::const_iterator it = _out_pipes.find (routing_id_);
return it == _out_pipes.end () ? NULL : &it->second;
}
void zmq::routing_socket_base_t::erase_out_pipe (const pipe_t *pipe_)
{
const size_t erased = _out_pipes.erase (pipe_->get_routing_id ());
zmq_assert (erased);
}
zmq::routing_socket_base_t::out_pipe_t
zmq::routing_socket_base_t::try_erase_out_pipe (const blob_t &routing_id_)
{
const out_pipes_t::iterator it = _out_pipes.find (routing_id_);
out_pipe_t res = {NULL, false};
if (it != _out_pipes.end ()) {
res = it->second;
_out_pipes.erase (it);
}
return res;
}
|
sophomore_public/libzmq
|
src/socket_base.cpp
|
C++
|
gpl-3.0
| 68,399 |
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_SOCKET_BASE_HPP_INCLUDED__
#define __ZMQ_SOCKET_BASE_HPP_INCLUDED__
#include <string>
#include <map>
#include <stdarg.h>
#include "own.hpp"
#include "array.hpp"
#include "blob.hpp"
#include "stdint.hpp"
#include "poller.hpp"
#include "i_poll_events.hpp"
#include "i_mailbox.hpp"
#include "clock.hpp"
#include "pipe.hpp"
#include "endpoint.hpp"
extern "C" {
void zmq_free_event (void *data_, void *hint_);
}
namespace zmq
{
class ctx_t;
class msg_t;
class pipe_t;
class socket_base_t : public own_t,
public array_item_t<>,
public i_poll_events,
public i_pipe_events
{
friend class reaper_t;
public:
// Returns false if object is not a socket.
bool check_tag () const;
// Returns whether the socket is thread-safe.
bool is_thread_safe () const;
// Create a socket of a specified type.
static socket_base_t *
create (int type_, zmq::ctx_t *parent_, uint32_t tid_, int sid_);
// Returns the mailbox associated with this socket.
i_mailbox *get_mailbox () const;
// Interrupt blocking call if the socket is stuck in one.
// This function can be called from a different thread!
void stop ();
// Interface for communication with the API layer.
int setsockopt (int option_, const void *optval_, size_t optvallen_);
int getsockopt (int option_, void *optval_, size_t *optvallen_);
int bind (const char *endpoint_uri_);
int connect (const char *endpoint_uri_);
int term_endpoint (const char *endpoint_uri_);
int send (zmq::msg_t *msg_, int flags_);
int recv (zmq::msg_t *msg_, int flags_);
void add_signaler (signaler_t *s_);
void remove_signaler (signaler_t *s_);
int close ();
// These functions are used by the polling mechanism to determine
// which events are to be reported from this socket.
bool has_in ();
bool has_out ();
// Joining and leaving groups
int join (const char *group_);
int leave (const char *group_);
    // Using this function, the reaper thread asks the socket to register
    // with its poller.
void start_reaping (poller_t *poller_);
// i_poll_events implementation. This interface is used when socket
// is handled by the poller in the reaper thread.
void in_event () ZMQ_FINAL;
void out_event () ZMQ_FINAL;
void timer_event (int id_) ZMQ_FINAL;
// i_pipe_events interface implementation.
void read_activated (pipe_t *pipe_) ZMQ_FINAL;
void write_activated (pipe_t *pipe_) ZMQ_FINAL;
void hiccuped (pipe_t *pipe_) ZMQ_FINAL;
void pipe_terminated (pipe_t *pipe_) ZMQ_FINAL;
void lock ();
void unlock ();
int monitor (const char *endpoint_,
uint64_t events_,
int event_version_,
int type_);
void event_connected (const endpoint_uri_pair_t &endpoint_uri_pair_,
zmq::fd_t fd_);
void event_connect_delayed (const endpoint_uri_pair_t &endpoint_uri_pair_,
int err_);
void event_connect_retried (const endpoint_uri_pair_t &endpoint_uri_pair_,
int interval_);
void event_listening (const endpoint_uri_pair_t &endpoint_uri_pair_,
zmq::fd_t fd_);
void event_bind_failed (const endpoint_uri_pair_t &endpoint_uri_pair_,
int err_);
void event_accepted (const endpoint_uri_pair_t &endpoint_uri_pair_,
zmq::fd_t fd_);
void event_accept_failed (const endpoint_uri_pair_t &endpoint_uri_pair_,
int err_);
void event_closed (const endpoint_uri_pair_t &endpoint_uri_pair_,
zmq::fd_t fd_);
void event_close_failed (const endpoint_uri_pair_t &endpoint_uri_pair_,
int err_);
void event_disconnected (const endpoint_uri_pair_t &endpoint_uri_pair_,
zmq::fd_t fd_);
void event_handshake_failed_no_detail (
const endpoint_uri_pair_t &endpoint_uri_pair_, int err_);
void event_handshake_failed_protocol (
const endpoint_uri_pair_t &endpoint_uri_pair_, int err_);
void
event_handshake_failed_auth (const endpoint_uri_pair_t &endpoint_uri_pair_,
int err_);
void
event_handshake_succeeded (const endpoint_uri_pair_t &endpoint_uri_pair_,
int err_);
// Query the state of a specific peer. The default implementation
// always returns an ENOTSUP error.
virtual int get_peer_state (const void *routing_id_,
size_t routing_id_size_) const;
// Request for pipes statistics - will generate a ZMQ_EVENT_PIPES_STATS
// after gathering the data asynchronously. Requires event monitoring to
// be enabled.
int query_pipes_stats ();
bool is_disconnected () const;
protected:
socket_base_t (zmq::ctx_t *parent_,
uint32_t tid_,
int sid_,
bool thread_safe_ = false);
~socket_base_t () ZMQ_OVERRIDE;
// Concrete algorithms for the x- methods are to be defined by
// individual socket types.
virtual void xattach_pipe (zmq::pipe_t *pipe_,
bool subscribe_to_all_ = false,
bool locally_initiated_ = false) = 0;
// The default implementation assumes there are no specific socket
    // options for the particular socket type. If there are, override this
    // method.
virtual int
xsetsockopt (int option_, const void *optval_, size_t optvallen_);
// The default implementation assumes there are no specific socket
    // options for the particular socket type. If there are, override this
    // method.
virtual int xgetsockopt (int option_, void *optval_, size_t *optvallen_);
// The default implementation assumes that send is not supported.
virtual bool xhas_out ();
virtual int xsend (zmq::msg_t *msg_);
    // The default implementation assumes that recv is not supported.
virtual bool xhas_in ();
virtual int xrecv (zmq::msg_t *msg_);
// i_pipe_events will be forwarded to these functions.
virtual void xread_activated (pipe_t *pipe_);
virtual void xwrite_activated (pipe_t *pipe_);
virtual void xhiccuped (pipe_t *pipe_);
virtual void xpipe_terminated (pipe_t *pipe_) = 0;
    // The default implementation assumes that join and leave are not supported.
virtual int xjoin (const char *group_);
virtual int xleave (const char *group_);
// Delay actual destruction of the socket.
void process_destroy () ZMQ_FINAL;
int connect_internal (const char *endpoint_uri_);
// Mutex for synchronize access to the socket in thread safe mode
mutex_t _sync;
private:
// test if event should be sent and then dispatch it
void event (const endpoint_uri_pair_t &endpoint_uri_pair_,
uint64_t values_[],
uint64_t values_count_,
uint64_t type_);
// Socket event data dispatch
void monitor_event (uint64_t event_,
const uint64_t values_[],
uint64_t values_count_,
const endpoint_uri_pair_t &endpoint_uri_pair_) const;
// Monitor socket cleanup
void stop_monitor (bool send_monitor_stopped_event_ = true);
// Creates new endpoint ID and adds the endpoint to the map.
void add_endpoint (const endpoint_uri_pair_t &endpoint_pair_,
own_t *endpoint_,
pipe_t *pipe_);
// Map of open endpoints.
typedef std::pair<own_t *, pipe_t *> endpoint_pipe_t;
typedef std::multimap<std::string, endpoint_pipe_t> endpoints_t;
endpoints_t _endpoints;
// Map of open inproc endpoints.
class inprocs_t
{
public:
void emplace (const char *endpoint_uri_, pipe_t *pipe_);
int erase_pipes (const std::string &endpoint_uri_str_);
void erase_pipe (const pipe_t *pipe_);
private:
typedef std::multimap<std::string, pipe_t *> map_t;
map_t _inprocs;
};
inprocs_t _inprocs;
// To be called after processing commands or invoking any command
// handlers explicitly. If required, it will deallocate the socket.
void check_destroy ();
// Moves the flags from the message to local variables,
// to be later retrieved by getsockopt.
void extract_flags (const msg_t *msg_);
// Used to check whether the object is a socket.
uint32_t _tag;
// If true, associated context was already terminated.
bool _ctx_terminated;
// If true, object should have been already destroyed. However,
// destruction is delayed while we unwind the stack to the point
// where it doesn't intersect the object being destroyed.
bool _destroyed;
// Parse URI string.
static int
parse_uri (const char *uri_, std::string &protocol_, std::string &path_);
// Check whether transport protocol, as specified in connect or
// bind, is available and compatible with the socket type.
int check_protocol (const std::string &protocol_) const;
// Register the pipe with this socket.
void attach_pipe (zmq::pipe_t *pipe_,
bool subscribe_to_all_ = false,
bool locally_initiated_ = false);
// Processes commands sent to this socket (if any). If timeout is -1,
// returns only after at least one command was processed.
// If throttle argument is true, commands are processed at most once
// in a predefined time period.
int process_commands (int timeout_, bool throttle_);
// Handlers for incoming commands.
void process_stop () ZMQ_FINAL;
void process_bind (zmq::pipe_t *pipe_) ZMQ_FINAL;
void
process_pipe_stats_publish (uint64_t outbound_queue_count_,
uint64_t inbound_queue_count_,
endpoint_uri_pair_t *endpoint_pair_) ZMQ_FINAL;
void process_term (int linger_) ZMQ_FINAL;
void process_term_endpoint (std::string *endpoint_) ZMQ_FINAL;
void update_pipe_options (int option_);
std::string resolve_tcp_addr (std::string endpoint_uri_,
const char *tcp_address_);
// Socket's mailbox object.
i_mailbox *_mailbox;
// List of attached pipes.
typedef array_t<pipe_t, 3> pipes_t;
pipes_t _pipes;
// Reaper's poller and handle of this socket within it.
poller_t *_poller;
poller_t::handle_t _handle;
// Timestamp of when commands were processed the last time.
uint64_t _last_tsc;
// Number of messages received since last command processing.
int _ticks;
// True if the last message received had MORE flag set.
bool _rcvmore;
// Improves efficiency of time measurement.
clock_t _clock;
// Monitor socket;
void *_monitor_socket;
// Bitmask of events being monitored
int64_t _monitor_events;
// Last socket endpoint resolved URI
std::string _last_endpoint;
// Indicate if the socket is thread safe
const bool _thread_safe;
// Signaler to be used in the reaping stage
signaler_t *_reaper_signaler;
// Mutex to synchronize access to the monitor Pair socket
mutex_t _monitor_sync;
ZMQ_NON_COPYABLE_NOR_MOVABLE (socket_base_t)
    // Flag marking that a disconnect action has occurred
bool _disconnected;
};
class routing_socket_base_t : public socket_base_t
{
protected:
routing_socket_base_t (class ctx_t *parent_, uint32_t tid_, int sid_);
~routing_socket_base_t () ZMQ_OVERRIDE;
// methods from socket_base_t
int xsetsockopt (int option_,
const void *optval_,
size_t optvallen_) ZMQ_OVERRIDE;
void xwrite_activated (pipe_t *pipe_) ZMQ_FINAL;
// own methods
std::string extract_connect_routing_id ();
bool connect_routing_id_is_set () const;
struct out_pipe_t
{
pipe_t *pipe;
bool active;
};
void add_out_pipe (blob_t routing_id_, pipe_t *pipe_);
bool has_out_pipe (const blob_t &routing_id_) const;
out_pipe_t *lookup_out_pipe (const blob_t &routing_id_);
const out_pipe_t *lookup_out_pipe (const blob_t &routing_id_) const;
void erase_out_pipe (const pipe_t *pipe_);
out_pipe_t try_erase_out_pipe (const blob_t &routing_id_);
template <typename Func> bool any_of_out_pipes (Func func_)
{
bool res = false;
for (out_pipes_t::iterator it = _out_pipes.begin (),
end = _out_pipes.end ();
it != end && !res; ++it) {
res |= func_ (*it->second.pipe);
}
return res;
}
private:
// Outbound pipes indexed by the peer IDs.
typedef std::map<blob_t, out_pipe_t> out_pipes_t;
out_pipes_t _out_pipes;
// Next assigned name on a zmq_connect() call used by ROUTER and STREAM socket types
std::string _connect_routing_id;
};
}
#endif
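// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original source: consuming the events
// that monitor () and the event_* () helpers declared above publish. The
// public entry point is zmq_socket_monitor (); each event arrives on an
// inproc PAIR socket as two frames, a 2-byte event id plus a 4-byte value,
// followed by the endpoint string. The inproc endpoint name is an assumption
// chosen for the example.
// ---------------------------------------------------------------------------
#include <zmq.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void monitor_example (void *ctx, void *watched_socket)
{
    zmq_socket_monitor (watched_socket, "inproc://monitor-example",
                        ZMQ_EVENT_ALL);
    void *mon = zmq_socket (ctx, ZMQ_PAIR);
    zmq_connect (mon, "inproc://monitor-example");

    //  First frame: event id (16 bits) followed by an event-specific value.
    zmq_msg_t frame;
    zmq_msg_init (&frame);
    zmq_msg_recv (&frame, mon, 0);
    uint16_t event;
    memcpy (&event, zmq_msg_data (&frame), sizeof event);
    zmq_msg_close (&frame);

    //  Second frame: the affected endpoint as a string.
    zmq_msg_init (&frame);
    zmq_msg_recv (&frame, mon, 0);
    printf ("event %u on %.*s\n", (unsigned) event,
            (int) zmq_msg_size (&frame), (char *) zmq_msg_data (&frame));
    zmq_msg_close (&frame);
    zmq_close (mon);
}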
| sophomore_public/libzmq | src/socket_base.hpp | C++ | gpl-3.0 | 13,208 |
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include "socket_poller.hpp"
#include "err.hpp"
#include "polling_util.hpp"
#include "macros.hpp"
#include <limits.h>
static bool is_thread_safe (const zmq::socket_base_t &socket_)
{
// do not use getsockopt here, since that would fail during context termination
return socket_.is_thread_safe ();
}
// compare elements to value
template <class It, class T, class Pred>
static It find_if2 (It b_, It e_, const T &value, Pred pred)
{
for (; b_ != e_; ++b_) {
if (pred (*b_, value)) {
break;
}
}
return b_;
}
zmq::socket_poller_t::socket_poller_t () :
_tag (0xCAFEBABE),
_signaler (NULL)
#if defined ZMQ_POLL_BASED_ON_POLL
,
_pollfds (NULL)
#elif defined ZMQ_POLL_BASED_ON_SELECT
,
_max_fd (0)
#endif
{
rebuild ();
}
zmq::socket_poller_t::~socket_poller_t ()
{
// Mark the socket_poller as dead
_tag = 0xdeadbeef;
for (items_t::iterator it = _items.begin (), end = _items.end (); it != end;
++it) {
        // TODO shouldn't this be zmq_assert (it->socket->check_tag ()) instead?
if (it->socket && it->socket->check_tag ()
&& is_thread_safe (*it->socket)) {
it->socket->remove_signaler (_signaler);
}
}
if (_signaler != NULL) {
LIBZMQ_DELETE (_signaler);
}
#if defined ZMQ_POLL_BASED_ON_POLL
if (_pollfds) {
free (_pollfds);
_pollfds = NULL;
}
#endif
}
bool zmq::socket_poller_t::check_tag () const
{
return _tag == 0xCAFEBABE;
}
int zmq::socket_poller_t::signaler_fd (fd_t *fd_) const
{
if (_signaler) {
*fd_ = _signaler->get_fd ();
return 0;
}
// Only thread-safe socket types are guaranteed to have a signaler.
errno = EINVAL;
return -1;
}
int zmq::socket_poller_t::add (socket_base_t *socket_,
void *user_data_,
short events_)
{
if (find_if2 (_items.begin (), _items.end (), socket_, &is_socket)
!= _items.end ()) {
errno = EINVAL;
return -1;
}
if (is_thread_safe (*socket_)) {
if (_signaler == NULL) {
_signaler = new (std::nothrow) signaler_t ();
if (!_signaler) {
errno = ENOMEM;
return -1;
}
if (!_signaler->valid ()) {
delete _signaler;
_signaler = NULL;
errno = EMFILE;
return -1;
}
}
socket_->add_signaler (_signaler);
}
const item_t item = {socket_, 0, user_data_, events_
#if defined ZMQ_POLL_BASED_ON_POLL
,
-1
#endif
};
try {
_items.push_back (item);
}
catch (const std::bad_alloc &) {
errno = ENOMEM;
return -1;
}
_need_rebuild = true;
return 0;
}
int zmq::socket_poller_t::add_fd (fd_t fd_, void *user_data_, short events_)
{
if (find_if2 (_items.begin (), _items.end (), fd_, &is_fd)
!= _items.end ()) {
errno = EINVAL;
return -1;
}
const item_t item = {NULL, fd_, user_data_, events_
#if defined ZMQ_POLL_BASED_ON_POLL
,
-1
#endif
};
try {
_items.push_back (item);
}
catch (const std::bad_alloc &) {
errno = ENOMEM;
return -1;
}
_need_rebuild = true;
return 0;
}
int zmq::socket_poller_t::modify (const socket_base_t *socket_, short events_)
{
const items_t::iterator it =
find_if2 (_items.begin (), _items.end (), socket_, &is_socket);
if (it == _items.end ()) {
errno = EINVAL;
return -1;
}
it->events = events_;
_need_rebuild = true;
return 0;
}
int zmq::socket_poller_t::modify_fd (fd_t fd_, short events_)
{
const items_t::iterator it =
find_if2 (_items.begin (), _items.end (), fd_, &is_fd);
if (it == _items.end ()) {
errno = EINVAL;
return -1;
}
it->events = events_;
_need_rebuild = true;
return 0;
}
int zmq::socket_poller_t::remove (socket_base_t *socket_)
{
const items_t::iterator it =
find_if2 (_items.begin (), _items.end (), socket_, &is_socket);
if (it == _items.end ()) {
errno = EINVAL;
return -1;
}
_items.erase (it);
_need_rebuild = true;
if (is_thread_safe (*socket_)) {
socket_->remove_signaler (_signaler);
}
return 0;
}
int zmq::socket_poller_t::remove_fd (fd_t fd_)
{
const items_t::iterator it =
find_if2 (_items.begin (), _items.end (), fd_, &is_fd);
if (it == _items.end ()) {
errno = EINVAL;
return -1;
}
_items.erase (it);
_need_rebuild = true;
return 0;
}
int zmq::socket_poller_t::rebuild ()
{
_use_signaler = false;
_pollset_size = 0;
_need_rebuild = false;
#if defined ZMQ_POLL_BASED_ON_POLL
if (_pollfds) {
free (_pollfds);
_pollfds = NULL;
}
for (items_t::iterator it = _items.begin (), end = _items.end (); it != end;
++it) {
if (it->events) {
if (it->socket && is_thread_safe (*it->socket)) {
if (!_use_signaler) {
_use_signaler = true;
_pollset_size++;
}
} else
_pollset_size++;
}
}
if (_pollset_size == 0)
return 0;
_pollfds = static_cast<pollfd *> (malloc (_pollset_size * sizeof (pollfd)));
if (!_pollfds) {
errno = ENOMEM;
_need_rebuild = true;
return -1;
}
int item_nbr = 0;
if (_use_signaler) {
item_nbr = 1;
_pollfds[0].fd = _signaler->get_fd ();
_pollfds[0].events = POLLIN;
}
for (items_t::iterator it = _items.begin (), end = _items.end (); it != end;
++it) {
if (it->events) {
if (it->socket) {
if (!is_thread_safe (*it->socket)) {
size_t fd_size = sizeof (zmq::fd_t);
const int rc = it->socket->getsockopt (
ZMQ_FD, &_pollfds[item_nbr].fd, &fd_size);
zmq_assert (rc == 0);
_pollfds[item_nbr].events = POLLIN;
item_nbr++;
}
} else {
_pollfds[item_nbr].fd = it->fd;
_pollfds[item_nbr].events =
(it->events & ZMQ_POLLIN ? POLLIN : 0)
| (it->events & ZMQ_POLLOUT ? POLLOUT : 0)
| (it->events & ZMQ_POLLPRI ? POLLPRI : 0);
it->pollfd_index = item_nbr;
item_nbr++;
}
}
}
#elif defined ZMQ_POLL_BASED_ON_SELECT
// Ensure we do not attempt to select () on more than FD_SETSIZE
// file descriptors.
zmq_assert (_items.size () <= FD_SETSIZE);
_pollset_in.resize (_items.size ());
_pollset_out.resize (_items.size ());
_pollset_err.resize (_items.size ());
FD_ZERO (_pollset_in.get ());
FD_ZERO (_pollset_out.get ());
FD_ZERO (_pollset_err.get ());
for (items_t::iterator it = _items.begin (), end = _items.end (); it != end;
++it) {
if (it->socket && is_thread_safe (*it->socket) && it->events) {
_use_signaler = true;
FD_SET (_signaler->get_fd (), _pollset_in.get ());
_pollset_size = 1;
break;
}
}
_max_fd = 0;
// Build the fd_sets for passing to select ().
for (items_t::iterator it = _items.begin (), end = _items.end (); it != end;
++it) {
if (it->events) {
// If the poll item is a 0MQ socket we are interested in input on the
// notification file descriptor retrieved by the ZMQ_FD socket option.
if (it->socket) {
if (!is_thread_safe (*it->socket)) {
zmq::fd_t notify_fd;
size_t fd_size = sizeof (zmq::fd_t);
int rc =
it->socket->getsockopt (ZMQ_FD, ¬ify_fd, &fd_size);
zmq_assert (rc == 0);
FD_SET (notify_fd, _pollset_in.get ());
if (_max_fd < notify_fd)
_max_fd = notify_fd;
_pollset_size++;
}
}
// Else, the poll item is a raw file descriptor. Convert the poll item
// events to the appropriate fd_sets.
else {
if (it->events & ZMQ_POLLIN)
FD_SET (it->fd, _pollset_in.get ());
if (it->events & ZMQ_POLLOUT)
FD_SET (it->fd, _pollset_out.get ());
if (it->events & ZMQ_POLLERR)
FD_SET (it->fd, _pollset_err.get ());
if (_max_fd < it->fd)
_max_fd = it->fd;
_pollset_size++;
}
}
}
#endif
return 0;
}
void zmq::socket_poller_t::zero_trail_events (
zmq::socket_poller_t::event_t *events_, int n_events_, int found_)
{
for (int i = found_; i < n_events_; ++i) {
events_[i].socket = NULL;
events_[i].fd = zmq::retired_fd;
events_[i].user_data = NULL;
events_[i].events = 0;
}
}
#if defined ZMQ_POLL_BASED_ON_POLL
int zmq::socket_poller_t::check_events (zmq::socket_poller_t::event_t *events_,
int n_events_)
#elif defined ZMQ_POLL_BASED_ON_SELECT
int zmq::socket_poller_t::check_events (zmq::socket_poller_t::event_t *events_,
int n_events_,
fd_set &inset_,
fd_set &outset_,
fd_set &errset_)
#endif
{
int found = 0;
for (items_t::iterator it = _items.begin (), end = _items.end ();
it != end && found < n_events_; ++it) {
// The poll item is a 0MQ socket. Retrieve pending events
// using the ZMQ_EVENTS socket option.
if (it->socket) {
size_t events_size = sizeof (uint32_t);
uint32_t events;
if (it->socket->getsockopt (ZMQ_EVENTS, &events, &events_size)
== -1) {
return -1;
}
if (it->events & events) {
events_[found].socket = it->socket;
events_[found].fd = zmq::retired_fd;
events_[found].user_data = it->user_data;
events_[found].events = it->events & events;
++found;
}
}
// Else, the poll item is a raw file descriptor, simply convert
// the events to zmq_pollitem_t-style format.
else if (it->events) {
#if defined ZMQ_POLL_BASED_ON_POLL
zmq_assert (it->pollfd_index >= 0);
const short revents = _pollfds[it->pollfd_index].revents;
short events = 0;
if (revents & POLLIN)
events |= ZMQ_POLLIN;
if (revents & POLLOUT)
events |= ZMQ_POLLOUT;
if (revents & POLLPRI)
events |= ZMQ_POLLPRI;
if (revents & ~(POLLIN | POLLOUT | POLLPRI))
events |= ZMQ_POLLERR;
#elif defined ZMQ_POLL_BASED_ON_SELECT
short events = 0;
if (FD_ISSET (it->fd, &inset_))
events |= ZMQ_POLLIN;
if (FD_ISSET (it->fd, &outset_))
events |= ZMQ_POLLOUT;
if (FD_ISSET (it->fd, &errset_))
events |= ZMQ_POLLERR;
#endif //POLL_SELECT
if (events) {
events_[found].socket = NULL;
events_[found].fd = it->fd;
events_[found].user_data = it->user_data;
events_[found].events = events;
++found;
}
}
}
return found;
}
// Return 0 if the timeout has expired, otherwise 1
int zmq::socket_poller_t::adjust_timeout (zmq::clock_t &clock_,
long timeout_,
uint64_t &now_,
uint64_t &end_,
bool &first_pass_)
{
// If socket_poller_t::timeout is zero, exit immediately whether there
// are events or not.
if (timeout_ == 0)
return 0;
// At this point we are meant to wait for events but there are none.
// If timeout is infinite we can just loop until we get some events.
if (timeout_ < 0) {
if (first_pass_)
first_pass_ = false;
return 1;
}
    // The timeout is finite and there are no events. In the first pass
    // we get a timestamp of when the polling began. (We assume that the
    // first pass took negligible time.) We also compute the time when
    // the polling should time out.
now_ = clock_.now_ms ();
if (first_pass_) {
end_ = now_ + timeout_;
first_pass_ = false;
return 1;
}
    // Find out whether the timeout has expired.
if (now_ >= end_)
return 0;
return 1;
}
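// Worked example of how adjust_timeout () drives the loop in wait () below
// (illustrative, not part of the original source), for a call with
// timeout_ = 250 ms and no events pending:
//   pass 1:  poll with a zero timeout (probe); nothing is ready, so
//            adjust_timeout () records end_ = now_ + 250 and returns 1.
//   pass 2+: poll with min (end_ - now_, INT_MAX); while now_ < end_ the
//            function keeps returning 1 and the loop continues.
//   final:   once now_ >= end_ it returns 0, the loop breaks and wait ()
//            fails with errno set to EAGAIN.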
int zmq::socket_poller_t::wait (zmq::socket_poller_t::event_t *events_,
int n_events_,
long timeout_)
{
if (_items.empty () && timeout_ < 0) {
errno = EFAULT;
return -1;
}
if (_need_rebuild) {
const int rc = rebuild ();
if (rc == -1)
return -1;
}
if (unlikely (_pollset_size == 0)) {
if (timeout_ < 0) {
// Fail instead of trying to sleep forever
errno = EFAULT;
return -1;
}
// We'll report an error (timed out) as if the list was non-empty and
// no event occurred within the specified timeout. Otherwise the caller
// needs to check the return value AND the event to avoid using the
// nullified event data.
errno = EAGAIN;
if (timeout_ == 0)
return -1;
#if defined ZMQ_HAVE_WINDOWS
Sleep (timeout_ > 0 ? timeout_ : INFINITE);
return -1;
#elif defined ZMQ_HAVE_ANDROID
usleep (timeout_ * 1000);
return -1;
#elif defined ZMQ_HAVE_OSX
usleep (timeout_ * 1000);
errno = EAGAIN;
return -1;
#elif defined ZMQ_HAVE_VXWORKS
struct timespec ns_;
ns_.tv_sec = timeout_ / 1000;
ns_.tv_nsec = timeout_ % 1000 * 1000000;
nanosleep (&ns_, 0);
return -1;
#else
usleep (timeout_ * 1000);
return -1;
#endif
}
#if defined ZMQ_POLL_BASED_ON_POLL
zmq::clock_t clock;
uint64_t now = 0;
uint64_t end = 0;
bool first_pass = true;
while (true) {
// Compute the timeout for the subsequent poll.
int timeout;
if (first_pass)
timeout = 0;
else if (timeout_ < 0)
timeout = -1;
else
timeout =
static_cast<int> (std::min<uint64_t> (end - now, INT_MAX));
// Wait for events.
const int rc = poll (_pollfds, _pollset_size, timeout);
if (rc == -1 && errno == EINTR) {
return -1;
}
errno_assert (rc >= 0);
// Receive the signal from pollfd
if (_use_signaler && _pollfds[0].revents & POLLIN)
_signaler->recv ();
// Check for the events.
const int found = check_events (events_, n_events_);
if (found) {
if (found > 0)
zero_trail_events (events_, n_events_, found);
return found;
}
// Adjust timeout or break
if (adjust_timeout (clock, timeout_, now, end, first_pass) == 0)
break;
}
errno = EAGAIN;
return -1;
#elif defined ZMQ_POLL_BASED_ON_SELECT
zmq::clock_t clock;
uint64_t now = 0;
uint64_t end = 0;
bool first_pass = true;
optimized_fd_set_t inset (_pollset_size);
optimized_fd_set_t outset (_pollset_size);
optimized_fd_set_t errset (_pollset_size);
while (true) {
// Compute the timeout for the subsequent poll.
timeval timeout;
timeval *ptimeout;
if (first_pass) {
timeout.tv_sec = 0;
timeout.tv_usec = 0;
ptimeout = &timeout;
} else if (timeout_ < 0)
ptimeout = NULL;
else {
timeout.tv_sec = static_cast<long> ((end - now) / 1000);
timeout.tv_usec = static_cast<long> ((end - now) % 1000 * 1000);
ptimeout = &timeout;
}
// Wait for events. Ignore interrupts if there's infinite timeout.
memcpy (inset.get (), _pollset_in.get (),
valid_pollset_bytes (*_pollset_in.get ()));
memcpy (outset.get (), _pollset_out.get (),
valid_pollset_bytes (*_pollset_out.get ()));
memcpy (errset.get (), _pollset_err.get (),
valid_pollset_bytes (*_pollset_err.get ()));
const int rc = select (static_cast<int> (_max_fd + 1), inset.get (),
outset.get (), errset.get (), ptimeout);
#if defined ZMQ_HAVE_WINDOWS
if (unlikely (rc == SOCKET_ERROR)) {
errno = wsa_error_to_errno (WSAGetLastError ());
wsa_assert (errno == ENOTSOCK);
return -1;
}
#else
if (unlikely (rc == -1)) {
errno_assert (errno == EINTR || errno == EBADF);
return -1;
}
#endif
if (_use_signaler && FD_ISSET (_signaler->get_fd (), inset.get ()))
_signaler->recv ();
// Check for the events.
const int found = check_events (events_, n_events_, *inset.get (),
*outset.get (), *errset.get ());
if (found) {
if (found > 0)
zero_trail_events (events_, n_events_, found);
return found;
}
// Adjust timeout or break
if (adjust_timeout (clock, timeout_, now, end, first_pass) == 0)
break;
}
errno = EAGAIN;
return -1;
#else
// Exotic platforms that support neither poll() nor select().
errno = ENOTSUP;
return -1;
#endif
}
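// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original source: socket_poller_t backs
// the zmq_poller_* functions of the DRAFT API, so this only works against a
// libzmq built with draft support. The timeout value is an assumption chosen
// for the example.
// ---------------------------------------------------------------------------
#define ZMQ_BUILD_DRAFT_API
#include <zmq.h>
#include <stdio.h>

static void poller_example (void *socket_a, void *socket_b)
{
    void *poller = zmq_poller_new ();
    zmq_poller_add (poller, socket_a, NULL, ZMQ_POLLIN);
    zmq_poller_add (poller, socket_b, NULL, ZMQ_POLLIN | ZMQ_POLLOUT);

    //  Waits up to 500 ms; returns 0 and fills 'event' when a socket is
    //  ready, or -1 with errno == EAGAIN on timeout (mirroring wait () above).
    zmq_poller_event_t event;
    if (zmq_poller_wait (poller, &event, 500) == 0)
        printf ("socket %p ready, events 0x%x\n", event.socket,
                (unsigned) event.events);

    zmq_poller_destroy (&poller);
}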
| sophomore_public/libzmq | src/socket_poller.cpp | C++ | gpl-3.0 | 18,391 |
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_SOCKET_POLLER_HPP_INCLUDED__
#define __ZMQ_SOCKET_POLLER_HPP_INCLUDED__
#include "poller.hpp"
#if defined ZMQ_POLL_BASED_ON_POLL && !defined ZMQ_HAVE_WINDOWS
#include <poll.h>
#endif
#if defined ZMQ_HAVE_WINDOWS
#include "windows.hpp"
#elif defined ZMQ_HAVE_VXWORKS
#include <unistd.h>
#include <sys/time.h>
#include <strings.h>
#else
#include <unistd.h>
#endif
#include <vector>
#include "socket_base.hpp"
#include "signaler.hpp"
#include "polling_util.hpp"
namespace zmq
{
class socket_poller_t
{
public:
socket_poller_t ();
~socket_poller_t ();
typedef zmq_poller_event_t event_t;
int add (socket_base_t *socket_, void *user_data_, short events_);
int modify (const socket_base_t *socket_, short events_);
int remove (socket_base_t *socket_);
int add_fd (fd_t fd_, void *user_data_, short events_);
int modify_fd (fd_t fd_, short events_);
int remove_fd (fd_t fd_);
// Returns the signaler's fd if there is one, otherwise errors.
int signaler_fd (fd_t *fd_) const;
int wait (event_t *events_, int n_events_, long timeout_);
int size () const { return static_cast<int> (_items.size ()); };
    // Returns false if the object is not a socket_poller.
bool check_tag () const;
private:
typedef struct item_t
{
socket_base_t *socket;
fd_t fd;
void *user_data;
short events;
#if defined ZMQ_POLL_BASED_ON_POLL
int pollfd_index;
#endif
} item_t;
static void zero_trail_events (zmq::socket_poller_t::event_t *events_,
int n_events_,
int found_);
#if defined ZMQ_POLL_BASED_ON_POLL
int check_events (zmq::socket_poller_t::event_t *events_, int n_events_);
#elif defined ZMQ_POLL_BASED_ON_SELECT
int check_events (zmq::socket_poller_t::event_t *events_,
int n_events_,
fd_set &inset_,
fd_set &outset_,
fd_set &errset_);
#endif
static int adjust_timeout (zmq::clock_t &clock_,
long timeout_,
uint64_t &now_,
uint64_t &end_,
bool &first_pass_);
static bool is_socket (const item_t &item, const socket_base_t *socket_)
{
return item.socket == socket_;
}
static bool is_fd (const item_t &item, fd_t fd_)
{
return !item.socket && item.fd == fd_;
}
int rebuild ();
// Used to check whether the object is a socket_poller.
uint32_t _tag;
// Signaler used for thread safe sockets polling
signaler_t *_signaler;
// List of sockets
typedef std::vector<item_t> items_t;
items_t _items;
    // Does the pollset need rebuilding?
bool _need_rebuild;
// Should the signaler be used for the thread safe polling?
bool _use_signaler;
// Size of the pollset
int _pollset_size;
#if defined ZMQ_POLL_BASED_ON_POLL
pollfd *_pollfds;
#elif defined ZMQ_POLL_BASED_ON_SELECT
resizable_optimized_fd_set_t _pollset_in;
resizable_optimized_fd_set_t _pollset_out;
resizable_optimized_fd_set_t _pollset_err;
zmq::fd_t _max_fd;
#endif
ZMQ_NON_COPYABLE_NOR_MOVABLE (socket_poller_t)
};
}
#endif
| sophomore_public/libzmq | src/socket_poller.hpp | C++ | gpl-3.0 | 3,355 |
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include <sys/types.h>
#include "err.hpp"
#include "socks.hpp"
#include "tcp.hpp"
#include "blob.hpp"
#ifndef ZMQ_HAVE_WINDOWS
#include <sys/socket.h>
#include <netinet/in.h>
#include <netdb.h>
#endif
zmq::socks_greeting_t::socks_greeting_t (uint8_t method_) : num_methods (1)
{
methods[0] = method_;
}
zmq::socks_greeting_t::socks_greeting_t (const uint8_t *methods_,
uint8_t num_methods_) :
num_methods (num_methods_)
{
for (uint8_t i = 0; i < num_methods_; i++)
methods[i] = methods_[i];
}
zmq::socks_greeting_encoder_t::socks_greeting_encoder_t () :
_bytes_encoded (0), _bytes_written (0)
{
}
void zmq::socks_greeting_encoder_t::encode (const socks_greeting_t &greeting_)
{
uint8_t *ptr = _buf;
*ptr++ = 0x05;
*ptr++ = static_cast<uint8_t> (greeting_.num_methods);
for (uint8_t i = 0; i < greeting_.num_methods; i++)
*ptr++ = greeting_.methods[i];
_bytes_encoded = 2 + greeting_.num_methods;
_bytes_written = 0;
}
int zmq::socks_greeting_encoder_t::output (fd_t fd_)
{
const int rc =
tcp_write (fd_, _buf + _bytes_written, _bytes_encoded - _bytes_written);
if (rc > 0)
_bytes_written += static_cast<size_t> (rc);
return rc;
}
bool zmq::socks_greeting_encoder_t::has_pending_data () const
{
return _bytes_written < _bytes_encoded;
}
void zmq::socks_greeting_encoder_t::reset ()
{
_bytes_encoded = _bytes_written = 0;
}
zmq::socks_choice_t::socks_choice_t (unsigned char method_) : method (method_)
{
}
zmq::socks_choice_decoder_t::socks_choice_decoder_t () : _bytes_read (0)
{
}
int zmq::socks_choice_decoder_t::input (fd_t fd_)
{
zmq_assert (_bytes_read < 2);
const int rc = tcp_read (fd_, _buf + _bytes_read, 2 - _bytes_read);
if (rc > 0) {
_bytes_read += static_cast<size_t> (rc);
if (_buf[0] != 0x05)
return -1;
}
return rc;
}
bool zmq::socks_choice_decoder_t::message_ready () const
{
return _bytes_read == 2;
}
zmq::socks_choice_t zmq::socks_choice_decoder_t::decode ()
{
zmq_assert (message_ready ());
return socks_choice_t (_buf[1]);
}
void zmq::socks_choice_decoder_t::reset ()
{
_bytes_read = 0;
}
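// Illustrative wire-format example (RFC 1928), not part of the original
// source: the bytes produced by the greeting encoder and consumed by the
// choice decoder above. Offering "no authentication" only:
static const unsigned char socks_greeting_no_auth[] = {0x05, 0x01, 0x00};
// Offering both "no authentication" (0x00) and username/password (0x02):
static const unsigned char socks_greeting_with_auth[] = {0x05, 0x02, 0x00,
                                                         0x02};
// Server choice selecting "no authentication required":
static const unsigned char socks_choice_no_auth[] = {0x05, 0x00};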
zmq::socks_basic_auth_request_t::socks_basic_auth_request_t (
const std::string &username_, const std::string &password_) :
username (username_), password (password_)
{
zmq_assert (username_.size () <= UINT8_MAX);
zmq_assert (password_.size () <= UINT8_MAX);
}
zmq::socks_basic_auth_request_encoder_t::socks_basic_auth_request_encoder_t () :
_bytes_encoded (0), _bytes_written (0)
{
}
void zmq::socks_basic_auth_request_encoder_t::encode (
const socks_basic_auth_request_t &req_)
{
unsigned char *ptr = _buf;
*ptr++ = 0x01;
*ptr++ = static_cast<unsigned char> (req_.username.size ());
memcpy (ptr, req_.username.c_str (), req_.username.size ());
ptr += req_.username.size ();
*ptr++ = static_cast<unsigned char> (req_.password.size ());
memcpy (ptr, req_.password.c_str (), req_.password.size ());
ptr += req_.password.size ();
_bytes_encoded = ptr - _buf;
_bytes_written = 0;
}
int zmq::socks_basic_auth_request_encoder_t::output (fd_t fd_)
{
const int rc =
tcp_write (fd_, _buf + _bytes_written, _bytes_encoded - _bytes_written);
if (rc > 0)
_bytes_written += static_cast<size_t> (rc);
return rc;
}
bool zmq::socks_basic_auth_request_encoder_t::has_pending_data () const
{
return _bytes_written < _bytes_encoded;
}
void zmq::socks_basic_auth_request_encoder_t::reset ()
{
_bytes_encoded = _bytes_written = 0;
}
zmq::socks_auth_response_t::socks_auth_response_t (uint8_t response_code_) :
response_code (response_code_)
{
}
zmq::socks_auth_response_decoder_t::socks_auth_response_decoder_t () :
_bytes_read (0)
{
}
int zmq::socks_auth_response_decoder_t::input (fd_t fd_)
{
zmq_assert (_bytes_read < 2);
const int rc = tcp_read (fd_, _buf + _bytes_read, 2 - _bytes_read);
if (rc > 0) {
_bytes_read += static_cast<size_t> (rc);
if (_buf[0] != 0x01)
return -1;
}
return rc;
}
bool zmq::socks_auth_response_decoder_t::message_ready () const
{
return _bytes_read == 2;
}
zmq::socks_auth_response_t zmq::socks_auth_response_decoder_t::decode ()
{
zmq_assert (message_ready ());
return socks_auth_response_t (_buf[1]);
}
void zmq::socks_auth_response_decoder_t::reset ()
{
_bytes_read = 0;
}
zmq::socks_request_t::socks_request_t (uint8_t command_,
std::string hostname_,
uint16_t port_) :
command (command_), hostname (ZMQ_MOVE (hostname_)), port (port_)
{
zmq_assert (hostname.size () <= UINT8_MAX);
}
zmq::socks_request_encoder_t::socks_request_encoder_t () :
_bytes_encoded (0), _bytes_written (0)
{
}
void zmq::socks_request_encoder_t::encode (const socks_request_t &req_)
{
zmq_assert (req_.hostname.size () <= UINT8_MAX);
unsigned char *ptr = _buf;
*ptr++ = 0x05;
*ptr++ = req_.command;
*ptr++ = 0x00;
#if defined ZMQ_HAVE_OPENVMS && defined __ia64 && __INITIAL_POINTER_SIZE == 64
__addrinfo64 hints, *res = NULL;
#else
addrinfo hints, *res = NULL;
#endif
memset (&hints, 0, sizeof hints);
// Suppress potential DNS lookups.
hints.ai_flags = AI_NUMERICHOST;
const int rc = getaddrinfo (req_.hostname.c_str (), NULL, &hints, &res);
if (rc == 0 && res->ai_family == AF_INET) {
const struct sockaddr_in *sockaddr_in =
reinterpret_cast<const struct sockaddr_in *> (res->ai_addr);
*ptr++ = 0x01;
memcpy (ptr, &sockaddr_in->sin_addr, 4);
ptr += 4;
} else if (rc == 0 && res->ai_family == AF_INET6) {
const struct sockaddr_in6 *sockaddr_in6 =
reinterpret_cast<const struct sockaddr_in6 *> (res->ai_addr);
*ptr++ = 0x04;
memcpy (ptr, &sockaddr_in6->sin6_addr, 16);
ptr += 16;
} else {
*ptr++ = 0x03;
*ptr++ = static_cast<unsigned char> (req_.hostname.size ());
memcpy (ptr, req_.hostname.c_str (), req_.hostname.size ());
ptr += req_.hostname.size ();
}
if (rc == 0)
freeaddrinfo (res);
*ptr++ = req_.port / 256;
*ptr++ = req_.port % 256;
_bytes_encoded = ptr - _buf;
_bytes_written = 0;
}
int zmq::socks_request_encoder_t::output (fd_t fd_)
{
const int rc =
tcp_write (fd_, _buf + _bytes_written, _bytes_encoded - _bytes_written);
if (rc > 0)
_bytes_written += static_cast<size_t> (rc);
return rc;
}
bool zmq::socks_request_encoder_t::has_pending_data () const
{
return _bytes_written < _bytes_encoded;
}
void zmq::socks_request_encoder_t::reset ()
{
_bytes_encoded = _bytes_written = 0;
}
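// Illustrative wire-format example (RFC 1928), not part of the original
// source: the CONNECT request the encoder above produces for
// "example.com":8080 when the hostname does not parse as a numeric address
// (ATYP 0x03, length-prefixed domain name).
static const unsigned char socks_connect_example_com_8080[] = {
  0x05,                                                  // protocol version
  0x01,                                                  // command: CONNECT
  0x00,                                                  // reserved
  0x03,                                                  // address type: name
  0x0B,                                                  // name length: 11
  'e', 'x', 'a', 'm', 'p', 'l', 'e', '.', 'c', 'o', 'm', // the hostname
  0x1F, 0x90                                             // port 8080, big-endian
};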
zmq::socks_response_t::socks_response_t (uint8_t response_code_,
const std::string &address_,
uint16_t port_) :
response_code (response_code_), address (address_), port (port_)
{
}
zmq::socks_response_decoder_t::socks_response_decoder_t () : _bytes_read (0)
{
}
int zmq::socks_response_decoder_t::input (fd_t fd_)
{
size_t n = 0;
if (_bytes_read < 5)
n = 5 - _bytes_read;
else {
const uint8_t atyp = _buf[3];
zmq_assert (atyp == 0x01 || atyp == 0x03 || atyp == 0x04);
if (atyp == 0x01)
n = 3 + 2;
else if (atyp == 0x03)
n = _buf[4] + 2;
else if (atyp == 0x04)
n = 15 + 2;
}
const int rc = tcp_read (fd_, _buf + _bytes_read, n);
if (rc > 0) {
_bytes_read += static_cast<size_t> (rc);
if (_buf[0] != 0x05)
return -1;
if (_bytes_read >= 2)
if (_buf[1] > 0x08)
return -1;
if (_bytes_read >= 3)
if (_buf[2] != 0x00)
return -1;
if (_bytes_read >= 4) {
const uint8_t atyp = _buf[3];
if (atyp != 0x01 && atyp != 0x03 && atyp != 0x04)
return -1;
}
}
return rc;
}
bool zmq::socks_response_decoder_t::message_ready () const
{
if (_bytes_read < 4)
return false;
const uint8_t atyp = _buf[3];
zmq_assert (atyp == 0x01 || atyp == 0x03 || atyp == 0x04);
if (atyp == 0x01)
return _bytes_read == 10;
if (atyp == 0x03)
return _bytes_read > 4 && _bytes_read == 4 + 1 + _buf[4] + 2u;
return _bytes_read == 22;
}
zmq::socks_response_t zmq::socks_response_decoder_t::decode ()
{
zmq_assert (message_ready ());
return socks_response_t (_buf[1], "", 0);
}
void zmq::socks_response_decoder_t::reset ()
{
_bytes_read = 0;
}
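// Illustrative wire-format example (RFC 1928), not part of the original
// source: a successful server reply with an IPv4 bound address, the 10-byte
// case that message_ready () above accepts for ATYP 0x01.
static const unsigned char socks_reply_success_ipv4[] = {
  0x05,                   // protocol version
  0x00,                   // reply code: succeeded
  0x00,                   // reserved
  0x01,                   // address type: IPv4
  0x7F, 0x00, 0x00, 0x01, // bound address 127.0.0.1
  0x04, 0x38              // bound port 1080, big-endian
};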
| sophomore_public/libzmq | src/socks.cpp | C++ | gpl-3.0 | 8,826 |
/* SPDX-License-Identifier: MPL-2.0 */
#ifndef __ZMQ_SOCKS_HPP_INCLUDED__
#define __ZMQ_SOCKS_HPP_INCLUDED__
#include <string>
#include "fd.hpp"
#include "stdint.hpp"
namespace zmq
{
struct socks_greeting_t
{
socks_greeting_t (uint8_t method_);
socks_greeting_t (const uint8_t *methods_, uint8_t num_methods_);
uint8_t methods[UINT8_MAX];
const size_t num_methods;
};
class socks_greeting_encoder_t
{
public:
socks_greeting_encoder_t ();
void encode (const socks_greeting_t &greeting_);
int output (fd_t fd_);
bool has_pending_data () const;
void reset ();
private:
size_t _bytes_encoded;
size_t _bytes_written;
uint8_t _buf[2 + UINT8_MAX];
};
struct socks_choice_t
{
socks_choice_t (uint8_t method_);
uint8_t method;
};
class socks_choice_decoder_t
{
public:
socks_choice_decoder_t ();
int input (fd_t fd_);
bool message_ready () const;
socks_choice_t decode ();
void reset ();
private:
unsigned char _buf[2];
size_t _bytes_read;
};
struct socks_basic_auth_request_t
{
socks_basic_auth_request_t (const std::string &username_,
const std::string &password_);
const std::string username;
const std::string password;
};
class socks_basic_auth_request_encoder_t
{
public:
socks_basic_auth_request_encoder_t ();
void encode (const socks_basic_auth_request_t &req_);
int output (fd_t fd_);
bool has_pending_data () const;
void reset ();
private:
size_t _bytes_encoded;
size_t _bytes_written;
uint8_t _buf[1 + 1 + UINT8_MAX + 1 + UINT8_MAX];
};
struct socks_auth_response_t
{
socks_auth_response_t (uint8_t response_code_);
uint8_t response_code;
};
class socks_auth_response_decoder_t
{
public:
socks_auth_response_decoder_t ();
int input (fd_t fd_);
bool message_ready () const;
socks_auth_response_t decode ();
void reset ();
private:
int8_t _buf[2];
size_t _bytes_read;
};
struct socks_request_t
{
socks_request_t (uint8_t command_, std::string hostname_, uint16_t port_);
const uint8_t command;
const std::string hostname;
const uint16_t port;
};
class socks_request_encoder_t
{
public:
socks_request_encoder_t ();
void encode (const socks_request_t &req_);
int output (fd_t fd_);
bool has_pending_data () const;
void reset ();
private:
size_t _bytes_encoded;
size_t _bytes_written;
uint8_t _buf[4 + UINT8_MAX + 1 + 2];
};
struct socks_response_t
{
socks_response_t (uint8_t response_code_,
const std::string &address_,
uint16_t port_);
uint8_t response_code;
std::string address;
uint16_t port;
};
class socks_response_decoder_t
{
public:
socks_response_decoder_t ();
int input (fd_t fd_);
bool message_ready () const;
socks_response_t decode ();
void reset ();
private:
int8_t _buf[4 + UINT8_MAX + 1 + 2];
size_t _bytes_read;
};
}
#endif
| sophomore_public/libzmq | src/socks.hpp | C++ | gpl-3.0 | 3,021 |
/* SPDX-License-Identifier: MPL-2.0 */
#include "precompiled.hpp"
#include <new>
#include <string>
#include "macros.hpp"
#include "socks_connecter.hpp"
#include "random.hpp"
#include "err.hpp"
#include "ip.hpp"
#include "tcp.hpp"
#include "address.hpp"
#include "tcp_address.hpp"
#include "session_base.hpp"
#include "socks.hpp"
#ifndef ZMQ_HAVE_WINDOWS
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#if defined ZMQ_HAVE_VXWORKS
#include <sockLib.h>
#endif
#endif
zmq::socks_connecter_t::socks_connecter_t (class io_thread_t *io_thread_,
class session_base_t *session_,
const options_t &options_,
address_t *addr_,
address_t *proxy_addr_,
bool delayed_start_) :
stream_connecter_base_t (
io_thread_, session_, options_, addr_, delayed_start_),
_proxy_addr (proxy_addr_),
_auth_method (socks_no_auth_required),
_status (unplugged)
{
zmq_assert (_addr->protocol == protocol_name::tcp);
_proxy_addr->to_string (_endpoint);
}
zmq::socks_connecter_t::~socks_connecter_t ()
{
LIBZMQ_DELETE (_proxy_addr);
}
void zmq::socks_connecter_t::set_auth_method_none ()
{
_auth_method = socks_no_auth_required;
_auth_username.clear ();
_auth_password.clear ();
}
void zmq::socks_connecter_t::set_auth_method_basic (
const std::string &username_, const std::string &password_)
{
_auth_method = socks_basic_auth;
_auth_username = username_;
_auth_password = password_;
}
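// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original source: how an application
// ends up in this connecter. Setting ZMQ_SOCKS_PROXY makes the subsequent TCP
// connect go through the proxy; the username/password options are only used
// where the installed libzmq defines ZMQ_SOCKS_USERNAME/ZMQ_SOCKS_PASSWORD
// (hence the guard). Addresses and credentials are assumptions.
// ---------------------------------------------------------------------------
#include <zmq.h>
#include <string.h>

static void socks_proxy_example (void *ctx)
{
    void *req = zmq_socket (ctx, ZMQ_REQ);
    const char *proxy = "127.0.0.1:1080";
    zmq_setsockopt (req, ZMQ_SOCKS_PROXY, proxy, strlen (proxy));
#if defined ZMQ_SOCKS_USERNAME && defined ZMQ_SOCKS_PASSWORD
    zmq_setsockopt (req, ZMQ_SOCKS_USERNAME, "user", 4);
    zmq_setsockopt (req, ZMQ_SOCKS_PASSWORD, "secret", 6);
#endif
    //  The connecter above now performs the SOCKS5 handshake before ZMTP.
    zmq_connect (req, "tcp://example.com:5555");
    zmq_close (req);
}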
void zmq::socks_connecter_t::in_event ()
{
int expected_status = -1;
zmq_assert (_status != unplugged);
if (_status == waiting_for_choice) {
int rc = _choice_decoder.input (_s);
if (rc == 0 || rc == -1)
error ();
else if (_choice_decoder.message_ready ()) {
const socks_choice_t choice = _choice_decoder.decode ();
rc = process_server_response (choice);
if (rc == -1)
error ();
else {
if (choice.method == socks_basic_auth)
expected_status = sending_basic_auth_request;
else
expected_status = sending_request;
}
}
} else if (_status == waiting_for_auth_response) {
int rc = _auth_response_decoder.input (_s);
if (rc == 0 || rc == -1)
error ();
else if (_auth_response_decoder.message_ready ()) {
const socks_auth_response_t auth_response =
_auth_response_decoder.decode ();
rc = process_server_response (auth_response);
if (rc == -1)
error ();
else {
expected_status = sending_request;
}
}
} else if (_status == waiting_for_response) {
int rc = _response_decoder.input (_s);
if (rc == 0 || rc == -1)
error ();
else if (_response_decoder.message_ready ()) {
const socks_response_t response = _response_decoder.decode ();
rc = process_server_response (response);
if (rc == -1)
error ();
else {
rm_handle ();
create_engine (
_s, get_socket_name<tcp_address_t> (_s, socket_end_local));
_s = -1;
_status = unplugged;
}
}
} else
error ();
if (expected_status == sending_basic_auth_request) {
_basic_auth_request_encoder.encode (
socks_basic_auth_request_t (_auth_username, _auth_password));
reset_pollin (_handle);
set_pollout (_handle);
_status = sending_basic_auth_request;
} else if (expected_status == sending_request) {
std::string hostname;
uint16_t port = 0;
if (parse_address (_addr->address, hostname, port) == -1)
error ();
else {
_request_encoder.encode (socks_request_t (1, hostname, port));
reset_pollin (_handle);
set_pollout (_handle);
_status = sending_request;
}
}
}
void zmq::socks_connecter_t::out_event ()
{
zmq_assert (
_status == waiting_for_proxy_connection || _status == sending_greeting
|| _status == sending_basic_auth_request || _status == sending_request);
if (_status == waiting_for_proxy_connection) {
const int rc = static_cast<int> (check_proxy_connection ());
if (rc == -1)
error ();
else {
_greeting_encoder.encode (socks_greeting_t (_auth_method));
_status = sending_greeting;
}
} else if (_status == sending_greeting) {
zmq_assert (_greeting_encoder.has_pending_data ());
const int rc = _greeting_encoder.output (_s);
if (rc == -1 || rc == 0)
error ();
else if (!_greeting_encoder.has_pending_data ()) {
reset_pollout (_handle);
set_pollin (_handle);
_status = waiting_for_choice;
}
} else if (_status == sending_basic_auth_request) {
zmq_assert (_basic_auth_request_encoder.has_pending_data ());
const int rc = _basic_auth_request_encoder.output (_s);
if (rc == -1 || rc == 0)
error ();
else if (!_basic_auth_request_encoder.has_pending_data ()) {
reset_pollout (_handle);
set_pollin (_handle);
_status = waiting_for_auth_response;
}
} else {
zmq_assert (_request_encoder.has_pending_data ());
const int rc = _request_encoder.output (_s);
if (rc == -1 || rc == 0)
error ();
else if (!_request_encoder.has_pending_data ()) {
reset_pollout (_handle);
set_pollin (_handle);
_status = waiting_for_response;
}
}
}
void zmq::socks_connecter_t::start_connecting ()
{
zmq_assert (_status == unplugged);
// Open the connecting socket.
const int rc = connect_to_proxy ();
// Connect may succeed in synchronous manner.
if (rc == 0) {
_handle = add_fd (_s);
set_pollout (_handle);
_status = sending_greeting;
}
// Connection establishment may be delayed. Poll for its completion.
else if (errno == EINPROGRESS) {
_handle = add_fd (_s);
set_pollout (_handle);
_status = waiting_for_proxy_connection;
_socket->event_connect_delayed (
make_unconnected_connect_endpoint_pair (_endpoint), zmq_errno ());
}
// Handle any other error condition by eventual reconnect.
else {
if (_s != retired_fd)
close ();
add_reconnect_timer ();
}
}
int zmq::socks_connecter_t::process_server_response (
const socks_choice_t &response_)
{
return response_.method == socks_no_auth_required
|| response_.method == socks_basic_auth
? 0
: -1;
}
int zmq::socks_connecter_t::process_server_response (
const socks_response_t &response_)
{
return response_.response_code == 0 ? 0 : -1;
}
int zmq::socks_connecter_t::process_server_response (
const socks_auth_response_t &response_)
{
return response_.response_code == 0 ? 0 : -1;
}
void zmq::socks_connecter_t::error ()
{
rm_fd (_handle);
close ();
_greeting_encoder.reset ();
_choice_decoder.reset ();
_basic_auth_request_encoder.reset ();
_auth_response_decoder.reset ();
_request_encoder.reset ();
_response_decoder.reset ();
_status = unplugged;
add_reconnect_timer ();
}
int zmq::socks_connecter_t::connect_to_proxy ()
{
zmq_assert (_s == retired_fd);
// Resolve the address
if (_proxy_addr->resolved.tcp_addr != NULL) {
LIBZMQ_DELETE (_proxy_addr->resolved.tcp_addr);
}
_proxy_addr->resolved.tcp_addr = new (std::nothrow) tcp_address_t ();
alloc_assert (_proxy_addr->resolved.tcp_addr);
    // Automatic fallback to ipv4 is disabled here since this was the existing
    // behaviour; however, I don't see a real reason for it. Maybe this can
    // be changed to true (and then the parameter can be removed entirely).
_s = tcp_open_socket (_proxy_addr->address.c_str (), options, false, false,
_proxy_addr->resolved.tcp_addr);
if (_s == retired_fd) {
// TODO we should emit some event in this case!
LIBZMQ_DELETE (_proxy_addr->resolved.tcp_addr);
return -1;
}
zmq_assert (_proxy_addr->resolved.tcp_addr != NULL);
// Set the socket to non-blocking mode so that we get async connect().
unblock_socket (_s);
const tcp_address_t *const tcp_addr = _proxy_addr->resolved.tcp_addr;
int rc;
// Set a source address for conversations
if (tcp_addr->has_src_addr ()) {
#if defined ZMQ_HAVE_VXWORKS
rc = ::bind (_s, (sockaddr *) tcp_addr->src_addr (),
tcp_addr->src_addrlen ());
#else
rc = ::bind (_s, tcp_addr->src_addr (), tcp_addr->src_addrlen ());
#endif
if (rc == -1) {
close ();
return -1;
}
}
// Connect to the remote peer.
#if defined ZMQ_HAVE_VXWORKS
rc = ::connect (_s, (sockaddr *) tcp_addr->addr (), tcp_addr->addrlen ());
#else
rc = ::connect (_s, tcp_addr->addr (), tcp_addr->addrlen ());
#endif
// Connect was successful immediately.
if (rc == 0)
return 0;
// Translate error codes indicating asynchronous connect has been
// launched to a uniform EINPROGRESS.
#ifdef ZMQ_HAVE_WINDOWS
const int last_error = WSAGetLastError ();
if (last_error == WSAEINPROGRESS || last_error == WSAEWOULDBLOCK)
errno = EINPROGRESS;
else {
errno = wsa_error_to_errno (last_error);
close ();
}
#else
if (errno == EINTR)
errno = EINPROGRESS;
#endif
return -1;
}
zmq::fd_t zmq::socks_connecter_t::check_proxy_connection () const
{
// Async connect has finished. Check whether an error occurred
int err = 0;
#if defined ZMQ_HAVE_HPUX || defined ZMQ_HAVE_VXWORKS
int len = sizeof err;
#else
socklen_t len = sizeof err;
#endif
int rc = getsockopt (_s, SOL_SOCKET, SO_ERROR,
reinterpret_cast<char *> (&err), &len);
// Assert if the error was caused by 0MQ bug.
// Networking problems are OK. No need to assert.
#ifdef ZMQ_HAVE_WINDOWS
zmq_assert (rc == 0);
if (err != 0) {
wsa_assert (err == WSAECONNREFUSED || err == WSAETIMEDOUT
|| err == WSAECONNABORTED || err == WSAEHOSTUNREACH
|| err == WSAENETUNREACH || err == WSAENETDOWN
|| err == WSAEACCES || err == WSAEINVAL
|| err == WSAEADDRINUSE);
return -1;
}
#else
// Following code should handle both Berkeley-derived socket
// implementations and Solaris.
if (rc == -1)
err = errno;
if (err != 0) {
errno = err;
errno_assert (errno == ECONNREFUSED || errno == ECONNRESET
|| errno == ETIMEDOUT || errno == EHOSTUNREACH
|| errno == ENETUNREACH || errno == ENETDOWN
|| errno == EINVAL);
return -1;
}
#endif
rc = tune_tcp_socket (_s);
rc = rc
| tune_tcp_keepalives (
_s, options.tcp_keepalive, options.tcp_keepalive_cnt,
options.tcp_keepalive_idle, options.tcp_keepalive_intvl);
if (rc != 0)
return -1;
return 0;
}
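// Minimal sketch of the same pattern outside libzmq (POSIX only, illustrative
// and not part of the original source): after a non-blocking connect ()
// reports the socket writable, SO_ERROR says whether it actually succeeded.
#include <sys/socket.h>
#include <errno.h>

static int async_connect_result (int fd)
{
    int err = 0;
    socklen_t len = sizeof err;
    if (getsockopt (fd, SOL_SOCKET, SO_ERROR, &err, &len) == -1)
        err = errno; //  Solaris-style reporting through errno
    return err;      //  0 on success, otherwise the connect () error code
}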
int zmq::socks_connecter_t::parse_address (const std::string &address_,
std::string &hostname_,
uint16_t &port_)
{
    // Find the ':' at the end that separates the address from the port number.
const size_t idx = address_.rfind (':');
if (idx == std::string::npos) {
errno = EINVAL;
return -1;
}
// Extract hostname
if (idx < 2 || address_[0] != '[' || address_[idx - 1] != ']')
hostname_ = address_.substr (0, idx);
else
hostname_ = address_.substr (1, idx - 2);
// Separate the hostname/port.
const std::string port_str = address_.substr (idx + 1);
// Parse the port number (0 is not a valid port).
port_ = static_cast<uint16_t> (atoi (port_str.c_str ()));
if (port_ == 0) {
errno = EINVAL;
return -1;
}
return 0;
}
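// Examples of inputs accepted by parse_address () above (illustrative, not
// part of the original source):
//   "127.0.0.1:1080"   -> hostname_ = "127.0.0.1",   port_ = 1080
//   "proxy.local:1080" -> hostname_ = "proxy.local", port_ = 1080
//   "[::1]:1080"       -> hostname_ = "::1",         port_ = 1080
//   "localhost"        -> -1 with errno = EINVAL (no ':' separator)
//   "localhost:0"      -> -1 with errno = EINVAL (0 is not a valid port)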
| sophomore_public/libzmq | src/socks_connecter.cpp | C++ | gpl-3.0 | 12,566 |