Possible performance improvements when sending HTTP requests using boost::beast


I am building a low-latency project where sending HTTP requests as fast as possible is a key component. Here is my current HTTP client:

#include <thread>
#include <iostream>
#include <coroutine>
#include <optional>
#include <variant>
#include <vector>
#include <utility>
#include <string>
#include <chrono>

#include <boost/asio.hpp>
#include <boost/asio/ssl.hpp>
#include <boost/json.hpp>
#include <boost/beast.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/ssl/context.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/coroutine/all.hpp>
#include <boost/beast/ssl/ssl_stream.hpp>
#include <boost/beast/core/tcp_stream.hpp>
#include <boost/beast/core/flat_static_buffer.hpp>
#include <boost/beast/http.hpp>
#include <boost/beast/http/string_body.hpp>
#include <boost/beast/http/verb.hpp>
#include <boost/asio/awaitable.hpp>
#include <boost/asio/detached.hpp>

class http_client {
private:
    using response = boost::beast::http::response<boost::beast::http::string_body>;

    std::string http_hostname;
    std::string ip_address;
    boost::asio::ssl::context ssl_context;
    boost::asio::ip::tcp::resolver hostname_resolver;
    std::optional<boost::beast::ssl_stream<boost::beast::tcp_stream>> tcp_stream;
    boost::beast::flat_static_buffer<4 * 1024 * 1024> receive_flat_buffer;
public:
    http_client(const std::string& http_name, boost::asio::io_context& io_context) :
        http_hostname(http_name),
        ssl_context(boost::asio::ssl::context::tlsv12_client),
        hostname_resolver(io_context),
        tcp_stream(boost::beast::ssl_stream<boost::beast::tcp_stream>(io_context, ssl_context)),
        receive_flat_buffer()
    {

        ssl_context.set_verify_mode(boost::asio::ssl::verify_peer);
        ssl_context.set_options(
            boost::asio::ssl::context::default_workarounds | boost::asio::ssl::context::no_sslv2 |
            boost::asio::ssl::context::no_sslv3 | boost::asio::ssl::context::single_dh_use);

        if (!SSL_set_tlsext_host_name(tcp_stream->native_handle(), http_hostname.c_str())) {
            boost::beast::error_code error_code{static_cast<int>(::ERR_get_error()), boost::asio::error::get_ssl_category()};
            throw boost::beast::system_error{error_code};
        }

        auto const resolved_endpoint = hostname_resolver.resolve(http_hostname, "443");
        ip_address = resolved_endpoint->endpoint().address().to_string();

        boost::beast::get_lowest_layer(tcp_stream.value()).connect(resolved_endpoint);
        boost::beast::get_lowest_layer(tcp_stream.value()).socket().set_option(boost::asio::socket_base::keep_alive(true));
        boost::beast::get_lowest_layer(tcp_stream.value()).socket().set_option(boost::asio::ip::tcp::no_delay(true));

        std::cout << "Connected to REST endpoint at IP address <" << ip_address << "> which was resolved from <" << resolved_endpoint->host_name() << std::endl;
        tcp_stream->handshake(boost::asio::ssl::stream_base::client);
    }


    void send_request(boost::asio::io_context& io_context, const std::string& target, const std::function<void(response)>& callback) {
        boost::asio::spawn(
            io_context, [target = std::move(target), callback = std::move(callback), this](boost::asio::yield_context yield_context) mutable
            {
                boost::beast::http::request<boost::beast::http::string_body> http_request{
                    boost::beast::http::verb::get,
                    target,
                    11};

                http_request.set(boost::beast::http::field::host, http_hostname);
                http_request.set(boost::beast::http::field::content_type, "application/json");
                http_request.set(boost::beast::http::field::connection, "Keep-Alive");
                http_request.set(boost::beast::http::field::keep_alive, "timeout=86400");
                http_request.keep_alive(true);
                http_request.prepare_payload();

                size_t bytes_transferred = boost::beast::http::async_write(tcp_stream.value(), http_request, yield_context);

                response http_response;
                boost::beast::http::async_read(tcp_stream.value(), receive_flat_buffer, http_response, yield_context);

                callback(http_response);
            }
        );
    }
};

int main() {
    boost::asio::io_context io_context{};

    std::string host_name{"fapi.binance.com"};
    http_client client(host_name, io_context);

    for (int i = 0; i < 100; i++) {
        auto const stime = std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::system_clock::now().time_since_epoch()).count();
        client.send_request(io_context, "/fapi/v1/time", [&](boost::beast::http::response<boost::beast::http::string_body> const& http_response) {});

        auto const etime = std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::system_clock::now().time_since_epoch()).count();
        std::cout << "time diff = " << etime - stime << std::endl;
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
    }

    io_context.run();

    return 0;
}

And here are the compiler flags I am using

g++ -std=c++20 -O2 -flto -g beast_http_client.cpp -I/home/dev/vcpkg/installed/x64-linux/include -L/home/dev/vcpkg/installed/x64-linux/lib -lboost_system -lboost_coroutine -lboost_thread -lboost_json -lssl -lcrypto -lboost_context

I timed this and the latency varies around 10-20 us on average. I was wondering whether there are any improvements I can make to bring this down to low single-digit microseconds. I know boost::beast is quite a heavy library, but I thought I would learn from the experts about obvious optimisations.

Hardware: I am running this on an AWS virtual machine with a 3 GHz Intel Xeon processor.


1 Answer

Answered by sehe:

There are many issues.

Like I mentioned in my comments, it's unclear what you're trying to measure (and calling "latency", somehow).

But reading on, it becomes clear that the code is broken in many more ways.

The main loop schedules 100 coroutines, waiting 100ms between each (for no reason at all), and then, only at the end of main, executes all of them at once by invoking io_context.run().

This not only creates something similar to a denial-of-service attack on the server, it also clearly violates the restriction that only one write operation can be in flight at a time (this goes for SSL streams in particular, but also for the underlying POSIX internet domain stream socket).

In fact, if you ran a Debug build, no doubt there would have been assertions like this one aborting the program for you:

sotest: /home/sehe/custom/superboost/boost/beast/core/detail/stream_base.hpp:116: void boost::beast::detail::stream_base::pending_guard::assign(bool&): Assertion `! *b_' failed.

Some other issues include:

  • you regularly move from const variables. Not very useful
  • you're redundantly passing io_context references instead of using executors
  • you might be setting socket options late
  • you're using optional<> for no reason; if you must, at least use stream_(std::in_place, ...) for the construction (see the sketch after this list)
  • std::function introduces type-erasure (virtual dispatch) and copying
  • you probably want to time the start-completion (roundtrip) time
  • you use a string_body instead of empty_body on a GET request
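
To illustrate the optional<> point: if the optional must stay, here is a minimal sketch of constructing the stream in place rather than move-constructing it from a temporary (the class and member names are illustrative, not taken from the code above):

#include <optional>

#include <boost/asio/io_context.hpp>
#include <boost/asio/ssl/context.hpp>
#include <boost/beast/core/tcp_stream.hpp>
#include <boost/beast/ssl/ssl_stream.hpp>

struct client_with_optional {
    boost::asio::ssl::context ssl_ctx{boost::asio::ssl::context::tlsv12_client};
    std::optional<boost::beast::ssl_stream<boost::beast::tcp_stream>> stream_;

    // std::in_place constructs the stream directly inside the optional,
    // avoiding the temporary + move in stream_(ssl_stream(ioc, ssl_ctx)).
    explicit client_with_optional(boost::asio::io_context& ioc)
        : stream_(std::in_place, ioc, ssl_ctx) {}
};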

Fixing these and some readability concerns:

Live On Coliru

#include <iomanip>
#include <iostream>

#include <boost/asio.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/asio/ssl.hpp>
#include <boost/beast.hpp>
#include <boost/beast/ssl.hpp>

namespace net   = boost::asio;
namespace ssl   = net::ssl;
namespace beast = boost::beast;
namespace http  = beast::http;
using beast::error_code;
using net::ip::tcp;
using namespace std::chrono_literals;

static constexpr auto now = std::chrono::steady_clock::now;
using duration            = std::chrono::steady_clock::duration;

class http_client {
  private:
    using Stream  = beast::ssl_stream<beast::tcp_stream>;

    std::string  host_;
    ssl::context ctx_{ssl::context::tlsv12_client};
    Stream       stream_;

    std::string                        ip_address;
    beast::flat_static_buffer<4 << 10> buf_;

  public:
    using response = http::response<http::string_body>;

    http_client(std::string host, net::io_context& ioc) : host_(std::move(host)), stream_(ioc, ctx_) {
        ctx_.set_verify_mode(ssl::verify_peer);
        using C = ssl::context;
        ctx_.set_options(C::default_workarounds | C::no_sslv2 | C::no_sslv3 | C::single_dh_use);

        if (!SSL_set_tlsext_host_name(stream_.native_handle(), host_.c_str()))
            throw beast::system_error(::ERR_get_error(), net::error::get_ssl_category());

        auto eps   = tcp::resolver(ioc).resolve(host_, "443");
        ip_address = eps->endpoint().address().to_string();

        {
            auto& ll = beast::get_lowest_layer(stream_);
            auto& s  = ll.socket();
            s.open(tcp::v4());
            s.set_option(tcp::no_delay(true));
            s.set_option(tcp::socket::keep_alive(true));
            ll.connect(eps);
        }

        std::cout << "Connected to REST endpoint at IP address " << quoted(ip_address)
                  << " which was resolved from " << quoted(eps->host_name()) << std::endl;
        stream_.handshake(Stream::client);
    }

    // template <typename F>
    // requires std::invocable<F, error_code, response, duration>
    void send_request(std::string target, std::function<void(error_code, response, duration)> callback) {
        spawn(stream_.get_executor(),
              [start = now(), target = std::move(target), cb = std::move(callback),
               this](net::yield_context yield) mutable {
                  http::request<http::empty_body> http_request{http::verb::get, target, 11};

                  http_request.set(http::field::host, host_);
                  http_request.set(http::field::content_type, "application/json");
                  http_request.set(http::field::connection, "Keep-Alive");
                  http_request.set(http::field::keep_alive, "timeout=86400");
                  http_request.keep_alive(true);
                  http_request.prepare_payload();

                  /*size_t bytes_transferred =*/async_write(stream_, http_request, yield);

                  response http_response;
                  error_code ec;
                  async_read(stream_, buf_, http_response, yield[ec]);

                  std::move(cb)(ec, std::move(http_response), now() - start);
              });
    }
};

void send_loop(http_client& client, std::string const& target, unsigned n) {
    if (n == 0)
        return;

    client.send_request(target, [=, &client](error_code ec, http_client::response res, duration dur) {
        send_loop(client, target, n - 1); // only now it is safe to schedule a new write

        std::cout << "#" << n << " " << ec.message() << " in " << (dur / 1ms) << " ms";
        if (!ec)
            std::cout << " HTTP " << res.reason();
        std::cout << std::endl;
    });
}

int main() {
    net::io_context io_context;

    http_client client("fapi.binance.com", io_context);
    send_loop(client, "/fapi/v1/time", 3);

    io_context.run();
}

Locally demoed (output screenshot omitted).

Other Issues/Improvements

  • You're relying on the connection to never fail
  • Common HTTP server implementations limit the number of pipelined requests
  • You are using the default type-erased executors (any_io_executor); see the sketch after this list
  • You are using stackful coroutines, which are a bit heavy
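
On the executor point, a minimal sketch of pinning the stream to the concrete io_context executor type instead of the type-erased any_io_executor; the aliases below are assumptions about how you might wire this up, not part of the code above:

#include <boost/asio.hpp>
#include <boost/asio/ssl.hpp>
#include <boost/beast.hpp>
#include <boost/beast/ssl.hpp>

namespace net   = boost::asio;
namespace beast = boost::beast;

// basic_stream's executor template argument defaults to net::any_io_executor;
// naming the io_context executor explicitly removes that type erasure.
using concrete_executor = net::io_context::executor_type;
using concrete_tcp      = beast::basic_stream<net::ip::tcp, concrete_executor>;
using concrete_ssl      = beast::ssl_stream<concrete_tcp>;

int main() {
    net::io_context   ioc;
    net::ssl::context ctx{net::ssl::context::tlsv12_client};
    concrete_ssl stream{concrete_tcp{ioc.get_executor()}, ctx};
    // resolve/connect/handshake as before; completion handlers now dispatch
    // through the concrete executor rather than through any_io_executor.
}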

You should probably consider a separate IO thread and possibly multiple connections. See e.g. "How do I make this HTTPS connection persistent in Beast?" for ideas on how to progress from here.
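
For the separate IO thread, a minimal sketch, assuming a single io_context whose completions run on a dedicated thread (the work guard only keeps run() alive before the first request is scheduled):

#include <thread>

#include <boost/asio.hpp>

int main() {
    boost::asio::io_context ioc;

    // Keep run() from returning while there is momentarily no outstanding work.
    auto work = boost::asio::make_work_guard(ioc);

    // All completion handlers (reads, writes, timers) execute on this thread.
    std::thread io_thread([&ioc] { ioc.run(); });

    // ... construct the http_client against ioc and schedule requests here ...

    work.reset();     // let run() return once the remaining work drains
    io_thread.join();
}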