Diffstat
8 files changed, 363 insertions, 5 deletions
diff --git a/devel/cpprestsdk/Makefile b/devel/cpprestsdk/Makefile
index 2636b3cd6eff..f8f8f672c9fb 100644
--- a/devel/cpprestsdk/Makefile
+++ b/devel/cpprestsdk/Makefile
@@ -1,27 +1,26 @@
 PORTNAME=	cpprestsdk
 PORTVERSION=	2.10.19
-PORTREVISION=	2
+PORTREVISION=	4
 DISTVERSIONPREFIX=	v
 CATEGORIES=	devel
 
-MAINTAINER=	ports@FreeBSD.org
+MAINTAINER=	sigsegv@radiotube.org
 COMMENT=	Microsoft C++ REST SDK
 WWW=		https://github.com/Microsoft/cpprestsdk
 
 LICENSE=	MIT
 LICENSE_FILE=	${WRKSRC}/../license.txt
 
-BROKEN=		fails to build with Boost>=1.87
-
 BUILD_DEPENDS=	${LOCALBASE}/include/websocketpp/client.hpp:devel/websocketpp
 LIB_DEPENDS=	libboost_system.so:devel/boost-libs
 
-USES=		cmake compiler:c++11-lang pkgconfig ssl
+USES=		cmake:testing compiler:c++11-lang pkgconfig ssl
 USE_GITHUB=	yes
 GH_ACCOUNT=	Microsoft
 USE_LDCONFIG=	yes
 
 CMAKE_OFF=	BUILD_SAMPLES BUILD_TESTS WERROR
+CMAKE_TESTING_ON=	BUILD_TESTS
 
 WRKSRC_SUBDIR=	Release
 
diff --git a/devel/cpprestsdk/files/patch-include_pplx_threadpool.h b/devel/cpprestsdk/files/patch-include_pplx_threadpool.h
new file mode 100644
index 000000000000..e6a05d26b0d3
--- /dev/null
+++ b/devel/cpprestsdk/files/patch-include_pplx_threadpool.h
@@ -0,0 +1,21 @@
+--- include/pplx/threadpool.h.orig	2023-12-05 04:23:31 UTC
++++ include/pplx/threadpool.h
+@@ -69,15 +69,15 @@ class threadpool (public)
+     CASABLANCA_DEPRECATED("Use `.service().post(task)` directly.")
+     void schedule(T task)
+     {
+-        service().post(task);
++        boost::asio::post(service(), task);
+     }
+ 
+-    boost::asio::io_service& service() { return m_service; }
++    boost::asio::io_context& service() { return m_service; }
+ 
+ protected:
+     threadpool(size_t num_threads) : m_service(static_cast<int>(num_threads)) {}
+ 
+-    boost::asio::io_service m_service;
++    boost::asio::io_context m_service;
+ };
+ 
+ } // namespace crossplat
diff --git a/devel/cpprestsdk/files/patch-src_http_client_http__client__asio.cpp b/devel/cpprestsdk/files/patch-src_http_client_http__client__asio.cpp
new file mode 100644
index 000000000000..7b4d1af0d422
--- /dev/null
+++ b/devel/cpprestsdk/files/patch-src_http_client_http__client__asio.cpp
@@ -0,0 +1,218 @@
+--- src/http/client/http_client_asio.cpp.orig	2023-12-05 04:23:31 UTC
++++ src/http/client/http_client_asio.cpp
+@@ -146,7 +146,7 @@ class asio_connection (public)
+     friend class asio_client;
+ 
+ public:
+-    asio_connection(boost::asio::io_service& io_service)
++    asio_connection(boost::asio::io_context& io_service)
+         : m_socket_lock()
+         , m_socket(io_service)
+         , m_ssl_stream()
+@@ -581,10 +581,8 @@ class asio_context final : public request_context, pub
+ 
+         m_context->m_timer.start();
+ 
+-        tcp::resolver::query query(utility::conversions::to_utf8string(proxy_host), to_string(proxy_port));
+-
+         auto client = std::static_pointer_cast<asio_client>(m_context->m_http_client);
+-        m_context->m_resolver.async_resolve(query,
++        m_context->m_resolver.async_resolve(utility::conversions::to_utf8string(proxy_host), to_string(proxy_port),
+                                             boost::bind(&ssl_proxy_tunnel::handle_resolve,
+                                                         shared_from_this(),
+                                                         boost::asio::placeholders::error,
+@@ -592,8 +590,9 @@ class asio_context final : public request_context, pub
+     }
+ 
+ private:
+-    void handle_resolve(const boost::system::error_code& ec, tcp::resolver::iterator endpoints)
++    void handle_resolve(const boost::system::error_code& ec, tcp::resolver::results_type results)
+     {
++        auto iterator = results.begin();
+         if (ec)
+         {
+             m_context->report_error("Error resolving proxy address", ec, httpclient_errorcode_context::connect);
+@@ -601,16 +600,16 @@ class asio_context final : public request_context, pub
+         else
+         {
+             m_context->m_timer.reset();
+-            auto endpoint = *endpoints;
++            auto endpoint = *iterator;
+             m_context->m_connection->async_connect(endpoint,
+                                                    boost::bind(&ssl_proxy_tunnel::handle_tcp_connect,
+                                                                shared_from_this(),
+                                                                boost::asio::placeholders::error,
+-                                                               ++endpoints));
++                                                               ++iterator, results.end()));
+         }
+     }
+ 
+-    void handle_tcp_connect(const boost::system::error_code& ec, tcp::resolver::iterator endpoints)
++    void handle_tcp_connect(const boost::system::error_code& ec, tcp::resolver::results_type::iterator endpoints, tcp::resolver::results_type::iterator endpoints_end)
+     {
+         if (!ec)
+         {
+@@ -621,7 +620,7 @@ class asio_context final : public request_context, pub
+                                                        shared_from_this(),
+                                                        boost::asio::placeholders::error));
+         }
+-        else if (endpoints == tcp::resolver::iterator())
++        else if (endpoints == endpoints_end)
+         {
+             m_context->report_error(
+                 "Failed to connect to any resolved proxy endpoint", ec, httpclient_errorcode_context::connect);
+@@ -646,7 +645,7 @@ class asio_context final : public request_context, pub
+                                                        boost::bind(&ssl_proxy_tunnel::handle_tcp_connect,
+                                                                    shared_from_this(),
+                                                                    boost::asio::placeholders::error,
+-                                                                   ++endpoints));
++                                                                   ++endpoints, endpoints_end));
+         }
+     }
+ 
+@@ -885,8 +884,7 @@ class asio_context final : public request_context, pub
+         auto tcp_host = proxy_type == http_proxy_type::http ? proxy_host : host;
+         auto tcp_port = proxy_type == http_proxy_type::http ? proxy_port : port;
+ 
+-        tcp::resolver::query query(tcp_host, to_string(tcp_port));
+-        ctx->m_resolver.async_resolve(query,
++        ctx->m_resolver.async_resolve(tcp_host, to_string(tcp_port),
+                                       boost::bind(&asio_context::handle_resolve,
+                                                   ctx,
+                                                   boost::asio::placeholders::error,
+@@ -1006,7 +1004,7 @@ class asio_context final : public request_context, pub
+         request_context::report_error(errorcodeValue, message);
+     }
+ 
+-    void handle_connect(const boost::system::error_code& ec, tcp::resolver::iterator endpoints)
++    void handle_connect(const boost::system::error_code& ec, tcp::resolver::results_type::iterator endpoints, tcp::resolver::results_type::iterator endpoints_end)
+     {
+         m_timer.reset();
+         if (!ec)
+@@ -1019,7 +1017,7 @@ class asio_context final : public request_context, pub
+         {
+             report_error("Request canceled by user.", ec, httpclient_errorcode_context::connect);
+         }
+-        else if (endpoints == tcp::resolver::iterator())
++        else if (endpoints == endpoints_end)
+         {
+             report_error("Failed to connect to any resolved endpoint", ec, httpclient_errorcode_context::connect);
+         }
+@@ -1041,28 +1039,29 @@ class asio_context final : public request_context, pub
+             m_connection->async_connect(
+                 endpoint,
+                 boost::bind(
+-                    &asio_context::handle_connect, shared_from_this(), boost::asio::placeholders::error, ++endpoints));
++                    &asio_context::handle_connect, shared_from_this(), boost::asio::placeholders::error, ++endpoints, endpoints_end));
+         }
+     }
+ 
+-    void handle_resolve(const boost::system::error_code& ec, tcp::resolver::iterator endpoints)
++    void handle_resolve(const boost::system::error_code& ec, tcp::resolver::results_type results)
+     {
+         if (ec)
+         {
+             report_error("Error resolving address", ec, httpclient_errorcode_context::connect);
+         }
+-        else if (endpoints == tcp::resolver::iterator())
++        else if (results.empty())
+         {
+             report_error("Failed to resolve address", ec, httpclient_errorcode_context::connect);
+         }
+         else
+         {
+             m_timer.reset();
+-            auto endpoint = *endpoints;
++            auto iterator = results.begin();
++            auto endpoint = *iterator;
+             m_connection->async_connect(
+                 endpoint,
+                 boost::bind(
+-                    &asio_context::handle_connect, shared_from_this(), boost::asio::placeholders::error, ++endpoints));
++                    &asio_context::handle_connect, shared_from_this(), boost::asio::placeholders::error, ++iterator, results.end()));
+         }
+     }
+ 
+@@ -1134,7 +1133,7 @@ class asio_context final : public request_context, pub
+         }
+ #endif // CPPREST_PLATFORM_ASIO_CERT_VERIFICATION_AVAILABLE
+ 
+-        boost::asio::ssl::rfc2818_verification rfc2818(m_connection->cn_hostname());
++        boost::asio::ssl::host_name_verification rfc2818(m_connection->cn_hostname());
+         return rfc2818(preverified, verifyCtx);
+     }
+ 
+@@ -1182,8 +1181,8 @@ class asio_context final : public request_context, pub
+ 
+         const auto& chunkSize = m_http_client->client_config().chunksize();
+         auto readbuf = _get_readbuffer();
+-        uint8_t* buf = boost::asio::buffer_cast<uint8_t*>(
+-            m_body_buf.prepare(chunkSize + http::details::chunked_encoding::additional_encoding_space));
++        auto bodyBuf = m_body_buf.prepare(chunkSize + http::details::chunked_encoding::additional_encoding_space);
++        uint8_t* buf = static_cast<uint8_t *>(bodyBuf.data());
+         const auto this_request = shared_from_this();
+         readbuf.getn(buf + http::details::chunked_encoding::data_offset, chunkSize)
+             .then([this_request, buf, chunkSize AND_CAPTURE_MEMBER_FUNCTION_POINTERS](pplx::task<size_t> op) {
+@@ -1247,7 +1246,7 @@ class asio_context final : public request_context, pub
+         const auto readSize = static_cast<size_t>((std::min)(
+             static_cast<uint64_t>(m_http_client->client_config().chunksize()), m_content_length - m_uploaded));
+         auto readbuf = _get_readbuffer();
+-        readbuf.getn(boost::asio::buffer_cast<uint8_t*>(m_body_buf.prepare(readSize)), readSize)
++        readbuf.getn(static_cast<uint8_t*>(m_body_buf.prepare(readSize).data()), readSize)
+             .then([this_request AND_CAPTURE_MEMBER_FUNCTION_POINTERS](pplx::task<size_t> op) {
+                 try
+                 {
+@@ -1639,7 +1638,7 @@ class asio_context final : public request_context, pub
+                     std::vector<uint8_t> decompressed;
+ 
+                     bool boo =
+-                        decompress(boost::asio::buffer_cast<const uint8_t*>(m_body_buf.data()), to_read, decompressed);
++                        decompress(static_cast<const uint8_t*>(m_body_buf.data().data()), to_read, decompressed);
+                     if (!boo)
+                     {
+                         report_exception(std::runtime_error("Failed to decompress the response body"));
+@@ -1687,7 +1686,7 @@ class asio_context final : public request_context, pub
+             }
+             else
+             {
+-                writeBuffer.putn_nocopy(boost::asio::buffer_cast<const uint8_t*>(m_body_buf.data()), to_read)
++                writeBuffer.putn_nocopy(static_cast<const uint8_t*>(m_body_buf.data().data()), to_read)
+                     .then([this_request, to_read AND_CAPTURE_MEMBER_FUNCTION_POINTERS](pplx::task<size_t> op) {
+                         try
+                         {
+@@ -1759,7 +1758,7 @@ class asio_context final : public request_context, pub
+                     std::vector<uint8_t> decompressed;
+ 
+                     bool boo =
+-                        decompress(boost::asio::buffer_cast<const uint8_t*>(m_body_buf.data()), read_size, decompressed);
++                        decompress(static_cast<const uint8_t*>(m_body_buf.data().data()), read_size, decompressed);
+                     if (!boo)
+                     {
+                         this_request->report_exception(std::runtime_error("Failed to decompress the response body"));
+@@ -1821,7 +1820,7 @@ class asio_context final : public request_context, pub
+             }
+             else
+             {
+-                writeBuffer.putn_nocopy(boost::asio::buffer_cast<const uint8_t*>(m_body_buf.data()), read_size)
++                writeBuffer.putn_nocopy(static_cast<const uint8_t*>(m_body_buf.data().data()), read_size)
+                     .then([this_request AND_CAPTURE_MEMBER_FUNCTION_POINTERS](pplx::task<size_t> op) {
+                         size_t writtenSize = 0;
+                         try
+@@ -1870,7 +1869,7 @@ class asio_context final : public request_context, pub
+         assert(!m_ctx.expired());
+         m_state = started;
+ 
+-        m_timer.expires_from_now(m_duration);
++        m_timer.expires_after(m_duration);
+         auto ctx = m_ctx;
+         m_timer.async_wait([ctx AND_CAPTURE_MEMBER_FUNCTION_POINTERS](const boost::system::error_code& ec) {
+             handle_timeout(ec, ctx);
+@@ -1881,7 +1880,7 @@ class asio_context final : public request_context, pub
+     {
+         assert(m_state == started || m_state == timedout);
+         assert(!m_ctx.expired());
+-        if (m_timer.expires_from_now(m_duration) > 0)
++        if (m_timer.expires_after(m_duration) > 0)
+         {
+             // The existing handler was canceled so schedule a new one.
+             assert(m_state == started);
diff --git a/devel/cpprestsdk/files/patch-src_http_listener_http__server__asio.cpp b/devel/cpprestsdk/files/patch-src_http_listener_http__server__asio.cpp
new file mode 100644
index 000000000000..21cd36ec493d
--- /dev/null
+++ b/devel/cpprestsdk/files/patch-src_http_listener_http__server__asio.cpp
@@ -0,0 +1,67 @@
+--- src/http/listener/http_server_asio.cpp.orig	2023-12-05 04:23:31 UTC
++++ src/http/listener/http_server_asio.cpp
+@@ -520,17 +520,14 @@ void hostport_listener::start()
+     auto& service = crossplat::threadpool::shared_instance().service();
+     tcp::resolver resolver(service);
+     // #446: boost resolver does not recognize "+" as a host wildchar
+-    tcp::resolver::query query =
+-        ("+" == m_host) ? tcp::resolver::query(m_port, boost::asio::ip::resolver_query_base::flags())
+-                        : tcp::resolver::query(m_host, m_port, boost::asio::ip::resolver_query_base::flags());
+ 
+-    tcp::endpoint endpoint = *resolver.resolve(query);
++    tcp::endpoint endpoint = (("+" == m_host) ? *(resolver.resolve("", m_port).begin()) : *(resolver.resolve(m_host, m_port).begin()));
+ 
+     m_acceptor.reset(new tcp::acceptor(service));
+     m_acceptor->open(endpoint.protocol());
+     m_acceptor->set_option(socket_base::reuse_address(true));
+     m_acceptor->bind(endpoint);
+-    m_acceptor->listen(0 != m_backlog ? m_backlog : socket_base::max_connections);
++    m_acceptor->listen(0 != m_backlog ? m_backlog : socket_base::max_listen_connections);
+ 
+     auto socket = new ip::tcp::socket(service);
+     std::unique_ptr<ip::tcp::socket> usocket(socket);
+@@ -881,7 +878,7 @@ will_deref_t asio_server_connection::handle_chunked_bo
+     else
+     {
+         auto writebuf = requestImpl->outstream().streambuf();
+-        writebuf.putn_nocopy(buffer_cast<const uint8_t*>(m_request_buf.data()), toWrite)
++        writebuf.putn_nocopy(static_cast<const uint8_t*>(m_request_buf.data().data()), toWrite)
+             .then([=](pplx::task<size_t> writeChunkTask) -> will_deref_t {
+                 try
+                 {
+@@ -913,7 +910,7 @@ will_deref_t asio_server_connection::handle_body(const
+     {
+         auto writebuf = requestImpl->outstream().streambuf();
+         writebuf
+-            .putn_nocopy(boost::asio::buffer_cast<const uint8_t*>(m_request_buf.data()),
++            .putn_nocopy(static_cast<const uint8_t*>(m_request_buf.data().data()),
+                          (std::min)(m_request_buf.size(), m_read_size - m_read))
+             .then([this](pplx::task<size_t> writtenSizeTask) -> will_deref_t {
+                 size_t writtenSize = 0;
+@@ -1134,7 +1131,7 @@ will_deref_and_erase_t asio_server_connection::handle_
+     }
+     auto membuf = m_response_buf.prepare(ChunkSize + chunked_encoding::additional_encoding_space);
+ 
+-    readbuf.getn(buffer_cast<uint8_t*>(membuf) + chunked_encoding::data_offset, ChunkSize)
++    readbuf.getn(static_cast<uint8_t*>(membuf.data()) + chunked_encoding::data_offset, ChunkSize)
+         .then([=](pplx::task<size_t> actualSizeTask) -> will_deref_and_erase_t {
+             size_t actualSize = 0;
+             try
+@@ -1146,7 +1143,7 @@ will_deref_and_erase_t asio_server_connection::handle_
+                 return cancel_sending_response_with_error(response, std::current_exception());
+             }
+             size_t offset = chunked_encoding::add_chunked_delimiters(
+-                buffer_cast<uint8_t*>(membuf), ChunkSize + chunked_encoding::additional_encoding_space, actualSize);
++                static_cast<uint8_t*>(membuf.data()), ChunkSize + chunked_encoding::additional_encoding_space, actualSize);
+             m_response_buf.commit(actualSize + chunked_encoding::additional_encoding_space);
+             m_response_buf.consume(offset);
+             if (actualSize == 0)
+@@ -1167,7 +1164,7 @@ will_deref_and_erase_t asio_server_connection::handle_
+         return cancel_sending_response_with_error(
+             response, std::make_exception_ptr(http_exception("Response stream close early!")));
+     size_t readBytes = (std::min)(ChunkSize, m_write_size - m_write);
+-    readbuf.getn(buffer_cast<uint8_t*>(m_response_buf.prepare(readBytes)), readBytes)
++    readbuf.getn(static_cast<uint8_t*>(m_response_buf.prepare(readBytes).data()), readBytes)
+         .then([=](pplx::task<size_t> actualSizeTask) -> will_deref_and_erase_t {
+             size_t actualSize = 0;
+             try
diff --git a/devel/cpprestsdk/files/patch-src_pplx_pplxlinux.cpp b/devel/cpprestsdk/files/patch-src_pplx_pplxlinux.cpp
new file mode 100644
index 000000000000..51af71ecc2a3
--- /dev/null
+++ b/devel/cpprestsdk/files/patch-src_pplx_pplxlinux.cpp
@@ -0,0 +1,11 @@
+--- src/pplx/pplxlinux.cpp.orig	2023-12-05 04:23:31 UTC
++++ src/pplx/pplxlinux.cpp
+@@ -35,7 +35,7 @@ _PPLXIMP void linux_scheduler::schedule(TaskProc_t pro
+ 
+ _PPLXIMP void linux_scheduler::schedule(TaskProc_t proc, void* param)
+ {
+-    crossplat::threadpool::shared_instance().service().post(boost::bind(proc, param));
++    boost::asio::post(crossplat::threadpool::shared_instance().service(), boost::bind(proc, param));
+ }
+ 
+ } // namespace details
diff --git a/devel/cpprestsdk/files/patch-src_pplx_threadpool.cpp b/devel/cpprestsdk/files/patch-src_pplx_threadpool.cpp
new file mode 100644
index 000000000000..f52ce704684e
--- /dev/null
+++ b/devel/cpprestsdk/files/patch-src_pplx_threadpool.cpp
@@ -0,0 +1,20 @@
+--- src/pplx/threadpool.cpp.orig	2023-12-05 04:23:31 UTC
++++ src/pplx/threadpool.cpp
+@@ -37,7 +37,7 @@ struct threadpool_impl final : crossplat::threadpool
+ 
+ struct threadpool_impl final : crossplat::threadpool
+ {
+-    threadpool_impl(size_t n) : crossplat::threadpool(n), m_work(m_service)
++    threadpool_impl(size_t n) : crossplat::threadpool(n), m_work(boost::asio::make_work_guard(m_service))
+     {
+         for (size_t i = 0; i < n; i++)
+             add_thread();
+@@ -84,7 +84,7 @@ struct threadpool_impl final : crossplat::threadpool
+     }
+ 
+     std::vector<std::unique_ptr<boost::asio::detail::thread>> m_threads;
+-    boost::asio::io_service::work m_work;
++    boost::asio::executor_work_guard<boost::asio::io_context::executor_type > m_work;
+ };
+ 
+ #if defined(_WIN32)
diff --git a/devel/cpprestsdk/files/patch-src_websockets_client_ws__client__wspp.cpp b/devel/cpprestsdk/files/patch-src_websockets_client_ws__client__wspp.cpp
new file mode 100644
index 000000000000..d576a97084ac
--- /dev/null
+++ b/devel/cpprestsdk/files/patch-src_websockets_client_ws__client__wspp.cpp
@@ -0,0 +1,11 @@
+--- src/websockets/client/ws_client_wspp.cpp.orig	2023-12-05 04:23:31 UTC
++++ src/websockets/client/ws_client_wspp.cpp
+@@ -225,7 +225,7 @@ class wspp_callback_client : public websocket_client_c
+                     verifyCtx, utility::conversions::to_utf8string(m_uri.host()));
+             }
+ #endif
+-            boost::asio::ssl::rfc2818_verification rfc2818(utility::conversions::to_utf8string(m_uri.host()));
++            boost::asio::ssl::host_name_verification rfc2818(utility::conversions::to_utf8string(m_uri.host()));
+             return rfc2818(preverified, verifyCtx);
+         });
+ 
diff --git a/devel/cpprestsdk/files/patch-tests_functional_pplx_pplx__test_pplx__op__test.cpp b/devel/cpprestsdk/files/patch-tests_functional_pplx_pplx__test_pplx__op__test.cpp
new file mode 100644
index 000000000000..356a5d7bb902
--- /dev/null
+++ b/devel/cpprestsdk/files/patch-tests_functional_pplx_pplx__test_pplx__op__test.cpp
@@ -0,0 +1,11 @@
+--- tests/functional/pplx/pplx_test/pplx_op_test.cpp.orig	2023-12-05 04:23:31 UTC
++++ tests/functional/pplx/pplx_test/pplx_op_test.cpp
+@@ -57,7 +57,7 @@ class pplx_dflt_scheduler : public pplx::scheduler_int
+     virtual void schedule(pplx::TaskProc_t proc, void* param)
+     {
+         pplx::details::atomic_increment(s_flag);
+-        m_pool->service().post([=]() -> void { proc(param); });
++        boost::asio::post(m_pool->service(), [=]() -> void { proc(param); });
+     }
+ 
+ public:
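The patches above all apply the same Boost.Asio migration required once Boost removes the deprecated io_service-era APIs: io_context replaces io_service, make_work_guard() replaces io_service::work, the free function boost::asio::post() replaces service().post(), async_resolve() delivers a results_type range instead of resolver::query plus a raw iterator, and expires_after() replaces expires_from_now(). The program below is a minimal standalone sketch of those replacement idioms only; it is not taken from cpprestsdk, and the host "example.com" and port "80" are placeholders. It should build against Boost >= 1.66.

// Standalone sketch of the post-io_service Boost.Asio idioms targeted by the
// patches above; illustrative only, not part of the port.
#include <boost/asio.hpp>
#include <chrono>
#include <iostream>

int main()
{
    namespace asio = boost::asio;
    using asio::ip::tcp;

    // io_context replaces the deprecated io_service.
    asio::io_context ioc;

    // executor_work_guard (via make_work_guard) replaces io_service::work;
    // it keeps run() from returning while work is still being scheduled.
    auto work = asio::make_work_guard(ioc);

    // The free function asio::post() replaces io_service::post().
    asio::post(ioc, [] { std::cout << "posted task ran\n"; });

    // async_resolve(host, service, handler) replaces resolver::query; the
    // handler receives a results_type range instead of a raw iterator.
    tcp::resolver resolver(ioc);
    resolver.async_resolve("example.com", "80", // placeholder host/port
        [](const boost::system::error_code& ec, tcp::resolver::results_type results) {
            if (ec)
            {
                std::cerr << "resolve failed: " << ec.message() << '\n';
                return;
            }
            for (const auto& entry : results)
                std::cout << "endpoint: " << entry.endpoint() << '\n';
        });

    // expires_after() replaces the deprecated expires_from_now().
    asio::steady_timer timer(ioc);
    timer.expires_after(std::chrono::milliseconds(100));
    timer.async_wait([](const boost::system::error_code&) { std::cout << "timer fired\n"; });

    work.reset(); // allow run() to exit once the queued work completes
    ioc.run();
    return 0;
}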