diff --git a/.github/workflows/test-sync.yml b/.github/workflows/test-sync.yml
new file mode 100644
index 0000000..df11315
--- /dev/null
+++ b/.github/workflows/test-sync.yml
@@ -0,0 +1,103 @@
+name: Sync test
+
+on: [push, pull_request]
+
+jobs:
+  sync-test-ubuntu:
+
+    runs-on: ubuntu-22.04
+
+    steps:
+    - name: Install dependencies
+      run: |
+        sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test
+        sudo apt update
+        sudo apt install -y git build-essential cmake libuv1-dev libzmq3-dev libsodium-dev libpgm-dev libnorm-dev libgss-dev libcurl4-openssl-dev libidn2-0-dev gcc-12 g++-12
+
+    - name: Checkout repository
+      uses: actions/checkout@v2
+      with:
+        submodules: true
+
+    - name: Build p2pool
+      run: |
+        mkdir build
+        cd build
+        cmake .. -DDEV_TEST_SYNC=ON -DCMAKE_C_COMPILER=gcc-12 -DCMAKE_CXX_COMPILER=g++-12
+        make -j$(nproc)
+
+    - name: Run p2pool
+      timeout-minutes: 15
+      run: |
+        cd build
+        ./p2pool --host p2pmd.xmrvsbeast.com --zmq-port 18084 --wallet 44MnN1f3Eto8DZYUWuE5XZNUtE3vcRzt2j6PzqWpPau34e6Cf4fAxt6X2MBmrm6F9YMEiMNjN6W4Shn4pLcfNAja621jwyg --no-cache --loglevel 4
+
+    - name: Archive p2pool.log
+      uses: actions/upload-artifact@v2
+      with:
+        name: p2pool_ubuntu.log
+        path: build/p2pool.log
+
+  sync-test-macos:
+
+    runs-on: macos-12
+
+    steps:
+    - name: Checkout repository
+      uses: actions/checkout@v2
+      with:
+        submodules: recursive
+
+    - name: Install dependencies
+      run: HOMEBREW_NO_AUTO_UPDATE=1 brew install cmake autoconf libtool automake libuv zmq libpgm curl
+
+    - name: Build p2pool
+      run: |
+        mkdir build
+        cd build
+        cmake .. -DDEV_TEST_SYNC=ON
+        make -j3
+
+    - name: Run p2pool
+      timeout-minutes: 15
+      run: |
+        cd build
+        ./p2pool --host p2pmd.xmrvsbeast.com --zmq-port 18084 --wallet 44MnN1f3Eto8DZYUWuE5XZNUtE3vcRzt2j6PzqWpPau34e6Cf4fAxt6X2MBmrm6F9YMEiMNjN6W4Shn4pLcfNAja621jwyg --no-cache --loglevel 4
+
+    - name: Archive p2pool.log
+      uses: actions/upload-artifact@v2
+      with:
+        name: p2pool_macos.log
+        path: build/p2pool.log
+
+  sync-test-windows:
+
+    runs-on: windows-2022
+
+    steps:
+    - name: Checkout repository
+      uses: actions/checkout@v2
+      with:
+        submodules: recursive
+
+    - name: Setup cmake
+      uses: lukka/get-cmake@latest
+
+    - name: Build p2pool
+      run: |
+        mkdir build
+        cd build
+        cmake .. -G "Visual Studio 17 2022" -DDEV_TEST_SYNC=ON
+        & "C:\\Program Files\\Microsoft Visual Studio\\2022\\Enterprise\\Msbuild\\Current\\Bin\\amd64\\msbuild" /m /p:Configuration=Release p2pool.vcxproj
+
+    - name: Run p2pool
+      timeout-minutes: 15
+      run: |
+        cd build/Release
+        ./p2pool.exe --host p2pmd.xmrvsbeast.com --zmq-port 18084 --wallet 44MnN1f3Eto8DZYUWuE5XZNUtE3vcRzt2j6PzqWpPau34e6Cf4fAxt6X2MBmrm6F9YMEiMNjN6W4Shn4pLcfNAja621jwyg --no-cache --loglevel 4
+
+    - name: Archive p2pool.log
+      uses: actions/upload-artifact@v2
+      with:
+        name: p2pool_windows.log
+        path: build/Release/p2pool.log
diff --git a/CMakeLists.txt b/CMakeLists.txt
index d1a4608..4ef15a9 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -5,6 +5,8 @@ option(STATIC_BINARY "Build static binary" OFF)
 option(STATIC_LIBS "Link libuv and libzmq statically" OFF)
 option(WITH_RANDOMX "Include the RandomX library in the build.
 If this is turned off, p2pool will rely on monerod for verifying RandomX hashes" ON)
+option(DEV_TEST_SYNC "[Developer only] Sync test, stop p2pool after sync is complete" OFF)
+
 set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake")
 set_property(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} PROPERTY VS_STARTUP_PROJECT p2pool)
@@ -15,6 +17,10 @@ if (WITH_RANDOMX)
 	set(LIBS randomx)
 endif()
 
+if (DEV_TEST_SYNC)
+	add_definitions(-DDEV_TEST_SYNC)
+endif()
+
 include(cmake/flags.cmake)
 
 set(HEADERS
@@ -77,10 +83,19 @@ if (WITH_RANDOMX)
 	set(SOURCES ${SOURCES} src/miner.cpp)
 endif()
 
+if (NOT STATIC_BINARY AND NOT STATIC_LIBS)
+	include(FindCURL)
+endif()
+
+if (CURL_INCLUDE_DIRS)
+	include_directories(${CURL_INCLUDE_DIRS})
+else()
+	include_directories(external/src/curl/include)
+endif()
+
 include_directories(src)
 include_directories(external/src)
 include_directories(external/src/cryptonote)
-include_directories(external/src/curl/include)
 include_directories(external/src/libuv/include)
 include_directories(external/src/cppzmq)
 include_directories(external/src/libzmq/include)
@@ -115,8 +130,13 @@ elseif (CMAKE_CXX_COMPILER_ID MATCHES GNU OR CMAKE_CXX_COMPILER_ID MATCHES Clang
 		find_library(CURL_LIBRARY_DEBUG NAMES libcurl.a PATHS "external/src/curl/lib/.libs" NO_DEFAULT_PATH)
 		find_library(CURL_LIBRARY NAMES libcurl.a PATHS "external/src/curl/lib/.libs" NO_DEFAULT_PATH)
 	else()
-		find_library(CURL_LIBRARY_DEBUG NAMES curl)
-		find_library(CURL_LIBRARY NAMES curl)
+		if (CURL_LIBRARIES)
+			set(CURL_LIBRARY_DEBUG ${CURL_LIBRARIES})
+			set(CURL_LIBRARY ${CURL_LIBRARIES})
+		else()
+			find_library(CURL_LIBRARY_DEBUG NAMES curl)
+			find_library(CURL_LIBRARY NAMES curl)
+		endif()
 	endif()
 	find_library(SODIUM_LIBRARY sodium)
 endif()
diff --git a/README.md b/README.md
index 4c9bf2a..bd3e6a8 100644
--- a/README.md
+++ b/README.md
@@ -253,7 +253,7 @@ Alternatively, you can select "Clone a repository" within the GUI, then select "
 Run the following commands to install the necessary prerequisites, clone this repo, and build P2Pool locally on your Mac:
 ```
-brew update && brew install git cmake libuv zmq libpgm
+brew update && brew install git cmake libuv zmq libpgm curl
 git clone --recursive https://github.com/SChernykh/p2pool
 cd p2pool
 mkdir build && cd build
@@ -265,7 +265,7 @@ make -j$(sysctl -n hw.logicalcpu)
 Run the following commands to install the necessary prerequisites, clone this repo, and build P2Pool locally on FreeBSD:
 ```
-pkg install git cmake libuv libzmq4
+pkg install git cmake libuv libzmq4 curl
 git clone --recursive https://github.com/SChernykh/p2pool
 cd p2pool
 mkdir build && cd build
diff --git a/src/json_rpc_request.cpp b/src/json_rpc_request.cpp
index 3c36240..08c4364 100644
--- a/src/json_rpc_request.cpp
+++ b/src/json_rpc_request.cpp
@@ -60,10 +60,7 @@ struct CurlContext
 
 	void close_handles();
 
-	bool m_closing;
-
-	uv_poll_t m_pollHandle;
-	curl_socket_t m_socket;
+	std::vector<std::pair<curl_socket_t, uv_poll_t*>> m_pollHandles;
 
 	CallbackBase* m_callback;
 	CallbackBase* m_closeCallback;
@@ -85,10 +82,7 @@ struct CurlContext
 };
 
 CurlContext::CurlContext(const std::string& address, int port, const std::string& req, const std::string& auth, CallbackBase* cb, CallbackBase* close_cb, uv_loop_t* loop)
-	: m_closing(false)
-	, m_pollHandle{}
-	, m_socket{}
-	, m_callback(cb)
+	: m_callback(cb)
 	, m_closeCallback(close_cb)
 	, m_loop(loop)
 	, m_timer{}
@@ -99,6 +93,8 @@ CurlContext::CurlContext(const std::string& address, int port, const std::string
 	, m_auth(auth)
 	, m_headers(nullptr)
 {
+	m_pollHandles.reserve(2);
+
 	{
 		char buf[log::Stream::BUF_SIZE + 1];
 		buf[0] = '\0';
@@ -213,6 +209,15 @@ CurlContext::~CurlContext()
 	}
 	delete m_callback;
 
+	if (m_response.empty()) {
+		if (m_error.empty()) {
+			m_error = "Empty response";
+		}
+		else {
+			m_error += " (empty response)";
+		}
+	}
+
 	(*m_closeCallback)(m_error.c_str(), m_error.length());
 	delete m_closeCallback;
 
@@ -221,42 +226,72 @@ int CurlContext::on_socket(CURL* /*easy*/, curl_socket_t s, int action)
 {
+	auto it = std::find_if(m_pollHandles.begin(), m_pollHandles.end(), [s](const auto& value) { return value.first == s; });
+
 	switch (action) {
 	case CURL_POLL_IN:
 	case CURL_POLL_OUT:
 	case CURL_POLL_INOUT:
-		if (!m_closing && !uv_is_closing(reinterpret_cast<uv_handle_t*>(&m_pollHandle))) {
-			if (!m_socket) {
-				m_socket = s;
-				curl_multi_assign(m_multiHandle, s, this);
-			}
-			else if (m_socket != s) {
-				LOGERR(1, "This code can't work with multiple parallel requests. Fix the code!");
+		{
+			uv_poll_t* h = nullptr;
+
+			if (it != m_pollHandles.end()) {
+				h = it->second;
 			}
+			else {
+				h = new uv_poll_t{};
-			int events = 0;
-			if (action != CURL_POLL_IN) events |= UV_WRITABLE;
-			if (action != CURL_POLL_OUT) events |= UV_READABLE;
+				// cppcheck-suppress nullPointer
+				h->data = this;
-			if (!m_pollHandle.data) {
-				uv_poll_init_socket(m_loop, &m_pollHandle, s);
-				m_pollHandle.data = this;
+				const int result = uv_poll_init_socket(m_loop, h, s);
+				if (result < 0) {
+					LOGERR(1, "uv_poll_init_socket failed: " << uv_err_name(result));
+					delete h;
+					h = nullptr;
+				}
+				else {
+					m_pollHandles.emplace_back(s, h);
+				}
 			}
-			const int result = uv_poll_start(&m_pollHandle, events, curl_perform);
-			if (result < 0) {
-				LOGERR(1, "uv_poll_start failed with error " << uv_err_name(result));
+			if (h) {
+				const CURLMcode err = curl_multi_assign(m_multiHandle, s, this);
+				if (err != CURLM_OK) {
+					LOGERR(1, "curl_multi_assign(action = " << action << ") failed: " << curl_multi_strerror(err));
+				}
+
+				int events = 0;
+				if (action != CURL_POLL_IN) events |= UV_WRITABLE;
+				if (action != CURL_POLL_OUT) events |= UV_READABLE;
+
+				const int result = uv_poll_start(h, events, curl_perform);
+				if (result < 0) {
+					LOGERR(1, "uv_poll_start failed with error " << uv_err_name(result));
+				}
+			}
+			else {
+				LOGERR(1, "failed to start polling on socket " << static_cast<int>(s));
 			}
-		}
-		else {
-			LOGERR(1, "Poll handle is closing, can't process socket action " << action);
 		}
 		break;
 
 	case CURL_POLL_REMOVE:
 	default:
-		curl_multi_assign(m_multiHandle, s, nullptr);
-		close_handles();
+		{
+			if (it != m_pollHandles.end()) {
+				uv_poll_t* h = it->second;
+				m_pollHandles.erase(it);
+
+				uv_poll_stop(h);
+				uv_close(reinterpret_cast<uv_handle_t*>(h), [](uv_handle_t* h) { delete reinterpret_cast<uv_poll_t*>(h); });
+			}
+
+			const CURLMcode err = curl_multi_assign(m_multiHandle, s, nullptr);
+			if (err != CURLM_OK) {
+				LOGERR(1, "curl_multi_assign(action = " << action << ") failed: " << curl_multi_strerror(err));
+			}
+		}
 		break;
 	}
@@ -285,7 +320,11 @@ void CurlContext::on_timeout(uv_handle_t* req)
 	CurlContext* ctx = reinterpret_cast<CurlContext*>(req->data);
 
 	int running_handles = 0;
-	curl_multi_socket_action(ctx->m_multiHandle, CURL_SOCKET_TIMEOUT, 0, &running_handles);
+	CURLMcode err = curl_multi_socket_action(ctx->m_multiHandle, CURL_SOCKET_TIMEOUT, 0, &running_handles);
+	if (err != CURLM_OK) {
+		LOGERR(1, "curl_multi_socket_action failed, error " << curl_multi_strerror(err));
+	}
+
 	ctx->check_multi_info();
 
 	if (running_handles == 0) {
@@ -314,9 +353,17 @@ void CurlContext::curl_perform(uv_poll_t* req, int status, int events)
 	CurlContext* ctx = reinterpret_cast<CurlContext*>(req->data);
 
-	int running_handles;
-	curl_multi_socket_action(ctx->m_multiHandle, ctx->m_socket, flags, &running_handles);
+	int running_handles = 0;
+
+	auto it = std::find_if(ctx->m_pollHandles.begin(), ctx->m_pollHandles.end(), [req](const auto& value) { return value.second == req; });
+	if (it != ctx->m_pollHandles.end()) {
+		curl_multi_socket_action(ctx->m_multiHandle, it->first, flags, &running_handles);
+	}
+
 	ctx->check_multi_info();
+
+	if (running_handles == 0) {
+		ctx->close_handles();
+	}
 }
 
 void CurlContext::check_multi_info()
@@ -355,7 +402,7 @@ void CurlContext::on_close(uv_handle_t* h)
 	CurlContext* ctx = reinterpret_cast<CurlContext*>(h->data);
 	h->data = nullptr;
 
-	if (ctx->m_timer.data || ctx->m_async.data || ctx->m_pollHandle.data) {
+	if (ctx->m_timer.data || ctx->m_async.data) {
 		return;
 	}
@@ -364,12 +411,11 @@ void CurlContext::on_close(uv_handle_t* h)
 
 void CurlContext::close_handles()
 {
-	m_closing = true;
-
-	if (m_pollHandle.data && !uv_is_closing(reinterpret_cast<uv_handle_t*>(&m_pollHandle))) {
-		uv_poll_stop(&m_pollHandle);
-		uv_close(reinterpret_cast<uv_handle_t*>(&m_pollHandle), on_close);
+	for (const auto& p : m_pollHandles) {
+		uv_poll_stop(p.second);
+		uv_close(reinterpret_cast<uv_handle_t*>(p.second), [](uv_handle_t* h) { delete reinterpret_cast<uv_poll_t*>(h); });
 	}
+	m_pollHandles.clear();
 
 	if (m_async.data && !uv_is_closing(reinterpret_cast<uv_handle_t*>(&m_async))) {
 		uv_close(reinterpret_cast<uv_handle_t*>(&m_async), on_close);
diff --git a/src/side_chain.cpp b/src/side_chain.cpp
index 614ce96..f1e9994 100644
--- a/src/side_chain.cpp
+++ b/src/side_chain.cpp
@@ -823,14 +823,15 @@ bool SideChain::get_outputs_blob(PoolBlock* block, uint64_t total_reward, std::v
 	return true;
 }
 
-void SideChain::print_status() const
+void SideChain::print_status(bool obtain_sidechain_lock) const
 {
 	std::vector blocks_in_window;
 	blocks_in_window.reserve(m_chainWindowSize * 9 / 8);
 
 	const difficulty_type diff = difficulty();
 
-	ReadLock lock(m_sidechainLock);
+	if (obtain_sidechain_lock) uv_rwlock_rdlock(&m_sidechainLock);
+	ON_SCOPE_LEAVE([this, obtain_sidechain_lock]() { if (obtain_sidechain_lock) uv_rwlock_rdunlock(&m_sidechainLock); });
 
 	uint64_t rem;
 	uint64_t pool_hashrate = udiv128(diff.hi, diff.lo, m_targetBlockTime, &rem);
@@ -2123,6 +2124,17 @@ void SideChain::finish_precalc()
 	{
 		LOGERR(1, "exception in finish_precalc(): " << e.what());
 	}
+
+#ifdef DEV_TEST_SYNC
+	if (m_pool) {
+		LOGINFO(0, log::LightGreen() << "[DEV] Synchronization finished successfully, stopping P2Pool now");
+		print_status(false);
+		if (m_pool->p2p_server()) {
+			m_pool->p2p_server()->print_status();
+		}
+		m_pool->stop();
+	}
+#endif
 }
 
 } // namespace p2pool
diff --git a/src/side_chain.h b/src/side_chain.h
index 0118504..1f0e90d 100644
--- a/src/side_chain.h
+++ b/src/side_chain.h
@@ -58,7 +58,7 @@ public:
 	bool get_block_blob(const hash& id, std::vector<uint8_t>& blob) const;
 	bool get_outputs_blob(PoolBlock* block, uint64_t total_reward, std::vector<uint8_t>& blob, uv_loop_t* loop) const;
 
-	void print_status() const;
+	void print_status(bool obtain_sidechain_lock = true) const;
 	double get_reward_share(const Wallet& w) const;
 
 	// Consensus ID can be used to spawn independent P2Pools with their own sidechains
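The `obtain_sidechain_lock` parameter added to `print_status` lets the `DEV_TEST_SYNC` shutdown path above print the sidechain status without taking `m_sidechainLock` again, presumably because that code path can run while the lock is already held and libuv's `uv_rwlock_t` is not recursive. A minimal sketch of this conditional-lock pattern, using hypothetical names rather than the p2pool sources:

```
#include <cstdint>
#include <cstdio>
#include <uv.h>

// Illustrative only: the caller decides whether the read lock is taken,
// and the unlock mirrors that decision (as print_status(bool) does above).
struct Status
{
	Status() { uv_rwlock_init(&m_lock); }
	~Status() { uv_rwlock_destroy(&m_lock); }

	void print(bool obtain_lock = true) const
	{
		if (obtain_lock) uv_rwlock_rdlock(&m_lock);
		printf("height = %llu\n", static_cast<unsigned long long>(m_height));
		if (obtain_lock) uv_rwlock_rdunlock(&m_lock);
	}

	mutable uv_rwlock_t m_lock;
	uint64_t m_height = 0;
};
```

In the patch itself the unlock is wrapped in `ON_SCOPE_LEAVE`, so it also runs on early returns; the sketch inlines it only for brevity.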
diff --git a/src/util.h b/src/util.h
index 5348648..b861ca1 100644
--- a/src/util.h
+++ b/src/util.h
@@ -68,7 +68,7 @@ private:
 
 template<typename T> FORCEINLINE ScopeGuard<T> on_scope_leave(T&& handler) { return ScopeGuard<T>(std::move(handler)); }
 
-#define ON_SCOPE_LEAVE(x) auto CONCAT(scope_guard_, __LINE__) = on_scope_leave(x);
+#define ON_SCOPE_LEAVE(...) auto CONCAT(scope_guard_, __LINE__) = on_scope_leave(__VA_ARGS__);
 
 struct MinerCallbackHandler
 {
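The `ON_SCOPE_LEAVE` macro is switched from a single named parameter to `__VA_ARGS__` because the preprocessor splits macro arguments on top-level commas, and square brackets do not protect them: a lambda like `[this, obtain_sidechain_lock]() { ... }`, as now passed in `print_status` above, would be seen as two arguments by the old one-parameter form. A small self-contained sketch of the same technique, with a standalone guard that is not the p2pool implementation:

```
#include <cstdio>
#include <utility>

// Standalone scope guard for illustration (p2pool's ScopeGuard differs in detail).
template<typename T> struct ScopeGuard
{
	explicit ScopeGuard(T&& handler) : m_handler(std::move(handler)), m_active(true) {}
	ScopeGuard(ScopeGuard&& other) : m_handler(std::move(other.m_handler)), m_active(other.m_active) { other.m_active = false; }
	~ScopeGuard() { if (m_active) m_handler(); }

	ScopeGuard(const ScopeGuard&) = delete;
	ScopeGuard& operator=(const ScopeGuard&) = delete;

	T m_handler;
	bool m_active;
};

template<typename T> ScopeGuard<T> on_scope_leave(T&& handler) { return ScopeGuard<T>(std::move(handler)); }

// CONCAT is redefined here only to keep the sketch self-contained.
#define CONCAT_HELPER(a, b) a##b
#define CONCAT(a, b) CONCAT_HELPER(a, b)

// Variadic form: the comma inside "[flag, value]" no longer splits the argument list.
#define ON_SCOPE_LEAVE(...) auto CONCAT(scope_guard_, __LINE__) = on_scope_leave(__VA_ARGS__);

int main()
{
	bool flag = true;
	int value = 42;
	// With the old ON_SCOPE_LEAVE(x) this call would fail to preprocess
	// ("too many arguments") because of the comma in the capture list.
	ON_SCOPE_LEAVE([flag, value]() { if (flag) printf("leaving scope, value = %d\n", value); });
	printf("inside scope\n");
	return 0;
}
```

Running it prints "inside scope" followed by "leaving scope, value = 42": the guard's handler executes when the guard goes out of scope at the end of main, which is exactly what the macro relies on in `print_status`.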