// Copyright (c) 2014-2018, The Monero Project
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be
// used to endorse or promote products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "db_lmdb.h"
#include <boost/filesystem.hpp>
#include <boost/format.hpp>
#include <boost/current_function.hpp>
#include <memory> // std::unique_ptr
#include <cstring> // memcpy
#include <random>
#include "string_tools.h"
#include "common/util.h"
#include "cryptonote_basic/cryptonote_format_utils.h"
#include "crypto/crypto.h"
#include "profile_tools.h"
#include "ringct/rctOps.h"
#undef MONERO_DEFAULT_LOG_CATEGORY
#define MONERO_DEFAULT_LOG_CATEGORY "blockchain.db.lmdb"
#if defined(__i386) || defined(__x86_64)
#define MISALIGNED_OK 1
#endif
using epee::string_tools::pod_to_hex;
using namespace crypto;
// Increase when the DB changes in a non-backward-compatible way and there
// is no automatic conversion, so that a full resync is needed.
#define VERSION 1
namespace
{
#pragma pack(push, 1)
// This MUST be identical to output_data_t, without the extra rct data at the end
struct pre_rct_output_data_t
{
crypto::public_key pubkey; //!< the output's public key (for spend verification)
uint64_t unlock_time; //!< the output's unlock time (or height)
uint64_t height; //!< the height of the block which created the output
};
#pragma pack(pop)
template <typename T>
inline void throw0(const T &e)
{
LOG_PRINT_L0(e.what());
throw e;
}
template <typename T>
inline void throw1(const T &e)
{
LOG_PRINT_L1(e.what());
throw e;
}
#define MDB_val_set(var, val) MDB_val var = {sizeof(val), (void *)&val}
template<typename T>
struct MDB_val_copy: public MDB_val
{
MDB_val_copy(const T &t) :
t_copy(t)
{
mv_size = sizeof (T);
mv_data = &t_copy;
}
private:
T t_copy;
};
template<>
struct MDB_val_copy<cryptonote::blobdata>: public MDB_val
{
MDB_val_copy(const cryptonote::blobdata &bd) :
data(new char[bd.size()])
{
memcpy(data.get(), bd.data(), bd.size());
mv_size = bd.size();
mv_data = data.get();
}
private:
std::unique_ptr<char[]> data;
};
template<>
struct MDB_val_copy<const char*>: public MDB_val
{
MDB_val_copy(const char *s):
size(strlen(s)+1), // include the NUL, makes it easier for compares
data(new char[size])
{
mv_size = size;
mv_data = data.get();
memcpy(mv_data, s, size);
}
private:
size_t size;
std::unique_ptr<char[]> data;
};
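// Usage sketch (illustrative only, not called from here): these wrappers
// let callers hand LMDB a key/value without manual size/pointer set-up.
// `txn` and `dbi` below are assumed to be an open write txn and table handle.
//
//   uint64_t height = 123;
//   MDB_val_copy<uint64_t> k(height);      // copies the integer key
//   MDB_val_copy<const char*> v("hello");  // copies the string, incl. NUL
//   int ret = mdb_put(txn, dbi, &k, &v, 0);
//   if (ret)
//     throw0(cryptonote::DB_ERROR(lmdb_error("mdb_put failed: ", ret).c_str()));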
int compare_uint64(const MDB_val *a, const MDB_val *b)
{
const uint64_t va = *(const uint64_t *)a->mv_data;
const uint64_t vb = *(const uint64_t *)b->mv_data;
return (va < vb) ? -1 : va > vb;
}
int compare_hash32(const MDB_val *a, const MDB_val *b)
{
uint32_t *va = (uint32_t*) a->mv_data;
uint32_t *vb = (uint32_t*) b->mv_data;
for (int n = 7; n >= 0; n--)
{
if (va[n] == vb[n])
continue;
return va[n] < vb[n] ? -1 : 1;
}
return 0;
}
int compare_string(const MDB_val *a, const MDB_val *b)
{
const char *va = (const char*) a->mv_data;
const char *vb = (const char*) b->mv_data;
return strcmp(va, vb);
}
/* DB schema:
*
* Table Key Data
* ----- --- ----
* blocks block ID block blob
* block_heights block hash block height
* block_info block ID {block metadata}
*
* txs txn ID txn blob
* tx_indices txn hash {txn ID, metadata}
* tx_outputs txn ID [txn amount output indices]
*
* output_txs output ID {txn hash, local index}
* output_amounts amount [{amount output index, metadata}...]
*
* spent_keys input hash -
*
* txpool_meta txn hash txn metadata
* txpool_blob txn hash txn blob
*
* Note: where the data items are of uniform size, DUPFIXED tables have
* been used to save space. In most of these cases, a dummy "zerokval"
* key is used when accessing the table; the Key listed above will be
* attached as a prefix on the Data to serve as the DUPSORT key.
* (DUPFIXED saves 8 bytes per record.)
*
* The output_amounts table doesn't use a dummy key, but uses DUPSORT.
*/
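// Illustrative read of a zerokval/DUPFIXED table (a sketch of the pattern
// the accessors in this file follow; `cur` is assumed to be an open cursor
// on block_info, `h` a block height). The dupsort comparator examines only
// the 8-byte prefix, so a bare height suffices as the lookup value:
//
//   MDB_val_set(v, h);
//   int ret = mdb_cursor_get(cur, (MDB_val *)&zerokval, &v, MDB_GET_BOTH);
//   if (ret == 0)
//   {
//     // v now spans the whole record: 8-byte height prefix + metadata
//     const mdb_block_info *bi = (const mdb_block_info *)v.mv_data;
//   }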
const char* const LMDB_BLOCKS = "blocks";
const char* const LMDB_BLOCK_HEIGHTS = "block_heights";
const char* const LMDB_BLOCK_INFO = "block_info";
const char* const LMDB_TXS = "txs";
const char* const LMDB_TX_INDICES = "tx_indices";
const char* const LMDB_TX_OUTPUTS = "tx_outputs";
const char* const LMDB_OUTPUT_TXS = "output_txs";
const char* const LMDB_OUTPUT_AMOUNTS = "output_amounts";
const char* const LMDB_SPENT_KEYS = "spent_keys";
const char* const LMDB_TXPOOL_META = "txpool_meta";
const char* const LMDB_TXPOOL_BLOB = "txpool_blob";
const char* const LMDB_HF_STARTING_HEIGHTS = "hf_starting_heights";
const char* const LMDB_HF_VERSIONS = "hf_versions";
const char* const LMDB_PROPERTIES = "properties";
const char zerokey[8] = {0};
const MDB_val zerokval = { sizeof(zerokey), (void *)zerokey };
const std::string lmdb_error(const std::string& error_string, int mdb_res)
{
const std::string full_string = error_string + mdb_strerror(mdb_res);
return full_string;
}
inline void lmdb_db_open(MDB_txn* txn, const char* name, int flags, MDB_dbi& dbi, const std::string& error_string)
{
if (auto res = mdb_dbi_open(txn, name, flags, &dbi))
throw0(cryptonote::DB_OPEN_FAILURE((lmdb_error(error_string + " : ", res) + std::string(" - you may want to start with --db-salvage")).c_str()));
}
} // anonymous namespace
#define CURSOR(name) \
if (!m_cur_ ## name) { \
int result = mdb_cursor_open(*m_write_txn, m_ ## name, &m_cur_ ## name); \
if (result) \
throw0(DB_ERROR(lmdb_error("Failed to open cursor: ", result).c_str())); \
}
#define RCURSOR(name) \
if (!m_cur_ ## name) { \
int result = mdb_cursor_open(m_txn, m_ ## name, (MDB_cursor **)&m_cur_ ## name); \
if (result) \
throw0(DB_ERROR(lmdb_error("Failed to open cursor: ", result).c_str())); \
if (m_cursors != &m_wcursors) \
m_tinfo->m_ti_rflags.m_rf_ ## name = true; \
} else if (m_cursors != &m_wcursors && !m_tinfo->m_ti_rflags.m_rf_ ## name) { \
int result = mdb_cursor_renew(m_txn, m_cur_ ## name); \
if (result) \
throw0(DB_ERROR(lmdb_error("Failed to renew cursor: ", result).c_str())); \
m_tinfo->m_ti_rflags.m_rf_ ## name = true; \
}
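// Typical use (sketch): read accessors bind m_txn/m_cursors to a read
// transaction, then open or renew cursors on demand, e.g.
//
//   RCURSOR(block_heights);
//   MDB_val_set(key, hash);  // `hash` being a crypto::hash to look up
//   int ret = mdb_cursor_get(m_cur_block_heights, (MDB_val *)&zerokval, &key, MDB_GET_BOTH);
//
// while write paths use CURSOR(name), which opens against *m_write_txn.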
namespace cryptonote
{
typedef struct mdb_block_info
{
uint64_t bi_height;
uint64_t bi_timestamp;
uint64_t bi_coins;
uint64_t bi_size; // a size_t really but we need 32-bit compat
difficulty_type bi_diff;
crypto::hash bi_hash;
} mdb_block_info;
typedef struct blk_height {
crypto::hash bh_hash;
uint64_t bh_height;
} blk_height;
typedef struct txindex {
crypto::hash key;
tx_data_t data;
} txindex;
typedef struct pre_rct_outkey {
uint64_t amount_index;
uint64_t output_id;
pre_rct_output_data_t data;
} pre_rct_outkey;
typedef struct outkey {
uint64_t amount_index;
uint64_t output_id;
output_data_t data;
} outkey;
typedef struct outtx {
uint64_t output_id;
crypto::hash tx_hash;
uint64_t local_index;
} outtx;
std::atomic<uint64_t> mdb_txn_safe::num_active_txns{0};
std::atomic_flag mdb_txn_safe::creation_gate = ATOMIC_FLAG_INIT;
mdb_threadinfo::~mdb_threadinfo()
{
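// m_ti_rcursors is a struct whose members are all MDB_cursor*, so it is
// traversed here as a flat array of cursor pointers.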
MDB_cursor **cur = &m_ti_rcursors.m_txc_blocks;
unsigned i;
for (i=0; i<sizeof(mdb_txn_cursors)/sizeof(MDB_cursor *); i++)
if (cur[i])
mdb_cursor_close(cur[i]);
if (m_ti_rtxn)
mdb_txn_abort(m_ti_rtxn);
}
mdb_txn_safe::mdb_txn_safe(const bool check) : m_txn(NULL), m_tinfo(NULL), m_check(check)
{
if (check)
{
while (creation_gate.test_and_set());
num_active_txns++;
creation_gate.clear();
}
}
mdb_txn_safe::~mdb_txn_safe()
{
if (!m_check)
return;
LOG_PRINT_L3("mdb_txn_safe: destructor");
if (m_tinfo != nullptr)
{
mdb_txn_reset(m_tinfo->m_ti_rtxn);
memset(&m_tinfo->m_ti_rflags, 0, sizeof(m_tinfo->m_ti_rflags));
} else if (m_txn != nullptr)
{
if (m_batch_txn) // this is a batch txn and should have been handled before this point for safety
{
LOG_PRINT_L0("WARNING: mdb_txn_safe: m_txn is a batch txn and it's not NULL in destructor - calling mdb_txn_abort()");
}
else
{
// Example of when this occurs: a lookup fails, so a read-only txn is
// aborted through this destructor. However, successful read-only txns
// ideally should have been committed when done and not end up here.
//
// NOTE: not sure if this is ever reached for a non-batch write
// transaction, but it's probably not ideal if it did.
LOG_PRINT_L3("mdb_txn_safe: m_txn not NULL in destructor - calling mdb_txn_abort()");
}
mdb_txn_abort(m_txn);
}
num_active_txns--;
}
void mdb_txn_safe::commit(std::string message)
{
if (message.size() == 0)
{
message = "Failed to commit a transaction to the db";
}
if (auto result = mdb_txn_commit(m_txn))
{
m_txn = nullptr;
throw0(DB_ERROR(lmdb_error(message + ": ", result).c_str()));
}
m_txn = nullptr;
}
void mdb_txn_safe::abort()
{
LOG_PRINT_L3("mdb_txn_safe: abort()");
if(m_txn != nullptr)
{
mdb_txn_abort(m_txn);
m_txn = nullptr;
}
else
{
LOG_PRINT_L0("WARNING: mdb_txn_safe: abort() called, but m_txn is NULL");
}
}
uint64_t mdb_txn_safe::num_active_tx() const
{
return num_active_txns;
}
void mdb_txn_safe::prevent_new_txns()
{
while (creation_gate.test_and_set());
}
void mdb_txn_safe::wait_no_active_txns()
{
while (num_active_txns > 0);
}
void mdb_txn_safe::allow_new_txns()
{
creation_gate.clear();
}
void lmdb_resized(MDB_env *env)
{
mdb_txn_safe::prevent_new_txns();
MGINFO("LMDB map resize detected.");
MDB_envinfo mei;
mdb_env_info(env, &mei);
uint64_t old = mei.me_mapsize;
mdb_txn_safe::wait_no_active_txns();
int result = mdb_env_set_mapsize(env, 0);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to set new mapsize: ", result).c_str()));
mdb_env_info(env, &mei);
uint64_t new_mapsize = mei.me_mapsize;
MGINFO("LMDB Mapsize increased." << " Old: " << old / (1024 * 1024) << "MiB" << ", New: " << new_mapsize / (1024 * 1024) << "MiB");
mdb_txn_safe::allow_new_txns();
}
inline int lmdb_txn_begin(MDB_env *env, MDB_txn *parent, unsigned int flags, MDB_txn **txn)
{
int res = mdb_txn_begin(env, parent, flags, txn);
if (res == MDB_MAP_RESIZED) {
lmdb_resized(env);
res = mdb_txn_begin(env, parent, flags, txn);
}
return res;
}
inline int lmdb_txn_renew(MDB_txn *txn)
{
int res = mdb_txn_renew(txn);
if (res == MDB_MAP_RESIZED) {
lmdb_resized(mdb_txn_env(txn));
res = mdb_txn_renew(txn);
}
return res;
}
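// Callers substitute these wrappers for the raw mdb_txn_begin/renew calls
// so that a map resize by another process is retried transparently.
// Usage sketch (error handling as in the rest of this file):
//
//   MDB_txn *txn;
//   int res = lmdb_txn_begin(m_env, NULL, MDB_RDONLY, &txn);
//   if (res)
//     throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", res).c_str()));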
void BlockchainLMDB::do_resize(uint64_t increase_size)
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
CRITICAL_REGION_LOCAL(m_synchronization_lock);
const uint64_t add_size = 1LL << 30;
// check disk capacity
try
{
boost::filesystem::path path(m_folder);
boost::filesystem::space_info si = boost::filesystem::space(path);
if(si.available < add_size)
{
MERROR("!! WARNING: Insufficient free space to extend database !!: " << si.available / 1LL << 20L);
return;
}
}
catch(...)
{
// print something but proceed.
MWARNING("Unable to query free disk space.");
}
MDB_envinfo mei;
mdb_env_info(m_env, &mei);
MDB_stat mst;
mdb_env_stat(m_env, &mst);
// add 1Gb per resize, instead of doing a percentage increase
uint64_t new_mapsize = mei.me_mapsize + add_size;
// If given, use increase_size instead of the above way of resizing.
// This is currently used for increasing by an estimated size at start of new
// batch txn.
if (increase_size > 0)
new_mapsize = mei.me_mapsize + increase_size;
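// bump new_mapsize by its remainder modulo the DB page size (a no-op when
// it is already page-aligned)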
new_mapsize += (new_mapsize % mst.ms_psize);
mdb_txn_safe::prevent_new_txns();
if (m_write_txn != nullptr)
{
if (m_batch_active)
{
throw0(DB_ERROR("lmdb resizing not yet supported when batch transactions enabled!"));
}
else
{
throw0(DB_ERROR("attempting resize with write transaction in progress, this should not happen!"));
}
}
mdb_txn_safe::wait_no_active_txns();
int result = mdb_env_set_mapsize(m_env, new_mapsize);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to set new mapsize: ", result).c_str()));
MGINFO("LMDB Mapsize increased." << " Old: " << mei.me_mapsize / (1024 * 1024) << "MiB" << ", New: " << new_mapsize / (1024 * 1024) << "MiB");
mdb_txn_safe::allow_new_txns();
}
// threshold_size is used for batch transactions
bool BlockchainLMDB::need_resize(uint64_t threshold_size) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
#if defined(ENABLE_AUTO_RESIZE)
MDB_envinfo mei;
mdb_env_info(m_env, &mei);
MDB_stat mst;
mdb_env_stat(m_env, &mst);
// size_used doesn't include data yet to be committed, which can be of
// significant size during batch transactions. For that, we estimate the size
// needed at the beginning of the batch transaction and pass in the
// additional size needed.
uint64_t size_used = mst.ms_psize * mei.me_last_pgno;
LOG_PRINT_L1("DB map size: " << mei.me_mapsize);
LOG_PRINT_L1("Space used: " << size_used);
LOG_PRINT_L1("Space remaining: " << mei.me_mapsize - size_used);
LOG_PRINT_L1("Size threshold: " << threshold_size);
float resize_percent_old = RESIZE_PERCENT;
LOG_PRINT_L1(boost::format("Percent used: %.04f Percent threshold: %.04f") % ((double)size_used/mei.me_mapsize) % resize_percent_old);
if (threshold_size > 0)
{
if (mei.me_mapsize - size_used < threshold_size)
{
LOG_PRINT_L1("Threshold met (size-based)");
return true;
}
else
return false;
}
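// pick a randomized fill threshold between 60% and 90% of the map size, so
// the resize point is not always hit at the same fill level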
std::mt19937 engine(std::random_device{}());
std::uniform_real_distribution<double> fdis(0.6, 0.9);
double resize_percent = fdis(engine);
if ((double)size_used / mei.me_mapsize > resize_percent)
{
LOG_PRINT_L1("Threshold met (percent-based)");
return true;
}
return false;
#else
return false;
#endif
}
void BlockchainLMDB::check_and_resize_for_batch(uint64_t batch_num_blocks, uint64_t batch_bytes)
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
LOG_PRINT_L1("[" << __func__ << "] " << "checking DB size");
const uint64_t min_increase_size = 512 * (1 << 20);
uint64_t threshold_size = 0;
uint64_t increase_size = 0;
if (batch_num_blocks > 0)
{
threshold_size = get_estimated_batch_size(batch_num_blocks, batch_bytes);
MDEBUG("calculated batch size: " << threshold_size);
// The increased DB size could be a multiple of threshold_size, a fixed
// size increase (> threshold_size), or other variations.
//
// Currently we use the greater of threshold size and a minimum size. The
// minimum size increase is used to avoid frequent resizes when the batch
// size is set to a very small number of blocks.
increase_size = (threshold_size > min_increase_size) ? threshold_size : min_increase_size;
MDEBUG("increase size: " << increase_size);
}
// if threshold_size is 0 (i.e. number of blocks for batch not passed in), it
// will fall back to the percent-based threshold check instead of the
// size-based check
if (need_resize(threshold_size))
{
MGINFO("[batch] DB resize needed");
do_resize(increase_size);
}
}
uint64_t BlockchainLMDB::get_estimated_batch_size(uint64_t batch_num_blocks, uint64_t batch_bytes) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
uint64_t threshold_size = 0;
// batch size estimate * batch safety factor = final size estimate
// Takes into account "reasonable" block size increases within the batch.
float batch_safety_factor = 1.7f;
float batch_fudge_factor = batch_safety_factor * batch_num_blocks;
// estimated factor by which a stored block expands from its raw size, including denormalization and db overhead.
// Note that this probably doesn't grow linearly with block size.
float db_expand_factor = 4.5f;
uint64_t num_prev_blocks = 500;
// For resizing purposes, allow for at least 4k average block size.
uint64_t min_block_size = 4 * 1024;
uint64_t block_stop = 0;
uint64_t m_height = height();
if (m_height > 1)
block_stop = m_height - 1;
uint64_t block_start = 0;
if (block_stop >= num_prev_blocks)
block_start = block_stop - num_prev_blocks + 1;
uint32_t num_blocks_used = 0;
uint64_t total_block_size = 0;
MDEBUG("[" << __func__ << "] " << "m_height: " << m_height << " block_start: " << block_start << " block_stop: " << block_stop);
size_t avg_block_size = 0;
if (batch_bytes)
{
avg_block_size = batch_bytes / batch_num_blocks;
goto estim;
}
if (m_height == 0)
{
MDEBUG("No existing blocks to check for average block size");
}
else if (m_cum_count >= num_prev_blocks)
{
avg_block_size = m_cum_size / m_cum_count;
MDEBUG("average block size across recent " << m_cum_count << " blocks: " << avg_block_size);
m_cum_size = 0;
m_cum_count = 0;
}
else
{
MDB_txn *rtxn;
mdb_txn_cursors *rcurs;
block_rtxn_start(&rtxn, &rcurs);
for (uint64_t block_num = block_start; block_num <= block_stop; ++block_num)
{
uint32_t block_size = get_block_size(block_num);
total_block_size += block_size;
// Track number of blocks being totalled here instead of assuming, in case
// some blocks were to be skipped for being outliers.
++num_blocks_used;
}
block_rtxn_stop();
avg_block_size = total_block_size / num_blocks_used;
MDEBUG("average block size across recent " << num_blocks_used << " blocks: " << avg_block_size);
}
estim:
if (avg_block_size < min_block_size)
avg_block_size = min_block_size;
MDEBUG("estimated average block size for batch: " << avg_block_size);
// bigger safety margin on smaller batch sizes (few blocks per batch)
if (batch_fudge_factor < 5000.0)
batch_fudge_factor = 5000.0;
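// e.g. a 100-block batch of 4 KiB blocks: fudge = 1.7 * 100 = 170, clamped
// to 5000, so threshold_size = 4096 * 4.5 * 5000 ~= 92 MB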
threshold_size = avg_block_size * db_expand_factor * batch_fudge_factor;
return threshold_size;
}
void BlockchainLMDB::add_block(const block& blk, const size_t& block_size, const difficulty_type& cumulative_difficulty, const uint64_t& coins_generated,
const crypto::hash& blk_hash)
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
mdb_txn_cursors *m_cursors = &m_wcursors;
uint64_t m_height = height();
CURSOR(block_heights)
blk_height bh = {blk_hash, m_height};
MDB_val_set(val_h, bh);
if (mdb_cursor_get(m_cur_block_heights, (MDB_val *)&zerokval, &val_h, MDB_GET_BOTH) == 0)
throw1(BLOCK_EXISTS("Attempting to add block that's already in the db"));
if (m_height > 0)
{
MDB_val_set(parent_key, blk.prev_id);
int result = mdb_cursor_get(m_cur_block_heights, (MDB_val *)&zerokval, &parent_key, MDB_GET_BOTH);
if (result)
{
LOG_PRINT_L3("m_height: " << m_height);
LOG_PRINT_L3("parent_key: " << blk.prev_id);
throw0(DB_ERROR(lmdb_error("Failed to get top block hash to check for new block's parent: ", result).c_str()));
}
blk_height *prev = (blk_height *)parent_key.mv_data;
if (prev->bh_height != m_height - 1)
throw0(BLOCK_PARENT_DNE("Top block is not new block's parent"));
}
int result = 0;
MDB_val_set(key, m_height);
CURSOR(blocks)
CURSOR(block_info)
// this call to mdb_cursor_put will change height()
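// MDB_APPEND requires keys in increasing order; that holds here since the
// key is the current height and blocks are only appended at the top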
MDB_val_copy<blobdata> blob(block_to_blob(blk));
result = mdb_cursor_put(m_cur_blocks, &key, &blob, MDB_APPEND);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to add block blob to db transaction: ", result).c_str()));
mdb_block_info bi;
bi.bi_height = m_height;
bi.bi_timestamp = blk.timestamp;
bi.bi_coins = coins_generated;
bi.bi_size = block_size;
bi.bi_diff = cumulative_difficulty;
bi.bi_hash = blk_hash;
MDB_val_set(val, bi);
result = mdb_cursor_put(m_cur_block_info, (MDB_val *)&zerokval, &val, MDB_APPENDDUP);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to add block info to db transaction: ", result).c_str()));
result = mdb_cursor_put(m_cur_block_heights, (MDB_val *)&zerokval, &val_h, 0);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to add block height by hash to db transaction: ", result).c_str()));
m_cum_size += block_size;
m_cum_count++;
}
void BlockchainLMDB::remove_block()
{
int result;
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
uint64_t m_height = height();
if (m_height == 0)
throw0(BLOCK_DNE ("Attempting to remove block from an empty blockchain"));
mdb_txn_cursors *m_cursors = &m_wcursors;
CURSOR(block_info)
CURSOR(block_heights)
CURSOR(blocks)
MDB_val_copy<uint64_t> k(m_height - 1);
MDB_val h = k;
if ((result = mdb_cursor_get(m_cur_block_info, (MDB_val *)&zerokval, &h, MDB_GET_BOTH)))
throw1(BLOCK_DNE(lmdb_error("Attempting to remove block that's not in the db: ", result).c_str()));
// must use h now; deleting from m_block_info will invalidate it
mdb_block_info *bi = (mdb_block_info *)h.mv_data;
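// the height in this lookup key can be 0: the block_heights dup comparator
// (compare_hash32) compares only the 32-byte hash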
blk_height bh = {bi->bi_hash, 0};
h.mv_data = (void *)&bh;
h.mv_size = sizeof(bh);
if ((result = mdb_cursor_get(m_cur_block_heights, (MDB_val *)&zerokval, &h, MDB_GET_BOTH)))
throw1(DB_ERROR(lmdb_error("Failed to locate block height by hash for removal: ", result).c_str()));
if ((result = mdb_cursor_del(m_cur_block_heights, 0)))
throw1(DB_ERROR(lmdb_error("Failed to add removal of block height by hash to db transaction: ", result).c_str()));
if ((result = mdb_cursor_get(m_cur_blocks, &k, NULL, MDB_SET)))
throw1(DB_ERROR(lmdb_error("Failed to locate block for removal: ", result).c_str()));
if ((result = mdb_cursor_del(m_cur_blocks, 0)))
throw1(DB_ERROR(lmdb_error("Failed to add removal of block to db transaction: ", result).c_str()));
if ((result = mdb_cursor_del(m_cur_block_info, 0)))
throw1(DB_ERROR(lmdb_error("Failed to add removal of block info to db transaction: ", result).c_str()));
}
uint64_t BlockchainLMDB::add_transaction_data(const crypto::hash& blk_hash, const transaction& tx, const crypto::hash& tx_hash)
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
mdb_txn_cursors *m_cursors = &m_wcursors;
uint64_t m_height = height();
int result;
uint64_t tx_id = get_tx_count();
CURSOR(txs)
CURSOR(tx_indices)
MDB_val_set(val_tx_id, tx_id);
MDB_val_set(val_h, tx_hash);
result = mdb_cursor_get(m_cur_tx_indices, (MDB_val *)&zerokval, &val_h, MDB_GET_BOTH);
if (result == 0) {
txindex *tip = (txindex *)val_h.mv_data;
throw1(TX_EXISTS(std::string("Attempting to add transaction that's already in the db (tx id ").append(boost::lexical_cast<std::string>(tip->data.tx_id)).append(")").c_str()));
} else if (result != MDB_NOTFOUND) {
throw1(DB_ERROR(lmdb_error(std::string("Error checking if tx index exists for tx hash ") + epee::string_tools::pod_to_hex(tx_hash) + ": ", result).c_str()));
}
txindex ti;
ti.key = tx_hash;
ti.data.tx_id = tx_id;
ti.data.unlock_time = tx.unlock_time;
ti.data.block_id = m_height; // we don't need blk_hash since we know m_height
val_h.mv_size = sizeof(ti);
val_h.mv_data = (void *)&ti;
result = mdb_cursor_put(m_cur_tx_indices, (MDB_val *)&zerokval, &val_h, 0);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to add tx data to db transaction: ", result).c_str()));
MDB_val_copy<blobdata> blob(tx_to_blob(tx));
result = mdb_cursor_put(m_cur_txs, &val_tx_id, &blob, MDB_APPEND);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to add tx blob to db transaction: ", result).c_str()));
return tx_id;
}
// TODO: compare pros and cons of looking up the tx hash's tx index once and
// passing it in to functions like this
void BlockchainLMDB::remove_transaction_data(const crypto::hash& tx_hash, const transaction& tx)
{
int result;
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
mdb_txn_cursors *m_cursors = &m_wcursors;
CURSOR(tx_indices)
CURSOR(txs)
CURSOR(tx_outputs)
MDB_val_set(val_h, tx_hash);
if (mdb_cursor_get(m_cur_tx_indices, (MDB_val *)&zerokval, &val_h, MDB_GET_BOTH))
throw1(TX_DNE("Attempting to remove transaction that isn't in the db"));
txindex *tip = (txindex *)val_h.mv_data;
MDB_val_set(val_tx_id, tip->data.tx_id);
if ((result = mdb_cursor_get(m_cur_txs, &val_tx_id, NULL, MDB_SET)))
throw1(DB_ERROR(lmdb_error("Failed to locate tx for removal: ", result).c_str()));
result = mdb_cursor_del(m_cur_txs, 0);
if (result)
throw1(DB_ERROR(lmdb_error("Failed to add removal of tx to db transaction: ", result).c_str()));
remove_tx_outputs(tip->data.tx_id, tx);
result = mdb_cursor_get(m_cur_tx_outputs, &val_tx_id, NULL, MDB_SET);
if (result == MDB_NOTFOUND)
LOG_PRINT_L1("tx has no outputs to remove: " << tx_hash);
else if (result)
throw1(DB_ERROR(lmdb_error("Failed to locate tx outputs for removal: ", result).c_str()));
if (!result)
{
result = mdb_cursor_del(m_cur_tx_outputs, 0);
if (result)
throw1(DB_ERROR(lmdb_error("Failed to add removal of tx outputs to db transaction: ", result).c_str()));
}
// Don't delete the tx_indices entry until the end, after we're done with val_tx_id
if (mdb_cursor_del(m_cur_tx_indices, 0))
throw1(DB_ERROR("Failed to add removal of tx index to db transaction"));
}
uint64_t BlockchainLMDB::add_output(const crypto::hash& tx_hash,
const tx_out& tx_output,
const uint64_t& local_index,
const uint64_t unlock_time,
const rct::key *commitment)
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
mdb_txn_cursors *m_cursors = &m_wcursors;
uint64_t m_height = height();
uint64_t m_num_outputs = num_outputs();
int result = 0;
CURSOR(output_txs)
CURSOR(output_amounts)
if (tx_output.target.type() != typeid(txout_to_key))
throw0(DB_ERROR("Wrong output type: expected txout_to_key"));
if (tx_output.amount == 0 && !commitment)
throw0(DB_ERROR("RCT output without commitment"));
outtx ot = {m_num_outputs, tx_hash, local_index};
MDB_val_set(vot, ot);
result = mdb_cursor_put(m_cur_output_txs, (MDB_val *)&zerokval, &vot, MDB_APPENDDUP);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to add output tx hash to db transaction: ", result).c_str()));
outkey ok;
MDB_val data;
MDB_val_copy<uint64_t> val_amount(tx_output.amount);
result = mdb_cursor_get(m_cur_output_amounts, &val_amount, &data, MDB_SET);
if (!result)
{
mdb_size_t num_elems = 0;
result = mdb_cursor_count(m_cur_output_amounts, &num_elems);
if (result)
throw0(DB_ERROR(std::string("Failed to get number of outputs for amount: ").append(mdb_strerror(result)).c_str()));
ok.amount_index = num_elems;
}
else if (result != MDB_NOTFOUND)
throw0(DB_ERROR(lmdb_error("Failed to get output amount in db transaction: ", result).c_str()));
else
ok.amount_index = 0;
ok.output_id = m_num_outputs;
ok.data.pubkey = boost::get < txout_to_key > (tx_output.target).key;
ok.data.unlock_time = unlock_time;
ok.data.height = m_height;
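// RCT outputs (amount == 0) store the commitment; pre-RCT outputs are
// written as the smaller pre_rct_outkey layout without the commitment field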
if (tx_output.amount == 0)
{
ok.data.commitment = *commitment;
data.mv_size = sizeof(ok);
}
else
{
data.mv_size = sizeof(pre_rct_outkey);
}
data.mv_data = &ok;
if ((result = mdb_cursor_put(m_cur_output_amounts, &val_amount, &data, MDB_APPENDDUP)))
throw0(DB_ERROR(lmdb_error("Failed to add output pubkey to db transaction: ", result).c_str()));
return ok.amount_index;
}
void BlockchainLMDB::add_tx_amount_output_indices(const uint64_t tx_id,
const std::vector<uint64_t>& amount_output_indices)
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
mdb_txn_cursors *m_cursors = &m_wcursors;
CURSOR(tx_outputs)
int result = 0;
int num_outputs = amount_output_indices.size();
MDB_val_set(k_tx_id, tx_id);
MDB_val v;
v.mv_data = (void *)amount_output_indices.data();
v.mv_size = sizeof(uint64_t) * num_outputs;
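// the value is the raw array of this tx's per-output amount indices, one
// uint64_t per output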
// LOG_PRINT_L1("tx_outputs[tx_hash] size: " << v.mv_size);
result = mdb_cursor_put(m_cur_tx_outputs, &k_tx_id, &v, MDB_APPEND);
if (result)
throw0(DB_ERROR(std::string("Failed to add <tx hash, amount output index array> to db transaction: ").append(mdb_strerror(result)).c_str()));
}
void BlockchainLMDB::remove_tx_outputs(const uint64_t tx_id, const transaction& tx)
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
std::vector<uint64_t> amount_output_indices = get_tx_amount_output_indices(tx_id);
if (amount_output_indices.empty())
{
if (tx.vout.empty())
LOG_PRINT_L2("tx has no outputs, so no output indices");
else
throw0(DB_ERROR("tx has outputs, but no output indices found"));
}
bool is_pseudo_rct = tx.version >= 2 && tx.vin.size() == 1 && tx.vin[0].type() == typeid(txin_gen);
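// remove outputs in reverse creation order, highest index first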
for (size_t i = tx.vout.size(); i-- > 0;)
{
uint64_t amount = is_pseudo_rct ? 0 : tx.vout[i].amount;
remove_output(amount, amount_output_indices[i]);
}
}
void BlockchainLMDB::remove_output(const uint64_t amount, const uint64_t& out_index)
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
mdb_txn_cursors *m_cursors = &m_wcursors;
CURSOR(output_amounts);
CURSOR(output_txs);
MDB_val_set(k, amount);
MDB_val_set(v, out_index);
auto result = mdb_cursor_get(m_cur_output_amounts, &k, &v, MDB_GET_BOTH);
if (result == MDB_NOTFOUND)
throw1(OUTPUT_DNE("Attempting to get an output index by amount and amount index, but amount not found"));
else if (result)
throw0(DB_ERROR(lmdb_error("DB error attempting to get an output", result).c_str()));
const pre_rct_outkey *ok = (const pre_rct_outkey *)v.mv_data;
MDB_val_set(otxk, ok->output_id);
result = mdb_cursor_get(m_cur_output_txs, (MDB_val *)&zerokval, &otxk, MDB_GET_BOTH);
if (result == MDB_NOTFOUND)
{
throw0(DB_ERROR("Unexpected: global output index not found in m_output_txs"));
}
else if (result)
{
throw1(DB_ERROR(lmdb_error("Error adding removal of output tx to db transaction", result).c_str()));
}
result = mdb_cursor_del(m_cur_output_txs, 0);
if (result)
throw0(DB_ERROR(lmdb_error(std::string("Error deleting output index ").append(boost::lexical_cast<std::string>(out_index).append(": ")).c_str(), result).c_str()));
// now delete the amount
result = mdb_cursor_del(m_cur_output_amounts, 0);
if (result)
throw0(DB_ERROR(lmdb_error(std::string("Error deleting amount for output index ").append(boost::lexical_cast<std::string>(out_index).append(": ")).c_str(), result).c_str()));
}
void BlockchainLMDB::add_spent_key(const crypto::key_image& k_image)
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
mdb_txn_cursors *m_cursors = &m_wcursors;
CURSOR(spent_keys)
MDB_val k = {sizeof(k_image), (void *)&k_image};
if (auto result = mdb_cursor_put(m_cur_spent_keys, (MDB_val *)&zerokval, &k, MDB_NODUPDATA)) {
if (result == MDB_KEYEXIST)
throw1(KEY_IMAGE_EXISTS("Attempting to add spent key image that's already in the db"));
else
throw1(DB_ERROR(lmdb_error("Error adding spent key image to db transaction: ", result).c_str()));
}
}
void BlockchainLMDB::remove_spent_key(const crypto::key_image& k_image)
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
mdb_txn_cursors *m_cursors = &m_wcursors;
CURSOR(spent_keys)
MDB_val k = {sizeof(k_image), (void *)&k_image};
auto result = mdb_cursor_get(m_cur_spent_keys, (MDB_val *)&zerokval, &k, MDB_GET_BOTH);
if (result != 0 && result != MDB_NOTFOUND)
throw1(DB_ERROR(lmdb_error("Error finding spent key to remove", result).c_str()));
if (!result)
{
result = mdb_cursor_del(m_cur_spent_keys, 0);
if (result)
throw1(DB_ERROR(lmdb_error("Error adding removal of key image to db transaction", result).c_str()));
}
}
blobdata BlockchainLMDB::output_to_blob(const tx_out& output) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
blobdata b;
if (!t_serializable_object_to_blob(output, b))
throw1(DB_ERROR("Error serializing output to blob"));
return b;
}
tx_out BlockchainLMDB::output_from_blob(const blobdata& blob) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
std::stringstream ss;
ss << blob;
binary_archive<false> ba(ss);
tx_out o;
if (!(::serialization::serialize(ba, o)))
throw1(DB_ERROR("Error deserializing tx output blob"));
return o;
}
void BlockchainLMDB::check_open() const
{
// LOG_PRINT_L3("BlockchainLMDB::" << __func__);
if (!m_open)
throw0(DB_ERROR("DB operation attempted on a not-open DB instance"));
}
BlockchainLMDB::~BlockchainLMDB()
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
// batch transaction shouldn't be active at this point. If it is, consider it aborted.
if (m_batch_active)
batch_abort();
if (m_open)
close();
}
BlockchainLMDB::BlockchainLMDB(bool batch_transactions)
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
// initialize folder to something "safe" just in case
// someone accidentally misuses this class...
m_folder = "thishsouldnotexistbecauseitisgibberish";
m_open = false;
m_batch_transactions = batch_transactions;
m_write_txn = nullptr;
m_write_batch_txn = nullptr;
m_batch_active = false;
m_cum_size = 0;
m_cum_count = 0;
m_hardfork = nullptr;
}
void BlockchainLMDB::open(const std::string& filename, const int db_flags)
{
int result;
int mdb_flags = MDB_NORDAHEAD;
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
if (m_open)
throw0(DB_OPEN_FAILURE("Attempted to open db, but it's already open"));
boost::filesystem::path direc(filename);
if (boost::filesystem::exists(direc))
{
if (!boost::filesystem::is_directory(direc))
throw0(DB_OPEN_FAILURE("LMDB needs a directory path, but a file was passed"));
}
else
{
if (!boost::filesystem::create_directories(direc))
throw0(DB_OPEN_FAILURE(std::string("Failed to create directory ").append(filename).c_str()));
}
// check for existing LMDB files in base directory
boost::filesystem::path old_files = direc.parent_path();
if (boost::filesystem::exists(old_files / CRYPTONOTE_BLOCKCHAINDATA_FILENAME)
|| boost::filesystem::exists(old_files / CRYPTONOTE_BLOCKCHAINDATA_LOCK_FILENAME))
{
LOG_PRINT_L0("Found existing LMDB files in " << old_files.string());
LOG_PRINT_L0("Move " << CRYPTONOTE_BLOCKCHAINDATA_FILENAME << " and/or " << CRYPTONOTE_BLOCKCHAINDATA_LOCK_FILENAME << " to " << filename << ", or delete them, and then restart");
throw DB_ERROR("Database could not be opened");
}
m_folder = filename;
#ifdef __OpenBSD__
if ((mdb_flags & MDB_WRITEMAP) == 0) {
MCLOG_RED(el::Level::Info, "global", "Running on OpenBSD: forcing WRITEMAP");
mdb_flags |= MDB_WRITEMAP;
}
#endif
// set up lmdb environment
if ((result = mdb_env_create(&m_env)))
throw0(DB_ERROR(lmdb_error("Failed to create lmdb environment: ", result).c_str()));
if ((result = mdb_env_set_maxdbs(m_env, 20)))
throw0(DB_ERROR(lmdb_error("Failed to set max number of dbs: ", result).c_str()));
int threads = tools::get_max_concurrency();
if (threads > 110 && /* maxreaders default is 126, leave some slots for other read processes */
(result = mdb_env_set_maxreaders(m_env, threads+16)))
throw0(DB_ERROR(lmdb_error("Failed to set max number of readers: ", result).c_str()));
size_t mapsize = DEFAULT_MAPSIZE;
if (db_flags & DBF_FAST)
mdb_flags |= MDB_NOSYNC;
if (db_flags & DBF_FASTEST)
mdb_flags |= MDB_NOSYNC | MDB_WRITEMAP | MDB_MAPASYNC;
if (db_flags & DBF_RDONLY)
mdb_flags = MDB_RDONLY;
if (db_flags & DBF_SALVAGE)
mdb_flags |= MDB_PREVSNAPSHOT;
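// MDB_PREVSNAPSHOT opens the environment at its previous meta page (the
// last-but-one commit), which can help salvage a DB whose latest commit is
// damaged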
if (auto result = mdb_env_open(m_env, filename.c_str(), mdb_flags, 0644))
throw0(DB_ERROR(lmdb_error("Failed to open lmdb environment: ", result).c_str()));
MDB_envinfo mei;
mdb_env_info(m_env, &mei);
uint64_t cur_mapsize = mei.me_mapsize;
if (cur_mapsize < mapsize)
{
if (auto result = mdb_env_set_mapsize(m_env, mapsize))
throw0(DB_ERROR(lmdb_error("Failed to set max memory map size: ", result).c_str()));
mdb_env_info(m_env, &mei);
cur_mapsize = mei.me_mapsize;
LOG_PRINT_L1("LMDB memory map size: " << cur_mapsize);
}
if (need_resize())
{
LOG_PRINT_L0("LMDB memory map needs to be resized, doing that now.");
do_resize();
}
int txn_flags = 0;
if (mdb_flags & MDB_RDONLY)
txn_flags |= MDB_RDONLY;
// get a read/write MDB_txn, depending on mdb_flags
mdb_txn_safe txn;
if (auto mdb_res = mdb_txn_begin(m_env, NULL, txn_flags, txn))
throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", mdb_res).c_str()));
// open necessary databases, and set properties as needed
// uses macros to avoid having to change things in too many places
lmdb_db_open(txn, LMDB_BLOCKS, MDB_INTEGERKEY | MDB_CREATE, m_blocks, "Failed to open db handle for m_blocks");
lmdb_db_open(txn, LMDB_BLOCK_INFO, MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_block_info, "Failed to open db handle for m_block_info");
lmdb_db_open(txn, LMDB_BLOCK_HEIGHTS, MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_block_heights, "Failed to open db handle for m_block_heights");
lmdb_db_open(txn, LMDB_TXS, MDB_INTEGERKEY | MDB_CREATE, m_txs, "Failed to open db handle for m_txs");
lmdb_db_open(txn, LMDB_TX_INDICES, MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_tx_indices, "Failed to open db handle for m_tx_indices");
lmdb_db_open(txn, LMDB_TX_OUTPUTS, MDB_INTEGERKEY | MDB_CREATE, m_tx_outputs, "Failed to open db handle for m_tx_outputs");
lmdb_db_open(txn, LMDB_OUTPUT_TXS, MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_output_txs, "Failed to open db handle for m_output_txs");
lmdb_db_open(txn, LMDB_OUTPUT_AMOUNTS, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_output_amounts, "Failed to open db handle for m_output_amounts");
lmdb_db_open(txn, LMDB_SPENT_KEYS, MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_spent_keys, "Failed to open db handle for m_spent_keys");
lmdb_db_open(txn, LMDB_TXPOOL_META, MDB_CREATE, m_txpool_meta, "Failed to open db handle for m_txpool_meta");
lmdb_db_open(txn, LMDB_TXPOOL_BLOB, MDB_CREATE, m_txpool_blob, "Failed to open db handle for m_txpool_blob");
// this subdb is dropped on sight, so it may not be present when we open the DB.
// Since we use MDB_CREATE, we'll get an exception if we open read-only and it does not exist.
// So when read-only, we neither open it nor drop it below. It is not used elsewhere.
if (!(mdb_flags & MDB_RDONLY))
lmdb_db_open(txn, LMDB_HF_STARTING_HEIGHTS, MDB_CREATE, m_hf_starting_heights, "Failed to open db handle for m_hf_starting_heights");
lmdb_db_open(txn, LMDB_HF_VERSIONS, MDB_INTEGERKEY | MDB_CREATE, m_hf_versions, "Failed to open db handle for m_hf_versions");
lmdb_db_open(txn, LMDB_PROPERTIES, MDB_CREATE, m_properties, "Failed to open db handle for m_properties");
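  // The dup-sorted tables need a custom comparator for their duplicate data
  // items (32-byte hashes or uint64-prefixed records); the txpool and
  // properties tables need a custom key comparator instead.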
mdb_set_dupsort(txn, m_spent_keys, compare_hash32);
mdb_set_dupsort(txn, m_block_heights, compare_hash32);
mdb_set_dupsort(txn, m_tx_indices, compare_hash32);
mdb_set_dupsort(txn, m_output_amounts, compare_uint64);
mdb_set_dupsort(txn, m_output_txs, compare_uint64);
mdb_set_dupsort(txn, m_block_info, compare_uint64);
mdb_set_compare(txn, m_txpool_meta, compare_hash32);
mdb_set_compare(txn, m_txpool_blob, compare_hash32);
mdb_set_compare(txn, m_properties, compare_string);
if (!(mdb_flags & MDB_RDONLY))
{
result = mdb_drop(txn, m_hf_starting_heights, 1);
if (result && result != MDB_NOTFOUND)
throw0(DB_ERROR(lmdb_error("Failed to drop m_hf_starting_heights: ", result).c_str()));
}
// get and keep current height
MDB_stat db_stats;
if ((result = mdb_stat(txn, m_blocks, &db_stats)))
throw0(DB_ERROR(lmdb_error("Failed to query m_blocks: ", result).c_str()));
LOG_PRINT_L2("Setting m_height to: " << db_stats.ms_entries);
uint64_t m_height = db_stats.ms_entries;
bool compatible = true;
MDB_val_copy<const char*> k("version");
MDB_val v;
auto get_result = mdb_get(txn, m_properties, &k, &v);
if(get_result == MDB_SUCCESS)
{
if (*(const uint32_t*)v.mv_data > VERSION)
{
MWARNING("Existing lmdb database was made by a later version. We don't know how it will change yet.");
compatible = false;
}
#if VERSION > 0
else if (*(const uint32_t*)v.mv_data < VERSION)
{
// Note that there was a schema change within version 0 as well.
// See commit e5d2680094ee15889934fe28901e4e133cda56f2 2015/07/10
// We don't handle the old format previous to that commit.
txn.commit();
m_open = true;
migrate(*(const uint32_t *)v.mv_data);
return;
}
#endif
}
else
{
// if not found, and the DB is non-empty, this is probably
// an "old" version 0, which we don't handle. If the DB is
// empty it's fine.
if (VERSION > 0 && m_height > 0)
compatible = false;
}
if (!compatible)
{
txn.abort();
mdb_env_close(m_env);
m_open = false;
MFATAL("Existing lmdb database is incompatible with this version.");
MFATAL("Please delete the existing database and resync.");
return;
}
if (!(mdb_flags & MDB_RDONLY))
{
// only write version on an empty DB
if (m_height == 0)
{
MDB_val_copy<const char*> k("version");
MDB_val_copy<uint32_t> v(VERSION);
auto put_result = mdb_put(txn, m_properties, &k, &v, 0);
if (put_result != MDB_SUCCESS)
{
txn.abort();
mdb_env_close(m_env);
m_open = false;
MERROR("Failed to write version to database.");
return;
}
}
}
// commit the transaction
txn.commit();
m_open = true;
// from here, init should be finished
}
void BlockchainLMDB::close()
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
if (m_batch_active)
{
LOG_PRINT_L3("close() first calling batch_abort() due to active batch transaction");
batch_abort();
}
this->sync();
m_tinfo.reset();
// FIXME: not yet thread safe!!! Use with care.
mdb_env_close(m_env);
m_open = false;
}
void BlockchainLMDB::sync()
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
// Does nothing unless the LMDB environment was opened with MDB_NOSYNC,
// or (for the metadata flush only) MDB_NOMETASYNC. Force the flush to be synchronous.
if (auto result = mdb_env_sync(m_env, true))
{
throw0(DB_ERROR(lmdb_error("Failed to sync database: ", result).c_str()));
}
}
void BlockchainLMDB::safesyncmode(const bool onoff)
{
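  // mdb_env_set_flags(env, flags, onoff): a nonzero third argument sets the
  // flags, zero clears them. So safe mode (onoff == true) clears the unsafe
  // MDB_NOSYNC/MDB_MAPASYNC flags, and turning it off sets them again.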
mdb_env_set_flags(m_env, MDB_NOSYNC|MDB_MAPASYNC, !onoff);
}
void BlockchainLMDB::reset()
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
mdb_txn_safe txn;
if (auto result = lmdb_txn_begin(m_env, NULL, 0, txn))
throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str()));
if (auto result = mdb_drop(txn, m_blocks, 0))
throw0(DB_ERROR(lmdb_error("Failed to drop m_blocks: ", result).c_str()));
if (auto result = mdb_drop(txn, m_block_info, 0))
throw0(DB_ERROR(lmdb_error("Failed to drop m_block_info: ", result).c_str()));
if (auto result = mdb_drop(txn, m_block_heights, 0))
throw0(DB_ERROR(lmdb_error("Failed to drop m_block_heights: ", result).c_str()));
if (auto result = mdb_drop(txn, m_txs, 0))
throw0(DB_ERROR(lmdb_error("Failed to drop m_txs: ", result).c_str()));
if (auto result = mdb_drop(txn, m_tx_indices, 0))
throw0(DB_ERROR(lmdb_error("Failed to drop m_tx_indices: ", result).c_str()));
if (auto result = mdb_drop(txn, m_tx_outputs, 0))
throw0(DB_ERROR(lmdb_error("Failed to drop m_tx_outputs: ", result).c_str()));
if (auto result = mdb_drop(txn, m_output_txs, 0))
throw0(DB_ERROR(lmdb_error("Failed to drop m_output_txs: ", result).c_str()));
if (auto result = mdb_drop(txn, m_output_amounts, 0))
throw0(DB_ERROR(lmdb_error("Failed to drop m_output_amounts: ", result).c_str()));
if (auto result = mdb_drop(txn, m_spent_keys, 0))
throw0(DB_ERROR(lmdb_error("Failed to drop m_spent_keys: ", result).c_str()));
(void)mdb_drop(txn, m_hf_starting_heights, 0); // this one is dropped in new code
if (auto result = mdb_drop(txn, m_hf_versions, 0))
throw0(DB_ERROR(lmdb_error("Failed to drop m_hf_versions: ", result).c_str()));
if (auto result = mdb_drop(txn, m_properties, 0))
throw0(DB_ERROR(lmdb_error("Failed to drop m_properties: ", result).c_str()));
// init with current version
MDB_val_copy<const char*> k("version");
MDB_val_copy<uint32_t> v(VERSION);
if (auto result = mdb_put(txn, m_properties, &k, &v, 0))
throw0(DB_ERROR(lmdb_error("Failed to write version to database: ", result).c_str()));
txn.commit();
m_cum_size = 0;
m_cum_count = 0;
}
std::vector<std::string> BlockchainLMDB::get_filenames() const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
std::vector<std::string> filenames;
boost::filesystem::path datafile(m_folder);
datafile /= CRYPTONOTE_BLOCKCHAINDATA_FILENAME;
boost::filesystem::path lockfile(m_folder);
lockfile /= CRYPTONOTE_BLOCKCHAINDATA_LOCK_FILENAME;
filenames.push_back(datafile.string());
filenames.push_back(lockfile.string());
return filenames;
}
std::string BlockchainLMDB::get_db_name() const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
return std::string("lmdb");
}
// TODO: this?
bool BlockchainLMDB::lock()
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
return false;
}
// TODO: this?
void BlockchainLMDB::unlock()
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
}
#define TXN_PREFIX(flags) \
  mdb_txn_safe auto_txn; \
  mdb_txn_safe* txn_ptr = &auto_txn; \
  if (m_batch_active) \
    txn_ptr = m_write_txn; \
  else \
  { \
    if (auto mdb_res = lmdb_txn_begin(m_env, NULL, flags, auto_txn)) \
      throw0(DB_ERROR(lmdb_error(std::string("Failed to create a transaction for the db in ")+__FUNCTION__+": ", mdb_res).c_str())); \
  }
#define TXN_PREFIX_RDONLY() \
MDB_txn *m_txn; \
mdb_txn_cursors *m_cursors; \
bool my_rtxn = block_rtxn_start(&m_txn, &m_cursors); \
mdb_txn_safe auto_txn(my_rtxn); \
if (my_rtxn) auto_txn.m_tinfo = m_tinfo.get()
#define TXN_POSTFIX_RDONLY()
#define TXN_POSTFIX_SUCCESS() \
do { \
if (! m_batch_active) \
auto_txn.commit(); \
} while(0)
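// Illustrative sketch only (not part of the original source): a typical
// accessor built on the macros above, where do_db_work() is a hypothetical
// placeholder for real mdb_put()/mdb_get() calls made through *txn_ptr:
//
//   void BlockchainLMDB::example_accessor()  // hypothetical
//   {
//     check_open();
//     TXN_PREFIX(0);              // reuses m_write_txn while batching
//     do_db_work(*txn_ptr);       // DB work inside the transaction
//     TXN_POSTFIX_SUCCESS();      // commits only when not batching
//   }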
// The below two macros are for DB access within block add/remove, whether
// regular batch txn is in use or not. m_write_txn is used as a batch txn, even
// if it's only within block add/remove.
//
// DB access functions that may be called both within block add/remove and
// without should use these. If the function will be called ONLY within block
// add/remove, m_write_txn alone may be used instead of these macros.
#define TXN_BLOCK_PREFIX(flags) \
  mdb_txn_safe auto_txn; \
  mdb_txn_safe* txn_ptr = &auto_txn; \
  if (m_batch_active || m_write_txn) \
    txn_ptr = m_write_txn; \
  else \
  { \
    if (auto mdb_res = lmdb_txn_begin(m_env, NULL, flags, auto_txn)) \
      throw0(DB_ERROR(lmdb_error(std::string("Failed to create a transaction for the db in ")+__FUNCTION__+": ", mdb_res).c_str())); \
  }
#define TXN_BLOCK_POSTFIX_SUCCESS() \
do { \
if (! m_batch_active && ! m_write_txn) \
auto_txn.commit(); \
} while(0)
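// Illustrative sketch only (not part of the original source) of the usage
// pattern described in the comment above; remove_data() is a hypothetical
// placeholder:
//
//   void BlockchainLMDB::example_block_helper()  // hypothetical
//   {
//     TXN_BLOCK_PREFIX(0);           // reuses m_write_txn if one is active
//     remove_data(*txn_ptr);         // DB work within block add/remove
//     TXN_BLOCK_POSTFIX_SUCCESS();   // commits only for a standalone txn
//   }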
void BlockchainLMDB::add_txpool_tx(const transaction &tx, const txpool_tx_meta_t &meta)
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
mdb_txn_cursors *m_cursors = &m_wcursors;
CURSOR(txpool_meta)
CURSOR(txpool_blob)
const crypto::hash txid = get_transaction_hash(tx);
MDB_val k = {sizeof(txid), (void *)&txid};
MDB_val v = {sizeof(meta), (void *)&meta};
if (auto result = mdb_cursor_put(m_cur_txpool_meta, &k, &v, MDB_NODUPDATA)) {
if (result == MDB_KEYEXIST)
throw1(DB_ERROR("Attempting to add txpool tx metadata that's already in the db"));
else
throw1(DB_ERROR(lmdb_error("Error adding txpool tx metadata to db transaction: ", result).c_str()));
}
MDB_val_copy<cryptonote::blobdata> blob_val(tx_to_blob(tx));
if (auto result = mdb_cursor_put(m_cur_txpool_blob, &k, &blob_val, MDB_NODUPDATA)) {
if (result == MDB_KEYEXIST)
throw1(DB_ERROR("Attempting to add txpool tx blob that's already in the db"));
else
throw1(DB_ERROR(lmdb_error("Error adding txpool tx blob to db transaction: ", result).c_str()));
}
}
void BlockchainLMDB::update_txpool_tx(const crypto::hash &txid, const txpool_tx_meta_t &meta)
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
mdb_txn_cursors *m_cursors = &m_wcursors;
CURSOR(txpool_meta)
CURSOR(txpool_blob)
MDB_val k = {sizeof(txid), (void *)&txid};
MDB_val v;
auto result = mdb_cursor_get(m_cur_txpool_meta, &k, &v, MDB_SET);
if (result != 0)
throw1(DB_ERROR(lmdb_error("Error finding txpool tx meta to update: ", result).c_str()));
result = mdb_cursor_del(m_cur_txpool_meta, 0);
if (result)
throw1(DB_ERROR(lmdb_error("Error adding removal of txpool tx metadata to db transaction: ", result).c_str()));
v = MDB_val({sizeof(meta), (void *)&meta});
if ((result = mdb_cursor_put(m_cur_txpool_meta, &k, &v, MDB_NODUPDATA)) != 0) {
if (result == MDB_KEYEXIST)
throw1(DB_ERROR("Attempting to add txpool tx metadata that's already in the db"));
else
throw1(DB_ERROR(lmdb_error("Error adding txpool tx metadata to db transaction: ", result).c_str()));
}
}
uint64_t BlockchainLMDB::get_txpool_tx_count(bool include_unrelayed_txes) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
int result;
uint64_t num_entries = 0;
TXN_PREFIX_RDONLY();
if (include_unrelayed_txes)
{
// No filtering, so we can get the number of txes the "fast" way
MDB_stat db_stats;
if ((result = mdb_stat(m_txn, m_txpool_meta, &db_stats)))
throw0(DB_ERROR(lmdb_error("Failed to query m_txpool_meta: ", result).c_str()));
num_entries = db_stats.ms_entries;
}
else
{
// To filter unrelayed txes out of the result, we need to loop over the transactions and check their metadata
RCURSOR(txpool_meta);
RCURSOR(txpool_blob);
MDB_val k;
MDB_val v;
MDB_cursor_op op = MDB_FIRST;
while (1)
{
result = mdb_cursor_get(m_cur_txpool_meta, &k, &v, op);
op = MDB_NEXT;
if (result == MDB_NOTFOUND)
break;
if (result)
throw0(DB_ERROR(lmdb_error("Failed to enumerate txpool tx metadata: ", result).c_str()));
const txpool_tx_meta_t &meta = *(const txpool_tx_meta_t*)v.mv_data;
if (!meta.do_not_relay)
++num_entries;
}
}
TXN_POSTFIX_RDONLY();
return num_entries;
}
bool BlockchainLMDB::txpool_has_tx(const crypto::hash& txid) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
TXN_PREFIX_RDONLY();
RCURSOR(txpool_meta)
MDB_val k = {sizeof(txid), (void *)&txid};
auto result = mdb_cursor_get(m_cur_txpool_meta, &k, NULL, MDB_SET);
if (result != 0 && result != MDB_NOTFOUND)
throw1(DB_ERROR(lmdb_error("Error finding txpool tx meta: ", result).c_str()));
TXN_POSTFIX_RDONLY();
return result != MDB_NOTFOUND;
}
void BlockchainLMDB::remove_txpool_tx(const crypto::hash& txid)
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
mdb_txn_cursors *m_cursors = &m_wcursors;
CURSOR(txpool_meta)
CURSOR(txpool_blob)
MDB_val k = {sizeof(txid), (void *)&txid};
auto result = mdb_cursor_get(m_cur_txpool_meta, &k, NULL, MDB_SET);
if (result != 0 && result != MDB_NOTFOUND)
throw1(DB_ERROR(lmdb_error("Error finding txpool tx meta to remove: ", result).c_str()));
if (!result)
{
result = mdb_cursor_del(m_cur_txpool_meta, 0);
if (result)
throw1(DB_ERROR(lmdb_error("Error adding removal of txpool tx metadata to db transaction: ", result).c_str()));
}
result = mdb_cursor_get(m_cur_txpool_blob, &k, NULL, MDB_SET);
if (result != 0 && result != MDB_NOTFOUND)
throw1(DB_ERROR(lmdb_error("Error finding txpool tx blob to remove: ", result).c_str()));
if (!result)
{
result = mdb_cursor_del(m_cur_txpool_blob, 0);
if (result)
throw1(DB_ERROR(lmdb_error("Error adding removal of txpool tx blob to db transaction: ", result).c_str()));
}
}
bool BlockchainLMDB::get_txpool_tx_meta(const crypto::hash& txid, txpool_tx_meta_t &meta) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
TXN_PREFIX_RDONLY();
RCURSOR(txpool_meta)
MDB_val k = {sizeof(txid), (void *)&txid};
MDB_val v;
auto result = mdb_cursor_get(m_cur_txpool_meta, &k, &v, MDB_SET);
if (result == MDB_NOTFOUND)
return false;
if (result != 0)
throw1(DB_ERROR(lmdb_error("Error finding txpool tx meta: ", result).c_str()));
meta = *(const txpool_tx_meta_t*)v.mv_data;
TXN_POSTFIX_RDONLY();
return true;
}
bool BlockchainLMDB::get_txpool_tx_blob(const crypto::hash& txid, cryptonote::blobdata &bd) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
TXN_PREFIX_RDONLY();
RCURSOR(txpool_blob)
MDB_val k = {sizeof(txid), (void *)&txid};
MDB_val v;
auto result = mdb_cursor_get(m_cur_txpool_blob, &k, &v, MDB_SET);
if (result == MDB_NOTFOUND)
return false;
if (result != 0)
throw1(DB_ERROR(lmdb_error("Error finding txpool tx blob: ", result).c_str()));
bd.assign(reinterpret_cast<const char*>(v.mv_data), v.mv_size);
TXN_POSTFIX_RDONLY();
return true;
}
cryptonote::blobdata BlockchainLMDB::get_txpool_tx_blob(const crypto::hash& txid) const
{
cryptonote::blobdata bd;
if (!get_txpool_tx_blob(txid, bd))
throw1(DB_ERROR("Tx not found in txpool: "));
return bd;
}
bool BlockchainLMDB::for_all_txpool_txes(std::function<bool(const crypto::hash&, const txpool_tx_meta_t&, const cryptonote::blobdata*)> f, bool include_blob, bool include_unrelayed_txes) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
TXN_PREFIX_RDONLY();
RCURSOR(txpool_meta);
RCURSOR(txpool_blob);
MDB_val k;
MDB_val v;
bool ret = true;
MDB_cursor_op op = MDB_FIRST;
while (1)
{
int result = mdb_cursor_get(m_cur_txpool_meta, &k, &v, op);
op = MDB_NEXT;
if (result == MDB_NOTFOUND)
break;
if (result)
throw0(DB_ERROR(lmdb_error("Failed to enumerate txpool tx metadata: ", result).c_str()));
const crypto::hash txid = *(const crypto::hash*)k.mv_data;
const txpool_tx_meta_t &meta = *(const txpool_tx_meta_t*)v.mv_data;
if (!include_unrelayed_txes && meta.do_not_relay)
// Skipping that tx
continue;
const cryptonote::blobdata *passed_bd = NULL;
cryptonote::blobdata bd;
if (include_blob)
{
MDB_val b;
result = mdb_cursor_get(m_cur_txpool_blob, &k, &b, MDB_SET);
if (result == MDB_NOTFOUND)
throw0(DB_ERROR("Failed to find txpool tx blob to match metadata"));
if (result)
throw0(DB_ERROR(lmdb_error("Failed to enumerate txpool tx blob: ", result).c_str()));
bd.assign(reinterpret_cast<const char*>(b.mv_data), b.mv_size);
passed_bd = &bd;
}
if (!f(txid, meta, passed_bd)) {
ret = false;
break;
}
}
TXN_POSTFIX_RDONLY();
return ret;
}
bool BlockchainLMDB::block_exists(const crypto::hash& h, uint64_t *height) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
TXN_PREFIX_RDONLY();
RCURSOR(block_heights);
bool ret = false;
MDB_val_set(key, h);
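  // m_block_heights keeps every record as a sorted duplicate under the single
  // dummy key zerokval; MDB_GET_BOTH then looks the record up by the block
  // hash at the start of `key`.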
auto get_result = mdb_cursor_get(m_cur_block_heights, (MDB_val *)&zerokval, &key, MDB_GET_BOTH);
if (get_result == MDB_NOTFOUND)
{
LOG_PRINT_L3("Block with hash " << epee::string_tools::pod_to_hex(h) << " not found in db");
}
else if (get_result)
throw0(DB_ERROR(lmdb_error("DB error attempting to fetch block index from hash", get_result).c_str()));
else
{
if (height)
{
const blk_height *bhp = (const blk_height *)key.mv_data;
*height = bhp->bh_height;
}
ret = true;
}
TXN_POSTFIX_RDONLY();
return ret;
}
cryptonote::blobdata BlockchainLMDB::get_block_blob(const crypto::hash& h) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
return get_block_blob_from_height(get_block_height(h));
}
uint64_t BlockchainLMDB::get_block_height(const crypto::hash& h) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
TXN_PREFIX_RDONLY();
RCURSOR(block_heights);
MDB_val_set(key, h);
auto get_result = mdb_cursor_get(m_cur_block_heights, (MDB_val *)&zerokval, &key, MDB_GET_BOTH);
if (get_result == MDB_NOTFOUND)
throw1(BLOCK_DNE("Attempted to retrieve non-existent block height"));
else if (get_result)
throw0(DB_ERROR("Error attempting to retrieve a block height from the db"));
blk_height *bhp = (blk_height *)key.mv_data;
uint64_t ret = bhp->bh_height;
TXN_POSTFIX_RDONLY();
return ret;
}
block_header BlockchainLMDB::get_block_header(const crypto::hash& h) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
// block inherits from block_header, so the returned block is implicitly sliced to a block_header
return get_block(h);
}
cryptonote::blobdata BlockchainLMDB::get_block_blob_from_height(const uint64_t& height) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
TXN_PREFIX_RDONLY();
RCURSOR(blocks);
MDB_val_copy<uint64_t> key(height);
MDB_val result;
auto get_result = mdb_cursor_get(m_cur_blocks, &key, &result, MDB_SET);
if (get_result == MDB_NOTFOUND)
{
throw0(BLOCK_DNE(std::string("Attempt to get block from height ").append(boost::lexical_cast<std::string>(height)).append(" failed -- block not in db").c_str()));
}
else if (get_result)
throw0(DB_ERROR("Error attempting to retrieve a block from the db"));
blobdata bd;
bd.assign(reinterpret_cast<char*>(result.mv_data), result.mv_size);
TXN_POSTFIX_RDONLY();
return bd;
}
uint64_t BlockchainLMDB::get_block_timestamp(const uint64_t& height) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
TXN_PREFIX_RDONLY();
RCURSOR(block_info);
MDB_val_set(result, height);
auto get_result = mdb_cursor_get(m_cur_block_info, (MDB_val *)&zerokval, &result, MDB_GET_BOTH);
if (get_result == MDB_NOTFOUND)
{
throw0(BLOCK_DNE(std::string("Attempt to get timestamp from height ").append(boost::lexical_cast<std::string>(height)).append(" failed -- timestamp not in db").c_str()));
}
else if (get_result)
throw0(DB_ERROR("Error attempting to retrieve a timestamp from the db"));
mdb_block_info *bi = (mdb_block_info *)result.mv_data;
uint64_t ret = bi->bi_timestamp;
TXN_POSTFIX_RDONLY();
return ret;
}
uint64_t BlockchainLMDB::get_top_block_timestamp() const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
uint64_t m_height = height();
// if no blocks, return 0
if (m_height == 0)
{
return 0;
}
return get_block_timestamp(m_height - 1);
}
size_t BlockchainLMDB::get_block_size(const uint64_t& height) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
TXN_PREFIX_RDONLY();
RCURSOR(block_info);
MDB_val_set(result, height);
auto get_result = mdb_cursor_get(m_cur_block_info, (MDB_val *)&zerokval, &result, MDB_GET_BOTH);
if (get_result == MDB_NOTFOUND)
{
throw0(BLOCK_DNE(std::string("Attempt to get block size from height ").append(boost::lexical_cast<std::string>(height)).append(" failed -- block size not in db").c_str()));
}
else if (get_result)
throw0(DB_ERROR("Error attempting to retrieve a block size from the db"));
mdb_block_info *bi = (mdb_block_info *)result.mv_data;
size_t ret = bi->bi_size;
TXN_POSTFIX_RDONLY();
return ret;
}
difficulty_type BlockchainLMDB::get_block_cumulative_difficulty(const uint64_t& height) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__ << " height: " << height);
check_open();
TXN_PREFIX_RDONLY();
RCURSOR(block_info);
MDB_val_set(result, height);
auto get_result = mdb_cursor_get(m_cur_block_info, (MDB_val *)&zerokval, &result, MDB_GET_BOTH);
if (get_result == MDB_NOTFOUND)
{
throw0(BLOCK_DNE(std::string("Attempt to get cumulative difficulty from height ").append(boost::lexical_cast<std::string>(height)).append(" failed -- difficulty not in db").c_str()));
}
else if (get_result)
throw0(DB_ERROR("Error attempting to retrieve a cumulative difficulty from the db"));
mdb_block_info *bi = (mdb_block_info *)result.mv_data;
difficulty_type ret = bi->bi_diff;
TXN_POSTFIX_RDONLY();
return ret;
}
difficulty_type BlockchainLMDB::get_block_difficulty(const uint64_t& height) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
difficulty_type diff1 = 0;
difficulty_type diff2 = 0;
diff1 = get_block_cumulative_difficulty(height);
if (height != 0)
{
diff2 = get_block_cumulative_difficulty(height - 1);
}
return diff1 - diff2;
}
uint64_t BlockchainLMDB::get_block_already_generated_coins(const uint64_t& height) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
TXN_PREFIX_RDONLY();
RCURSOR(block_info);
MDB_val_set(result, height);
auto get_result = mdb_cursor_get(m_cur_block_info, (MDB_val *)&zerokval, &result, MDB_GET_BOTH);
if (get_result == MDB_NOTFOUND)
{
throw0(BLOCK_DNE(std::string("Attempt to get generated coins from height ").append(boost::lexical_cast<std::string>(height)).append(" failed -- block size not in db").c_str()));
}
else if (get_result)
throw0(DB_ERROR("Error attempting to retrieve a total generated coins from the db"));
mdb_block_info *bi = (mdb_block_info *)result.mv_data;
uint64_t ret = bi->bi_coins;
TXN_POSTFIX_RDONLY();
return ret;
}
crypto::hash BlockchainLMDB::get_block_hash_from_height(const uint64_t& height) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
TXN_PREFIX_RDONLY();
RCURSOR(block_info);
MDB_val_set(result, height);
auto get_result = mdb_cursor_get(m_cur_block_info, (MDB_val *)&zerokval, &result, MDB_GET_BOTH);
if (get_result == MDB_NOTFOUND)
{
throw0(BLOCK_DNE(std::string("Attempt to get hash from height ").append(boost::lexical_cast<std::string>(height)).append(" failed -- hash not in db").c_str()));
}
else if (get_result)
throw0(DB_ERROR(lmdb_error("Error attempting to retrieve a block hash from the db: ", get_result).c_str()));
mdb_block_info *bi = (mdb_block_info *)result.mv_data;
crypto::hash ret = bi->bi_hash;
TXN_POSTFIX_RDONLY();
return ret;
}
std::vector<block> BlockchainLMDB::get_blocks_range(const uint64_t& h1, const uint64_t& h2) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
std::vector<block> v;
for (uint64_t height = h1; height <= h2; ++height)
{
v.push_back(get_block_from_height(height));
}
return v;
}
std::vector<crypto::hash> BlockchainLMDB::get_hashes_range(const uint64_t& h1, const uint64_t& h2) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
std::vector<crypto::hash> v;
for (uint64_t height = h1; height <= h2; ++height)
{
v.push_back(get_block_hash_from_height(height));
}
return v;
}
crypto::hash BlockchainLMDB::top_block_hash() const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
uint64_t m_height = height();
if (m_height != 0)
{
return get_block_hash_from_height(m_height - 1);
}
return null_hash;
}
block BlockchainLMDB::get_top_block() const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
uint64_t m_height = height();
if (m_height != 0)
{
return get_block_from_height(m_height - 1);
}
block b;
return b;
}
uint64_t BlockchainLMDB::height() const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
TXN_PREFIX_RDONLY();
int result;
// get current height
MDB_stat db_stats;
if ((result = mdb_stat(m_txn, m_blocks, &db_stats)))
throw0(DB_ERROR(lmdb_error("Failed to query m_blocks: ", result).c_str()));
return db_stats.ms_entries;
}
uint64_t BlockchainLMDB::num_outputs() const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
TXN_PREFIX_RDONLY();
int result;
// get current number of outputs
MDB_stat db_stats;
if ((result = mdb_stat(m_txn, m_output_txs, &db_stats)))
throw0(DB_ERROR(lmdb_error("Failed to query m_output_txs: ", result).c_str()));
return db_stats.ms_entries;
}
bool BlockchainLMDB::tx_exists(const crypto::hash& h) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
TXN_PREFIX_RDONLY();
RCURSOR(tx_indices);
RCURSOR(txs);
MDB_val_set(key, h);
bool tx_found = false;
TIME_MEASURE_START(time1);
auto get_result = mdb_cursor_get(m_cur_tx_indices, (MDB_val *)&zerokval, &key, MDB_GET_BOTH);
if (get_result == 0)
tx_found = true;
else if (get_result != MDB_NOTFOUND)
throw0(DB_ERROR(lmdb_error(std::string("DB error attempting to fetch transaction index from hash ") + epee::string_tools::pod_to_hex(h) + ": ", get_result).c_str()));
// This isn't needed as part of the check; we're not checking consistency of the db.
// get_result = mdb_cursor_get(m_cur_txs, &val_tx_index, &result, MDB_SET);
TIME_MEASURE_FINISH(time1);
time_tx_exists += time1;
TXN_POSTFIX_RDONLY();
if (! tx_found)
{
LOG_PRINT_L1("transaction with hash " << epee::string_tools::pod_to_hex(h) << " not found in db");
return false;
}
// Below not needed due to above comment.
// if (get_result == MDB_NOTFOUND)
// throw0(DB_ERROR(std::string("transaction with hash ").append(epee::string_tools::pod_to_hex(h)).append(" not found at index").c_str()));
// else if (get_result)
// throw0(DB_ERROR(lmdb_error(std::string("DB error attempting to fetch transaction ") + epee::string_tools::pod_to_hex(h) + " at index: ", get_result).c_str()));
return true;
}
bool BlockchainLMDB::tx_exists(const crypto::hash& h, uint64_t& tx_id) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
TXN_PREFIX_RDONLY();
RCURSOR(tx_indices);
MDB_val_set(v, h);
TIME_MEASURE_START(time1);
auto get_result = mdb_cursor_get(m_cur_tx_indices, (MDB_val *)&zerokval, &v, MDB_GET_BOTH);
TIME_MEASURE_FINISH(time1);
time_tx_exists += time1;
if (!get_result) {
txindex *tip = (txindex *)v.mv_data;
tx_id = tip->data.tx_id;
}
TXN_POSTFIX_RDONLY();
bool ret = false;
if (get_result == MDB_NOTFOUND)
{
LOG_PRINT_L1("transaction with hash " << epee::string_tools::pod_to_hex(h) << " not found in db");
}
else if (get_result)
throw0(DB_ERROR(lmdb_error("DB error attempting to fetch transaction from hash", get_result).c_str()));
else
ret = true;
return ret;
}
uint64_t BlockchainLMDB::get_tx_unlock_time(const crypto::hash& h) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
TXN_PREFIX_RDONLY();
RCURSOR(tx_indices);
MDB_val_set(v, h);
auto get_result = mdb_cursor_get(m_cur_tx_indices, (MDB_val *)&zerokval, &v, MDB_GET_BOTH);
if (get_result == MDB_NOTFOUND)
throw1(TX_DNE(lmdb_error(std::string("tx data with hash ") + epee::string_tools::pod_to_hex(h) + " not found in db: ", get_result).c_str()));
else if (get_result)
throw0(DB_ERROR(lmdb_error("DB error attempting to fetch tx data from hash: ", get_result).c_str()));
txindex *tip = (txindex *)v.mv_data;
uint64_t ret = tip->data.unlock_time;
TXN_POSTFIX_RDONLY();
return ret;
}
bool BlockchainLMDB::get_tx_blob(const crypto::hash& h, cryptonote::blobdata &bd) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
TXN_PREFIX_RDONLY();
RCURSOR(tx_indices);
RCURSOR(txs);
MDB_val_set(v, h);
MDB_val result;
auto get_result = mdb_cursor_get(m_cur_tx_indices, (MDB_val *)&zerokval, &v, MDB_GET_BOTH);
if (get_result == 0)
{
txindex *tip = (txindex *)v.mv_data;
MDB_val_set(val_tx_id, tip->data.tx_id);
get_result = mdb_cursor_get(m_cur_txs, &val_tx_id, &result, MDB_SET);
}
if (get_result == MDB_NOTFOUND)
return false;
else if (get_result)
throw0(DB_ERROR(lmdb_error("DB error attempting to fetch tx from hash", get_result).c_str()));
bd.assign(reinterpret_cast<char*>(result.mv_data), result.mv_size);
TXN_POSTFIX_RDONLY();
return true;
}
uint64_t BlockchainLMDB::get_tx_count() const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
TXN_PREFIX_RDONLY();
int result;
MDB_stat db_stats;
if ((result = mdb_stat(m_txn, m_txs, &db_stats)))
throw0(DB_ERROR(lmdb_error("Failed to query m_txs: ", result).c_str()));
TXN_POSTFIX_RDONLY();
return db_stats.ms_entries;
}
std::vector<transaction> BlockchainLMDB::get_tx_list(const std::vector<crypto::hash>& hlist) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
std::vector<transaction> v;
for (auto& h : hlist)
{
v.push_back(get_tx(h));
}
return v;
}
uint64_t BlockchainLMDB::get_tx_block_height(const crypto::hash& h) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
TXN_PREFIX_RDONLY();
RCURSOR(tx_indices);
MDB_val_set(v, h);
auto get_result = mdb_cursor_get(m_cur_tx_indices, (MDB_val *)&zerokval, &v, MDB_GET_BOTH);
if (get_result == MDB_NOTFOUND)
{
throw1(TX_DNE(std::string("tx_data_t with hash ").append(epee::string_tools::pod_to_hex(h)).append(" not found in db").c_str()));
}
else if (get_result)
throw0(DB_ERROR(lmdb_error("DB error attempting to fetch tx height from hash", get_result).c_str()));
txindex *tip = (txindex *)v.mv_data;
uint64_t ret = tip->data.block_id;
TXN_POSTFIX_RDONLY();
return ret;
}
uint64_t BlockchainLMDB::get_num_outputs(const uint64_t& amount) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
TXN_PREFIX_RDONLY();
RCURSOR(output_amounts);
MDB_val_copy<uint64_t> k(amount);
MDB_val v;
mdb_size_t num_elems = 0;
auto result = mdb_cursor_get(m_cur_output_amounts, &k, &v, MDB_SET);
if (result == MDB_SUCCESS)
{
mdb_cursor_count(m_cur_output_amounts, &num_elems);
}
else if (result != MDB_NOTFOUND)
throw0(DB_ERROR("DB error attempting to get number of outputs of an amount"));
TXN_POSTFIX_RDONLY();
return num_elems;
}
// This is a lot harder now that we've removed the output_keys index
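// (It now requires chasing output_txs -> tx_indices -> txs and re-parsing
// the transaction blob, as the function below does.)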
output_data_t BlockchainLMDB::get_output_key(const uint64_t &global_index) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__ << " (unused version - does nothing)");
check_open();
TXN_PREFIX_RDONLY();
RCURSOR(output_txs);
RCURSOR(tx_indices);
RCURSOR(txs);
output_data_t od;
MDB_val_set(v, global_index);
auto get_result = mdb_cursor_get(m_cur_output_txs, (MDB_val *)&zerokval, &v, MDB_GET_BOTH);
if (get_result == MDB_NOTFOUND)
throw1(OUTPUT_DNE("output with given index not in db"));
else if (get_result)
throw0(DB_ERROR("DB error attempting to fetch output tx hash"));
outtx *ot = (outtx *)v.mv_data;
MDB_val_set(val_h, ot->tx_hash);
get_result = mdb_cursor_get(m_cur_tx_indices, (MDB_val *)&zerokval, &val_h, MDB_GET_BOTH);
if (get_result)
throw0(DB_ERROR(lmdb_error(std::string("DB error attempting to fetch transaction index from hash ") + epee::string_tools::pod_to_hex(ot->tx_hash) + ": ", get_result).c_str()));
txindex *tip = (txindex *)val_h.mv_data;
MDB_val_set(val_tx_id, tip->data.tx_id);
MDB_val result;
get_result = mdb_cursor_get(m_cur_txs, &val_tx_id, &result, MDB_SET);
if (get_result == MDB_NOTFOUND)
throw1(TX_DNE(std::string("tx with hash ").append(epee::string_tools::pod_to_hex(ot->tx_hash)).append(" not found in db").c_str()));
else if (get_result)
throw0(DB_ERROR(lmdb_error("DB error attempting to fetch tx from hash", get_result).c_str()));
blobdata bd;
bd.assign(reinterpret_cast<char*>(result.mv_data), result.mv_size);
transaction tx;
if (!parse_and_validate_tx_from_blob(bd, tx))
throw0(DB_ERROR("Failed to parse tx from blob retrieved from the db"));
const tx_out tx_output = tx.vout[ot->local_index];
od.unlock_time = tip->data.unlock_time;
od.height = tip->data.block_id;
od.pubkey = boost::get<txout_to_key>(tx_output.target).key;
TXN_POSTFIX_RDONLY();
return od;
}
output_data_t BlockchainLMDB::get_output_key(const uint64_t& amount, const uint64_t& index)
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
TXN_PREFIX_RDONLY();
RCURSOR(output_amounts);
MDB_val_set(k, amount);
MDB_val_set(v, index);
auto get_result = mdb_cursor_get(m_cur_output_amounts, &k, &v, MDB_GET_BOTH);
if (get_result == MDB_NOTFOUND)
throw1(OUTPUT_DNE("Attempting to get output pubkey by index, but key does not exist"));
else if (get_result)
throw0(DB_ERROR("Error attempting to retrieve an output pubkey from the db"));
output_data_t ret;
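  // RingCT outputs (amount == 0) store a full output_data_t, commitment
  // included; pre-RingCT outputs store a shorter record, so the commitment
  // is reconstructed as a zero-blinded commitment to the public amount.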
if (amount == 0)
{
const outkey *okp = (const outkey *)v.mv_data;
ret = okp->data;
}
else
{
const pre_rct_outkey *okp = (const pre_rct_outkey *)v.mv_data;
memcpy(&ret, &okp->data, sizeof(pre_rct_output_data_t));;
ret.commitment = rct::zeroCommit(amount);
}
TXN_POSTFIX_RDONLY();
return ret;
}
tx_out_index BlockchainLMDB::get_output_tx_and_index_from_global(const uint64_t& output_id) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
TXN_PREFIX_RDONLY();
RCURSOR(output_txs);
MDB_val_set(v, output_id);
auto get_result = mdb_cursor_get(m_cur_output_txs, (MDB_val *)&zerokval, &v, MDB_GET_BOTH);
if (get_result == MDB_NOTFOUND)
throw1(OUTPUT_DNE("output with given index not in db"));
else if (get_result)
throw0(DB_ERROR("DB error attempting to fetch output tx hash"));
outtx *ot = (outtx *)v.mv_data;
tx_out_index ret = tx_out_index(ot->tx_hash, ot->local_index);
TXN_POSTFIX_RDONLY();
return ret;
}
tx_out_index BlockchainLMDB::get_output_tx_and_index(const uint64_t& amount, const uint64_t& index) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
std::vector < uint64_t > offsets;
std::vector<tx_out_index> indices;
offsets.push_back(index);
get_output_tx_and_index(amount, offsets, indices);
if (!indices.size())
throw1(OUTPUT_DNE("Attempting to get an output index by amount and amount index, but amount not found"));
return indices[0];
}
std::vector<uint64_t> BlockchainLMDB::get_tx_amount_output_indices(const uint64_t tx_id) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
TXN_PREFIX_RDONLY();
RCURSOR(tx_outputs);
int result = 0;
MDB_val_set(k_tx_id, tx_id);
MDB_val v;
std::vector<uint64_t> amount_output_indices;
result = mdb_cursor_get(m_cur_tx_outputs, &k_tx_id, &v, MDB_SET);
if (result == MDB_NOTFOUND)
LOG_PRINT_L0("WARNING: Unexpected: tx has no amount indices stored in "
"tx_outputs, but it should have an empty entry even if it's a tx without "
"outputs");
else if (result)
throw0(DB_ERROR(lmdb_error("DB error attempting to get data for tx_outputs[tx_index]", result).c_str()));
const uint64_t* indices = (const uint64_t*)v.mv_data;
int num_outputs = v.mv_size / sizeof(uint64_t);
amount_output_indices.reserve(num_outputs);
for (int i = 0; i < num_outputs; ++i)
{
// LOG_PRINT_L0("amount output index[" << 2*i << "]" << ": " << paired_indices[2*i] << " global output index: " << paired_indices[2*i+1]);
amount_output_indices.push_back(indices[i]);
}
indices = nullptr;
TXN_POSTFIX_RDONLY();
return amount_output_indices;
}
bool BlockchainLMDB::has_key_image(const crypto::key_image& img) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
bool ret;
TXN_PREFIX_RDONLY();
RCURSOR(spent_keys);
MDB_val k = {sizeof(img), (void *)&img};
ret = (mdb_cursor_get(m_cur_spent_keys, (MDB_val *)&zerokval, &k, MDB_GET_BOTH) == 0);
TXN_POSTFIX_RDONLY();
return ret;
}
bool BlockchainLMDB::for_all_key_images(std::function<bool(const crypto::key_image&)> f) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
TXN_PREFIX_RDONLY();
RCURSOR(spent_keys);
MDB_val k, v;
bool fret = true;
k = zerokval;
MDB_cursor_op op = MDB_FIRST;
while (1)
{
int ret = mdb_cursor_get(m_cur_spent_keys, &k, &v, op);
op = MDB_NEXT;
if (ret == MDB_NOTFOUND)
break;
if (ret < 0)
throw0(DB_ERROR("Failed to enumerate key images"));
const crypto::key_image k_image = *(const crypto::key_image*)v.mv_data;
if (!f(k_image)) {
fret = false;
break;
}
}
TXN_POSTFIX_RDONLY();
return fret;
}
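// Illustrative sketch: count spent key images with the visitor above; a
// callback returning false would stop the scan early (and make the function
// return false):
//
//   uint64_t n = 0;
//   db.for_all_key_images([&n](const crypto::key_image&) { ++n; return true; });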
bool BlockchainLMDB::for_blocks_range(const uint64_t& h1, const uint64_t& h2, std::function<bool(uint64_t, const crypto::hash&, const cryptonote::block&)> f) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
TXN_PREFIX_RDONLY();
RCURSOR(blocks);
MDB_val k;
MDB_val v;
bool fret = true;
MDB_cursor_op op;
if (h1)
{
k = MDB_val{sizeof(h1), (void*)&h1};
op = MDB_SET;
} else
{
op = MDB_FIRST;
}
while (1)
{
int ret = mdb_cursor_get(m_cur_blocks, &k, &v, op);
op = MDB_NEXT;
if (ret == MDB_NOTFOUND)
break;
if (ret)
throw0(DB_ERROR("Failed to enumerate blocks"));
uint64_t height = *(const uint64_t*)k.mv_data;
blobdata bd;
bd.assign(reinterpret_cast<char*>(v.mv_data), v.mv_size);
block b;
if (!parse_and_validate_block_from_blob(bd, b))
throw0(DB_ERROR("Failed to parse block from blob retrieved from the db"));
crypto::hash hash;
if (!get_block_hash(b, hash))
throw0(DB_ERROR("Failed to get block hash from blob retrieved from the db"));
if (!f(height, hash, b)) {
fret = false;
break;
}
if (height >= h2)
break;
}
TXN_POSTFIX_RDONLY();
return fret;
}
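// Illustrative sketch (assumes `db` open): visit blocks at heights 1000..1010.
// Passing h1 == 0 starts from the first stored block, and returning false from
// the callback stops the walk early:
//
//   db.for_blocks_range(1000, 1010,
//     [](uint64_t h, const crypto::hash &hash, const cryptonote::block&) {
//       LOG_PRINT_L1("block " << h << ": " << hash);
//       return true;
//     });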
bool BlockchainLMDB::for_all_transactions(std::function<bool(const crypto::hash&, const cryptonote::transaction&)> f) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
TXN_PREFIX_RDONLY();
RCURSOR(txs);
RCURSOR(tx_indices);
MDB_val k;
MDB_val v;
bool fret = true;
MDB_cursor_op op = MDB_FIRST;
while (1)
{
int ret = mdb_cursor_get(m_cur_tx_indices, &k, &v, op);
op = MDB_NEXT;
if (ret == MDB_NOTFOUND)
break;
if (ret)
throw0(DB_ERROR(lmdb_error("Failed to enumerate transactions: ", ret).c_str()));
txindex *ti = (txindex *)v.mv_data;
const crypto::hash hash = ti->key;
k.mv_data = (void *)&ti->data.tx_id;
k.mv_size = sizeof(ti->data.tx_id);
ret = mdb_cursor_get(m_cur_txs, &k, &v, MDB_SET);
if (ret == MDB_NOTFOUND)
break;
if (ret)
throw0(DB_ERROR(lmdb_error("Failed to enumerate transactions: ", ret).c_str()));
blobdata bd;
bd.assign(reinterpret_cast<char*>(v.mv_data), v.mv_size);
transaction tx;
if (!parse_and_validate_tx_from_blob(bd, tx))
throw0(DB_ERROR("Failed to parse tx from blob retrieved from the db"));
if (!f(hash, tx)) {
fret = false;
break;
}
}
TXN_POSTFIX_RDONLY();
return fret;
}
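// Illustrative sketch: count all transactions in the db via the visitor above:
//
//   uint64_t n_txs = 0;
//   db.for_all_transactions([&n_txs](const crypto::hash&, const cryptonote::transaction&) {
//     ++n_txs;
//     return true;
//   });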
bool BlockchainLMDB::for_all_outputs(std::function<bool(uint64_t amount, const crypto::hash &tx_hash, size_t tx_idx)> f) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
TXN_PREFIX_RDONLY();
RCURSOR(output_amounts);
MDB_val k;
MDB_val v;
bool fret = true;
MDB_cursor_op op = MDB_FIRST;
while (1)
{
int ret = mdb_cursor_get(m_cur_output_amounts, &k, &v, op);
op = MDB_NEXT;
if (ret == MDB_NOTFOUND)
break;
if (ret)
throw0(DB_ERROR("Failed to enumerate outputs"));
uint64_t amount = *(const uint64_t*)k.mv_data;
outkey *ok = (outkey *)v.mv_data;
tx_out_index toi = get_output_tx_and_index_from_global(ok->output_id);
if (!f(amount, toi.first, toi.second)) {
fret = false;
break;
}
}
TXN_POSTFIX_RDONLY();
return fret;
}
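// Illustrative sketch: tally outputs per amount (amount 0 being RingCT),
// assuming <map> is included:
//
//   std::map<uint64_t, uint64_t> per_amount;
//   db.for_all_outputs([&per_amount](uint64_t amount, const crypto::hash&, size_t) {
//     ++per_amount[amount];
//     return true;
//   });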
// batch_num_blocks: (optional) Used to check if resize needed before batch transaction starts.
bool BlockchainLMDB::batch_start(uint64_t batch_num_blocks, uint64_t batch_bytes)
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
if (! m_batch_transactions)
throw0(DB_ERROR("batch transactions not enabled"));
if (m_batch_active)
return false;
if (m_write_batch_txn != nullptr)
return false;
if (m_write_txn)
throw0(DB_ERROR("batch transaction attempted, but m_write_txn already in use"));
check_open();
m_writer = boost::this_thread::get_id();
check_and_resize_for_batch(batch_num_blocks, batch_bytes);
m_write_batch_txn = new mdb_txn_safe();
// NOTE: need to make sure it's destroyed properly when done
if (auto mdb_res = lmdb_txn_begin(m_env, NULL, 0, *m_write_batch_txn))
{
delete m_write_batch_txn;
m_write_batch_txn = nullptr;
throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", mdb_res).c_str()));
}
// indicates this transaction is for batch transactions, but not whether it's
// active
m_write_batch_txn->m_batch_txn = true;
m_write_txn = m_write_batch_txn;
m_batch_active = true;
memset(&m_wcursors, 0, sizeof(m_wcursors));
if (m_tinfo.get())
{
if (m_tinfo->m_ti_rflags.m_rf_txn)
mdb_txn_reset(m_tinfo->m_ti_rtxn);
memset(&m_tinfo->m_ti_rflags, 0, sizeof(m_tinfo->m_ti_rflags));
}
LOG_PRINT_L3("batch transaction: begin");
return true;
}
void BlockchainLMDB::batch_commit()
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
if (! m_batch_transactions)
throw0(DB_ERROR("batch transactions not enabled"));
if (! m_batch_active)
throw1(DB_ERROR("batch transaction not in progress"));
if (m_write_batch_txn == nullptr)
throw1(DB_ERROR("batch transaction not in progress"));
if (m_writer != boost::this_thread::get_id())
throw1(DB_ERROR("batch transaction owned by other thread"));
check_open();
LOG_PRINT_L3("batch transaction: committing...");
TIME_MEASURE_START(time1);
m_write_txn->commit();
TIME_MEASURE_FINISH(time1);
time_commit1 += time1;
LOG_PRINT_L3("batch transaction: committed");
m_write_txn = nullptr;
delete m_write_batch_txn;
m_write_batch_txn = nullptr;
memset(&m_wcursors, 0, sizeof(m_wcursors));
}
void BlockchainLMDB::cleanup_batch()
{
// for destruction of batch transaction
m_write_txn = nullptr;
delete m_write_batch_txn;
m_write_batch_txn = nullptr;
m_batch_active = false;
memset(&m_wcursors, 0, sizeof(m_wcursors));
}
void BlockchainLMDB::batch_stop()
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
if (! m_batch_transactions)
throw0(DB_ERROR("batch transactions not enabled"));
if (! m_batch_active)
throw1(DB_ERROR("batch transaction not in progress"));
if (m_write_batch_txn == nullptr)
throw1(DB_ERROR("batch transaction not in progress"));
if (m_writer != boost::this_thread::get_id())
throw1(DB_ERROR("batch transaction owned by other thread"));
check_open();
LOG_PRINT_L3("batch transaction: committing...");
TIME_MEASURE_START(time1);
try
{
m_write_txn->commit();
TIME_MEASURE_FINISH(time1);
time_commit1 += time1;
cleanup_batch();
}
catch (const std::exception &e)
{
cleanup_batch();
throw;
}
LOG_PRINT_L3("batch transaction: end");
}
void BlockchainLMDB::batch_abort()
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
if (! m_batch_transactions)
throw0(DB_ERROR("batch transactions not enabled"));
if (! m_batch_active)
throw1(DB_ERROR("batch transaction not in progress"));
if (m_write_batch_txn == nullptr)
throw1(DB_ERROR("batch transaction not in progress"));
if (m_writer != boost::this_thread::get_id())
throw1(DB_ERROR("batch transaction owned by other thread"));
check_open();
// for destruction of batch transaction
m_write_txn = nullptr;
// explicitly abort here in case mdb_env_close() (via BlockchainLMDB::close()) is called before the BlockchainLMDB destructor runs.
m_write_batch_txn->abort();
delete m_write_batch_txn;
m_write_batch_txn = nullptr;
m_batch_active = false;
memset(&m_wcursors, 0, sizeof(m_wcursors));
LOG_PRINT_L3("batch transaction: aborted");
}
void BlockchainLMDB::set_batch_transactions(bool batch_transactions)
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
if (batch_transactions && m_batch_transactions)
{
LOG_PRINT_L0("WARNING: batch transaction mode already enabled, ignoring request to enable it again");
}
m_batch_transactions = batch_transactions;
LOG_PRINT_L3("batch transactions " << (m_batch_transactions ? "enabled" : "disabled"));
}
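// Illustrative batch lifecycle sketch (assumes an open instance `db`). Batch
// mode must be enabled before batch_start(); on failure batch_abort() rolls
// the whole batch back:
//
//   db.set_batch_transactions(true);
//   db.batch_start(1000, 0);  // also checks/resizes the memory map up front
//   try { /* ... add up to 1000 blocks ... */ db.batch_stop(); }
//   catch (...) { db.batch_abort(); throw; }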
// return true if we started the txn, false if already started
bool BlockchainLMDB::block_rtxn_start(MDB_txn **mtxn, mdb_txn_cursors **mcur) const
{
bool ret = false;
mdb_threadinfo *tinfo;
if (m_write_txn && m_writer == boost::this_thread::get_id()) {
*mtxn = m_write_txn->m_txn;
*mcur = (mdb_txn_cursors *)&m_wcursors;
return ret;
}
/* Check for existing info and force reset if env doesn't match -
* only happens if env was opened/closed multiple times in same process
*/
if (!(tinfo = m_tinfo.get()) || mdb_txn_env(tinfo->m_ti_rtxn) != m_env)
{
tinfo = new mdb_threadinfo;
m_tinfo.reset(tinfo);
memset(&tinfo->m_ti_rcursors, 0, sizeof(tinfo->m_ti_rcursors));
memset(&tinfo->m_ti_rflags, 0, sizeof(tinfo->m_ti_rflags));
if (auto mdb_res = lmdb_txn_begin(m_env, NULL, MDB_RDONLY, &tinfo->m_ti_rtxn))
throw0(DB_ERROR_TXN_START(lmdb_error("Failed to create a read transaction for the db: ", mdb_res).c_str()));
ret = true;
} else if (!tinfo->m_ti_rflags.m_rf_txn)
{
if (auto mdb_res = lmdb_txn_renew(tinfo->m_ti_rtxn))
throw0(DB_ERROR_TXN_START(lmdb_error("Failed to renew a read transaction for the db: ", mdb_res).c_str()));
ret = true;
}
if (ret)
tinfo->m_ti_rflags.m_rf_txn = true;
*mtxn = tinfo->m_ti_rtxn;
*mcur = &tinfo->m_ti_rcursors;
if (ret)
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
return ret;
}
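// Illustrative sketch of the pairing rule for the per-thread read txn: the
// caller invokes block_rtxn_stop() only when block_rtxn_start() reports that
// it actually started (or renewed) the txn:
//
//   MDB_txn *mtxn; mdb_txn_cursors *mcur;
//   bool did_start = block_rtxn_start(&mtxn, &mcur);
//   // ... reads against mtxn / mcur ...
//   if (did_start)
//     block_rtxn_stop();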
void BlockchainLMDB::block_rtxn_stop() const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
mdb_txn_reset(m_tinfo->m_ti_rtxn);
memset(&m_tinfo->m_ti_rflags, 0, sizeof(m_tinfo->m_ti_rflags));
}
void BlockchainLMDB::block_txn_start(bool readonly)
{
if (readonly)
{
MDB_txn *mtxn;
mdb_txn_cursors *mcur;
block_rtxn_start(&mtxn, &mcur);
return;
}
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
// Distinguish the exceptions here from exceptions that would be thrown while
// using the txn and committing it.
//
// If an exception is thrown in this setup, we don't want the caller to catch
// it and proceed as if there were an existing write txn, such as trying to
// call block_txn_abort(). It also indicates a serious issue which will
// probably be thrown up another layer.
if (! m_batch_active && m_write_txn)
throw0(DB_ERROR_TXN_START((std::string("Attempted to start new write txn when write txn already exists in ")+__FUNCTION__).c_str()));
if (! m_batch_active)
{
m_writer = boost::this_thread::get_id();
m_write_txn = new mdb_txn_safe();
if (auto mdb_res = lmdb_txn_begin(m_env, NULL, 0, *m_write_txn))
{
delete m_write_txn;
m_write_txn = nullptr;
throw0(DB_ERROR_TXN_START(lmdb_error("Failed to create a transaction for the db: ", mdb_res).c_str()));
}
memset(&m_wcursors, 0, sizeof(m_wcursors));
if (m_tinfo.get())
{
if (m_tinfo->m_ti_rflags.m_rf_txn)
mdb_txn_reset(m_tinfo->m_ti_rtxn);
memset(&m_tinfo->m_ti_rflags, 0, sizeof(m_tinfo->m_ti_rflags));
}
} else if (m_writer != boost::this_thread::get_id())
throw0(DB_ERROR_TXN_START((std::string("Attempted to start new write txn when batch txn already exists in ")+__FUNCTION__).c_str()));
}
void BlockchainLMDB::block_txn_stop()
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
if (m_write_txn && m_writer == boost::this_thread::get_id())
{
if (! m_batch_active)
{
TIME_MEASURE_START(time1);
m_write_txn->commit();
TIME_MEASURE_FINISH(time1);
time_commit1 += time1;
delete m_write_txn;
m_write_txn = nullptr;
memset(&m_wcursors, 0, sizeof(m_wcursors));
}
}
else if (m_tinfo->m_ti_rtxn)
{
mdb_txn_reset(m_tinfo->m_ti_rtxn);
memset(&m_tinfo->m_ti_rflags, 0, sizeof(m_tinfo->m_ti_rflags));
}
}
void BlockchainLMDB::block_txn_abort()
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
if (m_write_txn && m_writer == boost::this_thread::get_id())
{
if (! m_batch_active)
{
delete m_write_txn;
m_write_txn = nullptr;
memset(&m_wcursors, 0, sizeof(m_wcursors));
}
}
else if (m_tinfo->m_ti_rtxn)
{
mdb_txn_reset(m_tinfo->m_ti_rtxn);
memset(&m_tinfo->m_ti_rflags, 0, sizeof(m_tinfo->m_ti_rflags));
}
else
{
// This would probably mean an earlier exception was caught, but then we
// proceeded further than we should have.
throw0(DB_ERROR((std::string("BlockchainLMDB::") + __func__ +
std::string(": block-level DB transaction abort called when write txn doesn't exist")
).c_str()));
}
}
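// Illustrative write-scope sketch (mirrors pop_block() below): start a
// block-level write txn, commit via block_txn_stop() on success, and abort on
// any failure:
//
//   block_txn_start(false);
//   try { /* ... mutate the db ... */ block_txn_stop(); }
//   catch (...) { block_txn_abort(); throw; }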
uint64_t BlockchainLMDB::add_block(const block& blk, const size_t& block_size, const difficulty_type& cumulative_difficulty, const uint64_t& coins_generated,
const std::vector<transaction>& txs)
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
uint64_t curr_height = height();
if (curr_height % 1000 == 0)
{
// for batch mode, DB resize check is done at start of batch transaction
if (! m_batch_active && need_resize())
{
LOG_PRINT_L0("LMDB memory map needs to be resized, doing that now.");
do_resize();
}
}
try
{
BlockchainDB::add_block(blk, block_size, cumulative_difficulty, coins_generated, txs);
}
catch (const DB_ERROR_TXN_START& e)
{
throw;
}
catch (...)
{
block_txn_abort();
throw;
}
return ++curr_height;
}
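// Illustrative sketch (assumes an open instance `db` with prepared arguments):
// the return value is the chain height after the block is added, i.e. the old
// height plus one:
//
//   uint64_t new_height = db.add_block(blk, block_size, cum_diff, coins, txs);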
void BlockchainLMDB::pop_block(block& blk, std::vector<transaction>& txs)
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
block_txn_start(false);
try
{
BlockchainDB::pop_block(blk, txs);
block_txn_stop();
}
catch (...)
{
block_txn_abort();
throw;
}
}
void BlockchainLMDB::get_output_tx_and_index_from_global(const std::vector<uint64_t> &global_indices,
std::vector<tx_out_index> &tx_out_indices) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
tx_out_indices.clear();
TXN_PREFIX_RDONLY();
RCURSOR(output_txs);
for (const uint64_t &output_id : global_indices)
{
MDB_val_set(v, output_id);
auto get_result = mdb_cursor_get(m_cur_output_txs, (MDB_val *)&zerokval, &v, MDB_GET_BOTH);
if (get_result == MDB_NOTFOUND)
throw1(OUTPUT_DNE("output with given index not in db"));
else if (get_result)
throw0(DB_ERROR("DB error attempting to fetch output tx hash"));
outtx *ot = (outtx *)v.mv_data;
auto result = tx_out_index(ot->tx_hash, ot->local_index);
tx_out_indices.push_back(result);
}
TXN_POSTFIX_RDONLY();
}
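// Bulk-fetch output data (public key, unlock time, height, commitment) for
// the given amount at each requested per-amount offset. If allow_partial is
// set, stop at the first missing offset instead of throwing.
// Illustrative use (hypothetical instance and offsets):
//   std::vector<output_data_t> outs;
//   db.get_output_key(0, {3, 17, 42}, outs, false); // amount 0 == RingCT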
void BlockchainLMDB::get_output_key(const uint64_t &amount, const std::vector<uint64_t> &offsets, std::vector<output_data_t> &outputs, bool allow_partial)
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
TIME_MEASURE_START(db3);
check_open();
outputs.clear();
TXN_PREFIX_RDONLY();
RCURSOR(output_amounts);
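// output_amounts is a dup-sorted table keyed by amount; MDB_GET_BOTH seeks
// the exact (amount, offset) pair in a single cursor operation.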
MDB_val_set(k, amount);
for (const uint64_t &index : offsets)
{
MDB_val_set(v, index);
auto get_result = mdb_cursor_get(m_cur_output_amounts, &k, &v, MDB_GET_BOTH);
if (get_result == MDB_NOTFOUND)
{
if (allow_partial)
{
MDEBUG("Partial result: " << outputs.size() << "/" << offsets.size());
break;
}
throw1(OUTPUT_DNE((std::string("Attempting to get output pubkey by global index (amount ") + boost::lexical_cast<std::string>(amount) + ", index " + boost::lexical_cast<std::string>(index) + ", count " + boost::lexical_cast<std::string>(get_num_outputs(amount)) + "), but key does not exist (current height " + boost::lexical_cast<std::string>(height()) + ")").c_str()));
}
else if (get_result)
throw0(DB_ERROR(lmdb_error("Error attempting to retrieve an output pubkey from the db", get_result).c_str()));
output_data_t data;
if (amount == 0)
{
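// RingCT outputs (amount == 0) store the full output_data_t, commitment included.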
const outkey *okp = (const outkey *)v.mv_data;
data = okp->data;
}
else
{
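// Pre-RingCT outputs have no stored commitment; copy the smaller
// pre_rct_output_data_t and derive the commitment to the cleartext amount.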
const pre_rct_outkey *okp = (const pre_rct_outkey *)v.mv_data;
memcpy(&data, &okp->data, sizeof(pre_rct_output_data_t));
data.commitment = rct::zeroCommit(amount);
}
outputs.push_back(data);
}
TXN_POSTFIX_RDONLY();
TIME_MEASURE_FINISH(db3);
LOG_PRINT_L3("db3: " << db3);
}
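// Resolve each (amount, offset) pair to a (tx hash, local output index) pair.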
void BlockchainLMDB::get_output_tx_and_index(const uint64_t& amount, const std::vector<uint64_t> &offsets, std::vector<tx_out_index> &indices) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
indices.clear();
std::vector <uint64_t> tx_indices;
TXN_PREFIX_RDONLY();
RCURSOR(output_amounts);
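// First pass: look up the global output id recorded for each (amount, offset).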
MDB_val_set(k, amount);
for (const uint64_t &index : offsets)
{
MDB_val_set(v, index);
auto get_result = mdb_cursor_get(m_cur_output_amounts, &k, &v, MDB_GET_BOTH);
if (get_result == MDB_NOTFOUND)
throw1(OUTPUT_DNE("Attempting to get output by index, but key does not exist"));
else if (get_result)
throw0(DB_ERROR(lmdb_error("Error attempting to retrieve an output from the db", get_result).c_str()));
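// The duplicate record found for this (amount, offset) carries the output's
// global id.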
const outkey *okp = (const outkey *)v.mv_data;
tx_indices.push_back(okp->output_id);
}
TIME_MEASURE_START(db3);
  if (!tx_indices.empty())
{
get_output_tx_and_index_from_global(tx_indices, indices);
}
TIME_MEASURE_FINISH(db3);
LOG_PRINT_L3("db3: " << db3);
}
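// get_output_histogram: for each requested amount (or every amount in the DB
// when `amounts` is empty), returns a tuple of (total instances, unlocked
// instances, instances with timestamp >= recent_cutoff).
//
// Illustrative sketch only, assuming an open BlockchainLMDB `db` and an
// arbitrary amount value:
//
//   auto h = db.get_output_histogram({1000000000000}, true, 0);
//   uint64_t total    = std::get<0>(h[1000000000000]);
//   uint64_t unlocked = std::get<1>(h[1000000000000]);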
std::map<uint64_t, std::tuple<uint64_t, uint64_t, uint64_t>> BlockchainLMDB::get_output_histogram(const std::vector<uint64_t> &amounts, bool unlocked, uint64_t recent_cutoff) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
TXN_PREFIX_RDONLY();
RCURSOR(output_amounts);
std::map<uint64_t, std::tuple<uint64_t, uint64_t, uint64_t>> histogram;
MDB_val k;
MDB_val v;
if (amounts.empty())
{
MDB_cursor_op op = MDB_FIRST;
while (1)
{
int ret = mdb_cursor_get(m_cur_output_amounts, &k, &v, op);
op = MDB_NEXT_NODUP;
if (ret == MDB_NOTFOUND)
break;
if (ret)
throw0(DB_ERROR(lmdb_error("Failed to enumerate outputs: ", ret).c_str()));
mdb_size_t num_elems = 0;
mdb_cursor_count(m_cur_output_amounts, &num_elems);
uint64_t amount = *(const uint64_t*)k.mv_data;
histogram[amount] = std::make_tuple(num_elems, 0, 0);
}
}
else
{
for (const auto &amount: amounts)
{
MDB_val_copy<uint64_t> k(amount);
int ret = mdb_cursor_get(m_cur_output_amounts, &k, &v, MDB_SET);
if (ret == MDB_NOTFOUND)
{
histogram[amount] = std::make_tuple(0, 0, 0);
}
else if (ret == MDB_SUCCESS)
{
mdb_size_t num_elems = 0;
mdb_cursor_count(m_cur_output_amounts, &num_elems);
histogram[amount] = std::make_tuple(num_elems, 0, 0);
}
else
{
throw0(DB_ERROR(lmdb_error("Failed to enumerate outputs: ", ret).c_str()));
}
}
}
if (unlocked || recent_cutoff > 0) {
const uint64_t blockchain_height = height();
for (std::map<uint64_t, std::tuple<uint64_t, uint64_t, uint64_t>>::iterator i = histogram.begin(); i != histogram.end(); ++i) {
uint64_t amount = i->first;
uint64_t num_elems = std::get<0>(i->second);
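      // Outputs for an amount are stored in chain order, so still-locked
      // outputs cluster at the end: scan backwards from the newest instance
      // and stop at the first one old enough to spend; everything at or
      // before that index counts as unlocked.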
while (num_elems > 0) {
const tx_out_index toi = get_output_tx_and_index(amount, num_elems - 1);
const uint64_t height = get_tx_block_height(toi.first);
if (height + CRYPTONOTE_DEFAULT_TX_SPENDABLE_AGE <= blockchain_height)
break;
--num_elems;
}
// modifying second does not invalidate the iterator
std::get<1>(i->second) = num_elems;
if (recent_cutoff > 0)
{
uint64_t recent = 0;
while (num_elems > 0) {
const tx_out_index toi = get_output_tx_and_index(amount, num_elems - 1);
const uint64_t height = get_tx_block_height(toi.first);
const uint64_t ts = get_block_timestamp(height);
if (ts < recent_cutoff)
break;
--num_elems;
++recent;
}
// modifying second does not invalidate the iterator
std::get<2>(i->second) = recent;
}
}
}
TXN_POSTFIX_RDONLY();
return histogram;
}
void BlockchainLMDB::check_hard_fork_info()
{
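  // Intentionally a no-op for the LMDB backend.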
}
void BlockchainLMDB::drop_hard_fork_info()
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
TXN_PREFIX(0);
  auto result = mdb_drop(*txn_ptr, m_hf_starting_heights, 1);
  if (result)
    throw1(DB_ERROR(lmdb_error("Failed to drop hard fork starting heights table: ", result).c_str()));
  result = mdb_drop(*txn_ptr, m_hf_versions, 1);
  if (result)
    throw1(DB_ERROR(lmdb_error("Failed to drop hard fork versions table: ", result).c_str()));
TXN_POSTFIX_SUCCESS();
}
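// set_hard_fork_version tries MDB_APPEND first, the fast path when versions
// arrive in increasing height order; if an entry already exists for this
// height (e.g. when a reorg revisits it), MDB_KEYEXIST falls back to a plain
// overwriting put.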
void BlockchainLMDB::set_hard_fork_version(uint64_t height, uint8_t version)
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
TXN_BLOCK_PREFIX(0);
MDB_val_copy<uint64_t> val_key(height);
MDB_val_copy<uint8_t> val_value(version);
int result;
result = mdb_put(*txn_ptr, m_hf_versions, &val_key, &val_value, MDB_APPEND);
if (result == MDB_KEYEXIST)
result = mdb_put(*txn_ptr, m_hf_versions, &val_key, &val_value, 0);
if (result)
throw1(DB_ERROR(lmdb_error("Error adding hard fork version to db transaction: ", result).c_str()));
TXN_BLOCK_POSTFIX_SUCCESS();
}
uint8_t BlockchainLMDB::get_hard_fork_version(uint64_t height) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
TXN_PREFIX_RDONLY();
RCURSOR(hf_versions);
MDB_val_copy<uint64_t> val_key(height);
MDB_val val_ret;
auto result = mdb_cursor_get(m_cur_hf_versions, &val_key, &val_ret, MDB_SET);
  if (result) // MDB_NOTFOUND is non-zero, so a missing entry is also treated as an error
throw0(DB_ERROR(lmdb_error("Error attempting to retrieve a hard fork version at height " + boost::lexical_cast<std::string>(height) + " from the db: ", result).c_str()));
uint8_t ret = *(const uint8_t*)val_ret.mv_data;
TXN_POSTFIX_RDONLY();
return ret;
}
bool BlockchainLMDB::is_read_only() const
{
unsigned int flags;
auto result = mdb_env_get_flags(m_env, &flags);
if (result)
throw0(DB_ERROR(lmdb_error("Error getting database environment info: ", result).c_str()));
if (flags & MDB_RDONLY)
return true;
return false;
}
void BlockchainLMDB::fixup()
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
// Always call parent as well
BlockchainDB::fixup();
}
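/* RENAME_DB patches a table's name in place: named databases are just records
 * in LMDB's main (unnamed) database, dbi 1, so we position a cursor on the
 * record for `name` and overwrite its last character ('r' -> 's'), e.g.
 * turning "block_heightr" into "block_heights" without copying any data.
 * This only works because the old and new names have the same length.
 */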
#define RENAME_DB(name) \
k.mv_data = (void *)name; \
k.mv_size = sizeof(name)-1; \
result = mdb_cursor_open(txn, 1, &c_cur); \
if (result) \
throw0(DB_ERROR(lmdb_error("Failed to open a cursor for " name ": ", result).c_str())); \
result = mdb_cursor_get(c_cur, &k, NULL, MDB_SET_KEY); \
if (result) \
throw0(DB_ERROR(lmdb_error("Failed to get DB record for " name ": ", result).c_str())); \
ptr = (char *)k.mv_data; \
ptr[sizeof(name)-2] = 's'
#define LOGIF(y) if (ELPP->vRegistry()->allowed(y, MONERO_DEFAULT_LOG_CATEGORY))
void BlockchainLMDB::migrate_0_1()
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
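  // Note: the local m_height below deliberately shadows the class member of
  // the same name for the duration of the migration.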
uint64_t i, z, m_height;
int result;
mdb_txn_safe txn(false);
MDB_val k, v;
char *ptr;
MLOG_YELLOW(el::Level::Info, "Migrating blockchain from DB version 0 to 1 - this may take a while:");
MINFO("updating blocks, hf_versions, outputs, txs, and spent_keys tables...");
do {
result = mdb_txn_begin(m_env, NULL, 0, txn);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str()));
MDB_stat db_stats;
if ((result = mdb_stat(txn, m_blocks, &db_stats)))
throw0(DB_ERROR(lmdb_error("Failed to query m_blocks: ", result).c_str()));
m_height = db_stats.ms_entries;
MINFO("Total number of blocks: " << m_height);
MINFO("block migration will update block_heights, block_info, and hf_versions...");
MINFO("migrating block_heights:");
MDB_dbi o_heights;
unsigned int flags;
result = mdb_dbi_flags(txn, m_block_heights, &flags);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to retrieve block_heights flags: ", result).c_str()));
/* if the flags are what we expect, this table has already been migrated */
if ((flags & (MDB_INTEGERKEY|MDB_DUPSORT|MDB_DUPFIXED)) == (MDB_INTEGERKEY|MDB_DUPSORT|MDB_DUPFIXED)) {
txn.abort();
LOG_PRINT_L1(" block_heights already migrated");
break;
}
/* the block_heights table name is the same but the old version and new version
* have incompatible DB flags. Create a new table with the right flags. We want
* the name to be similar to the old name so that it will occupy the same location
* in the DB.
*/
o_heights = m_block_heights;
lmdb_db_open(txn, "block_heightr", MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_block_heights, "Failed to open db handle for block_heightr");
mdb_set_dupsort(txn, m_block_heights, compare_hash32);
MDB_cursor *c_old, *c_cur;
blk_height bh;
MDB_val_set(nv, bh);
/* old table was k(hash), v(height).
* new table is DUPFIXED, k(zeroval), v{hash, height}.
*/
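    /* Sketch of one migrated record (all entries share the single dummy key
     * zerokval and are kept sorted by compare_hash32 on the hash field):
     *   key:  zerokval
     *   data: { crypto::hash bh_hash; uint64_t bh_height; }  // one per block
     */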
i = 0;
z = m_height;
while(1) {
if (!(i % 2000)) {
if (i) {
LOGIF(el::Level::Info) {
std::cout << i << " / " << z << " \r" << std::flush;
}
txn.commit();
result = mdb_txn_begin(m_env, NULL, 0, txn);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str()));
}
result = mdb_cursor_open(txn, m_block_heights, &c_cur);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to open a cursor for block_heightr: ", result).c_str()));
result = mdb_cursor_open(txn, o_heights, &c_old);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to open a cursor for block_heights: ", result).c_str()));
if (!i) {
MDB_stat ms;
mdb_stat(txn, m_block_heights, &ms);
i = ms.ms_entries;
}
}
result = mdb_cursor_get(c_old, &k, &v, MDB_NEXT);
if (result == MDB_NOTFOUND) {
txn.commit();
break;
}
else if (result)
throw0(DB_ERROR(lmdb_error("Failed to get a record from block_heights: ", result).c_str()));
bh.bh_hash = *(crypto::hash *)k.mv_data;
bh.bh_height = *(uint64_t *)v.mv_data;
result = mdb_cursor_put(c_cur, (MDB_val *)&zerokval, &nv, MDB_APPENDDUP);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to put a record into block_heightr: ", result).c_str()));
/* we delete the old records immediately, so the overall DB and mapsize should not grow.
* This is a little slower than just letting mdb_drop() delete it all at the end, but
* it saves a significant amount of disk space.
*/
result = mdb_cursor_del(c_old, 0);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to delete a record from block_heights: ", result).c_str()));
i++;
}
result = mdb_txn_begin(m_env, NULL, 0, txn);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str()));
/* Delete the old table */
result = mdb_drop(txn, o_heights, 1);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to delete old block_heights table: ", result).c_str()));
RENAME_DB("block_heightr");
/* close and reopen to get old dbi slot back */
mdb_dbi_close(m_env, m_block_heights);
lmdb_db_open(txn, "block_heights", MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED, m_block_heights, "Failed to open db handle for block_heights");
mdb_set_dupsort(txn, m_block_heights, compare_hash32);
txn.commit();
} while(0);
/* old tables are k(height), v(value).
* new table is DUPFIXED, k(zeroval), v{height, values...}.
*/
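  /* The block info migration below folds five per-height tables (block_coins,
   * block_diffs, block_hashes, block_sizes, block_timestamps) into a single
   * mdb_block_info record per block, appended as DUPFIXED data under zerokval.
   */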
do {
LOG_PRINT_L1("migrating block info:");
MDB_dbi coins;
result = mdb_txn_begin(m_env, NULL, 0, txn);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str()));
result = mdb_dbi_open(txn, "block_coins", 0, &coins);
if (result == MDB_NOTFOUND) {
txn.abort();
LOG_PRINT_L1(" block_info already migrated");
break;
}
MDB_dbi diffs, hashes, sizes, timestamps;
mdb_block_info bi;
MDB_val_set(nv, bi);
lmdb_db_open(txn, "block_diffs", 0, diffs, "Failed to open db handle for block_diffs");
lmdb_db_open(txn, "block_hashes", 0, hashes, "Failed to open db handle for block_hashes");
lmdb_db_open(txn, "block_sizes", 0, sizes, "Failed to open db handle for block_sizes");
lmdb_db_open(txn, "block_timestamps", 0, timestamps, "Failed to open db handle for block_timestamps");
MDB_cursor *c_cur, *c_coins, *c_diffs, *c_hashes, *c_sizes, *c_timestamps;
i = 0;
z = m_height;
while(1) {
MDB_val k, v;
if (!(i % 2000)) {
if (i) {
LOGIF(el::Level::Info) {
std::cout << i << " / " << z << " \r" << std::flush;
}
txn.commit();
result = mdb_txn_begin(m_env, NULL, 0, txn);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str()));
}
result = mdb_cursor_open(txn, m_block_info, &c_cur);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to open a cursor for block_info: ", result).c_str()));
result = mdb_cursor_open(txn, coins, &c_coins);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to open a cursor for block_coins: ", result).c_str()));
result = mdb_cursor_open(txn, diffs, &c_diffs);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to open a cursor for block_diffs: ", result).c_str()));
result = mdb_cursor_open(txn, hashes, &c_hashes);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to open a cursor for block_hashes: ", result).c_str()));
result = mdb_cursor_open(txn, sizes, &c_sizes);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to open a cursor for block_coins: ", result).c_str()));
result = mdb_cursor_open(txn, timestamps, &c_timestamps);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to open a cursor for block_timestamps: ", result).c_str()));
if (!i) {
MDB_stat ms;
mdb_stat(txn, m_block_info, &ms);
i = ms.ms_entries;
}
}
result = mdb_cursor_get(c_coins, &k, &v, MDB_NEXT);
if (result == MDB_NOTFOUND) {
break;
} else if (result)
throw0(DB_ERROR(lmdb_error("Failed to get a record from block_coins: ", result).c_str()));
bi.bi_height = *(uint64_t *)k.mv_data;
bi.bi_coins = *(uint64_t *)v.mv_data;
result = mdb_cursor_get(c_diffs, &k, &v, MDB_NEXT);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to get a record from block_diffs: ", result).c_str()));
bi.bi_diff = *(uint64_t *)v.mv_data;
result = mdb_cursor_get(c_hashes, &k, &v, MDB_NEXT);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to get a record from block_hashes: ", result).c_str()));
bi.bi_hash = *(crypto::hash *)v.mv_data;
result = mdb_cursor_get(c_sizes, &k, &v, MDB_NEXT);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to get a record from block_sizes: ", result).c_str()));
if (v.mv_size == sizeof(uint32_t))
bi.bi_size = *(uint32_t *)v.mv_data;
else
bi.bi_size = *(uint64_t *)v.mv_data; // this is a 32/64 compat bug in version 0
result = mdb_cursor_get(c_timestamps, &k, &v, MDB_NEXT);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to get a record from block_timestamps: ", result).c_str()));
bi.bi_timestamp = *(uint64_t *)v.mv_data;
result = mdb_cursor_put(c_cur, (MDB_val *)&zerokval, &nv, MDB_APPENDDUP);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to put a record into block_info: ", result).c_str()));
result = mdb_cursor_del(c_coins, 0);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to delete a record from block_coins: ", result).c_str()));
result = mdb_cursor_del(c_diffs, 0);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to delete a record from block_diffs: ", result).c_str()));
result = mdb_cursor_del(c_hashes, 0);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to delete a record from block_hashes: ", result).c_str()));
result = mdb_cursor_del(c_sizes, 0);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to delete a record from block_sizes: ", result).c_str()));
result = mdb_cursor_del(c_timestamps, 0);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to delete a record from block_timestamps: ", result).c_str()));
i++;
}
mdb_cursor_close(c_timestamps);
mdb_cursor_close(c_sizes);
mdb_cursor_close(c_hashes);
mdb_cursor_close(c_diffs);
mdb_cursor_close(c_coins);
result = mdb_drop(txn, timestamps, 1);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to delete block_timestamps from the db: ", result).c_str()));
result = mdb_drop(txn, sizes, 1);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to delete block_sizes from the db: ", result).c_str()));
result = mdb_drop(txn, hashes, 1);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to delete block_hashes from the db: ", result).c_str()));
result = mdb_drop(txn, diffs, 1);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to delete block_diffs from the db: ", result).c_str()));
result = mdb_drop(txn, coins, 1);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to delete block_coins from the db: ", result).c_str()));
txn.commit();
} while(0);
do {
LOG_PRINT_L1("migrating hf_versions:");
MDB_dbi o_hfv;
unsigned int flags;
result = mdb_txn_begin(m_env, NULL, 0, txn);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str()));
result = mdb_dbi_flags(txn, m_hf_versions, &flags);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to retrieve hf_versions flags: ", result).c_str()));
/* if the flags are what we expect, this table has already been migrated */
if (flags & MDB_INTEGERKEY) {
txn.abort();
LOG_PRINT_L1(" hf_versions already migrated");
break;
}
/* the hf_versions table name is the same but the old version and new version
* have incompatible DB flags. Create a new table with the right flags.
*/
o_hfv = m_hf_versions;
lmdb_db_open(txn, "hf_versionr", MDB_INTEGERKEY | MDB_CREATE, m_hf_versions, "Failed to open db handle for hf_versionr");
MDB_cursor *c_old, *c_cur;
i = 0;
z = m_height;
while(1) {
if (!(i % 2000)) {
if (i) {
LOGIF(el::Level::Info) {
std::cout << i << " / " << z << " \r" << std::flush;
}
txn.commit();
result = mdb_txn_begin(m_env, NULL, 0, txn);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str()));
}
result = mdb_cursor_open(txn, m_hf_versions, &c_cur);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to open a cursor for spent_keyr: ", result).c_str()));
result = mdb_cursor_open(txn, o_hfv, &c_old);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to open a cursor for spent_keys: ", result).c_str()));
if (!i) {
MDB_stat ms;
mdb_stat(txn, m_hf_versions, &ms);
i = ms.ms_entries;
}
}
result = mdb_cursor_get(c_old, &k, &v, MDB_NEXT);
if (result == MDB_NOTFOUND) {
txn.commit();
break;
}
else if (result)
throw0(DB_ERROR(lmdb_error("Failed to get a record from hf_versions: ", result).c_str()));
result = mdb_cursor_put(c_cur, &k, &v, MDB_APPEND);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to put a record into hf_versionr: ", result).c_str()));
result = mdb_cursor_del(c_old, 0);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to delete a record from hf_versions: ", result).c_str()));
i++;
}
result = mdb_txn_begin(m_env, NULL, 0, txn);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str()));
/* Delete the old table */
result = mdb_drop(txn, o_hfv, 1);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to delete old hf_versions table: ", result).c_str()));
RENAME_DB("hf_versionr");
mdb_dbi_close(m_env, m_hf_versions);
lmdb_db_open(txn, "hf_versions", MDB_INTEGERKEY, m_hf_versions, "Failed to open db handle for hf_versions");
txn.commit();
} while(0);
do {
LOG_PRINT_L1("deleting old indices:");
/* Delete all other tables, we're just going to recreate them */
MDB_dbi dbi;
result = mdb_txn_begin(m_env, NULL, 0, txn);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str()));
result = mdb_dbi_open(txn, "tx_unlocks", 0, &dbi);
if (result == MDB_NOTFOUND) {
txn.abort();
LOG_PRINT_L1(" old indices already deleted");
break;
}
txn.abort();
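  /* DELETE_DB drops a legacy table if it exists; a missing table is not an
   * error here, since a previously interrupted migration may already have
   * removed it.
   */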
#define DELETE_DB(x) do { \
LOG_PRINT_L1(" " x ":"); \
result = mdb_txn_begin(m_env, NULL, 0, txn); \
if (result) \
throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str())); \
result = mdb_dbi_open(txn, x, 0, &dbi); \
if (!result) { \
result = mdb_drop(txn, dbi, 1); \
if (result) \
throw0(DB_ERROR(lmdb_error("Failed to delete " x ": ", result).c_str())); \
txn.commit(); \
} } while(0)
DELETE_DB("tx_heights");
DELETE_DB("output_txs");
DELETE_DB("output_indices");
DELETE_DB("output_keys");
DELETE_DB("spent_keys");
DELETE_DB("output_amounts");
DELETE_DB("tx_outputs");
DELETE_DB("tx_unlocks");
/* reopen new DBs with correct flags */
result = mdb_txn_begin(m_env, NULL, 0, txn);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str()));
lmdb_db_open(txn, LMDB_OUTPUT_TXS, MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_output_txs, "Failed to open db handle for m_output_txs");
mdb_set_dupsort(txn, m_output_txs, compare_uint64);
lmdb_db_open(txn, LMDB_TX_OUTPUTS, MDB_INTEGERKEY | MDB_CREATE, m_tx_outputs, "Failed to open db handle for m_tx_outputs");
lmdb_db_open(txn, LMDB_SPENT_KEYS, MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_spent_keys, "Failed to open db handle for m_spent_keys");
mdb_set_dupsort(txn, m_spent_keys, compare_hash32);
lmdb_db_open(txn, LMDB_OUTPUT_AMOUNTS, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_output_amounts, "Failed to open db handle for m_output_amounts");
mdb_set_dupsort(txn, m_output_amounts, compare_uint64);
txn.commit();
} while(0);
do {
LOG_PRINT_L1("migrating txs and outputs:");
unsigned int flags;
result = mdb_txn_begin(m_env, NULL, 0, txn);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str()));
result = mdb_dbi_flags(txn, m_txs, &flags);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to retrieve txs flags: ", result).c_str()));
/* if the flags are what we expect, this table has already been migrated */
if (flags & MDB_INTEGERKEY) {
txn.abort();
LOG_PRINT_L1(" txs already migrated");
break;
}
MDB_dbi o_txs;
blobdata bd;
block b;
MDB_val hk;
o_txs = m_txs;
mdb_set_compare(txn, o_txs, compare_hash32);
lmdb_db_open(txn, "txr", MDB_INTEGERKEY | MDB_CREATE, m_txs, "Failed to open db handle for txr");
txn.commit();
MDB_cursor *c_blocks, *c_txs, *c_props, *c_cur;
i = 0;
z = m_height;
hk.mv_size = sizeof(crypto::hash);
set_batch_transactions(true);
batch_start(1000);
txn.m_txn = m_write_txn->m_txn;
m_height = 0;
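    /* Progress is checkpointed every 1000 blocks: the height reached so far is
     * stored under the "txblk" key in m_properties before each commit, so an
     * interrupted migration can resume where it left off. The local txn
     * aliases the batch write txn opened by batch_start() above, letting this
     * loop drive the commit cadence while add_transaction() reuses the normal
     * write-cursor machinery.
     */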
while(1) {
if (!(i % 1000)) {
if (i) {
LOGIF(el::Level::Info) {
std::cout << i << " / " << z << " \r" << std::flush;
}
MDB_val_set(pk, "txblk");
MDB_val_set(pv, m_height);
result = mdb_cursor_put(c_props, &pk, &pv, 0);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to update txblk property: ", result).c_str()));
txn.commit();
result = mdb_txn_begin(m_env, NULL, 0, txn);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str()));
m_write_txn->m_txn = txn.m_txn;
m_write_batch_txn->m_txn = txn.m_txn;
memset(&m_wcursors, 0, sizeof(m_wcursors));
}
result = mdb_cursor_open(txn, m_blocks, &c_blocks);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to open a cursor for blocks: ", result).c_str()));
result = mdb_cursor_open(txn, m_properties, &c_props);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to open a cursor for properties: ", result).c_str()));
result = mdb_cursor_open(txn, o_txs, &c_txs);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to open a cursor for txs: ", result).c_str()));
if (!i) {
MDB_stat ms;
mdb_stat(txn, m_txs, &ms);
i = ms.ms_entries;
if (i) {
MDB_val_set(pk, "txblk");
result = mdb_cursor_get(c_props, &pk, &k, MDB_SET);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to get a record from properties: ", result).c_str()));
m_height = *(uint64_t *)k.mv_data;
}
}
if (i) {
result = mdb_cursor_get(c_blocks, &k, &v, MDB_SET);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to get a record from blocks: ", result).c_str()));
}
}
result = mdb_cursor_get(c_blocks, &k, &v, MDB_NEXT);
if (result == MDB_NOTFOUND) {
MDB_val_set(pk, "txblk");
result = mdb_cursor_get(c_props, &pk, &v, MDB_SET);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to get a record from props: ", result).c_str()));
result = mdb_cursor_del(c_props, 0);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to delete a record from props: ", result).c_str()));
batch_stop();
break;
} else if (result)
throw0(DB_ERROR(lmdb_error("Failed to get a record from blocks: ", result).c_str()));
bd.assign(reinterpret_cast<char*>(v.mv_data), v.mv_size);
if (!parse_and_validate_block_from_blob(bd, b))
throw0(DB_ERROR("Failed to parse block from blob retrieved from the db"));
add_transaction(null_hash, b.miner_tx);
for (unsigned int j = 0; j<b.tx_hashes.size(); j++) {
transaction tx;
hk.mv_data = &b.tx_hashes[j];
result = mdb_cursor_get(c_txs, &hk, &v, MDB_SET);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to get record from txs: ", result).c_str()));
bd.assign(reinterpret_cast<char*>(v.mv_data), v.mv_size);
if (!parse_and_validate_tx_from_blob(bd, tx))
throw0(DB_ERROR("Failed to parse tx from blob retrieved from the db"));
add_transaction(null_hash, tx, &b.tx_hashes[j]);
result = mdb_cursor_del(c_txs, 0);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to get record from txs: ", result).c_str()));
}
i++;
m_height = i;
}
result = mdb_txn_begin(m_env, NULL, 0, txn);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str()));
result = mdb_drop(txn, o_txs, 1);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to delete txs from the db: ", result).c_str()));
RENAME_DB("txr");
mdb_dbi_close(m_env, m_txs);
lmdb_db_open(txn, "txs", MDB_INTEGERKEY, m_txs, "Failed to open db handle for txs");
txn.commit();
} while(0);
uint32_t version = 1;
v.mv_data = (void *)&version;
v.mv_size = sizeof(version);
MDB_val_copy<const char *> vk("version");
result = mdb_txn_begin(m_env, NULL, 0, txn);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str()));
result = mdb_put(txn, m_properties, &vk, &v, 0);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to update version for the db: ", result).c_str()));
txn.commit();
}
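// migrate() chains migration steps by falling through: each case upgrades one
// version and execution continues into the next case. A hypothetical future
// step would slot in below it (sketch only; migrate_1_2 does not exist in
// this file):
//
//   case 1:
//     migrate_1_2(); /* FALLTHRU */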
void BlockchainLMDB::migrate(const uint32_t oldversion)
{
switch(oldversion) {
case 0:
migrate_0_1(); /* FALLTHRU */
default:
;
}
}
} // namespace cryptonote