wownero/src/cryptonote_basic/difficulty.cpp

// Copyright (c) 2014-2019, The Monero Project
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be
// used to endorse or promote products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Parts of this file are originally copyright (c) 2012-2013 The Cryptonote developers
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>
#include <boost/math/special_functions/round.hpp>
#include "int-util.h"
#include "crypto/hash.h"
#include "cryptonote_config.h"
#include "difficulty.h"
#undef MONERO_DEFAULT_LOG_CATEGORY
#define MONERO_DEFAULT_LOG_CATEGORY "difficulty"
namespace cryptonote {
using std::size_t;
using std::uint64_t;
using std::vector;
#if defined(__x86_64__)
static inline void mul(uint64_t a, uint64_t b, uint64_t &low, uint64_t &high) {
  low = mul128(a, b, &high);
}
#else
static inline void mul(uint64_t a, uint64_t b, uint64_t &low, uint64_t &high) {
  // __int128 isn't part of the standard, so the previous function wasn't portable. mul128() in Windows is fine,
  // but this portable function should be used elsewhere. Credit for this function goes to latexi95.
  uint64_t aLow = a & 0xFFFFFFFF;
  uint64_t aHigh = a >> 32;
  uint64_t bLow = b & 0xFFFFFFFF;
  uint64_t bHigh = b >> 32;

  uint64_t res = aLow * bLow;
  uint64_t lowRes1 = res & 0xFFFFFFFF;
  uint64_t carry = res >> 32;

  res = aHigh * bLow + carry;
  uint64_t highResHigh1 = res >> 32;
  uint64_t highResLow1 = res & 0xFFFFFFFF;

  res = aLow * bHigh;
  uint64_t lowRes2 = res & 0xFFFFFFFF;
  carry = res >> 32;

  res = aHigh * bHigh + carry;
  uint64_t highResHigh2 = res >> 32;
  uint64_t highResLow2 = res & 0xFFFFFFFF;

  // Addition
  uint64_t r = highResLow1 + lowRes2;
  carry = r >> 32;
  low = (r << 32) | lowRes1;
  r = highResHigh1 + highResLow2 + carry;
  uint64_t d3 = r & 0xFFFFFFFF;
  carry = r >> 32;
  r = highResHigh2 + carry;
  high = d3 | (r << 32);
}
#endif
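
// Minimal self-test sketch (illustrative, not part of the original file):
// for a = b = 2^32 + 1, a * b = 2^64 + 2^33 + 1, so mul() must produce
// high == 1 and low == 2^33 + 1. Guarded out so the file builds unchanged.
#if 0
static void mul_self_test() {
  uint64_t low, high;
  mul(4294967297ULL, 4294967297ULL, low, high); // (2^32 + 1)^2 = 2^64 + 2^33 + 1
  assert(high == 1);
  assert(low == 8589934593ULL); // 2^33 + 1
}
#endif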
static inline bool cadd(uint64_t a, uint64_t b) {
  return a + b < a;
}

static inline bool cadc(uint64_t a, uint64_t b, bool c) {
  return a + b < a || (c && a + b == (uint64_t) -1);
}

bool check_hash_64(const crypto::hash &hash, uint64_t difficulty) {
  uint64_t low, high, top, cur;
  // First check the highest word, this will most likely fail for a random hash.
  mul(swap64le(((const uint64_t *) &hash)[3]), difficulty, top, high);
  if (high != 0) {
    return false;
  }
  mul(swap64le(((const uint64_t *) &hash)[0]), difficulty, low, cur);
  mul(swap64le(((const uint64_t *) &hash)[1]), difficulty, low, high);
  bool carry = cadd(cur, low);
  cur = high;
  mul(swap64le(((const uint64_t *) &hash)[2]), difficulty, low, high);
  carry = cadc(cur, low, carry);
  carry = cadc(high, top, carry);
  return !carry;
}
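
// check_hash_64() returns true iff, reading the hash as a little-endian
// 256-bit integer, hash * difficulty < 2^256 (equivalently,
// hash < 2^256 / difficulty): the per-limb multiplications above emulate a
// 256x64-bit product and the final carry flags any overflow past bit 255.
// Illustrative example: with difficulty = 2, any hash below 2^255 passes.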
uint64_t next_difficulty_64(std::vector<std::uint64_t> timestamps, std::vector<uint64_t> cumulative_difficulties, size_t target_seconds) {
  if (timestamps.size() > DIFFICULTY_WINDOW)
  {
    timestamps.resize(DIFFICULTY_WINDOW);
    cumulative_difficulties.resize(DIFFICULTY_WINDOW);
  }

  size_t length = timestamps.size();
  assert(length == cumulative_difficulties.size());
  if (length <= 1) {
    return 1;
  }
  static_assert(DIFFICULTY_WINDOW >= 2, "Window is too small");
  assert(length <= DIFFICULTY_WINDOW);
  sort(timestamps.begin(), timestamps.end());
  size_t cut_begin, cut_end;
  static_assert(2 * DIFFICULTY_CUT <= DIFFICULTY_WINDOW - 2, "Cut length is too large");
  if (length <= DIFFICULTY_WINDOW - 2 * DIFFICULTY_CUT) {
    cut_begin = 0;
    cut_end = length;
  } else {
    cut_begin = (length - (DIFFICULTY_WINDOW - 2 * DIFFICULTY_CUT) + 1) / 2;
    cut_end = cut_begin + (DIFFICULTY_WINDOW - 2 * DIFFICULTY_CUT);
  }
  assert(/*cut_begin >= 0 &&*/ cut_begin + 2 <= cut_end && cut_end <= length);
  uint64_t time_span = timestamps[cut_end - 1] - timestamps[cut_begin];
  if (time_span == 0) {
    time_span = 1;
  }
  uint64_t total_work = cumulative_difficulties[cut_end - 1] - cumulative_difficulties[cut_begin];
  assert(total_work > 0);
  uint64_t low, high;
  mul(total_work, target_seconds, low, high);
  // blockchain errors "difficulty overhead" if this function returns zero.
  // TODO: consider throwing an exception instead
  if (high != 0 || low + time_span - 1 < low) {
    return 0;
  }
  return (low + time_span - 1) / time_span;
}
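
// The returned value is ceil(total_work * target_seconds / time_span).
// Illustrative example: total_work = 1000000 over a 100-second cut window
// with target_seconds = 120 gives ceil(1000000 * 120 / 100) = 1200000.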
#if defined(_MSC_VER)
#ifdef max
#undef max
#endif
#endif
const difficulty_type max64bit(std::numeric_limits<std::uint64_t>::max());
const boost::multiprecision::uint256_t max128bit(std::numeric_limits<boost::multiprecision::uint128_t>::max());
const boost::multiprecision::uint512_t max256bit(std::numeric_limits<boost::multiprecision::uint256_t>::max());
#define FORCE_FULL_128_BITS
bool check_hash_128(const crypto::hash &hash, difficulty_type difficulty) {
#ifndef FORCE_FULL_128_BITS
  // fast check
  if (difficulty >= max64bit && ((const uint64_t *) &hash)[3] > 0)
    return false;
#endif
  // usual slow check
  boost::multiprecision::uint512_t hashVal = 0;
#ifdef FORCE_FULL_128_BITS
  for (int i = 0; i < 4; i++) { // full check: include the highest word
#else
  for (int i = 1; i < 4; i++) { // highest word is zero after the fast check
#endif
    hashVal <<= 64;
    hashVal |= swap64le(((const uint64_t *) &hash)[3 - i]);
  }
  return hashVal * difficulty <= max256bit;
}
bool check_hash(const crypto::hash &hash, difficulty_type difficulty) {
  if (difficulty <= max64bit) // if can convert to small difficulty - do it
    return check_hash_64(hash, difficulty.convert_to<std::uint64_t>());
  else
    return check_hash_128(hash, difficulty);
}
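
// Usage sketch (illustrative, variable names assumed): callers verify a
// block's proof-of-work against the current target with something like
//   if (!check_hash(pow_hash, current_diff)) { /* reject the block */ }
// The 64-bit fast path is taken whenever the difficulty fits in a uint64_t.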
difficulty_type next_difficulty(std::vector<uint64_t> timestamps, std::vector<difficulty_type> cumulative_difficulties, size_t target_seconds) {
  //cutoff DIFFICULTY_LAG
  if (timestamps.size() > DIFFICULTY_WINDOW)
  {
    timestamps.resize(DIFFICULTY_WINDOW);
    cumulative_difficulties.resize(DIFFICULTY_WINDOW);
  }

  size_t length = timestamps.size();
  assert(length == cumulative_difficulties.size());
  if (length <= 1) {
    return 1;
  }
  static_assert(DIFFICULTY_WINDOW >= 2, "Window is too small");
  assert(length <= DIFFICULTY_WINDOW);
  sort(timestamps.begin(), timestamps.end());
  size_t cut_begin, cut_end;
  static_assert(2 * DIFFICULTY_CUT <= DIFFICULTY_WINDOW - 2, "Cut length is too large");
  if (length <= DIFFICULTY_WINDOW - 2 * DIFFICULTY_CUT) {
    cut_begin = 0;
    cut_end = length;
  } else {
    cut_begin = (length - (DIFFICULTY_WINDOW - 2 * DIFFICULTY_CUT) + 1) / 2;
    cut_end = cut_begin + (DIFFICULTY_WINDOW - 2 * DIFFICULTY_CUT);
  }
  assert(/*cut_begin >= 0 &&*/ cut_begin + 2 <= cut_end && cut_end <= length);
  uint64_t time_span = timestamps[cut_end - 1] - timestamps[cut_begin];
  if (time_span == 0) {
    time_span = 1;
  }
  difficulty_type total_work = cumulative_difficulties[cut_end - 1] - cumulative_difficulties[cut_begin];
  assert(total_work > 0);
  boost::multiprecision::uint256_t res = (boost::multiprecision::uint256_t(total_work) * target_seconds + time_span - 1) / time_span;
  if (res > max128bit)
    return 0; // to behave like previous implementation, may be better return max128bit?
  return res.convert_to<difficulty_type>();
}
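
// Same ceiling division as next_difficulty_64, but carried out in 256-bit
// arithmetic so total_work * target_seconds cannot overflow. Illustrative
// example: total_work = 2^100 over time_span = 120 s at target_seconds = 120
// yields exactly 2^100; only results above 2^128 - 1 fall back to returning 0.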
std::string hex(difficulty_type v)
{
  static const char chars[] = "0123456789abcdef";
  std::string s;
  while (v > 0)
  {
    s.push_back(chars[(v & 0xf).convert_to<unsigned>()]);
    v >>= 4;
  }
  if (s.empty())
    s += "0";
  std::reverse(s.begin(), s.end());
  return "0x" + s;
}
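
// Illustrative examples: hex(difficulty_type(255)) returns "0xff" and
// hex(difficulty_type(0)) returns "0x0" (the empty-string case above).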
// LWMA difficulty algorithm
// Background: https://github.com/zawy12/difficulty-algorithms/issues/3
// Copyright (c) 2017-2018 Zawy
difficulty_type next_difficulty_v2(std::vector<std::uint64_t> timestamps, std::vector<difficulty_type> cumulative_difficulties, size_t target_seconds) {
  const int64_t T = static_cast<int64_t>(target_seconds);
  size_t N = DIFFICULTY_WINDOW_V2;

  if (timestamps.size() < 4) {
    return 1;
  } else if (timestamps.size() < N + 1) {
    N = timestamps.size() - 1;
  } else {
    timestamps.resize(N + 1);
    cumulative_difficulties.resize(N + 1);
  }

  const double adjust = 0.998;
  const double k = N * (N + 1) / 2;
  double LWMA(0), sum_inverse_D(0), harmonic_mean_D(0), nextDifficulty(0);
  int64_t solveTime(0);
  uint64_t difficulty(0), next_difficulty(0);

  for (size_t i = 1; i <= N; i++) {
    solveTime = static_cast<int64_t>(timestamps[i]) - static_cast<int64_t>(timestamps[i - 1]);
    solveTime = std::min<int64_t>((T * 7), std::max<int64_t>(solveTime, (-7 * T)));
    difficulty = static_cast<uint64_t>(cumulative_difficulties[i] - cumulative_difficulties[i - 1]);
    LWMA += (int64_t)(solveTime * i) / k;
    sum_inverse_D += 1 / static_cast<double>(difficulty);
  }
  harmonic_mean_D = N / sum_inverse_D;

  if (static_cast<int64_t>(boost::math::round(LWMA)) < T / 20)
    LWMA = static_cast<double>(T / 20);

  nextDifficulty = harmonic_mean_D * T / LWMA * adjust;
  next_difficulty = static_cast<uint64_t>(nextDifficulty);
  return next_difficulty;
}
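
// In formula form (illustrative restatement of the code above):
//   LWMA            = sum_{i=1..N} i * solveTime_i / (N*(N+1)/2)
//   harmonic_mean_D = N / sum_{i=1..N} (1 / D_i)
//   next_D          = harmonic_mean_D * T / LWMA * 0.998
// so blocks arriving faster than the weighted-average target spacing pull
// the difficulty up, and slower blocks pull it down.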
// LWMA-2
difficulty_type next_difficulty_v3(std::vector<uint64_t> timestamps, std::vector<difficulty_type> cumulative_difficulties) {
  int64_t T = DIFFICULTY_TARGET_V2;
  int64_t N = DIFFICULTY_WINDOW_V2;
  int64_t L(0), ST, sum_3_ST(0), next_D, prev_D;

  assert(timestamps.size() == cumulative_difficulties.size() && timestamps.size() <= static_cast<uint64_t>(N + 1));

  for (int64_t i = 1; i <= N; i++) {
    ST = static_cast<int64_t>(timestamps[i]) - static_cast<int64_t>(timestamps[i - 1]);
    ST = std::max(-4 * T, std::min(ST, 6 * T));
    L += ST * i;
    if (i > N - 3) {
      sum_3_ST += ST;
    }
  }

  next_D = (static_cast<int64_t>(cumulative_difficulties[N] - cumulative_difficulties[0]) * T * (N + 1) * 99) / (100 * 2 * L);
  prev_D = static_cast<int64_t>(cumulative_difficulties[N] - cumulative_difficulties[N - 1]);
  next_D = std::max((prev_D * 67) / 100, std::min(next_D, (prev_D * 150) / 100));

  if (sum_3_ST < (8 * T) / 10) {
    next_D = std::max(next_D, (prev_D * 108) / 100);
  }
  return static_cast<uint64_t>(next_D);
}
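
// Illustrative reading of the LWMA-2 clamps above: next_D is held within
// [67%, 150%] of the previous block's difficulty, and if the last three
// (clamped) solve times sum to under 0.8*T the result is bumped to at least
// 108% of prev_D; e.g. prev_D = 1000 with a burst of fast blocks gives
// next_D >= 1080.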
// LWMA-4
difficulty_type next_difficulty_v4(std::vector<uint64_t> timestamps, std::vector<difficulty_type> cumulative_difficulties, size_t height) {
  uint64_t T = DIFFICULTY_TARGET_V2;
  uint64_t N = DIFFICULTY_WINDOW_V2;
  uint64_t L(0), ST(0), next_D, prev_D, avg_D, i;

  assert(timestamps.size() == cumulative_difficulties.size() && timestamps.size() <= N + 1);

  if (height <= 63469 + 1) { return 100000069; }

  std::vector<uint64_t> TS(N + 1);
  TS[0] = timestamps[0];
  for (i = 1; i <= N; i++) {
    if (timestamps[i] > TS[i - 1]) { TS[i] = timestamps[i]; }
    else { TS[i] = TS[i - 1]; }
  }

  for (i = 1; i <= N; i++) {
    if (i > 4 && TS[i] - TS[i - 1] > 5 * T && TS[i - 1] - TS[i - 4] < (14 * T) / 10) { ST = 2 * T; }
    else if (i > 7 && TS[i] - TS[i - 1] > 5 * T && TS[i - 1] - TS[i - 7] < 4 * T) { ST = 2 * T; }
    else {
      ST = std::min(5 * T, TS[i] - TS[i - 1]);
    }
    L += ST * i;
  }
  if (L < N * N * T / 20) { L = N * N * T / 20; }
  avg_D = static_cast<uint64_t>((cumulative_difficulties[N] - cumulative_difficulties[0]) / N);

  if (avg_D > 2000000 * N * N * T) {
    next_D = (avg_D / (200 * L)) * (N * (N + 1) * T * 97);
  }
  else { next_D = (avg_D * N * (N + 1) * T * 97) / (200 * L); }

  prev_D = static_cast<uint64_t>(cumulative_difficulties[N] - cumulative_difficulties[N - 1]);

  if ((TS[N] - TS[N - 1] < (2 * T) / 10) ||
      (TS[N] - TS[N - 2] < (5 * T) / 10) ||
      (TS[N] - TS[N - 3] < (8 * T) / 10))
  {
    next_D = std::max(next_D, std::min((prev_D * 110) / 100, (105 * avg_D) / 100));
  }

  i = 1000000000;
  while (i > 1) {
    if (next_D > i * 100) { next_D = ((next_D + i / 2) / i) * i; break; }
    else { i /= 10; }
  }
  if (next_D > 100000) {
    next_D = ((next_D + 500) / 1000) * 1000 + std::min(static_cast<uint64_t>(999), (TS[N] - TS[N - 10]) / 10);
  }
  return static_cast<uint64_t>(next_D);
}
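
// Worked example of the rounding above (illustrative): next_D = 1234567
// first matches i = 10000 (1234567 > 1000000), giving
// ((1234567 + 5000) / 10000) * 10000 = 1230000; since that exceeds 100000,
// the final step re-rounds to the nearest 1000 and stores
// min(999, (TS[N] - TS[N-10]) / 10) in the low digits as a telltale of
// recent solve times.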
// LWMA-1 difficulty algorithm
// Copyright (c) 2017-2019 Zawy, MIT License
// https://github.com/zawy12/difficulty-algorithms/issues/3
difficulty_type next_difficulty_v5(std::vector<std::uint64_t> timestamps, std::vector<difficulty_type> cumulative_difficulties, uint64_t T, uint64_t N, uint64_t HEIGHT, uint64_t FORK_HEIGHT, uint64_t difficulty_guess) {
  assert(timestamps.size() == cumulative_difficulties.size() && timestamps.size() <= N + 1);

  if (HEIGHT >= FORK_HEIGHT && HEIGHT < FORK_HEIGHT + N) { return difficulty_guess; }
  assert(timestamps.size() == N + 1);

  uint64_t L(0), next_D, i, this_timestamp(0), previous_timestamp(0), avg_D;

  previous_timestamp = timestamps[0] - T;
  for (i = 1; i <= N; i++) {
    // Safely prevent out-of-sequence timestamps
    if (timestamps[i] > previous_timestamp) { this_timestamp = timestamps[i]; }
    else { this_timestamp = previous_timestamp + 1; }
    L += i * std::min(6 * T, this_timestamp - previous_timestamp);
    previous_timestamp = this_timestamp;
  }
  if (L < N * N * T / 20) { L = N * N * T / 20; }
  avg_D = static_cast<uint64_t>((cumulative_difficulties[N] - cumulative_difficulties[0]) / N);

  // Prevent round off error for small D and overflow for large D.
  if (avg_D > 2000000 * N * N * T) {
    next_D = (avg_D / (200 * L)) * (N * (N + 1) * T * 99);
  }
  else { next_D = (avg_D * N * (N + 1) * T * 99) / (200 * L); }

  // Make all insignificant digits zero for easy reading.
  i = 1000000000;
  while (i > 1) {
    if (next_D > i * 100) { next_D = ((next_D + i / 2) / i) * i; break; }
    else { i /= 10; }
  }
  return next_D;
}
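
// Usage sketch (illustrative, variable names assumed): a caller passes the
// chain's target spacing and averaging window, e.g.
//   next_difficulty_v5(ts, cum_diffs, DIFFICULTY_TARGET_V2,
//                      DIFFICULTY_WINDOW_V2, height, fork_height, guess);
// For the first N blocks after FORK_HEIGHT there is no full window of
// post-fork data, so the hard-coded difficulty_guess is returned instead.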
}