Aeon update

Check AEON-Notes.md
master
Tim Garrity 7 years ago
parent 71efdbf844
commit f32b121d1c

@ -0,0 +1,9 @@
# Notes about AEON coin
#### Special considerations for AEON
NodeJS-Pool and PoolUI were built with monero in mind. While a recent update has made the pool compatible with Cryptonight-lite, the UI itself, by another developer, was hardcoded for Monero. Due to this there's a few things that need to be changed for the pool to successfully work.
- aeond does not have a blockchain import function, so downloading a blockchain.bin and importing it is a bit difficult to do. I have included in my deploy.bash script a URL I'm providing to keep the blockchain up to date within at least a week's delay, but it's unusable at the moment.
- Until the PoolUI can be updated to allow for coins to be changed via a config, we're going to need to use a separate fork that I'll provide. I'll be working to try to include what I can to make the fork work for Monero AND aeon, but for the purpose here, my main goal will be for Aeon's compatibility.
- NodeJS-Pool is a bit more complex than CryptoNote Uni Pool, but it appears to be working just fine for our purposes. I highly recommend testing it for 1-2 blocks if possible.

@ -1,3 +1,11 @@
SUPER IMPORTANT UPDATE UNTIL THIS UPDATE DISAPPEARS
===================================================
None of the following applies if you installed the pool AFTER June 2nd 2017, as the installers will do this work for you.
The pool currently uses a version of LMDB that is not supported in Ubuntu 16.04 at this time. Please run: bash deployment/install_lmdb_tools.sh once from the root of the installation to load the LMDB tools, this will put them somewhere handy on your path, and drop a new alias to them so they can be used to introspect your database.
If you had installed the pool prior to 6/2/2017, PLEASE make sure you run an npm install before you restart services.
Pool Design/Theory
==================
The nodejs-pool is built around a small series of core daemons that share access to a single LMDB table for tracking of shares, with MySQL being used to centralize configurations and ensure simple access from local/remote nodes. The core daemons follow:

@ -0,0 +1,13 @@
# systemd service unit for the AEON coin daemon.
# Installed by the deploy script (see the commented-out aeon section in deploy.bash,
# which copies this file to /lib/systemd/system/).
[Unit]
Description=Aeon Daemon
# Start only once basic networking is available.
After=network.target
[Service]
# The daemon backgrounds itself via --detach, so systemd must track the forked child.
Type=forking
GuessMainPID=no
# RPC is bound to loopback only and restricted, so only local pool daemons can reach it.
# NOTE(review): the binary is named "aeon" here, while the accompanying notes refer to
# "aeond" -- confirm this matches what the build actually produces in build/release/bin/.
ExecStart=/usr/local/src/aeon/build/release/bin/aeon --rpc-bind-ip 127.0.0.1 --detach --restricted-rpc
Restart=always
# Runs as the unprivileged user created by the deploy script.
User=aeondaemon
[Install]
WantedBy=multi-user.target

@ -0,0 +1,96 @@
#!/bin/bash
# Green-field installer for NodeJS-Pool (AEON variant).
# Sets up: UTC timezone, MySQL (random root password), build dependencies,
# NVM + Node v6.9.2, the pool itself, the PoolUI front-end, and the Caddy
# web server. Run as a regular user with sudo rights -- NOT as root.
echo "This assumes that you are doing a green-field install. If you're not, please exit in the next 15 seconds."
sleep 15
echo "Continuing install, this will prompt you for your password if you're not already running as root and you didn't enable passwordless sudo. Please do not run me as root!"
if [[ $(whoami) == "root" ]]; then
    echo "You ran me as root! Do not run me as root!"
    exit 1
fi
# Random 32-char alphanumeric MySQL root password (tr reads urandom directly;
# no need to pipe through cat).
ROOT_SQL_PASS=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 32 | head -n 1)
CURUSER=$(whoami)
# Force the host onto UTC so timestamps are consistent across pool nodes.
echo "Etc/UTC" | sudo tee -a /etc/timezone
sudo rm -rf /etc/localtime
sudo ln -s /usr/share/zoneinfo/Zulu /etc/localtime
sudo dpkg-reconfigure -f noninteractive tzdata
sudo apt-get update
sudo DEBIAN_FRONTEND=noninteractive apt-get -y upgrade
# Pre-seed the MySQL root password so package install is fully non-interactive,
# and drop a /root/.my.cnf so later root mysql invocations need no password.
sudo debconf-set-selections <<< "mysql-server mysql-server/root_password password $ROOT_SQL_PASS"
sudo debconf-set-selections <<< "mysql-server mysql-server/root_password_again password $ROOT_SQL_PASS"
echo -e "[client]\nuser=root\npassword=$ROOT_SQL_PASS" | sudo tee /root/.my.cnf
sudo DEBIAN_FRONTEND=noninteractive apt-get -y install git python-virtualenv python3-virtualenv curl ntp build-essential screen cmake pkg-config libboost-all-dev libevent-dev libunbound-dev libminiupnpc-dev libunwind8-dev liblzma-dev libldns-dev libexpat1-dev libgtest-dev mysql-server lmdb-utils libzmq3-dev
# Ubuntu ships gtest as source only -- build and install the libraries by hand.
cd /usr/src/gtest
sudo cmake .
sudo make
sudo mv libg* /usr/lib/
cd ~
# --- AEON daemon build/bootstrap: currently disabled (see AEON-Notes.md) ---
#sudo systemctl enable ntp
#cd /usr/local/src
#sudo git clone https://github.com/aeonix/aeon.git
#cd aeon
#sudo git checkout v0.9.14.0
#sudo make -j$(nproc)
#sudo cp ~/nodejs-pool/deployment/aeon.service /lib/systemd/system/
#sudo useradd -m aeondaemon -d /home/aeondaemon
#sudo -u aeondaemon mkdir /home/aeondaemon/.aeon
#BLOCKCHAIN_DOWNLOAD_DIR=$(sudo -u aeondaemon mktemp -d)
#sudo -u aeondaemon wget --limit-rate=50m -O $BLOCKCHAIN_DOWNLOAD_DIR/blockchain.bin http://74.208.156.45/blockchain.raw
#sudo -u aeondaemon mv $BLOCKCHAIN_DOWNLOAD_DIR/blockchain.bin /home/aeondaemon/.aeon/blockchain.bin
#sudo -u aeondaemon rm -rf $BLOCKCHAIN_DOWNLOAD_DIR
#sudo systemctl daemon-reload
#sudo systemctl enable aeon
#sudo systemctl start aeon
# --- end disabled AEON section ---
# Install NVM and the Node version the pool is pinned to.
curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.33.0/install.sh | bash
source ~/.nvm/nvm.sh
nvm install v6.9.2
cd ~/nodejs-pool
npm install
npm install -g pm2
# Self-signed certificate for the pool's TLS ports (~100-year validity).
openssl req -subj "/C=IT/ST=Pool/L=Daemon/O=Mining Pool/CN=mining.pool" -newkey rsa:2048 -nodes -keyout cert.key -x509 -out cert.pem -days 36500
mkdir ~/pool_db/
# Point the pool's LMDB storage path at this user's home directory.
sed -r "s/(\"db_storage_path\": ).*/\1\"\/home\/$CURUSER\/pool_db\/\",/" config_example.json > config.json
cd ~
# Build the PoolUI front-end and expose the build output at /var/www.
git clone https://github.com/mesh0000/poolui.git
cd poolui
npm install
./node_modules/bower/bin/bower update
./node_modules/gulp/bin/gulp.js build
cd build
sudo ln -s $(pwd) /var/www
# Fetch and install the Caddy web server plus its systemd unit.
CADDY_DOWNLOAD_DIR=$(mktemp -d)
cd "$CADDY_DOWNLOAD_DIR"
curl -sL "https://snipanet.com/caddy.tar.gz" | tar -xz caddy init/linux-systemd/caddy.service
sudo mv caddy /usr/local/bin
sudo chown root:root /usr/local/bin/caddy
sudo chmod 755 /usr/local/bin/caddy
# Allow Caddy to bind ports 80/443 without running as root.
sudo setcap 'cap_net_bind_service=+ep' /usr/local/bin/caddy
sudo groupadd -g 33 www-data
sudo useradd -g www-data --no-user-group --home-dir /var/www --no-create-home --shell /usr/sbin/nologin --system --uid 33 www-data
sudo mkdir /etc/caddy
sudo chown -R root:www-data /etc/caddy
sudo mkdir /etc/ssl/caddy
sudo chown -R www-data:root /etc/ssl/caddy
sudo chmod 0770 /etc/ssl/caddy
sudo cp ~/nodejs-pool/deployment/caddyfile /etc/caddy/Caddyfile
sudo chown www-data:www-data /etc/caddy/Caddyfile
sudo chmod 444 /etc/caddy/Caddyfile
# ProtectHome must be off so Caddy can follow the /var/www symlink into $HOME.
sudo sh -c "sed 's/ProtectHome=true/ProtectHome=false/' init/linux-systemd/caddy.service > /etc/systemd/system/caddy.service"
sudo chown root:root /etc/systemd/system/caddy.service
sudo chmod 644 /etc/systemd/system/caddy.service
sudo systemctl daemon-reload
sudo systemctl enable caddy.service
sudo systemctl start caddy.service
rm -rf "$CADDY_DOWNLOAD_DIR"
cd ~
# Register pm2 to start at boot for this user.
sudo env PATH=$PATH:$(pwd)/.nvm/versions/node/v6.9.2/bin $(pwd)/.nvm/versions/node/v6.9.2/lib/node_modules/pm2/bin/pm2 startup systemd -u $CURUSER --hp $(pwd)
cd ~/nodejs-pool
sudo chown -R $CURUSER. ~/.pm2
echo "Installing pm2-logrotate in the background!"
pm2 install pm2-logrotate &
# Seed the pool database and generate per-install API secrets.
mysql -u root --password=$ROOT_SQL_PASS < deployment/base.sql
mysql -u root --password=$ROOT_SQL_PASS pool -e "INSERT INTO pool.config (module, item, item_value, item_type, Item_desc) VALUES ('api', 'authKey', '`cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1`', 'string', 'Auth key sent with all Websocket frames for validation.')"
mysql -u root --password=$ROOT_SQL_PASS pool -e "INSERT INTO pool.config (module, item, item_value, item_type, Item_desc) VALUES ('api', 'secKey', '`cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1`', 'string', 'HMAC key for Passwords. JWT Secret Key. Changing this will invalidate all current logins.')"
pm2 start init.js --name=api --log-date-format="YYYY-MM-DD HH:mm Z" -- --module=api
bash ~/nodejs-pool/deployment/install_lmdb_tools.sh
cd ~/nodejs-pool/sql_sync/
# BUGFIX: node lives under $HOME/.nvm, but the cwd at this point is
# ~/nodejs-pool/sql_sync -- the original built PATH from `pwd`, producing a
# nonexistent .nvm path, so `node` was only found if already on PATH.
env PATH=$PATH:$HOME/.nvm/versions/node/v6.9.2/bin node sql_sync.js
echo "You're setup! Please read the rest of the readme for the remainder of your setup and configuration. These steps include: Setting your Fee Address, Pool Address, Global Domain, and the Mailgun setup!"

@ -517,115 +517,38 @@ function Database(){
txn.commit();
};
this.getOldestLockedBlock = function(){
/*
6-29-2017 - Snipa -
This function returns a decompressed block proto for the first locked block in the system as part of the
share depth functions. DO NOT BLINDLY REPLACE getLastBlock WITH THIS FUNCTION.
*/
// Re-open/refresh the LMDB environment handle before scanning.
this.refreshEnv();
debug("Getting the oldest locked block in the system");
// Read-only transaction: this method only scans blockDB, it never writes.
let txn = this.env.beginTxn({readOnly: true});
let cursor = new this.lmdb.Cursor(txn, this.blockDB);
// NOTE(review): despite the name, this holds the FIRST block with
// unlocked === false found while scanning from the first key onward --
// i.e. the oldest locked block, not the highest one.
let highestBlock = null;
for (let found = cursor.goToFirst(); found; found = cursor.goToNext()) {
// A locked block was stored by the previous iteration's callback; stop scanning.
if (highestBlock !== null){
break;
}
// getCurrentBinary invokes the callback synchronously with the raw value,
// so the assignment below is visible before the loop's next iteration.
cursor.getCurrentBinary(function(key, data){ // jshint ignore:line
let blockData = global.protos.Block.decode(data);
if (blockData.unlocked === false){
highestBlock = blockData;
}
});
}
cursor.close();
txn.commit();
if (highestBlock !== null) {
// Logs the full decoded block proto, not just the height.
debug("Got the oldest locked block in the system at height: " + JSON.stringify(highestBlock));
} else {
// NOTE(review): the message says "unlocked", but reaching this branch
// means no LOCKED blocks remain -- every block has been unlocked.
debug("There are no unlocked blocks in the system. Woohoo!");
}
// Returns the decoded Block proto, or null when no locked block exists.
return highestBlock;
};
this.cleanShareDB = function() {
/*
This function takes the difficulty of the current block, and the last PPS block. If it's 0, save everything,
UNLESS global.config.pps.enable is FALSE, then feel free to trash it.
Due to LMDB under current config, we must delete entire keys, due to this, we save diff * shareMulti * 1.3
6/29/2017 - Fixed bug with the way the system got blocks. getLastBlock gets the most recent block.
getOldestLockedBlock gets the oldest block in the system that's locked. This ensures that shares for that block
can't be destroyed, and that there's enough depth past that point to ensure the system will have the ability
to make payouts based on the shareMultiLog. Thanks suhz for finding this. Sorry it hit your aeon pool hard.
:( -- Snipa
If current_height - global.payout.blocksRequired > lastLockedBlock, then set the scan start to
current_height - global.payout.blocksRequired - 1 so that we have the full block in case of PPS.
Otherwise, use the lastPPLNSBlock as the scan start. There we go. Stupid logic!
Math check!
cur_height = 100, blocksRequired=20, lastPPLNSLockedBlock.height=90
In this case, the functional depth required for SOLO is 80 - 1, giving us 79 as our start
cur_height = 100, blocksRequired=20, lastPPLNSLockedBlock.height=70
In this case, the PPLNS locked block is older than the current height - the required amount, so start is 70.
PPS height no longer matters! Yay!
Solo really doesn't matter, as block finder gets everything.
If there is no valid locked block to start from, aka all blocks are unlocked, then scan from the current height
of the chain, as there's no way for the system to have older blocks. We only need to save extra in the case
where there's unlocked blocks. A find on the current block will have enough depth as long as the saves are
correct. This will cause the system to clean up shares massively when there are no unlocked blocks.
This function takes the difficulty of the current block, and the last PPS block. If it's 0, save everything,
UNLESS global.config.pps.enable is FALSE, then feel free to trash it.
Due to LMDB under current config, we must delete entire keys, due to this, we save diff * shareMultiLog * 1.5
global.config.pplns.shareMultiLog should be at least 1.5x your shareMulti, in case of diff spikiing
*/
let oldestLockedBlock = this.getOldestLockedBlock();
let lastPPSBlock = this.getLastBlock();
if (global.config.pps.enable){
lastPPSBlock = this.getLastBlock(global.protos.POOLTYPE.PPS);
if (lastPPSBlock === 0){
return;
}
}
let lastPPLNSBlock = this.getLastBlock(global.protos.POOLTYPE.PPLNS);
debug("Last PPS block: "+lastPPSBlock);
// Hopping into async, we need the current block height to know where to start our indexing...
async.waterfall([
function(callback){
global.coinFuncs.getBlockHeaderByHash(oldestLockedBlock.hash, (err, result) => {
oldestLockedBlock.height = result.height;
console.log(`Got the oldest block`);
callback(null, oldestLockedBlock);
});
},
function(oldestLockedBlock, callback){
global.coinFuncs.getLastBlockHeader(function(err, body){
if (oldestLockedBlock === null){
/*
If there's no locked blocks, then allow the system to scan from the PPS depth downwards if PPS
is enabled.
Save enough shares so that the diff * share multi * 30% for buffer.
*/
if (global.config.pps.enable){
// If PPS is enabled, we scan for new blocks at cur height - blocksRequired/2.
// We need to save shares back that far at the least.
callback(null, body.height - Math.floor(global.config.payout.blocksRequired/2), Math.floor(body.difficulty * global.config.pplns.shareMulti * 1.3));
} else {
// Otherwise, we can just start from the current height. Woo!
callback(null, body.height, Math.floor(body.difficulty * global.config.pplns.shareMulti * 1.3));
}
} else {
/*
Otherwise, start the scan from the oldest locked block downwards.
This protects against the blockManager being messed up and not unlocking blocks.
This will ensure that enough shares are in place to unlock all blocks.
If the block is Solo, PPLNS or PPS, it doesn't matter.
*/
if (global.config.pps.enable && oldestLockedBlock.height > body.height - Math.floor(global.config.payout.blocksRequired/2)) {
// If PPS is enabled, and the oldestLockedBlock.height > the PPS minimum, start from the PPS minimum.
callback(null, body.height - Math.floor(global.config.payout.blocksRequired/2), Math.floor(body.difficulty * global.config.pplns.shareMulti * 1.3));
} else {
// If PPS isn't enabled, or the oldestLockedBlock.height < the PPS minimum, then start from there.
callback(null, oldestLockedBlock.height, Math.floor(oldestLockedBlock.difficulty * global.config.pplns.shareMulti * 1.3));
}
if (err) {
return callback(true, body);
}
return callback(null, body.height, Math.floor(body.difficulty * 1.5 * global.config.pplns.shareMultiLog));
});
},
function (lastBlock, difficulty, callback) {
let shareCount = 0;
let ppsFound = false;
let pplnsFound = false;
let blockList = [];
console.log("Scanning from: "+lastBlock + " for more than: " + difficulty + " shares");
debug("Scanning from: "+lastBlock + " for more than: " + difficulty + " shares");
range.range(0, lastBlock+1).forEach(function (blockID) {
blockID = (blockID - lastBlock+1) * -1;
if (blockID < 0){
@ -635,7 +558,7 @@ function Database(){
let txn = global.database.env.beginTxn({readOnly: true});
let cursor = new global.database.lmdb.Cursor(txn, global.database.shareDB);
for (let found = (cursor.goToRange(blockID) === blockID); found; found = cursor.goToNextDup()) {
if (pplnsFound){
if (ppsFound && pplnsFound){
cursor.getCurrentBinary(function(key, data) { // jshint ignore:line
if (blockList.indexOf(key) === -1){
blockList.push(key);
@ -643,6 +566,9 @@ function Database(){
});
} else {
cursor.getCurrentBinary(function(key, data) { // jshint ignore:line
if (key < lastPPSBlock){
ppsFound = true;
}
try{
let shareData = global.protos.Share.decode(data);
if (shareData.poolType === global.protos.POOLTYPE.PPLNS){
@ -663,11 +589,19 @@ function Database(){
callback(null, blockList);
}
], function(err, data){
if (global.config.general.blockCleaner === true){
if (err === null && global.config.general.blockCleaner === true){
if(data.length > 0){
global.database.refreshEnv();
let blockList = global.database.getBlockList();
debug("Got the block list");
let totalDeleted = 0;
data.forEach(function(block){
if ((blockList.indexOf(block) !== -1 && !blockList.unlocked) || block > lastPPLNSBlock){
// Don't delete locked blocks. ffs.
// Don't delete blocks that could contain shares. Even if it's unlikely as all getout.
debug("Skipped deleting block: " + block);
return;
}
totalDeleted += 1;
let txn = global.database.env.beginTxn();
txn.del(global.database.shareDB, block);
@ -679,11 +613,12 @@ function Database(){
global.database.env.sync(function(){
});
} else {
console.log("Block cleaning disabled. Would have removed: " + JSON.stringify(data));
console.log("Block cleaning disabled. Would of removed: " + JSON.stringify(data));
}
});
};
this.refreshEnv = function(){};
setInterval(function(){

Loading…
Cancel
Save