Code generator refactoring

feature/light-code-gen
tevador 5 years ago
parent 37ff37cd11
commit 24a22c6b54

@ -31,40 +31,10 @@ along with RandomX. If not, see<http://www.gnu.org/licenses/>.
namespace RandomX {
namespace LightInstructionOpcode {
constexpr int IADD_RS = 0;
constexpr int IADD_RC = RANDOMX_FREQ_IADD_RS + RANDOMX_FREQ_IADD_M;
constexpr int ISUB_R = IADD_RC + RANDOMX_FREQ_IADD_RC;
constexpr int IMUL_9C = ISUB_R + RANDOMX_FREQ_ISUB_R + RANDOMX_FREQ_ISUB_M;
constexpr int IMUL_R = IMUL_9C + RANDOMX_FREQ_IMUL_9C;
constexpr int IMULH_R = IMUL_R + RANDOMX_FREQ_IMUL_R + RANDOMX_FREQ_IMUL_M;
constexpr int ISMULH_R = IMULH_R + RANDOMX_FREQ_IMULH_R + RANDOMX_FREQ_IMULH_M;
constexpr int IMUL_RCP = ISMULH_R + RANDOMX_FREQ_ISMULH_R + RANDOMX_FREQ_ISMULH_M;
constexpr int IXOR_R = IMUL_RCP + RANDOMX_FREQ_IMUL_RCP + RANDOMX_FREQ_INEG_R;
constexpr int IROR_R = IXOR_R + RANDOMX_FREQ_IXOR_R + RANDOMX_FREQ_IXOR_M;
constexpr int COND_R = IROR_R + RANDOMX_FREQ_IROR_R + RANDOMX_FREQ_IROL_R + RANDOMX_FREQ_ISWAP_R + RANDOMX_FREQ_FSWAP_R + RANDOMX_FREQ_FADD_R + RANDOMX_FREQ_FADD_M + RANDOMX_FREQ_FSUB_R + RANDOMX_FREQ_FSUB_M + RANDOMX_FREQ_FSCAL_R + RANDOMX_FREQ_FMUL_R + RANDOMX_FREQ_FDIV_M + RANDOMX_FREQ_FSQRT_R;
}
static bool isMul(int type) {
return type == LightInstructionType::IMUL_R || type == LightInstructionType::IMULH_R || type == LightInstructionType::ISMULH_R || type == LightInstructionType::IMUL_RCP;
}
const int lightInstructionOpcode[] = {
LightInstructionOpcode::IADD_RS,
LightInstructionOpcode::ISUB_R, //ISUB_R
LightInstructionOpcode::ISUB_R, //ISUB_C
LightInstructionOpcode::IMUL_R, //IMUL_R
LightInstructionOpcode::IMUL_R, //IMUL_C
LightInstructionOpcode::IMULH_R,
LightInstructionOpcode::ISMULH_R,
LightInstructionOpcode::IMUL_RCP,
LightInstructionOpcode::IXOR_R, //IXOR_R
LightInstructionOpcode::IXOR_R, //IXOR_C
LightInstructionOpcode::IROR_R, //IROR_R
LightInstructionOpcode::IROR_R, //IROR_C
LightInstructionOpcode::COND_R
};
namespace ExecutionPort {
using type = int;
constexpr type Null = 0;
@ -144,31 +114,11 @@ namespace RandomX {
bool isDependent() const {
return dependent_;
}
int getCycle() const {
return cycle_;
}
void setCycle(int cycle) {
cycle_ = cycle;
}
MacroOp* getSrcDep() const {
return depSrc_;
}
void setSrcDep(MacroOp* src) {
depSrc_ = src;
}
MacroOp* getDstDep() const {
return depDst_;
}
void setDstDep(MacroOp* dst) {
depDst_ = dst;
}
static const MacroOp Add_rr;
static const MacroOp Add_ri;
static const MacroOp Lea_sib;
static const MacroOp Sub_rr;
static const MacroOp Sub_ri;
static const MacroOp Imul_rr;
static const MacroOp Imul_rri;
static const MacroOp Imul_r;
static const MacroOp Mul_r;
static const MacroOp Mov_rr;
@ -193,21 +143,28 @@ namespace RandomX {
MacroOp* depSrc_ = nullptr;
};
//Size: 3 bytes
const MacroOp MacroOp::Add_rr = MacroOp("add r,r", 3, 1, ExecutionPort::P015);
const MacroOp MacroOp::Add_ri = MacroOp("add r,i", 7, 1, ExecutionPort::P015);
const MacroOp MacroOp::Lea_sib = MacroOp("lea r,r+r*s", 4, 1, ExecutionPort::P01);
const MacroOp MacroOp::Sub_rr = MacroOp("sub r,r", 3, 1, ExecutionPort::P015);
const MacroOp MacroOp::Sub_ri = MacroOp("sub r,i", 7, 1, ExecutionPort::P015);
const MacroOp MacroOp::Imul_rr = MacroOp("imul r,r", 4, 3, ExecutionPort::P1);
const MacroOp MacroOp::Imul_rri = MacroOp("imul r,r,i", 7, 3, ExecutionPort::P1);
const MacroOp MacroOp::Xor_rr = MacroOp("xor r,r", 3, 1, ExecutionPort::P015);
const MacroOp MacroOp::Imul_r = MacroOp("imul r", 3, 4, ExecutionPort::P1, ExecutionPort::P5);
const MacroOp MacroOp::Mul_r = MacroOp("mul r", 3, 3, ExecutionPort::P1, ExecutionPort::P5);
const MacroOp MacroOp::Mov_rr = MacroOp("mov r,r", 3);
const MacroOp MacroOp::Mov_ri64 = MacroOp("mov rax,i64", 10, 1, ExecutionPort::P015);
const MacroOp MacroOp::Xor_rr = MacroOp("xor r,r", 3, 1, ExecutionPort::P015);
//Size: 4 bytes
const MacroOp MacroOp::Lea_sib = MacroOp("lea r,r+r*s", 4, 1, ExecutionPort::P01);
const MacroOp MacroOp::Imul_rr = MacroOp("imul r,r", 4, 3, ExecutionPort::P1);
const MacroOp MacroOp::Ror_ri = MacroOp("ror r,i", 4, 1, ExecutionPort::P05);
//Size: 7 bytes (can optionally be padded with a nop to 8 or 9 bytes)
const MacroOp MacroOp::Add_ri = MacroOp("add r,i", 7, 1, ExecutionPort::P015);
const MacroOp MacroOp::Xor_ri = MacroOp("xor r,i", 7, 1, ExecutionPort::P015);
//Size: 10 bytes
const MacroOp MacroOp::Mov_ri64 = MacroOp("mov rax,i64", 10, 1, ExecutionPort::P015);
//Unused:
const MacroOp MacroOp::Ror_rcl = MacroOp("ror r,cl", 3, 1, ExecutionPort::P0, ExecutionPort::P5);
const MacroOp MacroOp::Ror_ri = MacroOp("ror r,i", 4, 1, ExecutionPort::P05);
const MacroOp MacroOp::Xor_self = MacroOp("xor rcx,rcx", 3);
const MacroOp MacroOp::Cmp_ri = MacroOp("cmp r,i", 7, 1, ExecutionPort::P015);
const MacroOp MacroOp::Setcc_r = MacroOp("setcc cl", 3, 1, ExecutionPort::P05);
@ -216,36 +173,9 @@ namespace RandomX {
const MacroOp IMULH_R_ops_array[] = { MacroOp::Mov_rr, MacroOp::Mul_r, MacroOp::Mov_rr };
const MacroOp ISMULH_R_ops_array[] = { MacroOp::Mov_rr, MacroOp::Imul_r, MacroOp::Mov_rr };
const MacroOp IMUL_RCP_ops_array[] = { MacroOp::Mov_ri64, MacroOp(MacroOp::Imul_rr, true) };
const MacroOp IROR_R_ops_array[] = { MacroOp::Mov_rr, MacroOp::Ror_rcl };
const MacroOp COND_R_ops_array[] = { MacroOp::Add_ri, MacroOp(MacroOp::TestJz_fused, true), MacroOp::Xor_self, MacroOp::Cmp_ri, MacroOp(MacroOp::Setcc_r, true), MacroOp(MacroOp::Add_rr, true) };
class LightInstructionInfo {
public:
LightInstructionInfo(const char* name, int type, const MacroOp& op, int srcOp)
: name_(name), type_(type), latency_(op.getLatency()), srcOp_(srcOp) {
ops_.push_back(MacroOp(op));
}
template <size_t N>
LightInstructionInfo(const char* name, int type, const MacroOp(&arr)[N], int resultOp, int dstOp, int srcOp)
: name_(name), type_(type), latency_(0), resultOp_(resultOp), dstOp_(dstOp), srcOp_(srcOp) {
for (unsigned i = 0; i < N; ++i) {
ops_.push_back(MacroOp(arr[i]));
latency_ += ops_.back().getLatency();
}
static_assert(N > 1, "Invalid array size");
}
template <size_t N>
LightInstructionInfo(const char* name, int type, const MacroOp*(&arr)[N], int latency, int resultOp, int dstOp, int srcOp)
: name_(name), type_(type), latency_(latency), resultOp_(resultOp), dstOp_(dstOp), srcOp_(srcOp) {
for (unsigned i = 0; i < N; ++i) {
ops_.push_back(MacroOp(arr[i]));
if (arr[i].isDependent()) {
ops_[i].setSrcDep(&ops_[i - 1]);
}
}
static_assert(N > 1, "Invalid array size");
}
const char* getName() const {
return name_;
}
@ -258,7 +188,7 @@ namespace RandomX {
int getLatency() const {
return latency_;
}
MacroOp& getOp(int index) {
const MacroOp& getOp(int index) const {
return ops_[index];
}
int getType() const {
@ -299,6 +229,19 @@ namespace RandomX {
LightInstructionInfo(const char* name)
: name_(name), type_(-1), latency_(0) {}
LightInstructionInfo(const char* name, int type, const MacroOp& op, int srcOp)
: name_(name), type_(type), latency_(op.getLatency()), srcOp_(srcOp) {
ops_.push_back(MacroOp(op));
}
template <size_t N>
LightInstructionInfo(const char* name, int type, const MacroOp(&arr)[N], int resultOp, int dstOp, int srcOp)
: name_(name), type_(type), latency_(0), resultOp_(resultOp), dstOp_(dstOp), srcOp_(srcOp) {
for (unsigned i = 0; i < N; ++i) {
ops_.push_back(MacroOp(arr[i]));
latency_ += ops_.back().getLatency();
}
static_assert(N > 1, "Invalid array size");
}
};
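//Worked example (not part of this commit): the array constructor above accumulates the
//latencies of the individual macro-ops, so IMUL_RCP_ops_array yields
//Mov_ri64 (latency 1) + Imul_rr (latency 3) = 4 cycles, while the single-op constructor
//simply copies its macro-op's latency (e.g. Sub_rr -> 1 cycle for ISUB_R).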
const LightInstructionInfo LightInstructionInfo::ISUB_R = LightInstructionInfo("ISUB_R", LightInstructionType::ISUB_R, MacroOp::Sub_rr, 0);
@ -320,13 +263,6 @@ namespace RandomX {
const LightInstructionInfo LightInstructionInfo::NOP = LightInstructionInfo("NOP");
const int buffer0[] = { 4, 8, 4 };
const int buffer1[] = { 7, 3, 3, 3 };
const int buffer2[] = { 3, 7, 3, 3 };
const int buffer3[] = { 4, 9, 3 };
const int buffer4[] = { 4, 4, 4, 4 };
const int buffer5[] = { 3, 3, 10 };
class DecoderBuffer {
public:
static const DecoderBuffer Default;
@ -346,13 +282,22 @@ namespace RandomX {
return name_;
}
const DecoderBuffer* fetchNext(int instrType, int cycle, int mulCount, Blake2Generator& gen) const {
//If the current RandomX instruction is "IMULH", the next fetch configuration must be 3-3-10
//because the full 128-bit multiplication instruction is 3 bytes long and decodes to 2 uOPs on Intel CPUs.
//Intel CPUs can decode at most 4 uOPs per cycle, so this requires a 2-1-1 configuration for a total of 3 macro ops.
if (instrType == LightInstructionType::IMULH_R || instrType == LightInstructionType::ISMULH_R)
return &decodeBuffer3310; //2-1-1 decode
return &decodeBuffer3310;
//To make sure that the multiplication port is saturated, a 4-4-4-4 configuration is generated if the number of multiplications
//is lower than the number of cycles.
if (mulCount < cycle + 1)
return &decodeBuffer4444;
if (index_ == 5) { //IMUL_RCP end
//If the current RandomX instruction is "IMUL_RCP", the next buffer must begin with a 4-byte slot for multiplication.
if(instrType == LightInstructionType::IMUL_RCP)
return (gen.getByte() & 1) ? &decodeBuffer484 : &decodeBuffer493;
}
//Default: select a random fetch configuration.
return fetchNextDefault(gen);
}
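//In other words: after an IMULH_R/ISMULH_R the 3-3-10 buffer is always selected; otherwise,
//if fewer than (cycle + 1) multiplications have been issued so far, the 4-4-4-4 buffer keeps
//the multiplication port busy; after an IMUL_RCP a buffer starting with a 4-byte slot
//(4-8-4 or 4-9-3) is picked at random; in all remaining cases a random configuration is used.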
private:
@ -373,6 +318,16 @@ namespace RandomX {
}
};
//these are some of the options for splitting a 16-byte window into 3 or 4 x86 instructions.
//RandomX uses instructions with a native size of 3 (sub, xor, mul, mov), 4 (lea, imul, ror), 7 (xor, add immediate) or 10 bytes (mov 64-bit immediate).
//Slots with sizes of 8 or 9 bytes need to be padded with a nop instruction.
const int buffer0[] = { 4, 8, 4 };
const int buffer1[] = { 7, 3, 3, 3 };
const int buffer2[] = { 3, 7, 3, 3 };
const int buffer3[] = { 4, 9, 3 };
const int buffer4[] = { 4, 4, 4, 4 };
const int buffer5[] = { 3, 3, 10 };
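//A minimal sketch (not part of this commit): every configuration above must cover the full
//16-byte fetch window. A hypothetical compile-time check (slotSum is illustrative) could look like this:
template <size_t N>
constexpr int slotSum(const int (&slots)[N], size_t i = 0) {
return i == N ? 0 : slots[i] + slotSum(slots, i + 1);
}
static_assert(slotSum(buffer0) == 16 && slotSum(buffer1) == 16 && slotSum(buffer2) == 16
&& slotSum(buffer3) == 16 && slotSum(buffer4) == 16 && slotSum(buffer5) == 16,
"every decode buffer must total 16 bytes");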
const DecoderBuffer DecoderBuffer::decodeBuffer484 = DecoderBuffer("4,8,4", 0, buffer0);
const DecoderBuffer DecoderBuffer::decodeBuffer7333 = DecoderBuffer("7,3,3,3", 1, buffer1);
const DecoderBuffer DecoderBuffer::decodeBuffer3733 = DecoderBuffer("3,7,3,3", 2, buffer2);
@ -401,7 +356,6 @@ namespace RandomX {
int index;
if (availableRegisters.size() == 0)
return false;
//throw std::runtime_error("No available registers");
if (availableRegisters.size() > 1) {
index = gen.getInt32() % availableRegisters.size();
@ -423,131 +377,136 @@ namespace RandomX {
instr.setImm32(imm32_);
}
static LightInstruction createForSlot(Blake2Generator& gen, int slotSize, int fetchType, bool isLast, bool isFirst) {
void createForSlot(Blake2Generator& gen, int slotSize, int fetchType, bool isLast, bool isFirst) {
switch (slotSize)
{
case 3:
if (isLast) {
return create(slot_3L[gen.getByte() & 3], gen);
create(slot_3L[gen.getByte() & 3], gen);
}
else {
return create(slot_3[gen.getByte() & 1], gen);
create(slot_3[gen.getByte() & 1], gen);
}
break;
case 4:
if (fetchType == 4 && !isLast) {
return create(&LightInstructionInfo::IMUL_R, gen);
create(&LightInstructionInfo::IMUL_R, gen);
}
else {
return create(slot_4[gen.getByte() & 1], gen);
create(slot_4[gen.getByte() & 1], gen);
}
break;
case 7:
return create(slot_7[gen.getByte() & 1], gen);
create(slot_7[gen.getByte() & 1], gen);
break;
case 8:
return create(slot_8[gen.getByte() & 1], gen);
create(slot_8[gen.getByte() & 1], gen);
break;
case 9:
return create(slot_9[gen.getByte() & 1], gen);
create(slot_9[gen.getByte() & 1], gen);
break;
case 10:
return create(slot_10, gen);
create(slot_10, gen);
break;
default:
throw std::runtime_error("Invalid slot");
UNREACHABLE;
}
}
static LightInstruction create(const LightInstructionInfo* info, Blake2Generator& gen) {
LightInstruction li(info);
void create(const LightInstructionInfo* info, Blake2Generator& gen) {
info_ = info;
reset();
switch (info->getType())
{
case LightInstructionType::ISUB_R: {
li.mod_ = 0;
li.imm32_ = 0;
li.opGroup_ = LightInstructionType::IADD_RS;
li.groupParIsSource_ = true;
mod_ = 0;
imm32_ = 0;
opGroup_ = LightInstructionType::IADD_RS;
groupParIsSource_ = true;
} break;
case LightInstructionType::IXOR_R: {
li.mod_ = 0;
li.imm32_ = 0;
li.opGroup_ = LightInstructionType::IXOR_R;
li.groupParIsSource_ = true;
mod_ = 0;
imm32_ = 0;
opGroup_ = LightInstructionType::IXOR_R;
groupParIsSource_ = true;
} break;
case LightInstructionType::IADD_RS: {
li.mod_ = gen.getByte();
li.imm32_ = 0;
li.opGroup_ = LightInstructionType::IADD_RS;
li.groupParIsSource_ = true;
mod_ = gen.getByte();
imm32_ = 0;
opGroup_ = LightInstructionType::IADD_RS;
groupParIsSource_ = true;
} break;
case LightInstructionType::IMUL_R: {
li.mod_ = 0;
li.imm32_ = 0;
li.opGroup_ = LightInstructionType::IMUL_R;
li.opGroupPar_ = -1; //TODO
mod_ = 0;
imm32_ = 0;
opGroup_ = LightInstructionType::IMUL_R;
opGroupPar_ = -1;
} break;
case LightInstructionType::IROR_C: {
li.mod_ = 0;
mod_ = 0;
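//the rotation count is drawn from 0-63; zero is rejected because a rotation by 0 would be a no-op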
do {
li.imm32_ = gen.getByte() & 63;
} while (li.imm32_ == 0);
li.opGroup_ = LightInstructionType::IROR_C;
li.opGroupPar_ = -1;
imm32_ = gen.getByte() & 63;
} while (imm32_ == 0);
opGroup_ = LightInstructionType::IROR_C;
opGroupPar_ = -1;
} break;
case LightInstructionType::IADD_C7:
case LightInstructionType::IADD_C8:
case LightInstructionType::IADD_C9: {
li.mod_ = 0;
li.imm32_ = gen.getInt32();
li.opGroup_ = LightInstructionType::IADD_C7;
li.opGroupPar_ = -1;
mod_ = 0;
imm32_ = gen.getInt32();
opGroup_ = LightInstructionType::IADD_C7;
opGroupPar_ = -1;
} break;
case LightInstructionType::IXOR_C7:
case LightInstructionType::IXOR_C8:
case LightInstructionType::IXOR_C9: {
li.mod_ = 0;
li.imm32_ = gen.getInt32();
li.opGroup_ = LightInstructionType::IXOR_C7;
li.opGroupPar_ = -1;
mod_ = 0;
imm32_ = gen.getInt32();
opGroup_ = LightInstructionType::IXOR_C7;
opGroupPar_ = -1;
} break;
case LightInstructionType::IMULH_R: {
li.canReuse_ = true;
li.mod_ = 0;
li.imm32_ = 0;
li.opGroup_ = LightInstructionType::IMULH_R;
li.opGroupPar_ = gen.getInt32();
canReuse_ = true;
mod_ = 0;
imm32_ = 0;
opGroup_ = LightInstructionType::IMULH_R;
opGroupPar_ = gen.getInt32();
} break;
case LightInstructionType::ISMULH_R: {
li.canReuse_ = true;
li.mod_ = 0;
li.imm32_ = 0;
li.opGroup_ = LightInstructionType::ISMULH_R;
li.opGroupPar_ = gen.getInt32();
canReuse_ = true;
mod_ = 0;
imm32_ = 0;
opGroup_ = LightInstructionType::ISMULH_R;
opGroupPar_ = gen.getInt32();
} break;
case LightInstructionType::IMUL_RCP: {
li.mod_ = 0;
mod_ = 0;
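//x & (x - 1) == 0 holds exactly when x is 0 or a power of 2, so the immediate is redrawn
//until it is neither (such divisors have a trivial reciprocal)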
do {
li.imm32_ = gen.getInt32();
} while ((li.imm32_ & (li.imm32_ - 1)) == 0);
li.opGroup_ = LightInstructionType::IMUL_RCP;
li.opGroupPar_ = -1;
imm32_ = gen.getInt32();
} while ((imm32_ & (imm32_ - 1)) == 0);
opGroup_ = LightInstructionType::IMUL_RCP;
opGroupPar_ = -1;
} break;
default:
break;
}
return li;
}
bool selectDestination(int cycle, RegisterInfo (&registers)[8], Blake2Generator& gen) {
std::vector<int> availableRegisters;
for (unsigned i = 0; i < 8; ++i) {
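//a register is an eligible destination if it is ready by the given cycle, it differs from the
//source unless the instruction allows dst == src (canReuse_), its last write did not come from
//the same operation group with the same parameter, and it is not r5 when the instruction is IADD_RS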
if (registers[i].latency <= cycle && (canReuse_ || i != src_) && (registers[i].lastOpGroup != opGroup_ || registers[i].lastOpPar != opGroupPar_) && (info_.getType() != LightInstructionType::IADD_RS || i != 5))
if (registers[i].latency <= cycle && (canReuse_ || i != src_) && (registers[i].lastOpGroup != opGroup_ || registers[i].lastOpPar != opGroupPar_) && (info_->getType() != LightInstructionType::IADD_RS || i != 5))
availableRegisters.push_back(i);
}
return selectRegister(availableRegisters, gen, dst_);
@ -559,7 +518,7 @@ namespace RandomX {
if (registers[i].latency <= cycle)
availableRegisters.push_back(i);
}
if (availableRegisters.size() == 2 && info_.getType() == LightInstructionType::IADD_RS) {
if (availableRegisters.size() == 2 && info_->getType() == LightInstructionType::IADD_RS) {
if (availableRegisters[0] == 5 || availableRegisters[1] == 5) {
opGroupPar_ = src_ = 5;
return true;
@ -574,7 +533,7 @@ namespace RandomX {
}
int getType() {
return info_.getType();
return info_->getType();
}
int getSource() {
return src_;
@ -589,14 +548,14 @@ namespace RandomX {
return opGroupPar_;
}
LightInstructionInfo& getInfo() {
return info_;
const LightInstructionInfo& getInfo() const {
return *info_;
}
static const LightInstruction Null;
private:
LightInstructionInfo info_;
const LightInstructionInfo* info_;
int src_ = -1;
int dst_ = -1;
int mod_;
@ -606,24 +565,20 @@ namespace RandomX {
bool canReuse_ = false;
bool groupParIsSource_ = false;
LightInstruction(const LightInstructionInfo* info) : info_(*info) {
for (unsigned i = 0; i < info_.getSize(); ++i) {
MacroOp& mop = info_.getOp(i);
if (mop.isDependent()) {
mop.setSrcDep(&info_.getOp(i - 1));
}
}
void reset() {
src_ = dst_ = -1;
canReuse_ = groupParIsSource_ = false;
}
LightInstruction(const LightInstructionInfo* info) : info_(info) {
}
};
const LightInstruction LightInstruction::Null = LightInstruction(&LightInstructionInfo::NOP);
constexpr int ALU_COUNT_MUL = 1;
constexpr int ALU_COUNT = 3;
constexpr int LIGHT_OPCODE_BITS = 4;
constexpr int V4_SRC_INDEX_BITS = 3;
constexpr int V4_DST_INDEX_BITS = 3;
constexpr int CYCLE_MAP_SIZE = RANDOMX_LPROG_LATENCY + 3;
constexpr int CYCLE_MAP_SIZE = RANDOMX_SUPERSCALAR_LATENCY + 3;
constexpr int LOOK_FORWARD_CYCLES = 4;
constexpr int MAX_THROWAWAY_COUNT = 256;
#ifndef _DEBUG
constexpr bool TRACE = false;
constexpr bool INFO = false;
@ -735,98 +690,102 @@ namespace RandomX {
return -1;
}
// If we don't have enough data available, generate more
static FORCE_INLINE void check_data(size_t& data_index, const size_t bytes_needed, uint8_t* data, const size_t data_size)
{
if (data_index + bytes_needed > data_size)
{
std::cout << "Calling Blake " << (++blakeCounter) << std::endl;
blake2b(data, data_size, data, data_size, nullptr, 0);
data_index = 0;
}
}
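//A minimal sketch (not part of this commit) of how a consumer of this helper might look; the
//names below are illustrative, not the actual Blake2Generator interface:
//
//  uint8_t nextByte(uint8_t* data, size_t dataSize, size_t& dataIndex) {
//      check_data(dataIndex, sizeof(uint8_t), data, dataSize); //re-hash the pool if exhausted
//      return data[dataIndex++];
//  }
//  uint32_t nextInt32(uint8_t* data, size_t dataSize, size_t& dataIndex) {
//      check_data(dataIndex, sizeof(uint32_t), data, dataSize);
//      uint32_t value;
//      memcpy(&value, data + dataIndex, sizeof(value)); //avoid unaligned reads
//      dataIndex += sizeof(value);
//      return value;
//  }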
double generateLightProg2(LightProgram& prog, Blake2Generator& gen) {
ExecutionPort::type portBusy[CYCLE_MAP_SIZE][3];
memset(portBusy, 0, sizeof(portBusy));
RegisterInfo registers[8];
std::vector<LightInstruction> instructions;
const DecoderBuffer* fetchLine = &DecoderBuffer::Default;
const DecoderBuffer* decodeBuffer = &DecoderBuffer::Default;
LightInstruction currentInstruction = LightInstruction::Null;
int instrIndex = 0;
int codeSize = 0;
int macroOpCount = 0;
int cycle = 0;
int fetchCycle = 0;
int depCycle = 0;
int retireCycle = 0;
int mopIndex = 0;
bool portsSaturated = false;
int outIndex = 0;
int attempts = 0;
int mulCount = 0;
constexpr int MAX_ATTEMPTS = 4;
int decodeCycle;
while(!portsSaturated) {
fetchLine = fetchLine->fetchNext(currentInstruction.getType(), fetchCycle++, mulCount, gen);
if (TRACE) std::cout << "; ------------- fetch cycle " << cycle << " (" << fetchLine->getName() << ")" << std::endl;
//decode instructions for RANDOMX_SUPERSCALAR_LATENCY cycles or until an execution port is saturated.
//Each decode cycle decodes 16 bytes of x86 code.
//Since a decode cycle produces on average 3.45 macro-ops and there are only 3 ALU ports, execution ports are always
//saturated first. The cycle limit is present only to guarantee loop termination.
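//(For reference: the six decode configurations above produce 3, 4, 4, 3, 4 and 3 macro-ops per
//16-byte window, an unweighted average of 3.5; the quoted 3.45 presumably reflects how often each
//configuration is actually selected. Either way, that is more macro-ops per cycle than the 3 ALU
//ports can execute, so the ports fill up before the cycle limit is reached.)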
for (decodeCycle = 0; decodeCycle < RANDOMX_SUPERSCALAR_LATENCY && !portsSaturated && outIndex < RANDOMX_SUPERSCALAR_MAX_SIZE; ++decodeCycle) {
mopIndex = 0;
//select a fetch/decode configuration
decodeBuffer = decodeBuffer->fetchNext(currentInstruction.getType(), decodeCycle, mulCount, gen);
if (TRACE) std::cout << "; ------------- fetch cycle " << cycle << " (" << decodeBuffer->getName() << ")" << std::endl;
int bufferIndex = 0;
while (mopIndex < fetchLine->getSize()) {
//fill all instruction slots in the current fetch/decode buffer
while (bufferIndex < decodeBuffer->getSize()) {
int topCycle = cycle;
//if we have created all macro-ops for the current RandomX instruction, create a new instruction
if (instrIndex >= currentInstruction.getInfo().getSize()) {
if (portsSaturated)
break;
currentInstruction = LightInstruction::createForSlot(gen, fetchLine->getCounts()[mopIndex], fetchLine->getIndex(), fetchLine->getSize() == mopIndex + 1, mopIndex == 0);
currentInstruction.createForSlot(gen, decodeBuffer->getCounts()[bufferIndex], decodeBuffer->getIndex(), decodeBuffer->getSize() == bufferIndex + 1, bufferIndex == 0);
instrIndex = 0;
if (TRACE) std::cout << "; " << currentInstruction.getInfo().getName() << std::endl;
}
MacroOp& mop = currentInstruction.getInfo().getOp(instrIndex);
if (fetchLine->getCounts()[mopIndex] != mop.getSize()) {
if (TRACE) std::cout << "ERROR instruction " << mop.getName() << " doesn't fit into slot of size " << fetchLine->getCounts()[mopIndex] << std::endl;
}
const MacroOp& mop = currentInstruction.getInfo().getOp(instrIndex);
if (TRACE) std::cout << mop.getName() << " ";
//calculate the earliest cycle when this macro-op (all of its uOPs) can be scheduled for execution
int scheduleCycle = scheduleUop<false>(mop, portBusy, cycle, depCycle);
mop.setCycle(scheduleCycle);
if (scheduleCycle < 0) {
if (TRACE) std::cout << "; Failed at cycle " << cycle << std::endl;
return 0;
}
//find a source register (if applicable) that will be ready when this instruction executes
if (instrIndex == currentInstruction.getInfo().getSrcOp()) {
for (attempts = 0; attempts < MAX_ATTEMPTS && !currentInstruction.selectSource(scheduleCycle, registers, gen); ++attempts) {
int forward;
//if no suitable operand is ready, look up to LOOK_FORWARD_CYCLES cycles ahead
for (forward = 0; forward < LOOK_FORWARD_CYCLES && !currentInstruction.selectSource(scheduleCycle, registers, gen); ++forward) {
if (TRACE) std::cout << "; src STALL at cycle " << cycle << std::endl;
++scheduleCycle;
++cycle;
}
if (attempts == MAX_ATTEMPTS) { //throw instruction away
//cycle = topCycle;
//if no register was found, throw the instruction away and try another one
if (forward == LOOK_FORWARD_CYCLES) {
instrIndex = currentInstruction.getInfo().getSize();
if (TRACE) std::cout << "; THROW away " << currentInstruction.getInfo().getName() << std::endl;
continue;
}
if (TRACE) std::cout << "; src = r" << currentInstruction.getSource() << std::endl;
}
//find a destination register that will be ready when this instruction executes
if (instrIndex == currentInstruction.getInfo().getDstOp()) {
for (attempts = 0; attempts < MAX_ATTEMPTS && !currentInstruction.selectDestination(scheduleCycle, registers, gen); ++attempts) {
int forward;
for (forward = 0; forward < LOOK_FORWARD_CYCLES && !currentInstruction.selectDestination(scheduleCycle, registers, gen); ++forward) {
if (TRACE) std::cout << "; dst STALL at cycle " << cycle << std::endl;
++scheduleCycle;
++cycle;
}
if (attempts == MAX_ATTEMPTS) { //throw instruction away
//cycle = topCycle;
if (forward == LOOK_FORWARD_CYCLES) { //throw instruction away
instrIndex = currentInstruction.getInfo().getSize();
if (TRACE) std::cout << "; THROW away " << currentInstruction.getInfo().getName() << std::endl;
continue;
}
if (TRACE) std::cout << "; dst = r" << currentInstruction.getDestination() << std::endl;
}
//recalculate when the instruction can be scheduled for execution based on operand availability
scheduleCycle = scheduleUop<true>(mop, portBusy, scheduleCycle, scheduleCycle);
//calculate when the result will be ready
depCycle = scheduleCycle + mop.getLatency();
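//e.g. an imul r,r issued at cycle 10 makes its result available at cycle 10 + 3 = 13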
//if this instruction writes the result, modify register information
// RegisterInfo.latency - which cycle the register will be ready
// RegisterInfo.lastOpGroup - the last operation that was applied to the register
// RegisterInfo.lastOpPar - the last operation parameter
if (instrIndex == currentInstruction.getInfo().getResultOp()) {
int dst = currentInstruction.getDestination();
RegisterInfo& ri = registers[dst];
@ -837,13 +796,17 @@ namespace RandomX {
if (TRACE) std::cout << "; RETIRED at cycle " << retireCycle << std::endl;
}
codeSize += mop.getSize();
mopIndex++;
bufferIndex++;
instrIndex++;
macroOpCount++;
if (scheduleCycle >= RANDOMX_LPROG_LATENCY) {
//terminating condition
if (scheduleCycle >= RANDOMX_SUPERSCALAR_LATENCY) {
portsSaturated = true;
}
cycle = topCycle;
//when all macro-ops of the current instruction have been issued, add the instruction into the program
if (instrIndex >= currentInstruction.getInfo().getSize()) {
currentInstruction.toInstr(prog(outIndex++));
mulCount += isMul(currentInstruction.getType());
@ -856,19 +819,20 @@ namespace RandomX {
if (INFO) std::cout << "; (* = in use, _ = idle)" << std::endl;
int portCycles = 0;
/*for (int i = 0; i < CYCLE_MAP_SIZE; ++i) {
for (int i = 0; i < CYCLE_MAP_SIZE; ++i) {
std::cout << "; " << std::setw(3) << i << " ";
for (int j = 0; j < 3; ++j) {
std::cout << (portBusy[i][j] ? '*' : '_');
portCycles += !!portBusy[i][j];
}
std::cout << std::endl;
}*/
}
double ipc = (macroOpCount / (double)retireCycle);
if (INFO) std::cout << "; code size " << codeSize << " bytes" << std::endl;
if (INFO) std::cout << "; x86 macro-ops: " << macroOpCount << std::endl;
if (INFO) std::cout << "; fetch cycles: " << decodeCycle << std::endl;
if (INFO) std::cout << "; RandomX instructions: " << outIndex << std::endl;
if (INFO) std::cout << "; Execution time: " << retireCycle << " cycles" << std::endl;
if (INFO) std::cout << "; IPC = " << ipc << std::endl;
@ -878,7 +842,8 @@ namespace RandomX {
int asicLatency[8];
memset(asicLatency, 0, sizeof(asicLatency));
//Calculate ASIC latency:
//Assumes 1 cycle latency for all operations and unlimited parallelization.
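//e.g. with asicLatency = {3, 5, ...}, the instruction "r0 = r0 op r1" updates
//asicLatency[0] = max(3 + 1, 5 + 1) = 6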
for (int i = 0; i < outIndex; ++i) {
Instruction& instr = prog(i);
int latDst = asicLatency[instr.dst] + 1;
@ -886,16 +851,17 @@ namespace RandomX {
asicLatency[instr.dst] = std::max(latDst, latSrc);
}
int asicLatencyFinal = 0;
//address register is the register with the highest ASIC latency
int asicLatencyMax = 0;
int addressReg = 0;
for (int i = 0; i < 8; ++i) {
if (asicLatency[i] > asicLatencyFinal) {
asicLatencyFinal = asicLatency[i];
if (asicLatency[i] > asicLatencyMax) {
asicLatencyMax = asicLatency[i];
addressReg = i;
}
}
if (INFO) std::cout << "; ASIC latency: " << asicLatencyFinal << std::endl;
if (INFO) std::cout << "; ASIC latency: " << asicLatencyMax << std::endl;
if (INFO) {
std::cout << "; ASIC latency:" << std::endl;

@ -81,7 +81,7 @@ namespace RandomX {
os << instr;
}
}
Instruction programBuffer[RANDOMX_LPROG_MAX_SIZE];
Instruction programBuffer[RANDOMX_SUPERSCALAR_MAX_SIZE];
uint32_t size;
int addrReg;
};

@ -37,10 +37,8 @@ along with RandomX. If not, see<http://www.gnu.org/licenses/>.
//Number of random Cache accesses per Dataset block. Minimum is 2.
#define RANDOMX_CACHE_ACCESSES 8
#define RANDOMX_LPROG_LATENCY 170
#define RANDOMX_LPROG_ASIC_LATENCY 84
#define RANDOMX_LPROG_MIN_SIZE 225
#define RANDOMX_LPROG_MAX_SIZE 512
#define RANDOMX_SUPERSCALAR_LATENCY 170
#define RANDOMX_SUPERSCALAR_MAX_SIZE 512
//Dataset size in bytes. Must be a power of 2.
#define RANDOMX_DATASET_SIZE (2ULL * 1024 * 1024 * 1024)
