diff --git a/src/crypto/CryptoNight.cpp b/src/crypto/CryptoNight.cpp index 9eaf440cd5..3b6c908e8e 100644 --- a/src/crypto/CryptoNight.cpp +++ b/src/crypto/CryptoNight.cpp @@ -39,118 +39,138 @@ static void cryptonight_aesni(AsmOptimization asmOptimization, uint64_t height, # if !defined(XMRIG_ARMv7) if (variant == PowVariant::POW_V1) { #if defined(XMRIG_ARM) - CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); + CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, POW_V1, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); #else if (asmOptimization == AsmOptimization::ASM_INTEL && NUM_HASH_BLOCKS == 1) { - CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, NUM_HASH_BLOCKS>::hashPowV2_asm(input, size, output, scratchPad, asmOptimization, variant); + CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, POW_V1, NUM_HASH_BLOCKS>::hashPowV2_asm(input, size, output, scratchPad, asmOptimization); } else { - CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); + CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, POW_V1, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); } #endif } else if (variant == PowVariant::POW_V2) { #if defined(XMRIG_ARM) - CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad, variant); + CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, POW_V2, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad); #else if ((asmOptimization == AsmOptimization::ASM_INTEL && NUM_HASH_BLOCKS <= 2) || (asmOptimization == AsmOptimization::ASM_RYZEN && NUM_HASH_BLOCKS == 1) || (asmOptimization == AsmOptimization::ASM_BULLDOZER && NUM_HASH_BLOCKS == 1)) { - CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, NUM_HASH_BLOCKS>::hashPowV3_asm(input, size, output, scratchPad, asmOptimization, variant); + CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, POW_V2, NUM_HASH_BLOCKS>::hashPowV3_asm(input, size, output, scratchPad, asmOptimization); } else { - CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad, variant); + CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, POW_V2, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad); } #endif - } else if (variant == PowVariant::POW_V4 || variant == PowVariant::POW_WOW) { + } else if (variant == PowVariant::POW_V4) { #if defined(XMRIG_ARM) - CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, NUM_HASH_BLOCKS>::hashPowV4(input, size, output, scratchPad, height, variant); + CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, POW_V4, NUM_HASH_BLOCKS>::hashPowV4(input, size, output, scratchPad, height); #else if (asmOptimization != AsmOptimization::ASM_OFF && NUM_HASH_BLOCKS <= 2) { - CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, NUM_HASH_BLOCKS>::hashPowV4_asm(input, size, output, scratchPad, height, asmOptimization, variant); + CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, POW_V4, NUM_HASH_BLOCKS>::hashPowV4_asm(input, size, 
output, scratchPad, height, asmOptimization); } else { - CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, NUM_HASH_BLOCKS>::hashPowV4(input, size, output, scratchPad, height, variant); + CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, POW_V4, NUM_HASH_BLOCKS>::hashPowV4(input, size, output, scratchPad, height); + } +#endif + } else if (variant == PowVariant::POW_WOW) { +#if defined(XMRIG_ARM) + CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, POW_WOW, NUM_HASH_BLOCKS>::hashPowV4(input, size, output, scratchPad, height); +#else + if (asmOptimization != AsmOptimization::ASM_OFF && NUM_HASH_BLOCKS <= 2) { + CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, POW_WOW, NUM_HASH_BLOCKS>::hashPowV4_asm(input, size, output, scratchPad, height, asmOptimization); + } else { + CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, POW_WOW, NUM_HASH_BLOCKS>::hashPowV4(input, size, output, scratchPad, height); } #endif } else if (variant == PowVariant::POW_ALLOY) { - CryptoNightMultiHash<0x100000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, NUM_HASH_BLOCKS>::hash(input, size, output, scratchPad); + CryptoNightMultiHash<0x100000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, POW_ALLOY, NUM_HASH_BLOCKS>::hash(input, size, output, scratchPad); } else if (variant == PowVariant::POW_XTL) { #if defined(XMRIG_ARM) - CryptoNightMultiHash<0x80000, POW_XLT_V4_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); + CryptoNightMultiHash<0x80000, POW_XLT_V4_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, POW_XTL, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); #else if (asmOptimization == AsmOptimization::ASM_INTEL && NUM_HASH_BLOCKS == 1) { - CryptoNightMultiHash<0x80000, POW_XLT_V4_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, NUM_HASH_BLOCKS>::hashPowV2_asm(input, size, output, scratchPad, asmOptimization, variant); + CryptoNightMultiHash<0x80000, POW_XLT_V4_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, POW_XTL, NUM_HASH_BLOCKS>::hashPowV2_asm(input, size, output, scratchPad, asmOptimization); } else { - CryptoNightMultiHash<0x80000, POW_XLT_V4_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); + CryptoNightMultiHash<0x80000, POW_XLT_V4_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, POW_XTL, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); } #endif } else if (variant == PowVariant::POW_FAST_2) { #if defined(XMRIG_ARM) - CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad, variant); + CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, POW_FAST_2, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad); #else if ((asmOptimization == AsmOptimization::ASM_INTEL && NUM_HASH_BLOCKS <= 2) || (asmOptimization == AsmOptimization::ASM_RYZEN && NUM_HASH_BLOCKS == 1) || (asmOptimization == AsmOptimization::ASM_BULLDOZER && NUM_HASH_BLOCKS == 1)) { - CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, NUM_HASH_BLOCKS>::hashPowV3_asm(input, size, output, scratchPad, asmOptimization, variant); + CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, POW_FAST_2, NUM_HASH_BLOCKS>::hashPowV3_asm(input, size, output, scratchPad, asmOptimization); } else { - CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 
0x1FFFF0, false, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad, variant); + CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, POW_FAST_2, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad); } #endif } else if (variant == PowVariant::POW_DOUBLE) { #if defined(XMRIG_ARM) - CryptoNightMultiHash<0x100000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad, variant); + CryptoNightMultiHash<0x100000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, POW_DOUBLE, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad); #else if ((asmOptimization == AsmOptimization::ASM_INTEL && NUM_HASH_BLOCKS <= 2) || (asmOptimization == AsmOptimization::ASM_RYZEN && NUM_HASH_BLOCKS == 1) || (asmOptimization == AsmOptimization::ASM_BULLDOZER && NUM_HASH_BLOCKS == 1)) { - CryptoNightMultiHash<0x100000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, NUM_HASH_BLOCKS>::hashPowV3_asm(input, size, output, scratchPad, asmOptimization, variant); + CryptoNightMultiHash<0x100000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, POW_DOUBLE, NUM_HASH_BLOCKS>::hashPowV3_asm(input, size, output, scratchPad, asmOptimization); } else { - CryptoNightMultiHash<0x100000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad, variant); + CryptoNightMultiHash<0x100000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, POW_DOUBLE, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad); } #endif } else if (variant == PowVariant::POW_ZELERIUS) { #if defined(XMRIG_ARM) - CryptoNightMultiHash<0x60000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad, variant); + CryptoNightMultiHash<0x60000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, POW_ZELERIUS, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad); #else if ((asmOptimization == AsmOptimization::ASM_INTEL && NUM_HASH_BLOCKS <= 2) || (asmOptimization == AsmOptimization::ASM_RYZEN && NUM_HASH_BLOCKS == 1) || (asmOptimization == AsmOptimization::ASM_BULLDOZER && NUM_HASH_BLOCKS == 1)) { - CryptoNightMultiHash<0x60000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, NUM_HASH_BLOCKS>::hashPowV3_asm(input, size, output, scratchPad, asmOptimization, variant); + CryptoNightMultiHash<0x60000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, POW_ZELERIUS, NUM_HASH_BLOCKS>::hashPowV3_asm(input, size, output, scratchPad, asmOptimization); } else { - CryptoNightMultiHash<0x60000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad, variant); + CryptoNightMultiHash<0x60000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, POW_ZELERIUS, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad); } #endif } else if (variant == PowVariant::POW_RWZ) { #if defined(XMRIG_ARM) - CryptoNightMultiHash<0x60000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad, variant); + CryptoNightMultiHash<0x60000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, POW_RWZ, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad); #else if ((asmOptimization != AsmOptimization::ASM_OFF && NUM_HASH_BLOCKS <= 2)) { - CryptoNightMultiHash<0x60000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, NUM_HASH_BLOCKS>::hashPowV3_asm(input, size, output, scratchPad, asmOptimization, variant); + CryptoNightMultiHash<0x60000, POW_DEFAULT_INDEX_SHIFT,
MEMORY, 0x1FFFF0, false, POW_RWZ, NUM_HASH_BLOCKS>::hashPowV3_asm(input, size, output, scratchPad, asmOptimization); } else { - CryptoNightMultiHash<0x60000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad, variant); + CryptoNightMultiHash<0x60000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, POW_RWZ, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad); } #endif } else if (variant == PowVariant::POW_MSR) { #if defined(XMRIG_ARM) - CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); + CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, POW_MSR, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); #else if (asmOptimization == AsmOptimization::ASM_INTEL && NUM_HASH_BLOCKS == 1) { - CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, NUM_HASH_BLOCKS>::hashPowV2_asm(input, size, output, scratchPad, asmOptimization, variant); + CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, POW_MSR, NUM_HASH_BLOCKS>::hashPowV2_asm(input, size, output, scratchPad, asmOptimization); } else { - CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); + CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, POW_MSR, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); } #endif - } else if (variant == PowVariant::POW_RTO || variant == PowVariant::POW_HOSP) { + } else if (variant == PowVariant::POW_RTO) { #if defined(XMRIG_ARM) - CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, NUM_HASH_BLOCKS>::hashLiteTube(input, size, output, scratchPad); + CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, POW_RTO, NUM_HASH_BLOCKS>::hashLiteTube(input, size, output, scratchPad); #else if (asmOptimization == AsmOptimization::ASM_INTEL && NUM_HASH_BLOCKS == 1) { - CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, NUM_HASH_BLOCKS>::hashPowV2_asm(input, size, output, scratchPad, asmOptimization, variant); + CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, POW_RTO, NUM_HASH_BLOCKS>::hashPowV2_asm(input, size, output, scratchPad, asmOptimization); } else { - CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, NUM_HASH_BLOCKS>::hashLiteTube(input, size, output, scratchPad); + CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, POW_RTO, NUM_HASH_BLOCKS>::hashLiteTube(input, size, output, scratchPad); + } +#endif + } else if (variant == PowVariant::POW_HOSP) { +#if defined(XMRIG_ARM) + CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, POW_HOSP, NUM_HASH_BLOCKS>::hashLiteTube(input, size, output, scratchPad); +#else + if (asmOptimization == AsmOptimization::ASM_INTEL && NUM_HASH_BLOCKS == 1) { + CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, POW_HOSP, NUM_HASH_BLOCKS>::hashPowV2_asm(input, size, output, scratchPad, asmOptimization); + } else { + CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, POW_HOSP, NUM_HASH_BLOCKS>::hashLiteTube(input, size, output, scratchPad); } #endif } else if (variant == PowVariant::POW_XFH) { - CryptoNightMultiHash<0x20000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, 
false, NUM_HASH_BLOCKS>::hashHeavyHaven(input, size, output, scratchPad); + CryptoNightMultiHash<0x20000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, POW_XFH, NUM_HASH_BLOCKS>::hashHeavyHaven(input, size, output, scratchPad); } else { - CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, NUM_HASH_BLOCKS>::hash(input, size, output, scratchPad); + CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, false, POW_V0, NUM_HASH_BLOCKS>::hash(input, size, output, scratchPad); } # endif } @@ -159,104 +179,122 @@ template static void cryptonight_softaes(AsmOptimization asmOptimization, uint64_t height, PowVariant variant, const uint8_t* input, size_t size, uint8_t* output, ScratchPad** scratchPad) { if (variant == PowVariant::POW_V1) { #if defined(XMRIG_ARM) - CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); + CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, POW_V1, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); #else if (asmOptimization == AsmOptimization::ASM_INTEL && NUM_HASH_BLOCKS == 1) { - CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, NUM_HASH_BLOCKS>::hashPowV2_asm(input, size, output, scratchPad, asmOptimization, variant); + CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, POW_V1, NUM_HASH_BLOCKS>::hashPowV2_asm(input, size, output, scratchPad, asmOptimization); } else { - CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); + CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, POW_V1, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); } #endif } else if (variant == PowVariant::POW_V2) { #if defined(XMRIG_ARM) - CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad, variant); + CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, POW_V2, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad); #else if (asmOptimization == AsmOptimization::ASM_INTEL && NUM_HASH_BLOCKS == 1) { - CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, NUM_HASH_BLOCKS>::hashPowV3_asm(input, size, output, scratchPad, asmOptimization, variant); + CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, POW_V2, NUM_HASH_BLOCKS>::hashPowV3_asm(input, size, output, scratchPad, asmOptimization); } else { - CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad, variant); + CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, POW_V2, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad); } #endif - } else if (variant == PowVariant::POW_V4 || variant == PowVariant::POW_WOW) { + } else if (variant == PowVariant::POW_V4) { #if defined(XMRIG_ARM) - CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, NUM_HASH_BLOCKS>::hashPowV4(input, size, output, scratchPad, height, variant); + CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, POW_V4, NUM_HASH_BLOCKS>::hashPowV4(input, size, output, scratchPad, height); #else if (asmOptimization != AsmOptimization::ASM_OFF && NUM_HASH_BLOCKS == 1) { - CryptoNightMultiHash<0x80000, 
POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, NUM_HASH_BLOCKS>::hashPowV4_asm(input, size, output, scratchPad, height, asmOptimization, variant); + CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, POW_V4, NUM_HASH_BLOCKS>::hashPowV4_asm(input, size, output, scratchPad, height, asmOptimization); } else { - CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, NUM_HASH_BLOCKS>::hashPowV4(input, size, output, scratchPad, height, variant); + CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, POW_V4, NUM_HASH_BLOCKS>::hashPowV4(input, size, output, scratchPad, height); + } +#endif + } else if (variant == PowVariant::POW_WOW) { +#if defined(XMRIG_ARM) + CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, POW_WOW, NUM_HASH_BLOCKS>::hashPowV4(input, size, output, scratchPad, height); +#else + if (asmOptimization != AsmOptimization::ASM_OFF && NUM_HASH_BLOCKS == 1) { + CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, POW_WOW, NUM_HASH_BLOCKS>::hashPowV4_asm(input, size, output, scratchPad, height, asmOptimization); + } else { + CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, POW_WOW, NUM_HASH_BLOCKS>::hashPowV4(input, size, output, scratchPad, height); } #endif - - } else if (variant == PowVariant::POW_FAST_2) { #if defined(XMRIG_ARM) - CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad, variant); + CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, POW_FAST_2, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad); #else if (asmOptimization == AsmOptimization::ASM_INTEL && NUM_HASH_BLOCKS == 1) { - CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, NUM_HASH_BLOCKS>::hashPowV3_asm(input, size, output, scratchPad, asmOptimization, variant); + CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, POW_FAST_2, NUM_HASH_BLOCKS>::hashPowV3_asm(input, size, output, scratchPad, asmOptimization); } else { - CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad, variant); + CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, POW_FAST_2, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad); } #endif } else if (variant == PowVariant::POW_DOUBLE) { #if defined(XMRIG_ARM) - CryptoNightMultiHash<0x100000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad, variant); + CryptoNightMultiHash<0x100000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, POW_DOUBLE, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad); #else if (asmOptimization == AsmOptimization::ASM_INTEL && NUM_HASH_BLOCKS == 1) { - CryptoNightMultiHash<0x100000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, NUM_HASH_BLOCKS>::hashPowV3_asm(input, size, output, scratchPad, asmOptimization, variant); + CryptoNightMultiHash<0x100000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, POW_DOUBLE, NUM_HASH_BLOCKS>::hashPowV3_asm(input, size, output, scratchPad, asmOptimization); } else { - CryptoNightMultiHash<0x100000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad, variant); + CryptoNightMultiHash<0x100000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, POW_DOUBLE,
NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad); } #endif } else if (variant == PowVariant::POW_ZELERIUS) { #if defined(XMRIG_ARM) - CryptoNightMultiHash<0x60000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad, variant); + CryptoNightMultiHash<0x60000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, POW_ZELERIUS, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad); #else if (asmOptimization == AsmOptimization::ASM_INTEL && NUM_HASH_BLOCKS == 1) { - CryptoNightMultiHash<0x60000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, NUM_HASH_BLOCKS>::hashPowV3_asm(input, size, output, scratchPad, asmOptimization, variant); + CryptoNightMultiHash<0x60000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, POW_ZELERIUS, NUM_HASH_BLOCKS>::hashPowV3_asm(input, size, output, scratchPad, asmOptimization); } else { - CryptoNightMultiHash<0x60000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad, variant); + CryptoNightMultiHash<0x60000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, POW_ZELERIUS, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad); } #endif } else if (variant == PowVariant::POW_RWZ) { - CryptoNightMultiHash<0x60000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad, variant); + CryptoNightMultiHash<0x60000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, POW_RWZ, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad); } else if (variant == PowVariant::POW_ALLOY) { - CryptoNightMultiHash<0x100000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, NUM_HASH_BLOCKS>::hash(input, size, output, scratchPad); + CryptoNightMultiHash<0x100000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, POW_ALLOY, NUM_HASH_BLOCKS>::hash(input, size, output, scratchPad); } else if (variant == PowVariant::POW_XTL) { #if defined(XMRIG_ARM) - CryptoNightMultiHash<0x80000, POW_XLT_V4_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); + CryptoNightMultiHash<0x80000, POW_XLT_V4_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, POW_XTL, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); #else if (asmOptimization == AsmOptimization::ASM_INTEL && NUM_HASH_BLOCKS == 1) { - CryptoNightMultiHash<0x80000, POW_XLT_V4_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, NUM_HASH_BLOCKS>::hashPowV2_asm(input, size, output, scratchPad, asmOptimization, variant); + CryptoNightMultiHash<0x80000, POW_XLT_V4_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, POW_XTL, NUM_HASH_BLOCKS>::hashPowV2_asm(input, size, output, scratchPad, asmOptimization); } else { - CryptoNightMultiHash<0x80000, POW_XLT_V4_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); + CryptoNightMultiHash<0x80000, POW_XLT_V4_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, POW_XTL, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); } #endif } else if (variant == PowVariant::POW_MSR) { #if defined(XMRIG_ARM) - CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); + CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, POW_MSR, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); +#else + if (asmOptimization == AsmOptimization::ASM_INTEL && NUM_HASH_BLOCKS == 1) { + CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, POW_MSR, 
NUM_HASH_BLOCKS>::hashPowV2_asm(input, size, output, scratchPad, asmOptimization); + } else { + CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, POW_MSR, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); + } +#endif + } else if (variant == PowVariant::POW_RTO) { +#if defined(XMRIG_ARM) + CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, POW_RTO, NUM_HASH_BLOCKS>::hashLiteTube(input, size, output, scratchPad); #else if (asmOptimization == AsmOptimization::ASM_INTEL && NUM_HASH_BLOCKS == 1) { - CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, NUM_HASH_BLOCKS>::hashPowV2_asm(input, size, output, scratchPad, asmOptimization, variant); + CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, POW_RTO, NUM_HASH_BLOCKS>::hashPowV2_asm(input, size, output, scratchPad, asmOptimization); } else { - CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); + CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, POW_RTO, NUM_HASH_BLOCKS>::hashLiteTube(input, size, output, scratchPad); } #endif - } else if (variant == PowVariant::POW_RTO || variant == PowVariant::POW_HOSP) { + } else if (variant == PowVariant::POW_HOSP) { #if defined(XMRIG_ARM) - CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, NUM_HASH_BLOCKS>::hashLiteTube(input, size, output, scratchPad); + CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, POW_HOSP, NUM_HASH_BLOCKS>::hashLiteTube(input, size, output, scratchPad); #else if (asmOptimization == AsmOptimization::ASM_INTEL && NUM_HASH_BLOCKS == 1) { - CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, NUM_HASH_BLOCKS>::hashPowV2_asm(input, size, output, scratchPad, asmOptimization, variant); + CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, POW_HOSP, NUM_HASH_BLOCKS>::hashPowV2_asm(input, size, output, scratchPad, asmOptimization); } else { - CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, NUM_HASH_BLOCKS>::hashLiteTube(input, size, output, scratchPad); + CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, POW_HOSP, NUM_HASH_BLOCKS>::hashLiteTube(input, size, output, scratchPad); } #endif } else if (variant == PowVariant::POW_XFH) { - CryptoNightMultiHash<0x20000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, NUM_HASH_BLOCKS>::hashHeavyHaven(input, size, output, scratchPad); + CryptoNightMultiHash<0x20000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, POW_XFH, NUM_HASH_BLOCKS>::hashHeavyHaven(input, size, output, scratchPad); } else { - CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, NUM_HASH_BLOCKS>::hash(input, size, output, scratchPad); + CryptoNightMultiHash<0x80000, POW_DEFAULT_INDEX_SHIFT, MEMORY, 0x1FFFF0, true, POW_V0, NUM_HASH_BLOCKS>::hash(input, size, output, scratchPad); } } @@ -265,28 +303,28 @@ static void cryptonight_lite_aesni(AsmOptimization asmOptimization, uint64_t hei # if !defined(XMRIG_ARMv7) if (variant == PowVariant::POW_V1) { #if defined(XMRIG_ARM) - CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY_LITE, 0xFFFF0, false, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); + CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY_LITE, 0xFFFF0, false, POW_V1, NUM_HASH_BLOCKS>::hashPowV2(input, size, 
output, scratchPad); #else if (asmOptimization == AsmOptimization::ASM_INTEL && NUM_HASH_BLOCKS == 1) { - CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY_LITE, 0xFFFF0, false, NUM_HASH_BLOCKS>::hashPowV2_asm(input, size, output, scratchPad, asmOptimization, variant); + CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY_LITE, 0xFFFF0, false, POW_V1, NUM_HASH_BLOCKS>::hashPowV2_asm(input, size, output, scratchPad, asmOptimization); } else { - CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY_LITE, 0xFFFF0, false, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); + CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY_LITE, 0xFFFF0, false, POW_V1, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); } #endif } else if (variant == PowVariant::POW_TUBE) { - CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY_LITE, 0xFFFF0, false, NUM_HASH_BLOCKS>::hashLiteTube(input, size, output, scratchPad); + CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY_LITE, 0xFFFF0, false, POW_TUBE, NUM_HASH_BLOCKS>::hashLiteTube(input, size, output, scratchPad); } else if (variant == PowVariant::POW_UPX) { #if defined(XMRIG_ARM) - CryptoNightMultiHash<0x20000, POW_DEFAULT_INDEX_SHIFT, MEMORY_LITE, 0xFFFF0, false, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); + CryptoNightMultiHash<0x20000, POW_DEFAULT_INDEX_SHIFT, MEMORY_LITE, 0xFFFF0, false, POW_UPX, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); #else if (asmOptimization == AsmOptimization::ASM_INTEL && NUM_HASH_BLOCKS == 1) { - CryptoNightMultiHash<0x20000, POW_DEFAULT_INDEX_SHIFT, MEMORY_LITE, 0xFFFF0, false, NUM_HASH_BLOCKS>::hashPowV2_asm(input, size, output, scratchPad, asmOptimization, variant); + CryptoNightMultiHash<0x20000, POW_DEFAULT_INDEX_SHIFT, MEMORY_LITE, 0xFFFF0, false, POW_UPX, NUM_HASH_BLOCKS>::hashPowV2_asm(input, size, output, scratchPad, asmOptimization); } else { - CryptoNightMultiHash<0x20000, POW_DEFAULT_INDEX_SHIFT, MEMORY_LITE, 0xFFFF0, false, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); + CryptoNightMultiHash<0x20000, POW_DEFAULT_INDEX_SHIFT, MEMORY_LITE, 0xFFFF0, false, POW_UPX, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); } #endif } else { - CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY_LITE, 0xFFFF0, false, NUM_HASH_BLOCKS>::hash(input, size, output, scratchPad); + CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY_LITE, 0xFFFF0, false, POW_V0, NUM_HASH_BLOCKS>::hash(input, size, output, scratchPad); } # endif } @@ -295,28 +333,28 @@ template static void cryptonight_lite_softaes(AsmOptimization asmOptimization, uint64_t height, PowVariant variant, const uint8_t* input, size_t size, uint8_t* output, ScratchPad** scratchPad) { if (variant == PowVariant::POW_V1) { #if defined(XMRIG_ARM) - CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY_LITE, 0xFFFF0, true, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); + CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY_LITE, 0xFFFF0, true, POW_V1, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); #else if (asmOptimization == AsmOptimization::ASM_INTEL && NUM_HASH_BLOCKS == 1) { - CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY_LITE, 0xFFFF0, true, NUM_HASH_BLOCKS>::hashPowV2_asm(input, size, output, scratchPad, asmOptimization, variant); + CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY_LITE, 0xFFFF0, true, POW_V1, 
NUM_HASH_BLOCKS>::hashPowV2_asm(input, size, output, scratchPad, asmOptimization); } else { - CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY_LITE, 0xFFFF0, true, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); + CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY_LITE, 0xFFFF0, true, POW_V1, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); } #endif } else if (variant == PowVariant::POW_TUBE) { - CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY_LITE, 0xFFFF0, true, NUM_HASH_BLOCKS>::hashLiteTube(input, size, output, scratchPad); + CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY_LITE, 0xFFFF0, true, POW_TUBE, NUM_HASH_BLOCKS>::hashLiteTube(input, size, output, scratchPad); } else if (variant == PowVariant::POW_UPX) { #if defined(XMRIG_ARM) - CryptoNightMultiHash<0x20000, POW_DEFAULT_INDEX_SHIFT, MEMORY_LITE, 0xFFFF0, true, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); + CryptoNightMultiHash<0x20000, POW_DEFAULT_INDEX_SHIFT, MEMORY_LITE, 0xFFFF0, true, POW_UPX, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); #else if (asmOptimization == AsmOptimization::ASM_INTEL && NUM_HASH_BLOCKS == 1) { - CryptoNightMultiHash<0x20000, POW_DEFAULT_INDEX_SHIFT, MEMORY_LITE, 0xFFFF0, true, NUM_HASH_BLOCKS>::hashPowV2_asm(input, size, output, scratchPad, asmOptimization, variant); + CryptoNightMultiHash<0x20000, POW_DEFAULT_INDEX_SHIFT, MEMORY_LITE, 0xFFFF0, true, POW_UPX, NUM_HASH_BLOCKS>::hashPowV2_asm(input, size, output, scratchPad, asmOptimization); } else { - CryptoNightMultiHash<0x20000, POW_DEFAULT_INDEX_SHIFT, MEMORY_LITE, 0xFFFF0, true, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); + CryptoNightMultiHash<0x20000, POW_DEFAULT_INDEX_SHIFT, MEMORY_LITE, 0xFFFF0, true, POW_UPX, NUM_HASH_BLOCKS>::hashPowV2(input, size, output, scratchPad); } #endif } else { - CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY_LITE, 0xFFFF0, true, NUM_HASH_BLOCKS>::hash(input, size, output, scratchPad); + CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY_LITE, 0xFFFF0, true, POW_V0, NUM_HASH_BLOCKS>::hash(input, size, output, scratchPad); } } @@ -334,14 +372,14 @@ template static void cryptonight_ultra_lite_aesni(AsmOptimization asmOptimization, uint64_t height, PowVariant variant, const uint8_t* input, size_t size, uint8_t* output, ScratchPad** scratchPad) { # if !defined(XMRIG_ARMv7) #if defined(XMRIG_ARM) - CryptoNightMultiHash<0x10000, POW_DEFAULT_INDEX_SHIFT, MEMORY_ULTRA_LITE, 0x1FFF0, false, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad, variant); + CryptoNightMultiHash<0x10000, POW_DEFAULT_INDEX_SHIFT, MEMORY_ULTRA_LITE, 0x1FFF0, false, POW_TURTLE, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad); #else if ((asmOptimization == AsmOptimization::ASM_INTEL && NUM_HASH_BLOCKS <= 2) || (asmOptimization == AsmOptimization::ASM_RYZEN && NUM_HASH_BLOCKS == 1) || (asmOptimization == AsmOptimization::ASM_BULLDOZER && NUM_HASH_BLOCKS == 1)) { - CryptoNightMultiHash<0x10000, POW_DEFAULT_INDEX_SHIFT, MEMORY_ULTRA_LITE, 0x1FFF0, false, NUM_HASH_BLOCKS>::hashPowV3_asm(input, size, output, scratchPad, asmOptimization, variant); + CryptoNightMultiHash<0x10000, POW_DEFAULT_INDEX_SHIFT, MEMORY_ULTRA_LITE, 0x1FFF0, false, POW_TURTLE, NUM_HASH_BLOCKS>::hashPowV3_asm(input, size, output, scratchPad, asmOptimization); } else { - CryptoNightMultiHash<0x10000, POW_DEFAULT_INDEX_SHIFT, MEMORY_ULTRA_LITE, 0x1FFF0, false, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, 
scratchPad, variant); + CryptoNightMultiHash<0x10000, POW_DEFAULT_INDEX_SHIFT, MEMORY_ULTRA_LITE, 0x1FFF0, false, POW_TURTLE, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad); } #endif # endif @@ -350,12 +388,12 @@ static void cryptonight_ultra_lite_aesni(AsmOptimization asmOptimization, uint64 template static void cryptonight_ultra_lite_softaes(AsmOptimization asmOptimization, uint64_t height, PowVariant variant, const uint8_t* input, size_t size, uint8_t* output, ScratchPad** scratchPad) { #if defined(XMRIG_ARM) - CryptoNightMultiHash<0x10000, POW_DEFAULT_INDEX_SHIFT, MEMORY_ULTRA_LITE, 0x1FFF0, true, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad, variant); + CryptoNightMultiHash<0x10000, POW_DEFAULT_INDEX_SHIFT, MEMORY_ULTRA_LITE, 0x1FFF0, true, POW_TURTLE, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad); #else if (asmOptimization == AsmOptimization::ASM_INTEL && NUM_HASH_BLOCKS == 1) { - CryptoNightMultiHash<0x10000, POW_DEFAULT_INDEX_SHIFT, MEMORY_ULTRA_LITE, 0x1FFF0, true, NUM_HASH_BLOCKS>::hashPowV3_asm(input, size, output, scratchPad, asmOptimization, variant); + CryptoNightMultiHash<0x10000, POW_DEFAULT_INDEX_SHIFT, MEMORY_ULTRA_LITE, 0x1FFF0, true, POW_TURTLE, NUM_HASH_BLOCKS>::hashPowV3_asm(input, size, output, scratchPad, asmOptimization); } else { - CryptoNightMultiHash<0x10000, POW_DEFAULT_INDEX_SHIFT, MEMORY_ULTRA_LITE, 0x1FFF0, true, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad, variant); + CryptoNightMultiHash<0x10000, POW_DEFAULT_INDEX_SHIFT, MEMORY_ULTRA_LITE, 0x1FFF0, true, POW_TURTLE, NUM_HASH_BLOCKS>::hashPowV3(input, size, output, scratchPad); } #endif } @@ -364,13 +402,13 @@ template static void cryptonight_heavy_aesni(AsmOptimization asmOptimization, uint64_t height, PowVariant variant, const uint8_t* input, size_t size, uint8_t* output, ScratchPad** scratchPad) { # if !defined(XMRIG_ARMv7) if (variant == PowVariant::POW_XHV) { - CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY_HEAVY, 0x3FFFF0, false, NUM_HASH_BLOCKS>::hashHeavyHaven(input, size, output, scratchPad); + CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY_HEAVY, 0x3FFFF0, false, POW_XHV, NUM_HASH_BLOCKS>::hashHeavyHaven(input, size, output, scratchPad); } else if (variant == PowVariant::POW_TUBE) { - CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY_HEAVY, 0x3FFFF0, false, NUM_HASH_BLOCKS>::hashHeavyTube(input, size, output, scratchPad); + CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY_HEAVY, 0x3FFFF0, false, POW_TUBE, NUM_HASH_BLOCKS>::hashHeavyTube(input, size, output, scratchPad); } else { - CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY_HEAVY, 0x3FFFF0, false, NUM_HASH_BLOCKS>::hashHeavy(input, size, output, scratchPad); + CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY_HEAVY, 0x3FFFF0, false, POW_V0, NUM_HASH_BLOCKS>::hashHeavy(input, size, output, scratchPad); } # endif } @@ -378,13 +416,13 @@ static void cryptonight_heavy_aesni(AsmOptimization asmOptimization, uint64_t he template static void cryptonight_heavy_softaes(AsmOptimization asmOptimization, uint64_t height, PowVariant variant, const uint8_t* input, size_t size, uint8_t* output, ScratchPad** scratchPad) { if (variant == PowVariant::POW_XHV) { - CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY_HEAVY, 0x3FFFF0, true, NUM_HASH_BLOCKS>::hashHeavyHaven(input, size, output, scratchPad); + CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY_HEAVY, 0x3FFFF0, true, POW_XHV, 
NUM_HASH_BLOCKS>::hashHeavyHaven(input, size, output, scratchPad); } else if (variant == PowVariant::POW_TUBE) { - CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY_HEAVY, 0x3FFFF0, true, NUM_HASH_BLOCKS>::hashHeavyTube(input, size, output, scratchPad); + CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY_HEAVY, 0x3FFFF0, true, POW_TUBE, NUM_HASH_BLOCKS>::hashHeavyTube(input, size, output, scratchPad); } else { - CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY_HEAVY, 0x3FFFF0, true, NUM_HASH_BLOCKS>::hashHeavy(input, size, output, scratchPad); + CryptoNightMultiHash<0x40000, POW_DEFAULT_INDEX_SHIFT, MEMORY_HEAVY, 0x3FFFF0, true, POW_V0, NUM_HASH_BLOCKS>::hashHeavy(input, size, output, scratchPad); } } diff --git a/src/crypto/CryptoNight_arm.h b/src/crypto/CryptoNight_arm.h index 43cfab394c..94731a1c1b 100644 --- a/src/crypto/CryptoNight_arm.h +++ b/src/crypto/CryptoNight_arm.h @@ -126,21 +126,11 @@ static inline __attribute__((always_inline)) uint64_t _mm_cvtsi128_si64(__m128i #define EXTRACT64(X) _mm_cvtsi128_si64(X) -# define SHUFFLE_PHASE_1(l, idx, bx0, bx1, ax) \ +# define SHUFFLE_PHASE_1(l, idx, bx0, bx1, ax, reverse) \ { \ - const uint64x2_t chunk1 = vld1q_u64((uint64_t*)((l) + ((idx) ^ 0x10))); \ - const uint64x2_t chunk2 = vld1q_u64((uint64_t*)((l) + ((idx) ^ 0x20))); \ - const uint64x2_t chunk3 = vld1q_u64((uint64_t*)((l) + ((idx) ^ 0x30))); \ - vst1q_u64((uint64_t*)((l) + ((idx) ^ 0x10)), vaddq_u64(chunk3, vreinterpretq_u64_u8(bx1))); \ - vst1q_u64((uint64_t*)((l) + ((idx) ^ 0x20)), vaddq_u64(chunk1, vreinterpretq_u64_u8(bx0))); \ - vst1q_u64((uint64_t*)((l) + ((idx) ^ 0x30)), vaddq_u64(chunk2, vreinterpretq_u64_u8(ax))); \ -} - -# define SHUFFLE_PHASE_1_RWZ(l, idx, bx0, bx1, ax) \ -{ \ - const uint64x2_t chunk1 = vld1q_u64((uint64_t*)((l) + ((idx) ^ 0x30))); \ + const uint64x2_t chunk1 = vld1q_u64((uint64_t*)((l) + ((idx) ^ (reverse ? 0x30 : 0x10)))); \ const uint64x2_t chunk2 = vld1q_u64((uint64_t*)((l) + ((idx) ^ 0x20))); \ - const uint64x2_t chunk3 = vld1q_u64((uint64_t*)((l) + ((idx) ^ 0x10))); \ + const uint64x2_t chunk3 = vld1q_u64((uint64_t*)((l) + ((idx) ^ (reverse ? 0x10 : 0x30)))); \ vst1q_u64((uint64_t*)((l) + ((idx) ^ 0x10)), vaddq_u64(chunk3, vreinterpretq_u64_u8(bx1))); \ vst1q_u64((uint64_t*)((l) + ((idx) ^ 0x20)), vaddq_u64(chunk1, vreinterpretq_u64_u8(bx0))); \ vst1q_u64((uint64_t*)((l) + ((idx) ^ 0x30)), vaddq_u64(chunk2, vreinterpretq_u64_u8(ax))); \ @@ -161,27 +151,20 @@ static inline __attribute__((always_inline)) uint64_t _mm_cvtsi128_si64(__m128i sqrt_result##idx += ((r2 + b > sqrt_input) ? -1 : 0) + ((r2 + (1ULL << 32) < sqrt_input - s) ? 
1 : 0); \ } -# define SHUFFLE_PHASE_2(l, idx, bx0, bx1, ax, lo, hi) \ -{ \ - const uint64x2_t chunk1 = veorq_u64(vld1q_u64((uint64_t*)((l) + ((idx) ^ 0x10))), vcombine_u64(vcreate_u64(hi), vcreate_u64(lo))); \ - const uint64x2_t chunk2 = vld1q_u64((uint64_t*)((l) + ((idx) ^ 0x20))); \ - const uint64x2_t chunk3 = vld1q_u64((uint64_t*)((l) + ((idx) ^ 0x30))); \ - hi ^= ((uint64_t*)((l) + ((idx) ^ 0x20)))[0]; \ - lo ^= ((uint64_t*)((l) + ((idx) ^ 0x20)))[1]; \ - vst1q_u64((uint64_t*)((l) + ((idx) ^ 0x10)), vaddq_u64(chunk3, vreinterpretq_u64_u8(bx1))); \ - vst1q_u64((uint64_t*)((l) + ((idx) ^ 0x20)), vaddq_u64(chunk1, vreinterpretq_u64_u8(bx0))); \ - vst1q_u64((uint64_t*)((l) + ((idx) ^ 0x30)), vaddq_u64(chunk2, vreinterpretq_u64_u8(ax))); \ -} - -# define SHUFFLE_PHASE_2_RWZ(l, idx, bx0, bx1, ax, lo, hi) \ +# define SHUFFLE_PHASE_2(l, idx, bx0, bx1, ax, lo, hi, reverse) \ { \ const uint64x2_t chunk1 = veorq_u64(vld1q_u64((uint64_t*)((l) + ((idx) ^ 0x10))), vcombine_u64(vcreate_u64(hi), vcreate_u64(lo))); \ const uint64x2_t chunk2 = vld1q_u64((uint64_t*)((l) + ((idx) ^ 0x20))); \ const uint64x2_t chunk3 = vld1q_u64((uint64_t*)((l) + ((idx) ^ 0x30))); \ hi ^= ((uint64_t*)((l) + ((idx) ^ 0x20)))[0]; \ lo ^= ((uint64_t*)((l) + ((idx) ^ 0x20)))[1]; \ - vst1q_u64((uint64_t*)((l) + ((idx) ^ 0x10)), vaddq_u64(chunk1, vreinterpretq_u64_u8(bx1))); \ - vst1q_u64((uint64_t*)((l) + ((idx) ^ 0x20)), vaddq_u64(chunk3, vreinterpretq_u64_u8(bx0))); \ + if (reverse) { \ + vst1q_u64((uint64_t*)((l) + ((idx) ^ 0x10)), vaddq_u64(chunk1, vreinterpretq_u64_u8(bx1))); \ + vst1q_u64((uint64_t*)((l) + ((idx) ^ 0x20)), vaddq_u64(chunk3, vreinterpretq_u64_u8(bx0))); \ + } else { \ + vst1q_u64((uint64_t*)((l) + ((idx) ^ 0x10)), vaddq_u64(chunk3, vreinterpretq_u64_u8(bx1))); \ + vst1q_u64((uint64_t*)((l) + ((idx) ^ 0x20)), vaddq_u64(chunk1, vreinterpretq_u64_u8(bx0))); \ + } \ vst1q_u64((uint64_t*)((l) + ((idx) ^ 0x30)), vaddq_u64(chunk2, vreinterpretq_u64_u8(ax))); \ } @@ -203,7 +186,7 @@ static inline __attribute__((always_inline)) uint64_t _mm_cvtsi128_si64(__m128i r##idx[1] = (uint32_t)(h[12] >> 32); \ r##idx[2] = (uint32_t)(h[13]); \ r##idx[3] = (uint32_t)(h[13] >> 32); \ - v4_random_math_init(code##idx, variant, height); + v4_random_math_init(code##idx, VARIANT, height); # define VARIANT4_RANDOM_MATH(idx, al, ah, cl, bx0, bx1) \ cl ^= (r##idx[0] + r##idx[1]) | ((uint64_t)(r##idx[2] + r##idx[3]) << 32); \ @@ -706,7 +689,7 @@ static inline void cn_implode_scratchpad_heavy(const __m128i* input, __m128i* ou _mm_store_si128(output + 11, xout7); } -template +template class CryptoNightMultiHash { public: @@ -729,8 +712,7 @@ class CryptoNightMultiHash inline static void hashPowV3(const uint8_t* __restrict__ input, size_t size, uint8_t* __restrict__ output, - ScratchPad** __restrict__ scratchPad, - PowVariant variant) + ScratchPad** __restrict__ scratchPad) { //dummy } @@ -739,8 +721,7 @@ class CryptoNightMultiHash size_t size, uint8_t* __restrict__ output, ScratchPad** __restrict__ scratchPad, - uint64_t height, - PowVariant variant) + uint64_t height) { // dummy } @@ -778,8 +759,8 @@ class CryptoNightMultiHash } }; -template -class CryptoNightMultiHash +template +class CryptoNightMultiHash { public: inline static void hash(const uint8_t* __restrict__ input, @@ -912,8 +893,7 @@ class CryptoNightMultiHash inline static void hashPowV3(const uint8_t* __restrict__ input, size_t size, uint8_t* __restrict__ output, - ScratchPad** __restrict__ scratchPad, - PowVariant variant) + ScratchPad** __restrict__ scratchPad) { 
keccak(input, (int) size, scratchPad[0]->state, 200); @@ -943,12 +923,7 @@ class CryptoNightMultiHash cx0 = _mm_aesenc_si128(cx0, _mm_set_epi64x(ah0, al0)); } - if (variant == POW_RWZ) - { - SHUFFLE_PHASE_1_RWZ(l0, (idx0&MASK), bx00, bx10, ax0) - } else { - SHUFFLE_PHASE_1(l0, (idx0&MASK), bx00, bx10, ax0) - } + SHUFFLE_PHASE_1(l0, (idx0&MASK), bx00, bx10, ax0, VARIANT == POW_RWZ) _mm_store_si128((__m128i*) &l0[idx0 & MASK], _mm_xor_si128(bx00, cx0)); @@ -962,12 +937,7 @@ class CryptoNightMultiHash lo = __umul128(idx0, cl, &hi); - if (variant == POW_RWZ) - { - SHUFFLE_PHASE_2_RWZ(l0, (idx0&MASK), bx00, bx10, ax0, lo, hi) - } else { - SHUFFLE_PHASE_2(l0, (idx0&MASK), bx00, bx10, ax0, lo, hi) - } + SHUFFLE_PHASE_2(l0, (idx0&MASK), bx00, bx10, ax0, lo, hi, VARIANT == POW_RWZ) al0 += hi; ah0 += lo; @@ -995,8 +965,7 @@ class CryptoNightMultiHash size_t size, uint8_t* __restrict__ output, ScratchPad** __restrict__ scratchPad, - uint64_t height, - PowVariant variant) + uint64_t height) { keccak(input, (int) size, scratchPad[0]->state, 200); @@ -1039,7 +1008,7 @@ class CryptoNightMultiHash VARIANT4_RANDOM_MATH(0, al0, ah0, cl, bx00, bx10) - if (variant == POW_V4) { + if (VARIANT == POW_V4) { al0 ^= r0[2] | ((uint64_t)(r0[3]) << 32); ah0 ^= r0[0] | ((uint64_t)(r0[1]) << 32); } @@ -1379,8 +1348,8 @@ class CryptoNightMultiHash }; -template -class CryptoNightMultiHash +template +class CryptoNightMultiHash { public: inline static void hash(const uint8_t* __restrict__ input, @@ -1586,8 +1555,7 @@ class CryptoNightMultiHash inline static void hashPowV3(const uint8_t* __restrict__ input, size_t size, uint8_t* __restrict__ output, - ScratchPad** __restrict__ scratchPad, - PowVariant variant) + ScratchPad** __restrict__ scratchPad) { keccak(input, (int) size, scratchPad[0]->state, 200); keccak(input + size, (int) size, scratchPad[1]->state, 200); @@ -1638,14 +1606,8 @@ class CryptoNightMultiHash cx1 = _mm_aesenc_si128(cx1, _mm_set_epi64x(ah1, al1)); } - if (variant == POW_RWZ) - { - SHUFFLE_PHASE_1_RWZ(l0, (idx0&MASK), bx00, bx10, ax0) - SHUFFLE_PHASE_1_RWZ(l1, (idx1&MASK), bx01, bx11, ax1) - } else { - SHUFFLE_PHASE_1(l0, (idx0&MASK), bx00, bx10, ax0) - SHUFFLE_PHASE_1(l1, (idx1&MASK), bx01, bx11, ax1) - } + SHUFFLE_PHASE_1(l0, (idx0&MASK), bx00, bx10, ax0, VARIANT == POW_RWZ) + SHUFFLE_PHASE_1(l1, (idx1&MASK), bx01, bx11, ax1, VARIANT == POW_RWZ) _mm_store_si128((__m128i*) &l0[idx0 & MASK], _mm_xor_si128(bx00, cx0)); _mm_store_si128((__m128i*) &l1[idx1 & MASK], _mm_xor_si128(bx01, cx1)); @@ -1661,12 +1623,7 @@ class CryptoNightMultiHash lo = __umul128(idx0, cl, &hi); - if (variant == POW_RWZ) - { - SHUFFLE_PHASE_2_RWZ(l0, (idx0&MASK), bx00, bx10, ax0, lo, hi) - } else { - SHUFFLE_PHASE_2(l0, (idx0&MASK), bx00, bx10, ax0, lo, hi) - } + SHUFFLE_PHASE_2(l0, (idx0&MASK), bx00, bx10, ax0, lo, hi, VARIANT == POW_RWZ) al0 += hi; ah0 += lo; @@ -1689,12 +1646,7 @@ class CryptoNightMultiHash lo = __umul128(idx1, cl, &hi); - if (variant == POW_RWZ) - { - SHUFFLE_PHASE_2_RWZ(l1, (idx1&MASK), bx01, bx11, ax1, lo, hi) - } else { - SHUFFLE_PHASE_2(l1, (idx1&MASK), bx01, bx11, ax1, lo, hi) - } + SHUFFLE_PHASE_2(l1, (idx1&MASK), bx01, bx11, ax1, lo, hi, VARIANT == POW_RWZ) al1 += hi; ah1 += lo; @@ -1725,8 +1677,7 @@ class CryptoNightMultiHash size_t size, uint8_t* __restrict__ output, ScratchPad** __restrict__ scratchPad, - uint64_t height, - PowVariant variant) + uint64_t height) { keccak(input, (int) size, scratchPad[0]->state, 200); keccak(input + size, (int) size, scratchPad[1]->state, 200); @@ -1789,7 +1740,7 @@ 
class CryptoNightMultiHash VARIANT4_RANDOM_MATH(0, al0, ah0, cl, bx00, bx10) - if (variant == POW_V4) { + if (VARIANT == POW_V4) { al0 ^= r0[2] | ((uint64_t)(r0[3]) << 32); ah0 ^= r0[0] | ((uint64_t)(r0[1]) << 32); } @@ -1817,7 +1768,7 @@ class CryptoNightMultiHash VARIANT4_RANDOM_MATH(1, al1, ah1, cl, bx01, bx11) - if (variant == POW_V4) { + if (VARIANT == POW_V4) { al1 ^= r1[2] | ((uint64_t)(r1[3]) << 32); ah1 ^= r1[0] | ((uint64_t)(r1[1]) << 32); } @@ -2352,8 +2303,8 @@ class CryptoNightMultiHash } }; -template -class CryptoNightMultiHash +template +class CryptoNightMultiHash { public: inline static void hash(const uint8_t* __restrict__ input, @@ -2636,8 +2587,7 @@ class CryptoNightMultiHash inline static void hashPowV3(const uint8_t* __restrict__ input, size_t size, uint8_t* __restrict__ output, - ScratchPad** __restrict__ scratchPad, - PowVariant variant) + ScratchPad** __restrict__ scratchPad) { keccak(input, (int) size, scratchPad[0]->state, 200); keccak(input + size, (int) size, scratchPad[1]->state, 200); @@ -2704,15 +2654,9 @@ class CryptoNightMultiHash cx2 = _mm_aesenc_si128(cx2, _mm_set_epi64x(ah2, al2)); } - if (variant == POW_RWZ) { - SHUFFLE_PHASE_1_RWZ(l0, (idx0&MASK), bx00, bx10, ax0) - SHUFFLE_PHASE_1_RWZ(l1, (idx1&MASK), bx01, bx11, ax1) - SHUFFLE_PHASE_1_RWZ(l2, (idx2&MASK), bx02, bx12, ax2) - } else { - SHUFFLE_PHASE_1(l0, (idx0&MASK), bx00, bx10, ax0) - SHUFFLE_PHASE_1(l1, (idx1&MASK), bx01, bx11, ax1) - SHUFFLE_PHASE_1(l2, (idx2&MASK), bx02, bx12, ax2) - } + SHUFFLE_PHASE_1(l0, (idx0&MASK), bx00, bx10, ax0, VARIANT == POW_RWZ) + SHUFFLE_PHASE_1(l1, (idx1&MASK), bx01, bx11, ax1, VARIANT == POW_RWZ) + SHUFFLE_PHASE_1(l2, (idx2&MASK), bx02, bx12, ax2, VARIANT == POW_RWZ) _mm_store_si128((__m128i*) &l0[idx0 & MASK], _mm_xor_si128(bx00, cx0)); _mm_store_si128((__m128i*) &l1[idx1 & MASK], _mm_xor_si128(bx01, cx1)); @@ -2730,11 +2674,7 @@ class CryptoNightMultiHash lo = __umul128(idx0, cl, &hi); - if (variant == POW_RWZ) { - SHUFFLE_PHASE_2_RWZ(l0, (idx0 & MASK), bx00, bx10, ax0, lo, hi) - } else { - SHUFFLE_PHASE_2(l0, (idx0 & MASK), bx00, bx10, ax0, lo, hi) - } + SHUFFLE_PHASE_2(l0, (idx0&MASK), bx00, bx10, ax0, lo, hi, VARIANT == POW_RWZ) al0 += hi; ah0 += lo; @@ -2757,11 +2697,7 @@ class CryptoNightMultiHash lo = __umul128(idx1, cl, &hi); - if (variant == POW_RWZ) { - SHUFFLE_PHASE_2_RWZ(l1, (idx1&MASK), bx01, bx11, ax1, lo, hi) - } else { - SHUFFLE_PHASE_2(l1, (idx1&MASK), bx01, bx11, ax1, lo, hi) - } + SHUFFLE_PHASE_2(l1, (idx1&MASK), bx01, bx11, ax1, lo, hi, VARIANT == POW_RWZ) al1 += hi; ah1 += lo; @@ -2784,11 +2720,7 @@ class CryptoNightMultiHash lo = __umul128(idx2, cl, &hi); - if (variant == POW_RWZ) { - SHUFFLE_PHASE_2_RWZ(l2, (idx2&MASK), bx02, bx12, ax2, lo, hi) - } else { - SHUFFLE_PHASE_2(l2, (idx2&MASK), bx02, bx12, ax2, lo, hi) - } + SHUFFLE_PHASE_2(l2, (idx2&MASK), bx02, bx12, ax2, lo, hi, VARIANT == POW_RWZ) al2 += hi; ah2 += lo; @@ -2822,8 +2754,7 @@ class CryptoNightMultiHash size_t size, uint8_t* __restrict__ output, ScratchPad** __restrict__ scratchPad, - uint64_t height, - PowVariant variant) + uint64_t height) { keccak(input, (int) size, scratchPad[0]->state, 200); keccak(input + size, (int) size, scratchPad[1]->state, 200); @@ -2904,7 +2835,7 @@ class CryptoNightMultiHash VARIANT4_RANDOM_MATH(0, al0, ah0, cl, bx00, bx10) - if (variant == POW_V4) { + if (VARIANT == POW_V4) { al0 ^= r0[2] | ((uint64_t)(r0[3]) << 32); ah0 ^= r0[0] | ((uint64_t)(r0[1]) << 32); } @@ -2932,7 +2863,7 @@ class CryptoNightMultiHash VARIANT4_RANDOM_MATH(1, al1, ah1, cl, 
bx01, bx11) - if (variant == POW_V4) { + if (VARIANT == POW_V4) { al1 ^= r1[2] | ((uint64_t)(r1[3]) << 32); ah1 ^= r1[0] | ((uint64_t)(r1[1]) << 32); } @@ -2960,7 +2891,7 @@ class CryptoNightMultiHash VARIANT4_RANDOM_MATH(2, al2, ah2, cl, bx02, bx12) - if (variant == POW_V4) { + if (VARIANT == POW_V4) { al2 ^= r2[2] | ((uint64_t)(r2[3]) << 32); ah2 ^= r2[0] | ((uint64_t)(r2[1]) << 32); } @@ -3695,8 +3626,8 @@ class CryptoNightMultiHash } }; -template -class CryptoNightMultiHash +template +class CryptoNightMultiHash { public: inline static void hash(const uint8_t* __restrict__ input, @@ -4052,8 +3983,7 @@ class CryptoNightMultiHash inline static void hashPowV3(const uint8_t* __restrict__ input, size_t size, uint8_t* __restrict__ output, - ScratchPad** __restrict__ scratchPad, - PowVariant variant) + ScratchPad** __restrict__ scratchPad) { keccak(input, (int) size, scratchPad[0]->state, 200); keccak(input + size, (int) size, scratchPad[1]->state, 200); @@ -4136,17 +4066,10 @@ class CryptoNightMultiHash cx3 = _mm_aesenc_si128(cx3, ax3); } - if (variant == POW_RWZ) { - SHUFFLE_PHASE_1_RWZ(l0, (idx0&MASK), bx00, bx10, ax0) - SHUFFLE_PHASE_1_RWZ(l1, (idx1&MASK), bx01, bx11, ax1) - SHUFFLE_PHASE_1_RWZ(l2, (idx2&MASK), bx02, bx12, ax2) - SHUFFLE_PHASE_1_RWZ(l3, (idx3&MASK), bx03, bx13, ax3) - } else { - SHUFFLE_PHASE_1(l0, (idx0&MASK), bx00, bx10, ax0) - SHUFFLE_PHASE_1(l1, (idx1&MASK), bx01, bx11, ax1) - SHUFFLE_PHASE_1(l2, (idx2&MASK), bx02, bx12, ax2) - SHUFFLE_PHASE_1(l3, (idx3&MASK), bx03, bx13, ax3) - } + SHUFFLE_PHASE_1(l0, (idx0&MASK), bx00, bx10, ax0, VARIANT == POW_RWZ) + SHUFFLE_PHASE_1(l1, (idx1&MASK), bx01, bx11, ax1, VARIANT == POW_RWZ) + SHUFFLE_PHASE_1(l2, (idx2&MASK), bx02, bx12, ax2, VARIANT == POW_RWZ) + SHUFFLE_PHASE_1(l3, (idx3&MASK), bx03, bx13, ax3, VARIANT == POW_RWZ) _mm_store_si128((__m128i*) &l0[idx0 & MASK], _mm_xor_si128(bx00, cx0)); _mm_store_si128((__m128i*) &l1[idx1 & MASK], _mm_xor_si128(bx01, cx1)); @@ -4166,11 +4089,7 @@ class CryptoNightMultiHash lo = __umul128(idx0, cl, &hi); - if (variant == POW_RWZ) { - SHUFFLE_PHASE_2_RWZ(l0, (idx0&MASK), bx00, bx10, ax0, lo, hi) - } else { - SHUFFLE_PHASE_2(l0, (idx0&MASK), bx00, bx10, ax0, lo, hi) - } + SHUFFLE_PHASE_2(l0, (idx0&MASK), bx00, bx10, ax0, lo, hi, VARIANT == POW_RWZ) al0 += hi; ah0 += lo; @@ -4193,11 +4112,7 @@ class CryptoNightMultiHash lo = __umul128(idx1, cl, &hi); - if (variant == POW_RWZ) { - SHUFFLE_PHASE_2_RWZ(l1, (idx1&MASK), bx01, bx11, ax1, lo, hi) - } else { - SHUFFLE_PHASE_2(l1, (idx1&MASK), bx01, bx11, ax1, lo, hi) - } + SHUFFLE_PHASE_2(l1, (idx1&MASK), bx01, bx11, ax1, lo, hi, VARIANT == POW_RWZ) al1 += hi; ah1 += lo; @@ -4220,11 +4135,7 @@ class CryptoNightMultiHash lo = __umul128(idx2, cl, &hi); - if (variant == POW_RWZ) { - SHUFFLE_PHASE_2_RWZ(l2, (idx2&MASK), bx02, bx12, ax2, lo, hi) - } else { - SHUFFLE_PHASE_2(l2, (idx2&MASK), bx02, bx12, ax2, lo, hi) - } + SHUFFLE_PHASE_2(l2, (idx2&MASK), bx02, bx12, ax2, lo, hi, VARIANT == POW_RWZ) al2 += hi; ah2 += lo; @@ -4247,11 +4158,7 @@ class CryptoNightMultiHash lo = __umul128(idx3, cl, &hi); - if (variant == POW_RWZ) { - SHUFFLE_PHASE_2_RWZ(l3, (idx3&MASK), bx03, bx13, ax3, lo, hi) - } else { - SHUFFLE_PHASE_2(l3, (idx3&MASK), bx03, bx13, ax3, lo, hi) - } + SHUFFLE_PHASE_2(l3, (idx3&MASK), bx03, bx13, ax3, lo, hi, VARIANT == POW_RWZ) al3 += hi; ah3 += lo; @@ -4288,8 +4195,7 @@ class CryptoNightMultiHash size_t size, uint8_t* __restrict__ output, ScratchPad** __restrict__ scratchPad, - uint64_t height, - PowVariant variant) + uint64_t height) { 
keccak(input, (int) size, scratchPad[0]->state, 200); keccak(input + size, (int) size, scratchPad[1]->state, 200); @@ -4388,7 +4294,7 @@ class CryptoNightMultiHash VARIANT4_RANDOM_MATH(0, al0, ah0, cl, bx00, bx10) - if (variant == POW_V4) { + if (VARIANT == POW_V4) { al0 ^= r0[2] | ((uint64_t)(r0[3]) << 32); ah0 ^= r0[0] | ((uint64_t)(r0[1]) << 32); } @@ -4416,7 +4322,7 @@ class CryptoNightMultiHash VARIANT4_RANDOM_MATH(1, al1, ah1, cl, bx01, bx11) - if (variant == POW_V4) { + if (VARIANT == POW_V4) { al1 ^= r1[2] | ((uint64_t)(r1[3]) << 32); ah1 ^= r1[0] | ((uint64_t)(r1[1]) << 32); } @@ -4445,7 +4351,7 @@ class CryptoNightMultiHash VARIANT4_RANDOM_MATH(2, al2, ah2, cl, bx02, bx12) - if (variant == POW_V4) { + if (VARIANT == POW_V4) { al2 ^= r2[2] | ((uint64_t)(r2[3]) << 32); ah2 ^= r2[0] | ((uint64_t)(r2[1]) << 32); } @@ -4473,7 +4379,7 @@ class CryptoNightMultiHash VARIANT4_RANDOM_MATH(3, al3, ah3, cl, bx03, bx13) - if (variant == POW_V4) { + if (VARIANT == POW_V4) { al3 ^= r3[2] | ((uint64_t)(r3[3]) << 32); ah3 ^= r3[0] | ((uint64_t)(r3[1]) << 32); } @@ -4735,8 +4641,8 @@ class CryptoNightMultiHash } }; -template -class CryptoNightMultiHash +template +class CryptoNightMultiHash {// public: inline static void hash(const uint8_t* __restrict__ input, @@ -5164,8 +5070,7 @@ class CryptoNightMultiHash inline static void hashPowV3(const uint8_t* __restrict__ input, size_t size, uint8_t* __restrict__ output, - ScratchPad** __restrict__ scratchPad, - PowVariant variant) + ScratchPad** __restrict__ scratchPad) { keccak(input, (int) size, scratchPad[0]->state, 200); keccak(input + size, (int) size, scratchPad[1]->state, 200); @@ -5264,19 +5169,11 @@ class CryptoNightMultiHash cx4 = _mm_aesenc_si128(cx4, ax4); } - if (variant == POW_RWZ) { - SHUFFLE_PHASE_1_RWZ(l0, (idx0&MASK), bx00, bx10, ax0) - SHUFFLE_PHASE_1_RWZ(l1, (idx1&MASK), bx01, bx11, ax1) - SHUFFLE_PHASE_1_RWZ(l2, (idx2&MASK), bx02, bx12, ax2) - SHUFFLE_PHASE_1_RWZ(l3, (idx3&MASK), bx03, bx13, ax3) - SHUFFLE_PHASE_1_RWZ(l4, (idx4&MASK), bx04, bx14, ax4) - } else { - SHUFFLE_PHASE_1(l0, (idx0&MASK), bx00, bx10, ax0) - SHUFFLE_PHASE_1(l1, (idx1&MASK), bx01, bx11, ax1) - SHUFFLE_PHASE_1(l2, (idx2&MASK), bx02, bx12, ax2) - SHUFFLE_PHASE_1(l3, (idx3&MASK), bx03, bx13, ax3) - SHUFFLE_PHASE_1(l4, (idx4&MASK), bx04, bx14, ax4) - } + SHUFFLE_PHASE_1(l0, (idx0&MASK), bx00, bx10, ax0, VARIANT == POW_RWZ) + SHUFFLE_PHASE_1(l1, (idx1&MASK), bx01, bx11, ax1, VARIANT == POW_RWZ) + SHUFFLE_PHASE_1(l2, (idx2&MASK), bx02, bx12, ax2, VARIANT == POW_RWZ) + SHUFFLE_PHASE_1(l3, (idx3&MASK), bx03, bx13, ax3, VARIANT == POW_RWZ) + SHUFFLE_PHASE_1(l4, (idx4&MASK), bx04, bx14, ax4, VARIANT == POW_RWZ) _mm_store_si128((__m128i*) &l0[idx0 & MASK], _mm_xor_si128(bx00, cx0)); _mm_store_si128((__m128i*) &l1[idx1 & MASK], _mm_xor_si128(bx01, cx1)); @@ -5298,11 +5195,7 @@ class CryptoNightMultiHash lo = __umul128(idx0, cl, &hi); - if (variant == POW_RWZ) { - SHUFFLE_PHASE_2_RWZ(l0, (idx0 & MASK), bx00, bx10, ax0, lo, hi) - } else { - SHUFFLE_PHASE_2(l0, (idx0 & MASK), bx00, bx10, ax0, lo, hi) - } + SHUFFLE_PHASE_2(l0, (idx0&MASK), bx00, bx10, ax0, lo, hi, VARIANT == POW_RWZ) al0 += hi; ah0 += lo; @@ -5325,11 +5218,7 @@ class CryptoNightMultiHash lo = __umul128(idx1, cl, &hi); - if (variant == POW_RWZ) { - SHUFFLE_PHASE_2_RWZ(l1, (idx1&MASK), bx01, bx11, ax1, lo, hi) - } else { - SHUFFLE_PHASE_2(l1, (idx1&MASK), bx01, bx11, ax1, lo, hi) - } + SHUFFLE_PHASE_2(l1, (idx1&MASK), bx01, bx11, ax1, lo, hi, VARIANT == POW_RWZ) al1 += hi; ah1 += lo; @@ -5352,11 +5241,7 @@ class 
@@ -5352,11 +5241,7 @@ class CryptoNightMultiHash

             lo = __umul128(idx2, cl, &hi);

-            if (variant == POW_RWZ) {
-                SHUFFLE_PHASE_2_RWZ(l2, (idx2&MASK), bx02, bx12, ax2, lo, hi)
-            } else {
-                SHUFFLE_PHASE_2(l2, (idx2&MASK), bx02, bx12, ax2, lo, hi)
-            }
+            SHUFFLE_PHASE_2(l2, (idx2&MASK), bx02, bx12, ax2, lo, hi, VARIANT == POW_RWZ)

             al2 += hi;
             ah2 += lo;
@@ -5379,11 +5264,7 @@ class CryptoNightMultiHash

             lo = __umul128(idx3, cl, &hi);

-            if (variant == POW_RWZ) {
-                SHUFFLE_PHASE_2_RWZ(l3, (idx3&MASK), bx03, bx13, ax3, lo, hi)
-            } else {
-                SHUFFLE_PHASE_2(l3, (idx3&MASK), bx03, bx13, ax3, lo, hi)
-            }
+            SHUFFLE_PHASE_2(l3, (idx3&MASK), bx03, bx13, ax3, lo, hi, VARIANT == POW_RWZ)

             al3 += hi;
             ah3 += lo;
@@ -5406,11 +5287,7 @@ class CryptoNightMultiHash

             lo = __umul128(idx4, cl, &hi);

-            if (variant == POW_RWZ) {
-                SHUFFLE_PHASE_2_RWZ(l4, (idx4&MASK), bx04, bx14, ax4, lo, hi)
-            } else {
-                SHUFFLE_PHASE_2(l4, (idx4&MASK), bx04, bx14, ax4, lo, hi)
-            }
+            SHUFFLE_PHASE_2(l4, (idx4&MASK), bx04, bx14, ax4, lo, hi, VARIANT == POW_RWZ)

             al4 += hi;
             ah4 += lo;
@@ -5450,8 +5327,7 @@ class CryptoNightMultiHash
                                  size_t size,
                                  uint8_t* __restrict__ output,
                                  ScratchPad** __restrict__ scratchPad,
-                                 uint64_t height,
-                                 PowVariant variant)
+                                 uint64_t height)
     {
         keccak(input, (int) size, scratchPad[0]->state, 200);
         keccak(input + size, (int) size, scratchPad[1]->state, 200);
@@ -5568,7 +5444,7 @@ class CryptoNightMultiHash

             VARIANT4_RANDOM_MATH(0, al0, ah0, cl, bx00, bx10)

-            if (variant == POW_V4) {
+            if (VARIANT == POW_V4) {
                 al0 ^= r0[2] | ((uint64_t)(r0[3]) << 32);
                 ah0 ^= r0[0] | ((uint64_t)(r0[1]) << 32);
             }
@@ -5596,7 +5472,7 @@ class CryptoNightMultiHash

             VARIANT4_RANDOM_MATH(1, al1, ah1, cl, bx01, bx11)

-            if (variant == POW_V4) {
+            if (VARIANT == POW_V4) {
                 al1 ^= r1[2] | ((uint64_t)(r1[3]) << 32);
                 ah1 ^= r1[0] | ((uint64_t)(r1[1]) << 32);
             }
@@ -5624,7 +5500,7 @@ class CryptoNightMultiHash

             VARIANT4_RANDOM_MATH(2, al2, ah2, cl, bx02, bx12)

-            if (variant == POW_V4) {
+            if (VARIANT == POW_V4) {
                 al2 ^= r2[2] | ((uint64_t)(r2[3]) << 32);
                 ah2 ^= r2[0] | ((uint64_t)(r2[1]) << 32);
             }
@@ -5652,7 +5528,7 @@ class CryptoNightMultiHash

             VARIANT4_RANDOM_MATH(3, al3, ah3, cl, bx03, bx13)

-            if (variant == POW_V4) {
+            if (VARIANT == POW_V4) {
                 al3 ^= r3[2] | ((uint64_t)(r3[3]) << 32);
                 ah3 ^= r3[0] | ((uint64_t)(r3[1]) << 32);
             }
@@ -5680,7 +5556,7 @@ class CryptoNightMultiHash

             VARIANT4_RANDOM_MATH(4, al4, ah4, cl, bx04, bx14)

-            if (variant == POW_V4) {
+            if (VARIANT == POW_V4) {
                 al4 ^= r4[2] | ((uint64_t)(r4[3]) << 32);
                 ah4 ^= r4[0] | ((uint64_t)(r4[1]) << 32);
             }
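The change running through both files is the same everywhere: the runtime PowVariant argument disappears from the hash functions and becomes the non-type template parameter VARIANT, so tests such as `if (VARIANT == POW_V4)` are decided per instantiation and the untaken branch drops out of the compiled main loop. A minimal sketch of the mechanism with simplified names (not the real hashing code):

    #include <cstdint>

    enum PowVariant { POW_V2, POW_V4, POW_RWZ };

    // Runtime dispatch: the comparison is evaluated on every call.
    inline void step_runtime(uint64_t& al, const uint32_t* r, PowVariant variant) {
        if (variant == POW_V4) {
            al ^= r[2] | ((uint64_t)(r[3]) << 32);
        }
    }

    // Compile-time dispatch: each instantiation either always contains the
    // XOR or never does; no branch survives in the emitted loop body.
    template <PowVariant VARIANT>
    inline void step_templated(uint64_t& al, const uint32_t* r) {
        if (VARIANT == POW_V4) {   // constant per instantiation, folded away
            al ^= r[2] | ((uint64_t)(r[3]) << 32);
        }
    }

    int main() {
        uint64_t al = 1;
        uint32_t r[4] = {1, 2, 3, 4};
        step_templated<POW_V4>(al, r);   // XOR compiled in
        step_templated<POW_V2>(al, r);   // XOR compiled out
        return (int) al;
    }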
diff --git a/src/crypto/CryptoNight_x86.h b/src/crypto/CryptoNight_x86.h
index 8a7b86ff45..203431fd97 100644
--- a/src/crypto/CryptoNight_x86.h
+++ b/src/crypto/CryptoNight_x86.h
@@ -195,9 +195,9 @@ static inline uint64_t __umul128(uint64_t multiplier, uint64_t multiplicand, uin
 # define SHUFFLE_PHASE_1(l, idx, bx0, bx1, ax, reverse) \
 { \
-    const __m128i chunk1 = _mm_load_si128((__m128i *)((l) + ((idx) ^ (reverse ? 0x30 : 0x10)))); \
+    const __m128i chunk1 = _mm_load_si128((__m128i *)((l) + ((idx) ^ 0x10))); \
     const __m128i chunk2 = _mm_load_si128((__m128i *)((l) + ((idx) ^ 0x20))); \
-    const __m128i chunk3 = _mm_load_si128((__m128i *)((l) + ((idx) ^ (reverse ? 0x10 : 0x30)))); \
+    const __m128i chunk3 = _mm_load_si128((__m128i *)((l) + ((idx) ^ 0x30))); \
     _mm_store_si128((__m128i *)((l) + ((idx) ^ 0x10)), _mm_add_epi64(chunk3, bx1)); \
     _mm_store_si128((__m128i *)((l) + ((idx) ^ 0x20)), _mm_add_epi64(chunk1, bx0)); \
     _mm_store_si128((__m128i *)((l) + ((idx) ^ 0x30)), _mm_add_epi64(chunk2, ax)); \
@@ -221,13 +221,8 @@ static inline uint64_t __umul128(uint64_t multiplier, uint64_t multiplicand, uin
     const __m128i chunk3 = _mm_load_si128((__m128i *)((l) + ((idx) ^ 0x30))); \
     hi ^= ((uint64_t*)((l) + ((idx) ^ 0x20)))[0]; \
     lo ^= ((uint64_t*)((l) + ((idx) ^ 0x20)))[1]; \
-    if (reverse) { \
-        _mm_store_si128((__m128i *)((l) + ((idx) ^ 0x10)), _mm_add_epi64(chunk1, bx1)); \
-        _mm_store_si128((__m128i *)((l) + ((idx) ^ 0x20)), _mm_add_epi64(chunk3, bx0)); \
-    } else { \
     _mm_store_si128((__m128i *)((l) + ((idx) ^ 0x10)), _mm_add_epi64(chunk3, bx1)); \
     _mm_store_si128((__m128i *)((l) + ((idx) ^ 0x20)), _mm_add_epi64(chunk1, bx0)); \
-    } \
     _mm_store_si128((__m128i *)((l) + ((idx) ^ 0x30)), _mm_add_epi64(chunk2, ax)); \
 }
@@ -249,7 +244,7 @@ static inline uint64_t __umul128(uint64_t multiplier, uint64_t multiplicand, uin
     r##idx[1] = (uint32_t)(h[12] >> 32); \
     r##idx[2] = (uint32_t)(h[13]); \
     r##idx[3] = (uint32_t)(h[13] >> 32); \
-    v4_random_math_init(code##idx, variant, height);
+    v4_random_math_init(code##idx, VARIANT, height);

 # define VARIANT4_RANDOM_MATH(idx, al, ah, cl, bx0, bx1) \
     cl ^= (r##idx[0] + r##idx[1]) | ((uint64_t)(r##idx[2] + r##idx[3]) << 32); \
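For orientation: the `(idx) ^ 0x10 / 0x20 / 0x30` arithmetic in both shuffle macros addresses the three sibling 16-byte chunks that share one 64-byte cache line with the chunk at idx, whose low four offset bits are clear. A tiny illustration with an arbitrary example offset:

    #include <cstdint>
    #include <cstdio>

    int main() {
        uint64_t idx = 0x1A40;   // example 64-byte-aligned scratchpad offset
        // XOR-ing bits 4..5 walks the other three 16-byte slots of the line.
        for (uint64_t d = 0x10; d <= 0x30; d += 0x10) {
            printf("sibling chunk at 0x%llx\n", (unsigned long long)(idx ^ d));
        }
        return 0;
    }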
@@ -661,7 +656,7 @@
     return r;
 }

 // n-Loop version. Seems to be little bit slower then the hardcoded one.
-template <size_t ITERATIONS, size_t INDEX_SHIFT, size_t MEM, size_t MASK, bool SOFT_AES, size_t NUM_HASH_BLOCKS>
+template <size_t ITERATIONS, size_t INDEX_SHIFT, size_t MEM, size_t MASK, bool SOFT_AES, PowVariant VARIANT, size_t NUM_HASH_BLOCKS>
 class CryptoNightMultiHash
 {
 public:
@@ -685,8 +680,7 @@ class CryptoNightMultiHash
                                      size_t size,
                                      uint8_t* __restrict__ output,
                                      ScratchPad** __restrict__ scratchPad,
-                                     AsmOptimization asmOptimization,
-                                     PowVariant variant)
+                                     AsmOptimization asmOptimization)
     {
         // dummy
     }
@@ -695,8 +689,7 @@ class CryptoNightMultiHash
     inline static void hashPowV3(const uint8_t* __restrict__ input,
                                  size_t size,
                                  uint8_t* __restrict__ output,
-                                 ScratchPad** __restrict__ scratchPad,
-                                 PowVariant variant)
+                                 ScratchPad** __restrict__ scratchPad)
     {
         // dummy
     }
@@ -705,8 +698,7 @@ class CryptoNightMultiHash
                                      size_t size,
                                      uint8_t* __restrict__ output,
                                      ScratchPad** __restrict__ scratchPad,
-                                     AsmOptimization asmOptimization,
-                                     PowVariant variant)
+                                     AsmOptimization asmOptimization)
     {
         // dummy
     }
@@ -715,8 +707,7 @@ class CryptoNightMultiHash
                                  size_t size,
                                  uint8_t* __restrict__ output,
                                  ScratchPad** __restrict__ scratchPad,
-                                 uint64_t height,
-                                 PowVariant variant)
+                                 uint64_t height)
     {
         // dummy
     }
@@ -726,8 +717,7 @@ class CryptoNightMultiHash
                                      uint8_t* __restrict__ output,
                                      ScratchPad** __restrict__ scratchPad,
                                      uint64_t height,
-                                     AsmOptimization asmOptimization,
-                                     PowVariant variant)
+                                     AsmOptimization asmOptimization)
     {
         // dummy
     }
@@ -765,8 +755,8 @@ class CryptoNightMultiHash
     }
 };

-template <size_t ITERATIONS, size_t INDEX_SHIFT, size_t MEM, size_t MASK, bool SOFT_AES>
-class CryptoNightMultiHash<ITERATIONS, INDEX_SHIFT, MEM, MASK, SOFT_AES, 1>
+template <size_t ITERATIONS, size_t INDEX_SHIFT, size_t MEM, size_t MASK, bool SOFT_AES, PowVariant VARIANT>
+class CryptoNightMultiHash<ITERATIONS, INDEX_SHIFT, MEM, MASK, SOFT_AES, VARIANT, 1>
 {
 public:
     inline static void hash(const uint8_t* __restrict__ input,
@@ -900,8 +890,7 @@ class CryptoNightMultiHash
                                      size_t size,
                                      uint8_t* __restrict__ output,
                                      ScratchPad** __restrict__ scratchPad,
-                                     AsmOptimization asmOptimization,
-                                     PowVariant variant)
+                                     AsmOptimization asmOptimization)
     {
         keccak(static_cast<const uint8_t*>(input), (int) size, scratchPad[0]->state, 200);
@@ -922,7 +911,7 @@ class CryptoNightMultiHash
         if (SOFT_AES) {
             scratchPad[0]->t_fn = (const uint32_t*)saes_table;

-            switch (variant)
+            switch (VARIANT)
             {
                 case POW_MSR:
                     cnv1_main_loop_fast_soft_aes_sandybridge_asm(scratchPad[0]);
@@ -943,7 +932,7 @@ class CryptoNightMultiHash
                     break;
             }
         } else {
-            switch (variant)
+            switch (VARIANT)
             {
                 case POW_MSR:
                     cnv1_main_loop_fast_sandybridge_asm(scratchPad[0]);
@@ -975,8 +964,7 @@ class CryptoNightMultiHash
     inline static void hashPowV3(const uint8_t* __restrict__ input,
                                  size_t size,
                                  uint8_t* __restrict__ output,
-                                 ScratchPad** __restrict__ scratchPad,
-                                 PowVariant variant)
+                                 ScratchPad** __restrict__ scratchPad)
     {
         keccak(static_cast<const uint8_t*>(input), (int) size, scratchPad[0]->state, 200);
@@ -1009,7 +997,7 @@ class CryptoNightMultiHash
                 cx = _mm_aesenc_si128(cx, ax);
             }

-            SHUFFLE_PHASE_1(l, (idx&MASK), bx0, bx1, ax, variant == POW_RWZ)
+            SHUFFLE_PHASE_1(l, (idx&MASK), bx0, bx1, ax, VARIANT == POW_RWZ)

             _mm_store_si128((__m128i*) &l[idx & MASK], _mm_xor_si128(bx0, cx));
@@ -1023,7 +1011,7 @@ class CryptoNightMultiHash

             lo = __umul128(idx, cl, &hi);

-            SHUFFLE_PHASE_2(l, (idx&MASK), bx0, bx1, ax, lo, hi, variant == POW_RWZ)
+            SHUFFLE_PHASE_2(l, (idx&MASK), bx0, bx1, ax, lo, hi, VARIANT == POW_RWZ)

             al += hi;       // two fence statements are overhead
             ah += lo;
@@ -1049,8 +1037,7 @@ class CryptoNightMultiHash
                                      size_t size,
                                      uint8_t* __restrict__ output,
                                      ScratchPad** __restrict__ scratchPad,
-                                     AsmOptimization asmOptimization,
-                                     PowVariant variant)
+                                     AsmOptimization asmOptimization)
     {
         const uint8_t* l = scratchPad[0]->memory;
         uint64_t* h = reinterpret_cast<uint64_t*>(scratchPad[0]->state);
@@ -1064,7 +1051,7 @@ class CryptoNightMultiHash
             scratchPad[0]->input = input;
             scratchPad[0]->t_fn = (const uint32_t*)saes_table;

-            switch (variant)
+            switch (VARIANT)
             {
                 case POW_FAST_2:
                     cnv2_main_loop_fastv2_soft_aes_sandybridge_asm(scratchPad[0]);
@@ -1083,7 +1070,7 @@ class CryptoNightMultiHash
                     break;
             }
         } else {
-            switch (variant)
+            switch (VARIANT)
             {
                 case POW_FAST_2:
                     cnv2_main_loop_fastv2_ivybridge_asm(scratchPad[0]);
@@ -1106,7 +1093,7 @@ class CryptoNightMultiHash
                 }
             }
         } else if (asmOptimization == AsmOptimization::ASM_RYZEN) {
-            switch (variant)
+            switch (VARIANT)
             {
                 case POW_FAST_2:
                     cnv2_main_loop_fastv2_ryzen_asm(scratchPad[0]);
@@ -1128,7 +1115,7 @@ class CryptoNightMultiHash
                     break;
             }
         } else if (asmOptimization == AsmOptimization::ASM_BULLDOZER) {
-            switch (variant)
+            switch (VARIANT)
             {
                 case POW_FAST_2:
                     cnv2_main_loop_fastv2_bulldozer_asm(scratchPad[0]);
@@ -1162,8 +1149,7 @@ class CryptoNightMultiHash
                                  size_t size,
                                  uint8_t* __restrict__ output,
                                  ScratchPad** __restrict__ scratchPad,
-                                 uint64_t height,
-                                 PowVariant variant)
+                                 uint64_t height)
     {
         keccak(static_cast<const uint8_t*>(input), (int) size, scratchPad[0]->state, 200);
@@ -1208,7 +1194,7 @@ class CryptoNightMultiHash

             VARIANT4_RANDOM_MATH(0, al, ah, cl, bx0, bx1)

-            if (variant == POW_V4) {
+            if (VARIANT == POW_V4) {
                 al ^= r0[2] | ((uint64_t)(r0[3]) << 32);
                 ah ^= r0[0] | ((uint64_t)(r0[1]) << 32);
             }
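The soft-AES branches above emulate via lookup tables (saes_table) what the hardware path does with a single `_mm_aesenc_si128` per iteration: one AES encryption round (ShiftRows, SubBytes, MixColumns, then XOR with the round key, here the lane's ax value). A freestanding illustration of the intrinsic; it assumes AES-NI hardware and compiles with e.g. -maes:

    #include <cstdint>
    #include <cstdio>
    #include <wmmintrin.h>   // _mm_aesenc_si128

    int main() {
        __m128i cx = _mm_set_epi64x(0x0123456789abcdefULL, 0xfedcba9876543210ULL);
        __m128i ax = _mm_set_epi64x(0x1111111111111111ULL, 0x2222222222222222ULL);

        // One AES round with ax as the round key, as in the main loop.
        cx = _mm_aesenc_si128(cx, ax);

        uint64_t out[2];
        _mm_storeu_si128((__m128i*) out, cx);
        printf("%016llx%016llx\n", (unsigned long long) out[1], (unsigned long long) out[0]);
        return 0;
    }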
@@ -1242,8 +1228,7 @@ class CryptoNightMultiHash
                                      uint8_t* __restrict__ output,
                                      ScratchPad** __restrict__ scratchPad,
                                      uint64_t height,
-                                     AsmOptimization asmOptimization,
-                                     PowVariant variant)
+                                     AsmOptimization asmOptimization)
     {
         const uint8_t* l = scratchPad[0]->memory;
         uint64_t* h = reinterpret_cast<uint64_t*>(scratchPad[0]->state);
@@ -1253,17 +1238,17 @@ class CryptoNightMultiHash
 #ifndef XMRIG_NO_ASM
         if (SOFT_AES) {
-            if (!scratchPad[0]->generated_code_data.match(variant, height)) {
+            if (!scratchPad[0]->generated_code_data.match(VARIANT, height)) {
                 V4_Instruction code[256];
-                const int code_size = v4_random_math_init(code, variant, height);
+                const int code_size = v4_random_math_init(code, VARIANT, height);

-                if (variant == POW_WOW) {
+                if (VARIANT == POW_WOW) {
                     wow_soft_aes_compile_code(code, code_size, reinterpret_cast<void*>(scratchPad[0]->generated_code), ASM_OFF);
                 } else {
                     v4_soft_aes_compile_code(code, code_size, reinterpret_cast<void*>(scratchPad[0]->generated_code), ASM_OFF);
                 }

-                scratchPad[0]->generated_code_data.variant = variant;
+                scratchPad[0]->generated_code_data.variant = VARIANT;
                 scratchPad[0]->generated_code_data.height = height;
             }
@@ -1271,17 +1256,17 @@ class CryptoNightMultiHash
             scratchPad[0]->t_fn = (const uint32_t*)saes_table;
             scratchPad[0]->generated_code(scratchPad[0]);
         } else {
-            if (!scratchPad[0]->generated_code_data.match(variant, height)) {
+            if (!scratchPad[0]->generated_code_data.match(VARIANT, height)) {
                 V4_Instruction code[256];
-                const int code_size = v4_random_math_init(code, variant, height);
+                const int code_size = v4_random_math_init(code, VARIANT, height);

-                if (variant == POW_WOW) {
+                if (VARIANT == POW_WOW) {
                     wow_compile_code(code, code_size, reinterpret_cast<void*>(scratchPad[0]->generated_code), asmOptimization);
                 } else {
                     v4_compile_code(code, code_size, reinterpret_cast<void*>(scratchPad[0]->generated_code), asmOptimization);
                 }

-                scratchPad[0]->generated_code_data.variant = variant;
+                scratchPad[0]->generated_code_data.variant = VARIANT;
                 scratchPad[0]->generated_code_data.height = height;
             }
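The generated_code_data bookkeeping above exists because POW_V4/POW_WOW derive a fresh random-math program from the block height: the JIT-compiled loop is reused until the (variant, height) pair changes. A hedged sketch of just that caching policy; GeneratedCodeData and ensure_code here are illustrative stand-ins, not the real ScratchPad types:

    #include <cstdint>

    enum PowVariant { POW_V4, POW_WOW };

    struct GeneratedCodeData {           // stand-in for the scratchpad field
        PowVariant variant;
        uint64_t   height;

        bool match(PowVariant v, uint64_t h) const {
            return variant == v && height == h;
        }
    };

    template <PowVariant VARIANT>
    void ensure_code(GeneratedCodeData& data, uint64_t height) {
        if (!data.match(VARIANT, height)) {
            // ... v4_random_math_init + (wow_|v4_)compile_code would run here ...
            data.variant = VARIANT;      // remember what was compiled
            data.height  = height;
        }
    }

    int main() {
        GeneratedCodeData data = { POW_V4, ~0ull };
        ensure_code<POW_V4>(data, 1000);   // compiles
        ensure_code<POW_V4>(data, 1000);   // cache hit, no recompile
        ensure_code<POW_V4>(data, 1001);   // new height, recompiles
        return 0;
    }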
@@ -1591,8 +1576,8 @@ class CryptoNightMultiHash
     }
 };

-template <size_t ITERATIONS, size_t INDEX_SHIFT, size_t MEM, size_t MASK, bool SOFT_AES>
-class CryptoNightMultiHash<ITERATIONS, INDEX_SHIFT, MEM, MASK, SOFT_AES, 2>
+template <size_t ITERATIONS, size_t INDEX_SHIFT, size_t MEM, size_t MASK, bool SOFT_AES, PowVariant VARIANT>
+class CryptoNightMultiHash<ITERATIONS, INDEX_SHIFT, MEM, MASK, SOFT_AES, VARIANT, 2>
 {
 public:
     inline static void hash(const uint8_t* __restrict__ input,
@@ -1798,8 +1783,7 @@ class CryptoNightMultiHash
                                      size_t size,
                                      uint8_t* __restrict__ output,
                                      ScratchPad** __restrict__ scratchPad,
-                                     AsmOptimization asmOptimization,
-                                     PowVariant variant)
+                                     AsmOptimization asmOptimization)
     {
         // not supported
     }
@@ -1808,8 +1792,7 @@ class CryptoNightMultiHash
     inline static void hashPowV3(const uint8_t* __restrict__ input,
                                  size_t size,
                                  uint8_t* __restrict__ output,
-                                 ScratchPad** __restrict__ scratchPad,
-                                 PowVariant variant)
+                                 ScratchPad** __restrict__ scratchPad)
     {
         keccak((const uint8_t*) input, (int) size, scratchPad[0]->state, 200);
         keccak((const uint8_t*) input + size, (int) size, scratchPad[1]->state, 200);
@@ -1866,8 +1849,8 @@ class CryptoNightMultiHash
                 cx1 = _mm_aesenc_si128(cx1, ax1);
             }

-            SHUFFLE_PHASE_1(l0, (idx0 & MASK), bx00, bx10, ax0, variant == POW_RWZ)
-            SHUFFLE_PHASE_1(l1, (idx1 & MASK), bx01, bx11, ax1, variant == POW_RWZ)
+            SHUFFLE_PHASE_1(l0, (idx0 & MASK), bx00, bx10, ax0, VARIANT == POW_RWZ)
+            SHUFFLE_PHASE_1(l1, (idx1 & MASK), bx01, bx11, ax1, VARIANT == POW_RWZ)

             _mm_store_si128((__m128i*) &l0[idx0 & MASK], _mm_xor_si128(bx00, cx0));
             _mm_store_si128((__m128i*) &l1[idx1 & MASK], _mm_xor_si128(bx01, cx1));
@@ -1888,7 +1871,7 @@ class CryptoNightMultiHash

             lo = __umul128(idx0, cl, &hi);

-            SHUFFLE_PHASE_2(l0, (idx0 & MASK), bx00, bx10, ax0, lo, hi, variant == POW_RWZ)
+            SHUFFLE_PHASE_2(l0, (idx0 & MASK), bx00, bx10, ax0, lo, hi, VARIANT == POW_RWZ)

             al0 += hi;
             ah0 += lo;
@@ -1957,7 +1940,7 @@ class CryptoNightMultiHash

             lo = __umul128(idx1, cl, &hi);

-            SHUFFLE_PHASE_2(l1, (idx1 & MASK), bx01, bx11, ax1, lo, hi, variant == POW_RWZ)
+            SHUFFLE_PHASE_2(l1, (idx1 & MASK), bx01, bx11, ax1, lo, hi, VARIANT == POW_RWZ)

             al1 += hi;
             ah1 += lo;
@@ -1988,8 +1971,7 @@ class CryptoNightMultiHash
                                      size_t size,
                                      uint8_t* __restrict__ output,
                                      ScratchPad** __restrict__ scratchPad,
-                                     AsmOptimization asmOptimization,
-                                     PowVariant variant)
+                                     AsmOptimization asmOptimization)
     {
         keccak((const uint8_t*) input, (int) size, scratchPad[0]->state, 200);
         keccak((const uint8_t*) input + size, (int) size, scratchPad[1]->state, 200);
@@ -2003,7 +1985,7 @@ class CryptoNightMultiHash
         cn_explode_scratchpad<MEM, SOFT_AES>((__m128i*) h1, (__m128i*) l1);

 #ifndef XMRIG_NO_ASM
-        switch(variant) {
+        switch(VARIANT) {
             case POW_FAST_2:
                 cnv2_double_main_loop_fastv2_sandybridge_asm(scratchPad[0], scratchPad[1]);
                 break;
@@ -2039,8 +2021,7 @@ class CryptoNightMultiHash
                                  size_t size,
                                  uint8_t* __restrict__ output,
                                  ScratchPad** __restrict__ scratchPad,
-                                 uint64_t height,
-                                 PowVariant variant)
+                                 uint64_t height)
     {
         keccak((const uint8_t*) input, (int) size, scratchPad[0]->state, 200);
         keccak((const uint8_t*) input + size, (int) size, scratchPad[1]->state, 200);
@@ -2107,7 +2088,7 @@ class CryptoNightMultiHash

             VARIANT4_RANDOM_MATH(0, al0, ah0, cl, bx00, bx10);

-            if (variant == POW_V4) {
+            if (VARIANT == POW_V4) {
                 al0 ^= r0[2] | ((uint64_t)(r0[3]) << 32);
                 ah0 ^= r0[0] | ((uint64_t)(r0[1]) << 32);
             }
@@ -2135,7 +2116,7 @@ class CryptoNightMultiHash

             VARIANT4_RANDOM_MATH(1, al1, ah1, cl, bx01, bx11);

-            if (variant == POW_V4) {
+            if (VARIANT == POW_V4) {
                 al1 ^= r1[2] | ((uint64_t)(r1[3]) << 32);
                 ah1 ^= r1[0] | ((uint64_t)(r1[1]) << 32);
             }
@@ -2173,8 +2154,7 @@ class CryptoNightMultiHash
                                      uint8_t* __restrict__ output,
                                      ScratchPad** __restrict__ scratchPad,
                                      uint64_t height,
-                                     AsmOptimization asmOptimization,
-                                     PowVariant variant)
+                                     AsmOptimization asmOptimization)
     {
         keccak((const uint8_t*) input, (int) size, scratchPad[0]->state, 200);
         keccak((const uint8_t*) input + size, (int) size, scratchPad[1]->state, 200);
@@ -2188,17 +2168,17 @@ class CryptoNightMultiHash
         cn_explode_scratchpad<MEM, SOFT_AES>((__m128i*) h1, (__m128i*) l1);

 #ifndef XMRIG_NO_ASM
-        if (!scratchPad[0]->generated_code_double_data.match(variant, height)) {
+        if (!scratchPad[0]->generated_code_double_data.match(VARIANT, height)) {
             V4_Instruction code[256];
-            const int code_size = v4_random_math_init(code, variant, height);
+            const int code_size = v4_random_math_init(code, VARIANT, height);

-            if (variant == POW_WOW) {
+            if (VARIANT == POW_WOW) {
                 wow_compile_code_double(code, code_size, reinterpret_cast<void*>(scratchPad[0]->generated_code_double), asmOptimization);
             } else {
                 v4_compile_code_double(code, code_size, reinterpret_cast<void*>(scratchPad[0]->generated_code_double), asmOptimization);
             }

-            scratchPad[0]->generated_code_double_data.variant = variant;
+            scratchPad[0]->generated_code_double_data.variant = VARIANT;
             scratchPad[0]->generated_code_double_data.height = height;
         }
@@ -2704,8 +2684,8 @@ class CryptoNightMultiHash
     }
 };

-template <size_t ITERATIONS, size_t INDEX_SHIFT, size_t MEM, size_t MASK, bool SOFT_AES>
-class CryptoNightMultiHash<ITERATIONS, INDEX_SHIFT, MEM, MASK, SOFT_AES, 3>
+template <size_t ITERATIONS, size_t INDEX_SHIFT, size_t MEM, size_t MASK, bool SOFT_AES, PowVariant VARIANT>
+class CryptoNightMultiHash<ITERATIONS, INDEX_SHIFT, MEM, MASK, SOFT_AES, VARIANT, 3>
 {
 public:
     inline static void hash(const uint8_t* __restrict__ input,
@@ -2988,8 +2968,7 @@ class CryptoNightMultiHash
                                      size_t size,
                                      uint8_t* __restrict__ output,
                                      ScratchPad** __restrict__ scratchPad,
-                                     AsmOptimization asmOptimization,
-                                     PowVariant variant)
+                                     AsmOptimization asmOptimization)
     {
         // not supported
     }
@@ -2998,8 +2977,7 @@ class CryptoNightMultiHash
     inline static void hashPowV3(const uint8_t* __restrict__ input,
                                  size_t size,
                                  uint8_t* __restrict__ output,
-                                 ScratchPad** __restrict__ scratchPad,
-                                 PowVariant variant)
+                                 ScratchPad** __restrict__ scratchPad)
     {
         keccak((const uint8_t*) input, (int) size, scratchPad[0]->state, 200);
         keccak((const uint8_t*) input + size, (int) size, scratchPad[1]->state, 200);
@@ -3069,9 +3047,9 @@ class CryptoNightMultiHash
                 cx2 = _mm_aesenc_si128(cx2, ax2);
             }

-            SHUFFLE_PHASE_1(l0, (idx0&MASK), bx00, bx10, ax0, variant == POW_RWZ)
-            SHUFFLE_PHASE_1(l1, (idx1&MASK), bx01, bx11, ax1, variant == POW_RWZ)
-            SHUFFLE_PHASE_1(l2, (idx2&MASK), bx02, bx12, ax2, variant == POW_RWZ)
+            SHUFFLE_PHASE_1(l0, (idx0&MASK), bx00, bx10, ax0, VARIANT == POW_RWZ)
+            SHUFFLE_PHASE_1(l1, (idx1&MASK), bx01, bx11, ax1, VARIANT == POW_RWZ)
+            SHUFFLE_PHASE_1(l2, (idx2&MASK), bx02, bx12, ax2, VARIANT == POW_RWZ)

             _mm_store_si128((__m128i*) &l0[idx0 & MASK], _mm_xor_si128(bx00, cx0));
             _mm_store_si128((__m128i*) &l1[idx1 & MASK], _mm_xor_si128(bx01, cx1));
@@ -3089,7 +3067,7 @@ class CryptoNightMultiHash

             lo = __umul128(idx0, cl, &hi);

-            SHUFFLE_PHASE_2(l0, (idx0&MASK), bx00, bx10, ax0, lo, hi, variant == POW_RWZ)
+            SHUFFLE_PHASE_2(l0, (idx0&MASK), bx00, bx10, ax0, lo, hi, VARIANT == POW_RWZ)

             al0 += hi;
             ah0 += lo;
@@ -3112,7 +3090,7 @@ class CryptoNightMultiHash

             lo = __umul128(idx1, cl, &hi);

-            SHUFFLE_PHASE_2(l1, (idx1&MASK), bx01, bx11, ax1, lo, hi, variant == POW_RWZ)
+            SHUFFLE_PHASE_2(l1, (idx1&MASK), bx01, bx11, ax1, lo, hi, VARIANT == POW_RWZ)

             al1 += hi;
             ah1 += lo;
@@ -3134,7 +3112,7 @@ class CryptoNightMultiHash

             lo = __umul128(idx2, cl, &hi);

-            SHUFFLE_PHASE_2(l2, (idx2&MASK), bx02, bx12, ax2, lo, hi, variant == POW_RWZ)
+            SHUFFLE_PHASE_2(l2, (idx2&MASK), bx02, bx12, ax2, lo, hi, VARIANT == POW_RWZ)

             al2 += hi;
             ah2 += lo;
@@ -3167,8 +3145,7 @@ class CryptoNightMultiHash
                                      size_t size,
                                      uint8_t* __restrict__ output,
                                      ScratchPad** __restrict__ scratchPad,
-                                     AsmOptimization asmOptimization,
-                                     PowVariant variant)
+                                     AsmOptimization asmOptimization)
     {
         // not supported
     }
@@ -3178,8 +3155,7 @@ class CryptoNightMultiHash
                                  size_t size,
                                  uint8_t* __restrict__ output,
                                  ScratchPad** __restrict__ scratchPad,
-                                 uint64_t height,
-                                 PowVariant variant)
+                                 uint64_t height)
     {
         keccak((const uint8_t*) input, (int) size, scratchPad[0]->state, 200);
         keccak((const uint8_t*) input + size, (int) size, scratchPad[1]->state, 200);
@@ -3263,7 +3239,7 @@ class CryptoNightMultiHash

             VARIANT4_RANDOM_MATH(0, al0, ah0, cl, bx00, bx10);

-            if (variant == POW_V4) {
+            if (VARIANT == POW_V4) {
                 al0 ^= r0[2] | ((uint64_t)(r0[3]) << 32);
                 ah0 ^= r0[0] | ((uint64_t)(r0[1]) << 32);
             }
@@ -3291,7 +3267,7 @@ class CryptoNightMultiHash

             VARIANT4_RANDOM_MATH(1, al1, ah1, cl, bx01, bx11);

-            if (variant == POW_V4) {
+            if (VARIANT == POW_V4) {
                 al1 ^= r1[2] | ((uint64_t)(r1[3]) << 32);
                 ah1 ^= r1[0] | ((uint64_t)(r1[1]) << 32);
             }
@@ -3318,7 +3294,7 @@ class CryptoNightMultiHash

             VARIANT4_RANDOM_MATH(2, al2, ah2, cl, bx02, bx12);

-            if (variant == POW_V4) {
+            if (VARIANT == POW_V4) {
                 al2 ^= r2[2] | ((uint64_t)(r2[3]) << 32);
                 ah2 ^= r2[0] | ((uint64_t)(r2[1]) << 32);
             }
@@ -3359,8 +3335,7 @@ class CryptoNightMultiHash
                                      uint8_t* __restrict__ output,
                                      ScratchPad** __restrict__ scratchPad,
                                      uint64_t height,
-                                     AsmOptimization asmOptimization,
-                                     PowVariant variant)
+                                     AsmOptimization asmOptimization)
     {
         // not supported
     }
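Every phase-2 hunk leans on `lo = __umul128(idx, cl, &hi)`, a full 64x64 -> 128-bit multiply. A sketch of the usual portable fallback using GCC/Clang's unsigned __int128; this is illustrative, not necessarily how this header implements it:

    #include <cstdint>

    // Returns the low 64 bits of a*b and stores the high 64 bits through hi,
    // matching the shape the main loops expect from __umul128.
    static inline uint64_t umul128_portable(uint64_t a, uint64_t b, uint64_t* hi) {
        unsigned __int128 p = (unsigned __int128) a * b;
        *hi = (uint64_t)(p >> 64);
        return (uint64_t) p;
    }

    int main() {
        uint64_t hi;
        uint64_t lo = umul128_portable(0xDEADBEEFDEADBEEFULL, 0x1234567812345678ULL, &hi);
        return (int)(lo ^ hi);   // keep the result observable
    }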
@@ -4037,8 +4012,8 @@ class CryptoNightMultiHash
     }
 };

-template <size_t ITERATIONS, size_t INDEX_SHIFT, size_t MEM, size_t MASK, bool SOFT_AES>
-class CryptoNightMultiHash<ITERATIONS, INDEX_SHIFT, MEM, MASK, SOFT_AES, 4>
+template <size_t ITERATIONS, size_t INDEX_SHIFT, size_t MEM, size_t MASK, bool SOFT_AES, PowVariant VARIANT>
+class CryptoNightMultiHash<ITERATIONS, INDEX_SHIFT, MEM, MASK, SOFT_AES, VARIANT, 4>
 {
 public:
     inline static void hash(const uint8_t* __restrict__ input,
@@ -4394,8 +4369,7 @@ class CryptoNightMultiHash
                                      size_t size,
                                      uint8_t* __restrict__ output,
                                      ScratchPad** __restrict__ scratchPad,
-                                     AsmOptimization asmOptimization,
-                                     PowVariant variant)
+                                     AsmOptimization asmOptimization)
     {
         // not supported
     }
@@ -4404,8 +4378,7 @@ class CryptoNightMultiHash
     inline static void hashPowV3(const uint8_t* __restrict__ input,
                                  size_t size,
                                  uint8_t* __restrict__ output,
-                                 ScratchPad** __restrict__ scratchPad,
-                                 PowVariant variant)
+                                 ScratchPad** __restrict__ scratchPad)
     {
         keccak((const uint8_t*) input, (int) size, scratchPad[0]->state, 200);
         keccak((const uint8_t*) input + size, (int) size, scratchPad[1]->state, 200);
@@ -4492,10 +4465,10 @@ class CryptoNightMultiHash
                 cx3 = _mm_aesenc_si128(cx3, ax3);
             }

-            SHUFFLE_PHASE_1(l0, (idx0&MASK), bx00, bx10, ax0, variant == POW_RWZ)
-            SHUFFLE_PHASE_1(l1, (idx1&MASK), bx01, bx11, ax1, variant == POW_RWZ)
-            SHUFFLE_PHASE_1(l2, (idx2&MASK), bx02, bx12, ax2, variant == POW_RWZ)
-            SHUFFLE_PHASE_1(l3, (idx3&MASK), bx03, bx13, ax3, variant == POW_RWZ)
+            SHUFFLE_PHASE_1(l0, (idx0&MASK), bx00, bx10, ax0, VARIANT == POW_RWZ)
+            SHUFFLE_PHASE_1(l1, (idx1&MASK), bx01, bx11, ax1, VARIANT == POW_RWZ)
+            SHUFFLE_PHASE_1(l2, (idx2&MASK), bx02, bx12, ax2, VARIANT == POW_RWZ)
+            SHUFFLE_PHASE_1(l3, (idx3&MASK), bx03, bx13, ax3, VARIANT == POW_RWZ)

             _mm_store_si128((__m128i*) &l0[idx0 & MASK], _mm_xor_si128(bx00, cx0));
             _mm_store_si128((__m128i*) &l1[idx1 & MASK], _mm_xor_si128(bx01, cx1));
@@ -4515,7 +4488,7 @@ class CryptoNightMultiHash

             lo = __umul128(idx0, cl, &hi);

-            SHUFFLE_PHASE_2(l0, (idx0&MASK), bx00, bx10, ax0, lo, hi, variant == POW_RWZ)
+            SHUFFLE_PHASE_2(l0, (idx0&MASK), bx00, bx10, ax0, lo, hi, VARIANT == POW_RWZ)

             al0 += hi;
             ah0 += lo;
@@ -4538,7 +4511,7 @@ class CryptoNightMultiHash

             lo = __umul128(idx1, cl, &hi);

-            SHUFFLE_PHASE_2(l1, (idx1&MASK), bx01, bx11, ax1, lo, hi, variant == POW_RWZ)
+            SHUFFLE_PHASE_2(l1, (idx1&MASK), bx01, bx11, ax1, lo, hi, VARIANT == POW_RWZ)

             al1 += hi;
             ah1 += lo;
@@ -4561,7 +4534,7 @@ class CryptoNightMultiHash

             lo = __umul128(idx2, cl, &hi);

-            SHUFFLE_PHASE_2(l2, (idx2&MASK), bx02, bx12, ax2, lo, hi, variant == POW_RWZ)
+            SHUFFLE_PHASE_2(l2, (idx2&MASK), bx02, bx12, ax2, lo, hi, VARIANT == POW_RWZ)

             al2 += hi;
             ah2 += lo;
@@ -4584,7 +4557,7 @@ class CryptoNightMultiHash

             lo = __umul128(idx3, cl, &hi);

-            SHUFFLE_PHASE_2(l3, (idx3&MASK), bx03, bx13, ax3, lo, hi, variant == POW_RWZ)
+            SHUFFLE_PHASE_2(l3, (idx3&MASK), bx03, bx13, ax3, lo, hi, VARIANT == POW_RWZ)

             al3 += hi;
             ah3 += lo;
@@ -4620,8 +4593,7 @@ class CryptoNightMultiHash
                                      size_t size,
                                      uint8_t* __restrict__ output,
                                      ScratchPad** __restrict__ scratchPad,
-                                     AsmOptimization asmOptimization,
-                                     PowVariant variant)
+                                     AsmOptimization asmOptimization)
     {
         // not supported
     }
@@ -4631,8 +4603,7 @@ class CryptoNightMultiHash
                                  size_t size,
                                  uint8_t* __restrict__ output,
                                  ScratchPad** __restrict__ scratchPad,
-                                 uint64_t height,
-                                 PowVariant variant)
+                                 uint64_t height)
     {
         keccak((const uint8_t*) input, (int) size, scratchPad[0]->state, 200);
         keccak((const uint8_t*) input + size, (int) size, scratchPad[1]->state, 200);
@@ -4735,7 +4706,7 @@ class CryptoNightMultiHash

             VARIANT4_RANDOM_MATH(0, al0, ah0, cl, bx00, bx10);

-            if (variant == POW_V4) {
+            if (VARIANT == POW_V4) {
                 al0 ^= r0[2] | ((uint64_t)(r0[3]) << 32);
                 ah0 ^= r0[0] | ((uint64_t)(r0[1]) << 32);
             }
@@ -4763,7 +4734,7 @@ class CryptoNightMultiHash

             VARIANT4_RANDOM_MATH(1, al1, ah1, cl, bx01, bx11);

-            if (variant == POW_V4) {
+            if (VARIANT == POW_V4) {
                 al1 ^= r1[2] | ((uint64_t)(r1[3]) << 32);
                 ah1 ^= r1[0] | ((uint64_t)(r1[1]) << 32);
             }
@@ -4791,7 +4762,7 @@ class CryptoNightMultiHash

             VARIANT4_RANDOM_MATH(2, al2, ah2, cl, bx02, bx12);

-            if (variant == POW_V4) {
+            if (VARIANT == POW_V4) {
                 al2 ^= r2[2] | ((uint64_t)(r2[3]) << 32);
                 ah2 ^= r2[0] | ((uint64_t)(r2[1]) << 32);
             }
@@ -4819,7 +4790,7 @@ class CryptoNightMultiHash

             VARIANT4_RANDOM_MATH(3, al3, ah3, cl, bx03, bx13);

-            if (variant == POW_V4) {
+            if (VARIANT == POW_V4) {
                 al3 ^= r3[2] | ((uint64_t)(r3[3]) << 32);
                 ah3 ^= r3[0] | ((uint64_t)(r3[1]) << 32);
             }
@@ -4864,8 +4835,7 @@ class CryptoNightMultiHash
                                      uint8_t* __restrict__ output,
                                      ScratchPad** __restrict__ scratchPad,
                                      uint64_t height,
-                                     AsmOptimization asmOptimization,
-                                     PowVariant variant)
+                                     AsmOptimization asmOptimization)
     {
         // not supported
     }
@@ -5093,8 +5063,8 @@ class CryptoNightMultiHash
     }
 };

-template <size_t ITERATIONS, size_t INDEX_SHIFT, size_t MEM, size_t MASK, bool SOFT_AES>
-class CryptoNightMultiHash<ITERATIONS, INDEX_SHIFT, MEM, MASK, SOFT_AES, 5>
+template <size_t ITERATIONS, size_t INDEX_SHIFT, size_t MEM, size_t MASK, bool SOFT_AES, PowVariant VARIANT>
+class CryptoNightMultiHash<ITERATIONS, INDEX_SHIFT, MEM, MASK, SOFT_AES, VARIANT, 5>
 {
 public:
     inline static void hash(const uint8_t* __restrict__ input,
@@ -5522,8 +5492,7 @@ class CryptoNightMultiHash
                                      size_t size,
                                      uint8_t* __restrict__ output,
                                      ScratchPad** __restrict__ scratchPad,
-                                     AsmOptimization asmOptimization,
-                                     PowVariant variant)
+                                     AsmOptimization asmOptimization)
     {
         // not supported
     }
@@ -5532,8 +5501,7 @@ class CryptoNightMultiHash
     inline static void hashPowV3(const uint8_t* __restrict__ input,
                                  size_t size,
                                  uint8_t* __restrict__ output,
-                                 ScratchPad** __restrict__ scratchPad,
-                                 PowVariant variant)
+                                 ScratchPad** __restrict__ scratchPad)
     {
         keccak((const uint8_t*) input, (int) size, scratchPad[0]->state, 200);
         keccak((const uint8_t*) input + size, (int) size, scratchPad[1]->state, 200);
@@ -5635,11 +5603,11 @@ class CryptoNightMultiHash
                 cx4 = _mm_aesenc_si128(cx4, ax4);
             }

-            SHUFFLE_PHASE_1(l0, (idx0&MASK), bx00, bx10, ax0, variant == POW_RWZ)
-            SHUFFLE_PHASE_1(l1, (idx1&MASK), bx01, bx11, ax1, variant == POW_RWZ)
-            SHUFFLE_PHASE_1(l2, (idx2&MASK), bx02, bx12, ax2, variant == POW_RWZ)
-            SHUFFLE_PHASE_1(l3, (idx3&MASK), bx03, bx13, ax3, variant == POW_RWZ)
-            SHUFFLE_PHASE_1(l4, (idx4&MASK), bx04, bx14, ax4, variant == POW_RWZ)
+            SHUFFLE_PHASE_1(l0, (idx0&MASK), bx00, bx10, ax0, VARIANT == POW_RWZ)
+            SHUFFLE_PHASE_1(l1, (idx1&MASK), bx01, bx11, ax1, VARIANT == POW_RWZ)
+            SHUFFLE_PHASE_1(l2, (idx2&MASK), bx02, bx12, ax2, VARIANT == POW_RWZ)
+            SHUFFLE_PHASE_1(l3, (idx3&MASK), bx03, bx13, ax3, VARIANT == POW_RWZ)
+            SHUFFLE_PHASE_1(l4, (idx4&MASK), bx04, bx14, ax4, VARIANT == POW_RWZ)

             _mm_store_si128((__m128i*) &l0[idx0 & MASK], _mm_xor_si128(bx00, cx0));
             _mm_store_si128((__m128i*) &l1[idx1 & MASK], _mm_xor_si128(bx01, cx1));
@@ -5661,7 +5629,7 @@ class CryptoNightMultiHash

             lo = __umul128(idx0, cl, &hi);

-            SHUFFLE_PHASE_2(l0, (idx0&MASK), bx00, bx10, ax0, lo, hi, variant == POW_RWZ)
+            SHUFFLE_PHASE_2(l0, (idx0&MASK), bx00, bx10, ax0, lo, hi, VARIANT == POW_RWZ)

             al0 += hi;
             ah0 += lo;
@@ -5684,7 +5652,7 @@ class CryptoNightMultiHash

             lo = __umul128(idx1, cl, &hi);

-            SHUFFLE_PHASE_2(l1, (idx1&MASK), bx01, bx11, ax1, lo, hi, variant == POW_RWZ)
+            SHUFFLE_PHASE_2(l1, (idx1&MASK), bx01, bx11, ax1, lo, hi, VARIANT == POW_RWZ)

             al1 += hi;
             ah1 += lo;
@@ -5707,7 +5675,7 @@ class CryptoNightMultiHash

             lo = __umul128(idx2, cl, &hi);

-            SHUFFLE_PHASE_2(l2, (idx2&MASK), bx02, bx12, ax2, lo, hi, variant == POW_RWZ)
+            SHUFFLE_PHASE_2(l2, (idx2&MASK), bx02, bx12, ax2, lo, hi, VARIANT == POW_RWZ)

             al2 += hi;
             ah2 += lo;
@@ -5730,7 +5698,7 @@ class CryptoNightMultiHash

             lo = __umul128(idx3, cl, &hi);

-            SHUFFLE_PHASE_2(l3, (idx3&MASK), bx03, bx13, ax3, lo, hi, variant == POW_RWZ)
+            SHUFFLE_PHASE_2(l3, (idx3&MASK), bx03, bx13, ax3, lo, hi, VARIANT == POW_RWZ)

             al3 += hi;
             ah3 += lo;
@@ -5753,7 +5721,7 @@ class CryptoNightMultiHash

             lo = __umul128(idx4, cl, &hi);

-            SHUFFLE_PHASE_2(l4, (idx4&MASK), bx04, bx14, ax4, lo, hi, variant == POW_RWZ)
+            SHUFFLE_PHASE_2(l4, (idx4&MASK), bx04, bx14, ax4, lo, hi, VARIANT == POW_RWZ)

             al4 += hi;
             ah4 += lo;
@@ -5792,8 +5760,7 @@ class CryptoNightMultiHash
                                      size_t size,
                                      uint8_t* __restrict__ output,
                                      ScratchPad** __restrict__ scratchPad,
-                                     AsmOptimization asmOptimization,
-                                     PowVariant variant)
+                                     AsmOptimization asmOptimization)
     {
         // not supported
     }
@@ -5803,8 +5770,7 @@ class CryptoNightMultiHash
                                  size_t size,
                                 uint8_t* __restrict__ output,
                                 ScratchPad** __restrict__ scratchPad,
-                                 uint64_t height,
-                                 PowVariant variant)
+                                 uint64_t height)
     {
         keccak((const uint8_t*) input, (int) size, scratchPad[0]->state, 200);
         keccak((const uint8_t*) input + size, (int) size, scratchPad[1]->state, 200);
@@ -5924,7 +5890,7 @@ class CryptoNightMultiHash

             VARIANT4_RANDOM_MATH(0, al0, ah0, cl, bx00, bx10);

-            if (variant == POW_V4) {
+            if (VARIANT == POW_V4) {
                 al0 ^= r0[2] | ((uint64_t)(r0[3]) << 32);
                 ah0 ^= r0[0] | ((uint64_t)(r0[1]) << 32);
             }
@@ -5952,7 +5918,7 @@ class CryptoNightMultiHash

             VARIANT4_RANDOM_MATH(1, al1, ah1, cl, bx01, bx11)

-            if (variant == POW_V4) {
+            if (VARIANT == POW_V4) {
                 al1 ^= r1[2] | ((uint64_t)(r1[3]) << 32);
                 ah1 ^= r1[0] | ((uint64_t)(r1[1]) << 32);
             }
@@ -5980,7 +5946,7 @@ class CryptoNightMultiHash

             VARIANT4_RANDOM_MATH(2, al2, ah2, cl, bx02, bx12);

-            if (variant == POW_V4) {
+            if (VARIANT == POW_V4) {
                 al2 ^= r2[2] | ((uint64_t)(r2[3]) << 32);
                 ah2 ^= r2[0] | ((uint64_t)(r2[1]) << 32);
             }
@@ -6008,7 +5974,7 @@ class CryptoNightMultiHash

             VARIANT4_RANDOM_MATH(3, al3, ah3, cl, bx03, bx13);

-            if (variant == POW_V4) {
+            if (VARIANT == POW_V4) {
                 al3 ^= r3[2] | ((uint64_t)(r3[3]) << 32);
                 ah3 ^= r3[0] | ((uint64_t)(r3[1]) << 32);
             }
@@ -6036,7 +6002,7 @@ class CryptoNightMultiHash

             VARIANT4_RANDOM_MATH(4, al4, ah4, cl, bx04, bx14);

-            if (variant == POW_V4) {
+            if (VARIANT == POW_V4) {
                 al4 ^= r4[2] | ((uint64_t)(r4[3]) << 32);
                 ah4 ^= r4[0] | ((uint64_t)(r4[1]) << 32);
             }
@@ -6083,8 +6049,7 @@ class CryptoNightMultiHash
                                      uint8_t* __restrict__ output,
                                      ScratchPad** __restrict__ scratchPad,
                                      uint64_t height,
-                                     AsmOptimization asmOptimization,
-                                     PowVariant variant)
+                                     AsmOptimization asmOptimization)
     {
         // not supported
     }
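As the repeated template hunks show, the header keeps one hand-unrolled specialization per NUM_HASH_BLOCKS value from 1 to 5, plus the generic n-loop fallback, so each lane's al/ah/bx/idx state lives in distinct locals rather than arrays. A toy sketch of that structure with simplified names, only to make the specialization pattern visible:

    #include <cstddef>

    enum PowVariant { POW_V2, POW_V4 };

    // Primary template: generic n-loop version (noted in the source as
    // slightly slower than the hardcoded ones).
    template <bool SOFT_AES, PowVariant VARIANT, size_t NUM_HASH_BLOCKS>
    struct MultiHash {
        static void hash() { /* loop over NUM_HASH_BLOCKS scratchpads */ }
    };

    // Hand-unrolled specialization, e.g. for two blocks: lane state is kept
    // in separate locals (ax0/ax1, idx0/idx1, ...) and interleaved explicitly.
    template <bool SOFT_AES, PowVariant VARIANT>
    struct MultiHash<SOFT_AES, VARIANT, 2> {
        static void hash() { /* lane 0 and lane 1 steps interleaved */ }
    };

    int main() {
        MultiHash<false, POW_V4, 3>::hash();   // generic path
        MultiHash<false, POW_V4, 2>::hash();   // unrolled path
        return 0;
    }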