
Commit

remove large input specialization
Removes the unused large-input specialization for the dense affine transform. It has been obsolete since official-stockfish#4612 was merged.

closes official-stockfish#4684

No functional change
AndrovT authored and vondele committed Jul 16, 2023
1 parent ee53f8e commit a42ab95
Showing 1 changed file with 3 additions and 258 deletions.
261 changes: 3 additions & 258 deletions src/nnue/layers/affine_transform.h
@@ -29,33 +29,18 @@

/*
This file contains the definition for a fully connected layer (aka affine transform).
Two approaches are employed, depending on the sizes of the transform.
Approach 1 (a specialization for large inputs):
- used when the PaddedInputDimensions >= 128
- uses AVX512 if possible
- processes inputs in batches of 2*InputSimdWidth
- so in batches of 128 for AVX512
- the weight blocks of size InputSimdWidth are transposed such that
access is sequential
- N columns of the weight matrix are processed at a time, where N
depends on the architecture (the number of registers)
- accumulate + hadd is used
Approach 2 (a specialization for small inputs):
- used when the PaddedInputDimensions < 128
- expected use-case is PaddedInputDimensions == 32 and InputDimensions <= 32
(that is why AVX512 is hard to implement)
- expected use-case is small layers
- not optimized as well as approach 1
- inputs are processed in chunks of 4, weights are respectively transposed
- accumulation happens directly to int32s
*/
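A minimal scalar sketch of what the layer computes, since the fallback body is collapsed below. This is illustrative only, not code from the repository; the function name, the flat row-major weight indexing, and the absence of padding are simplifications.

#include <cstdint>

// Reference semantics only: output = biases + weights * input, with
// int8 weights, uint8 inputs, and int32 accumulation. The real code
// vectorizes this loop and pads the dimensions.
void affine_transform_reference(std::int32_t* output,
                                const std::int8_t* weights,
                                const std::int32_t* biases,
                                const std::uint8_t* input,
                                int outputDims, int inputDims) {
    for (int o = 0; o < outputDims; ++o) {
        std::int32_t sum = biases[o];
        for (int i = 0; i < inputDims; ++i)
            sum += static_cast<std::int32_t>(weights[o * inputDims + i]) * input[i];
        output[o] = sum;
    }
}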

namespace Stockfish::Eval::NNUE::Layers {

// Fallback implementation for older/other architectures.
// Identical for both approaches. Requires the input to be padded to at least 16 values.
// Requires the input to be padded to at least 16 values.
#if !defined(USE_SSSE3)
template <IndexType InputDimensions, IndexType PaddedInputDimensions, IndexType OutputDimensions>
static void affine_transform_non_ssse3(std::int32_t* output, const std::int8_t* weights, const std::int32_t* biases, const std::uint8_t* input)
@@ -159,18 +144,8 @@ namespace Stockfish::Eval::NNUE::Layers {
}
#endif

template <IndexType InDims, IndexType OutDims, typename Enabled = void>
class AffineTransform;

#if defined (USE_AVX512)
constexpr IndexType LargeInputSize = 2 * 64;
#else
constexpr IndexType LargeInputSize = std::numeric_limits<IndexType>::max();
#endif

// A specialization for large inputs
template <IndexType InDims, IndexType OutDims>
class AffineTransform<InDims, OutDims, std::enable_if_t<(ceil_to_multiple<IndexType>(InDims, MaxSimdWidth) >= LargeInputSize)>> {
class AffineTransform {
public:
// Input/output type
using InputType = std::uint8_t;
@@ -187,236 +162,6 @@ namespace Stockfish::Eval::NNUE::Layers {

using OutputBuffer = OutputType[PaddedOutputDimensions];

static_assert(PaddedInputDimensions >= LargeInputSize, "Something went wrong. This specialization (for large inputs) should not have been chosen.");

#if defined (USE_AVX512)
static constexpr IndexType InputSimdWidth = 64;
static constexpr IndexType MaxNumOutputRegs = 16;
#elif defined (USE_AVX2)
static constexpr IndexType InputSimdWidth = 32;
static constexpr IndexType MaxNumOutputRegs = 8;
#elif defined (USE_SSSE3)
static constexpr IndexType InputSimdWidth = 16;
static constexpr IndexType MaxNumOutputRegs = 8;
#elif defined (USE_NEON_DOTPROD)
static constexpr IndexType InputSimdWidth = 16;
static constexpr IndexType MaxNumOutputRegs = 8;
#elif defined (USE_NEON)
static constexpr IndexType InputSimdWidth = 8;
static constexpr IndexType MaxNumOutputRegs = 8;
#else
// The fallback implementation will not have permuted weights.
// We define these to avoid a lot of ifdefs later.
static constexpr IndexType InputSimdWidth = 1;
static constexpr IndexType MaxNumOutputRegs = 1;
#endif

// A big block is a region in the weight matrix of the size [PaddedInputDimensions, NumOutputRegs].
// A small block is a region of size [InputSimdWidth, 1]

static constexpr IndexType NumOutputRegs = std::min(MaxNumOutputRegs, OutputDimensions);
static constexpr IndexType SmallBlockSize = InputSimdWidth;
static constexpr IndexType BigBlockSize = NumOutputRegs * PaddedInputDimensions;
static constexpr IndexType NumSmallBlocksInBigBlock = BigBlockSize / SmallBlockSize;
static constexpr IndexType NumSmallBlocksPerOutput = PaddedInputDimensions / SmallBlockSize;
static constexpr IndexType NumBigBlocks = OutputDimensions / NumOutputRegs;

static_assert(OutputDimensions % NumOutputRegs == 0);

// Hash value embedded in the evaluation file
static constexpr std::uint32_t get_hash_value(std::uint32_t prevHash) {
std::uint32_t hashValue = 0xCC03DAE4u;
hashValue += OutputDimensions;
hashValue ^= prevHash >> 1;
hashValue ^= prevHash << 31;
return hashValue;
}

/*
Transposes the small blocks within a block.
Effectively means that weights can be traversed sequentially during inference.
*/
static IndexType get_weight_index(IndexType i)
{
const IndexType smallBlock = (i / SmallBlockSize) % NumSmallBlocksInBigBlock;
const IndexType smallBlockCol = smallBlock / NumSmallBlocksPerOutput;
const IndexType smallBlockRow = smallBlock % NumSmallBlocksPerOutput;
const IndexType bigBlock = i / BigBlockSize;
const IndexType rest = i % SmallBlockSize;

const IndexType idx =
bigBlock * BigBlockSize
+ smallBlockRow * SmallBlockSize * NumOutputRegs
+ smallBlockCol * SmallBlockSize
+ rest;

return idx;
}
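To make the remapping concrete, here is a small self-contained check, with toy sizes invented purely for illustration, that this index mapping is a permutation of the weight array; the read_parameters and write_parameters loops below therefore touch every weight slot exactly once.

#include <cassert>
#include <cstdint>
#include <vector>

int main() {
    // Hypothetical toy configuration, not taken from any real network.
    constexpr std::uint32_t SmallBlockSize           = 4;  // stands in for InputSimdWidth
    constexpr std::uint32_t NumOutputRegs            = 2;
    constexpr std::uint32_t PaddedInputDimensions    = 8;
    constexpr std::uint32_t OutputDimensions         = 4;
    constexpr std::uint32_t BigBlockSize             = NumOutputRegs * PaddedInputDimensions;
    constexpr std::uint32_t NumSmallBlocksInBigBlock = BigBlockSize / SmallBlockSize;
    constexpr std::uint32_t NumSmallBlocksPerOutput  = PaddedInputDimensions / SmallBlockSize;

    // Mirrors get_weight_index above, with IndexType replaced by std::uint32_t.
    auto get_weight_index = [](std::uint32_t i) {
        const std::uint32_t smallBlock    = (i / SmallBlockSize) % NumSmallBlocksInBigBlock;
        const std::uint32_t smallBlockCol = smallBlock / NumSmallBlocksPerOutput;
        const std::uint32_t smallBlockRow = smallBlock % NumSmallBlocksPerOutput;
        const std::uint32_t bigBlock      = i / BigBlockSize;
        const std::uint32_t rest          = i % SmallBlockSize;
        return bigBlock * BigBlockSize
             + smallBlockRow * SmallBlockSize * NumOutputRegs
             + smallBlockCol * SmallBlockSize
             + rest;
    };

    std::vector<int> hits(OutputDimensions * PaddedInputDimensions, 0);
    for (std::uint32_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
        ++hits[get_weight_index(i)];
    for (int h : hits)
        assert(h == 1);  // every destination index is produced exactly once
    return 0;
}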

// Read network parameters
bool read_parameters(std::istream& stream) {
read_little_endian<BiasType>(stream, biases, OutputDimensions);

for (IndexType i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
weights[get_weight_index(i)] = read_little_endian<WeightType>(stream);

return !stream.fail();
}

// Write network parameters
bool write_parameters(std::ostream& stream) const {
write_little_endian<BiasType>(stream, biases, OutputDimensions);

for (IndexType i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
write_little_endian<WeightType>(stream, weights[get_weight_index(i)]);

return !stream.fail();
}

// Forward propagation
const OutputType* propagate(
const InputType* input, OutputType* output) const {

#if defined (USE_AVX512)
using acc_vec_t = __m512i;
using bias_vec_t = __m128i;
using weight_vec_t = __m512i;
using in_vec_t = __m512i;
#define vec_zero _mm512_setzero_si512()
#define vec_add_dpbusd_32x2 Simd::m512_add_dpbusd_epi32x2
#define vec_hadd Simd::m512_hadd
#define vec_haddx4 Simd::m512_haddx4
#elif defined (USE_AVX2)
using acc_vec_t = __m256i;
using bias_vec_t = __m128i;
using weight_vec_t = __m256i;
using in_vec_t = __m256i;
#define vec_zero _mm256_setzero_si256()
#define vec_add_dpbusd_32x2 Simd::m256_add_dpbusd_epi32x2
#define vec_hadd Simd::m256_hadd
#define vec_haddx4 Simd::m256_haddx4
#elif defined (USE_SSSE3)
using acc_vec_t = __m128i;
using bias_vec_t = __m128i;
using weight_vec_t = __m128i;
using in_vec_t = __m128i;
#define vec_zero _mm_setzero_si128()
#define vec_add_dpbusd_32x2 Simd::m128_add_dpbusd_epi32x2
#define vec_hadd Simd::m128_hadd
#define vec_haddx4 Simd::m128_haddx4
#elif defined (USE_NEON_DOTPROD)
using acc_vec_t = int32x4_t;
using bias_vec_t = int32x4_t;
using weight_vec_t = int8x16_t;
using in_vec_t = int8x16_t;
#define vec_zero {0}
#define vec_add_dpbusd_32x2 Simd::dotprod_m128_add_dpbusd_epi32x2
#define vec_hadd Simd::neon_m128_hadd
#define vec_haddx4 Simd::neon_m128_haddx4
#elif defined (USE_NEON)
using acc_vec_t = int32x4_t;
using bias_vec_t = int32x4_t;
using weight_vec_t = int8x8_t;
using in_vec_t = int8x8_t;
#define vec_zero {0}
#define vec_add_dpbusd_32x2 Simd::neon_m128_add_dpbusd_epi32x2
#define vec_hadd Simd::neon_m128_hadd
#define vec_haddx4 Simd::neon_m128_haddx4
#endif

#if defined (USE_SSSE3) || defined (USE_NEON)
const in_vec_t* invec = reinterpret_cast<const in_vec_t*>(input);

// Perform accumulation to registers for each big block
for (IndexType bigBlock = 0; bigBlock < NumBigBlocks; ++bigBlock)
{
acc_vec_t acc[NumOutputRegs] = { vec_zero };

// Each big block has NumOutputRegs small blocks in each "row", one per register.
// We process two small blocks at a time to save on one addition without VNNI.
for (IndexType smallBlock = 0; smallBlock < NumSmallBlocksPerOutput; smallBlock += 2)
{
const weight_vec_t* weightvec =
reinterpret_cast<const weight_vec_t*>(
weights
+ bigBlock * BigBlockSize
+ smallBlock * SmallBlockSize * NumOutputRegs);

const in_vec_t in0 = invec[smallBlock + 0];
const in_vec_t in1 = invec[smallBlock + 1];

for (IndexType k = 0; k < NumOutputRegs; ++k)
vec_add_dpbusd_32x2(acc[k], in0, weightvec[k], in1, weightvec[k + NumOutputRegs]);
}

// Horizontally add all accumulators.
if constexpr (NumOutputRegs % 4 == 0)
{
bias_vec_t* outputvec = reinterpret_cast<bias_vec_t*>(output);
const bias_vec_t* biasvec = reinterpret_cast<const bias_vec_t*>(biases);

for (IndexType k = 0; k < NumOutputRegs; k += 4)
{
const IndexType idx = (bigBlock * NumOutputRegs + k) / 4;
outputvec[idx] = vec_haddx4(acc[k+0], acc[k+1], acc[k+2], acc[k+3], biasvec[idx]);
}
}
else
{
for (IndexType k = 0; k < NumOutputRegs; ++k)
{
const IndexType idx = (bigBlock * NumOutputRegs + k);
output[idx] = vec_hadd(acc[k], biases[idx]);
}
}
}

# undef vec_zero
# undef vec_add_dpbusd_32x2
# undef vec_hadd
# undef vec_haddx4
#else
// Use old implementation for the other architectures.
affine_transform_non_ssse3<
InputDimensions,
PaddedInputDimensions,
OutputDimensions>(output, weights, biases, input);

#endif

return output;
}

private:
using BiasType = OutputType;
using WeightType = std::int8_t;

alignas(CacheLineSize) BiasType biases[OutputDimensions];
alignas(CacheLineSize) WeightType weights[OutputDimensions * PaddedInputDimensions];
};
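The vec_add_dpbusd_32x2 macros used in propagate above can be hard to read. As a rough scalar model, my own sketch rather than code from the repository, each call adds two u8 x i8 dot products over groups of four bytes to every 32-bit accumulator lane; the sketch ignores the intermediate 16-bit saturation that the non-VNNI maddubs-based emulation can exhibit.

#include <cstdint>

// Scalar stand-in for one 32-bit lane of vec_add_dpbusd_32x2 (illustrative only).
inline void scalar_add_dpbusd_32x2(std::int32_t&       acc,
                                   const std::uint8_t* in0, const std::int8_t* w0,
                                   const std::uint8_t* in1, const std::int8_t* w1) {
    // Four unsigned-by-signed byte products from each input/weight pair,
    // accumulated into the running int32 sum.
    for (int j = 0; j < 4; ++j)
        acc += static_cast<std::int32_t>(in0[j]) * w0[j]
             + static_cast<std::int32_t>(in1[j]) * w1[j];
}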

// A specialization for small inputs
template <IndexType InDims, IndexType OutDims>
class AffineTransform<InDims, OutDims, std::enable_if_t<(ceil_to_multiple<IndexType>(InDims, MaxSimdWidth) < LargeInputSize)>> {
public:
// Input/output type
using InputType = std::uint8_t;
using OutputType = std::int32_t;

// Number of input/output dimensions
static constexpr IndexType InputDimensions = InDims;
static constexpr IndexType OutputDimensions = OutDims;

static constexpr IndexType PaddedInputDimensions =
ceil_to_multiple<IndexType>(InputDimensions, MaxSimdWidth);
static constexpr IndexType PaddedOutputDimensions =
ceil_to_multiple<IndexType>(OutputDimensions, MaxSimdWidth);

using OutputBuffer = OutputType[PaddedOutputDimensions];

static_assert(PaddedInputDimensions < LargeInputSize, "Something went wrong. This specialization (for small inputs) should not have been chosen.");

// Hash value embedded in the evaluation file
static constexpr std::uint32_t get_hash_value(std::uint32_t prevHash) {
std::uint32_t hashValue = 0xCC03DAE4u;
