Skip to content

Commit

Permalink
huh
Browse files Browse the repository at this point in the history
Differential Revision: D63404074
  • Loading branch information
mengdilin authored and facebook-github-bot committed Sep 25, 2024
1 parent ea866ea commit 4ba3ce1
Showing 1 changed file with 31 additions and 29 deletions.
60 changes: 31 additions & 29 deletions faiss/impl/ScalarQuantizer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -487,7 +487,7 @@ struct QuantizerTemplate<Codec, QuantizerTemplateScaling::UNIFORM, 8>
}
};

#elif defined(__AVX2__)
#else

template <class Codec>
struct QuantizerTemplate<Codec, QuantizerTemplateScaling::NON_UNIFORM, 8>
Expand All @@ -510,34 +510,36 @@ struct QuantizerTemplate<Codec, QuantizerTemplateScaling::NON_UNIFORM, 8>

#endif

#ifdef __aarch64__

// aarch64/NEON specialization of the NON_UNIFORM quantizer for SIMD width 8:
// decodes and reconstructs 8 components per call, deriving the per-component
// (scalar) behavior from the width-1 base template.
template <class Codec>
struct QuantizerTemplate<Codec, QuantizerTemplateScaling::NON_UNIFORM, 8>
        : QuantizerTemplate<
                  Codec,
                  QuantizerTemplateScaling::NON_UNIFORM,
                  1> {
    QuantizerTemplate(size_t d, const std::vector<float>& trained)
            : QuantizerTemplate<
                      Codec,
                      QuantizerTemplateScaling::NON_UNIFORM,
                      1>(d, trained) {}

    // Reconstruct components [i, i+8) from the packed codes as
    // vmin[j] + xi[j] * vdiff[j] for each lane j.
    FAISS_ALWAYS_INLINE simd8float32
    reconstruct_8_components(const uint8_t* code, int i) const {
        // NOTE(review): assumes Codec::decode_8_components returns a
        // simd8float32 whose .data member is a float32x4x2_t — confirm
        // against the simdlib NEON definition.
        float32x4x2_t xi = Codec::decode_8_components(code, i).data;

        // NON_UNIFORM scaling: one (vmin, vdiff) pair per dimension, so
        // load 8 consecutive floats (two q-registers) starting at index i.
        float32x4x2_t vmin_8 = vld1q_f32_x2(this->vmin + i);
        float32x4x2_t vdiff_8 = vld1q_f32_x2(this->vdiff + i);

        // vfmaq_f32(a, b, c) computes a + b * c lane-wise, i.e.
        // vmin + xi * vdiff for the low and high 4-lane halves.
        return simd8float32(
                {vfmaq_f32(vmin_8.val[0], xi.val[0], vdiff_8.val[0]),
                 vfmaq_f32(vmin_8.val[1], xi.val[1], vdiff_8.val[1])});
    }
};

#endif
// #ifdef __aarch64__

// template <class Codec>
// struct QuantizerTemplate<Codec,
// QuantizerTemplateScaling::NON_UNIFORM, 8>
// : QuantizerTemplate<
// Codec,
// QuantizerTemplateScaling::NON_UNIFORM,
// 1> {
// QuantizerTemplate(size_t d, const std::vector<float>& trained)
// : QuantizerTemplate<
// Codec,
// QuantizerTemplateScaling::NON_UNIFORM,
// 1>(d, trained) {}

// FAISS_ALWAYS_INLINE simd8float32
// reconstruct_8_components(const uint8_t* code, int i) const {
// simd8float32 xi = Codec::decode_8_components(code, i);

// float32x4x2_t vmin_8 = vld1q_f32_x2(this->vmin + i);
// float32x4x2_t vdiff_8 = vld1q_f32_x2(this->vdiff + i);
// // NOTE(review): operand order follows vfmaq_f32(acc, x, y) = acc + x*y,
// // i.e. vmin + xi * vdiff — confirm it matches the AVX2 path before re-enabling.
// return simd8float32(
// {vfmaq_f32(vmin_8.val[0], xi.val[0], vdiff_8.val[0]),
// vfmaq_f32(vmin_8.val[1], xi.val[1],
// vdiff_8.val[1])});
// }
// };

// #endif

/*******************************************************************
* FP16 quantizer
Expand Down

0 comments on commit 4ba3ce1

Please sign in to comment.