#include "../test.h" #include "../../aligned.h" #include "../../kernels.h" #include namespace intgemm { template void kernel_multiply_sat_test() { if (kCPU < CPUType_) return; using vec_t = vector_t; constexpr static auto VECTOR_LENGTH = sizeof(vec_t) / sizeof(Type_); AlignedVector input1(VECTOR_LENGTH); AlignedVector input2(VECTOR_LENGTH); AlignedVector output(VECTOR_LENGTH); std::iota(input1.begin(), input1.end(), -int(VECTOR_LENGTH / 2)); std::iota(input2.begin(), input2.end(), -int(VECTOR_LENGTH / 3)); for (std::size_t shift = 0; shift <= 2 * 8 * sizeof(Type_); ++shift) { *output.template as() = kernels::multiply_sat(*input1.template as(), *input2.template as(), shift); for (std::size_t i = 0; i < output.size(); ++i) { auto ref = (int64_t(input1[i]) * input2[i]) >> shift; auto ref_sat = Type_(std::min(std::numeric_limits::max(), std::max(std::numeric_limits::min(), ref))); CHECK(output[i] == ref_sat); } } } template INTGEMM_SSE2 void kernel_multiply_sat_test(); template INTGEMM_SSE2 void kernel_multiply_sat_test(); KERNEL_TEST_CASE("multiply_sat/int8 SSE2") { return kernel_multiply_sat_test(); } KERNEL_TEST_CASE("multiply_sat/int16 SSE2") { return kernel_multiply_sat_test(); } template INTGEMM_AVX2 void kernel_multiply_sat_test(); template INTGEMM_AVX2 void kernel_multiply_sat_test(); KERNEL_TEST_CASE("multiply_sat/int8 AVX2") { return kernel_multiply_sat_test(); } KERNEL_TEST_CASE("multiply_sat/int16 AVX2") { return kernel_multiply_sat_test(); } #ifdef INTGEMM_COMPILER_SUPPORTS_AVX512 template INTGEMM_AVX512BW void kernel_multiply_sat_test(); template INTGEMM_AVX512BW void kernel_multiply_sat_test(); KERNEL_TEST_CASE("multiply_sat/int8 AVX512BW") { return kernel_multiply_sat_test(); } KERNEL_TEST_CASE("multiply_sat/int16 AVX512BW") { return kernel_multiply_sat_test(); } #endif }