// test/kernels/add_bias_test.cc (github.com/marian-nmt/intgemm)

#include "../test.h"
#include "../../intgemm/aligned.h"
#include "../../intgemm/kernels.h"

#include <algorithm>
#include <cstddef>
#include <numeric>

namespace intgemm {

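// Generic test body for the add_bias kernel: one SIMD register of input is
// filled with 0, 1, 2, ..., the bias with a constant 100, and the kernel's
// output is checked against their elementwise sum.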
template <CPUType CPUType_, typename ElemType_>
void kernel_add_bias_test() {
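  // Skip the test when the host CPU does not support the targeted instruction set.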
  if (kCPU < CPUType_)
    return;

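  // vec_t is the SIMD register type for this CPU target; VECTOR_LENGTH is the
  // number of ElemType_ elements that fit in one such register.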
  using vec_t = vector_t<CPUType_, ElemType_>;
  constexpr static auto VECTOR_LENGTH = sizeof(vec_t) / sizeof(ElemType_);

  AlignedVector<ElemType_> input(VECTOR_LENGTH);
  AlignedVector<ElemType_> bias(VECTOR_LENGTH);
  AlignedVector<ElemType_> output(VECTOR_LENGTH);

  std::iota(input.begin(), input.end(), static_cast<ElemType_>(0));
  std::fill(bias.begin(), bias.end(), static_cast<ElemType_>(100));

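  // Run the kernel on one register of input; every output element should equal
  // input[i] + bias[i] = i + 100.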
  *output.template as<vec_t>() = kernels::add_bias(*input.template as<vec_t>(), bias.begin(), 0);
  for (std::size_t i = 0; i < output.size(); ++i)
    CHECK(output[i] == ElemType_(100 + i));
}

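// Explicitly instantiate the test for each element type at every supported CPU
// level; the INTGEMM_SSE2/AVX2/AVX512BW markers tie each instantiation to its
// target instruction set, and KERNEL_TEST_CASE registers each combination as a
// named test case.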
template INTGEMM_SSE2 void kernel_add_bias_test<CPUType::SSE2, int8_t>();
template INTGEMM_SSE2 void kernel_add_bias_test<CPUType::SSE2, int16_t>();
template INTGEMM_SSE2 void kernel_add_bias_test<CPUType::SSE2, int>();
template INTGEMM_SSE2 void kernel_add_bias_test<CPUType::SSE2, float>();
template INTGEMM_SSE2 void kernel_add_bias_test<CPUType::SSE2, double>();
KERNEL_TEST_CASE("add_bias/int8 SSE2") { return kernel_add_bias_test<CPUType::SSE2, int8_t>(); }
KERNEL_TEST_CASE("add_bias/int16 SSE2") { return kernel_add_bias_test<CPUType::SSE2, int16_t>(); }
KERNEL_TEST_CASE("add_bias/int SSE2") { return kernel_add_bias_test<CPUType::SSE2, int>(); }
KERNEL_TEST_CASE("add_bias/float SSE2") { return kernel_add_bias_test<CPUType::SSE2, float>(); }
KERNEL_TEST_CASE("add_bias/double SSE2") { return kernel_add_bias_test<CPUType::SSE2, double>(); }

#ifdef INTGEMM_COMPILER_SUPPORTS_AVX2
template INTGEMM_AVX2 void kernel_add_bias_test<CPUType::AVX2, int8_t>();
template INTGEMM_AVX2 void kernel_add_bias_test<CPUType::AVX2, int16_t>();
template INTGEMM_AVX2 void kernel_add_bias_test<CPUType::AVX2, int>();
template INTGEMM_AVX2 void kernel_add_bias_test<CPUType::AVX2, float>();
template INTGEMM_AVX2 void kernel_add_bias_test<CPUType::AVX2, double>();
KERNEL_TEST_CASE("add_bias/int8 AVX2") { return kernel_add_bias_test<CPUType::AVX2, int8_t>(); }
KERNEL_TEST_CASE("add_bias/int16 AVX2") { return kernel_add_bias_test<CPUType::AVX2, int16_t>(); }
KERNEL_TEST_CASE("add_bias/int AVX2") { return kernel_add_bias_test<CPUType::AVX2, int>(); }
KERNEL_TEST_CASE("add_bias/float AVX2") { return kernel_add_bias_test<CPUType::AVX2, float>(); }
KERNEL_TEST_CASE("add_bias/double AVX2") { return kernel_add_bias_test<CPUType::AVX2, double>(); }
#endif

#ifdef INTGEMM_COMPILER_SUPPORTS_AVX512BW
template INTGEMM_AVX512BW void kernel_add_bias_test<CPUType::AVX512BW, int8_t>();
template INTGEMM_AVX512BW void kernel_add_bias_test<CPUType::AVX512BW, int16_t>();
template INTGEMM_AVX512BW void kernel_add_bias_test<CPUType::AVX512BW, int>();
template INTGEMM_AVX512BW void kernel_add_bias_test<CPUType::AVX512BW, float>();
template INTGEMM_AVX512BW void kernel_add_bias_test<CPUType::AVX512BW, double>();
KERNEL_TEST_CASE("add_bias/int8 AVX512BW") { return kernel_add_bias_test<CPUType::AVX512BW, int8_t>(); }
KERNEL_TEST_CASE("add_bias/int16 AVX512BW") { return kernel_add_bias_test<CPUType::AVX512BW, int16_t>(); }
KERNEL_TEST_CASE("add_bias/int AVX512BW") { return kernel_add_bias_test<CPUType::AVX512BW, int>(); }
KERNEL_TEST_CASE("add_bias/float AVX512BW") { return kernel_add_bias_test<CPUType::AVX512BW, float>(); }
KERNEL_TEST_CASE("add_bias/double AVX512BW") { return kernel_add_bias_test<CPUType::AVX512BW, double>(); }
#endif

} // namespace intgemm