
github.com/marian-nmt/intgemm/intgemm.git
author	Kenneth Heafield <github@kheafield.com>	2020-08-11 16:43:51 +0300
committer	Kenneth Heafield <github@kheafield.com>	2020-08-11 16:43:51 +0300
commit	96f64223cf60f9b491237425446f87629546fc89 (patch)
tree	2e66772ab16d9b0fa5aeec64eaf6aff23d1ac6cf
parent	fa2d58d68eaa041b7f41e1f56bc65983ad9ec54d (diff)
Fix lots of MSVC type conversions
-rw-r--r--	test/add127_test.cc	12
-rw-r--r--	test/kernels/bitwise_not_test.cc	3
-rw-r--r--	test/kernels/downcast_test.cc	13
-rw-r--r--	test/kernels/exp_test.cc	5
-rw-r--r--	test/kernels/floor_test.cc	5
-rw-r--r--	test/kernels/multiply_sat_test.cc	9
-rw-r--r--	test/kernels/multiply_test.cc	9
-rw-r--r--	test/kernels/relu_test.cc	5
-rw-r--r--	test/kernels/rescale_test.cc	7
-rw-r--r--	test/kernels/sigmoid_test.cc	5
-rw-r--r--	test/kernels/tanh_test.cc	3
-rw-r--r--	test/kernels/upcast_test.cc	15
-rw-r--r--	test/kernels/write_test.cc	3
13 files changed, 57 insertions, 37 deletions
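
The fixes fall into a few recurring patterns: double and plain int literals inside float expressions become float literals, and vector-length constants derived from sizeof are declared as std::size_t instead of int (or an auto that obscures the type). A minimal standalone sketch of these two patterns, with hypothetical names and not taken from the repository, follows; the matching std::iota seed casts are sketched after the downcast_test.cc and multiply_test.cc diffs further down.

    // Hypothetical sketch of the literal and length fixes; compiles on its own
    // and mirrors the shapes changed in this commit, but is not intgemm code.
    #include <cstddef>
    #include <cstdint>

    float unquant_example(float alpha) {
      // Before: float quant_mult   = 127 / alpha;
      //         float unquant_mult = 1.0 / (quant_mult * quant_mult);
      // 1.0 is a double literal, so that division runs in double and the result
      // is narrowed back to float, which MSVC reports as an implicit conversion.
      // The integer literals become float literals too, keeping every term of
      // the expression in single precision.
      float quant_mult = 127.0f / alpha;
      float unquant_mult = 1.0f / (quant_mult * quant_mult);
      return unquant_mult;
    }

    // Before: const int LENGTH = sizeof(std::int64_t) / sizeof(std::int8_t);
    // sizeof yields std::size_t, so storing the result in an int is a narrowing
    // conversion (MSVC C4267); spelling out std::size_t keeps the length in its
    // natural unsigned type.
    constexpr static std::size_t VECTOR_LENGTH = sizeof(std::int64_t) / sizeof(std::int8_t);

    int main() {
      float u = unquant_example(2.0f);
      return (u > 0.0f && VECTOR_LENGTH == 8u) ? 0 : 1;
    }
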
diff --git a/test/add127_test.cc b/test/add127_test.cc
index bf15f49..90455a4 100644
--- a/test/add127_test.cc
+++ b/test/add127_test.cc
@@ -135,7 +135,7 @@ template <class Routine> void TestMultiplyBiasNew(Index A_rows, Index width, Ind
/*ACTUAL MULTIPLICATION
*
*/
- float unquant_mult_forprep = (-1)*(alpha)*(alpha)/(127.0f); //Minus one to invert add_ps later on
+ float unquant_mult_forprep = (-1.0f)*(alpha)*(alpha)/(127.0f); //Minus one to invert add_ps later on
Routine::PrepareBias(B_prep.begin(), width, B_cols, callbacks::UnquantizeAndAddBiasAndWrite(unquant_mult_forprep, bias.begin(), bias.begin()));
//Routine::PrepareBias(B.begin(), bias.begin(), alpha, width, B_cols);
Routine::Multiply8Shift(A_prep.begin(), B_prep.begin(), A_rows, width, B_cols, callbacks::UnquantizeAndAddBiasAndWrite(unquant_mult, bias.begin(), test_C.begin()));
@@ -166,8 +166,8 @@ template <class Routine> void TestMultiplyShiftNonShift(Index A_rows, Index widt
}
float alpha = 2.0f;
- float quant_mult = 127/alpha;
- float unquant_mult = 1.0/(quant_mult*quant_mult);
+ float quant_mult = 127.0f / alpha;
+ float unquant_mult = 1.0f / (quant_mult*quant_mult);
AlignedVector<uint8_t> A_prep(A.size());
AlignedVector<int8_t> A_prep_old(A.size());
@@ -192,7 +192,7 @@ template <class Routine> void TestMultiplyShiftNonShift(Index A_rows, Index widt
/*
* Multiply8 shift multiplication
*/
- float unquant_mult_forprep = (-1)*(alpha)*(alpha)/(127.0f); //Minus one to invert add_ps later on
+ float unquant_mult_forprep = (-1.0f)*(alpha)*(alpha)/(127.0f); //Minus one to invert add_ps later on
Routine::PrepareBias(B_prep.begin(), width, B_cols, callbacks::UnquantizeAndAddBiasAndWrite(unquant_mult_forprep, bias.begin(), bias.begin()));
Routine::Multiply8Shift(A_prep.begin(), B_prep.begin(), A_rows, width, B_cols, callbacks::UnquantizeAndAddBiasAndWrite(unquant_mult, bias.begin(), test_C.begin()));
@@ -222,8 +222,8 @@ template <class Routine> void TestMultiplyShiftInt(Index A_rows, Index width, In
}
float alpha = 2.0f;
- float quant_mult = 127/alpha;
- float unquant_mult = 1.0/(quant_mult*quant_mult);
+ float quant_mult = 127.0f / alpha;
+ float unquant_mult = 1.0f / (quant_mult*quant_mult);
AlignedVector<uint8_t> A_prep(A.size());
AlignedVector<int8_t> A_prep_old(A.size());
diff --git a/test/kernels/bitwise_not_test.cc b/test/kernels/bitwise_not_test.cc
index 309be7e..1408db3 100644
--- a/test/kernels/bitwise_not_test.cc
+++ b/test/kernels/bitwise_not_test.cc
@@ -2,6 +2,7 @@
#include "../../aligned.h"
#include "../../kernels.h"
+#include <cstdlib>
#include <numeric>
namespace intgemm {
@@ -12,7 +13,7 @@ void kernel_bitwise_not_test() {
return;
using vec_t = vector_t<CPUType_, int>;
- constexpr static auto VECTOR_LENGTH = sizeof(vec_t) / sizeof(int);
+ constexpr static std::size_t VECTOR_LENGTH = sizeof(vec_t) / sizeof(int);
AlignedVector<int> input(VECTOR_LENGTH);
AlignedVector<int> output(VECTOR_LENGTH);
diff --git a/test/kernels/downcast_test.cc b/test/kernels/downcast_test.cc
index d3261c7..7bc20a1 100644
--- a/test/kernels/downcast_test.cc
+++ b/test/kernels/downcast_test.cc
@@ -2,6 +2,7 @@
#include "../../aligned.h"
#include "../../kernels.h"
+#include <cstddef>
#include <numeric>
namespace intgemm {
@@ -12,12 +13,12 @@ void kernel_downcast32to8_test() {
return;
using vi = vector_t<CPUType_, int>;
- const int LENGTH = sizeof(vi) / sizeof(int8_t);
+ const std::size_t LENGTH = sizeof(vi) / sizeof(int8_t);
AlignedVector<int32_t> input(LENGTH);
AlignedVector<int8_t> output(LENGTH);
- std::iota(input.begin(), input.end(), -LENGTH / 2);
+ std::iota(input.begin(), input.end(), -static_cast<int32_t>(LENGTH / 2));
*output.template as<vi>() = kernels::downcast32to8(
input.template as<vi>()[0], input.template as<vi>()[1],
@@ -43,12 +44,12 @@ void kernel_downcast32to16_test() {
return;
using vi = vector_t<CPUType_, int>;
- const int LENGTH = sizeof(vi) / sizeof(int16_t);
+ const std::size_t LENGTH = sizeof(vi) / sizeof(int16_t);
AlignedVector<int32_t> input(LENGTH);
AlignedVector<int16_t> output(LENGTH);
- std::iota(input.begin(), input.end(), -LENGTH / 2);
+ std::iota(input.begin(), input.end(), -static_cast<int32_t>(LENGTH / 2));
*output.template as<vi>() = kernels::downcast32to16(
input.template as<vi>()[0], input.template as<vi>()[1]);
@@ -73,12 +74,12 @@ void kernel_downcast16to8_test() {
return;
using vi = vector_t<CPUType_, int>;
- const int LENGTH = sizeof(vi) / sizeof(int8_t);
+ const std::size_t LENGTH = sizeof(vi) / sizeof(int8_t);
AlignedVector<int16_t> input(LENGTH);
AlignedVector<int8_t> output(LENGTH);
- std::iota(input.begin(), input.end(), -LENGTH / 2);
+ std::iota(input.begin(), input.end(), -static_cast<int16_t>(LENGTH / 2));
*output.template as<vi>() = kernels::downcast16to8(
input.template as<vi>()[0], input.template as<vi>()[1]);
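
Once the length constants above are std::size_t, the old std::iota seed -LENGTH / 2 changes meaning: unary minus on an unsigned value wraps around instead of going negative (MSVC warns with C4146), so the commit halves the length, casts it down to the signed element type, and negates that. A hedged sketch with hypothetical names:

    // Hypothetical sketch of the std::iota seed fix; not intgemm code.
    #include <cstddef>
    #include <cstdint>
    #include <numeric>
    #include <vector>

    int main() {
      const std::size_t LENGTH = 16;
      std::vector<std::int8_t> input(LENGTH);

      // Before (with LENGTH unsigned): -LENGTH / 2 is computed in std::size_t,
      // so the intended -8 wraps to a huge positive value.
      // std::iota(input.begin(), input.end(), -LENGTH / 2);

      // After: cast LENGTH / 2 to the element type first, then negate, so the
      // sequence really runs -8, -7, ..., 7.
      std::iota(input.begin(), input.end(), -static_cast<std::int8_t>(LENGTH / 2));

      return input.front() == -8 ? 0 : 1;
    }
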
diff --git a/test/kernels/exp_test.cc b/test/kernels/exp_test.cc
index cf85562..b76a2e1 100644
--- a/test/kernels/exp_test.cc
+++ b/test/kernels/exp_test.cc
@@ -2,6 +2,7 @@
#include "../../aligned.h"
#include "../../kernels.h"
+#include <cstddef>
#include <numeric>
namespace intgemm {
@@ -12,12 +13,12 @@ void kernel_exp_approx_taylor_test() {
return;
using vec_t = vector_t<CPUType_, float>;
- constexpr static auto VECTOR_LENGTH = sizeof(vec_t) / sizeof(float);
+ constexpr static std::size_t VECTOR_LENGTH = sizeof(vec_t) / sizeof(float);
AlignedVector<float> input(VECTOR_LENGTH);
AlignedVector<float> output(VECTOR_LENGTH);
- std::iota(input.begin(), input.end(), -int(VECTOR_LENGTH / 2));
+ std::iota(input.begin(), input.end(), -static_cast<float>(VECTOR_LENGTH / 2));
*output.template as<vec_t>() = kernels::exp_approx_taylor(*input.template as<vec_t>());
for (std::size_t i = 0; i < output.size(); ++i)
diff --git a/test/kernels/floor_test.cc b/test/kernels/floor_test.cc
index 365d16d..01b607f 100644
--- a/test/kernels/floor_test.cc
+++ b/test/kernels/floor_test.cc
@@ -2,6 +2,7 @@
#include "../../aligned.h"
#include "../../kernels.h"
+#include <cstddef>
#include <numeric>
namespace intgemm {
@@ -12,12 +13,12 @@ void kernel_floor_test() {
return;
using vec_t = vector_t<CPUType_, float>;
- constexpr static auto VECTOR_LENGTH = sizeof(vec_t) / sizeof(float);
+ constexpr static std::size_t VECTOR_LENGTH = sizeof(vec_t) / sizeof(float);
AlignedVector<float> input(VECTOR_LENGTH);
AlignedVector<float> output(VECTOR_LENGTH);
- std::iota(input.begin(), input.end(), -int(VECTOR_LENGTH / 2));
+ std::iota(input.begin(), input.end(), -static_cast<float>(VECTOR_LENGTH / 2));
*output.template as<vec_t>() = kernels::floor(*input.template as<vec_t>());
for (std::size_t i = 0; i < output.size(); ++i)
diff --git a/test/kernels/multiply_sat_test.cc b/test/kernels/multiply_sat_test.cc
index edea772..e65183a 100644
--- a/test/kernels/multiply_sat_test.cc
+++ b/test/kernels/multiply_sat_test.cc
@@ -2,6 +2,9 @@
#include "../../aligned.h"
#include "../../kernels.h"
+#include <stdint.h>
+#include <cstdint>
+#include <cstddef>
#include <numeric>
namespace intgemm {
@@ -12,14 +15,14 @@ void kernel_multiply_sat_test() {
return;
using vec_t = vector_t<CPUType_, Type_>;
- constexpr static auto VECTOR_LENGTH = sizeof(vec_t) / sizeof(Type_);
+ constexpr static std::size_t VECTOR_LENGTH = sizeof(vec_t) / sizeof(Type_);
AlignedVector<Type_> input1(VECTOR_LENGTH);
AlignedVector<Type_> input2(VECTOR_LENGTH);
AlignedVector<Type_> output(VECTOR_LENGTH);
- std::iota(input1.begin(), input1.end(), -int(VECTOR_LENGTH / 2));
- std::iota(input2.begin(), input2.end(), -int(VECTOR_LENGTH / 3));
+ std::iota(input1.begin(), input1.end(), -static_cast<Type_>(VECTOR_LENGTH / 2));
+ std::iota(input2.begin(), input2.end(), -static_cast<Type_>(VECTOR_LENGTH / 3));
// TODO: try all shifts. The shift must be an immediate.
std::size_t shift = 1;
diff --git a/test/kernels/multiply_test.cc b/test/kernels/multiply_test.cc
index 30f1640..338ea0c 100644
--- a/test/kernels/multiply_test.cc
+++ b/test/kernels/multiply_test.cc
@@ -2,6 +2,9 @@
#include "../../aligned.h"
#include "../../kernels.h"
+#include <cstddef>
+#include <cstdint>
+#include <stdint.h>
#include <numeric>
namespace intgemm {
@@ -12,14 +15,14 @@ void kernel_multiply_test() {
return;
using vec_t = vector_t<CPUType_, Type_>;
- constexpr static auto VECTOR_LENGTH = sizeof(vec_t) / sizeof(Type_);
+ constexpr static std::size_t VECTOR_LENGTH = sizeof(vec_t) / sizeof(Type_);
AlignedVector<Type_> input1(VECTOR_LENGTH);
AlignedVector<Type_> input2(VECTOR_LENGTH);
AlignedVector<Type_> output(VECTOR_LENGTH);
- std::iota(input1.begin(), input1.end(), -int(VECTOR_LENGTH / 2));
- std::iota(input2.begin(), input2.end(), -int(VECTOR_LENGTH / 3));
+ std::iota(input1.begin(), input1.end(), -static_cast<Type_>(VECTOR_LENGTH / 2));
+ std::iota(input2.begin(), input2.end(), -static_cast<Type_>(VECTOR_LENGTH / 3));
*output.template as<vec_t>() = kernels::multiply<Type_>(*input1.template as<vec_t>(), *input2.template as<vec_t>());
for (std::size_t i = 0; i < output.size(); ++i)
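
The templated kernel tests (multiply_sat and multiply above, relu and write below) use the same seed fix, but cast to the element type template parameter so that one line is correct for every instantiation, whether the element is int8_t, int16_t, int, or float. A minimal sketch with a hypothetical template parameter name:

    // Hypothetical sketch of the templated seed cast; ElemType_ mirrors the
    // tests' template parameter, but this snippet is not intgemm code.
    #include <cstddef>
    #include <cstdint>
    #include <numeric>
    #include <vector>

    template <typename ElemType_>
    ElemType_ first_element() {
      constexpr static std::size_t VECTOR_LENGTH = 16;
      std::vector<ElemType_> input(VECTOR_LENGTH);
      // Before: std::iota(input.begin(), input.end(), -int(VECTOR_LENGTH / 2));
      // An int seed gets converted to ElemType_ on every assignment, which MSVC
      // can flag for the narrow integer and floating-point instantiations.
      // After: cast once to the element type, so the seed already matches it.
      std::iota(input.begin(), input.end(),
                -static_cast<ElemType_>(VECTOR_LENGTH / 2));
      return input.front();
    }

    int main() {
      bool ok = first_element<std::int8_t>() == -8 &&
                first_element<std::int16_t>() == -8 &&
                first_element<float>() == -8.0f;
      return ok ? 0 : 1;
    }
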
diff --git a/test/kernels/relu_test.cc b/test/kernels/relu_test.cc
index c291dea..6c7309a 100644
--- a/test/kernels/relu_test.cc
+++ b/test/kernels/relu_test.cc
@@ -2,6 +2,7 @@
#include "../../aligned.h"
#include "../../kernels.h"
+#include <cstddef>
#include <numeric>
namespace intgemm {
@@ -12,12 +13,12 @@ void kernel_relu_test() {
return;
using vec_t = vector_t<CPUType_, ElemType_>;
- constexpr static auto VECTOR_LENGTH = sizeof(vec_t) / sizeof(ElemType_);
+ constexpr static std::size_t VECTOR_LENGTH = sizeof(vec_t) / sizeof(ElemType_);
AlignedVector<ElemType_> input(VECTOR_LENGTH);
AlignedVector<ElemType_> output(VECTOR_LENGTH);
- std::iota(input.begin(), input.end(), -int(VECTOR_LENGTH / 2));
+ std::iota(input.begin(), input.end(), -static_cast<ElemType_>(VECTOR_LENGTH / 2));
*output.template as<vec_t>() = kernels::relu<ElemType_>(*input.template as<vec_t>());
for (std::size_t i = 0; i < output.size(); ++i)
diff --git a/test/kernels/rescale_test.cc b/test/kernels/rescale_test.cc
index 2f79d39..fb22c52 100644
--- a/test/kernels/rescale_test.cc
+++ b/test/kernels/rescale_test.cc
@@ -2,6 +2,9 @@
#include "../../aligned.h"
#include "../../kernels.h"
+#include <cstddef>
+#include <cstdint>
+#include <stdint.h>
#include <numeric>
namespace intgemm {
@@ -13,12 +16,12 @@ void kernel_rescale_test() {
using vi = vector_t<CPUType_, int>;
using vf = vector_t<CPUType_, float>;
- const int LENGTH = sizeof(vi) / sizeof(int);
+ const std::size_t LENGTH = sizeof(vi) / sizeof(int);
AlignedVector<int32_t> input(LENGTH);
AlignedVector<int32_t> output(LENGTH);
- std::iota(input.begin(), input.end(), -LENGTH / 2);
+ std::iota(input.begin(), input.end(), -static_cast<int32_t>(LENGTH / 2));
float scale = 2;
*output.template as<vi>() = kernels::rescale(*input.template as<vi>(), intgemm::set1_ps<vf>(scale));
diff --git a/test/kernels/sigmoid_test.cc b/test/kernels/sigmoid_test.cc
index a8b0b3c..7f7392d 100644
--- a/test/kernels/sigmoid_test.cc
+++ b/test/kernels/sigmoid_test.cc
@@ -2,6 +2,7 @@
#include "../../aligned.h"
#include "../../kernels.h"
+#include <cstddef>
#include <numeric>
namespace intgemm {
@@ -19,12 +20,12 @@ void kernel_sigmoid_test() {
return;
using vec_t = vector_t<CPUType_, float>;
- constexpr static int VECTOR_LENGTH = sizeof(vec_t) / sizeof(float);
+ constexpr static std::size_t VECTOR_LENGTH = sizeof(vec_t) / sizeof(float);
AlignedVector<float> input(VECTOR_LENGTH);
AlignedVector<float> output(VECTOR_LENGTH);
- std::iota(input.begin(), input.end(), -int(VECTOR_LENGTH / 2));
+ std::iota(input.begin(), input.end(), -static_cast<float>(VECTOR_LENGTH / 2));
*output.template as<vec_t>() = kernels::sigmoid(*input.template as<vec_t>());
for (std::size_t i = 0; i < output.size(); ++i)
diff --git a/test/kernels/tanh_test.cc b/test/kernels/tanh_test.cc
index 2d688ea..4ba099c 100644
--- a/test/kernels/tanh_test.cc
+++ b/test/kernels/tanh_test.cc
@@ -2,6 +2,7 @@
#include "../../aligned.h"
#include "../../kernels.h"
+#include <cstddef>
#include <numeric>
namespace intgemm {
@@ -12,7 +13,7 @@ void kernel_tanh_test() {
return;
using vec_t = vector_t<CPUType_, float>;
- constexpr static int VECTOR_LENGTH = sizeof(vec_t) / sizeof(float);
+ constexpr static std::size_t VECTOR_LENGTH = sizeof(vec_t) / sizeof(float);
AlignedVector<float> input(VECTOR_LENGTH);
AlignedVector<float> output(VECTOR_LENGTH);
diff --git a/test/kernels/upcast_test.cc b/test/kernels/upcast_test.cc
index 497c734..8b3469a 100644
--- a/test/kernels/upcast_test.cc
+++ b/test/kernels/upcast_test.cc
@@ -2,7 +2,10 @@
#include "../../aligned.h"
#include "../../kernels.h"
+#include <cstdint>
+#include <cstddef>
#include <numeric>
+#include <stdint.h>
namespace intgemm {
@@ -12,12 +15,12 @@ void kernel_upcast8to16_test() {
return;
using vi = vector_t<CPUType_, int>;
- const int LENGTH = sizeof(vi) / sizeof(int8_t);
+ const std::size_t LENGTH = sizeof(vi) / sizeof(int8_t);
AlignedVector<int8_t> input(LENGTH);
AlignedVector<int16_t> output(LENGTH);
- std::iota(input.begin(), input.end(), -LENGTH / 2);
+ std::iota(input.begin(), input.end(), -static_cast<int8_t>(LENGTH / 2));
auto result = kernels::upcast8to16(*input.template as<vi>());
output.template as<vi>()[0] = result.first;
@@ -44,12 +47,12 @@ void kernel_upcast16to32_test() {
return;
using vi = vector_t<CPUType_, int>;
- const int LENGTH = sizeof(vi) / sizeof(int16_t);
+ const std::size_t LENGTH = sizeof(vi) / sizeof(int16_t);
AlignedVector<int16_t> input(LENGTH);
AlignedVector<int32_t> output(LENGTH);
- std::iota(input.begin(), input.end(), -LENGTH / 2);
+ std::iota(input.begin(), input.end(), -static_cast<int16_t>(LENGTH / 2));
auto result = kernels::upcast16to32(*input.template as<vi>());
output.template as<vi>()[0] = result.first;
@@ -76,12 +79,12 @@ void kernel_upcast8to32_test() {
return;
using vi = vector_t<CPUType_, int>;
- const int LENGTH = sizeof(vi) / sizeof(int8_t);
+ const std::size_t LENGTH = sizeof(vi) / sizeof(int8_t);
AlignedVector<int8_t> input(LENGTH);
AlignedVector<int32_t> output(LENGTH);
- std::iota(input.begin(), input.end(), -LENGTH / 2);
+ std::iota(input.begin(), input.end(), -static_cast<int8_t>(LENGTH / 2));
auto result = kernels::upcast8to32(*input.template as<vi>());
output.template as<vi>()[0] = result.first;
diff --git a/test/kernels/write_test.cc b/test/kernels/write_test.cc
index aa02c70..a0189fe 100644
--- a/test/kernels/write_test.cc
+++ b/test/kernels/write_test.cc
@@ -2,6 +2,7 @@
#include "../../aligned.h"
#include "../../kernels.h"
+#include <cstddef>
#include <numeric>
namespace intgemm {
@@ -12,7 +13,7 @@ void kernel_write_test() {
return;
using vec_t = vector_t<CPUType_, ElemType_>;
- constexpr static auto VECTOR_LENGTH = sizeof(vec_t) / sizeof(ElemType_);
+ constexpr static std::size_t VECTOR_LENGTH = sizeof(vec_t) / sizeof(ElemType_);
AlignedVector<ElemType_> input(VECTOR_LENGTH);
AlignedVector<ElemType_> output(VECTOR_LENGTH);