github.com/marian-nmt/intgemm/intgemm.git
author     Kenneth Heafield <github@kheafield.com>   2020-08-11 02:20:04 +0300
committer  Kenneth Heafield <github@kheafield.com>   2020-08-11 02:20:04 +0300
commit     6d30eb00ef64ad225926eaaa766550201236ac44 (patch)
tree       35869130641b5b9563483be2b63bbcfcc9bb71a7
parent     ffdac07b832fd251ac3e3ac5521b4ec90a216847 (diff)
MSVC conversion fixes
-rw-r--r--  test/add127_test.cc            14
-rw-r--r--  test/kernels/add_bias_test.cc   4
-rw-r--r--  test/prepare_b_transposed.cc    2
3 files changed, 11 insertions(+), 9 deletions(-)
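
Note: most of the edits below silence MSVC implicit-conversion warnings at /W4, chiefly C4267 (conversion from 'size_t' to a narrower type, possible loss of data) and C4244 (narrowing conversion, e.g. double to float or int to a smaller integer). A minimal sketch of the size_t case follows; Quantize and Caller here are illustrative stand-ins, not the library's actual declarations, and Index stands in for intgemm's 32-bit index type:

#include <cstdint>
#include <vector>

using Index = std::uint32_t;  // stand-in for intgemm's 32-bit Index

// Illustrative stand-in for Routine::Quantize, not the real signature.
void Quantize(const float* in, std::int8_t* out, float quant_mult, Index size) {
  for (Index i = 0; i < size; ++i)
    out[i] = static_cast<std::int8_t>(in[i] * quant_mult);
}

void Caller(const std::vector<float>& input, std::int8_t* out, float quant_mult) {
  // input.size() is std::size_t (64-bit on x64); passing it straight to the
  // Index parameter narrows, which MSVC reports as C4267. The commit makes
  // that narrowing explicit:
  Quantize(input.data(), out, quant_mult, static_cast<Index>(input.size()));
}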
diff --git a/test/add127_test.cc b/test/add127_test.cc
index 748c1c3..bf15f49 100644
--- a/test/add127_test.cc
+++ b/test/add127_test.cc
@@ -1,6 +1,7 @@
#include "test.h"
namespace intgemm {
+namespace {
void CompareAs(int8_t * output_old, uint8_t * output_new, Index rows, Index cols) {
for (Index r = 0; r<rows; r++) {
@@ -47,7 +48,7 @@ template <class Routine> void TestPrepareBias(Index rows, Index cols) {
AlignedVector<int8_t> B_prep(inputB.size());
AlignedVector<int8_t> B_quant(inputB.size());
Routine::PrepareB(inputB.begin(), B_prep.begin(), quant_mult, rows, cols);
- Routine::Quantize(inputB.begin(), B_quant.begin(), quant_mult, inputB.size());
+ Routine::Quantize(inputB.begin(), B_quant.begin(), quant_mult, static_cast<intgemm::Index>(inputB.size()));
AlignedVector<float> inputBias(cols);
@@ -102,8 +103,8 @@ template <class Routine> void TestMultiplyBiasNew(Index A_rows, Index width, Ind
}
float alpha = 2.0f;
- float quant_mult = 127/alpha;
- float unquant_mult = 1.0/(quant_mult*quant_mult);
+ float quant_mult = 127.0f / alpha;
+ float unquant_mult = 1.0f / (quant_mult*quant_mult);
AlignedVector<uint8_t> A_prep(A.size());
AlignedVector<int8_t> B_prep(B.size());
@@ -117,7 +118,7 @@ template <class Routine> void TestMultiplyBiasNew(Index A_rows, Index width, Ind
*
*/
AlignedVector<int8_t> B_quant(B.size());
- Routine::Quantize(B.begin(), B_quant.begin(), quant_mult, B.size());
+ Routine::Quantize(B.begin(), B_quant.begin(), quant_mult, static_cast<Index>(B.size()));
AlignedVector<float> slowint_C(test_C.size());
// Taking the original A_preparation which means A would be int8_t
AlignedVector<int8_t> A_prep2(A.size());
@@ -237,7 +238,7 @@ template <class Routine> void TestMultiplyShiftInt(Index A_rows, Index width, In
* Reference float multiplication
*/
AlignedVector<int8_t> B_quant(B.size());
- Routine::Quantize(B.begin(), B_quant.begin(), quant_mult, B.size());
+ Routine::Quantize(B.begin(), B_quant.begin(), quant_mult, static_cast<Index>(B.size()));
AlignedVector<float> slowint_C(test_C.size());
// Taking the original A_preparation which means A would be int8_t
// references::Multiply(A_prep.begin(), B_quant.begin(), slowint_C.begin(), A_rows, width, B_cols, [&](int32_t sum, const callbacks::OutputBufferInfo& info) {
@@ -474,4 +475,5 @@ TEST_CASE ("Multiply AVX512VNNI 8bit Shift vs Int", "[Add127]") {
}
#endif
-} //namespace intgemm
+} // namespace
+} // namespace intgemm
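
Besides the explicit casts, two things change in add127_test.cc. First, the test helpers move into an unnamed namespace, giving them internal linkage so identically named helpers in other test translation units cannot collide at link time. Second, the quantization constants are computed entirely in float: 1.0 is a double literal, so 1.0/(quant_mult*quant_mult) was evaluated in double and then narrowed to float on assignment, which MSVC reports as C4244. A sketch of that fix (QuantMultExample is an illustrative name):

void QuantMultExample() {
  float alpha = 2.0f;
  // 127/alpha was already float arithmetic (the int promotes); the .0f
  // suffix only makes the intent explicit.
  float quant_mult = 127.0f / alpha;
  // Before: 1.0 is a double literal, so the division ran in double and the
  // assignment narrowed double -> float (MSVC C4244):
  //   float unquant_mult = 1.0 / (quant_mult * quant_mult);
  // After: float literals keep the whole computation in float.
  float unquant_mult = 1.0f / (quant_mult * quant_mult);
  (void)unquant_mult;
}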
diff --git a/test/kernels/add_bias_test.cc b/test/kernels/add_bias_test.cc
index 2dd4e3d..7c299f0 100644
--- a/test/kernels/add_bias_test.cc
+++ b/test/kernels/add_bias_test.cc
@@ -18,8 +18,8 @@ void kernel_add_bias_test() {
AlignedVector<ElemType_> bias(VECTOR_LENGTH);
AlignedVector<ElemType_> output(VECTOR_LENGTH);
- std::iota(input.begin(), input.end(), 0);
- std::fill(bias.begin(), bias.end(), 100);
+ std::iota(input.begin(), input.end(), static_cast<ElemType_>(0));
+ std::fill(bias.begin(), bias.end(), static_cast<ElemType_>(100));
*output.template as<vec_t>() = kernels::add_bias(*input.template as<vec_t>(), bias.begin(), 0);
for (std::size_t i = 0; i < output.size(); ++i)
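
The add_bias_test.cc change is the same warning family inside a template: std::iota deduces its seed type and std::fill its value type from the int literals, so each per-element store converts int to ElemType_, which MSVC flags as C4244 at instantiation when ElemType_ is narrower (e.g. int8_t). Casting the literals once keeps everything in the element type. A compilable sketch, assuming a plain std::vector in place of AlignedVector and an illustrative FillBuffers helper:

#include <algorithm>
#include <cstdint>
#include <numeric>
#include <vector>

template <class ElemType_>
void FillBuffers(std::vector<ElemType_>& input, std::vector<ElemType_>& bias) {
  // Seeding with int literals would store int into ElemType_ elements
  // (C4244 for narrow types); cast once at the call instead.
  std::iota(input.begin(), input.end(), static_cast<ElemType_>(0));
  std::fill(bias.begin(), bias.end(), static_cast<ElemType_>(100));
}

int main() {
  std::vector<std::int8_t> input(16), bias(16);
  FillBuffers(input, bias);  // instantiates warning-free for int8_t
}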
diff --git a/test/prepare_b_transposed.cc b/test/prepare_b_transposed.cc
index 1a4ed88..661f9af 100644
--- a/test/prepare_b_transposed.cc
+++ b/test/prepare_b_transposed.cc
@@ -22,7 +22,7 @@ void PrepareBTransposedRef(const float* input, typename Backend::Integer* output
for (Index k = 0; k < vec_len; ++k) {
Index col = (i + k) % B_transposed_cols;
Index row = 8 * ((i + k) / B_transposed_cols) + j;
- *output++ = input[row * B_transposed_cols + col] * quant_mult;
+ *output++ = static_cast<float>(input[row * B_transposed_cols + col] * quant_mult);
}
}
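
The prepare_b_transposed.cc edit applies the pattern at a single store: input[...] * quant_mult is float arithmetic whose result lands in a backend-dependent *output type, and MSVC warns about the implicit conversion at instantiation; the commit spells the float intermediate out with static_cast<float>. A generic sketch of the underlying pattern, with an illustrative int16_t destination and a cast to the destination type as a common alternative spelling (StoreQuantized is a hypothetical name, not intgemm code):

#include <cstdint>

void StoreQuantized(const float* input, std::int16_t* output, float quant_mult,
                    int n) {
  for (int i = 0; i < n; ++i) {
    // An implicit float -> int16_t truncation here would draw C4244 at /W4;
    // the explicit cast marks the narrowing as intentional.
    *output++ = static_cast<std::int16_t>(input[i] * quant_mult);
  }
}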