
github.com/marian-nmt/intgemm/intgemm.git
author    Kenneth Heafield <github@kheafield.com>  2018-06-23 22:27:52 +0300
committer Kenneth Heafield <github@kheafield.com>  2018-06-23 22:27:52 +0300
commit    0ae900a954ee19ebb8d33ba57493e4a14249c719 (patch)
tree      19192163a762ac28ec454439e0a7e40575479338 /example.cc
parent    4da058ae1c33a859882e60eb0bf89512762422d8 (diff)
Rename Generic_16bit to Int16, Generic_8bit to Int8
Diffstat (limited to 'example.cc')
 -rw-r--r--  example.cc | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/example.cc b/example.cc
index c67e48b..0072596 100644
--- a/example.cc
+++ b/example.cc
@@ -41,14 +41,14 @@ int main() {
AlignedVector<int16_t> A_prepared(A_rows * width);
AlignedVector<int16_t> B_prepared(width * B_cols);
// Quantize A.
- intgemm::Generic_16bit::PrepareA(A.get(), A_prepared.get(), quant_mult, A_rows, width);
+ intgemm::Int16::PrepareA(A.get(), A_prepared.get(), quant_mult, A_rows, width);
// Quantize and reshape B.
// Typically you will do this once when parameters are loaded, not every time.
- intgemm::Generic_16bit::PrepareB(B.get(), B_prepared.get(), quant_mult, width, B_cols);
+ intgemm::Int16::PrepareB(B.get(), B_prepared.get(), quant_mult, width, B_cols);
AlignedVector<float> C(A_rows * B_cols);
// Do the actual multiply.
- intgemm::Generic_16bit::Multiply(A_prepared.get(), B_prepared.get(), C.get(), 1.0 / (quant_mult * quant_mult), A_rows, width, B_cols);
+ intgemm::Int16::Multiply(A_prepared.get(), B_prepared.get(), C.get(), 1.0 / (quant_mult * quant_mult), A_rows, width, B_cols);
// Sanity check. C will be row major.
assert(fabs(C[0] - top_left_reference) < 0.05);
}
@@ -60,14 +60,14 @@ int main() {
AlignedVector<int8_t> A_prepared(A_rows * width);
AlignedVector<int8_t> B_prepared(width * B_cols);
// Quantize A.
- intgemm::Generic_8bit::PrepareA(A.get(), A_prepared.get(), quant_mult, A_rows, width);
+ intgemm::Int8::PrepareA(A.get(), A_prepared.get(), quant_mult, A_rows, width);
// Quantize and reshape B.
// Typically you will do this once when parameters are loaded, not every time.
- intgemm::Generic_8bit::PrepareB(B.get(), B_prepared.get(), quant_mult, width, B_cols);
+ intgemm::Int8::PrepareB(B.get(), B_prepared.get(), quant_mult, width, B_cols);
AlignedVector<float> C(A_rows * B_cols);
// Do the actual multiply.
- intgemm::Generic_8bit::Multiply(A_prepared.get(), B_prepared.get(), C.get(), 1.0 / (quant_mult * quant_mult), A_rows, width, B_cols);
+ intgemm::Int8::Multiply(A_prepared.get(), B_prepared.get(), C.get(), 1.0 / (quant_mult * quant_mult), A_rows, width, B_cols);
// Sanity check. C will be row major.
assert(fabs(C[0] - top_left_reference) < 0.05);
}
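
For reference, the renamed 16-bit entry points can be exercised end to end roughly as below. This is a minimal sketch assuming the API as it stands at this commit (later intgemm versions changed the Multiply interface); the header names, matrix dimensions, fill values, and quant_mult are illustrative assumptions, not taken from the diff.

// Minimal sketch of the renamed Int16 path (API as of this commit).
// Header names, dimensions, and quant_mult are illustrative assumptions.
#include "intgemm.h"
#include "aligned.h"

#include <cmath>
#include <cstdio>

int main() {
  // intgemm works on whole SIMD registers, so keep width a multiple of 32
  // and B_cols a multiple of 8 (assumed requirement, mirroring example.cc).
  const int A_rows = 4, width = 256, B_cols = 8;

  AlignedVector<float> A(A_rows * width);
  AlignedVector<float> B(width * B_cols);
  // Deterministic values in roughly [-0.5, 0.5).
  for (int i = 0; i < A_rows * width; ++i) A.get()[i] = 0.001f * (i % 997) - 0.5f;
  for (int i = 0; i < width * B_cols; ++i) B.get()[i] = 0.001f * (i % 991) - 0.5f;

  // Float reference for the top-left entry of C = A * B (both row major).
  float top_left_reference = 0.0f;
  for (int i = 0; i < width; ++i) top_left_reference += A.get()[i] * B.get()[i * B_cols];

  // Hypothetical scaling factor into int16 range, chosen for illustration;
  // it must keep the quantized values and their accumulated products in range.
  const float quant_mult = 1024.0f;

  AlignedVector<int16_t> A_prepared(A_rows * width);
  AlignedVector<int16_t> B_prepared(width * B_cols);
  // Quantize A; quantize and reshape B (normally done once at load time).
  intgemm::Int16::PrepareA(A.get(), A_prepared.get(), quant_mult, A_rows, width);
  intgemm::Int16::PrepareB(B.get(), B_prepared.get(), quant_mult, width, B_cols);

  AlignedVector<float> C(A_rows * B_cols);
  // Multiply and undo the quantization scale; C comes back row major.
  intgemm::Int16::Multiply(A_prepared.get(), B_prepared.get(), C.get(),
                           1.0 / (quant_mult * quant_mult), A_rows, width, B_cols);

  // Quantization introduces small error, so compare loosely rather than exactly.
  std::printf("C[0] = %f, float reference = %f, diff = %f\n",
              C.get()[0], top_left_reference,
              std::fabs(C.get()[0] - top_left_reference));
  return 0;
}

The 8-bit hunk has the same shape: swap int16_t for int8_t and Int16 for Int8, and use a smaller quant_mult since quantized values must fit in the int8 range.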