diff options
author | Jianyu Huang <jianyuhuang@fb.com> | 2019-08-06 21:55:17 +0300 |
---|---|---|
committer | Facebook Github Bot <facebook-github-bot@users.noreply.github.com> | 2019-08-06 21:59:00 +0300 |
commit | cf34b9a26b609109b18d6498f0608faddb7a911b (patch) | |
tree | 1ceaddaf942edb9debcafad7491b750fc3a5f066 /include/fbgemm | |
parent | d8b3323668fdd15dc70e9cb43ab16e96f4846eeb (diff) |
Back out "[fbgemm] Integrate VNNI into FBGEMM master branch"
Summary:
Original commit changeset: fcaa13cc3159
ASMJIT requires the CMake version to be 3.8
However, FBGEMM and PyTorch only need the CMake version to be 3.5+.
This caused the build failure in FBGEMM:
https://circleci.com/gh/pytorch/FBGEMM/122#build-timing/containers/0
Reviewed By: dskhudia
Differential Revision: D16670547
fbshipit-source-id: 506714c3db1cb82cf98895f58f82f235128f5285
Diffstat (limited to 'include/fbgemm')
-rw-r--r-- | include/fbgemm/PackingTraits-inl.h | 50 | ||||
-rw-r--r-- | include/fbgemm/Utils.h | 7 |
2 files changed, 1 insertion, 56 deletions
diff --git a/include/fbgemm/PackingTraits-inl.h b/include/fbgemm/PackingTraits-inl.h index baccfad..76eb425 100644 --- a/include/fbgemm/PackingTraits-inl.h +++ b/include/fbgemm/PackingTraits-inl.h @@ -222,53 +222,3 @@ struct PackingTraits< 128}; ///< Cache block for N dimension (multiple of NR). static constexpr int KCB{256}; ///< Cache block for K dimension. }; - -/** - * @brief Helper struct to type specialize for int16_t and int32_t together. - */ -template <typename T> -struct is_16or32bit { - static constexpr bool value = - std::is_same<T, int16_t>::value || std::is_same<T, int32_t>::value; -}; - -/** - * @brief Packing parameter specialization for accumulation into 32-bit/16-bit - * integers. - * - * Since there is no int16_t accumulation for AVX512 VNNI, we redirect int16_t - * to int32_t accumulation and use the same blocking parameters as int32_t. - * - * This is picked when T is of int8 type (signed or unsigned) and instruction - * set is avx512_vnni. - */ -template <typename T, typename accT> -struct PackingTraits< - T, - accT, - inst_set_t::avx512_vnni, - typename std::enable_if< - is_8bit<T>::value && is_16or32bit<accT>::value>::type> { - static constexpr int MR{8}; ///< Register block for M dimension. - static constexpr int NR_MIN{ - 16}; ///< Minimum register block for N dimension. - ///< 16 because 16*ROW_INTERLEAVE int8 elements - ///< completely fill a 512-bit wide vector. - static constexpr int NR{ - 32}; ///< Register block for N dimension. - ///< Must be a multiple of 16 because 16*ROW_INTERLEAVE int8 elements - ///< completely fill a 512-bit wide vector. Total registers used for - ///< N dimension: NR*ROW_INTERLEAVE*8/VLEN. We use MR x - ///< NR*ROW_INTERLEAVE*8/VLEN zmm registers - ///< for C accumulations. - - static constexpr int ROW_INTERLEAVE{ - 4}; ///< 4 rows are interleaved to use vpmaddubsw instruction for packing - ///< B matrix. - - static constexpr int MCB{ - 128}; ///< Cache block for M dimension (multiple of MR). 
- static constexpr int NCB{ - 32}; ///< Cache block for N dimension (multiple of NR). - static constexpr int KCB{256}; ///< Cache block for K dimension. -}; diff --git a/include/fbgemm/Utils.h b/include/fbgemm/Utils.h index 3f8522b..107cf07 100644 --- a/include/fbgemm/Utils.h +++ b/include/fbgemm/Utils.h @@ -29,7 +29,7 @@ enum class matrix_op_t { NoTranspose, Transpose }; /** * @brief Typed enum for supported instruction sets. */ -enum class inst_set_t { anyarch, avx2, avx512, avx512_vnni }; +enum class inst_set_t { anyarch, avx2, avx512 }; /** * @brief Typed enum for optimized paths for convolutions @@ -100,11 +100,6 @@ FBGEMM_API bool fbgemmHasAvx512Support(); FBGEMM_API bool fbgemmHasAvx2Support(); /** - * @brief Are we running on a AVX512_VNNI supported cpu? - */ -FBGEMM_API bool fbgemmHasAvx512VnniSupport(); - -/** * @brief Helper struct to enable autotuning of FBGEMM packing and kernels. * * This structure is optional. If not used, the default values for these |