
github.com/torch/torch7.git
author:    jokeren <robinho364@gmail.com>          2016-12-11 09:04:46 +0300
committer: Soumith Chintala <soumith@gmail.com>    2017-02-23 13:50:34 +0300
commit:    084a856cbe3f8f97c48e9679826ee2dce814e75f (patch)
tree:      88381eaa1410b20016c0cf94427132e8546ab2dd
parent:    d46b2ac4521ad56c68121187d816c9031bd00a92 (diff)
optimize THVector add mul div
 lib/TH/vector/AVX.c | 60
 1 file changed, 30 insertions(+), 30 deletions(-)
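The change is mechanical: in each of the six scalar-broadcast kernels (add/mul/div for double and float), the unused temporaries YMM2–YMM5 are dropped and each arithmetic result is written back into the register that held the load, leaving only two vector temporaries live per iteration. Below is a minimal standalone sketch of the resulting double-precision divide kernel with a scalar check in main; the function name and test harness are illustrative only, not part of the patch, and it assumes an AVX-capable compiler (e.g. gcc -mavx).

#include <immintrin.h>
#include <stddef.h>
#include <stdio.h>

/* Post-patch shape of THDoubleVector_div_AVX: two live temporaries,
 * results stored from the same registers that held the loads. */
static void div_AVX_sketch(double *y, const double *x, const double c, const ptrdiff_t n) {
  ptrdiff_t i;
  __m256d YMM15 = _mm256_set_pd(c, c, c, c);   /* broadcast the scalar divisor */
  __m256d YMM0, YMM1;
  for (i = 0; i <= n - 8; i += 8) {            /* 8 doubles per iteration (2 x 4 lanes) */
    YMM0 = _mm256_loadu_pd(x + i);
    YMM1 = _mm256_loadu_pd(x + i + 4);
    YMM0 = _mm256_div_pd(YMM0, YMM15);         /* reuse YMM0/YMM1 instead of extra YMM4/YMM5 */
    YMM1 = _mm256_div_pd(YMM1, YMM15);
    _mm256_storeu_pd(y + i, YMM0);
    _mm256_storeu_pd(y + i + 4, YMM1);
  }
  for (; i < n; i++)                           /* scalar tail for the remaining elements */
    y[i] = x[i] / c;
}

int main(void) {
  enum { N = 13 };                             /* odd size to exercise the scalar tail */
  double x[N], y[N];
  for (int i = 0; i < N; i++) x[i] = i + 1.0;
  div_AVX_sketch(y, x, 2.0, N);
  for (int i = 0; i < N; i++)
    if (y[i] != x[i] / 2.0) { puts("mismatch"); return 1; }
  puts("ok");
  return 0;
}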
diff --git a/lib/TH/vector/AVX.c b/lib/TH/vector/AVX.c
index a964c88..1abfccf 100644
--- a/lib/TH/vector/AVX.c
+++ b/lib/TH/vector/AVX.c
@@ -41,14 +41,14 @@ static void THDoubleVector_cdiv_AVX(double *z, const double *x, const double *y,
static void THDoubleVector_div_AVX(double *y, const double *x, const double c, const ptrdiff_t n) {
ptrdiff_t i;
__m256d YMM15 = _mm256_set_pd(c, c, c, c);
- __m256d YMM0, YMM1, YMM2, YMM3, YMM4, YMM5;
+ __m256d YMM0, YMM1;
for (i=0; i<=((n)-8); i+=8) {
YMM0 = _mm256_loadu_pd(x+i);
YMM1 = _mm256_loadu_pd(x+i+4);
- YMM4 = _mm256_div_pd(YMM0, YMM15);
- YMM5 = _mm256_div_pd(YMM1, YMM15);
- _mm256_storeu_pd(y+i, YMM4);
- _mm256_storeu_pd(y+i+4, YMM5);
+ YMM0 = _mm256_div_pd(YMM0, YMM15);
+ YMM1 = _mm256_div_pd(YMM1, YMM15);
+ _mm256_storeu_pd(y+i, YMM0);
+ _mm256_storeu_pd(y+i+4, YMM1);
}
for (; i<(n); i++) {
y[i] = x[i] / c;
@@ -76,14 +76,14 @@ static void THDoubleVector_cmul_AVX(double *z, const double *x, const double *y,
static void THDoubleVector_mul_AVX(double *y, const double *x, const double c, const ptrdiff_t n) {
ptrdiff_t i;
__m256d YMM15 = _mm256_set_pd(c, c, c, c);
- __m256d YMM0, YMM1, YMM2, YMM3, YMM4, YMM5;
+ __m256d YMM0, YMM1;
for (i=0; i<=((n)-8); i+=8) {
YMM0 = _mm256_loadu_pd(x+i);
YMM1 = _mm256_loadu_pd(x+i+4);
- YMM4 = _mm256_mul_pd(YMM0, YMM15);
- YMM5 = _mm256_mul_pd(YMM1, YMM15);
- _mm256_storeu_pd(y+i, YMM4);
- _mm256_storeu_pd(y+i+4, YMM5);
+ YMM0 = _mm256_mul_pd(YMM0, YMM15);
+ YMM1 = _mm256_mul_pd(YMM1, YMM15);
+ _mm256_storeu_pd(y+i, YMM0);
+ _mm256_storeu_pd(y+i+4, YMM1);
}
for (; i<n; i++) {
y[i] = x[i] * c;
@@ -109,14 +109,14 @@ static void THDoubleVector_cadd_AVX(double *z, const double *x, const double *y,
static void THDoubleVector_add_AVX(double *y, const double *x, const double c, const ptrdiff_t n) {
ptrdiff_t i;
__m256d YMM15 = _mm256_set_pd(c, c, c, c);
- __m256d YMM0, YMM1, YMM2, YMM3, YMM4, YMM5;
+ __m256d YMM0, YMM1;
for (i=0; i<=((n)-8); i+=8) {
YMM0 = _mm256_loadu_pd(x+i);
YMM1 = _mm256_loadu_pd(x+i+4);
- YMM4 = _mm256_add_pd(YMM0, YMM15);
- YMM5 = _mm256_add_pd(YMM1, YMM15);
- _mm256_storeu_pd(y+i, YMM4);
- _mm256_storeu_pd(y+i+4, YMM5);
+ YMM0 = _mm256_add_pd(YMM0, YMM15);
+ YMM1 = _mm256_add_pd(YMM1, YMM15);
+ _mm256_storeu_pd(y+i, YMM0);
+ _mm256_storeu_pd(y+i+4, YMM1);
}
for (; i<(n); i++) {
y[i] = x[i] + c;
@@ -160,14 +160,14 @@ static void THFloatVector_cdiv_AVX(float *z, const float *x, const float *y, con
static void THFloatVector_div_AVX(float *y, const float *x, const float c, const ptrdiff_t n) {
ptrdiff_t i;
__m256 YMM15 = _mm256_set_ps(c, c, c, c, c, c, c, c);
- __m256 YMM0, YMM1, YMM2, YMM3, YMM4, YMM5;
+ __m256 YMM0, YMM1;
for (i=0; i<=((n)-16); i+=16) {
YMM0 = _mm256_loadu_ps(x+i);
YMM1 = _mm256_loadu_ps(x+i+8);
- YMM4 = _mm256_div_ps(YMM0, YMM15);
- YMM5 = _mm256_div_ps(YMM1, YMM15);
- _mm256_storeu_ps(y+i, YMM4);
- _mm256_storeu_ps(y+i+8, YMM5);
+ YMM0 = _mm256_div_ps(YMM0, YMM15);
+ YMM1 = _mm256_div_ps(YMM1, YMM15);
+ _mm256_storeu_ps(y+i, YMM0);
+ _mm256_storeu_ps(y+i+8, YMM1);
}
for (; i<(n); i++) {
y[i] = x[i] / c;
@@ -195,14 +195,14 @@ static void THFloatVector_cmul_AVX(float *z, const float *x, const float *y, con
static void THFloatVector_mul_AVX(float *y, const float *x, const float c, const ptrdiff_t n) {
ptrdiff_t i;
__m256 YMM15 = _mm256_set_ps(c, c, c, c, c, c, c, c);
- __m256 YMM0, YMM1, YMM2, YMM3, YMM4, YMM5;
+ __m256 YMM0, YMM1;
for (i=0; i<=((n)-16); i+=16) {
YMM0 = _mm256_loadu_ps(x+i);
YMM1 = _mm256_loadu_ps(x+i+8);
- YMM4 = _mm256_mul_ps(YMM0, YMM15);
- YMM5 = _mm256_mul_ps(YMM1, YMM15);
- _mm256_storeu_ps(y+i, YMM4);
- _mm256_storeu_ps(y+i+8, YMM5);
+ YMM0 = _mm256_mul_ps(YMM0, YMM15);
+ YMM1 = _mm256_mul_ps(YMM1, YMM15);
+ _mm256_storeu_ps(y+i, YMM0);
+ _mm256_storeu_ps(y+i+8, YMM1);
}
for (; i<n; i++) {
y[i] = x[i] * c;
@@ -228,14 +228,14 @@ static void THFloatVector_cadd_AVX(float *z, const float *x, const float *y, con
static void THFloatVector_add_AVX(float *y, const float *x, const float c, const ptrdiff_t n) {
ptrdiff_t i;
__m256 YMM15 = _mm256_set_ps(c, c, c, c, c, c, c, c);
- __m256 YMM0, YMM1, YMM2, YMM3, YMM4, YMM5;
+ __m256 YMM0, YMM1;
for (i=0; i<=((n)-16); i+=16) {
YMM0 = _mm256_loadu_ps(x+i);
YMM1 = _mm256_loadu_ps(x+i+8);
- YMM4 = _mm256_add_ps(YMM0, YMM15);
- YMM5 = _mm256_add_ps(YMM1, YMM15);
- _mm256_storeu_ps(y+i, YMM4);
- _mm256_storeu_ps(y+i+8, YMM5);
+ YMM0 = _mm256_add_ps(YMM0, YMM15);
+ YMM1 = _mm256_add_ps(YMM1, YMM15);
+ _mm256_storeu_ps(y+i, YMM0);
+ _mm256_storeu_ps(y+i+8, YMM1);
}
for (; i<(n); i++) {
y[i] = x[i] + c;
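As the diff shows, the float hunks apply the identical rewrite with __m256 registers, processing 16 floats per iteration (two 8-lane loads), and the add/mul kernels differ from the divide only in the intrinsic used (_mm256_add_ps / _mm256_mul_ps). The patch is purely a cleanup of dead temporaries plus in-place reuse of the load registers; the loop structure, trip counts, and scalar tails are unchanged.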