github.com/FFmpeg/FFmpeg.git
author    Kieran Kunhya <kierank@ob-encoder.com>      2012-04-20 23:49:30 +0400
committer Justin Ruggles <justin.ruggles@gmail.com>   2012-05-22 01:13:05 +0400
commit    5ff01259a8b558ba84066125b41832121f2f5864 (patch)
tree      49d4e5844c59485037d26f59d3465f4f4c00c068 /libavcodec/x86
parent    afeb3590fc5ff01d43b1a1be9df8fac64431ff9e (diff)
Convert vector_fmul range of functions to YASM and add AVX versions
Signed-off-by: Justin Ruggles <justin.ruggles@gmail.com>
Diffstat (limited to 'libavcodec/x86')
-rw-r--r--  libavcodec/x86/dsputil_mmx.c     160
-rw-r--r--  libavcodec/x86/dsputil_yasm.asm  105
2 files changed, 128 insertions(+), 137 deletions(-)
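
For reference, the three routines being ported all compute element-wise single-precision products. A minimal scalar sketch of their semantics, equivalent to the C fallbacks FFmpeg keeps in dsputil.c (written out here for illustration, not taken from the patch):

/* dst[i] = src0[i] * src1[i] */
static void vector_fmul_c(float *dst, const float *src0,
                          const float *src1, int len)
{
    for (int i = 0; i < len; i++)
        dst[i] = src0[i] * src1[i];
}

/* dst[i] = src0[i] * src1[len - 1 - i]: src1 is traversed back to front */
static void vector_fmul_reverse_c(float *dst, const float *src0,
                                  const float *src1, int len)
{
    for (int i = 0; i < len; i++)
        dst[i] = src0[i] * src1[len - 1 - i];
}

/* dst[i] = src0[i] * src1[i] + src2[i] */
static void vector_fmul_add_c(float *dst, const float *src0,
                              const float *src1, const float *src2, int len)
{
    for (int i = 0; i < len; i++)
        dst[i] = src0[i] * src1[i] + src2[i];
}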
diff --git a/libavcodec/x86/dsputil_mmx.c b/libavcodec/x86/dsputil_mmx.c
index 6377a73555..fca09b4e65 100644
--- a/libavcodec/x86/dsputil_mmx.c
+++ b/libavcodec/x86/dsputil_mmx.c
@@ -2348,135 +2348,6 @@ static void ac3_downmix_sse(float (*samples)[256], float (*matrix)[2],
}
}
-static void vector_fmul_3dnow(float *dst, const float *src0, const float *src1,
- int len)
-{
- x86_reg i = (len - 4) * 4;
- __asm__ volatile (
- "1: \n\t"
- "movq (%2, %0), %%mm0 \n\t"
- "movq 8(%2, %0), %%mm1 \n\t"
- "pfmul (%3, %0), %%mm0 \n\t"
- "pfmul 8(%3, %0), %%mm1 \n\t"
- "movq %%mm0, (%1, %0) \n\t"
- "movq %%mm1, 8(%1, %0) \n\t"
- "sub $16, %0 \n\t"
- "jge 1b \n\t"
- "femms \n\t"
- : "+r"(i)
- : "r"(dst), "r"(src0), "r"(src1)
- : "memory"
- );
-}
-
-static void vector_fmul_sse(float *dst, const float *src0, const float *src1,
- int len)
-{
- x86_reg i = (len - 8) * 4;
- __asm__ volatile (
- "1: \n\t"
- "movaps (%2, %0), %%xmm0 \n\t"
- "movaps 16(%2, %0), %%xmm1 \n\t"
- "mulps (%3, %0), %%xmm0 \n\t"
- "mulps 16(%3, %0), %%xmm1 \n\t"
- "movaps %%xmm0, (%1, %0) \n\t"
- "movaps %%xmm1, 16(%1, %0) \n\t"
- "sub $32, %0 \n\t"
- "jge 1b \n\t"
- : "+r"(i)
- : "r"(dst), "r"(src0), "r"(src1)
- : "memory"
- );
-}
-
-static void vector_fmul_reverse_3dnow2(float *dst, const float *src0,
- const float *src1, int len)
-{
- x86_reg i = len * 4 - 16;
- __asm__ volatile (
- "1: \n\t"
- "pswapd 8(%1), %%mm0 \n\t"
- "pswapd (%1), %%mm1 \n\t"
- "pfmul (%3, %0), %%mm0 \n\t"
- "pfmul 8(%3, %0), %%mm1 \n\t"
- "movq %%mm0, (%2, %0) \n\t"
- "movq %%mm1, 8(%2, %0) \n\t"
- "add $16, %1 \n\t"
- "sub $16, %0 \n\t"
- "jge 1b \n\t"
- : "+r"(i), "+r"(src1)
- : "r"(dst), "r"(src0)
- );
- __asm__ volatile ("femms");
-}
-
-static void vector_fmul_reverse_sse(float *dst, const float *src0,
- const float *src1, int len)
-{
- x86_reg i = len * 4 - 32;
- __asm__ volatile (
- "1: \n\t"
- "movaps 16(%1), %%xmm0 \n\t"
- "movaps (%1), %%xmm1 \n\t"
- "shufps $0x1b, %%xmm0, %%xmm0 \n\t"
- "shufps $0x1b, %%xmm1, %%xmm1 \n\t"
- "mulps (%3, %0), %%xmm0 \n\t"
- "mulps 16(%3, %0), %%xmm1 \n\t"
- "movaps %%xmm0, (%2, %0) \n\t"
- "movaps %%xmm1, 16(%2, %0) \n\t"
- "add $32, %1 \n\t"
- "sub $32, %0 \n\t"
- "jge 1b \n\t"
- : "+r"(i), "+r"(src1)
- : "r"(dst), "r"(src0)
- );
-}
-
-static void vector_fmul_add_3dnow(float *dst, const float *src0,
- const float *src1, const float *src2, int len)
-{
- x86_reg i = (len - 4) * 4;
- __asm__ volatile (
- "1: \n\t"
- "movq (%2, %0), %%mm0 \n\t"
- "movq 8(%2, %0), %%mm1 \n\t"
- "pfmul (%3, %0), %%mm0 \n\t"
- "pfmul 8(%3, %0), %%mm1 \n\t"
- "pfadd (%4, %0), %%mm0 \n\t"
- "pfadd 8(%4, %0), %%mm1 \n\t"
- "movq %%mm0, (%1, %0) \n\t"
- "movq %%mm1, 8(%1, %0) \n\t"
- "sub $16, %0 \n\t"
- "jge 1b \n\t"
- : "+r"(i)
- : "r"(dst), "r"(src0), "r"(src1), "r"(src2)
- : "memory"
- );
- __asm__ volatile ("femms");
-}
-
-static void vector_fmul_add_sse(float *dst, const float *src0,
- const float *src1, const float *src2, int len)
-{
- x86_reg i = (len - 8) * 4;
- __asm__ volatile (
- "1: \n\t"
- "movaps (%2, %0), %%xmm0 \n\t"
- "movaps 16(%2, %0), %%xmm1 \n\t"
- "mulps (%3, %0), %%xmm0 \n\t"
- "mulps 16(%3, %0), %%xmm1 \n\t"
- "addps (%4, %0), %%xmm0 \n\t"
- "addps 16(%4, %0), %%xmm1 \n\t"
- "movaps %%xmm0, (%1, %0) \n\t"
- "movaps %%xmm1, 16(%1, %0) \n\t"
- "sub $32, %0 \n\t"
- "jge 1b \n\t"
- : "+r"(i)
- : "r"(dst), "r"(src0), "r"(src1), "r"(src2)
- : "memory"
- );
-}
-
#if HAVE_6REGS
static void vector_fmul_window_3dnow2(float *dst, const float *src0,
const float *src1, const float *win,
@@ -2631,6 +2502,21 @@ int ff_add_hfyu_left_prediction_sse4(uint8_t *dst, const uint8_t *src,
float ff_scalarproduct_float_sse(const float *v1, const float *v2, int order);
+void ff_vector_fmul_sse(float *dst, const float *src0, const float *src1,
+ int len);
+void ff_vector_fmul_avx(float *dst, const float *src0, const float *src1,
+ int len);
+
+void ff_vector_fmul_reverse_sse(float *dst, const float *src0,
+ const float *src1, int len);
+void ff_vector_fmul_reverse_avx(float *dst, const float *src0,
+ const float *src1, int len);
+
+void ff_vector_fmul_add_sse(float *dst, const float *src0, const float *src1,
+ const float *src2, int len);
+void ff_vector_fmul_add_avx(float *dst, const float *src0, const float *src1,
+ const float *src2, int len);
+
void ff_vector_clip_int32_mmx (int32_t *dst, const int32_t *src,
int32_t min, int32_t max, unsigned int len);
void ff_vector_clip_int32_sse2 (int32_t *dst, const int32_t *src,
@@ -2918,8 +2804,6 @@ static void dsputil_init_3dnow(DSPContext *c, AVCodecContext *avctx,
#endif
c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
- c->vector_fmul = vector_fmul_3dnow;
- c->vector_fmul_add = vector_fmul_add_3dnow;
#if HAVE_7REGS
c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
@@ -2929,7 +2813,6 @@ static void dsputil_init_3dnow(DSPContext *c, AVCodecContext *avctx,
static void dsputil_init_3dnow2(DSPContext *c, AVCodecContext *avctx,
int mm_flags)
{
- c->vector_fmul_reverse = vector_fmul_reverse_3dnow2;
#if HAVE_6REGS
c->vector_fmul_window = vector_fmul_window_3dnow2;
#endif
@@ -2949,11 +2832,11 @@ static void dsputil_init_sse(DSPContext *c, AVCodecContext *avctx, int mm_flags)
c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
c->ac3_downmix = ac3_downmix_sse;
- c->vector_fmul = vector_fmul_sse;
- c->vector_fmul_reverse = vector_fmul_reverse_sse;
-
- if (!(mm_flags & AV_CPU_FLAG_3DNOW))
- c->vector_fmul_add = vector_fmul_add_sse;
+#if HAVE_YASM
+ c->vector_fmul = ff_vector_fmul_sse;
+ c->vector_fmul_reverse = ff_vector_fmul_reverse_sse;
+ c->vector_fmul_add = ff_vector_fmul_add_sse;
+#endif
#if HAVE_6REGS
c->vector_fmul_window = vector_fmul_window_sse;
@@ -3112,6 +2995,9 @@ static void dsputil_init_avx(DSPContext *c, AVCodecContext *avctx, int mm_flags)
}
}
c->butterflies_float_interleave = ff_butterflies_float_interleave_avx;
+ c->vector_fmul = ff_vector_fmul_avx;
+ c->vector_fmul_reverse = ff_vector_fmul_reverse_avx;
+ c->vector_fmul_add = ff_vector_fmul_add_avx;
#endif
}
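
The dsputil_mmx.c hunks above delete the inline-asm implementations and point the DSPContext function pointers at the new YASM symbols instead, gated on HAVE_YASM. Stripped of its surroundings, the dispatch amounts to roughly the following sketch (the helper name and skeleton are illustrative; the ff_* symbols, HAVE_YASM, and the AV_CPU_FLAG_* values are real FFmpeg identifiers):

/* Later init functions override earlier ones, so the AVX pointers win
 * over the SSE ones when both CPU flags are set. */
static void init_vector_fmul(DSPContext *c, int mm_flags)
{
#if HAVE_YASM
    if (mm_flags & AV_CPU_FLAG_SSE) {
        c->vector_fmul         = ff_vector_fmul_sse;
        c->vector_fmul_reverse = ff_vector_fmul_reverse_sse;
        c->vector_fmul_add     = ff_vector_fmul_add_sse;
    }
    if (mm_flags & AV_CPU_FLAG_AVX) {
        c->vector_fmul         = ff_vector_fmul_avx;
        c->vector_fmul_reverse = ff_vector_fmul_reverse_avx;
        c->vector_fmul_add     = ff_vector_fmul_add_avx;
    }
#endif
}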
diff --git a/libavcodec/x86/dsputil_yasm.asm b/libavcodec/x86/dsputil_yasm.asm
index 807c64102b..708ee67678 100644
--- a/libavcodec/x86/dsputil_yasm.asm
+++ b/libavcodec/x86/dsputil_yasm.asm
@@ -1130,6 +1130,111 @@ VECTOR_CLIP_INT32 6, 1, 0, 0
%endif
;-----------------------------------------------------------------------------
+; void vector_fmul(float *dst, const float *src0, const float *src1, int len)
+;-----------------------------------------------------------------------------
+%macro VECTOR_FMUL 0
+cglobal vector_fmul, 4,4,2, dst, src0, src1, len
+ lea lenq, [lend*4 - 2*mmsize]
+ALIGN 16
+.loop
+ mova m0, [src0q + lenq]
+ mova m1, [src0q + lenq + mmsize]
+ mulps m0, m0, [src1q + lenq]
+ mulps m1, m1, [src1q + lenq + mmsize]
+ mova [dstq + lenq], m0
+ mova [dstq + lenq + mmsize], m1
+
+ sub lenq, 2*mmsize
+ jge .loop
+%if mmsize == 32
+ vzeroupper
+ RET
+%else
+ REP_RET
+%endif
+%endmacro
+
+INIT_XMM sse
+VECTOR_FMUL
+INIT_YMM avx
+VECTOR_FMUL
+
+;-----------------------------------------------------------------------------
+; void vector_fmul_reverse(float *dst, const float *src0, const float *src1,
+; int len)
+;-----------------------------------------------------------------------------
+%macro VECTOR_FMUL_REVERSE 0
+cglobal vector_fmul_reverse, 4,4,2, dst, src0, src1, len
+ lea lenq, [lend*4 - 2*mmsize]
+ALIGN 16
+.loop
+%if cpuflag(avx)
+ vmovaps xmm0, [src1q + 16]
+ vinsertf128 m0, m0, [src1q], 1
+ vshufps m0, m0, m0, q0123
+ vmovaps xmm1, [src1q + mmsize + 16]
+ vinsertf128 m1, m1, [src1q + mmsize], 1
+ vshufps m1, m1, m1, q0123
+%else
+ mova m0, [src1q]
+ mova m1, [src1q + mmsize]
+ shufps m0, m0, q0123
+ shufps m1, m1, q0123
+%endif
+ mulps m0, m0, [src0q + lenq + mmsize]
+ mulps m1, m1, [src0q + lenq]
+ mova [dstq + lenq + mmsize], m0
+ mova [dstq + lenq], m1
+ add src1q, 2*mmsize
+ sub lenq, 2*mmsize
+ jge .loop
+%if mmsize == 32
+ vzeroupper
+ RET
+%else
+ REP_RET
+%endif
+%endmacro
+
+INIT_XMM sse
+VECTOR_FMUL_REVERSE
+INIT_YMM avx
+VECTOR_FMUL_REVERSE
+
+;-----------------------------------------------------------------------------
+; void vector_fmul_add(float *dst, const float *src0, const float *src1,
+; const float *src2, int len)
+;-----------------------------------------------------------------------------
+%macro VECTOR_FMUL_ADD 0
+cglobal vector_fmul_add, 5,5,2, dst, src0, src1, src2, len
+ lea lenq, [lend*4 - 2*mmsize]
+ALIGN 16
+.loop
+ mova m0, [src0q + lenq]
+ mova m1, [src0q + lenq + mmsize]
+ mulps m0, m0, [src1q + lenq]
+ mulps m1, m1, [src1q + lenq + mmsize]
+ addps m0, m0, [src2q + lenq]
+ addps m1, m1, [src2q + lenq + mmsize]
+ mova [dstq + lenq], m0
+ mova [dstq + lenq + mmsize], m1
+
+ sub lenq, 2*mmsize
+ jge .loop
+%if mmsize == 32
+ vzeroupper
+ RET
+%else
+ REP_RET
+%endif
+%endmacro
+
+INIT_XMM sse
+VECTOR_FMUL_ADD
+INIT_YMM avx
+VECTOR_FMUL_ADD
+
+;-----------------------------------------------------------------------------
; void ff_butterflies_float_interleave(float *dst, const float *src0,
; const float *src1, int len);
;-----------------------------------------------------------------------------
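
All three YASM loops start at lenq = len*4 - 2*mmsize, step down by 2*mmsize bytes, and use aligned mova loads and stores. That implies two caller-side contracts the patch leaves implicit: buffers must be aligned to mmsize bytes (16 for SSE, 32 for AVX), and len must be a multiple of 2*mmsize/4 floats (8 for SSE, 16 for AVX). A hypothetical check making those constraints explicit (the helper is illustrative, not part of the patch):

#include <stdint.h>

/* Returns nonzero if the arguments satisfy the alignment and length
 * constraints implied by the loops above. */
static int vector_fmul_args_ok(const float *dst, const float *src0,
                               const float *src1, int len, int mmsize)
{
    const uintptr_t mask = mmsize - 1;        /* 15 for SSE, 31 for AVX */
    return len > 0 &&
           len % (2 * mmsize / 4) == 0 &&     /* 8 floats (SSE), 16 (AVX) */
           !((uintptr_t)dst  & mask) &&
           !((uintptr_t)src0 & mask) &&
           !((uintptr_t)src1 & mask);
}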