Welcome to the mirror list, hosted at ThFree Co, Russian Federation.

github.com/FFmpeg/FFmpeg.git - Unnamed repository; edit this file 'description' to name the repository.
summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
authorJames Almer <jamrial@gmail.com>2014-05-16 02:54:00 +0400
committerMichael Niedermayer <michaelni@gmx.at>2014-05-16 07:27:03 +0400
commita9bf713d350a07edd17bd0cc88df92261cda79f2 (patch)
treee92b57ed09d316d96b1bcd25a967e2d5172c926c /libswresample/x86
parentcdff1cc98f3be4b8d5e51a352fc565d1a4e36f45 (diff)
swresample: add swri_resample_float_avx
Signed-off-by: James Almer <jamrial@gmail.com>
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
Diffstat (limited to 'libswresample/x86')
-rw-r--r--libswresample/x86/resample_mmx.h61
1 file changed, 61 insertions, 0 deletions
diff --git a/libswresample/x86/resample_mmx.h b/libswresample/x86/resample_mmx.h
index 2bd48a9295..36c7a066e1 100644
--- a/libswresample/x86/resample_mmx.h
+++ b/libswresample/x86/resample_mmx.h
@@ -25,6 +25,7 @@
int swri_resample_int16_mmx2 (struct ResampleContext *c, int16_t *dst, const int16_t *src, int *consumed, int src_size, int dst_size, int update_ctx);
int swri_resample_int16_sse2 (struct ResampleContext *c, int16_t *dst, const int16_t *src, int *consumed, int src_size, int dst_size, int update_ctx);
int swri_resample_float_sse (struct ResampleContext *c, float *dst, const float *src, int *consumed, int src_size, int dst_size, int update_ctx);
+int swri_resample_float_avx (struct ResampleContext *c, float *dst, const float *src, int *consumed, int src_size, int dst_size, int update_ctx);
int swri_resample_double_sse2(struct ResampleContext *c, double *dst, const double *src, int *consumed, int src_size, int dst_size, int update_ctx);
DECLARE_ALIGNED(16, const uint64_t, ff_resample_int16_rounder)[2] = { 0x0000000000004000ULL, 0x0000000000000000ULL};
@@ -195,6 +196,66 @@ __asm__ volatile(\
XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
);
+#define COMMON_CORE_FLT_AVX \
+ x86_reg len= -4*c->filter_length;\
+__asm__ volatile(\
+ "vxorps %%ymm0, %%ymm0, %%ymm0 \n\t"\
+ "1: \n\t"\
+ "vmovups (%1, %0), %%ymm1 \n\t"\
+ "vmulps (%2, %0), %%ymm1, %%ymm1 \n\t"\
+ "vaddps %%ymm1, %%ymm0, %%ymm0 \n\t"\
+ "add $32, %0 \n\t"\
+ " js 1b \n\t"\
+ "vextractf128 $1, %%ymm0, %%xmm1 \n\t"\
+ "vaddps %%xmm1, %%xmm0, %%xmm0 \n\t"\
+ "vmovhlps %%xmm0, %%xmm1, %%xmm1 \n\t"\
+ "vaddps %%xmm1, %%xmm0, %%xmm0 \n\t"\
+ "vshufps $1, %%xmm0, %%xmm0, %%xmm1 \n\t"\
+ "vaddss %%xmm1, %%xmm0, %%xmm0 \n\t"\
+ "vmovss %%xmm0, (%3) \n\t"\
+ : "+r" (len)\
+ : "r" (((uint8_t*)(src+sample_index))-len),\
+ "r" (((uint8_t*)filter)-len),\
+ "r" (dst+dst_index)\
+ XMM_CLOBBERS_ONLY("%xmm0", "%xmm1")\
+);
+
+#define LINEAR_CORE_FLT_AVX \
+ x86_reg len= -4*c->filter_length;\
+__asm__ volatile(\
+ "vxorps %%ymm0, %%ymm0, %%ymm0 \n\t"\
+ "vxorps %%ymm2, %%ymm2, %%ymm2 \n\t"\
+ "1: \n\t"\
+ "vmovups (%3, %0), %%ymm1 \n\t"\
+ "vmulps (%5, %0), %%ymm1, %%ymm3 \n\t"\
+ "vmulps (%4, %0), %%ymm1, %%ymm1 \n\t"\
+ "vaddps %%ymm1, %%ymm0, %%ymm0 \n\t"\
+ "vaddps %%ymm3, %%ymm2, %%ymm2 \n\t"\
+ "add $32, %0 \n\t"\
+ " js 1b \n\t"\
+ "vextractf128 $1, %%ymm0, %%xmm1 \n\t"\
+ "vextractf128 $1, %%ymm2, %%xmm3 \n\t"\
+ "vaddps %%xmm1, %%xmm0, %%xmm0 \n\t"\
+ "vaddps %%xmm3, %%xmm2, %%xmm2 \n\t"\
+ "vmovhlps %%xmm0, %%xmm1, %%xmm1 \n\t"\
+ "vmovhlps %%xmm2, %%xmm3, %%xmm3 \n\t"\
+ "vaddps %%xmm1, %%xmm0, %%xmm0 \n\t"\
+ "vaddps %%xmm3, %%xmm2, %%xmm2 \n\t"\
+ "vshufps $1, %%xmm0, %%xmm0, %%xmm1 \n\t"\
+ "vshufps $1, %%xmm2, %%xmm2, %%xmm3 \n\t"\
+ "vaddss %%xmm1, %%xmm0, %%xmm0 \n\t"\
+ "vaddss %%xmm3, %%xmm2, %%xmm2 \n\t"\
+ "vmovss %%xmm0, %1 \n\t"\
+ "vmovss %%xmm2, %2 \n\t"\
+ : "+r" (len),\
+ "=m" (val),\
+ "=m" (v2)\
+ : "r" (((uint8_t*)(src+sample_index))-len),\
+ "r" (((uint8_t*)filter)-len),\
+ "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
+ XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
+);
+
#define COMMON_CORE_DBL_SSE2 \
x86_reg len= -8*c->filter_length;\
__asm__ volatile(\