github.com/mpc-hc/mpc-hc.git
author     povaddict <povaddict@users.sourceforge.net>  2010-02-10 02:16:44 +0300
committer  povaddict <povaddict@users.sourceforge.net>  2010-02-10 02:16:44 +0300
commit     726a91b12a7524e45e7a901c9e4883af5b1bffe6 (patch)
tree       f5d25e3b2e84c92f4901280c73d5d3d7e6c3cd19 /src/filters/transform/MPCVideoDec/ffmpeg/libswscale
parent     02183f6e47ad4ea1057de9950482f291f2ae4290 (diff)
Rename several directories to use MixedCase instead of lowercase.
They now mostly match the case used in #includes, and they're consistent with the names of the .h files they contain.

git-svn-id: https://mpc-hc.svn.sourceforge.net/svnroot/mpc-hc/trunk@1648 10f7b99b-c216-0410-bff0-8a66a9350fd8
Diffstat (limited to 'src/filters/transform/MPCVideoDec/ffmpeg/libswscale')
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libswscale/asmalign.h              |    7
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libswscale/config.h                |   66
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libswscale/isP4HT.c                |  136
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libswscale/libswscalew32thread.c   |  155
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libswscale/postprocFilters.h       |   41
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libswscale/postprocess.c           |  860
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libswscale/postprocess.h           |   91
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libswscale/postprocess_internal.h  |  106
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libswscale/postprocess_template.c  | 3844
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libswscale/rgb2rgb.c               |  659
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libswscale/rgb2rgb.h               |  152
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libswscale/rgb2rgb_template.c      | 2750
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libswscale/swscale.c               | 3070
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libswscale/swscale.h               |  147
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libswscale/swscale_internal.h      |  200
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libswscale/swscale_template.c      | 3290
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libswscale/x86/yuv2rgb_mmx.c       |   99
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libswscale/x86/yuv2rgb_template.c  |  545
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libswscale/yuv2rgb.c               |  728
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libswscale/yuv2rgb_template.c      |  538
20 files changed, 17484 insertions(+), 0 deletions(-)
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/asmalign.h b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/asmalign.h
new file mode 100644
index 000000000..cb2856aa4
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/asmalign.h
@@ -0,0 +1,7 @@
+#ifdef SYS_DARWIN
+#define ASMALIGN8 ".align 3\n\t"
+#define ASMALIGN16 ".align 4\n\t"
+#else
+#define ASMALIGN8 ".balign 8\n\t"
+#define ASMALIGN16 ".balign 16\n\t"
+#endif
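
Darwin's assembler interprets ".align n" as an alignment of 2^n bytes, while the ELF ".balign n" directive takes the byte count directly, so both branches of this header request the same 8- and 16-byte alignments. A minimal sketch of how such a macro is typically spliced in front of a hot inline-asm loop (the function and loop body here are purely illustrative and not part of this commit):

    #include "asmalign.h"

    /* Illustrative only: align the top of an inline-asm loop to 16 bytes. */
    static void zero_bytes(unsigned char *p, long n)
    {
        if (n <= 0)
            return;
        __asm__ volatile(
            ASMALIGN16              /* ".balign 16", or ".align 4" on Darwin */
            "1:                  \n\t"
            "movb $0, (%0)       \n\t"
            "add  $1, %0         \n\t"
            "sub  $1, %1         \n\t"
            "jnz  1b             \n\t"
            : "+r"(p), "+r"(n)
            :
            : "memory", "cc");
    }
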
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/config.h b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/config.h
new file mode 100644
index 000000000..fad24058f
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/config.h
@@ -0,0 +1,66 @@
+#ifdef __GNUC__
+ #define HAVE_MMX 1
+ #define HAVE_MMX2 1
+ #define HAVE_SSE 1
+ #define HAVE_SSE2 1
+ #define HAVE_SSSE3 1
+ #define HAVE_AMD3DNOW 1
+ #define HAVE_AMD3DNOWEX 1
+
+ #define ARCH_X86 1
+ #ifdef ARCH_X86_64
+ #define HAVE_FAST_64BIT 1
+ #define HAVE_CMOV 1
+ #define HAVE_FAST_CMOV 1
+ #else
+ #define ARCH_X86_32 1
+ #endif
+#else
+ #define HAVE_INLINE_ASM 0
+ #define HAVE_MMX 0
+ #define HAVE_MMX2 0
+ #define HAVE_SSE 0
+ #define HAVE_SSE2 0
+ #define HAVE_SSSE3 0
+ #define HAVE_AMD3DNOW 0
+ #define HAVE_AMD3DNOWEXT 0
+ #define ARCH_X86 0
+ #define ARCH_X86_32 0
+ #define ARCH_X86_64 0
+ #define HAVE_FAST_64BIT 0
+ #define HAVE_CMOV 0
+ #define HAVE_FAST_CMOV 0
+#endif
+
+#define HAVE_TEN_OPERANDS 1
+#define HAVE_EBP_AVAILABLE 1
+#define HAVE_EBX_AVAILABLE 1
+#define NAMED_ASM_ARGS 1
+
+#define HAVE_ALLOCA_H 1
+#define HAVE_BSWAP 1
+#define HAVE_MALLOC_H 1
+#define HAVE_MEMALIGN 1
+#define HAVE_THREADS 1
+#define RUNTIME_CPUDETECT 1
+#define USE_FASTMEMCPY 1
+#define CONFIG_SWSCALE_ALPHA 1
+
+#define ASMALIGN(ZEROBITS) ".align 1<<" #ZEROBITS "\n\t"
+
+#define CONFIG_GPL 1
+
+/* Toggles debugging information */
+#undef MP_DEBUG
+
+#ifndef __GNUC__
+ #define inline __inline
+ #ifndef __attribute__
+ #define __attribute__(x) /**/
+ #endif
+ #pragma warning (disable:4002)
+ #include <malloc.h>
+ #define memalign(a,b) _aligned_malloc(b,a)
+#else
+ #define memalign(a,b) __mingw_aligned_malloc(b,a)
+#endif
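
The non-GCC branch above substitutes MSVC equivalents for the GCC-isms: `inline` and `__attribute__` are defined away, warning C4002 (too many macro arguments) is silenced, and memalign() is routed to _aligned_malloc(), while the MinGW branch goes through __mingw_aligned_malloc(). One caveat: memory from either Windows allocator must be released with the matching aligned free, not plain free(). A hedged sketch of a symmetric helper (the aligned_free name is illustrative, not something this tree defines):

    /* Sketch only: pair config.h's memalign() mapping with the matching free. */
    #ifndef __GNUC__
      #include <malloc.h>
      #define aligned_free(p) _aligned_free(p)        /* matches _aligned_malloc        */
    #elif defined(__MINGW32__)
      #include <malloc.h>
      #define aligned_free(p) __mingw_aligned_free(p) /* matches __mingw_aligned_malloc */
    #else
      #include <stdlib.h>
      #define aligned_free(p) free(p)                 /* plain memalign() pairs with free() */
    #endif
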
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/isP4HT.c b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/isP4HT.c
new file mode 100644
index 000000000..7a1a9dff9
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/isP4HT.c
@@ -0,0 +1,136 @@
+// Included from swscale.c, if HAVE_THREADS is defined.
+
+//#include <string.h>
+//#include "config.h"
+
+// IsCPUID() checks whether the CPUID instruction is supported
+int IsCPUID(void)
+{
+#ifdef WIN64
+ return 0;
+#else
+ long result;
+ result= 1;
+#if ARCH_X86 // GCC
+ asm volatile(
+ "pushf \n\t"
+ "pop %%edx \n\t"
+ "mov %%edx,%%ecx \n\t"
+ "xor $0x200000,%%edx \n\t"
+ "push %%edx \n\t"
+ "popf \n\t"
+ "pushf \n\t"
+ "pop %%edx \n\t"
+ "cmp %%ecx,%%edx \n\t"
+ "jnz 1f \n\t"
+ "mov $0,%0 \n\t"
+ "1: \n\t"
+
+ : "+a" (result)
+ :: "%ecx","%edx"
+ );
+#endif
+ return result;
+#endif
+}
+
+int isP4HT_I(int family, int model, const char *v_name) // return true if P4HT, P4EE or P4
+{
+ if (!strncmp("GenuineIntel", v_name, 12) && family==0x0f && model<=3)
+ return 1;
+ return 0;
+}
+
+int isP4HT (void)
+{
+#ifdef WIN64
+ return 0;
+#else
+ long dwStandard = 0;
+ long dwBrandIndex = 0;
+ long dwFeature = 0;
+ long dwMax = 0;
+ long dwExt = 0;
+ union {
+ char cBuf[12+1];
+ struct {
+ long dw0;
+ long dw1;
+ long dw2;
+ } s;
+ } Ident;
+ char v_name[13]; // vendor name
+ int family; // family of the processor
+ // e.g. 6 = Pentium-Pro architecture
+ int model; // model of processor
+ // e.g. 1 = Pentium-Pro for family = 6
+ int stepping; // processor revision number
+
+ if (!IsCPUID())
+ return 0;
+
+#if ARCH_X86 // GCC
+ asm volatile(
+ // get the vendor string
+ "xor %%eax, %%eax \n\t"
+ "cpuid \n\t"
+ "mov %%eax, %0 \n\t"
+ "mov %%ebx, %1 \n\t"
+ "mov %%edx, %2 \n\t"
+ "mov %%ecx, %3 \n\t"
+
+ // get the Standard bits
+ "mov $1, %%eax \n\t"
+ "cpuid \n\t"
+ "mov %%eax, %4 \n\t"
+ "mov %%ebx, %5 \n\t"
+ "mov %%edx, %6 \n\t"
+
+ : "=m" (dwMax), "=m" (Ident.s.dw0), "=m" (Ident.s.dw1), "=m" (Ident.s.dw2), "=m" (dwStandard), "=m" (dwBrandIndex), "=m" (dwFeature)
+ :: "%eax","%ebx","%ecx","%edx"
+ );
+#elif defined(_WIN32) // MSVC
+#ifndef _WIN64
+ _asm {
+ push ebx
+ push ecx
+ push edx
+
+ // get the vendor string
+ xor eax, eax
+ cpuid
+ mov dwMax, eax
+ mov Ident.s.dw0, ebx
+ mov Ident.s.dw1, edx
+ mov Ident.s.dw2, ecx
+
+ // get the Standard bits
+ mov eax, 1
+ cpuid
+ mov dwStandard, eax
+ mov dwBrandIndex, ebx
+ mov dwFeature, edx
+
+ pop edx
+ pop ecx
+ pop ebx
+ }
+#endif
+#else
+ return 0;
+#endif
+
+ family = (dwStandard >> 8) & 0xF; // retrieve family
+ model = (dwStandard >> 4) & 0xF; // retrieve model
+ stepping = (dwStandard) & 0xF; // retrieve stepping
+ Ident.cBuf[12] = 0;
+ strcpy(v_name, Ident.cBuf);
+ if((dwFeature & 0x8000000)==0)
+ return 0;
+
+ if(((dwBrandIndex & 0xff0000) >> 16)==1)
+ return 0;
+
+ return isP4HT_I(family, model, v_name);
+#endif
+}
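
In plain terms, isP4HT() combines four CPUID-derived facts: the CPUID instruction must exist (EFLAGS bit 21 can be toggled), the vendor string from leaf 0 must read "GenuineIntel", leaf 1 must report the relevant feature bit in EDX plus more than one logical processor per package in EBX[23:16], and the family/model fields must identify an early NetBurst core (family 0x0F, model <= 3), i.e. the Pentium 4 generations whose two logical threads share one set of execution units. A compact sketch of the same decision using GCC's <cpuid.h> helper, given as a reformulation (it tests the architectural HTT bit, EDX bit 28) for illustration rather than as the code the scaler actually calls:

    /* Sketch: rough equivalent of isP4HT() built on GCC's <cpuid.h>. */
    #include <cpuid.h>
    #include <string.h>

    static int is_p4_with_ht(void)
    {
        unsigned eax, ebx, ecx, edx;
        char vendor[13] = {0};

        if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
            return 0;                              /* CPUID not supported      */
        memcpy(vendor + 0, &ebx, 4);               /* vendor bytes come back   */
        memcpy(vendor + 4, &edx, 4);               /* in EBX, EDX, ECX order   */
        memcpy(vendor + 8, &ecx, 4);
        if (strcmp(vendor, "GenuineIntel"))
            return 0;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
            return 0;
        if (!(edx & (1u << 28)))                   /* HTT feature flag         */
            return 0;
        if (((ebx >> 16) & 0xff) <= 1)             /* logical CPUs per package */
            return 0;

        return ((eax >> 8) & 0xf) == 0x0f &&       /* family: NetBurst         */
               ((eax >> 4) & 0xf) <= 3;            /* model: early Pentium 4   */
    }
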
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/libswscalew32thread.c b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/libswscalew32thread.c
new file mode 100644
index 000000000..48223cc75
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/libswscalew32thread.c
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * Customized for swscaler by Haruhiko Yamagata
+ */
+
+//#define DEBUG
+
+#include <inttypes.h>
+#include "config.h"
+#include "../libavutil/internal.h"
+#include "swscale.h"
+#include "swscale_internal.h"
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#include <process.h>
+
+typedef struct SwsThreadContext{
+ SwsContext *swsctx;
+ HANDLE thread;
+ HANDLE work_sem;
+ HANDLE done_sem;
+ int (*func)(SwsContext *c);
+ void *arg;
+ int ret;
+}SwsThreadContext;
+
+
+static unsigned __stdcall sws_thread_func(void *v){
+ SwsThreadContext *c= v;
+
+ for(;;){
+//printf("sws_thread_func %X enter wait\n", (int)v); fflush(stdout);
+ WaitForSingleObject(c->work_sem, INFINITE);
+//printf("sws_thread_func %X after wait (func=%X)\n", (int)v, (int)c->func); fflush(stdout);
+ if(c->func)
+ c->ret= c->func(c->swsctx);
+ else
+ return 0;
+//printf("sws_thread_func %X signal complete\n", (int)v); fflush(stdout);
+ ReleaseSemaphore(c->done_sem, 1, 0);
+ }
+
+ return 0;
+}
+
+/**
+ * Free what has been allocated by sws_thread_init().
+ * Must be called after decoding has finished, especially do not call while sws_thread_execute() is running
+ */
+void sws_thread_free(SwsContext *s){
+ SwsThreadContext *c= s->thread_opaque;
+ int i;
+
+ for(i=0; i<s->thread_count; i++){
+
+ c[i].func= NULL;
+ ReleaseSemaphore(c[i].work_sem, 1, 0);
+ WaitForSingleObject(c[i].thread, INFINITE);
+ if(c[i].work_sem) CloseHandle(c[i].work_sem);
+ if(c[i].done_sem) CloseHandle(c[i].done_sem);
+ if(c[i].thread) CloseHandle(c[i].thread);
+ }
+
+ av_freep(&s->thread_opaque);
+}
+
+int sws_thread_execute(SwsContext *s, int (*func)(SwsContext *c2), int *ret, int count){ //CUSTOMIZED no void **arg
+ SwsThreadContext *c= s->thread_opaque;
+ int i;
+
+ assert(s == c->swsctx);
+ assert(count <= s->thread_count);
+
+ /* note, we can be certain that this is not called with the same SwsContext by different threads at the same time */
+
+ for(i=0; i<count; i++){
+ c[i].arg = &s[i].stp;//CUSTOMIZED
+ c[i].func= func;
+ c[i].ret = 12345;
+
+ ReleaseSemaphore(c[i].work_sem, 1, 0);
+ }
+ for(i=0; i<count; i++){
+ WaitForSingleObject(c[i].done_sem, INFINITE);
+
+ c[i].func= NULL;
+ if(ret) ret[i]= c[i].ret;
+ }
+ return 0;
+}
+
+int sws_thread_init(SwsContext *s, int thread_count){
+ int i;
+ SwsThreadContext *c;
+ uint32_t threadid;
+
+ s->thread_count= thread_count;
+
+ assert(!s->thread_opaque);
+ c= av_mallocz(sizeof(SwsThreadContext)*thread_count);
+ s->thread_opaque= c;
+
+ for(i=0; i<thread_count; i++){
+//printf("init semaphors %d\n", i); fflush(stdout);
+ c[i].swsctx= &s[i]; //CUSTOMIZED
+
+ if(!(c[i].work_sem = CreateSemaphore(NULL, 0, s->thread_count, NULL)))
+ goto fail;
+ if(!(c[i].done_sem = CreateSemaphore(NULL, 0, s->thread_count, NULL)))
+ goto fail;
+
+//printf("create thread %d\n", i); fflush(stdout);
+ c[i].thread = (HANDLE)_beginthreadex(NULL, 0, sws_thread_func, &c[i], 0, &threadid );
+ if( !c[i].thread ) goto fail;
+ }
+//printf("init done\n"); fflush(stdout);
+
+ s->execute= sws_thread_execute;
+
+ return 0;
+fail:
+ sws_thread_free(s);
+ return -1;
+}
+
+int GetCPUCount(void){
+ int CPUCount;
+ SYSTEM_INFO si;
+ GetSystemInfo(&si);
+ if(isP4HT() &&si.dwNumberOfProcessors>=2)
+ CPUCount = si.dwNumberOfProcessors>>1;
+ else
+ CPUCount= si.dwNumberOfProcessors;
+ return CPUCount;
+}
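
Taken together, this file implements a small persistent worker pool: sws_thread_init() starts thread_count workers that each block on their own work_sem; sws_thread_execute() releases those semaphores so every worker runs func() on its per-slice SwsContext and then waits on the matching done_sem handles; posting work with func left NULL is the shutdown signal that sws_thread_free() uses before joining the threads and closing the handles. A hedged sketch of the intended calling sequence, assuming an SwsContext array laid out as ctx[0..n-1] with one entry per slice (do_slice() and the array bound are illustrative):

    #include "swscale_internal.h"

    /* Illustrative per-slice worker; real callers pass swscale's own slice code. */
    static int do_slice(SwsContext *c)
    {
        /* ... scale the slice described by c ... */
        return 0;
    }

    static int scale_with_threads(SwsContext *ctx)
    {
        int ret[64];                           /* generous upper bound (sketch)    */
        int n = GetCPUCount();                 /* physical cores; halved on P4 HT  */

        if (n > 64)
            n = 64;
        if (sws_thread_init(ctx, n) < 0)       /* create semaphores and workers    */
            return -1;
        ctx->execute(ctx, do_slice, ret, n);   /* wake workers, wait on done_sem   */
        sws_thread_free(ctx);                  /* func==NULL wakes and joins them  */
        return ret[0];
    }
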
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/postprocFilters.h b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/postprocFilters.h
new file mode 100644
index 000000000..dfa61d4fb
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/postprocFilters.h
@@ -0,0 +1,41 @@
+#ifndef _POSTPROCFILTERS_H_
+#define _POSTPROCFILTERS_H_
+
+#define V_DEBLOCK 0x01
+#define H_DEBLOCK 0x02
+#define DERING 0x04
+#define LEVEL_FIX 0x08 ///< Brightness & Contrast
+
+#define LUM_V_DEBLOCK V_DEBLOCK // 1
+#define LUM_H_DEBLOCK H_DEBLOCK // 2
+#define CHROM_V_DEBLOCK (V_DEBLOCK<<4) // 16
+#define CHROM_H_DEBLOCK (H_DEBLOCK<<4) // 32
+#define LUM_DERING DERING // 4
+#define CHROM_DERING (DERING<<4) // 64
+#define LUM_LEVEL_FIX LEVEL_FIX // 8
+#define CHROM_LEVEL_FIX (LEVEL_FIX<<4) // 128 (not implemented yet)
+
+// Experimental vertical filters
+#define V_X1_FILTER 0x0
+#define V_A_DEBLOCK 0x2000000
+
+// Experimental horizontal filters
+#define H_X1_FILTER 0x0
+#define H_A_DEBLOCK 0x4000000
+
+/// select between full y range (255-0) or standard one (234-16)
+#define FULL_Y_RANGE 0x8000 // 32768
+
+//Deinterlacing Filters
+#define LINEAR_IPOL_DEINT_FILTER 0x10000 // 65536
+#define LINEAR_BLEND_DEINT_FILTER 0x20000 // 131072
+#define CUBIC_BLEND_DEINT_FILTER 0x8000 // (not implemented yet)
+#define CUBIC_IPOL_DEINT_FILTER 0x40000 // 262144
+#define MEDIAN_DEINT_FILTER 0x80000 // 524288
+#define FFMPEG_DEINT_FILTER 0x400000
+#define LOWPASS5_DEINT_FILTER 0x800000
+
+#define TEMP_NOISE_FILTER 0x100000
+#define FORCE_QUANT 0x200000
+
+#endif
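
These constants are bit flags meant to be OR-ed together: the base deblock/dering/level-fix bits occupy the low nibble for luma, the same bits shifted left by four name the chroma variants, and the deinterlacers, the temporal noise filter and FORCE_QUANT each own a higher bit, so a single integer can describe a complete filter selection. For example (purely a combination of the macros above):

    #include "postprocFilters.h"

    /* Illustrative request: H+V deblocking and dering on both planes,
     * linear-blend deinterlacing and temporal noise reduction.         */
    static const int pp_filter_request =
        LUM_H_DEBLOCK   | LUM_V_DEBLOCK   | LUM_DERING   |   /* luma,   bits 0-3 */
        CHROM_H_DEBLOCK | CHROM_V_DEBLOCK | CHROM_DERING |   /* chroma, bits 4-7 */
        LINEAR_BLEND_DEINT_FILTER | TEMP_NOISE_FILTER;
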
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/postprocess.c b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/postprocess.c
new file mode 100644
index 000000000..a7c7a3696
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/postprocess.c
@@ -0,0 +1,860 @@
+/*
+ Copyright (C) 2001-2003 Michael Niedermayer (michaelni@gmx.at)
+
+ AltiVec optimizations (C) 2004 Romain Dolbeau <romain@dolbeau.org>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+/**
+ * @file postprocess.c
+ * postprocessing.
+ */
+
+/*
+ C MMX MMX2 3DNow AltiVec
+isVertDC Ec Ec Ec
+isVertMinMaxOk Ec Ec Ec
+doVertLowPass E e e Ec
+doVertDefFilter Ec Ec e e Ec
+isHorizDC Ec Ec Ec
+isHorizMinMaxOk a E Ec
+doHorizLowPass E e e Ec
+doHorizDefFilter Ec Ec e e Ec
+do_a_deblock Ec E Ec E
+deRing E e e* Ecp
+Vertical RKAlgo1 E a a
+Horizontal RKAlgo1 a a
+Vertical X1# a E E
+Horizontal X1# a E E
+LinIpolDeinterlace e E E*
+CubicIpolDeinterlace a e e*
+LinBlendDeinterlace e E E*
+MedianDeinterlace# E Ec Ec
+TempDeNoiser# E e e Ec
+
+* i don't have a 3DNow CPU -> it's untested, but no one said it doesn't work, so it seems to work
+# more or less self-invented filters, so the exactness isn't too meaningful
+E = Exact implementation
+e = almost exact implementation (slightly different rounding,...)
+a = alternative / approximate impl
+c = checked against the other implementations (-vo md5)
+p = partially optimized, still some work to do
+*/
+
+/*
+TODO:
+reduce the time wasted on the mem transfer
+unroll stuff if instructions depend too much on the prior one
+move YScale thing to the end instead of fixing QP
+write a faster and higher quality deblocking filter :)
+make the mainloop more flexible (variable number of blocks at once
+ (the if/else stuff per block is slowing things down)
+compare the quality & speed of all filters
+split this huge file
+optimize c versions
+try to unroll inner for(x=0 ... loop to avoid these damn if(x ... checks
+...
+*/
+
+//Changelog: use the CVS log
+
+#include "config.h"
+#include <inttypes.h>
+#include <stdio.h>
+#include <string.h>
+//#undef HAVE_MMX2
+//#define HAVE_AMD3DNOW
+//#undef HAVE_MMX
+//#undef ARCH_X86
+//#define DEBUG_BRIGHTNESS
+#include "../libavutil/internal.h"
+#ifdef USE_FASTMEMCPY
+#include "fastmemcpy.h"
+#endif
+#include "postprocess.h"
+#include "postprocess_internal.h"
+#include "cpudetect.h"
+
+#ifdef HAVE_ALTIVEC_H
+#include <altivec.h>
+#endif
+
+#ifndef HAVE_MEMALIGN
+#define memalign(a,b) av_malloc(b)
+#endif
+
+#define MIN(a,b) ((a) > (b) ? (b) : (a))
+#define MAX(a,b) ((a) < (b) ? (b) : (a))
+#define ABS(a) ((a) > 0 ? (a) : (-(a)))
+#define SIGN(a) ((a) > 0 ? 1 : -1)
+
+#define GET_MODE_BUFFER_SIZE 500
+#define OPTIONS_ARRAY_SIZE 10
+#define BLOCK_SIZE 8
+#define TEMP_STRIDE 8
+//#define NUM_BLOCKS_AT_ONCE 16 //not used yet
+
+#if ARCH_X86_32 || ARCH_X86_64
+static uint64_t __attribute__((aligned(8))) attribute_used w05= 0x0005000500050005LL;
+static uint64_t __attribute__((aligned(8))) attribute_used w04= 0x0004000400040004LL;
+static uint64_t __attribute__((aligned(8))) attribute_used w20= 0x0020002000200020LL;
+static uint64_t __attribute__((aligned(8))) attribute_used b00= 0x0000000000000000LL;
+static uint64_t __attribute__((aligned(8))) attribute_used b01= 0x0101010101010101LL;
+static uint64_t __attribute__((aligned(8))) attribute_used b02= 0x0202020202020202LL;
+static uint64_t __attribute__((aligned(8))) attribute_used b08= 0x0808080808080808LL;
+static uint64_t __attribute__((aligned(8))) attribute_used b80= 0x8080808080808080LL;
+#endif
+
+static uint8_t clip_table[3*256];
+static uint8_t * const clip_tab= clip_table + 256;
+
+static const int verbose= 0;
+
+static const int attribute_used deringThreshold= 20;
+
+
+
+
+#if ARCH_X86_32 || ARCH_X86_64
+static inline void prefetchnta(void *p)
+{
+ asm volatile( "prefetchnta (%0)\n\t"
+ : : "r" (p)
+ );
+}
+
+static inline void prefetcht0(void *p)
+{
+ asm volatile( "prefetcht0 (%0)\n\t"
+ : : "r" (p)
+ );
+}
+
+static inline void prefetcht1(void *p)
+{
+ asm volatile( "prefetcht1 (%0)\n\t"
+ : : "r" (p)
+ );
+}
+
+static inline void prefetcht2(void *p)
+{
+ asm volatile( "prefetcht2 (%0)\n\t"
+ : : "r" (p)
+ );
+}
+#endif
+
+// The horizontal functions exist only in C because the MMX code is faster with vertical filters and transposing
+
+/**
+ * Check if the given 8x8 Block is mostly "flat"
+ */
+static inline int isHorizDC_C(uint8_t src[], stride_t stride, PPContext *c)
+{
+ int numEq= 0;
+ int y;
+ const int dcOffset= ((c->nonBQP*c->ppMode.baseDcDiff)>>8) + 1;
+ const int dcThreshold= dcOffset*2 + 1;
+
+ for(y=0; y<BLOCK_SIZE; y++)
+ {
+ if(((unsigned)(src[0] - src[1] + dcOffset)) < dcThreshold) numEq++;
+ if(((unsigned)(src[1] - src[2] + dcOffset)) < dcThreshold) numEq++;
+ if(((unsigned)(src[2] - src[3] + dcOffset)) < dcThreshold) numEq++;
+ if(((unsigned)(src[3] - src[4] + dcOffset)) < dcThreshold) numEq++;
+ if(((unsigned)(src[4] - src[5] + dcOffset)) < dcThreshold) numEq++;
+ if(((unsigned)(src[5] - src[6] + dcOffset)) < dcThreshold) numEq++;
+ if(((unsigned)(src[6] - src[7] + dcOffset)) < dcThreshold) numEq++;
+ src+= stride;
+ }
+ return numEq > c->ppMode.flatnessThreshold;
+}
+
+/**
+ * Check if the middle 8x8 Block in the given 8x16 block is flat
+ */
+static inline int isVertDC_C(uint8_t src[], stride_t stride, PPContext *c){
+ int numEq= 0;
+ int y;
+ const int dcOffset= ((c->nonBQP*c->ppMode.baseDcDiff)>>8) + 1;
+ const int dcThreshold= dcOffset*2 + 1;
+
+ src+= stride*4; // src points to begin of the 8x8 Block
+ for(y=0; y<BLOCK_SIZE-1; y++)
+ {
+ if(((unsigned)(src[0] - src[0+stride] + dcOffset)) < dcThreshold) numEq++;
+ if(((unsigned)(src[1] - src[1+stride] + dcOffset)) < dcThreshold) numEq++;
+ if(((unsigned)(src[2] - src[2+stride] + dcOffset)) < dcThreshold) numEq++;
+ if(((unsigned)(src[3] - src[3+stride] + dcOffset)) < dcThreshold) numEq++;
+ if(((unsigned)(src[4] - src[4+stride] + dcOffset)) < dcThreshold) numEq++;
+ if(((unsigned)(src[5] - src[5+stride] + dcOffset)) < dcThreshold) numEq++;
+ if(((unsigned)(src[6] - src[6+stride] + dcOffset)) < dcThreshold) numEq++;
+ if(((unsigned)(src[7] - src[7+stride] + dcOffset)) < dcThreshold) numEq++;
+ src+= stride;
+ }
+ return numEq > c->ppMode.flatnessThreshold;
+}
+
+static inline int isHorizMinMaxOk_C(uint8_t src[], stride_t stride, int QP)
+{
+ int i;
+#if 1
+ for(i=0; i<2; i++){
+ if((unsigned)(src[0] - src[5] + 2*QP) > 4*QP) return 0;
+ src += stride;
+ if((unsigned)(src[2] - src[7] + 2*QP) > 4*QP) return 0;
+ src += stride;
+ if((unsigned)(src[4] - src[1] + 2*QP) > 4*QP) return 0;
+ src += stride;
+ if((unsigned)(src[6] - src[3] + 2*QP) > 4*QP) return 0;
+ src += stride;
+ }
+#else
+ for(i=0; i<8; i++){
+ if((unsigned)(src[0] - src[7] + 2*QP) > 4*QP) return 0;
+ src += stride;
+ }
+#endif
+ return 1;
+}
+
+static inline int isVertMinMaxOk_C(uint8_t src[], stride_t stride, int QP)
+{
+#if 1
+#if 1
+ int x;
+ src+= stride*4;
+ for(x=0; x<BLOCK_SIZE; x+=4)
+ {
+ if((unsigned)(src[ x + 0*stride] - src[ x + 5*stride] + 2*QP) > 4*QP) return 0;
+ if((unsigned)(src[1+x + 2*stride] - src[1+x + 7*stride] + 2*QP) > 4*QP) return 0;
+ if((unsigned)(src[2+x + 4*stride] - src[2+x + 1*stride] + 2*QP) > 4*QP) return 0;
+ if((unsigned)(src[3+x + 6*stride] - src[3+x + 3*stride] + 2*QP) > 4*QP) return 0;
+ }
+#else
+ int x;
+ src+= stride*3;
+ for(x=0; x<BLOCK_SIZE; x++)
+ {
+ if((unsigned)(src[x + stride] - src[x + (stride<<3)] + 2*QP) > 4*QP) return 0;
+ }
+#endif
+ return 1;
+#else
+ int x;
+ src+= stride*4;
+ for(x=0; x<BLOCK_SIZE; x++)
+ {
+ int min=255;
+ int max=0;
+ int y;
+ for(y=0; y<8; y++){
+ int v= src[x + y*stride];
+ if(v>max) max=v;
+ if(v<min) min=v;
+ }
+ if(max-min > 2*QP) return 0;
+ }
+ return 1;
+#endif
+}
+
+static inline int horizClassify_C(uint8_t src[], stride_t stride, PPContext *c){
+ if( isHorizDC_C(src, stride, c) ){
+ if( isHorizMinMaxOk_C(src, stride, c->QP) )
+ return 1;
+ else
+ return 0;
+ }else{
+ return 2;
+ }
+}
+
+static inline int vertClassify_C(uint8_t src[], stride_t stride, PPContext *c){
+ if( isVertDC_C(src, stride, c) ){
+ if( isVertMinMaxOk_C(src, stride, c->QP) )
+ return 1;
+ else
+ return 0;
+ }else{
+ return 2;
+ }
+}
+
+static inline void doHorizDefFilter_C(uint8_t dst[], stride_t stride, PPContext *c)
+{
+ int y;
+ for(y=0; y<BLOCK_SIZE; y++)
+ {
+ const int middleEnergy= 5*(dst[4] - dst[3]) + 2*(dst[2] - dst[5]);
+
+ if(ABS(middleEnergy) < 8*c->QP)
+ {
+ const int q=(dst[3] - dst[4])/2;
+ const int leftEnergy= 5*(dst[2] - dst[1]) + 2*(dst[0] - dst[3]);
+ const int rightEnergy= 5*(dst[6] - dst[5]) + 2*(dst[4] - dst[7]);
+
+ int d= ABS(middleEnergy) - MIN( ABS(leftEnergy), ABS(rightEnergy) );
+ d= MAX(d, 0);
+
+ d= (5*d + 32) >> 6;
+ d*= SIGN(-middleEnergy);
+
+ if(q>0)
+ {
+ d= d<0 ? 0 : d;
+ d= d>q ? q : d;
+ }
+ else
+ {
+ d= d>0 ? 0 : d;
+ d= d<q ? q : d;
+ }
+
+ dst[3]-= d;
+ dst[4]+= d;
+ }
+ dst+= stride;
+ }
+}
+
+/**
+ * Do a horizontal low pass filter on the 10x8 block (dst points to middle 8x8 Block)
+ * using the 9-Tap Filter (1,1,2,2,4,2,2,1,1)/16 (C version)
+ */
+static inline void doHorizLowPass_C(uint8_t dst[], stride_t stride, PPContext *c)
+{
+ int y;
+ for(y=0; y<BLOCK_SIZE; y++)
+ {
+ const int first= ABS(dst[-1] - dst[0]) < c->QP ? dst[-1] : dst[0];
+ const int last= ABS(dst[8] - dst[7]) < c->QP ? dst[8] : dst[7];
+
+ int sums[10];
+ sums[0] = 4*first + dst[0] + dst[1] + dst[2] + 4;
+ sums[1] = sums[0] - first + dst[3];
+ sums[2] = sums[1] - first + dst[4];
+ sums[3] = sums[2] - first + dst[5];
+ sums[4] = sums[3] - first + dst[6];
+ sums[5] = sums[4] - dst[0] + dst[7];
+ sums[6] = sums[5] - dst[1] + last;
+ sums[7] = sums[6] - dst[2] + last;
+ sums[8] = sums[7] - dst[3] + last;
+ sums[9] = sums[8] - dst[4] + last;
+
+ dst[0]= (sums[0] + sums[2] + 2*dst[0])>>4;
+ dst[1]= (sums[1] + sums[3] + 2*dst[1])>>4;
+ dst[2]= (sums[2] + sums[4] + 2*dst[2])>>4;
+ dst[3]= (sums[3] + sums[5] + 2*dst[3])>>4;
+ dst[4]= (sums[4] + sums[6] + 2*dst[4])>>4;
+ dst[5]= (sums[5] + sums[7] + 2*dst[5])>>4;
+ dst[6]= (sums[6] + sums[8] + 2*dst[6])>>4;
+ dst[7]= (sums[7] + sums[9] + 2*dst[7])>>4;
+
+ dst+= stride;
+ }
+}
+
+/**
+ * Experimental Filter 1 (Horizontal)
+ * will not damage linear gradients
+ * Flat blocks should look like they were passed through the (1,1,2,2,4,2,2,1,1) 9-tap filter
+ * can only smooth blocks at the expected locations (it cannot smooth them if they moved)
+ * MMX2 version does correct clipping, C version does not
+ * not identical with the vertical one
+ */
+static inline void horizX1Filter(uint8_t *src, stride_t stride, int QP)
+{
+ int y;
+ static uint64_t *lut= NULL;
+ if(lut==NULL)
+ {
+ int i;
+ lut= (uint64_t*)memalign(8, 256*8);
+ for(i=0; i<256; i++)
+ {
+ int v= i < 128 ? 2*i : 2*(i-256);
+/*
+//Simulate 112242211 9-Tap filter
+ uint64_t a= (v/16) & 0xFF;
+ uint64_t b= (v/8) & 0xFF;
+ uint64_t c= (v/4) & 0xFF;
+ uint64_t d= (3*v/8) & 0xFF;
+*/
+//Simulate piecewise linear interpolation
+ uint64_t a= (v/16) & 0xFF;
+ uint64_t b= (v*3/16) & 0xFF;
+ uint64_t c= (v*5/16) & 0xFF;
+ uint64_t d= (7*v/16) & 0xFF;
+ uint64_t A= (0x100 - a)&0xFF;
+ uint64_t B= (0x100 - b)&0xFF;
+ uint64_t C= (0x100 - c)&0xFF;
+ uint64_t D= (0x100 - c)&0xFF;
+
+ lut[i] = (a<<56) | (b<<48) | (c<<40) | (d<<32) |
+ (D<<24) | (C<<16) | (B<<8) | (A);
+ //lut[i] = (v<<32) | (v<<24);
+ }
+ }
+
+ for(y=0; y<BLOCK_SIZE; y++)
+ {
+ int a= src[1] - src[2];
+ int b= src[3] - src[4];
+ int c= src[5] - src[6];
+
+ int d= MAX(ABS(b) - (ABS(a) + ABS(c))/2, 0);
+
+ if(d < QP)
+ {
+ int v = d * SIGN(-b);
+
+ src[1] +=v/8;
+ src[2] +=v/4;
+ src[3] +=3*v/8;
+ src[4] -=3*v/8;
+ src[5] -=v/4;
+ src[6] -=v/8;
+
+ }
+ src+=stride;
+ }
+}
+
+/**
+ * accurate deblock filter
+ */
+static av_always_inline void do_a_deblock_C(uint8_t *src, stride_t step, stride_t stride, PPContext *c){
+ int y;
+ const int QP= c->QP;
+ const int dcOffset= ((c->nonBQP*c->ppMode.baseDcDiff)>>8) + 1;
+ const int dcThreshold= dcOffset*2 + 1;
+//START_TIMER
+ src+= step*4; // src points to begin of the 8x8 Block
+ for(y=0; y<8; y++){
+ int numEq= 0;
+
+ if(((unsigned)(src[-1*step] - src[0*step] + dcOffset)) < dcThreshold) numEq++;
+ if(((unsigned)(src[ 0*step] - src[1*step] + dcOffset)) < dcThreshold) numEq++;
+ if(((unsigned)(src[ 1*step] - src[2*step] + dcOffset)) < dcThreshold) numEq++;
+ if(((unsigned)(src[ 2*step] - src[3*step] + dcOffset)) < dcThreshold) numEq++;
+ if(((unsigned)(src[ 3*step] - src[4*step] + dcOffset)) < dcThreshold) numEq++;
+ if(((unsigned)(src[ 4*step] - src[5*step] + dcOffset)) < dcThreshold) numEq++;
+ if(((unsigned)(src[ 5*step] - src[6*step] + dcOffset)) < dcThreshold) numEq++;
+ if(((unsigned)(src[ 6*step] - src[7*step] + dcOffset)) < dcThreshold) numEq++;
+ if(((unsigned)(src[ 7*step] - src[8*step] + dcOffset)) < dcThreshold) numEq++;
+ if(numEq > c->ppMode.flatnessThreshold){
+ int min, max, x;
+
+ if(src[0] > src[step]){
+ max= src[0];
+ min= src[step];
+ }else{
+ max= src[step];
+ min= src[0];
+ }
+ for(x=2; x<8; x+=2){
+ if(src[x*step] > src[(x+1)*step]){
+ if(src[x *step] > max) max= src[ x *step];
+ if(src[(x+1)*step] < min) min= src[(x+1)*step];
+ }else{
+ if(src[(x+1)*step] > max) max= src[(x+1)*step];
+ if(src[ x *step] < min) min= src[ x *step];
+ }
+ }
+ if(max-min < 2*QP){
+ const int first= ABS(src[-1*step] - src[0]) < QP ? src[-1*step] : src[0];
+ const int last= ABS(src[8*step] - src[7*step]) < QP ? src[8*step] : src[7*step];
+
+ int sums[10];
+ sums[0] = 4*first + src[0*step] + src[1*step] + src[2*step] + 4;
+ sums[1] = sums[0] - first + src[3*step];
+ sums[2] = sums[1] - first + src[4*step];
+ sums[3] = sums[2] - first + src[5*step];
+ sums[4] = sums[3] - first + src[6*step];
+ sums[5] = sums[4] - src[0*step] + src[7*step];
+ sums[6] = sums[5] - src[1*step] + last;
+ sums[7] = sums[6] - src[2*step] + last;
+ sums[8] = sums[7] - src[3*step] + last;
+ sums[9] = sums[8] - src[4*step] + last;
+
+ src[0*step]= (sums[0] + sums[2] + 2*src[0*step])>>4;
+ src[1*step]= (sums[1] + sums[3] + 2*src[1*step])>>4;
+ src[2*step]= (sums[2] + sums[4] + 2*src[2*step])>>4;
+ src[3*step]= (sums[3] + sums[5] + 2*src[3*step])>>4;
+ src[4*step]= (sums[4] + sums[6] + 2*src[4*step])>>4;
+ src[5*step]= (sums[5] + sums[7] + 2*src[5*step])>>4;
+ src[6*step]= (sums[6] + sums[8] + 2*src[6*step])>>4;
+ src[7*step]= (sums[7] + sums[9] + 2*src[7*step])>>4;
+ }
+ }else{
+ const int middleEnergy= 5*(src[4*step] - src[3*step]) + 2*(src[2*step] - src[5*step]);
+
+ if(ABS(middleEnergy) < 8*QP)
+ {
+ const int q=(src[3*step] - src[4*step])/2;
+ const int leftEnergy= 5*(src[2*step] - src[1*step]) + 2*(src[0*step] - src[3*step]);
+ const int rightEnergy= 5*(src[6*step] - src[5*step]) + 2*(src[4*step] - src[7*step]);
+
+ int d= ABS(middleEnergy) - MIN( ABS(leftEnergy), ABS(rightEnergy) );
+ d= MAX(d, 0);
+
+ d= (5*d + 32) >> 6;
+ d*= SIGN(-middleEnergy);
+
+ if(q>0)
+ {
+ d= d<0 ? 0 : d;
+ d= d>q ? q : d;
+ }
+ else
+ {
+ d= d>0 ? 0 : d;
+ d= d<q ? q : d;
+ }
+
+ src[3*step]-= d;
+ src[4*step]+= d;
+ }
+ }
+
+ src += stride;
+ }
+/*if(step==16){
+ STOP_TIMER("step16")
+}else{
+ STOP_TIMER("stepX")
+}*/
+}
+
+//Note: we have C, MMX, MMX2 and 3DNow versions; there is no 3DNow+MMX2 one
+//Plain C versions
+#if !HAVE_MMX || defined (RUNTIME_CPUDETECT)
+#define COMPILE_C
+#endif
+
+#ifdef ARCH_POWERPC
+#ifdef HAVE_ALTIVEC
+#define COMPILE_ALTIVEC
+#endif //HAVE_ALTIVEC
+#endif //ARCH_POWERPC
+
+#if ARCH_X86_32 || ARCH_X86_64
+
+#if (HAVE_MMX && !HAVE_AMD3DNOW && !HAVE_MMX2) || defined (RUNTIME_CPUDETECT)
+#define COMPILE_MMX
+#endif
+
+#if HAVE_MMX2 || defined (RUNTIME_CPUDETECT)
+#define COMPILE_MMX2
+#endif
+
+#if (HAVE_AMD3DNOW && !HAVE_MMX2) || defined (RUNTIME_CPUDETECT)
+#define COMPILE_3DNOW
+#endif
+#endif //ARCH_X86
+
+#undef HAVE_MMX
+#undef HAVE_MMX2
+#undef HAVE_AMD3DNOW
+#undef HAVE_ALTIVEC
+
+#ifdef COMPILE_C
+#undef HAVE_MMX
+#undef HAVE_MMX2
+#undef HAVE_AMD3DNOW
+#define RENAME(a) a ## _C
+#include "postprocess_template.c"
+#endif
+
+#ifdef ARCH_POWERPC
+#ifdef COMPILE_ALTIVEC
+#undef RENAME
+#define HAVE_ALTIVEC
+#define RENAME(a) a ## _altivec
+#include "postprocess_altivec_template.c"
+#include "postprocess_template.c"
+#endif
+#endif //ARCH_POWERPC
+
+//MMX versions
+#ifdef COMPILE_MMX
+#undef RENAME
+#define HAVE_MMX 1
+#undef HAVE_MMX2
+#undef HAVE_AMD3DNOW
+#define RENAME(a) a ## _MMX
+#include "postprocess_template.c"
+#endif
+
+//MMX2 versions
+#ifdef COMPILE_MMX2
+#undef RENAME
+#define HAVE_MMX 1
+#define HAVE_MMX2 1
+#undef HAVE_AMD3DNOW
+#define RENAME(a) a ## _MMX2
+#include "postprocess_template.c"
+#endif
+
+//3DNOW versions
+#ifdef COMPILE_3DNOW
+#undef RENAME
+#define HAVE_MMX 1
+#undef HAVE_MMX2
+#define HAVE_AMD3DNOW 1
+#define RENAME(a) a ## _3DNow
+#include "postprocess_template.c"
+#endif
+
+// minor note: the HAVE_xyz macros are messed up after this line, so don't use them
+
+static inline void postProcess(uint8_t src[], stride_t srcStride, uint8_t dst[], stride_t dstStride, int width, int height,
+ QP_STORE_T QPs[], int QPStride, int isColor, pp_mode_t *vm, pp_context_t *vc)
+{
+ PPContext *c= (PPContext *)vc;
+ PPMode *ppMode= (PPMode *)vm;
+ c->ppMode= *ppMode; //FIXME
+
+ // using ifs here as they are faster than function pointers, although the
+ // difference wouldn't be measurable here; it's much better because
+ // someone might exchange the CPU without restarting mplayer ;)
+#ifdef RUNTIME_CPUDETECT
+#if ARCH_X86_32 || ARCH_X86_64
+ // ordered per speed fasterst first
+ if(c->cpuCaps & PP_CPU_CAPS_MMX2)
+ postProcess_MMX2(src, srcStride, dst, dstStride, width, height, QPs, QPStride, isColor, c);
+ else if(c->cpuCaps & PP_CPU_CAPS_3DNOW)
+ postProcess_3DNow(src, srcStride, dst, dstStride, width, height, QPs, QPStride, isColor, c);
+ else if(c->cpuCaps & PP_CPU_CAPS_MMX)
+ postProcess_MMX(src, srcStride, dst, dstStride, width, height, QPs, QPStride, isColor, c);
+ else
+ postProcess_C(src, srcStride, dst, dstStride, width, height, QPs, QPStride, isColor, c);
+#else
+#ifdef ARCH_POWERPC
+#ifdef HAVE_ALTIVEC
+ if(c->cpuCaps & PP_CPU_CAPS_ALTIVEC)
+ postProcess_altivec(src, srcStride, dst, dstStride, width, height, QPs, QPStride, isColor, c);
+ else
+#endif
+#endif
+ postProcess_C(src, srcStride, dst, dstStride, width, height, QPs, QPStride, isColor, c);
+#endif
+#else //RUNTIME_CPUDETECT
+#if HAVE_MMX2
+ postProcess_MMX2(src, srcStride, dst, dstStride, width, height, QPs, QPStride, isColor, c);
+#elif HAVE_AMD3DNOW
+ postProcess_3DNow(src, srcStride, dst, dstStride, width, height, QPs, QPStride, isColor, c);
+#elif HAVE_MMX
+ postProcess_MMX(src, srcStride, dst, dstStride, width, height, QPs, QPStride, isColor, c);
+#elif defined (HAVE_ALTIVEC)
+ postProcess_altivec(src, srcStride, dst, dstStride, width, height, QPs, QPStride, isColor, c);
+#else
+ postProcess_C(src, srcStride, dst, dstStride, width, height, QPs, QPStride, isColor, c);
+#endif
+#endif //!RUNTIME_CPUDETECT
+}
+
+static void reallocAlign(void **p, int alignment, stride_t size){
+ av_free(*p);
+ *p= memalign(alignment, size);
+ memset(*p, 0, size);
+}
+
+static void reallocBuffers(PPContext *c, int width, int height, stride_t stride, int qpStride){
+ int mbWidth = (width+15)>>4;
+ int mbHeight= (height+15)>>4;
+ int i;
+
+ c->stride= stride;
+ c->qpStride= qpStride;
+
+ reallocAlign((void **)&c->tempDst, 8, stride*24*2);
+ reallocAlign((void **)&c->tempSrc, 8, stride*24*2);
+ reallocAlign((void **)&c->tempBlocks, 8, 2*16*8);
+ reallocAlign((void **)&c->yHistogram, 8, 256*sizeof(uint64_t));
+ for(i=0; i<256; i++)
+ c->yHistogram[i]= width*height/64*15/256;
+
+ for(i=0; i<3; i++)
+ {
+ //Note: the +17*1024 is just there so I don't have to worry about reads/writes over the end
+ reallocAlign((void **)&c->tempBlured[i], 8, stride*mbHeight*16 + 17*1024+256);
+ reallocAlign((void **)&c->tempBluredPast[i], 8, 256*((height+7)&(~7))/2 + 17*1024+256);//FIXME size
+ }
+
+ reallocAlign((void **)&c->deintTemp, 8, 2*width+32);
+ reallocAlign((void **)&c->nonBQPTable, 8, qpStride*mbHeight*sizeof(QP_STORE_T));
+ reallocAlign((void **)&c->stdQPTable, 8, qpStride*mbHeight*sizeof(QP_STORE_T));
+ reallocAlign((void **)&c->forcedQPTable, 8, mbWidth*sizeof(QP_STORE_T));
+}
+
+static void global_init(void){
+ int i;
+ memset(clip_table, 0, 256);
+ for(i=256; i<512; i++)
+ clip_table[i]= i;
+ memset(clip_table+512, 0, 256);
+}
+
+pp_context_t *pp_get_context(int width, int height, int cpuCaps){
+ PPContext *c= memalign(32, sizeof(PPContext));
+ int stride= (width+15)&(~15); //assumed / will realloc if needed
+ int qpStride= (width+15)/16 + 2; //assumed / will realloc if needed
+
+ global_init();
+
+ memset(c, 0, sizeof(PPContext));
+ c->cpuCaps= cpuCaps;
+ if(cpuCaps&PP_FORMAT){
+ c->hChromaSubSample= cpuCaps&0x3;
+ c->vChromaSubSample= (cpuCaps>>4)&0x3;
+ }else{
+ c->hChromaSubSample= 1;
+ c->vChromaSubSample= 1;
+ }
+
+ reallocBuffers(c, width, height, stride, qpStride);
+
+ c->frameNum=-1;
+
+ return c;
+}
+
+void pp_free_context(void *vc){
+ PPContext *c = (PPContext*)vc;
+ int i;
+
+ for(i=0; i<3; i++) av_free(c->tempBlured[i]);
+ for(i=0; i<3; i++) av_free(c->tempBluredPast[i]);
+
+ av_free(c->tempBlocks);
+ av_free(c->yHistogram);
+ av_free(c->tempDst);
+ av_free(c->tempSrc);
+ av_free(c->deintTemp);
+ av_free(c->stdQPTable);
+ av_free(c->nonBQPTable);
+ av_free(c->forcedQPTable);
+
+ memset(c, 0, sizeof(PPContext));
+
+ av_free(c);
+}
+
+void pp_postprocess(uint8_t * src[3], stride_t srcStride[3],
+ uint8_t * dst[3], stride_t dstStride[3],
+ int width, int height,
+ QP_STORE_T *QP_store, int QPStride,
+ pp_mode_t *vm, void *vc, int pict_type)
+{
+ int mbWidth = (width+15)>>4;
+ int mbHeight= (height+15)>>4;
+ PPMode *mode = (PPMode*)vm;
+ PPContext *c = (PPContext*)vc;
+ stride_t minStride= MAX(srcStride[0], dstStride[0]);
+
+ if(c->stride < minStride || c->qpStride < QPStride)
+ reallocBuffers(c, width, height,
+ MAX(minStride, c->stride),
+ MAX(c->qpStride, QPStride));
+
+ if(QP_store==NULL || (mode->lumMode & FORCE_QUANT))
+ {
+ int i;
+ QP_store= c->forcedQPTable;
+ QPStride= 0;
+ if(mode->lumMode & FORCE_QUANT)
+ for(i=0; i<mbWidth; i++) QP_store[i]= mode->forcedQuant;
+ else
+ for(i=0; i<mbWidth; i++) QP_store[i]= 1;
+ }
+//printf("pict_type:%d\n", pict_type);
+
+ if(pict_type & PP_PICT_TYPE_QP2){
+ int i;
+ const int count= mbHeight * QPStride;
+ for(i=0; i<(count>>2); i++){
+ ((uint32_t*)c->stdQPTable)[i] = (((uint32_t*)QP_store)[i]>>1) & 0x7F7F7F7F;
+ }
+ for(i<<=2; i<count; i++){
+ c->stdQPTable[i] = QP_store[i]>>1;
+ }
+ QP_store= c->stdQPTable;
+ }
+
+if(0){
+int x,y;
+for(y=0; y<mbHeight; y++){
+ for(x=0; x<mbWidth; x++){
+ ;//printf("%2d ", QP_store[x + y*QPStride]);
+ }
+ //printf("\n");
+}
+ //printf("\n");
+}
+
+ if((pict_type&7)!=3)
+ {
+ int i;
+ const int count= mbHeight * QPStride;
+ for(i=0; i<(count>>2); i++){
+ ((uint32_t*)c->nonBQPTable)[i] = ((uint32_t*)QP_store)[i] & 0x3F3F3F3F;
+ }
+ for(i<<=2; i<count; i++){
+ c->nonBQPTable[i] = QP_store[i] & 0x3F;
+ }
+ }
+
+ if(verbose>2)
+ {
+ //printf("using npp filters 0x%X/0x%X\n", mode->lumMode, mode->chromMode);
+ }
+
+ postProcess(src[0], srcStride[0], dst[0], dstStride[0],
+ width, height, QP_store, QPStride, 0, mode, c);
+
+ width = (width )>>c->hChromaSubSample;
+ height = (height)>>c->vChromaSubSample;
+
+ if(mode->chromMode)
+ {
+ postProcess(src[1], srcStride[1], dst[1], dstStride[1],
+ width, height, QP_store, QPStride, 1, mode, c);
+ postProcess(src[2], srcStride[2], dst[2], dstStride[2],
+ width, height, QP_store, QPStride, 2, mode, c);
+ }/*
+ else if(srcStride[1] == dstStride[1] && srcStride[1]>0 && srcStride[2] == dstStride[2] && srcStride[2]>0)
+ {
+ memcpy(dst[1], src[1], srcStride[1]*height);
+ memcpy(dst[2], src[2], srcStride[2]*height);
+ }*/
+ else
+ {
+ int y;
+ for(y=0; y<height; y++)
+ {
+ memcpy(&(dst[1][y*dstStride[1]]), &(src[1][y*srcStride[1]]), width);
+ memcpy(&(dst[2][y*dstStride[2]]), &(src[2][y*srcStride[2]]), width);
+ }
+ }
+}
+
+
+
+
+
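
The public entry points above follow a three-call lifecycle: pp_get_context() allocates a PPContext, guesses stride and QP-table stride from the width and decodes the chroma subsampling out of the low bits of the caps word; pp_postprocess() filters one frame plane by plane, reallocating the temporary buffers if the real strides turn out larger than the guess and falling back to a neutral/forced QP table when none is supplied; pp_free_context() releases everything. A hedged sketch of a caller, assuming 4:2:0 data and a PPMode that has been filled in elsewhere:

    #include "postprocess.h"

    /* Sketch of a caller; `mode` must be a fully initialized PPMode and the
     * QP table may be NULL, in which case a neutral quantizer table is used. */
    static void filter_frame(uint8_t *src[3], stride_t src_stride[3],
                             uint8_t *dst[3], stride_t dst_stride[3],
                             int w, int h, pp_mode_t *mode)
    {
        pp_context_t *ctx = pp_get_context(w, h, PP_FORMAT_420 | PP_CPU_CAPS_MMX2);

        pp_postprocess(src, src_stride, dst, dst_stride, w, h,
                       NULL /* QP table */, 0 /* QP stride */,
                       mode, ctx, 0 /* pict_type */);

        pp_free_context(ctx);
    }
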
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/postprocess.h b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/postprocess.h
new file mode 100644
index 000000000..277522cf1
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/postprocess.h
@@ -0,0 +1,91 @@
+/*
+ Copyright (C) 2001-2003 Michael Niedermayer (michaelni@gmx.at)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#ifndef NEWPOSTPROCESS_H
+#define NEWPOSTPROCESS_H
+
+/**
+ * @file postprocess.h
+ * @brief
+ * external api for the pp stuff
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define PP_QUALITY_MAX 6
+
+#define QP_STORE_T int8_t
+
+#include "postprocFilters.h"
+#define FF_CSP_ONLY
+#include "ffImgfmt.h"
+
+/**
+ * Postprocessing mode.
+ */
+typedef struct PPMode{
+ int lumMode; ///< activates filters for luminance
+ int chromMode; ///< activates filters for chrominance
+ int error; ///< nonzero on error
+
+ int minAllowedY; ///< for brightness correction
+ int maxAllowedY; ///< for brightness correction
+ float maxClippedThreshold; ///< amount of "black" you are willing to lose to get a brightness-corrected picture
+
+ int maxTmpNoise[3]; ///< for Temporal Noise Reducing filter (Maximal sum of abs differences)
+
+ int baseDcDiff;
+ int flatnessThreshold;
+
+ int forcedQuant; ///< quantizer if FORCE_QUANT is used
+} PPMode;
+
+typedef void pp_context_t;
+typedef PPMode pp_mode_t;
+
+void pp_postprocess(uint8_t * src[3], stride_t srcStride[3],
+ uint8_t * dst[3], stride_t dstStride[3],
+ int horizontalSize, int verticalSize,
+ QP_STORE_T *QP_store, int QP_stride,
+ pp_mode_t *mode, pp_context_t *ppContext, int pict_type);
+
+
+pp_context_t *pp_get_context(int width, int height, int flags);
+void pp_free_context(pp_context_t *ppContext);
+
+#define PP_CPU_CAPS_MMX 0x80000000
+#define PP_CPU_CAPS_MMX2 0x20000000
+#define PP_CPU_CAPS_3DNOW 0x40000000
+#define PP_CPU_CAPS_ALTIVEC 0x10000000
+
+#define PP_FORMAT 0x00000008
+#define PP_FORMAT_420 (0x00000011|PP_FORMAT)
+#define PP_FORMAT_422 (0x00000001|PP_FORMAT)
+#define PP_FORMAT_411 (0x00000002|PP_FORMAT)
+#define PP_FORMAT_444 (0x00000000|PP_FORMAT)
+#define PP_FORMAT_410 (0x00000022|PP_FORMAT)
+
+#define PP_PICT_TYPE_QP2 0x00000010 ///< MPEG2 style QScale
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
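
Note that the PP_FORMAT_* values double as data: bits 0-1 carry the horizontal chroma-subsampling shift and bits 4-5 the vertical one, and pp_get_context() (in postprocess.c above) only trusts them when the PP_FORMAT marker bit is also set, which is why PP_FORMAT_420 is 0x11|PP_FORMAT while PP_FORMAT_444 is 0x00|PP_FORMAT. A tiny illustrative decode, mirroring what the context constructor does:

    #include "postprocess.h"

    /* Illustrative: recover the subsampling shifts packed into the caps word. */
    static void pp_chroma_shifts(int caps, int *h_shift, int *v_shift)
    {
        if (caps & PP_FORMAT) {            /* format bits are valid               */
            *h_shift = caps & 0x3;         /* 4:4:4 -> 0, 4:2:0 -> 1, 4:1:1 -> 2  */
            *v_shift = (caps >> 4) & 0x3;
        } else {
            *h_shift = *v_shift = 1;       /* default assumption: 4:2:0           */
        }
    }
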
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/postprocess_internal.h b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/postprocess_internal.h
new file mode 100644
index 000000000..625e35855
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/postprocess_internal.h
@@ -0,0 +1,106 @@
+/*
+ Copyright (C) 2001-2002 Michael Niedermayer (michaelni@gmx.at)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include "postprocFilters.h"
+
+//use if you want faster postprocessing code
+//cannot differentiate between chroma & luma filters (both on or both off)
+//obviously the -pp option at the command line then has no effect except turning the
+//filters selected here on
+//#define COMPILE_TIME_MODE 0x77
+
+#if 1
+static inline int CLIP(int a){
+ if(a&256) return ((a)>>31)^(-1);
+ else return a;
+}
+//#define CLIP(a) (((a)&256) ? ((a)>>31)^(-1) : (a))
+#elif 0
+#define CLIP(a) clip_tab[a]
+#else
+#define CLIP(a) (a)
+#endif
+/**
+ * Postprocessing filter.
+ */
+struct PPFilter{
+ char *shortName;
+ char *longName;
+ int chromDefault; ///< is chrominance filtering on by default if this filter is manually activated
+ int minLumQuality; ///< minimum quality to turn luminance filtering on
+ int minChromQuality; ///< minimum quality to turn chrominance filtering on
+ int mask; ///< Bitmask to turn this filter on
+};
+
+/**
+ * postprocess context.
+ */
+typedef struct PPContext{
+ uint8_t *tempBlocks; ///<used for the horizontal code
+
+ /**
+ * luma histogram.
+ * we need 64 bit here, otherwise we're going to have a problem
+ * after watching a black picture for 5 hours
+ */
+ uint64_t *yHistogram;
+
+ uint64_t __attribute__((aligned(8))) packedYOffset;
+ uint64_t __attribute__((aligned(8))) packedYScale;
+
+ /** Temporal noise reducing buffers */
+ uint8_t *tempBlured[3];
+ int32_t *tempBluredPast[3];
+
+ /** Temporary buffers for handling the last row(s) */
+ uint8_t *tempDst;
+ uint8_t *tempSrc;
+
+ uint8_t *deintTemp;
+
+ uint64_t __attribute__((aligned(8))) pQPb;
+ uint64_t __attribute__((aligned(8))) pQPb2;
+
+ uint64_t __attribute__((aligned(8))) mmxDcOffset[64];
+ uint64_t __attribute__((aligned(8))) mmxDcThreshold[64];
+
+ QP_STORE_T *stdQPTable; ///< used to fix MPEG2 style qscale
+ QP_STORE_T *nonBQPTable;
+ QP_STORE_T *forcedQPTable;
+
+ int QP;
+ int nonBQP;
+
+ int frameNum;
+
+ int cpuCaps;
+
+ int qpStride; ///<size of qp buffers (needed to realloc them if needed)
+ stride_t stride; ///<size of some buffers (needed to realloc them if needed)
+
+ int hChromaSubSample;
+ int vChromaSubSample;
+
+ PPMode ppMode;
+} PPContext;
+
+
+
+
+
+
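
The CLIP() helper above leans on the assumption that its argument stays within -256..511, which holds for the 8-bit arithmetic it serves: any value in 256..511 or any negative value has bit 8 set, so it takes the `(a>>31)^(-1)` path, where the arithmetic shift smears the sign bit across the word and the XOR flips it, giving -1 (255 once stored in a byte) for overflow and 0 for underflow. A small self-test sketch of that reasoning (it also relies on `>>` of a negative int being an arithmetic shift, which GCC guarantees):

    #include <assert.h>
    #include <stdint.h>

    /* Reference clamp to 0..255. */
    static int clip_ref(int a) { return a < 0 ? 0 : (a > 255 ? 255 : a); }

    /* Illustrative check of the CLIP() trick over its assumed input range. */
    static void clip_selftest(void)
    {
        int a;
        for (a = -256; a <= 511; a++) {
            int fast = (a & 256) ? ((a >> 31) ^ (-1)) : a;  /* same as CLIP(a) */
            assert((uint8_t)fast == clip_ref(a));
        }
    }
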
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/postprocess_template.c b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/postprocess_template.c
new file mode 100644
index 000000000..1a5a5e0e7
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/postprocess_template.c
@@ -0,0 +1,3844 @@
+/*
+ Copyright (C) 2001-2002 Michael Niedermayer (michaelni@gmx.at)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+/**
+ * @file postprocess_template.c
+ * mmx/mmx2/3dnow postprocess code.
+ */
+
+
+#if ARCH_X86_64
+# define REGa rax
+# define REGc rcx
+# define REGd rdx
+# define REG_a "rax"
+# define REG_c "rcx"
+# define REG_d "rdx"
+# define REG_SP "rsp"
+# define ALIGN_MASK "$0xFFFFFFFFFFFFFFF8"
+#else
+# define REGa eax
+# define REGc ecx
+# define REGd edx
+# define REG_a "eax"
+# define REG_c "ecx"
+# define REG_d "edx"
+# define REG_SP "esp"
+# define ALIGN_MASK "$0xFFFFFFF8"
+#endif
+
+
+#undef PAVGB
+#undef PMINUB
+#undef PMAXUB
+
+#if HAVE_MMX2
+#define REAL_PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
+#elif HAVE_AMD3DNOW
+#define REAL_PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
+#endif
+#define PAVGB(a,b) REAL_PAVGB(a,b)
+
+#if HAVE_MMX2
+#define PMINUB(a,b,t) "pminub " #a ", " #b " \n\t"
+#elif HAVE_MMX
+#define PMINUB(b,a,t) \
+ "movq " #a ", " #t " \n\t"\
+ "psubusb " #b ", " #t " \n\t"\
+ "psubb " #t ", " #a " \n\t"
+#endif
+
+#if HAVE_MMX2
+#define PMAXUB(a,b) "pmaxub " #a ", " #b " \n\t"
+#elif HAVE_MMX
+#define PMAXUB(a,b) \
+ "psubusb " #a ", " #b " \n\t"\
+ "paddb " #a ", " #b " \n\t"
+#endif
+
+//FIXME? |255-0| = 1 (shouldn't be a problem ...)
+#if HAVE_MMX
+/**
+ * Check if the middle 8x8 Block in the given 8x16 block is flat
+ */
+static inline int RENAME(vertClassify)(uint8_t src[], int stride, PPContext *c){
+ int numEq= 0, dcOk;
+ src+= stride*4; // src points to begin of the 8x8 Block
+asm volatile(
+ "movq %0, %%mm7 \n\t"
+ "movq %1, %%mm6 \n\t"
+ : : "m" (c->mmxDcOffset[c->nonBQP]), "m" (c->mmxDcThreshold[c->nonBQP])
+ );
+
+asm volatile(
+ "lea (%2, %3), %%"REG_a" \n\t"
+// 0 1 2 3 4 5 6 7 8 9
+// %1 eax eax+%2 eax+2%2 %1+4%2 ecx ecx+%2 ecx+2%2 %1+8%2 ecx+4%2
+
+ "movq (%2), %%mm0 \n\t"
+ "movq (%%"REG_a"), %%mm1 \n\t"
+ "movq %%mm0, %%mm3 \n\t"
+ "movq %%mm0, %%mm4 \n\t"
+ PMAXUB(%%mm1, %%mm4)
+ PMINUB(%%mm1, %%mm3, %%mm5)
+ "psubb %%mm1, %%mm0 \n\t" // mm0 = differnece
+ "paddb %%mm7, %%mm0 \n\t"
+ "pcmpgtb %%mm6, %%mm0 \n\t"
+
+ "movq (%%"REG_a",%3), %%mm2 \n\t"
+ PMAXUB(%%mm2, %%mm4)
+ PMINUB(%%mm2, %%mm3, %%mm5)
+ "psubb %%mm2, %%mm1 \n\t"
+ "paddb %%mm7, %%mm1 \n\t"
+ "pcmpgtb %%mm6, %%mm1 \n\t"
+ "paddb %%mm1, %%mm0 \n\t"
+
+ "movq (%%"REG_a", %3, 2), %%mm1 \n\t"
+ PMAXUB(%%mm1, %%mm4)
+ PMINUB(%%mm1, %%mm3, %%mm5)
+ "psubb %%mm1, %%mm2 \n\t"
+ "paddb %%mm7, %%mm2 \n\t"
+ "pcmpgtb %%mm6, %%mm2 \n\t"
+ "paddb %%mm2, %%mm0 \n\t"
+
+ "lea (%%"REG_a", %3, 4), %%"REG_a" \n\t"
+
+ "movq (%2, %3, 4), %%mm2 \n\t"
+ PMAXUB(%%mm2, %%mm4)
+ PMINUB(%%mm2, %%mm3, %%mm5)
+ "psubb %%mm2, %%mm1 \n\t"
+ "paddb %%mm7, %%mm1 \n\t"
+ "pcmpgtb %%mm6, %%mm1 \n\t"
+ "paddb %%mm1, %%mm0 \n\t"
+
+ "movq (%%"REG_a"), %%mm1 \n\t"
+ PMAXUB(%%mm1, %%mm4)
+ PMINUB(%%mm1, %%mm3, %%mm5)
+ "psubb %%mm1, %%mm2 \n\t"
+ "paddb %%mm7, %%mm2 \n\t"
+ "pcmpgtb %%mm6, %%mm2 \n\t"
+ "paddb %%mm2, %%mm0 \n\t"
+
+ "movq (%%"REG_a", %3), %%mm2 \n\t"
+ PMAXUB(%%mm2, %%mm4)
+ PMINUB(%%mm2, %%mm3, %%mm5)
+ "psubb %%mm2, %%mm1 \n\t"
+ "paddb %%mm7, %%mm1 \n\t"
+ "pcmpgtb %%mm6, %%mm1 \n\t"
+ "paddb %%mm1, %%mm0 \n\t"
+
+ "movq (%%"REG_a", %3, 2), %%mm1 \n\t"
+ PMAXUB(%%mm1, %%mm4)
+ PMINUB(%%mm1, %%mm3, %%mm5)
+ "psubb %%mm1, %%mm2 \n\t"
+ "paddb %%mm7, %%mm2 \n\t"
+ "pcmpgtb %%mm6, %%mm2 \n\t"
+ "paddb %%mm2, %%mm0 \n\t"
+ "psubusb %%mm3, %%mm4 \n\t"
+
+ " \n\t"
+#if HAVE_MMX2
+ "pxor %%mm7, %%mm7 \n\t"
+ "psadbw %%mm7, %%mm0 \n\t"
+#else
+ "movq %%mm0, %%mm1 \n\t"
+ "psrlw $8, %%mm0 \n\t"
+ "paddb %%mm1, %%mm0 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "psrlq $16, %%mm0 \n\t"
+ "paddb %%mm1, %%mm0 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "psrlq $32, %%mm0 \n\t"
+ "paddb %%mm1, %%mm0 \n\t"
+#endif
+ "movq %4, %%mm7 \n\t" // QP,..., QP
+ "paddusb %%mm7, %%mm7 \n\t" // 2QP ... 2QP
+ "psubusb %%mm7, %%mm4 \n\t" // Diff <= 2QP -> 0
+ "packssdw %%mm4, %%mm4 \n\t"
+ "movd %%mm0, %0 \n\t"
+ "movd %%mm4, %1 \n\t"
+
+ : "=r" (numEq), "=r" (dcOk)
+ : "r" (src), "r" ((stride_t)stride), "m" (c->pQPb)
+ : "%"REG_a
+ );
+
+ numEq= (-numEq) &0xFF;
+ if(numEq > c->ppMode.flatnessThreshold){
+ if(dcOk) return 0;
+ else return 1;
+ }else{
+ return 2;
+ }
+ }
+#endif
+
+/**
+ * Do a vertical low pass filter on the 8x16 block (only write to the 8x8 block in the middle)
+ * using the 9-Tap Filter (1,1,2,2,4,2,2,1,1)/16
+ */
+#ifndef HAVE_ALTIVEC
+static inline void RENAME(doVertLowPass)(uint8_t *src, stride_t stride, PPContext *c)
+{
+#if HAVE_MMX2 || HAVE_AMD3DNOW
+ src+= stride*3;
+ asm volatile( //"movv %0 %1 %2\n\t"
+ "movq %2, %%mm0 \n\t" // QP,..., QP
+ "pxor %%mm4, %%mm4 \n\t"
+
+ "movq (%0), %%mm6 \n\t"
+ "movq (%0, %1), %%mm5 \n\t"
+ "movq %%mm5, %%mm1 \n\t"
+ "movq %%mm6, %%mm2 \n\t"
+ "psubusb %%mm6, %%mm5 \n\t"
+ "psubusb %%mm1, %%mm2 \n\t"
+ "por %%mm5, %%mm2 \n\t" // ABS Diff of lines
+ "psubusb %%mm0, %%mm2 \n\t" // diff <= QP -> 0
+ "pcmpeqb %%mm4, %%mm2 \n\t" // diff <= QP -> FF
+
+ "pand %%mm2, %%mm6 \n\t"
+ "pandn %%mm1, %%mm2 \n\t"
+ "por %%mm2, %%mm6 \n\t"// First Line to Filter
+
+ "movq (%0, %1, 8), %%mm5 \n\t"
+ "lea (%0, %1, 4), %%"REG_a" \n\t"
+ "lea (%0, %1, 8), %%"REG_c" \n\t"
+ "sub %1, %%"REG_c" \n\t"
+ "add %1, %0 \n\t" // %0 points to line 1 not 0
+ "movq (%0, %1, 8), %%mm7 \n\t"
+ "movq %%mm5, %%mm1 \n\t"
+ "movq %%mm7, %%mm2 \n\t"
+ "psubusb %%mm7, %%mm5 \n\t"
+ "psubusb %%mm1, %%mm2 \n\t"
+ "por %%mm5, %%mm2 \n\t" // ABS Diff of lines
+ "psubusb %%mm0, %%mm2 \n\t" // diff <= QP -> 0
+ "pcmpeqb %%mm4, %%mm2 \n\t" // diff <= QP -> FF
+
+ "pand %%mm2, %%mm7 \n\t"
+ "pandn %%mm1, %%mm2 \n\t"
+ "por %%mm2, %%mm7 \n\t" // First Line to Filter
+
+
+ // 1 2 3 4 5 6 7 8
+ // %0 %0+%1 %0+2%1 eax %0+4%1 eax+2%1 ecx eax+4%1
+ // 6 4 2 2 1 1
+ // 6 4 4 2
+ // 6 8 2
+
+ "movq (%0, %1), %%mm0 \n\t" // 1
+ "movq %%mm0, %%mm1 \n\t" // 1
+ PAVGB(%%mm6, %%mm0) //1 1 /2
+ PAVGB(%%mm6, %%mm0) //3 1 /4
+
+ "movq (%0, %1, 4), %%mm2 \n\t" // 1
+ "movq %%mm2, %%mm5 \n\t" // 1
+ PAVGB((%%REGa), %%mm2) // 11 /2
+ PAVGB((%0, %1, 2), %%mm2) // 211 /4
+ "movq %%mm2, %%mm3 \n\t" // 211 /4
+ "movq (%0), %%mm4 \n\t" // 1
+ PAVGB(%%mm4, %%mm3) // 4 211 /8
+ PAVGB(%%mm0, %%mm3) //642211 /16
+ "movq %%mm3, (%0) \n\t" // X
+ // mm1=2 mm2=3(211) mm4=1 mm5=5 mm6=0 mm7=9
+ "movq %%mm1, %%mm0 \n\t" // 1
+ PAVGB(%%mm6, %%mm0) //1 1 /2
+ "movq %%mm4, %%mm3 \n\t" // 1
+ PAVGB((%0,%1,2), %%mm3) // 1 1 /2
+ PAVGB((%%REGa,%1,2), %%mm5) // 11 /2
+ PAVGB((%%REGa), %%mm5) // 211 /4
+ PAVGB(%%mm5, %%mm3) // 2 2211 /8
+ PAVGB(%%mm0, %%mm3) //4242211 /16
+ "movq %%mm3, (%0,%1) \n\t" // X
+ // mm1=2 mm2=3(211) mm4=1 mm5=4(211) mm6=0 mm7=9
+ PAVGB(%%mm4, %%mm6) //11 /2
+ "movq (%%"REG_c"), %%mm0 \n\t" // 1
+ PAVGB((%%REGa, %1, 2), %%mm0) // 11/2
+ "movq %%mm0, %%mm3 \n\t" // 11/2
+ PAVGB(%%mm1, %%mm0) // 2 11/4
+ PAVGB(%%mm6, %%mm0) //222 11/8
+ PAVGB(%%mm2, %%mm0) //22242211/16
+ "movq (%0, %1, 2), %%mm2 \n\t" // 1
+ "movq %%mm0, (%0, %1, 2) \n\t" // X
+ // mm1=2 mm2=3 mm3=6(11) mm4=1 mm5=4(211) mm6=0(11) mm7=9
+ "movq (%%"REG_a", %1, 4), %%mm0 \n\t" // 1
+ PAVGB((%%REGc), %%mm0) // 11 /2
+ PAVGB(%%mm0, %%mm6) //11 11 /4
+ PAVGB(%%mm1, %%mm4) // 11 /2
+ PAVGB(%%mm2, %%mm1) // 11 /2
+ PAVGB(%%mm1, %%mm6) //1122 11 /8
+ PAVGB(%%mm5, %%mm6) //112242211 /16
+ "movq (%%"REG_a"), %%mm5 \n\t" // 1
+ "movq %%mm6, (%%"REG_a") \n\t" // X
+ // mm0=7(11) mm1=2(11) mm2=3 mm3=6(11) mm4=1(11) mm5=4 mm7=9
+ "movq (%%"REG_a", %1, 4), %%mm6 \n\t" // 1
+ PAVGB(%%mm7, %%mm6) // 11 /2
+ PAVGB(%%mm4, %%mm6) // 11 11 /4
+ PAVGB(%%mm3, %%mm6) // 11 2211 /8
+ PAVGB(%%mm5, %%mm2) // 11 /2
+ "movq (%0, %1, 4), %%mm4 \n\t" // 1
+ PAVGB(%%mm4, %%mm2) // 112 /4
+ PAVGB(%%mm2, %%mm6) // 112242211 /16
+ "movq %%mm6, (%0, %1, 4) \n\t" // X
+ // mm0=7(11) mm1=2(11) mm2=3(112) mm3=6(11) mm4=5 mm5=4 mm7=9
+ PAVGB(%%mm7, %%mm1) // 11 2 /4
+ PAVGB(%%mm4, %%mm5) // 11 /2
+ PAVGB(%%mm5, %%mm0) // 11 11 /4
+ "movq (%%"REG_a", %1, 2), %%mm6 \n\t" // 1
+ PAVGB(%%mm6, %%mm1) // 11 4 2 /8
+ PAVGB(%%mm0, %%mm1) // 11224222 /16
+ "movq %%mm1, (%%"REG_a", %1, 2) \n\t" // X
+ // mm2=3(112) mm3=6(11) mm4=5 mm5=4(11) mm6=6 mm7=9
+ PAVGB((%%REGc), %%mm2) // 112 4 /8
+ "movq (%%"REG_a", %1, 4), %%mm0 \n\t" // 1
+ PAVGB(%%mm0, %%mm6) // 1 1 /2
+ PAVGB(%%mm7, %%mm6) // 1 12 /4
+ PAVGB(%%mm2, %%mm6) // 1122424 /4
+ "movq %%mm6, (%%"REG_c") \n\t" // X
+ // mm0=8 mm3=6(11) mm4=5 mm5=4(11) mm7=9
+ PAVGB(%%mm7, %%mm5) // 11 2 /4
+ PAVGB(%%mm7, %%mm5) // 11 6 /8
+
+ PAVGB(%%mm3, %%mm0) // 112 /4
+ PAVGB(%%mm0, %%mm5) // 112246 /16
+ "movq %%mm5, (%%"REG_a", %1, 4) \n\t" // X
+ "sub %1, %0 \n\t"
+
+ :
+ : "r" (src), "r" ((stride_t)stride), "m" (c->pQPb)
+ : "%"REG_a, "%"REG_c
+ );
+#else
+ const stride_t l1= stride;
+ const stride_t l2= stride + l1;
+ const stride_t l3= stride + l2;
+ const stride_t l4= stride + l3;
+ const stride_t l5= stride + l4;
+ const stride_t l6= stride + l5;
+ const stride_t l7= stride + l6;
+ const stride_t l8= stride + l7;
+ const stride_t l9= stride + l8;
+ int x;
+ src+= stride*3;
+ for(x=0; x<BLOCK_SIZE; x++)
+ {
+ const int first= ABS(src[0] - src[l1]) < c->QP ? src[0] : src[l1];
+ const int last= ABS(src[l8] - src[l9]) < c->QP ? src[l9] : src[l8];
+
+ int sums[10];
+ sums[0] = 4*first + src[l1] + src[l2] + src[l3] + 4;
+ sums[1] = sums[0] - first + src[l4];
+ sums[2] = sums[1] - first + src[l5];
+ sums[3] = sums[2] - first + src[l6];
+ sums[4] = sums[3] - first + src[l7];
+ sums[5] = sums[4] - src[l1] + src[l8];
+ sums[6] = sums[5] - src[l2] + last;
+ sums[7] = sums[6] - src[l3] + last;
+ sums[8] = sums[7] - src[l4] + last;
+ sums[9] = sums[8] - src[l5] + last;
+
+ src[l1]= (sums[0] + sums[2] + 2*src[l1])>>4;
+ src[l2]= (sums[1] + sums[3] + 2*src[l2])>>4;
+ src[l3]= (sums[2] + sums[4] + 2*src[l3])>>4;
+ src[l4]= (sums[3] + sums[5] + 2*src[l4])>>4;
+ src[l5]= (sums[4] + sums[6] + 2*src[l5])>>4;
+ src[l6]= (sums[5] + sums[7] + 2*src[l6])>>4;
+ src[l7]= (sums[6] + sums[8] + 2*src[l7])>>4;
+ src[l8]= (sums[7] + sums[9] + 2*src[l8])>>4;
+
+ src++;
+ }
+#endif
+}
+#endif //HAVE_ALTIVEC
+
+#if 0
+/**
+ * Experimental implementation of the filter (Algorithm 1) described in a paper from Ramkishor & Karandikar
+ * values are correctly clipped (MMX2)
+ * values wrap around (C)
+ * conclusion: it's fast, but introduces ugly horizontal patterns if there is a continuous gradient
+ 0 8 16 24
+ x = 8
+ x/2 = 4
+ x/8 = 1
+ 1 12 12 23
+ */
+static inline void RENAME(vertRK1Filter)(uint8_t *src, int stride, int QP)
+{
+#if HAVE_MMX2 || HAVE_AMD3DNOW
+ src+= stride*3;
+// FIXME rounding
+ asm volatile(
+ "pxor %%mm7, %%mm7 \n\t" // 0
+ "movq "MANGLE(b80)", %%mm6 \n\t" // MIN_SIGNED_BYTE
+ "leal (%0, %1), %%"REG_a" \n\t"
+ "leal (%%"REG_a", %1, 4), %%"REG_c" \n\t"
+// 0 1 2 3 4 5 6 7 8 9
+// %0 eax eax+%1 eax+2%1 %0+4%1 ecx ecx+%1 ecx+2%1 %0+8%1 ecx+4%1
+ "movq "MANGLE(pQPb)", %%mm0 \n\t" // QP,..., QP
+ "movq %%mm0, %%mm1 \n\t" // QP,..., QP
+ "paddusb "MANGLE(b02)", %%mm0 \n\t"
+ "psrlw $2, %%mm0 \n\t"
+ "pand "MANGLE(b3F)", %%mm0 \n\t" // QP/4,..., QP/4
+ "paddusb %%mm1, %%mm0 \n\t" // QP*1.25 ...
+ "movq (%0, %1, 4), %%mm2 \n\t" // line 4
+ "movq (%%"REG_c"), %%mm3 \n\t" // line 5
+ "movq %%mm2, %%mm4 \n\t" // line 4
+ "pcmpeqb %%mm5, %%mm5 \n\t" // -1
+ "pxor %%mm2, %%mm5 \n\t" // -line 4 - 1
+ PAVGB(%%mm3, %%mm5)
+ "paddb %%mm6, %%mm5 \n\t" // (l5-l4)/2
+ "psubusb %%mm3, %%mm4 \n\t"
+ "psubusb %%mm2, %%mm3 \n\t"
+ "por %%mm3, %%mm4 \n\t" // |l4 - l5|
+ "psubusb %%mm0, %%mm4 \n\t"
+ "pcmpeqb %%mm7, %%mm4 \n\t"
+ "pand %%mm4, %%mm5 \n\t" // d/2
+
+// "paddb %%mm6, %%mm2 \n\t" // line 4 + 0x80
+ "paddb %%mm5, %%mm2 \n\t"
+// "psubb %%mm6, %%mm2 \n\t"
+ "movq %%mm2, (%0,%1, 4) \n\t"
+
+ "movq (%%"REG_c"), %%mm2 \n\t"
+// "paddb %%mm6, %%mm2 \n\t" // line 5 + 0x80
+ "psubb %%mm5, %%mm2 \n\t"
+// "psubb %%mm6, %%mm2 \n\t"
+ "movq %%mm2, (%%"REG_c") \n\t"
+
+ "paddb %%mm6, %%mm5 \n\t"
+ "psrlw $2, %%mm5 \n\t"
+ "pand "MANGLE(b3F)", %%mm5 \n\t"
+ "psubb "MANGLE(b20)", %%mm5 \n\t" // (l5-l4)/8
+
+ "movq (%%"REG_a", %1, 2), %%mm2 \n\t"
+ "paddb %%mm6, %%mm2 \n\t" // line 3 + 0x80
+ "paddsb %%mm5, %%mm2 \n\t"
+ "psubb %%mm6, %%mm2 \n\t"
+ "movq %%mm2, (%%"REG_a", %1, 2) \n\t"
+
+ "movq (%%"REG_c", %1), %%mm2 \n\t"
+ "paddb %%mm6, %%mm2 \n\t" // line 6 + 0x80
+ "psubsb %%mm5, %%mm2 \n\t"
+ "psubb %%mm6, %%mm2 \n\t"
+ "movq %%mm2, (%%"REG_c", %1) \n\t"
+
+ :
+ : "r" (src), "r" ((stride_t)stride)
+ : "%"REG_a, "%"REG_c
+ );
+#else
+ const int l1= stride;
+ const int l2= stride + l1;
+ const int l3= stride + l2;
+ const int l4= stride + l3;
+ const int l5= stride + l4;
+ const int l6= stride + l5;
+// const int l7= stride + l6;
+// const int l8= stride + l7;
+// const int l9= stride + l8;
+ int x;
+ const int QP15= QP + (QP>>2);
+ src+= stride*3;
+ for(x=0; x<BLOCK_SIZE; x++)
+ {
+ const int v = (src[x+l5] - src[x+l4]);
+ if(ABS(v) < QP15)
+ {
+ src[x+l3] +=v>>3;
+ src[x+l4] +=v>>1;
+ src[x+l5] -=v>>1;
+ src[x+l6] -=v>>3;
+
+ }
+ }
+
+#endif
+}
+#endif
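+/* Worked example for the disabled RK1 filter above, assuming QP=20 (so the
+   threshold QP15 is 25): for a vertical step src[l4]=100, src[l5]=120 the
+   difference v=20 passes the test, so l3 gains v>>3=2, l4 gains v>>1=10,
+   l5 loses 10 and l6 loses 2, and the hard 100/120 edge is spread over four
+   lines (..., 102, 110, 110, 118, ...). */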
+
+/**
+ * Experimental Filter 1
+ * will not damage linear gradients
+ * Flat blocks should look like they were passed through the (1,1,2,2,4,2,2,1,1) 9-Tap filter
+ * can only smooth blocks at the expected locations (it can't smooth them if they did move)
+ * MMX2 version does correct clipping, the C version doesn't
+ * (a worked numeric example follows this function)
+ */
+static inline void RENAME(vertX1Filter)(uint8_t *src, stride_t stride, PPContext *co)
+{
+#if HAVE_MMX2 || HAVE_AMD3DNOW
+ src+= stride*3;
+
+ asm volatile(
+ "pxor %%mm7, %%mm7 \n\t" // 0
+ "lea (%0, %1), %%"REG_a" \n\t"
+ "lea (%%"REG_a", %1, 4), %%"REG_c" \n\t"
+// 0 1 2 3 4 5 6 7 8 9
+// %0 eax eax+%1 eax+2%1 %0+4%1 ecx ecx+%1 ecx+2%1 %0+8%1 ecx+4%1
+ "movq (%%"REG_a", %1, 2), %%mm0 \n\t" // line 3
+ "movq (%0, %1, 4), %%mm1 \n\t" // line 4
+ "movq %%mm1, %%mm2 \n\t" // line 4
+ "psubusb %%mm0, %%mm1 \n\t"
+ "psubusb %%mm2, %%mm0 \n\t"
+ "por %%mm1, %%mm0 \n\t" // |l2 - l3|
+ "movq (%%"REG_c"), %%mm3 \n\t" // line 5
+ "movq (%%"REG_c", %1), %%mm4 \n\t" // line 6
+ "movq %%mm3, %%mm5 \n\t" // line 5
+ "psubusb %%mm4, %%mm3 \n\t"
+ "psubusb %%mm5, %%mm4 \n\t"
+ "por %%mm4, %%mm3 \n\t" // |l5 - l6|
+ PAVGB(%%mm3, %%mm0) // (|l2 - l3| + |l5 - l6|)/2
+ "movq %%mm2, %%mm1 \n\t" // line 4
+ "psubusb %%mm5, %%mm2 \n\t"
+ "movq %%mm2, %%mm4 \n\t"
+ "pcmpeqb %%mm7, %%mm2 \n\t" // (l4 - l5) <= 0 ? -1 : 0
+ "psubusb %%mm1, %%mm5 \n\t"
+ "por %%mm5, %%mm4 \n\t" // |l4 - l5|
+ "psubusb %%mm0, %%mm4 \n\t" //d = MAX(0, |l4-l5| - (|l2-l3| + |l5-l6|)/2)
+ "movq %%mm4, %%mm3 \n\t" // d
+ "movq %2, %%mm0 \n\t"
+ "paddusb %%mm0, %%mm0 \n\t"
+ "psubusb %%mm0, %%mm4 \n\t"
+ "pcmpeqb %%mm7, %%mm4 \n\t" // d <= QP ? -1 : 0
+ "psubusb "MANGLE(b01)", %%mm3 \n\t"
+ "pand %%mm4, %%mm3 \n\t" // d <= QP ? d : 0
+
+ PAVGB(%%mm7, %%mm3) // d/2
+ "movq %%mm3, %%mm1 \n\t" // d/2
+ PAVGB(%%mm7, %%mm3) // d/4
+ PAVGB(%%mm1, %%mm3) // 3*d/8
+
+ "movq (%0, %1, 4), %%mm0 \n\t" // line 4
+ "pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l4-1 : l4
+ "psubusb %%mm3, %%mm0 \n\t"
+ "pxor %%mm2, %%mm0 \n\t"
+ "movq %%mm0, (%0, %1, 4) \n\t" // line 4
+
+ "movq (%%"REG_c"), %%mm0 \n\t" // line 5
+ "pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l5-1 : l5
+ "paddusb %%mm3, %%mm0 \n\t"
+ "pxor %%mm2, %%mm0 \n\t"
+ "movq %%mm0, (%%"REG_c") \n\t" // line 5
+
+ PAVGB(%%mm7, %%mm1) // d/4
+
+ "movq (%%"REG_a", %1, 2), %%mm0 \n\t" // line 3
+ "pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l4-1 : l4
+ "psubusb %%mm1, %%mm0 \n\t"
+ "pxor %%mm2, %%mm0 \n\t"
+ "movq %%mm0, (%%"REG_a", %1, 2) \n\t" // line 3
+
+ "movq (%%"REG_c", %1), %%mm0 \n\t" // line 6
+ "pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l5-1 : l5
+ "paddusb %%mm1, %%mm0 \n\t"
+ "pxor %%mm2, %%mm0 \n\t"
+ "movq %%mm0, (%%"REG_c", %1) \n\t" // line 6
+
+ PAVGB(%%mm7, %%mm1) // d/8
+
+ "movq (%%"REG_a", %1), %%mm0 \n\t" // line 2
+ "pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l2-1 : l2
+ "psubusb %%mm1, %%mm0 \n\t"
+ "pxor %%mm2, %%mm0 \n\t"
+ "movq %%mm0, (%%"REG_a", %1) \n\t" // line 2
+
+ "movq (%%"REG_c", %1, 2), %%mm0 \n\t" // line 7
+ "pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l7-1 : l7
+ "paddusb %%mm1, %%mm0 \n\t"
+ "pxor %%mm2, %%mm0 \n\t"
+ "movq %%mm0, (%%"REG_c", %1, 2) \n\t" // line 7
+
+ :
+ : "r" (src), "r" ((stride_t)stride), "m" (co->pQPb)
+ : "%"REG_a, "%"REG_c
+ );
+#else
+
+ const stride_t l1= stride;
+ const stride_t l2= stride + l1;
+ const stride_t l3= stride + l2;
+ const stride_t l4= stride + l3;
+ const stride_t l5= stride + l4;
+ const stride_t l6= stride + l5;
+ const stride_t l7= stride + l6;
+// const int l8= stride + l7;
+// const int l9= stride + l8;
+ int x;
+
+ src+= stride*3;
+ for(x=0; x<BLOCK_SIZE; x++)
+ {
+ int a= src[l3] - src[l4];
+ int b= src[l4] - src[l5];
+ int c= src[l5] - src[l6];
+
+ int d= ABS(b) - ((ABS(a) + ABS(c))>>1);
+ d= MAX(d, 0);
+
+ if(d < co->QP*2)
+ {
+ int v = d * SIGN(-b);
+
+ src[l2] +=v>>3;
+ src[l3] +=v>>2;
+ src[l4] +=(3*v)>>3;
+ src[l5] -=(3*v)>>3;
+ src[l6] -=v>>2;
+ src[l7] -=v>>3;
+
+ }
+ src++;
+ }
+#endif
+}
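+/* Worked example for vertX1Filter, assuming QP=25 and the column
+   l1..l8 = 100,100,100,100,140,140,140,140: a=0, b=-40, c=0, hence
+   d = |b| - (|a|+|c|)/2 = 40 < 2*QP and v = d*SIGN(-b) = 40.
+   The column becomes 100,105,110,115,125,130,135,140, i.e. the hard step
+   between l4 and l5 turns into a ramp, while a perfectly linear gradient
+   gives d=0 and is left untouched. */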
+
+#ifndef HAVE_ALTIVEC
+static inline void RENAME(doVertDefFilter)(uint8_t src[], stride_t stride, PPContext *c)
+{
+#if HAVE_MMX2 || HAVE_AMD3DNOW
+/*
+ uint8_t tmp[16];
+ const int l1= stride;
+ const int l2= stride + l1;
+ const int l3= stride + l2;
+ const int l4= (int)tmp - (int)src - stride*3;
+ const int l5= (int)tmp - (int)src - stride*3 + 8;
+ const int l6= stride*3 + l3;
+ const int l7= stride + l6;
+ const int l8= stride + l7;
+
+ memcpy(tmp, src+stride*7, 8);
+ memcpy(tmp+8, src+stride*8, 8);
+*/
+ src+= stride*4;
+ asm volatile(
+
+#if 0 //slightly more accurate and slightly slower
+ "pxor %%mm7, %%mm7 \n\t" // 0
+ "lea (%0, %1), %%"REG_a" \n\t"
+ "lea (%%"REG_a", %1, 4), %%"REG_c" \n\t"
+// 0 1 2 3 4 5 6 7
+// %0 %0+%1 %0+2%1 eax+2%1 %0+4%1 eax+4%1 ecx+%1 ecx+2%1
+// %0 eax eax+%1 eax+2%1 %0+4%1 ecx ecx+%1 ecx+2%1
+
+
+ "movq (%0, %1, 2), %%mm0 \n\t" // l2
+ "movq (%0), %%mm1 \n\t" // l0
+ "movq %%mm0, %%mm2 \n\t" // l2
+ PAVGB(%%mm7, %%mm0) // ~l2/2
+ PAVGB(%%mm1, %%mm0) // ~(l2 + 2l0)/4
+ PAVGB(%%mm2, %%mm0) // ~(5l2 + 2l0)/8
+
+ "movq (%%"REG_a"), %%mm1 \n\t" // l1
+ "movq (%%"REG_a", %1, 2), %%mm3 \n\t" // l3
+ "movq %%mm1, %%mm4 \n\t" // l1
+ PAVGB(%%mm7, %%mm1) // ~l1/2
+ PAVGB(%%mm3, %%mm1) // ~(l1 + 2l3)/4
+ PAVGB(%%mm4, %%mm1) // ~(5l1 + 2l3)/8
+
+ "movq %%mm0, %%mm4 \n\t" // ~(5l2 + 2l0)/8
+ "psubusb %%mm1, %%mm0 \n\t"
+ "psubusb %%mm4, %%mm1 \n\t"
+ "por %%mm0, %%mm1 \n\t" // ~|2l0 - 5l1 + 5l2 - 2l3|/8
+// mm1= |lenergy|, mm2= l2, mm3= l3, mm7=0
+
+ "movq (%0, %1, 4), %%mm0 \n\t" // l4
+ "movq %%mm0, %%mm4 \n\t" // l4
+ PAVGB(%%mm7, %%mm0) // ~l4/2
+ PAVGB(%%mm2, %%mm0) // ~(l4 + 2l2)/4
+ PAVGB(%%mm4, %%mm0) // ~(5l4 + 2l2)/8
+
+ "movq (%%"REG_c"), %%mm2 \n\t" // l5
+ "movq %%mm3, %%mm5 \n\t" // l3
+ PAVGB(%%mm7, %%mm3) // ~l3/2
+ PAVGB(%%mm2, %%mm3) // ~(l3 + 2l5)/4
+ PAVGB(%%mm5, %%mm3) // ~(5l3 + 2l5)/8
+
+ "movq %%mm0, %%mm6 \n\t" // ~(5l4 + 2l2)/8
+ "psubusb %%mm3, %%mm0 \n\t"
+ "psubusb %%mm6, %%mm3 \n\t"
+ "por %%mm0, %%mm3 \n\t" // ~|2l2 - 5l3 + 5l4 - 2l5|/8
+ "pcmpeqb %%mm7, %%mm0 \n\t" // SIGN(2l2 - 5l3 + 5l4 - 2l5)
+// mm0= SIGN(menergy), mm1= |lenergy|, mm2= l5, mm3= |menergy|, mm4=l4, mm5= l3, mm7=0
+
+ "movq (%%"REG_c", %1), %%mm6 \n\t" // l6
+ "movq %%mm6, %%mm5 \n\t" // l6
+ PAVGB(%%mm7, %%mm6) // ~l6/2
+ PAVGB(%%mm4, %%mm6) // ~(l6 + 2l4)/4
+ PAVGB(%%mm5, %%mm6) // ~(5l6 + 2l4)/8
+
+ "movq (%%"REG_c", %1, 2), %%mm5 \n\t" // l7
+ "movq %%mm2, %%mm4 \n\t" // l5
+ PAVGB(%%mm7, %%mm2) // ~l5/2
+ PAVGB(%%mm5, %%mm2) // ~(l5 + 2l7)/4
+ PAVGB(%%mm4, %%mm2) // ~(5l5 + 2l7)/8
+
+ "movq %%mm6, %%mm4 \n\t" // ~(5l6 + 2l4)/8
+ "psubusb %%mm2, %%mm6 \n\t"
+ "psubusb %%mm4, %%mm2 \n\t"
+ "por %%mm6, %%mm2 \n\t" // ~|2l4 - 5l5 + 5l6 - 2l7|/8
+// mm0= SIGN(menergy), mm1= |lenergy|/8, mm2= |renergy|/8, mm3= |menergy|/8, mm7=0
+
+
+ PMINUB(%%mm2, %%mm1, %%mm4) // MIN(|lenergy|,|renergy|)/8
+ "movq %2, %%mm4 \n\t" // QP //FIXME QP+1 ?
+ "paddusb "MANGLE(b01)", %%mm4 \n\t"
+ "pcmpgtb %%mm3, %%mm4 \n\t" // |menergy|/8 < QP
+ "psubusb %%mm1, %%mm3 \n\t" // d=|menergy|/8-MIN(|lenergy|,|renergy|)/8
+ "pand %%mm4, %%mm3 \n\t"
+
+ "movq %%mm3, %%mm1 \n\t"
+// "psubusb "MANGLE(b01)", %%mm3 \n\t"
+ PAVGB(%%mm7, %%mm3)
+ PAVGB(%%mm7, %%mm3)
+ "paddusb %%mm1, %%mm3 \n\t"
+// "paddusb "MANGLE(b01)", %%mm3 \n\t"
+
+ "movq (%%"REG_a", %1, 2), %%mm6 \n\t" //l3
+ "movq (%0, %1, 4), %%mm5 \n\t" //l4
+ "movq (%0, %1, 4), %%mm4 \n\t" //l4
+ "psubusb %%mm6, %%mm5 \n\t"
+ "psubusb %%mm4, %%mm6 \n\t"
+ "por %%mm6, %%mm5 \n\t" // |l3-l4|
+ "pcmpeqb %%mm7, %%mm6 \n\t" // SIGN(l3-l4)
+ "pxor %%mm6, %%mm0 \n\t"
+ "pand %%mm0, %%mm3 \n\t"
+ PMINUB(%%mm5, %%mm3, %%mm0)
+
+ "psubusb "MANGLE(b01)", %%mm3 \n\t"
+ PAVGB(%%mm7, %%mm3)
+
+ "movq (%%"REG_a", %1, 2), %%mm0 \n\t"
+ "movq (%0, %1, 4), %%mm2 \n\t"
+ "pxor %%mm6, %%mm0 \n\t"
+ "pxor %%mm6, %%mm2 \n\t"
+ "psubb %%mm3, %%mm0 \n\t"
+ "paddb %%mm3, %%mm2 \n\t"
+ "pxor %%mm6, %%mm0 \n\t"
+ "pxor %%mm6, %%mm2 \n\t"
+ "movq %%mm0, (%%"REG_a", %1, 2) \n\t"
+ "movq %%mm2, (%0, %1, 4) \n\t"
+#endif
+
+ "lea (%0, %1), %%"REG_a" \n\t"
+ "pcmpeqb %%mm6, %%mm6 \n\t" // -1
+// 0 1 2 3 4 5 6 7
+// %0 %0+%1 %0+2%1 eax+2%1 %0+4%1 eax+4%1 ecx+%1 ecx+2%1
+// %0 eax eax+%1 eax+2%1 %0+4%1 ecx ecx+%1 ecx+2%1
+
+
+ "movq (%%"REG_a", %1, 2), %%mm1 \n\t" // l3
+ "movq (%0, %1, 4), %%mm0 \n\t" // l4
+ "pxor %%mm6, %%mm1 \n\t" // -l3-1
+ PAVGB(%%mm1, %%mm0) // -q+128 = (l4-l3+256)/2
+// mm1=-l3-1, mm0=128-q
+
+ "movq (%%"REG_a", %1, 4), %%mm2 \n\t" // l5
+ "movq (%%"REG_a", %1), %%mm3 \n\t" // l2
+ "pxor %%mm6, %%mm2 \n\t" // -l5-1
+ "movq %%mm2, %%mm5 \n\t" // -l5-1
+ "movq "MANGLE(b80)", %%mm4 \n\t" // 128
+ "lea (%%"REG_a", %1, 4), %%"REG_c" \n\t"
+ PAVGB(%%mm3, %%mm2) // (l2-l5+256)/2
+ PAVGB(%%mm0, %%mm4) // ~(l4-l3)/4 + 128
+ PAVGB(%%mm2, %%mm4) // ~(l2-l5)/4 +(l4-l3)/8 + 128
+ PAVGB(%%mm0, %%mm4) // ~(l2-l5)/8 +5(l4-l3)/16 + 128
+// mm1=-l3-1, mm0=128-q, mm3=l2, mm4=menergy/16 + 128, mm5= -l5-1
+
+ "movq (%%"REG_a"), %%mm2 \n\t" // l1
+ "pxor %%mm6, %%mm2 \n\t" // -l1-1
+ PAVGB(%%mm3, %%mm2) // (l2-l1+256)/2
+ PAVGB((%0), %%mm1) // (l0-l3+256)/2
+ "movq "MANGLE(b80)", %%mm3 \n\t" // 128
+ PAVGB(%%mm2, %%mm3) // ~(l2-l1)/4 + 128
+ PAVGB(%%mm1, %%mm3) // ~(l0-l3)/4 +(l2-l1)/8 + 128
+ PAVGB(%%mm2, %%mm3) // ~(l0-l3)/8 +5(l2-l1)/16 + 128
+// mm0=128-q, mm3=lenergy/16 + 128, mm4= menergy/16 + 128, mm5= -l5-1
+
+ PAVGB((%%REGc, %1), %%mm5) // (l6-l5+256)/2
+ "movq (%%"REG_c", %1, 2), %%mm1 \n\t" // l7
+ "pxor %%mm6, %%mm1 \n\t" // -l7-1
+ PAVGB((%0, %1, 4), %%mm1) // (l4-l7+256)/2
+ "movq "MANGLE(b80)", %%mm2 \n\t" // 128
+ PAVGB(%%mm5, %%mm2) // ~(l6-l5)/4 + 128
+ PAVGB(%%mm1, %%mm2) // ~(l4-l7)/4 +(l6-l5)/8 + 128
+ PAVGB(%%mm5, %%mm2) // ~(l4-l7)/8 +5(l6-l5)/16 + 128
+// mm0=128-q, mm2=renergy/16 + 128, mm3=lenergy/16 + 128, mm4= menergy/16 + 128
+
+ "movq "MANGLE(b00)", %%mm1 \n\t" // 0
+ "movq "MANGLE(b00)", %%mm5 \n\t" // 0
+ "psubb %%mm2, %%mm1 \n\t" // 128 - renergy/16
+ "psubb %%mm3, %%mm5 \n\t" // 128 - lenergy/16
+ PMAXUB(%%mm1, %%mm2) // 128 + |renergy/16|
+ PMAXUB(%%mm5, %%mm3) // 128 + |lenergy/16|
+ PMINUB(%%mm2, %%mm3, %%mm1) // 128 + MIN(|lenergy|,|renergy|)/16
+
+// mm0=128-q, mm3=128 + MIN(|lenergy|,|renergy|)/16, mm4= menergy/16 + 128
+
+ "movq "MANGLE(b00)", %%mm7 \n\t" // 0
+ "movq %2, %%mm2 \n\t" // QP
+ PAVGB(%%mm6, %%mm2) // 128 + QP/2
+ "psubb %%mm6, %%mm2 \n\t"
+
+ "movq %%mm4, %%mm1 \n\t"
+ "pcmpgtb %%mm7, %%mm1 \n\t" // SIGN(menergy)
+ "pxor %%mm1, %%mm4 \n\t"
+ "psubb %%mm1, %%mm4 \n\t" // 128 + |menergy|/16
+ "pcmpgtb %%mm4, %%mm2 \n\t" // |menergy|/16 < QP/2
+ "psubusb %%mm3, %%mm4 \n\t" //d=|menergy|/16 - MIN(|lenergy|,|renergy|)/16
+// mm0=128-q, mm1= SIGN(menergy), mm2= |menergy|/16 < QP/2, mm4= d/16
+
+ "movq %%mm4, %%mm3 \n\t" // d
+ "psubusb "MANGLE(b01)", %%mm4 \n\t"
+ PAVGB(%%mm7, %%mm4) // d/32
+ PAVGB(%%mm7, %%mm4) // (d + 32)/64
+ "paddb %%mm3, %%mm4 \n\t" // 5d/64
+ "pand %%mm2, %%mm4 \n\t"
+
+ "movq "MANGLE(b80)", %%mm5 \n\t" // 128
+ "psubb %%mm0, %%mm5 \n\t" // q
+ "paddsb %%mm6, %%mm5 \n\t" // fix bad rounding
+ "pcmpgtb %%mm5, %%mm7 \n\t" // SIGN(q)
+ "pxor %%mm7, %%mm5 \n\t"
+
+ PMINUB(%%mm5, %%mm4, %%mm3) // MIN(|q|, 5d/64)
+ "pxor %%mm1, %%mm7 \n\t" // SIGN(d*q)
+
+ "pand %%mm7, %%mm4 \n\t"
+ "movq (%%"REG_a", %1, 2), %%mm0 \n\t"
+ "movq (%0, %1, 4), %%mm2 \n\t"
+ "pxor %%mm1, %%mm0 \n\t"
+ "pxor %%mm1, %%mm2 \n\t"
+ "paddb %%mm4, %%mm0 \n\t"
+ "psubb %%mm4, %%mm2 \n\t"
+ "pxor %%mm1, %%mm0 \n\t"
+ "pxor %%mm1, %%mm2 \n\t"
+ "movq %%mm0, (%%"REG_a", %1, 2) \n\t"
+ "movq %%mm2, (%0, %1, 4) \n\t"
+
+ :
+ : "r" (src), "r" ((stride_t)stride), "m" (c->pQPb)
+ : "%"REG_a, "%"REG_c
+ );
+
+/*
+ {
+ int x;
+ src-= stride;
+ for(x=0; x<BLOCK_SIZE; x++)
+ {
+ const int middleEnergy= 5*(src[l5] - src[l4]) + 2*(src[l3] - src[l6]);
+ if(ABS(middleEnergy)< 8*QP)
+ {
+ const int q=(src[l4] - src[l5])/2;
+ const int leftEnergy= 5*(src[l3] - src[l2]) + 2*(src[l1] - src[l4]);
+ const int rightEnergy= 5*(src[l7] - src[l6]) + 2*(src[l5] - src[l8]);
+
+ int d= ABS(middleEnergy) - MIN( ABS(leftEnergy), ABS(rightEnergy) );
+ d= MAX(d, 0);
+
+ d= (5*d + 32) >> 6;
+ d*= SIGN(-middleEnergy);
+
+ if(q>0)
+ {
+ d= d<0 ? 0 : d;
+ d= d>q ? q : d;
+ }
+ else
+ {
+ d= d>0 ? 0 : d;
+ d= d<q ? q : d;
+ }
+
+ src[l4]-= d;
+ src[l5]+= d;
+ }
+ src++;
+ }
+src-=8;
+ for(x=0; x<8; x++)
+ {
+ int y;
+ for(y=4; y<6; y++)
+ {
+ int d= src[x+y*stride] - tmp[x+(y-4)*8];
+ int ad= ABS(d);
+ static int max=0;
+ static int sum=0;
+ static int num=0;
+ static int bias=0;
+
+ if(max<ad) max=ad;
+ sum+= ad>3 ? 1 : 0;
+ if(ad>3)
+ {
+ src[0] = src[7] = src[stride*7] = src[(stride+1)*7]=255;
+ }
+ if(y==4) bias+=d;
+ num++;
+ if(num%1000000 == 0)
+ {
+ printf(" %d %d %d %d\n", num, sum, max, bias);
+ }
+ }
+ }
+}
+*/
+#elif HAVE_MMX
+ src+= stride*4;
+ asm volatile(
+ "pxor %%mm7, %%mm7 \n\t"
+ "lea -40(%%"REG_SP"), %%"REG_c" \n\t" // make space for 4 8-byte vars
+ "and "ALIGN_MASK", %%"REG_c" \n\t" // align
+// 0 1 2 3 4 5 6 7
+// %0 %0+%1 %0+2%1 eax+2%1 %0+4%1 eax+4%1 edx+%1 edx+2%1
+// %0 eax eax+%1 eax+2%1 %0+4%1 edx edx+%1 edx+2%1
+
+ "movq (%0), %%mm0 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t" // low part of line 0
+ "punpckhbw %%mm7, %%mm1 \n\t" // high part of line 0
+
+ "movq (%0, %1), %%mm2 \n\t"
+ "lea (%0, %1, 2), %%"REG_a" \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t" // low part of line 1
+ "punpckhbw %%mm7, %%mm3 \n\t" // high part of line 1
+
+ "movq (%%"REG_a"), %%mm4 \n\t"
+ "movq %%mm4, %%mm5 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t" // low part of line 2
+ "punpckhbw %%mm7, %%mm5 \n\t" // high part of line 2
+
+ "paddw %%mm0, %%mm0 \n\t" // 2L0
+ "paddw %%mm1, %%mm1 \n\t" // 2H0
+ "psubw %%mm4, %%mm2 \n\t" // L1 - L2
+ "psubw %%mm5, %%mm3 \n\t" // H1 - H2
+ "psubw %%mm2, %%mm0 \n\t" // 2L0 - L1 + L2
+ "psubw %%mm3, %%mm1 \n\t" // 2H0 - H1 + H2
+
+ "psllw $2, %%mm2 \n\t" // 4L1 - 4L2
+ "psllw $2, %%mm3 \n\t" // 4H1 - 4H2
+ "psubw %%mm2, %%mm0 \n\t" // 2L0 - 5L1 + 5L2
+ "psubw %%mm3, %%mm1 \n\t" // 2H0 - 5H1 + 5H2
+
+ "movq (%%"REG_a", %1), %%mm2 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t" // L3
+ "punpckhbw %%mm7, %%mm3 \n\t" // H3
+
+ "psubw %%mm2, %%mm0 \n\t" // 2L0 - 5L1 + 5L2 - L3
+ "psubw %%mm3, %%mm1 \n\t" // 2H0 - 5H1 + 5H2 - H3
+ "psubw %%mm2, %%mm0 \n\t" // 2L0 - 5L1 + 5L2 - 2L3
+ "psubw %%mm3, %%mm1 \n\t" // 2H0 - 5H1 + 5H2 - 2H3
+ "movq %%mm0, (%%"REG_c") \n\t" // 2L0 - 5L1 + 5L2 - 2L3
+ "movq %%mm1, 8(%%"REG_c") \n\t" // 2H0 - 5H1 + 5H2 - 2H3
+
+ "movq (%%"REG_a", %1, 2), %%mm0 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t" // L4
+ "punpckhbw %%mm7, %%mm1 \n\t" // H4
+
+ "psubw %%mm0, %%mm2 \n\t" // L3 - L4
+ "psubw %%mm1, %%mm3 \n\t" // H3 - H4
+ "movq %%mm2, 16(%%"REG_c") \n\t" // L3 - L4
+ "movq %%mm3, 24(%%"REG_c") \n\t" // H3 - H4
+ "paddw %%mm4, %%mm4 \n\t" // 2L2
+ "paddw %%mm5, %%mm5 \n\t" // 2H2
+ "psubw %%mm2, %%mm4 \n\t" // 2L2 - L3 + L4
+ "psubw %%mm3, %%mm5 \n\t" // 2H2 - H3 + H4
+
+ "lea (%%"REG_a", %1), %0 \n\t"
+ "psllw $2, %%mm2 \n\t" // 4L3 - 4L4
+ "psllw $2, %%mm3 \n\t" // 4H3 - 4H4
+ "psubw %%mm2, %%mm4 \n\t" // 2L2 - 5L3 + 5L4
+ "psubw %%mm3, %%mm5 \n\t" // 2H2 - 5H3 + 5H4
+//50 opcodes so far
+ "movq (%0, %1, 2), %%mm2 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t" // L5
+ "punpckhbw %%mm7, %%mm3 \n\t" // H5
+ "psubw %%mm2, %%mm4 \n\t" // 2L2 - 5L3 + 5L4 - L5
+ "psubw %%mm3, %%mm5 \n\t" // 2H2 - 5H3 + 5H4 - H5
+ "psubw %%mm2, %%mm4 \n\t" // 2L2 - 5L3 + 5L4 - 2L5
+ "psubw %%mm3, %%mm5 \n\t" // 2H2 - 5H3 + 5H4 - 2H5
+
+ "movq (%%"REG_a", %1, 4), %%mm6 \n\t"
+ "punpcklbw %%mm7, %%mm6 \n\t" // L6
+ "psubw %%mm6, %%mm2 \n\t" // L5 - L6
+ "movq (%%"REG_a", %1, 4), %%mm6 \n\t"
+ "punpckhbw %%mm7, %%mm6 \n\t" // H6
+ "psubw %%mm6, %%mm3 \n\t" // H5 - H6
+
+ "paddw %%mm0, %%mm0 \n\t" // 2L4
+ "paddw %%mm1, %%mm1 \n\t" // 2H4
+ "psubw %%mm2, %%mm0 \n\t" // 2L4 - L5 + L6
+ "psubw %%mm3, %%mm1 \n\t" // 2H4 - H5 + H6
+
+ "psllw $2, %%mm2 \n\t" // 4L5 - 4L6
+ "psllw $2, %%mm3 \n\t" // 4H5 - 4H6
+ "psubw %%mm2, %%mm0 \n\t" // 2L4 - 5L5 + 5L6
+ "psubw %%mm3, %%mm1 \n\t" // 2H4 - 5H5 + 5H6
+
+ "movq (%0, %1, 4), %%mm2 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t" // L7
+ "punpckhbw %%mm7, %%mm3 \n\t" // H7
+
+ "paddw %%mm2, %%mm2 \n\t" // 2L7
+ "paddw %%mm3, %%mm3 \n\t" // 2H7
+ "psubw %%mm2, %%mm0 \n\t" // 2L4 - 5L5 + 5L6 - 2L7
+ "psubw %%mm3, %%mm1 \n\t" // 2H4 - 5H5 + 5H6 - 2H7
+
+ "movq (%%"REG_c"), %%mm2 \n\t" // 2L0 - 5L1 + 5L2 - 2L3
+ "movq 8(%%"REG_c"), %%mm3 \n\t" // 2H0 - 5H1 + 5H2 - 2H3
+
+#if HAVE_MMX2
+ "movq %%mm7, %%mm6 \n\t" // 0
+ "psubw %%mm0, %%mm6 \n\t"
+ "pmaxsw %%mm6, %%mm0 \n\t" // |2L4 - 5L5 + 5L6 - 2L7|
+ "movq %%mm7, %%mm6 \n\t" // 0
+ "psubw %%mm1, %%mm6 \n\t"
+ "pmaxsw %%mm6, %%mm1 \n\t" // |2H4 - 5H5 + 5H6 - 2H7|
+ "movq %%mm7, %%mm6 \n\t" // 0
+ "psubw %%mm2, %%mm6 \n\t"
+ "pmaxsw %%mm6, %%mm2 \n\t" // |2L0 - 5L1 + 5L2 - 2L3|
+ "movq %%mm7, %%mm6 \n\t" // 0
+ "psubw %%mm3, %%mm6 \n\t"
+ "pmaxsw %%mm6, %%mm3 \n\t" // |2H0 - 5H1 + 5H2 - 2H3|
+#else
+ "movq %%mm7, %%mm6 \n\t" // 0
+ "pcmpgtw %%mm0, %%mm6 \n\t"
+ "pxor %%mm6, %%mm0 \n\t"
+ "psubw %%mm6, %%mm0 \n\t" // |2L4 - 5L5 + 5L6 - 2L7|
+ "movq %%mm7, %%mm6 \n\t" // 0
+ "pcmpgtw %%mm1, %%mm6 \n\t"
+ "pxor %%mm6, %%mm1 \n\t"
+ "psubw %%mm6, %%mm1 \n\t" // |2H4 - 5H5 + 5H6 - 2H7|
+ "movq %%mm7, %%mm6 \n\t" // 0
+ "pcmpgtw %%mm2, %%mm6 \n\t"
+ "pxor %%mm6, %%mm2 \n\t"
+ "psubw %%mm6, %%mm2 \n\t" // |2L0 - 5L1 + 5L2 - 2L3|
+ "movq %%mm7, %%mm6 \n\t" // 0
+ "pcmpgtw %%mm3, %%mm6 \n\t"
+ "pxor %%mm6, %%mm3 \n\t"
+ "psubw %%mm6, %%mm3 \n\t" // |2H0 - 5H1 + 5H2 - 2H3|
+#endif
+
+#if HAVE_MMX2
+ "pminsw %%mm2, %%mm0 \n\t"
+ "pminsw %%mm3, %%mm1 \n\t"
+#else
+ "movq %%mm0, %%mm6 \n\t"
+ "psubusw %%mm2, %%mm6 \n\t"
+ "psubw %%mm6, %%mm0 \n\t"
+ "movq %%mm1, %%mm6 \n\t"
+ "psubusw %%mm3, %%mm6 \n\t"
+ "psubw %%mm6, %%mm1 \n\t"
+#endif
+
+ "movd %2, %%mm2 \n\t" // QP
+ "punpcklbw %%mm7, %%mm2 \n\t"
+
+ "movq %%mm7, %%mm6 \n\t" // 0
+ "pcmpgtw %%mm4, %%mm6 \n\t" // sign(2L2 - 5L3 + 5L4 - 2L5)
+ "pxor %%mm6, %%mm4 \n\t"
+ "psubw %%mm6, %%mm4 \n\t" // |2L2 - 5L3 + 5L4 - 2L5|
+ "pcmpgtw %%mm5, %%mm7 \n\t" // sign(2H2 - 5H3 + 5H4 - 2H5)
+ "pxor %%mm7, %%mm5 \n\t"
+ "psubw %%mm7, %%mm5 \n\t" // |2H2 - 5H3 + 5H4 - 2H5|
+// 100 opcodes
+ "psllw $3, %%mm2 \n\t" // 8QP
+ "movq %%mm2, %%mm3 \n\t" // 8QP
+ "pcmpgtw %%mm4, %%mm2 \n\t"
+ "pcmpgtw %%mm5, %%mm3 \n\t"
+ "pand %%mm2, %%mm4 \n\t"
+ "pand %%mm3, %%mm5 \n\t"
+
+
+ "psubusw %%mm0, %%mm4 \n\t" // hd
+ "psubusw %%mm1, %%mm5 \n\t" // ld
+
+
+ "movq "MANGLE(w05)", %%mm2 \n\t" // 5
+ "pmullw %%mm2, %%mm4 \n\t"
+ "pmullw %%mm2, %%mm5 \n\t"
+ "movq "MANGLE(w20)", %%mm2 \n\t" // 32
+ "paddw %%mm2, %%mm4 \n\t"
+ "paddw %%mm2, %%mm5 \n\t"
+ "psrlw $6, %%mm4 \n\t"
+ "psrlw $6, %%mm5 \n\t"
+
+ "movq 16(%%"REG_c"), %%mm0 \n\t" // L3 - L4
+ "movq 24(%%"REG_c"), %%mm1 \n\t" // H3 - H4
+
+ "pxor %%mm2, %%mm2 \n\t"
+ "pxor %%mm3, %%mm3 \n\t"
+
+ "pcmpgtw %%mm0, %%mm2 \n\t" // sign (L3-L4)
+ "pcmpgtw %%mm1, %%mm3 \n\t" // sign (H3-H4)
+ "pxor %%mm2, %%mm0 \n\t"
+ "pxor %%mm3, %%mm1 \n\t"
+ "psubw %%mm2, %%mm0 \n\t" // |L3-L4|
+ "psubw %%mm3, %%mm1 \n\t" // |H3-H4|
+ "psrlw $1, %%mm0 \n\t" // |L3 - L4|/2
+ "psrlw $1, %%mm1 \n\t" // |H3 - H4|/2
+
+ "pxor %%mm6, %%mm2 \n\t"
+ "pxor %%mm7, %%mm3 \n\t"
+ "pand %%mm2, %%mm4 \n\t"
+ "pand %%mm3, %%mm5 \n\t"
+
+#if HAVE_MMX2
+ "pminsw %%mm0, %%mm4 \n\t"
+ "pminsw %%mm1, %%mm5 \n\t"
+#else
+ "movq %%mm4, %%mm2 \n\t"
+ "psubusw %%mm0, %%mm2 \n\t"
+ "psubw %%mm2, %%mm4 \n\t"
+ "movq %%mm5, %%mm2 \n\t"
+ "psubusw %%mm1, %%mm2 \n\t"
+ "psubw %%mm2, %%mm5 \n\t"
+#endif
+ "pxor %%mm6, %%mm4 \n\t"
+ "pxor %%mm7, %%mm5 \n\t"
+ "psubw %%mm6, %%mm4 \n\t"
+ "psubw %%mm7, %%mm5 \n\t"
+ "packsswb %%mm5, %%mm4 \n\t"
+ "movq (%0), %%mm0 \n\t"
+ "paddb %%mm4, %%mm0 \n\t"
+ "movq %%mm0, (%0) \n\t"
+ "movq (%0, %1), %%mm0 \n\t"
+ "psubb %%mm4, %%mm0 \n\t"
+ "movq %%mm0, (%0, %1) \n\t"
+
+ : "+r" (src)
+ : "r" ((stride_t)stride), "m" (c->pQPb)
+ : "%"REG_a, "%"REG_c
+ );
+#else
+ const stride_t l1= stride;
+ const stride_t l2= stride + l1;
+ const stride_t l3= stride + l2;
+ const stride_t l4= stride + l3;
+ const stride_t l5= stride + l4;
+ const stride_t l6= stride + l5;
+ const stride_t l7= stride + l6;
+ const stride_t l8= stride + l7;
+// const int l9= stride + l8;
+ int x;
+ src+= stride*3;
+ for(x=0; x<BLOCK_SIZE; x++)
+ {
+ const int middleEnergy= 5*(src[l5] - src[l4]) + 2*(src[l3] - src[l6]);
+ if(ABS(middleEnergy) < 8*c->QP)
+ {
+ const int q=(src[l4] - src[l5])/2;
+ const int leftEnergy= 5*(src[l3] - src[l2]) + 2*(src[l1] - src[l4]);
+ const int rightEnergy= 5*(src[l7] - src[l6]) + 2*(src[l5] - src[l8]);
+
+ int d= ABS(middleEnergy) - MIN( ABS(leftEnergy), ABS(rightEnergy) );
+ d= MAX(d, 0);
+
+ d= (5*d + 32) >> 6;
+ d*= SIGN(-middleEnergy);
+
+ if(q>0)
+ {
+ d= d<0 ? 0 : d;
+ d= d>q ? q : d;
+ }
+ else
+ {
+ d= d>0 ? 0 : d;
+ d= d<q ? q : d;
+ }
+
+ src[l4]-= d;
+ src[l5]+= d;
+ }
+ src++;
+ }
+#endif
+}
+#endif //HAVE_ALTIVEC
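+/* Restating what the scalar branch of doVertDefFilter above computes per
+   column, as a reference (this is the existing C fallback, not new behaviour):
+   with q = (l4-l5)/2 and the three energies
+       lE = 5*(l3-l2) + 2*(l1-l4)
+       mE = 5*(l5-l4) + 2*(l3-l6)
+       rE = 5*(l7-l6) + 2*(l5-l8)
+   the filter only acts when |mE| < 8*QP, in which case a correction
+       d = (5*MAX(0, |mE| - MIN(|lE|,|rE|)) + 32) >> 6
+   taken with the sign of -mE and clamped between 0 and q is subtracted
+   from l4 and added to l5; only the two pixels next to the block boundary
+   move, and never past their common average. */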
+
+#ifndef HAVE_ALTIVEC
+static inline void RENAME(dering)(uint8_t src[], stride_t stride, PPContext *c)
+{
+#if HAVE_MMX2 || HAVE_AMD3DNOW
+ asm volatile(
+ "pxor %%mm6, %%mm6 \n\t"
+ "pcmpeqb %%mm7, %%mm7 \n\t"
+ "movq %2, %%mm0 \n\t"
+ "punpcklbw %%mm6, %%mm0 \n\t"
+ "psrlw $1, %%mm0 \n\t"
+ "psubw %%mm7, %%mm0 \n\t"
+ "packuswb %%mm0, %%mm0 \n\t"
+ "movq %%mm0, %3 \n\t"
+
+ "lea (%0, %1), %%"REG_a" \n\t"
+ "lea (%%"REG_a", %1, 4), %%"REG_d" \n\t"
+
+// 0 1 2 3 4 5 6 7 8 9
+// %0 eax eax+%1 eax+2%1 %0+4%1 edx edx+%1 edx+2%1 %0+8%1 edx+4%1
+
+#undef FIND_MIN_MAX
+#if HAVE_MMX2
+#define REAL_FIND_MIN_MAX(addr)\
+ "movq " #addr ", %%mm0 \n\t"\
+ "pminub %%mm0, %%mm7 \n\t"\
+ "pmaxub %%mm0, %%mm6 \n\t"
+#else
+#define REAL_FIND_MIN_MAX(addr)\
+ "movq " #addr ", %%mm0 \n\t"\
+ "movq %%mm7, %%mm1 \n\t"\
+ "psubusb %%mm0, %%mm6 \n\t"\
+ "paddb %%mm0, %%mm6 \n\t"\
+ "psubusb %%mm0, %%mm1 \n\t"\
+ "psubb %%mm1, %%mm7 \n\t"
+#endif
+#define FIND_MIN_MAX(addr) REAL_FIND_MIN_MAX(addr)
+
+FIND_MIN_MAX((%%REGa))
+FIND_MIN_MAX((%%REGa, %1))
+FIND_MIN_MAX((%%REGa, %1, 2))
+FIND_MIN_MAX((%0, %1, 4))
+FIND_MIN_MAX((%%REGd))
+FIND_MIN_MAX((%%REGd, %1))
+FIND_MIN_MAX((%%REGd, %1, 2))
+FIND_MIN_MAX((%0, %1, 8))
+
+ "movq %%mm7, %%mm4 \n\t"
+ "psrlq $8, %%mm7 \n\t"
+#if HAVE_MMX2
+ "pminub %%mm4, %%mm7 \n\t" // min of pixels
+ "pshufw $0xF9, %%mm7, %%mm4 \n\t"
+ "pminub %%mm4, %%mm7 \n\t" // min of pixels
+ "pshufw $0xFE, %%mm7, %%mm4 \n\t"
+ "pminub %%mm4, %%mm7 \n\t"
+#else
+ "movq %%mm7, %%mm1 \n\t"
+ "psubusb %%mm4, %%mm1 \n\t"
+ "psubb %%mm1, %%mm7 \n\t"
+ "movq %%mm7, %%mm4 \n\t"
+ "psrlq $16, %%mm7 \n\t"
+ "movq %%mm7, %%mm1 \n\t"
+ "psubusb %%mm4, %%mm1 \n\t"
+ "psubb %%mm1, %%mm7 \n\t"
+ "movq %%mm7, %%mm4 \n\t"
+ "psrlq $32, %%mm7 \n\t"
+ "movq %%mm7, %%mm1 \n\t"
+ "psubusb %%mm4, %%mm1 \n\t"
+ "psubb %%mm1, %%mm7 \n\t"
+#endif
+
+
+ "movq %%mm6, %%mm4 \n\t"
+ "psrlq $8, %%mm6 \n\t"
+#if HAVE_MMX2
+ "pmaxub %%mm4, %%mm6 \n\t" // max of pixels
+ "pshufw $0xF9, %%mm6, %%mm4 \n\t"
+ "pmaxub %%mm4, %%mm6 \n\t"
+ "pshufw $0xFE, %%mm6, %%mm4 \n\t"
+ "pmaxub %%mm4, %%mm6 \n\t"
+#else
+ "psubusb %%mm4, %%mm6 \n\t"
+ "paddb %%mm4, %%mm6 \n\t"
+ "movq %%mm6, %%mm4 \n\t"
+ "psrlq $16, %%mm6 \n\t"
+ "psubusb %%mm4, %%mm6 \n\t"
+ "paddb %%mm4, %%mm6 \n\t"
+ "movq %%mm6, %%mm4 \n\t"
+ "psrlq $32, %%mm6 \n\t"
+ "psubusb %%mm4, %%mm6 \n\t"
+ "paddb %%mm4, %%mm6 \n\t"
+#endif
+ "movq %%mm6, %%mm0 \n\t" // max
+ "psubb %%mm7, %%mm6 \n\t" // max - min
+ "movd %%mm6, %%ecx \n\t"
+ "cmpb "MANGLE(deringThreshold)", %%cl \n\t"
+ " jb 1f \n\t"
+ "lea -24(%%"REG_SP"), %%"REG_c" \n\t"
+ "and "ALIGN_MASK", %%"REG_c" \n\t"
+ PAVGB(%%mm0, %%mm7) // a=(max + min)/2
+ "punpcklbw %%mm7, %%mm7 \n\t"
+ "punpcklbw %%mm7, %%mm7 \n\t"
+ "punpcklbw %%mm7, %%mm7 \n\t"
+ "movq %%mm7, (%%"REG_c") \n\t"
+
+ "movq (%0), %%mm0 \n\t" // L10
+ "movq %%mm0, %%mm1 \n\t" // L10
+ "movq %%mm0, %%mm2 \n\t" // L10
+ "psllq $8, %%mm1 \n\t"
+ "psrlq $8, %%mm2 \n\t"
+ "movd -4(%0), %%mm3 \n\t"
+ "movd 8(%0), %%mm4 \n\t"
+ "psrlq $24, %%mm3 \n\t"
+ "psllq $56, %%mm4 \n\t"
+ "por %%mm3, %%mm1 \n\t" // L00
+ "por %%mm4, %%mm2 \n\t" // L20
+ "movq %%mm1, %%mm3 \n\t" // L00
+ PAVGB(%%mm2, %%mm1) // (L20 + L00)/2
+ PAVGB(%%mm0, %%mm1) // (L20 + L00 + 2L10)/4
+ "psubusb %%mm7, %%mm0 \n\t"
+ "psubusb %%mm7, %%mm2 \n\t"
+ "psubusb %%mm7, %%mm3 \n\t"
+ "pcmpeqb "MANGLE(b00)", %%mm0 \n\t" // L10 > a ? 0 : -1
+ "pcmpeqb "MANGLE(b00)", %%mm2 \n\t" // L20 > a ? 0 : -1
+ "pcmpeqb "MANGLE(b00)", %%mm3 \n\t" // L00 > a ? 0 : -1
+ "paddb %%mm2, %%mm0 \n\t"
+ "paddb %%mm3, %%mm0 \n\t"
+
+ "movq (%%"REG_a"), %%mm2 \n\t" // L11
+ "movq %%mm2, %%mm3 \n\t" // L11
+ "movq %%mm2, %%mm4 \n\t" // L11
+ "psllq $8, %%mm3 \n\t"
+ "psrlq $8, %%mm4 \n\t"
+ "movd -4(%%"REG_a"), %%mm5 \n\t"
+ "movd 8(%%"REG_a"), %%mm6 \n\t"
+ "psrlq $24, %%mm5 \n\t"
+ "psllq $56, %%mm6 \n\t"
+ "por %%mm5, %%mm3 \n\t" // L01
+ "por %%mm6, %%mm4 \n\t" // L21
+ "movq %%mm3, %%mm5 \n\t" // L01
+ PAVGB(%%mm4, %%mm3) // (L21 + L01)/2
+ PAVGB(%%mm2, %%mm3) // (L21 + L01 + 2L11)/4
+ "psubusb %%mm7, %%mm2 \n\t"
+ "psubusb %%mm7, %%mm4 \n\t"
+ "psubusb %%mm7, %%mm5 \n\t"
+ "pcmpeqb "MANGLE(b00)", %%mm2 \n\t" // L11 > a ? 0 : -1
+ "pcmpeqb "MANGLE(b00)", %%mm4 \n\t" // L21 > a ? 0 : -1
+ "pcmpeqb "MANGLE(b00)", %%mm5 \n\t" // L01 > a ? 0 : -1
+ "paddb %%mm4, %%mm2 \n\t"
+ "paddb %%mm5, %%mm2 \n\t"
+// 0, 2, 3, 1
+#define REAL_DERING_CORE(dst,src,ppsx,psx,sx,pplx,plx,lx,t0,t1) \
+ "movq " #src ", " #sx " \n\t" /* src[0] */\
+ "movq " #sx ", " #lx " \n\t" /* src[0] */\
+ "movq " #sx ", " #t0 " \n\t" /* src[0] */\
+ "psllq $8, " #lx " \n\t"\
+ "psrlq $8, " #t0 " \n\t"\
+ "movd -4" #src ", " #t1 " \n\t"\
+ "psrlq $24, " #t1 " \n\t"\
+ "por " #t1 ", " #lx " \n\t" /* src[-1] */\
+ "movd 8" #src ", " #t1 " \n\t"\
+ "psllq $56, " #t1 " \n\t"\
+ "por " #t1 ", " #t0 " \n\t" /* src[+1] */\
+ "movq " #lx ", " #t1 " \n\t" /* src[-1] */\
+ PAVGB(t0, lx) /* (src[-1] + src[+1])/2 */\
+ PAVGB(sx, lx) /* (src[-1] + 2src[0] + src[+1])/4 */\
+ PAVGB(lx, pplx) \
+ "movq " #lx ", 8(%%"REG_c") \n\t"\
+ "movq (%%"REG_c"), " #lx " \n\t"\
+ "psubusb " #lx ", " #t1 " \n\t"\
+ "psubusb " #lx ", " #t0 " \n\t"\
+ "psubusb " #lx ", " #sx " \n\t"\
+ "movq "MANGLE(b00)", " #lx " \n\t"\
+ "pcmpeqb " #lx ", " #t1 " \n\t" /* src[-1] > a ? 0 : -1*/\
+ "pcmpeqb " #lx ", " #t0 " \n\t" /* src[+1] > a ? 0 : -1*/\
+ "pcmpeqb " #lx ", " #sx " \n\t" /* src[0] > a ? 0 : -1*/\
+ "paddb " #t1 ", " #t0 " \n\t"\
+ "paddb " #t0 ", " #sx " \n\t"\
+\
+ PAVGB(plx, pplx) /* filtered */\
+ "movq " #dst ", " #t0 " \n\t" /* dst */\
+ "movq " #t0 ", " #t1 " \n\t" /* dst */\
+ "psubusb %3, " #t0 " \n\t"\
+ "paddusb %3, " #t1 " \n\t"\
+ PMAXUB(t0, pplx)\
+ PMINUB(t1, pplx, t0)\
+ "paddb " #sx ", " #ppsx " \n\t"\
+ "paddb " #psx ", " #ppsx " \n\t"\
+ "#paddb "MANGLE(b02)", " #ppsx " \n\t"\
+ "pand "MANGLE(b08)", " #ppsx " \n\t"\
+ "pcmpeqb " #lx ", " #ppsx " \n\t"\
+ "pand " #ppsx ", " #pplx " \n\t"\
+ "pandn " #dst ", " #ppsx " \n\t"\
+ "por " #pplx ", " #ppsx " \n\t"\
+ "movq " #ppsx ", " #dst " \n\t"\
+ "movq 8(%%"REG_c"), " #lx " \n\t"
+
+#define DERING_CORE(dst,src,ppsx,psx,sx,pplx,plx,lx,t0,t1) \
+ REAL_DERING_CORE(dst,src,ppsx,psx,sx,pplx,plx,lx,t0,t1)
+/*
+0000000
+1111111
+
+1111110
+1111101
+1111100
+1111011
+1111010
+1111001
+
+1111000
+1110111
+
+*/
+//DERING_CORE(dst,src ,ppsx ,psx ,sx ,pplx ,plx ,lx ,t0 ,t1)
+DERING_CORE((%%REGa),(%%REGa, %1) ,%%mm0,%%mm2,%%mm4,%%mm1,%%mm3,%%mm5,%%mm6,%%mm7)
+DERING_CORE((%%REGa, %1),(%%REGa, %1, 2) ,%%mm2,%%mm4,%%mm0,%%mm3,%%mm5,%%mm1,%%mm6,%%mm7)
+DERING_CORE((%%REGa, %1, 2),(%0, %1, 4) ,%%mm4,%%mm0,%%mm2,%%mm5,%%mm1,%%mm3,%%mm6,%%mm7)
+DERING_CORE((%0, %1, 4),(%%REGd) ,%%mm0,%%mm2,%%mm4,%%mm1,%%mm3,%%mm5,%%mm6,%%mm7)
+DERING_CORE((%%REGd),(%%REGd, %1) ,%%mm2,%%mm4,%%mm0,%%mm3,%%mm5,%%mm1,%%mm6,%%mm7)
+DERING_CORE((%%REGd, %1), (%%REGd, %1, 2),%%mm4,%%mm0,%%mm2,%%mm5,%%mm1,%%mm3,%%mm6,%%mm7)
+DERING_CORE((%%REGd, %1, 2),(%0, %1, 8) ,%%mm0,%%mm2,%%mm4,%%mm1,%%mm3,%%mm5,%%mm6,%%mm7)
+DERING_CORE((%0, %1, 8),(%%REGd, %1, 4) ,%%mm2,%%mm4,%%mm0,%%mm3,%%mm5,%%mm1,%%mm6,%%mm7)
+
+ "1: \n\t"
+ : : "r" (src), "r" ((stride_t)stride), "m" (c->pQPb), "m"(c->pQPb2)
+ : "%"REG_a, "%"REG_d, "%"REG_c
+ );
+#else
+ int y;
+ int min=255;
+ int max=0;
+ int avg;
+ uint8_t *p;
+ int s[10];
+ const int QP2= c->QP/2 + 1;
+
+ for(y=1; y<9; y++)
+ {
+ int x;
+ p= src + stride*y;
+ for(x=1; x<9; x++)
+ {
+ p++;
+ if(*p > max) max= *p;
+ if(*p < min) min= *p;
+ }
+ }
+ avg= (min + max + 1)>>1;
+
+ if(max - min <deringThreshold) return;
+
+ for(y=0; y<10; y++)
+ {
+ int t = 0;
+
+ if(src[stride*y + 0] > avg) t+= 1;
+ if(src[stride*y + 1] > avg) t+= 2;
+ if(src[stride*y + 2] > avg) t+= 4;
+ if(src[stride*y + 3] > avg) t+= 8;
+ if(src[stride*y + 4] > avg) t+= 16;
+ if(src[stride*y + 5] > avg) t+= 32;
+ if(src[stride*y + 6] > avg) t+= 64;
+ if(src[stride*y + 7] > avg) t+= 128;
+ if(src[stride*y + 8] > avg) t+= 256;
+ if(src[stride*y + 9] > avg) t+= 512;
+
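+        /* the low ten bits of t flag pixels above avg; the next two lines
+           mirror the complement into bits 16..25 and keep a bit only when both
+           horizontal neighbours agree with it, so after the row-wise AND in the
+           following loop a pixel is only filtered when its whole 3x3
+           neighbourhood lies on one side of avg */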
+ t |= (~t)<<16;
+ t &= (t<<1) & (t>>1);
+ s[y] = t;
+ }
+
+ for(y=1; y<9; y++)
+ {
+ int t = s[y-1] & s[y] & s[y+1];
+ t|= t>>16;
+ s[y-1]= t;
+ }
+
+ for(y=1; y<9; y++)
+ {
+ int x;
+ int t = s[y-1];
+
+ p= src + stride*y;
+ for(x=1; x<9; x++)
+ {
+ p++;
+ if(t & (1<<x))
+ {
+ int f= (*(p-stride-1)) + 2*(*(p-stride)) + (*(p-stride+1))
+ +2*(*(p -1)) + 4*(*p ) + 2*(*(p +1))
+ +(*(p+stride-1)) + 2*(*(p+stride)) + (*(p+stride+1));
+ f= (f + 8)>>4;
+
+#ifdef DEBUG_DERING_THRESHOLD
+ asm volatile("emms\n\t":);
+ {
+ static long long numPixels=0;
+ if(x!=1 && x!=8 && y!=1 && y!=8) numPixels++;
+// if((max-min)<20 || (max-min)*QP<200)
+// if((max-min)*QP < 500)
+// if(max-min<QP/2)
+ if(max-min < 20)
+ {
+ static int numSkiped=0;
+ static int errorSum=0;
+ static int worstQP=0;
+ static int worstRange=0;
+ static int worstDiff=0;
+ int diff= (f - *p);
+ int absDiff= ABS(diff);
+ int error= diff*diff;
+
+ if(x==1 || x==8 || y==1 || y==8) continue;
+
+ numSkiped++;
+ if(absDiff > worstDiff)
+ {
+ worstDiff= absDiff;
+ worstQP= QP;
+ worstRange= max-min;
+ }
+ errorSum+= error;
+
+ if(1024LL*1024LL*1024LL % numSkiped == 0)
+ {
+ }
+ }
+ }
+#endif
+ if (*p + QP2 < f) *p= *p + QP2;
+ else if(*p - QP2 > f) *p= *p - QP2;
+ else *p=f;
+ }
+ }
+ }
+#ifdef DEBUG_DERING_THRESHOLD
+ if(max-min < 20)
+ {
+ for(y=1; y<9; y++)
+ {
+ int x;
+ int t = 0;
+ p= src + stride*y;
+ for(x=1; x<9; x++)
+ {
+ p++;
+ *p = MIN(*p + 20, 255);
+ }
+ }
+// src[0] = src[7]=src[stride*7]=src[stride*7 + 7]=255;
+ }
+#endif
+#endif
+}
+#endif //HAVE_ALTIVEC
+
+/**
+ * Deinterlaces the given block by linearly interpolating every second line.
+ * will be called for every 8x8 block and can read & write from lines 4-15
+ * lines 0-3 have been passed through the deblock / dering filters already, but can be read too
+ * lines 4-12 will be read into the deblocking filter and should be deinterlaced
+ */
+static inline void RENAME(deInterlaceInterpolateLinear)(uint8_t src[], stride_t stride)
+{
+#if HAVE_MMX2 || HAVE_AMD3DNOW
+ src+= 4*stride;
+ asm volatile(
+ "lea (%0, %1), %%"REG_a" \n\t"
+ "lea (%%"REG_a", %1, 4), %%"REG_c" \n\t"
+// 0 1 2 3 4 5 6 7 8 9
+// %0 eax eax+%1 eax+2%1 %0+4%1 ecx ecx+%1 ecx+2%1 %0+8%1 ecx+4%1
+
+ "movq (%0), %%mm0 \n\t"
+ "movq (%%"REG_a", %1), %%mm1 \n\t"
+ PAVGB(%%mm1, %%mm0)
+ "movq %%mm0, (%%"REG_a") \n\t"
+ "movq (%0, %1, 4), %%mm0 \n\t"
+ PAVGB(%%mm0, %%mm1)
+ "movq %%mm1, (%%"REG_a", %1, 2) \n\t"
+ "movq (%%"REG_c", %1), %%mm1 \n\t"
+ PAVGB(%%mm1, %%mm0)
+ "movq %%mm0, (%%"REG_c") \n\t"
+ "movq (%0, %1, 8), %%mm0 \n\t"
+ PAVGB(%%mm0, %%mm1)
+ "movq %%mm1, (%%"REG_c", %1, 2) \n\t"
+
+ : : "r" (src), "r" ((stride_t)stride)
+ : "%"REG_a, "%"REG_c
+ );
+#else
+ int a, b, x;
+ src+= 4*stride;
+
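+    /* (a|b) - (((a^b)&0xFEFEFEFEUL)>>1) averages four packed bytes at once:
+       per byte it equals (a+b+1)>>1, and the 0xFE mask keeps the shifted bit
+       of one byte from bleeding into the byte below it */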
+ for(x=0; x<2; x++){
+ a= *(uint32_t*)&src[stride*0];
+ b= *(uint32_t*)&src[stride*2];
+ *(uint32_t*)&src[stride*1]= (a|b) - (((a^b)&0xFEFEFEFEUL)>>1);
+ a= *(uint32_t*)&src[stride*4];
+ *(uint32_t*)&src[stride*3]= (a|b) - (((a^b)&0xFEFEFEFEUL)>>1);
+ b= *(uint32_t*)&src[stride*6];
+ *(uint32_t*)&src[stride*5]= (a|b) - (((a^b)&0xFEFEFEFEUL)>>1);
+ a= *(uint32_t*)&src[stride*8];
+ *(uint32_t*)&src[stride*7]= (a|b) - (((a^b)&0xFEFEFEFEUL)>>1);
+ src += 4;
+ }
+#endif
+}
+
+/**
+ * Deinterlaces the given block by cubically interpolating every second line.
+ * will be called for every 8x8 block and can read & write from lines 4-15
+ * lines 0-3 have been passed through the deblock / dering filters already, but can be read too
+ * lines 4-12 will be read into the deblocking filter and should be deinterlaced
+ * this filter will read lines 3-15 and write 7-13
+ * (a scalar sketch of the tap weights follows this function)
+ */
+static inline void RENAME(deInterlaceInterpolateCubic)(uint8_t src[], stride_t stride)
+{
+#if HAVE_MMX2 || HAVE_AMD3DNOW
+ src+= stride*3;
+ asm volatile(
+ "lea (%0, %1), %%"REG_a" \n\t"
+ "lea (%%"REG_a", %1, 4), %%"REG_d" \n\t"
+ "lea (%%"REG_d", %1, 4), %%"REG_c" \n\t"
+ "add %1, %%"REG_c" \n\t"
+ "pxor %%mm7, %%mm7 \n\t"
+// 0 1 2 3 4 5 6 7 8 9 10
+// %0 eax eax+%1 eax+2%1 %0+4%1 edx edx+%1 edx+2%1 %0+8%1 edx+4%1 ecx
+
+#define REAL_DEINT_CUBIC(a,b,c,d,e)\
+ "movq " #a ", %%mm0 \n\t"\
+ "movq " #b ", %%mm1 \n\t"\
+ "movq " #d ", %%mm2 \n\t"\
+ "movq " #e ", %%mm3 \n\t"\
+ PAVGB(%%mm2, %%mm1) /* (b+d) /2 */\
+    PAVGB(%%mm3, %%mm0) /* (a+e) /2 */\
+ "movq %%mm0, %%mm2 \n\t"\
+ "punpcklbw %%mm7, %%mm0 \n\t"\
+ "punpckhbw %%mm7, %%mm2 \n\t"\
+ "movq %%mm1, %%mm3 \n\t"\
+ "punpcklbw %%mm7, %%mm1 \n\t"\
+ "punpckhbw %%mm7, %%mm3 \n\t"\
+ "psubw %%mm1, %%mm0 \n\t" /* L(a+e - (b+d))/2 */\
+ "psubw %%mm3, %%mm2 \n\t" /* H(a+e - (b+d))/2 */\
+ "psraw $3, %%mm0 \n\t" /* L(a+e - (b+d))/16 */\
+ "psraw $3, %%mm2 \n\t" /* H(a+e - (b+d))/16 */\
+ "psubw %%mm0, %%mm1 \n\t" /* L(9b + 9d - a - e)/16 */\
+ "psubw %%mm2, %%mm3 \n\t" /* H(9b + 9d - a - e)/16 */\
+ "packuswb %%mm3, %%mm1 \n\t"\
+ "movq %%mm1, " #c " \n\t"
+#define DEINT_CUBIC(a,b,c,d,e) REAL_DEINT_CUBIC(a,b,c,d,e)
+
+DEINT_CUBIC((%0), (%%REGa, %1), (%%REGa, %1, 2), (%0, %1, 4), (%%REGd, %1))
+DEINT_CUBIC((%%REGa, %1), (%0, %1, 4), (%%REGd), (%%REGd, %1), (%0, %1, 8))
+DEINT_CUBIC((%0, %1, 4), (%%REGd, %1), (%%REGd, %1, 2), (%0, %1, 8), (%%REGc))
+DEINT_CUBIC((%%REGd, %1), (%0, %1, 8), (%%REGd, %1, 4), (%%REGc), (%%REGc, %1, 2))
+
+ : : "r" (src), "r" ((stride_t)stride)
+ : "%"REG_a, "%"REG_d, "%"REG_c
+ );
+#else
+ int x;
+ src+= stride*3;
+ for(x=0; x<8; x++)
+ {
+ src[stride*3] = CLIP((-src[0] + 9*src[stride*2] + 9*src[stride*4] - src[stride*6])>>4);
+ src[stride*5] = CLIP((-src[stride*2] + 9*src[stride*4] + 9*src[stride*6] - src[stride*8])>>4);
+ src[stride*7] = CLIP((-src[stride*4] + 9*src[stride*6] + 9*src[stride*8] - src[stride*10])>>4);
+ src[stride*9] = CLIP((-src[stride*6] + 9*src[stride*8] + 9*src[stride*10] - src[stride*12])>>4);
+ src++;
+ }
+#endif
+}
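+/* Minimal per-pixel sketch of the cubic tap set used above, matching the C
+   fallback (no rounding term); b and c are the field lines next to the one
+   being interpolated, a and d the outer ones. Illustrative only, this helper
+   is not part of the original file: */
+#if 0
+static inline int deint_cubic_sketch(int a, int b, int c, int d)
+{
+    return CLIP((-a + 9*b + 9*c - d) >> 4); /* same CLIP macro as the fallback */
+}
+#endif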
+
+/**
+ * Deinterlaces the given block by filtering every second line with a (-1 4 2 4 -1) filter.
+ * will be called for every 8x8 block and can read & write from lines 4-15
+ * lines 0-3 have been passed through the deblock / dering filters already, but can be read too
+ * lines 4-12 will be read into the deblocking filter and should be deinterlaced
+ * this filter will read lines 4-13 and write 5-11
+ * (a scalar sketch of the tap weights follows this function)
+ */
+static inline void RENAME(deInterlaceFF)(uint8_t src[], stride_t stride, uint8_t *tmp)
+{
+#if HAVE_MMX2 || HAVE_AMD3DNOW
+ src+= stride*4;
+ asm volatile(
+ "lea (%0, %1), %%"REG_a" \n\t"
+ "lea (%%"REG_a", %1, 4), %%"REG_d" \n\t"
+ "pxor %%mm7, %%mm7 \n\t"
+ "movq (%2), %%mm0 \n\t"
+// 0 1 2 3 4 5 6 7 8 9 10
+// %0 eax eax+%1 eax+2%1 %0+4%1 edx edx+%1 edx+2%1 %0+8%1 edx+4%1 ecx
+
+#define REAL_DEINT_FF(a,b,c,d)\
+ "movq " #a ", %%mm1 \n\t"\
+ "movq " #b ", %%mm2 \n\t"\
+ "movq " #c ", %%mm3 \n\t"\
+ "movq " #d ", %%mm4 \n\t"\
+ PAVGB(%%mm3, %%mm1) \
+ PAVGB(%%mm4, %%mm0) \
+ "movq %%mm0, %%mm3 \n\t"\
+ "punpcklbw %%mm7, %%mm0 \n\t"\
+ "punpckhbw %%mm7, %%mm3 \n\t"\
+ "movq %%mm1, %%mm4 \n\t"\
+ "punpcklbw %%mm7, %%mm1 \n\t"\
+ "punpckhbw %%mm7, %%mm4 \n\t"\
+ "psllw $2, %%mm1 \n\t"\
+ "psllw $2, %%mm4 \n\t"\
+ "psubw %%mm0, %%mm1 \n\t"\
+ "psubw %%mm3, %%mm4 \n\t"\
+ "movq %%mm2, %%mm5 \n\t"\
+ "movq %%mm2, %%mm0 \n\t"\
+ "punpcklbw %%mm7, %%mm2 \n\t"\
+ "punpckhbw %%mm7, %%mm5 \n\t"\
+ "paddw %%mm2, %%mm1 \n\t"\
+ "paddw %%mm5, %%mm4 \n\t"\
+ "psraw $2, %%mm1 \n\t"\
+ "psraw $2, %%mm4 \n\t"\
+ "packuswb %%mm4, %%mm1 \n\t"\
+ "movq %%mm1, " #b " \n\t"\
+
+#define DEINT_FF(a,b,c,d) REAL_DEINT_FF(a,b,c,d)
+
+DEINT_FF((%0) , (%%REGa) , (%%REGa, %1), (%%REGa, %1, 2))
+DEINT_FF((%%REGa, %1), (%%REGa, %1, 2), (%0, %1, 4), (%%REGd) )
+DEINT_FF((%0, %1, 4), (%%REGd) , (%%REGd, %1), (%%REGd, %1, 2))
+DEINT_FF((%%REGd, %1), (%%REGd, %1, 2), (%0, %1, 8), (%%REGd, %1, 4))
+
+ "movq %%mm0, (%2) \n\t"
+ : : "r" (src), "r" ((stride_t)stride), "r"(tmp)
+ : "%"REG_a, "%"REG_d
+ );
+#else
+ int x;
+ src+= stride*4;
+ for(x=0; x<8; x++)
+ {
+ int t1= tmp[x];
+ int t2= src[stride*1];
+
+ src[stride*1]= CLIP((-t1 + 4*src[stride*0] + 2*t2 + 4*src[stride*2] - src[stride*3] + 4)>>3);
+ t1= src[stride*4];
+ src[stride*3]= CLIP((-t2 + 4*src[stride*2] + 2*t1 + 4*src[stride*4] - src[stride*5] + 4)>>3);
+ t2= src[stride*6];
+ src[stride*5]= CLIP((-t1 + 4*src[stride*4] + 2*t2 + 4*src[stride*6] - src[stride*7] + 4)>>3);
+ t1= src[stride*8];
+ src[stride*7]= CLIP((-t2 + 4*src[stride*6] + 2*t1 + 4*src[stride*8] - src[stride*9] + 4)>>3);
+ tmp[x]= t1;
+
+ src++;
+ }
+#endif
+}
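+/* Per-pixel sketch of the (-1 4 2 4 -1)/8 tap set applied above; m2/m1 are the
+   lines two/one above, p0 the line being rewritten, p1/p2 the lines below.
+   Illustrative only, this helper is not part of the original file: */
+#if 0
+static inline int deint_ff_sketch(int m2, int m1, int p0, int p1, int p2)
+{
+    return CLIP((-m2 + 4*m1 + 2*p0 + 4*p1 - p2 + 4) >> 3);
+}
+#endif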
+
+/**
+ * Deinterlaces the given block by filtering every line with a (-1 2 6 2 -1) filter.
+ * will be called for every 8x8 block and can read & write from lines 4-15
+ * lines 0-3 have been passed through the deblock / dering filters already, but can be read too
+ * lines 4-12 will be read into the deblocking filter and should be deinterlaced
+ * this filter will read lines 4-13 and write 4-11
+ * (a scalar sketch of the tap weights follows this function)
+ */
+static inline void RENAME(deInterlaceL5)(uint8_t src[], stride_t stride, uint8_t *tmp, uint8_t *tmp2)
+{
+#if HAVE_MMX2 || HAVE_AMD3DNOW
+ src+= stride*4;
+ asm volatile(
+ "lea (%0, %1), %%"REG_a" \n\t"
+ "lea (%%"REG_a", %1, 4), %%"REG_d" \n\t"
+ "pxor %%mm7, %%mm7 \n\t"
+ "movq (%2), %%mm0 \n\t"
+ "movq (%3), %%mm1 \n\t"
+// 0 1 2 3 4 5 6 7 8 9 10
+// %0 eax eax+%1 eax+2%1 %0+4%1 edx edx+%1 edx+2%1 %0+8%1 edx+4%1 ecx
+
+#define REAL_DEINT_L5(t1,t2,a,b,c)\
+ "movq " #a ", %%mm2 \n\t"\
+ "movq " #b ", %%mm3 \n\t"\
+ "movq " #c ", %%mm4 \n\t"\
+ PAVGB(t2, %%mm3) \
+ PAVGB(t1, %%mm4) \
+ "movq %%mm2, %%mm5 \n\t"\
+ "movq %%mm2, " #t1 " \n\t"\
+ "punpcklbw %%mm7, %%mm2 \n\t"\
+ "punpckhbw %%mm7, %%mm5 \n\t"\
+ "movq %%mm2, %%mm6 \n\t"\
+ "paddw %%mm2, %%mm2 \n\t"\
+ "paddw %%mm6, %%mm2 \n\t"\
+ "movq %%mm5, %%mm6 \n\t"\
+ "paddw %%mm5, %%mm5 \n\t"\
+ "paddw %%mm6, %%mm5 \n\t"\
+ "movq %%mm3, %%mm6 \n\t"\
+ "punpcklbw %%mm7, %%mm3 \n\t"\
+ "punpckhbw %%mm7, %%mm6 \n\t"\
+ "paddw %%mm3, %%mm3 \n\t"\
+ "paddw %%mm6, %%mm6 \n\t"\
+ "paddw %%mm3, %%mm2 \n\t"\
+ "paddw %%mm6, %%mm5 \n\t"\
+ "movq %%mm4, %%mm6 \n\t"\
+ "punpcklbw %%mm7, %%mm4 \n\t"\
+ "punpckhbw %%mm7, %%mm6 \n\t"\
+ "psubw %%mm4, %%mm2 \n\t"\
+ "psubw %%mm6, %%mm5 \n\t"\
+ "psraw $2, %%mm2 \n\t"\
+ "psraw $2, %%mm5 \n\t"\
+ "packuswb %%mm5, %%mm2 \n\t"\
+ "movq %%mm2, " #a " \n\t"\
+
+#define DEINT_L5(t1,t2,a,b,c) REAL_DEINT_L5(t1,t2,a,b,c)
+
+DEINT_L5(%%mm0, %%mm1, (%0) , (%%REGa) , (%%REGa, %1) )
+DEINT_L5(%%mm1, %%mm0, (%%REGa) , (%%REGa, %1) , (%%REGa, %1, 2))
+DEINT_L5(%%mm0, %%mm1, (%%REGa, %1) , (%%REGa, %1, 2), (%0, %1, 4) )
+DEINT_L5(%%mm1, %%mm0, (%%REGa, %1, 2), (%0, %1, 4) , (%%REGd) )
+DEINT_L5(%%mm0, %%mm1, (%0, %1, 4) , (%%REGd) , (%%REGd, %1) )
+DEINT_L5(%%mm1, %%mm0, (%%REGd) , (%%REGd, %1) , (%%REGd, %1, 2))
+DEINT_L5(%%mm0, %%mm1, (%%REGd, %1) , (%%REGd, %1, 2), (%0, %1, 8) )
+DEINT_L5(%%mm1, %%mm0, (%%REGd, %1, 2), (%0, %1, 8) , (%%REGd, %1, 4))
+
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm1, (%3) \n\t"
+ : : "r" (src), "r" ((stride_t)stride), "r"(tmp), "r"(tmp2)
+ : "%"REG_a, "%"REG_d
+ );
+#else
+ int x;
+ src+= stride*4;
+ for(x=0; x<8; x++)
+ {
+ int t1= tmp[x];
+ int t2= tmp2[x];
+ int t3= src[0];
+
+ src[stride*0]= CLIP((-(t1 + src[stride*2]) + 2*(t2 + src[stride*1]) + 6*t3 + 4)>>3);
+ t1= src[stride*1];
+ src[stride*1]= CLIP((-(t2 + src[stride*3]) + 2*(t3 + src[stride*2]) + 6*t1 + 4)>>3);
+ t2= src[stride*2];
+ src[stride*2]= CLIP((-(t3 + src[stride*4]) + 2*(t1 + src[stride*3]) + 6*t2 + 4)>>3);
+ t3= src[stride*3];
+ src[stride*3]= CLIP((-(t1 + src[stride*5]) + 2*(t2 + src[stride*4]) + 6*t3 + 4)>>3);
+ t1= src[stride*4];
+ src[stride*4]= CLIP((-(t2 + src[stride*6]) + 2*(t3 + src[stride*5]) + 6*t1 + 4)>>3);
+ t2= src[stride*5];
+ src[stride*5]= CLIP((-(t3 + src[stride*7]) + 2*(t1 + src[stride*6]) + 6*t2 + 4)>>3);
+ t3= src[stride*6];
+ src[stride*6]= CLIP((-(t1 + src[stride*8]) + 2*(t2 + src[stride*7]) + 6*t3 + 4)>>3);
+ t1= src[stride*7];
+ src[stride*7]= CLIP((-(t2 + src[stride*9]) + 2*(t3 + src[stride*8]) + 6*t1 + 4)>>3);
+
+ tmp[x]= t3;
+ tmp2[x]= t1;
+
+ src++;
+ }
+#endif
+}
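+/* Per-pixel sketch of the (-1 2 6 2 -1)/8 tap set applied above to every line;
+   m2/m1 are the lines two/one above, p0 the line being rewritten, p1/p2 the
+   lines below. Illustrative only, not part of the original file: */
+#if 0
+static inline int deint_l5_sketch(int m2, int m1, int p0, int p1, int p2)
+{
+    return CLIP((-(m2 + p2) + 2*(m1 + p1) + 6*p0 + 4) >> 3);
+}
+#endif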
+
+/**
+ * Deinterlaces the given block by filtering all lines with a (1 2 1) filter.
+ * will be called for every 8x8 block and can read & write from lines 4-15
+ * lines 0-3 have been passed through the deblock / dering filters already, but can be read too
+ * lines 4-12 will be read into the deblocking filter and should be deinterlaced
+ * this filter will read lines 4-13 and write 4-11
+ */
+static inline void RENAME(deInterlaceBlendLinear)(uint8_t src[], stride_t stride, uint8_t *tmp)
+{
+#if HAVE_MMX2 || HAVE_AMD3DNOW
+ src+= 4*stride;
+ asm volatile(
+ "lea (%0, %1), %%"REG_a" \n\t"
+ "lea (%%"REG_a", %1, 4), %%"REG_d" \n\t"
+// 0 1 2 3 4 5 6 7 8 9
+// %0 eax eax+%1 eax+2%1 %0+4%1 edx edx+%1 edx+2%1 %0+8%1 edx+4%1
+
+ "movq (%2), %%mm0 \n\t" // L0
+ "movq (%%"REG_a"), %%mm1 \n\t" // L2
+ PAVGB(%%mm1, %%mm0) // L0+L2
+ "movq (%0), %%mm2 \n\t" // L1
+ PAVGB(%%mm2, %%mm0)
+ "movq %%mm0, (%0) \n\t"
+ "movq (%%"REG_a", %1), %%mm0 \n\t" // L3
+ PAVGB(%%mm0, %%mm2) // L1+L3
+ PAVGB(%%mm1, %%mm2) // 2L2 + L1 + L3
+ "movq %%mm2, (%%"REG_a") \n\t"
+ "movq (%%"REG_a", %1, 2), %%mm2 \n\t" // L4
+ PAVGB(%%mm2, %%mm1) // L2+L4
+ PAVGB(%%mm0, %%mm1) // 2L3 + L2 + L4
+ "movq %%mm1, (%%"REG_a", %1) \n\t"
+ "movq (%0, %1, 4), %%mm1 \n\t" // L5
+ PAVGB(%%mm1, %%mm0) // L3+L5
+ PAVGB(%%mm2, %%mm0) // 2L4 + L3 + L5
+ "movq %%mm0, (%%"REG_a", %1, 2) \n\t"
+ "movq (%%"REG_d"), %%mm0 \n\t" // L6
+ PAVGB(%%mm0, %%mm2) // L4+L6
+ PAVGB(%%mm1, %%mm2) // 2L5 + L4 + L6
+ "movq %%mm2, (%0, %1, 4) \n\t"
+ "movq (%%"REG_d", %1), %%mm2 \n\t" // L7
+ PAVGB(%%mm2, %%mm1) // L5+L7
+ PAVGB(%%mm0, %%mm1) // 2L6 + L5 + L7
+ "movq %%mm1, (%%"REG_d") \n\t"
+ "movq (%%"REG_d", %1, 2), %%mm1 \n\t" // L8
+ PAVGB(%%mm1, %%mm0) // L6+L8
+ PAVGB(%%mm2, %%mm0) // 2L7 + L6 + L8
+ "movq %%mm0, (%%"REG_d", %1) \n\t"
+ "movq (%0, %1, 8), %%mm0 \n\t" // L9
+ PAVGB(%%mm0, %%mm2) // L7+L9
+ PAVGB(%%mm1, %%mm2) // 2L8 + L7 + L9
+ "movq %%mm2, (%%"REG_d", %1, 2) \n\t"
+ "movq %%mm1, (%2) \n\t"
+
+ : : "r" (src), "r" ((stride_t)stride), "r" (tmp)
+ : "%"REG_a, "%"REG_d
+ );
+#else
+ int a, b, c, x;
+ src+= 4*stride;
+
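+    /* two packed-byte identities are used below on four pixels at a time:
+       (a&c) + (((a^c)&0xFEFEFEFEUL)>>1) is the per-byte average rounded down,
+       (a|b) - (((a^b)&0xFEFEFEFEUL)>>1) the average rounded up; chaining them
+       yields the (1 2 1)/4 vertical blur described in the comment above */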
+ for(x=0; x<2; x++){
+ a= *(uint32_t*)&tmp[stride*0];
+ b= *(uint32_t*)&src[stride*0];
+ c= *(uint32_t*)&src[stride*1];
+ a= (a&c) + (((a^c)&0xFEFEFEFEUL)>>1);
+ *(uint32_t*)&src[stride*0]= (a|b) - (((a^b)&0xFEFEFEFEUL)>>1);
+
+ a= *(uint32_t*)&src[stride*2];
+ b= (a&b) + (((a^b)&0xFEFEFEFEUL)>>1);
+ *(uint32_t*)&src[stride*1]= (c|b) - (((c^b)&0xFEFEFEFEUL)>>1);
+
+ b= *(uint32_t*)&src[stride*3];
+ c= (b&c) + (((b^c)&0xFEFEFEFEUL)>>1);
+ *(uint32_t*)&src[stride*2]= (c|a) - (((c^a)&0xFEFEFEFEUL)>>1);
+
+ c= *(uint32_t*)&src[stride*4];
+ a= (a&c) + (((a^c)&0xFEFEFEFEUL)>>1);
+ *(uint32_t*)&src[stride*3]= (a|b) - (((a^b)&0xFEFEFEFEUL)>>1);
+
+ a= *(uint32_t*)&src[stride*5];
+ b= (a&b) + (((a^b)&0xFEFEFEFEUL)>>1);
+ *(uint32_t*)&src[stride*4]= (c|b) - (((c^b)&0xFEFEFEFEUL)>>1);
+
+ b= *(uint32_t*)&src[stride*6];
+ c= (b&c) + (((b^c)&0xFEFEFEFEUL)>>1);
+ *(uint32_t*)&src[stride*5]= (c|a) - (((c^a)&0xFEFEFEFEUL)>>1);
+
+ c= *(uint32_t*)&src[stride*7];
+ a= (a&c) + (((a^c)&0xFEFEFEFEUL)>>1);
+ *(uint32_t*)&src[stride*6]= (a|b) - (((a^b)&0xFEFEFEFEUL)>>1);
+
+ a= *(uint32_t*)&src[stride*8];
+ b= (a&b) + (((a^b)&0xFEFEFEFEUL)>>1);
+ *(uint32_t*)&src[stride*7]= (c|b) - (((c^b)&0xFEFEFEFEUL)>>1);
+
+ *(uint32_t*)&tmp[stride*0]= c;
+ src += 4;
+ tmp += 4;
+ }
+#endif
+}
+
+/**
+ * Deinterlaces the given block by applying a median filter to every second line.
+ * will be called for every 8x8 block and can read & write from lines 4-15,
+ * lines 0-3 have been passed through the deblock / dering filters already, but can be read too
+ * lines 4-12 will be read into the deblocking filter and should be deinterlaced
+ */
+static inline void RENAME(deInterlaceMedian)(uint8_t src[], stride_t stride)
+{
+#if HAVE_MMX
+ src+= 4*stride;
+#if HAVE_MMX2
+ asm volatile(
+ "lea (%0, %1), %%"REG_a" \n\t"
+ "lea (%%"REG_a", %1, 4), %%"REG_d" \n\t"
+// 0 1 2 3 4 5 6 7 8 9
+// %0 eax eax+%1 eax+2%1 %0+4%1 edx edx+%1 edx+2%1 %0+8%1 edx+4%1
+
+ "movq (%0), %%mm0 \n\t" //
+ "movq (%%"REG_a", %1), %%mm2 \n\t" //
+ "movq (%%"REG_a"), %%mm1 \n\t" //
+ "movq %%mm0, %%mm3 \n\t"
+ "pmaxub %%mm1, %%mm0 \n\t" //
+ "pminub %%mm3, %%mm1 \n\t" //
+ "pmaxub %%mm2, %%mm1 \n\t" //
+ "pminub %%mm1, %%mm0 \n\t"
+ "movq %%mm0, (%%"REG_a") \n\t"
+
+ "movq (%0, %1, 4), %%mm0 \n\t" //
+ "movq (%%"REG_a", %1, 2), %%mm1 \n\t" //
+ "movq %%mm2, %%mm3 \n\t"
+ "pmaxub %%mm1, %%mm2 \n\t" //
+ "pminub %%mm3, %%mm1 \n\t" //
+ "pmaxub %%mm0, %%mm1 \n\t" //
+ "pminub %%mm1, %%mm2 \n\t"
+ "movq %%mm2, (%%"REG_a", %1, 2) \n\t"
+
+ "movq (%%"REG_d"), %%mm2 \n\t" //
+ "movq (%%"REG_d", %1), %%mm1 \n\t" //
+ "movq %%mm2, %%mm3 \n\t"
+ "pmaxub %%mm0, %%mm2 \n\t" //
+ "pminub %%mm3, %%mm0 \n\t" //
+ "pmaxub %%mm1, %%mm0 \n\t" //
+ "pminub %%mm0, %%mm2 \n\t"
+ "movq %%mm2, (%%"REG_d") \n\t"
+
+ "movq (%%"REG_d", %1, 2), %%mm2 \n\t" //
+ "movq (%0, %1, 8), %%mm0 \n\t" //
+ "movq %%mm2, %%mm3 \n\t"
+ "pmaxub %%mm0, %%mm2 \n\t" //
+ "pminub %%mm3, %%mm0 \n\t" //
+ "pmaxub %%mm1, %%mm0 \n\t" //
+ "pminub %%mm0, %%mm2 \n\t"
+ "movq %%mm2, (%%"REG_d", %1, 2) \n\t"
+
+
+ : : "r" (src), "r" ((stride_t)stride)
+ : "%"REG_a, "%"REG_d
+ );
+
+#else // MMX without MMX2
+ asm volatile(
+ "lea (%0, %1), %%"REG_a" \n\t"
+ "lea (%%"REG_a", %1, 4), %%"REG_d" \n\t"
+// 0 1 2 3 4 5 6 7 8 9
+// %0 eax eax+%1 eax+2%1 %0+4%1 edx edx+%1 edx+2%1 %0+8%1 edx+4%1
+ "pxor %%mm7, %%mm7 \n\t"
+
+#define REAL_MEDIAN(a,b,c)\
+ "movq " #a ", %%mm0 \n\t"\
+ "movq " #b ", %%mm2 \n\t"\
+ "movq " #c ", %%mm1 \n\t"\
+ "movq %%mm0, %%mm3 \n\t"\
+ "movq %%mm1, %%mm4 \n\t"\
+ "movq %%mm2, %%mm5 \n\t"\
+ "psubusb %%mm1, %%mm3 \n\t"\
+ "psubusb %%mm2, %%mm4 \n\t"\
+ "psubusb %%mm0, %%mm5 \n\t"\
+ "pcmpeqb %%mm7, %%mm3 \n\t"\
+ "pcmpeqb %%mm7, %%mm4 \n\t"\
+ "pcmpeqb %%mm7, %%mm5 \n\t"\
+ "movq %%mm3, %%mm6 \n\t"\
+ "pxor %%mm4, %%mm3 \n\t"\
+ "pxor %%mm5, %%mm4 \n\t"\
+ "pxor %%mm6, %%mm5 \n\t"\
+ "por %%mm3, %%mm1 \n\t"\
+ "por %%mm4, %%mm2 \n\t"\
+ "por %%mm5, %%mm0 \n\t"\
+ "pand %%mm2, %%mm0 \n\t"\
+ "pand %%mm1, %%mm0 \n\t"\
+ "movq %%mm0, " #b " \n\t"
+#define MEDIAN(a,b,c) REAL_MEDIAN(a,b,c)
+
+MEDIAN((%0), (%%REGa), (%%REGa, %1))
+MEDIAN((%%REGa, %1), (%%REGa, %1, 2), (%0, %1, 4))
+MEDIAN((%0, %1, 4), (%%REGd), (%%REGd, %1))
+MEDIAN((%%REGd, %1), (%%REGd, %1, 2), (%0, %1, 8))
+
+ : : "r" (src), "r" ((stride_t)stride)
+ : "%"REG_a, "%"REG_d
+ );
+#endif // MMX
+#else
+ int x, y;
+ src+= 4*stride;
+ // FIXME - there should be a way to do a few columns in parallel like w/mmx
+ for(x=0; x<8; x++)
+ {
+ uint8_t *colsrc = src;
+ for (y=0; y<4; y++)
+ {
+ int a, b, c, d, e, f;
+ a = colsrc[0 ];
+ b = colsrc[stride ];
+ c = colsrc[stride*2];
+ d = (a-b)>>31;
+ e = (b-c)>>31;
+ f = (c-a)>>31;
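+            /* d, e and f are all-ones masks for a<b, b<c and c<a respectively
+               (sign bit smeared by the arithmetic shift); the combination on
+               the next line picks the median of a, b, c without branching */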
+ colsrc[stride ] = (a|(d^f)) & (b|(d^e)) & (c|(e^f));
+ colsrc += stride*2;
+ }
+ src++;
+ }
+#endif
+}
+
+#if HAVE_MMX
+/**
+ * transposes and shifts the given 8x8 block into dst1 and dst2
+ */
+static inline void RENAME(transpose1)(uint8_t *dst1, uint8_t *dst2, uint8_t *src, int srcStride)
+{
+ asm(
+ "lea (%0, %1), %%"REG_a" \n\t"
+// 0 1 2 3 4 5 6 7 8 9
+// %0 eax eax+%1 eax+2%1 %0+4%1 edx edx+%1 edx+2%1 %0+8%1 edx+4%1
+ "movq (%0), %%mm0 \n\t" // 12345678
+ "movq (%%"REG_a"), %%mm1 \n\t" // abcdefgh
+ "movq %%mm0, %%mm2 \n\t" // 12345678
+ "punpcklbw %%mm1, %%mm0 \n\t" // 1a2b3c4d
+ "punpckhbw %%mm1, %%mm2 \n\t" // 5e6f7g8h
+
+ "movq (%%"REG_a", %1), %%mm1 \n\t"
+ "movq (%%"REG_a", %1, 2), %%mm3 \n\t"
+ "movq %%mm1, %%mm4 \n\t"
+ "punpcklbw %%mm3, %%mm1 \n\t"
+ "punpckhbw %%mm3, %%mm4 \n\t"
+
+ "movq %%mm0, %%mm3 \n\t"
+ "punpcklwd %%mm1, %%mm0 \n\t"
+ "punpckhwd %%mm1, %%mm3 \n\t"
+ "movq %%mm2, %%mm1 \n\t"
+ "punpcklwd %%mm4, %%mm2 \n\t"
+ "punpckhwd %%mm4, %%mm1 \n\t"
+
+ "movd %%mm0, 128(%2) \n\t"
+ "psrlq $32, %%mm0 \n\t"
+ "movd %%mm0, 144(%2) \n\t"
+ "movd %%mm3, 160(%2) \n\t"
+ "psrlq $32, %%mm3 \n\t"
+ "movd %%mm3, 176(%2) \n\t"
+ "movd %%mm3, 48(%3) \n\t"
+ "movd %%mm2, 192(%2) \n\t"
+ "movd %%mm2, 64(%3) \n\t"
+ "psrlq $32, %%mm2 \n\t"
+ "movd %%mm2, 80(%3) \n\t"
+ "movd %%mm1, 96(%3) \n\t"
+ "psrlq $32, %%mm1 \n\t"
+ "movd %%mm1, 112(%3) \n\t"
+
+ "lea (%%"REG_a", %1, 4), %%"REG_a" \n\t"
+
+ "movq (%0, %1, 4), %%mm0 \n\t" // 12345678
+ "movq (%%"REG_a"), %%mm1 \n\t" // abcdefgh
+ "movq %%mm0, %%mm2 \n\t" // 12345678
+ "punpcklbw %%mm1, %%mm0 \n\t" // 1a2b3c4d
+ "punpckhbw %%mm1, %%mm2 \n\t" // 5e6f7g8h
+
+ "movq (%%"REG_a", %1), %%mm1 \n\t"
+ "movq (%%"REG_a", %1, 2), %%mm3 \n\t"
+ "movq %%mm1, %%mm4 \n\t"
+ "punpcklbw %%mm3, %%mm1 \n\t"
+ "punpckhbw %%mm3, %%mm4 \n\t"
+
+ "movq %%mm0, %%mm3 \n\t"
+ "punpcklwd %%mm1, %%mm0 \n\t"
+ "punpckhwd %%mm1, %%mm3 \n\t"
+ "movq %%mm2, %%mm1 \n\t"
+ "punpcklwd %%mm4, %%mm2 \n\t"
+ "punpckhwd %%mm4, %%mm1 \n\t"
+
+ "movd %%mm0, 132(%2) \n\t"
+ "psrlq $32, %%mm0 \n\t"
+ "movd %%mm0, 148(%2) \n\t"
+ "movd %%mm3, 164(%2) \n\t"
+ "psrlq $32, %%mm3 \n\t"
+ "movd %%mm3, 180(%2) \n\t"
+ "movd %%mm3, 52(%3) \n\t"
+ "movd %%mm2, 196(%2) \n\t"
+ "movd %%mm2, 68(%3) \n\t"
+ "psrlq $32, %%mm2 \n\t"
+ "movd %%mm2, 84(%3) \n\t"
+ "movd %%mm1, 100(%3) \n\t"
+ "psrlq $32, %%mm1 \n\t"
+ "movd %%mm1, 116(%3) \n\t"
+
+
+ :: "r" (src), "r" ((stride_t)srcStride), "r" (dst1), "r" (dst2)
+ : "%"REG_a
+ );
+}
+
+/**
+ * transposes the given 8x8 block
+ */
+static inline void RENAME(transpose2)(uint8_t *dst, int dstStride, uint8_t *src)
+{
+ asm(
+ "lea (%0, %1), %%"REG_a" \n\t"
+ "lea (%%"REG_a",%1,4), %%"REG_d"\n\t"
+// 0 1 2 3 4 5 6 7 8 9
+// %0 eax eax+%1 eax+2%1 %0+4%1 edx edx+%1 edx+2%1 %0+8%1 edx+4%1
+ "movq (%2), %%mm0 \n\t" // 12345678
+ "movq 16(%2), %%mm1 \n\t" // abcdefgh
+ "movq %%mm0, %%mm2 \n\t" // 12345678
+ "punpcklbw %%mm1, %%mm0 \n\t" // 1a2b3c4d
+ "punpckhbw %%mm1, %%mm2 \n\t" // 5e6f7g8h
+
+ "movq 32(%2), %%mm1 \n\t"
+ "movq 48(%2), %%mm3 \n\t"
+ "movq %%mm1, %%mm4 \n\t"
+ "punpcklbw %%mm3, %%mm1 \n\t"
+ "punpckhbw %%mm3, %%mm4 \n\t"
+
+ "movq %%mm0, %%mm3 \n\t"
+ "punpcklwd %%mm1, %%mm0 \n\t"
+ "punpckhwd %%mm1, %%mm3 \n\t"
+ "movq %%mm2, %%mm1 \n\t"
+ "punpcklwd %%mm4, %%mm2 \n\t"
+ "punpckhwd %%mm4, %%mm1 \n\t"
+
+ "movd %%mm0, (%0) \n\t"
+ "psrlq $32, %%mm0 \n\t"
+ "movd %%mm0, (%%"REG_a") \n\t"
+ "movd %%mm3, (%%"REG_a", %1) \n\t"
+ "psrlq $32, %%mm3 \n\t"
+ "movd %%mm3, (%%"REG_a", %1, 2) \n\t"
+ "movd %%mm2, (%0, %1, 4) \n\t"
+ "psrlq $32, %%mm2 \n\t"
+ "movd %%mm2, (%%"REG_d") \n\t"
+ "movd %%mm1, (%%"REG_d", %1) \n\t"
+ "psrlq $32, %%mm1 \n\t"
+ "movd %%mm1, (%%"REG_d", %1, 2) \n\t"
+
+
+ "movq 64(%2), %%mm0 \n\t" // 12345678
+ "movq 80(%2), %%mm1 \n\t" // abcdefgh
+ "movq %%mm0, %%mm2 \n\t" // 12345678
+ "punpcklbw %%mm1, %%mm0 \n\t" // 1a2b3c4d
+ "punpckhbw %%mm1, %%mm2 \n\t" // 5e6f7g8h
+
+ "movq 96(%2), %%mm1 \n\t"
+ "movq 112(%2), %%mm3 \n\t"
+ "movq %%mm1, %%mm4 \n\t"
+ "punpcklbw %%mm3, %%mm1 \n\t"
+ "punpckhbw %%mm3, %%mm4 \n\t"
+
+ "movq %%mm0, %%mm3 \n\t"
+ "punpcklwd %%mm1, %%mm0 \n\t"
+ "punpckhwd %%mm1, %%mm3 \n\t"
+ "movq %%mm2, %%mm1 \n\t"
+ "punpcklwd %%mm4, %%mm2 \n\t"
+ "punpckhwd %%mm4, %%mm1 \n\t"
+
+ "movd %%mm0, 4(%0) \n\t"
+ "psrlq $32, %%mm0 \n\t"
+ "movd %%mm0, 4(%%"REG_a") \n\t"
+ "movd %%mm3, 4(%%"REG_a", %1) \n\t"
+ "psrlq $32, %%mm3 \n\t"
+ "movd %%mm3, 4(%%"REG_a", %1, 2) \n\t"
+ "movd %%mm2, 4(%0, %1, 4) \n\t"
+ "psrlq $32, %%mm2 \n\t"
+ "movd %%mm2, 4(%%"REG_d") \n\t"
+ "movd %%mm1, 4(%%"REG_d", %1) \n\t"
+ "psrlq $32, %%mm1 \n\t"
+ "movd %%mm1, 4(%%"REG_d", %1, 2) \n\t"
+
+ :: "r" (dst), "r" ((stride_t)dstStride), "r" (src)
+ : "%"REG_a, "%"REG_d
+ );
+}
+#endif
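+/* For reference: ignoring the extra shifted copies that transpose1 also writes
+   into dst2 and the packed 16-byte-stride temp layout, the MMX transposes above
+   do the work of a plain scalar 8x8 transpose like the hypothetical helper
+   below (illustrative only, not part of the original file): */
+#if 0
+static inline void transpose8x8_sketch(uint8_t *dst, int dstStride,
+                                        const uint8_t *src, int srcStride)
+{
+    int x, y;
+    for(y=0; y<8; y++)
+        for(x=0; x<8; x++)
+            dst[x*dstStride + y]= src[y*srcStride + x];
+}
+#endif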
+//static long test=0;
+
+#ifndef HAVE_ALTIVEC
+static inline void RENAME(tempNoiseReducer)(uint8_t *src, stride_t stride,
+ uint8_t *tempBlured, uint32_t *tempBluredPast, int *maxNoise)
+{
+ // to save a register (FIXME do this outside of the loops)
+ tempBluredPast[127]= maxNoise[0];
+ tempBluredPast[128]= maxNoise[1];
+ tempBluredPast[129]= maxNoise[2];
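+    /* maxNoise is parked at indices 127..129 so the inline asm below can reach
+       both the thresholds and the per-block noise history through one pointer,
+       which is the register saving mentioned above; the 508/512/516 byte
+       offsets in the compares further down appear to be just 4*127..4*129 */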
+
+#define FAST_L2_DIFF
+//#define L1_DIFF // you should change the thresholds too if you try that one
+#if HAVE_MMX2 || HAVE_AMD3DNOW
+ asm volatile(
+ "lea (%2, %2, 2), %%"REG_a" \n\t" // 3*stride
+ "lea (%2, %2, 4), %%"REG_d" \n\t" // 5*stride
+ "lea (%%"REG_d", %2, 2), %%"REG_c" \n\t" // 7*stride
+// 0 1 2 3 4 5 6 7 8 9
+// %x %x+%2 %x+2%2 %x+eax %x+4%2 %x+edx %x+2eax %x+ecx %x+8%2
+//FIXME reorder?
+#ifdef L1_DIFF //needs mmx2
+ "movq (%0), %%mm0 \n\t" // L0
+ "psadbw (%1), %%mm0 \n\t" // |L0-R0|
+ "movq (%0, %2), %%mm1 \n\t" // L1
+ "psadbw (%1, %2), %%mm1 \n\t" // |L1-R1|
+ "movq (%0, %2, 2), %%mm2 \n\t" // L2
+ "psadbw (%1, %2, 2), %%mm2 \n\t" // |L2-R2|
+ "movq (%0, %%"REG_a"), %%mm3 \n\t" // L3
+ "psadbw (%1, %%"REG_a"), %%mm3 \n\t" // |L3-R3|
+
+ "movq (%0, %2, 4), %%mm4 \n\t" // L4
+ "paddw %%mm1, %%mm0 \n\t"
+ "psadbw (%1, %2, 4), %%mm4 \n\t" // |L4-R4|
+ "movq (%0, %%"REG_d"), %%mm5 \n\t" // L5
+ "paddw %%mm2, %%mm0 \n\t"
+ "psadbw (%1, %%"REG_d"), %%mm5 \n\t" // |L5-R5|
+ "movq (%0, %%"REG_a", 2), %%mm6 \n\t" // L6
+ "paddw %%mm3, %%mm0 \n\t"
+ "psadbw (%1, %%"REG_a", 2), %%mm6 \n\t" // |L6-R6|
+ "movq (%0, %%"REG_c"), %%mm7 \n\t" // L7
+ "paddw %%mm4, %%mm0 \n\t"
+ "psadbw (%1, %%"REG_c"), %%mm7 \n\t" // |L7-R7|
+ "paddw %%mm5, %%mm6 \n\t"
+ "paddw %%mm7, %%mm6 \n\t"
+ "paddw %%mm6, %%mm0 \n\t"
+#else
+#if defined (FAST_L2_DIFF)
+ "pcmpeqb %%mm7, %%mm7 \n\t"
+ "movq "MANGLE(b80)", %%mm6 \n\t"
+ "pxor %%mm0, %%mm0 \n\t"
+#define REAL_L2_DIFF_CORE(a, b)\
+ "movq " #a ", %%mm5 \n\t"\
+ "movq " #b ", %%mm2 \n\t"\
+ "pxor %%mm7, %%mm2 \n\t"\
+ PAVGB(%%mm2, %%mm5)\
+ "paddb %%mm6, %%mm5 \n\t"\
+ "movq %%mm5, %%mm2 \n\t"\
+ "psllw $8, %%mm5 \n\t"\
+ "pmaddwd %%mm5, %%mm5 \n\t"\
+ "pmaddwd %%mm2, %%mm2 \n\t"\
+ "paddd %%mm2, %%mm5 \n\t"\
+ "psrld $14, %%mm5 \n\t"\
+ "paddd %%mm5, %%mm0 \n\t"
+
+#else
+ "pxor %%mm7, %%mm7 \n\t"
+ "pxor %%mm0, %%mm0 \n\t"
+#define REAL_L2_DIFF_CORE(a, b)\
+ "movq " #a ", %%mm5 \n\t"\
+ "movq " #b ", %%mm2 \n\t"\
+ "movq %%mm5, %%mm1 \n\t"\
+ "movq %%mm2, %%mm3 \n\t"\
+ "punpcklbw %%mm7, %%mm5 \n\t"\
+ "punpckhbw %%mm7, %%mm1 \n\t"\
+ "punpcklbw %%mm7, %%mm2 \n\t"\
+ "punpckhbw %%mm7, %%mm3 \n\t"\
+ "psubw %%mm2, %%mm5 \n\t"\
+ "psubw %%mm3, %%mm1 \n\t"\
+ "pmaddwd %%mm5, %%mm5 \n\t"\
+ "pmaddwd %%mm1, %%mm1 \n\t"\
+ "paddd %%mm1, %%mm5 \n\t"\
+ "paddd %%mm5, %%mm0 \n\t"
+
+#endif
+
+#define L2_DIFF_CORE(a, b) REAL_L2_DIFF_CORE(a, b)
+
+L2_DIFF_CORE((%0), (%1))
+L2_DIFF_CORE((%0, %2), (%1, %2))
+L2_DIFF_CORE((%0, %2, 2), (%1, %2, 2))
+L2_DIFF_CORE((%0, %%REGa), (%1, %%REGa))
+L2_DIFF_CORE((%0, %2, 4), (%1, %2, 4))
+L2_DIFF_CORE((%0, %%REGd), (%1, %%REGd))
+L2_DIFF_CORE((%0, %%REGa,2), (%1, %%REGa,2))
+L2_DIFF_CORE((%0, %%REGc), (%1, %%REGc))
+
+#endif
+
+ "movq %%mm0, %%mm4 \n\t"
+ "psrlq $32, %%mm0 \n\t"
+ "paddd %%mm0, %%mm4 \n\t"
+ "movd %%mm4, %%ecx \n\t"
+ "shll $2, %%ecx \n\t"
+ "mov %3, %%"REG_d" \n\t"
+ "addl -4(%%"REG_d"), %%ecx \n\t"
+ "addl 4(%%"REG_d"), %%ecx \n\t"
+ "addl -1024(%%"REG_d"), %%ecx \n\t"
+ "addl $4, %%ecx \n\t"
+ "addl 1024(%%"REG_d"), %%ecx \n\t"
+ "shrl $3, %%ecx \n\t"
+ "movl %%ecx, (%%"REG_d") \n\t"
+
+// "mov %3, %%"REG_c" \n\t"
+// "mov %%"REG_c", test \n\t"
+// "jmp 4f \n\t"
+ "cmpl 512(%%"REG_d"), %%ecx \n\t"
+ " jb 2f \n\t"
+ "cmpl 516(%%"REG_d"), %%ecx \n\t"
+ " jb 1f \n\t"
+
+ "lea (%%"REG_a", %2, 2), %%"REG_d" \n\t" // 5*stride
+ "lea (%%"REG_d", %2, 2), %%"REG_c" \n\t" // 7*stride
+ "movq (%0), %%mm0 \n\t" // L0
+ "movq (%0, %2), %%mm1 \n\t" // L1
+ "movq (%0, %2, 2), %%mm2 \n\t" // L2
+ "movq (%0, %%"REG_a"), %%mm3 \n\t" // L3
+ "movq (%0, %2, 4), %%mm4 \n\t" // L4
+ "movq (%0, %%"REG_d"), %%mm5 \n\t" // L5
+ "movq (%0, %%"REG_a", 2), %%mm6 \n\t" // L6
+ "movq (%0, %%"REG_c"), %%mm7 \n\t" // L7
+ "movq %%mm0, (%1) \n\t" // L0
+ "movq %%mm1, (%1, %2) \n\t" // L1
+ "movq %%mm2, (%1, %2, 2) \n\t" // L2
+ "movq %%mm3, (%1, %%"REG_a") \n\t" // L3
+ "movq %%mm4, (%1, %2, 4) \n\t" // L4
+ "movq %%mm5, (%1, %%"REG_d") \n\t" // L5
+ "movq %%mm6, (%1, %%"REG_a", 2) \n\t" // L6
+ "movq %%mm7, (%1, %%"REG_c") \n\t" // L7
+ "jmp 4f \n\t"
+
+ "1: \n\t"
+ "lea (%%"REG_a", %2, 2), %%"REG_d" \n\t" // 5*stride
+ "lea (%%"REG_d", %2, 2), %%"REG_c" \n\t" // 7*stride
+ "movq (%0), %%mm0 \n\t" // L0
+ PAVGB((%1), %%mm0) // L0
+ "movq (%0, %2), %%mm1 \n\t" // L1
+ PAVGB((%1, %2), %%mm1) // L1
+ "movq (%0, %2, 2), %%mm2 \n\t" // L2
+ PAVGB((%1, %2, 2), %%mm2) // L2
+ "movq (%0, %%"REG_a"), %%mm3 \n\t" // L3
+ PAVGB((%1, %%REGa), %%mm3) // L3
+ "movq (%0, %2, 4), %%mm4 \n\t" // L4
+ PAVGB((%1, %2, 4), %%mm4) // L4
+ "movq (%0, %%"REG_d"), %%mm5 \n\t" // L5
+ PAVGB((%1, %%REGd), %%mm5) // L5
+ "movq (%0, %%"REG_a", 2), %%mm6 \n\t" // L6
+ PAVGB((%1, %%REGa, 2), %%mm6) // L6
+ "movq (%0, %%"REG_c"), %%mm7 \n\t" // L7
+ PAVGB((%1, %%REGc), %%mm7) // L7
+ "movq %%mm0, (%1) \n\t" // R0
+ "movq %%mm1, (%1, %2) \n\t" // R1
+ "movq %%mm2, (%1, %2, 2) \n\t" // R2
+ "movq %%mm3, (%1, %%"REG_a") \n\t" // R3
+ "movq %%mm4, (%1, %2, 4) \n\t" // R4
+ "movq %%mm5, (%1, %%"REG_d") \n\t" // R5
+ "movq %%mm6, (%1, %%"REG_a", 2) \n\t" // R6
+ "movq %%mm7, (%1, %%"REG_c") \n\t" // R7
+ "movq %%mm0, (%0) \n\t" // L0
+ "movq %%mm1, (%0, %2) \n\t" // L1
+ "movq %%mm2, (%0, %2, 2) \n\t" // L2
+ "movq %%mm3, (%0, %%"REG_a") \n\t" // L3
+ "movq %%mm4, (%0, %2, 4) \n\t" // L4
+ "movq %%mm5, (%0, %%"REG_d") \n\t" // L5
+ "movq %%mm6, (%0, %%"REG_a", 2) \n\t" // L6
+ "movq %%mm7, (%0, %%"REG_c") \n\t" // L7
+ "jmp 4f \n\t"
+
+ "2: \n\t"
+ "cmpl 508(%%"REG_d"), %%ecx \n\t"
+ " jb 3f \n\t"
+
+ "lea (%%"REG_a", %2, 2), %%"REG_d" \n\t" // 5*stride
+ "lea (%%"REG_d", %2, 2), %%"REG_c" \n\t" // 7*stride
+ "movq (%0), %%mm0 \n\t" // L0
+ "movq (%0, %2), %%mm1 \n\t" // L1
+ "movq (%0, %2, 2), %%mm2 \n\t" // L2
+ "movq (%0, %%"REG_a"), %%mm3 \n\t" // L3
+ "movq (%1), %%mm4 \n\t" // R0
+ "movq (%1, %2), %%mm5 \n\t" // R1
+ "movq (%1, %2, 2), %%mm6 \n\t" // R2
+ "movq (%1, %%"REG_a"), %%mm7 \n\t" // R3
+ PAVGB(%%mm4, %%mm0)
+ PAVGB(%%mm5, %%mm1)
+ PAVGB(%%mm6, %%mm2)
+ PAVGB(%%mm7, %%mm3)
+ PAVGB(%%mm4, %%mm0)
+ PAVGB(%%mm5, %%mm1)
+ PAVGB(%%mm6, %%mm2)
+ PAVGB(%%mm7, %%mm3)
+ "movq %%mm0, (%1) \n\t" // R0
+ "movq %%mm1, (%1, %2) \n\t" // R1
+ "movq %%mm2, (%1, %2, 2) \n\t" // R2
+ "movq %%mm3, (%1, %%"REG_a") \n\t" // R3
+ "movq %%mm0, (%0) \n\t" // L0
+ "movq %%mm1, (%0, %2) \n\t" // L1
+ "movq %%mm2, (%0, %2, 2) \n\t" // L2
+ "movq %%mm3, (%0, %%"REG_a") \n\t" // L3
+
+ "movq (%0, %2, 4), %%mm0 \n\t" // L4
+ "movq (%0, %%"REG_d"), %%mm1 \n\t" // L5
+ "movq (%0, %%"REG_a", 2), %%mm2 \n\t" // L6
+ "movq (%0, %%"REG_c"), %%mm3 \n\t" // L7
+ "movq (%1, %2, 4), %%mm4 \n\t" // R4
+ "movq (%1, %%"REG_d"), %%mm5 \n\t" // R5
+ "movq (%1, %%"REG_a", 2), %%mm6 \n\t" // R6
+ "movq (%1, %%"REG_c"), %%mm7 \n\t" // R7
+ PAVGB(%%mm4, %%mm0)
+ PAVGB(%%mm5, %%mm1)
+ PAVGB(%%mm6, %%mm2)
+ PAVGB(%%mm7, %%mm3)
+ PAVGB(%%mm4, %%mm0)
+ PAVGB(%%mm5, %%mm1)
+ PAVGB(%%mm6, %%mm2)
+ PAVGB(%%mm7, %%mm3)
+ "movq %%mm0, (%1, %2, 4) \n\t" // R4
+ "movq %%mm1, (%1, %%"REG_d") \n\t" // R5
+ "movq %%mm2, (%1, %%"REG_a", 2) \n\t" // R6
+ "movq %%mm3, (%1, %%"REG_c") \n\t" // R7
+ "movq %%mm0, (%0, %2, 4) \n\t" // L4
+ "movq %%mm1, (%0, %%"REG_d") \n\t" // L5
+ "movq %%mm2, (%0, %%"REG_a", 2) \n\t" // L6
+ "movq %%mm3, (%0, %%"REG_c") \n\t" // L7
+ "jmp 4f \n\t"
+
+ "3: \n\t"
+ "lea (%%"REG_a", %2, 2), %%"REG_d" \n\t" // 5*stride
+ "lea (%%"REG_d", %2, 2), %%"REG_c" \n\t" // 7*stride
+ "movq (%0), %%mm0 \n\t" // L0
+ "movq (%0, %2), %%mm1 \n\t" // L1
+ "movq (%0, %2, 2), %%mm2 \n\t" // L2
+ "movq (%0, %%"REG_a"), %%mm3 \n\t" // L3
+ "movq (%1), %%mm4 \n\t" // R0
+ "movq (%1, %2), %%mm5 \n\t" // R1
+ "movq (%1, %2, 2), %%mm6 \n\t" // R2
+ "movq (%1, %%"REG_a"), %%mm7 \n\t" // R3
+ PAVGB(%%mm4, %%mm0)
+ PAVGB(%%mm5, %%mm1)
+ PAVGB(%%mm6, %%mm2)
+ PAVGB(%%mm7, %%mm3)
+ PAVGB(%%mm4, %%mm0)
+ PAVGB(%%mm5, %%mm1)
+ PAVGB(%%mm6, %%mm2)
+ PAVGB(%%mm7, %%mm3)
+ PAVGB(%%mm4, %%mm0)
+ PAVGB(%%mm5, %%mm1)
+ PAVGB(%%mm6, %%mm2)
+ PAVGB(%%mm7, %%mm3)
+ "movq %%mm0, (%1) \n\t" // R0
+ "movq %%mm1, (%1, %2) \n\t" // R1
+ "movq %%mm2, (%1, %2, 2) \n\t" // R2
+ "movq %%mm3, (%1, %%"REG_a") \n\t" // R3
+ "movq %%mm0, (%0) \n\t" // L0
+ "movq %%mm1, (%0, %2) \n\t" // L1
+ "movq %%mm2, (%0, %2, 2) \n\t" // L2
+ "movq %%mm3, (%0, %%"REG_a") \n\t" // L3
+
+ "movq (%0, %2, 4), %%mm0 \n\t" // L4
+ "movq (%0, %%"REG_d"), %%mm1 \n\t" // L5
+ "movq (%0, %%"REG_a", 2), %%mm2 \n\t" // L6
+ "movq (%0, %%"REG_c"), %%mm3 \n\t" // L7
+ "movq (%1, %2, 4), %%mm4 \n\t" // R4
+ "movq (%1, %%"REG_d"), %%mm5 \n\t" // R5
+ "movq (%1, %%"REG_a", 2), %%mm6 \n\t" // R6
+ "movq (%1, %%"REG_c"), %%mm7 \n\t" // R7
+ PAVGB(%%mm4, %%mm0)
+ PAVGB(%%mm5, %%mm1)
+ PAVGB(%%mm6, %%mm2)
+ PAVGB(%%mm7, %%mm3)
+ PAVGB(%%mm4, %%mm0)
+ PAVGB(%%mm5, %%mm1)
+ PAVGB(%%mm6, %%mm2)
+ PAVGB(%%mm7, %%mm3)
+ PAVGB(%%mm4, %%mm0)
+ PAVGB(%%mm5, %%mm1)
+ PAVGB(%%mm6, %%mm2)
+ PAVGB(%%mm7, %%mm3)
+ "movq %%mm0, (%1, %2, 4) \n\t" // R4
+ "movq %%mm1, (%1, %%"REG_d") \n\t" // R5
+ "movq %%mm2, (%1, %%"REG_a", 2) \n\t" // R6
+ "movq %%mm3, (%1, %%"REG_c") \n\t" // R7
+ "movq %%mm0, (%0, %2, 4) \n\t" // L4
+ "movq %%mm1, (%0, %%"REG_d") \n\t" // L5
+ "movq %%mm2, (%0, %%"REG_a", 2) \n\t" // L6
+ "movq %%mm3, (%0, %%"REG_c") \n\t" // L7
+
+ "4: \n\t"
+
+ :: "r" (src), "r" (tempBlured), "r"((stride_t)stride), "m" (tempBluredPast)
+ : "%"REG_a, "%"REG_d, "%"REG_c, "memory"
+ );
+//printf("%d\n", test);
+#else
+{
+ int y;
+ int d=0;
+// int sysd=0;
+ int i;
+
+ for(y=0; y<8; y++)
+ {
+ int x;
+ for(x=0; x<8; x++)
+ {
+ int ref= tempBlured[ x + y*stride ];
+ int cur= src[ x + y*stride ];
+ int d1=ref - cur;
+// if(x==0 || x==7) d1+= d1>>1;
+// if(y==0 || y==7) d1+= d1>>1;
+// d+= ABS(d1);
+ d+= d1*d1;
+// sysd+= d1;
+ }
+ }
+ i=d;
+ d= (
+ 4*d
+ +(*(tempBluredPast-256))
+ +(*(tempBluredPast-1))+ (*(tempBluredPast+1))
+ +(*(tempBluredPast+256))
+ +4)>>3;
+ *tempBluredPast=i;
+// ((*tempBluredPast)*3 + d + 2)>>2;
+
+//printf("%d %d %d\n", maxNoise[0], maxNoise[1], maxNoise[2]);
+/*
+Switch between
+ 1 0 0 0 0 0 0 (0)
+64 32 16 8 4 2 1 (1)
+64 48 36 27 20 15 11 (33) (approx)
+64 56 49 43 37 33 29 (200) (approx)
+*/
+ if(d > maxNoise[1])
+ {
+ if(d < maxNoise[2])
+ {
+ for(y=0; y<8; y++)
+ {
+ int x;
+ for(x=0; x<8; x++)
+ {
+ int ref= tempBlured[ x + y*stride ];
+ int cur= src[ x + y*stride ];
+ tempBlured[ x + y*stride ]=
+ src[ x + y*stride ]=
+ (ref + cur + 1)>>1;
+ }
+ }
+ }
+ else
+ {
+ for(y=0; y<8; y++)
+ {
+ int x;
+ for(x=0; x<8; x++)
+ {
+ tempBlured[ x + y*stride ]= src[ x + y*stride ];
+ }
+ }
+ }
+ }
+ else
+ {
+ if(d < maxNoise[0])
+ {
+ for(y=0; y<8; y++)
+ {
+ int x;
+ for(x=0; x<8; x++)
+ {
+ int ref= tempBlured[ x + y*stride ];
+ int cur= src[ x + y*stride ];
+ tempBlured[ x + y*stride ]=
+ src[ x + y*stride ]=
+ (ref*7 + cur + 4)>>3;
+ }
+ }
+ }
+ else
+ {
+ for(y=0; y<8; y++)
+ {
+ int x;
+ for(x=0; x<8; x++)
+ {
+ int ref= tempBlured[ x + y*stride ];
+ int cur= src[ x + y*stride ];
+ tempBlured[ x + y*stride ]=
+ src[ x + y*stride ]=
+ (ref*3 + cur + 2)>>2;
+ }
+ }
+ }
+ }
+}
+#endif
+}
+#endif //HAVE_ALTIVEC
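
The C fallback above spells out what the MMX2 path hides in PAVGB chains: depending on where the block's noise score d lands relative to maxNoise[0..2], each pixel becomes (ref*7 + cur + 4)>>3, (ref*3 + cur + 2)>>2, (ref + cur + 1)>>1, or plain cur. A quick worked example of the strongest blend, with made-up pixel values:

    /* hypothetical values: running average ref = 100, incoming pixel cur = 108 */
    int ref = 100, cur = 108;
    int out = (ref*7 + cur + 4) >> 3;   /* (700 + 108 + 4) >> 3 = 812 >> 3 = 101 */
    /* a new frame moves the temporal average by only ~1/8 of the difference */
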
+
+#if HAVE_MMX
+/**
+ * accurate deblock filter
+ */
+static av_always_inline void RENAME(do_a_deblock)(uint8_t *src, int step, int stride, PPContext *c){
+ int64_t dc_mask, eq_mask, both_masks;
+ int64_t sums[10*8*2];
+    src+= step*3; // src points to the beginning of the 8x8 block
+//START_TIMER
+asm volatile(
+ "movq %0, %%mm7 \n\t"
+ "movq %1, %%mm6 \n\t"
+ : : "m" (c->mmxDcOffset[c->nonBQP]), "m" (c->mmxDcThreshold[c->nonBQP])
+ );
+
+asm volatile(
+ "lea (%2, %3), %%"REG_a" \n\t"
+// 0 1 2 3 4 5 6 7 8 9
+// %1 eax eax+%2 eax+2%2 %1+4%2 ecx ecx+%2 ecx+2%2 %1+8%2 ecx+4%2
+
+ "movq (%2), %%mm0 \n\t"
+ "movq (%%"REG_a"), %%mm1 \n\t"
+ "movq %%mm1, %%mm3 \n\t"
+ "movq %%mm1, %%mm4 \n\t"
+ "psubb %%mm1, %%mm0 \n\t" // mm0 = differnece
+ "paddb %%mm7, %%mm0 \n\t"
+ "pcmpgtb %%mm6, %%mm0 \n\t"
+
+ "movq (%%"REG_a",%3), %%mm2 \n\t"
+ PMAXUB(%%mm2, %%mm4)
+ PMINUB(%%mm2, %%mm3, %%mm5)
+ "psubb %%mm2, %%mm1 \n\t"
+ "paddb %%mm7, %%mm1 \n\t"
+ "pcmpgtb %%mm6, %%mm1 \n\t"
+ "paddb %%mm1, %%mm0 \n\t"
+
+ "movq (%%"REG_a", %3, 2), %%mm1 \n\t"
+ PMAXUB(%%mm1, %%mm4)
+ PMINUB(%%mm1, %%mm3, %%mm5)
+ "psubb %%mm1, %%mm2 \n\t"
+ "paddb %%mm7, %%mm2 \n\t"
+ "pcmpgtb %%mm6, %%mm2 \n\t"
+ "paddb %%mm2, %%mm0 \n\t"
+
+ "lea (%%"REG_a", %3, 4), %%"REG_a" \n\t"
+
+ "movq (%2, %3, 4), %%mm2 \n\t"
+ PMAXUB(%%mm2, %%mm4)
+ PMINUB(%%mm2, %%mm3, %%mm5)
+ "psubb %%mm2, %%mm1 \n\t"
+ "paddb %%mm7, %%mm1 \n\t"
+ "pcmpgtb %%mm6, %%mm1 \n\t"
+ "paddb %%mm1, %%mm0 \n\t"
+
+ "movq (%%"REG_a"), %%mm1 \n\t"
+ PMAXUB(%%mm1, %%mm4)
+ PMINUB(%%mm1, %%mm3, %%mm5)
+ "psubb %%mm1, %%mm2 \n\t"
+ "paddb %%mm7, %%mm2 \n\t"
+ "pcmpgtb %%mm6, %%mm2 \n\t"
+ "paddb %%mm2, %%mm0 \n\t"
+
+ "movq (%%"REG_a", %3), %%mm2 \n\t"
+ PMAXUB(%%mm2, %%mm4)
+ PMINUB(%%mm2, %%mm3, %%mm5)
+ "psubb %%mm2, %%mm1 \n\t"
+ "paddb %%mm7, %%mm1 \n\t"
+ "pcmpgtb %%mm6, %%mm1 \n\t"
+ "paddb %%mm1, %%mm0 \n\t"
+
+ "movq (%%"REG_a", %3, 2), %%mm1 \n\t"
+ PMAXUB(%%mm1, %%mm4)
+ PMINUB(%%mm1, %%mm3, %%mm5)
+ "psubb %%mm1, %%mm2 \n\t"
+ "paddb %%mm7, %%mm2 \n\t"
+ "pcmpgtb %%mm6, %%mm2 \n\t"
+ "paddb %%mm2, %%mm0 \n\t"
+
+ "movq (%2, %3, 8), %%mm2 \n\t"
+ PMAXUB(%%mm2, %%mm4)
+ PMINUB(%%mm2, %%mm3, %%mm5)
+ "psubb %%mm2, %%mm1 \n\t"
+ "paddb %%mm7, %%mm1 \n\t"
+ "pcmpgtb %%mm6, %%mm1 \n\t"
+ "paddb %%mm1, %%mm0 \n\t"
+
+ "movq (%%"REG_a", %3, 4), %%mm1 \n\t"
+ "psubb %%mm1, %%mm2 \n\t"
+ "paddb %%mm7, %%mm2 \n\t"
+ "pcmpgtb %%mm6, %%mm2 \n\t"
+ "paddb %%mm2, %%mm0 \n\t"
+ "psubusb %%mm3, %%mm4 \n\t"
+
+ "pxor %%mm6, %%mm6 \n\t"
+ "movq %4, %%mm7 \n\t" // QP,..., QP
+ "paddusb %%mm7, %%mm7 \n\t" // 2QP ... 2QP
+ "psubusb %%mm4, %%mm7 \n\t" // Diff >=2QP -> 0
+ "pcmpeqb %%mm6, %%mm7 \n\t" // Diff < 2QP -> 0
+ "pcmpeqb %%mm6, %%mm7 \n\t" // Diff < 2QP -> 0
+ "movq %%mm7, %1 \n\t"
+
+ "movq %5, %%mm7 \n\t"
+ "punpcklbw %%mm7, %%mm7 \n\t"
+ "punpcklbw %%mm7, %%mm7 \n\t"
+ "punpcklbw %%mm7, %%mm7 \n\t"
+ "psubb %%mm0, %%mm6 \n\t"
+ "pcmpgtb %%mm7, %%mm6 \n\t"
+ "movq %%mm6, %0 \n\t"
+
+ : "=m" (eq_mask), "=m" (dc_mask)
+ : "r" (src), "r" ((stride_t)step), "m" (c->pQPb), "m"(c->ppMode.flatnessThreshold)
+ : "%"REG_a
+ );
+
+ both_masks = dc_mask & eq_mask;
+
+ if(both_masks){
+ stride_t offset= -8*step;
+ int64_t *temp_sums= sums;
+
+ asm volatile(
+ "movq %2, %%mm0 \n\t" // QP,..., QP
+ "pxor %%mm4, %%mm4 \n\t"
+
+ "movq (%0), %%mm6 \n\t"
+ "movq (%0, %1), %%mm5 \n\t"
+ "movq %%mm5, %%mm1 \n\t"
+ "movq %%mm6, %%mm2 \n\t"
+ "psubusb %%mm6, %%mm5 \n\t"
+ "psubusb %%mm1, %%mm2 \n\t"
+ "por %%mm5, %%mm2 \n\t" // ABS Diff of lines
+ "psubusb %%mm2, %%mm0 \n\t" // diff >= QP -> 0
+ "pcmpeqb %%mm4, %%mm0 \n\t" // diff >= QP -> FF
+
+ "pxor %%mm6, %%mm1 \n\t"
+ "pand %%mm0, %%mm1 \n\t"
+ "pxor %%mm1, %%mm6 \n\t"
+ // 0:QP 6:First
+
+ "movq (%0, %1, 8), %%mm5 \n\t"
+ "add %1, %0 \n\t" // %0 points to line 1 not 0
+ "movq (%0, %1, 8), %%mm7 \n\t"
+ "movq %%mm5, %%mm1 \n\t"
+ "movq %%mm7, %%mm2 \n\t"
+ "psubusb %%mm7, %%mm5 \n\t"
+ "psubusb %%mm1, %%mm2 \n\t"
+ "por %%mm5, %%mm2 \n\t" // ABS Diff of lines
+ "movq %2, %%mm0 \n\t" // QP,..., QP
+ "psubusb %%mm2, %%mm0 \n\t" // diff >= QP -> 0
+ "pcmpeqb %%mm4, %%mm0 \n\t" // diff >= QP -> FF
+
+ "pxor %%mm7, %%mm1 \n\t"
+ "pand %%mm0, %%mm1 \n\t"
+ "pxor %%mm1, %%mm7 \n\t"
+
+ "movq %%mm6, %%mm5 \n\t"
+ "punpckhbw %%mm4, %%mm6 \n\t"
+ "punpcklbw %%mm4, %%mm5 \n\t"
+ // 4:0 5/6:First 7:Last
+
+ "movq %%mm5, %%mm0 \n\t"
+ "movq %%mm6, %%mm1 \n\t"
+ "psllw $2, %%mm0 \n\t"
+ "psllw $2, %%mm1 \n\t"
+ "paddw "MANGLE(w04)", %%mm0 \n\t"
+ "paddw "MANGLE(w04)", %%mm1 \n\t"
+
+#define NEXT\
+ "movq (%0), %%mm2 \n\t"\
+ "movq (%0), %%mm3 \n\t"\
+ "add %1, %0 \n\t"\
+ "punpcklbw %%mm4, %%mm2 \n\t"\
+ "punpckhbw %%mm4, %%mm3 \n\t"\
+ "paddw %%mm2, %%mm0 \n\t"\
+ "paddw %%mm3, %%mm1 \n\t"
+
+#define PREV\
+ "movq (%0), %%mm2 \n\t"\
+ "movq (%0), %%mm3 \n\t"\
+ "add %1, %0 \n\t"\
+ "punpcklbw %%mm4, %%mm2 \n\t"\
+ "punpckhbw %%mm4, %%mm3 \n\t"\
+ "psubw %%mm2, %%mm0 \n\t"\
+ "psubw %%mm3, %%mm1 \n\t"
+
+
+ NEXT //0
+ NEXT //1
+ NEXT //2
+ "movq %%mm0, (%3) \n\t"
+ "movq %%mm1, 8(%3) \n\t"
+
+ NEXT //3
+ "psubw %%mm5, %%mm0 \n\t"
+ "psubw %%mm6, %%mm1 \n\t"
+ "movq %%mm0, 16(%3) \n\t"
+ "movq %%mm1, 24(%3) \n\t"
+
+ NEXT //4
+ "psubw %%mm5, %%mm0 \n\t"
+ "psubw %%mm6, %%mm1 \n\t"
+ "movq %%mm0, 32(%3) \n\t"
+ "movq %%mm1, 40(%3) \n\t"
+
+ NEXT //5
+ "psubw %%mm5, %%mm0 \n\t"
+ "psubw %%mm6, %%mm1 \n\t"
+ "movq %%mm0, 48(%3) \n\t"
+ "movq %%mm1, 56(%3) \n\t"
+
+ NEXT //6
+ "psubw %%mm5, %%mm0 \n\t"
+ "psubw %%mm6, %%mm1 \n\t"
+ "movq %%mm0, 64(%3) \n\t"
+ "movq %%mm1, 72(%3) \n\t"
+
+ "movq %%mm7, %%mm6 \n\t"
+ "punpckhbw %%mm4, %%mm7 \n\t"
+ "punpcklbw %%mm4, %%mm6 \n\t"
+
+ NEXT //7
+ "mov %4, %0 \n\t"
+ "add %1, %0 \n\t"
+ PREV //0
+ "movq %%mm0, 80(%3) \n\t"
+ "movq %%mm1, 88(%3) \n\t"
+
+ PREV //1
+ "paddw %%mm6, %%mm0 \n\t"
+ "paddw %%mm7, %%mm1 \n\t"
+ "movq %%mm0, 96(%3) \n\t"
+ "movq %%mm1, 104(%3) \n\t"
+
+ PREV //2
+ "paddw %%mm6, %%mm0 \n\t"
+ "paddw %%mm7, %%mm1 \n\t"
+ "movq %%mm0, 112(%3) \n\t"
+ "movq %%mm1, 120(%3) \n\t"
+
+ PREV //3
+ "paddw %%mm6, %%mm0 \n\t"
+ "paddw %%mm7, %%mm1 \n\t"
+ "movq %%mm0, 128(%3) \n\t"
+ "movq %%mm1, 136(%3) \n\t"
+
+ PREV //4
+ "paddw %%mm6, %%mm0 \n\t"
+ "paddw %%mm7, %%mm1 \n\t"
+ "movq %%mm0, 144(%3) \n\t"
+ "movq %%mm1, 152(%3) \n\t"
+
+ "mov %4, %0 \n\t" //FIXME
+
+ : "+&r"(src)
+ : "r" ((stride_t)step), "m" (c->pQPb), "r"(sums), "g"(src)
+ );
+
+    src+= step; // src points to the beginning of the 8x8 block
+
+ asm volatile(
+ "movq %4, %%mm6 \n\t"
+ "pcmpeqb %%mm5, %%mm5 \n\t"
+ "pxor %%mm6, %%mm5 \n\t"
+ "pxor %%mm7, %%mm7 \n\t"
+
+ "1: \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq 8(%1), %%mm1 \n\t"
+ "paddw 32(%1), %%mm0 \n\t"
+ "paddw 40(%1), %%mm1 \n\t"
+ "movq (%0, %3), %%mm2 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "movq %%mm2, %%mm4 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpckhbw %%mm7, %%mm3 \n\t"
+ "paddw %%mm2, %%mm0 \n\t"
+ "paddw %%mm3, %%mm1 \n\t"
+ "paddw %%mm2, %%mm0 \n\t"
+ "paddw %%mm3, %%mm1 \n\t"
+ "psrlw $4, %%mm0 \n\t"
+ "psrlw $4, %%mm1 \n\t"
+ "packuswb %%mm1, %%mm0 \n\t"
+ "pand %%mm6, %%mm0 \n\t"
+ "pand %%mm5, %%mm4 \n\t"
+ "por %%mm4, %%mm0 \n\t"
+ "movq %%mm0, (%0, %3) \n\t"
+ "add $16, %1 \n\t"
+ "add %2, %0 \n\t"
+ " js 1b \n\t"
+
+ : "+r"(offset), "+r"(temp_sums)
+ : "r" ((stride_t)step), "r"(src - offset), "m"(both_masks)
+ );
+ }else
+        src+= step; // src points to the beginning of the 8x8 block
+
+ if(eq_mask != -1LL){
+ uint8_t *temp_src= src;
+ asm volatile(
+ "pxor %%mm7, %%mm7 \n\t"
+ "lea -40(%%"REG_SP"), %%"REG_c" \n\t" // make space for 4 8-byte vars
+ "and "ALIGN_MASK", %%"REG_c" \n\t" // align
+// 0 1 2 3 4 5 6 7 8 9
+// %0 eax eax+%1 eax+2%1 %0+4%1 ecx ecx+%1 ecx+2%1 %1+8%1 ecx+4%1
+
+ "movq (%0), %%mm0 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t" // low part of line 0
+ "punpckhbw %%mm7, %%mm1 \n\t" // high part of line 0
+
+ "movq (%0, %1), %%mm2 \n\t"
+ "lea (%0, %1, 2), %%"REG_a" \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t" // low part of line 1
+ "punpckhbw %%mm7, %%mm3 \n\t" // high part of line 1
+
+ "movq (%%"REG_a"), %%mm4 \n\t"
+ "movq %%mm4, %%mm5 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t" // low part of line 2
+ "punpckhbw %%mm7, %%mm5 \n\t" // high part of line 2
+
+ "paddw %%mm0, %%mm0 \n\t" // 2L0
+ "paddw %%mm1, %%mm1 \n\t" // 2H0
+ "psubw %%mm4, %%mm2 \n\t" // L1 - L2
+ "psubw %%mm5, %%mm3 \n\t" // H1 - H2
+ "psubw %%mm2, %%mm0 \n\t" // 2L0 - L1 + L2
+ "psubw %%mm3, %%mm1 \n\t" // 2H0 - H1 + H2
+
+ "psllw $2, %%mm2 \n\t" // 4L1 - 4L2
+ "psllw $2, %%mm3 \n\t" // 4H1 - 4H2
+ "psubw %%mm2, %%mm0 \n\t" // 2L0 - 5L1 + 5L2
+ "psubw %%mm3, %%mm1 \n\t" // 2H0 - 5H1 + 5H2
+
+ "movq (%%"REG_a", %1), %%mm2 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t" // L3
+ "punpckhbw %%mm7, %%mm3 \n\t" // H3
+
+ "psubw %%mm2, %%mm0 \n\t" // 2L0 - 5L1 + 5L2 - L3
+ "psubw %%mm3, %%mm1 \n\t" // 2H0 - 5H1 + 5H2 - H3
+ "psubw %%mm2, %%mm0 \n\t" // 2L0 - 5L1 + 5L2 - 2L3
+ "psubw %%mm3, %%mm1 \n\t" // 2H0 - 5H1 + 5H2 - 2H3
+ "movq %%mm0, (%%"REG_c") \n\t" // 2L0 - 5L1 + 5L2 - 2L3
+ "movq %%mm1, 8(%%"REG_c") \n\t" // 2H0 - 5H1 + 5H2 - 2H3
+
+ "movq (%%"REG_a", %1, 2), %%mm0 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t" // L4
+ "punpckhbw %%mm7, %%mm1 \n\t" // H4
+
+ "psubw %%mm0, %%mm2 \n\t" // L3 - L4
+ "psubw %%mm1, %%mm3 \n\t" // H3 - H4
+ "movq %%mm2, 16(%%"REG_c") \n\t" // L3 - L4
+ "movq %%mm3, 24(%%"REG_c") \n\t" // H3 - H4
+ "paddw %%mm4, %%mm4 \n\t" // 2L2
+ "paddw %%mm5, %%mm5 \n\t" // 2H2
+ "psubw %%mm2, %%mm4 \n\t" // 2L2 - L3 + L4
+ "psubw %%mm3, %%mm5 \n\t" // 2H2 - H3 + H4
+
+ "lea (%%"REG_a", %1), %0 \n\t"
+ "psllw $2, %%mm2 \n\t" // 4L3 - 4L4
+ "psllw $2, %%mm3 \n\t" // 4H3 - 4H4
+ "psubw %%mm2, %%mm4 \n\t" // 2L2 - 5L3 + 5L4
+ "psubw %%mm3, %%mm5 \n\t" // 2H2 - 5H3 + 5H4
+//50 opcodes so far
+ "movq (%0, %1, 2), %%mm2 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t" // L5
+ "punpckhbw %%mm7, %%mm3 \n\t" // H5
+ "psubw %%mm2, %%mm4 \n\t" // 2L2 - 5L3 + 5L4 - L5
+ "psubw %%mm3, %%mm5 \n\t" // 2H2 - 5H3 + 5H4 - H5
+ "psubw %%mm2, %%mm4 \n\t" // 2L2 - 5L3 + 5L4 - 2L5
+ "psubw %%mm3, %%mm5 \n\t" // 2H2 - 5H3 + 5H4 - 2H5
+
+ "movq (%%"REG_a", %1, 4), %%mm6 \n\t"
+ "punpcklbw %%mm7, %%mm6 \n\t" // L6
+ "psubw %%mm6, %%mm2 \n\t" // L5 - L6
+ "movq (%%"REG_a", %1, 4), %%mm6 \n\t"
+ "punpckhbw %%mm7, %%mm6 \n\t" // H6
+ "psubw %%mm6, %%mm3 \n\t" // H5 - H6
+
+ "paddw %%mm0, %%mm0 \n\t" // 2L4
+ "paddw %%mm1, %%mm1 \n\t" // 2H4
+ "psubw %%mm2, %%mm0 \n\t" // 2L4 - L5 + L6
+ "psubw %%mm3, %%mm1 \n\t" // 2H4 - H5 + H6
+
+ "psllw $2, %%mm2 \n\t" // 4L5 - 4L6
+ "psllw $2, %%mm3 \n\t" // 4H5 - 4H6
+ "psubw %%mm2, %%mm0 \n\t" // 2L4 - 5L5 + 5L6
+ "psubw %%mm3, %%mm1 \n\t" // 2H4 - 5H5 + 5H6
+
+ "movq (%0, %1, 4), %%mm2 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t" // L7
+ "punpckhbw %%mm7, %%mm3 \n\t" // H7
+
+ "paddw %%mm2, %%mm2 \n\t" // 2L7
+ "paddw %%mm3, %%mm3 \n\t" // 2H7
+ "psubw %%mm2, %%mm0 \n\t" // 2L4 - 5L5 + 5L6 - 2L7
+ "psubw %%mm3, %%mm1 \n\t" // 2H4 - 5H5 + 5H6 - 2H7
+
+ "movq (%%"REG_c"), %%mm2 \n\t" // 2L0 - 5L1 + 5L2 - 2L3
+ "movq 8(%%"REG_c"), %%mm3 \n\t" // 2H0 - 5H1 + 5H2 - 2H3
+
+#if HAVE_MMX2
+ "movq %%mm7, %%mm6 \n\t" // 0
+ "psubw %%mm0, %%mm6 \n\t"
+ "pmaxsw %%mm6, %%mm0 \n\t" // |2L4 - 5L5 + 5L6 - 2L7|
+ "movq %%mm7, %%mm6 \n\t" // 0
+ "psubw %%mm1, %%mm6 \n\t"
+ "pmaxsw %%mm6, %%mm1 \n\t" // |2H4 - 5H5 + 5H6 - 2H7|
+ "movq %%mm7, %%mm6 \n\t" // 0
+ "psubw %%mm2, %%mm6 \n\t"
+ "pmaxsw %%mm6, %%mm2 \n\t" // |2L0 - 5L1 + 5L2 - 2L3|
+ "movq %%mm7, %%mm6 \n\t" // 0
+ "psubw %%mm3, %%mm6 \n\t"
+ "pmaxsw %%mm6, %%mm3 \n\t" // |2H0 - 5H1 + 5H2 - 2H3|
+#else
+ "movq %%mm7, %%mm6 \n\t" // 0
+ "pcmpgtw %%mm0, %%mm6 \n\t"
+ "pxor %%mm6, %%mm0 \n\t"
+ "psubw %%mm6, %%mm0 \n\t" // |2L4 - 5L5 + 5L6 - 2L7|
+ "movq %%mm7, %%mm6 \n\t" // 0
+ "pcmpgtw %%mm1, %%mm6 \n\t"
+ "pxor %%mm6, %%mm1 \n\t"
+ "psubw %%mm6, %%mm1 \n\t" // |2H4 - 5H5 + 5H6 - 2H7|
+ "movq %%mm7, %%mm6 \n\t" // 0
+ "pcmpgtw %%mm2, %%mm6 \n\t"
+ "pxor %%mm6, %%mm2 \n\t"
+ "psubw %%mm6, %%mm2 \n\t" // |2L0 - 5L1 + 5L2 - 2L3|
+ "movq %%mm7, %%mm6 \n\t" // 0
+ "pcmpgtw %%mm3, %%mm6 \n\t"
+ "pxor %%mm6, %%mm3 \n\t"
+ "psubw %%mm6, %%mm3 \n\t" // |2H0 - 5H1 + 5H2 - 2H3|
+#endif
+
+#if HAVE_MMX2
+ "pminsw %%mm2, %%mm0 \n\t"
+ "pminsw %%mm3, %%mm1 \n\t"
+#else
+ "movq %%mm0, %%mm6 \n\t"
+ "psubusw %%mm2, %%mm6 \n\t"
+ "psubw %%mm6, %%mm0 \n\t"
+ "movq %%mm1, %%mm6 \n\t"
+ "psubusw %%mm3, %%mm6 \n\t"
+ "psubw %%mm6, %%mm1 \n\t"
+#endif
+
+ "movd %2, %%mm2 \n\t" // QP
+ "punpcklbw %%mm7, %%mm2 \n\t"
+
+ "movq %%mm7, %%mm6 \n\t" // 0
+ "pcmpgtw %%mm4, %%mm6 \n\t" // sign(2L2 - 5L3 + 5L4 - 2L5)
+ "pxor %%mm6, %%mm4 \n\t"
+ "psubw %%mm6, %%mm4 \n\t" // |2L2 - 5L3 + 5L4 - 2L5|
+ "pcmpgtw %%mm5, %%mm7 \n\t" // sign(2H2 - 5H3 + 5H4 - 2H5)
+ "pxor %%mm7, %%mm5 \n\t"
+ "psubw %%mm7, %%mm5 \n\t" // |2H2 - 5H3 + 5H4 - 2H5|
+// 100 opcodes
+ "psllw $3, %%mm2 \n\t" // 8QP
+ "movq %%mm2, %%mm3 \n\t" // 8QP
+ "pcmpgtw %%mm4, %%mm2 \n\t"
+ "pcmpgtw %%mm5, %%mm3 \n\t"
+ "pand %%mm2, %%mm4 \n\t"
+ "pand %%mm3, %%mm5 \n\t"
+
+
+ "psubusw %%mm0, %%mm4 \n\t" // hd
+ "psubusw %%mm1, %%mm5 \n\t" // ld
+
+
+ "movq "MANGLE(w05)", %%mm2 \n\t" // 5
+ "pmullw %%mm2, %%mm4 \n\t"
+ "pmullw %%mm2, %%mm5 \n\t"
+ "movq "MANGLE(w20)", %%mm2 \n\t" // 32
+ "paddw %%mm2, %%mm4 \n\t"
+ "paddw %%mm2, %%mm5 \n\t"
+ "psrlw $6, %%mm4 \n\t"
+ "psrlw $6, %%mm5 \n\t"
+
+ "movq 16(%%"REG_c"), %%mm0 \n\t" // L3 - L4
+ "movq 24(%%"REG_c"), %%mm1 \n\t" // H3 - H4
+
+ "pxor %%mm2, %%mm2 \n\t"
+ "pxor %%mm3, %%mm3 \n\t"
+
+ "pcmpgtw %%mm0, %%mm2 \n\t" // sign (L3-L4)
+ "pcmpgtw %%mm1, %%mm3 \n\t" // sign (H3-H4)
+ "pxor %%mm2, %%mm0 \n\t"
+ "pxor %%mm3, %%mm1 \n\t"
+ "psubw %%mm2, %%mm0 \n\t" // |L3-L4|
+ "psubw %%mm3, %%mm1 \n\t" // |H3-H4|
+ "psrlw $1, %%mm0 \n\t" // |L3 - L4|/2
+ "psrlw $1, %%mm1 \n\t" // |H3 - H4|/2
+
+ "pxor %%mm6, %%mm2 \n\t"
+ "pxor %%mm7, %%mm3 \n\t"
+ "pand %%mm2, %%mm4 \n\t"
+ "pand %%mm3, %%mm5 \n\t"
+
+#if HAVE_MMX2
+ "pminsw %%mm0, %%mm4 \n\t"
+ "pminsw %%mm1, %%mm5 \n\t"
+#else
+ "movq %%mm4, %%mm2 \n\t"
+ "psubusw %%mm0, %%mm2 \n\t"
+ "psubw %%mm2, %%mm4 \n\t"
+ "movq %%mm5, %%mm2 \n\t"
+ "psubusw %%mm1, %%mm2 \n\t"
+ "psubw %%mm2, %%mm5 \n\t"
+#endif
+ "pxor %%mm6, %%mm4 \n\t"
+ "pxor %%mm7, %%mm5 \n\t"
+ "psubw %%mm6, %%mm4 \n\t"
+ "psubw %%mm7, %%mm5 \n\t"
+ "packsswb %%mm5, %%mm4 \n\t"
+ "movq %3, %%mm1 \n\t"
+ "pandn %%mm4, %%mm1 \n\t"
+ "movq (%0), %%mm0 \n\t"
+ "paddb %%mm1, %%mm0 \n\t"
+ "movq %%mm0, (%0) \n\t"
+ "movq (%0, %1), %%mm0 \n\t"
+ "psubb %%mm1, %%mm0 \n\t"
+ "movq %%mm0, (%0, %1) \n\t"
+
+ : "+r" (temp_src)
+ : "r" ((stride_t)step), "m" (c->pQPb), "m"(eq_mask)
+ : "%"REG_a, "%"REG_c
+ );
+ }
+/*if(step==16){
+ STOP_TIMER("step16")
+}else{
+ STOP_TIMER("stepX")
+}*/
+}
+#endif //HAVE_MMX
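
The do_a_deblock() assembly above is dense; per column it evaluates the three five-tap "energies" called out in the comments (2L0 - 5L1 + 5L2 - 2L3 and its two neighbours) and then nudges the two pixels that straddle the block edge. A simplified per-column C sketch of that correction step, ignoring the dc/eq masks and the packed fixed-point details (an approximation for readability, not the shipped filter):

    #include <stdint.h>

    /* hypothetical helper: 'col' points at the top pixel of one column,
     * the block edge lies between col[3*stride] and col[4*stride] */
    static void deblock_column_sketch(uint8_t *col, int stride, int QP)
    {
        const int l0 = col[0*stride], l1 = col[1*stride], l2 = col[2*stride], l3 = col[3*stride];
        const int l4 = col[4*stride], l5 = col[5*stride], l6 = col[6*stride], l7 = col[7*stride];

        const int middle = 2*l2 - 5*l3 + 5*l4 - 2*l5;   /* energy across the block edge */
        const int left   = 2*l0 - 5*l1 + 5*l2 - 2*l3;   /* energy just above it         */
        const int right  = 2*l4 - 5*l5 + 5*l6 - 2*l7;   /* energy just below it         */

        if (middle > -8*QP && middle < 8*QP) {          /* only touch soft edges        */
            const int q = (l3 - l4) / 2;
            int aleft  = left  < 0 ? -left  : left;
            int aright = right < 0 ? -right : right;
            int d = (middle < 0 ? -middle : middle) - (aleft < aright ? aleft : aright);
            if (d < 0) d = 0;
            d = (5*d + 32) >> 6;
            if (middle > 0) d = -d;                     /* step against the edge        */
            if (q > 0) { if (d < 0) d = 0; if (d > q) d = q; }
            else       { if (d > 0) d = 0; if (d < q) d = q; }
            col[3*stride] = (uint8_t)(l3 - d);
            col[4*stride] = (uint8_t)(l4 + d);
        }
    }

The clamp against q = (l3 - l4)/2 keeps the two edge pixels from crossing their common average, which is also why no separate 0..255 clip is needed here.
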
+
+static void RENAME(postProcess)(uint8_t src[], stride_t srcStride, uint8_t dst[], stride_t dstStride, int width, int height,
+ QP_STORE_T QPs[], int QPStride, int isColor, PPContext *c);
+
+/**
+ * Copies a block from src to dst and fixes the black level.
+ * levelFix == 0 -> don't touch the brightness & contrast
+ */
+#undef SCALED_CPY
+
+static inline void RENAME(blockCopy)(uint8_t dst[], stride_t dstStride, uint8_t src[], stride_t srcStride,
+ int levelFix, int64_t *packedOffsetAndScale)
+{
+#if !HAVE_MMX
+ int i;
+#endif
+ if(levelFix)
+ {
+#if HAVE_MMX
+ asm volatile(
+ "movq (%%"REG_a"), %%mm2 \n\t" // packedYOffset
+ "movq 8(%%"REG_a"), %%mm3 \n\t" // packedYScale
+ "lea (%2,%4), %%"REG_a" \n\t"
+ "lea (%3,%5), %%"REG_d" \n\t"
+ "pxor %%mm4, %%mm4 \n\t"
+#if HAVE_MMX2
+#define REAL_SCALED_CPY(src1, src2, dst1, dst2) \
+ "movq " #src1 ", %%mm0 \n\t"\
+ "movq " #src1 ", %%mm5 \n\t"\
+ "movq " #src2 ", %%mm1 \n\t"\
+ "movq " #src2 ", %%mm6 \n\t"\
+ "punpcklbw %%mm0, %%mm0 \n\t"\
+ "punpckhbw %%mm5, %%mm5 \n\t"\
+ "punpcklbw %%mm1, %%mm1 \n\t"\
+ "punpckhbw %%mm6, %%mm6 \n\t"\
+ "pmulhuw %%mm3, %%mm0 \n\t"\
+ "pmulhuw %%mm3, %%mm5 \n\t"\
+ "pmulhuw %%mm3, %%mm1 \n\t"\
+ "pmulhuw %%mm3, %%mm6 \n\t"\
+ "psubw %%mm2, %%mm0 \n\t"\
+ "psubw %%mm2, %%mm5 \n\t"\
+ "psubw %%mm2, %%mm1 \n\t"\
+ "psubw %%mm2, %%mm6 \n\t"\
+ "packuswb %%mm5, %%mm0 \n\t"\
+ "packuswb %%mm6, %%mm1 \n\t"\
+ "movq %%mm0, " #dst1 " \n\t"\
+ "movq %%mm1, " #dst2 " \n\t"\
+
+#else //HAVE_MMX2
+#define REAL_SCALED_CPY(src1, src2, dst1, dst2) \
+ "movq " #src1 ", %%mm0 \n\t"\
+ "movq " #src1 ", %%mm5 \n\t"\
+ "punpcklbw %%mm4, %%mm0 \n\t"\
+ "punpckhbw %%mm4, %%mm5 \n\t"\
+ "psubw %%mm2, %%mm0 \n\t"\
+ "psubw %%mm2, %%mm5 \n\t"\
+ "movq " #src2 ", %%mm1 \n\t"\
+ "psllw $6, %%mm0 \n\t"\
+ "psllw $6, %%mm5 \n\t"\
+ "pmulhw %%mm3, %%mm0 \n\t"\
+ "movq " #src2 ", %%mm6 \n\t"\
+ "pmulhw %%mm3, %%mm5 \n\t"\
+ "punpcklbw %%mm4, %%mm1 \n\t"\
+ "punpckhbw %%mm4, %%mm6 \n\t"\
+ "psubw %%mm2, %%mm1 \n\t"\
+ "psubw %%mm2, %%mm6 \n\t"\
+ "psllw $6, %%mm1 \n\t"\
+ "psllw $6, %%mm6 \n\t"\
+ "pmulhw %%mm3, %%mm1 \n\t"\
+ "pmulhw %%mm3, %%mm6 \n\t"\
+ "packuswb %%mm5, %%mm0 \n\t"\
+ "packuswb %%mm6, %%mm1 \n\t"\
+ "movq %%mm0, " #dst1 " \n\t"\
+ "movq %%mm1, " #dst2 " \n\t"\
+
+#endif //!HAVE_MMX2
+#define SCALED_CPY(src1, src2, dst1, dst2)\
+ REAL_SCALED_CPY(src1, src2, dst1, dst2)
+
+SCALED_CPY((%2) , (%2, %4) , (%3) , (%3, %5))
+SCALED_CPY((%2, %4, 2), (%%REGa, %4, 2), (%3, %5, 2), (%%REGd, %5, 2))
+SCALED_CPY((%2, %4, 4), (%%REGa, %4, 4), (%3, %5, 4), (%%REGd, %5, 4))
+ "lea (%%"REG_a",%4,4), %%"REG_a" \n\t"
+ "lea (%%"REG_d",%5,4), %%"REG_d" \n\t"
+SCALED_CPY((%%REGa, %4), (%%REGa, %4, 2), (%%REGd, %5), (%%REGd, %5, 2))
+
+
+ : "=&a" (packedOffsetAndScale)
+ : "0" (packedOffsetAndScale),
+ "r"(src),
+ "r"(dst),
+ "r" ((stride_t)srcStride),
+ "r" ((stride_t)dstStride)
+ : "%"REG_d
+ );
+#else
+ for(i=0; i<8; i++)
+ memcpy( &(dst[dstStride*i]),
+ &(src[srcStride*i]), BLOCK_SIZE);
+#endif
+ }
+ else
+ {
+#if HAVE_MMX
+ asm volatile(
+ "lea (%0,%2), %%"REG_a" \n\t"
+ "lea (%1,%3), %%"REG_d" \n\t"
+
+#define REAL_SIMPLE_CPY(src1, src2, dst1, dst2) \
+ "movq " #src1 ", %%mm0 \n\t"\
+ "movq " #src2 ", %%mm1 \n\t"\
+ "movq %%mm0, " #dst1 " \n\t"\
+ "movq %%mm1, " #dst2 " \n\t"\
+
+#define SIMPLE_CPY(src1, src2, dst1, dst2)\
+ REAL_SIMPLE_CPY(src1, src2, dst1, dst2)
+
+SIMPLE_CPY((%0) , (%0, %2) , (%1) , (%1, %3))
+SIMPLE_CPY((%0, %2, 2), (%%REGa, %2, 2), (%1, %3, 2), (%%REGd, %3, 2))
+SIMPLE_CPY((%0, %2, 4), (%%REGa, %2, 4), (%1, %3, 4), (%%REGd, %3, 4))
+ "lea (%%"REG_a",%2,4), %%"REG_a" \n\t"
+ "lea (%%"REG_d",%3,4), %%"REG_d" \n\t"
+SIMPLE_CPY((%%REGa, %2), (%%REGa, %2, 2), (%%REGd, %3), (%%REGd, %3, 2))
+
+ : : "r" (src),
+ "r" (dst),
+ "r" ((stride_t)srcStride),
+ "r" ((stride_t)dstStride)
+ : "%"REG_a, "%"REG_d
+ );
+#else
+ for(i=0; i<8; i++)
+ memcpy( &(dst[dstStride*i]),
+ &(src[srcStride*i]), BLOCK_SIZE);
+#endif
+ }
+}
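
Stripped of the MMX packing, the levelFix branch of blockCopy() is an affine remap of every copied byte: multiply by packedYScale (8.8 fixed point), subtract packedYOffset, and clamp to 0..255 (the pack instructions provide the clamp for free). A plain-C sketch of that path, with hypothetical parameter names and simpler rounding than the real code:

    #include <stdint.h>

    /* hypothetical helper mirroring the levelFix path above;
     * scale256 is the gain in 8.8 fixed point, offset the black-level shift */
    static void blockCopy_levelfix_sketch(uint8_t *dst, int dstStride,
                                          const uint8_t *src, int srcStride,
                                          int scale256, int offset)
    {
        int x, y;
        for (y = 0; y < 8; y++)
            for (x = 0; x < 8; x++) {
                int v = ((src[y*srcStride + x] * scale256) >> 8) - offset;
                dst[y*dstStride + x] = v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
            }
    }
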
+
+/**
+ * Duplicates the given 8 src pixels 3 times upward
+ */
+static inline void RENAME(duplicate)(uint8_t src[], stride_t stride)
+{
+#if HAVE_MMX
+ asm volatile(
+ "movq (%0), %%mm0 \n\t"
+ "add %1, %0 \n\t"
+ "movq %%mm0, (%0) \n\t"
+ "movq %%mm0, (%0, %1) \n\t"
+ "movq %%mm0, (%0, %1, 2) \n\t"
+ : "+r" (src)
+ : "r" ((stride_t)-stride)
+ );
+#else
+ int i;
+ uint8_t *p=src;
+ for(i=0; i<3; i++)
+ {
+ p-= stride;
+ memcpy(p, src, 8);
+ }
+#endif
+}
+
+/**
+ * Filters an array of bytes (Y, U or V values)
+ */
+static void RENAME(postProcess)(uint8_t src[], stride_t srcStride, uint8_t dst[], stride_t dstStride, int width, int height,
+ QP_STORE_T QPs[], int QPStride, int isColor, PPContext *c2)
+{
+ PPContext __attribute__((aligned(8))) c= *c2; //copy to stack for faster access
+ int x,y;
+#ifdef COMPILE_TIME_MODE
+ const int mode= COMPILE_TIME_MODE;
+#else
+ const int mode= isColor ? c.ppMode.chromMode : c.ppMode.lumMode;
+#endif
+ int black=0, white=255; // blackest black and whitest white in the picture
+ int QPCorrecture= 256*256;
+
+ stride_t copyAhead;
+#if HAVE_MMX
+ int i;
+#endif
+
+ const int qpHShift= isColor ? 4-c.hChromaSubSample : 4;
+ const int qpVShift= isColor ? 4-c.vChromaSubSample : 4;
+
+ //FIXME remove
+ uint64_t * const yHistogram= c.yHistogram;
+ uint8_t * const tempSrc= c.tempSrc+24*width;
+ uint8_t * const tempDst= c.tempDst+24*width;
+ //const int mbWidth= isColor ? (width+7)>>3 : (width+15)>>4;
+
+#if HAVE_MMX
+ for(i=0; i<57; i++){
+ int offset= ((i*c.ppMode.baseDcDiff)>>8) + 1;
+ int threshold= offset*2 + 1;
+ c.mmxDcOffset[i]= 0x7F - offset;
+ c.mmxDcThreshold[i]= 0x7F - threshold;
+ c.mmxDcOffset[i]*= 0x0101010101010101LL;
+ c.mmxDcThreshold[i]*= 0x0101010101010101LL;
+ }
+#endif
+
+ if(mode & CUBIC_IPOL_DEINT_FILTER) copyAhead=16;
+ else if( (mode & LINEAR_BLEND_DEINT_FILTER)
+ || (mode & FFMPEG_DEINT_FILTER)
+ || (mode & LOWPASS5_DEINT_FILTER)) copyAhead=14;
+ else if( (mode & V_DEBLOCK)
+ || (mode & LINEAR_IPOL_DEINT_FILTER)
+ || (mode & MEDIAN_DEINT_FILTER)
+ || (mode & V_A_DEBLOCK)) copyAhead=13;
+ else if(mode & V_X1_FILTER) copyAhead=11;
+// else if(mode & V_RK1_FILTER) copyAhead=10;
+ else if(mode & DERING) copyAhead=9;
+ else copyAhead=8;
+
+ copyAhead-= 8;
+
+ if(!isColor)
+ {
+ uint64_t sum= 0;
+ int i;
+ uint64_t maxClipped;
+ uint64_t clipped;
+ double scale;
+
+ c.frameNum++;
+        // the first frame is unreliable, so we ignore it
+ if(c.frameNum == 1) yHistogram[0]= width*height/64*15/256;
+
+ for(i=0; i<256; i++)
+ {
+ sum+= yHistogram[i];
+// printf("%d ", yHistogram[i]);
+ }
+// printf("\n\n");
+
+        /* we always get a completely black picture first */
+ maxClipped= (uint64_t)(sum * c.ppMode.maxClippedThreshold);
+
+ clipped= sum;
+ for(black=255; black>0; black--)
+ {
+ if(clipped < maxClipped) break;
+ clipped-= yHistogram[black];
+ }
+
+ clipped= sum;
+ for(white=0; white<256; white++)
+ {
+ if(clipped < maxClipped) break;
+ clipped-= yHistogram[white];
+ }
+
+ scale= (double)(c.ppMode.maxAllowedY - c.ppMode.minAllowedY) / (double)(white-black);
+
+#if HAVE_MMX2
+ c.packedYScale= (uint16_t)(scale*256.0 + 0.5);
+ c.packedYOffset= (((black*c.packedYScale)>>8) - c.ppMode.minAllowedY) & 0xFFFF;
+#else
+ c.packedYScale= (uint16_t)(scale*1024.0 + 0.5);
+ c.packedYOffset= (black - c.ppMode.minAllowedY) & 0xFFFF;
+#endif
+
+ c.packedYOffset|= c.packedYOffset<<32;
+ c.packedYOffset|= c.packedYOffset<<16;
+
+ c.packedYScale|= c.packedYScale<<32;
+ c.packedYScale|= c.packedYScale<<16;
+
+ if(mode & LEVEL_FIX) QPCorrecture= (int)(scale*256*256 + 0.5);
+ else QPCorrecture= 256*256;
+ }
+ else
+ {
+ c.packedYScale= 0x0100010001000100LL;
+ c.packedYOffset= 0;
+ QPCorrecture= 256*256;
+ }
+
+ /* copy & deinterlace first row of blocks */
+ y=-BLOCK_SIZE;
+ {
+ uint8_t *srcBlock= &(src[y*srcStride]);
+ uint8_t *dstBlock= tempDst + dstStride;
+
+        // From this point on it is guaranteed that we can read and write 16 lines downward.
+        // Finish 1 block before the next, otherwise we might have a problem
+        // with the L1 cache of the P4 ... or only a few blocks at a time or something
+ for(x=0; x<width; x+=BLOCK_SIZE)
+ {
+
+#if HAVE_MMX2
+/*
+ prefetchnta(srcBlock + (((x>>2)&6) + 5)*srcStride + 32);
+ prefetchnta(srcBlock + (((x>>2)&6) + 6)*srcStride + 32);
+ prefetcht0(dstBlock + (((x>>2)&6) + 5)*dstStride + 32);
+ prefetcht0(dstBlock + (((x>>2)&6) + 6)*dstStride + 32);
+*/
+
+ asm(
+ "mov %4, %%"REG_a" \n\t"
+ "shr $2, %%"REG_a" \n\t"
+ "and $6, %%"REG_a" \n\t"
+ "add %5, %%"REG_a" \n\t"
+ "mov %%"REG_a", %%"REG_d" \n\t"
+ "imul %1, %%"REG_a" \n\t"
+ "imul %3, %%"REG_d" \n\t"
+ "prefetchnta 32(%%"REG_a", %0) \n\t"
+ "prefetcht0 32(%%"REG_d", %2) \n\t"
+ "add %1, %%"REG_a" \n\t"
+ "add %3, %%"REG_d" \n\t"
+ "prefetchnta 32(%%"REG_a", %0) \n\t"
+ "prefetcht0 32(%%"REG_d", %2) \n\t"
+ :: "r" (srcBlock), "r" ((stride_t)srcStride), "r" (dstBlock), "r" ((stride_t)dstStride),
+ "g" ((stride_t)x), "g" (copyAhead)
+ : "%"REG_a, "%"REG_d
+ );
+
+#elif HAVE_AMD3DNOW
+//FIXME check if this is faster on a 3DNow! chip or if it's faster without the prefetch or ...
+/* prefetch(srcBlock + (((x>>3)&3) + 5)*srcStride + 32);
+ prefetch(srcBlock + (((x>>3)&3) + 9)*srcStride + 32);
+ prefetchw(dstBlock + (((x>>3)&3) + 5)*dstStride + 32);
+ prefetchw(dstBlock + (((x>>3)&3) + 9)*dstStride + 32);
+*/
+#endif
+
+ RENAME(blockCopy)(dstBlock + dstStride*8, dstStride,
+ srcBlock + srcStride*8, srcStride, mode & LEVEL_FIX, &c.packedYOffset);
+
+ RENAME(duplicate)(dstBlock + dstStride*8, dstStride);
+
+ if(mode & LINEAR_IPOL_DEINT_FILTER)
+ RENAME(deInterlaceInterpolateLinear)(dstBlock, dstStride);
+ else if(mode & LINEAR_BLEND_DEINT_FILTER)
+ RENAME(deInterlaceBlendLinear)(dstBlock, dstStride, c.deintTemp + x);
+ else if(mode & MEDIAN_DEINT_FILTER)
+ RENAME(deInterlaceMedian)(dstBlock, dstStride);
+ else if(mode & CUBIC_IPOL_DEINT_FILTER)
+ RENAME(deInterlaceInterpolateCubic)(dstBlock, dstStride);
+ else if(mode & FFMPEG_DEINT_FILTER)
+ RENAME(deInterlaceFF)(dstBlock, dstStride, c.deintTemp + x);
+ else if(mode & LOWPASS5_DEINT_FILTER)
+ RENAME(deInterlaceL5)(dstBlock, dstStride, c.deintTemp + x, c.deintTemp + width + x);
+/* else if(mode & CUBIC_BLEND_DEINT_FILTER)
+ RENAME(deInterlaceBlendCubic)(dstBlock, dstStride);
+*/
+ dstBlock+=8;
+ srcBlock+=8;
+ }
+ if(width==dstStride)
+ memcpy(dst, tempDst + 9*dstStride, copyAhead*dstStride );
+ else
+ {
+ int i;
+ for(i=0; i<copyAhead; i++)
+ {
+ memcpy(dst + i*dstStride, tempDst + (9+i)*dstStride, width);
+ }
+ }
+ }
+
+//printf("\n");
+ for(y=0; y<height; y+=BLOCK_SIZE)
+ {
+ //1% speedup if these are here instead of the inner loop
+ uint8_t *srcBlock= &(src[y*srcStride]);
+ uint8_t *dstBlock= &(dst[y*dstStride]);
+#if HAVE_MMX
+ uint8_t *tempBlock1= c.tempBlocks;
+ uint8_t *tempBlock2= c.tempBlocks + 8;
+#endif
+ int8_t *QPptr= &QPs[(y>>qpVShift)*QPStride];
+ int8_t *nonBQPptr= &c.nonBQPTable[(y>>qpVShift)*QPStride];
+ int QP=0;
+        /* can we mess with an 8x16 block from srcBlock/dstBlock downwards and 1 line upwards?
+           if not, then use a temporary buffer */
+ if(y+15 >= height)
+ {
+ int i;
+            /* copy from line (copyAhead) to (copyAhead+7) of src; these will be copied with
+               blockCopy to dst later */
+ for(i=0;i<MAX(height-y-copyAhead, 0);i++)
+ memcpy(tempSrc + srcStride*copyAhead+i*srcStride, srcBlock + srcStride*copyAhead+i*srcStride,width);
+
+            /* duplicate last line of src to fill the void up to line (copyAhead+7) */
+ for(i=MAX(height-y, 8); i<copyAhead+8; i++)
+ memcpy(tempSrc + srcStride*i, src + srcStride*(height-1), width);
+
+ /* copy up to (copyAhead+1) lines of dst (line -1 to (copyAhead-1))*/
+ for(i=0;i<MIN(height-y+1, copyAhead+1);i++)
+ memcpy(tempDst+i*dstStride, (dstBlock - dstStride)+i*dstStride, width);
+
+            /* duplicate last line of dst to fill the void up to line (copyAhead) */
+ for(i=height-y+1; i<=copyAhead; i++)
+ memcpy(tempDst + dstStride*i, dst + dstStride*(height-1), width);
+
+ dstBlock= tempDst + dstStride;
+ srcBlock= tempSrc;
+ }
+//printf("\n");
+
+        // From this point on it is guaranteed that we can read and write 16 lines downward.
+        // Finish 1 block before the next, otherwise we might have a problem
+        // with the L1 cache of the P4 ... or only a few blocks at a time or something
+ for(x=0; x<width; x+=BLOCK_SIZE)
+ {
+ const stride_t stride= dstStride;
+#if HAVE_MMX
+ uint8_t *tmpXchg;
+#endif
+ if(isColor)
+ {
+ QP= QPptr[x>>qpHShift];
+ c.nonBQP= nonBQPptr[x>>qpHShift];
+ }
+ else
+ {
+ QP= QPptr[x>>4];
+ QP= (QP* QPCorrecture + 256*128)>>16;
+ c.nonBQP= nonBQPptr[x>>4];
+ c.nonBQP= (c.nonBQP* QPCorrecture + 256*128)>>16;
+ yHistogram[ srcBlock[srcStride*12 + 4] ]++;
+ }
+ c.QP= QP;
+#if HAVE_MMX
+ asm volatile(
+ "movd %1, %%mm7 \n\t"
+ "packuswb %%mm7, %%mm7 \n\t" // 0, 0, 0, QP, 0, 0, 0, QP
+ "packuswb %%mm7, %%mm7 \n\t" // 0,QP, 0, QP, 0,QP, 0, QP
+ "packuswb %%mm7, %%mm7 \n\t" // QP,..., QP
+ "movq %%mm7, %0 \n\t"
+ : "=m" (c.pQPb)
+ : "r" (QP)
+ );
+#endif
+
+
+#if HAVE_MMX2
+/*
+ prefetchnta(srcBlock + (((x>>2)&6) + 5)*srcStride + 32);
+ prefetchnta(srcBlock + (((x>>2)&6) + 6)*srcStride + 32);
+ prefetcht0(dstBlock + (((x>>2)&6) + 5)*dstStride + 32);
+ prefetcht0(dstBlock + (((x>>2)&6) + 6)*dstStride + 32);
+*/
+
+ asm(
+ "mov %4, %%"REG_a" \n\t"
+ "shr $2, %%"REG_a" \n\t"
+ "and $6, %%"REG_a" \n\t"
+ "add %5, %%"REG_a" \n\t"
+ "mov %%"REG_a", %%"REG_d" \n\t"
+ "imul %1, %%"REG_a" \n\t"
+ "imul %3, %%"REG_d" \n\t"
+ "prefetchnta 32(%%"REG_a", %0) \n\t"
+ "prefetcht0 32(%%"REG_d", %2) \n\t"
+ "add %1, %%"REG_a" \n\t"
+ "add %3, %%"REG_d" \n\t"
+ "prefetchnta 32(%%"REG_a", %0) \n\t"
+ "prefetcht0 32(%%"REG_d", %2) \n\t"
+ :: "r" (srcBlock), "r" ((stride_t)srcStride), "r" (dstBlock), "r" ((stride_t)dstStride),
+ "g" ((stride_t)x), "g" ((long)copyAhead)
+ : "%"REG_a, "%"REG_d
+ );
+
+#elif HAVE_AMD3DNOW
+//FIXME check if this is faster on a 3DNow! chip or if it's faster without the prefetch or ...
+/* prefetch(srcBlock + (((x>>3)&3) + 5)*srcStride + 32);
+ prefetch(srcBlock + (((x>>3)&3) + 9)*srcStride + 32);
+ prefetchw(dstBlock + (((x>>3)&3) + 5)*dstStride + 32);
+ prefetchw(dstBlock + (((x>>3)&3) + 9)*dstStride + 32);
+*/
+#endif
+
+ RENAME(blockCopy)(dstBlock + dstStride*copyAhead, dstStride,
+ srcBlock + srcStride*copyAhead, srcStride, mode & LEVEL_FIX, &c.packedYOffset);
+
+ if(mode & LINEAR_IPOL_DEINT_FILTER)
+ RENAME(deInterlaceInterpolateLinear)(dstBlock, dstStride);
+ else if(mode & LINEAR_BLEND_DEINT_FILTER)
+ RENAME(deInterlaceBlendLinear)(dstBlock, dstStride, c.deintTemp + x);
+ else if(mode & MEDIAN_DEINT_FILTER)
+ RENAME(deInterlaceMedian)(dstBlock, dstStride);
+ else if(mode & CUBIC_IPOL_DEINT_FILTER)
+ RENAME(deInterlaceInterpolateCubic)(dstBlock, dstStride);
+ else if(mode & FFMPEG_DEINT_FILTER)
+ RENAME(deInterlaceFF)(dstBlock, dstStride, c.deintTemp + x);
+ else if(mode & LOWPASS5_DEINT_FILTER)
+ RENAME(deInterlaceL5)(dstBlock, dstStride, c.deintTemp + x, c.deintTemp + width + x);
+/* else if(mode & CUBIC_BLEND_DEINT_FILTER)
+ RENAME(deInterlaceBlendCubic)(dstBlock, dstStride);
+*/
+
+ /* only deblock if we have 2 blocks */
+ if(y + 8 < height)
+ {
+ if(mode & V_X1_FILTER)
+ RENAME(vertX1Filter)(dstBlock, stride, &c);
+ else if(mode & V_DEBLOCK)
+ {
+ const int t= RENAME(vertClassify)(dstBlock, stride, &c);
+
+ if(t==1)
+ RENAME(doVertLowPass)(dstBlock, stride, &c);
+ else if(t==2)
+ RENAME(doVertDefFilter)(dstBlock, stride, &c);
+ }else if(mode & V_A_DEBLOCK){
+ RENAME(do_a_deblock)(dstBlock, stride, 1, &c);
+ }
+ }
+
+#if HAVE_MMX
+ RENAME(transpose1)(tempBlock1, tempBlock2, dstBlock, dstStride);
+#endif
+            /* check if we have a previous block to deblock together with dstBlock */
+ if(x - 8 >= 0)
+ {
+#if HAVE_MMX
+ if(mode & H_X1_FILTER)
+ RENAME(vertX1Filter)(tempBlock1, 16, &c);
+ else if(mode & H_DEBLOCK)
+ {
+//START_TIMER
+ const int t= RENAME(vertClassify)(tempBlock1, 16, &c);
+//STOP_TIMER("dc & minmax")
+ if(t==1)
+ RENAME(doVertLowPass)(tempBlock1, 16, &c);
+ else if(t==2)
+ RENAME(doVertDefFilter)(tempBlock1, 16, &c);
+ }else if(mode & H_A_DEBLOCK){
+ RENAME(do_a_deblock)(tempBlock1, 16, 1, &c);
+ }
+
+ RENAME(transpose2)(dstBlock-4, dstStride, tempBlock1 + 4*16);
+
+#else
+ if(mode & H_X1_FILTER)
+ horizX1Filter(dstBlock-4, stride, QP);
+ else if(mode & H_DEBLOCK)
+ {
+#ifdef HAVE_ALTIVEC
+ unsigned char __attribute__ ((aligned(16))) tempBlock[272];
+ transpose_16x8_char_toPackedAlign_altivec(tempBlock, dstBlock - (4 + 1), stride);
+
+ const int t=vertClassify_altivec(tempBlock-48, 16, &c);
+ if(t==1) {
+ doVertLowPass_altivec(tempBlock-48, 16, &c);
+ transpose_8x16_char_fromPackedAlign_altivec(dstBlock - (4 + 1), tempBlock, stride);
+ }
+ else if(t==2) {
+ doVertDefFilter_altivec(tempBlock-48, 16, &c);
+ transpose_8x16_char_fromPackedAlign_altivec(dstBlock - (4 + 1), tempBlock, stride);
+ }
+#else
+ const int t= RENAME(horizClassify)(dstBlock-4, stride, &c);
+
+ if(t==1)
+ RENAME(doHorizLowPass)(dstBlock-4, stride, &c);
+ else if(t==2)
+ RENAME(doHorizDefFilter)(dstBlock-4, stride, &c);
+#endif
+ }else if(mode & H_A_DEBLOCK){
+ RENAME(do_a_deblock)(dstBlock-8, 1, stride, &c);
+ }
+#endif
+ if(mode & DERING)
+ {
+ //FIXME filter first line
+ if(y>0) RENAME(dering)(dstBlock - stride - 8, stride, &c);
+ }
+
+ if(mode & TEMP_NOISE_FILTER)
+ {
+ RENAME(tempNoiseReducer)(dstBlock-8, stride,
+ c.tempBlured[isColor] + y*dstStride + x+256,
+ c.tempBluredPast[isColor] + (y>>3)*256 + (x>>3)+256,
+ c.ppMode.maxTmpNoise);
+ }
+ }
+
+ dstBlock+=8;
+ srcBlock+=8;
+
+#if HAVE_MMX
+ tmpXchg= tempBlock1;
+ tempBlock1= tempBlock2;
+ tempBlock2 = tmpXchg;
+#endif
+ }
+
+ if(mode & DERING)
+ {
+ if(y > 0) RENAME(dering)(dstBlock - dstStride - 8, dstStride, &c);
+ }
+
+ if((mode & TEMP_NOISE_FILTER))
+ {
+ RENAME(tempNoiseReducer)(dstBlock-8, dstStride,
+ c.tempBlured[isColor] + y*dstStride + x+256,
+ c.tempBluredPast[isColor] + (y>>3)*256 + (x>>3)+256,
+ c.ppMode.maxTmpNoise);
+ }
+
+        /* did we use a tmp buffer for the last lines? */
+ if(y+15 >= height)
+ {
+ uint8_t *dstBlock= &(dst[y*dstStride]);
+ if(width==dstStride)
+ memcpy(dstBlock, tempDst + dstStride, dstStride*(height-y) );
+ else
+ {
+ int i;
+ for(i=0; i<height-y; i++)
+ {
+ memcpy(dstBlock + i*dstStride, tempDst + (i+1)*dstStride, width);
+ }
+ }
+ }
+/*
+ for(x=0; x<width; x+=32)
+ {
+ volatile int i;
+ i+= + dstBlock[x + 7*dstStride] + dstBlock[x + 8*dstStride]
+ + dstBlock[x + 9*dstStride] + dstBlock[x +10*dstStride]
+ + dstBlock[x +11*dstStride] + dstBlock[x +12*dstStride];
+// + dstBlock[x +13*dstStride]
+// + dstBlock[x +14*dstStride] + dstBlock[x +15*dstStride];
+ }*/
+ }
+#if HAVE_AMD3DNOW
+ asm volatile("femms");
+#elif HAVE_MMX
+ asm volatile("emms");
+#endif
+
+#ifdef DEBUG_BRIGHTNESS
+ if(!isColor)
+ {
+ int max=1;
+ int i;
+ for(i=0; i<256; i++)
+ if(yHistogram[i] > max) max=yHistogram[i];
+
+ for(i=1; i<256; i++)
+ {
+ int x;
+ int start=yHistogram[i-1]/(max/256+1);
+ int end=yHistogram[i]/(max/256+1);
+ int inc= end > start ? 1 : -1;
+ for(x=start; x!=end+inc; x+=inc)
+ dst[ i*dstStride + x]+=128;
+ }
+
+ for(i=0; i<100; i+=2)
+ {
+ dst[ (white)*dstStride + i]+=128;
+ dst[ (black)*dstStride + i]+=128;
+ }
+
+ }
+#endif
+
+ *c2= c; //copy local context back
+
+}
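
Before the next file, a concrete reading of the luminance-stretch set-up near the top of RENAME(postProcess): black and white are the roughly 1%-clipped extremes of the previous frame's Y histogram, and the scale/offset pair maps that range onto [minAllowedY, maxAllowedY] for the levelFix copy. A numeric sketch with made-up values (the histogram extremes and allowed range below are illustrative, not taken from this tree):

    int black = 40, white = 200;              /* hypothetical clipped extremes   */
    int minAllowedY = 16, maxAllowedY = 234;  /* hypothetical allowed Y range    */
    double scale = (double)(maxAllowedY - minAllowedY) / (white - black);  /* 1.3625 */
    /* MMX2 path: 8.8 fixed-point gain plus a black-level offset */
    int packedYScale  = (int)(scale*256.0 + 0.5);                   /* 349           */
    int packedYOffset = ((black*packedYScale) >> 8) - minAllowedY;  /* 54 - 16 = 38  */
    /* blockCopy then maps 40 -> (40*349>>8) - 38 = 16 and 200 -> (200*349>>8) - 38 = 234 */
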
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/rgb2rgb.c b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/rgb2rgb.c
new file mode 100644
index 000000000..dff5dfbf6
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/rgb2rgb.c
@@ -0,0 +1,659 @@
+/*
+ * software RGB to RGB converter
+ * pluralize by software PAL8 to RGB converter
+ * software YUV to YUV converter
+ * software YUV to RGB converter
+ * Written by Nick Kurshev.
+ * palette & YUV & runtime CPU stuff by Michael (michaelni@gmx.at)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * The C code (not assembly, MMX, ...) of this file can be used
+ * under the LGPL license.
+ */
+#include <inttypes.h>
+#include "../config.h"
+#include "../libavutil/internal.h"
+#include "../libavutil/x86_cpu.h"
+#include "../libavutil/bswap.h"
+#include "../libvo/fastmemcpy.h"
+#include "../cpudetect.h"
+#include "rgb2rgb.h"
+#include "swscale.h"
+#include "swscale_internal.h"
+
+#define FAST_BGR2YV12 // use 7-bit instead of 15-bit coefficients
+
+void (*rgb24to32)(const uint8_t *src,uint8_t *dst,stride_t src_size);
+void (*rgb24to16)(const uint8_t *src,uint8_t *dst,stride_t src_size);
+void (*rgb24to15)(const uint8_t *src,uint8_t *dst,stride_t src_size);
+void (*rgb32to24)(const uint8_t *src,uint8_t *dst,stride_t src_size);
+void (*rgb32to16)(const uint8_t *src,uint8_t *dst,stride_t src_size);
+void (*rgb32to15)(const uint8_t *src,uint8_t *dst,stride_t src_size);
+void (*rgb15to16)(const uint8_t *src,uint8_t *dst,stride_t src_size);
+void (*rgb15to24)(const uint8_t *src,uint8_t *dst,stride_t src_size);
+void (*rgb15to32)(const uint8_t *src,uint8_t *dst,stride_t src_size);
+void (*rgb16to15)(const uint8_t *src,uint8_t *dst,stride_t src_size);
+void (*rgb16to24)(const uint8_t *src,uint8_t *dst,stride_t src_size);
+void (*rgb16to32)(const uint8_t *src,uint8_t *dst,stride_t src_size);
+//void (*rgb24tobgr32)(const uint8_t *src, uint8_t *dst, stride_t src_size);
+void (*rgb24tobgr24)(const uint8_t *src, uint8_t *dst, stride_t src_size);
+void (*rgb24tobgr16)(const uint8_t *src, uint8_t *dst, stride_t src_size);
+void (*rgb24tobgr15)(const uint8_t *src, uint8_t *dst, stride_t src_size);
+void (*rgb32tobgr32)(const uint8_t *src, uint8_t *dst, stride_t src_size);
+//void (*rgb32tobgr24)(const uint8_t *src, uint8_t *dst, stride_t src_size);
+void (*rgb32tobgr16)(const uint8_t *src, uint8_t *dst, stride_t src_size);
+void (*rgb32tobgr15)(const uint8_t *src, uint8_t *dst, stride_t src_size);
+
+void (*yv12toyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+ long width, long height,
+ stride_t lumStride, stride_t chromStride, stride_t dstStride);
+void (*yv12touyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+ long width, long height,
+ stride_t lumStride, stride_t chromStride, stride_t dstStride);
+void (*yv12tovyuy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+ long width, long height,
+ stride_t lumStride, stride_t chromStride, stride_t dstStride);
+void (*yv12toyvyu)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+ long width, long height,
+ stride_t lumStride, stride_t chromStride, stride_t dstStride);
+void (*yuv422ptoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+ long width, long height,
+ stride_t lumStride, stride_t chromStride, stride_t dstStride);
+void (*yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
+ long width, long height,
+ stride_t lumStride, stride_t chromStride, stride_t srcStride);
+void (*rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
+ long width, long height,
+ stride_t lumStride, stride_t chromStride, stride_t srcStride);
+void (*planar2x)(const uint8_t *src, uint8_t *dst, long width, long height,
+ stride_t srcStride, stride_t dstStride);
+void (*interleaveBytes)(uint8_t *src1, uint8_t *src2, uint8_t *dst,
+ long width, long height, stride_t src1Stride,
+ stride_t src2Stride, stride_t dstStride);
+void (*vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2,
+ uint8_t *dst1, uint8_t *dst2,
+ long width, long height,
+ stride_t srcStride1, stride_t srcStride2,
+ stride_t dstStride1, stride_t dstStride2);
+void (*yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2, const uint8_t *src3,
+ uint8_t *dst,
+ long width, long height,
+ stride_t srcStride1, stride_t srcStride2,
+ stride_t srcStride3, stride_t dstStride);
+
+#if ARCH_X86 && CONFIG_GPL
+DECLARE_ASM_CONST(8, uint64_t, mmx_null) = 0x0000000000000000ULL;
+DECLARE_ASM_CONST(8, uint64_t, mmx_one) = 0xFFFFFFFFFFFFFFFFULL;
+DECLARE_ASM_CONST(8, uint64_t, mask32b) = 0x000000FF000000FFULL;
+DECLARE_ASM_CONST(8, uint64_t, mask32g) = 0x0000FF000000FF00ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask32r) = 0x00FF000000FF0000ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask32a) = 0xFF000000FF000000ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask32) = 0x00FFFFFF00FFFFFFULL;
+DECLARE_ASM_CONST(8, uint64_t, mask3216br) = 0x00F800F800F800F8ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask3216g) = 0x0000FC000000FC00ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask3215g) = 0x0000F8000000F800ULL;
+DECLARE_ASM_CONST(8, uint64_t, mul3216) = 0x2000000420000004ULL;
+DECLARE_ASM_CONST(8, uint64_t, mul3215) = 0x2000000820000008ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask24b) = 0x00FF0000FF0000FFULL;
+DECLARE_ASM_CONST(8, uint64_t, mask24g) = 0xFF0000FF0000FF00ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask24r) = 0x0000FF0000FF0000ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask24l) = 0x0000000000FFFFFFULL;
+DECLARE_ASM_CONST(8, uint64_t, mask24h) = 0x0000FFFFFF000000ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask24hh) = 0xffff000000000000ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask24hhh) = 0xffffffff00000000ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask24hhhh) = 0xffffffffffff0000ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask15b) = 0x001F001F001F001FULL; /* 00000000 00011111 xxB */
+DECLARE_ASM_CONST(8, uint64_t, mask15rg) = 0x7FE07FE07FE07FE0ULL; /* 01111111 11100000 RGx */
+DECLARE_ASM_CONST(8, uint64_t, mask15s) = 0xFFE0FFE0FFE0FFE0ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask15g) = 0x03E003E003E003E0ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask15r) = 0x7C007C007C007C00ULL;
+#define mask16b mask15b
+DECLARE_ASM_CONST(8, uint64_t, mask16g) = 0x07E007E007E007E0ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask16r) = 0xF800F800F800F800ULL;
+DECLARE_ASM_CONST(8, uint64_t, red_16mask) = 0x0000f8000000f800ULL;
+DECLARE_ASM_CONST(8, uint64_t, green_16mask) = 0x000007e0000007e0ULL;
+DECLARE_ASM_CONST(8, uint64_t, blue_16mask) = 0x0000001f0000001fULL;
+DECLARE_ASM_CONST(8, uint64_t, red_15mask) = 0x00007c0000007c00ULL;
+DECLARE_ASM_CONST(8, uint64_t, green_15mask) = 0x000003e0000003e0ULL;
+DECLARE_ASM_CONST(8, uint64_t, blue_15mask) = 0x0000001f0000001fULL;
+
+#ifdef FAST_BGR2YV12
+static const uint64_t bgr2YCoeff attribute_used __attribute__((aligned(8))) = 0x000000210041000DULL;
+static const uint64_t bgr2UCoeff attribute_used __attribute__((aligned(8))) = 0x0000FFEEFFDC0038ULL;
+static const uint64_t bgr2VCoeff attribute_used __attribute__((aligned(8))) = 0x00000038FFD2FFF8ULL;
+#else
+static const uint64_t bgr2YCoeff attribute_used __attribute__((aligned(8))) = 0x000020E540830C8BULL;
+static const uint64_t bgr2UCoeff attribute_used __attribute__((aligned(8))) = 0x0000ED0FDAC23831ULL;
+static const uint64_t bgr2VCoeff attribute_used __attribute__((aligned(8))) = 0x00003831D0E6F6EAULL;
+#endif
+static const uint64_t bgr2YOffset attribute_used __attribute__((aligned(8))) = 0x1010101010101010ULL;
+static const uint64_t bgr2UVOffset attribute_used __attribute__((aligned(8)))= 0x8080808080808080ULL;
+static const uint64_t w1111 attribute_used __attribute__((aligned(8))) = 0x0001000100010001ULL;
+
+#endif /* ARCH_X86 && CONFIG_GPL */
+
+#define RGB2YUV_SHIFT 8
+#define BY ((int)( 0.098*(1<<RGB2YUV_SHIFT)+0.5))
+#define BV ((int)(-0.071*(1<<RGB2YUV_SHIFT)+0.5))
+#define BU ((int)( 0.439*(1<<RGB2YUV_SHIFT)+0.5))
+#define GY ((int)( 0.504*(1<<RGB2YUV_SHIFT)+0.5))
+#define GV ((int)(-0.368*(1<<RGB2YUV_SHIFT)+0.5))
+#define GU ((int)(-0.291*(1<<RGB2YUV_SHIFT)+0.5))
+#define RY ((int)( 0.257*(1<<RGB2YUV_SHIFT)+0.5))
+#define RV ((int)( 0.439*(1<<RGB2YUV_SHIFT)+0.5))
+#define RU ((int)(-0.148*(1<<RGB2YUV_SHIFT)+0.5))
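
The defines above are the BT.601 "studio swing" RGB-to-YUV coefficients quantized to 8 fractional bits (RGB2YUV_SHIFT). As a quick sanity check of how such integer coefficients combine (a worked example, not code from this file):

    /* luma of pure white with the coefficients above */
    int r = 255, g = 255, b = 255;
    int y = ((RY*r + GY*g + BY*b) >> RGB2YUV_SHIFT) + 16;   /* usual +16 studio offset */
    /* RY + GY + BY = 66 + 129 + 25 = 220, so y = (220*255 >> 8) + 16 = 219 + 16 = 235 */
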
+
+//Note: We have C, MMX, MMX2 and 3DNOW versions; there is no 3DNOW + MMX2 one.
+//plain C versions
+#undef HAVE_MMX
+#undef HAVE_MMX2
+#undef HAVE_AMD3DNOW
+#undef HAVE_SSE2
+#define RENAME(a) a ## _C
+#include "rgb2rgb_template.c"
+
+#if ARCH_X86 && CONFIG_GPL
+
+//MMX versions
+#undef RENAME
+#define HAVE_MMX 1
+#undef HAVE_MMX2
+#undef HAVE_AMD3DNOW
+#undef HAVE_SSE2
+#define RENAME(a) a ## _MMX
+#include "rgb2rgb_template.c"
+
+//MMX2 versions
+#undef RENAME
+#define HAVE_MMX 1
+#define HAVE_MMX2 1
+#undef HAVE_AMD3DNOW
+#undef HAVE_SSE2
+#define RENAME(a) a ## _MMX2
+#include "rgb2rgb_template.c"
+
+//3DNOW versions
+#undef RENAME
+#define HAVE_MMX 1
+#undef HAVE_MMX2
+#define HAVE_AMD3DNOW 1
+#undef HAVE_SSE2
+#define RENAME(a) a ## _3DNOW
+#include "rgb2rgb_template.c"
+
+#endif //ARCH_X86 && CONFIG_GPL
+
+/*
+ RGB15->RGB16 original by Strepto/Astral
+ ported to gcc & bugfixed : A'rpi
+ MMX2, 3DNOW optimization by Nick Kurshev
+ 32-bit C version and the and&add trick by Michael Niedermayer
+*/
+
+void sws_rgb2rgb_init(SwsParams *params){
+ yv12toyvyu= yv12toyvyu_C;
+ yv12tovyuy= yv12tovyuy_C;
+#if ARCH_X86_32 || ARCH_X86_64
+ if(params->cpu & SWS_CPU_CAPS_MMX2){
+ rgb15to16= rgb15to16_MMX2;
+ rgb15to24= rgb15to24_MMX2;
+ rgb15to32= rgb15to32_MMX2;
+ rgb16to24= rgb16to24_MMX2;
+ rgb16to32= rgb16to32_MMX2;
+ rgb16to15= rgb16to15_MMX2;
+ rgb24to16= rgb24to16_MMX2;
+ rgb24to15= rgb24to15_MMX2;
+ rgb24to32= rgb24to32_MMX2;
+ rgb32to16= rgb32to16_MMX2;
+ rgb32to15= rgb32to15_MMX2;
+ rgb32to24= rgb32to24_MMX2;
+ rgb24tobgr15= rgb24tobgr15_MMX2;
+ rgb24tobgr16= rgb24tobgr16_MMX2;
+ rgb24tobgr24= rgb24tobgr24_MMX2;
+ rgb32tobgr32= rgb32tobgr32_MMX2;
+ rgb32tobgr16= rgb32tobgr16_MMX2;
+ rgb32tobgr15= rgb32tobgr15_MMX2;
+ yv12toyuy2= yv12toyuy2_MMX2;
+ yv12touyvy= yv12touyvy_MMX2;
+ yuv422ptoyuy2= yuv422ptoyuy2_MMX2;
+ yuy2toyv12= yuy2toyv12_MMX2;
+// uyvytoyv12= uyvytoyv12_MMX2;
+// yvu9toyv12= yvu9toyv12_MMX2;
+ planar2x= planar2x_MMX2;
+ rgb24toyv12= rgb24toyv12_MMX2;
+ interleaveBytes= interleaveBytes_MMX2;
+ vu9_to_vu12= vu9_to_vu12_MMX2;
+ yvu9_to_yuy2= yvu9_to_yuy2_MMX2;
+ }else if(params->cpu & SWS_CPU_CAPS_3DNOW){
+ rgb15to16= rgb15to16_3DNOW;
+ rgb15to24= rgb15to24_3DNOW;
+ rgb15to32= rgb15to32_3DNOW;
+ rgb16to24= rgb16to24_3DNOW;
+ rgb16to32= rgb16to32_3DNOW;
+ rgb16to15= rgb16to15_3DNOW;
+ rgb24to16= rgb24to16_3DNOW;
+ rgb24to15= rgb24to15_3DNOW;
+ rgb24to32= rgb24to32_3DNOW;
+ rgb32to16= rgb32to16_3DNOW;
+ rgb32to15= rgb32to15_3DNOW;
+ rgb32to24= rgb32to24_3DNOW;
+ rgb24tobgr15= rgb24tobgr15_3DNOW;
+ rgb24tobgr16= rgb24tobgr16_3DNOW;
+ rgb24tobgr24= rgb24tobgr24_3DNOW;
+ rgb32tobgr32= rgb32tobgr32_3DNOW;
+ rgb32tobgr16= rgb32tobgr16_3DNOW;
+ rgb32tobgr15= rgb32tobgr15_3DNOW;
+ yv12toyuy2= yv12toyuy2_3DNOW;
+ yv12touyvy= yv12touyvy_3DNOW;
+ yuv422ptoyuy2= yuv422ptoyuy2_3DNOW;
+ yuy2toyv12= yuy2toyv12_3DNOW;
+// uyvytoyv12= uyvytoyv12_3DNOW;
+// yvu9toyv12= yvu9toyv12_3DNOW;
+ planar2x= planar2x_3DNOW;
+ rgb24toyv12= rgb24toyv12_3DNOW;
+ interleaveBytes= interleaveBytes_3DNOW;
+ vu9_to_vu12= vu9_to_vu12_3DNOW;
+ yvu9_to_yuy2= yvu9_to_yuy2_3DNOW;
+ }else if(params->cpu & SWS_CPU_CAPS_MMX){
+ rgb15to16= rgb15to16_MMX;
+ rgb15to24= rgb15to24_MMX;
+ rgb15to32= rgb15to32_MMX;
+ rgb16to24= rgb16to24_MMX;
+ rgb16to32= rgb16to32_MMX;
+ rgb16to15= rgb16to15_MMX;
+ rgb24to16= rgb24to16_MMX;
+ rgb24to15= rgb24to15_MMX;
+ rgb24to32= rgb24to32_MMX;
+ rgb32to16= rgb32to16_MMX;
+ rgb32to15= rgb32to15_MMX;
+ rgb32to24= rgb32to24_MMX;
+ rgb24tobgr15= rgb24tobgr15_MMX;
+ rgb24tobgr16= rgb24tobgr16_MMX;
+ rgb24tobgr24= rgb24tobgr24_MMX;
+ rgb32tobgr32= rgb32tobgr32_MMX;
+ rgb32tobgr16= rgb32tobgr16_MMX;
+ rgb32tobgr15= rgb32tobgr15_MMX;
+ yv12toyuy2= yv12toyuy2_MMX;
+ yv12touyvy= yv12touyvy_MMX;
+ yuv422ptoyuy2= yuv422ptoyuy2_MMX;
+ yuy2toyv12= yuy2toyv12_MMX;
+// uyvytoyv12= uyvytoyv12_MMX;
+// yvu9toyv12= yvu9toyv12_MMX;
+ planar2x= planar2x_MMX;
+ rgb24toyv12= rgb24toyv12_MMX;
+ interleaveBytes= interleaveBytes_MMX;
+ vu9_to_vu12= vu9_to_vu12_MMX;
+ yvu9_to_yuy2= yvu9_to_yuy2_MMX;
+ }else
+#endif /* ARCH_X86_32 || ARCH_X86_64 */
+ {
+ rgb15to16= rgb15to16_C;
+ rgb15to24= rgb15to24_C;
+ rgb15to32= rgb15to32_C;
+ rgb16to24= rgb16to24_C;
+ rgb16to32= rgb16to32_C;
+ rgb16to15= rgb16to15_C;
+ rgb24to16= rgb24to16_C;
+ rgb24to15= rgb24to15_C;
+ rgb24to32= rgb24to32_C;
+ rgb32to16= rgb32to16_C;
+ rgb32to15= rgb32to15_C;
+ rgb32to24= rgb32to24_C;
+ rgb24tobgr15= rgb24tobgr15_C;
+ rgb24tobgr16= rgb24tobgr16_C;
+ rgb24tobgr24= rgb24tobgr24_C;
+ rgb32tobgr32= rgb32tobgr32_C;
+ rgb32tobgr16= rgb32tobgr16_C;
+ rgb32tobgr15= rgb32tobgr15_C;
+ yv12toyuy2= yv12toyuy2_C;
+ yv12touyvy= yv12touyvy_C;
+ yuv422ptoyuy2= yuv422ptoyuy2_C;
+ yuy2toyv12= yuy2toyv12_C;
+// uyvytoyv12= uyvytoyv12_C;
+// yvu9toyv12= yvu9toyv12_C;
+ planar2x= planar2x_C;
+ rgb24toyv12= rgb24toyv12_C;
+ interleaveBytes= interleaveBytes_C;
+ vu9_to_vu12= vu9_to_vu12_C;
+ yvu9_to_yuy2= yvu9_to_yuy2_C;
+ }
+}
+
+/**
+ * Palette is assumed to contain BGR32
+ */
+void palette8torgb32(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette)
+{
+ long i;
+
+/*
+ for(i=0; i<num_pixels; i++)
+ ((unsigned *)dst)[i] = ((unsigned *)palette)[ src[i] ];
+*/
+
+ for(i=0; i<num_pixels; i++)
+ {
+ #ifdef WORDS_BIGENDIAN
+ dst[3]= palette[ src[i]*4+2 ];
+ dst[2]= palette[ src[i]*4+1 ];
+ dst[1]= palette[ src[i]*4+0 ];
+ #else
+ //FIXME slow?
+ dst[0]= palette[ src[i]*4+2 ];
+ dst[1]= palette[ src[i]*4+1 ];
+ dst[2]= palette[ src[i]*4+0 ];
+// dst[3]= 0; /* do we need this cleansing? */
+ #endif
+ dst+= 4;
+ }
+}
+
+void palette8tobgr32(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette)
+{
+ long i;
+ for(i=0; i<num_pixels; i++)
+ {
+ #ifdef WORDS_BIGENDIAN
+ dst[3]= palette[ src[i]*4+0 ];
+ dst[2]= palette[ src[i]*4+1 ];
+ dst[1]= palette[ src[i]*4+2 ];
+ #else
+ //FIXME slow?
+ dst[0]= palette[ src[i]*4+0 ];
+ dst[1]= palette[ src[i]*4+1 ];
+ dst[2]= palette[ src[i]*4+2 ];
+// dst[3]= 0; /* do we need this cleansing? */
+ #endif
+
+ dst+= 4;
+ }
+}
+
+/**
+ * Palette is assumed to contain BGR32.
+ */
+void palette8torgb24(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette)
+{
+ long i;
+/*
+ writes 1 byte too much and might cause alignment issues on some architectures?
+ for(i=0; i<num_pixels; i++)
+ ((unsigned *)(&dst[i*3])) = ((unsigned *)palette)[ src[i] ];
+*/
+ for(i=0; i<num_pixels; i++)
+ {
+ //FIXME slow?
+ dst[0]= palette[ src[i]*4+2 ];
+ dst[1]= palette[ src[i]*4+1 ];
+ dst[2]= palette[ src[i]*4+0 ];
+ dst+= 3;
+ }
+}
+
+void palette8tobgr24(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette)
+{
+ long i;
+/*
+ writes 1 byte too much and might cause alignment issues on some architectures?
+ for(i=0; i<num_pixels; i++)
+ ((unsigned *)(&dst[i*3])) = ((unsigned *)palette)[ src[i] ];
+*/
+ for(i=0; i<num_pixels; i++)
+ {
+ //FIXME slow?
+ dst[0]= palette[ src[i]*4+0 ];
+ dst[1]= palette[ src[i]*4+1 ];
+ dst[2]= palette[ src[i]*4+2 ];
+ dst+= 3;
+ }
+}
+
+/**
+ * Palette is assumed to contain BGR16, see rgb32to16 to convert the palette.
+ */
+void palette8torgb16(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette)
+{
+ long i;
+ for (i=0; i<num_pixels; i++)
+ ((uint16_t *)dst)[i] = ((const uint16_t *)palette)[src[i]];
+}
+void palette8tobgr16(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette)
+{
+ long i;
+ for (i=0; i<num_pixels; i++)
+ ((uint16_t *)dst)[i] = bswap_16(((const uint16_t *)palette)[src[i]]);
+}
+
+/**
+ * Palette is assumed to contain BGR15, see rgb32to15 to convert the palette.
+ */
+void palette8torgb15(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette)
+{
+ long i;
+ for (i=0; i<num_pixels; i++)
+ ((uint16_t *)dst)[i] = ((const uint16_t *)palette)[src[i]];
+}
+void palette8tobgr15(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette)
+{
+ long i;
+ for (i=0; i<num_pixels; i++)
+ ((uint16_t *)dst)[i] = bswap_16(((const uint16_t *)palette)[src[i]]);
+}
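+
+/*
+ * Usage sketch for the 16-bit palette converters above, with illustrative
+ * caller-side names (pal32, pal16, idx, dst16, num_pixels); rgb32to16 is a
+ * function pointer, so sws_rgb2rgb_init() must have been called first:
+ *
+ *     uint8_t pal16[256*2];
+ *     rgb32to16(pal32, pal16, 256*4);                  // BGR32 palette -> BGR16
+ *     palette8torgb16(idx, dst16, num_pixels, pal16);  // 8-bit indices -> RGB16
+ */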
+
+void rgb32tobgr24(const uint8_t *src, uint8_t *dst, stride_t src_size)
+{
+ long i;
+ stride_t num_pixels = src_size >> 2;
+ for (i=0; i<num_pixels; i++)
+ {
+ #ifdef WORDS_BIGENDIAN
+ /* RGB32 (= A,B,G,R) -> BGR24 (= B,G,R) */
+ dst[3*i + 0] = src[4*i + 1];
+ dst[3*i + 1] = src[4*i + 2];
+ dst[3*i + 2] = src[4*i + 3];
+ #else
+ dst[3*i + 0] = src[4*i + 2];
+ dst[3*i + 1] = src[4*i + 1];
+ dst[3*i + 2] = src[4*i + 0];
+ #endif
+ }
+}
+
+void rgb24tobgr32(const uint8_t *src, uint8_t *dst, stride_t src_size)
+{
+ long i;
+ for (i=0; 3*i<src_size; i++)
+ {
+ #ifdef WORDS_BIGENDIAN
+ /* RGB24 (= R,G,B) -> BGR32 (= A,R,G,B) */
+ dst[4*i + 0] = 255;
+ dst[4*i + 1] = src[3*i + 0];
+ dst[4*i + 2] = src[3*i + 1];
+ dst[4*i + 3] = src[3*i + 2];
+ #else
+ dst[4*i + 0] = src[3*i + 2];
+ dst[4*i + 1] = src[3*i + 1];
+ dst[4*i + 2] = src[3*i + 0];
+ dst[4*i + 3] = 255;
+ #endif
+ }
+}
+
+void rgb16tobgr32(const uint8_t *src, uint8_t *dst, stride_t src_size)
+{
+ const uint16_t *end;
+ uint8_t *d = dst;
+ const uint16_t *s = (const uint16_t *)src;
+ end = s + src_size/2;
+ while (s < end)
+ {
+ register uint16_t bgr;
+ bgr = *s++;
+ #ifdef WORDS_BIGENDIAN
+ *d++ = 255;
+ *d++ = (bgr&0x1F)<<3;
+ *d++ = (bgr&0x7E0)>>3;
+ *d++ = (bgr&0xF800)>>8;
+ #else
+ *d++ = (bgr&0xF800)>>8;
+ *d++ = (bgr&0x7E0)>>3;
+ *d++ = (bgr&0x1F)<<3;
+ *d++ = 255;
+ #endif
+ }
+}
+
+void rgb16tobgr24(const uint8_t *src, uint8_t *dst, stride_t src_size)
+{
+ const uint16_t *end;
+ uint8_t *d = dst;
+ const uint16_t *s = (const uint16_t *)src;
+ end = s + src_size/2;
+ while (s < end)
+ {
+ register uint16_t bgr;
+ bgr = *s++;
+ *d++ = (bgr&0xF800)>>8;
+ *d++ = (bgr&0x7E0)>>3;
+ *d++ = (bgr&0x1F)<<3;
+ }
+}
+
+void rgb16tobgr16(const uint8_t *src, uint8_t *dst, stride_t src_size)
+{
+ long i;
+ stride_t num_pixels = src_size >> 1;
+
+ for(i=0; i<num_pixels; i++)
+ {
+ unsigned b,g,r;
+ register uint16_t rgb;
+ rgb = ((uint16_t*)src)[i];
+ r = rgb&0x1F;
+ g = (rgb&0x7E0)>>5;
+ b = (rgb&0xF800)>>11;
+ ((uint16_t*)dst)[i] = (b&0x1F) | ((g&0x3F)<<5) | ((r&0x1F)<<11);
+ }
+}
+
+void rgb16tobgr15(const uint8_t *src, uint8_t *dst, stride_t src_size)
+{
+ long i;
+ stride_t num_pixels = src_size >> 1;
+
+ for(i=0; i<num_pixels; i++)
+ {
+ unsigned b,g,r;
+ register uint16_t rgb;
+ rgb = ((uint16_t*)src)[i];
+ r = rgb&0x1F;
+ g = (rgb&0x7E0)>>5;
+ b = (rgb&0xF800)>>11;
+ ((uint16_t*)dst)[i] = (b&0x1F) | ((g&0x1F)<<5) | ((r&0x1F)<<10);
+ }
+}
+
+void rgb15tobgr32(const uint8_t *src, uint8_t *dst, stride_t src_size)
+{
+ const uint16_t *end;
+ uint8_t *d = dst;
+ const uint16_t *s = (const uint16_t *)src;
+ end = s + src_size/2;
+ while (s < end)
+ {
+ register uint16_t bgr;
+ bgr = *s++;
+ #ifdef WORDS_BIGENDIAN
+ *d++ = 255;
+ *d++ = (bgr&0x1F)<<3;
+ *d++ = (bgr&0x3E0)>>2;
+ *d++ = (bgr&0x7C00)>>7;
+ #else
+ *d++ = (bgr&0x7C00)>>7;
+ *d++ = (bgr&0x3E0)>>2;
+ *d++ = (bgr&0x1F)<<3;
+ *d++ = 255;
+ #endif
+ }
+}
+
+void rgb15tobgr24(const uint8_t *src, uint8_t *dst, stride_t src_size)
+{
+ const uint16_t *end;
+ uint8_t *d = dst;
+ const uint16_t *s = (const uint16_t *)src;
+ end = s + src_size/2;
+ while (s < end)
+ {
+ register uint16_t bgr;
+ bgr = *s++;
+ *d++ = (bgr&0x7C00)>>7;
+ *d++ = (bgr&0x3E0)>>2;
+ *d++ = (bgr&0x1F)<<3;
+ }
+}
+
+void rgb15tobgr16(const uint8_t *src, uint8_t *dst, stride_t src_size)
+{
+ long i;
+ stride_t num_pixels = src_size >> 1;
+
+ for(i=0; i<num_pixels; i++)
+ {
+ unsigned b,g,r;
+ register uint16_t rgb;
+ rgb = ((uint16_t*)src)[i];
+ r = rgb&0x1F;
+ g = (rgb&0x3E0)>>5;
+ b = (rgb&0x7C00)>>10;
+ ((uint16_t*)dst)[i] = (b&0x1F) | ((g&0x3F)<<5) | ((r&0x1F)<<11);
+ }
+}
+
+void rgb15tobgr15(const uint8_t *src, uint8_t *dst, stride_t src_size)
+{
+ long i;
+ stride_t num_pixels = src_size >> 1;
+
+ for(i=0; i<num_pixels; i++)
+ {
+ unsigned b,g,r;
+ register uint16_t rgb;
+ rgb = ((uint16_t*)src)[i];
+ r = rgb&0x1F;
+ g = (rgb&0x3E0)>>5;
+ b = (rgb&0x7C00)>>10;
+ ((uint16_t*)dst)[i] = (b&0x1F) | ((g&0x1F)<<5) | ((r&0x1F)<<10);
+ }
+}
+
+void rgb8tobgr8(const uint8_t *src, uint8_t *dst, stride_t src_size)
+{
+ long i;
+ stride_t num_pixels = src_size;
+ for (i=0; i<num_pixels; i++)
+ {
+ unsigned b,g,r;
+ register uint8_t rgb;
+ rgb = src[i];
+ r = (rgb&0x07);
+ g = (rgb&0x38)>>3;
+ b = (rgb&0xC0)>>6;
+ dst[i] = ((b<<1)&0x07) | ((g&0x07)<<3) | ((r&0x03)<<6);
+ }
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/rgb2rgb.h b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/rgb2rgb.h
new file mode 100644
index 000000000..3c6abaae6
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/rgb2rgb.h
@@ -0,0 +1,152 @@
+/*
+ * software RGB to RGB converter
+ * plus a software PAL8 to RGB converter
+ * Software YUV to YUV converter
+ * Software YUV to RGB converter
+ * Written by Nick Kurshev.
+ * palette & YUV & runtime CPU stuff by Michael (michaelni@gmx.at)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef SWSCALE_RGB2RGB_H
+#define SWSCALE_RGB2RGB_H
+
+#define FF_CSP_ONLY
+#include "ffImgfmt.h"
+
+/* A full collection of RGB to RGB(BGR) converters */
+extern void (*rgb24to32)(const uint8_t *src,uint8_t *dst,stride_t src_size);
+extern void (*rgb24to16)(const uint8_t *src,uint8_t *dst,stride_t src_size);
+extern void (*rgb24to15)(const uint8_t *src,uint8_t *dst,stride_t src_size);
+extern void (*rgb32to24)(const uint8_t *src,uint8_t *dst,stride_t src_size);
+extern void (*rgb32to16)(const uint8_t *src,uint8_t *dst,stride_t src_size);
+extern void (*rgb32to15)(const uint8_t *src,uint8_t *dst,stride_t src_size);
+extern void (*rgb15to16)(const uint8_t *src,uint8_t *dst,stride_t src_size);
+extern void (*rgb15to24)(const uint8_t *src,uint8_t *dst,stride_t src_size);
+extern void (*rgb15to32)(const uint8_t *src,uint8_t *dst,stride_t src_size);
+extern void (*rgb16to15)(const uint8_t *src,uint8_t *dst,stride_t src_size);
+extern void (*rgb16to24)(const uint8_t *src,uint8_t *dst,stride_t src_size);
+extern void (*rgb16to32)(const uint8_t *src,uint8_t *dst,stride_t src_size);
+extern void (*rgb24tobgr24)(const uint8_t *src, uint8_t *dst, stride_t src_size);
+extern void (*rgb24tobgr16)(const uint8_t *src, uint8_t *dst, stride_t src_size);
+extern void (*rgb24tobgr15)(const uint8_t *src, uint8_t *dst, stride_t src_size);
+extern void (*rgb32tobgr32)(const uint8_t *src, uint8_t *dst, stride_t src_size);
+extern void (*rgb32tobgr16)(const uint8_t *src, uint8_t *dst, stride_t src_size);
+extern void (*rgb32tobgr15)(const uint8_t *src, uint8_t *dst, stride_t src_size);
+
+extern void rgb24tobgr32(const uint8_t *src, uint8_t *dst, stride_t src_size);
+extern void rgb32tobgr24(const uint8_t *src, uint8_t *dst, stride_t src_size);
+extern void rgb16tobgr32(const uint8_t *src, uint8_t *dst, stride_t src_size);
+extern void rgb16tobgr24(const uint8_t *src, uint8_t *dst, stride_t src_size);
+extern void rgb16tobgr16(const uint8_t *src, uint8_t *dst, stride_t src_size);
+extern void rgb16tobgr15(const uint8_t *src, uint8_t *dst, stride_t src_size);
+extern void rgb15tobgr32(const uint8_t *src, uint8_t *dst, stride_t src_size);
+extern void rgb15tobgr24(const uint8_t *src, uint8_t *dst, stride_t src_size);
+extern void rgb15tobgr16(const uint8_t *src, uint8_t *dst, stride_t src_size);
+extern void rgb15tobgr15(const uint8_t *src, uint8_t *dst, stride_t src_size);
+extern void rgb8tobgr8(const uint8_t *src, uint8_t *dst, stride_t src_size);
+
+
+extern void palette8torgb32(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette);
+extern void palette8tobgr32(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette);
+extern void palette8torgb24(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette);
+extern void palette8tobgr24(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette);
+extern void palette8torgb16(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette);
+extern void palette8tobgr16(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette);
+extern void palette8torgb15(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette);
+extern void palette8tobgr15(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette);
+
+/**
+ * Height should be a multiple of 2 and width should be a multiple of 16.
+ * (If this is a problem for anyone then tell me, and I will fix it.)
+ * Chrominance data is only taken from every second line, others are ignored.
+ * FIXME: Write high quality version.
+ */
+//void uyvytoyv12(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
+
+/**
+ * Height should be a multiple of 2 and width should be a multiple of 16.
+ * (If this is a problem for anyone then tell me, and I will fix it.)
+ */
+extern void (*yv12toyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+ long width, long height,
+ stride_t lumStride, stride_t chromStride, stride_t dstStride);
+
+/**
+ * Width should be a multiple of 16.
+ */
+extern void (*yuv422ptoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+ long width, long height,
+ stride_t lumStride, stride_t chromStride, stride_t dstStride);
+
+/**
+ * Height should be a multiple of 2 and width should be a multiple of 16.
+ * (If this is a problem for anyone then tell me, and I will fix it.)
+ */
+extern void (*yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
+ long width, long height,
+ stride_t lumStride, stride_t chromStride, stride_t srcStride);
+
+/**
+ * Height should be a multiple of 2 and width should be a multiple of 16.
+ * (If this is a problem for anyone then tell me, and I will fix it.)
+ */
+extern void (*yv12touyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+ long width, long height,
+ stride_t lumStride, stride_t chromStride, stride_t dstStride);
+
+extern void (*yv12tovyuy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+ long width, long height,
+ stride_t lumStride, stride_t chromStride, stride_t dstStride);
+
+extern void (*yv12toyvyu)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+ long width, long height,
+ stride_t lumStride, stride_t chromStride, stride_t dstStride);
+
+/**
+ * Height should be a multiple of 2 and width should be a multiple of 2.
+ * (If this is a problem for anyone then tell me, and I will fix it.)
+ * Chrominance data is only taken from every second line, others are ignored.
+ * FIXME: Write high quality version.
+ */
+extern void (*rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
+ long width, long height,
+ stride_t lumStride, stride_t chromStride, stride_t srcStride);
+extern void (*planar2x)(const uint8_t *src, uint8_t *dst, long width, long height,
+ stride_t srcStride, stride_t dstStride);
+
+extern void (*interleaveBytes)(uint8_t *src1, uint8_t *src2, uint8_t *dst,
+ long width, long height, stride_t src1Stride,
+ stride_t src2Stride, stride_t dstStride);
+
+extern void (*vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2,
+ uint8_t *dst1, uint8_t *dst2,
+ long width, long height,
+ stride_t srcStride1, stride_t srcStride2,
+ stride_t dstStride1, stride_t dstStride2);
+
+extern void (*yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2, const uint8_t *src3,
+ uint8_t *dst,
+ long width, long height,
+ stride_t srcStride1, stride_t srcStride2,
+ stride_t srcStride3, stride_t dstStride);
+
+struct SwsParams;
+void sws_rgb2rgb_init(struct SwsParams *params);
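+
+/*
+ * Usage sketch for the planar-to-packed converters above, with illustrative
+ * caller-side names (params, y_plane, u_plane, v_plane, yuy2_out and the
+ * strides); the function pointers are only valid after sws_rgb2rgb_init():
+ *
+ *     sws_rgb2rgb_init(params);                      // picks C/MMX/3DNow! versions
+ *     yv12toyuy2(y_plane, u_plane, v_plane, yuy2_out,
+ *                width, height,                      // width %16 == 0, height %2 == 0
+ *                y_stride, uv_stride, yuy2_stride);
+ */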
+
+#endif /* SWSCALE_RGB2RGB_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/rgb2rgb_template.c b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/rgb2rgb_template.c
new file mode 100644
index 000000000..878de0fe6
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/rgb2rgb_template.c
@@ -0,0 +1,2750 @@
+/*
+ * software RGB to RGB converter
+ * plus a software PAL8 to RGB converter
+ * software YUV to YUV converter
+ * software YUV to RGB converter
+ * Written by Nick Kurshev.
+ * palette & YUV & runtime CPU stuff by Michael (michaelni@gmx.at)
+ * lot of big-endian byte order fixes by Alex Beregszaszi
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * The C code (not assembly, MMX, ...) of this file can be used
+ * under the LGPL license.
+ */
+
+#include <stddef.h>
+
+#undef PREFETCH
+#undef MOVNTQ
+#undef EMMS
+#undef SFENCE
+#undef MMREG_SIZE
+#undef PREFETCHW
+#undef PAVGB
+
+#if HAVE_SSE2
+#define MMREG_SIZE 16
+#else
+#define MMREG_SIZE 8
+#endif
+
+#if HAVE_AMD3DNOW
+#define PREFETCH "prefetch"
+#define PREFETCHW "prefetchw"
+#define PAVGB "pavgusb"
+#elif HAVE_MMX2
+#define PREFETCH "prefetchnta"
+#define PREFETCHW "prefetcht0"
+#define PAVGB "pavgb"
+#else
+#define PREFETCH "/nop"
+#define PREFETCHW "/nop"
+#endif
+
+#if HAVE_AMD3DNOW
+/* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
+#define EMMS "femms"
+#else
+#define EMMS "emms"
+#endif
+
+#if HAVE_MMX2
+#define MOVNTQ "movntq"
+#define SFENCE "sfence"
+#else
+#define MOVNTQ "movq"
+#define SFENCE "/nop"
+#endif
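+
+/* Per-CPU-capability macros so the same template expands to 3DNow!, MMX2 or
+   plain MMX code: PREFETCH/PREFETCHW select the prefetch hints, PAVGB the
+   packed byte average, and MOVNTQ a non-temporal store (hence the SFENCE
+   before EMMS); "/nop" is just a placeholder where no instruction applies. */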
+
+static inline void RENAME(rgb24to32)(const uint8_t *src,uint8_t *dst,stride_t src_size)
+{
+ uint8_t *dest = dst;
+ const uint8_t *s = src;
+ const uint8_t *end;
+ #if HAVE_MMX
+ const uint8_t *mm_end;
+ #endif
+ end = s + src_size;
+ #if HAVE_MMX
+ __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
+ mm_end = end - 23;
+ __asm__ volatile("movq %0, %%mm7"::"m"(mask32a):"memory");
+ while (s < mm_end)
+ {
+ __asm__ volatile(
+ PREFETCH" 32%1 \n\t"
+ "movd %1, %%mm0 \n\t"
+ "punpckldq 3%1, %%mm0 \n\t"
+ "movd 6%1, %%mm1 \n\t"
+ "punpckldq 9%1, %%mm1 \n\t"
+ "movd 12%1, %%mm2 \n\t"
+ "punpckldq 15%1, %%mm2 \n\t"
+ "movd 18%1, %%mm3 \n\t"
+ "punpckldq 21%1, %%mm3 \n\t"
+ "por %%mm7, %%mm0 \n\t"
+ "por %%mm7, %%mm1 \n\t"
+ "por %%mm7, %%mm2 \n\t"
+ "por %%mm7, %%mm3 \n\t"
+ MOVNTQ" %%mm0, %0 \n\t"
+ MOVNTQ" %%mm1, 8%0 \n\t"
+ MOVNTQ" %%mm2, 16%0 \n\t"
+ MOVNTQ" %%mm3, 24%0"
+ :"=m"(*dest)
+ :"m"(*s)
+ :"memory");
+ dest += 32;
+ s += 24;
+ }
+ __asm__ volatile(SFENCE:::"memory");
+ __asm__ volatile(EMMS:::"memory");
+ #endif
+ while (s < end)
+ {
+ #ifdef WORDS_BIGENDIAN
+ /* RGB24 (= R,G,B) -> RGB32 (= A,B,G,R) */
+ *dest++ = 255;
+ *dest++ = s[2];
+ *dest++ = s[1];
+ *dest++ = s[0];
+ s+=3;
+ #else
+ *dest++ = *s++;
+ *dest++ = *s++;
+ *dest++ = *s++;
+ *dest++ = 255;
+ #endif
+ }
+}
+
+static inline void RENAME(rgb32to24)(const uint8_t *src,uint8_t *dst,stride_t src_size)
+{
+ uint8_t *dest = dst;
+ const uint8_t *s = src;
+ const uint8_t *end;
+#if HAVE_MMX
+ const uint8_t *mm_end;
+#endif
+ end = s + src_size;
+#if HAVE_MMX
+ __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
+ mm_end = end - 31;
+ while (s < mm_end)
+ {
+ __asm__ volatile(
+ PREFETCH" 32%1 \n\t"
+ "movq %1, %%mm0 \n\t"
+ "movq 8%1, %%mm1 \n\t"
+ "movq 16%1, %%mm4 \n\t"
+ "movq 24%1, %%mm5 \n\t"
+ "movq %%mm0, %%mm2 \n\t"
+ "movq %%mm1, %%mm3 \n\t"
+ "movq %%mm4, %%mm6 \n\t"
+ "movq %%mm5, %%mm7 \n\t"
+ "psrlq $8, %%mm2 \n\t"
+ "psrlq $8, %%mm3 \n\t"
+ "psrlq $8, %%mm6 \n\t"
+ "psrlq $8, %%mm7 \n\t"
+ "pand %2, %%mm0 \n\t"
+ "pand %2, %%mm1 \n\t"
+ "pand %2, %%mm4 \n\t"
+ "pand %2, %%mm5 \n\t"
+ "pand %3, %%mm2 \n\t"
+ "pand %3, %%mm3 \n\t"
+ "pand %3, %%mm6 \n\t"
+ "pand %3, %%mm7 \n\t"
+ "por %%mm2, %%mm0 \n\t"
+ "por %%mm3, %%mm1 \n\t"
+ "por %%mm6, %%mm4 \n\t"
+ "por %%mm7, %%mm5 \n\t"
+
+ "movq %%mm1, %%mm2 \n\t"
+ "movq %%mm4, %%mm3 \n\t"
+ "psllq $48, %%mm2 \n\t"
+ "psllq $32, %%mm3 \n\t"
+ "pand %4, %%mm2 \n\t"
+ "pand %5, %%mm3 \n\t"
+ "por %%mm2, %%mm0 \n\t"
+ "psrlq $16, %%mm1 \n\t"
+ "psrlq $32, %%mm4 \n\t"
+ "psllq $16, %%mm5 \n\t"
+ "por %%mm3, %%mm1 \n\t"
+ "pand %6, %%mm5 \n\t"
+ "por %%mm5, %%mm4 \n\t"
+
+ MOVNTQ" %%mm0, %0 \n\t"
+ MOVNTQ" %%mm1, 8%0 \n\t"
+ MOVNTQ" %%mm4, 16%0"
+ :"=m"(*dest)
+ :"m"(*s),"m"(mask24l),
+ "m"(mask24h),"m"(mask24hh),"m"(mask24hhh),"m"(mask24hhhh)
+ :"memory");
+ dest += 24;
+ s += 32;
+ }
+ __asm__ volatile(SFENCE:::"memory");
+ __asm__ volatile(EMMS:::"memory");
+#endif
+ while (s < end)
+ {
+#ifdef WORDS_BIGENDIAN
+ /* RGB32 (= A,B,G,R) -> RGB24 (= R,G,B) */
+ s++;
+ dest[2] = *s++;
+ dest[1] = *s++;
+ dest[0] = *s++;
+ dest += 3;
+#else
+ *dest++ = *s++;
+ *dest++ = *s++;
+ *dest++ = *s++;
+ s++;
+#endif
+ }
+}
+
+/*
+ original by Strepto/Astral
+ ported to gcc & bugfixed: A'rpi
+ MMX2, 3DNOW optimization by Nick Kurshev
+ 32-bit C version and the and&add trick by Michael Niedermayer
+*/
+static inline void RENAME(rgb15to16)(const uint8_t *src,uint8_t *dst,stride_t src_size)
+{
+ register const uint8_t* s=src;
+ register uint8_t* d=dst;
+ register const uint8_t *end;
+ const uint8_t *mm_end;
+ end = s + src_size;
+#if HAVE_MMX
+ __asm__ volatile(PREFETCH" %0"::"m"(*s));
+ __asm__ volatile("movq %0, %%mm4"::"m"(mask15s));
+ mm_end = end - 15;
+ while (s<mm_end)
+ {
+ __asm__ volatile(
+ PREFETCH" 32%1 \n\t"
+ "movq %1, %%mm0 \n\t"
+ "movq 8%1, %%mm2 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "pand %%mm4, %%mm0 \n\t"
+ "pand %%mm4, %%mm2 \n\t"
+ "paddw %%mm1, %%mm0 \n\t"
+ "paddw %%mm3, %%mm2 \n\t"
+ MOVNTQ" %%mm0, %0 \n\t"
+ MOVNTQ" %%mm2, 8%0"
+ :"=m"(*d)
+ :"m"(*s)
+ );
+ d+=16;
+ s+=16;
+ }
+ __asm__ volatile(SFENCE:::"memory");
+ __asm__ volatile(EMMS:::"memory");
+#endif
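+ /* Scalar tail: the "and&add" trick widens RGB15 to RGB16 in one add;
+    x&0x7FFF7FFF keeps two RGB15 pixels, x&0x7FE07FE0 keeps their R and G
+    fields, and adding the two doubles (shifts up by one) R and G while B
+    stays put: 0RRRRRGGGGGBBBBB -> RRRRRGGGGG0BBBBB. */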
+ mm_end = end - 3;
+ while (s < mm_end)
+ {
+ register unsigned x= *((const uint32_t *)s);
+ *((uint32_t *)d) = (x&0x7FFF7FFF) + (x&0x7FE07FE0);
+ d+=4;
+ s+=4;
+ }
+ if (s < end)
+ {
+ register unsigned short x= *((const uint16_t *)s);
+ *((uint16_t *)d) = (x&0x7FFF) + (x&0x7FE0);
+ }
+}
+
+static inline void RENAME(rgb16to15)(const uint8_t *src,uint8_t *dst,stride_t src_size)
+{
+ register const uint8_t* s=src;
+ register uint8_t* d=dst;
+ register const uint8_t *end;
+ const uint8_t *mm_end;
+ end = s + src_size;
+#if HAVE_MMX
+ __asm__ volatile(PREFETCH" %0"::"m"(*s));
+ __asm__ volatile("movq %0, %%mm7"::"m"(mask15rg));
+ __asm__ volatile("movq %0, %%mm6"::"m"(mask15b));
+ mm_end = end - 15;
+ while (s<mm_end)
+ {
+ __asm__ volatile(
+ PREFETCH" 32%1 \n\t"
+ "movq %1, %%mm0 \n\t"
+ "movq 8%1, %%mm2 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "psrlq $1, %%mm0 \n\t"
+ "psrlq $1, %%mm2 \n\t"
+ "pand %%mm7, %%mm0 \n\t"
+ "pand %%mm7, %%mm2 \n\t"
+ "pand %%mm6, %%mm1 \n\t"
+ "pand %%mm6, %%mm3 \n\t"
+ "por %%mm1, %%mm0 \n\t"
+ "por %%mm3, %%mm2 \n\t"
+ MOVNTQ" %%mm0, %0 \n\t"
+ MOVNTQ" %%mm2, 8%0"
+ :"=m"(*d)
+ :"m"(*s)
+ );
+ d+=16;
+ s+=16;
+ }
+ __asm__ volatile(SFENCE:::"memory");
+ __asm__ volatile(EMMS:::"memory");
+#endif
+ mm_end = end - 3;
+ while (s < mm_end)
+ {
+ register uint32_t x= *((const uint32_t*)s);
+ *((uint32_t *)d) = ((x>>1)&0x7FE07FE0) | (x&0x001F001F);
+ s+=4;
+ d+=4;
+ }
+ if (s < end)
+ {
+ register uint16_t x= *((const uint16_t*)s);
+ *((uint16_t *)d) = ((x>>1)&0x7FE0) | (x&0x001F);
+ s+=2;
+ d+=2;
+ }
+}
+
+static inline void RENAME(rgb32to16)(const uint8_t *src, uint8_t *dst, stride_t src_size)
+{
+ const uint8_t *s = src;
+ const uint8_t *end;
+#if HAVE_MMX
+ const uint8_t *mm_end;
+#endif
+ uint16_t *d = (uint16_t *)dst;
+ end = s + src_size;
+#if HAVE_MMX
+ mm_end = end - 15;
+#if 1 //is faster only if multiplies are reasonably fast (FIXME figure out on which CPUs this is faster, on Athlon it is slightly faster)
+ __asm__ volatile(
+ "movq %3, %%mm5 \n\t"
+ "movq %4, %%mm6 \n\t"
+ "movq %5, %%mm7 \n\t"
+ "jmp 2f \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ PREFETCH" 32(%1) \n\t"
+ "movd (%1), %%mm0 \n\t"
+ "movd 4(%1), %%mm3 \n\t"
+ "punpckldq 8(%1), %%mm0 \n\t"
+ "punpckldq 12(%1), %%mm3 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm3, %%mm4 \n\t"
+ "pand %%mm6, %%mm0 \n\t"
+ "pand %%mm6, %%mm3 \n\t"
+ "pmaddwd %%mm7, %%mm0 \n\t"
+ "pmaddwd %%mm7, %%mm3 \n\t"
+ "pand %%mm5, %%mm1 \n\t"
+ "pand %%mm5, %%mm4 \n\t"
+ "por %%mm1, %%mm0 \n\t"
+ "por %%mm4, %%mm3 \n\t"
+ "psrld $5, %%mm0 \n\t"
+ "pslld $11, %%mm3 \n\t"
+ "por %%mm3, %%mm0 \n\t"
+ MOVNTQ" %%mm0, (%0) \n\t"
+ "add $16, %1 \n\t"
+ "add $8, %0 \n\t"
+ "2: \n\t"
+ "cmp %2, %1 \n\t"
+ " jb 1b \n\t"
+ : "+r" (d), "+r"(s)
+ : "r" (mm_end), "m" (mask3216g), "m" (mask3216br), "m" (mul3216)
+ );
+#else
+ __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
+ __asm__ volatile(
+ "movq %0, %%mm7 \n\t"
+ "movq %1, %%mm6 \n\t"
+ ::"m"(red_16mask),"m"(green_16mask));
+ while (s < mm_end)
+ {
+ __asm__ volatile(
+ PREFETCH" 32%1 \n\t"
+ "movd %1, %%mm0 \n\t"
+ "movd 4%1, %%mm3 \n\t"
+ "punpckldq 8%1, %%mm0 \n\t"
+ "punpckldq 12%1, %%mm3 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm0, %%mm2 \n\t"
+ "movq %%mm3, %%mm4 \n\t"
+ "movq %%mm3, %%mm5 \n\t"
+ "psrlq $3, %%mm0 \n\t"
+ "psrlq $3, %%mm3 \n\t"
+ "pand %2, %%mm0 \n\t"
+ "pand %2, %%mm3 \n\t"
+ "psrlq $5, %%mm1 \n\t"
+ "psrlq $5, %%mm4 \n\t"
+ "pand %%mm6, %%mm1 \n\t"
+ "pand %%mm6, %%mm4 \n\t"
+ "psrlq $8, %%mm2 \n\t"
+ "psrlq $8, %%mm5 \n\t"
+ "pand %%mm7, %%mm2 \n\t"
+ "pand %%mm7, %%mm5 \n\t"
+ "por %%mm1, %%mm0 \n\t"
+ "por %%mm4, %%mm3 \n\t"
+ "por %%mm2, %%mm0 \n\t"
+ "por %%mm5, %%mm3 \n\t"
+ "psllq $16, %%mm3 \n\t"
+ "por %%mm3, %%mm0 \n\t"
+ MOVNTQ" %%mm0, %0 \n\t"
+ :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory");
+ d += 4;
+ s += 16;
+ }
+#endif
+ __asm__ volatile(SFENCE:::"memory");
+ __asm__ volatile(EMMS:::"memory");
+#endif
+ while (s < end)
+ {
+ register int rgb = *(const uint32_t*)s; s += 4;
+ *d++ = ((rgb&0xFF)>>3) + ((rgb&0xFC00)>>5) + ((rgb&0xF80000)>>8);
+ }
+}
+
+static inline void RENAME(rgb32tobgr16)(const uint8_t *src, uint8_t *dst, stride_t src_size)
+{
+ const uint8_t *s = src;
+ const uint8_t *end;
+#if HAVE_MMX
+ const uint8_t *mm_end;
+#endif
+ uint16_t *d = (uint16_t *)dst;
+ end = s + src_size;
+#if HAVE_MMX
+ __asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
+ __asm __volatile(
+ "movq %0, %%mm7\n\t"
+ "movq %1, %%mm6\n\t"
+ ::"m"(red_16mask),"m"(green_16mask));
+ mm_end = end - 15;
+ while(s < mm_end)
+ {
+ __asm __volatile(
+ PREFETCH" 32%1\n\t"
+ "movd %1, %%mm0\n\t"
+ "movd 4%1, %%mm3\n\t"
+ "punpckldq 8%1, %%mm0\n\t"
+ "punpckldq 12%1, %%mm3\n\t"
+ "movq %%mm0, %%mm1\n\t"
+ "movq %%mm0, %%mm2\n\t"
+ "movq %%mm3, %%mm4\n\t"
+ "movq %%mm3, %%mm5\n\t"
+ "psllq $8, %%mm0\n\t"
+ "psllq $8, %%mm3\n\t"
+ "pand %%mm7, %%mm0\n\t"
+ "pand %%mm7, %%mm3\n\t"
+ "psrlq $5, %%mm1\n\t"
+ "psrlq $5, %%mm4\n\t"
+ "pand %%mm6, %%mm1\n\t"
+ "pand %%mm6, %%mm4\n\t"
+ "psrlq $19, %%mm2\n\t"
+ "psrlq $19, %%mm5\n\t"
+ "pand %2, %%mm2\n\t"
+ "pand %2, %%mm5\n\t"
+ "por %%mm1, %%mm0\n\t"
+ "por %%mm4, %%mm3\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "por %%mm5, %%mm3\n\t"
+ "psllq $16, %%mm3\n\t"
+ "por %%mm3, %%mm0\n\t"
+ MOVNTQ" %%mm0, %0\n\t"
+ :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory");
+ d += 4;
+ s += 16;
+ }
+ __asm __volatile(SFENCE:::"memory");
+ __asm __volatile(EMMS:::"memory");
+#endif
+ while(s < end)
+ {
+ register int rgb = *(uint32_t*)s; s += 4;
+ *d++ = ((rgb&0xF8)<<8) + ((rgb&0xFC00)>>5) + ((rgb&0xF80000)>>19);
+ }
+}
+
+static inline void RENAME(rgb32to15)(const uint8_t *src, uint8_t *dst, stride_t src_size)
+{
+ const uint8_t *s = src;
+ const uint8_t *end;
+#if HAVE_MMX
+ const uint8_t *mm_end;
+#endif
+ uint16_t *d = (uint16_t *)dst;
+ end = s + src_size;
+#if HAVE_MMX
+ mm_end = end - 15;
+#if 1 //is faster only if multiplies are reasonably fast (FIXME figure out on which CPUs this is faster, on Athlon it is slightly faster)
+ asm volatile(
+ "movq %3, %%mm5 \n\t"
+ "movq %4, %%mm6 \n\t"
+ "movq %5, %%mm7 \n\t"
+ ".balign 16 \n\t"
+ "1: \n\t"
+ PREFETCH" 32(%1) \n\t"
+ "movd (%1), %%mm0 \n\t"
+ "movd 4(%1), %%mm3 \n\t"
+ "punpckldq 8(%1), %%mm0 \n\t"
+ "punpckldq 12(%1), %%mm3 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm3, %%mm4 \n\t"
+ "pand %%mm6, %%mm0 \n\t"
+ "pand %%mm6, %%mm3 \n\t"
+ "pmaddwd %%mm7, %%mm0 \n\t"
+ "pmaddwd %%mm7, %%mm3 \n\t"
+ "pand %%mm5, %%mm1 \n\t"
+ "pand %%mm5, %%mm4 \n\t"
+ "por %%mm1, %%mm0 \n\t"
+ "por %%mm4, %%mm3 \n\t"
+ "psrld $6, %%mm0 \n\t"
+ "pslld $10, %%mm3 \n\t"
+ "por %%mm3, %%mm0 \n\t"
+ MOVNTQ" %%mm0, (%0) \n\t"
+ "add $16, %1 \n\t"
+ "add $8, %0 \n\t"
+ "cmp %2, %1 \n\t"
+ " jb 1b \n\t"
+ : "+r" (d), "+r"(s)
+ : "r" (mm_end), "m" (mask3215g), "m" (mask3216br), "m" (mul3215)
+ );
+#else
+ __asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
+ __asm __volatile(
+ "movq %0, %%mm7\n\t"
+ "movq %1, %%mm6\n\t"
+ ::"m"(red_15mask),"m"(green_15mask));
+ while(s < mm_end)
+ {
+ __asm __volatile(
+ PREFETCH" 32%1\n\t"
+ "movd %1, %%mm0\n\t"
+ "movd 4%1, %%mm3\n\t"
+ "punpckldq 8%1, %%mm0\n\t"
+ "punpckldq 12%1, %%mm3\n\t"
+ "movq %%mm0, %%mm1\n\t"
+ "movq %%mm0, %%mm2\n\t"
+ "movq %%mm3, %%mm4\n\t"
+ "movq %%mm3, %%mm5\n\t"
+ "psrlq $3, %%mm0\n\t"
+ "psrlq $3, %%mm3\n\t"
+ "pand %2, %%mm0\n\t"
+ "pand %2, %%mm3\n\t"
+ "psrlq $6, %%mm1\n\t"
+ "psrlq $6, %%mm4\n\t"
+ "pand %%mm6, %%mm1\n\t"
+ "pand %%mm6, %%mm4\n\t"
+ "psrlq $9, %%mm2\n\t"
+ "psrlq $9, %%mm5\n\t"
+ "pand %%mm7, %%mm2\n\t"
+ "pand %%mm7, %%mm5\n\t"
+ "por %%mm1, %%mm0\n\t"
+ "por %%mm4, %%mm3\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "por %%mm5, %%mm3\n\t"
+ "psllq $16, %%mm3\n\t"
+ "por %%mm3, %%mm0\n\t"
+ MOVNTQ" %%mm0, %0\n\t"
+ :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
+ d += 4;
+ s += 16;
+ }
+#endif
+ __asm __volatile(SFENCE:::"memory");
+ __asm __volatile(EMMS:::"memory");
+#endif
+ while(s < end)
+ {
+ register int rgb = *(uint32_t*)s; s += 4;
+ *d++ = ((rgb&0xFF)>>3) + ((rgb&0xF800)>>6) + ((rgb&0xF80000)>>9);
+ }
+}
+
+static inline void RENAME(rgb32tobgr15)(const uint8_t *src, uint8_t *dst, stride_t src_size)
+{
+ const uint8_t *s = src;
+ const uint8_t *end;
+#if HAVE_MMX
+ const uint8_t *mm_end;
+#endif
+ uint16_t *d = (uint16_t *)dst;
+ end = s + src_size;
+#if HAVE_MMX
+ __asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
+ __asm __volatile(
+ "movq %0, %%mm7\n\t"
+ "movq %1, %%mm6\n\t"
+ ::"m"(red_15mask),"m"(green_15mask));
+ mm_end = end - 15;
+ while(s < mm_end)
+ {
+ __asm __volatile(
+ PREFETCH" 32%1\n\t"
+ "movd %1, %%mm0\n\t"
+ "movd 4%1, %%mm3\n\t"
+ "punpckldq 8%1, %%mm0\n\t"
+ "punpckldq 12%1, %%mm3\n\t"
+ "movq %%mm0, %%mm1\n\t"
+ "movq %%mm0, %%mm2\n\t"
+ "movq %%mm3, %%mm4\n\t"
+ "movq %%mm3, %%mm5\n\t"
+ "psllq $7, %%mm0\n\t"
+ "psllq $7, %%mm3\n\t"
+ "pand %%mm7, %%mm0\n\t"
+ "pand %%mm7, %%mm3\n\t"
+ "psrlq $6, %%mm1\n\t"
+ "psrlq $6, %%mm4\n\t"
+ "pand %%mm6, %%mm1\n\t"
+ "pand %%mm6, %%mm4\n\t"
+ "psrlq $19, %%mm2\n\t"
+ "psrlq $19, %%mm5\n\t"
+ "pand %2, %%mm2\n\t"
+ "pand %2, %%mm5\n\t"
+ "por %%mm1, %%mm0\n\t"
+ "por %%mm4, %%mm3\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "por %%mm5, %%mm3\n\t"
+ "psllq $16, %%mm3\n\t"
+ "por %%mm3, %%mm0\n\t"
+ MOVNTQ" %%mm0, %0\n\t"
+ :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
+ d += 4;
+ s += 16;
+ }
+ __asm __volatile(SFENCE:::"memory");
+ __asm __volatile(EMMS:::"memory");
+#endif
+ while(s < end)
+ {
+ register int rgb = *(uint32_t*)s; s += 4;
+ *d++ = ((rgb&0xF8)<<7) + ((rgb&0xF800)>>6) + ((rgb&0xF80000)>>19);
+ }
+}
+
+static inline void RENAME(rgb24to16)(const uint8_t *src, uint8_t *dst, stride_t src_size)
+{
+ const uint8_t *s = src;
+ const uint8_t *end;
+#if HAVE_MMX
+ const uint8_t *mm_end;
+#endif
+ uint16_t *d = (uint16_t *)dst;
+ end = s + src_size;
+#if HAVE_MMX
+ __asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
+ __asm __volatile(
+ "movq %0, %%mm7\n\t"
+ "movq %1, %%mm6\n\t"
+ ::"m"(red_16mask),"m"(green_16mask));
+ mm_end = end - 11;
+ while(s < mm_end)
+ {
+ __asm __volatile(
+ PREFETCH" 32%1\n\t"
+ "movd %1, %%mm0\n\t"
+ "movd 3%1, %%mm3\n\t"
+ "punpckldq 6%1, %%mm0\n\t"
+ "punpckldq 9%1, %%mm3\n\t"
+ "movq %%mm0, %%mm1\n\t"
+ "movq %%mm0, %%mm2\n\t"
+ "movq %%mm3, %%mm4\n\t"
+ "movq %%mm3, %%mm5\n\t"
+ "psrlq $3, %%mm0\n\t"
+ "psrlq $3, %%mm3\n\t"
+ "pand %2, %%mm0\n\t"
+ "pand %2, %%mm3\n\t"
+ "psrlq $5, %%mm1\n\t"
+ "psrlq $5, %%mm4\n\t"
+ "pand %%mm6, %%mm1\n\t"
+ "pand %%mm6, %%mm4\n\t"
+ "psrlq $8, %%mm2\n\t"
+ "psrlq $8, %%mm5\n\t"
+ "pand %%mm7, %%mm2\n\t"
+ "pand %%mm7, %%mm5\n\t"
+ "por %%mm1, %%mm0\n\t"
+ "por %%mm4, %%mm3\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "por %%mm5, %%mm3\n\t"
+ "psllq $16, %%mm3\n\t"
+ "por %%mm3, %%mm0\n\t"
+ MOVNTQ" %%mm0, %0\n\t"
+ :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory");
+ d += 4;
+ s += 12;
+ }
+ __asm __volatile(SFENCE:::"memory");
+ __asm __volatile(EMMS:::"memory");
+#endif
+ while(s < end)
+ {
+ const int b= *s++;
+ const int g= *s++;
+ const int r= *s++;
+ *d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8);
+ }
+}
+
+static inline void RENAME(rgb24tobgr16)(const uint8_t *src, uint8_t *dst, stride_t src_size)
+{
+ const uint8_t *s = src;
+ const uint8_t *end;
+#if HAVE_MMX
+ const uint8_t *mm_end;
+#endif
+ uint16_t *d = (uint16_t *)dst;
+ end = s + src_size;
+#if HAVE_MMX
+ __asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
+ __asm __volatile(
+ "movq %0, %%mm7\n\t"
+ "movq %1, %%mm6\n\t"
+ ::"m"(red_16mask),"m"(green_16mask));
+ mm_end = end - 15;
+ while(s < mm_end)
+ {
+ __asm __volatile(
+ PREFETCH" 32%1\n\t"
+ "movd %1, %%mm0\n\t"
+ "movd 3%1, %%mm3\n\t"
+ "punpckldq 6%1, %%mm0\n\t"
+ "punpckldq 9%1, %%mm3\n\t"
+ "movq %%mm0, %%mm1\n\t"
+ "movq %%mm0, %%mm2\n\t"
+ "movq %%mm3, %%mm4\n\t"
+ "movq %%mm3, %%mm5\n\t"
+ "psllq $8, %%mm0\n\t"
+ "psllq $8, %%mm3\n\t"
+ "pand %%mm7, %%mm0\n\t"
+ "pand %%mm7, %%mm3\n\t"
+ "psrlq $5, %%mm1\n\t"
+ "psrlq $5, %%mm4\n\t"
+ "pand %%mm6, %%mm1\n\t"
+ "pand %%mm6, %%mm4\n\t"
+ "psrlq $19, %%mm2\n\t"
+ "psrlq $19, %%mm5\n\t"
+ "pand %2, %%mm2\n\t"
+ "pand %2, %%mm5\n\t"
+ "por %%mm1, %%mm0\n\t"
+ "por %%mm4, %%mm3\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "por %%mm5, %%mm3\n\t"
+ "psllq $16, %%mm3\n\t"
+ "por %%mm3, %%mm0\n\t"
+ MOVNTQ" %%mm0, %0\n\t"
+ :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory");
+ d += 4;
+ s += 12;
+ }
+ __asm __volatile(SFENCE:::"memory");
+ __asm __volatile(EMMS:::"memory");
+#endif
+ while(s < end)
+ {
+ const int r= *s++;
+ const int g= *s++;
+ const int b= *s++;
+ *d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8);
+ }
+}
+
+static inline void RENAME(rgb24to15)(const uint8_t *src, uint8_t *dst, stride_t src_size)
+{
+ const uint8_t *s = src;
+ const uint8_t *end;
+#if HAVE_MMX
+ const uint8_t *mm_end;
+#endif
+ uint16_t *d = (uint16_t *)dst;
+ end = s + src_size;
+#if HAVE_MMX
+ __asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
+ __asm __volatile(
+ "movq %0, %%mm7\n\t"
+ "movq %1, %%mm6\n\t"
+ ::"m"(red_15mask),"m"(green_15mask));
+ mm_end = end - 11;
+ while(s < mm_end)
+ {
+ __asm __volatile(
+ PREFETCH" 32%1\n\t"
+ "movd %1, %%mm0\n\t"
+ "movd 3%1, %%mm3\n\t"
+ "punpckldq 6%1, %%mm0\n\t"
+ "punpckldq 9%1, %%mm3\n\t"
+ "movq %%mm0, %%mm1\n\t"
+ "movq %%mm0, %%mm2\n\t"
+ "movq %%mm3, %%mm4\n\t"
+ "movq %%mm3, %%mm5\n\t"
+ "psrlq $3, %%mm0\n\t"
+ "psrlq $3, %%mm3\n\t"
+ "pand %2, %%mm0\n\t"
+ "pand %2, %%mm3\n\t"
+ "psrlq $6, %%mm1\n\t"
+ "psrlq $6, %%mm4\n\t"
+ "pand %%mm6, %%mm1\n\t"
+ "pand %%mm6, %%mm4\n\t"
+ "psrlq $9, %%mm2\n\t"
+ "psrlq $9, %%mm5\n\t"
+ "pand %%mm7, %%mm2\n\t"
+ "pand %%mm7, %%mm5\n\t"
+ "por %%mm1, %%mm0\n\t"
+ "por %%mm4, %%mm3\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "por %%mm5, %%mm3\n\t"
+ "psllq $16, %%mm3\n\t"
+ "por %%mm3, %%mm0\n\t"
+ MOVNTQ" %%mm0, %0\n\t"
+ :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
+ d += 4;
+ s += 12;
+ }
+ __asm __volatile(SFENCE:::"memory");
+ __asm __volatile(EMMS:::"memory");
+#endif
+ while(s < end)
+ {
+ const int b= *s++;
+ const int g= *s++;
+ const int r= *s++;
+ *d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7);
+ }
+}
+
+static inline void RENAME(rgb24tobgr15)(const uint8_t *src, uint8_t *dst, stride_t src_size)
+{
+ const uint8_t *s = src;
+ const uint8_t *end;
+#if HAVE_MMX
+ const uint8_t *mm_end;
+#endif
+ uint16_t *d = (uint16_t *)dst;
+ end = s + src_size;
+#if HAVE_MMX
+ __asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
+ __asm __volatile(
+ "movq %0, %%mm7\n\t"
+ "movq %1, %%mm6\n\t"
+ ::"m"(red_15mask),"m"(green_15mask));
+ mm_end = end - 15;
+ while(s < mm_end)
+ {
+ __asm __volatile(
+ PREFETCH" 32%1\n\t"
+ "movd %1, %%mm0\n\t"
+ "movd 3%1, %%mm3\n\t"
+ "punpckldq 6%1, %%mm0\n\t"
+ "punpckldq 9%1, %%mm3\n\t"
+ "movq %%mm0, %%mm1\n\t"
+ "movq %%mm0, %%mm2\n\t"
+ "movq %%mm3, %%mm4\n\t"
+ "movq %%mm3, %%mm5\n\t"
+ "psllq $7, %%mm0\n\t"
+ "psllq $7, %%mm3\n\t"
+ "pand %%mm7, %%mm0\n\t"
+ "pand %%mm7, %%mm3\n\t"
+ "psrlq $6, %%mm1\n\t"
+ "psrlq $6, %%mm4\n\t"
+ "pand %%mm6, %%mm1\n\t"
+ "pand %%mm6, %%mm4\n\t"
+ "psrlq $19, %%mm2\n\t"
+ "psrlq $19, %%mm5\n\t"
+ "pand %2, %%mm2\n\t"
+ "pand %2, %%mm5\n\t"
+ "por %%mm1, %%mm0\n\t"
+ "por %%mm4, %%mm3\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "por %%mm5, %%mm3\n\t"
+ "psllq $16, %%mm3\n\t"
+ "por %%mm3, %%mm0\n\t"
+ MOVNTQ" %%mm0, %0\n\t"
+ :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
+ d += 4;
+ s += 12;
+ }
+ __asm __volatile(SFENCE:::"memory");
+ __asm __volatile(EMMS:::"memory");
+#endif
+ while(s < end)
+ {
+ const int r= *s++;
+ const int g= *s++;
+ const int b= *s++;
+ *d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7);
+ }
+}
+
+/*
+  I use here a less accurate approximation by simply left-shifting the input
+  value and filling the low order bits with zeroes. This method improves PNG
+  compression, but this scheme cannot reproduce white exactly, since it does not
+  generate an all-ones maximum value; the net effect is to darken the
+  image slightly.
+
+  A better method would be "left bit replication":
+
+ 4 3 2 1 0
+ ---------
+ 1 1 0 1 1
+
+ 7 6 5 4 3 2 1 0
+ ----------------
+ 1 1 0 1 1 1 1 0
+ |=======| |===|
+ | Leftmost Bits Repeated to Fill Open Bits
+ |
+ Original Bits
+*/
+static inline void RENAME(rgb15to24)(const uint8_t *src, uint8_t *dst, stride_t src_size)
+{
+ const uint16_t *end;
+#if HAVE_MMX
+ const uint16_t *mm_end;
+#endif
+ uint8_t *d = (uint8_t *)dst;
+ const uint16_t *s = (uint16_t *)src;
+ end = s + src_size/2;
+#if HAVE_MMX
+ __asm __volatile(PREFETCH" %0"::"m"(*s):"memory");
+ mm_end = end - 7;
+ while(s < mm_end)
+ {
+ __asm __volatile(
+ PREFETCH" 32%1\n\t"
+ "movq %1, %%mm0\n\t"
+ "movq %1, %%mm1\n\t"
+ "movq %1, %%mm2\n\t"
+ "pand %2, %%mm0\n\t"
+ "pand %3, %%mm1\n\t"
+ "pand %4, %%mm2\n\t"
+ "psllq $3, %%mm0\n\t"
+ "psrlq $2, %%mm1\n\t"
+ "psrlq $7, %%mm2\n\t"
+ "movq %%mm0, %%mm3\n\t"
+ "movq %%mm1, %%mm4\n\t"
+ "movq %%mm2, %%mm5\n\t"
+ "punpcklwd %5, %%mm0\n\t"
+ "punpcklwd %5, %%mm1\n\t"
+ "punpcklwd %5, %%mm2\n\t"
+ "punpckhwd %5, %%mm3\n\t"
+ "punpckhwd %5, %%mm4\n\t"
+ "punpckhwd %5, %%mm5\n\t"
+ "psllq $8, %%mm1\n\t"
+ "psllq $16, %%mm2\n\t"
+ "por %%mm1, %%mm0\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "psllq $8, %%mm4\n\t"
+ "psllq $16, %%mm5\n\t"
+ "por %%mm4, %%mm3\n\t"
+ "por %%mm5, %%mm3\n\t"
+
+ "movq %%mm0, %%mm6\n\t"
+ "movq %%mm3, %%mm7\n\t"
+
+ "movq 8%1, %%mm0\n\t"
+ "movq 8%1, %%mm1\n\t"
+ "movq 8%1, %%mm2\n\t"
+ "pand %2, %%mm0\n\t"
+ "pand %3, %%mm1\n\t"
+ "pand %4, %%mm2\n\t"
+ "psllq $3, %%mm0\n\t"
+ "psrlq $2, %%mm1\n\t"
+ "psrlq $7, %%mm2\n\t"
+ "movq %%mm0, %%mm3\n\t"
+ "movq %%mm1, %%mm4\n\t"
+ "movq %%mm2, %%mm5\n\t"
+ "punpcklwd %5, %%mm0\n\t"
+ "punpcklwd %5, %%mm1\n\t"
+ "punpcklwd %5, %%mm2\n\t"
+ "punpckhwd %5, %%mm3\n\t"
+ "punpckhwd %5, %%mm4\n\t"
+ "punpckhwd %5, %%mm5\n\t"
+ "psllq $8, %%mm1\n\t"
+ "psllq $16, %%mm2\n\t"
+ "por %%mm1, %%mm0\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "psllq $8, %%mm4\n\t"
+ "psllq $16, %%mm5\n\t"
+ "por %%mm4, %%mm3\n\t"
+ "por %%mm5, %%mm3\n\t"
+
+ :"=m"(*d)
+ :"m"(*s),"m"(mask15b),"m"(mask15g),"m"(mask15r), "m"(mmx_null)
+ :"memory");
+ /* Borrowed 32 to 24 */
+ __asm __volatile(
+ "movq %%mm0, %%mm4\n\t"
+ "movq %%mm3, %%mm5\n\t"
+ "movq %%mm6, %%mm0\n\t"
+ "movq %%mm7, %%mm1\n\t"
+
+ "movq %%mm4, %%mm6\n\t"
+ "movq %%mm5, %%mm7\n\t"
+ "movq %%mm0, %%mm2\n\t"
+ "movq %%mm1, %%mm3\n\t"
+
+ "psrlq $8, %%mm2\n\t"
+ "psrlq $8, %%mm3\n\t"
+ "psrlq $8, %%mm6\n\t"
+ "psrlq $8, %%mm7\n\t"
+ "pand %2, %%mm0\n\t"
+ "pand %2, %%mm1\n\t"
+ "pand %2, %%mm4\n\t"
+ "pand %2, %%mm5\n\t"
+ "pand %3, %%mm2\n\t"
+ "pand %3, %%mm3\n\t"
+ "pand %3, %%mm6\n\t"
+ "pand %3, %%mm7\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "por %%mm3, %%mm1\n\t"
+ "por %%mm6, %%mm4\n\t"
+ "por %%mm7, %%mm5\n\t"
+
+ "movq %%mm1, %%mm2\n\t"
+ "movq %%mm4, %%mm3\n\t"
+ "psllq $48, %%mm2\n\t"
+ "psllq $32, %%mm3\n\t"
+ "pand %4, %%mm2\n\t"
+ "pand %5, %%mm3\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "psrlq $16, %%mm1\n\t"
+ "psrlq $32, %%mm4\n\t"
+ "psllq $16, %%mm5\n\t"
+ "por %%mm3, %%mm1\n\t"
+ "pand %6, %%mm5\n\t"
+ "por %%mm5, %%mm4\n\t"
+
+ MOVNTQ" %%mm0, %0\n\t"
+ MOVNTQ" %%mm1, 8%0\n\t"
+ MOVNTQ" %%mm4, 16%0"
+
+ :"=m"(*d)
+ :"m"(*s),"m"(mask24l),"m"(mask24h),"m"(mask24hh),"m"(mask24hhh),"m"(mask24hhhh)
+ :"memory");
+ d += 24;
+ s += 8;
+ }
+ __asm __volatile(SFENCE:::"memory");
+ __asm __volatile(EMMS:::"memory");
+#endif
+ while(s < end)
+ {
+ register uint16_t bgr;
+ bgr = *s++;
+ *d++ = (bgr&0x1F)<<3;
+ *d++ = (bgr&0x3E0)>>2;
+ *d++ = (bgr&0x7C00)>>7;
+ }
+}
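+
+/* Worked example of the trade-off described above, for a 5-bit value
+   v = 11011b (27): zero filling gives v<<3 = 11011000b (216), while left bit
+   replication gives (v<<3)|(v>>2) = 11011110b (222); only the latter maps
+   full scale 31 to 255 rather than 248. A sketch of the replicating variant
+   (not what the scalar loop above does):
+
+       d[0] = ((bgr&0x1F)<<3)   | ((bgr&0x1F)>>2);      // B
+       d[1] = ((bgr&0x3E0)>>2)  | ((bgr&0x3E0)>>7);     // G
+       d[2] = ((bgr&0x7C00)>>7) | ((bgr&0x7C00)>>12);   // R
+*/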
+
+static inline void RENAME(rgb16to24)(const uint8_t *src, uint8_t *dst, stride_t src_size)
+{
+ const uint16_t *end;
+#if HAVE_MMX
+ const uint16_t *mm_end;
+#endif
+ uint8_t *d = (uint8_t *)dst;
+ const uint16_t *s = (const uint16_t *)src;
+ end = s + src_size/2;
+#if HAVE_MMX
+ __asm __volatile(PREFETCH" %0"::"m"(*s):"memory");
+ mm_end = end - 7;
+ while(s < mm_end)
+ {
+ __asm __volatile(
+ PREFETCH" 32%1\n\t"
+ "movq %1, %%mm0\n\t"
+ "movq %1, %%mm1\n\t"
+ "movq %1, %%mm2\n\t"
+ "pand %2, %%mm0\n\t"
+ "pand %3, %%mm1\n\t"
+ "pand %4, %%mm2\n\t"
+ "psllq $3, %%mm0\n\t"
+ "psrlq $3, %%mm1\n\t"
+ "psrlq $8, %%mm2\n\t"
+ "movq %%mm0, %%mm3\n\t"
+ "movq %%mm1, %%mm4\n\t"
+ "movq %%mm2, %%mm5\n\t"
+ "punpcklwd %5, %%mm0\n\t"
+ "punpcklwd %5, %%mm1\n\t"
+ "punpcklwd %5, %%mm2\n\t"
+ "punpckhwd %5, %%mm3\n\t"
+ "punpckhwd %5, %%mm4\n\t"
+ "punpckhwd %5, %%mm5\n\t"
+ "psllq $8, %%mm1\n\t"
+ "psllq $16, %%mm2\n\t"
+ "por %%mm1, %%mm0\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "psllq $8, %%mm4\n\t"
+ "psllq $16, %%mm5\n\t"
+ "por %%mm4, %%mm3\n\t"
+ "por %%mm5, %%mm3\n\t"
+
+ "movq %%mm0, %%mm6\n\t"
+ "movq %%mm3, %%mm7\n\t"
+
+ "movq 8%1, %%mm0\n\t"
+ "movq 8%1, %%mm1\n\t"
+ "movq 8%1, %%mm2\n\t"
+ "pand %2, %%mm0\n\t"
+ "pand %3, %%mm1\n\t"
+ "pand %4, %%mm2\n\t"
+ "psllq $3, %%mm0\n\t"
+ "psrlq $3, %%mm1\n\t"
+ "psrlq $8, %%mm2\n\t"
+ "movq %%mm0, %%mm3\n\t"
+ "movq %%mm1, %%mm4\n\t"
+ "movq %%mm2, %%mm5\n\t"
+ "punpcklwd %5, %%mm0\n\t"
+ "punpcklwd %5, %%mm1\n\t"
+ "punpcklwd %5, %%mm2\n\t"
+ "punpckhwd %5, %%mm3\n\t"
+ "punpckhwd %5, %%mm4\n\t"
+ "punpckhwd %5, %%mm5\n\t"
+ "psllq $8, %%mm1\n\t"
+ "psllq $16, %%mm2\n\t"
+ "por %%mm1, %%mm0\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "psllq $8, %%mm4\n\t"
+ "psllq $16, %%mm5\n\t"
+ "por %%mm4, %%mm3\n\t"
+ "por %%mm5, %%mm3\n\t"
+ :"=m"(*d)
+ :"m"(*s),"m"(mask16b),"m"(mask16g),"m"(mask16r),"m"(mmx_null)
+ :"memory");
+ /* Borrowed 32 to 24 */
+ __asm __volatile(
+ "movq %%mm0, %%mm4\n\t"
+ "movq %%mm3, %%mm5\n\t"
+ "movq %%mm6, %%mm0\n\t"
+ "movq %%mm7, %%mm1\n\t"
+
+ "movq %%mm4, %%mm6\n\t"
+ "movq %%mm5, %%mm7\n\t"
+ "movq %%mm0, %%mm2\n\t"
+ "movq %%mm1, %%mm3\n\t"
+
+ "psrlq $8, %%mm2\n\t"
+ "psrlq $8, %%mm3\n\t"
+ "psrlq $8, %%mm6\n\t"
+ "psrlq $8, %%mm7\n\t"
+ "pand %2, %%mm0\n\t"
+ "pand %2, %%mm1\n\t"
+ "pand %2, %%mm4\n\t"
+ "pand %2, %%mm5\n\t"
+ "pand %3, %%mm2\n\t"
+ "pand %3, %%mm3\n\t"
+ "pand %3, %%mm6\n\t"
+ "pand %3, %%mm7\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "por %%mm3, %%mm1\n\t"
+ "por %%mm6, %%mm4\n\t"
+ "por %%mm7, %%mm5\n\t"
+
+ "movq %%mm1, %%mm2\n\t"
+ "movq %%mm4, %%mm3\n\t"
+ "psllq $48, %%mm2\n\t"
+ "psllq $32, %%mm3\n\t"
+ "pand %4, %%mm2\n\t"
+ "pand %5, %%mm3\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "psrlq $16, %%mm1\n\t"
+ "psrlq $32, %%mm4\n\t"
+ "psllq $16, %%mm5\n\t"
+ "por %%mm3, %%mm1\n\t"
+ "pand %6, %%mm5\n\t"
+ "por %%mm5, %%mm4\n\t"
+
+ MOVNTQ" %%mm0, %0\n\t"
+ MOVNTQ" %%mm1, 8%0\n\t"
+ MOVNTQ" %%mm4, 16%0"
+
+ :"=m"(*d)
+ :"m"(*s),"m"(mask24l),"m"(mask24h),"m"(mask24hh),"m"(mask24hhh),"m"(mask24hhhh)
+ :"memory");
+ d += 24;
+ s += 8;
+ }
+ __asm __volatile(SFENCE:::"memory");
+ __asm __volatile(EMMS:::"memory");
+#endif
+ while(s < end)
+ {
+ register uint16_t bgr;
+ bgr = *s++;
+ *d++ = (bgr&0x1F)<<3;
+ *d++ = (bgr&0x7E0)>>3;
+ *d++ = (bgr&0xF800)>>8;
+ }
+}
+
+static inline void RENAME(rgb15to32)(const uint8_t *src, uint8_t *dst, stride_t src_size)
+{
+ const uint16_t *end;
+#if HAVE_MMX
+ const uint16_t *mm_end;
+#endif
+ uint8_t *d = (uint8_t *)dst;
+ const uint16_t *s = (const uint16_t *)src;
+ end = s + src_size/2;
+#if HAVE_MMX
+ __asm __volatile(PREFETCH" %0"::"m"(*s):"memory");
+ __asm __volatile("pxor %%mm7,%%mm7\n\t":::"memory");
+ mm_end = end - 3;
+ while(s < mm_end)
+ {
+ __asm __volatile(
+ PREFETCH" 32%1\n\t"
+ "movq %1, %%mm0\n\t"
+ "movq %1, %%mm1\n\t"
+ "movq %1, %%mm2\n\t"
+ "pand %2, %%mm0\n\t"
+ "pand %3, %%mm1\n\t"
+ "pand %4, %%mm2\n\t"
+ "psllq $3, %%mm0\n\t"
+ "psrlq $2, %%mm1\n\t"
+ "psrlq $7, %%mm2\n\t"
+ "movq %%mm0, %%mm3\n\t"
+ "movq %%mm1, %%mm4\n\t"
+ "movq %%mm2, %%mm5\n\t"
+ "punpcklwd %%mm7, %%mm0\n\t"
+ "punpcklwd %%mm7, %%mm1\n\t"
+ "punpcklwd %%mm7, %%mm2\n\t"
+ "punpckhwd %%mm7, %%mm3\n\t"
+ "punpckhwd %%mm7, %%mm4\n\t"
+ "punpckhwd %%mm7, %%mm5\n\t"
+ "psllq $8, %%mm1\n\t"
+ "psllq $16, %%mm2\n\t"
+ "por %%mm1, %%mm0\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "psllq $8, %%mm4\n\t"
+ "psllq $16, %%mm5\n\t"
+ "por %%mm4, %%mm3\n\t"
+ "por %%mm5, %%mm3\n\t"
+ MOVNTQ" %%mm0, %0\n\t"
+ MOVNTQ" %%mm3, 8%0\n\t"
+ :"=m"(*d)
+ :"m"(*s),"m"(mask15b),"m"(mask15g),"m"(mask15r)
+ :"memory");
+ d += 16;
+ s += 4;
+ }
+ __asm __volatile(SFENCE:::"memory");
+ __asm __volatile(EMMS:::"memory");
+#endif
+ while(s < end)
+ {
+#if 0 //slightly slower on athlon
+ int bgr= *s++;
+ *((uint32_t*)d)++ = ((bgr&0x1F)<<3) + ((bgr&0x3E0)<<6) + ((bgr&0x7C00)<<9);
+#else
+ register uint16_t bgr;
+ bgr = *s++;
+#ifdef WORDS_BIGENDIAN
+ *d++ = 0;
+ *d++ = (bgr&0x7C00)>>7;
+ *d++ = (bgr&0x3E0)>>2;
+ *d++ = (bgr&0x1F)<<3;
+#else
+ *d++ = (bgr&0x1F)<<3;
+ *d++ = (bgr&0x3E0)>>2;
+ *d++ = (bgr&0x7C00)>>7;
+ *d++ = 0;
+#endif
+
+#endif
+ }
+}
+
+static inline void RENAME(rgb16to32)(const uint8_t *src, uint8_t *dst, stride_t src_size)
+{
+ const uint16_t *end;
+#if HAVE_MMX
+ const uint16_t *mm_end;
+#endif
+ uint8_t *d = (uint8_t *)dst;
+ const uint16_t *s = (uint16_t *)src;
+ end = s + src_size/2;
+#if HAVE_MMX
+ __asm __volatile(PREFETCH" %0"::"m"(*s):"memory");
+ __asm __volatile("pxor %%mm7,%%mm7\n\t":::"memory");
+ mm_end = end - 3;
+ while(s < mm_end)
+ {
+ __asm __volatile(
+ PREFETCH" 32%1\n\t"
+ "movq %1, %%mm0\n\t"
+ "movq %1, %%mm1\n\t"
+ "movq %1, %%mm2\n\t"
+ "pand %2, %%mm0\n\t"
+ "pand %3, %%mm1\n\t"
+ "pand %4, %%mm2\n\t"
+ "psllq $3, %%mm0\n\t"
+ "psrlq $3, %%mm1\n\t"
+ "psrlq $8, %%mm2\n\t"
+ "movq %%mm0, %%mm3\n\t"
+ "movq %%mm1, %%mm4\n\t"
+ "movq %%mm2, %%mm5\n\t"
+ "punpcklwd %%mm7, %%mm0\n\t"
+ "punpcklwd %%mm7, %%mm1\n\t"
+ "punpcklwd %%mm7, %%mm2\n\t"
+ "punpckhwd %%mm7, %%mm3\n\t"
+ "punpckhwd %%mm7, %%mm4\n\t"
+ "punpckhwd %%mm7, %%mm5\n\t"
+ "psllq $8, %%mm1\n\t"
+ "psllq $16, %%mm2\n\t"
+ "por %%mm1, %%mm0\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "psllq $8, %%mm4\n\t"
+ "psllq $16, %%mm5\n\t"
+ "por %%mm4, %%mm3\n\t"
+ "por %%mm5, %%mm3\n\t"
+ MOVNTQ" %%mm0, %0\n\t"
+ MOVNTQ" %%mm3, 8%0\n\t"
+ :"=m"(*d)
+ :"m"(*s),"m"(mask16b),"m"(mask16g),"m"(mask16r)
+ :"memory");
+ d += 16;
+ s += 4;
+ }
+ __asm __volatile(SFENCE:::"memory");
+ __asm __volatile(EMMS:::"memory");
+#endif
+ while(s < end)
+ {
+ register uint16_t bgr;
+ bgr = *s++;
+#ifdef WORDS_BIGENDIAN
+ *d++ = 0;
+ *d++ = (bgr&0xF800)>>8;
+ *d++ = (bgr&0x7E0)>>3;
+ *d++ = (bgr&0x1F)<<3;
+#else
+ *d++ = (bgr&0x1F)<<3;
+ *d++ = (bgr&0x7E0)>>3;
+ *d++ = (bgr&0xF800)>>8;
+ *d++ = 0;
+#endif
+ }
+}
+
+static inline void RENAME(rgb32tobgr32)(const uint8_t *src, uint8_t *dst, stride_t src_size)
+{
+#if HAVE_MMX
+/* TODO: unroll this loop */
+ asm volatile (
+ "xor %%"REG_a", %%"REG_a" \n\t"
+ ".balign 16 \n\t"
+ "1: \n\t"
+ PREFETCH" 32(%0, %%"REG_a") \n\t"
+ "movq (%0, %%"REG_a"), %%mm0 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm0, %%mm2 \n\t"
+ "pslld $16, %%mm0 \n\t"
+ "psrld $16, %%mm1 \n\t"
+ "pand "MANGLE(mask32r)", %%mm0 \n\t"
+ "pand "MANGLE(mask32g)", %%mm2 \n\t"
+ "pand "MANGLE(mask32b)", %%mm1 \n\t"
+ "por %%mm0, %%mm2 \n\t"
+ "por %%mm1, %%mm2 \n\t"
+ MOVNTQ" %%mm2, (%1, %%"REG_a") \n\t"
+ "add $8, %%"REG_a" \n\t"
+ "cmp %2, %%"REG_a" \n\t"
+ " jb 1b \n\t"
+ :: "r" (src), "r"(dst), "r" (src_size-7)
+ : "%"REG_a
+ );
+
+ __asm __volatile(SFENCE:::"memory");
+ __asm __volatile(EMMS:::"memory");
+#else
+ unsigned i;
+ stride_t num_pixels = src_size >> 2;
+ for(i=0; i<num_pixels; i++)
+ {
+#ifdef WORDS_BIGENDIAN
+ dst[4*i + 1] = src[4*i + 3];
+ dst[4*i + 2] = src[4*i + 2];
+ dst[4*i + 3] = src[4*i + 1];
+#else
+ dst[4*i + 0] = src[4*i + 2];
+ dst[4*i + 1] = src[4*i + 1];
+ dst[4*i + 2] = src[4*i + 0];
+#endif
+ }
+#endif
+}
+
+static inline void RENAME(rgb24tobgr24)(const uint8_t *src, uint8_t *dst, stride_t src_size)
+{
+ unsigned i;
+#if HAVE_MMX
+ long mmx_size= 23 - src_size;
+ asm volatile (
+ "movq "MANGLE(mask24r)", %%mm5 \n\t"
+ "movq "MANGLE(mask24g)", %%mm6 \n\t"
+ "movq "MANGLE(mask24b)", %%mm7 \n\t"
+ ".balign 16 \n\t"
+ "1: \n\t"
+ PREFETCH" 32(%1, %%"REG_a") \n\t"
+ "movq (%1, %%"REG_a"), %%mm0 \n\t" // BGR BGR BG
+ "movq (%1, %%"REG_a"), %%mm1 \n\t" // BGR BGR BG
+ "movq 2(%1, %%"REG_a"), %%mm2 \n\t" // R BGR BGR B
+ "psllq $16, %%mm0 \n\t" // 00 BGR BGR
+ "pand %%mm5, %%mm0 \n\t"
+ "pand %%mm6, %%mm1 \n\t"
+ "pand %%mm7, %%mm2 \n\t"
+ "por %%mm0, %%mm1 \n\t"
+ "por %%mm2, %%mm1 \n\t"
+ "movq 6(%1, %%"REG_a"), %%mm0 \n\t" // BGR BGR BG
+ MOVNTQ" %%mm1, (%2, %%"REG_a")\n\t" // RGB RGB RG
+ "movq 8(%1, %%"REG_a"), %%mm1 \n\t" // R BGR BGR B
+ "movq 10(%1, %%"REG_a"), %%mm2 \n\t" // GR BGR BGR
+ "pand %%mm7, %%mm0 \n\t"
+ "pand %%mm5, %%mm1 \n\t"
+ "pand %%mm6, %%mm2 \n\t"
+ "por %%mm0, %%mm1 \n\t"
+ "por %%mm2, %%mm1 \n\t"
+ "movq 14(%1, %%"REG_a"), %%mm0 \n\t" // R BGR BGR B
+ MOVNTQ" %%mm1, 8(%2, %%"REG_a")\n\t" // B RGB RGB R
+ "movq 16(%1, %%"REG_a"), %%mm1 \n\t" // GR BGR BGR
+ "movq 18(%1, %%"REG_a"), %%mm2 \n\t" // BGR BGR BG
+ "pand %%mm6, %%mm0 \n\t"
+ "pand %%mm7, %%mm1 \n\t"
+ "pand %%mm5, %%mm2 \n\t"
+ "por %%mm0, %%mm1 \n\t"
+ "por %%mm2, %%mm1 \n\t"
+ MOVNTQ" %%mm1, 16(%2, %%"REG_a")\n\t"
+ "add $24, %%"REG_a" \n\t"
+ " js 1b \n\t"
+ : "+a" (mmx_size)
+ : "r" (src-mmx_size), "r"(dst-mmx_size)
+ );
+
+ __asm __volatile(SFENCE:::"memory");
+ __asm __volatile(EMMS:::"memory");
+
+ if(mmx_size==23) return; //finished, was multiple of 8
+
+ src+= src_size;
+ dst+= src_size;
+ src_size= 23-mmx_size;
+ src-= src_size;
+ dst-= src_size;
+#endif
+ for(i=0; i<src_size; i+=3)
+ {
+ register uint8_t x;
+ x = src[i + 2];
+ dst[i + 1] = src[i + 1];
+ dst[i + 2] = src[i + 0];
+ dst[i + 0] = x;
+ }
+}
+
+static inline void RENAME(yuvPlanartoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+ long width, long height,
+ stride_t lumStride, stride_t chromStride, stride_t dstStride, long vertLumPerChroma)
+{
+ long y;
+ const stride_t chromWidth= width>>1;
+ for(y=0; y<height; y++)
+ {
+#if HAVE_MMX
+//FIXME handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely limited by memory anyway)
+ asm volatile(
+ "xor %%"REG_a", %%"REG_a" \n\t"
+ ".balign 16 \n\t"
+ "1: \n\t"
+ PREFETCH" 32(%1, %%"REG_a", 2) \n\t"
+ PREFETCH" 32(%2, %%"REG_a") \n\t"
+ PREFETCH" 32(%3, %%"REG_a") \n\t"
+ "movq (%2, %%"REG_a"), %%mm0 \n\t" // U(0)
+ "movq %%mm0, %%mm2 \n\t" // U(0)
+ "movq (%3, %%"REG_a"), %%mm1 \n\t" // V(0)
+ "punpcklbw %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
+ "punpckhbw %%mm1, %%mm2 \n\t" // UVUV UVUV(8)
+
+ "movq (%1, %%"REG_a",2), %%mm3 \n\t" // Y(0)
+ "movq 8(%1, %%"REG_a",2), %%mm5 \n\t" // Y(8)
+ "movq %%mm3, %%mm4 \n\t" // Y(0)
+ "movq %%mm5, %%mm6 \n\t" // Y(8)
+ "punpcklbw %%mm0, %%mm3 \n\t" // YUYV YUYV(0)
+ "punpckhbw %%mm0, %%mm4 \n\t" // YUYV YUYV(4)
+ "punpcklbw %%mm2, %%mm5 \n\t" // YUYV YUYV(8)
+ "punpckhbw %%mm2, %%mm6 \n\t" // YUYV YUYV(12)
+
+ MOVNTQ" %%mm3, (%0, %%"REG_a", 4)\n\t"
+ MOVNTQ" %%mm4, 8(%0, %%"REG_a", 4)\n\t"
+ MOVNTQ" %%mm5, 16(%0, %%"REG_a", 4)\n\t"
+ MOVNTQ" %%mm6, 24(%0, %%"REG_a", 4)\n\t"
+
+ "add $8, %%"REG_a" \n\t"
+ "cmp %4, %%"REG_a" \n\t"
+ " jb 1b \n\t"
+ ::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" (chromWidth)
+ : "%"REG_a
+ );
+#else
+
+#if defined ARCH_ALPHA && defined HAVE_MVI
+#define pl2yuy2(n) \
+ y1 = yc[n]; \
+ y2 = yc2[n]; \
+ u = uc[n]; \
+ v = vc[n]; \
+ asm("unpkbw %1, %0" : "=r"(y1) : "r"(y1)); \
+ asm("unpkbw %1, %0" : "=r"(y2) : "r"(y2)); \
+ asm("unpkbl %1, %0" : "=r"(u) : "r"(u)); \
+ asm("unpkbl %1, %0" : "=r"(v) : "r"(v)); \
+ yuv1 = (u << 8) + (v << 24); \
+ yuv2 = yuv1 + y2; \
+ yuv1 += y1; \
+ qdst[n] = yuv1; \
+ qdst2[n] = yuv2;
+
+ int i;
+ uint64_t *qdst = (uint64_t *) dst;
+ uint64_t *qdst2 = (uint64_t *) (dst + dstStride);
+ const uint32_t *yc = (uint32_t *) ysrc;
+ const uint32_t *yc2 = (uint32_t *) (ysrc + lumStride);
+ const uint16_t *uc = (uint16_t*) usrc, *vc = (uint16_t*) vsrc;
+ for(i = 0; i < chromWidth; i += 8){
+ uint64_t y1, y2, yuv1, yuv2;
+ uint64_t u, v;
+ /* Prefetch */
+ asm("ldq $31,64(%0)" :: "r"(yc));
+ asm("ldq $31,64(%0)" :: "r"(yc2));
+ asm("ldq $31,64(%0)" :: "r"(uc));
+ asm("ldq $31,64(%0)" :: "r"(vc));
+
+ pl2yuy2(0);
+ pl2yuy2(1);
+ pl2yuy2(2);
+ pl2yuy2(3);
+
+ yc += 4;
+ yc2 += 4;
+ uc += 4;
+ vc += 4;
+ qdst += 4;
+ qdst2 += 4;
+ }
+ y++;
+ ysrc += lumStride;
+ dst += dstStride;
+
+#elif HAVE_FAST_64BIT
+ int i;
+ uint64_t *ldst = (uint64_t *) dst;
+ const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc;
+ for(i = 0; i < chromWidth; i += 2){
+ uint64_t k, l;
+ k = yc[0] + (uc[0] << 8) +
+ (yc[1] << 16) + (vc[0] << 24);
+ l = yc[2] + (uc[1] << 8) +
+ (yc[3] << 16) + (vc[1] << 24);
+ *ldst++ = k + (l << 32);
+ yc += 4;
+ uc += 2;
+ vc += 2;
+ }
+
+#else
+ int i, *idst = (int32_t *) dst;
+ const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc;
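+ /* Pack 4:2:2 as Y0 U0 Y1 V0 in memory (YUY2); one U/V pair covers two luma
+    samples, and the WORDS_BIGENDIAN branch below only reorders the shifts so
+    the stored byte order stays the same. */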
+ for(i = 0; i < chromWidth; i++){
+#ifdef WORDS_BIGENDIAN
+ *idst++ = (yc[0] << 24)+ (uc[0] << 16) +
+ (yc[1] << 8) + (vc[0] << 0);
+#else
+ *idst++ = yc[0] + (uc[0] << 8) +
+ (yc[1] << 16) + (vc[0] << 24);
+#endif
+ yc += 2;
+ uc++;
+ vc++;
+ }
+#endif
+#endif
+ if((y&(vertLumPerChroma-1))==(vertLumPerChroma-1) )
+ {
+ usrc += chromStride;
+ vsrc += chromStride;
+ }
+ ysrc += lumStride;
+ dst += dstStride;
+ }
+#if HAVE_MMX
+asm( EMMS" \n\t"
+ SFENCE" \n\t"
+ :::"memory");
+#endif
+}
+
+/**
+ * Height should be a multiple of 2 and width should be a multiple of 16.
+ * (If this is a problem for anyone then tell me, and I will fix it.)
+ */
+static inline void RENAME(yv12toyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+ long width, long height,
+ stride_t lumStride, stride_t chromStride, stride_t dstStride)
+{
+ //FIXME interpolate chroma
+ RENAME(yuvPlanartoyuy2)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 2);
+}
+
+static inline void RENAME(yuvPlanartouyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+ long width, long height,
+ stride_t lumStride, stride_t chromStride, stride_t dstStride, stride_t vertLumPerChroma)
+{
+ long y;
+ const x86_reg chromWidth= width>>1;
+ for (y=0; y<height; y++)
+ {
+#if HAVE_MMX
+//FIXME handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely memory-limited anyway)
+ __asm__ volatile(
+ "xor %%"REG_a", %%"REG_a" \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ PREFETCH" 32(%1, %%"REG_a", 2) \n\t"
+ PREFETCH" 32(%2, %%"REG_a") \n\t"
+ PREFETCH" 32(%3, %%"REG_a") \n\t"
+ "movq (%2, %%"REG_a"), %%mm0 \n\t" // U(0)
+ "movq %%mm0, %%mm2 \n\t" // U(0)
+ "movq (%3, %%"REG_a"), %%mm1 \n\t" // V(0)
+ "punpcklbw %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
+ "punpckhbw %%mm1, %%mm2 \n\t" // UVUV UVUV(8)
+
+ "movq (%1, %%"REG_a",2), %%mm3 \n\t" // Y(0)
+ "movq 8(%1, %%"REG_a",2), %%mm5 \n\t" // Y(8)
+ "movq %%mm0, %%mm4 \n\t" // Y(0)
+ "movq %%mm2, %%mm6 \n\t" // Y(8)
+ "punpcklbw %%mm3, %%mm0 \n\t" // YUYV YUYV(0)
+ "punpckhbw %%mm3, %%mm4 \n\t" // YUYV YUYV(4)
+ "punpcklbw %%mm5, %%mm2 \n\t" // YUYV YUYV(8)
+ "punpckhbw %%mm5, %%mm6 \n\t" // YUYV YUYV(12)
+
+ MOVNTQ" %%mm0, (%0, %%"REG_a", 4) \n\t"
+ MOVNTQ" %%mm4, 8(%0, %%"REG_a", 4) \n\t"
+ MOVNTQ" %%mm2, 16(%0, %%"REG_a", 4) \n\t"
+ MOVNTQ" %%mm6, 24(%0, %%"REG_a", 4) \n\t"
+
+ "add $8, %%"REG_a" \n\t"
+ "cmp %4, %%"REG_a" \n\t"
+ " jb 1b \n\t"
+ ::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" (chromWidth)
+ : "%"REG_a
+ );
+#else
+//FIXME adapt the Alpha ASM code from yv12->yuy2
+
+#if HAVE_FAST_64BIT
+ int i;
+ uint64_t *ldst = (uint64_t *) dst;
+ const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc;
+ for (i = 0; i < chromWidth; i += 2){
+ uint64_t k, l;
+ k = uc[0] + (yc[0] << 8) +
+ (vc[0] << 16) + (yc[1] << 24);
+ l = uc[1] + (yc[2] << 8) +
+ (vc[1] << 16) + (yc[3] << 24);
+ *ldst++ = k + (l << 32);
+ yc += 4;
+ uc += 2;
+ vc += 2;
+ }
+
+#else
+ int i, *idst = (int32_t *) dst;
+ const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc;
+ for (i = 0; i < chromWidth; i++){
+#ifdef WORDS_BIGENDIAN
+ *idst++ = (uc[0] << 24)+ (yc[0] << 16) +
+ (vc[0] << 8) + (yc[1] << 0);
+#else
+ *idst++ = uc[0] + (yc[0] << 8) +
+ (vc[0] << 16) + (yc[1] << 24);
+#endif
+ yc += 2;
+ uc++;
+ vc++;
+ }
+#endif
+#endif
+ if ((y&(vertLumPerChroma-1)) == vertLumPerChroma-1)
+ {
+ usrc += chromStride;
+ vsrc += chromStride;
+ }
+ ysrc += lumStride;
+ dst += dstStride;
+ }
+#if HAVE_MMX
+__asm__( EMMS" \n\t"
+ SFENCE" \n\t"
+ :::"memory");
+#endif
+}
+
+static inline void RENAME(yuvPlanartovyuy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+ long width, long height,
+ stride_t lumStride, stride_t chromStride, stride_t dstStride, stride_t vertLumPerChroma)
+{
+ long y;
+ const stride_t chromWidth= width>>1;
+ for(y=0; y<height; y++)
+ {
+ int i, *idst = (int32_t *) dst;
+ const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc;
+ for(i = 0; i < chromWidth; i++){
+ *idst++ = vc[0] + (yc[0] << 8) +
+ (uc[0] << 16) + (yc[1] << 24);
+ yc += 2;
+ uc++;
+ vc++;
+ }
+ if((y&(vertLumPerChroma-1))==(vertLumPerChroma-1) )
+ {
+ usrc += chromStride;
+ vsrc += chromStride;
+ }
+ ysrc += lumStride;
+ dst += dstStride;
+ }
+}
+
+static inline void RENAME(yuvPlanartoyvyu)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+ long width, long height,
+ stride_t lumStride, stride_t chromStride, stride_t dstStride, stride_t vertLumPerChroma)
+{
+ long y;
+ const stride_t chromWidth= width>>1;
+ for(y=0; y<height; y++)
+ {
+ int i, *idst = (int32_t *) dst;
+ const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc;
+ for(i = 0; i < chromWidth; i++){
+ *idst++ = yc[0] + (vc[0] << 8) +
+ (yc[1] << 16) + (uc[0] << 24);
+ yc += 2;
+ uc++;
+ vc++;
+ }
+ if((y&(vertLumPerChroma-1))==(vertLumPerChroma-1) )
+ {
+ usrc += chromStride;
+ vsrc += chromStride;
+ }
+ ysrc += lumStride;
+ dst += dstStride;
+ }
+}
+
+/**
+ * Height should be a multiple of 2 and width should be a multiple of 16
+ * (If this is a problem for anyone then tell me, and I will fix it.)
+ */
+static inline void RENAME(yv12touyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+ long width, long height,
+ stride_t lumStride, stride_t chromStride, stride_t dstStride)
+{
+ //FIXME interpolate chroma
+ RENAME(yuvPlanartouyvy)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 2);
+}
+
+static inline void RENAME(yv12toyvyu)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+ long width, long height,
+ stride_t lumStride, stride_t chromStride, stride_t dstStride)
+{
+ //FIXME interpolate chroma
+ RENAME(yuvPlanartoyvyu)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 2);
+}
+
+static inline void RENAME(yv12tovyuy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+ long width, long height,
+ stride_t lumStride, stride_t chromStride, stride_t dstStride)
+{
+ //FIXME interpolate chroma
+ RENAME(yuvPlanartovyuy)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 2);
+}
+
+/**
+ * Width should be a multiple of 16.
+ */
+static inline void RENAME(yuv422ptoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+ long width, long height,
+ stride_t lumStride, stride_t chromStride, stride_t dstStride)
+{
+ RENAME(yuvPlanartoyuy2)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 1);
+}
+
+/**
+ * Height should be a multiple of 2 and width should be a multiple of 16.
+ * (If this is a problem for anyone then tell me, and I will fix it.)
+ */
+static inline void RENAME(yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
+ long width, long height,
+ stride_t lumStride, stride_t chromStride, stride_t srcStride)
+{
+ long y;
+ const x86_reg chromWidth= width>>1;
+ for (y=0; y<height; y+=2)
+ {
+#if HAVE_MMX
+ __asm__ volatile(
+ "xor %%"REG_a", %%"REG_a" \n\t"
+ "pcmpeqw %%mm7, %%mm7 \n\t"
+ "psrlw $8, %%mm7 \n\t" // FF,00,FF,00...
+ ASMALIGN(4)
+ "1: \n\t"
+ PREFETCH" 64(%0, %%"REG_a", 4) \n\t"
+ "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // YUYV YUYV(0)
+ "movq 8(%0, %%"REG_a", 4), %%mm1 \n\t" // YUYV YUYV(4)
+ "movq %%mm0, %%mm2 \n\t" // YUYV YUYV(0)
+ "movq %%mm1, %%mm3 \n\t" // YUYV YUYV(4)
+ "psrlw $8, %%mm0 \n\t" // U0V0 U0V0(0)
+ "psrlw $8, %%mm1 \n\t" // U0V0 U0V0(4)
+ "pand %%mm7, %%mm2 \n\t" // Y0Y0 Y0Y0(0)
+ "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(4)
+ "packuswb %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
+ "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(0)
+
+ MOVNTQ" %%mm2, (%1, %%"REG_a", 2) \n\t"
+
+ "movq 16(%0, %%"REG_a", 4), %%mm1 \n\t" // YUYV YUYV(8)
+ "movq 24(%0, %%"REG_a", 4), %%mm2 \n\t" // YUYV YUYV(12)
+ "movq %%mm1, %%mm3 \n\t" // YUYV YUYV(8)
+ "movq %%mm2, %%mm4 \n\t" // YUYV YUYV(12)
+ "psrlw $8, %%mm1 \n\t" // U0V0 U0V0(8)
+ "psrlw $8, %%mm2 \n\t" // U0V0 U0V0(12)
+ "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(8)
+ "pand %%mm7, %%mm4 \n\t" // Y0Y0 Y0Y0(12)
+ "packuswb %%mm2, %%mm1 \n\t" // UVUV UVUV(8)
+ "packuswb %%mm4, %%mm3 \n\t" // YYYY YYYY(8)
+
+ MOVNTQ" %%mm3, 8(%1, %%"REG_a", 2) \n\t"
+
+ "movq %%mm0, %%mm2 \n\t" // UVUV UVUV(0)
+ "movq %%mm1, %%mm3 \n\t" // UVUV UVUV(8)
+ "psrlw $8, %%mm0 \n\t" // V0V0 V0V0(0)
+ "psrlw $8, %%mm1 \n\t" // V0V0 V0V0(8)
+ "pand %%mm7, %%mm2 \n\t" // U0U0 U0U0(0)
+ "pand %%mm7, %%mm3 \n\t" // U0U0 U0U0(8)
+ "packuswb %%mm1, %%mm0 \n\t" // VVVV VVVV(0)
+ "packuswb %%mm3, %%mm2 \n\t" // UUUU UUUU(0)
+
+ MOVNTQ" %%mm0, (%3, %%"REG_a") \n\t"
+ MOVNTQ" %%mm2, (%2, %%"REG_a") \n\t"
+
+ "add $8, %%"REG_a" \n\t"
+ "cmp %4, %%"REG_a" \n\t"
+ " jb 1b \n\t"
+ ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
+ : "memory", "%"REG_a
+ );
+
+ ydst += lumStride;
+ src += srcStride;
+
+ __asm__ volatile(
+ "xor %%"REG_a", %%"REG_a" \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ PREFETCH" 64(%0, %%"REG_a", 4) \n\t"
+ "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // YUYV YUYV(0)
+ "movq 8(%0, %%"REG_a", 4), %%mm1 \n\t" // YUYV YUYV(4)
+ "movq 16(%0, %%"REG_a", 4), %%mm2 \n\t" // YUYV YUYV(8)
+ "movq 24(%0, %%"REG_a", 4), %%mm3 \n\t" // YUYV YUYV(12)
+ "pand %%mm7, %%mm0 \n\t" // Y0Y0 Y0Y0(0)
+ "pand %%mm7, %%mm1 \n\t" // Y0Y0 Y0Y0(4)
+ "pand %%mm7, %%mm2 \n\t" // Y0Y0 Y0Y0(8)
+ "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(12)
+ "packuswb %%mm1, %%mm0 \n\t" // YYYY YYYY(0)
+ "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(8)
+
+ MOVNTQ" %%mm0, (%1, %%"REG_a", 2) \n\t"
+ MOVNTQ" %%mm2, 8(%1, %%"REG_a", 2) \n\t"
+
+ "add $8, %%"REG_a" \n\t"
+ "cmp %4, %%"REG_a" \n\t"
+ " jb 1b \n\t"
+
+ ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
+ : "memory", "%"REG_a
+ );
+#else
+ long i;
+ for (i=0; i<chromWidth; i++)
+ {
+ ydst[2*i+0] = src[4*i+0];
+ udst[i] = src[4*i+1];
+ ydst[2*i+1] = src[4*i+2];
+ vdst[i] = src[4*i+3];
+ }
+ ydst += lumStride;
+ src += srcStride;
+
+ for (i=0; i<chromWidth; i++)
+ {
+ ydst[2*i+0] = src[4*i+0];
+ ydst[2*i+1] = src[4*i+2];
+ }
+#endif
+ udst += chromStride;
+ vdst += chromStride;
+ ydst += lumStride;
+ src += srcStride;
+ }
+#if HAVE_MMX
+__asm__ volatile( EMMS" \n\t"
+ SFENCE" \n\t"
+ :::"memory");
+#endif
+}
+
+static inline void RENAME(yvu9toyv12)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc,
+ uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
+ long width, long height, stride_t lumStride, stride_t chromStride)
+{
+ /* Y Plane */
+ memcpy(ydst, ysrc, width*height);
+
+ /* XXX: implement upscaling for U,V */
+}
+
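+/**
+ * Doubles one plane in both directions (2x upscale).
+ * In the main loop each new sample is a 3:1 weighted average of its two
+ * nearest source neighbours, e.g. dst[2*x+1] = (3*src[x] + src[x+srcStride+1])>>2,
+ * while the first and last lines (and the edge columns) are handled
+ * separately with purely horizontal weights or plain replication.
+ */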
+static inline void RENAME(planar2x)(const uint8_t *src, uint8_t *dst, long srcWidth, long srcHeight, stride_t srcStride, stride_t dstStride)
+{
+ long x,y;
+
+ dst[0]= src[0];
+
+ // first line
+ for (x=0; x<srcWidth-1; x++){
+ dst[2*x+1]= (3*src[x] + src[x+1])>>2;
+ dst[2*x+2]= ( src[x] + 3*src[x+1])>>2;
+ }
+ dst[2*srcWidth-1]= src[srcWidth-1];
+
+ dst+= dstStride;
+
+ for (y=1; y<srcHeight; y++){
+#if HAVE_MMX2 || HAVE_AMD3DNOW
+ const x86_reg mmxSize= srcWidth&~15;
+ __asm__ volatile(
+ "mov %4, %%"REG_a" \n\t"
+ "1: \n\t"
+ "movq (%0, %%"REG_a"), %%mm0 \n\t"
+ "movq (%1, %%"REG_a"), %%mm1 \n\t"
+ "movq 1(%0, %%"REG_a"), %%mm2 \n\t"
+ "movq 1(%1, %%"REG_a"), %%mm3 \n\t"
+ "movq -1(%0, %%"REG_a"), %%mm4 \n\t"
+ "movq -1(%1, %%"REG_a"), %%mm5 \n\t"
+ PAVGB" %%mm0, %%mm5 \n\t"
+ PAVGB" %%mm0, %%mm3 \n\t"
+ PAVGB" %%mm0, %%mm5 \n\t"
+ PAVGB" %%mm0, %%mm3 \n\t"
+ PAVGB" %%mm1, %%mm4 \n\t"
+ PAVGB" %%mm1, %%mm2 \n\t"
+ PAVGB" %%mm1, %%mm4 \n\t"
+ PAVGB" %%mm1, %%mm2 \n\t"
+ "movq %%mm5, %%mm7 \n\t"
+ "movq %%mm4, %%mm6 \n\t"
+ "punpcklbw %%mm3, %%mm5 \n\t"
+ "punpckhbw %%mm3, %%mm7 \n\t"
+ "punpcklbw %%mm2, %%mm4 \n\t"
+ "punpckhbw %%mm2, %%mm6 \n\t"
+#if 1
+ MOVNTQ" %%mm5, (%2, %%"REG_a", 2) \n\t"
+ MOVNTQ" %%mm7, 8(%2, %%"REG_a", 2) \n\t"
+ MOVNTQ" %%mm4, (%3, %%"REG_a", 2) \n\t"
+ MOVNTQ" %%mm6, 8(%3, %%"REG_a", 2) \n\t"
+#else
+ "movq %%mm5, (%2, %%"REG_a", 2) \n\t"
+ "movq %%mm7, 8(%2, %%"REG_a", 2) \n\t"
+ "movq %%mm4, (%3, %%"REG_a", 2) \n\t"
+ "movq %%mm6, 8(%3, %%"REG_a", 2) \n\t"
+#endif
+ "add $8, %%"REG_a" \n\t"
+ " js 1b \n\t"
+ :: "r" (src + mmxSize ), "r" (src + srcStride + mmxSize ),
+ "r" (dst + mmxSize*2), "r" (dst + dstStride + mmxSize*2),
+ "g" (-mmxSize)
+ : "%"REG_a
+
+ );
+#else
+ const x86_reg mmxSize=1;
+#endif
+ dst[0 ]= (3*src[0] + src[srcStride])>>2;
+ dst[dstStride]= ( src[0] + 3*src[srcStride])>>2;
+
+ for (x=mmxSize-1; x<srcWidth-1; x++){
+ dst[2*x +1]= (3*src[x+0] + src[x+srcStride+1])>>2;
+ dst[2*x+dstStride+2]= ( src[x+0] + 3*src[x+srcStride+1])>>2;
+ dst[2*x+dstStride+1]= ( src[x+1] + 3*src[x+srcStride ])>>2;
+ dst[2*x +2]= (3*src[x+1] + src[x+srcStride ])>>2;
+ }
+ dst[srcWidth*2 -1 ]= (3*src[srcWidth-1] + src[srcWidth-1 + srcStride])>>2;
+ dst[srcWidth*2 -1 + dstStride]= ( src[srcWidth-1] + 3*src[srcWidth-1 + srcStride])>>2;
+
+ dst+=dstStride*2;
+ src+=srcStride;
+ }
+
+ // last line
+#if 1
+ dst[0]= src[0];
+
+ for (x=0; x<srcWidth-1; x++){
+ dst[2*x+1]= (3*src[x] + src[x+1])>>2;
+ dst[2*x+2]= ( src[x] + 3*src[x+1])>>2;
+ }
+ dst[2*srcWidth-1]= src[srcWidth-1];
+#else
+ for (x=0; x<srcWidth; x++){
+ dst[2*x+0]=
+ dst[2*x+1]= src[x];
+ }
+#endif
+
+#if HAVE_MMX
+__asm__ volatile( EMMS" \n\t"
+ SFENCE" \n\t"
+ :::"memory");
+#endif
+}
+
+/**
+ * Height should be a multiple of 2 and width should be a multiple of 16.
+ * (If this is a problem for anyone then tell me, and I will fix it.)
+ * Chrominance data is only taken from every second line, others are ignored.
+ * FIXME: Write HQ version.
+ */
+static inline void RENAME(uyvytoyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
+ long width, long height,
+ stride_t lumStride, stride_t chromStride, stride_t srcStride)
+{
+ long y;
+ const x86_reg chromWidth= width>>1;
+ for (y=0; y<height; y+=2)
+ {
+#if HAVE_MMX
+ __asm__ volatile(
+ "xor %%"REG_a", %%"REG_a" \n\t"
+ "pcmpeqw %%mm7, %%mm7 \n\t"
+ "psrlw $8, %%mm7 \n\t" // FF,00,FF,00...
+ ASMALIGN(4)
+ "1: \n\t"
+ PREFETCH" 64(%0, %%"REG_a", 4) \n\t"
+ "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // UYVY UYVY(0)
+ "movq 8(%0, %%"REG_a", 4), %%mm1 \n\t" // UYVY UYVY(4)
+ "movq %%mm0, %%mm2 \n\t" // UYVY UYVY(0)
+ "movq %%mm1, %%mm3 \n\t" // UYVY UYVY(4)
+ "pand %%mm7, %%mm0 \n\t" // U0V0 U0V0(0)
+ "pand %%mm7, %%mm1 \n\t" // U0V0 U0V0(4)
+ "psrlw $8, %%mm2 \n\t" // Y0Y0 Y0Y0(0)
+ "psrlw $8, %%mm3 \n\t" // Y0Y0 Y0Y0(4)
+ "packuswb %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
+ "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(0)
+
+ MOVNTQ" %%mm2, (%1, %%"REG_a", 2) \n\t"
+
+ "movq 16(%0, %%"REG_a", 4), %%mm1 \n\t" // UYVY UYVY(8)
+ "movq 24(%0, %%"REG_a", 4), %%mm2 \n\t" // UYVY UYVY(12)
+ "movq %%mm1, %%mm3 \n\t" // UYVY UYVY(8)
+ "movq %%mm2, %%mm4 \n\t" // UYVY UYVY(12)
+ "pand %%mm7, %%mm1 \n\t" // U0V0 U0V0(8)
+ "pand %%mm7, %%mm2 \n\t" // U0V0 U0V0(12)
+ "psrlw $8, %%mm3 \n\t" // Y0Y0 Y0Y0(8)
+ "psrlw $8, %%mm4 \n\t" // Y0Y0 Y0Y0(12)
+ "packuswb %%mm2, %%mm1 \n\t" // UVUV UVUV(8)
+ "packuswb %%mm4, %%mm3 \n\t" // YYYY YYYY(8)
+
+ MOVNTQ" %%mm3, 8(%1, %%"REG_a", 2) \n\t"
+
+ "movq %%mm0, %%mm2 \n\t" // UVUV UVUV(0)
+ "movq %%mm1, %%mm3 \n\t" // UVUV UVUV(8)
+ "psrlw $8, %%mm0 \n\t" // V0V0 V0V0(0)
+ "psrlw $8, %%mm1 \n\t" // V0V0 V0V0(8)
+ "pand %%mm7, %%mm2 \n\t" // U0U0 U0U0(0)
+ "pand %%mm7, %%mm3 \n\t" // U0U0 U0U0(8)
+ "packuswb %%mm1, %%mm0 \n\t" // VVVV VVVV(0)
+ "packuswb %%mm3, %%mm2 \n\t" // UUUU UUUU(0)
+
+ MOVNTQ" %%mm0, (%3, %%"REG_a") \n\t"
+ MOVNTQ" %%mm2, (%2, %%"REG_a") \n\t"
+
+ "add $8, %%"REG_a" \n\t"
+ "cmp %4, %%"REG_a" \n\t"
+ " jb 1b \n\t"
+ ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
+ : "memory", "%"REG_a
+ );
+
+ ydst += lumStride;
+ src += srcStride;
+
+ __asm__ volatile(
+ "xor %%"REG_a", %%"REG_a" \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ PREFETCH" 64(%0, %%"REG_a", 4) \n\t"
+ "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // YUYV YUYV(0)
+ "movq 8(%0, %%"REG_a", 4), %%mm1 \n\t" // YUYV YUYV(4)
+ "movq 16(%0, %%"REG_a", 4), %%mm2 \n\t" // YUYV YUYV(8)
+ "movq 24(%0, %%"REG_a", 4), %%mm3 \n\t" // YUYV YUYV(12)
+ "psrlw $8, %%mm0 \n\t" // Y0Y0 Y0Y0(0)
+ "psrlw $8, %%mm1 \n\t" // Y0Y0 Y0Y0(4)
+ "psrlw $8, %%mm2 \n\t" // Y0Y0 Y0Y0(8)
+ "psrlw $8, %%mm3 \n\t" // Y0Y0 Y0Y0(12)
+ "packuswb %%mm1, %%mm0 \n\t" // YYYY YYYY(0)
+ "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(8)
+
+ MOVNTQ" %%mm0, (%1, %%"REG_a", 2) \n\t"
+ MOVNTQ" %%mm2, 8(%1, %%"REG_a", 2) \n\t"
+
+ "add $8, %%"REG_a" \n\t"
+ "cmp %4, %%"REG_a" \n\t"
+ " jb 1b \n\t"
+
+ ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
+ : "memory", "%"REG_a
+ );
+#else
+ long i;
+ for (i=0; i<chromWidth; i++)
+ {
+ udst[i] = src[4*i+0];
+ ydst[2*i+0] = src[4*i+1];
+ vdst[i] = src[4*i+2];
+ ydst[2*i+1] = src[4*i+3];
+ }
+ ydst += lumStride;
+ src += srcStride;
+
+ for (i=0; i<chromWidth; i++)
+ {
+ ydst[2*i+0] = src[4*i+1];
+ ydst[2*i+1] = src[4*i+3];
+ }
+#endif
+ udst += chromStride;
+ vdst += chromStride;
+ ydst += lumStride;
+ src += srcStride;
+ }
+#if HAVE_MMX
+__asm__ volatile( EMMS" \n\t"
+ SFENCE" \n\t"
+ :::"memory");
+#endif
+}
+
+/**
+ * Height should be a multiple of 2 and width should be a multiple of 2.
+ * (If this is a problem for anyone then tell me, and I will fix it.)
+ * Chrominance data is only taken from every second line,
+ * others are ignored in the C version.
+ * FIXME: Write HQ version.
+ */
+static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
+ long width0, long height,
+ stride_t lumStride, stride_t chromStride, stride_t srcStride)
+{
+ long y;
+ const stride_t chromWidth= width0>>1;
+ const stride_t width = width0;
+#if HAVE_MMX
+ for(y=0; y<height-2; y+=2)
+ {
+ long i;
+ for(i=0; i<2; i++)
+ {
+ asm volatile(
+ "mov %2, %%"REG_a" \n\t"
+ "movq "MANGLE(bgr2YCoeff)", %%mm6 \n\t"
+ "movq "MANGLE(w1111)", %%mm5 \n\t"
+ "pxor %%mm7, %%mm7 \n\t"
+ "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"\n\t"
+ ".balign 16 \n\t"
+ "1: \n\t"
+ PREFETCH" 64(%0, %%"REG_b") \n\t"
+ "movd (%0, %%"REG_b"), %%mm0 \n\t"
+ "movd 3(%0, %%"REG_b"), %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+ "movd 6(%0, %%"REG_b"), %%mm2 \n\t"
+ "movd 9(%0, %%"REG_b"), %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm3 \n\t"
+ "pmaddwd %%mm6, %%mm0 \n\t"
+ "pmaddwd %%mm6, %%mm1 \n\t"
+ "pmaddwd %%mm6, %%mm2 \n\t"
+ "pmaddwd %%mm6, %%mm3 \n\t"
+#ifndef FAST_BGR2YV12
+ "psrad $8, %%mm0 \n\t"
+ "psrad $8, %%mm1 \n\t"
+ "psrad $8, %%mm2 \n\t"
+ "psrad $8, %%mm3 \n\t"
+#endif
+ "packssdw %%mm1, %%mm0 \n\t"
+ "packssdw %%mm3, %%mm2 \n\t"
+ "pmaddwd %%mm5, %%mm0 \n\t"
+ "pmaddwd %%mm5, %%mm2 \n\t"
+ "packssdw %%mm2, %%mm0 \n\t"
+ "psraw $7, %%mm0 \n\t"
+
+ "movd 12(%0, %%"REG_b"), %%mm4 \n\t"
+ "movd 15(%0, %%"REG_b"), %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+ "movd 18(%0, %%"REG_b"), %%mm2 \n\t"
+ "movd 21(%0, %%"REG_b"), %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm3 \n\t"
+ "pmaddwd %%mm6, %%mm4 \n\t"
+ "pmaddwd %%mm6, %%mm1 \n\t"
+ "pmaddwd %%mm6, %%mm2 \n\t"
+ "pmaddwd %%mm6, %%mm3 \n\t"
+#ifndef FAST_BGR2YV12
+ "psrad $8, %%mm4 \n\t"
+ "psrad $8, %%mm1 \n\t"
+ "psrad $8, %%mm2 \n\t"
+ "psrad $8, %%mm3 \n\t"
+#endif
+ "packssdw %%mm1, %%mm4 \n\t"
+ "packssdw %%mm3, %%mm2 \n\t"
+ "pmaddwd %%mm5, %%mm4 \n\t"
+ "pmaddwd %%mm5, %%mm2 \n\t"
+ "add $24, %%"REG_b" \n\t"
+ "packssdw %%mm2, %%mm4 \n\t"
+ "psraw $7, %%mm4 \n\t"
+
+ "packuswb %%mm4, %%mm0 \n\t"
+ "paddusb "MANGLE(bgr2YOffset)", %%mm0 \n\t"
+
+ MOVNTQ" %%mm0, (%1, %%"REG_a") \n\t"
+ "add $8, %%"REG_a" \n\t"
+ " js 1b \n\t"
+ : : "r" (src+width*3), "r" (ydst+width), "g" (-width)
+ : "%"REG_a, "%"REG_b
+ );
+ ydst += lumStride;
+ src += srcStride;
+ }
+ src -= srcStride*2;
+ asm volatile(
+ "mov %4, %%"REG_a" \n\t"
+ "movq "MANGLE(w1111)", %%mm5 \n\t"
+ "movq "MANGLE(bgr2UCoeff)", %%mm6 \n\t"
+ "pxor %%mm7, %%mm7 \n\t"
+ "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"\n\t"
+ "add %%"REG_b", %%"REG_b" \n\t"
+ ".balign 16 \n\t"
+ "1: \n\t"
+ PREFETCH" 64(%0, %%"REG_b") \n\t"
+ PREFETCH" 64(%1, %%"REG_b") \n\t"
+#if HAVE_MMX2 || HAVE_AMD3DNOW
+ "movq (%0, %%"REG_b"), %%mm0 \n\t"
+ "movq (%1, %%"REG_b"), %%mm1 \n\t"
+ "movq 6(%0, %%"REG_b"), %%mm2 \n\t"
+ "movq 6(%1, %%"REG_b"), %%mm3 \n\t"
+ PAVGB" %%mm1, %%mm0 \n\t"
+ PAVGB" %%mm3, %%mm2 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "psrlq $24, %%mm0 \n\t"
+ "psrlq $24, %%mm2 \n\t"
+ PAVGB" %%mm1, %%mm0 \n\t"
+ PAVGB" %%mm3, %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+#else
+ "movd (%0, %%"REG_b"), %%mm0 \n\t"
+ "movd (%1, %%"REG_b"), %%mm1 \n\t"
+ "movd 3(%0, %%"REG_b"), %%mm2 \n\t"
+ "movd 3(%1, %%"REG_b"), %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm3 \n\t"
+ "paddw %%mm1, %%mm0 \n\t"
+ "paddw %%mm3, %%mm2 \n\t"
+ "paddw %%mm2, %%mm0 \n\t"
+ "movd 6(%0, %%"REG_b"), %%mm4 \n\t"
+ "movd 6(%1, %%"REG_b"), %%mm1 \n\t"
+ "movd 9(%0, %%"REG_b"), %%mm2 \n\t"
+ "movd 9(%1, %%"REG_b"), %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm3 \n\t"
+ "paddw %%mm1, %%mm4 \n\t"
+ "paddw %%mm3, %%mm2 \n\t"
+ "paddw %%mm4, %%mm2 \n\t"
+ "psrlw $2, %%mm0 \n\t"
+ "psrlw $2, %%mm2 \n\t"
+#endif
+ "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t"
+ "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t"
+
+ "pmaddwd %%mm0, %%mm1 \n\t"
+ "pmaddwd %%mm2, %%mm3 \n\t"
+ "pmaddwd %%mm6, %%mm0 \n\t"
+ "pmaddwd %%mm6, %%mm2 \n\t"
+#ifndef FAST_BGR2YV12
+ "psrad $8, %%mm0 \n\t"
+ "psrad $8, %%mm1 \n\t"
+ "psrad $8, %%mm2 \n\t"
+ "psrad $8, %%mm3 \n\t"
+#endif
+ "packssdw %%mm2, %%mm0 \n\t"
+ "packssdw %%mm3, %%mm1 \n\t"
+ "pmaddwd %%mm5, %%mm0 \n\t"
+ "pmaddwd %%mm5, %%mm1 \n\t"
+ "packssdw %%mm1, %%mm0 \n\t" // V1 V0 U1 U0
+ "psraw $7, %%mm0 \n\t"
+
+#if HAVE_MMX2 || HAVE_AMD3DNOW
+ "movq 12(%0, %%"REG_b"), %%mm4 \n\t"
+ "movq 12(%1, %%"REG_b"), %%mm1 \n\t"
+ "movq 18(%0, %%"REG_b"), %%mm2 \n\t"
+ "movq 18(%1, %%"REG_b"), %%mm3 \n\t"
+ PAVGB" %%mm1, %%mm4 \n\t"
+ PAVGB" %%mm3, %%mm2 \n\t"
+ "movq %%mm4, %%mm1 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "psrlq $24, %%mm4 \n\t"
+ "psrlq $24, %%mm2 \n\t"
+ PAVGB" %%mm1, %%mm4 \n\t"
+ PAVGB" %%mm3, %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+#else
+ "movd 12(%0, %%"REG_b"), %%mm4 \n\t"
+ "movd 12(%1, %%"REG_b"), %%mm1 \n\t"
+ "movd 15(%0, %%"REG_b"), %%mm2 \n\t"
+ "movd 15(%1, %%"REG_b"), %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm3 \n\t"
+ "paddw %%mm1, %%mm4 \n\t"
+ "paddw %%mm3, %%mm2 \n\t"
+ "paddw %%mm2, %%mm4 \n\t"
+ "movd 18(%0, %%"REG_b"), %%mm5 \n\t"
+ "movd 18(%1, %%"REG_b"), %%mm1 \n\t"
+ "movd 21(%0, %%"REG_b"), %%mm2 \n\t"
+ "movd 21(%1, %%"REG_b"), %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm5 \n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm3 \n\t"
+ "paddw %%mm1, %%mm5 \n\t"
+ "paddw %%mm3, %%mm2 \n\t"
+ "paddw %%mm5, %%mm2 \n\t"
+ "movq "MANGLE(w1111)", %%mm5 \n\t"
+ "psrlw $2, %%mm4 \n\t"
+ "psrlw $2, %%mm2 \n\t"
+#endif
+ "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t"
+ "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t"
+
+ "pmaddwd %%mm4, %%mm1 \n\t"
+ "pmaddwd %%mm2, %%mm3 \n\t"
+ "pmaddwd %%mm6, %%mm4 \n\t"
+ "pmaddwd %%mm6, %%mm2 \n\t"
+#ifndef FAST_BGR2YV12
+ "psrad $8, %%mm4 \n\t"
+ "psrad $8, %%mm1 \n\t"
+ "psrad $8, %%mm2 \n\t"
+ "psrad $8, %%mm3 \n\t"
+#endif
+ "packssdw %%mm2, %%mm4 \n\t"
+ "packssdw %%mm3, %%mm1 \n\t"
+ "pmaddwd %%mm5, %%mm4 \n\t"
+ "pmaddwd %%mm5, %%mm1 \n\t"
+ "add $24, %%"REG_b" \n\t"
+ "packssdw %%mm1, %%mm4 \n\t" // V3 V2 U3 U2
+ "psraw $7, %%mm4 \n\t"
+
+ "movq %%mm0, %%mm1 \n\t"
+ "punpckldq %%mm4, %%mm0 \n\t"
+ "punpckhdq %%mm4, %%mm1 \n\t"
+ "packsswb %%mm1, %%mm0 \n\t"
+ "paddb "MANGLE(bgr2UVOffset)", %%mm0 \n\t"
+ "movd %%mm0, (%2, %%"REG_a") \n\t"
+ "punpckhdq %%mm0, %%mm0 \n\t"
+ "movd %%mm0, (%3, %%"REG_a") \n\t"
+ "add $4, %%"REG_a" \n\t"
+ " js 1b \n\t"
+ : : "r" (src+chromWidth*6), "r" (src+srcStride+chromWidth*6), "r" (udst+chromWidth), "r" (vdst+chromWidth), "g" (-chromWidth)
+ : "%"REG_a, "%"REG_b
+ );
+
+ udst += chromStride;
+ vdst += chromStride;
+ src += srcStride*2;
+ }
+
+ asm volatile( EMMS" \n\t"
+ SFENCE" \n\t"
+ :::"memory");
+#else
+ y=0;
+#endif
+ for(; y<height; y+=2)
+ {
+ long i;
+ for(i=0; i<chromWidth; i++)
+ {
+ unsigned int b= src[6*i+0];
+ unsigned int g= src[6*i+1];
+ unsigned int r= src[6*i+2];
+
+ unsigned int Y = ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16;
+ unsigned int V = ((RV*r + GV*g + BV*b)>>RGB2YUV_SHIFT) + 128;
+ unsigned int U = ((RU*r + GU*g + BU*b)>>RGB2YUV_SHIFT) + 128;
+
+ udst[i] = U;
+ vdst[i] = V;
+ ydst[2*i] = Y;
+
+ b= src[6*i+3];
+ g= src[6*i+4];
+ r= src[6*i+5];
+
+ Y = ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16;
+ ydst[2*i+1] = Y;
+ }
+ ydst += lumStride;
+ src += srcStride;
+
+ for(i=0; i<chromWidth; i++)
+ {
+ unsigned int b= src[6*i+0];
+ unsigned int g= src[6*i+1];
+ unsigned int r= src[6*i+2];
+
+ unsigned int Y = ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16;
+
+ ydst[2*i] = Y;
+
+ b= src[6*i+3];
+ g= src[6*i+4];
+ r= src[6*i+5];
+
+ Y = ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16;
+ ydst[2*i+1] = Y;
+ }
+ udst += chromStride;
+ vdst += chromStride;
+ ydst += lumStride;
+ src += srcStride;
+ }
+}
+
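+/**
+ * Interleaves two byte planes into one: for every row,
+ * dest[2*w+0] = src1[w] and dest[2*w+1] = src2[w] (see the C fallback below);
+ * the SSE2/MMX paths process 16 source bytes per iteration and leave the
+ * remaining (width & 15) bytes to the scalar tail loop.
+ */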
+void RENAME(interleaveBytes)(uint8_t *src1, uint8_t *src2, uint8_t *dest,
+ long width0, long height, stride_t src1Stride,
+ stride_t src2Stride, stride_t dstStride){
+ long h;
+ const stride_t width = width0;
+
+ for(h=0; h < height; h++)
+ {
+ long w;
+
+#if HAVE_MMX
+#if HAVE_SSE2
+ __asm__(
+ "xor %%"REG_a", %%"REG_a" \n\t"
+ "1: \n\t"
+ PREFETCH" 64(%1, %%"REG_a") \n\t"
+ PREFETCH" 64(%2, %%"REG_a") \n\t"
+ "movdqa (%1, %%"REG_a"), %%xmm0 \n\t"
+ "movdqa (%1, %%"REG_a"), %%xmm1 \n\t"
+ "movdqa (%2, %%"REG_a"), %%xmm2 \n\t"
+ "punpcklbw %%xmm2, %%xmm0 \n\t"
+ "punpckhbw %%xmm2, %%xmm1 \n\t"
+ "movntdq %%xmm0, (%0, %%"REG_a", 2) \n\t"
+ "movntdq %%xmm1, 16(%0, %%"REG_a", 2) \n\t"
+ "add $16, %%"REG_a" \n\t"
+ "cmp %3, %%"REG_a" \n\t"
+ " jb 1b \n\t"
+ ::"r"(dest), "r"(src1), "r"(src2), "r" ((x86_reg)width-15)
+ : "memory", "%"REG_a""
+ );
+#else
+ __asm__(
+ "xor %%"REG_a", %%"REG_a" \n\t"
+ "1: \n\t"
+ PREFETCH" 64(%1, %%"REG_a") \n\t"
+ PREFETCH" 64(%2, %%"REG_a") \n\t"
+ "movq (%1, %%"REG_a"), %%mm0 \n\t"
+ "movq 8(%1, %%"REG_a"), %%mm2 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "movq (%2, %%"REG_a"), %%mm4 \n\t"
+ "movq 8(%2, %%"REG_a"), %%mm5 \n\t"
+ "punpcklbw %%mm4, %%mm0 \n\t"
+ "punpckhbw %%mm4, %%mm1 \n\t"
+ "punpcklbw %%mm5, %%mm2 \n\t"
+ "punpckhbw %%mm5, %%mm3 \n\t"
+ MOVNTQ" %%mm0, (%0, %%"REG_a", 2) \n\t"
+ MOVNTQ" %%mm1, 8(%0, %%"REG_a", 2) \n\t"
+ MOVNTQ" %%mm2, 16(%0, %%"REG_a", 2) \n\t"
+ MOVNTQ" %%mm3, 24(%0, %%"REG_a", 2) \n\t"
+ "add $16, %%"REG_a" \n\t"
+ "cmp %3, %%"REG_a" \n\t"
+ " jb 1b \n\t"
+ ::"r"(dest), "r"(src1), "r"(src2), "r" ((x86_reg)width-15)
+ : "memory", "%"REG_a
+ );
+#endif
+ for (w= (width&(~15)); w < width; w++)
+ {
+ dest[2*w+0] = src1[w];
+ dest[2*w+1] = src2[w];
+ }
+#else
+ for (w=0; w < width; w++)
+ {
+ dest[2*w+0] = src1[w];
+ dest[2*w+1] = src2[w];
+ }
+#endif
+ dest += dstStride;
+ src1 += src1Stride;
+ src2 += src2Stride;
+ }
+#if HAVE_MMX
+ __asm__(
+ EMMS" \n\t"
+ SFENCE" \n\t"
+ ::: "memory"
+ );
+#endif
+}
+
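+/*
+ * Nearest-neighbour 2x upscale of the two chroma planes: each source byte
+ * is written twice horizontally (d[2*x] = d[2*x+1] = s[x]) and each source
+ * row is reused for two destination rows via the y>>1 indexing.
+ */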
+static inline void RENAME(vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2,
+ uint8_t *dst1, uint8_t *dst2,
+ long width, long height,
+ stride_t srcStride1, stride_t srcStride2,
+ stride_t dstStride1, stride_t dstStride2)
+{
+ x86_reg y;
+ long x,w,h;
+ w=width/2; h=height/2;
+#if HAVE_MMX
+ __asm__ volatile(
+ PREFETCH" %0 \n\t"
+ PREFETCH" %1 \n\t"
+ ::"m"(*(src1+srcStride1)),"m"(*(src2+srcStride2)):"memory");
+#endif
+ for (y=0;y<h;y++){
+ const uint8_t* s1=src1+srcStride1*(y>>1);
+ uint8_t* d=dst1+dstStride1*y;
+ x=0;
+#if HAVE_MMX
+ for (;x<w-31;x+=32)
+ {
+ __asm__ volatile(
+ PREFETCH" 32%1 \n\t"
+ "movq %1, %%mm0 \n\t"
+ "movq 8%1, %%mm2 \n\t"
+ "movq 16%1, %%mm4 \n\t"
+ "movq 24%1, %%mm6 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "movq %%mm4, %%mm5 \n\t"
+ "movq %%mm6, %%mm7 \n\t"
+ "punpcklbw %%mm0, %%mm0 \n\t"
+ "punpckhbw %%mm1, %%mm1 \n\t"
+ "punpcklbw %%mm2, %%mm2 \n\t"
+ "punpckhbw %%mm3, %%mm3 \n\t"
+ "punpcklbw %%mm4, %%mm4 \n\t"
+ "punpckhbw %%mm5, %%mm5 \n\t"
+ "punpcklbw %%mm6, %%mm6 \n\t"
+ "punpckhbw %%mm7, %%mm7 \n\t"
+ MOVNTQ" %%mm0, %0 \n\t"
+ MOVNTQ" %%mm1, 8%0 \n\t"
+ MOVNTQ" %%mm2, 16%0 \n\t"
+ MOVNTQ" %%mm3, 24%0 \n\t"
+ MOVNTQ" %%mm4, 32%0 \n\t"
+ MOVNTQ" %%mm5, 40%0 \n\t"
+ MOVNTQ" %%mm6, 48%0 \n\t"
+ MOVNTQ" %%mm7, 56%0"
+ :"=m"(d[2*x])
+ :"m"(s1[x])
+ :"memory");
+ }
+#endif
+ for (;x<w;x++) d[2*x]=d[2*x+1]=s1[x];
+ }
+ for (y=0;y<h;y++){
+ const uint8_t* s2=src2+srcStride2*(y>>1);
+ uint8_t* d=dst2+dstStride2*y;
+ x=0;
+#if HAVE_MMX
+ for (;x<w-31;x+=32)
+ {
+ __asm__ volatile(
+ PREFETCH" 32%1 \n\t"
+ "movq %1, %%mm0 \n\t"
+ "movq 8%1, %%mm2 \n\t"
+ "movq 16%1, %%mm4 \n\t"
+ "movq 24%1, %%mm6 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "movq %%mm4, %%mm5 \n\t"
+ "movq %%mm6, %%mm7 \n\t"
+ "punpcklbw %%mm0, %%mm0 \n\t"
+ "punpckhbw %%mm1, %%mm1 \n\t"
+ "punpcklbw %%mm2, %%mm2 \n\t"
+ "punpckhbw %%mm3, %%mm3 \n\t"
+ "punpcklbw %%mm4, %%mm4 \n\t"
+ "punpckhbw %%mm5, %%mm5 \n\t"
+ "punpcklbw %%mm6, %%mm6 \n\t"
+ "punpckhbw %%mm7, %%mm7 \n\t"
+ MOVNTQ" %%mm0, %0 \n\t"
+ MOVNTQ" %%mm1, 8%0 \n\t"
+ MOVNTQ" %%mm2, 16%0 \n\t"
+ MOVNTQ" %%mm3, 24%0 \n\t"
+ MOVNTQ" %%mm4, 32%0 \n\t"
+ MOVNTQ" %%mm5, 40%0 \n\t"
+ MOVNTQ" %%mm6, 48%0 \n\t"
+ MOVNTQ" %%mm7, 56%0"
+ :"=m"(d[2*x])
+ :"m"(s2[x])
+ :"memory");
+ }
+#endif
+ for (;x<w;x++) d[2*x]=d[2*x+1]=s2[x];
+ }
+#if HAVE_MMX
+ __asm__(
+ EMMS" \n\t"
+ SFENCE" \n\t"
+ ::: "memory"
+ );
+#endif
+}
+
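+/*
+ * Packs YVU9-style planes into an interleaved Y U Y V stream: chroma rows
+ * are read at a quarter of the luma rate (y>>2) and, in the C fallback,
+ * each up[x]/vp[x] sample is reused for four consecutive luma samples.
+ */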
+static inline void RENAME(yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2, const uint8_t *src3,
+ uint8_t *dst,
+ long width, long height,
+ stride_t srcStride1, stride_t srcStride2,
+ stride_t srcStride3, stride_t dstStride)
+{
+ x86_reg x;
+ long y,w,h;
+ w=width/2; h=height;
+ for (y=0;y<h;y++){
+ const uint8_t* yp=src1+srcStride1*y;
+ const uint8_t* up=src2+srcStride2*(y>>2);
+ const uint8_t* vp=src3+srcStride3*(y>>2);
+ uint8_t* d=dst+dstStride*y;
+ x=0;
+#if HAVE_MMX
+ for (;x<w-7;x+=8)
+ {
+ __asm__ volatile(
+ PREFETCH" 32(%1, %0) \n\t"
+ PREFETCH" 32(%2, %0) \n\t"
+ PREFETCH" 32(%3, %0) \n\t"
+ "movq (%1, %0, 4), %%mm0 \n\t" /* Y0Y1Y2Y3Y4Y5Y6Y7 */
+ "movq (%2, %0), %%mm1 \n\t" /* U0U1U2U3U4U5U6U7 */
+ "movq (%3, %0), %%mm2 \n\t" /* V0V1V2V3V4V5V6V7 */
+ "movq %%mm0, %%mm3 \n\t" /* Y0Y1Y2Y3Y4Y5Y6Y7 */
+ "movq %%mm1, %%mm4 \n\t" /* U0U1U2U3U4U5U6U7 */
+ "movq %%mm2, %%mm5 \n\t" /* V0V1V2V3V4V5V6V7 */
+ "punpcklbw %%mm1, %%mm1 \n\t" /* U0U0 U1U1 U2U2 U3U3 */
+ "punpcklbw %%mm2, %%mm2 \n\t" /* V0V0 V1V1 V2V2 V3V3 */
+ "punpckhbw %%mm4, %%mm4 \n\t" /* U4U4 U5U5 U6U6 U7U7 */
+ "punpckhbw %%mm5, %%mm5 \n\t" /* V4V4 V5V5 V6V6 V7V7 */
+
+ "movq %%mm1, %%mm6 \n\t"
+ "punpcklbw %%mm2, %%mm1 \n\t" /* U0V0 U0V0 U1V1 U1V1*/
+ "punpcklbw %%mm1, %%mm0 \n\t" /* Y0U0 Y1V0 Y2U0 Y3V0*/
+ "punpckhbw %%mm1, %%mm3 \n\t" /* Y4U1 Y5V1 Y6U1 Y7V1*/
+ MOVNTQ" %%mm0, (%4, %0, 8) \n\t"
+ MOVNTQ" %%mm3, 8(%4, %0, 8) \n\t"
+
+ "punpckhbw %%mm2, %%mm6 \n\t" /* U2V2 U2V2 U3V3 U3V3*/
+ "movq 8(%1, %0, 4), %%mm0 \n\t"
+ "movq %%mm0, %%mm3 \n\t"
+ "punpcklbw %%mm6, %%mm0 \n\t" /* Y U2 Y V2 Y U2 Y V2*/
+ "punpckhbw %%mm6, %%mm3 \n\t" /* Y U3 Y V3 Y U3 Y V3*/
+ MOVNTQ" %%mm0, 16(%4, %0, 8) \n\t"
+ MOVNTQ" %%mm3, 24(%4, %0, 8) \n\t"
+
+ "movq %%mm4, %%mm6 \n\t"
+ "movq 16(%1, %0, 4), %%mm0 \n\t"
+ "movq %%mm0, %%mm3 \n\t"
+ "punpcklbw %%mm5, %%mm4 \n\t"
+ "punpcklbw %%mm4, %%mm0 \n\t" /* Y U4 Y V4 Y U4 Y V4*/
+ "punpckhbw %%mm4, %%mm3 \n\t" /* Y U5 Y V5 Y U5 Y V5*/
+ MOVNTQ" %%mm0, 32(%4, %0, 8) \n\t"
+ MOVNTQ" %%mm3, 40(%4, %0, 8) \n\t"
+
+ "punpckhbw %%mm5, %%mm6 \n\t"
+ "movq 24(%1, %0, 4), %%mm0 \n\t"
+ "movq %%mm0, %%mm3 \n\t"
+ "punpcklbw %%mm6, %%mm0 \n\t" /* Y U6 Y V6 Y U6 Y V6*/
+ "punpckhbw %%mm6, %%mm3 \n\t" /* Y U7 Y V7 Y U7 Y V7*/
+ MOVNTQ" %%mm0, 48(%4, %0, 8) \n\t"
+ MOVNTQ" %%mm3, 56(%4, %0, 8) \n\t"
+
+ : "+r" (x)
+ : "r"(yp), "r" (up), "r"(vp), "r"(d)
+ :"memory");
+ }
+#endif
+ for (; x<w; x++)
+ {
+ const long x2 = x<<2;
+ d[8*x+0] = yp[x2];
+ d[8*x+1] = up[x];
+ d[8*x+2] = yp[x2+1];
+ d[8*x+3] = vp[x];
+ d[8*x+4] = yp[x2+2];
+ d[8*x+5] = up[x];
+ d[8*x+6] = yp[x2+3];
+ d[8*x+7] = vp[x];
+ }
+ }
+#if HAVE_MMX
+ __asm__(
+ EMMS" \n\t"
+ SFENCE" \n\t"
+ ::: "memory"
+ );
+#endif
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/swscale.c b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/swscale.c
new file mode 100644
index 000000000..e2c3d5bf0
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/swscale.c
@@ -0,0 +1,3070 @@
+/*
+ * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * the C code (not assembly, mmx, ...) of this file can be used
+ * under the LGPL license too
+ */
+
+/*
+ * Modified to support multi-thread related features
+ * by Haruhiko Yamagata <h.yamagata@nifty.com> in 2006.
+ */
+
+/*
+ supported Input formats: YV12, I420/IYUV, YUY2, UYVY, BGR32, BGR24, BGR16, BGR15, RGB32, RGB24, Y8/Y800, YVU9/IF09
+ supported output formats: YV12, I420/IYUV, YUY2, UYVY, {BGR,RGB}{1,4,8,15,16,24,32}, Y8/Y800, YVU9/IF09
+ {BGR,RGB}{1,4,8,15,16} support dithering
+
+ unscaled special converters (YV12=I420=IYUV, Y800=Y8)
+ YV12 -> {BGR,RGB}{1,4,8,15,16,24,32}
+ x -> x
+ YUV9 -> YV12
+ YUV9/YV12 -> Y800
+ Y800 -> YUV9/YV12
+ BGR24 -> BGR32 & RGB24 -> RGB32
+ BGR32 -> BGR24 & RGB32 -> RGB24
+ BGR15 -> BGR16
+*/
+
+/*
+tested special converters (most are tested actually, but I did not write it down ...)
+ YV12 -> BGR16
+ YV12 -> YV12
+ BGR15 -> BGR16
+ BGR16 -> BGR16
+ YVU9 -> YV12
+
+untested special converters
+ YV12/I420 -> BGR15/BGR24/BGR32 (it is the yuv2rgb stuff, so it should be OK)
+ YV12/I420 -> YV12/I420
+ YUY2/BGR15/BGR24/BGR32/RGB24/RGB32 -> same format
+ BGR24 -> BGR32 & RGB24 -> RGB32
+ BGR32 -> BGR24 & RGB32 -> RGB24
+ BGR24 -> YV12
+*/
+
+//based on libswscale Rev 19211
+
+#include <windows.h>
+#include <inttypes.h>
+#include <string.h>
+#include <math.h>
+#include <stdio.h>
+#include "config.h"
+#include "../libavutil/internal.h"
+#include "../libavutil/x86_cpu.h"
+#include "../libavutil/bswap.h"
+#include "swscale.h"
+#include "swscale_internal.h"
+#include "ffImgfmt.h"
+#include "rgb2rgb.h"
+#include "libvo/fastmemcpy.h"
+
+#undef MOVNTQ
+#undef PAVGB
+
+//#undef HAVE_MMX2
+//#define HAVE_AMD3DNOW
+//#undef HAVE_MMX
+//#undef ARCH_X86
+//#define WORDS_BIGENDIAN
+#define DITHER1XBPP
+
+#define FAST_BGR2YV12 // use 7 bit coefficients instead of 15 bit
+
+#define RET 0xC3 //near return opcode for x86
+
+#ifdef MP_DEBUG
+#define ASSERT(x) assert(x);
+#else
+#define ASSERT(x) ;
+#endif
+
+#ifdef M_PI
+#define PI M_PI
+#else
+#define PI 3.14159265358979323846
+#endif
+
+//FIXME replace this with something faster
+#define isPlanarYUV(x) ((x)==IMGFMT_YV12 || (x)==IMGFMT_YVU9 \
+ || (x)==IMGFMT_NV12 || (x)==IMGFMT_NV21 \
+ || (x)==IMGFMT_444P || (x)==IMGFMT_422P || (x)==IMGFMT_411P)
+#define isYUV(x) ((x)==IMGFMT_UYVY || (x)==IMGFMT_YUY2 || (x)==IMGFMT_YVYU || (x)==IMGFMT_VYUY || isPlanarYUV(x))
+#define isGray(x) ((x)==IMGFMT_Y800)
+#define isRGB(x) (((x)&IMGFMT_RGB_MASK)==IMGFMT_RGB)
+#define isBGR(x) (((x)&IMGFMT_BGR_MASK)==IMGFMT_BGR)
+#define isSupportedIn(x) ((x)==IMGFMT_YV12 || (x)==IMGFMT_YUY2 || (x)==IMGFMT_UYVY ||(x)==IMGFMT_YVYU || (x)==IMGFMT_VYUY \
+ || isRGB(x) || isBGR(x) \
+ || (x)==IMGFMT_Y800 || (x)==IMGFMT_YVU9\
+ || (x)==IMGFMT_NV12 || (x)==IMGFMT_NV21\
+ || (x)==IMGFMT_444P || (x)==IMGFMT_422P || (x)==IMGFMT_411P)
+#define isSupportedOut(x) ((x)==IMGFMT_YV12 || (x)==IMGFMT_YUY2 || (x)==IMGFMT_UYVY || (x)==IMGFMT_YVYU || (x)==IMGFMT_VYUY \
+ || (x)==IMGFMT_444P || (x)==IMGFMT_422P || (x)==IMGFMT_411P\
+ || isRGB(x) || isBGR(x)\
+ || (x)==IMGFMT_NV12 || (x)==IMGFMT_NV21\
+ || (x)==IMGFMT_Y800 || (x)==IMGFMT_YVU9)
+#define isPacked(x) ((x)==IMGFMT_YUY2 || (x)==IMGFMT_UYVY || (x)==IMGFMT_YVYU || (x)==IMGFMT_VYUY || isRGB(x) || isBGR(x))
+
+#define RGB2YUV_SHIFT 16
+#define BY ((int)( 0.098*(1<<RGB2YUV_SHIFT)+0.5))
+#define BV ((int)(-0.071*(1<<RGB2YUV_SHIFT)+0.5))
+#define BU ((int)( 0.439*(1<<RGB2YUV_SHIFT)+0.5))
+#define GY ((int)( 0.504*(1<<RGB2YUV_SHIFT)+0.5))
+#define GV ((int)(-0.368*(1<<RGB2YUV_SHIFT)+0.5))
+#define GU ((int)(-0.291*(1<<RGB2YUV_SHIFT)+0.5))
+#define RY ((int)( 0.257*(1<<RGB2YUV_SHIFT)+0.5))
+#define RV ((int)( 0.439*(1<<RGB2YUV_SHIFT)+0.5))
+#define RU ((int)(-0.148*(1<<RGB2YUV_SHIFT)+0.5))
+
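+/* Fixed-point ITU-R BT.601 (limited range) RGB->YUV coefficients, scaled by
+ * 2^RGB2YUV_SHIFT. The C fallbacks apply them as, for example:
+ *   Y = ((RY*r + GY*g + BY*b) >> RGB2YUV_SHIFT) + 16;
+ *   U = ((RU*r + GU*g + BU*b) >> RGB2YUV_SHIFT) + 128;
+ *   V = ((RV*r + GV*g + BV*b) >> RGB2YUV_SHIFT) + 128;
+ */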
+extern const int32_t Inverse_Table_6_9[8][7];
+
+/*
+NOTES
+Special versions: fast Y 1:1 scaling (no interpolation in y direction)
+
+TODO
+more intelligent misalignment avoidance for the horizontal scaler
+write special vertical cubic upscale version
+optimize C code (YV12 / minmax)
+add support for packed pixel YUV input & output
+add support for Y8 output
+optimize BGR24 & BGR32
+add BGR4 output support
+write special BGR->BGR scaler
+*/
+
+#if ARCH_X86 && CONFIG_GPL
+DECLARE_ASM_CONST(8, uint64_t, bF8)= 0xF8F8F8F8F8F8F8F8LL;
+DECLARE_ASM_CONST(8, uint64_t, bFC)= 0xFCFCFCFCFCFCFCFCLL;
+DECLARE_ASM_CONST(8, uint64_t, w10)= 0x0010001000100010LL;
+DECLARE_ASM_CONST(8, uint64_t, w02)= 0x0002000200020002LL;
+DECLARE_ASM_CONST(8, uint64_t, bm00001111)=0x00000000FFFFFFFFLL;
+DECLARE_ASM_CONST(8, uint64_t, bm00000111)=0x0000000000FFFFFFLL;
+DECLARE_ASM_CONST(8, uint64_t, bm11111000)=0xFFFFFFFFFF000000LL;
+DECLARE_ASM_CONST(8, uint64_t, bm01010101)=0x00FF00FF00FF00FFLL;
+
+const DECLARE_ALIGNED(8, uint64_t, dither4[2]) = {
+ 0x0103010301030103LL,
+ 0x0200020002000200LL,};
+
+const DECLARE_ALIGNED(8, uint64_t, dither8[2]) = {
+ 0x0602060206020602LL,
+ 0x0004000400040004LL,};
+
+static volatile uint64_t attribute_used __attribute__((aligned(8))) b5Dither;
+static volatile uint64_t attribute_used __attribute__((aligned(8))) g5Dither;
+static volatile uint64_t attribute_used __attribute__((aligned(8))) g6Dither;
+static volatile uint64_t attribute_used __attribute__((aligned(8))) r5Dither;
+
+static uint64_t __attribute__((aligned(8))) b16Mask= 0x001F001F001F001FLL;
+static uint64_t attribute_used __attribute__((aligned(8))) g16Mask= 0x07E007E007E007E0LL;
+static uint64_t attribute_used __attribute__((aligned(8))) r16Mask= 0xF800F800F800F800LL;
+static uint64_t __attribute__((aligned(8))) b15Mask= 0x001F001F001F001FLL;
+static uint64_t attribute_used __attribute__((aligned(8))) g15Mask= 0x03E003E003E003E0LL;
+static uint64_t attribute_used __attribute__((aligned(8))) r15Mask= 0x7C007C007C007C00LL;
+
+static uint64_t attribute_used __attribute__((aligned(8))) M24A= 0x00FF0000FF0000FFLL;
+static uint64_t attribute_used __attribute__((aligned(8))) M24B= 0xFF0000FF0000FF00LL;
+static uint64_t attribute_used __attribute__((aligned(8))) M24C= 0x0000FF0000FF0000LL;
+
+#ifdef FAST_BGR2YV12
+static const uint64_t bgr2YCoeff attribute_used __attribute__((aligned(8))) = 0x000000210041000DULL;
+static const uint64_t bgr2UCoeff attribute_used __attribute__((aligned(8))) = 0x0000FFEEFFDC0038ULL;
+static const uint64_t bgr2VCoeff attribute_used __attribute__((aligned(8))) = 0x00000038FFD2FFF8ULL;
+#else
+static const uint64_t bgr2YCoeff attribute_used __attribute__((aligned(8))) = 0x000020E540830C8BULL;
+static const uint64_t bgr2UCoeff attribute_used __attribute__((aligned(8))) = 0x0000ED0FDAC23831ULL;
+static const uint64_t bgr2VCoeff attribute_used __attribute__((aligned(8))) = 0x00003831D0E6F6EAULL;
+#endif /* FAST_BGR2YV12 */
+static const uint64_t bgr2YOffset attribute_used __attribute__((aligned(8))) = 0x1010101010101010ULL;
+static const uint64_t bgr2UVOffset attribute_used __attribute__((aligned(8)))= 0x8080808080808080ULL;
+static const uint64_t w1111 attribute_used __attribute__((aligned(8))) = 0x0001000100010001ULL;
+#endif /* ARCH_X86 && CONFIG_GPL */
+
+// clipping helper table for C implementations:
+static unsigned char clip_table[768];
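+/* Presumably filled at init so that clip_table[v + 256] == av_clip_uint8(v),
+ * letting the C code clamp values in [-256, 511] to [0, 255] with one lookup. */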
+
+static SwsVector *sws_getConvVec(SwsVector *a, SwsVector *b);
+
+static const uint8_t __attribute__((aligned(8))) dither_2x2_4[2][8]={
+{ 1, 3, 1, 3, 1, 3, 1, 3, },
+{ 2, 0, 2, 0, 2, 0, 2, 0, },
+};
+
+static const uint8_t __attribute__((aligned(8))) dither_2x2_8[2][8]={
+{ 6, 2, 6, 2, 6, 2, 6, 2, },
+{ 0, 4, 0, 4, 0, 4, 0, 4, },
+};
+
+const uint8_t __attribute__((aligned(8))) dither_8x8_32[8][8]={
+{ 17, 9, 23, 15, 16, 8, 22, 14, },
+{ 5, 29, 3, 27, 4, 28, 2, 26, },
+{ 21, 13, 19, 11, 20, 12, 18, 10, },
+{ 0, 24, 6, 30, 1, 25, 7, 31, },
+{ 16, 8, 22, 14, 17, 9, 23, 15, },
+{ 4, 28, 2, 26, 5, 29, 3, 27, },
+{ 20, 12, 18, 10, 21, 13, 19, 11, },
+{ 1, 25, 7, 31, 0, 24, 6, 30, },
+};
+
+#if 0
+const uint8_t __attribute__((aligned(8))) dither_8x8_64[8][8]={
+{ 0, 48, 12, 60, 3, 51, 15, 63, },
+{ 32, 16, 44, 28, 35, 19, 47, 31, },
+{ 8, 56, 4, 52, 11, 59, 7, 55, },
+{ 40, 24, 36, 20, 43, 27, 39, 23, },
+{ 2, 50, 14, 62, 1, 49, 13, 61, },
+{ 34, 18, 46, 30, 33, 17, 45, 29, },
+{ 10, 58, 6, 54, 9, 57, 5, 53, },
+{ 42, 26, 38, 22, 41, 25, 37, 21, },
+};
+#endif
+
+const uint8_t __attribute__((aligned(8))) dither_8x8_73[8][8]={
+{ 0, 55, 14, 68, 3, 58, 17, 72, },
+{ 37, 18, 50, 32, 40, 22, 54, 35, },
+{ 9, 64, 5, 59, 13, 67, 8, 63, },
+{ 46, 27, 41, 23, 49, 31, 44, 26, },
+{ 2, 57, 16, 71, 1, 56, 15, 70, },
+{ 39, 21, 52, 34, 38, 19, 51, 33, },
+{ 11, 66, 7, 62, 10, 65, 6, 60, },
+{ 48, 30, 43, 25, 47, 29, 42, 24, },
+};
+
+#if 0
+const uint8_t __attribute__((aligned(8))) dither_8x8_128[8][8]={
+{ 68, 36, 92, 60, 66, 34, 90, 58, },
+{ 20, 116, 12, 108, 18, 114, 10, 106, },
+{ 84, 52, 76, 44, 82, 50, 74, 42, },
+{ 0, 96, 24, 120, 6, 102, 30, 126, },
+{ 64, 32, 88, 56, 70, 38, 94, 62, },
+{ 16, 112, 8, 104, 22, 118, 14, 110, },
+{ 80, 48, 72, 40, 86, 54, 78, 46, },
+{ 4, 100, 28, 124, 2, 98, 26, 122, },
+};
+#endif
+
+#if 1
+const uint8_t __attribute__((aligned(8))) dither_8x8_220[8][8]={
+{117, 62, 158, 103, 113, 58, 155, 100, },
+{ 34, 199, 21, 186, 31, 196, 17, 182, },
+{144, 89, 131, 76, 141, 86, 127, 72, },
+{ 0, 165, 41, 206, 10, 175, 52, 217, },
+{110, 55, 151, 96, 120, 65, 162, 107, },
+{ 28, 193, 14, 179, 38, 203, 24, 189, },
+{138, 83, 124, 69, 148, 93, 134, 79, },
+{ 7, 172, 48, 213, 3, 168, 45, 210, },
+};
+#elif 1
+// tries to correct a gamma of 1.5
+const uint8_t __attribute__((aligned(8))) dither_8x8_220[8][8]={
+{ 0, 143, 18, 200, 2, 156, 25, 215, },
+{ 78, 28, 125, 64, 89, 36, 138, 74, },
+{ 10, 180, 3, 161, 16, 195, 8, 175, },
+{109, 51, 93, 38, 121, 60, 105, 47, },
+{ 1, 152, 23, 210, 0, 147, 20, 205, },
+{ 85, 33, 134, 71, 81, 30, 130, 67, },
+{ 14, 190, 6, 171, 12, 185, 5, 166, },
+{117, 57, 101, 44, 113, 54, 97, 41, },
+};
+#elif 1
+// tries to correct a gamma of 2.0
+const uint8_t __attribute__((aligned(8))) dither_8x8_220[8][8]={
+{ 0, 124, 8, 193, 0, 140, 12, 213, },
+{ 55, 14, 104, 42, 66, 19, 119, 52, },
+{ 3, 168, 1, 145, 6, 187, 3, 162, },
+{ 86, 31, 70, 21, 99, 39, 82, 28, },
+{ 0, 134, 11, 206, 0, 129, 9, 200, },
+{ 62, 17, 114, 48, 58, 16, 109, 45, },
+{ 5, 181, 2, 157, 4, 175, 1, 151, },
+{ 95, 36, 78, 26, 90, 34, 74, 24, },
+};
+#else
+// tries to correct a gamma of 2.5
+const uint8_t __attribute__((aligned(8))) dither_8x8_220[8][8]={
+{ 0, 107, 3, 187, 0, 125, 6, 212, },
+{ 39, 7, 86, 28, 49, 11, 102, 36, },
+{ 1, 158, 0, 131, 3, 180, 1, 151, },
+{ 68, 19, 52, 12, 81, 25, 64, 17, },
+{ 0, 119, 5, 203, 0, 113, 4, 195, },
+{ 45, 9, 96, 33, 42, 8, 91, 30, },
+{ 2, 172, 1, 144, 2, 165, 0, 137, },
+{ 77, 23, 60, 15, 72, 21, 56, 14, },
+};
+#endif
+
+#if !defined(HAVE_THREADS)
+int sws_thread_init(SwsContext *s, int thread_count)
+{
+ return -1;
+}
+void sws_thread_free(struct SwsContext *s) {}
+
+int GetCPUCount(void)
+{
+ return 1;
+}
+int isP4HT (void)
+{
+ return 0;
+}
+#else
+#include "isP4HT.c"
+#endif
+
+int sws_default_execute(SwsContext *c, int (*func)(SwsContext *c2), int *ret, int count){
+ int i;
+
+ for(i=0; i<count; i++){
+ int r= func(&c[i]);
+ if(ret) ret[i]= r;
+ }
+ return 0;
+}
+
+char *sws_format_name(int format)
+{
+ static char fmt_name[64];
+ char *res;
+ static int buffer;
+
+ res = fmt_name + buffer * 32;
+ buffer = 1 - buffer;
+ snprintf(res, 32, "0x%x (%c%c%c%c)", format,
+ format >> 24, (format >> 16) & 0xFF,
+ (format >> 8) & 0xFF,
+ format & 0xFF);
+
+ return res;
+}
+
+#if ARCH_X86_32 || ARCH_X86_64
+void in_asm_used_var_warning_killer()
+{
+ volatile int i= bF8+bFC+w10+
+ bm00001111+bm00000111+bm11111000+b16Mask+g16Mask+r16Mask+b15Mask+g15Mask+r15Mask+
+ M24A+M24B+M24C+w02 + b5Dither+g5Dither+r5Dither+g6Dither+dither4[0]+dither8[0]+bm01010101;
+ if(i) i=0;
+}
+#endif
+
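+/* C vertical scaler: each output luma sample is the lumFilterSize-tap
+ * weighted sum of the source lines, accumulated in 19-bit fixed point
+ * (1<<18 rounding bias, >>19 shift) and clipped to [0, 255]; chroma is
+ * filtered the same way whenever uDest/vDest are non-NULL. */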
+static inline void yuv2yuvXinC(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
+ int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
+ uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstW, int chrDstW)
+{
+ //FIXME Optimize (just quickly written, not optimized)
+ int i;
+ for(i=0; i<dstW; i++)
+ {
+ int val=1<<18;
+ int j;
+ for(j=0; j<lumFilterSize; j++)
+ val += lumSrc[j][i] * lumFilter[j];
+
+ dest[i]= FFMIN(FFMAX(val>>19, 0), 255);
+ }
+
+ if(uDest != NULL)
+ for(i=0; i<chrDstW; i++)
+ {
+ int u=1<<18;
+ int v=1<<18;
+ int j;
+ for(j=0; j<chrFilterSize; j++)
+ {
+ u += chrSrc[j][i] * chrFilter[j];
+ v += chrSrc[j][i + 2048] * chrFilter[j];
+ }
+
+ uDest[i]= FFMIN(FFMAX(u>>19, 0), 255);
+ vDest[i]= FFMIN(FFMAX(v>>19, 0), 255);
+ }
+}
+
+static inline void yuv2nv12XinC(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
+ int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
+ uint8_t *dest, uint8_t *uDest, int dstW, int chrDstW, int dstFormat)
+{
+ //FIXME Optimize (just quickly written, not optimized)
+ int i;
+ for(i=0; i<dstW; i++)
+ {
+ int val=1<<18;
+ int j;
+ for(j=0; j<lumFilterSize; j++)
+ val += lumSrc[j][i] * lumFilter[j];
+
+ dest[i]= FFMIN(FFMAX(val>>19, 0), 255);
+ }
+
+ if(uDest == NULL)
+ return;
+
+ if(dstFormat == IMGFMT_NV12)
+ for(i=0; i<chrDstW; i++)
+ {
+ int u=1<<18;
+ int v=1<<18;
+ int j;
+ for(j=0; j<chrFilterSize; j++)
+ {
+ u += chrSrc[j][i] * chrFilter[j];
+ v += chrSrc[j][i + 2048] * chrFilter[j];
+ }
+
+ uDest[2*i]= FFMIN(FFMAX(u>>19, 0), 255);
+ uDest[2*i+1]= FFMIN(FFMAX(v>>19, 0), 255);
+ }
+ else
+ for(i=0; i<chrDstW; i++)
+ {
+ int u=1<<18;
+ int v=1<<18;
+ int j;
+ for(j=0; j<chrFilterSize; j++)
+ {
+ u += chrSrc[j][i] * chrFilter[j];
+ v += chrSrc[j][i + 2048] * chrFilter[j];
+ }
+
+ uDest[2*i]= FFMIN(FFMAX(v>>19, 0), 255);
+ uDest[2*i+1]= FFMIN(FFMAX(u>>19, 0), 255);
+ }
+}
+
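+/* The YSCALE_*_C macros below open (but do not close) a loop over pairs of
+ * output pixels. YSCALE_YUV_2_PACKEDX_C runs the multi-tap vertical filter
+ * in 19-bit fixed point and clips only when the cheap (Y1|Y2|U|V)&256
+ * overflow test fires; the PACKED2/PACKED1 variants instead blend or reuse
+ * two prescaled lines, and the *_RGB* variants additionally fetch the
+ * per-channel contribution tables (table_rV/table_gU/table_gV/table_bU).
+ * The loop body and the closing brace are supplied by the caller, see
+ * YSCALE_YUV_2_ANYRGB_C and yuv2packedXinC. */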
+#define YSCALE_YUV_2_PACKEDX_C(type) \
+ for(i=0; i<(dstW>>1); i++){\
+ int j;\
+ int Y1=1<<18;\
+ int Y2=1<<18;\
+ int U=1<<18;\
+ int V=1<<18;\
+ type *r, *b, *g;\
+ const int i2= 2*i;\
+ \
+ for(j=0; j<lumFilterSize; j++)\
+ {\
+ Y1 += lumSrc[j][i2] * lumFilter[j];\
+ Y2 += lumSrc[j][i2+1] * lumFilter[j];\
+ }\
+ for(j=0; j<chrFilterSize; j++)\
+ {\
+ U += chrSrc[j][i] * chrFilter[j];\
+ V += chrSrc[j][i+2048] * chrFilter[j];\
+ }\
+ Y1>>=19;\
+ Y2>>=19;\
+ U >>=19;\
+ V >>=19;\
+ if((Y1|Y2|U|V)&256)\
+ {\
+ if(Y1>255) Y1=255;\
+ else if(Y1<0)Y1=0;\
+ if(Y2>255) Y2=255;\
+ else if(Y2<0)Y2=0;\
+ if(U>255) U=255;\
+ else if(U<0) U=0;\
+ if(V>255) V=255;\
+ else if(V<0) V=0;\
+ }
+
+#define YSCALE_YUV_2_RGBX_C(type) \
+ YSCALE_YUV_2_PACKEDX_C(type)\
+ r = c->table_rV[V];\
+ g = c->table_gU[U] + c->table_gV[V];\
+ b = c->table_bU[U];\
+
+#define YSCALE_YUV_2_PACKED2_C \
+ for(i=0; i<(dstW>>1); i++){\
+ const int i2= 2*i;\
+ int Y1= (buf0[i2 ]*yalpha1+buf1[i2 ]*yalpha)>>19;\
+ int Y2= (buf0[i2+1]*yalpha1+buf1[i2+1]*yalpha)>>19;\
+ int U= (uvbuf0[i ]*uvalpha1+uvbuf1[i ]*uvalpha)>>19;\
+ int V= (uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19;\
+
+#define YSCALE_YUV_2_RGB2_C(type) \
+ YSCALE_YUV_2_PACKED2_C\
+ type *r, *b, *g;\
+ r = c->table_rV[V];\
+ g = c->table_gU[U] + c->table_gV[V];\
+ b = c->table_bU[U];\
+
+#define YSCALE_YUV_2_PACKED1_C \
+ for(i=0; i<(dstW>>1); i++){\
+ const int i2= 2*i;\
+ int Y1= buf0[i2 ]>>7;\
+ int Y2= buf0[i2+1]>>7;\
+ int U= (uvbuf1[i ])>>7;\
+ int V= (uvbuf1[i+2048])>>7;\
+
+#define YSCALE_YUV_2_RGB1_C(type) \
+ YSCALE_YUV_2_PACKED1_C\
+ type *r, *b, *g;\
+ r = c->table_rV[V];\
+ g = c->table_gU[U] + c->table_gV[V];\
+ b = c->table_bU[U];\
+
+#define YSCALE_YUV_2_PACKED1B_C \
+ for(i=0; i<(dstW>>1); i++){\
+ const int i2= 2*i;\
+ int Y1= buf0[i2 ]>>7;\
+ int Y2= buf0[i2+1]>>7;\
+ int U= (uvbuf0[i ] + uvbuf1[i ])>>8;\
+ int V= (uvbuf0[i+2048] + uvbuf1[i+2048])>>8;\
+
+#define YSCALE_YUV_2_RGB1B_C(type) \
+ YSCALE_YUV_2_PACKED1B_C\
+ type *r, *b, *g;\
+ r = c->table_rV[V];\
+ g = c->table_gU[U] + c->table_gV[V];\
+ b = c->table_bU[U];\
+
+#define YSCALE_YUV_2_ANYRGB_C(func, func2)\
+ switch(c->dstFormat)\
+ {\
+ case IMGFMT_BGR32:\
+ case IMGFMT_RGB32:\
+ func(uint32_t)\
+ ((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1];\
+ ((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2];\
+ } \
+ break;\
+ case IMGFMT_RGB24:\
+ func(uint8_t)\
+ ((uint8_t*)dest)[0]= r[Y1];\
+ ((uint8_t*)dest)[1]= g[Y1];\
+ ((uint8_t*)dest)[2]= b[Y1];\
+ ((uint8_t*)dest)[3]= r[Y2];\
+ ((uint8_t*)dest)[4]= g[Y2];\
+ ((uint8_t*)dest)[5]= b[Y2];\
+ dest+=6;\
+ }\
+ break;\
+ case IMGFMT_BGR24:\
+ func(uint8_t)\
+ ((uint8_t*)dest)[0]= b[Y1];\
+ ((uint8_t*)dest)[1]= g[Y1];\
+ ((uint8_t*)dest)[2]= r[Y1];\
+ ((uint8_t*)dest)[3]= b[Y2];\
+ ((uint8_t*)dest)[4]= g[Y2];\
+ ((uint8_t*)dest)[5]= r[Y2];\
+ dest+=6;\
+ }\
+ break;\
+ case IMGFMT_RGB16:\
+ case IMGFMT_BGR16:\
+ {\
+ const int dr1= dither_2x2_8[y&1 ][0];\
+ const int dg1= dither_2x2_4[y&1 ][0];\
+ const int db1= dither_2x2_8[(y&1)^1][0];\
+ const int dr2= dither_2x2_8[y&1 ][1];\
+ const int dg2= dither_2x2_4[y&1 ][1];\
+ const int db2= dither_2x2_8[(y&1)^1][1];\
+ func(uint16_t)\
+ ((uint16_t*)dest)[i2+0]= r[Y1+dr1] + g[Y1+dg1] + b[Y1+db1];\
+ ((uint16_t*)dest)[i2+1]= r[Y2+dr2] + g[Y2+dg2] + b[Y2+db2];\
+ }\
+ }\
+ break;\
+ case IMGFMT_RGB15:\
+ case IMGFMT_BGR15:\
+ {\
+ const int dr1= dither_2x2_8[y&1 ][0];\
+ const int dg1= dither_2x2_8[y&1 ][1];\
+ const int db1= dither_2x2_8[(y&1)^1][0];\
+ const int dr2= dither_2x2_8[y&1 ][1];\
+ const int dg2= dither_2x2_8[y&1 ][0];\
+ const int db2= dither_2x2_8[(y&1)^1][1];\
+ func(uint16_t)\
+ ((uint16_t*)dest)[i2+0]= r[Y1+dr1] + g[Y1+dg1] + b[Y1+db1];\
+ ((uint16_t*)dest)[i2+1]= r[Y2+dr2] + g[Y2+dg2] + b[Y2+db2];\
+ }\
+ }\
+ break;\
+ case IMGFMT_RGB8:\
+ case IMGFMT_BGR8:\
+ {\
+ const uint8_t * const d64= dither_8x8_73[y&7];\
+ const uint8_t * const d32= dither_8x8_32[y&7];\
+ func(uint8_t)\
+ ((uint8_t*)dest)[i2+0]= r[Y1+d32[(i2+0)&7]] + g[Y1+d32[(i2+0)&7]] + b[Y1+d64[(i2+0)&7]];\
+ ((uint8_t*)dest)[i2+1]= r[Y2+d32[(i2+1)&7]] + g[Y2+d32[(i2+1)&7]] + b[Y2+d64[(i2+1)&7]];\
+ }\
+ }\
+ break;\
+ case IMGFMT_RGB4:\
+ case IMGFMT_BGR4:\
+ {\
+ const uint8_t * const d64= dither_8x8_73 [y&7];\
+ const uint8_t * const d128=dither_8x8_220[y&7];\
+ func(uint8_t)\
+ ((uint8_t*)dest)[i]= r[Y1+d128[(i2+0)&7]] + g[Y1+d64[(i2+0)&7]] + b[Y1+d128[(i2+0)&7]]\
+ + ((r[Y2+d128[(i2+1)&7]] + g[Y2+d64[(i2+1)&7]] + b[Y2+d128[(i2+1)&7]])<<4);\
+ }\
+ }\
+ break;\
+ case IMGFMT_RG4B:\
+ case IMGFMT_BG4B:\
+ {\
+ const uint8_t * const d64= dither_8x8_73 [y&7];\
+ const uint8_t * const d128=dither_8x8_220[y&7];\
+ func(uint8_t)\
+ ((uint8_t*)dest)[i2+0]= r[Y1+d128[(i2+0)&7]] + g[Y1+d64[(i2+0)&7]] + b[Y1+d128[(i2+0)&7]];\
+ ((uint8_t*)dest)[i2+1]= r[Y2+d128[(i2+1)&7]] + g[Y2+d64[(i2+1)&7]] + b[Y2+d128[(i2+1)&7]];\
+ }\
+ }\
+ break;\
+ case IMGFMT_RGB1:\
+ case IMGFMT_BGR1:\
+ {\
+ const uint8_t * const d128=dither_8x8_220[y&7];\
+ uint8_t *g= c->table_gU[128] + c->table_gV[128];\
+ for(i=0; i<dstW-7; i+=8){\
+ int acc;\
+ acc = g[((buf0[i ]*yalpha1+buf1[i ]*yalpha)>>19) + d128[0]];\
+ acc+= acc + g[((buf0[i+1]*yalpha1+buf1[i+1]*yalpha)>>19) + d128[1]];\
+ acc+= acc + g[((buf0[i+2]*yalpha1+buf1[i+2]*yalpha)>>19) + d128[2]];\
+ acc+= acc + g[((buf0[i+3]*yalpha1+buf1[i+3]*yalpha)>>19) + d128[3]];\
+ acc+= acc + g[((buf0[i+4]*yalpha1+buf1[i+4]*yalpha)>>19) + d128[4]];\
+ acc+= acc + g[((buf0[i+5]*yalpha1+buf1[i+5]*yalpha)>>19) + d128[5]];\
+ acc+= acc + g[((buf0[i+6]*yalpha1+buf1[i+6]*yalpha)>>19) + d128[6]];\
+ acc+= acc + g[((buf0[i+7]*yalpha1+buf1[i+7]*yalpha)>>19) + d128[7]];\
+ ((uint8_t*)dest)[0]= acc;\
+ dest++;\
+ }\
+\
+/*\
+((uint8_t*)dest)-= dstW>>4;\
+{\
+ int acc=0;\
+ int left=0;\
+ static int top[1024];\
+ static int last_new[1024][1024];\
+ static int last_in3[1024][1024];\
+ static int drift[1024][1024];\
+ int topLeft=0;\
+ int shift=0;\
+ int count=0;\
+ const uint8_t * const d128=dither_8x8_220[y&7];\
+ int error_new=0;\
+ int error_in3=0;\
+ int f=0;\
+ \
+ for(i=dstW>>1; i<dstW; i++){\
+ int in= ((buf0[i ]*yalpha1+buf1[i ]*yalpha)>>19);\
+ int in2 = (76309 * (in - 16) + 32768) >> 16;\
+ int in3 = (in2 < 0) ? 0 : ((in2 > 255) ? 255 : in2);\
+ int old= (left*7 + topLeft + top[i]*5 + top[i+1]*3)/20 + in3\
+ + (last_new[y][i] - in3)*f/256;\
+ int new= old> 128 ? 255 : 0;\
+\
+ error_new+= FFABS(last_new[y][i] - new);\
+ error_in3+= FFABS(last_in3[y][i] - in3);\
+ f= error_new - error_in3*4;\
+ if(f<0) f=0;\
+ if(f>256) f=256;\
+\
+ topLeft= top[i];\
+ left= top[i]= old - new;\
+ last_new[y][i]= new;\
+ last_in3[y][i]= in3;\
+\
+ acc+= acc + (new&1);\
+ if((i&7)==6){\
+ ((uint8_t*)dest)[0]= acc;\
+ ((uint8_t*)dest)++;\
+ }\
+ }\
+}\
+*/\
+ }\
+ break;\
+ case IMGFMT_YUY2:\
+ func2\
+ ((uint8_t*)dest)[2*i2+0]= Y1;\
+ ((uint8_t*)dest)[2*i2+1]= U;\
+ ((uint8_t*)dest)[2*i2+2]= Y2;\
+ ((uint8_t*)dest)[2*i2+3]= V;\
+ } \
+ break;\
+ case IMGFMT_UYVY:\
+ func2\
+ ((uint8_t*)dest)[2*i2+0]= U;\
+ ((uint8_t*)dest)[2*i2+1]= Y1;\
+ ((uint8_t*)dest)[2*i2+2]= V;\
+ ((uint8_t*)dest)[2*i2+3]= Y2;\
+ } \
+ break;\
+ case IMGFMT_VYUY:\
+ func2\
+ ((uint8_t*)dest)[2*i2+0]= V;\
+ ((uint8_t*)dest)[2*i2+1]= Y1;\
+ ((uint8_t*)dest)[2*i2+2]= U;\
+ ((uint8_t*)dest)[2*i2+3]= Y2;\
+ } \
+ break;\
+ case IMGFMT_YVYU:\
+ func2\
+ ((uint8_t*)dest)[2*i2+0]= Y1;\
+ ((uint8_t*)dest)[2*i2+1]= V;\
+ ((uint8_t*)dest)[2*i2+2]= Y2;\
+ ((uint8_t*)dest)[2*i2+3]= U;\
+ } \
+ break;\
+ }\
+
+
+static inline void yuv2packedXinC(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
+ int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
+ uint8_t *dest, int dstW, int y)
+{
+ int i;
+ switch(c->dstFormat)
+ {
+ case IMGFMT_RGB32:
+ case IMGFMT_BGR32:
+ YSCALE_YUV_2_RGBX_C(uint32_t)
+ ((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1];
+ ((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2];
+ }
+ break;
+ case IMGFMT_RGB24:
+ YSCALE_YUV_2_RGBX_C(uint8_t)
+ ((uint8_t*)dest)[0]= r[Y1];
+ ((uint8_t*)dest)[1]= g[Y1];
+ ((uint8_t*)dest)[2]= b[Y1];
+ ((uint8_t*)dest)[3]= r[Y2];
+ ((uint8_t*)dest)[4]= g[Y2];
+ ((uint8_t*)dest)[5]= b[Y2];
+ dest+=6;
+ }
+ break;
+ case IMGFMT_BGR24:
+ YSCALE_YUV_2_RGBX_C(uint8_t)
+ ((uint8_t*)dest)[0]= b[Y1];
+ ((uint8_t*)dest)[1]= g[Y1];
+ ((uint8_t*)dest)[2]= r[Y1];
+ ((uint8_t*)dest)[3]= b[Y2];
+ ((uint8_t*)dest)[4]= g[Y2];
+ ((uint8_t*)dest)[5]= r[Y2];
+ dest+=6;
+ }
+ break;
+ case IMGFMT_RGB16:
+ case IMGFMT_BGR16:
+ {
+ const int dr1= dither_2x2_8[y&1 ][0];
+ const int dg1= dither_2x2_4[y&1 ][0];
+ const int db1= dither_2x2_8[(y&1)^1][0];
+ const int dr2= dither_2x2_8[y&1 ][1];
+ const int dg2= dither_2x2_4[y&1 ][1];
+ const int db2= dither_2x2_8[(y&1)^1][1];
+ YSCALE_YUV_2_RGBX_C(uint16_t)
+ ((uint16_t*)dest)[i2+0]= r[Y1+dr1] + g[Y1+dg1] + b[Y1+db1];
+ ((uint16_t*)dest)[i2+1]= r[Y2+dr2] + g[Y2+dg2] + b[Y2+db2];
+ }
+ }
+ break;
+ case IMGFMT_RGB15:
+ case IMGFMT_BGR15:
+ {
+ const int dr1= dither_2x2_8[y&1 ][0];
+ const int dg1= dither_2x2_8[y&1 ][1];
+ const int db1= dither_2x2_8[(y&1)^1][0];
+ const int dr2= dither_2x2_8[y&1 ][1];
+ const int dg2= dither_2x2_8[y&1 ][0];
+ const int db2= dither_2x2_8[(y&1)^1][1];
+ YSCALE_YUV_2_RGBX_C(uint16_t)
+ ((uint16_t*)dest)[i2+0]= r[Y1+dr1] + g[Y1+dg1] + b[Y1+db1];
+ ((uint16_t*)dest)[i2+1]= r[Y2+dr2] + g[Y2+dg2] + b[Y2+db2];
+ }
+ }
+ break;
+ case IMGFMT_RGB8:
+ case IMGFMT_BGR8:
+ {
+ const uint8_t * const d64= dither_8x8_73[y&7];
+ const uint8_t * const d32= dither_8x8_32[y&7];
+ YSCALE_YUV_2_RGBX_C(uint8_t)
+ ((uint8_t*)dest)[i2+0]= r[Y1+d32[(i2+0)&7]] + g[Y1+d32[(i2+0)&7]] + b[Y1+d64[(i2+0)&7]];
+ ((uint8_t*)dest)[i2+1]= r[Y2+d32[(i2+1)&7]] + g[Y2+d32[(i2+1)&7]] + b[Y2+d64[(i2+1)&7]];
+ }
+ }
+ break;
+ case IMGFMT_RGB4:
+ case IMGFMT_BGR4:
+ {
+ const uint8_t * const d64= dither_8x8_73 [y&7];
+ const uint8_t * const d128=dither_8x8_220[y&7];
+ YSCALE_YUV_2_RGBX_C(uint8_t)
+ ((uint8_t*)dest)[i]= r[Y1+d128[(i2+0)&7]] + g[Y1+d64[(i2+0)&7]] + b[Y1+d128[(i2+0)&7]]
+ +((r[Y2+d128[(i2+1)&7]] + g[Y2+d64[(i2+1)&7]] + b[Y2+d128[(i2+1)&7]])<<4);
+ }
+ }
+ break;
+ case IMGFMT_RG4B:
+ case IMGFMT_BG4B:
+ {
+ const uint8_t * const d64= dither_8x8_73 [y&7];
+ const uint8_t * const d128=dither_8x8_220[y&7];
+ YSCALE_YUV_2_RGBX_C(uint8_t)
+ ((uint8_t*)dest)[i2+0]= r[Y1+d128[(i2+0)&7]] + g[Y1+d64[(i2+0)&7]] + b[Y1+d128[(i2+0)&7]];
+ ((uint8_t*)dest)[i2+1]= r[Y2+d128[(i2+1)&7]] + g[Y2+d64[(i2+1)&7]] + b[Y2+d128[(i2+1)&7]];
+ }
+ }
+ break;
+ case IMGFMT_RGB1:
+ case IMGFMT_BGR1:
+ {
+ const uint8_t * const d128=dither_8x8_220[y&7];
+ uint8_t *g= c->table_gU[128] + c->table_gV[128];
+ int acc=0;
+ for(i=0; i<dstW-1; i+=2){
+ int j;
+ int Y1=1<<18;
+ int Y2=1<<18;
+
+ for(j=0; j<lumFilterSize; j++)
+ {
+ Y1 += lumSrc[j][i] * lumFilter[j];
+ Y2 += lumSrc[j][i+1] * lumFilter[j];
+ }
+ Y1>>=19;
+ Y2>>=19;
+ if((Y1|Y2)&256)
+ {
+ if(Y1>255) Y1=255;
+ else if(Y1<0)Y1=0;
+ if(Y2>255) Y2=255;
+ else if(Y2<0)Y2=0;
+ }
+ acc+= acc + g[Y1+d128[(i+0)&7]];
+ acc+= acc + g[Y2+d128[(i+1)&7]];
+ if((i&7)==6){
+ ((uint8_t*)dest)[0]= acc;
+ dest++;
+ }
+ }
+ }
+ break;
+ case IMGFMT_YUY2:
+ YSCALE_YUV_2_PACKEDX_C(void)
+ ((uint8_t*)dest)[2*i2+0]= Y1;
+ ((uint8_t*)dest)[2*i2+1]= U;
+ ((uint8_t*)dest)[2*i2+2]= Y2;
+ ((uint8_t*)dest)[2*i2+3]= V;
+ }
+ break;
+ case IMGFMT_UYVY:
+ YSCALE_YUV_2_PACKEDX_C(void)
+ ((uint8_t*)dest)[2*i2+0]= U;
+ ((uint8_t*)dest)[2*i2+1]= Y1;
+ ((uint8_t*)dest)[2*i2+2]= V;
+ ((uint8_t*)dest)[2*i2+3]= Y2;
+ }
+ break;
+ case IMGFMT_VYUY:
+ YSCALE_YUV_2_PACKEDX_C(void)
+ ((uint8_t*)dest)[2*i2+0]= V;
+ ((uint8_t*)dest)[2*i2+1]= Y1;
+ ((uint8_t*)dest)[2*i2+2]= U;
+ ((uint8_t*)dest)[2*i2+3]= Y2;
+ }
+ break;
+ case IMGFMT_YVYU:
+ YSCALE_YUV_2_PACKEDX_C(void)
+ ((uint8_t*)dest)[2*i2+0]= Y1;
+ ((uint8_t*)dest)[2*i2+1]= V;
+ ((uint8_t*)dest)[2*i2+2]= Y2;
+ ((uint8_t*)dest)[2*i2+3]= U;
+ }
+ break;
+ }
+}
+
+
+//Note: we have C, X86, MMX, MMX2 and 3DNOW versions; there is no 3DNOW+MMX2 one
+//Plain C versions
+#if !HAVE_MMX || defined (RUNTIME_CPUDETECT)
+#define COMPILE_C
+#endif
+
+#ifdef ARCH_POWERPC
+#ifdef HAVE_ALTIVEC
+#define COMPILE_ALTIVEC
+#endif //HAVE_ALTIVEC
+#endif //ARCH_POWERPC
+
+#if ARCH_X86_32 || ARCH_X86_64
+
+#if (HAVE_MMX && !HAVE_AMD3DNOW && !HAVE_MMX2) || defined (RUNTIME_CPUDETECT)
+#define COMPILE_MMX
+#endif
+
+#if HAVE_MMX2 || defined (RUNTIME_CPUDETECT)
+#define COMPILE_MMX2
+#endif
+
+#if (HAVE_AMD3DNOW && !HAVE_MMX2) || defined (RUNTIME_CPUDETECT)
+#define COMPILE_3DNOW
+#endif
+#endif //ARCH_X86_32 || ARCH_X86_64
+
+#undef HAVE_MMX
+#undef HAVE_MMX2
+#undef HAVE_AMD3DNOW
+
+#ifdef COMPILE_C
+#undef HAVE_MMX
+#undef HAVE_MMX2
+#undef HAVE_AMD3DNOW
+#undef HAVE_ALTIVEC
+#define RENAME(a) a ## _C
+#include "swscale_template.c"
+#endif
+
+#ifdef ARCH_POWERPC
+#ifdef COMPILE_ALTIVEC
+#undef RENAME
+#define HAVE_ALTIVEC
+#define RENAME(a) a ## _altivec
+#include "swscale_template.c"
+#endif
+#endif //ARCH_POWERPC
+
+#if ARCH_X86_32 || ARCH_X86_64
+
+//X86 versions
+/*
+#undef RENAME
+#undef HAVE_MMX
+#undef HAVE_MMX2
+#undef HAVE_AMD3DNOW
+#define ARCH_X86 1
+#define RENAME(a) a ## _X86
+#include "swscale_template.c"
+*/
+//MMX versions
+#ifdef COMPILE_MMX
+#undef RENAME
+#define HAVE_MMX 1
+#undef HAVE_MMX2
+#undef HAVE_AMD3DNOW
+#define RENAME(a) a ## _MMX
+#include "swscale_template.c"
+#endif
+
+//MMX2 versions
+#ifdef COMPILE_MMX2
+#undef RENAME
+#define HAVE_MMX 1
+#define HAVE_MMX2 1
+#undef HAVE_AMD3DNOW
+#define RENAME(a) a ## _MMX2
+#include "swscale_template.c"
+#endif
+
+//3DNOW versions
+#ifdef COMPILE_3DNOW
+#undef RENAME
+#define HAVE_MMX 1
+#undef HAVE_MMX2
+#define HAVE_AMD3DNOW 1
+#define RENAME(a) a ## _3DNow
+#include "swscale_template.c"
+#endif
+
+#endif //ARCH_X86_32 || ARCH_X86_64
+
+// minor note: the HAVE_xyz are messed up after this line so don't use them
+
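+// getSplineCoeff evaluates the cubic a + b*dist + c*dist^2 + d*dist^3 via Horner's rule for
+// dist <= 1.0; for larger dist it recurses with the coefficients rewritten for the next unit interval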
+static double getSplineCoeff(double a, double b, double c, double d, double dist)
+{
+// printf("%f %f %f %f %f\n", a,b,c,d,dist);
+ if (dist<=1.0) return ((d*dist + c)*dist + b)*dist +a;
+ else return getSplineCoeff( 0.0,
+ b+ 2.0*c + 3.0*d,
+ c + 3.0*d,
+ -b- 3.0*c - 6.0*d,
+ dist-1.0);
+}
+
+static inline int initFilter(int16_t **outFilter, int16_t **filterPos, int *outFilterSize, int xInc,
+ int srcW, int dstW, int filterAlign, int one, SwsMethodParams *params,
+ int subsampling,int cpuflags, int debugflags,
+ SwsVector *srcFilter, SwsVector *dstFilter)
+{
+ int i;
+ int filterSize;
+ int filter2Size;
+ int minFilterSize;
+ double *filter=NULL;
+ double *filter2=NULL;
+#if ARCH_X86_32 || ARCH_X86_64
+ if(cpuflags & SWS_CPU_CAPS_MMX)
+        asm volatile("emms\n\t"::: "memory"); //FIXME this shouldn't be required but it IS (even for non-MMX versions)
+#endif
+
+    // Note the +1 is for the MMX scaler which reads over the end
+ *filterPos = av_malloc((dstW+1)*sizeof(int16_t));
+
+ if(FFABS(xInc - 0x10000) <10) // unscaled
+ {
+ int i;
+ filterSize= 1;
+ filter= av_malloc(dstW*sizeof(double)*filterSize);
+ for(i=0; i<dstW*filterSize; i++) filter[i]=0;
+
+ for(i=0; i<dstW; i++)
+ {
+ filter[i*filterSize]=1;
+ (*filterPos)[i]=i;
+ }
+
+ }
+ else if(params->method&SWS_POINT) // lame looking point sampling mode
+ {
+ int i;
+ int xDstInSrc;
+ filterSize= 1;
+ filter= av_malloc(dstW*sizeof(double)*filterSize);
+
+ xDstInSrc= xInc/2 - 0x8000;
+ for(i=0; i<dstW; i++)
+ {
+ int xx= (xDstInSrc - ((filterSize-1)<<15) + (1<<15))>>16;
+
+ (*filterPos)[i]= xx;
+ filter[i]= 1.0;
+ xDstInSrc+= xInc;
+ }
+ }
+ else if((xInc <= (1<<16) && (params->method&SWS_AREA)) || (params->method&SWS_FAST_BILINEAR)) // bilinear upscale
+ {
+ int i;
+ int xDstInSrc;
+ if (params->method&SWS_BICUBIC) filterSize= 4;
+ else if(params->method&SWS_X ) filterSize= 4;
+ else filterSize= 2; // SWS_BILINEAR / SWS_AREA
+ filter= av_malloc(dstW*sizeof(double)*filterSize);
+
+ xDstInSrc= xInc/2 - 0x8000;
+ for(i=0; i<dstW; i++)
+ {
+ int xx= (xDstInSrc - ((filterSize-1)<<15) + (1<<15))>>16;
+ int j;
+
+ (*filterPos)[i]= xx;
+ //Bilinear upscale / linear interpolate / Area averaging
+ for(j=0; j<filterSize; j++)
+ {
+ double d= FFABS((xx<<16) - xDstInSrc)/(double)(1<<16);
+ double coeff= 1.0 - d;
+ if(coeff<0) coeff=0;
+ filter[i*filterSize + j]= coeff;
+ xx++;
+ }
+ xDstInSrc+= xInc;
+ }
+ }
+ else
+ {
+ double xDstInSrc;
+ double sizeFactor, filterSizeInSrc;
+ const double xInc1= (double)xInc / (double)(1<<16);
+
+ if (params->method&SWS_BICUBIC) sizeFactor= 4.0;
+ else if(params->method&SWS_X) sizeFactor= 8.0;
+ else if(params->method&SWS_AREA) sizeFactor= 1.0; //downscale only, for upscale it is bilinear
+ else if(params->method&SWS_GAUSS) sizeFactor= 8.0; // infinite ;)
+ else if(params->method&SWS_LANCZOS) sizeFactor= params->param ? 2.0*params->param : 6.0;
+ else if(params->method&SWS_SINC) sizeFactor= 20.0; // infinite ;)
+ else if(params->method&SWS_SPLINE) sizeFactor= 20.0; // infinite ;)
+ else if(params->method&SWS_BILINEAR) sizeFactor= 2.0;
+ else {
+ sizeFactor= 0.0; //GCC warning killer
+ ASSERT(0)
+ }
+
+ if(xInc1 <= 1.0) filterSizeInSrc= sizeFactor; // upscale
+ else filterSizeInSrc= sizeFactor*srcW / (double)dstW;
+
+ filterSize= (int)ceil(1 + filterSizeInSrc); // will be reduced later if possible
+ if(filterSize > srcW-2) filterSize=srcW-2;
+
+ filter= av_malloc(dstW*sizeof(double)*filterSize);
+
+ xDstInSrc= xInc1 / 2.0 - 0.5;
+ for(i=0; i<dstW; i++)
+ {
+ int xx= (int)(xDstInSrc - (filterSize-1)*0.5 + 0.5);
+ int j;
+ (*filterPos)[i]= xx;
+ for(j=0; j<filterSize; j++)
+ {
+ double d= FFABS(xx - xDstInSrc)/filterSizeInSrc*sizeFactor;
+ double coeff;
+ if(params->method & SWS_BICUBIC)
+ {
+ double A= params->param ? -params->param*0.01 : -0.08; //old default was -0.60
+
+ // Equation is from VirtualDub
+ if(d<1.0)
+ coeff = (1.0 - (A+3.0)*d*d + (A+2.0)*d*d*d);
+ else if(d<2.0)
+ coeff = (-4.0*A + 8.0*A*d - 5.0*A*d*d + A*d*d*d);
+ else
+ coeff=0.0;
+ }
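+                    /* in other words, the standard Keys bicubic kernel with parameter A:
+                       k(d) = 1 - (A+3)*d^2 + (A+2)*d^3      for d < 1
+                       k(d) = -4A + 8A*d - 5A*d^2 + A*d^3    for 1 <= d < 2
+                       k(d) = 0                              otherwise */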
+/* else if(flags & SWS_X)
+ {
+ double p= param ? param*0.01 : 0.3;
+ coeff = d ? sin(d*PI)/(d*PI) : 1.0;
+ coeff*= pow(2.0, - p*d*d);
+ }*/
+ else if(params->method & SWS_X)
+ {
+ double A= params->param ? params->param*0.1 : 1.0;
+
+ if(d<1.0)
+ coeff = cos(d*PI);
+ else
+ coeff=-1.0;
+ if(coeff<0.0) coeff= -pow(-coeff, A);
+ else coeff= pow( coeff, A);
+ coeff= coeff*0.5 + 0.5;
+ }
+ else if(params->method & SWS_AREA)
+ {
+ double srcPixelSize= 1.0/xInc1;
+ if(d + srcPixelSize/2 < 0.5) coeff= 1.0;
+ else if(d - srcPixelSize/2 < 0.5) coeff= (0.5-d)/srcPixelSize + 0.5;
+ else coeff=0.0;
+ }
+ else if(params->method & SWS_GAUSS)
+ {
+ double p= params->param ? params->param*0.1 : 3.0;
+ coeff = pow(2.0, - p*d*d);
+ }
+ else if(params->method & SWS_SINC)
+ {
+ coeff = d ? sin(d*PI)/(d*PI) : 1.0;
+ }
+ else if(params->method & SWS_LANCZOS)
+ {
+ double p= params->param ? params->param : 3.0;
+ coeff = d ? sin(d*PI)*sin(d*PI/p)/(d*d*PI*PI/p) : 1.0;
+ if(d>p) coeff=0;
+ }
+ else if(params->method & SWS_BILINEAR)
+ {
+ coeff= 1.0 - d;
+ if(coeff<0) coeff=0;
+ }
+ else if(params->method & SWS_SPLINE)
+ {
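+                        // numerically, p equals 3 - 3*sqrt(3)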
+ static const double p=-2.196152422706632;
+ coeff = getSplineCoeff(1.0, 0.0, p, -p-1.0, d);
+ }
+ else {
+ coeff= 0.0; //GCC warning killer
+ ASSERT(0)
+ }
+
+ filter[i*filterSize + j]= coeff;
+ xx++;
+ }
+ xDstInSrc+= xInc1;
+ }
+ }
+
+ /* apply src & dst Filter to filter -> filter2
+ av_free(filter);
+ */
+ ASSERT(filterSize>0)
+ filter2Size= filterSize;
+ if(srcFilter) filter2Size+= srcFilter->length - 1;
+ if(dstFilter) filter2Size+= dstFilter->length - 1;
+ ASSERT(filter2Size>0)
+ filter2= av_malloc(filter2Size*dstW*sizeof(double));
+
+ for(i=0; i<dstW; i++)
+ {
+ int j;
+ SwsVector scaleFilter;
+ SwsVector *outVec;
+
+ scaleFilter.coeff= filter + i*filterSize;
+ scaleFilter.length= filterSize;
+
+ if(srcFilter) outVec= sws_getConvVec(srcFilter, &scaleFilter);
+ else outVec= &scaleFilter;
+
+ ASSERT(outVec->length == filter2Size)
+ //FIXME dstFilter
+
+ for(j=0; j<outVec->length; j++)
+ {
+ filter2[i*filter2Size + j]= outVec->coeff[j];
+ }
+
+ (*filterPos)[i]+= (filterSize-1)/2 - (filter2Size-1)/2;
+
+ if(outVec != &scaleFilter) sws_freeVec(outVec);
+ }
+ av_free(filter); filter=NULL;
+
+    /* try to reduce the filter-size (step 1: find size and shift left) */
+    // Assume it's nearly normalized (*0.5 or *2.0 is OK but *0.001 is not)
+ minFilterSize= 0;
+ for(i=dstW-1; i>=0; i--)
+ {
+ int min= filter2Size;
+ int j;
+ double cutOff=0.0;
+
+        /* get rid of near-zero elements on the left by shifting left */
+ for(j=0; j<filter2Size; j++)
+ {
+ int k;
+ cutOff += FFABS(filter2[i*filter2Size]);
+
+ if(cutOff > SWS_MAX_REDUCE_CUTOFF) break;
+
+ /* preserve Monotonicity because the core can't handle the filter otherwise */
+ if(i<dstW-1 && (*filterPos)[i] >= (*filterPos)[i+1]) break;
+
+ // Move filter coeffs left
+ for(k=1; k<filter2Size; k++)
+ filter2[i*filter2Size + k - 1]= filter2[i*filter2Size + k];
+ filter2[i*filter2Size + k - 1]= 0.0;
+ (*filterPos)[i]++;
+ }
+
+ cutOff=0.0;
+ /* count near zeros on the right */
+ for(j=filter2Size-1; j>0; j--)
+ {
+ cutOff += FFABS(filter2[i*filter2Size + j]);
+
+ if(cutOff > SWS_MAX_REDUCE_CUTOFF) break;
+ min--;
+ }
+
+ if(min>minFilterSize) minFilterSize= min;
+ }
+
+ if (cpuflags & SWS_CPU_CAPS_ALTIVEC) {
+ // we can handle the special case 4,
+ // so we don't want to go to the full 8
+ if (minFilterSize < 5)
+ filterAlign = 4;
+
+        // we really don't want to waste our time doing useless computation,
+        // so fall back on the scalar C code for very small filters.
+        // Vectorizing is worth it only if you have a decent-sized vector.
+ if (minFilterSize < 3)
+ filterAlign = 1;
+ }
+
+ if (cpuflags & SWS_CPU_CAPS_MMX) {
+ // special case for unscaled vertical filtering
+ if(minFilterSize == 1 && filterAlign == 2)
+ filterAlign= 1;
+ }
+
+ ASSERT(minFilterSize > 0)
+ filterSize= (minFilterSize +(filterAlign-1)) & (~(filterAlign-1));
+ ASSERT(filterSize > 0)
+ filter= av_malloc(filterSize*dstW*sizeof(double));
+ if (filterSize >= MAX_FILTER_SIZE*16/((subsampling & SWS_ACCURATE_RND) ? APCK_SIZE : 16) || !filter)
+ return -1;
+ *outFilterSize= filterSize;
+
+ if(debugflags&SWS_PRINT_INFO)
+ MSG_V("SwScaler: reducing / aligning filtersize %d -> %d\n", filter2Size, filterSize);
+    /* try to reduce the filter-size (step 2: reduce it) */
+ for(i=0; i<dstW; i++)
+ {
+ int j;
+
+ for(j=0; j<filterSize; j++)
+ {
+ if(j>=filter2Size) filter[i*filterSize + j]= 0.0;
+ else filter[i*filterSize + j]= filter2[i*filter2Size + j];
+ }
+ }
+ av_free(filter2); filter2=NULL;
+
+
+ //FIXME try to align filterpos if possible
+
+ //fix borders
+ for(i=0; i<dstW; i++)
+ {
+ int j;
+ if((*filterPos)[i] < 0)
+ {
+ // Move filter coeffs left to compensate for filterPos
+ for(j=1; j<filterSize; j++)
+ {
+ int left= FFMAX(j + (*filterPos)[i], 0);
+ filter[i*filterSize + left] += filter[i*filterSize + j];
+ filter[i*filterSize + j]=0;
+ }
+ (*filterPos)[i]= 0;
+ }
+
+ if((*filterPos)[i] + filterSize > srcW)
+ {
+ int shift= (*filterPos)[i] + filterSize - srcW;
+ // Move filter coeffs right to compensate for filterPos
+ for(j=filterSize-2; j>=0; j--)
+ {
+ int right= FFMIN(j + shift, filterSize-1);
+ filter[i*filterSize +right] += filter[i*filterSize +j];
+ filter[i*filterSize +j]=0;
+ }
+ (*filterPos)[i]= srcW - filterSize;
+ }
+ }
+
+    // Note the +1 is for the MMX scaler which reads over the end
+ /* align at 16 for AltiVec (needed by hScale_altivec_real) */
+ *outFilter= av_malloc(*outFilterSize*(dstW+1)*sizeof(int16_t));
+ memset(*outFilter, 0, *outFilterSize*(dstW+1)*sizeof(int16_t));
+
+ /* Normalize & Store in outFilter */
+ for(i=0; i<dstW; i++)
+ {
+ int j;
+ double error=0;
+ double sum=0;
+ double scale= one;
+
+ for(j=0; j<filterSize; j++)
+ {
+ sum+= filter[i*filterSize + j];
+ }
+ scale/= sum;
+ for(j=0; j<*outFilterSize; j++)
+ {
+ double v= filter[i*filterSize + j]*scale + error;
+ int intV= floor(v + 0.5);
+ (*outFilter)[i*(*outFilterSize) + j]= intV;
+ error = v - intV;
+ }
+ }
+
+ (*filterPos)[dstW]= (*filterPos)[dstW-1]; // the MMX scaler will read over the end
+ for(i=0; i<*outFilterSize; i++)
+ {
+ int j= dstW*(*outFilterSize);
+ (*outFilter)[j + i]= (*outFilter)[j + i - (*outFilterSize)];
+ }
+
+ av_free(filter);
+ return 0;
+}
+
+#if ARCH_X86_32 || ARCH_X86_64
+static void initMMX2HScaler(int dstW, int xInc, uint8_t *funnyCode, int16_t *filter, int32_t *filterPos, int numSplits)
+{
+ uint8_t *fragmentA;
+ stride_t imm8OfPShufW1A;
+ stride_t imm8OfPShufW2A;
+ stride_t fragmentLengthA;
+ uint8_t *fragmentB;
+ stride_t imm8OfPShufW1B;
+ stride_t imm8OfPShufW2B;
+ stride_t fragmentLengthB;
+ stride_t fragmentPos;
+
+ int xpos, i;
+
+ // create an optimized horizontal scaling routine
+
+ //code fragment
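+    // The two asm blocks below are never executed as scaling code here: they only lay down
+    // template fragments and export the offsets of their pshufw immediates, which the loop
+    // further down copies into funnyCode and patches once per group of four output pixels.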
+
+ asm volatile(
+ "jmp 9f \n\t"
+ // Begin
+ "0: \n\t"
+ "movq (%%"REG_d", %%"REG_a"), %%mm3\n\t"
+ "movd (%%"REG_c", %%"REG_S"), %%mm0\n\t"
+ "movd 1(%%"REG_c", %%"REG_S"), %%mm1\n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "pshufw $0xFF, %%mm1, %%mm1 \n\t"
+ "1: \n\t"
+ "pshufw $0xFF, %%mm0, %%mm0 \n\t"
+ "2: \n\t"
+ "psubw %%mm1, %%mm0 \n\t"
+ "movl 8(%%"REG_b", %%"REG_a"), %%esi\n\t"
+ "pmullw %%mm3, %%mm0 \n\t"
+ "psllw $7, %%mm1 \n\t"
+ "paddw %%mm1, %%mm0 \n\t"
+
+ "movq %%mm0, (%%"REG_D", %%"REG_a")\n\t"
+
+ "add $8, %%"REG_a" \n\t"
+ // End
+ "9: \n\t"
+// "int $3\n\t"
+ "lea " LOCAL_MANGLE(0b) ", %0 \n\t"
+ "lea " LOCAL_MANGLE(1b) ", %1 \n\t"
+ "lea " LOCAL_MANGLE(2b) ", %2 \n\t"
+ "dec %1 \n\t"
+ "dec %2 \n\t"
+ "sub %0, %1 \n\t"
+ "sub %0, %2 \n\t"
+ "lea " LOCAL_MANGLE(9b) ", %3 \n\t"
+ "sub %0, %3 \n\t"
+
+
+ :"=r" (fragmentA), "=r" (imm8OfPShufW1A), "=r" (imm8OfPShufW2A),
+ "=r" (fragmentLengthA)
+ );
+
+ asm volatile(
+ "jmp 9f \n\t"
+ // Begin
+ "0: \n\t"
+ "movq (%%"REG_d", %%"REG_a"), %%mm3\n\t"
+ "movd (%%"REG_c", %%"REG_S"), %%mm0\n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "pshufw $0xFF, %%mm0, %%mm1 \n\t"
+ "1: \n\t"
+ "pshufw $0xFF, %%mm0, %%mm0 \n\t"
+ "2: \n\t"
+ "psubw %%mm1, %%mm0 \n\t"
+ "movl 8(%%"REG_b", %%"REG_a"), %%esi\n\t"
+ "pmullw %%mm3, %%mm0 \n\t"
+ "psllw $7, %%mm1 \n\t"
+ "paddw %%mm1, %%mm0 \n\t"
+
+ "movq %%mm0, (%%"REG_D", %%"REG_a")\n\t"
+
+ "add $8, %%"REG_a" \n\t"
+ // End
+ "9: \n\t"
+// "int $3\n\t"
+ "lea " LOCAL_MANGLE(0b) ", %0 \n\t"
+ "lea " LOCAL_MANGLE(1b) ", %1 \n\t"
+ "lea " LOCAL_MANGLE(2b) ", %2 \n\t"
+ "dec %1 \n\t"
+ "dec %2 \n\t"
+ "sub %0, %1 \n\t"
+ "sub %0, %2 \n\t"
+ "lea " LOCAL_MANGLE(9b) ", %3 \n\t"
+ "sub %0, %3 \n\t"
+
+
+ :"=r" (fragmentB), "=r" (imm8OfPShufW1B), "=r" (imm8OfPShufW2B),
+ "=r" (fragmentLengthB)
+ );
+
+ xpos= 0; //lumXInc/2 - 0x8000; // difference between pixel centers
+ fragmentPos=0;
+
+ for(i=0; i<dstW/numSplits; i++)
+ {
+ int xx=xpos>>16;
+
+ if((i&3) == 0)
+ {
+ int a=0;
+ int b=((xpos+xInc)>>16) - xx;
+ int c=((xpos+xInc*2)>>16) - xx;
+ int d=((xpos+xInc*3)>>16) - xx;
+
+ filter[i ] = (( xpos & 0xFFFF) ^ 0xFFFF)>>9;
+ filter[i+1] = (((xpos+xInc ) & 0xFFFF) ^ 0xFFFF)>>9;
+ filter[i+2] = (((xpos+xInc*2) & 0xFFFF) ^ 0xFFFF)>>9;
+ filter[i+3] = (((xpos+xInc*3) & 0xFFFF) ^ 0xFFFF)>>9;
+ filterPos[i/2]= xx;
+
+ if(d+1<4)
+ {
+ int maxShift= 3-(d+1);
+ int shift=0;
+
+ memcpy(funnyCode + fragmentPos, fragmentB, fragmentLengthB);
+
+ funnyCode[fragmentPos + imm8OfPShufW1B]=
+ (a+1) | ((b+1)<<2) | ((c+1)<<4) | ((d+1)<<6);
+ funnyCode[fragmentPos + imm8OfPShufW2B]=
+ a | (b<<2) | (c<<4) | (d<<6);
+
+ if(i+3>=dstW) shift=maxShift; //avoid overread
+ else if((filterPos[i/2]&3) <= maxShift) shift=filterPos[i/2]&3; //Align
+
+ if(shift && i>=shift)
+ {
+ funnyCode[fragmentPos + imm8OfPShufW1B]+= 0x55*shift;
+ funnyCode[fragmentPos + imm8OfPShufW2B]+= 0x55*shift;
+ filterPos[i/2]-=shift;
+ }
+
+ fragmentPos+= fragmentLengthB;
+ }
+ else
+ {
+ int maxShift= 3-d;
+ int shift=0;
+
+ memcpy(funnyCode + fragmentPos, fragmentA, fragmentLengthA);
+
+ funnyCode[fragmentPos + imm8OfPShufW1A]=
+ funnyCode[fragmentPos + imm8OfPShufW2A]=
+ a | (b<<2) | (c<<4) | (d<<6);
+
+ if(i+4>=dstW) shift=maxShift; //avoid overread
+ else if((filterPos[i/2]&3) <= maxShift) shift=filterPos[i/2]&3; //partial align
+
+ if(shift && i>=shift)
+ {
+ funnyCode[fragmentPos + imm8OfPShufW1A]+= 0x55*shift;
+ funnyCode[fragmentPos + imm8OfPShufW2A]+= 0x55*shift;
+ filterPos[i/2]-=shift;
+ }
+
+ fragmentPos+= fragmentLengthA;
+ }
+
+ funnyCode[fragmentPos]= RET;
+ }
+ xpos+=xInc;
+ }
+ filterPos[i/2]= xpos>>16; // needed to jump to the next part
+}
+#endif // ARCH_X86_32 || ARCH_X86_64
+
+static void globalInit(void){
+ // generating tables:
+ int i;
+ for(i=0; i<768; i++){
+ int c= FFMIN(FFMAX(i-256, 0), 255);
+ clip_table[i]=c;
+ }
+}
+
+static SwsFunc getSwsFunc(int flags){
+
+#ifdef RUNTIME_CPUDETECT
+#if ARCH_X86_32 || ARCH_X86_64
+    // ordered by speed, fastest first
+ if(flags & SWS_CPU_CAPS_MMX2)
+ return swScale_MMX2;
+ else if(flags & SWS_CPU_CAPS_3DNOW)
+ return swScale_3DNow;
+ else if(flags & SWS_CPU_CAPS_MMX)
+ return swScale_MMX;
+ else
+ return swScale_C;
+
+#else
+#ifdef ARCH_POWERPC
+ if(flags & SWS_CPU_CAPS_ALTIVEC)
+ return swScale_altivec;
+ else
+ return swScale_C;
+#endif
+ return swScale_C;
+#endif /* ARCH_X86_32 || ARCH_X86_64 */
+#else //RUNTIME_CPUDETECT
+#if HAVE_MMX2
+ return swScale_MMX2;
+#elif HAVE_AMD3DNOW
+ return swScale_3DNow;
+#elif HAVE_MMX
+ return swScale_MMX;
+#elif defined (HAVE_ALTIVEC)
+ return swScale_altivec;
+#else
+ return swScale_C;
+#endif
+#endif //!RUNTIME_CPUDETECT
+}
+
+static int PlanarToNV12Wrapper(SwsContext *c, uint8_t* src[], stride_t srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dstParam[], stride_t dstStride[]){
+ uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY;
+ /* Copy Y plane */
+ if(dstStride[0]==srcStride[0] && srcStride[0] > 0)
+ memcpy(dst, src[0], srcSliceH*dstStride[0]);
+ else
+ {
+ int i;
+ uint8_t *srcPtr= src[0];
+ uint8_t *dstPtr= dst;
+ for(i=0; i<srcSliceH; i++)
+ {
+ memcpy(dstPtr, srcPtr, c->srcW);
+ srcPtr+= srcStride[0];
+ dstPtr+= dstStride[0];
+ }
+ }
+ dst = dstParam[1]; //dstParam[0] + dstStride[0]*c->srcH;
+ if (c->dstFormat == IMGFMT_NV12)
+ interleaveBytes( src[1],src[2],dst,c->srcW/2,srcSliceH/2,srcStride[1],srcStride[2],dstStride[0] );
+ else
+ interleaveBytes( src[2],src[1],dst,c->srcW/2,srcSliceH/2,srcStride[2],srcStride[1],dstStride[0] );
+
+ return srcSliceH;
+}
+
+static int NV12ToPlanarWrapper(SwsContext *c, uint8_t* srcParam[], stride_t srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], stride_t dstStride[]){
+ const uint8_t *src=srcParam[0] + srcStride[0]*srcSliceY;
+ unsigned char *dst_U =dst[1];
+ unsigned char *dst_V =dst[2];
+ int swapped=c->srcFormat == IMGFMT_NV12;
+ int x,y,idx;
+ int srcSliceHc = srcSliceH >> 1;
+ int srcWc = c->srcW >> 1;
+
+ if(dstStride[0]==srcStride[0] && srcStride[0]>0)
+ memcpy(dst[0], src, srcSliceH*dstStride[0]);
+ else
+ {
+ int i;
+ uint8_t *dstPtr= dst[0];
+ const uint8_t *srcPtr= src;
+ for(i=0; i<srcSliceH; i++)
+ {
+ memcpy(dstPtr, srcPtr, c->srcW);
+ srcPtr+= srcStride[0];
+ dstPtr+= dstStride[0];
+ }
+ }
+
+ idx=0;
+ src=srcParam[1];
+ if (swapped)
+ {
+ for(y = 0 ; y < srcSliceHc ; y++, dst_U += dstStride[1], dst_V += dstStride[2] ) {
+ idx = 0;
+ for(x = 0 ; x < srcWc ; x++, idx+=2){
+ *(dst_U + x) = *(src + idx);
+ *(dst_V + x) = *(src + idx + 1);
+ }
+ src += srcStride[1];
+ }
+ } else {
+ for(y = 0 ; y < srcSliceHc ; y++, dst_U += dstStride[1], dst_V += dstStride[2] ) {
+ idx = 0;
+ for(x = 0 ; x < srcWc ; x++, idx+=2){
+ *(dst_U + x) = *(src + idx + 1);
+ *(dst_V + x) = *(src + idx);
+ }
+ src += srcStride[1];
+ }
+ }
+ return srcSliceH;
+}
+
+static int PlanarToYuy2Wrapper(SwsContext *c, uint8_t* src[], stride_t srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dstParam[], stride_t dstStride[]){
+ uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY;
+
+ yv12toyuy2( src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0] );
+
+ return srcSliceH;
+}
+
+static int PlanarToUyvyWrapper(SwsContext *c, uint8_t* src[], stride_t srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dstParam[], stride_t dstStride[]){
+ uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY;
+
+ yv12touyvy( src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0] );
+
+ return srcSliceH;
+}
+
+static int PlanarToYvyuWrapper(SwsContext *c, uint8_t* src[], stride_t srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dstParam[], stride_t dstStride[]){
+ uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY;
+
+ yv12toyvyu( src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0] );
+
+ return srcSliceH;
+}
+static int PlanarToVyuyWrapper(SwsContext *c, uint8_t* src[], stride_t srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dstParam[], stride_t dstStride[]){
+ uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY;
+
+ yv12tovyuy( src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0] );
+
+ return srcSliceH;
+}
+
+/* {RGB,BGR}{15,16,24,32} -> {RGB,BGR}{15,16,24,32} */
+static int rgb2rgbWrapper(SwsContext *c, uint8_t* src[], stride_t srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], stride_t dstStride[]){
+ const int srcFormat= c->srcFormat;
+ const int dstFormat= c->dstFormat;
+ const int srcBpp= ((srcFormat&0xFF) + 7)>>3;
+ const int dstBpp= ((dstFormat&0xFF) + 7)>>3;
+ const int srcId= (srcFormat&0xFF)>>2; // 1:0, 4:1, 8:2, 15:3, 16:4, 24:6, 32:8
+ const int dstId= (dstFormat&0xFF)>>2;
+ void (*conv)(const uint8_t *src, uint8_t *dst, stride_t src_size)=NULL;
+
+ /* BGR -> BGR */
+ if( (isBGR(srcFormat) && isBGR(dstFormat))
+ || (isRGB(srcFormat) && isRGB(dstFormat))){
+ switch(srcId | (dstId<<4)){
+ case 0x34: conv= rgb16to15; break;
+ case 0x36: conv= rgb24to15; break;
+ case 0x38: conv= rgb32to15; break;
+ case 0x43: conv= rgb15to16; break;
+ case 0x46: conv= rgb24to16; break;
+ case 0x48: conv= rgb32to16; break;
+ case 0x63: conv= rgb15to24; break;
+ case 0x64: conv= rgb16to24; break;
+ case 0x68: conv= rgb32to24; break;
+ case 0x83: conv= rgb15to32; break;
+ case 0x84: conv= rgb16to32; break;
+ case 0x86: conv= rgb24to32; break;
+ default: MSG_ERR("swScaler: internal error %s -> %s converter\n",
+ sws_format_name(srcFormat), sws_format_name(dstFormat)); break;
+ }
+ }else if( (isBGR(srcFormat) && isRGB(dstFormat))
+ || (isRGB(srcFormat) && isBGR(dstFormat))){
+ switch(srcId | (dstId<<4)){
+ case 0x33: conv= rgb15tobgr15; break;
+ case 0x34: conv= rgb16tobgr15; break;
+ case 0x36: conv= rgb24tobgr15; break;
+ case 0x38: conv= rgb32tobgr15; break;
+ case 0x43: conv= rgb15tobgr16; break;
+ case 0x44: conv= rgb16tobgr16; break;
+ case 0x46: conv= rgb24tobgr16; break;
+ case 0x48: conv= rgb32tobgr16; break;
+ case 0x63: conv= rgb15tobgr24; break;
+ case 0x64: conv= rgb16tobgr24; break;
+ case 0x66: conv= rgb24tobgr24; break;
+ case 0x68: conv= rgb32tobgr24; break;
+ case 0x83: conv= rgb15tobgr32; break;
+ case 0x84: conv= rgb16tobgr32; break;
+ case 0x86: conv= rgb24tobgr32; break;
+ case 0x88: conv= rgb32tobgr32; break;
+ default: MSG_ERR("swScaler: internal error %s -> %s converter\n",
+ sws_format_name(srcFormat), sws_format_name(dstFormat)); break;
+ }
+ }else{
+ MSG_ERR("swScaler: internal error %s -> %s converter\n",
+ sws_format_name(srcFormat), sws_format_name(dstFormat));
+ }
+
+ if(dstStride[0]*srcBpp == srcStride[0]*dstBpp && dstStride[0]>0)
+ conv(src[0], dst[0] + dstStride[0]*srcSliceY, srcSliceH*srcStride[0]);
+ else
+ {
+ int i;
+ uint8_t *srcPtr= src[0];
+ uint8_t *dstPtr= dst[0] + dstStride[0]*srcSliceY;
+
+ for(i=0; i<srcSliceH; i++)
+ {
+ conv(srcPtr, dstPtr, c->srcW*srcBpp);
+ srcPtr+= srcStride[0];
+ dstPtr+= dstStride[0];
+ }
+ }
+ return srcSliceH;
+}
+
+static int bgr24toyv12Wrapper(SwsContext *c, uint8_t* src[], stride_t srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], stride_t dstStride[]){
+
+ rgb24toyv12(
+ src[0],
+ dst[0]+ srcSliceY *dstStride[0],
+ dst[1]+(srcSliceY>>1)*dstStride[1],
+ dst[2]+(srcSliceY>>1)*dstStride[2],
+ c->srcW, srcSliceH,
+ dstStride[0], dstStride[1], srcStride[0]);
+ return srcSliceH;
+}
+
+static int yvu9toyv12Wrapper(SwsContext *c, uint8_t* src[], stride_t srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], stride_t dstStride[]){
+ int i;
+
+ /* copy Y */
+ if(srcStride[0]==dstStride[0] && srcStride[0] > 0)
+ memcpy(dst[0]+ srcSliceY*dstStride[0], src[0], srcStride[0]*srcSliceH);
+ else{
+ uint8_t *srcPtr= src[0];
+ uint8_t *dstPtr= dst[0] + dstStride[0]*srcSliceY;
+
+ for(i=0; i<srcSliceH; i++)
+ {
+ memcpy(dstPtr, srcPtr, c->srcW);
+ srcPtr+= srcStride[0];
+ dstPtr+= dstStride[0];
+ }
+ }
+
+ if(c->dstFormat==IMGFMT_YV12){
+ planar2x(src[1], dst[1], c->chrSrcW, c->chrSrcH, srcStride[1], dstStride[1]);
+ planar2x(src[2], dst[2], c->chrSrcW, c->chrSrcH, srcStride[2], dstStride[2]);
+ }else{
+ planar2x(src[1], dst[2], c->chrSrcW, c->chrSrcH, srcStride[1], dstStride[2]);
+ planar2x(src[2], dst[1], c->chrSrcW, c->chrSrcH, srcStride[2], dstStride[1]);
+ }
+ return srcSliceH;
+}
+
+/**
+ * bring pointers into YUV order instead of YVU
+ */
+static inline void sws_orderYUV(int format, uint8_t * sortedP[], stride_t sortedStride[], uint8_t * p[], stride_t stride[]){
+ if(format == IMGFMT_YV12 || format == IMGFMT_YVU9
+ || format == IMGFMT_444P || format == IMGFMT_422P || format == IMGFMT_411P){
+ sortedP[0]= p[0];
+ sortedP[1]= p[2];
+ sortedP[2]= p[1];
+ sortedStride[0]= stride[0];
+ sortedStride[1]= stride[2];
+ sortedStride[2]= stride[1];
+ }
+ else if(isPacked(format) || isGray(format) || format == IMGFMT_Y8)
+ {
+ sortedP[0]= p[0];
+ sortedP[1]=
+ sortedP[2]= NULL;
+ sortedStride[0]= stride[0];
+ sortedStride[1]=
+ sortedStride[2]= 0;
+ }
+ else if(format == IMGFMT_I420 || format == IMGFMT_IYUV)
+ {
+ sortedP[0]= p[0];
+ sortedP[1]= p[1];
+ sortedP[2]= p[2];
+ sortedStride[0]= stride[0];
+ sortedStride[1]= stride[1];
+ sortedStride[2]= stride[2];
+ }
+ else if(format == IMGFMT_NV12 || format == IMGFMT_NV21)
+ {
+ sortedP[0]= p[0];
+ sortedP[1]= p[1];
+ sortedP[2]= NULL;
+ sortedStride[0]= stride[0];
+ sortedStride[1]= stride[1];
+ sortedStride[2]= 0;
+ }else{
+ MSG_ERR("internal error in orderYUV\n");
+ }
+}
+
+/* unscaled copy-like stuff (assumes nearly identical formats) */
+static int simpleCopy(SwsContext *c, uint8_t* src[], stride_t srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], stride_t dstStride[]){
+
+ if(isPacked(c->srcFormat))
+ {
+ int length= c->srcW * csp_mplayercsp2Bpp(c->srcFormat);
+
+ if(dstStride[0]==srcStride[0] && srcStride[0] > 0 && srcSliceH>0)
+ memcpy(dst[0] + dstStride[0]*srcSliceY, src[0], (srcSliceH-1)*dstStride[0]+length); // older version crashed in ffdshow-crop, which moves the image horizontally. Last line overflows.
+ else
+ {
+ int i;
+ uint8_t *srcPtr= src[0];
+ uint8_t *dstPtr= dst[0] + dstStride[0]*srcSliceY;
+
+ for(i=0; i<srcSliceH; i++)
+ {
+ memcpy(dstPtr, srcPtr, length);
+ srcPtr+= srcStride[0];
+ dstPtr+= dstStride[0];
+ }
+ }
+ }
+ else if (c->srcFormat == IMGFMT_NV12 || c->srcFormat == IMGFMT_NV21)
+ {
+ if(dstStride[0]==srcStride[0] && srcStride[0] > 0 && srcSliceH>0)
+ {
+ memcpy(dst[0] + dstStride[0] * srcSliceY, src[0], (srcSliceH - 1) * dstStride[0] + c->srcW);
+ memcpy(dst[1] + (dstStride[0] * srcSliceY >> 1), src[1], ((srcSliceH >> 1) - 1) * dstStride[0] + c->srcW);
+ }
+ else
+ {
+ int i;
+ int srcSliceHc = srcSliceH >> 1;
+ uint8_t *srcPtr= src[0];
+ uint8_t *dstPtr= dst[0] + dstStride[0] * srcSliceY;
+
+ for(i=0 ; i<srcSliceH ; i++)
+ {
+ memcpy(dstPtr, srcPtr, c->srcW);
+ srcPtr+= srcStride[0];
+ dstPtr+= dstStride[0];
+ }
+ srcPtr = src[1];
+ dstPtr = dst[1] + (dstStride[0] * srcSliceY >> 1);
+ for(i=0 ; i<srcSliceHc ; i++)
+ {
+ memcpy(dstPtr, srcPtr, c->srcW);
+ srcPtr+= srcStride[0];
+ dstPtr+= dstStride[0];
+ }
+ }
+ }
+ else
+ { /* Planar YUV or gray */
+ int plane;
+ for(plane=0; plane<3; plane++)
+ {
+ int length= plane==0 ? c->srcW : -((-c->srcW )>>c->chrDstHSubSample);
+ int y= plane==0 ? srcSliceY: -((-srcSliceY)>>c->chrDstVSubSample);
+ int height= plane==0 ? srcSliceH: -((-srcSliceH)>>c->chrDstVSubSample);
+
+ if((isGray(c->srcFormat) || isGray(c->dstFormat)) && plane>0)
+ {
+ if(!isGray(c->dstFormat))
+ memset(dst[plane], 128, dstStride[plane]*height);
+ }
+ else
+ {
+ if(dstStride[plane]==srcStride[plane] && srcStride[plane] > 0 && height>0)
+ memcpy(dst[plane] + dstStride[plane]*y, src[plane], (height-1)*dstStride[plane] + length);
+ else
+ {
+ int i;
+ uint8_t *srcPtr= src[plane];
+ uint8_t *dstPtr= dst[plane] + dstStride[plane]*y;
+ for(i=0; i<height; i++)
+ {
+ memcpy(dstPtr, srcPtr, length);
+ srcPtr+= srcStride[plane];
+ dstPtr+= dstStride[plane];
+ }
+ }
+ }
+ }
+ }
+ return srcSliceH;
+}
+
+static int remove_dup_fourcc(int fourcc)
+{
+ switch(fourcc)
+ {
+ case IMGFMT_I420:
+ case IMGFMT_IYUV: return IMGFMT_YV12;
+ case IMGFMT_Y8 : return IMGFMT_Y800;
+ case IMGFMT_IF09: return IMGFMT_YVU9;
+ default: return fourcc;
+ }
+}
+
+static void getSubSampleFactors(int *h, int *v, int format){
+ switch(format){
+ case IMGFMT_UYVY:
+ case IMGFMT_YVYU:
+ case IMGFMT_VYUY:
+ case IMGFMT_YUY2:
+ *h=1;
+ *v=0;
+ break;
+ case IMGFMT_YV12:
+ case IMGFMT_Y800: //FIXME remove after different subsamplings are fully implemented
+ case IMGFMT_NV12:
+ case IMGFMT_NV21:
+ *h=1;
+ *v=1;
+ break;
+ case IMGFMT_YVU9:
+ *h=2;
+ *v=2;
+ break;
+ case IMGFMT_444P:
+ *h=0;
+ *v=0;
+ break;
+ case IMGFMT_422P:
+ *h=1;
+ *v=0;
+ break;
+ case IMGFMT_411P:
+ *h=2;
+ *v=0;
+ break;
+ default:
+ *h=0;
+ *v=0;
+ break;
+ }
+}
+
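+// Rounds a 16.16 fixed-point value to the nearest integer and saturates it to the signed 16-bit
+// range (the result is returned in a uint16_t, so negative overflow comes back as 0x8000).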
+static uint16_t roundToInt16(int64_t f){
+ int r= (f + (1<<15))>>16;
+ if(r<-0x7FFF) return 0x8000;
+ else if(r> 0x7FFF) return 0x7FFF;
+ else return r;
+}
+
+/**
+ * @param inv_table the yuv2rgb coeffs, normally Inverse_Table_6_9[x]
+ * @param srcRange if 1 the luma range is 0..255, if 0 it is 16..235
+ * @return -1 if not supported
+ */
+int sws_setColorspaceDetails(SwsContext *c, const int inv_table[7], int srcRange, const int table[7], int dstRange, int brightness, int contrast, int saturation){
+ int64_t crv = inv_table[0];
+ int64_t cbu = inv_table[1];
+ int64_t cgu = -inv_table[2];
+ int64_t cgv = -inv_table[3];
+ int64_t cy = inv_table[4];
+ int64_t oy = inv_table[5];
+
+ if(isYUV(c->dstFormat) || isGray(c->dstFormat)) return -1;
+ memcpy(c->srcColorspaceTable, inv_table, sizeof(int)*7);
+ memcpy(c->dstColorspaceTable, table, sizeof(int)*7);
+
+ c->brightness= brightness;
+ c->contrast = contrast;
+ c->saturation= saturation;
+ c->srcRange = srcRange;
+ c->dstRange = dstRange;
+
+ c->uOffset= 0x0400040004000400LL;
+ c->vOffset= 0x0400040004000400LL;
+
+ cy = (cy *contrast )>>16;
+ crv= (crv*contrast * saturation)>>32;
+ cbu= (cbu*contrast * saturation)>>32;
+ cgu= (cgu*contrast * saturation)>>32;
+ cgv= (cgv*contrast * saturation)>>32;
+
+ oy -= 256*brightness;
+
+ c->yCoeff= roundToInt16(cy *8192) * 0x0001000100010001ULL;
+ c->vrCoeff= roundToInt16(crv*8192) * 0x0001000100010001ULL;
+ c->ubCoeff= roundToInt16(cbu*8192) * 0x0001000100010001ULL;
+ c->vgCoeff= roundToInt16(cgv*8192) * 0x0001000100010001ULL;
+ c->ugCoeff= roundToInt16(cgu*8192) * 0x0001000100010001ULL;
+ c->yOffset= roundToInt16(oy * 8) * 0x0001000100010001ULL;
+
+ yuv2rgb_c_init_tables(c, inv_table, srcRange, brightness, contrast, saturation);
+ //FIXME factorize
+
+#ifdef COMPILE_ALTIVEC
+ if (c->flags & SWS_CPU_CAPS_ALTIVEC)
+ yuv2rgb_altivec_init_tables (c, inv_table, brightness, contrast, saturation);
+#endif
+ return 0;
+}
+
+/**
+ * @return -1 if not supported
+ */
+int sws_getColorspaceDetails(SwsContext *c, int **inv_table, int *srcRange, int **table, int *dstRange, int *brightness, int *contrast, int *saturation){
+ if(isYUV(c->dstFormat) || isGray(c->dstFormat)) return -1;
+
+ *inv_table = c->srcColorspaceTable;
+ *table = c->dstColorspaceTable;
+ *srcRange = c->srcRange;
+ *dstRange = c->dstRange;
+ *brightness= c->brightness;
+ *contrast = c->contrast;
+ *saturation= c->saturation;
+
+ return 0;
+}
+
+// Be careful when you modify SwsContext after returning from sws_getContext.
+// SwsContext is copied into an array for multithreading.
+// sws_scale_ordered modifies c->sliceDir; it does not matter for now.
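+// sws_getContextEx() allocates threadCount contiguous SwsContext structs and later memcpy()s c[0]
+// into c[1..threadCount-1], so the returned pointer doubles as that array (per-thread pixel
+// buffers are allocated afterwards).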
+SwsContext *sws_getContext(int srcW, int srcH, int origSrcFormat, int dstW, int dstH, int origDstFormat, SwsParams *params,
+ SwsFilter *srcFilter, SwsFilter *dstFilter, const int yuv2rgbTable[6]){
+ return sws_getContextEx(srcW, srcH, origSrcFormat, dstW, dstH, origDstFormat, params,
+ srcFilter, dstFilter, yuv2rgbTable, GetCPUCount());
+}
+
+SwsContext *sws_getContextEx(int srcW, int srcH, int origSrcFormat, int dstW, int dstH, int origDstFormat, SwsParams *params,
+ SwsFilter *srcFilter, SwsFilter *dstFilter, const int yuv2rgbTable[6], int threadCount){
+
+ SwsContext *c;
+ int i;
+ int usesVFilter, usesHFilter;
+ int unscaled, needsDither;
+ int srcFormat, dstFormat;
+ SwsFilter dummyFilter= {NULL, NULL, NULL, NULL};
+#if ARCH_X86_32 || ARCH_X86_64
+ if(params->cpu & SWS_CPU_CAPS_MMX)
+ asm volatile("emms\n\t"::: "memory");
+#else
+ params->cpu=0;
+#endif
+
+#ifndef RUNTIME_CPUDETECT //ensure that the CPU flags match the compiled variant if cpudetect is off
+    params->cpu &= ~(SWS_CPU_CAPS_MMX|SWS_CPU_CAPS_MMX2|SWS_CPU_CAPS_3DNOW|SWS_CPU_CAPS_ALTIVEC);
+#if HAVE_MMX2
+    params->cpu |= SWS_CPU_CAPS_MMX|SWS_CPU_CAPS_MMX2;
+#elif HAVE_AMD3DNOW
+    params->cpu |= SWS_CPU_CAPS_MMX|SWS_CPU_CAPS_3DNOW;
+#elif HAVE_MMX
+    params->cpu |= SWS_CPU_CAPS_MMX;
+#elif defined (HAVE_ALTIVEC)
+    params->cpu |= SWS_CPU_CAPS_ALTIVEC;
+#endif
+#endif /* RUNTIME_CPUDETECT */
+ if(clip_table[512] != 255) globalInit();
+ if(rgb15to16 == NULL) sws_rgb2rgb_init(params);
+
+    /* avoid duplicate formats, so we don't need to check too much */
+ srcFormat = remove_dup_fourcc(origSrcFormat);
+ dstFormat = remove_dup_fourcc(origDstFormat);
+
+ unscaled = (srcW == dstW && srcH == dstH);
+ needsDither= (isBGR(dstFormat) || isRGB(dstFormat))
+ && (dstFormat&0xFF)<24
+ && ((dstFormat&0xFF)<(srcFormat&0xFF) || (!(isRGB(srcFormat) || isBGR(srcFormat))));
+
+ if(!isSupportedIn(srcFormat))
+ {
+ MSG_ERR("swScaler: %s is not supported as input format\n", sws_format_name(srcFormat));
+ return NULL;
+ }
+ if(!isSupportedOut(dstFormat))
+ {
+ MSG_ERR("swScaler: %s is not supported as output format\n", sws_format_name(dstFormat));
+ return NULL;
+ }
+
+ /* sanity check */
+    if(srcW<4 || srcH<1 || dstW<8 || dstH<1) //FIXME check if these are enough and try to lower them after fixing the relevant parts of the code
+ {
+ MSG_ERR("swScaler: %dx%d -> %dx%d is invalid scaling dimension\n",
+ srcW, srcH, dstW, dstH);
+ return NULL;
+ }
+
+ if(!dstFilter) dstFilter= &dummyFilter;
+ if(!srcFilter) srcFilter= &dummyFilter;
+
+ c= av_malloc(sizeof(SwsContext)*threadCount); // array support up to c[threadCount-1]. "c->" is identical to "c[0]."
+ memset(c, 0, sizeof(SwsContext));
+
+ c->srcW= srcW;
+ c->srcH= srcH;
+ c->dstW= dstW;
+ c->dstH= dstH;
+ c->lumXInc= ((srcW<<16) + (dstW>>1))/dstW;
+ c->lumYInc= ((srcH<<16) + (dstH>>1))/dstH;
+ c->params= *params;
+ c->dstFormat= dstFormat;
+ c->srcFormat= srcFormat;
+ c->origDstFormat= origDstFormat;
+ c->origSrcFormat= origSrcFormat;
+ c->vRounder= 4* 0x0001000100010001ULL;
+
+ usesHFilter= usesVFilter= 0;
+ if(dstFilter->lumV!=NULL && dstFilter->lumV->length>1) usesVFilter=1;
+ if(dstFilter->lumH!=NULL && dstFilter->lumH->length>1) usesHFilter=1;
+ if(dstFilter->chrV!=NULL && dstFilter->chrV->length>1) usesVFilter=1;
+ if(dstFilter->chrH!=NULL && dstFilter->chrH->length>1) usesHFilter=1;
+ if(srcFilter->lumV!=NULL && srcFilter->lumV->length>1) usesVFilter=1;
+ if(srcFilter->lumH!=NULL && srcFilter->lumH->length>1) usesHFilter=1;
+ if(srcFilter->chrV!=NULL && srcFilter->chrV->length>1) usesVFilter=1;
+ if(srcFilter->chrH!=NULL && srcFilter->chrH->length>1) usesHFilter=1;
+
+ getSubSampleFactors(&c->chrSrcHSubSample, &c->chrSrcVSubSample, srcFormat);
+ getSubSampleFactors(&c->chrDstHSubSample, &c->chrDstVSubSample, dstFormat);
+
+    // reuse chroma for 2 pixels rgb/bgr unless the user wants full chroma interpolation
+ if((isBGR(dstFormat) || isRGB(dstFormat)) && !(params->subsampling&SWS_FULL_CHR_H_INT)) c->chrDstHSubSample=1;
+
+ // drop some chroma lines if the user wants it
+ c->vChrDrop= params->v_chr_drop;// (flags&SWS_SRC_V_CHR_DROP_MASK)>>SWS_SRC_V_CHR_DROP_SHIFT;
+ c->chrSrcVSubSample+= c->vChrDrop;
+
+    // drop every 2nd pixel for chroma calculation unless the user wants full chroma
+ if((isBGR(srcFormat) || isRGB(srcFormat)) && !(params->subsampling&SWS_FULL_CHR_H_INP))
+ c->chrSrcHSubSample=1;
+
+ //c->chrIntHSubSample= c->chrDstHSubSample;
+ //c->chrIntVSubSample= c->chrSrcVSubSample;
+
+    // note the -((-x)>>y) is so that we always round toward +inf
+ c->chrSrcW= -((-srcW) >> c->chrSrcHSubSample);
+ c->chrSrcH= -((-srcH) >> c->chrSrcVSubSample);
+ c->chrDstW= -((-dstW) >> c->chrDstHSubSample);
+ c->chrDstH= -((-dstH) >> c->chrDstVSubSample);
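+    // e.g. with srcW == 5 and a subsample shift of 1 this gives 3 == ceil(5/2),
+    // whereas a plain 5>>1 would give 2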
+
+ sws_setColorspaceDetails(c, yuv2rgbTable ? yuv2rgbTable : Inverse_Table_6_9[SWS_CS_DEFAULT], 0, Inverse_Table_6_9[SWS_CS_DEFAULT] /* FIXME*/, 0, 0, 1<<16, 1<<16);
+
+ /* unscaled special Cases */
+ if(unscaled && !usesHFilter && !usesVFilter)
+ {
+ /* yv12_to_nv12 */
+ if(srcFormat == IMGFMT_YV12 && (dstFormat == IMGFMT_NV12 || dstFormat == IMGFMT_NV21))
+ {
+ c->swScale= PlanarToNV12Wrapper;
+ }
+ if((srcFormat == IMGFMT_NV12 || srcFormat == IMGFMT_NV21) && dstFormat == IMGFMT_YV12)
+ {
+ c->swScale= NV12ToPlanarWrapper;
+ }
+ /* yuv2bgr */
+ if((srcFormat==IMGFMT_YV12 || srcFormat==IMGFMT_422P) && (isBGR(dstFormat) || isRGB(dstFormat)))
+ {
+ c->swScale= yuv2rgb_get_func_ptr(c);
+ }
+
+ if( srcFormat==IMGFMT_YVU9 && dstFormat==IMGFMT_YV12 )
+ {
+ c->swScale= yvu9toyv12Wrapper;
+ }
+
+ /* bgr24toYV12 */
+ if(srcFormat==IMGFMT_BGR24 && dstFormat==IMGFMT_YV12)
+ c->swScale= bgr24toyv12Wrapper;
+
+ /* rgb/bgr -> rgb/bgr (no dither needed forms) */
+ if( (isBGR(srcFormat) || isRGB(srcFormat))
+ && (isBGR(dstFormat) || isRGB(dstFormat))
+ && !needsDither)
+ c->swScale= rgb2rgbWrapper;
+
+ /* LQ converters if -sws 0 or -sws 4*/
+ if(c->params.methodLuma.method&(SWS_FAST_BILINEAR|SWS_POINT) && c->params.methodChroma.method&(SWS_FAST_BILINEAR|SWS_POINT)){
+ /* rgb/bgr -> rgb/bgr (dither needed forms) */
+ if( (isBGR(srcFormat) || isRGB(srcFormat))
+ && (isBGR(dstFormat) || isRGB(dstFormat))
+ && needsDither)
+ c->swScale= rgb2rgbWrapper;
+
+ /* yv12_to_yuy2 */
+ if(srcFormat == IMGFMT_YV12 &&
+ (dstFormat == IMGFMT_YUY2 || dstFormat == IMGFMT_UYVY || dstFormat == IMGFMT_YVYU || dstFormat== IMGFMT_VYUY))
+ {
+ if (dstFormat == IMGFMT_YUY2)
+ c->swScale= PlanarToYuy2Wrapper;
+ else if (dstFormat == IMGFMT_UYVY)
+ c->swScale= PlanarToUyvyWrapper;
+ else if (dstFormat == IMGFMT_VYUY)
+ c->swScale= PlanarToVyuyWrapper;
+ else
+ c->swScale= PlanarToYvyuWrapper;
+ }
+ }
+
+#ifdef COMPILE_ALTIVEC
+ if ((c->flags & SWS_CPU_CAPS_ALTIVEC) &&
+ ((srcFormat == IMGFMT_YV12 &&
+ (dstFormat == IMGFMT_YUY2 || dstFormat == IMGFMT_UYVY)))) {
+ // unscaled YV12 -> packed YUV, we want speed
+ if (dstFormat == IMGFMT_YUY2)
+ c->swScale= yv12toyuy2_unscaled_altivec;
+ else
+ c->swScale= yv12touyvy_unscaled_altivec;
+ }
+#endif
+
+ /* simple copy */
+ if( srcFormat == dstFormat
+ || (isPlanarYUV(srcFormat) && isGray(dstFormat))
+ || (isPlanarYUV(dstFormat) && isGray(srcFormat))
+ )
+ {
+ c->swScale= simpleCopy;
+ }
+
+ if(c->swScale){
+ if(params->debug&SWS_PRINT_INFO)
+ MSG_INFO("SwScaler: using unscaled %s -> %s special converter\n",
+ sws_format_name(srcFormat), sws_format_name(dstFormat));
+ return c;
+ }
+ }
+
+ if(params->cpu & SWS_CPU_CAPS_MMX2)
+ {
+ c->canMMX2BeUsed= (dstW >=srcW && (dstW&31)==0 && (srcW&15)==0) ? 1 : 0;
+ if(!c->canMMX2BeUsed && dstW >=srcW && (srcW&15)==0 && (params->methodLuma.method&SWS_FAST_BILINEAR))
+ {
+ if(params->debug&SWS_PRINT_INFO)
+ MSG_INFO("SwScaler: output Width is not a multiple of 32 -> no MMX2 scaler\n");
+ }
+ if(usesHFilter) c->canMMX2BeUsed=0;
+ }
+ else
+ c->canMMX2BeUsed=0;
+
+ c->chrXInc= ((c->chrSrcW<<16) + (c->chrDstW>>1))/c->chrDstW;
+ c->chrYInc= ((c->chrSrcH<<16) + (c->chrDstH>>1))/c->chrDstH;
+
+ // match pixel 0 of the src to pixel 0 of dst and match pixel n-2 of src to pixel n-2 of dst
+ // but only for the FAST_BILINEAR mode otherwise do correct scaling
+ // n-2 is the last chrominance sample available
+    // this is not perfect, but no one should notice the difference; the more correct variant
+ // would be like the vertical one, but that would require some special code for the
+ // first and last pixel
+ if(params->methodLuma.method&SWS_FAST_BILINEAR)
+ {
+ if(c->canMMX2BeUsed)
+ {
+ c->lumXInc = ((srcW-2)<<16)/(dstW);
+ }
+ //we don't use the x86asm scaler if mmx is available
+ else if(params->cpu& SWS_CPU_CAPS_MMX)
+ {
+ c->lumXInc = ((srcW-2)<<16)/(dstW-2) - 20;
+ }
+ else
+ c->lumXInc = ((srcW-2)<<16)/(dstW);
+ }
+
+ if(params->methodChroma.method&SWS_FAST_BILINEAR)
+ {
+ if(c->canMMX2BeUsed)
+ {
+ c->chrXInc = ((c->chrSrcW-2)<<16)/(c->chrDstW);
+ }
+ //we don't use the x86asm scaler if mmx is available
+ else if(params->cpu& SWS_CPU_CAPS_MMX)
+ {
+ c->chrXInc = ((c->chrSrcW-2)<<16)/(c->chrDstW-2) - 20;
+ }
+ else
+ c->chrXInc = ((c->chrSrcW-2)<<16)/(c->chrDstW);
+ }
+
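+    // the 'error' term carries the rounding remainder of each coefficient into the next one,
+    // so the quantized row still sums to (almost exactly) 'one' after conversion to int16_t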
+ /* precalculate horizontal scaler filter coefficients */
+ {
+ const int filterAlign=
+ (params->cpu & SWS_CPU_CAPS_MMX) ? 4 :
+ (params->cpu & SWS_CPU_CAPS_ALTIVEC) ? 8 :
+ 1;
+
+ initFilter(&c->hLumFilter, &c->hLumFilterPos, &c->hLumFilterSize, c->lumXInc,
+ srcW , dstW, filterAlign, 1<<14,
+ /*(flags&SWS_BICUBLIN) ? (flags|SWS_BICUBIC) : flags*/&params->methodLuma,params->subsampling,params->cpu,params->debug,
+ srcFilter->lumH, dstFilter->lumH);
+ initFilter(&c->hChrFilter, &c->hChrFilterPos, &c->hChrFilterSize, c->chrXInc,
+ c->chrSrcW, c->chrDstW, filterAlign, 1<<14,
+ /*(flags&SWS_BICUBLIN) ? (flags|SWS_BILINEAR) : flags*/&params->methodChroma,params->subsampling,params->cpu,params->debug,
+ srcFilter->chrH, dstFilter->chrH);
+
+#if ARCH_X86 || ARCH_X86_64
+// can't downscale !!!
+#define MAX_FUNNY_CODE_SIZE 10000
+ if(c->canMMX2BeUsed && (params->methodLuma.method & SWS_FAST_BILINEAR))
+ {
+#ifdef HAVE_SYS_MMAN_H
+ c->funnyYCode = (uint8_t*)mmap(NULL, MAX_FUNNY_CODE_SIZE, PROT_EXEC | PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+#else
+ c->funnyYCode = (uint8_t*)VirtualAlloc(NULL,MAX_FUNNY_CODE_SIZE,MEM_COMMIT,PAGE_EXECUTE_READWRITE); //memalign(32, MAX_FUNNY_CODE_SIZE);
+#endif
+
+ c->lumMmx2Filter = av_malloc((dstW /8+8)*sizeof(int16_t));
+ c->lumMmx2FilterPos= av_malloc((dstW /2/8+8)*sizeof(int32_t));
+
+ initMMX2HScaler( dstW, c->lumXInc, c->funnyYCode , c->lumMmx2Filter, c->lumMmx2FilterPos, 8);
+ }
+ if(c->canMMX2BeUsed && (params->methodChroma.method & SWS_FAST_BILINEAR))
+ {
+#ifdef HAVE_SYS_MMAN_H
+ c->funnyUVCode = (uint8_t*)mmap(NULL, MAX_FUNNY_CODE_SIZE, PROT_EXEC | PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+#else
+ c->funnyUVCode = (uint8_t*)VirtualAlloc(NULL,MAX_FUNNY_CODE_SIZE,MEM_COMMIT,PAGE_EXECUTE_READWRITE); //memalign(32, MAX_FUNNY_CODE_SIZE);
+#endif
+
+ c->chrMmx2Filter = av_malloc((c->chrDstW /4+8)*sizeof(int16_t));
+ c->chrMmx2FilterPos= av_malloc((c->chrDstW/2/4+8)*sizeof(int32_t));
+
+ initMMX2HScaler(c->chrDstW, c->chrXInc, c->funnyUVCode, c->chrMmx2Filter, c->chrMmx2FilterPos, 4);
+ }
+#endif /* ARCH_X86 || ARCH_X86_64 */
+ } // Init Horizontal stuff
+
+
+
+ /* precalculate vertical scaler filter coefficients */
+ {
+ const int filterAlign=
+ (params->cpu & SWS_CPU_CAPS_MMX) && (params->subsampling & SWS_ACCURATE_RND) ? 2 :
+ (params->cpu & SWS_CPU_CAPS_ALTIVEC) ? 8 :
+ 1;
+
+ initFilter(&c->vLumFilter, &c->vLumFilterPos, &c->vLumFilterSize, c->lumYInc,
+ srcH , dstH, filterAlign, (1<<12)-4,
+ /*(flags&SWS_BICUBLIN) ? (flags|SWS_BICUBIC) : flags*/&params->methodLuma,params->subsampling,params->cpu,params->debug,
+ srcFilter->lumV, dstFilter->lumV);
+ initFilter(&c->vChrFilter, &c->vChrFilterPos, &c->vChrFilterSize, c->chrYInc,
+ c->chrSrcH, c->chrDstH, filterAlign, (1<<12)-4,
+ /*(flags&SWS_BICUBLIN) ? (flags|SWS_BILINEAR) : flags*/&params->methodChroma,params->subsampling,params->cpu,params->debug,
+ srcFilter->chrV, dstFilter->chrV);
+
+#ifdef HAVE_ALTIVEC
+ c->vYCoeffsBank = av_malloc(sizeof (vector signed short)*c->vLumFilterSize*c->dstH);
+ c->vCCoeffsBank = av_malloc(sizeof (vector signed short)*c->vChrFilterSize*c->chrDstH);
+
+ for (i=0;i<c->vLumFilterSize*c->dstH;i++) {
+ int j;
+ short *p = (short *)&c->vYCoeffsBank[i];
+ for (j=0;j<8;j++)
+ p[j] = c->vLumFilter[i];
+ }
+
+ for (i=0;i<c->vChrFilterSize*c->chrDstH;i++) {
+ int j;
+ short *p = (short *)&c->vCCoeffsBank[i];
+ for (j=0;j<8;j++)
+ p[j] = c->vChrFilter[i];
+ }
+#endif
+ }
+
+ // Calculate Buffer Sizes so that they won't run out while handling these damn slices
+ c->vLumBufSize= c->vLumFilterSize;
+ c->vChrBufSize= c->vChrFilterSize;
+ for(i=0; i<dstH; i++)
+ {
+ int chrI= i*c->chrDstH / dstH;
+ int nextSlice= FFMAX(c->vLumFilterPos[i ] + c->vLumFilterSize - 1,
+ ((c->vChrFilterPos[chrI] + c->vChrFilterSize - 1)<<c->chrSrcVSubSample));
+
+ nextSlice>>= c->chrSrcVSubSample;
+ nextSlice<<= c->chrSrcVSubSample;
+ if(c->vLumFilterPos[i ] + c->vLumBufSize < nextSlice)
+ c->vLumBufSize= nextSlice - c->vLumFilterPos[i ];
+ if(c->vChrFilterPos[chrI] + c->vChrBufSize < (nextSlice>>c->chrSrcVSubSample))
+ c->vChrBufSize= (nextSlice>>c->chrSrcVSubSample) - c->vChrFilterPos[chrI];
+ }
+
+ ASSERT(c->chrDstH <= dstH)
+
+ if(params->debug&SWS_PRINT_INFO)
+ {
+#ifdef DITHER1XBPP
+ char *dither= " dithered";
+#else
+ char *dither= "";
+#endif
+ if(params->methodLuma.method&SWS_FAST_BILINEAR)
+ MSG_INFO("\nSwScaler: luma FAST_BILINEAR scaler, ");
+ else if(params->methodLuma.method&SWS_BILINEAR)
+ MSG_INFO("\nSwScaler: luma BILINEAR scaler, ");
+ else if(params->methodLuma.method&SWS_BICUBIC)
+ MSG_INFO("\nSwScaler: luma BICUBIC scaler, ");
+ else if(params->methodLuma.method&SWS_X)
+ MSG_INFO("\nSwScaler: luma Experimental scaler, ");
+ else if(params->methodLuma.method&SWS_POINT)
+ MSG_INFO("\nSwScaler: luma Nearest Neighbor / POINT scaler, ");
+ else if(params->methodLuma.method&SWS_AREA)
+            MSG_INFO("\nSwScaler: luma Area Averaging scaler, ");
+ else if(params->methodLuma.method&SWS_GAUSS)
+ MSG_INFO("\nSwScaler: luma Gaussian scaler, ");
+ else if(params->methodLuma.method&SWS_SINC)
+ MSG_INFO("\nSwScaler: luma Sinc scaler, ");
+ else if(params->methodLuma.method&SWS_LANCZOS)
+ MSG_INFO("\nSwScaler: luma Lanczos scaler, ");
+ else if(params->methodLuma.method&SWS_SPLINE)
+ MSG_INFO("\nSwScaler: luma Bicubic spline scaler, ");
+ else
+ MSG_INFO("\nSwScaler: luma ehh flags invalid?! ");
+
+ if(params->methodLuma.method!=params->methodChroma.method)
+ if(params->methodChroma.method&SWS_FAST_BILINEAR)
+ MSG_INFO("\nSwScaler: chroma FAST_BILINEAR scaler, ");
+ else if(params->methodChroma.method&SWS_BILINEAR)
+ MSG_INFO("\nSwScaler: chroma BILINEAR scaler, ");
+ else if(params->methodChroma.method&SWS_BICUBIC)
+ MSG_INFO("\nSwScaler: chroma BICUBIC scaler, ");
+ else if(params->methodChroma.method&SWS_X)
+ MSG_INFO("\nSwScaler: chroma Experimental scaler, ");
+ else if(params->methodChroma.method&SWS_POINT)
+ MSG_INFO("\nSwScaler: chroma Nearest Neighbor / POINT scaler, ");
+ else if(params->methodChroma.method&SWS_AREA)
+            MSG_INFO("\nSwScaler: chroma Area Averaging scaler, ");
+ else if(params->methodChroma.method&SWS_GAUSS)
+ MSG_INFO("\nSwScaler: chroma Gaussian scaler, ");
+ else if(params->methodChroma.method&SWS_SINC)
+ MSG_INFO("\nSwScaler: chroma Sinc scaler, ");
+ else if(params->methodChroma.method&SWS_LANCZOS)
+ MSG_INFO("\nSwScaler: chroma Lanczos scaler, ");
+ else if(params->methodChroma.method&SWS_SPLINE)
+ MSG_INFO("\nSwScaler: chroma Bicubic spline scaler, ");
+ else
+ MSG_INFO("\nSwScaler: chroma ehh flags invalid?! ");
+
+ if(dstFormat==IMGFMT_BGR15 || dstFormat==IMGFMT_BGR16)
+ MSG_INFO("from %s to%s %s ",
+ sws_format_name(srcFormat), dither, sws_format_name(dstFormat));
+ else
+ MSG_INFO("from %s to %s ",
+ sws_format_name(srcFormat), sws_format_name(dstFormat));
+
+ if(params->cpu & SWS_CPU_CAPS_MMX2)
+ MSG_INFO("using MMX2\n");
+ else if(params->cpu & SWS_CPU_CAPS_3DNOW)
+ MSG_INFO("using 3DNOW\n");
+ else if(params->cpu & SWS_CPU_CAPS_MMX)
+ MSG_INFO("using MMX\n");
+ else if(params->cpu & SWS_CPU_CAPS_ALTIVEC)
+ MSG_INFO("using AltiVec\n");
+ else
+ MSG_INFO("using C\n");
+ }
+
+ if(params->debug & SWS_PRINT_INFO)
+ {
+ if(params->cpu & SWS_CPU_CAPS_MMX)
+ {
+ if(c->canMMX2BeUsed && (params->methodLuma.method&SWS_FAST_BILINEAR) && (params->methodChroma.method&SWS_FAST_BILINEAR))
+ MSG_V("SwScaler: using FAST_BILINEAR MMX2 scaler for horizontal scaling\n");
+ else
+ {
+ if(c->hLumFilterSize==4)
+ MSG_V("SwScaler: using 4-tap MMX scaler for horizontal luminance scaling\n");
+ else if(c->hLumFilterSize==8)
+ MSG_V("SwScaler: using 8-tap MMX scaler for horizontal luminance scaling\n");
+ else
+ MSG_V("SwScaler: using n-tap MMX scaler for horizontal luminance scaling\n");
+
+ if(c->hChrFilterSize==4)
+ MSG_V("SwScaler: using 4-tap MMX scaler for horizontal chrominance scaling\n");
+ else if(c->hChrFilterSize==8)
+ MSG_V("SwScaler: using 8-tap MMX scaler for horizontal chrominance scaling\n");
+ else
+ MSG_V("SwScaler: using n-tap MMX scaler for horizontal chrominance scaling\n");
+ }
+ }
+ else
+ {
+#if ARCH_X86_32 || ARCH_X86_64
+ MSG_V("SwScaler: using X86-Asm scaler for horizontal scaling\n");
+#else
+ if((params->methodLuma.method & SWS_FAST_BILINEAR) && (params->methodChroma.method & SWS_FAST_BILINEAR))
+ MSG_V("SwScaler: using FAST_BILINEAR C scaler for horizontal scaling\n");
+ else
+ MSG_V("SwScaler: using C scaler for horizontal scaling\n");
+#endif
+ }
+ if(isPlanarYUV(dstFormat))
+ {
+ if(c->vLumFilterSize==1)
+                MSG_V("SwScaler: using 1-tap %s \"scaler\" for vertical scaling (YV12 like)\n", (params->cpu & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
+ else
+                MSG_V("SwScaler: using n-tap %s scaler for vertical scaling (YV12 like)\n", (params->cpu & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
+ }
+ else
+ {
+ if(c->vLumFilterSize==1 && c->vChrFilterSize==2)
+ MSG_V("SwScaler: using 1-tap %s \"scaler\" for vertical luminance scaling (BGR)\n"
+                      "SwScaler: 2-tap scaler for vertical chrominance scaling (BGR)\n",(params->cpu & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
+ else if(c->vLumFilterSize==2 && c->vChrFilterSize==2)
+                MSG_V("SwScaler: using 2-tap linear %s scaler for vertical scaling (BGR)\n", (params->cpu & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
+ else
+                MSG_V("SwScaler: using n-tap %s scaler for vertical scaling (BGR)\n", (params->cpu & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
+ }
+
+ if(dstFormat==IMGFMT_BGR24)
+ MSG_V("SwScaler: using %s YV12->BGR24 Converter\n",
+                  (params->cpu & SWS_CPU_CAPS_MMX2) ? "MMX2" : ((params->cpu & SWS_CPU_CAPS_MMX) ? "MMX" : "C"));
+ else if(dstFormat==IMGFMT_BGR32)
+            MSG_V("SwScaler: using %s YV12->BGR32 Converter\n", (params->cpu & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
+ else if(dstFormat==IMGFMT_BGR16)
+            MSG_V("SwScaler: using %s YV12->BGR16 Converter\n", (params->cpu & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
+ else if(dstFormat==IMGFMT_BGR15)
+            MSG_V("SwScaler: using %s YV12->BGR15 Converter\n", (params->cpu & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
+
+ MSG_V("SwScaler: %dx%d -> %dx%d\n", srcW, srcH, dstW, dstH);
+ }
+ if(params->debug & SWS_PRINT_INFO)
+ {
+ MSG_DBG2("SwScaler:Lum srcW=%d srcH=%d dstW=%d dstH=%d xInc=%d yInc=%d\n",
+ c->srcW, c->srcH, c->dstW, c->dstH, c->lumXInc, c->lumYInc);
+ MSG_DBG2("SwScaler:Chr srcW=%d srcH=%d dstW=%d dstH=%d xInc=%d yInc=%d\n",
+ c->chrSrcW, c->chrSrcH, c->chrDstW, c->chrDstH, c->chrXInc, c->chrYInc);
+ }
+
+ c->swScale= getSwsFunc(params->cpu);
+
+ c->ret=av_malloc(threadCount*sizeof(int));
+ c->thread_count= threadCount;
+ c->execute= sws_default_execute;
+ if (threadCount>=2)
+ if(sws_thread_init(c, threadCount)==-1)
+ {
+ threadCount=1;
+ c->thread_count= threadCount;
+ c->execute= sws_default_execute;
+ }
+
+ if(threadCount>=2)
+ {
+ for (i=1; i<threadCount; i++){
+ memcpy(&(c[i]), c,sizeof(SwsContext));
+ }
+ }
+
+ // allocate pixbufs (we use dynamic allocation because otherwise we would need to
+    //Note: we need at least one pixel more at the end because of the MMX code (just in case someone wants to replace the 4000/8000)
+
+ for(i=0; i<threadCount; i++)
+ {
+ int j;
+ c[i].lumPixBuf=av_malloc(c->vLumBufSize*2*sizeof(int16_t*));
+ for(j=0; j<c->vLumBufSize; j++)
+ {
+ c[i].lumPixBuf[j]= c[i].lumPixBuf[j+c->vLumBufSize]= av_malloc(8000); /* align at 16 bytes for AltiVec */
+ memset(c[i].lumPixBuf[j], 0, 8000); //try to avoid drawing green stuff between the right end and the stride end
+ }
+ c[i].chrPixBuf=av_malloc(c->vChrBufSize*2*sizeof(int16_t*));
+ for(j=0; j<c->vChrBufSize; j++)
+ {
+ c[i].chrPixBuf[j]= c[i].chrPixBuf[j+c->vChrBufSize]= av_malloc(16000);
+ memset(c[i].chrPixBuf[j], 64, 16000); //try to avoid drawing green stuff between the right end and the stride end
+ }
+ }
+ return c;
+}
+
+/**
+ * swscale wrapper, so we don't need to export the SwsContext.
+ * assumes planar YUV is in YUV order instead of YVU (except YV16)
+ */
+int sws_scale_ordered(SwsContext *c, uint8_t* src[], stride_t srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], stride_t dstStride[]){
+ if (c->sliceDir == 0 && srcSliceY != 0 && srcSliceY + srcSliceH != c->srcH) {
+ MSG_ERR("swScaler: slices start in the middle!\n");
+ return 0;
+ }
+ if (c->sliceDir == 0) {
+ if (srcSliceY == 0) c->sliceDir = 1; else c->sliceDir = -1;
+ }
+
+ //copy strides, so they can safely be modified
+ if (c->sliceDir == 1) {
+ // slices go from top to bottom
+ stride_t srcStride2[3]= {srcStride[0], srcStride[1], srcStride[2]};
+ stride_t dstStride2[3]= {dstStride[0], dstStride[1], dstStride[2]};
+ return c->swScale(c, src, srcStride2, srcSliceY, srcSliceH, dst, dstStride2);
+ } else {
+ // slices go from bottom to top => we flip the image internally
+ uint8_t* src2[3]= {src[0] + (srcSliceH-1)*srcStride[0],
+ src[1] + ((srcSliceH>>c->chrSrcVSubSample)-1)*srcStride[1],
+ src[2] + ((srcSliceH>>c->chrSrcVSubSample)-1)*srcStride[2]
+ };
+ uint8_t* dst2[3]= {dst[0] + (c->dstH-1)*dstStride[0],
+ dst[1] + ((c->dstH>>c->chrDstVSubSample)-1)*dstStride[1],
+ dst[2] + ((c->dstH>>c->chrDstVSubSample)-1)*dstStride[2]};
+ stride_t srcStride2[3]= {-srcStride[0], -srcStride[1], -srcStride[2]};
+ stride_t dstStride2[3]= {-dstStride[0], -dstStride[1], -dstStride[2]};
+
+ return c->swScale(c, src2, srcStride2, c->srcH-srcSliceY-srcSliceH, srcSliceH, dst2, dstStride2);
+ }
+}
+
+/**
+ * swscale wrapper, so we don't need to export the SwsContext
+ */
+int sws_scale(SwsContext *c, uint8_t* srcParam[], stride_t srcStrideParam[], int srcSliceY,
+ int srcSliceH, uint8_t* dstParam[], stride_t dstStrideParam[]){
+ stride_t srcStride[3];
+ stride_t dstStride[3];
+ uint8_t *src[3];
+ uint8_t *dst[3];
+ sws_orderYUV(c->origSrcFormat, src, srcStride, srcParam, srcStrideParam);
+ sws_orderYUV(c->origDstFormat, dst, dstStride, dstParam, dstStrideParam);
+//printf("sws: slice %d %d\n", srcSliceY, srcSliceH);
+
+ return c->swScale(c, src, srcStride, srcSliceY, srcSliceH, dst, dstStride);
+}
+
+SwsFilter *sws_getDefaultFilter(float lumaGBlur, float chromaGBlur,
+ float lumaSharpen, float chromaSharpen,
+ float chromaHShift, float chromaVShift,
+ int verbose)
+{
+ SwsFilter *filter= av_malloc(sizeof(SwsFilter));
+
+ //DPRINTF(_l("sws_getDefaultFilter lumaGBlur %f, chromaGBlur %f, lumaSharpen %f, chromaSharpen %f, chromaHShift %f, chromaVShift %f"),lumaGBlur, chromaGBlur, lumaSharpen, chromaSharpen, chromaHShift, chromaVShift);
+ if(lumaGBlur!=0.0){
+ filter->lumH= sws_getGaussianVec(lumaGBlur, 3.0);
+ filter->lumV= sws_getGaussianVec(lumaGBlur, 3.0);
+ }else{
+ filter->lumH= sws_getIdentityVec();
+ filter->lumV= sws_getIdentityVec();
+ }
+
+ if(chromaGBlur!=0.0){
+ filter->chrH= sws_getGaussianVec(chromaGBlur, 3.0);
+ filter->chrV= sws_getGaussianVec(chromaGBlur, 3.0);
+ }else{
+ filter->chrH= sws_getIdentityVec();
+ filter->chrV= sws_getIdentityVec();
+ }
+
+ if(chromaSharpen!=0.0){
+ SwsVector *g= sws_getConstVec(-1.0, 3);
+ SwsVector *id= sws_getConstVec(10.0/chromaSharpen, 1);
+ g->coeff[1]=2.0;
+ sws_addVec(id, g);
+ sws_convVec(filter->chrH, id);
+ sws_convVec(filter->chrV, id);
+ sws_freeVec(g);
+ sws_freeVec(id);
+ }
+
+ if(lumaSharpen!=0.0){
+ SwsVector *g= sws_getConstVec(-1.0, 3);
+ SwsVector *id= sws_getConstVec(10.0/lumaSharpen, 1);
+ g->coeff[1]=2.0;
+ sws_addVec(id, g);
+ sws_convVec(filter->lumH, id);
+ sws_convVec(filter->lumV, id);
+ sws_freeVec(g);
+ sws_freeVec(id);
+ }
+
+ if(chromaHShift != 0.0)
+ sws_shiftVec(filter->chrH, (int)(chromaHShift+0.5));
+
+ if(chromaVShift != 0.0)
+ sws_shiftVec(filter->chrV, (int)(chromaVShift+0.5));
+
+ sws_normalizeVec(filter->chrH, 1.0);
+ sws_normalizeVec(filter->chrV, 1.0);
+ sws_normalizeVec(filter->lumH, 1.0);
+ sws_normalizeVec(filter->lumV, 1.0);
+
+ if(verbose) sws_printVec(filter->chrH);
+ if(verbose) sws_printVec(filter->lumH);
+
+ return filter;
+}
+
+/**
+ * returns a normalized Gaussian curve used for filtering
+ * quality=3 is high quality, lower is lower quality
+ */
+SwsVector *sws_getGaussianVec(double variance, double quality){
+ const int length= (int)(variance*quality + 0.5) | 1;
+ int i;
+ double *coeff= av_malloc(length*sizeof(double));
+ double middle= (length-1)*0.5;
+ SwsVector *vec= av_malloc(sizeof(SwsVector));
+
+ vec->coeff= coeff;
+ vec->length= length;
+
+ for(i=0; i<length; i++)
+ {
+ double dist= i-middle;
+ coeff[i]= exp( -dist*dist/(2*variance*variance) ) / sqrt(2*variance*PI);
+ }
+
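+    // the constant factor in front of the exponential does not matter here because
+    // sws_normalizeVec() rescales the curve to sum to 1.0 anyway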
+ sws_normalizeVec(vec, 1.0);
+
+ return vec;
+}
+
+SwsVector *sws_getConstVec(double c, int length){
+ int i;
+ double *coeff= av_malloc(length*sizeof(double));
+ SwsVector *vec= av_malloc(sizeof(SwsVector));
+
+ vec->coeff= coeff;
+ vec->length= length;
+
+ for(i=0; i<length; i++)
+ coeff[i]= c;
+
+ return vec;
+}
+
+
+SwsVector *sws_getIdentityVec(void){
+ return sws_getConstVec(1.0, 1);
+}
+
+void sws_scaleVec(SwsVector *a, double scalar){
+ int i;
+
+ for(i=0; i<a->length; i++)
+ a->coeff[i]*= scalar;
+}
+
+void sws_normalizeVec(SwsVector *a, double height){
+ int i;
+ double sum=0;
+ double inv;
+
+ for(i=0; i<a->length; i++)
+ sum+= a->coeff[i];
+
+ inv= height/sum;
+
+ for(i=0; i<a->length; i++)
+ a->coeff[i]*= inv;
+}
+
+static SwsVector *sws_getConvVec(SwsVector *a, SwsVector *b){
+ int length= a->length + b->length - 1;
+ double *coeff= av_malloc(length*sizeof(double));
+ int i, j;
+ SwsVector *vec= av_malloc(sizeof(SwsVector));
+
+ vec->coeff= coeff;
+ vec->length= length;
+
+ for (i=0; i<length; i++) coeff[i]= 0.0;
+
+ for (i=0; i<a->length; i++)
+ {
+ for (j=0; j<b->length; j++)
+ {
+ coeff[i+j]+= a->coeff[i]*b->coeff[j];
+ }
+ }
+
+ return vec;
+}
+
+static SwsVector *sws_sumVec(SwsVector *a, SwsVector *b){
+ int length= FFMAX(a->length, b->length);
+ double *coeff= av_malloc(length*sizeof(double));
+ int i;
+ SwsVector *vec= av_malloc(sizeof(SwsVector));
+
+ vec->coeff= coeff;
+ vec->length= length;
+
+ for (i=0; i<length; i++) coeff[i]= 0.0;
+
+ for (i=0; i<a->length; i++) coeff[i + (length-1)/2 - (a->length-1)/2]+= a->coeff[i];
+ for (i=0; i<b->length; i++) coeff[i + (length-1)/2 - (b->length-1)/2]+= b->coeff[i];
+
+ return vec;
+}
+
+static SwsVector *sws_diffVec(SwsVector *a, SwsVector *b){
+ int length= FFMAX(a->length, b->length);
+ double *coeff= av_malloc(length*sizeof(double));
+ int i;
+ SwsVector *vec= av_malloc(sizeof(SwsVector));
+
+ vec->coeff= coeff;
+ vec->length= length;
+
+ for (i=0; i<length; i++) coeff[i]= 0.0;
+
+ for (i=0; i<a->length; i++) coeff[i + (length-1)/2 - (a->length-1)/2]+= a->coeff[i];
+ for (i=0; i<b->length; i++) coeff[i + (length-1)/2 - (b->length-1)/2]-= b->coeff[i];
+
+ return vec;
+}
+
+/* Shift the vector left, or right if "shift" is negative. */
+static SwsVector *sws_getShiftedVec(SwsVector *a, int shift){
+ int length= a->length + FFABS(shift)*2;
+ double *coeff= av_malloc(length*sizeof(double));
+ int i;
+ SwsVector *vec= av_malloc(sizeof(SwsVector));
+
+ vec->coeff= coeff;
+ vec->length= length;
+
+ for (i=0; i<length; i++) coeff[i]= 0.0;
+
+ for (i=0; i<a->length; i++)
+ {
+ coeff[i + (length-1)/2 - (a->length-1)/2 - shift]= a->coeff[i];
+ }
+
+ return vec;
+}
+
+void sws_shiftVec(SwsVector *a, int shift){
+ SwsVector *shifted= sws_getShiftedVec(a, shift);
+ av_free(a->coeff);
+ a->coeff= shifted->coeff;
+ a->length= shifted->length;
+ av_free(shifted);
+}
+
+void sws_addVec(SwsVector *a, SwsVector *b){
+ SwsVector *sum= sws_sumVec(a, b);
+ av_free(a->coeff);
+ a->coeff= sum->coeff;
+ a->length= sum->length;
+ av_free(sum);
+}
+
+void sws_subVec(SwsVector *a, SwsVector *b){
+ SwsVector *diff= sws_diffVec(a, b);
+ av_free(a->coeff);
+ a->coeff= diff->coeff;
+ a->length= diff->length;
+ av_free(diff);
+}
+
+void sws_convVec(SwsVector *a, SwsVector *b){
+ SwsVector *conv= sws_getConvVec(a, b);
+ av_free(a->coeff);
+ a->coeff= conv->coeff;
+ a->length= conv->length;
+ av_free(conv);
+}
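
To make the vector algebra concrete (an illustrative sketch, not part of the patch): the sharpen branches in sws_getDefaultFilter() combine these helpers into an unsharp-mask style kernel; the same construction, written standalone with the helpers in this file, looks like the following. The amount value is an arbitrary example and must be non-zero.

    /* Illustrative: build a sharpening kernel the same way
     * sws_getDefaultFilter() does for lumaSharpen/chromaSharpen. */
    static SwsVector *make_sharpen_vec(double amount)
    {
        SwsVector *g  = sws_getConstVec(-1.0, 3);           /* [-1, -1, -1]    */
        SwsVector *id = sws_getConstVec(10.0 / amount, 1);  /* scaled identity */
        g->coeff[1] = 2.0;                                  /* -> [-1, 2, -1]  */
        sws_addVec(id, g);                                  /* centred sum     */
        sws_normalizeVec(id, 1.0);                          /* taps sum to 1   */
        sws_freeVec(g);
        return id;                                          /* free with sws_freeVec() */
    }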
+
+SwsVector *sws_cloneVec(SwsVector *a){
+ double *coeff= av_malloc(a->length*sizeof(double));
+ int i;
+ SwsVector *vec= av_malloc(sizeof(SwsVector));
+
+ vec->coeff= coeff;
+ vec->length= a->length;
+
+ for (i=0; i<a->length; i++) coeff[i]= a->coeff[i];
+
+ return vec;
+}
+
+void sws_printVec(SwsVector *a){
+ int i;
+ double max=0;
+ double min=0;
+ double range;
+
+ for(i=0; i<a->length; i++)
+ if(a->coeff[i]>max) max= a->coeff[i];
+
+ for(i=0; i<a->length; i++)
+ if(a->coeff[i]<min) min= a->coeff[i];
+
+ range= max - min;
+
+ for(i=0; i<a->length; i++)
+ {
+ int x= (int)((a->coeff[i]-min)*60.0/range +0.5);
+ MSG_DBG2("%1.3f ", a->coeff[i]);
+ for(;x>0; x--) MSG_DBG2(" ");
+ MSG_DBG2("|\n");
+ }
+}
+
+void sws_freeVec(SwsVector *a){
+ if (!a) return;
+ av_freep(&a->coeff);
+ a->length=0;
+ av_free(a);
+}
+
+void sws_freeFilter(SwsFilter *filter){
+ if (!filter) return;
+
+ if (filter->lumH) sws_freeVec(filter->lumH);
+ if (filter->lumV) sws_freeVec(filter->lumV);
+ if (filter->chrH) sws_freeVec(filter->chrH);
+ if (filter->chrV) sws_freeVec(filter->chrV);
+ av_free(filter);
+}
+
+
+void sws_freeContext(SwsContext *c){
+ int i;
+ if(!c) return;
+
+ if (c->thread_count>1)
+ sws_thread_free(c);
+ for(i=0; i<c->thread_count; i++)
+ {
+ int j;
+ if (c[i].lumPixBuf)
+ {
+ for(j=0; j<c->vLumBufSize; j++)
+ {
+ av_free(c[i].lumPixBuf[j]);
+ c[i].lumPixBuf[j]=NULL;
+ }
+ av_free(c[i].lumPixBuf);
+ c[i].lumPixBuf=NULL;
+ }
+ if (c[i].chrPixBuf)
+ {
+ for(j=0; j<c->vChrBufSize; j++)
+ {
+ av_free(c[i].chrPixBuf[j]);
+ c[i].chrPixBuf[j]=NULL;
+ }
+ av_free(c[i].chrPixBuf);
+ c[i].chrPixBuf=NULL;
+ }
+ }
+
+ av_free(c->vLumFilter);
+ c->vLumFilter = NULL;
+ av_free(c->vChrFilter);
+ c->vChrFilter = NULL;
+ av_free(c->hLumFilter);
+ c->hLumFilter = NULL;
+ av_free(c->hChrFilter);
+ c->hChrFilter = NULL;
+#ifdef HAVE_ALTIVEC
+ av_free(c->vYCoeffsBank);
+ c->vYCoeffsBank = NULL;
+ av_free(c->vCCoeffsBank);
+ c->vCCoeffsBank = NULL;
+#endif
+
+ av_free(c->vLumFilterPos);
+ c->vLumFilterPos = NULL;
+ av_free(c->vChrFilterPos);
+ c->vChrFilterPos = NULL;
+ av_free(c->hLumFilterPos);
+ c->hLumFilterPos = NULL;
+ av_free(c->hChrFilterPos);
+ c->hChrFilterPos = NULL;
+
+#if ARCH_X86_32 || ARCH_X86_64
+#ifdef HAVE_SYS_MMAN_H
+ if(c->funnyYCode) munmap(c->funnyYCode, MAX_FUNNY_CODE_SIZE);
+ if(c->funnyUVCode) munmap(c->funnyUVCode, MAX_FUNNY_CODE_SIZE);
+#else
+ if(c->funnyYCode) /*free*/VirtualFree(c->funnyYCode,0,MEM_RELEASE);
+ if(c->funnyUVCode) /*free*/VirtualFree(c->funnyUVCode,0,MEM_RELEASE);
+#endif
+ c->funnyYCode=NULL;
+ c->funnyUVCode=NULL;
+#endif /* ARCH_X86_32 || ARCH_X86_64 */
+
+ av_free(c->lumMmx2Filter);
+ c->lumMmx2Filter=NULL;
+ av_free(c->chrMmx2Filter);
+ c->chrMmx2Filter=NULL;
+ av_free(c->lumMmx2FilterPos);
+ c->lumMmx2FilterPos=NULL;
+ av_free(c->chrMmx2FilterPos);
+ c->chrMmx2FilterPos=NULL;
+ av_free(c->yuvTable);
+ c->yuvTable=NULL;
+
+ av_free(c);
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/swscale.h b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/swscale.h
new file mode 100644
index 000000000..3e5217632
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/swscale.h
@@ -0,0 +1,147 @@
+/*
+ Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+#ifndef SWSCALE_H
+#define SWSCALE_H
+
+/**
+ * @file swscale.h
+ * @brief
+ * external API for libswscale
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "ffImgfmt.h"
+
+/* values for the flags; the values used on the command line are different */
+#define SWS_MAX_REDUCE_CUTOFF 0.002
+
+#define SWS_CS_ITU709 1
+#define SWS_CS_FCC 4
+#define SWS_CS_ITU601 5
+#define SWS_CS_ITU624 5
+#define SWS_CS_SMPTE170M 5
+#define SWS_CS_SMPTE240M 7
+#define SWS_CS_DEFAULT 5
+
+// when used for filters, they must have an odd number of elements
+// coeffs cannot be shared between vectors
+typedef struct SwsVector {
+ double *coeff;
+ int length;
+} SwsVector;
+
+// vectors can be shared
+typedef struct SwsFilter {
+ SwsVector *lumH;
+ SwsVector *lumV;
+ SwsVector *chrH;
+ SwsVector *chrV;
+} SwsFilter;
+
+typedef struct SwsMethodParams {
+ int method;
+ #define SWS_FAST_BILINEAR 1
+ #define SWS_BILINEAR 2
+ #define SWS_BICUBIC 4
+ #define SWS_X 8
+ #define SWS_POINT 0x10
+ #define SWS_AREA 0x20
+ //#define SWS_BICUBLIN 0x40
+ #define SWS_GAUSS 0x80
+ #define SWS_SINC 0x100
+ #define SWS_LANCZOS 0x200
+ #define SWS_SPLINE 0x400
+
+ int param;
+} SwsMethodParams;
+
+typedef struct SwsParams {
+ int cpu;
+ #define SWS_CPU_CAPS_MMX 0x80000000
+ #define SWS_CPU_CAPS_MMX2 0x20000000
+ #define SWS_CPU_CAPS_3DNOW 0x40000000
+ #define SWS_CPU_CAPS_ALTIVEC 0x10000000
+
+ int debug;
+ #define SWS_PRINT_INFO 0x1000
+
+ int subsampling;
+    //the following 3 flags are not completely implemented
+    //internal chrominance subsampling info
+ #define SWS_FULL_CHR_H_INT 0x2000
+ //input subsampling info
+ #define SWS_FULL_CHR_H_INP 0x4000
+ #define SWS_DIRECT_BGR 0x8000
+ #define SWS_ACCURATE_RND 0x40000
+ int v_chr_drop;
+
+ SwsMethodParams methodLuma,methodChroma;
+} SwsParams;
+
+struct SwsContext;
+
+void sws_freeContext(struct SwsContext *swsContext);
+
+struct SwsContext *sws_getContext(int srcW, int srcH, int srcFormat, int dstW, int dstH, int dstFormat, SwsParams *params,
+ SwsFilter *srcFilter, SwsFilter *dstFilter, const int32_t yuv2rgbTable[6]);
+struct SwsContext *sws_getContextEx(int srcW, int srcH, int srcFormat, int dstW, int dstH, int dstFormat, SwsParams *params,
+ SwsFilter *srcFilter, SwsFilter *dstFilter, const int32_t yuv2rgbTable[6],int threadCount);
+int sws_scale(struct SwsContext *context, uint8_t* src[], stride_t srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], stride_t dstStride[]);
+int sws_scale_ordered(struct SwsContext *context, uint8_t* src[], stride_t srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], stride_t dstStride[]);
+
+
+int sws_setColorspaceDetails(struct SwsContext *c, const int inv_table[7], int srcRange, const int table[7], int dstRange, int brightness, int contrast, int saturation);
+int sws_getColorspaceDetails(struct SwsContext *c, int **inv_table, int *srcRange, int **table, int *dstRange, int *brightness, int *contrast, int *saturation);
+SwsVector *sws_getGaussianVec(double variance, double quality);
+SwsVector *sws_getConstVec(double c, int length);
+SwsVector *sws_getIdentityVec(void);
+void sws_scaleVec(SwsVector *a, double scalar);
+void sws_normalizeVec(SwsVector *a, double height);
+void sws_convVec(SwsVector *a, SwsVector *b);
+void sws_addVec(SwsVector *a, SwsVector *b);
+void sws_subVec(SwsVector *a, SwsVector *b);
+void sws_shiftVec(SwsVector *a, int shift);
+SwsVector *sws_cloneVec(SwsVector *a);
+
+void sws_printVec(SwsVector *a);
+void sws_freeVec(SwsVector *a);
+
+SwsFilter *sws_getDefaultFilter(float lumaGBlur, float chromaGBlur,
+                                float lumaSharpen, float chromaSharpen,
+ float chromaHShift, float chromaVShift,
+ int verbose);
+void sws_freeFilter(SwsFilter *filter);
+
+int sws_thread_init(struct SwsContext *s, int thread_count);
+void sws_thread_free(struct SwsContext *s);
+int sws_thread_execute(struct SwsContext *s, int (*func)(struct SwsContext *c2), int *ret, int count);
+int sws_default_execute(struct SwsContext *c, int (*func)(struct SwsContext *c2), int *ret, int count);
+int GetCPUCount(void);
+int isP4HT (void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
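
A hedged usage sketch of the public API declared above (not part of the patch). This header is the ffdshow-derived variant: sws_getContext() takes an SwsParams block rather than a flags word, and sws_scale() takes stride_t strides. The stride_t type and the IMGFMT_* constants are assumed to come from the included headers (ffImgfmt.h), and the zeroed SwsParams, the bilinear method choice and the NULL yuv2rgbTable are illustrative assumptions only.

    #include <string.h>
    #include <stdint.h>
    #include "swscale.h"

    /* Illustrative only: scale a YV12 frame to BGR32 with bilinear scaling. */
    static int example_scale(uint8_t *src[], stride_t srcStride[],
                             uint8_t *dst[], stride_t dstStride[],
                             int w, int h)
    {
        SwsParams p;
        struct SwsContext *ctx;

        memset(&p, 0, sizeof(p));               /* no SIMD caps, no debug flags */
        p.methodLuma.method   = SWS_BILINEAR;
        p.methodChroma.method = SWS_BILINEAR;

        ctx = sws_getContext(w, h, IMGFMT_YV12,
                             w, h, IMGFMT_BGR32,
                             &p, NULL, NULL, NULL /* yuv2rgbTable */);
        if (!ctx)
            return -1;
        sws_scale(ctx, src, srcStride, 0, h, dst, dstStride);
        sws_freeContext(ctx);
        return 0;
    }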
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/swscale_internal.h b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/swscale_internal.h
new file mode 100644
index 000000000..bd2d5c00f
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/swscale_internal.h
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * Modified to support multi-thread related features
+ * by Haruhiko Yamagata <h.yamagata@nifty.com> in 2006.
+ */
+
+#ifndef SWSCALE_SWSCALE_INTERNAL_H
+#define SWSCALE_SWSCALE_INTERNAL_H
+
+#include "../ffmpeg/mp_msg.h"
+
+#ifdef __GNUC__
+ #define MSG_WARN(args...) mp_msg(MSGT_SWS,MSGL_WARN, ##args )
+ #define MSG_FATAL(args...) mp_msg(MSGT_SWS,MSGL_FATAL, ##args )
+ #define MSG_ERR(args...) mp_msg(MSGT_SWS,MSGL_ERR, ##args )
+ #define MSG_V(args...) mp_msg(MSGT_SWS,MSGL_V, ##args )
+ #define MSG_DBG2(args...) mp_msg(MSGT_SWS,MSGL_DBG2, ##args )
+ #define MSG_INFO(args...) mp_msg(MSGT_SWS,MSGL_INFO, ##args )
+ #define __align32(t,v) t v __attribute__ ((aligned (32)))
+#else
+ #define MSG_WARN(args)
+ #define MSG_FATAL(args)
+ #define MSG_ERR(args)
+ #define MSG_V(args)
+ #define MSG_DBG2(args)
+ #define MSG_INFO(args)
+ #define __align32(t,v) __declspec(align(32)) t v
+#endif
+
+#define AV_TOSTRING(s) #s
+#define STR(s) AV_TOSTRING(s) //AV_STRINGIFY is too long
+
+#define MAX_FILTER_SIZE 256
+
+#if ARCH_X86_64
+# define APCK_PTR2 8
+# define APCK_COEF 16
+# define APCK_SIZE 24
+#else
+# define APCK_PTR2 4
+# define APCK_COEF 8
+# define APCK_SIZE 16
+#endif
+
+typedef int (*SwsFunc)(struct SwsContext *context, uint8_t* src[], stride_t srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], stride_t dstStride[]);
+
+struct SwsContext;
+
+typedef struct SwsThreadParam{
+ struct SwsContext *c;
+ uint8_t **src;
+ stride_t* srcStride;
+ int srcSliceY;
+ int srcSliceH;
+ uint8_t **dst;
+ stride_t* dstStride;
+ int dstYstart;
+ int dstYend;
+} SwsThreadParam;
+
+/* This struct should be aligned on at least a 32-byte boundary. */
+typedef struct SwsContext{
+ /**
+ *
+ * Note that src, dst, srcStride and dstStride will be copied in the sws_scale() wrapper, so they can freely be modified here
+ */
+ SwsFunc swScale;
+ int srcW, srcH, dstH;
+ int chrSrcW, chrSrcH, chrDstW, chrDstH;
+ int lumXInc, chrXInc;
+ int lumYInc, chrYInc;
+    int dstFormat, srcFormat;               ///< format 4:2:0 type is always YV12
+ int origDstFormat, origSrcFormat; ///< format
+ int chrSrcHSubSample, chrSrcVSubSample;
+ //int chrIntHSubSample, chrIntVSubSample;
+ int chrDstHSubSample, chrDstVSubSample;
+ int vChrDrop;
+ int sliceDir;
+
+ void *thread_opaque;
+ int thread_count;
+ int (*execute)(struct SwsContext *c, int (*func)(struct SwsContext *c), int *ret, int count);
+ int *ret;
+ SwsThreadParam stp;
+ int16_t **lumPixBuf;
+ int16_t **chrPixBuf;
+ int16_t *hLumFilter;
+ int16_t *hLumFilterPos;
+ int16_t *hChrFilter;
+ int16_t *hChrFilterPos;
+ int16_t *vLumFilter;
+ int16_t *vLumFilterPos;
+ int16_t *vChrFilter;
+ int16_t *vChrFilterPos;
+
+    __align32(uint8_t,formatConvBuffer[8000]); //FIXME dynamic alloc, but we have to change a lot of code for this to be useful
+
+ int hLumFilterSize;
+ int hChrFilterSize;
+ int vLumFilterSize;
+ int vChrFilterSize;
+ int vLumBufSize;
+ int vChrBufSize;
+
+ uint8_t *funnyYCode;
+ uint8_t *funnyUVCode;
+ int32_t *lumMmx2FilterPos;
+ int32_t *chrMmx2FilterPos;
+ int16_t *lumMmx2Filter;
+ int16_t *chrMmx2Filter;
+
+ int canMMX2BeUsed;
+
+ int lastInLumBuf;
+ int lastInChrBuf;
+ int lumBufIndex;
+ int chrBufIndex;
+ int dstY;
+ SwsParams params;
+ void * yuvTable; // pointer to the yuv->rgb table start so it can be freed()
+ unsigned char * table_rV[256];
+ unsigned char * table_gU[256];
+ int table_gV[256];
+ unsigned char * table_bU[256];
+
+ //Colorspace stuff
+ int contrast, brightness, saturation; // for sws_getColorspaceDetails
+ int srcColorspaceTable[7];
+ int dstColorspaceTable[7];
+ int srcRange, dstRange;
+
+#define RED_DITHER "0*8"
+#define GREEN_DITHER "1*8"
+#define BLUE_DITHER "2*8"
+#define Y_COEFF "3*8"
+#define VR_COEFF "4*8"
+#define UB_COEFF "5*8"
+#define VG_COEFF "6*8"
+#define UG_COEFF "7*8"
+#define Y_OFFSET "8*8"
+#define U_OFFSET "9*8"
+#define V_OFFSET "10*8"
+#define LUM_MMX_FILTER_OFFSET "11*8"
+#define CHR_MMX_FILTER_OFFSET "11*8+4*4*256"
+#define DSTW_OFFSET "11*8+4*4*256*2" //do not change, it is hardcoded in the ASM
+#define ESP_OFFSET "11*8+4*4*256*2+8"
+#define VROUNDER_OFFSET "11*8+4*4*256*2+16"
+#define U_TEMP "11*8+4*4*256*2+24"
+#define V_TEMP "11*8+4*4*256*2+32"
+
+ uint64_t redDither __attribute__((aligned(8)));
+ uint64_t greenDither __attribute__((aligned(8)));
+ uint64_t blueDither __attribute__((aligned(8)));
+
+ uint64_t yCoeff __attribute__((aligned(8)));
+ uint64_t vrCoeff __attribute__((aligned(8)));
+ uint64_t ubCoeff __attribute__((aligned(8)));
+ uint64_t vgCoeff __attribute__((aligned(8)));
+ uint64_t ugCoeff __attribute__((aligned(8)));
+ uint64_t yOffset __attribute__((aligned(8)));
+ uint64_t uOffset __attribute__((aligned(8)));
+ uint64_t vOffset __attribute__((aligned(8)));
+ int32_t lumMmxFilter[4*MAX_FILTER_SIZE];
+ int32_t chrMmxFilter[4*MAX_FILTER_SIZE];
+ int dstW;
+ uint64_t esp __attribute__((aligned(8)));
+ uint64_t vRounder __attribute__((aligned(8)));
+ uint64_t u_temp __attribute__((aligned(8)));
+ uint64_t v_temp __attribute__((aligned(8)));
+
+} SwsContext;
+
+//FIXME check init (where 0)
+
+SwsFunc yuv2rgb_get_func_ptr (SwsContext *c);
+int yuv2rgb_c_init_tables (SwsContext *c, const int inv_table[7], int fullRange, int brightness, int contrast, int saturation);
+
+char *sws_format_name(int format);
+
+#endif /* SWSCALE_SWSCALE_INTERNAL_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/swscale_template.c b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/swscale_template.c
new file mode 100644
index 000000000..ca8bcc475
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/swscale_template.c
@@ -0,0 +1,3290 @@
+/*
+ * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * The C code (not assembly, MMX, ...) of this file can be used
+ * under the LGPL license.
+ */
+
+/*
+ * Modified to support multi-thread related features
+ * by Haruhiko Yamagata <h.yamagata@nifty.com> in 2006.
+ */
+
+#include "asmalign.h"
+
+#undef REAL_MOVNTQ
+#undef MOVNTQ
+#undef PAVGB
+#undef PREFETCH
+#undef PREFETCHW
+#undef EMMS
+#undef SFENCE
+
+#if HAVE_AMD3DNOW
+/* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
+#define EMMS "femms"
+#else
+#define EMMS "emms"
+#endif
+
+#if HAVE_AMD3DNOW
+#define PREFETCH "prefetch"
+#define PREFETCHW "prefetchw"
+#elif HAVE_MMX2
+#define PREFETCH "prefetchnta"
+#define PREFETCHW "prefetcht0"
+#else
+#define PREFETCH " # nop"
+#define PREFETCHW " # nop"
+#endif
+
+#if HAVE_MMX2
+#define SFENCE "sfence"
+#else
+#define SFENCE " # nop"
+#endif
+
+#if HAVE_MMX2
+#define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
+#elif HAVE_AMD3DNOW
+#define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
+#endif
+
+#if HAVE_MMX2
+#define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
+#else
+#define REAL_MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
+#endif
+#define MOVNTQ(a,b) REAL_MOVNTQ(a,b)
+
+#ifdef HAVE_ALTIVEC
+#include "swscale_altivec_template.c"
+#endif
+
+#define YSCALEYUV2YV12X(x, offset, dest, width) \
+ asm volatile(\
+ "xor %%"REG_a", %%"REG_a" \n\t"\
+ "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
+ "movq %%mm3, %%mm4 \n\t"\
+ "lea " offset "(%0), %%"REG_d" \n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ ASMALIGN16 /* FIXME Unroll? */\
+ "1: \n\t"\
+ "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
+ "movq " #x "(%%"REG_S", %%"REG_a", 2), %%mm2\n\t" /* srcData */\
+ "movq 8+" #x "(%%"REG_S", %%"REG_a", 2), %%mm5\n\t" /* srcData */\
+ "add $16, %%"REG_d" \n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ "test %%"REG_S", %%"REG_S" \n\t"\
+ "pmulhw %%mm0, %%mm2 \n\t"\
+ "pmulhw %%mm0, %%mm5 \n\t"\
+ "paddw %%mm2, %%mm3 \n\t"\
+ "paddw %%mm5, %%mm4 \n\t"\
+ " jnz 1b \n\t"\
+ "psraw $3, %%mm3 \n\t"\
+ "psraw $3, %%mm4 \n\t"\
+ "packuswb %%mm4, %%mm3 \n\t"\
+ MOVNTQ(%%mm3, (%1, %%REGa))\
+ "add $8, %%"REG_a" \n\t"\
+ "cmp %2, %%"REG_a" \n\t"\
+ "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
+ "movq %%mm3, %%mm4 \n\t"\
+ "lea " offset "(%0), %%"REG_d" \n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ "jb 1b \n\t"\
+ :: "r" (&c->redDither),\
+ "r" (dest), "p" ((stride_t)width)\
+ : "%"REG_a, "%"REG_d, "%"REG_S\
+ );
+
+#define YSCALEYUV2YV12X_ACCURATE(x, offset, dest, width) \
+ asm volatile(\
+ "lea " offset "(%0), %%"REG_d" \n\t"\
+ "xor %%"REG_a", %%"REG_a" \n\t"\
+ "pxor %%mm4, %%mm4 \n\t"\
+ "pxor %%mm5, %%mm5 \n\t"\
+ "pxor %%mm6, %%mm6 \n\t"\
+ "pxor %%mm7, %%mm7 \n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ ASMALIGN16 \
+ "1: \n\t"\
+ "movq " #x "(%%"REG_S", %%"REG_a", 2), %%mm0\n\t" /* srcData */\
+ "movq 8+" #x "(%%"REG_S", %%"REG_a", 2), %%mm2\n\t" /* srcData */\
+ "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\
+ "movq " #x "(%%"REG_S", %%"REG_a", 2), %%mm1\n\t" /* srcData */\
+ "movq %%mm0, %%mm3 \n\t"\
+ "punpcklwd %%mm1, %%mm0 \n\t"\
+ "punpckhwd %%mm1, %%mm3 \n\t"\
+ "movq "STR(APCK_COEF)"(%%"REG_d"), %%mm1 \n\t" /* filterCoeff */\
+ "pmaddwd %%mm1, %%mm0 \n\t"\
+ "pmaddwd %%mm1, %%mm3 \n\t"\
+ "paddd %%mm0, %%mm4 \n\t"\
+ "paddd %%mm3, %%mm5 \n\t"\
+ "movq 8+" #x "(%%"REG_S", %%"REG_a", 2), %%mm3\n\t" /* srcData */\
+ "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\
+ "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
+ "test %%"REG_S", %%"REG_S" \n\t"\
+ "movq %%mm2, %%mm0 \n\t"\
+ "punpcklwd %%mm3, %%mm2 \n\t"\
+ "punpckhwd %%mm3, %%mm0 \n\t"\
+ "pmaddwd %%mm1, %%mm2 \n\t"\
+ "pmaddwd %%mm1, %%mm0 \n\t"\
+ "paddd %%mm2, %%mm6 \n\t"\
+ "paddd %%mm0, %%mm7 \n\t"\
+ " jnz 1b \n\t"\
+ "psrad $16, %%mm4 \n\t"\
+ "psrad $16, %%mm5 \n\t"\
+ "psrad $16, %%mm6 \n\t"\
+ "psrad $16, %%mm7 \n\t"\
+ "movq "VROUNDER_OFFSET"(%0), %%mm0\n\t"\
+ "packssdw %%mm5, %%mm4 \n\t"\
+ "packssdw %%mm7, %%mm6 \n\t"\
+ "paddw %%mm0, %%mm4 \n\t"\
+ "paddw %%mm0, %%mm6 \n\t"\
+ "psraw $3, %%mm4 \n\t"\
+ "psraw $3, %%mm6 \n\t"\
+ "packuswb %%mm6, %%mm4 \n\t"\
+ MOVNTQ(%%mm4, (%1, %%REGa))\
+ "add $8, %%"REG_a" \n\t"\
+ "cmp %2, %%"REG_a" \n\t"\
+ "lea " offset "(%0), %%"REG_d" \n\t"\
+ "pxor %%mm4, %%mm4 \n\t"\
+ "pxor %%mm5, %%mm5 \n\t"\
+ "pxor %%mm6, %%mm6 \n\t"\
+ "pxor %%mm7, %%mm7 \n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ "jb 1b \n\t"\
+ :: "r" (&c->redDither),\
+ "r" (dest), "p" ((stride_t)width)\
+ : "%"REG_a, "%"REG_d, "%"REG_S\
+ );
+
+#define YSCALEYUV2YV121 \
+ "mov %2, %%"REG_a" \n\t"\
+ ASMALIGN16 /* FIXME Unroll? */\
+ "1: \n\t"\
+ "movq (%0, %%"REG_a", 2), %%mm0 \n\t"\
+ "movq 8(%0, %%"REG_a", 2), %%mm1\n\t"\
+ "psraw $7, %%mm0 \n\t"\
+ "psraw $7, %%mm1 \n\t"\
+ "packuswb %%mm1, %%mm0 \n\t"\
+ MOVNTQ(%%mm0, (%1, %%REGa))\
+ "add $8, %%"REG_a" \n\t"\
+ "jnc 1b \n\t"
+
+/*
+ :: "m" (-lumFilterSize), "m" (-chrFilterSize),
+ "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
+ "r" (dest), "m" (dstW),
+ "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
+ : "%eax", "%ebx", "%ecx", "%edx", "%esi"
+*/
+#define YSCALEYUV2PACKEDX \
+ asm volatile(\
+ "xor %%"REG_a", %%"REG_a" \n\t"\
+ ASMALIGN16\
+ "nop \n\t"\
+ "1: \n\t"\
+ "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
+ "movq %%mm3, %%mm4 \n\t"\
+ ASMALIGN16\
+ "2: \n\t"\
+ "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
+ "movq (%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* UsrcData */\
+ "movq 4096(%%"REG_S", %%"REG_a"), %%mm5 \n\t" /* VsrcData */\
+ "add $16, %%"REG_d" \n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ "pmulhw %%mm0, %%mm2 \n\t"\
+ "pmulhw %%mm0, %%mm5 \n\t"\
+ "paddw %%mm2, %%mm3 \n\t"\
+ "paddw %%mm5, %%mm4 \n\t"\
+ "test %%"REG_S", %%"REG_S" \n\t"\
+ " jnz 2b \n\t"\
+\
+ "lea "LUM_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ "movq "VROUNDER_OFFSET"(%0), %%mm1\n\t"\
+ "movq %%mm1, %%mm7 \n\t"\
+ ASMALIGN16\
+ "2: \n\t"\
+ "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
+ "movq (%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y1srcData */\
+ "movq 8(%%"REG_S", %%"REG_a", 2), %%mm5 \n\t" /* Y2srcData */\
+ "add $16, %%"REG_d" \n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ "pmulhw %%mm0, %%mm2 \n\t"\
+ "pmulhw %%mm0, %%mm5 \n\t"\
+ "paddw %%mm2, %%mm1 \n\t"\
+ "paddw %%mm5, %%mm7 \n\t"\
+ "test %%"REG_S", %%"REG_S" \n\t"\
+ " jnz 2b \n\t"\
+
+#define YSCALEYUV2PACKEDX_END\
+ :: "r" (&c->redDither), \
+ "m" (dummy), "m" (dummy), "m" (dummy),\
+ "r" (dest), "m" (dstW)\
+ : "%"REG_a, "%"REG_d, "%"REG_S\
+ );
+
+#define YSCALEYUV2PACKEDX_ACCURATE \
+ asm volatile(\
+ "xor %%"REG_a", %%"REG_a" \n\t"\
+ ASMALIGN16\
+ "nop \n\t"\
+ "1: \n\t"\
+ "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ "pxor %%mm4, %%mm4 \n\t"\
+ "pxor %%mm5, %%mm5 \n\t"\
+ "pxor %%mm6, %%mm6 \n\t"\
+ "pxor %%mm7, %%mm7 \n\t"\
+ ASMALIGN16\
+ "2: \n\t"\
+ "movq (%%"REG_S", %%"REG_a"), %%mm0 \n\t" /* UsrcData */\
+ "movq 4096(%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* VsrcData */\
+ "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\
+ "movq (%%"REG_S", %%"REG_a"), %%mm1 \n\t" /* UsrcData */\
+ "movq %%mm0, %%mm3 \n\t"\
+ "punpcklwd %%mm1, %%mm0 \n\t"\
+ "punpckhwd %%mm1, %%mm3 \n\t"\
+ "movq "STR(APCK_COEF)"(%%"REG_d"), %%mm1 \n\t" /* filterCoeff */\
+ "pmaddwd %%mm1, %%mm0 \n\t"\
+ "pmaddwd %%mm1, %%mm3 \n\t"\
+ "paddd %%mm0, %%mm4 \n\t"\
+ "paddd %%mm3, %%mm5 \n\t"\
+ "movq 4096(%%"REG_S", %%"REG_a"), %%mm3 \n\t" /* VsrcData */\
+ "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\
+ "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
+ "test %%"REG_S", %%"REG_S" \n\t"\
+ "movq %%mm2, %%mm0 \n\t"\
+ "punpcklwd %%mm3, %%mm2 \n\t"\
+ "punpckhwd %%mm3, %%mm0 \n\t"\
+ "pmaddwd %%mm1, %%mm2 \n\t"\
+ "pmaddwd %%mm1, %%mm0 \n\t"\
+ "paddd %%mm2, %%mm6 \n\t"\
+ "paddd %%mm0, %%mm7 \n\t"\
+ " jnz 2b \n\t"\
+ "psrad $16, %%mm4 \n\t"\
+ "psrad $16, %%mm5 \n\t"\
+ "psrad $16, %%mm6 \n\t"\
+ "psrad $16, %%mm7 \n\t"\
+ "movq "VROUNDER_OFFSET"(%0), %%mm0\n\t"\
+ "packssdw %%mm5, %%mm4 \n\t"\
+ "packssdw %%mm7, %%mm6 \n\t"\
+ "paddw %%mm0, %%mm4 \n\t"\
+ "paddw %%mm0, %%mm6 \n\t"\
+ "movq %%mm4, "U_TEMP"(%0) \n\t"\
+ "movq %%mm6, "V_TEMP"(%0) \n\t"\
+\
+ "lea "LUM_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ "pxor %%mm1, %%mm1 \n\t"\
+ "pxor %%mm5, %%mm5 \n\t"\
+ "pxor %%mm7, %%mm7 \n\t"\
+ "pxor %%mm6, %%mm6 \n\t"\
+ ASMALIGN16\
+ "2: \n\t"\
+ "movq (%%"REG_S", %%"REG_a", 2), %%mm0 \n\t" /* Y1srcData */\
+ "movq 8(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y2srcData */\
+ "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\
+ "movq (%%"REG_S", %%"REG_a", 2), %%mm4 \n\t" /* Y1srcData */\
+ "movq %%mm0, %%mm3 \n\t"\
+ "punpcklwd %%mm4, %%mm0 \n\t"\
+ "punpckhwd %%mm4, %%mm3 \n\t"\
+ "movq "STR(APCK_COEF)"(%%"REG_d"), %%mm4 \n\t" /* filterCoeff */\
+ "pmaddwd %%mm4, %%mm0 \n\t"\
+ "pmaddwd %%mm4, %%mm3 \n\t"\
+ "paddd %%mm0, %%mm1 \n\t"\
+ "paddd %%mm3, %%mm5 \n\t"\
+ "movq 8(%%"REG_S", %%"REG_a", 2), %%mm3 \n\t" /* Y2srcData */\
+ "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\
+ "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
+ "test %%"REG_S", %%"REG_S" \n\t"\
+ "movq %%mm2, %%mm0 \n\t"\
+ "punpcklwd %%mm3, %%mm2 \n\t"\
+ "punpckhwd %%mm3, %%mm0 \n\t"\
+ "pmaddwd %%mm4, %%mm2 \n\t"\
+ "pmaddwd %%mm4, %%mm0 \n\t"\
+ "paddd %%mm2, %%mm7 \n\t"\
+ "paddd %%mm0, %%mm6 \n\t"\
+ " jnz 2b \n\t"\
+ "psrad $16, %%mm1 \n\t"\
+ "psrad $16, %%mm5 \n\t"\
+ "psrad $16, %%mm7 \n\t"\
+ "psrad $16, %%mm6 \n\t"\
+ "movq "VROUNDER_OFFSET"(%0), %%mm0\n\t"\
+ "packssdw %%mm5, %%mm1 \n\t"\
+ "packssdw %%mm6, %%mm7 \n\t"\
+ "paddw %%mm0, %%mm1 \n\t"\
+ "paddw %%mm0, %%mm7 \n\t"\
+ "movq "U_TEMP"(%0), %%mm3 \n\t"\
+ "movq "V_TEMP"(%0), %%mm4 \n\t"\
+
+#define YSCALEYUV2RGBX \
+ "psubw "U_OFFSET"(%0), %%mm3 \n\t" /* (U-128)8*/\
+ "psubw "V_OFFSET"(%0), %%mm4 \n\t" /* (V-128)8*/\
+ "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
+ "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
+ "pmulhw "UG_COEFF"(%0), %%mm3 \n\t"\
+ "pmulhw "VG_COEFF"(%0), %%mm4 \n\t"\
+ /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
+ "pmulhw "UB_COEFF"(%0), %%mm2 \n\t"\
+ "pmulhw "VR_COEFF"(%0), %%mm5 \n\t"\
+ "psubw "Y_OFFSET"(%0), %%mm1 \n\t" /* 8(Y-16)*/\
+ "psubw "Y_OFFSET"(%0), %%mm7 \n\t" /* 8(Y-16)*/\
+ "pmulhw "Y_COEFF"(%0), %%mm1 \n\t"\
+ "pmulhw "Y_COEFF"(%0), %%mm7 \n\t"\
+ /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
+ "paddw %%mm3, %%mm4 \n\t"\
+ "movq %%mm2, %%mm0 \n\t"\
+ "movq %%mm5, %%mm6 \n\t"\
+ "movq %%mm4, %%mm3 \n\t"\
+ "punpcklwd %%mm2, %%mm2 \n\t"\
+ "punpcklwd %%mm5, %%mm5 \n\t"\
+ "punpcklwd %%mm4, %%mm4 \n\t"\
+ "paddw %%mm1, %%mm2 \n\t"\
+ "paddw %%mm1, %%mm5 \n\t"\
+ "paddw %%mm1, %%mm4 \n\t"\
+ "punpckhwd %%mm0, %%mm0 \n\t"\
+ "punpckhwd %%mm6, %%mm6 \n\t"\
+ "punpckhwd %%mm3, %%mm3 \n\t"\
+ "paddw %%mm7, %%mm0 \n\t"\
+ "paddw %%mm7, %%mm6 \n\t"\
+ "paddw %%mm7, %%mm3 \n\t"\
+ /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
+ "packuswb %%mm0, %%mm2 \n\t"\
+ "packuswb %%mm6, %%mm5 \n\t"\
+ "packuswb %%mm3, %%mm4 \n\t"\
+ "pxor %%mm7, %%mm7 \n\t"
+
+#define REAL_YSCALEYUV2PACKED(index, c) \
+ "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
+ "movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1\n\t"\
+ "psraw $3, %%mm0 \n\t"\
+ "psraw $3, %%mm1 \n\t"\
+ "movq %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c")\n\t"\
+ "movq %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c")\n\t"\
+ "xor "#index", "#index" \n\t"\
+ ASMALIGN16\
+ "1: \n\t"\
+ "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
+ "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
+ "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
+ "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
+ "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
+ "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
+ "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
+ "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
+ "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
+ "psraw $7, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
+ "psraw $7, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
+ "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
+ "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
+ "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
+ "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
+ "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
+ "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
+ "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
+ "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
+ "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
+ "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
+ "psraw $7, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+ "psraw $7, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+ "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
+ "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
+
+#define YSCALEYUV2PACKED(index, c) REAL_YSCALEYUV2PACKED(index, c)
+
+#define REAL_YSCALEYUV2RGB(index, c) \
+ "xor "#index", "#index" \n\t"\
+ ASMALIGN16\
+ "1: \n\t"\
+ "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
+ "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
+ "movq 4096(%2, "#index"), %%mm5\n\t" /* uvbuf0[eax+2048]*/\
+ "movq 4096(%3, "#index"), %%mm4\n\t" /* uvbuf1[eax+2048]*/\
+ "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
+ "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
+ "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
+ "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
+ "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
+ "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
+ "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
+ "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
+ "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
+ "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
+ "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
+ "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
+ "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
+ "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
+ "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
+ /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
+ "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
+ "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
+ "movq 8(%0, "#index", 2), %%mm6\n\t" /*buf0[eax]*/\
+ "movq 8(%1, "#index", 2), %%mm7\n\t" /*buf1[eax]*/\
+ "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
+ "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
+ "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
+ "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
+ "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+ "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+ "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
+ "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
+ "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
+ "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
+ "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
+ "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
+ "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
+ "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
+ /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
+ "paddw %%mm3, %%mm4 \n\t"\
+ "movq %%mm2, %%mm0 \n\t"\
+ "movq %%mm5, %%mm6 \n\t"\
+ "movq %%mm4, %%mm3 \n\t"\
+ "punpcklwd %%mm2, %%mm2 \n\t"\
+ "punpcklwd %%mm5, %%mm5 \n\t"\
+ "punpcklwd %%mm4, %%mm4 \n\t"\
+ "paddw %%mm1, %%mm2 \n\t"\
+ "paddw %%mm1, %%mm5 \n\t"\
+ "paddw %%mm1, %%mm4 \n\t"\
+ "punpckhwd %%mm0, %%mm0 \n\t"\
+ "punpckhwd %%mm6, %%mm6 \n\t"\
+ "punpckhwd %%mm3, %%mm3 \n\t"\
+ "paddw %%mm7, %%mm0 \n\t"\
+ "paddw %%mm7, %%mm6 \n\t"\
+ "paddw %%mm7, %%mm3 \n\t"\
+ /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
+ "packuswb %%mm0, %%mm2 \n\t"\
+ "packuswb %%mm6, %%mm5 \n\t"\
+ "packuswb %%mm3, %%mm4 \n\t"\
+ "pxor %%mm7, %%mm7 \n\t"
+#define YSCALEYUV2RGB(index, c) REAL_YSCALEYUV2RGB(index, c)
+
+#define REAL_YSCALEYUV2PACKED1(index, c) \
+ "xor "#index", "#index" \n\t"\
+ ASMALIGN16\
+ "1: \n\t"\
+ "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
+ "movq 4096(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
+ "psraw $7, %%mm3 \n\t" \
+ "psraw $7, %%mm4 \n\t" \
+ "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
+ "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
+ "psraw $7, %%mm1 \n\t" \
+ "psraw $7, %%mm7 \n\t" \
+
+#define YSCALEYUV2PACKED1(index, c) REAL_YSCALEYUV2PACKED1(index, c)
+
+#define REAL_YSCALEYUV2RGB1(index, c) \
+ "xor "#index", "#index" \n\t"\
+ ASMALIGN16\
+ "1: \n\t"\
+ "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
+ "movq 4096(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
+ "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
+ "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
+ "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
+ "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
+ "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
+ "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
+ "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
+ "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
+ /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
+ "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
+ "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
+ "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+ "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+ "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
+ "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
+ "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
+ "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
+ "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
+ "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
+ /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
+ "paddw %%mm3, %%mm4 \n\t"\
+ "movq %%mm2, %%mm0 \n\t"\
+ "movq %%mm5, %%mm6 \n\t"\
+ "movq %%mm4, %%mm3 \n\t"\
+ "punpcklwd %%mm2, %%mm2 \n\t"\
+ "punpcklwd %%mm5, %%mm5 \n\t"\
+ "punpcklwd %%mm4, %%mm4 \n\t"\
+ "paddw %%mm1, %%mm2 \n\t"\
+ "paddw %%mm1, %%mm5 \n\t"\
+ "paddw %%mm1, %%mm4 \n\t"\
+ "punpckhwd %%mm0, %%mm0 \n\t"\
+ "punpckhwd %%mm6, %%mm6 \n\t"\
+ "punpckhwd %%mm3, %%mm3 \n\t"\
+ "paddw %%mm7, %%mm0 \n\t"\
+ "paddw %%mm7, %%mm6 \n\t"\
+ "paddw %%mm7, %%mm3 \n\t"\
+ /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
+ "packuswb %%mm0, %%mm2 \n\t"\
+ "packuswb %%mm6, %%mm5 \n\t"\
+ "packuswb %%mm3, %%mm4 \n\t"\
+ "pxor %%mm7, %%mm7 \n\t"
+#define YSCALEYUV2RGB1(index, c) REAL_YSCALEYUV2RGB1(index, c)
+
+#define REAL_YSCALEYUV2PACKED1b(index, c) \
+ "xor "#index", "#index" \n\t"\
+ ASMALIGN16\
+ "1: \n\t"\
+ "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
+ "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
+ "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
+ "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
+ "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
+ "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
+ "psrlw $8, %%mm3 \n\t" \
+ "psrlw $8, %%mm4 \n\t" \
+ "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
+ "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
+ "psraw $7, %%mm1 \n\t" \
+ "psraw $7, %%mm7 \n\t"
+#define YSCALEYUV2PACKED1b(index, c) REAL_YSCALEYUV2PACKED1b(index, c)
+
+// do vertical chrominance interpolation
+#define REAL_YSCALEYUV2RGB1b(index, c) \
+ "xor "#index", "#index" \n\t"\
+ ASMALIGN16\
+ "1: \n\t"\
+ "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
+ "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
+ "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
+ "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
+ "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
+ "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
+ "psrlw $5, %%mm3 \n\t" /*FIXME might overflow*/\
+ "psrlw $5, %%mm4 \n\t" /*FIXME might overflow*/\
+ "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
+ "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
+ "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
+ "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
+ "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
+ "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
+ /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
+ "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
+ "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
+ "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+ "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+ "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
+ "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
+ "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
+ "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
+ "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
+ "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
+ /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
+ "paddw %%mm3, %%mm4 \n\t"\
+ "movq %%mm2, %%mm0 \n\t"\
+ "movq %%mm5, %%mm6 \n\t"\
+ "movq %%mm4, %%mm3 \n\t"\
+ "punpcklwd %%mm2, %%mm2 \n\t"\
+ "punpcklwd %%mm5, %%mm5 \n\t"\
+ "punpcklwd %%mm4, %%mm4 \n\t"\
+ "paddw %%mm1, %%mm2 \n\t"\
+ "paddw %%mm1, %%mm5 \n\t"\
+ "paddw %%mm1, %%mm4 \n\t"\
+ "punpckhwd %%mm0, %%mm0 \n\t"\
+ "punpckhwd %%mm6, %%mm6 \n\t"\
+ "punpckhwd %%mm3, %%mm3 \n\t"\
+ "paddw %%mm7, %%mm0 \n\t"\
+ "paddw %%mm7, %%mm6 \n\t"\
+ "paddw %%mm7, %%mm3 \n\t"\
+ /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
+ "packuswb %%mm0, %%mm2 \n\t"\
+ "packuswb %%mm6, %%mm5 \n\t"\
+ "packuswb %%mm3, %%mm4 \n\t"\
+ "pxor %%mm7, %%mm7 \n\t"
+#define YSCALEYUV2RGB1b(index, c) REAL_YSCALEYUV2RGB1b(index, c)
+
+#define REAL_WRITEBGR32(dst, dstw, index) \
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
+ "movq %%mm2, %%mm1 \n\t" /* B */\
+ "movq %%mm5, %%mm6 \n\t" /* R */\
+ "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
+ "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
+ "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
+ "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
+ "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
+ "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
+ "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
+ "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
+ "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
+ "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
+\
+ MOVNTQ(%%mm0, (dst, index, 4))\
+ MOVNTQ(%%mm2, 8(dst, index, 4))\
+ MOVNTQ(%%mm1, 16(dst, index, 4))\
+ MOVNTQ(%%mm3, 24(dst, index, 4))\
+\
+ "add $8, "#index" \n\t"\
+ "cmp "#dstw", "#index" \n\t"\
+ " jb 1b \n\t"
+#define WRITEBGR32(dst, dstw, index) REAL_WRITEBGR32(dst, dstw, index)
+
+#define REAL_WRITEBGR16(dst, dstw, index) \
+ "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
+ "pand "MANGLE(bFC)", %%mm4 \n\t" /* G */\
+ "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
+ "psrlq $3, %%mm2 \n\t"\
+\
+ "movq %%mm2, %%mm1 \n\t"\
+ "movq %%mm4, %%mm3 \n\t"\
+\
+ "punpcklbw %%mm7, %%mm3 \n\t"\
+ "punpcklbw %%mm5, %%mm2 \n\t"\
+ "punpckhbw %%mm7, %%mm4 \n\t"\
+ "punpckhbw %%mm5, %%mm1 \n\t"\
+\
+ "psllq $3, %%mm3 \n\t"\
+ "psllq $3, %%mm4 \n\t"\
+\
+ "por %%mm3, %%mm2 \n\t"\
+ "por %%mm4, %%mm1 \n\t"\
+\
+ MOVNTQ(%%mm2, (dst, index, 2))\
+ MOVNTQ(%%mm1, 8(dst, index, 2))\
+\
+ "add $8, "#index" \n\t"\
+ "cmp "#dstw", "#index" \n\t"\
+ " jb 1b \n\t"
+#define WRITEBGR16(dst, dstw, index) REAL_WRITEBGR16(dst, dstw, index)
+
+#define REAL_WRITEBGR15(dst, dstw, index) \
+ "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
+ "pand "MANGLE(bF8)", %%mm4 \n\t" /* G */\
+ "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
+ "psrlq $3, %%mm2 \n\t"\
+ "psrlq $1, %%mm5 \n\t"\
+\
+ "movq %%mm2, %%mm1 \n\t"\
+ "movq %%mm4, %%mm3 \n\t"\
+\
+ "punpcklbw %%mm7, %%mm3 \n\t"\
+ "punpcklbw %%mm5, %%mm2 \n\t"\
+ "punpckhbw %%mm7, %%mm4 \n\t"\
+ "punpckhbw %%mm5, %%mm1 \n\t"\
+\
+ "psllq $2, %%mm3 \n\t"\
+ "psllq $2, %%mm4 \n\t"\
+\
+ "por %%mm3, %%mm2 \n\t"\
+ "por %%mm4, %%mm1 \n\t"\
+\
+ MOVNTQ(%%mm2, (dst, index, 2))\
+ MOVNTQ(%%mm1, 8(dst, index, 2))\
+\
+ "add $8, "#index" \n\t"\
+ "cmp "#dstw", "#index" \n\t"\
+ " jb 1b \n\t"
+#define WRITEBGR15(dst, dstw, index) REAL_WRITEBGR15(dst, dstw, index)
+
+#define WRITEBGR24OLD(dst, dstw, index) \
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
+ "movq %%mm2, %%mm1 \n\t" /* B */\
+ "movq %%mm5, %%mm6 \n\t" /* R */\
+ "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
+ "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
+ "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
+ "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
+ "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
+ "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
+ "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
+ "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
+ "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
+ "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
+\
+ "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
+ "psrlq $8, %%mm0 \n\t" /* 00RGB0RG 0 */\
+ "pand "MANGLE(bm00000111)", %%mm4\n\t" /* 00000RGB 0 */\
+ "pand "MANGLE(bm11111000)", %%mm0\n\t" /* 00RGB000 0.5 */\
+ "por %%mm4, %%mm0 \n\t" /* 00RGBRGB 0 */\
+ "movq %%mm2, %%mm4 \n\t" /* 0RGB0RGB 1 */\
+ "psllq $48, %%mm2 \n\t" /* GB000000 1 */\
+ "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
+\
+ "movq %%mm4, %%mm2 \n\t" /* 0RGB0RGB 1 */\
+ "psrld $16, %%mm4 \n\t" /* 000R000R 1 */\
+ "psrlq $24, %%mm2 \n\t" /* 0000RGB0 1.5 */\
+ "por %%mm4, %%mm2 \n\t" /* 000RRGBR 1 */\
+ "pand "MANGLE(bm00001111)", %%mm2\n\t" /* 0000RGBR 1 */\
+ "movq %%mm1, %%mm4 \n\t" /* 0RGB0RGB 2 */\
+ "psrlq $8, %%mm1 \n\t" /* 00RGB0RG 2 */\
+ "pand "MANGLE(bm00000111)", %%mm4\n\t" /* 00000RGB 2 */\
+ "pand "MANGLE(bm11111000)", %%mm1\n\t" /* 00RGB000 2.5 */\
+ "por %%mm4, %%mm1 \n\t" /* 00RGBRGB 2 */\
+ "movq %%mm1, %%mm4 \n\t" /* 00RGBRGB 2 */\
+ "psllq $32, %%mm1 \n\t" /* BRGB0000 2 */\
+ "por %%mm1, %%mm2 \n\t" /* BRGBRGBR 1 */\
+\
+ "psrlq $32, %%mm4 \n\t" /* 000000RG 2.5 */\
+ "movq %%mm3, %%mm5 \n\t" /* 0RGB0RGB 3 */\
+ "psrlq $8, %%mm3 \n\t" /* 00RGB0RG 3 */\
+ "pand "MANGLE(bm00000111)", %%mm5\n\t" /* 00000RGB 3 */\
+ "pand "MANGLE(bm11111000)", %%mm3\n\t" /* 00RGB000 3.5 */\
+ "por %%mm5, %%mm3 \n\t" /* 00RGBRGB 3 */\
+ "psllq $16, %%mm3 \n\t" /* RGBRGB00 3 */\
+ "por %%mm4, %%mm3 \n\t" /* RGBRGBRG 2.5 */\
+\
+ MOVNTQ(%%mm0, (dst))\
+ MOVNTQ(%%mm2, 8(dst))\
+ MOVNTQ(%%mm3, 16(dst))\
+ "add $24, "#dst" \n\t"\
+\
+ "add $8, "#index" \n\t"\
+ "cmp "#dstw", "#index" \n\t"\
+ " jb 1b \n\t"
+
+#define WRITEBGR24MMX(dst, dstw, index) \
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
+ "movq %%mm2, %%mm1 \n\t" /* B */\
+ "movq %%mm5, %%mm6 \n\t" /* R */\
+ "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
+ "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
+ "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
+ "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
+ "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
+ "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
+ "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
+ "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
+ "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
+ "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
+\
+ "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
+ "movq %%mm2, %%mm6 \n\t" /* 0RGB0RGB 1 */\
+ "movq %%mm1, %%mm5 \n\t" /* 0RGB0RGB 2 */\
+ "movq %%mm3, %%mm7 \n\t" /* 0RGB0RGB 3 */\
+\
+ "psllq $40, %%mm0 \n\t" /* RGB00000 0 */\
+ "psllq $40, %%mm2 \n\t" /* RGB00000 1 */\
+ "psllq $40, %%mm1 \n\t" /* RGB00000 2 */\
+ "psllq $40, %%mm3 \n\t" /* RGB00000 3 */\
+\
+ "punpckhdq %%mm4, %%mm0 \n\t" /* 0RGBRGB0 0 */\
+ "punpckhdq %%mm6, %%mm2 \n\t" /* 0RGBRGB0 1 */\
+ "punpckhdq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */\
+ "punpckhdq %%mm7, %%mm3 \n\t" /* 0RGBRGB0 3 */\
+\
+ "psrlq $8, %%mm0 \n\t" /* 00RGBRGB 0 */\
+ "movq %%mm2, %%mm6 \n\t" /* 0RGBRGB0 1 */\
+ "psllq $40, %%mm2 \n\t" /* GB000000 1 */\
+ "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
+ MOVNTQ(%%mm0, (dst))\
+\
+ "psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */\
+ "movq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */\
+ "psllq $24, %%mm1 \n\t" /* BRGB0000 2 */\
+ "por %%mm1, %%mm6 \n\t" /* BRGBRGBR 1 */\
+ MOVNTQ(%%mm6, 8(dst))\
+\
+ "psrlq $40, %%mm5 \n\t" /* 000000RG 2 */\
+ "psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */\
+ "por %%mm3, %%mm5 \n\t" /* RGBRGBRG 2 */\
+ MOVNTQ(%%mm5, 16(dst))\
+\
+ "add $24, "#dst" \n\t"\
+\
+ "add $8, "#index" \n\t"\
+ "cmp "#dstw", "#index" \n\t"\
+ " jb 1b \n\t"
+
+#define WRITEBGR24MMX2(dst, dstw, index) \
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
+ "movq "MANGLE(M24A)", %%mm0 \n\t"\
+ "movq "MANGLE(M24C)", %%mm7 \n\t"\
+ "pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */\
+ "pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */\
+ "pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */\
+\
+ "pand %%mm0, %%mm1 \n\t" /* B2 B1 B0 */\
+ "pand %%mm0, %%mm3 \n\t" /* G2 G1 G0 */\
+ "pand %%mm7, %%mm6 \n\t" /* R1 R0 */\
+\
+ "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */\
+ "por %%mm1, %%mm6 \n\t"\
+ "por %%mm3, %%mm6 \n\t"\
+ MOVNTQ(%%mm6, (dst))\
+\
+ "psrlq $8, %%mm4 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */\
+ "pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */\
+ "pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */\
+ "pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */\
+\
+ "pand "MANGLE(M24B)", %%mm1 \n\t" /* B5 B4 B3 */\
+ "pand %%mm7, %%mm3 \n\t" /* G4 G3 */\
+ "pand %%mm0, %%mm6 \n\t" /* R4 R3 R2 */\
+\
+ "por %%mm1, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */\
+ "por %%mm3, %%mm6 \n\t"\
+ MOVNTQ(%%mm6, 8(dst))\
+\
+ "pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6 B7 B6 B6 B7 */\
+ "pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */\
+ "pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */\
+\
+ "pand %%mm7, %%mm1 \n\t" /* B7 B6 */\
+ "pand %%mm0, %%mm3 \n\t" /* G7 G6 G5 */\
+ "pand "MANGLE(M24B)", %%mm6 \n\t" /* R7 R6 R5 */\
+\
+ "por %%mm1, %%mm3 \n\t"\
+ "por %%mm3, %%mm6 \n\t"\
+ MOVNTQ(%%mm6, 16(dst))\
+\
+ "add $24, "#dst" \n\t"\
+\
+ "add $8, "#index" \n\t"\
+ "cmp "#dstw", "#index" \n\t"\
+ " jb 1b \n\t"
+
+#if HAVE_MMX2
+#undef WRITEBGR24
+#define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX2(dst, dstw, index)
+#else
+#undef WRITEBGR24
+#define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX(dst, dstw, index)
+#endif
+
+#define REAL_WRITEYUY2(dst, dstw, index) \
+ "packuswb %%mm3, %%mm3 \n\t"\
+ "packuswb %%mm4, %%mm4 \n\t"\
+ "packuswb %%mm7, %%mm1 \n\t"\
+ "punpcklbw %%mm4, %%mm3 \n\t"\
+ "movq %%mm1, %%mm7 \n\t"\
+ "punpcklbw %%mm3, %%mm1 \n\t"\
+ "punpckhbw %%mm3, %%mm7 \n\t"\
+\
+ MOVNTQ(%%mm1, (dst, index, 2))\
+ MOVNTQ(%%mm7, 8(dst, index, 2))\
+\
+ "add $8, "#index" \n\t"\
+ "cmp "#dstw", "#index" \n\t"\
+ " jb 1b \n\t"
+#define WRITEYUY2(dst, dstw, index) REAL_WRITEYUY2(dst, dstw, index)
+
+
+static inline void RENAME(yuv2yuvX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
+ int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
+ uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW0, long chrDstW0)
+{
+ const stride_t dstW = dstW0;
+ const stride_t chrDstW = chrDstW0;
+#if HAVE_MMX
+ if(c->params.subsampling & SWS_ACCURATE_RND){
+ if(uDest){
+ YSCALEYUV2YV12X_ACCURATE( 0, CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
+ YSCALEYUV2YV12X_ACCURATE(4096, CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
+ }
+
+ YSCALEYUV2YV12X_ACCURATE(0, LUM_MMX_FILTER_OFFSET, dest, dstW)
+ }else{
+ if(uDest){
+ YSCALEYUV2YV12X( 0, CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
+ YSCALEYUV2YV12X(4096, CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
+ }
+
+ YSCALEYUV2YV12X(0, LUM_MMX_FILTER_OFFSET, dest, dstW)
+ }
+#else
+#ifdef HAVE_ALTIVEC
+yuv2yuvX_altivec_real(lumFilter, lumSrc, lumFilterSize,
+ chrFilter, chrSrc, chrFilterSize,
+ dest, uDest, vDest, dstW, chrDstW);
+#else //HAVE_ALTIVEC
+yuv2yuvXinC(lumFilter, lumSrc, lumFilterSize,
+ chrFilter, chrSrc, chrFilterSize,
+ dest, uDest, vDest, dstW, chrDstW);
+#endif //!HAVE_ALTIVEC
+#endif
+}
+
+static inline void RENAME(yuv2nv12X)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
+ int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
+ uint8_t *dest, uint8_t *uDest, int dstW, int chrDstW, int dstFormat)
+{
+yuv2nv12XinC(lumFilter, lumSrc, lumFilterSize,
+ chrFilter, chrSrc, chrFilterSize,
+ dest, uDest, dstW, chrDstW, dstFormat);
+}
+
+static inline void RENAME(yuv2yuv1)(int16_t *lumSrc, int16_t *chrSrc,
+ uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW0, long chrDstW0)
+{
+ const stride_t dstW = dstW0;
+ const stride_t chrDstW = chrDstW0;
+#if HAVE_MMX
+ if(uDest != NULL)
+ {
+ asm volatile(
+ YSCALEYUV2YV121
+ :: "r" (chrSrc + chrDstW), "r" (uDest + chrDstW),
+ "g" (-chrDstW)
+ : "%"REG_a
+ );
+
+ asm volatile(
+ YSCALEYUV2YV121
+ :: "r" (chrSrc + 2048 + chrDstW), "r" (vDest + chrDstW),
+ "g" (-chrDstW)
+ : "%"REG_a
+ );
+ }
+
+ asm volatile(
+ YSCALEYUV2YV121
+ :: "r" (lumSrc + dstW), "r" (dest + dstW),
+ "g" (-dstW)
+ : "%"REG_a
+ );
+#else
+ int i;
+ for(i=0; i<dstW; i++)
+ {
+ int val= lumSrc[i]>>7;
+
+ if(val&256){
+ if(val<0) val=0;
+ else val=255;
+ }
+
+ dest[i]= val;
+ }
+
+ if(uDest != NULL)
+ for(i=0; i<chrDstW; i++)
+ {
+ int u=chrSrc[i]>>7;
+ int v=chrSrc[i + 2048]>>7;
+
+ if((u|v)&256){
+ if(u<0) u=0;
+ else if (u>255) u=255;
+ if(v<0) v=0;
+ else if (v>255) v=255;
+ }
+
+ uDest[i]= u;
+ vDest[i]= v;
+ }
+#endif
+}
+
+
+/**
+ * vertical scale YV12 to RGB
+ */
+static inline void RENAME(yuv2packedX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
+ int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
+ uint8_t *dest, long dstW, long dstY)
+{
+ long dummy=0;
+#if HAVE_MMX
+ if(c->params.subsampling & SWS_ACCURATE_RND){
+ switch(c->dstFormat){
+ case IMGFMT_BGR32:
+ YSCALEYUV2PACKEDX_ACCURATE
+ YSCALEYUV2RGBX
+ WRITEBGR32(%4, %5, %%REGa)
+
+ YSCALEYUV2PACKEDX_END
+ return;
+ case IMGFMT_BGR24:
+ YSCALEYUV2PACKEDX_ACCURATE
+ YSCALEYUV2RGBX
+ "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"\n\t" //FIXME optimize
+ "add %4, %%"REG_b" \n\t"
+ WRITEBGR24(%%REGb, %5, %%REGa)
+
+
+ :: "r" (&c->redDither),
+ "m" (dummy), "m" (dummy), "m" (dummy),
+ "r" (dest), "m" (dstW)
+ : "%"REG_a, "%"REG_b, "%"REG_d, "%"REG_S //FIXME ebx
+ );
+ return;
+ case IMGFMT_BGR15:
+ YSCALEYUV2PACKEDX_ACCURATE
+ YSCALEYUV2RGBX
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+#ifdef DITHER1XBPP
+ "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
+ "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
+#endif
+
+ WRITEBGR15(%4, %5, %%REGa)
+ YSCALEYUV2PACKEDX_END
+ return;
+ case IMGFMT_BGR16:
+ YSCALEYUV2PACKEDX_ACCURATE
+ YSCALEYUV2RGBX
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+#ifdef DITHER1XBPP
+ "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
+ "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
+#endif
+
+ WRITEBGR16(%4, %5, %%REGa)
+ YSCALEYUV2PACKEDX_END
+ return;
+ case IMGFMT_YUY2:
+ YSCALEYUV2PACKEDX_ACCURATE
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+
+ "psraw $3, %%mm3 \n\t"
+ "psraw $3, %%mm4 \n\t"
+ "psraw $3, %%mm1 \n\t"
+ "psraw $3, %%mm7 \n\t"
+ WRITEYUY2(%4, %5, %%REGa)
+ YSCALEYUV2PACKEDX_END
+ return;
+ }
+ }else{
+ switch(c->dstFormat)
+ {
+ case IMGFMT_BGR32:
+ YSCALEYUV2PACKEDX
+ YSCALEYUV2RGBX
+ WRITEBGR32(%4, %5, %%REGa)
+ YSCALEYUV2PACKEDX_END
+ return;
+ case IMGFMT_BGR24:
+ YSCALEYUV2PACKEDX
+ YSCALEYUV2RGBX
+ "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"\n\t" //FIXME optimize
+ "add %4, %%"REG_b" \n\t"
+ WRITEBGR24(%%REGb, %5, %%REGa)
+
+ :: "r" (&c->redDither),
+ "m" (dummy), "m" (dummy), "m" (dummy),
+ "r" (dest), "m" (dstW)
+ : "%"REG_a, "%"REG_b, "%"REG_d, "%"REG_S //FIXME ebx
+ );
+ return;
+ case IMGFMT_BGR15:
+ YSCALEYUV2PACKEDX
+ YSCALEYUV2RGBX
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+#ifdef DITHER1XBPP
+ "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
+ "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
+#endif
+
+ WRITEBGR15(%4, %5, %%REGa)
+ YSCALEYUV2PACKEDX_END
+ return;
+ case IMGFMT_BGR16:
+ YSCALEYUV2PACKEDX
+ YSCALEYUV2RGBX
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+#ifdef DITHER1XBPP
+ "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
+ "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
+#endif
+
+ WRITEBGR16(%4, %5, %%REGa)
+ YSCALEYUV2PACKEDX_END
+ return;
+ case IMGFMT_YUY2:
+ YSCALEYUV2PACKEDX
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+
+ "psraw $3, %%mm3 \n\t"
+ "psraw $3, %%mm4 \n\t"
+ "psraw $3, %%mm1 \n\t"
+ "psraw $3, %%mm7 \n\t"
+ WRITEYUY2(%4, %5, %%REGa)
+ YSCALEYUV2PACKEDX_END
+ return;
+ }
+ }
+#endif
+#ifdef HAVE_ALTIVEC
+ /* The following list of supported dstFormat values should
+ match what's found in the body of altivec_yuv2packedX() */
+ if(c->dstFormat==IMGFMT_ABGR || c->dstFormat==IMGFMT_BGRA ||
+ c->dstFormat==IMGFMT_BGR24 || c->dstFormat==IMGFMT_RGB24 ||
+ c->dstFormat==IMGFMT_RGBA || c->dstFormat==IMGFMT_ARGB)
+ altivec_yuv2packedX (c, lumFilter, lumSrc, lumFilterSize,
+ chrFilter, chrSrc, chrFilterSize,
+ dest, dstW, dstY);
+ else
+#endif
+ yuv2packedXinC(c, lumFilter, lumSrc, lumFilterSize,
+ chrFilter, chrSrc, chrFilterSize,
+ dest, dstW, dstY);
+}
+
+/**
+ * vertical bilinear scale YV12 to RGB
+ */
+static inline void RENAME(yuv2packed2)(SwsContext *c, uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
+ uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
+{
+ int yalpha1=yalpha^4095;
+ int uvalpha1=uvalpha^4095;
+ int i;
+
+#if 0 //isn't used
+ if(flags&SWS_FULL_CHR_H_INT)
+ {
+ switch(dstFormat)
+ {
+#if HAVE_MMX
+ case IMGFMT_BGR32:
+ asm volatile(
+
+
+FULL_YSCALEYUV2RGB
+ "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
+ "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
+
+ "movq %%mm3, %%mm1 \n\t"
+ "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
+ "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
+
+ MOVNTQ(%%mm3, (%4, %%REGa, 4))
+ MOVNTQ(%%mm1, 8(%4, %%REGa, 4))
+
+ "add $4, %%"REG_a" \n\t"
+ "cmp %5, %%"REG_a" \n\t"
+ " jb 1b \n\t"
+
+
+ :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" ((long)dstW),
+ "m" (yalpha1), "m" (uvalpha1)
+ : "%"REG_a
+ );
+ break;
+ case IMGFMT_BGR24:
+ asm volatile(
+
+FULL_YSCALEYUV2RGB
+
+ // lsb ... msb
+ "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
+ "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
+
+ "movq %%mm3, %%mm1 \n\t"
+ "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
+ "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
+
+ "movq %%mm3, %%mm2 \n\t" // BGR0BGR0
+ "psrlq $8, %%mm3 \n\t" // GR0BGR00
+ "pand "MANGLE(bm00000111)", %%mm2\n\t" // BGR00000
+ "pand "MANGLE(bm11111000)", %%mm3\n\t" // 000BGR00
+ "por %%mm2, %%mm3 \n\t" // BGRBGR00
+ "movq %%mm1, %%mm2 \n\t"
+ "psllq $48, %%mm1 \n\t" // 000000BG
+ "por %%mm1, %%mm3 \n\t" // BGRBGRBG
+
+ "movq %%mm2, %%mm1 \n\t" // BGR0BGR0
+ "psrld $16, %%mm2 \n\t" // R000R000
+ "psrlq $24, %%mm1 \n\t" // 0BGR0000
+ "por %%mm2, %%mm1 \n\t" // RBGRR000
+
+ "mov %4, %%"REG_b" \n\t"
+ "add %%"REG_a", %%"REG_b" \n\t"
+
+#if HAVE_MMX2
+ //FIXME Alignment
+ "movntq %%mm3, (%%"REG_b", %%"REG_a", 2)\n\t"
+ "movntq %%mm1, 8(%%"REG_b", %%"REG_a", 2)\n\t"
+#else
+ "movd %%mm3, (%%"REG_b", %%"REG_a", 2) \n\t"
+ "psrlq $32, %%mm3 \n\t"
+ "movd %%mm3, 4(%%"REG_b", %%"REG_a", 2) \n\t"
+ "movd %%mm1, 8(%%"REG_b", %%"REG_a", 2) \n\t"
+#endif
+ "add $4, %%"REG_a" \n\t"
+ "cmp %5, %%"REG_a" \n\t"
+ " jb 1b \n\t"
+
+ :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstW),
+ "m" (yalpha1), "m" (uvalpha1)
+ : "%"REG_a, "%"REG_b
+ );
+ break;
+ case IMGFMT_BGR15:
+ asm volatile(
+
+FULL_YSCALEYUV2RGB
+#ifdef DITHER1XBPP
+ "paddusb "MANGLE(g5Dither)", %%mm1\n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm0\n\t"
+ "paddusb "MANGLE(b5Dither)", %%mm3\n\t"
+#endif
+ "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
+ "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
+ "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
+
+ "psrlw $3, %%mm3 \n\t"
+ "psllw $2, %%mm1 \n\t"
+ "psllw $7, %%mm0 \n\t"
+ "pand "MANGLE(g15Mask)", %%mm1 \n\t"
+ "pand "MANGLE(r15Mask)", %%mm0 \n\t"
+
+ "por %%mm3, %%mm1 \n\t"
+ "por %%mm1, %%mm0 \n\t"
+
+ MOVNTQ(%%mm0, (%4, %%REGa, 2))
+
+ "add $4, %%"REG_a" \n\t"
+ "cmp %5, %%"REG_a" \n\t"
+ " jb 1b \n\t"
+
+ :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
+ "m" (yalpha1), "m" (uvalpha1)
+ : "%"REG_a
+ );
+ break;
+ case IMGFMT_BGR16:
+ asm volatile(
+
+FULL_YSCALEYUV2RGB
+#ifdef DITHER1XBPP
+ "paddusb "MANGLE(g6Dither)", %%mm1\n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm0\n\t"
+ "paddusb "MANGLE(b5Dither)", %%mm3\n\t"
+#endif
+ "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
+ "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
+ "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
+
+ "psrlw $3, %%mm3 \n\t"
+ "psllw $3, %%mm1 \n\t"
+ "psllw $8, %%mm0 \n\t"
+ "pand "MANGLE(g16Mask)", %%mm1 \n\t"
+ "pand "MANGLE(r16Mask)", %%mm0 \n\t"
+
+ "por %%mm3, %%mm1 \n\t"
+ "por %%mm1, %%mm0 \n\t"
+
+ MOVNTQ(%%mm0, (%4, %%REGa, 2))
+
+ "add $4, %%"REG_a" \n\t"
+ "cmp %5, %%"REG_a" \n\t"
+ " jb 1b \n\t"
+
+ :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
+ "m" (yalpha1), "m" (uvalpha1)
+ : "%"REG_a
+ );
+ break;
+#endif
+ case IMGFMT_RGB32:
+#if !HAVE_MMX
+ case IMGFMT_BGR32:
+#endif
+ if(dstFormat==IMGFMT_BGR32)
+ {
+ int i;
+#ifdef WORDS_BIGENDIAN
+ dest++;
+#endif
+ for(i=0;i<dstW;i++){
+ // vertical linear interpolation && yuv2rgb in a single step:
+ int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
+ int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
+ int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
+ dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
+ dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
+ dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
+ dest+= 4;
+ }
+ }
+ else if(dstFormat==IMGFMT_BGR24)
+ {
+ int i;
+ for(i=0;i<dstW;i++){
+ // vertical linear interpolation && yuv2rgb in a single step:
+ int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
+ int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
+ int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
+ dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
+ dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
+ dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
+ dest+= 3;
+ }
+ }
+ else if(dstFormat==IMGFMT_BGR16)
+ {
+ int i;
+ for(i=0;i<dstW;i++){
+ // vertical linear interpolation && yuv2rgb in a single step:
+ int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
+ int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
+ int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
+
+ ((uint16_t*)dest)[i] =
+ clip_table16b[(Y + yuvtab_40cf[U]) >>13] |
+ clip_table16g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
+ clip_table16r[(Y + yuvtab_3343[V]) >>13];
+ }
+ }
+ else if(dstFormat==IMGFMT_BGR15)
+ {
+ int i;
+ for(i=0;i<dstW;i++){
+ // vertical linear interpolation && yuv2rgb in a single step:
+ int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
+ int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
+ int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
+
+ ((uint16_t*)dest)[i] =
+ clip_table15b[(Y + yuvtab_40cf[U]) >>13] |
+ clip_table15g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
+ clip_table15r[(Y + yuvtab_3343[V]) >>13];
+ }
+ }
+ }//FULL_UV_IPOL
+ else
+ {
+#endif // if 0
+#if HAVE_MMX
+ switch(c->dstFormat)
+ {
+//Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
+ case IMGFMT_BGR32:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB(%%REGBP, %5)
+ WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ case IMGFMT_BGR24:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB(%%REGBP, %5)
+ WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ case IMGFMT_BGR15:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB(%%REGBP, %5)
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+#ifdef DITHER1XBPP
+ "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
+ "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
+#endif
+
+ WRITEBGR15(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ case IMGFMT_BGR16:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB(%%REGBP, %5)
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+#ifdef DITHER1XBPP
+ "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
+ "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
+#endif
+
+ WRITEBGR16(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ case IMGFMT_YUY2:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2PACKED(%%REGBP, %5)
+ WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ default: break;
+ }
+#endif //HAVE_MMX
+YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB2_C, YSCALE_YUV_2_PACKED2_C)
+}
+
+/**
+ * YV12 to RGB without scaling or interpolating
+ */
+static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *uvbuf0, uint16_t *uvbuf1,
+ uint8_t *dest, int dstW, int uvalpha, int dstFormat, int flags, int y)
+{
+ const int yalpha1=0;
+ int i;
+
+ uint16_t *buf1= buf0; //FIXME needed for the rgb1/bgr1
+ const int yalpha= 4096; //FIXME ...
+
+ if(flags&SWS_FULL_CHR_H_INT)
+ {
+ RENAME(yuv2packed2)(c, buf0, buf0, uvbuf0, uvbuf1, dest, dstW, 0, uvalpha, y);
+ return;
+ }
+
+#if HAVE_MMX
+    if( uvalpha < 2048 ) // note this is not correct (shifts chrominance by 0.5 pixels) but it's a bit faster
+ {
+ switch(dstFormat)
+ {
+ case IMGFMT_BGR32:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB1(%%REGBP, %5)
+ WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ case IMGFMT_BGR24:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB1(%%REGBP, %5)
+ WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ case IMGFMT_BGR15:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB1(%%REGBP, %5)
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+#ifdef DITHER1XBPP
+ "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
+ "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
+#endif
+ WRITEBGR15(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ case IMGFMT_BGR16:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB1(%%REGBP, %5)
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+#ifdef DITHER1XBPP
+ "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
+ "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
+#endif
+
+ WRITEBGR16(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ case IMGFMT_YUY2:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2PACKED1(%%REGBP, %5)
+ WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ }
+ }
+ else
+ {
+ switch(dstFormat)
+ {
+ case IMGFMT_BGR32:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB1b(%%REGBP, %5)
+ WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ case IMGFMT_BGR24:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB1b(%%REGBP, %5)
+ WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ case IMGFMT_BGR15:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB1b(%%REGBP, %5)
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+#ifdef DITHER1XBPP
+ "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
+ "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
+#endif
+ WRITEBGR15(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ case IMGFMT_BGR16:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB1b(%%REGBP, %5)
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+#ifdef DITHER1XBPP
+ "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
+ "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
+#endif
+
+ WRITEBGR16(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ case IMGFMT_YUY2:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2PACKED1b(%%REGBP, %5)
+ WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ }
+ }
+#endif
+ if( uvalpha < 2048 )
+ {
+ YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1_C, YSCALE_YUV_2_PACKED1_C)
+ }else{
+ YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1B_C, YSCALE_YUV_2_PACKED1B_C)
+ }
+}
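+
+/* For reference: uvalpha is a 12-bit weight (0..4096) between the two stored
+   chroma lines. Below 2048 the *1 variants above simply take the nearer line
+   (hence the half-pixel chroma shift noted at the top of the MMX block),
+   while the *1b variants average both lines. */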
+
+//FIXME yuy2* can read up to 7 samples too many
+
+static inline void RENAME(yuy2ToY)(uint8_t *dst, uint8_t *src, long width)
+{
+#if HAVE_MMX
+ asm volatile(
+ "movq "MANGLE(bm01010101)", %%mm2\n\t"
+ "mov %0, %%"REG_a" \n\t"
+ "1: \n\t"
+ "movq (%1, %%"REG_a",2), %%mm0 \n\t"
+ "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
+ "pand %%mm2, %%mm0 \n\t"
+ "pand %%mm2, %%mm1 \n\t"
+ "packuswb %%mm1, %%mm0 \n\t"
+ "movq %%mm0, (%2, %%"REG_a") \n\t"
+ "add $8, %%"REG_a" \n\t"
+ " js 1b \n\t"
+ : : "g" ((stride_t)-width), "r" (src+width*2), "r" (dst+width)
+ : "%"REG_a
+ );
+#else
+ int i;
+ for(i=0; i<width; i++)
+ dst[i]= src[2*i];
+#endif
+}
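+
+/* For reference: YUY2 packs pixels as Y0 U0 Y1 V0, so luma sits in every even
+   byte. The MMX path above keeps those bytes with bm01010101 (0x00FF in each
+   16-bit lane) and packs two quadwords down to 8 luma samples per iteration;
+   the C fallback is the equivalent dst[i] = src[2*i]. */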
+
+static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width)
+{
+#if HAVE_MMX2 || HAVE_AMD3DNOW
+ asm volatile(
+ "movq "MANGLE(bm01010101)", %%mm4\n\t"
+ "mov %0, %%"REG_a" \n\t"
+ "1: \n\t"
+ "movq (%1, %%"REG_a",4), %%mm0 \n\t"
+ "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
+ "movq (%2, %%"REG_a",4), %%mm2 \n\t"
+ "movq 8(%2, %%"REG_a",4), %%mm3 \n\t"
+ PAVGB(%%mm2, %%mm0)
+ PAVGB(%%mm3, %%mm1)
+ "psrlw $8, %%mm0 \n\t"
+ "psrlw $8, %%mm1 \n\t"
+ "packuswb %%mm1, %%mm0 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "psrlw $8, %%mm0 \n\t"
+ "pand %%mm4, %%mm1 \n\t"
+ "packuswb %%mm0, %%mm0 \n\t"
+ "packuswb %%mm1, %%mm1 \n\t"
+ "movd %%mm0, (%4, %%"REG_a") \n\t"
+ "movd %%mm1, (%3, %%"REG_a") \n\t"
+ "add $4, %%"REG_a" \n\t"
+ " js 1b \n\t"
+ : : "g" ((stride_t)-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width)
+ : "%"REG_a
+ );
+#else
+ int i;
+ for(i=0; i<width; i++)
+ {
+ dstU[i]= (src1[4*i + 1] + src2[4*i + 1])>>1;
+ dstV[i]= (src1[4*i + 3] + src2[4*i + 3])>>1;
+ }
+#endif
+}
+
+//this is almost identical to the previous one and exists only because yuy2ToY/UV(dst, src+1, ...) would have 100% unaligned accesses
+static inline void RENAME(uyvyToY)(uint8_t *dst, uint8_t *src, long width)
+{
+#if HAVE_MMX
+ asm volatile(
+ "mov %0, %%"REG_a" \n\t"
+ "1: \n\t"
+ "movq (%1, %%"REG_a",2), %%mm0 \n\t"
+ "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
+ "psrlw $8, %%mm0 \n\t"
+ "psrlw $8, %%mm1 \n\t"
+ "packuswb %%mm1, %%mm0 \n\t"
+ "movq %%mm0, (%2, %%"REG_a") \n\t"
+ "add $8, %%"REG_a" \n\t"
+ " js 1b \n\t"
+ : : "g" ((stride_t)-width), "r" (src+width*2), "r" (dst+width)
+ : "%"REG_a
+ );
+#else
+ int i;
+ for(i=0; i<width; i++)
+ dst[i]= src[2*i+1];
+#endif
+}
+
+static inline void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width)
+{
+#if HAVE_MMX2 || HAVE_AMD3DNOW
+ asm volatile(
+ "movq "MANGLE(bm01010101)", %%mm4\n\t"
+ "mov %0, %%"REG_a" \n\t"
+ "1: \n\t"
+ "movq (%1, %%"REG_a",4), %%mm0 \n\t"
+ "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
+ "movq (%2, %%"REG_a",4), %%mm2 \n\t"
+ "movq 8(%2, %%"REG_a",4), %%mm3 \n\t"
+ PAVGB(%%mm2, %%mm0)
+ PAVGB(%%mm3, %%mm1)
+ "pand %%mm4, %%mm0 \n\t"
+ "pand %%mm4, %%mm1 \n\t"
+ "packuswb %%mm1, %%mm0 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "psrlw $8, %%mm0 \n\t"
+ "pand %%mm4, %%mm1 \n\t"
+ "packuswb %%mm0, %%mm0 \n\t"
+ "packuswb %%mm1, %%mm1 \n\t"
+ "movd %%mm0, (%4, %%"REG_a") \n\t"
+ "movd %%mm1, (%3, %%"REG_a") \n\t"
+ "add $4, %%"REG_a" \n\t"
+ " js 1b \n\t"
+ : : "g" ((stride_t)-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width)
+ : "%"REG_a
+ );
+#else
+ int i;
+ for(i=0; i<width; i++)
+ {
+ dstU[i]= (src1[4*i + 0] + src2[4*i + 0])>>1;
+ dstV[i]= (src1[4*i + 2] + src2[4*i + 2])>>1;
+ }
+#endif
+}
+
+static inline void RENAME(bgr32ToY)(uint8_t *dst, uint8_t *src, int width)
+{
+ int i;
+ for(i=0; i<width; i++)
+ {
+ int b= ((uint32_t*)src)[i]&0xFF;
+ int g= (((uint32_t*)src)[i]>>8)&0xFF;
+ int r= (((uint32_t*)src)[i]>>16)&0xFF;
+
+ dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
+ }
+}
+
+static inline void RENAME(bgr32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
+{
+ int i;
+ for(i=0; i<width; i++)
+ {
+ const int a= ((uint32_t*)src1)[2*i+0];
+ const int e= ((uint32_t*)src1)[2*i+1];
+ const int c= ((uint32_t*)src2)[2*i+0];
+ const int d= ((uint32_t*)src2)[2*i+1];
+ const int l= (a&0xFF00FF) + (e&0xFF00FF) + (c&0xFF00FF) + (d&0xFF00FF);
+ const int h= (a&0x00FF00) + (e&0x00FF00) + (c&0x00FF00) + (d&0x00FF00);
+ const int b= l&0x3FF;
+ const int g= h>>8;
+ const int r= l>>16;
+
+ dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
+ dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
+ }
+}
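+
+/* For reference: the masks above implement a small SWAR trick. Summing four
+   pixels as (x & 0xFF00FF) accumulates the B channels in bits 0..9 and the R
+   channels in bits 16..25 of 'l' (4*255 = 1020 fits in 10 bits, well clear of
+   the next field), while (x & 0x00FF00) accumulates G in 'h'. So b = l & 0x3FF,
+   g = h >> 8 and r = l >> 16 hold the 4-sample sums, and the extra ">> 2"
+   hidden in RGB2YUV_SHIFT+2 turns those sums into averages. */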
+
+static inline void RENAME(bgr24ToY)(uint8_t *dst, uint8_t *src, long width)
+{
+#if HAVE_MMX
+ asm volatile(
+ "mov %2, %%"REG_a" \n\t"
+ "movq "MANGLE(bgr2YCoeff)", %%mm6 \n\t"
+ "movq "MANGLE(w1111)", %%mm5 \n\t"
+ "pxor %%mm7, %%mm7 \n\t"
+ "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"\n\t"
+ ASMALIGN16
+ "1: \n\t"
+ PREFETCH" 64(%0, %%"REG_b") \n\t"
+ "movd (%0, %%"REG_b"), %%mm0 \n\t"
+ "movd 3(%0, %%"REG_b"), %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+ "movd 6(%0, %%"REG_b"), %%mm2 \n\t"
+ "movd 9(%0, %%"REG_b"), %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm3 \n\t"
+ "pmaddwd %%mm6, %%mm0 \n\t"
+ "pmaddwd %%mm6, %%mm1 \n\t"
+ "pmaddwd %%mm6, %%mm2 \n\t"
+ "pmaddwd %%mm6, %%mm3 \n\t"
+#ifndef FAST_BGR2YV12
+ "psrad $8, %%mm0 \n\t"
+ "psrad $8, %%mm1 \n\t"
+ "psrad $8, %%mm2 \n\t"
+ "psrad $8, %%mm3 \n\t"
+#endif
+ "packssdw %%mm1, %%mm0 \n\t"
+ "packssdw %%mm3, %%mm2 \n\t"
+ "pmaddwd %%mm5, %%mm0 \n\t"
+ "pmaddwd %%mm5, %%mm2 \n\t"
+ "packssdw %%mm2, %%mm0 \n\t"
+ "psraw $7, %%mm0 \n\t"
+
+ "movd 12(%0, %%"REG_b"), %%mm4 \n\t"
+ "movd 15(%0, %%"REG_b"), %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+ "movd 18(%0, %%"REG_b"), %%mm2 \n\t"
+ "movd 21(%0, %%"REG_b"), %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm3 \n\t"
+ "pmaddwd %%mm6, %%mm4 \n\t"
+ "pmaddwd %%mm6, %%mm1 \n\t"
+ "pmaddwd %%mm6, %%mm2 \n\t"
+ "pmaddwd %%mm6, %%mm3 \n\t"
+#ifndef FAST_BGR2YV12
+ "psrad $8, %%mm4 \n\t"
+ "psrad $8, %%mm1 \n\t"
+ "psrad $8, %%mm2 \n\t"
+ "psrad $8, %%mm3 \n\t"
+#endif
+ "packssdw %%mm1, %%mm4 \n\t"
+ "packssdw %%mm3, %%mm2 \n\t"
+ "pmaddwd %%mm5, %%mm4 \n\t"
+ "pmaddwd %%mm5, %%mm2 \n\t"
+ "add $24, %%"REG_b" \n\t"
+ "packssdw %%mm2, %%mm4 \n\t"
+ "psraw $7, %%mm4 \n\t"
+
+ "packuswb %%mm4, %%mm0 \n\t"
+ "paddusb "MANGLE(bgr2YOffset)", %%mm0 \n\t"
+
+ "movq %%mm0, (%1, %%"REG_a") \n\t"
+ "add $8, %%"REG_a" \n\t"
+ " js 1b \n\t"
+ : : "r" (src+width*3), "r" (dst+width), "g" ((stride_t)-width)
+ : "%"REG_a, "%"REG_b
+ );
+#else
+ int i;
+ for(i=0; i<width; i++)
+ {
+ int b= src[i*3+0];
+ int g= src[i*3+1];
+ int r= src[i*3+2];
+
+ dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
+ }
+#endif
+}
+
+static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width)
+{
+#if HAVE_MMX
+ asm volatile(
+ "mov %4, %%"REG_a" \n\t"
+ "movq "MANGLE(w1111)", %%mm5 \n\t"
+ "movq "MANGLE(bgr2UCoeff)", %%mm6 \n\t"
+ "pxor %%mm7, %%mm7 \n\t"
+ "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b" \n\t"
+ "add %%"REG_b", %%"REG_b" \n\t"
+ ASMALIGN16
+ "1: \n\t"
+ PREFETCH" 64(%0, %%"REG_b") \n\t"
+ PREFETCH" 64(%1, %%"REG_b") \n\t"
+#if HAVE_MMX2 || HAVE_AMD3DNOW
+ "movq (%0, %%"REG_b"), %%mm0 \n\t"
+ "movq (%1, %%"REG_b"), %%mm1 \n\t"
+ "movq 6(%0, %%"REG_b"), %%mm2 \n\t"
+ "movq 6(%1, %%"REG_b"), %%mm3 \n\t"
+ PAVGB(%%mm1, %%mm0)
+ PAVGB(%%mm3, %%mm2)
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "psrlq $24, %%mm0 \n\t"
+ "psrlq $24, %%mm2 \n\t"
+ PAVGB(%%mm1, %%mm0)
+ PAVGB(%%mm3, %%mm2)
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+#else
+ "movd (%0, %%"REG_b"), %%mm0 \n\t"
+ "movd (%1, %%"REG_b"), %%mm1 \n\t"
+ "movd 3(%0, %%"REG_b"), %%mm2 \n\t"
+ "movd 3(%1, %%"REG_b"), %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm3 \n\t"
+ "paddw %%mm1, %%mm0 \n\t"
+ "paddw %%mm3, %%mm2 \n\t"
+ "paddw %%mm2, %%mm0 \n\t"
+ "movd 6(%0, %%"REG_b"), %%mm4 \n\t"
+ "movd 6(%1, %%"REG_b"), %%mm1 \n\t"
+ "movd 9(%0, %%"REG_b"), %%mm2 \n\t"
+ "movd 9(%1, %%"REG_b"), %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm3 \n\t"
+ "paddw %%mm1, %%mm4 \n\t"
+ "paddw %%mm3, %%mm2 \n\t"
+ "paddw %%mm4, %%mm2 \n\t"
+ "psrlw $2, %%mm0 \n\t"
+ "psrlw $2, %%mm2 \n\t"
+#endif
+ "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t"
+ "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t"
+
+ "pmaddwd %%mm0, %%mm1 \n\t"
+ "pmaddwd %%mm2, %%mm3 \n\t"
+ "pmaddwd %%mm6, %%mm0 \n\t"
+ "pmaddwd %%mm6, %%mm2 \n\t"
+#ifndef FAST_BGR2YV12
+ "psrad $8, %%mm0 \n\t"
+ "psrad $8, %%mm1 \n\t"
+ "psrad $8, %%mm2 \n\t"
+ "psrad $8, %%mm3 \n\t"
+#endif
+ "packssdw %%mm2, %%mm0 \n\t"
+ "packssdw %%mm3, %%mm1 \n\t"
+ "pmaddwd %%mm5, %%mm0 \n\t"
+ "pmaddwd %%mm5, %%mm1 \n\t"
+ "packssdw %%mm1, %%mm0 \n\t" // V1 V0 U1 U0
+ "psraw $7, %%mm0 \n\t"
+
+#if HAVE_MMX2 || HAVE_AMD3DNOW
+ "movq 12(%0, %%"REG_b"), %%mm4 \n\t"
+ "movq 12(%1, %%"REG_b"), %%mm1 \n\t"
+ "movq 18(%0, %%"REG_b"), %%mm2 \n\t"
+ "movq 18(%1, %%"REG_b"), %%mm3 \n\t"
+ PAVGB(%%mm1, %%mm4)
+ PAVGB(%%mm3, %%mm2)
+ "movq %%mm4, %%mm1 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "psrlq $24, %%mm4 \n\t"
+ "psrlq $24, %%mm2 \n\t"
+ PAVGB(%%mm1, %%mm4)
+ PAVGB(%%mm3, %%mm2)
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+#else
+ "movd 12(%0, %%"REG_b"), %%mm4 \n\t"
+ "movd 12(%1, %%"REG_b"), %%mm1 \n\t"
+ "movd 15(%0, %%"REG_b"), %%mm2 \n\t"
+ "movd 15(%1, %%"REG_b"), %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm3 \n\t"
+ "paddw %%mm1, %%mm4 \n\t"
+ "paddw %%mm3, %%mm2 \n\t"
+ "paddw %%mm2, %%mm4 \n\t"
+ "movd 18(%0, %%"REG_b"), %%mm5 \n\t"
+ "movd 18(%1, %%"REG_b"), %%mm1 \n\t"
+ "movd 21(%0, %%"REG_b"), %%mm2 \n\t"
+ "movd 21(%1, %%"REG_b"), %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm5 \n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm3 \n\t"
+ "paddw %%mm1, %%mm5 \n\t"
+ "paddw %%mm3, %%mm2 \n\t"
+ "paddw %%mm5, %%mm2 \n\t"
+ "movq "MANGLE(w1111)", %%mm5 \n\t"
+ "psrlw $2, %%mm4 \n\t"
+ "psrlw $2, %%mm2 \n\t"
+#endif
+ "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t"
+ "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t"
+
+ "pmaddwd %%mm4, %%mm1 \n\t"
+ "pmaddwd %%mm2, %%mm3 \n\t"
+ "pmaddwd %%mm6, %%mm4 \n\t"
+ "pmaddwd %%mm6, %%mm2 \n\t"
+#ifndef FAST_BGR2YV12
+ "psrad $8, %%mm4 \n\t"
+ "psrad $8, %%mm1 \n\t"
+ "psrad $8, %%mm2 \n\t"
+ "psrad $8, %%mm3 \n\t"
+#endif
+ "packssdw %%mm2, %%mm4 \n\t"
+ "packssdw %%mm3, %%mm1 \n\t"
+ "pmaddwd %%mm5, %%mm4 \n\t"
+ "pmaddwd %%mm5, %%mm1 \n\t"
+ "add $24, %%"REG_b" \n\t"
+ "packssdw %%mm1, %%mm4 \n\t" // V3 V2 U3 U2
+ "psraw $7, %%mm4 \n\t"
+
+ "movq %%mm0, %%mm1 \n\t"
+ "punpckldq %%mm4, %%mm0 \n\t"
+ "punpckhdq %%mm4, %%mm1 \n\t"
+ "packsswb %%mm1, %%mm0 \n\t"
+ "paddb "MANGLE(bgr2UVOffset)", %%mm0 \n\t"
+
+ "movd %%mm0, (%2, %%"REG_a") \n\t"
+ "punpckhdq %%mm0, %%mm0 \n\t"
+ "movd %%mm0, (%3, %%"REG_a") \n\t"
+ "add $4, %%"REG_a" \n\t"
+ " js 1b \n\t"
+ : : "r" (src1+width*6), "r" (src2+width*6), "r" (dstU+width), "r" (dstV+width), "g" ((stride_t)-width)
+ : "%"REG_a, "%"REG_b
+ );
+#else
+ int i;
+ for(i=0; i<width; i++)
+ {
+ int b= src1[6*i + 0] + src1[6*i + 3] + src2[6*i + 0] + src2[6*i + 3];
+ int g= src1[6*i + 1] + src1[6*i + 4] + src2[6*i + 1] + src2[6*i + 4];
+ int r= src1[6*i + 2] + src1[6*i + 5] + src2[6*i + 2] + src2[6*i + 5];
+
+ dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
+ dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
+ }
+#endif
+}
+
+static inline void RENAME(bgr16ToY)(uint8_t *dst, uint8_t *src, int width)
+{
+ int i;
+ for(i=0; i<width; i++)
+ {
+ int d= ((uint16_t*)src)[i];
+ int b= d&0x1F;
+ int g= (d>>5)&0x3F;
+ int r= (d>>11)&0x1F;
+
+ dst[i]= ((2*RY*r + GY*g + 2*BY*b)>>(RGB2YUV_SHIFT-2)) + 16;
+ }
+}
+
+static inline void RENAME(bgr16ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
+{
+ int i;
+ for(i=0; i<width; i++)
+ {
+ int d0= ((uint32_t*)src1)[i];
+ int d1= ((uint32_t*)src2)[i];
+
+ int dl= (d0&0x07E0F81F) + (d1&0x07E0F81F);
+ int dh= ((d0>>5)&0x07C0F83F) + ((d1>>5)&0x07C0F83F);
+
+ int dh2= (dh>>11) + (dh<<21);
+ int d= dh2 + dl;
+
+ int b= d&0x7F;
+ int r= (d>>11)&0x7F;
+ int g= d>>21;
+ dstU[i]= ((2*RU*r + GU*g + 2*BU*b)>>(RGB2YUV_SHIFT+2-2)) + 128;
+ dstV[i]= ((2*RV*r + GV*g + 2*BV*b)>>(RGB2YUV_SHIFT+2-2)) + 128;
+ }
+}
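+
+/* For reference: each uint32_t read above covers two adjacent RGB565 pixels.
+   The two masks split the six colour fields between the accumulators dl and dh
+   with enough headroom that four samples per channel can be summed without the
+   fields colliding; the shifts building dh2 then line the matching fields up
+   so b, r and g come out as 4-sample sums. The final shift's "+2" turns those
+   sums into averages, and the "-2" (together with the factor of 2 on the 5-bit
+   R/B terms) rescales the 5/6-bit channels towards 8-bit range. */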
+
+static inline void RENAME(bgr15ToY)(uint8_t *dst, uint8_t *src, int width)
+{
+ int i;
+ for(i=0; i<width; i++)
+ {
+ int d= ((uint16_t*)src)[i];
+ int b= d&0x1F;
+ int g= (d>>5)&0x1F;
+ int r= (d>>10)&0x1F;
+
+ dst[i]= ((RY*r + GY*g + BY*b)>>(RGB2YUV_SHIFT-3)) + 16;
+ }
+}
+
+static inline void RENAME(bgr15ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
+{
+ int i;
+ for(i=0; i<width; i++)
+ {
+ int d0= ((uint32_t*)src1)[i];
+ int d1= ((uint32_t*)src2)[i];
+
+ int dl= (d0&0x03E07C1F) + (d1&0x03E07C1F);
+ int dh= ((d0>>5)&0x03E0F81F) + ((d1>>5)&0x03E0F81F);
+
+ int dh2= (dh>>11) + (dh<<21);
+ int d= dh2 + dl;
+
+ int b= d&0x7F;
+ int r= (d>>10)&0x7F;
+ int g= d>>21;
+ dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2-3)) + 128;
+ dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2-3)) + 128;
+ }
+}
+
+
+static inline void RENAME(rgb32ToY)(uint8_t *dst, uint8_t *src, int width)
+{
+ int i;
+ for(i=0; i<width; i++)
+ {
+ int r= ((uint32_t*)src)[i]&0xFF;
+ int g= (((uint32_t*)src)[i]>>8)&0xFF;
+ int b= (((uint32_t*)src)[i]>>16)&0xFF;
+
+ dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
+ }
+}
+
+static inline void RENAME(rgb32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
+{
+ int i;
+ for(i=0; i<width; i++)
+ {
+ const int a= ((uint32_t*)src1)[2*i+0];
+ const int e= ((uint32_t*)src1)[2*i+1];
+ const int c= ((uint32_t*)src2)[2*i+0];
+ const int d= ((uint32_t*)src2)[2*i+1];
+ const int l= (a&0xFF00FF) + (e&0xFF00FF) + (c&0xFF00FF) + (d&0xFF00FF);
+ const int h= (a&0x00FF00) + (e&0x00FF00) + (c&0x00FF00) + (d&0x00FF00);
+ const int r= l&0x3FF;
+ const int g= h>>8;
+ const int b= l>>16;
+
+ dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
+ dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
+ }
+}
+
+static inline void RENAME(rgb24ToY)(uint8_t *dst, uint8_t *src, int width)
+{
+ int i;
+ for(i=0; i<width; i++)
+ {
+ int r= src[i*3+0];
+ int g= src[i*3+1];
+ int b= src[i*3+2];
+
+ dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
+ }
+}
+
+static inline void RENAME(rgb24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
+{
+ int i;
+ for(i=0; i<width; i++)
+ {
+ int r= src1[6*i + 0] + src1[6*i + 3] + src2[6*i + 0] + src2[6*i + 3];
+ int g= src1[6*i + 1] + src1[6*i + 4] + src2[6*i + 1] + src2[6*i + 4];
+ int b= src1[6*i + 2] + src1[6*i + 5] + src2[6*i + 2] + src2[6*i + 5];
+
+ dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
+ dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
+ }
+}
+
+static inline void RENAME(rgb16ToY)(uint8_t *dst, uint8_t *src, int width)
+{
+ int i;
+ for(i=0; i<width; i++)
+ {
+ int d= ((uint16_t*)src)[i];
+ int b= d&0x1F;
+ int g= (d>>5)&0x3F;
+ int r= (d>>11)&0x1F;
+
+ dst[i]= ((2*RY*r + GY*g + 2*BY*b)>>(RGB2YUV_SHIFT-2)) + 16;
+ }
+}
+
+static inline void RENAME(rgb16ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
+{
+ int i;
+ for(i=0; i<width; i++)
+ {
+ int d0= ((uint32_t*)src1)[i];
+ int d1= ((uint32_t*)src2)[i];
+
+ int dl= (d0&0x07E0F81F) + (d1&0x07E0F81F);
+ int dh= ((d0>>5)&0x07C0F83F) + ((d1>>5)&0x07C0F83F);
+
+ int dh2= (dh>>11) + (dh<<21);
+ int d= dh2 + dl;
+
+ int b= d&0x7F;
+ int r= (d>>11)&0x7F;
+ int g= d>>21;
+ dstV[i]= ((2*RU*r + GU*g + 2*BU*b)>>(RGB2YUV_SHIFT+2-2)) + 128;
+ dstU[i]= ((2*RV*r + GV*g + 2*BV*b)>>(RGB2YUV_SHIFT+2-2)) + 128;
+ }
+}
+
+static inline void RENAME(rgb15ToY)(uint8_t *dst, uint8_t *src, int width)
+{
+ int i;
+ for(i=0; i<width; i++)
+ {
+ int d= ((uint16_t*)src)[i];
+ int b= d&0x1F;
+ int g= (d>>5)&0x1F;
+ int r= (d>>10)&0x1F;
+
+ dst[i]= ((RY*r + GY*g + BY*b)>>(RGB2YUV_SHIFT-3)) + 16;
+ }
+}
+
+static inline void RENAME(rgb15ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
+{
+ int i;
+ for(i=0; i<width; i++)
+ {
+ int d0= ((uint32_t*)src1)[i];
+ int d1= ((uint32_t*)src2)[i];
+
+ int dl= (d0&0x03E07C1F) + (d1&0x03E07C1F);
+ int dh= ((d0>>5)&0x03E0F81F) + ((d1>>5)&0x03E0F81F);
+
+ int dh2= (dh>>11) + (dh<<21);
+ int d= dh2 + dl;
+
+ int b= d&0x7F;
+ int r= (d>>10)&0x7F;
+ int g= d>>21;
+ dstV[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2-3)) + 128;
+ dstU[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2-3)) + 128;
+ }
+}
+
+static inline void RENAME(nv12ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
+{
+ int i;
+ for(i=0; i<width; i++)
+ {
+ dstU[i]= src1[i<<1];
+ dstV[i]= src1[(i<<1)+1];
+ }
+}
+
+static inline void RENAME(nv21ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
+{
+ int i;
+ for(i=0; i<width; i++)
+ {
+ dstV[i]= src1[i<<1];
+ dstU[i]= src1[(i<<1)+1];
+ }
+}
+
+// Bilinear / Bicubic scaling
+static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW, int xInc,
+ int16_t *filter, int16_t *filterPos, long filterSize0)
+{
+ const stride_t filterSize = filterSize0;
+#if HAVE_MMX
+ assert(filterSize % 4 == 0 && filterSize>0);
+    if(filterSize==4) // always true for upscaling, sometimes for downscaling too
+ {
+ stride_t counter= -2*dstW;
+ filter-= counter*2;
+ filterPos-= counter/2;
+ dst-= counter/2;
+ asm volatile(
+ "pxor %%mm7, %%mm7 \n\t"
+ "movq "MANGLE(w02)", %%mm6 \n\t"
+ "push %%"REG_BP" \n\t" // we use 7 regs here ...
+ "mov %%"REG_a", %%"REG_BP" \n\t"
+ ASMALIGN16
+ "1: \n\t"
+ "movzwl (%2, %%"REG_BP"), %%eax \n\t"
+ "movzwl 2(%2, %%"REG_BP"), %%ebx\n\t"
+ "movq (%1, %%"REG_BP", 4), %%mm1\n\t"
+ "movq 8(%1, %%"REG_BP", 4), %%mm3\n\t"
+ "movd (%3, %%"REG_a"), %%mm0 \n\t"
+ "movd (%3, %%"REG_b"), %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "pmaddwd %%mm1, %%mm0 \n\t"
+ "pmaddwd %%mm2, %%mm3 \n\t"
+ "psrad $8, %%mm0 \n\t"
+ "psrad $8, %%mm3 \n\t"
+ "packssdw %%mm3, %%mm0 \n\t"
+ "pmaddwd %%mm6, %%mm0 \n\t"
+ "packssdw %%mm0, %%mm0 \n\t"
+ "movd %%mm0, (%4, %%"REG_BP") \n\t"
+ "add $4, %%"REG_BP" \n\t"
+ " jnc 1b \n\t"
+
+ "pop %%"REG_BP" \n\t"
+ : "+a" (counter)
+ : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
+ : "%"REG_b
+ );
+ }
+ else if(filterSize==8)
+ {
+ stride_t counter= -2*dstW;
+ filter-= counter*4;
+ filterPos-= counter/2;
+ dst-= counter/2;
+ asm volatile(
+ "pxor %%mm7, %%mm7 \n\t"
+ "movq "MANGLE(w02)", %%mm6 \n\t"
+ "push %%"REG_BP" \n\t" // we use 7 regs here ...
+ "mov %%"REG_a", %%"REG_BP" \n\t"
+ ASMALIGN16
+ "1: \n\t"
+ "movzwl (%2, %%"REG_BP"), %%eax \n\t"
+ "movzwl 2(%2, %%"REG_BP"), %%ebx\n\t"
+ "movq (%1, %%"REG_BP", 8), %%mm1\n\t"
+ "movq 16(%1, %%"REG_BP", 8), %%mm3\n\t"
+ "movd (%3, %%"REG_a"), %%mm0 \n\t"
+ "movd (%3, %%"REG_b"), %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "pmaddwd %%mm1, %%mm0 \n\t"
+ "pmaddwd %%mm2, %%mm3 \n\t"
+
+ "movq 8(%1, %%"REG_BP", 8), %%mm1\n\t"
+ "movq 24(%1, %%"REG_BP", 8), %%mm5\n\t"
+ "movd 4(%3, %%"REG_a"), %%mm4 \n\t"
+ "movd 4(%3, %%"REG_b"), %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "pmaddwd %%mm1, %%mm4 \n\t"
+ "pmaddwd %%mm2, %%mm5 \n\t"
+ "paddd %%mm4, %%mm0 \n\t"
+ "paddd %%mm5, %%mm3 \n\t"
+
+ "psrad $8, %%mm0 \n\t"
+ "psrad $8, %%mm3 \n\t"
+ "packssdw %%mm3, %%mm0 \n\t"
+ "pmaddwd %%mm6, %%mm0 \n\t"
+ "packssdw %%mm0, %%mm0 \n\t"
+ "movd %%mm0, (%4, %%"REG_BP") \n\t"
+ "add $4, %%"REG_BP" \n\t"
+ " jnc 1b \n\t"
+
+ "pop %%"REG_BP" \n\t"
+ : "+a" (counter)
+ : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
+ : "%"REG_b
+ );
+ }
+ else
+ {
+ uint8_t *offset = src+filterSize;
+ stride_t counter= -2*dstW;
+// filter-= counter*filterSize/2;
+ filterPos-= counter/2;
+ dst-= counter/2;
+ asm volatile(
+ "pxor %%mm7, %%mm7 \n\t"
+ "movq "MANGLE(w02)", %%mm6 \n\t"
+ ASMALIGN16
+ "1: \n\t"
+ "mov %2, %%"REG_c" \n\t"
+ "movzwl (%%"REG_c", %0), %%eax \n\t"
+ "movzwl 2(%%"REG_c", %0), %%ebx \n\t"
+ "mov %5, %%"REG_c" \n\t"
+ "pxor %%mm4, %%mm4 \n\t"
+ "pxor %%mm5, %%mm5 \n\t"
+ "2: \n\t"
+ "movq (%1), %%mm1 \n\t"
+ "movq (%1, %6), %%mm3 \n\t"
+ "movd (%%"REG_c", %%"REG_a"), %%mm0\n\t"
+ "movd (%%"REG_c", %%"REG_b"), %%mm2\n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "pmaddwd %%mm1, %%mm0 \n\t"
+ "pmaddwd %%mm2, %%mm3 \n\t"
+ "paddd %%mm3, %%mm5 \n\t"
+ "paddd %%mm0, %%mm4 \n\t"
+ "add $8, %1 \n\t"
+ "add $4, %%"REG_c" \n\t"
+ "cmp %4, %%"REG_c" \n\t"
+ " jb 2b \n\t"
+ "add %6, %1 \n\t"
+ "psrad $8, %%mm4 \n\t"
+ "psrad $8, %%mm5 \n\t"
+ "packssdw %%mm5, %%mm4 \n\t"
+ "pmaddwd %%mm6, %%mm4 \n\t"
+ "packssdw %%mm4, %%mm4 \n\t"
+ "mov %3, %%"REG_a" \n\t"
+ "movd %%mm4, (%%"REG_a", %0) \n\t"
+ "add $4, %0 \n\t"
+ " jnc 1b \n\t"
+
+ : "+r" (counter), "+r" (filter)
+ : "m" (filterPos), "m" (dst), "m"(offset),
+ "m" (src), "r" (filterSize*2)
+ : "%"REG_b, "%"REG_a, "%"REG_c
+ );
+ }
+#else
+#ifdef HAVE_ALTIVEC
+ hScale_altivec_real(dst, dstW, src, srcW, xInc, filter, filterPos, filterSize);
+#else
+ int i;
+ for(i=0; i<dstW; i++)
+ {
+ int j;
+ int srcPos= filterPos[i];
+ int val=0;
+// printf("filterPos: %d\n", filterPos[i]);
+ for(j=0; j<filterSize; j++)
+ {
+// printf("filter: %d, src: %d\n", filter[i], src[srcPos + j]);
+ val += ((int)src[srcPos + j])*filter[filterSize*i + j];
+ }
+// filter += hFilterSize;
+ dst[i] = FFMIN(FFMAX(0, val>>7), (1<<15)-1); // the cubic equation does overflow ...
+// dst[i] = val>>7;
+ }
+#endif
+#endif
+}
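+
+/* For reference: each output sample is a small FIR over the source line,
+   dst[i] = clip((sum_j src[filterPos[i]+j] * filter[filterSize*i + j]) >> 7, 0, 32767),
+   so the horizontally scaled line keeps extra fractional precision for the
+   vertical pass. The MMX paths special-case filterSize 4 and 8 and run on a
+   negative counter with pre-biased pointers, so a single add/jnc both advances
+   the index and detects the end of the line. */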
+ // *** horizontal scale Y line to temp buffer
+static inline void RENAME(hyscale)(uint16_t *dst, long dstWidth, uint8_t *src, int srcW, int xInc,
+ int flags, int canMMX2BeUsed, int16_t *hLumFilter,
+ int16_t *hLumFilterPos, int hLumFilterSize, void *funnyYCode,
+ int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
+ int32_t *mmx2FilterPos)
+{
+ if(srcFormat==IMGFMT_YUY2 || srcFormat==IMGFMT_YVYU)
+ {
+ RENAME(yuy2ToY)(formatConvBuffer, src, srcW);
+ src= formatConvBuffer;
+ }
+ else if(srcFormat==IMGFMT_UYVY || srcFormat==IMGFMT_VYUY)
+ {
+ RENAME(uyvyToY)(formatConvBuffer, src, srcW);
+ src= formatConvBuffer;
+ }
+ else if(srcFormat==IMGFMT_BGR32)
+ {
+ RENAME(bgr32ToY)(formatConvBuffer, src, srcW);
+ src= formatConvBuffer;
+ }
+ else if(srcFormat==IMGFMT_BGR24)
+ {
+ RENAME(bgr24ToY)(formatConvBuffer, src, srcW);
+ src= formatConvBuffer;
+ }
+ else if(srcFormat==IMGFMT_BGR16)
+ {
+ RENAME(bgr16ToY)(formatConvBuffer, src, srcW);
+ src= formatConvBuffer;
+ }
+ else if(srcFormat==IMGFMT_BGR15)
+ {
+ RENAME(bgr15ToY)(formatConvBuffer, src, srcW);
+ src= formatConvBuffer;
+ }
+ else if(srcFormat==IMGFMT_RGB32)
+ {
+ RENAME(rgb32ToY)(formatConvBuffer, src, srcW);
+ src= formatConvBuffer;
+ }
+ else if(srcFormat==IMGFMT_RGB24)
+ {
+ RENAME(rgb24ToY)(formatConvBuffer, src, srcW);
+ src= formatConvBuffer;
+ }
+ else if(srcFormat==IMGFMT_RGB16)
+ {
+ RENAME(rgb16ToY)(formatConvBuffer, src, srcW);
+ src= formatConvBuffer;
+ }
+ else if(srcFormat==IMGFMT_RGB15)
+ {
+ RENAME(rgb15ToY)(formatConvBuffer, src, srcW);
+ src= formatConvBuffer;
+ }
+
+#if HAVE_MMX
+    // use the new MMX scaler if MMX2 can't be used (it's faster than the x86 asm one)
+ if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
+#else
+ if(!(flags&SWS_FAST_BILINEAR))
+#endif
+ {
+ RENAME(hScale)(dst, dstWidth, src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize);
+ }
+ else // Fast Bilinear upscale / crap downscale
+ {
+#if ARCH_X86_32 || ARCH_X86_64
+#if HAVE_MMX2
+ int i;
+ if(canMMX2BeUsed)
+ {
+ asm volatile(
+ "pxor %%mm7, %%mm7 \n\t"
+ "mov %0, %%"REG_c" \n\t"
+ "mov %1, %%"REG_D" \n\t"
+ "mov %2, %%"REG_d" \n\t"
+ "mov %3, %%"REG_b" \n\t"
+ "xor %%"REG_a", %%"REG_a" \n\t" // i
+ PREFETCH" (%%"REG_c") \n\t"
+ PREFETCH" 32(%%"REG_c") \n\t"
+ PREFETCH" 64(%%"REG_c") \n\t"
+
+#if ARCH_X86_64
+
+#define FUNNY_Y_CODE \
+ "movl (%%"REG_b"), %%esi \n\t"\
+ "call *%4 \n\t"\
+ "movl (%%"REG_b", %%"REG_a"), %%esi\n\t"\
+ "add %%"REG_S", %%"REG_c" \n\t"\
+ "add %%"REG_a", %%"REG_D" \n\t"\
+ "xor %%"REG_a", %%"REG_a" \n\t"\
+
+#else
+
+#define FUNNY_Y_CODE \
+ "movl (%%"REG_b"), %%esi \n\t"\
+ "call *%4 \n\t"\
+ "addl (%%"REG_b", %%"REG_a"), %%"REG_c"\n\t"\
+ "add %%"REG_a", %%"REG_D" \n\t"\
+ "xor %%"REG_a", %%"REG_a" \n\t"\
+
+#endif
+
+FUNNY_Y_CODE
+FUNNY_Y_CODE
+FUNNY_Y_CODE
+FUNNY_Y_CODE
+FUNNY_Y_CODE
+FUNNY_Y_CODE
+FUNNY_Y_CODE
+FUNNY_Y_CODE
+
+ :: "m" (src), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
+ "m" (funnyYCode)
+ : "%"REG_a, "%"REG_b, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
+ );
+ for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128;
+ }
+ else
+ {
+#endif
+ long xInc_shr16 = xInc >> 16;
+ uint16_t xInc_mask = xInc & 0xffff;
+ //NO MMX just normal asm ...
+ asm volatile(
+ "xor %%"REG_a", %%"REG_a" \n\t" // i
+ "xor %%"REG_b", %%"REG_b" \n\t" // xx
+ "xorl %%ecx, %%ecx \n\t" // 2*xalpha
+ ASMALIGN16
+ "1: \n\t"
+ "movzbl (%0, %%"REG_b"), %%edi \n\t" //src[xx]
+ "movzbl 1(%0, %%"REG_b"), %%esi \n\t" //src[xx+1]
+ "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
+ "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
+ "shll $16, %%edi \n\t"
+ "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
+ "mov %1, %%"REG_D" \n\t"
+ "shrl $9, %%esi \n\t"
+ "movw %%si, (%%"REG_D", %%"REG_a", 2)\n\t"
+ "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
+ "adc %3, %%"REG_b" \n\t" //xx+= xInc>>8 + carry
+
+ "movzbl (%0, %%"REG_b"), %%edi \n\t" //src[xx]
+ "movzbl 1(%0, %%"REG_b"), %%esi \n\t" //src[xx+1]
+ "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
+ "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
+ "shll $16, %%edi \n\t"
+ "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
+ "mov %1, %%"REG_D" \n\t"
+ "shrl $9, %%esi \n\t"
+ "movw %%si, 2(%%"REG_D", %%"REG_a", 2)\n\t"
+ "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
+ "adc %3, %%"REG_b" \n\t" //xx+= xInc>>8 + carry
+
+
+ "add $2, %%"REG_a" \n\t"
+ "cmp %2, %%"REG_a" \n\t"
+ " jb 1b \n\t"
+
+
+ :: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc_shr16), "m" (xInc_mask)
+ : "%"REG_a, "%"REG_b, "%ecx", "%"REG_D, "%esi"
+ );
+#if HAVE_MMX2
+ } //if MMX2 can't be used
+#endif
+#else
+ int i;
+ unsigned int xpos=0;
+ for(i=0;i<dstWidth;i++)
+ {
+ register unsigned int xx=xpos>>16;
+ register unsigned int xalpha=(xpos&0xFFFF)>>9;
+ dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha;
+ xpos+=xInc;
+ }
+#endif
+ }
+}
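+
+/* For reference: the fast-bilinear path walks the source in 16.16 fixed point.
+   xx = xpos>>16 selects the left source sample and xalpha = (xpos&0xFFFF)>>9
+   is a 7-bit blend weight, so dst[i] = (src[xx]<<7) + (src[xx+1]-src[xx])*xalpha
+   matches the <<7 scaling used by hScale(). E.g. with xInc = 0x8000 (2x
+   upscale) every odd output gets xalpha = 64, i.e. 64*(src[xx]+src[xx+1]), a
+   plain average. The asm variant keeps the fraction in %cx and uses "adc" to
+   fold the carry of the fractional add into the integer position. */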
+
+inline static void RENAME(hcscale)(uint16_t *dst, long dstWidth, uint8_t *src1, uint8_t *src2,
+ int srcW, int xInc, int flags, int canMMX2BeUsed, int16_t *hChrFilter,
+ int16_t *hChrFilterPos, int hChrFilterSize, void *funnyUVCode,
+ int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
+ int32_t *mmx2FilterPos)
+{
+ if(srcFormat==IMGFMT_YUY2)
+ {
+ RENAME(yuy2ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
+ src1= formatConvBuffer;
+ src2= formatConvBuffer+2048;
+ }
+ else if(srcFormat==IMGFMT_UYVY)
+ {
+ RENAME(uyvyToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
+ src1= formatConvBuffer;
+ src2= formatConvBuffer+2048;
+ }
+ else if(srcFormat==IMGFMT_YVYU)
+ {
+ RENAME(yuy2ToUV)(formatConvBuffer+2048, formatConvBuffer, src1, src2, srcW);
+ src1= formatConvBuffer;
+ src2= formatConvBuffer+2048;
+ }
+ else if(srcFormat==IMGFMT_VYUY)
+ {
+ RENAME(uyvyToUV)(formatConvBuffer+2048, formatConvBuffer, src1, src2, srcW);
+ src1= formatConvBuffer;
+ src2= formatConvBuffer+2048;
+ }
+ else if(srcFormat==IMGFMT_BGR32)
+ {
+ RENAME(bgr32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
+ src1= formatConvBuffer;
+ src2= formatConvBuffer+2048;
+ }
+ else if(srcFormat==IMGFMT_BGR24)
+ {
+ RENAME(bgr24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
+ src1= formatConvBuffer;
+ src2= formatConvBuffer+2048;
+ }
+ else if(srcFormat==IMGFMT_BGR16)
+ {
+ RENAME(bgr16ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
+ src1= formatConvBuffer;
+ src2= formatConvBuffer+2048;
+ }
+ else if(srcFormat==IMGFMT_BGR15)
+ {
+ RENAME(bgr15ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
+ src1= formatConvBuffer;
+ src2= formatConvBuffer+2048;
+ }
+ else if(srcFormat==IMGFMT_RGB32)
+ {
+ RENAME(rgb32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
+ src1= formatConvBuffer;
+ src2= formatConvBuffer+2048;
+ }
+ else if(srcFormat==IMGFMT_RGB24)
+ {
+ RENAME(rgb24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
+ src1= formatConvBuffer;
+ src2= formatConvBuffer+2048;
+ }
+ else if(srcFormat==IMGFMT_RGB16)
+ {
+ RENAME(rgb16ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
+ src1= formatConvBuffer;
+ src2= formatConvBuffer+2048;
+ }
+ else if(srcFormat==IMGFMT_RGB15)
+ {
+ RENAME(rgb15ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
+ src1= formatConvBuffer;
+ src2= formatConvBuffer+2048;
+ }
+ else if(srcFormat==IMGFMT_NV12)
+ {
+ RENAME(nv12ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
+ src1= formatConvBuffer;
+ src2= formatConvBuffer+2048;
+ }
+ else if(srcFormat==IMGFMT_NV21)
+ {
+ RENAME(nv21ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
+ src1= formatConvBuffer;
+ src2= formatConvBuffer+2048;
+ }
+ else if(isGray(srcFormat))
+ {
+ return;
+ }
+
+#if HAVE_MMX
+    // use the new MMX scaler if MMX2 can't be used (it's faster than the x86 asm one)
+ if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
+#else
+ if(!(flags&SWS_FAST_BILINEAR))
+#endif
+ {
+ RENAME(hScale)(dst , dstWidth, src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
+ if (src2) // NV12 does not have this.
+ RENAME(hScale)(dst+2048, dstWidth, src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
+ }
+ else // Fast Bilinear upscale / crap downscale
+ {
+#if ARCH_X86_32 || ARCH_X86_64
+#if HAVE_MMX2
+ int i;
+ if(canMMX2BeUsed)
+ {
+ asm volatile(
+ "pxor %%mm7, %%mm7 \n\t"
+ "mov %0, %%"REG_c" \n\t"
+ "mov %1, %%"REG_D" \n\t"
+ "mov %2, %%"REG_d" \n\t"
+ "mov %3, %%"REG_b" \n\t"
+ "xor %%"REG_a", %%"REG_a" \n\t" // i
+ PREFETCH" (%%"REG_c") \n\t"
+ PREFETCH" 32(%%"REG_c") \n\t"
+ PREFETCH" 64(%%"REG_c") \n\t"
+
+#if ARCH_X86_64
+
+#define FUNNY_UV_CODE \
+ "movl (%%"REG_b"), %%esi \n\t"\
+ "call *%4 \n\t"\
+ "movl (%%"REG_b", %%"REG_a"), %%esi\n\t"\
+ "add %%"REG_S", %%"REG_c" \n\t"\
+ "add %%"REG_a", %%"REG_D" \n\t"\
+ "xor %%"REG_a", %%"REG_a" \n\t"\
+
+#else
+
+#define FUNNY_UV_CODE \
+ "movl (%%"REG_b"), %%esi \n\t"\
+ "call *%4 \n\t"\
+ "addl (%%"REG_b", %%"REG_a"), %%"REG_c"\n\t"\
+ "add %%"REG_a", %%"REG_D" \n\t"\
+ "xor %%"REG_a", %%"REG_a" \n\t"\
+
+#endif
+
+FUNNY_UV_CODE
+FUNNY_UV_CODE
+FUNNY_UV_CODE
+FUNNY_UV_CODE
+ "xor %%"REG_a", %%"REG_a" \n\t" // i
+ "mov %5, %%"REG_c" \n\t" // src
+ "mov %1, %%"REG_D" \n\t" // buf1
+ "add $4096, %%"REG_D" \n\t"
+ PREFETCH" (%%"REG_c") \n\t"
+ PREFETCH" 32(%%"REG_c") \n\t"
+ PREFETCH" 64(%%"REG_c") \n\t"
+
+FUNNY_UV_CODE
+FUNNY_UV_CODE
+FUNNY_UV_CODE
+FUNNY_UV_CODE
+
+ :: "m" (src1), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
+ "m" (funnyUVCode), "m" (src2)
+ : "%"REG_a, "%"REG_b, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
+ );
+ for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--)
+ {
+// printf("%d %d %d\n", dstWidth, i, srcW);
+ dst[i] = src1[srcW-1]*128;
+ dst[i+2048] = src2[srcW-1]*128;
+ }
+ }
+ else
+ {
+#endif
+ long xInc_shr16 = (long) (xInc >> 16);
+ uint16_t xInc_mask = xInc & 0xffff;
+ asm volatile(
+ "xor %%"REG_a", %%"REG_a" \n\t" // i
+ "xor %%"REG_b", %%"REG_b" \n\t" // xx
+ "xorl %%ecx, %%ecx \n\t" // 2*xalpha
+ ASMALIGN16
+ "1: \n\t"
+ "mov %0, %%"REG_S" \n\t"
+ "movzbl (%%"REG_S", %%"REG_b"), %%edi \n\t" //src[xx]
+ "movzbl 1(%%"REG_S", %%"REG_b"), %%esi \n\t" //src[xx+1]
+ "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
+ "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
+ "shll $16, %%edi \n\t"
+ "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
+ "mov %1, %%"REG_D" \n\t"
+ "shrl $9, %%esi \n\t"
+ "movw %%si, (%%"REG_D", %%"REG_a", 2)\n\t"
+
+ "movzbl (%5, %%"REG_b"), %%edi \n\t" //src[xx]
+ "movzbl 1(%5, %%"REG_b"), %%esi \n\t" //src[xx+1]
+ "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
+ "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
+ "shll $16, %%edi \n\t"
+ "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
+ "mov %1, %%"REG_D" \n\t"
+ "shrl $9, %%esi \n\t"
+ "movw %%si, 4096(%%"REG_D", %%"REG_a", 2)\n\t"
+
+ "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
+ "adc %3, %%"REG_b" \n\t" //xx+= xInc>>8 + carry
+ "add $1, %%"REG_a" \n\t"
+ "cmp %2, %%"REG_a" \n\t"
+ " jb 1b \n\t"
+
+/* GCC 3.3 makes MPlayer crash on IA-32 machines when using "g" operand here,
+ which is needed to support GCC 4.0. */
+#if ARCH_X86_64 && ((__GNUC__ > 3) || ( __GNUC__ == 3 && __GNUC_MINOR__ >= 4))
+ :: "m" (src1), "m" (dst), "g" ((stride_t)dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
+#else
+ :: "m" (src1), "m" (dst), "m" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
+#endif
+ "r" (src2)
+ : "%"REG_a, "%"REG_b, "%ecx", "%"REG_D, "%esi"
+ );
+#if HAVE_MMX2
+ } //if MMX2 can't be used
+#endif
+#else
+ int i;
+ unsigned int xpos=0;
+ for(i=0;i<dstWidth;i++)
+ {
+ register unsigned int xx=xpos>>16;
+ register unsigned int xalpha=(xpos&0xFFFF)>>9;
+ dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
+ dst[i+2048]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
+/* slower
+ dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha;
+ dst[i+2048]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha;
+*/
+ xpos+=xInc;
+ }
+#endif
+ }
+}
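+
+/* For reference: chroma is handled as two planes packed into one buffer with a
+   fixed stride of 2048 samples: U goes to dst[0..] and V to dst[2048..] (the
+   "$4096" offsets in the asm are the same stride in bytes, since dst holds
+   int16_t). The packed-format converters above use the same convention with
+   formatConvBuffer and formatConvBuffer+2048. */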
+
+static int RENAME(swScaleI)(SwsContext *c, uint8_t* src[], stride_t srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], stride_t dstStride[], int dstYstart, int dstYend){
+
+    /* load a few things into local vars to make the code more readable and faster */
+ const int srcW= c->srcW;
+ const int dstW= c->dstW;
+ const int dstH= c->dstH;
+ const int chrDstW= c->chrDstW;
+ const int chrSrcW= c->chrSrcW;
+ const int lumXInc= c->lumXInc;
+ const int chrXInc= c->chrXInc;
+ const int dstFormat= c->dstFormat;
+ const int srcFormat= c->srcFormat;
+ const SwsParams params= c->params;
+ const int canMMX2BeUsed= c->canMMX2BeUsed;
+ int16_t *vLumFilterPos= c->vLumFilterPos;
+ int16_t *vChrFilterPos= c->vChrFilterPos;
+ int16_t *hLumFilterPos= c->hLumFilterPos;
+ int16_t *hChrFilterPos= c->hChrFilterPos;
+ int16_t *vLumFilter= c->vLumFilter;
+ int16_t *vChrFilter= c->vChrFilter;
+ int16_t *hLumFilter= c->hLumFilter;
+ int16_t *hChrFilter= c->hChrFilter;
+ int32_t *lumMmxFilter= c->lumMmxFilter;
+ int32_t *chrMmxFilter= c->chrMmxFilter;
+ const int vLumFilterSize= c->vLumFilterSize;
+ const int vChrFilterSize= c->vChrFilterSize;
+ const int hLumFilterSize= c->hLumFilterSize;
+ const int hChrFilterSize= c->hChrFilterSize;
+ int16_t **lumPixBuf= c->lumPixBuf;
+ int16_t **chrPixBuf= c->chrPixBuf;
+ const int vLumBufSize= c->vLumBufSize;
+ const int vChrBufSize= c->vChrBufSize;
+ uint8_t *funnyYCode= c->funnyYCode;
+ uint8_t *funnyUVCode= c->funnyUVCode;
+ uint8_t *formatConvBuffer= c->formatConvBuffer;
+ const int chrSrcSliceY= srcSliceY >> c->chrSrcVSubSample;
+ const int chrSrcSliceH= -((-srcSliceH) >> c->chrSrcVSubSample);
+ int lastDstY;
+
+ /* vars which will change and which we need to store back in the context */
+ int dstY= dstYstart;
+ int lumBufIndex= c->lumBufIndex;
+ int chrBufIndex= c->chrBufIndex;
+ int lastInLumBuf= c->lastInLumBuf;
+ int lastInChrBuf= c->lastInChrBuf;
+
+ if(isPacked(c->srcFormat)){
+ src[0]=
+ src[1]=
+ src[2]= src[0];
+ srcStride[0]=
+ srcStride[1]=
+ srcStride[2]= srcStride[0];
+ }
+ srcStride[1]<<= c->vChrDrop;
+ srcStride[2]<<= c->vChrDrop;
+
+// printf("swscale %X %X %X -> %X %X %X\n", (int)src[0], (int)src[1], (int)src[2],
+// (int)dst[0], (int)dst[1], (int)dst[2]);
+
+#if 0 //self test FIXME move to a vfilter or something
+{
+static volatile int i=0;
+i++;
+if(srcFormat==IMGFMT_YV12 && i==1 && srcSliceH>= c->srcH)
+ selfTest(src, srcStride, c->srcW, c->srcH);
+i--;
+}
+#endif
+
+//printf("sws Strides:%d %d %d -> %d %d %d\n", srcStride[0],srcStride[1],srcStride[2],
+//dstStride[0],dstStride[1],dstStride[2]);
+
+ if(dstStride[0]%8 !=0 || dstStride[1]%8 !=0 || dstStride[2]%8 !=0)
+ {
+ static int firstTime=1; //FIXME move this into the context perhaps
+ if(params.debug & SWS_PRINT_INFO && firstTime)
+ {
+            MSG_WARN("SwScaler: Warning: dstStride is not aligned!\n"
+                "SwScaler: ->cannot do aligned memory accesses anymore\n");
+ firstTime=0;
+ }
+ }
+
+    /* Note: the user might start scaling the picture in the middle, so this will not get executed.
+       This is not really intended, but it currently works, so people might rely on it. */
+ if(srcSliceY ==0){
+ lumBufIndex=0;
+ chrBufIndex=0;
+ // dstY=0; moved to RENAME(swScale)
+ lastInLumBuf= -1;
+ lastInChrBuf= -1;
+ }
+ lastDstY= dstY;
+
+ if(vLumFilterPos[dstYend]<srcSliceY || vLumFilterPos[dstYstart]>srcSliceY+srcSliceH){
+ goto ret0;
+ }
+
+ for(;dstY < dstYend; dstY++){
+ unsigned char *dest =dst[0]+dstStride[0]*dstY;
+ const int chrDstY= dstY>>c->chrDstVSubSample;
+ unsigned char *uDest=dst[1]+dstStride[1]*chrDstY;
+ unsigned char *vDest=dst[2]+dstStride[2]*chrDstY;
+
+ const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input
+ const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input
+ const int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input
+ const int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input
+
+//printf("dstY:%d dstH:%d firstLumSrcY:%d lastInLumBuf:%d vLumBufSize: %d vChrBufSize: %d slice: %d %d vLumFilterSize: %d firstChrSrcY: %d vChrFilterSize: %d c->chrSrcVSubSample: %d\n",
+// dstY, dstH, firstLumSrcY, lastInLumBuf, vLumBufSize, vChrBufSize, srcSliceY, srcSliceH, vLumFilterSize, firstChrSrcY, vChrFilterSize, c->chrSrcVSubSample);
+ //handle holes (FAST_BILINEAR & weird filters)
+ if(firstLumSrcY > lastInLumBuf) lastInLumBuf= firstLumSrcY-1;
+ if(firstChrSrcY > lastInChrBuf) lastInChrBuf= firstChrSrcY-1;
+//printf("%d %d %d\n", firstChrSrcY, lastInChrBuf, vChrBufSize);
+ ASSERT(firstLumSrcY >= lastInLumBuf - vLumBufSize + 1)
+ ASSERT(firstChrSrcY >= lastInChrBuf - vChrBufSize + 1)
+
+ // Do we have enough lines in this slice to output the dstY line
+ if(lastLumSrcY < srcSliceY + srcSliceH && lastChrSrcY < -((-srcSliceY - srcSliceH)>>c->chrSrcVSubSample))
+ {
+ //Do horizontal scaling
+ while(lastInLumBuf < lastLumSrcY)
+ {
+ uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
+ lumBufIndex++;
+// printf("%d %d %d %d\n", lumBufIndex, vLumBufSize, lastInLumBuf, lastLumSrcY);
+ ASSERT(lumBufIndex < 2*vLumBufSize)
+ ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH)
+ ASSERT(lastInLumBuf + 1 - srcSliceY >= 0)
+// printf("%d %d\n", lumBufIndex, vLumBufSize);
+ RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
+ params.methodLuma.method, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
+ funnyYCode, c->srcFormat, formatConvBuffer,
+ c->lumMmx2Filter, c->lumMmx2FilterPos);
+ lastInLumBuf++;
+ }
+ while(lastInChrBuf < lastChrSrcY)
+ {
+ uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
+ uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
+ chrBufIndex++;
+ ASSERT(chrBufIndex < 2*vChrBufSize)
+ ASSERT(lastInChrBuf + 1 - chrSrcSliceY < (chrSrcSliceH))
+ ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0)
+ //FIXME replace parameters through context struct (some at least)
+
+ if(!(isGray(srcFormat) || isGray(dstFormat)))
+ RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
+ params.methodChroma.method, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
+ funnyUVCode, c->srcFormat, formatConvBuffer,
+ c->chrMmx2Filter, c->chrMmx2FilterPos);
+ lastInChrBuf++;
+ }
+ //wrap buf index around to stay inside the ring buffer
+ if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize;
+ if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize;
+ }
+ else // not enough lines left in this slice -> load the rest in the buffer
+ {
+/* printf("%d %d Last:%d %d LastInBuf:%d %d Index:%d %d Y:%d FSize: %d %d BSize: %d %d\n",
+ firstChrSrcY,firstLumSrcY,lastChrSrcY,lastLumSrcY,
+ lastInChrBuf,lastInLumBuf,chrBufIndex,lumBufIndex,dstY,vChrFilterSize,vLumFilterSize,
+ vChrBufSize, vLumBufSize);*/
+
+ //Do horizontal scaling
+ while(lastInLumBuf+1 < srcSliceY + srcSliceH)
+ {
+ uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
+ lumBufIndex++;
+ ASSERT(lumBufIndex < 2*vLumBufSize)
+ ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH)
+ ASSERT(lastInLumBuf + 1 - srcSliceY >= 0)
+ RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
+ params.methodLuma.method, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
+ funnyYCode, c->srcFormat, formatConvBuffer,
+ c->lumMmx2Filter, c->lumMmx2FilterPos);
+ lastInLumBuf++;
+ }
+ while(lastInChrBuf+1 < (chrSrcSliceY + chrSrcSliceH))
+ {
+ uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
+ uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
+ chrBufIndex++;
+ ASSERT(chrBufIndex < 2*vChrBufSize)
+ ASSERT(lastInChrBuf + 1 - chrSrcSliceY < chrSrcSliceH)
+ ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0)
+
+ if(!(isGray(srcFormat) || isGray(dstFormat)))
+ RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
+ params.methodChroma.method, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
+ funnyUVCode, c->srcFormat, formatConvBuffer,
+ c->chrMmx2Filter, c->chrMmx2FilterPos);
+ lastInChrBuf++;
+ }
+ //wrap buf index around to stay inside the ring buffer
+ if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize;
+ if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize;
+ break; //we can't output a dstY line so let's try with the next slice
+ }
+
+#if HAVE_MMX
+ b5Dither= dither8[dstY&1];
+ g6Dither= dither4[dstY&1];
+ g5Dither= dither8[dstY&1];
+ r5Dither= dither8[(dstY+1)&1];
+#endif
+ if(dstY < dstH-2)
+ {
+ int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
+ int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
+#if HAVE_MMX
+ int i;
+ if(params.subsampling & SWS_ACCURATE_RND){
+ int s= APCK_SIZE / 8;
+ for(i=0; i<vLumFilterSize; i+=2){
+ *(void**)&lumMmxFilter[s*i+0 ]= lumSrcPtr[i ];
+ *(void**)&lumMmxFilter[s*i+APCK_PTR2/4 ]= lumSrcPtr[i+(vLumFilterSize>1)];
+ lumMmxFilter[s*i+APCK_COEF/4 ]=
+ lumMmxFilter[s*i+APCK_COEF/4+1]= vLumFilter[dstY*vLumFilterSize + i ]
+ + (vLumFilterSize>1 ? vLumFilter[dstY*vLumFilterSize + i + 1]<<16 : 0);
+ }
+ for(i=0; i<vChrFilterSize; i+=2){
+ *(void**)&chrMmxFilter[s*i+0 ]= chrSrcPtr[i ];
+ *(void**)&chrMmxFilter[s*i+APCK_PTR2/4 ]= chrSrcPtr[i+(vChrFilterSize>1)];
+ chrMmxFilter[s*i+APCK_COEF/4 ]=
+ chrMmxFilter[s*i+APCK_COEF/4+1]= vChrFilter[chrDstY*vChrFilterSize + i ]
+ + (vChrFilterSize>1 ? vChrFilter[chrDstY*vChrFilterSize + i + 1]<<16 : 0);
+ }
+ }else{
+ for(i=0; i<vLumFilterSize; i++)
+ {
+ lumMmxFilter[4*i+0]= (int32_t)lumSrcPtr[i];
+ lumMmxFilter[4*i+2]=
+ lumMmxFilter[4*i+3]=
+ ((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001;
+ }
+ for(i=0; i<vChrFilterSize; i++)
+ {
+ chrMmxFilter[4*i+0]= (int32_t)chrSrcPtr[i];
+ chrMmxFilter[4*i+2]=
+ chrMmxFilter[4*i+3]=
+ ((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001;
+ }
+ }
+#endif
+ if(dstFormat == IMGFMT_NV12 || dstFormat == IMGFMT_NV21){
+ const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
+ if(dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi
+ RENAME(yuv2nv12X)(c,
+ vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
+ vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
+ dest, /*uDest*/dstY&chrSkipMask?NULL:dst[1]+dstStride[1]*chrDstY, dstW, chrDstW, dstFormat);
+ }
+ else if(isPlanarYUV(dstFormat) || isGray(dstFormat)) //YV12 like
+ {
+ const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
+ if((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
+ if(vLumFilterSize == 1 && vChrFilterSize == 1) // Unscaled YV12
+ {
+ int16_t *lumBuf = lumPixBuf[0];
+ int16_t *chrBuf= chrPixBuf[0];
+ RENAME(yuv2yuv1)(lumBuf, chrBuf, dest, uDest, vDest, dstW, chrDstW);
+ }
+ else //General YV12
+ {
+ RENAME(yuv2yuvX)(c,
+ vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
+ vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
+ dest, uDest, vDest, dstW, chrDstW);
+ }
+ }
+ else
+ {
+ ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
+ ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
+ if(vLumFilterSize == 1 && vChrFilterSize == 2) //Unscaled RGB
+ {
+ int chrAlpha= vChrFilter[2*dstY+1];
+ RENAME(yuv2packed1)(c, *lumSrcPtr, *chrSrcPtr, *(chrSrcPtr+1),
+ dest, dstW, chrAlpha, dstFormat, params.v_chr_drop, dstY);
+ }
+ else if(vLumFilterSize == 2 && vChrFilterSize == 2) //BiLinear Upscale RGB
+ {
+ int lumAlpha= vLumFilter[2*dstY+1];
+ int chrAlpha= vChrFilter[2*dstY+1];
+ lumMmxFilter[2]=
+ lumMmxFilter[3]= vLumFilter[2*dstY ]*0x10001;
+ chrMmxFilter[2]=
+ chrMmxFilter[3]= vChrFilter[2*chrDstY]*0x10001;
+ RENAME(yuv2packed2)(c, *lumSrcPtr, *(lumSrcPtr+1), *chrSrcPtr, *(chrSrcPtr+1),
+ dest, dstW, lumAlpha, chrAlpha, dstY);
+ }
+ else //General RGB
+ {
+ RENAME(yuv2packedX)(c,
+ vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
+ vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
+ dest, dstW, dstY);
+ }
+ }
+ }
+ else // hmm looks like we can't use MMX here without overwriting this array's tail
+ {
+ int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
+ int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
+ if(dstFormat == IMGFMT_NV12 || dstFormat == IMGFMT_NV21){
+ const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
+ if(dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi
+ yuv2nv12XinC(
+ vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
+ vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
+ dest, /*uDest*/dstY&chrSkipMask?NULL:dst[1]+dstStride[1]*chrDstY, dstW, chrDstW, dstFormat);
+ }
+ else if(isPlanarYUV(dstFormat) || isGray(dstFormat)) //YV12
+ {
+ const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
+ if((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
+ yuv2yuvXinC(
+ vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
+ vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
+ dest, uDest, vDest, dstW, chrDstW);
+ }
+ else
+ {
+ ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
+ ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
+ yuv2packedXinC(c,
+ vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
+ vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
+ dest, dstW, dstY);
+ }
+ }
+ }
+
+#if HAVE_MMX
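+ /* SFENCE/EMMS expand to "sfence"/"emms" (or harmless substitutes) depending on
+ the CPU configuration: sfence makes the non-temporal MOVNTQ stores issued by
+ the output loops globally visible, and emms clears the MMX state so later x87
+ floating-point code is safe. */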
+ __asm __volatile(SFENCE:::"memory");
+ __asm __volatile(EMMS:::"memory");
+#endif
+ret0:
+ /* store changed local vars back in the context */
+ c->dstY= dstY;
+ c->lumBufIndex= lumBufIndex;
+ c->chrBufIndex= chrBufIndex;
+ c->lastInLumBuf= lastInLumBuf;
+ c->lastInChrBuf= lastInChrBuf;
+ return dstY - lastDstY;
+}
+
+int RENAME(sws_thread_work)(SwsContext *c) // Thread func
+{
+ SwsThreadParam *stp= &c->stp;
+ return RENAME(swScaleI)(c, stp->src, stp->srcStride, stp->srcSliceY,
+ stp->srcSliceH, stp->dst, stp->dstStride, stp->dstYstart, stp->dstYend);
+}
+
+static int RENAME(swScale)(SwsContext *c, uint8_t* src[], stride_t srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], stride_t dstStride[])
+{
+ int dstLines;
+ int i;
+ int lastDstY;
+ int processedLines=0;
+
+ if(srcSliceY==0) c->dstY= 0;
+
+ if (c->thread_count==1)
+ {
+ return RENAME(swScaleI)(c,src,srcStride,srcSliceY,
+ srcSliceH,dst,dstStride,c->dstY,c->dstH);
+ }
+ else
+ {
+ lastDstY= c->dstY;
+ c[0].stp.dstYstart= c->dstY;
+ dstLines= (c->dstH-c->dstY)/c->thread_count;
+ c[0].stp.dstYend= c->dstY + dstLines;
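+ /* Hand each thread context an equal band of dstLines output rows; the last
+ context's dstYend is widened to dstH below so the division remainder is not
+ lost. */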
+ for (i=0; i<c->thread_count; i++){
+ c[i].stp.c= &c[i];
+ c[i].stp.src= src;
+ c[i].stp.srcStride= srcStride;
+ c[i].stp.srcSliceY= srcSliceY;
+ c[i].stp.srcSliceH= srcSliceH;
+ c[i].stp.dst= dst;
+ c[i].stp.dstStride= dstStride;
+ c[i].stp.dstYstart= c[0].stp.dstYstart+dstLines*i;// +2*i; //(+2*i makes a green or black line in the middle of the screen; test hook to visualize multithreading)
+ c[i].stp.dstYend = c[0].stp.dstYend+ dstLines*i;
+ }
+ c[c->thread_count-1].stp.dstYend= c->dstH;
+ c->execute(c, RENAME(sws_thread_work), c->ret, c->thread_count);
+ for (i=0; i<c->thread_count; i++){
+ processedLines +=c->ret[i];
+ }
+ c->dstY= lastDstY+processedLines;
+ return processedLines;
+ }
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/x86/yuv2rgb_mmx.c b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/x86/yuv2rgb_mmx.c
new file mode 100644
index 000000000..b03cce2ae
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/x86/yuv2rgb_mmx.c
@@ -0,0 +1,99 @@
+/*
+ * software YUV to RGB converter
+ *
+ * Copyright (C) 2009 Konstantin Shishkov
+ *
+ * MMX/MMX2 template stuff (needed for fast movntq support),
+ * 1,4,8bpp support and context / deglobalize stuff
+ * by Michael Niedermayer (michaelni@gmx.at)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <inttypes.h>
+#include <assert.h>
+
+#include "config.h"
+#include "libswscale/rgb2rgb.h"
+#include "libswscale/swscale.h"
+#include "libswscale/swscale_internal.h"
+#include "libavutil/x86_cpu.h"
+
+#define DITHER1XBPP // only for MMX
+
+/* hope these constant values are cache line aligned */
+DECLARE_ASM_CONST(8, uint64_t, mmx_00ffw) = 0x00ff00ff00ff00ffULL;
+DECLARE_ASM_CONST(8, uint64_t, mmx_redmask) = 0xf8f8f8f8f8f8f8f8ULL;
+DECLARE_ASM_CONST(8, uint64_t, mmx_grnmask) = 0xfcfcfcfcfcfcfcfcULL;
+
+//MMX versions
+#undef RENAME
+#undef HAVE_MMX2
+#undef HAVE_AMD3DNOW
+#define HAVE_MMX2 0
+#define HAVE_AMD3DNOW 0
+#define RENAME(a) a ## _MMX
+#include "yuv2rgb_template.c"
+
+//MMX2 versions
+#undef RENAME
+#undef HAVE_MMX2
+#define HAVE_MMX2 1
+#define RENAME(a) a ## _MMX2
+#include "yuv2rgb_template.c"
+
+SwsFunc ff_yuv2rgb_init_mmx(SwsContext *c)
+{
+ if (c->flags & SWS_CPU_CAPS_MMX2) {
+ switch (c->dstFormat) {
+ case PIX_FMT_RGB32:
+ if (CONFIG_SWSCALE_ALPHA && c->srcFormat == PIX_FMT_YUVA420P) {
+ if (HAVE_7REGS) return yuva420_rgb32_MMX2;
+ break;
+ } else return yuv420_rgb32_MMX2;
+ case PIX_FMT_BGR32:
+ if (CONFIG_SWSCALE_ALPHA && c->srcFormat == PIX_FMT_YUVA420P) {
+ if (HAVE_7REGS) return yuva420_bgr32_MMX2;
+ break;
+ } else return yuv420_bgr32_MMX2;
+ case PIX_FMT_BGR24: return yuv420_rgb24_MMX2;
+ case PIX_FMT_RGB565: return yuv420_rgb16_MMX2;
+ case PIX_FMT_RGB555: return yuv420_rgb15_MMX2;
+ }
+ }
+ if (c->flags & SWS_CPU_CAPS_MMX) {
+ switch (c->dstFormat) {
+ case PIX_FMT_RGB32:
+ if (CONFIG_SWSCALE_ALPHA && c->srcFormat == PIX_FMT_YUVA420P) {
+ if (HAVE_7REGS) return yuva420_rgb32_MMX;
+ break;
+ } else return yuv420_rgb32_MMX;
+ case PIX_FMT_BGR32:
+ if (CONFIG_SWSCALE_ALPHA && c->srcFormat == PIX_FMT_YUVA420P) {
+ if (HAVE_7REGS) return yuva420_bgr32_MMX;
+ break;
+ } else return yuv420_bgr32_MMX;
+ case PIX_FMT_BGR24: return yuv420_rgb24_MMX;
+ case PIX_FMT_RGB565: return yuv420_rgb16_MMX;
+ case PIX_FMT_RGB555: return yuv420_rgb15_MMX;
+ }
+ }
+
+ return NULL;
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/x86/yuv2rgb_template.c b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/x86/yuv2rgb_template.c
new file mode 100644
index 000000000..994480d14
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/x86/yuv2rgb_template.c
@@ -0,0 +1,545 @@
+/*
+ * yuv2rgb_mmx.c, software YUV to RGB converter with Intel MMX "technology"
+ *
+ * Copyright (C) 2000, Silicon Integrated System Corp
+ *
+ * Author: Olie Lho <ollie@sis.com.tw>
+ *
+ * 15,24 bpp and dithering from Michael Niedermayer (michaelni@gmx.at)
+ * MMX/MMX2 Template stuff from Michael Niedermayer (needed for fast movntq support)
+ * context / deglobalize stuff by Michael Niedermayer
+ *
+ * This file is part of mpeg2dec, a free MPEG-2 video decoder
+ *
+ * mpeg2dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * mpeg2dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with mpeg2dec; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#undef MOVNTQ
+#undef EMMS
+#undef SFENCE
+
+#if HAVE_AMD3DNOW
+/* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
+#define EMMS "femms"
+#else
+#define EMMS "emms"
+#endif
+
+#if HAVE_MMX2
+#define MOVNTQ "movntq"
+#define SFENCE "sfence"
+#else
+#define MOVNTQ "movq"
+#define SFENCE "/nop"
+#endif
+
+#define YUV2RGB \
+ /* Do the multiply part of the conversion for even and odd pixels,
+ register usage:
+ mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
+ mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd pixels,
+ mm6 -> Y even, mm7 -> Y odd */\
+ /* convert the chroma part */\
+ "punpcklbw %%mm4, %%mm0;" /* scatter 4 Cb 00 u3 00 u2 00 u1 00 u0 */ \
+ "punpcklbw %%mm4, %%mm1;" /* scatter 4 Cr 00 v3 00 v2 00 v1 00 v0 */ \
+\
+ "psllw $3, %%mm0;" /* Promote precision */ \
+ "psllw $3, %%mm1;" /* Promote precision */ \
+\
+ "psubsw "U_OFFSET"(%4), %%mm0;" /* Cb -= 128 */ \
+ "psubsw "V_OFFSET"(%4), %%mm1;" /* Cr -= 128 */ \
+\
+ "movq %%mm0, %%mm2;" /* Copy 4 Cb 00 u3 00 u2 00 u1 00 u0 */ \
+ "movq %%mm1, %%mm3;" /* Copy 4 Cr 00 v3 00 v2 00 v1 00 v0 */ \
+\
+ "pmulhw "UG_COEFF"(%4), %%mm2;" /* Mul Cb with green coeff -> Cb green */ \
+ "pmulhw "VG_COEFF"(%4), %%mm3;" /* Mul Cr with green coeff -> Cr green */ \
+\
+ "pmulhw "UB_COEFF"(%4), %%mm0;" /* Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0 */\
+ "pmulhw "VR_COEFF"(%4), %%mm1;" /* Mul Cr -> Cred 00 r3 00 r2 00 r1 00 r0 */\
+\
+ "paddsw %%mm3, %%mm2;" /* Cb green + Cr green -> Cgreen */\
+\
+ /* convert the luma part */\
+ "movq %%mm6, %%mm7;" /* Copy 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */\
+ "pand "MANGLE(mmx_00ffw)", %%mm6;" /* get Y even 00 Y6 00 Y4 00 Y2 00 Y0 */\
+\
+ "psrlw $8, %%mm7;" /* get Y odd 00 Y7 00 Y5 00 Y3 00 Y1 */\
+\
+ "psllw $3, %%mm6;" /* Promote precision */\
+ "psllw $3, %%mm7;" /* Promote precision */\
+\
+ "psubw "Y_OFFSET"(%4), %%mm6;" /* Y -= 16 */\
+ "psubw "Y_OFFSET"(%4), %%mm7;" /* Y -= 16 */\
+\
+ "pmulhw "Y_COEFF"(%4), %%mm6;" /* Mul 4 Y even 00 y6 00 y4 00 y2 00 y0 */\
+ "pmulhw "Y_COEFF"(%4), %%mm7;" /* Mul 4 Y odd 00 y7 00 y5 00 y3 00 y1 */\
+\
+ /* Do the addition part of the conversion for even and odd pixels,
+ register usage:
+ mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
+ mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd pixels,
+ mm6 -> Y even, mm7 -> Y odd */\
+ "movq %%mm0, %%mm3;" /* Copy Cblue */\
+ "movq %%mm1, %%mm4;" /* Copy Cred */\
+ "movq %%mm2, %%mm5;" /* Copy Cgreen */\
+\
+ "paddsw %%mm6, %%mm0;" /* Y even + Cblue 00 B6 00 B4 00 B2 00 B0 */\
+ "paddsw %%mm7, %%mm3;" /* Y odd + Cblue 00 B7 00 B5 00 B3 00 B1 */\
+\
+ "paddsw %%mm6, %%mm1;" /* Y even + Cred 00 R6 00 R4 00 R2 00 R0 */\
+ "paddsw %%mm7, %%mm4;" /* Y odd + Cred 00 R7 00 R5 00 R3 00 R1 */\
+\
+ "paddsw %%mm6, %%mm2;" /* Y even + Cgreen 00 G6 00 G4 00 G2 00 G0 */\
+ "paddsw %%mm7, %%mm5;" /* Y odd + Cgreen 00 G7 00 G5 00 G3 00 G1 */\
+\
+ /* Limit RGB even to 0..255 */\
+ "packuswb %%mm0, %%mm0;" /* B6 B4 B2 B0 B6 B4 B2 B0 */\
+ "packuswb %%mm1, %%mm1;" /* R6 R4 R2 R0 R6 R4 R2 R0 */\
+ "packuswb %%mm2, %%mm2;" /* G6 G4 G2 G0 G6 G4 G2 G0 */\
+\
+ /* Limit RGB odd to 0..255 */\
+ "packuswb %%mm3, %%mm3;" /* B7 B5 B3 B1 B7 B5 B3 B1 */\
+ "packuswb %%mm4, %%mm4;" /* R7 R5 R3 R1 R7 R5 R3 R1 */\
+ "packuswb %%mm5, %%mm5;" /* G7 G5 G3 G1 G7 G5 G3 G1 */\
+\
+ /* Interleave RGB even and odd */\
+ "punpcklbw %%mm3, %%mm0;" /* B7 B6 B5 B4 B3 B2 B1 B0 */\
+ "punpcklbw %%mm4, %%mm1;" /* R7 R6 R5 R4 R3 R2 R1 R0 */\
+ "punpcklbw %%mm5, %%mm2;" /* G7 G6 G5 G4 G3 G2 G1 G0 */\
+
+
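+/*
+ In scalar terms, the YUV2RGB macro above computes per pixel roughly:
+
+ B = clip( (Y - 16)*Y_COEFF + (Cb - 128)*UB_COEFF )
+ R = clip( (Y - 16)*Y_COEFF + (Cr - 128)*VR_COEFF )
+ G = clip( (Y - 16)*Y_COEFF + (Cb - 128)*UG_COEFF + (Cr - 128)*VG_COEFF )
+
+ in saturating 16-bit fixed point: the psllw $3 steps scale the inputs up so the
+ pmulhw high-half multiplies keep enough precision, and even/odd pixels are kept
+ in separate registers and interleaved at the end. This is a reading of the asm
+ comments above, not a normative formula.
+*/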
+#define YUV422_UNSHIFT \
+ if(c->srcFormat == PIX_FMT_YUV422P) {\
+ srcStride[1] *= 2; \
+ srcStride[2] *= 2; \
+ } \
+
+#define YUV2RGB_LOOP(depth) \
+ h_size= (c->dstW+7)&~7; \
+ if(h_size*depth > FFABS(dstStride[0])) h_size-=8; \
+\
+ __asm__ volatile ("pxor %mm4, %mm4;" /* zero mm4 */ ); \
+ for (y= 0; y<srcSliceH; y++ ) { \
+ uint8_t *image = dst[0] + (y+srcSliceY)*dstStride[0]; \
+ uint8_t *py = src[0] + y*srcStride[0]; \
+ uint8_t *pu = src[1] + (y>>1)*srcStride[1]; \
+ uint8_t *pv = src[2] + (y>>1)*srcStride[2]; \
+ x86_reg index= -h_size/2; \
+
+#define YUV2RGB_INIT \
+ /* This MMX assembly code deals with a SINGLE scan line at a time, \
+ * it converts 8 pixels in each iteration. */ \
+ __asm__ volatile ( \
+ /* load data for start of next scan line */ \
+ "movd (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */ \
+ "movd (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */ \
+ "movq (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */ \
+ /* \
+ ".balign 16 \n\t" \
+ */ \
+ "1: \n\t" \
+ /* No speed difference on my p3@500 with prefetch, \
+ * if it is faster for anyone with -benchmark then tell me. \
+ PREFETCH" 64(%0) \n\t" \
+ PREFETCH" 64(%1) \n\t" \
+ PREFETCH" 64(%2) \n\t" \
+ */ \
+
+#define YUV2RGB_ENDLOOP(depth) \
+ "add $"AV_STRINGIFY(depth*8)", %1 \n\t" \
+ "add $4, %0 \n\t" \
+ " js 1b \n\t" \
+
+#define YUV2RGB_OPERANDS \
+ : "+r" (index), "+r" (image) \
+ : "r" (pu - index), "r" (pv - index), "r"(&c->redDither), "r" (py - 2*index) \
+ ); \
+ } \
+ __asm__ volatile (EMMS); \
+ return srcSliceH; \
+
+#define YUV2RGB_OPERANDS_ALPHA \
+ : "+r" (index), "+r" (image) \
+ : "r" (pu - index), "r" (pv - index), "r"(&c->redDither), "r" (py - 2*index), "r" (pa - 2*index) \
+ ); \
+ } \
+ __asm__ volatile (EMMS); \
+ return srcSliceH; \
+
+static inline int RENAME(yuv420_rgb16)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[])
+{
+ int y, h_size;
+
+ YUV422_UNSHIFT
+ YUV2RGB_LOOP(2)
+
+ c->blueDither= ff_dither8[y&1];
+ c->greenDither= ff_dither4[y&1];
+ c->redDither= ff_dither8[(y+1)&1];
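+ /* RGB565 keeps 5 bits of blue/red and 6 bits of green, so blue and red take
+ their dither bias from ff_dither8 (3 dropped bits) and green from ff_dither4
+ (2 dropped bits), alternating the pattern per scan line. */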
+
+ YUV2RGB_INIT
+ YUV2RGB
+
+#ifdef DITHER1XBPP
+ "paddusb "BLUE_DITHER"(%4), %%mm0;"
+ "paddusb "GREEN_DITHER"(%4), %%mm2;"
+ "paddusb "RED_DITHER"(%4), %%mm1;"
+#endif
+ /* mask unneeded bits off */
+ "pand "MANGLE(mmx_redmask)", %%mm0;" /* b7b6b5b4 b3_0_0_0 b7b6b5b4 b3_0_0_0 */
+ "pand "MANGLE(mmx_grnmask)", %%mm2;" /* g7g6g5g4 g3g2_0_0 g7g6g5g4 g3g2_0_0 */
+ "pand "MANGLE(mmx_redmask)", %%mm1;" /* r7r6r5r4 r3_0_0_0 r7r6r5r4 r3_0_0_0 */
+
+ "psrlw $3, %%mm0;" /* 0_0_0_b7 b6b5b4b3 0_0_0_b7 b6b5b4b3 */
+ "pxor %%mm4, %%mm4;" /* zero mm4 */
+
+ "movq %%mm0, %%mm5;" /* Copy B7-B0 */
+ "movq %%mm2, %%mm7;" /* Copy G7-G0 */
+
+ /* convert RGB24 plane to RGB16 pack for pixel 0-3 */
+ "punpcklbw %%mm4, %%mm2;" /* 0_0_0_0 0_0_0_0 g7g6g5g4 g3g2_0_0 */
+ "punpcklbw %%mm1, %%mm0;" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */
+
+ "psllw $3, %%mm2;" /* 0_0_0_0 0_g7g6g5 g4g3g2_0 0_0_0_0 */
+ "por %%mm2, %%mm0;" /* r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 */
+
+ "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+ MOVNTQ " %%mm0, (%1);" /* store pixel 0-3 */
+
+ /* convert RGB24 plane to RGB16 pack for pixel 4-7 */
+ "punpckhbw %%mm4, %%mm7;" /* 0_0_0_0 0_0_0_0 g7g6g5g4 g3g2_0_0 */
+ "punpckhbw %%mm1, %%mm5;" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */
+
+ "psllw $3, %%mm7;" /* 0_0_0_0 0_g7g6g5 g4g3g2_0 0_0_0_0 */
+ "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+
+ "por %%mm7, %%mm5;" /* r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 */
+ "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+
+ MOVNTQ " %%mm5, 8 (%1);" /* store pixel 4-7 */
+
+ YUV2RGB_ENDLOOP(2)
+ YUV2RGB_OPERANDS
+}
+
+static inline int RENAME(yuv420_rgb15)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[])
+{
+ int y, h_size;
+
+ YUV422_UNSHIFT
+ YUV2RGB_LOOP(2)
+
+ c->blueDither= ff_dither8[y&1];
+ c->greenDither= ff_dither8[y&1];
+ c->redDither= ff_dither8[(y+1)&1];
+
+ YUV2RGB_INIT
+ YUV2RGB
+
+#ifdef DITHER1XBPP
+ "paddusb "BLUE_DITHER"(%4), %%mm0 \n\t"
+ "paddusb "GREEN_DITHER"(%4), %%mm2 \n\t"
+ "paddusb "RED_DITHER"(%4), %%mm1 \n\t"
+#endif
+
+ /* mask unneeded bits off */
+ "pand "MANGLE(mmx_redmask)", %%mm0;" /* b7b6b5b4 b3_0_0_0 b7b6b5b4 b3_0_0_0 */
+ "pand "MANGLE(mmx_redmask)", %%mm2;" /* g7g6g5g4 g3_0_0_0 g7g6g5g4 g3_0_0_0 */
+ "pand "MANGLE(mmx_redmask)", %%mm1;" /* r7r6r5r4 r3_0_0_0 r7r6r5r4 r3_0_0_0 */
+
+ "psrlw $3, %%mm0;" /* 0_0_0_b7 b6b5b4b3 0_0_0_b7 b6b5b4b3 */
+ "psrlw $1, %%mm1;" /* 0_r7r6r5 r4r3_0_0 0_r7r6r5 r4r3_0_0 */
+ "pxor %%mm4, %%mm4;" /* zero mm4 */
+
+ "movq %%mm0, %%mm5;" /* Copy B7-B0 */
+ "movq %%mm2, %%mm7;" /* Copy G7-G0 */
+
+ /* convert RGB24 plane to RGB15 pack for pixel 0-3 */
+ "punpcklbw %%mm4, %%mm2;" /* 0_0_0_0 0_0_0_0 g7g6g5g4 g3_0_0_0 */
+ "punpcklbw %%mm1, %%mm0;" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */
+
+ "psllw $2, %%mm2;" /* 0_0_0_0 0_0_g7g6 g5g4g3_0 0_0_0_0 */
+ "por %%mm2, %%mm0;" /* 0_r7r6r5 r4r3g7g6 g5g4g3b7 b6b5b4b3 */
+
+ "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+ MOVNTQ " %%mm0, (%1);" /* store pixel 0-3 */
+
+ /* convert RGB24 plane to RGB15 pack for pixel 4-7 */
+ "punpckhbw %%mm4, %%mm7;" /* 0_0_0_0 0_0_0_0 0_g7g6g5 g4g3_0_0 */
+ "punpckhbw %%mm1, %%mm5;" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */
+
+ "psllw $2, %%mm7;" /* 0_0_0_0 0_0_g7g6 g5g4g3_0 0_0_0_0 */
+ "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+
+ "por %%mm7, %%mm5;" /* 0_r7r6r5 r4r3g7g6 g5g4g3b7 b6b5b4b3 */
+ "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+
+ MOVNTQ " %%mm5, 8 (%1);" /* store pixel 4-7 */
+
+ YUV2RGB_ENDLOOP(2)
+ YUV2RGB_OPERANDS
+}
+
+static inline int RENAME(yuv420_rgb24)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[])
+{
+ int y, h_size;
+
+ YUV422_UNSHIFT
+ YUV2RGB_LOOP(3)
+
+ YUV2RGB_INIT
+ YUV2RGB
+ /* mm0=B, %%mm2=G, %%mm1=R */
+#if HAVE_MMX2
+ "movq "MANGLE(ff_M24A)", %%mm4 \n\t"
+ "movq "MANGLE(ff_M24C)", %%mm7 \n\t"
+ "pshufw $0x50, %%mm0, %%mm5 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */
+ "pshufw $0x50, %%mm2, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */
+ "pshufw $0x00, %%mm1, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */
+
+ "pand %%mm4, %%mm5 \n\t" /* B2 B1 B0 */
+ "pand %%mm4, %%mm3 \n\t" /* G2 G1 G0 */
+ "pand %%mm7, %%mm6 \n\t" /* R1 R0 */
+
+ "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */
+ "por %%mm5, %%mm6 \n\t"
+ "por %%mm3, %%mm6 \n\t"
+ MOVNTQ" %%mm6, (%1) \n\t"
+
+ "psrlq $8, %%mm2 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */
+ "pshufw $0xA5, %%mm0, %%mm5 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */
+ "pshufw $0x55, %%mm2, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */
+ "pshufw $0xA5, %%mm1, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */
+
+ "pand "MANGLE(ff_M24B)", %%mm5 \n\t" /* B5 B4 B3 */
+ "pand %%mm7, %%mm3 \n\t" /* G4 G3 */
+ "pand %%mm4, %%mm6 \n\t" /* R4 R3 R2 */
+
+ "por %%mm5, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */
+ "por %%mm3, %%mm6 \n\t"
+ MOVNTQ" %%mm6, 8(%1) \n\t"
+
+ "pshufw $0xFF, %%mm0, %%mm5 \n\t" /* B7 B6 B7 B6 B7 B6 B6 B7 */
+ "pshufw $0xFA, %%mm2, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */
+ "pshufw $0xFA, %%mm1, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */
+ "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+
+ "pand %%mm7, %%mm5 \n\t" /* B7 B6 */
+ "pand %%mm4, %%mm3 \n\t" /* G7 G6 G5 */
+ "pand "MANGLE(ff_M24B)", %%mm6 \n\t" /* R7 R6 R5 */
+ "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+\
+ "por %%mm5, %%mm3 \n\t"
+ "por %%mm3, %%mm6 \n\t"
+ MOVNTQ" %%mm6, 16(%1) \n\t"
+ "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+ "pxor %%mm4, %%mm4 \n\t"
+
+#else
+
+ "pxor %%mm4, %%mm4 \n\t"
+ "movq %%mm0, %%mm5 \n\t" /* B */
+ "movq %%mm1, %%mm6 \n\t" /* R */
+ "punpcklbw %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */
+ "punpcklbw %%mm4, %%mm1 \n\t" /* 0R0R0R0R 0 */
+ "punpckhbw %%mm2, %%mm5 \n\t" /* GBGBGBGB 2 */
+ "punpckhbw %%mm4, %%mm6 \n\t" /* 0R0R0R0R 2 */
+ "movq %%mm0, %%mm7 \n\t" /* GBGBGBGB 0 */
+ "movq %%mm5, %%mm3 \n\t" /* GBGBGBGB 2 */
+ "punpcklwd %%mm1, %%mm7 \n\t" /* 0RGB0RGB 0 */
+ "punpckhwd %%mm1, %%mm0 \n\t" /* 0RGB0RGB 1 */
+ "punpcklwd %%mm6, %%mm5 \n\t" /* 0RGB0RGB 2 */
+ "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */
+
+ "movq %%mm7, %%mm2 \n\t" /* 0RGB0RGB 0 */
+ "movq %%mm0, %%mm6 \n\t" /* 0RGB0RGB 1 */
+ "movq %%mm5, %%mm1 \n\t" /* 0RGB0RGB 2 */
+ "movq %%mm3, %%mm4 \n\t" /* 0RGB0RGB 3 */
+
+ "psllq $40, %%mm7 \n\t" /* RGB00000 0 */
+ "psllq $40, %%mm0 \n\t" /* RGB00000 1 */
+ "psllq $40, %%mm5 \n\t" /* RGB00000 2 */
+ "psllq $40, %%mm3 \n\t" /* RGB00000 3 */
+
+ "punpckhdq %%mm2, %%mm7 \n\t" /* 0RGBRGB0 0 */
+ "punpckhdq %%mm6, %%mm0 \n\t" /* 0RGBRGB0 1 */
+ "punpckhdq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */
+ "punpckhdq %%mm4, %%mm3 \n\t" /* 0RGBRGB0 3 */
+
+ "psrlq $8, %%mm7 \n\t" /* 00RGBRGB 0 */
+ "movq %%mm0, %%mm6 \n\t" /* 0RGBRGB0 1 */
+ "psllq $40, %%mm0 \n\t" /* GB000000 1 */
+ "por %%mm0, %%mm7 \n\t" /* GBRGBRGB 0 */
+ MOVNTQ" %%mm7, (%1) \n\t"
+
+ "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+
+ "psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */
+ "movq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */
+ "psllq $24, %%mm5 \n\t" /* BRGB0000 2 */
+ "por %%mm5, %%mm6 \n\t" /* BRGBRGBR 1 */
+ MOVNTQ" %%mm6, 8(%1) \n\t"
+
+ "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+
+ "psrlq $40, %%mm1 \n\t" /* 000000RG 2 */
+ "psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */
+ "por %%mm3, %%mm1 \n\t" /* RGBRGBRG 2 */
+ MOVNTQ" %%mm1, 16(%1) \n\t"
+
+ "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+ "pxor %%mm4, %%mm4 \n\t"
+#endif
+
+ YUV2RGB_ENDLOOP(3)
+ YUV2RGB_OPERANDS
+}
+
+/*
+
+RGB_PLANAR2PACKED32(red,green,blue,alpha)
+
+convert RGB plane to RGB packed format
+
+macro parameters specify the output color channel order:
+
+RGB_PLANAR2PACKED32(REG_RED, REG_GREEN, REG_BLUE, REG_ALPHA) for RGBA output,
+RGB_PLANAR2PACKED32(REG_BLUE, REG_GREEN, REG_RED, REG_ALPHA) for BGRA output,
+RGB_PLANAR2PACKED32(REG_ALPHA,REG_BLUE, REG_GREEN,REG_RED) for ABGR output,
+
+etc.
+*/
+
+#define REG_BLUE "0"
+#define REG_RED "1"
+#define REG_GREEN "2"
+#define REG_ALPHA "3"
+
+#define RGB_PLANAR2PACKED32(red,green,blue,alpha) \
+ /* convert RGB plane to RGB packed format, \
+ mm0 -> B, mm1 -> R, mm2 -> G, mm3 -> A, \
+ mm4 -> GB, mm5 -> AR pixel 4-7, \
+ mm6 -> GB, mm7 -> AR pixel 0-3 */ \
+ "movq %%mm" blue ", %%mm6;" /* B7 B6 B5 B4 B3 B2 B1 B0 */ \
+ "movq %%mm" red ", %%mm7;" /* R7 R6 R5 R4 R3 R2 R1 R0 */ \
+\
+ "movq %%mm" blue ", %%mm4;" /* B7 B6 B5 B4 B3 B2 B1 B0 */ \
+ "movq %%mm" red ", %%mm5;" /* R7 R6 R5 R4 R3 R2 R1 R0 */ \
+\
+ "punpcklbw %%mm" green ", %%mm6;" /* G3 B3 G2 B2 G1 B1 G0 B0 */ \
+ "punpcklbw %%mm" alpha ", %%mm7;" /* A3 R3 A2 R2 A1 R1 A0 R0 */ \
+\
+ "punpcklwd %%mm7, %%mm6;" /* A1 R1 B1 G1 A0 R0 B0 G0 */ \
+ MOVNTQ " %%mm6, (%1);" /* Store ARGB1 ARGB0 */ \
+\
+ "movq %%mm" blue ", %%mm6;" /* B7 B6 B5 B4 B3 B2 B1 B0 */ \
+ "punpcklbw %%mm" green ", %%mm6;" /* G3 B3 G2 B2 G1 B1 G0 B0 */ \
+\
+ "punpckhwd %%mm7, %%mm6;" /* A3 R3 G3 B3 A2 R2 B3 G2 */ \
+ MOVNTQ " %%mm6, 8 (%1);" /* Store ARGB3 ARGB2 */ \
+\
+ "punpckhbw %%mm" green ", %%mm4;" /* G7 B7 G6 B6 G5 B5 G4 B4 */ \
+ "punpckhbw %%mm" alpha ", %%mm5;" /* A7 R7 A6 R6 A5 R5 A4 R4 */ \
+\
+ "punpcklwd %%mm5, %%mm4;" /* A5 R5 B5 G5 A4 R4 B4 G4 */ \
+ MOVNTQ " %%mm4, 16 (%1);" /* Store ARGB5 ARGB4 */ \
+\
+ "movq %%mm" blue ", %%mm4;" /* B7 B6 B5 B4 B3 B2 B1 B0 */ \
+ "punpckhbw %%mm" green ", %%mm4;" /* G7 B7 G6 B6 G5 B5 G4 B4 */ \
+\
+ "punpckhwd %%mm5, %%mm4;" /* A7 R7 G7 B7 A6 R6 B6 G6 */ \
+ MOVNTQ " %%mm4, 24 (%1);" /* Store ARGB7 ARGB6 */ \
+\
+ "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */ \
+ "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */ \
+\
+ "pxor %%mm4, %%mm4;" /* zero mm4 */ \
+ "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */ \
+
+static inline int RENAME(yuv420_rgb32)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[])
+{
+ int y, h_size;
+
+ YUV422_UNSHIFT
+ YUV2RGB_LOOP(4)
+
+ YUV2RGB_INIT
+ YUV2RGB
+ "pcmpeqd %%mm3, %%mm3;" /* fill mm3 */
+ RGB_PLANAR2PACKED32(REG_RED,REG_GREEN,REG_BLUE,REG_ALPHA)
+
+ YUV2RGB_ENDLOOP(4)
+ YUV2RGB_OPERANDS
+}
+
+static inline int RENAME(yuva420_rgb32)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[])
+{
+#if HAVE_7REGS
+ int y, h_size;
+
+ YUV2RGB_LOOP(4)
+
+ uint8_t *pa = src[3] + y*srcStride[3];
+ YUV2RGB_INIT
+ YUV2RGB
+ "movq (%6, %0, 2), %%mm3;" /* Load 8 A A7 A6 A5 A4 A3 A2 A1 A0 */
+ RGB_PLANAR2PACKED32(REG_RED,REG_GREEN,REG_BLUE,REG_ALPHA)
+
+ YUV2RGB_ENDLOOP(4)
+ YUV2RGB_OPERANDS_ALPHA
+#endif
+}
+
+static inline int RENAME(yuv420_bgr32)(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[])
+{
+ int y, h_size;
+
+ YUV422_UNSHIFT
+ YUV2RGB_LOOP(4)
+
+ YUV2RGB_INIT
+ YUV2RGB
+ "pcmpeqd %%mm3, %%mm3;" /* fill mm3 */
+ RGB_PLANAR2PACKED32(REG_BLUE,REG_GREEN,REG_RED,REG_ALPHA)
+
+ YUV2RGB_ENDLOOP(4)
+ YUV2RGB_OPERANDS
+}
+
+static inline int RENAME(yuva420_bgr32)(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[])
+{
+#if HAVE_7REGS
+ int y, h_size;
+
+ YUV2RGB_LOOP(4)
+
+ uint8_t *pa = src[3] + y*srcStride[3];
+ YUV2RGB_INIT
+ YUV2RGB
+ "movq (%6, %0, 2), %%mm3;" /* Load 8 A A7 A6 A5 A4 A3 A2 A1 A0 */
+ RGB_PLANAR2PACKED32(REG_BLUE,REG_GREEN,REG_RED,REG_ALPHA)
+
+ YUV2RGB_ENDLOOP(4)
+ YUV2RGB_OPERANDS_ALPHA
+#endif
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/yuv2rgb.c b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/yuv2rgb.c
new file mode 100644
index 000000000..742fd6aad
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/yuv2rgb.c
@@ -0,0 +1,728 @@
+/*
+ * software YUV to RGB converter
+ *
+ * Copyright (C) 2009 Konstantin Shishkov
+ *
+ * MMX/MMX2 template stuff (needed for fast movntq support),
+ * 1,4,8bpp support and context / deglobalize stuff
+ * by Michael Niedermayer (michaelni@gmx.at)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <inttypes.h>
+#include <assert.h>
+
+#include "config.h"
+#include "rgb2rgb.h"
+#include "swscale.h"
+#include "swscale_internal.h"
+#include "../libavutil/x86_cpu.h"
+#include "../libavutil/internal.h"
+#include "ffImgfmt.h"
+
+#define DITHER1XBPP // only for MMX
+
+extern const uint8_t dither_8x8_32[8][8];
+extern const uint8_t dither_8x8_73[8][8];
+extern const uint8_t dither_8x8_220[8][8];
+
+#if HAVE_MMX && CONFIG_GPL
+
+/* hope these constant values are cache line aligned */
+DECLARE_ASM_CONST(8, uint64_t, mmx_00ffw) = 0x00ff00ff00ff00ffULL;
+DECLARE_ASM_CONST(8, uint64_t, mmx_redmask) = 0xf8f8f8f8f8f8f8f8ULL;
+DECLARE_ASM_CONST(8, uint64_t, mmx_grnmask) = 0xfcfcfcfcfcfcfcfcULL;
+
+uint64_t attribute_used __attribute__((aligned(8))) M24A= 0x00FF0000FF0000FFULL;
+uint64_t attribute_used __attribute__((aligned(8))) M24B= 0xFF0000FF0000FF00ULL;
+uint64_t attribute_used __attribute__((aligned(8))) M24C= 0x0000FF0000FF0000ULL;
+
+// the volatile is required because gcc otherwise optimizes some writes away not knowing that these
+// are read in the asm block
+volatile uint64_t attribute_used __attribute__((aligned(8))) b5Dither;
+volatile uint64_t attribute_used __attribute__((aligned(8))) g5Dither;
+volatile uint64_t attribute_used __attribute__((aligned(8))) g6Dither;
+volatile uint64_t attribute_used __attribute__((aligned(8))) r5Dither;
+
+extern const uint64_t dither4[2];
+extern const uint64_t dither8[2];
+
+#undef HAVE_MMX
+
+//MMX versions
+#undef RENAME
+#define HAVE_MMX 1
+#undef HAVE_MMX2
+#undef HAVE_AMD3DNOW
+#define RENAME(a) a ## _MMX
+#include "yuv2rgb_template.c"
+
+//MMX2 versions
+#undef RENAME
+#define HAVE_MMX 1
+#define HAVE_MMX2 1
+#undef HAVE_AMD3DNOW
+#define RENAME(a) a ## _MMX2
+#include "yuv2rgb_template.c"
+
+#endif /* HAVE_MMX && CONFIG_GPL */
+
+const int32_t Inverse_Table_6_9[8][7] = {
+ // { crv, cbu, cgu, cgv, cy, oy, RGB_add} /* divided by 65536 to get familiar values */
+ {117504, 138453, 13954, 34903, 76309, 1048576, 0}, /* no sequence_display_extension */
+ {117504, 138453, 13954, 34903, 76309, 1048576, 0}, /* ITU-R Rec. 709 (1990) */
+ {104597, 132201, 25675, 53279, 76309, 1048576, 0}, /* unspecified */
+ {104597, 132201, 25675, 53279, 76309, 1048576, 0}, /* reserved */
+ {104448, 132798, 24759, 53109, 76309, 1048576, 0}, /* FCC */
+ {104597, 132201, 25675, 53279, 76309, 1048576, 0}, /* ITU-R Rec. 624-4 System B, G */
+ {104597, 132201, 25675, 53279, 76309, 1048576, 0}, /* SMPTE 170M */
+ {117579, 136230, 16907, 35559, 76309, 1048576, 0} /* SMPTE 240M (1987) */
+};
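+
+/* Dividing a row by 65536 gives the familiar floating-point coefficients: the
+ SMPTE 170M / BT.601-style rows (104597, 132201, 25675, 53279, 76309) work out
+ to roughly crv=1.596, cbu=2.017, cgu=0.392, cgv=0.813 and cy=1.164 (255/219). */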
+
+#define RGB(i) \
+ U = pu[i]; \
+ V = pv[i]; \
+ r = c->table_rV[V]; \
+ g = c->table_gU[U] + c->table_gV[V]; \
+ b = c->table_bU[U];
+
+#define DST1(i) \
+ Y = py_1[2*i]; \
+ dst_1[2*i] = r[Y] + g[Y] + b[Y]; \
+ Y = py_1[2*i+1]; \
+ dst_1[2*i+1] = r[Y] + g[Y] + b[Y];
+
+#define DST2(i) \
+ Y = py_2[2*i]; \
+ dst_2[2*i] = r[Y] + g[Y] + b[Y]; \
+ Y = py_2[2*i+1]; \
+ dst_2[2*i+1] = r[Y] + g[Y] + b[Y];
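+
+/* Each per-channel table already stores the component shifted into its final bit
+ position (e.g. <<16 / <<8 / <<0 for the 32 bpp tables built in
+ yuv2rgb_c_init_tables), so r[Y] + g[Y] + b[Y] composes a packed pixel with
+ three loads and two adds. */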
+
+#define DST1RGB(i) \
+ Y = py_1[2*i]; \
+ dst_1[6*i] = r[Y]; dst_1[6*i+1] = g[Y]; dst_1[6*i+2] = b[Y]; \
+ Y = py_1[2*i+1]; \
+ dst_1[6*i+3] = r[Y]; dst_1[6*i+4] = g[Y]; dst_1[6*i+5] = b[Y];
+
+#define DST2RGB(i) \
+ Y = py_2[2*i]; \
+ dst_2[6*i] = r[Y]; dst_2[6*i+1] = g[Y]; dst_2[6*i+2] = b[Y]; \
+ Y = py_2[2*i+1]; \
+ dst_2[6*i+3] = r[Y]; dst_2[6*i+4] = g[Y]; dst_2[6*i+5] = b[Y];
+
+#define DST1BGR(i) \
+ Y = py_1[2*i]; \
+ dst_1[6*i] = b[Y]; dst_1[6*i+1] = g[Y]; dst_1[6*i+2] = r[Y]; \
+ Y = py_1[2*i+1]; \
+ dst_1[6*i+3] = b[Y]; dst_1[6*i+4] = g[Y]; dst_1[6*i+5] = r[Y];
+
+#define DST2BGR(i) \
+ Y = py_2[2*i]; \
+ dst_2[6*i] = b[Y]; dst_2[6*i+1] = g[Y]; dst_2[6*i+2] = r[Y]; \
+ Y = py_2[2*i+1]; \
+ dst_2[6*i+3] = b[Y]; dst_2[6*i+4] = g[Y]; dst_2[6*i+5] = r[Y];
+
+#define PROLOG(func_name, dst_type) \
+static int func_name(SwsContext *c, uint8_t* src[], stride_t srcStride[], int srcSliceY, \
+ int srcSliceH, uint8_t* dst[], stride_t dstStride[]){\
+ int y;\
+\
+ if(c->srcFormat == IMGFMT_422P){\
+ srcStride[1] *= 2;\
+ srcStride[2] *= 2;\
+ }\
+ for(y=0; y<srcSliceH; y+=2){\
+ dst_type *dst_1= (dst_type*)(dst[0] + (y+srcSliceY )*dstStride[0]);\
+ dst_type *dst_2= (dst_type*)(dst[0] + (y+srcSliceY+1)*dstStride[0]);\
+ dst_type *r, *g, *b;\
+ uint8_t *py_1= src[0] + y*srcStride[0];\
+ uint8_t *py_2= py_1 + srcStride[0];\
+ uint8_t *pu= src[1] + (y>>1)*srcStride[1];\
+ uint8_t *pv= src[2] + (y>>1)*srcStride[2];\
+ unsigned int h_size= c->dstW>>3;\
+ while (h_size--) {\
+ int U, V, Y;\
+
+#define EPILOG(dst_delta)\
+ pu += 4;\
+ pv += 4;\
+ py_1 += 8;\
+ py_2 += 8;\
+ dst_1 += dst_delta;\
+ dst_2 += dst_delta;\
+ }\
+ }\
+ return srcSliceH;\
+}
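+
+/* PROLOG/EPILOG wrap each C converter body: the y loop walks two output rows per
+ iteration (sharing one chroma row, hence the y>>1 strides), and the inner while
+ loop handles eight pixels per pass (h_size = dstW>>3). */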
+
+PROLOG(yuv2rgb_c_32, uint32_t)
+ RGB(0);
+ DST1(0);
+ DST2(0);
+
+ RGB(1);
+ DST2(1);
+ DST1(1);
+
+ RGB(2);
+ DST1(2);
+ DST2(2);
+
+ RGB(3);
+ DST2(3);
+ DST1(3);
+EPILOG(8)
+
+PROLOG(yuv2rgb_c_24_rgb, uint8_t)
+ RGB(0);
+ DST1RGB(0);
+ DST2RGB(0);
+
+ RGB(1);
+ DST2RGB(1);
+ DST1RGB(1);
+
+ RGB(2);
+ DST1RGB(2);
+ DST2RGB(2);
+
+ RGB(3);
+ DST2RGB(3);
+ DST1RGB(3);
+EPILOG(24)
+
+// only trivial mods from yuv2rgb_c_24_rgb
+PROLOG(yuv2rgb_c_24_bgr, uint8_t)
+ RGB(0);
+ DST1BGR(0);
+ DST2BGR(0);
+
+ RGB(1);
+ DST2BGR(1);
+ DST1BGR(1);
+
+ RGB(2);
+ DST1BGR(2);
+ DST2BGR(2);
+
+ RGB(3);
+ DST2BGR(3);
+ DST1BGR(3);
+EPILOG(24)
+
+// This is exactly the same code as yuv2rgb_c_32 except for the types of
+// r, g, b, dst_1, dst_2
+PROLOG(yuv2rgb_c_16, uint16_t)
+ RGB(0);
+ DST1(0);
+ DST2(0);
+
+ RGB(1);
+ DST2(1);
+ DST1(1);
+
+ RGB(2);
+ DST1(2);
+ DST2(2);
+
+ RGB(3);
+ DST2(3);
+ DST1(3);
+EPILOG(8)
+
+// This is exactly the same code as yuv2rgb_c_32 except for the types of
+// r, g, b, dst_1, dst_2
+PROLOG(yuv2rgb_c_8, uint8_t)
+ RGB(0);
+ DST1(0);
+ DST2(0);
+
+ RGB(1);
+ DST2(1);
+ DST1(1);
+
+ RGB(2);
+ DST1(2);
+ DST2(2);
+
+ RGB(3);
+ DST2(3);
+ DST1(3);
+EPILOG(8)
+
+// Ordered-dither variant of yuv2rgb_c_8; same structure, dithered pixel stores.
+PROLOG(yuv2rgb_c_8_ordered_dither, uint8_t)
+ const uint8_t *d32= dither_8x8_32[y&7];
+ const uint8_t *d64= dither_8x8_73[y&7];
+#define DST1bpp8(i,o) \
+ Y = py_1[2*i]; \
+ dst_1[2*i] = r[Y+d32[0+o]] + g[Y+d32[0+o]] + b[Y+d64[0+o]]; \
+ Y = py_1[2*i+1]; \
+ dst_1[2*i+1] = r[Y+d32[1+o]] + g[Y+d32[1+o]] + b[Y+d64[1+o]];
+
+#define DST2bpp8(i,o) \
+ Y = py_2[2*i]; \
+ dst_2[2*i] = r[Y+d32[8+o]] + g[Y+d32[8+o]] + b[Y+d64[8+o]]; \
+ Y = py_2[2*i+1]; \
+ dst_2[2*i+1] = r[Y+d32[9+o]] + g[Y+d32[9+o]] + b[Y+d64[9+o]];
+
+
+ RGB(0);
+ DST1bpp8(0,0);
+ DST2bpp8(0,0);
+
+ RGB(1);
+ DST2bpp8(1,2);
+ DST1bpp8(1,2);
+
+ RGB(2);
+ DST1bpp8(2,4);
+ DST2bpp8(2,4);
+
+ RGB(3);
+ DST2bpp8(3,6);
+ DST1bpp8(3,6);
+EPILOG(8)
+
+
+// This is exactly the same code as yuv2rgb_c_32 except for the types of
+// r, g, b, dst_1, dst_2
+PROLOG(yuv2rgb_c_4, uint8_t)
+ int acc;
+#define DST1_4(i) \
+ Y = py_1[2*i]; \
+ acc = r[Y] + g[Y] + b[Y]; \
+ Y = py_1[2*i+1]; \
+ acc |= (r[Y] + g[Y] + b[Y])<<4;\
+ dst_1[i] = acc;
+
+#define DST2_4(i) \
+ Y = py_2[2*i]; \
+ acc = r[Y] + g[Y] + b[Y]; \
+ Y = py_2[2*i+1]; \
+ acc |= (r[Y] + g[Y] + b[Y])<<4;\
+ dst_2[i] = acc;
+
+ RGB(0);
+ DST1_4(0);
+ DST2_4(0);
+
+ RGB(1);
+ DST2_4(1);
+ DST1_4(1);
+
+ RGB(2);
+ DST1_4(2);
+ DST2_4(2);
+
+ RGB(3);
+ DST2_4(3);
+ DST1_4(3);
+EPILOG(4)
+
+PROLOG(yuv2rgb_c_4_ordered_dither, uint8_t)
+ const uint8_t *d64= dither_8x8_73[y&7];
+ const uint8_t *d128=dither_8x8_220[y&7];
+ int acc;
+
+#define DST1bpp4(i,o) \
+ Y = py_1[2*i]; \
+ acc = r[Y+d128[0+o]] + g[Y+d64[0+o]] + b[Y+d128[0+o]]; \
+ Y = py_1[2*i+1]; \
+ acc |= (r[Y+d128[1+o]] + g[Y+d64[1+o]] + b[Y+d128[1+o]])<<4;\
+ dst_1[i]= acc;
+
+#define DST2bpp4(i,o) \
+ Y = py_2[2*i]; \
+ acc = r[Y+d128[8+o]] + g[Y+d64[8+o]] + b[Y+d128[8+o]]; \
+ Y = py_2[2*i+1]; \
+ acc |= (r[Y+d128[9+o]] + g[Y+d64[9+o]] + b[Y+d128[9+o]])<<4;\
+ dst_2[i]= acc;
+
+
+ RGB(0);
+ DST1bpp4(0,0);
+ DST2bpp4(0,0);
+
+ RGB(1);
+ DST2bpp4(1,2);
+ DST1bpp4(1,2);
+
+ RGB(2);
+ DST1bpp4(2,4);
+ DST2bpp4(2,4);
+
+ RGB(3);
+ DST2bpp4(3,6);
+ DST1bpp4(3,6);
+EPILOG(4)
+
+// This is exactly the same code as yuv2rgb_c_32 except for the types of
+// r, g, b, dst_1, dst_2
+PROLOG(yuv2rgb_c_4b, uint8_t)
+ RGB(0);
+ DST1(0);
+ DST2(0);
+
+ RGB(1);
+ DST2(1);
+ DST1(1);
+
+ RGB(2);
+ DST1(2);
+ DST2(2);
+
+ RGB(3);
+ DST2(3);
+ DST1(3);
+EPILOG(8)
+
+PROLOG(yuv2rgb_c_4b_ordered_dither, uint8_t)
+ const uint8_t *d64= dither_8x8_73[y&7];
+ const uint8_t *d128=dither_8x8_220[y&7];
+
+#define DST1bpp4b(i,o) \
+ Y = py_1[2*i]; \
+ dst_1[2*i] = r[Y+d128[0+o]] + g[Y+d64[0+o]] + b[Y+d128[0+o]]; \
+ Y = py_1[2*i+1]; \
+ dst_1[2*i+1] = r[Y+d128[1+o]] + g[Y+d64[1+o]] + b[Y+d128[1+o]];
+
+#define DST2bpp4b(i,o) \
+ Y = py_2[2*i]; \
+ dst_2[2*i] = r[Y+d128[8+o]] + g[Y+d64[8+o]] + b[Y+d128[8+o]]; \
+ Y = py_2[2*i+1]; \
+ dst_2[2*i+1] = r[Y+d128[9+o]] + g[Y+d64[9+o]] + b[Y+d128[9+o]];
+
+
+ RGB(0);
+ DST1bpp4b(0,0);
+ DST2bpp4b(0,0);
+
+ RGB(1);
+ DST2bpp4b(1,2);
+ DST1bpp4b(1,2);
+
+ RGB(2);
+ DST1bpp4b(2,4);
+ DST2bpp4b(2,4);
+
+ RGB(3);
+ DST2bpp4b(3,6);
+ DST1bpp4b(3,6);
+EPILOG(8)
+
+PROLOG(yuv2rgb_c_1_ordered_dither, uint8_t)
+ const uint8_t *d128=dither_8x8_220[y&7];
+ char out_1=0, out_2=0;
+ g= c->table_gU[128] + c->table_gV[128];
+
+#define DST1bpp1(i,o) \
+ Y = py_1[2*i]; \
+ out_1+= out_1 + g[Y+d128[0+o]]; \
+ Y = py_1[2*i+1]; \
+ out_1+= out_1 + g[Y+d128[1+o]];
+
+#define DST2bpp1(i,o) \
+ Y = py_2[2*i]; \
+ out_2+= out_2 + g[Y+d128[8+o]]; \
+ Y = py_2[2*i+1]; \
+ out_2+= out_2 + g[Y+d128[9+o]];
+
+ DST1bpp1(0,0);
+ DST2bpp1(0,0);
+
+ DST2bpp1(1,2);
+ DST1bpp1(1,2);
+
+ DST1bpp1(2,4);
+ DST2bpp1(2,4);
+
+ DST2bpp1(3,6);
+ DST1bpp1(3,6);
+
+ dst_1[0]= out_1;
+ dst_2[0]= out_2;
+EPILOG(1)
+
+SwsFunc yuv2rgb_get_func_ptr (SwsContext *c)
+{
+#if ARCH_X86 || ARCH_X86_64
+ if(c->params.cpu & SWS_CPU_CAPS_MMX2){
+ switch(c->dstFormat){
+ case IMGFMT_BGR32: return yuv420_rgb32_MMX2;
+ case IMGFMT_BGR24: return yuv420_rgb24_MMX2;
+ case IMGFMT_BGR16: return yuv420_rgb16_MMX2;
+ case IMGFMT_BGR15: return yuv420_rgb15_MMX2;
+ }
+ }
+ if(c->params.cpu & SWS_CPU_CAPS_MMX){
+ switch(c->dstFormat){
+ case IMGFMT_BGR32: return yuv420_rgb32_MMX;
+ case IMGFMT_BGR24: return yuv420_rgb24_MMX;
+ case IMGFMT_BGR16: return yuv420_rgb16_MMX;
+ case IMGFMT_BGR15: return yuv420_rgb15_MMX;
+ }
+ }
+#endif
+#ifdef HAVE_MLIB
+ {
+ SwsFunc t= yuv2rgb_init_mlib(c);
+ if(t) return t;
+ }
+#endif
+#ifdef HAVE_ALTIVEC
+ if (c->params.cpu & SWS_CPU_CAPS_ALTIVEC)
+ {
+ SwsFunc t = yuv2rgb_init_altivec(c);
+ if(t) return t;
+ }
+#endif
+
+ MSG_WARN("No accelerated colorspace conversion found\n");
+
+ switch(c->dstFormat){
+ case IMGFMT_RGB32:
+ case IMGFMT_BGR32: return yuv2rgb_c_32;
+ case IMGFMT_RGB24: return yuv2rgb_c_24_rgb;
+ case IMGFMT_BGR24: return yuv2rgb_c_24_bgr;
+ case IMGFMT_RGB16:
+ case IMGFMT_BGR16:
+ case IMGFMT_RGB15:
+ case IMGFMT_BGR15: return yuv2rgb_c_16;
+ case IMGFMT_RGB8:
+ case IMGFMT_BGR8: return yuv2rgb_c_8_ordered_dither;
+ case IMGFMT_RGB4:
+ case IMGFMT_BGR4: return yuv2rgb_c_4_ordered_dither;
+ case IMGFMT_RG4B:
+ case IMGFMT_BG4B: return yuv2rgb_c_4b_ordered_dither;
+ case IMGFMT_RGB1:
+ case IMGFMT_BGR1: return yuv2rgb_c_1_ordered_dither;
+ default:
+ assert(0);
+ }
+ return NULL;
+}
+
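+/* Rounds to nearest with halves away from zero, e.g. div_round(5, 2) == 3 and
+ div_round(-5, 2) == -3. */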
+static int div_round (int dividend, int divisor)
+{
+ if (dividend > 0)
+ return (dividend + (divisor>>1)) / divisor;
+ else
+ return -((-dividend + (divisor>>1)) / divisor);
+}
+
+int yuv2rgb_c_init_tables (SwsContext *c, const int inv_table[7], int fullRange, int brightness, int contrast, int saturation)
+{
+ const int isRgb = IMGFMT_IS_BGR(c->dstFormat);
+ const int bpp = isRgb?IMGFMT_RGB_DEPTH(c->dstFormat):IMGFMT_BGR_DEPTH(c->dstFormat);
+ int i;
+ uint8_t table_Y[1024];
+ uint32_t *table_32 = 0;
+ uint16_t *table_16 = 0;
+ uint8_t *table_8 = 0;
+ uint8_t *table_332 = 0;
+ uint8_t *table_121 = 0;
+ uint8_t *table_1 = 0;
+ int entry_size = 0;
+ unsigned char *table_r = 0, *table_g = 0, *table_b = 0;
+ unsigned char *table_start;
+
+ int64_t crv = inv_table[0];
+ int64_t cbu = inv_table[1];
+ int64_t cgu = -inv_table[2];
+ int64_t cgv = -inv_table[3];
+ int64_t cy = inv_table[4];
+ int64_t oy = inv_table[5];
+ int rgb_add = inv_table[6];
+
+//printf("%lld %lld %lld %lld %lld\n", cy, crv, cbu, cgu, cgv);
+
+ cy = (cy *contrast )>>16;
+ crv= (crv*contrast * saturation)>>32;
+ cbu= (cbu*contrast * saturation)>>32;
+ cgu= (cgu*contrast * saturation)>>32;
+ cgv= (cgv*contrast * saturation)>>32;
+//printf("%lld %lld %lld %lld %lld\n", cy, crv, cbu, cgu, cgv);
+ oy -= 256*brightness;
+
+ for (i = 0; i < 1024; i++) {
+ int j;
+
+ j= ((cy*(((i - 384)<<16) - oy) + (1<<31))>>32) + rgb_add;
+ j = (j < 0) ? 0 : ((j > 255) ? 255 : j);
+ table_Y[i] = j;
+ }
+
+ switch (bpp) {
+ case 32:
+ table_start= table_32 = av_malloc ((197 + 2*682 + 256 + 132) * sizeof (uint32_t));
+
+ entry_size = sizeof (uint32_t);
+ table_r = table_32 + 197;
+ table_b = table_32 + 197 + 685;
+ table_g = table_32 + 197 + 2*682;
+
+ for (i = -197; i < 256+197; i++)
+ ((uint32_t *)table_r)[i] = table_Y[i+384] << (isRgb ? 16 : 0);
+ for (i = -132; i < 256+132; i++)
+ ((uint32_t *)table_g)[i] = table_Y[i+384] << 8;
+ for (i = -232; i < 256+232; i++)
+ ((uint32_t *)table_b)[i] = table_Y[i+384] << (isRgb ? 0 : 16);
+ break;
+
+ case 24:
+ table_start= table_8 = av_malloc ((256 + 2*232) * sizeof (uint8_t));
+
+ entry_size = sizeof (uint8_t);
+ table_r = table_g = table_b = table_8 + 232;
+
+ for (i = -232; i < 256+232; i++)
+ ((uint8_t * )table_b)[i] = table_Y[i+384];
+ break;
+
+ case 15:
+ case 16:
+ table_start= table_16 = av_malloc ((197 + 2*682 + 256 + 132) * sizeof (uint16_t));
+
+ entry_size = sizeof (uint16_t);
+ table_r = table_16 + 197;
+ table_b = table_16 + 197 + 685;
+ table_g = table_16 + 197 + 2*682;
+
+ for (i = -197; i < 256+197; i++) {
+ int j = table_Y[i+384] >> 3;
+
+ if (isRgb)
+ j <<= ((bpp==16) ? 11 : 10);
+
+ ((uint16_t *)table_r)[i] = j;
+ }
+ for (i = -132; i < 256+132; i++) {
+ int j = table_Y[i+384] >> ((bpp==16) ? 2 : 3);
+
+ ((uint16_t *)table_g)[i] = j << 5;
+ }
+ for (i = -232; i < 256+232; i++) {
+ int j = table_Y[i+384] >> 3;
+
+ if (!isRgb)
+ j <<= ((bpp==16) ? 11 : 10);
+
+ ((uint16_t *)table_b)[i] = j;
+ }
+ break;
+
+ case 8:
+ table_start= table_332 = av_malloc ((197 + 2*682 + 256 + 132) * sizeof (uint8_t));
+
+ entry_size = sizeof (uint8_t);
+ table_r = table_332 + 197;
+ table_b = table_332 + 197 + 685;
+ table_g = table_332 + 197 + 2*682;
+
+ for (i = -197; i < 256+197; i++) {
+ int j = (table_Y[i+384 - 16] + 18)/36;
+
+ if (isRgb)
+ j <<= 5;
+
+ ((uint8_t *)table_r)[i] = j;
+ }
+ for (i = -132; i < 256+132; i++) {
+ int j = (table_Y[i+384 - 16] + 18)/36;
+
+ if (!isRgb)
+ j <<= 1;
+
+ ((uint8_t *)table_g)[i] = j << 2;
+ }
+ for (i = -232; i < 256+232; i++) {
+ int j = (table_Y[i+384 - 37] + 43)/85;
+
+ if (!isRgb)
+ j <<= 6;
+
+ ((uint8_t *)table_b)[i] = j;
+ }
+ break;
+ case 4:
+ case 4|128:
+ table_start= table_121 = av_malloc ((197 + 2*682 + 256 + 132) * sizeof (uint8_t));
+
+ entry_size = sizeof (uint8_t);
+ table_r = table_121 + 197;
+ table_b = table_121 + 197 + 685;
+ table_g = table_121 + 197 + 2*682;
+
+ for (i = -197; i < 256+197; i++) {
+ int j = table_Y[i+384 - 110] >> 7;
+
+ if (isRgb)
+ j <<= 3;
+
+ ((uint8_t *)table_r)[i] = j;
+ }
+ for (i = -132; i < 256+132; i++) {
+ int j = (table_Y[i+384 - 37]+ 43)/85;
+
+ ((uint8_t *)table_g)[i] = j << 1;
+ }
+ for (i = -232; i < 256+232; i++) {
+ int j =table_Y[i+384 - 110] >> 7;
+
+ if (!isRgb)
+ j <<= 3;
+
+ ((uint8_t *)table_b)[i] = j;
+ }
+ break;
+
+ case 1:
+ table_start= table_1 = av_malloc (256*2 * sizeof (uint8_t));
+
+ entry_size = sizeof (uint8_t);
+ table_g = table_1;
+ table_r = table_b = NULL;
+
+ for (i = 0; i < 256+256; i++) {
+ int j = table_Y[i + 384 - 110]>>7;
+
+ ((uint8_t *)table_g)[i] = j;
+ }
+ break;
+
+ default:
+ table_start= NULL;
+ MSG_ERR("%ibpp not supported by yuv2rgb\n", bpp);
+ //free mem?
+ return -1;
+ }
+
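+ /* 76309 is the cy entry of the coefficient tables (about 255/219 in 16.16
+ fixed point), so dividing each chroma product by it expresses the chroma
+ contribution as an offset, in luma steps, into the shared clamped table
+ built above. */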
+ for (i = 0; i < 256; i++) {
+ c->table_rV[i] = table_r + entry_size * div_round (crv * (i-128), 76309);
+ c->table_gU[i] = table_g + entry_size * div_round (cgu * (i-128), 76309);
+ c->table_gV[i] = entry_size * div_round (cgv * (i-128), 76309);
+ c->table_bU[i] = table_b + entry_size * div_round (cbu * (i-128), 76309);
+ }
+
+ if(c->yuvTable) av_free(c->yuvTable);
+ c->yuvTable= table_start;
+ return 0;
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/yuv2rgb_template.c b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/yuv2rgb_template.c
new file mode 100644
index 000000000..4cabb1d97
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libswscale/yuv2rgb_template.c
@@ -0,0 +1,538 @@
+/*
+ * yuv2rgb_mmx.c, software YUV to RGB converter with Intel MMX "technology"
+ *
+ * Copyright (C) 2000, Silicon Integrated System Corp
+ *
+ * Author: Olie Lho <ollie@sis.com.tw>
+ *
+ * 15,24 bpp and dithering from Michael Niedermayer (michaelni@gmx.at)
+ * MMX/MMX2 Template stuff from Michael Niedermayer (needed for fast movntq support)
+ * context / deglobalize stuff by Michael Niedermayer
+ *
+ * This file is part of mpeg2dec, a free MPEG-2 video decoder
+ *
+ * mpeg2dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * mpeg2dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with mpeg2dec; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#undef MOVNTQ
+#undef EMMS
+#undef SFENCE
+
+#if HAVE_AMD3DNOW
+/* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
+#define EMMS "femms"
+#else
+#define EMMS "emms"
+#endif
+
+#if HAVE_MMX2
+#define MOVNTQ "movntq"
+#define SFENCE "sfence"
+#else
+#define MOVNTQ "movq"
+#define SFENCE "/nop"
+#endif
+
+#define YUV2RGB \
+ /* Do the multiply part of the conversion for even and odd pixels,
+ register usage:
+ mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
+ mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd pixels,
+ mm6 -> Y even, mm7 -> Y odd */\
+ /* convert the chroma part */\
+ "punpcklbw %%mm4, %%mm0;" /* scatter 4 Cb 00 u3 00 u2 00 u1 00 u0 */ \
+ "punpcklbw %%mm4, %%mm1;" /* scatter 4 Cr 00 v3 00 v2 00 v1 00 v0 */ \
+\
+ "psllw $3, %%mm0;" /* Promote precision */ \
+ "psllw $3, %%mm1;" /* Promote precision */ \
+\
+ "psubsw "U_OFFSET"(%4), %%mm0;" /* Cb -= 128 */ \
+ "psubsw "V_OFFSET"(%4), %%mm1;" /* Cr -= 128 */ \
+\
+ "movq %%mm0, %%mm2;" /* Copy 4 Cb 00 u3 00 u2 00 u1 00 u0 */ \
+ "movq %%mm1, %%mm3;" /* Copy 4 Cr 00 v3 00 v2 00 v1 00 v0 */ \
+\
+ "pmulhw "UG_COEFF"(%4), %%mm2;" /* Mul Cb with green coeff -> Cb green */ \
+ "pmulhw "VG_COEFF"(%4), %%mm3;" /* Mul Cr with green coeff -> Cr green */ \
+\
+ "pmulhw "UB_COEFF"(%4), %%mm0;" /* Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0 */\
+ "pmulhw "VR_COEFF"(%4), %%mm1;" /* Mul Cr -> Cred 00 r3 00 r2 00 r1 00 r0 */\
+\
+ "paddsw %%mm3, %%mm2;" /* Cb green + Cr green -> Cgreen */\
+\
+ /* convert the luma part */\
+ "movq %%mm6, %%mm7;" /* Copy 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */\
+ "pand "MANGLE(mmx_00ffw)", %%mm6;" /* get Y even 00 Y6 00 Y4 00 Y2 00 Y0 */\
+\
+ "psrlw $8, %%mm7;" /* get Y odd 00 Y7 00 Y5 00 Y3 00 Y1 */\
+\
+ "psllw $3, %%mm6;" /* Promote precision */\
+ "psllw $3, %%mm7;" /* Promote precision */\
+\
+ "psubw "Y_OFFSET"(%4), %%mm6;" /* Y -= 16 */\
+ "psubw "Y_OFFSET"(%4), %%mm7;" /* Y -= 16 */\
+\
+ "pmulhw "Y_COEFF"(%4), %%mm6;" /* Mul 4 Y even 00 y6 00 y4 00 y2 00 y0 */\
+ "pmulhw "Y_COEFF"(%4), %%mm7;" /* Mul 4 Y odd 00 y7 00 y5 00 y3 00 y1 */\
+\
+ /* Do the addition part of the conversion for even and odd pixels,
+ register usage:
+ mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
+ mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd pixels,
+ mm6 -> Y even, mm7 -> Y odd */\
+ "movq %%mm0, %%mm3;" /* Copy Cblue */\
+ "movq %%mm1, %%mm4;" /* Copy Cred */\
+ "movq %%mm2, %%mm5;" /* Copy Cgreen */\
+\
+ "paddsw %%mm6, %%mm0;" /* Y even + Cblue 00 B6 00 B4 00 B2 00 B0 */\
+ "paddsw %%mm7, %%mm3;" /* Y odd + Cblue 00 B7 00 B5 00 B3 00 B1 */\
+\
+ "paddsw %%mm6, %%mm1;" /* Y even + Cred 00 R6 00 R4 00 R2 00 R0 */\
+ "paddsw %%mm7, %%mm4;" /* Y odd + Cred 00 R7 00 R5 00 R3 00 R1 */\
+\
+ "paddsw %%mm6, %%mm2;" /* Y even + Cgreen 00 G6 00 G4 00 G2 00 G0 */\
+ "paddsw %%mm7, %%mm5;" /* Y odd + Cgreen 00 G7 00 G5 00 G3 00 G1 */\
+\
+ /* Limit RGB even to 0..255 */\
+ "packuswb %%mm0, %%mm0;" /* B6 B4 B2 B0 B6 B4 B2 B0 */\
+ "packuswb %%mm1, %%mm1;" /* R6 R4 R2 R0 R6 R4 R2 R0 */\
+ "packuswb %%mm2, %%mm2;" /* G6 G4 G2 G0 G6 G4 G2 G0 */\
+\
+ /* Limit RGB odd to 0..255 */\
+ "packuswb %%mm3, %%mm3;" /* B7 B5 B3 B1 B7 B5 B3 B1 */\
+ "packuswb %%mm4, %%mm4;" /* R7 R5 R3 R1 R7 R5 R3 R1 */\
+ "packuswb %%mm5, %%mm5;" /* G7 G5 G3 G1 G7 G5 G3 G1 */\
+\
+ /* Interleave RGB even and odd */\
+ "punpcklbw %%mm3, %%mm0;" /* B7 B6 B5 B4 B3 B2 B1 B0 */\
+ "punpcklbw %%mm4, %%mm1;" /* R7 R6 R5 R4 R3 R2 R1 R0 */\
+ "punpcklbw %%mm5, %%mm2;" /* G7 G6 G5 G4 G3 G2 G1 G0 */\
+
+
+static inline int RENAME(yuv420_rgb16)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
+ int y, h_size;
+
+ if(c->srcFormat == IMGFMT_422P){
+ srcStride[1] *= 2;
+ srcStride[2] *= 2;
+ }
+
+ h_size= (c->dstW+7)&~7;
+ if(h_size*2 > abs(dstStride[0])) h_size-=8;
+
+ __asm__ __volatile__ ("pxor %mm4, %mm4;" /* zero mm4 */ );
+//printf("%X %X %X %X %X %X %X %X %X %X\n", (int)&c->redDither, (int)&b5Dither, (int)src[0], (int)src[1], (int)src[2], (int)dst[0],
+//srcStride[0],srcStride[1],srcStride[2],dstStride[0]);
+ for (y= 0; y<srcSliceH; y++ ) {
+ uint8_t *_image = dst[0] + (y+srcSliceY)*dstStride[0];
+ uint8_t *_py = src[0] + y*srcStride[0];
+ uint8_t *_pu = src[1] + (y>>1)*srcStride[1];
+ uint8_t *_pv = src[2] + (y>>1)*srcStride[2];
+ stride_t index= -h_size/2;
+
+ b5Dither= dither8[y&1];
+ g6Dither= dither4[y&1];
+ g5Dither= dither8[y&1];
+ r5Dither= dither8[(y+1)&1];
+ /* This MMX assembly code deals with a SINGLE scan line at a time; it converts 8
+ pixels in each iteration. */
+ __asm__ __volatile__ (
+ /* load data for start of next scan line */
+ "movd (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+ "movd (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+ "movq (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+// ".balign 16 \n\t"
+ "1: \n\t"
+/* No speed difference on my p3@500 with prefetch;
+ * if it is faster for anyone with -benchmark then tell me.
+ PREFETCH" 64(%0) \n\t"
+ PREFETCH" 64(%1) \n\t"
+ PREFETCH" 64(%2) \n\t"
+*/
+YUV2RGB
+
+#ifdef DITHER1XBPP
+ "paddusb "MANGLE(b5Dither)", %%mm0;"
+ "paddusb "MANGLE(g6Dither)", %%mm2;"
+ "paddusb "MANGLE(r5Dither)", %%mm1;"
+#endif
+ /* mask unneeded bits off */
+ "pand "MANGLE(mmx_redmask)", %%mm0;" /* b7b6b5b4 b3_0_0_0 b7b6b5b4 b3_0_0_0 */
+ "pand "MANGLE(mmx_grnmask)", %%mm2;" /* g7g6g5g4 g3g2_0_0 g7g6g5g4 g3g2_0_0 */
+ "pand "MANGLE(mmx_redmask)", %%mm1;" /* r7r6r5r4 r3_0_0_0 r7r6r5r4 r3_0_0_0 */
+
+ "psrlw $3,%%mm0;" /* 0_0_0_b7 b6b5b4b3 0_0_0_b7 b6b5b4b3 */
+ "pxor %%mm4, %%mm4;" /* zero mm4 */
+
+ "movq %%mm0, %%mm5;" /* Copy B7-B0 */
+ "movq %%mm2, %%mm7;" /* Copy G7-G0 */
+
+ /* convert rgb24 plane to rgb16 pack for pixel 0-3 */
+ "punpcklbw %%mm4, %%mm2;" /* 0_0_0_0 0_0_0_0 g7g6g5g4 g3g2_0_0 */
+ "punpcklbw %%mm1, %%mm0;" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */
+
+ "psllw $3, %%mm2;" /* 0_0_0_0 0_g7g6g5 g4g3g2_0 0_0_0_0 */
+ "por %%mm2, %%mm0;" /* r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 */
+
+ "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+ MOVNTQ " %%mm0, (%1);" /* store pixel 0-3 */
+
+ /* convert rgb24 plane to rgb16 pack for pixel 4-7 */
+ "punpckhbw %%mm4, %%mm7;" /* 0_0_0_0 0_0_0_0 g7g6g5g4 g3g2_0_0 */
+ "punpckhbw %%mm1, %%mm5;" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */
+
+ "psllw $3, %%mm7;" /* 0_0_0_0 0_g7g6g5 g4g3g2_0 0_0_0_0 */
+ "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+
+ "por %%mm7, %%mm5;" /* r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 */
+ "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+
+ MOVNTQ " %%mm5, 8 (%1);" /* store pixel 4-7 */
+
+ "add $16, %1 \n\t"
+ "add $4, %0 \n\t"
+ " js 1b \n\t"
+
+ : "+r" (index), "+r" (_image)
+ : "r" (_pu - index), "r" (_pv - index), "r"(&c->redDither), "r" (_py - 2*index)
+ );
+ }
+
+ __asm__ __volatile__ (EMMS);
+
+ return srcSliceH;
+ }
+
+static inline int RENAME(yuv420_rgb15)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
+ int y, h_size;
+
+ if(c->srcFormat == IMGFMT_422P){
+ srcStride[1] *= 2;
+ srcStride[2] *= 2;
+ }
+
+ h_size= (c->dstW+7)&~7;
+ if(h_size*2 > abs(dstStride[0])) h_size-=8;
+
+ __asm__ __volatile__ ("pxor %mm4, %mm4;" /* zero mm4 */ );
+//printf("%X %X %X %X %X %X %X %X %X %X\n", (int)&c->redDither, (int)&b5Dither, (int)src[0], (int)src[1], (int)src[2], (int)dst[0],
+//srcStride[0],srcStride[1],srcStride[2],dstStride[0]);
+ for (y= 0; y<srcSliceH; y++ ) {
+ uint8_t *_image = dst[0] + (y+srcSliceY)*dstStride[0];
+ uint8_t *_py = src[0] + y*srcStride[0];
+ uint8_t *_pu = src[1] + (y>>1)*srcStride[1];
+ uint8_t *_pv = src[2] + (y>>1)*srcStride[2];
+ stride_t index= -h_size/2;
+
+ b5Dither= dither8[y&1];
+ g6Dither= dither4[y&1];
+ g5Dither= dither8[y&1];
+ r5Dither= dither8[(y+1)&1];
+ /* This MMX assembly code deals with a SINGLE scan line at a time; it converts 8
+ pixels in each iteration. */
+ __asm__ __volatile__ (
+ /* load data for start of next scan line */
+ "movd (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+ "movd (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+ "movq (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+// ".balign 16 \n\t"
+ "1: \n\t"
+YUV2RGB
+
+#ifdef DITHER1XBPP
+ "paddusb "MANGLE(b5Dither)", %%mm0 \n\t"
+ "paddusb "MANGLE(g5Dither)", %%mm2 \n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm1 \n\t"
+#endif
+
+ /* mask unneeded bits off */
+ "pand "MANGLE(mmx_redmask)", %%mm0;" /* b7b6b5b4 b3_0_0_0 b7b6b5b4 b3_0_0_0 */
+ "pand "MANGLE(mmx_redmask)", %%mm2;" /* g7g6g5g4 g3_0_0_0 g7g6g5g4 g3_0_0_0 */
+ "pand "MANGLE(mmx_redmask)", %%mm1;" /* r7r6r5r4 r3_0_0_0 r7r6r5r4 r3_0_0_0 */
+
+ "psrlw $3,%%mm0;" /* 0_0_0_b7 b6b5b4b3 0_0_0_b7 b6b5b4b3 */
+ "psrlw $1,%%mm1;" /* 0_r7r6r5 r4r3_0_0 0_r7r6r5 r4r3_0_0 */
+ "pxor %%mm4, %%mm4;" /* zero mm4 */
+
+ "movq %%mm0, %%mm5;" /* Copy B7-B0 */
+ "movq %%mm2, %%mm7;" /* Copy G7-G0 */
+
+ /* convert rgb24 plane to rgb15 pack for pixel 0-3 */
+ "punpcklbw %%mm4, %%mm2;" /* 0_0_0_0 0_0_0_0 g7g6g5g4 g3_0_0_0 */
+ "punpcklbw %%mm1, %%mm0;" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */
+
+ "psllw $2, %%mm2;" /* 0_0_0_0 0_0_g7g6 g5g4g3_0 0_0_0_0 */
+ "por %%mm2, %%mm0;" /* 0_r7r6r5 r4r3g7g6 g5g4g3b7 b6b5b4b3 */
+
+ "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+ MOVNTQ " %%mm0, (%1);" /* store pixel 0-3 */
+
+ /* convert rgb24 plane to rgb15 pack for pixel 4-7 */
+ "punpckhbw %%mm4, %%mm7;" /* 0_0_0_0 0_0_0_0 0_g7g6g5 g4g3_0_0 */
+ "punpckhbw %%mm1, %%mm5;" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */
+
+ "psllw $2, %%mm7;" /* 0_0_0_0 0_0_g7g6 g5g4g3_0 0_0_0_0 */
+ "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+
+ "por %%mm7, %%mm5;" /* 0_r7r6r5 r4r3g7g6 g5g4g3b7 b6b5b4b3 */
+ "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+
+ MOVNTQ " %%mm5, 8 (%1);" /* store pixel 4-7 */
+
+ "add $16, %1 \n\t"
+ "add $4, %0 \n\t"
+ " js 1b \n\t"
+ : "+r" (index), "+r" (_image)
+ : "r" (_pu - index), "r" (_pv - index), "r"(&c->redDither), "r" (_py - 2*index)
+ );
+ }
+
+ __asm__ __volatile__ (EMMS);
+ return srcSliceH;
+ }
+
+static inline int RENAME(yuv420_rgb24)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
+ int y, h_size;
+
+ if(c->srcFormat == IMGFMT_422P){
+ srcStride[1] *= 2;
+ srcStride[2] *= 2;
+ }
+
+ h_size= (c->dstW+7)&~7;
+ if(h_size*3 > abs(dstStride[0])) h_size-=8;
+
+ __asm__ __volatile__ ("pxor %mm4, %mm4;" /* zero mm4 */ );
+
+ for (y= 0; y<srcSliceH; y++ ) {
+ uint8_t *_image = dst[0] + (y+srcSliceY)*dstStride[0];
+ uint8_t *_py = src[0] + y*srcStride[0];
+ uint8_t *_pu = src[1] + (y>>1)*srcStride[1];
+ uint8_t *_pv = src[2] + (y>>1)*srcStride[2];
+ stride_t index= -h_size/2;
+
+ /* This MMX assembly code deals with a SINGLE scan line at a time; it converts 8
+ pixels in each iteration. */
+ __asm__ __volatile__ (
+ /* load data for start of next scan line */
+ "movd (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+ "movd (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+ "movq (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+// ".balign 16 \n\t"
+ "1: \n\t"
+YUV2RGB
+ /* mm0=B, %%mm2=G, %%mm1=R */
+#if HAVE_MMX2
+ "movq "MANGLE(M24A)", %%mm4 \n\t"
+ "movq "MANGLE(M24C)", %%mm7 \n\t"
+ "pshufw $0x50, %%mm0, %%mm5 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */
+ "pshufw $0x50, %%mm2, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */
+ "pshufw $0x00, %%mm1, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */
+
+ "pand %%mm4, %%mm5 \n\t" /* B2 B1 B0 */
+ "pand %%mm4, %%mm3 \n\t" /* G2 G1 G0 */
+ "pand %%mm7, %%mm6 \n\t" /* R1 R0 */
+
+ "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */
+ "por %%mm5, %%mm6 \n\t"
+ "por %%mm3, %%mm6 \n\t"
+ MOVNTQ" %%mm6, (%1) \n\t"
+
+ "psrlq $8, %%mm2 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */
+ "pshufw $0xA5, %%mm0, %%mm5 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */
+ "pshufw $0x55, %%mm2, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */
+ "pshufw $0xA5, %%mm1, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */
+
+ "pand "MANGLE(M24B)", %%mm5 \n\t" /* B5 B4 B3 */
+ "pand %%mm7, %%mm3 \n\t" /* G4 G3 */
+ "pand %%mm4, %%mm6 \n\t" /* R4 R3 R2 */
+
+ "por %%mm5, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */
+ "por %%mm3, %%mm6 \n\t"
+ MOVNTQ" %%mm6, 8(%1) \n\t"
+
+ "pshufw $0xFF, %%mm0, %%mm5 \n\t" /* B7 B6 B7 B6 B7 B6 B6 B7 */
+ "pshufw $0xFA, %%mm2, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */
+ "pshufw $0xFA, %%mm1, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */
+ "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+
+ "pand %%mm7, %%mm5 \n\t" /* B7 B6 */
+ "pand %%mm4, %%mm3 \n\t" /* G7 G6 G5 */
+ "pand "MANGLE(M24B)", %%mm6 \n\t" /* R7 R6 R5 */
+ "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+
+ "por %%mm5, %%mm3 \n\t"
+ "por %%mm3, %%mm6 \n\t"
+ MOVNTQ" %%mm6, 16(%1) \n\t"
+ "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+ "pxor %%mm4, %%mm4 \n\t"
+
+#else
+
+ "pxor %%mm4, %%mm4 \n\t"
+ "movq %%mm0, %%mm5 \n\t" /* B */
+ "movq %%mm1, %%mm6 \n\t" /* R */
+ "punpcklbw %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */
+ "punpcklbw %%mm4, %%mm1 \n\t" /* 0R0R0R0R 0 */
+ "punpckhbw %%mm2, %%mm5 \n\t" /* GBGBGBGB 2 */
+ "punpckhbw %%mm4, %%mm6 \n\t" /* 0R0R0R0R 2 */
+ "movq %%mm0, %%mm7 \n\t" /* GBGBGBGB 0 */
+ "movq %%mm5, %%mm3 \n\t" /* GBGBGBGB 2 */
+ "punpcklwd %%mm1, %%mm7 \n\t" /* 0RGB0RGB 0 */
+ "punpckhwd %%mm1, %%mm0 \n\t" /* 0RGB0RGB 1 */
+ "punpcklwd %%mm6, %%mm5 \n\t" /* 0RGB0RGB 2 */
+ "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */
+
+ "movq %%mm7, %%mm2 \n\t" /* 0RGB0RGB 0 */
+ "movq %%mm0, %%mm6 \n\t" /* 0RGB0RGB 1 */
+ "movq %%mm5, %%mm1 \n\t" /* 0RGB0RGB 2 */
+ "movq %%mm3, %%mm4 \n\t" /* 0RGB0RGB 3 */
+
+ "psllq $40, %%mm7 \n\t" /* RGB00000 0 */
+ "psllq $40, %%mm0 \n\t" /* RGB00000 1 */
+ "psllq $40, %%mm5 \n\t" /* RGB00000 2 */
+ "psllq $40, %%mm3 \n\t" /* RGB00000 3 */
+
+ "punpckhdq %%mm2, %%mm7 \n\t" /* 0RGBRGB0 0 */
+ "punpckhdq %%mm6, %%mm0 \n\t" /* 0RGBRGB0 1 */
+ "punpckhdq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */
+ "punpckhdq %%mm4, %%mm3 \n\t" /* 0RGBRGB0 3 */
+
+ "psrlq $8, %%mm7 \n\t" /* 00RGBRGB 0 */
+ "movq %%mm0, %%mm6 \n\t" /* 0RGBRGB0 1 */
+ "psllq $40, %%mm0 \n\t" /* GB000000 1 */
+ "por %%mm0, %%mm7 \n\t" /* GBRGBRGB 0 */
+ MOVNTQ" %%mm7, (%1) \n\t"
+
+ "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+
+ "psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */
+ "movq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */
+ "psllq $24, %%mm5 \n\t" /* BRGB0000 2 */
+ "por %%mm5, %%mm6 \n\t" /* BRGBRGBR 1 */
+ MOVNTQ" %%mm6, 8(%1) \n\t"
+
+ "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+
+ "psrlq $40, %%mm1 \n\t" /* 000000RG 2 */
+ "psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */
+ "por %%mm3, %%mm1 \n\t" /* RGBRGBRG 2 */
+ MOVNTQ" %%mm1, 16(%1) \n\t"
+
+ "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+ "pxor %%mm4, %%mm4 \n\t"
+#endif
+
+ "add $24, %1 \n\t"
+ "add $4, %0 \n\t"
+ " js 1b \n\t"
+
+ : "+r" (index), "+r" (_image)
+ : "r" (_pu - index), "r" (_pv - index), "r"(&c->redDither), "r" (_py - 2*index)
+ );
+ }
+
+ __asm__ __volatile__ (EMMS);
+ return srcSliceH;
+ }
+
+static inline int RENAME(yuv420_rgb32)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
+ int y, h_size;
+
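+    /* treat 4:2:2 chroma like 4:2:0 by doubling the stride, as in yuv420_rgb24 above */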
+ if(c->srcFormat == IMGFMT_422P){
+ srcStride[1] *= 2;
+ srcStride[2] *= 2;
+ }
+
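+    /* same width rounding as in yuv420_rgb24, but RGB32 writes 4 bytes per pixel */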
+ h_size= (c->dstW+7)&~7;
+ if(h_size*4 > abs(dstStride[0])) h_size-=8;
+
+ __asm__ __volatile__ ("pxor %mm4, %mm4;" /* zero mm4 */ );
+
+ for (y= 0; y<srcSliceH; y++ ) {
+ uint8_t *_image = dst[0] + (y+srcSliceY)*dstStride[0];
+ uint8_t *_py = src[0] + y*srcStride[0];
+ uint8_t *_pu = src[1] + (y>>1)*srcStride[1];
+ uint8_t *_pv = src[2] + (y>>1)*srcStride[2];
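+        /* same negative-index loop idiom as in yuv420_rgb24 above */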
+ stride_t index= -h_size/2;
+
+ /* this MMX assembly code deals with a SINGLE scan line at a time; it converts 8
+ pixels per iteration */
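+        /* operand mapping matches yuv420_rgb24: %0 = index, %1 = output row,
+           %2 = Cb, %3 = Cr, %4 = &c->redDither, %5 = luma row */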
+ __asm__ __volatile__ (
+ /* load data for start of next scan line */
+ "movd (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+ "movd (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+ "movq (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+// ".balign 16 \n\t"
+ "1: \n\t"
+YUV2RGB
+ /* convert RGB plane to RGB packed format,
+ mm0 -> B, mm1 -> R, mm2 -> G, mm3 -> 0,
+ mm4 -> GB, mm5 -> AR pixel 4-7,
+ mm6 -> GB, mm7 -> AR pixel 0-3 */
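+        /* each MOVNTQ below stores two packed pixels; written little-endian,
+           the per-pixel memory byte order is B, G, R, 0 */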
+ "pxor %%mm3, %%mm3;" /* zero mm3 */
+
+ "movq %%mm0, %%mm6;" /* B7 B6 B5 B4 B3 B2 B1 B0 */
+ "movq %%mm1, %%mm7;" /* R7 R6 R5 R4 R3 R2 R1 R0 */
+
+ "movq %%mm0, %%mm4;" /* B7 B6 B5 B4 B3 B2 B1 B0 */
+ "movq %%mm1, %%mm5;" /* R7 R6 R5 R4 R3 R2 R1 R0 */
+
+ "punpcklbw %%mm2, %%mm6;" /* G3 B3 G2 B2 G1 B1 G0 B0 */
+ "punpcklbw %%mm3, %%mm7;" /* 00 R3 00 R2 00 R1 00 R0 */
+
+ "punpcklwd %%mm7, %%mm6;" /* 00 R1 B1 G1 00 R0 B0 G0 */
+ MOVNTQ " %%mm6, (%1);" /* Store ARGB1 ARGB0 */
+
+ "movq %%mm0, %%mm6;" /* B7 B6 B5 B4 B3 B2 B1 B0 */
+ "punpcklbw %%mm2, %%mm6;" /* G3 B3 G2 B2 G1 B1 G0 B0 */
+
+ "punpckhwd %%mm7, %%mm6;" /* 00 R3 G3 B3 00 R2 B3 G2 */
+ MOVNTQ " %%mm6, 8 (%1);" /* Store ARGB3 ARGB2 */
+
+ "punpckhbw %%mm2, %%mm4;" /* G7 B7 G6 B6 G5 B5 G4 B4 */
+ "punpckhbw %%mm3, %%mm5;" /* 00 R7 00 R6 00 R5 00 R4 */
+
+ "punpcklwd %%mm5, %%mm4;" /* 00 R5 B5 G5 00 R4 B4 G4 */
+ MOVNTQ " %%mm4, 16 (%1);" /* Store ARGB5 ARGB4 */
+
+ "movq %%mm0, %%mm4;" /* B7 B6 B5 B4 B3 B2 B1 B0 */
+ "punpckhbw %%mm2, %%mm4;" /* G7 B7 G6 B6 G5 B5 G4 B4 */
+
+ "punpckhwd %%mm5, %%mm4;" /* 00 R7 G7 B7 00 R6 B6 G6 */
+ MOVNTQ " %%mm4, 24 (%1);" /* Store ARGB7 ARGB6 */
+
+ "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+ "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+
+ "pxor %%mm4, %%mm4;" /* zero mm4 */
+ "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+
+ "add $32, %1 \n\t"
+ "add $4, %0 \n\t"
+ " js 1b \n\t"
+
+ : "+r" (index), "+r" (_image)
+ : "r" (_pu - index), "r" (_pv - index), "r"(&c->redDither), "r" (_py - 2*index)
+ );
+ }
+
+ __asm__ __volatile__ (EMMS);
+ return srcSliceH;
+}