github.com/FFmpeg/FFmpeg.git
author     Michael Niedermayer <michaelni@gmx.at>  2011-05-27 03:47:34 +0400
committer  Michael Niedermayer <michaelni@gmx.at>  2011-05-27 04:53:21 +0400
commit     027264cb82134c83413810810b24340f6290e11a (patch)
tree       11009209889b89443c2194dda88ef1ed7c4976ec /libavutil/arm/intmath.h
parent     701012d676042608cd6ec3317c1936a246f436d7 (diff)
parent     ca7d8256e32e4dbafadc54a65b441945ac759ca9 (diff)
Merge remote-tracking branch 'qatar/master'
* qatar/master: (31 commits)
  ARM: add ARMv6 optimised av_clip_uintp2
  ARM: remove volatile from asm statements in libavutil/intmath
  ARM: fix av_clipl_int32_arm()
  v4l: include avdevice.h
  ffserver: move close_connection() call to avoid a temporary string and copy.
  lavf: initialize demuxer private options.
  AVOptions: set string default values.
  lavdevice: mark v4l for removal on next major bump.
  swscale: fix compile on ppc.
  swscale: fix compile on x86-32.
  build: Remove generated .version file on distclean.
  configure: Add -D_GNU_SOURCE to CPPFLAGS on OS/2.
  doc: Drop hint at --enable-memalign-hack for MinGW, it is now autodetected.
  ffplay: Remove disabled code.
  Mark parameterless function declarations as 'void'.
  swscale: use av_clip_uint8() in yuv2yuv1_c().
  swscale: remove VOF/VOFW.
  swscale: split chroma buffers into separate U/V planes.
  swscale: replace formatConvBuffer[VOF] by allocated array.
  rgb2rgb: remove duplicate mmx/mmx2/3dnow/sse2 functions.
  ...

Merged-by: Michael Niedermayer <michaelni@gmx.at>
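
A recurring change in this merge is dropping the volatile qualifier from the asm statements in intmath.h. GCC treats an asm that has output operands and no volatile as a pure function of its inputs: it may hoist it out of loops, reuse a previous result, or delete it when the output is unused. The clip and divide statements below have no side effects, so volatile only blocked those optimisations. A minimal sketch of the effect, assuming an ARMv6 target (clip8 and twice are hypothetical names, not part of the patch):

    static inline int clip8(int a)
    {
        unsigned x;
        /* No "volatile": GCC may treat this asm as a pure function of 'a',
           so identical uses can be merged and unused results discarded. */
        __asm__ ("usat %0, #8, %1" : "=r"(x) : "r"(a));
        return x;
    }

    int twice(int a)
    {
        /* The compiler is now free to emit a single usat and reuse its
           result for both calls; with volatile, both copies had to stay. */
        return clip8(a) + clip8(a);
    }
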
Diffstat (limited to 'libavutil/arm/intmath.h')
-rw-r--r--  libavutil/arm/intmath.h  40
1 file changed, 25 insertions, 15 deletions
diff --git a/libavutil/arm/intmath.h b/libavutil/arm/intmath.h
index 8f03d4bf90..8eb346cd64 100644
--- a/libavutil/arm/intmath.h
+++ b/libavutil/arm/intmath.h
@@ -34,11 +34,11 @@
static av_always_inline av_const int FASTDIV(int a, int b)
{
int r, t;
- __asm__ volatile("cmp %3, #2 \n\t"
- "ldr %1, [%4, %3, lsl #2] \n\t"
- "lsrle %0, %2, #1 \n\t"
- "smmulgt %0, %1, %2 \n\t"
- : "=&r"(r), "=&r"(t) : "r"(a), "r"(b), "r"(ff_inverse));
+ __asm__ ("cmp %3, #2 \n\t"
+ "ldr %1, [%4, %3, lsl #2] \n\t"
+ "lsrle %0, %2, #1 \n\t"
+ "smmulgt %0, %1, %2 \n\t"
+ : "=&r"(r), "=&r"(t) : "r"(a), "r"(b), "r"(ff_inverse));
return r;
}
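
For context, FASTDIV replaces integer division by a multiply with a precomputed reciprocal: ff_inverse[b] holds a 2^32-scaled inverse of b, and the high 32 bits of a * ff_inverse[b] yield a/b. The ARMv6 version above special-cases b <= 2 with a shift (lsrle) and otherwise uses smmul, the signed most-significant-word multiply; for the operand ranges involved its top word matches the unsigned multiply used here. A minimal portable sketch of the same idea (inverse_tab, init_inverse_tab and fastdiv_ref are hypothetical names; the exact admissible operand ranges are not derived here):

    #include <stdint.h>

    static uint32_t inverse_tab[257];

    static void init_inverse_tab(void)
    {
        /* ceil(2^32 / b): a 2^32-scaled reciprocal that fits in 32 bits for b >= 2 */
        for (int b = 2; b < 257; b++)
            inverse_tab[b] = (uint32_t)(((1ULL << 32) + b - 1) / b);
    }

    static inline uint32_t fastdiv_ref(uint32_t a, uint32_t b)
    {
        /* High word of the 64-bit product approximates a / b; with this
           table size it is exact at least for a < 2^24 and 2 <= b <= 256. */
        return (uint32_t)(((uint64_t)a * inverse_tab[b]) >> 32);
    }
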
@@ -46,7 +46,7 @@ static av_always_inline av_const int FASTDIV(int a, int b)
static av_always_inline av_const uint8_t av_clip_uint8_arm(int a)
{
unsigned x;
- __asm__ volatile ("usat %0, #8, %1" : "=r"(x) : "r"(a));
+ __asm__ ("usat %0, #8, %1" : "=r"(x) : "r"(a));
return x;
}
@@ -54,7 +54,7 @@ static av_always_inline av_const uint8_t av_clip_uint8_arm(int a)
static av_always_inline av_const uint8_t av_clip_int8_arm(int a)
{
unsigned x;
- __asm__ volatile ("ssat %0, #8, %1" : "=r"(x) : "r"(a));
+ __asm__ ("ssat %0, #8, %1" : "=r"(x) : "r"(a));
return x;
}
@@ -62,7 +62,7 @@ static av_always_inline av_const uint8_t av_clip_int8_arm(int a)
static av_always_inline av_const uint16_t av_clip_uint16_arm(int a)
{
unsigned x;
- __asm__ volatile ("usat %0, #16, %1" : "=r"(x) : "r"(a));
+ __asm__ ("usat %0, #16, %1" : "=r"(x) : "r"(a));
return x;
}
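
The clip helpers above each compile to a single ARMv6 saturating instruction: usat clamps a signed input to the unsigned range [0, 2^n - 1], ssat to the signed range [-2^(n-1), 2^(n-1) - 1]. A portable sketch of what the two 8-bit variants compute (clip_uint8_ref and clip_int8_ref are hypothetical names):

    #include <stdint.h>

    /* Equivalent of "usat x, #8, a": clamp to [0, 255] */
    static inline uint8_t clip_uint8_ref(int a)
    {
        if (a < 0)   return 0;
        if (a > 255) return 255;
        return (uint8_t)a;
    }

    /* Equivalent of "ssat x, #8, a": clamp to [-128, 127] */
    static inline int8_t clip_int8_ref(int a)
    {
        if (a < -128) return -128;
        if (a >  127) return  127;
        return (int8_t)a;
    }
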
@@ -70,18 +70,27 @@ static av_always_inline av_const uint16_t av_clip_uint16_arm(int a)
static av_always_inline av_const int16_t av_clip_int16_arm(int a)
{
int x;
- __asm__ volatile ("ssat %0, #16, %1" : "=r"(x) : "r"(a));
+ __asm__ ("ssat %0, #16, %1" : "=r"(x) : "r"(a));
return x;
}

+#define av_clip_uintp2 av_clip_uintp2_arm
+static av_always_inline av_const unsigned av_clip_uintp2_arm(int a, int p)
+{
+ unsigned x;
+ __asm__ ("usat %0, %2, %1" : "=r"(x) : "r"(a), "i"(p));
+ return x;
+}
+
+
#else /* HAVE_ARMV6 */

#define FASTDIV FASTDIV
static av_always_inline av_const int FASTDIV(int a, int b)
{
int r, t;
- __asm__ volatile("umull %1, %0, %2, %3"
- : "=&r"(r), "=&r"(t) : "r"(a), "r"(ff_inverse[b]));
+ __asm__ ("umull %1, %0, %2, %3"
+ : "=&r"(r), "=&r"(t) : "r"(a), "r"(ff_inverse[b]));
return r;
}
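
The av_clip_uintp2_arm added above clamps to [0, 2^p - 1]. Note the "i"(p) constraint: usat encodes the saturation width in the instruction itself, so p must be a compile-time constant; a run-time p would fail to assemble. A portable sketch without that restriction (clip_uintp2_ref is a hypothetical name, assuming 0 < p < 31):

    static inline unsigned clip_uintp2_ref(int a, int p)
    {
        /* Clamp a to [0, 2^p - 1], the range of an unsigned p-bit field */
        int max = (1 << p) - 1;
        if (a < 0)   return 0;
        if (a > max) return (unsigned)max;
        return (unsigned)a;
    }
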
@@ -91,10 +100,11 @@ static av_always_inline av_const int FASTDIV(int a, int b)
static av_always_inline av_const int32_t av_clipl_int32_arm(int64_t a)
{
int x, y;
- __asm__ volatile ("adds %1, %R2, %Q2, lsr #31 \n\t"
- "mvnne %1, #1<<31 \n\t"
- "eorne %0, %1, %R2, asr #31 \n\t"
- : "=r"(x), "=&r"(y) : "r"(a));
+ __asm__ ("adds %1, %R2, %Q2, lsr #31 \n\t"
+ "mvnne %1, #1<<31 \n\t"
+ "moveq %0, %Q2 \n\t"
+ "eorne %0, %1, %R2, asr #31 \n\t"
+ : "=r"(x), "=&r"(y) : "r"(a));
return x;
}
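
The final hunk is the "ARM: fix av_clipl_int32_arm()" commit from the list above. The adds computes hi + (lo >> 31), which is zero exactly when the 64-bit input is already a sign-extended 32-bit value; in that case the old asm executed none of the ne-conditional instructions and left the result register undefined. The added moveq copies the low word through for the in-range case, while the ne path builds INT32_MAX or INT32_MIN from 0x7fffffff and the sign of the high word. A portable sketch of the intended behaviour (clipl_int32_ref is a hypothetical name):

    #include <stdint.h>

    /* Saturate a 64-bit value to the int32_t range, as the asm above does */
    static inline int32_t clipl_int32_ref(int64_t a)
    {
        if (a < INT32_MIN) return INT32_MIN;
        if (a > INT32_MAX) return INT32_MAX;
        return (int32_t)a;
    }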