Welcome to the mirror list, hosted at ThFree Co, Russian Federation.

github.com/mpc-hc/LAVFilters.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorHendrik Leppkes <h.leppkes@gmail.com>2011-09-18 01:02:06 +0400
committerHendrik Leppkes <h.leppkes@gmail.com>2011-09-18 01:04:51 +0400
commit796a808c2f3872e5dab3dd834799e0e623c8e996 (patch)
treee9af03b3e16a8f0595b587c89b46ee2ef9bd7e05
parent9da615c8c2f3c633d2bec8fd41f156c7d025a30a (diff)
Update pixel format converters and some related changes
-rw-r--r--decoder/LAVVideo/LAVPixFmtConverter.cpp39
-rw-r--r--decoder/LAVVideo/LAVPixFmtConverter.h10
-rw-r--r--decoder/LAVVideo/LAVVideo.cpp8
-rw-r--r--decoder/LAVVideo/LAVVideoSettings.h2
-rw-r--r--decoder/LAVVideo/decoders/ILAVDecoder.h4
-rw-r--r--decoder/LAVVideo/pixconv/convert_generic.cpp33
-rw-r--r--decoder/LAVVideo/pixconv/interleave.cpp2
-rw-r--r--decoder/LAVVideo/pixconv/yuv2rgb.cpp92
-rw-r--r--decoder/LAVVideo/pixconv/yuv2yuv_unscaled.cpp2
-rw-r--r--decoder/LAVVideo/pixconv/yuv444_ayuv.cpp2
10 files changed, 105 insertions, 89 deletions
diff --git a/decoder/LAVVideo/LAVPixFmtConverter.cpp b/decoder/LAVVideo/LAVPixFmtConverter.cpp
index ac0d1044..85491885 100644
--- a/decoder/LAVVideo/LAVPixFmtConverter.cpp
+++ b/decoder/LAVVideo/LAVPixFmtConverter.cpp
@@ -97,8 +97,6 @@ CLAVPixFmtConverter::CLAVPixFmtConverter()
, m_OutputPixFmt(LAVOutPixFmt_YV12)
, m_pSwsContext(NULL)
, swsWidth(0), swsHeight(0)
- , swsColorSpace(AVCOL_SPC_UNSPECIFIED)
- , swsColorRange(AVCOL_RANGE_UNSPECIFIED)
, m_RequiredAlignment(0)
, m_nAlignedBufferSize(0)
, m_pAlignedBuffer(NULL)
@@ -110,6 +108,8 @@ CLAVPixFmtConverter::CLAVPixFmtConverter()
SYSTEM_INFO systemInfo;
GetSystemInfo(&systemInfo);
m_NumThreads = min(8, max(1, systemInfo.dwNumberOfProcessors / 2));
+
+ ZeroMemory(&m_ColorProps, sizeof(m_ColorProps));
}
CLAVPixFmtConverter::~CLAVPixFmtConverter()
@@ -265,44 +265,45 @@ void CLAVPixFmtConverter::SelectConvertFunction()
int cpu = av_get_cpu_flags();
- /*if (m_OutputPixFmt == LAVOutPixFmt_YV12 && (m_InputPixFmt == PIX_FMT_YUV420P || m_InputPixFmt == PIX_FMT_YUVJ420P)) {
+ if (m_OutputPixFmt == LAVOutPixFmt_YV12 && m_InputPixFmt == LAVPixFmt_YUV420) {
convert = &CLAVPixFmtConverter::convert_yuv420_yv12;
m_RequiredAlignment = 0;
} else if (cpu & AV_CPU_FLAG_SSE2) {
- if (m_OutputPixFmt == LAVOutPixFmt_AYUV && (m_InputPixFmt == PIX_FMT_YUV444P10LE || m_InputPixFmt == PIX_FMT_YUV444P9LE || m_InputPixFmt == PIX_FMT_YUV444P16LE)) {
+ if (m_OutputPixFmt == LAVOutPixFmt_AYUV && m_InputPixFmt == LAVPixFmt_YUV444bX) {
convert = &CLAVPixFmtConverter::convert_yuv444_ayuv_dither_le;
- } else if (m_OutputPixFmt == LAVOutPixFmt_AYUV && (m_InputPixFmt == PIX_FMT_YUV444P || m_InputPixFmt == PIX_FMT_YUVJ444P)) {
+ } else if (m_OutputPixFmt == LAVOutPixFmt_AYUV && m_InputPixFmt == LAVPixFmt_YUV444) {
convert = &CLAVPixFmtConverter::convert_yuv444_ayuv;
- } else if (m_OutputPixFmt == LAVOutPixFmt_Y410 && (m_InputPixFmt == PIX_FMT_YUV444P10LE || m_InputPixFmt == PIX_FMT_YUV444P9LE)) {
+ } else if (m_OutputPixFmt == LAVOutPixFmt_Y410 && m_InputPixFmt == LAVPixFmt_YUV444bX && m_InBpp <= 10) {
convert = &CLAVPixFmtConverter::convert_yuv444_y410;
- } else if ((m_OutputPixFmt == LAVOutPixFmt_YV12 || m_OutputPixFmt == LAVOutPixFmt_NV12) && (m_InputPixFmt == PIX_FMT_YUV420P10LE || m_InputPixFmt == PIX_FMT_YUV420P9LE || m_InputPixFmt == PIX_FMT_YUV420P16LE)) {
+ } else if ((m_OutputPixFmt == LAVOutPixFmt_YV12 || m_OutputPixFmt == LAVOutPixFmt_NV12) && m_InputPixFmt == LAVPixFmt_YUV420bX) {
if (m_OutputPixFmt == LAVOutPixFmt_NV12) {
convert = &CLAVPixFmtConverter::convert_yuv420_yv12_nv12_dither_le<TRUE>;
} else {
convert = &CLAVPixFmtConverter::convert_yuv420_yv12_nv12_dither_le<FALSE>;
m_RequiredAlignment = 32; // the U/V planes need to be 16 aligned..
}
- } else if (((m_OutputPixFmt == LAVOutPixFmt_P010 || m_OutputPixFmt == LAVOutPixFmt_P016) && (m_InputPixFmt == PIX_FMT_YUV420P10LE || m_InputPixFmt == PIX_FMT_YUV420P9LE || PIX_FMT_YUV420P16LE))
- || ((m_OutputPixFmt == LAVOutPixFmt_P210 || m_OutputPixFmt == LAVOutPixFmt_P216) && (m_InputPixFmt == PIX_FMT_YUV422P10LE || m_InputPixFmt == PIX_FMT_YUV422P16LE))) {
- if (m_InputPixFmt == PIX_FMT_YUV420P10LE)
+ } else if (((m_OutputPixFmt == LAVOutPixFmt_P010 || m_OutputPixFmt == LAVOutPixFmt_P016) && m_InputPixFmt == LAVPixFmt_YUV420bX)
+ || ((m_OutputPixFmt == LAVOutPixFmt_P210 || m_OutputPixFmt == LAVOutPixFmt_P216) && m_InputPixFmt == LAVPixFmt_YUV422bX)) {
+ if (m_InBpp == 10)
convert = &CLAVPixFmtConverter::convert_yuv420_px1x_le<6>;
- else if (m_InputPixFmt == PIX_FMT_YUV420P9LE)
+ else if (m_InBpp == 9)
convert = &CLAVPixFmtConverter::convert_yuv420_px1x_le<7>;
- else if (m_InputPixFmt == PIX_FMT_YUV420P16LE)
+ else if (m_InBpp == 16)
convert = &CLAVPixFmtConverter::convert_yuv420_px1x_le<0>;
- } else if (m_OutputPixFmt == LAVOutPixFmt_NV12 && (m_InputPixFmt == PIX_FMT_YUV420P || m_InputPixFmt == PIX_FMT_YUVJ420P)) {
+ } else if (m_OutputPixFmt == LAVOutPixFmt_NV12 && m_InputPixFmt == LAVPixFmt_YUV420) {
convert = &CLAVPixFmtConverter::convert_yuv420_nv12;
m_RequiredAlignment = 32;
- } else if (m_OutputPixFmt == LAVOutPixFmt_YUY2 && (m_InputPixFmt == PIX_FMT_YUV422P || m_InputPixFmt == PIX_FMT_YUVJ422P)) {
+ } else if (m_OutputPixFmt == LAVOutPixFmt_YUY2 && m_InputPixFmt == LAVPixFmt_YUV422) {
convert = &CLAVPixFmtConverter::convert_yuv422_yuy2_uyvy<0>;
m_RequiredAlignment = 32;
- } else if (m_OutputPixFmt == LAVOutPixFmt_UYVY && (m_InputPixFmt == PIX_FMT_YUV422P || m_InputPixFmt == PIX_FMT_YUVJ422P)) {
+ } else if (m_OutputPixFmt == LAVOutPixFmt_UYVY && m_InputPixFmt == LAVPixFmt_YUV422) {
convert = &CLAVPixFmtConverter::convert_yuv422_yuy2_uyvy<1>;
m_RequiredAlignment = 32;
} else if ((m_OutputPixFmt == LAVOutPixFmt_RGB32 || m_OutputPixFmt == LAVOutPixFmt_RGB24)
- && (m_InputPixFmt == PIX_FMT_YUV420P || m_InputPixFmt == PIX_FMT_YUVJ420P || m_InputPixFmt == PIX_FMT_YUV420P10LE || m_InputPixFmt == PIX_FMT_YUV420P9LE
- || m_InputPixFmt == PIX_FMT_YUV422P || m_InputPixFmt == PIX_FMT_YUVJ422P || m_InputPixFmt == PIX_FMT_YUV422P10LE
- || m_InputPixFmt == PIX_FMT_YUV444P || m_InputPixFmt == PIX_FMT_YUVJ444P || m_InputPixFmt == PIX_FMT_YUV444P10LE || m_InputPixFmt == PIX_FMT_YUV444P9LE)) {
+ && (m_InputPixFmt == LAVPixFmt_YUV420 || m_InputPixFmt == LAVPixFmt_YUV420bX
+ || m_InputPixFmt == LAVPixFmt_YUV422 || m_InputPixFmt == LAVPixFmt_YUV422bX
+ || m_InputPixFmt == LAVPixFmt_YUV444 || m_InputPixFmt == LAVPixFmt_YUV444bX)
+ && m_InBpp <= 10) {
if (m_OutputPixFmt == LAVOutPixFmt_RGB32) {
convert = &CLAVPixFmtConverter::convert_yuv_rgb<1>;
m_RequiredAlignment = 4;
@@ -311,7 +312,7 @@ void CLAVPixFmtConverter::SelectConvertFunction()
}
m_bRGBConverter = TRUE;
}
- } */
+ }
if (convert == NULL) {
convert = &CLAVPixFmtConverter::convert_generic;
diff --git a/decoder/LAVVideo/LAVPixFmtConverter.h b/decoder/LAVVideo/LAVPixFmtConverter.h
index 64905b70..5bfc8bba 100644
--- a/decoder/LAVVideo/LAVPixFmtConverter.h
+++ b/decoder/LAVVideo/LAVPixFmtConverter.h
@@ -68,7 +68,7 @@ public:
LAVOutPixFmts GetPreferredOutput();
LAVOutPixFmts GetOutputPixFmt() { return m_OutputPixFmt; }
- void SetColorProps(AVColorSpace colorspace, AVColorRange range, int RGBOutputRange) { if (swsColorSpace != colorspace || swsColorRange != range || swsOutputRange != RGBOutputRange) { DestroySWScale(); swsColorSpace = colorspace; swsColorRange = range; swsOutputRange = RGBOutputRange; } }
+ void SetColorProps(DXVA2_ExtendedFormat props, int RGBOutputRange) { if (props.value != m_ColorProps.value || swsOutputRange != RGBOutputRange) { DestroySWScale(); m_ColorProps = props; swsOutputRange = RGBOutputRange; } }
int GetNumMediaTypes();
void GetMediaType(CMediaType *mt, int index, LONG biWidth, LONG biHeight, DWORD dwAspectX, DWORD dwAspectY, REFERENCE_TIME rtAvgTime, BOOL bVIH1 = FALSE);
@@ -140,15 +140,15 @@ private:
RGBCoeffs* getRGBCoeffs(int width, int height);
private:
- enum LAVPixelFormat m_InputPixFmt;
- enum LAVOutPixFmts m_OutputPixFmt;
+ LAVPixelFormat m_InputPixFmt;
+ LAVOutPixFmts m_OutputPixFmt;
int m_InBpp;
int swsWidth, swsHeight;
- AVColorSpace swsColorSpace;
- AVColorRange swsColorRange;
int swsOutputRange;
+ DXVA2_ExtendedFormat m_ColorProps;
+
unsigned m_RequiredAlignment;
SwsContext *m_pSwsContext;
diff --git a/decoder/LAVVideo/LAVVideo.cpp b/decoder/LAVVideo/LAVVideo.cpp
index 3bc433bf..8595eb8e 100644
--- a/decoder/LAVVideo/LAVVideo.cpp
+++ b/decoder/LAVVideo/LAVVideo.cpp
@@ -723,6 +723,14 @@ STDMETHODIMP CLAVVideo::Deliver(LAVFrame *pFrame)
NegotiatePixelFormat(mt, width, height);
}
}
+ m_PixFmtConverter.SetColorProps(pFrame->ext_format, m_settings.RGBRange);
+
+ // Update flags for cases where the converter can change the nominal range
+ if (m_PixFmtConverter.IsRGBConverterActive() && m_settings.RGBRange != 0) {
+ pFrame->ext_format.NominalRange = m_settings.RGBRange == 1 ? DXVA2_NominalRange_16_235 : DXVA2_NominalRange_0_255;
+ } else if (m_PixFmtConverter.GetOutputPixFmt() == LAVOutPixFmt_RGB32 || m_PixFmtConverter.GetOutputPixFmt() == LAVOutPixFmt_RGB24) {
+ pFrame->ext_format.NominalRange = DXVA2_NominalRange_0_255;
+ }
IMediaSample *pSampleOut = NULL;
BYTE *pDataOut = NULL;
diff --git a/decoder/LAVVideo/LAVVideoSettings.h b/decoder/LAVVideo/LAVVideoSettings.h
index 2320a2f3..784c92bb 100644
--- a/decoder/LAVVideo/LAVVideoSettings.h
+++ b/decoder/LAVVideo/LAVVideoSettings.h
@@ -83,7 +83,7 @@ typedef enum LAVOutPixFmts {
LAVOutPixFmt_RGB24, // 24-bit RGB (BGR)
LAVOutPixFmt_NB // Number of formats
-};
+} LAVOutPixFmts;
// LAV Audio configuration interface
[uuid("FA40D6E9-4D38-4761-ADD2-71A9EC5FD32F")]
diff --git a/decoder/LAVVideo/decoders/ILAVDecoder.h b/decoder/LAVVideo/decoders/ILAVDecoder.h
index 42c417a7..bbf0eac2 100644
--- a/decoder/LAVVideo/decoders/ILAVDecoder.h
+++ b/decoder/LAVVideo/decoders/ILAVDecoder.h
@@ -30,7 +30,7 @@
* That means that there are leading zero-bits, and not trailing like in DirectShow
* The actual number of valid bits is stored in the LAVFrame
*/
-enum LAVPixelFormat {
+typedef enum LAVPixelFormat {
LAVPixFmt_None = -1,
/* planar YUV */
LAVPixFmt_YUV420, ///< YUV 4:2:0, 8 bit
@@ -52,7 +52,7 @@ enum LAVPixelFormat {
LAVPixFmt_ARGB32, ///< ARGB32, in BGRA order
LAVPixFmt_NB, ///< number of formats
-};
+} LAVPixelFormat;
/**
* Structure describing a pixel format
diff --git a/decoder/LAVVideo/pixconv/convert_generic.cpp b/decoder/LAVVideo/pixconv/convert_generic.cpp
index bd8abba4..a6026289 100644
--- a/decoder/LAVVideo/pixconv/convert_generic.cpp
+++ b/decoder/LAVVideo/pixconv/convert_generic.cpp
@@ -85,19 +85,6 @@ DECLARE_CONV_FUNC_IMPL(convert_generic)
inline SwsContext *CLAVPixFmtConverter::GetSWSContext(int width, int height, enum PixelFormat srcPix, enum PixelFormat dstPix, int flags)
{
if (!m_pSwsContext || swsWidth != width || swsHeight != height) {
- // Map full-range formats to their limited-range variants
- // All target formats we have are limited range and we don't want compression
- if (dstPix != PIX_FMT_BGRA && dstPix != PIX_FMT_BGR24) {
- if (srcPix == PIX_FMT_YUVJ420P)
- srcPix = PIX_FMT_YUV420P;
- else if (srcPix == PIX_FMT_YUVJ422P)
- srcPix = PIX_FMT_YUV422P;
- else if (srcPix == PIX_FMT_YUVJ440P)
- srcPix = PIX_FMT_YUV440P;
- else if (srcPix == PIX_FMT_YUVJ444P)
- srcPix = PIX_FMT_YUV444P;
- }
-
if (m_pSettings->GetHighQualityPixelFormatConversion()) {
DbgLog((LOG_TRACE, 10, L"::GetSwsContext(): Activating HQ scaling mode"));
flags |= (SWS_FULL_CHR_H_INT|SWS_ACCURATE_RND);
@@ -114,15 +101,25 @@ inline SwsContext *CLAVPixFmtConverter::GetSWSContext(int width, int height, enu
int ret = sws_getColorspaceDetails(m_pSwsContext, &inv_tbl, &srcRange, &tbl, &dstRange, &brightness, &contrast, &saturation);
if (ret >= 0) {
const int *rgbTbl = NULL;
- if (swsColorSpace != AVCOL_SPC_UNSPECIFIED) {
- rgbTbl = sws_getCoefficients(swsColorSpace);
+ if (m_ColorProps.VideoTransferMatrix != DXVA2_VideoTransferMatrix_Unknown) {
+ int colorspace = SWS_CS_ITU709;
+ switch (m_ColorProps.VideoTransferMatrix) {
+ case DXVA2_VideoTransferMatrix_BT709:
+ colorspace = SWS_CS_ITU709;
+ break;
+ case DXVA2_VideoTransferMatrix_BT601:
+ colorspace = SWS_CS_ITU601;
+ break;
+ case DXVA2_VideoTransferMatrix_SMPTE240M:
+ colorspace = SWS_CS_SMPTE240M;
+ break;
+ }
+ rgbTbl = sws_getCoefficients(colorspace);
} else {
BOOL isHD = (height >= 720 || width >= 1280);
rgbTbl = sws_getCoefficients(isHD ? SWS_CS_ITU709 : SWS_CS_ITU601);
}
- if (swsColorRange != AVCOL_RANGE_UNSPECIFIED) {
- srcRange = dstRange = swsColorRange - 1;
- }
+ srcRange = dstRange = (m_ColorProps.NominalRange == DXVA2_NominalRange_0_255);
sws_setColorspaceDetails(m_pSwsContext, rgbTbl, srcRange, tbl, dstRange, brightness, contrast, saturation);
}
swsWidth = width;
diff --git a/decoder/LAVVideo/pixconv/interleave.cpp b/decoder/LAVVideo/pixconv/interleave.cpp
index a5a9a36a..7c98d0a9 100644
--- a/decoder/LAVVideo/pixconv/interleave.cpp
+++ b/decoder/LAVVideo/pixconv/interleave.cpp
@@ -33,7 +33,7 @@ DECLARE_CONV_FUNC_IMPL(convert_yuv444_y410)
int inStride = srcStride[0] >> 1;
int outStride = dstStride << 2;
- int shift = (inputFormat == PIX_FMT_YUV444P9LE ? 1 : 0);
+ int shift = 10 - bpp;
int line, i;
diff --git a/decoder/LAVVideo/pixconv/yuv2rgb.cpp b/decoder/LAVVideo/pixconv/yuv2rgb.cpp
index be4f0fe9..ad71ba68 100644
--- a/decoder/LAVVideo/pixconv/yuv2rgb.cpp
+++ b/decoder/LAVVideo/pixconv/yuv2rgb.cpp
@@ -27,7 +27,7 @@
#include "pixconv_sse2_templates.h"
// This function converts 4x2 pixels from the source into 4x2 RGB pixels in the destination
-template <PixelFormat inputFormat, int shift, int out32, int right_edge> __forceinline
+template <LAVPixelFormat inputFormat, int shift, int out32, int right_edge> __forceinline
static int yuv2rgb_convert_pixels(const uint8_t* &srcY, const uint8_t* &srcU, const uint8_t* &srcV, uint8_t* &dst, int srcStrideY, int srcStrideUV, int dstStride, int line, RGBCoeffs *coeffs)
{
__m128i xmm0,xmm1,xmm2,xmm3,xmm4,xmm5,xmm6,xmm7;
@@ -62,7 +62,7 @@ static int yuv2rgb_convert_pixels(const uint8_t* &srcY, const uint8_t* &srcU, co
// xmm0/xmm2 contain 4 interleaved U/V samples from two lines each in the 16bit parts, still in their native bitdepth
// Chroma upsampling required
- if (inputFormat == PIX_FMT_YUV420P || inputFormat == PIX_FMT_YUV422P) {
+ if (inputFormat == LAVPixFmt_YUV420 || inputFormat == LAVPixFmt_YUV422) {
if (shift > 0) {
srcU += 4;
srcV += 4;
@@ -91,7 +91,7 @@ static int yuv2rgb_convert_pixels(const uint8_t* &srcY, const uint8_t* &srcU, co
}
// 4:2:0 - upsample to 4:2:2 using 75:25
- if (inputFormat == PIX_FMT_YUV420P) {
+ if (inputFormat == LAVPixFmt_YUV420) {
xmm1 = xmm0;
xmm1 = _mm_add_epi16(xmm1, xmm0); /* 2x line 0 */
xmm1 = _mm_add_epi16(xmm1, xmm0); /* 3x line 0 */
@@ -140,16 +140,16 @@ static int yuv2rgb_convert_pixels(const uint8_t* &srcY, const uint8_t* &srcU, co
// Shift the result to 12 bit
// For 10-bit input, we need to shift one bit off, or we exceed the allowed processing depth
// For 8-bit, we need to add one bit
- if (inputFormat == PIX_FMT_YUV420P && shift == 2) {
+ if (inputFormat == LAVPixFmt_YUV420 && shift == 2) {
xmm1 = _mm_srli_epi16(xmm1, 1);
xmm3 = _mm_srli_epi16(xmm3, 1);
- } else if (inputFormat == PIX_FMT_YUV420P && shift == 0) {
+ } else if (inputFormat == LAVPixFmt_YUV420 && shift == 0) {
xmm1 = _mm_slli_epi16(xmm1, 1);
xmm3 = _mm_slli_epi16(xmm3, 1);
}
// 12-bit result, xmm1 & xmm3 with 4 UV combinations each
- } else if (inputFormat == PIX_FMT_YUV444P) {
+ } else if (inputFormat == LAVPixFmt_YUV444) {
if (shift > 0) {
srcU += 8;
srcV += 8;
@@ -288,7 +288,7 @@ static int yuv2rgb_convert_pixels(const uint8_t* &srcY, const uint8_t* &srcU, co
return 0;
}
-template <PixelFormat inputFormat, int shift, int out32>
+template <LAVPixelFormat inputFormat, int shift, int out32>
static int __stdcall yuv2rgb_process_lines(const uint8_t *srcY, const uint8_t *srcU, const uint8_t *srcV, uint8_t *dst, int width, int height, int srcStrideY, int srcStrideUV, int dstStride, int sliceYStart, int sliceYEnd, RGBCoeffs *coeffs)
{
const uint8_t *y = srcY;
@@ -304,7 +304,7 @@ static int __stdcall yuv2rgb_process_lines(const uint8_t *srcY, const uint8_t *s
const int endx = width - 4;
// 4:2:0 needs special handling for the first and the last line
- if (inputFormat == PIX_FMT_YUV420P) {
+ if (inputFormat == LAVPixFmt_YUV420) {
if (line == 0) {
for (int i = 0; i < endx; i += 4) {
yuv2rgb_convert_pixels<inputFormat, shift, out32, 0>(y, u, v, rgb, 0, 0, 0, line, coeffs);
@@ -320,7 +320,7 @@ static int __stdcall yuv2rgb_process_lines(const uint8_t *srcY, const uint8_t *s
for (; line < lastLine; line += 2) {
y = srcY + line * srcStrideY;
- if (inputFormat == PIX_FMT_YUV420P) {
+ if (inputFormat == LAVPixFmt_YUV420) {
u = srcU + (line >> 1) * srcStrideUV;
v = srcV + (line >> 1) * srcStrideUV;
} else {
@@ -336,7 +336,7 @@ static int __stdcall yuv2rgb_process_lines(const uint8_t *srcY, const uint8_t *s
yuv2rgb_convert_pixels<inputFormat, shift, out32, 1>(y, u, v, rgb, srcStrideY, srcStrideUV, dstStride, line, coeffs);
}
- if (inputFormat == PIX_FMT_YUV420P) {
+ if (inputFormat == LAVPixFmt_YUV420) {
if (sliceYEnd == height) {
y = srcY + (height - 1) * srcStrideY;
u = srcU + ((height >> 1) - 1) * srcStrideUV;
@@ -352,13 +352,13 @@ static int __stdcall yuv2rgb_process_lines(const uint8_t *srcY, const uint8_t *s
return 0;
}
-template <PixelFormat inputFormat, int shift, int out32>
+template <LAVPixelFormat inputFormat, int shift, int out32>
inline int yuv2rgb_convert(const uint8_t *srcY, const uint8_t *srcU, const uint8_t *srcV, uint8_t *dst, int width, int height, int srcStrideY, int srcStrideUV, int dstStride, RGBCoeffs *coeffs, int threads)
{
if (threads <= 1) {
yuv2rgb_process_lines<inputFormat, shift, out32>(srcY, srcU, srcV, dst, width, height, srcStrideY, srcStrideUV, dstStride, 0, height, coeffs);
} else {
- const int is_odd = (inputFormat == PIX_FMT_YUV420P);
+ const int is_odd = (inputFormat == LAVPixFmt_YUV420);
const int lines_per_thread = (height / threads)&~1;
Concurrency::parallel_for(0, threads, [&](int i) {
@@ -377,25 +377,36 @@ DECLARE_CONV_FUNC_IMPL(convert_yuv_rgb)
// Wrap the input format into template args
switch (inputFormat) {
- case PIX_FMT_YUV420P:
- case PIX_FMT_YUVJ420P:
- return yuv2rgb_convert<PIX_FMT_YUV420P, 0, out32>(src[0], src[1], src[2], dst, width, height, srcStride[0], srcStride[1], dstStride, coeffs, m_NumThreads);
- case PIX_FMT_YUV420P10LE:
- return yuv2rgb_convert<PIX_FMT_YUV420P, 2, out32>(src[0], src[1], src[2], dst, width, height, srcStride[0], srcStride[1], dstStride, coeffs, m_NumThreads);
- case PIX_FMT_YUV420P9LE:
- return yuv2rgb_convert<PIX_FMT_YUV420P, 1, out32>(src[0], src[1], src[2], dst, width, height, srcStride[0], srcStride[1], dstStride, coeffs, m_NumThreads);
- case PIX_FMT_YUV422P:
- case PIX_FMT_YUVJ422P:
- return yuv2rgb_convert<PIX_FMT_YUV422P, 0, out32>(src[0], src[1], src[2], dst, width, height, srcStride[0], srcStride[1], dstStride, coeffs, m_NumThreads);
- case PIX_FMT_YUV422P10LE:
- return yuv2rgb_convert<PIX_FMT_YUV422P, 2, out32>(src[0], src[1], src[2], dst, width, height, srcStride[0], srcStride[1], dstStride, coeffs, m_NumThreads);
- case PIX_FMT_YUV444P:
- case PIX_FMT_YUVJ444P:
- return yuv2rgb_convert<PIX_FMT_YUV444P, 0, out32>(src[0], src[1], src[2], dst, width, height, srcStride[0], srcStride[1], dstStride, coeffs, m_NumThreads);
- case PIX_FMT_YUV444P10LE:
- return yuv2rgb_convert<PIX_FMT_YUV444P, 2, out32>(src[0], src[1], src[2], dst, width, height, srcStride[0], srcStride[1], dstStride, coeffs, m_NumThreads);
- case PIX_FMT_YUV444P9LE:
- return yuv2rgb_convert<PIX_FMT_YUV444P, 1, out32>(src[0], src[1], src[2], dst, width, height, srcStride[0], srcStride[1], dstStride, coeffs, m_NumThreads);
+ case LAVPixFmt_YUV420:
+ return yuv2rgb_convert<LAVPixFmt_YUV420, 0, out32>(src[0], src[1], src[2], dst, width, height, srcStride[0], srcStride[1], dstStride, coeffs, m_NumThreads);
+ case LAVPixFmt_YUV420bX:
+ if (bpp == 10)
+ return yuv2rgb_convert<LAVPixFmt_YUV420, 2, out32>(src[0], src[1], src[2], dst, width, height, srcStride[0], srcStride[1], dstStride, coeffs, m_NumThreads);
+ else if (bpp == 9)
+ return yuv2rgb_convert<LAVPixFmt_YUV420, 1, out32>(src[0], src[1], src[2], dst, width, height, srcStride[0], srcStride[1], dstStride, coeffs, m_NumThreads);
+ else
+ ASSERT(0);
+ break;
+ case LAVPixFmt_YUV422:
+ return yuv2rgb_convert<LAVPixFmt_YUV422, 0, out32>(src[0], src[1], src[2], dst, width, height, srcStride[0], srcStride[1], dstStride, coeffs, m_NumThreads);
+ case LAVPixFmt_YUV422bX:
+ if (bpp == 10)
+ return yuv2rgb_convert<LAVPixFmt_YUV422, 2, out32>(src[0], src[1], src[2], dst, width, height, srcStride[0], srcStride[1], dstStride, coeffs, m_NumThreads);
+ else if (bpp == 9)
+ return yuv2rgb_convert<LAVPixFmt_YUV422, 1, out32>(src[0], src[1], src[2], dst, width, height, srcStride[0], srcStride[1], dstStride, coeffs, m_NumThreads);
+ else
+ ASSERT(0);
+ break;
+ case LAVPixFmt_YUV444:
+ return yuv2rgb_convert<LAVPixFmt_YUV444, 0, out32>(src[0], src[1], src[2], dst, width, height, srcStride[0], srcStride[1], dstStride, coeffs, m_NumThreads);
+ case LAVPixFmt_YUV444bX:
+ if (bpp == 10)
+ return yuv2rgb_convert<LAVPixFmt_YUV444, 2, out32>(src[0], src[1], src[2], dst, width, height, srcStride[0], srcStride[1], dstStride, coeffs, m_NumThreads);
+ else if (bpp == 9)
+ return yuv2rgb_convert<LAVPixFmt_YUV444, 1, out32>(src[0], src[1], src[2], dst, width, height, srcStride[0], srcStride[1], dstStride, coeffs, m_NumThreads);
+ else
+ ASSERT(0);
+ break;
default:
ASSERT(0);
}
@@ -415,12 +426,12 @@ RGBCoeffs* CLAVPixFmtConverter::getRGBCoeffs(int width, int height)
if (!m_rgbCoeffs)
m_rgbCoeffs = (RGBCoeffs *)_aligned_malloc(sizeof(RGBCoeffs), 16);
- AVColorSpace spc = swsColorSpace;
- if (spc == AVCOL_SPC_UNSPECIFIED) {
- spc = (swsHeight >= 720 || swsWidth >= 1280) ? AVCOL_SPC_BT709 : AVCOL_SPC_SMPTE170M;
+ DXVA2_VideoTransferMatrix matrix = (DXVA2_VideoTransferMatrix)m_ColorProps.VideoTransferMatrix;
+ if (matrix == DXVA2_VideoTransferMatrix_Unknown) {
+ matrix = (swsHeight >= 720 || swsWidth >= 1280) ? DXVA2_VideoTransferMatrix_BT709 : DXVA2_VideoTransferMatrix_BT601;
}
- BOOL inFullRange = (swsColorRange == AVCOL_RANGE_JPEG) || m_InputPixFmt == PIX_FMT_YUVJ420P || m_InputPixFmt == PIX_FMT_YUVJ422P || m_InputPixFmt == PIX_FMT_YUVJ444P;
+ BOOL inFullRange = (m_ColorProps.NominalRange == DXVA2_NominalRange_0_255);
BOOL outFullRange = (swsOutputRange == 0) ? inFullRange : (swsOutputRange == 2);
int inputWhite, inputBlack, inputChroma, outputWhite, outputBlack;
@@ -443,21 +454,20 @@ RGBCoeffs* CLAVPixFmtConverter::getRGBCoeffs(int width, int height)
}
double Kr, Kg, Kb;
- switch (spc) {
- case AVCOL_SPC_SMPTE170M:
- case AVCOL_SPC_BT470BG:
+ switch (matrix) {
+ case DXVA2_VideoTransferMatrix_BT601:
Kr = 0.299;
Kg = 0.587;
Kb = 0.114;
break;
- case AVCOL_SPC_SMPTE240M:
+ case DXVA2_VideoTransferMatrix_SMPTE240M:
Kr = 0.2120;
Kg = 0.7010;
Kb = 0.0870;
break;
default:
- DbgLog((LOG_TRACE, 10, L"::getRGBCoeffs(): Unknown color space: %d - defaulting to BT709", spc));
- case AVCOL_SPC_BT709:
+ DbgLog((LOG_TRACE, 10, L"::getRGBCoeffs(): Unknown color space: %d - defaulting to BT709", matrix));
+ case DXVA2_VideoTransferMatrix_BT709:
Kr = 0.2126;
Kg = 0.7152;
Kb = 0.0722;
diff --git a/decoder/LAVVideo/pixconv/yuv2yuv_unscaled.cpp b/decoder/LAVVideo/pixconv/yuv2yuv_unscaled.cpp
index 3c18b876..97a344ec 100644
--- a/decoder/LAVVideo/pixconv/yuv2yuv_unscaled.cpp
+++ b/decoder/LAVVideo/pixconv/yuv2yuv_unscaled.cpp
@@ -36,7 +36,7 @@ DECLARE_CONV_FUNC_IMPL(convert_yuv420_yv12_nv12_dither_le)
const int inUVStride = srcStride[1] >> 1;
const int outYStride = dstStride;
const int outUVStride = dstStride >> 1;
- const int shift = (inputFormat == PIX_FMT_YUV420P10LE ? 2 : (inputFormat == PIX_FMT_YUV420P9LE) ? 1 : 8);
+ const int shift = bpp - 8;
int line, i;
diff --git a/decoder/LAVVideo/pixconv/yuv444_ayuv.cpp b/decoder/LAVVideo/pixconv/yuv444_ayuv.cpp
index e9e3d1f3..aaacac9d 100644
--- a/decoder/LAVVideo/pixconv/yuv444_ayuv.cpp
+++ b/decoder/LAVVideo/pixconv/yuv444_ayuv.cpp
@@ -97,7 +97,7 @@ DECLARE_CONV_FUNC_IMPL(convert_yuv444_ayuv_dither_le)
const int outStride = dstStride << 2;
// Number of bits to shift to reach 8
- int shift = (inputFormat == PIX_FMT_YUV444P10LE ? 2 : (inputFormat == PIX_FMT_YUV444P9LE) ? 1 : 8);
+ int shift = bpp - 8;
int line, i;