Welcome to mirror list, hosted at ThFree Co, Russian Federation.

github.com/mpc-hc/LAVFilters.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author: Hendrik Leppkes <h.leppkes@gmail.com> 2013-05-09 16:01:08 +0400
committer: Hendrik Leppkes <h.leppkes@gmail.com> 2013-05-09 16:02:13 +0400
commit: ed7726af6e24af0b1d2bf0a8ce0999ff6097f5ac (patch)
tree: 7c5d0669905153d4fb2e66e568d14ed1e071d84a /decoder
parent: af525159b68cd9d0c630f816bdaf1b1c67f05f21 (diff)
Use AVPixelFormat instead of deprecated PixelFormat and disable old names
Diffstat (limited to 'decoder')
-rw-r--r--  decoder/LAVVideo/Filtering.cpp                        | 4
-rw-r--r--  decoder/LAVVideo/LAVPixFmtConverter.h                 | 6
-rw-r--r--  decoder/LAVVideo/LAVVideo.cpp                         | 2
-rw-r--r--  decoder/LAVVideo/decoders/ILAVDecoder.h               | 4
-rw-r--r--  decoder/LAVVideo/decoders/avcodec.cpp                 | 8
-rw-r--r--  decoder/LAVVideo/decoders/dxva2dec.cpp                | 2
-rw-r--r--  decoder/LAVVideo/decoders/pixfmt.cpp                  | 6
-rw-r--r--  decoder/LAVVideo/pixconv/convert_generic.cpp          | 6
-rw-r--r--  decoder/LAVVideo/stdafx.h                             | 1
-rw-r--r--  decoder/LAVVideo/subtitles/LAVSubtitleConsumer.cpp    | 10
10 files changed, 25 insertions, 24 deletions
diff --git a/decoder/LAVVideo/Filtering.cpp b/decoder/LAVVideo/Filtering.cpp
index 047381c9..007a2890 100644
--- a/decoder/LAVVideo/Filtering.cpp
+++ b/decoder/LAVVideo/Filtering.cpp
@@ -43,7 +43,7 @@ HRESULT CLAVVideo::Filter(LAVFrame *pFrame)
int ret = 0;
BOOL bFlush = pFrame->flags & LAV_FRAME_FLAG_FLUSH;
if (m_Decoder.IsInterlaced() && m_settings.DeintMode != DeintMode_Disable && m_settings.SWDeintMode == SWDeintMode_YADIF && ((bFlush && m_pFilterGraph) || pFrame->format == LAVPixFmt_YUV420 || pFrame->format == LAVPixFmt_YUV422 || pFrame->format == LAVPixFmt_NV12)) {
- PixelFormat ff_pixfmt = (pFrame->format == LAVPixFmt_YUV420) ? AV_PIX_FMT_YUV420P : (pFrame->format == LAVPixFmt_YUV422) ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_NV12;
+ AVPixelFormat ff_pixfmt = (pFrame->format == LAVPixFmt_YUV420) ? AV_PIX_FMT_YUV420P : (pFrame->format == LAVPixFmt_YUV422) ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_NV12;
if (!bFlush && (!m_pFilterGraph || pFrame->format != m_filterPixFmt || pFrame->width != m_filterWidth || pFrame->height != m_filterHeight)) {
DbgLog((LOG_TRACE, 10, L":Filter()(init) Initializing YADIF deinterlacing filter..."));
@@ -58,7 +58,7 @@ HRESULT CLAVVideo::Filter(LAVFrame *pFrame)
m_filterHeight = pFrame->height;
char args[512];
- enum PixelFormat pix_fmts[3];
+ enum AVPixelFormat pix_fmts[3];
if (ff_pixfmt == AV_PIX_FMT_NV12) {
pix_fmts[0] = AV_PIX_FMT_NV12;
diff --git a/decoder/LAVVideo/LAVPixFmtConverter.h b/decoder/LAVVideo/LAVPixFmtConverter.h
index 872a5309..7ac3e228 100644
--- a/decoder/LAVVideo/LAVPixFmtConverter.h
+++ b/decoder/LAVVideo/LAVPixFmtConverter.h
@@ -98,7 +98,7 @@ public:
BOOL IsRGBConverterActive() { return m_bRGBConverter; }
private:
- PixelFormat GetFFInput() {
+ AVPixelFormat GetFFInput() {
return getFFPixelFormatFromLAV(m_InputPixFmt, m_InBpp);
}
@@ -108,7 +108,7 @@ private:
void SelectConvertFunction();
// Helper functions for convert_generic
- HRESULT swscale_scale(enum PixelFormat srcPix, enum PixelFormat dstPix, const uint8_t* const src[], const int srcStride[], BYTE *pOut, int width, int height, int stride, LAVOutPixFmtDesc pixFmtDesc, bool swapPlanes12 = false);
+ HRESULT swscale_scale(enum AVPixelFormat srcPix, enum AVPixelFormat dstPix, const uint8_t* const src[], const int srcStride[], BYTE *pOut, int width, int height, int stride, LAVOutPixFmtDesc pixFmtDesc, bool swapPlanes12 = false);
HRESULT ConvertTo422Packed(const uint8_t* const src[4], const int srcStride[4], uint8_t *dst, int width, int height, int dstStride);
HRESULT ConvertToAYUV(const uint8_t* const src[4], const int srcStride[4], uint8_t *dst, int width, int height, int dstStride);
HRESULT ConvertToPX1X(const uint8_t* const src[4], const int srcStride[4], uint8_t *dst, int width, int height, int dstStride, int chromaVertical);
@@ -118,7 +118,7 @@ private:
HRESULT ConvertTov410(const uint8_t* const src[4], const int srcStride[4], uint8_t *dst, int width, int height, int dstStride);
void DestroySWScale() { if (m_pSwsContext) sws_freeContext(m_pSwsContext); m_pSwsContext = NULL; if (m_rgbCoeffs) _aligned_free(m_rgbCoeffs); m_rgbCoeffs = NULL; if (m_pRandomDithers) _aligned_free(m_pRandomDithers); m_pRandomDithers = NULL; };
- SwsContext *GetSWSContext(int width, int height, enum PixelFormat srcPix, enum PixelFormat dstPix, int flags);
+ SwsContext *GetSWSContext(int width, int height, enum AVPixelFormat srcPix, enum AVPixelFormat dstPix, int flags);
void ChangeStride(const uint8_t* src, int srcStride, uint8_t *dst, int dstStride, int width, int height, LAVOutPixFmts format);
diff --git a/decoder/LAVVideo/LAVVideo.cpp b/decoder/LAVVideo/LAVVideo.cpp
index 95ccde32..600d4afe 100644
--- a/decoder/LAVVideo/LAVVideo.cpp
+++ b/decoder/LAVVideo/LAVVideo.cpp
@@ -614,7 +614,7 @@ HRESULT CLAVVideo::CreateDecoder(const CMediaType *pmt)
m_LAVPinInfoValid = TRUE;
m_LAVPinInfo.flags = pPinInfo->GetStreamFlags();
- m_LAVPinInfo.pix_fmt = (PixelFormat)pPinInfo->GetPixelFormat();
+ m_LAVPinInfo.pix_fmt = (AVPixelFormat)pPinInfo->GetPixelFormat();
m_LAVPinInfo.has_b_frames = pPinInfo->GetHasBFrames();
SafeRelease(&pPinInfo);
diff --git a/decoder/LAVVideo/decoders/ILAVDecoder.h b/decoder/LAVVideo/decoders/ILAVDecoder.h
index dc34aa04..fa015f3a 100644
--- a/decoder/LAVVideo/decoders/ILAVDecoder.h
+++ b/decoder/LAVVideo/decoders/ILAVDecoder.h
@@ -75,7 +75,7 @@ LAVPixFmtDesc getPixelFormatDesc(LAVPixelFormat pixFmt);
/**
* Map the LAV Pixel Format to a FFMpeg pixel format (for swscale, etc)
*/
-PixelFormat getFFPixelFormatFromLAV(LAVPixelFormat pixFmt, int bpp);
+AVPixelFormat getFFPixelFormatFromLAV(LAVPixelFormat pixFmt, int bpp);
/**
* A Video Frame
@@ -156,7 +156,7 @@ HRESULT CopyLAVFrameInPlace(LAVFrame *pFrame);
typedef struct LAVPinInfo
{
DWORD flags; ///< Flags that describe the video content (see ILAVPinInfo.h for valid values)
- PixelFormat pix_fmt; ///< The pixel format used
+ AVPixelFormat pix_fmt; ///< The pixel format used
int has_b_frames;
} LAVPinInfo;
diff --git a/decoder/LAVVideo/decoders/avcodec.cpp b/decoder/LAVVideo/decoders/avcodec.cpp
index ebffaa3c..520a2ffa 100644
--- a/decoder/LAVVideo/decoders/avcodec.cpp
+++ b/decoder/LAVVideo/decoders/avcodec.cpp
@@ -118,7 +118,7 @@ static DXVA2_ExtendedFormat GetDXVA2ExtendedFlags(AVCodecContext *ctx, AVFrame *
// This mapping table should contain all pixel formats, except hardware formats (VDPAU, XVMC, DXVA, etc)
// A format that is not listed will be converted to YUV420
static struct PixelFormatMapping {
- PixelFormat ffpixfmt;
+ AVPixelFormat ffpixfmt;
LAVPixelFormat lavpixfmt;
BOOL conversion;
int bpp;
@@ -284,7 +284,7 @@ static AVCodecID ff_interlace_capable[] = {
AV_CODEC_ID_UTVIDEO
};
-static struct PixelFormatMapping getPixFmtMapping(PixelFormat pixfmt) {
+static struct PixelFormatMapping getPixFmtMapping(AVPixelFormat pixfmt) {
const PixelFormatMapping def = { pixfmt, LAVPixFmt_YUV420, TRUE, 8 };
PixelFormatMapping result = def;
for (int i = 0; i < countof(ff_pix_map); i++) {
@@ -1017,7 +1017,7 @@ STDMETHODIMP CDecAvcodec::EndOfStream()
STDMETHODIMP CDecAvcodec::GetPixelFormat(LAVPixelFormat *pPix, int *pBpp)
{
- PixelFormat pixfmt = m_pAVCtx ? m_pAVCtx->pix_fmt : AV_PIX_FMT_NONE;
+ AVPixelFormat pixfmt = m_pAVCtx ? m_pAVCtx->pix_fmt : AV_PIX_FMT_NONE;
PixelFormatMapping mapping = getPixFmtMapping(pixfmt);
if (pPix)
*pPix = mapping.lavpixfmt;
@@ -1032,7 +1032,7 @@ STDMETHODIMP CDecAvcodec::ConvertPixFmt(AVFrame *pFrame, LAVFrame *pOutFrame)
AllocLAVFrameBuffers(pOutFrame);
// Map to swscale compatible format
- PixelFormat dstFormat = getFFPixelFormatFromLAV(pOutFrame->format, pOutFrame->bpp);
+ AVPixelFormat dstFormat = getFFPixelFormatFromLAV(pOutFrame->format, pOutFrame->bpp);
// Get a context
m_pSwsContext = sws_getCachedContext(m_pSwsContext, pFrame->width, pFrame->height, (AVPixelFormat)pFrame->format, pFrame->width, pFrame->height, dstFormat, SWS_BILINEAR | SWS_PRINT_INFO, NULL, NULL, NULL);
diff --git a/decoder/LAVVideo/decoders/dxva2dec.cpp b/decoder/LAVVideo/decoders/dxva2dec.cpp
index 97a48300..b541e1da 100644
--- a/decoder/LAVVideo/decoders/dxva2dec.cpp
+++ b/decoder/LAVVideo/decoders/dxva2dec.cpp
@@ -987,7 +987,7 @@ HRESULT CDecDXVA2::CreateDXVA2Decoder(int nSurfaces, IDirect3DSurface9 **ppSurfa
return S_OK;
}
-static enum PixelFormat get_dxva2_format(struct AVCodecContext *s, const enum PixelFormat * fmt)
+static enum AVPixelFormat get_dxva2_format(struct AVCodecContext *s, const enum AVPixelFormat * fmt)
{
while (*fmt != AV_PIX_FMT_NONE && *fmt != AV_PIX_FMT_DXVA2_VLD) {
++fmt;
diff --git a/decoder/LAVVideo/decoders/pixfmt.cpp b/decoder/LAVVideo/decoders/pixfmt.cpp
index 0c902755..4a265c62 100644
--- a/decoder/LAVVideo/decoders/pixfmt.cpp
+++ b/decoder/LAVVideo/decoders/pixfmt.cpp
@@ -42,7 +42,7 @@ LAVPixFmtDesc getPixelFormatDesc(LAVPixelFormat pixFmt)
static struct {
LAVPixelFormat pixfmt;
- PixelFormat ffpixfmt;
+ AVPixelFormat ffpixfmt;
} lav_ff_pixfmt_map[] = {
{ LAVPixFmt_YUV420, AV_PIX_FMT_YUV420P },
{ LAVPixFmt_YUV422, AV_PIX_FMT_YUV422P },
@@ -55,9 +55,9 @@ static struct {
{ LAVPixFmt_RGB48, AV_PIX_FMT_BGR48LE },
};
-PixelFormat getFFPixelFormatFromLAV(LAVPixelFormat pixFmt, int bpp)
+AVPixelFormat getFFPixelFormatFromLAV(LAVPixelFormat pixFmt, int bpp)
{
- PixelFormat fmt = AV_PIX_FMT_NONE;
+ AVPixelFormat fmt = AV_PIX_FMT_NONE;
for(int i = 0; i < countof(lav_ff_pixfmt_map); i++) {
if (lav_ff_pixfmt_map[i].pixfmt == pixFmt) {
fmt = lav_ff_pixfmt_map[i].ffpixfmt;
diff --git a/decoder/LAVVideo/pixconv/convert_generic.cpp b/decoder/LAVVideo/pixconv/convert_generic.cpp
index 47ea2954..5801c3c3 100644
--- a/decoder/LAVVideo/pixconv/convert_generic.cpp
+++ b/decoder/LAVVideo/pixconv/convert_generic.cpp
@@ -30,7 +30,7 @@ DECLARE_CONV_FUNC_IMPL(convert_generic)
{
HRESULT hr = S_OK;
- PixelFormat inputFmt = GetFFInput();
+ AVPixelFormat inputFmt = GetFFInput();
switch (m_OutputPixFmt) {
case LAVOutPixFmt_YV12:
@@ -93,7 +93,7 @@ DECLARE_CONV_FUNC_IMPL(convert_generic)
return S_OK;
}
-inline SwsContext *CLAVPixFmtConverter::GetSWSContext(int width, int height, enum PixelFormat srcPix, enum PixelFormat dstPix, int flags)
+inline SwsContext *CLAVPixFmtConverter::GetSWSContext(int width, int height, enum AVPixelFormat srcPix, enum AVPixelFormat dstPix, int flags)
{
if (!m_pSwsContext || swsWidth != width || swsHeight != height) {
// Get context
@@ -134,7 +134,7 @@ inline SwsContext *CLAVPixFmtConverter::GetSWSContext(int width, int height, enu
return m_pSwsContext;
}
-HRESULT CLAVPixFmtConverter::swscale_scale(enum PixelFormat srcPix, enum PixelFormat dstPix, const uint8_t* const src[], const int srcStride[], BYTE *pOut, int width, int height, int stride, LAVOutPixFmtDesc pixFmtDesc, bool swapPlanes12)
+HRESULT CLAVPixFmtConverter::swscale_scale(enum AVPixelFormat srcPix, enum AVPixelFormat dstPix, const uint8_t* const src[], const int srcStride[], BYTE *pOut, int width, int height, int stride, LAVOutPixFmtDesc pixFmtDesc, bool swapPlanes12)
{
uint8_t *dst[4];
int dstStride[4];
diff --git a/decoder/LAVVideo/stdafx.h b/decoder/LAVVideo/stdafx.h
index 450dc649..e4f4221a 100644
--- a/decoder/LAVVideo/stdafx.h
+++ b/decoder/LAVVideo/stdafx.h
@@ -44,6 +44,7 @@
#pragma warning(disable:4244)
extern "C" {
#define __STDC_CONSTANT_MACROS
+#define FF_API_PIX_FMT 0
#include "libavcodec/avcodec.h"
#include "libswscale/swscale.h"
#include "libavfilter/avfilter.h"
diff --git a/decoder/LAVVideo/subtitles/LAVSubtitleConsumer.cpp b/decoder/LAVVideo/subtitles/LAVSubtitleConsumer.cpp
index 62a27f8a..19326e58 100644
--- a/decoder/LAVVideo/subtitles/LAVSubtitleConsumer.cpp
+++ b/decoder/LAVVideo/subtitles/LAVSubtitleConsumer.cpp
@@ -207,7 +207,7 @@ STDMETHODIMP CLAVSubtitleConsumer::ProcessFrame(LAVFrame *pFrame)
static struct {
LAVPixelFormat pixfmt;
- PixelFormat ffpixfmt;
+ AVPixelFormat ffpixfmt;
} lav_ff_subtitle_pixfmt_map[] = {
{ LAVPixFmt_YUV420, AV_PIX_FMT_YUVA420P },
{ LAVPixFmt_YUV420bX, AV_PIX_FMT_YUVA420P },
@@ -229,7 +229,7 @@ static LAVPixFmtDesc ff_sub_pixfmt_desc[] = {
{ 4, 1, { 1 }, { 1 } }, ///< PIX_FMT_BGRA
};
-static LAVPixFmtDesc getFFSubPixelFormatDesc(PixelFormat pixFmt)
+static LAVPixFmtDesc getFFSubPixelFormatDesc(AVPixelFormat pixFmt)
{
int index = 0;
switch(pixFmt) {
@@ -251,9 +251,9 @@ static LAVPixFmtDesc getFFSubPixelFormatDesc(PixelFormat pixFmt)
return ff_sub_pixfmt_desc[index];
}
-static PixelFormat getFFPixFmtForSubtitle(LAVPixelFormat pixFmt)
+static AVPixelFormat getFFPixFmtForSubtitle(LAVPixelFormat pixFmt)
{
- PixelFormat fmt = AV_PIX_FMT_NONE;
+ AVPixelFormat fmt = AV_PIX_FMT_NONE;
for(int i = 0; i < countof(lav_ff_subtitle_pixfmt_map); i++) {
if (lav_ff_subtitle_pixfmt_map[i].pixfmt == pixFmt) {
return lav_ff_subtitle_pixfmt_map[i].ffpixfmt;
@@ -319,7 +319,7 @@ STDMETHODIMP CLAVSubtitleConsumer::ProcessSubtitleBitmap(LAVPixelFormat pixFmt,
// If we need scaling (either scaling or pixel conversion), do it here before starting the blend process
if (bNeedScaling) {
uint8_t *tmpBuf = NULL;
- const PixelFormat avPixFmt = getFFPixFmtForSubtitle(pixFmt);
+ const AVPixelFormat avPixFmt = getFFPixFmtForSubtitle(pixFmt);
// Calculate scaled size
// We must ensure that the scaled subs still fit into the video