Welcome to mirror list, hosted at ThFree Co, Russian Federation.

github.com/mpc-hc/LAVFilters.git - Unnamed repository; edit this file 'description' to name the repository.
summary refs log tree commit diff
diff options
context:
space:
mode:
author    Hendrik Leppkes <h.leppkes@gmail.com>  2014-05-25 17:53:34 +0400
committer Hendrik Leppkes <h.leppkes@gmail.com>  2014-05-25 17:53:34 +0400
commit    0c8c08104c3e0d92529a01701260254704740241 (patch)
tree      5dbc06679285a79d6bd67ad5cace7c7e49823c11 /decoder
parent    7a48a73fabc8d980cd0cda951951b751bb32ca72 (diff)
Convert frame stride to ptrdiff_t
Diffstat (limited to 'decoder')
-rw-r--r--  decoder/LAVVideo/Filtering.cpp                 13
-rw-r--r--  decoder/LAVVideo/LAVPixFmtConverter.cpp        18
-rw-r--r--  decoder/LAVVideo/LAVPixFmtConverter.h          22
-rw-r--r--  decoder/LAVVideo/Media.cpp                     12
-rw-r--r--  decoder/LAVVideo/Media.h                        2
-rw-r--r--  decoder/LAVVideo/decoders/ILAVDecoder.h         4
-rw-r--r--  decoder/LAVVideo/decoders/avcodec.cpp           6
-rw-r--r--  decoder/LAVVideo/decoders/pixfmt.cpp            4
-rw-r--r--  decoder/LAVVideo/decoders/wmv9.cpp              2
-rw-r--r--  decoder/LAVVideo/decoders/wmv9mft.cpp           2
-rw-r--r--  decoder/LAVVideo/pixconv/convert_generic.cpp  103
-rw-r--r--  decoder/LAVVideo/pixconv/pixconv_internal.h     1
-rw-r--r--  decoder/LAVVideo/pixconv/rgb2rgb_unscaled.cpp   2
13 files changed, 105 insertions(+), 86 deletions(-)
diff --git a/decoder/LAVVideo/Filtering.cpp b/decoder/LAVVideo/Filtering.cpp
index 3f651a53..84ecc10b 100644
--- a/decoder/LAVVideo/Filtering.cpp
+++ b/decoder/LAVVideo/Filtering.cpp
@@ -134,8 +134,10 @@ HRESULT CLAVVideo::Filter(LAVFrame *pFrame)
if (!bFlush) {
in_frame = av_frame_alloc();
- memcpy(in_frame->data, pFrame->data, sizeof(pFrame->data));
- memcpy(in_frame->linesize, pFrame->stride, sizeof(pFrame->stride));
+ for (int i = 0; i < 4; i++) {
+ in_frame->data[i] = pFrame->data[i];
+ in_frame->linesize[i] = (int)pFrame->stride[i];
+ }
in_frame->width = pFrame->width;
in_frame->height = pFrame->height;
@@ -199,8 +201,11 @@ HRESULT CLAVVideo::Filter(LAVFrame *pFrame)
REFERENCE_TIME pts = av_rescale(out_frame->pts, m_pFilterBufferSink->inputs[0]->time_base.num * 10000000LL, m_pFilterBufferSink->inputs[0]->time_base.den);
outFrame->rtStart = pts;
outFrame->rtStop = pts + rtDuration;
- memcpy(outFrame->data, out_frame->data, sizeof(outFrame->data));
- memcpy(outFrame->stride, out_frame->linesize, sizeof(outFrame->stride));
+
+ for (int i = 0; i < 4; i++) {
+ outFrame->data[i] = out_frame->data[i];
+ outFrame->stride[i] = out_frame->linesize[i];
+ }
outFrame->destruct = avfilter_free_lav_buffer;
outFrame->priv_data = av_frame_alloc();
diff --git a/decoder/LAVVideo/LAVPixFmtConverter.cpp b/decoder/LAVVideo/LAVPixFmtConverter.cpp
index feb4aa75..ef3534a0 100644
--- a/decoder/LAVVideo/LAVPixFmtConverter.cpp
+++ b/decoder/LAVVideo/LAVPixFmtConverter.cpp
@@ -400,9 +400,9 @@ void CLAVPixFmtConverter::SelectConvertFunction()
}
}
-HRESULT CLAVPixFmtConverter::Convert(LAVFrame *pFrame, uint8_t *dst, int width, int height, int dstStride, int planeHeight) {
+HRESULT CLAVPixFmtConverter::Convert(LAVFrame *pFrame, uint8_t *dst, int width, int height, ptrdiff_t dstStride, int planeHeight) {
uint8_t *out = dst;
- int outStride = dstStride, i;
+ ptrdiff_t outStride = dstStride, i;
planeHeight = max(height, planeHeight);
// Check if we have proper pixel alignment and the dst memory is actually aligned
if (m_RequiredAlignment && (FFALIGN(dstStride, m_RequiredAlignment) != dstStride || ((uintptr_t)dst % 16u))) {
@@ -439,16 +439,16 @@ HRESULT CLAVPixFmtConverter::Convert(LAVFrame *pFrame, uint8_t *dst, int width,
return hr;
}
-void CLAVPixFmtConverter::ChangeStride(const uint8_t* src, int srcStride, uint8_t *dst, int dstStride, int width, int height, int planeHeight, LAVOutPixFmts format)
+void CLAVPixFmtConverter::ChangeStride(const uint8_t* src, ptrdiff_t srcStride, uint8_t *dst, ptrdiff_t dstStride, int width, int height, int planeHeight, LAVOutPixFmts format)
{
LAVOutPixFmtDesc desc = lav_pixfmt_desc[format];
int line = 0;
// Copy first plane
- const int widthBytes = width * desc.codedbytes;
- const int srcStrideBytes = srcStride * desc.codedbytes;
- const int dstStrideBytes = dstStride * desc.codedbytes;
+ const size_t widthBytes = width * desc.codedbytes;
+ const ptrdiff_t srcStrideBytes = srcStride * desc.codedbytes;
+ const ptrdiff_t dstStrideBytes = dstStride * desc.codedbytes;
for (line = 0; line < height; ++line) {
memcpy(dst, src, widthBytes);
src += srcStrideBytes;
@@ -457,11 +457,11 @@ void CLAVPixFmtConverter::ChangeStride(const uint8_t* src, int srcStride, uint8_
dst += (planeHeight - height) * dstStrideBytes;
for (int plane = 1; plane < desc.planes; ++plane) {
- const int planeWidth = widthBytes / desc.planeWidth[plane];
+ const size_t planeWidth = widthBytes / desc.planeWidth[plane];
const int activePlaneHeight = height / desc.planeHeight[plane];
const int totalPlaneHeight = planeHeight / desc.planeHeight[plane];
- const int srcPlaneStride = srcStrideBytes / desc.planeWidth[plane];
- const int dstPlaneStride = dstStrideBytes / desc.planeWidth[plane];
+ const ptrdiff_t srcPlaneStride = srcStrideBytes / desc.planeWidth[plane];
+ const ptrdiff_t dstPlaneStride = dstStrideBytes / desc.planeWidth[plane];
for (line = 0; line < activePlaneHeight; ++line) {
memcpy(dst, src, planeWidth);
src += srcPlaneStride;
diff --git a/decoder/LAVVideo/LAVPixFmtConverter.h b/decoder/LAVVideo/LAVPixFmtConverter.h
index 10e7aa94..1eb37c1d 100644
--- a/decoder/LAVVideo/LAVPixFmtConverter.h
+++ b/decoder/LAVVideo/LAVPixFmtConverter.h
@@ -22,7 +22,7 @@
#include "LAVVideoSettings.h"
#include "decoders/ILAVDecoder.h"
-#define CONV_FUNC_PARAMS (const uint8_t* const src[4], const int srcStride[4], uint8_t* dst[4], const ptrdiff_t dstStride[4], int width, int height, LAVPixelFormat inputFormat, int bpp, LAVOutPixFmts outputFormat)
+#define CONV_FUNC_PARAMS (const uint8_t* const src[4], const ptrdiff_t srcStride[4], uint8_t* dst[4], const ptrdiff_t dstStride[4], int width, int height, LAVPixelFormat inputFormat, int bpp, LAVOutPixFmts outputFormat)
#define DECLARE_CONV_FUNC(name) \
HRESULT name CONV_FUNC_PARAMS
@@ -75,7 +75,7 @@ public:
void GetMediaType(CMediaType *mt, int index, LONG biWidth, LONG biHeight, DWORD dwAspectX, DWORD dwAspectY, REFERENCE_TIME rtAvgTime, BOOL bInterlaced = TRUE, BOOL bVIH1 = FALSE);
BOOL IsAllowedSubtype(const GUID *guid);
- HRESULT Convert(LAVFrame *pFrame, uint8_t *dst, int width, int height, int dstStride, int planeHeight);
+ HRESULT Convert(LAVFrame *pFrame, uint8_t *dst, int width, int height, ptrdiff_t dstStride, int planeHeight);
BOOL IsRGBConverterActive() { return m_bRGBConverter; }
@@ -92,19 +92,19 @@ private:
void SelectConvertFunction();
// Helper functions for convert_generic
- HRESULT swscale_scale(enum AVPixelFormat srcPix, enum AVPixelFormat dstPix, const uint8_t* const src[], const int srcStride[], uint8_t* dst[], int width, int height, const ptrdiff_t dstStride[], LAVOutPixFmtDesc pixFmtDesc, bool swapPlanes12 = false);
- HRESULT ConvertTo422Packed(const uint8_t* const src[4], const int srcStride[4], uint8_t *dst[4], int width, int height, const ptrdiff_t dstStride[4]);
- HRESULT ConvertToAYUV(const uint8_t* const src[4], const int srcStride[4], uint8_t *dst[4], int width, int height, const ptrdiff_t dstStride[4]);
- HRESULT ConvertToPX1X(const uint8_t* const src[4], const int srcStride[4], uint8_t *dst[4], int width, int height, const ptrdiff_t dstStride[4], int chromaVertical);
- HRESULT ConvertToY410(const uint8_t* const src[4], const int srcStride[4], uint8_t *dst[4], int width, int height, const ptrdiff_t dstStride[4]);
- HRESULT ConvertToY416(const uint8_t* const src[4], const int srcStride[4], uint8_t *dst[4], int width, int height, const ptrdiff_t dstStride[4]);
- HRESULT ConvertTov210(const uint8_t* const src[4], const int srcStride[4], uint8_t *dst[4], int width, int height, const ptrdiff_t dstStride[4]);
- HRESULT ConvertTov410(const uint8_t* const src[4], const int srcStride[4], uint8_t *dst[4], int width, int height, const ptrdiff_t dstStride[4]);
+ HRESULT swscale_scale(enum AVPixelFormat srcPix, enum AVPixelFormat dstPix, const uint8_t* const src[4], const ptrdiff_t srcStride[4], uint8_t* dst[4], int width, int height, const ptrdiff_t dstStride[4], LAVOutPixFmtDesc pixFmtDesc, bool swapPlanes12 = false);
+ HRESULT ConvertTo422Packed(const uint8_t* const src[4], const ptrdiff_t srcStride[4], uint8_t *dst[4], int width, int height, const ptrdiff_t dstStride[4]);
+ HRESULT ConvertToAYUV(const uint8_t* const src[4], const ptrdiff_t srcStride[4], uint8_t *dst[4], int width, int height, const ptrdiff_t dstStride[4]);
+ HRESULT ConvertToPX1X(const uint8_t* const src[4], const ptrdiff_t srcStride[4], uint8_t *dst[4], int width, int height, const ptrdiff_t dstStride[4], int chromaVertical);
+ HRESULT ConvertToY410(const uint8_t* const src[4], const ptrdiff_t srcStride[4], uint8_t *dst[4], int width, int height, const ptrdiff_t dstStride[4]);
+ HRESULT ConvertToY416(const uint8_t* const src[4], const ptrdiff_t srcStride[4], uint8_t *dst[4], int width, int height, const ptrdiff_t dstStride[4]);
+ HRESULT ConvertTov210(const uint8_t* const src[4], const ptrdiff_t srcStride[4], uint8_t *dst[4], int width, int height, const ptrdiff_t dstStride[4]);
+ HRESULT ConvertTov410(const uint8_t* const src[4], const ptrdiff_t srcStride[4], uint8_t *dst[4], int width, int height, const ptrdiff_t dstStride[4]);
void DestroySWScale() { if (m_pSwsContext) sws_freeContext(m_pSwsContext); m_pSwsContext = nullptr; if (m_rgbCoeffs) _aligned_free(m_rgbCoeffs); m_rgbCoeffs = nullptr; if (m_pRandomDithers) _aligned_free(m_pRandomDithers); m_pRandomDithers = nullptr; };
SwsContext *GetSWSContext(int width, int height, enum AVPixelFormat srcPix, enum AVPixelFormat dstPix, int flags);
- void ChangeStride(const uint8_t* src, int srcStride, uint8_t *dst, int dstStride, int width, int height, int planeHeight, LAVOutPixFmts format);
+ void ChangeStride(const uint8_t* src, ptrdiff_t srcStride, uint8_t *dst, ptrdiff_t dstStride, int width, int height, int planeHeight, LAVOutPixFmts format);
typedef HRESULT (CLAVPixFmtConverter::*ConverterFn) CONV_FUNC_PARAMS;
diff --git a/decoder/LAVVideo/Media.cpp b/decoder/LAVVideo/Media.cpp
index 32bb4cfb..ac432e0d 100644
--- a/decoder/LAVVideo/Media.cpp
+++ b/decoder/LAVVideo/Media.cpp
@@ -743,3 +743,15 @@ const uint8_t* CheckForEndOfSequence(AVCodecID codec, const uint8_t *buf, long l
}
return nullptr;
}
+
+int sws_scale2(struct SwsContext *c, const uint8_t *const srcSlice[], const ptrdiff_t srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const ptrdiff_t dstStride[])
+{
+ int srcStride2[4];
+ int dstStride2[4];
+
+ for (int i = 0; i < 4; i++) {
+ srcStride2[i] = (int)srcStride[i];
+ dstStride2[i] = (int)dstStride[i];
+ }
+ return sws_scale(c, srcSlice, srcStride2, srcSliceY, srcSliceH, dst, dstStride2);
+}
diff --git a/decoder/LAVVideo/Media.h b/decoder/LAVVideo/Media.h
index 2f64b42e..bcd94da1 100644
--- a/decoder/LAVVideo/Media.h
+++ b/decoder/LAVVideo/Media.h
@@ -35,3 +35,5 @@ const codec_config_t *get_codec_config(LAVVideoCodec codec);
int flip_plane(BYTE *buffer, int stride, int height);
void fillDXVAExtFormat(DXVA2_ExtendedFormat &fmt, int range, int primaries, int matrix, int transfer);
const uint8_t* CheckForEndOfSequence(AVCodecID codec, const uint8_t *buf, long len, uint32_t *state);
+
+int sws_scale2(struct SwsContext *c, const uint8_t *const srcSlice[], const ptrdiff_t srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const ptrdiff_t dstStride[]);
diff --git a/decoder/LAVVideo/decoders/ILAVDecoder.h b/decoder/LAVVideo/decoders/ILAVDecoder.h
index 05bc2b1d..26a82de0 100644
--- a/decoder/LAVVideo/decoders/ILAVDecoder.h
+++ b/decoder/LAVVideo/decoders/ILAVDecoder.h
@@ -94,7 +94,7 @@ typedef struct LAVFrame {
int height; ///< height of the frame (in pixel)
BYTE *data[4]; ///< pointer to the picture planes
- int stride[4]; ///< stride of the planes (in bytes)
+ ptrdiff_t stride[4]; ///< stride of the planes (in bytes)
LAVPixelFormat format; ///< pixel format of the frame
int bpp; ///< bits per pixel, only meaningful for YUV420bX, YUV422bX or YUV444bX
@@ -134,7 +134,7 @@ typedef struct LAVFrame {
* @param stride stride to use (in pixel). If 0, a stride will be computed to fill usual alignment rules
* @return HRESULT
*/
-HRESULT AllocLAVFrameBuffers(LAVFrame *pFrame, int stride = 0);
+HRESULT AllocLAVFrameBuffers(LAVFrame *pFrame, ptrdiff_t stride = 0);
/**
* Destruct a LAV Frame, freeing its data pointers
diff --git a/decoder/LAVVideo/decoders/avcodec.cpp b/decoder/LAVVideo/decoders/avcodec.cpp
index b6071e86..4d41499d 100644
--- a/decoder/LAVVideo/decoders/avcodec.cpp
+++ b/decoder/LAVVideo/decoders/avcodec.cpp
@@ -998,8 +998,12 @@ STDMETHODIMP CDecAvcodec::ConvertPixFmt(AVFrame *pFrame, LAVFrame *pOutFrame)
// Get a context
m_pSwsContext = sws_getCachedContext(m_pSwsContext, pFrame->width, pFrame->height, (AVPixelFormat)pFrame->format, pFrame->width, pFrame->height, dstFormat, SWS_BILINEAR | SWS_PRINT_INFO, nullptr, nullptr, nullptr);
+ ptrdiff_t linesize[4];
+ for (int i = 0; i < 4; i++)
+ linesize[i] = pFrame->linesize[i];
+
// Perform conversion
- sws_scale(m_pSwsContext, pFrame->data, pFrame->linesize, 0, pFrame->height, pOutFrame->data, pOutFrame->stride);
+ sws_scale2(m_pSwsContext, pFrame->data, linesize, 0, pFrame->height, pOutFrame->data, pOutFrame->stride);
return S_OK;
}
diff --git a/decoder/LAVVideo/decoders/pixfmt.cpp b/decoder/LAVVideo/decoders/pixfmt.cpp
index 79b47359..3a0670e6 100644
--- a/decoder/LAVVideo/decoders/pixfmt.cpp
+++ b/decoder/LAVVideo/decoders/pixfmt.cpp
@@ -91,7 +91,7 @@ static void free_buffers(struct LAVFrame *pFrame)
memset(pFrame->data, 0, sizeof(pFrame->data));
}
-HRESULT AllocLAVFrameBuffers(LAVFrame *pFrame, int stride)
+HRESULT AllocLAVFrameBuffers(LAVFrame *pFrame, ptrdiff_t stride)
{
LAVPixFmtDesc desc = getPixelFormatDesc(pFrame->format);
@@ -105,7 +105,7 @@ HRESULT AllocLAVFrameBuffers(LAVFrame *pFrame, int stride)
memset(pFrame->data, 0, sizeof(pFrame->data));
memset(pFrame->stride, 0, sizeof(pFrame->stride));
for (int plane = 0; plane < desc.planes; plane++) {
- int planeStride = stride / desc.planeWidth[plane];
+ ptrdiff_t planeStride = stride / desc.planeWidth[plane];
size_t size = planeStride * (pFrame->height / desc.planeHeight[plane]);
pFrame->data[plane] = (BYTE *)_aligned_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE, 64);
pFrame->stride[plane] = planeStride;
diff --git a/decoder/LAVVideo/decoders/wmv9.cpp b/decoder/LAVVideo/decoders/wmv9.cpp
index f2e0c952..4b6cc52b 100644
--- a/decoder/LAVVideo/decoders/wmv9.cpp
+++ b/decoder/LAVVideo/decoders/wmv9.cpp
@@ -441,7 +441,7 @@ STDMETHODIMP CDecWMV9::Decode(const BYTE *buffer, int buflen, REFERENCE_TIME rtS
return ProcessOutput();
}
-static void memcpy_plane(BYTE *dst, const BYTE *src, int width, int stride, int height)
+static void memcpy_plane(BYTE *dst, const BYTE *src, ptrdiff_t width, ptrdiff_t stride, int height)
{
for (int i = 0; i < height; i++) {
memcpy(dst, src, width);
diff --git a/decoder/LAVVideo/decoders/wmv9mft.cpp b/decoder/LAVVideo/decoders/wmv9mft.cpp
index 39688eb8..2e4475c9 100644
--- a/decoder/LAVVideo/decoders/wmv9mft.cpp
+++ b/decoder/LAVVideo/decoders/wmv9mft.cpp
@@ -362,7 +362,7 @@ STDMETHODIMP CDecWMV9MFT::Decode(const BYTE *buffer, int buflen, REFERENCE_TIME
return ProcessOutput();
}
-static inline void memcpy_plane(BYTE *dst, const BYTE *src, int width, int stride, int height)
+static inline void memcpy_plane(BYTE *dst, const BYTE *src, ptrdiff_t width, ptrdiff_t stride, int height)
{
for (int i = 0; i < height; i++) {
memcpy(dst, src, width);
diff --git a/decoder/LAVVideo/pixconv/convert_generic.cpp b/decoder/LAVVideo/pixconv/convert_generic.cpp
index 103f0c75..e2b7d648 100644
--- a/decoder/LAVVideo/pixconv/convert_generic.cpp
+++ b/decoder/LAVVideo/pixconv/convert_generic.cpp
@@ -137,7 +137,7 @@ inline SwsContext *CLAVPixFmtConverter::GetSWSContext(int width, int height, enu
return m_pSwsContext;
}
-HRESULT CLAVPixFmtConverter::swscale_scale(enum AVPixelFormat srcPix, enum AVPixelFormat dstPix, const uint8_t* const src[], const int srcStride[], uint8_t* dst[], int width, int height, const ptrdiff_t dstStride[], LAVOutPixFmtDesc pixFmtDesc, bool swapPlanes12)
+HRESULT CLAVPixFmtConverter::swscale_scale(enum AVPixelFormat srcPix, enum AVPixelFormat dstPix, const uint8_t* const src[], const ptrdiff_t srcStride[], uint8_t* dst[], int width, int height, const ptrdiff_t dstStride[], LAVOutPixFmtDesc pixFmtDesc, bool swapPlanes12)
{
int ret;
@@ -150,29 +150,24 @@ HRESULT CLAVPixFmtConverter::swscale_scale(enum AVPixelFormat srcPix, enum AVPix
dst[2] = tmp;
}
- // sws needs int stride, not ptrdiff
- int stride[4];
- for (int i = 0; i < 4; i++)
- stride[i] = (int)dstStride[i];
-
- ret = sws_scale(ctx, src, srcStride, 0, height, dst, stride);
+ ret = sws_scale2(ctx, src, srcStride, 0, height, dst, dstStride);
return S_OK;
}
-HRESULT CLAVPixFmtConverter::ConvertTo422Packed(const uint8_t* const src[4], const int srcStride[4], uint8_t* dst[], int width, int height, const ptrdiff_t dstStride[])
+HRESULT CLAVPixFmtConverter::ConvertTo422Packed(const uint8_t* const src[4], const ptrdiff_t srcStride[4], uint8_t* dst[], int width, int height, const ptrdiff_t dstStride[])
{
const BYTE *y = nullptr;
const BYTE *u = nullptr;
const BYTE *v = nullptr;
- int line, i;
- int sourceStride = 0;
+ ptrdiff_t line, i;
+ ptrdiff_t sourceStride = 0;
BYTE *pTmpBuffer = nullptr;
if (m_InputPixFmt != LAVPixFmt_YUV422) {
- uint8_t *tmp[4] = {nullptr};
- int tmpStride[4] = {0};
- int scaleStride = FFALIGN(width, 32);
+ uint8_t *tmp[4] = {nullptr};
+ ptrdiff_t tmpStride[4] = {0};
+ ptrdiff_t scaleStride = FFALIGN(width, 32);
pTmpBuffer = (BYTE *)av_malloc(height * scaleStride * 2);
@@ -187,7 +182,7 @@ HRESULT CLAVPixFmtConverter::ConvertTo422Packed(const uint8_t* const src[4], con
tmpStride[3] = 0;
SwsContext *ctx = GetSWSContext(width, height, GetFFInput(), AV_PIX_FMT_YUV422P, SWS_BILINEAR);
- sws_scale(ctx, src, srcStride, 0, height, tmp, tmpStride);
+ sws_scale2(ctx, src, srcStride, 0, height, tmp, tmpStride);
y = tmp[0];
u = tmp[1];
@@ -205,7 +200,7 @@ HRESULT CLAVPixFmtConverter::ConvertTo422Packed(const uint8_t* const src[4], con
uint8_t *out = dst[0];
int halfwidth = width >> 1;
- int halfstride = sourceStride >> 1;
+ ptrdiff_t halfstride = sourceStride >> 1;
if (m_OutputPixFmt == LAVOutPixFmt_YUY2) {
for (line = 0; line < height; ++line) {
@@ -256,19 +251,19 @@ HRESULT CLAVPixFmtConverter::ConvertTo422Packed(const uint8_t* const src[4], con
return S_OK;
}
-HRESULT CLAVPixFmtConverter::ConvertToAYUV(const uint8_t* const src[4], const int srcStride[4], uint8_t* dst[], int width, int height, const ptrdiff_t dstStride[])
+HRESULT CLAVPixFmtConverter::ConvertToAYUV(const uint8_t* const src[4], const ptrdiff_t srcStride[4], uint8_t* dst[], int width, int height, const ptrdiff_t dstStride[])
{
const BYTE *y = nullptr;
const BYTE *u = nullptr;
const BYTE *v = nullptr;
- int line, i = 0;
- int sourceStride = 0;
+ ptrdiff_t line, i = 0;
+ ptrdiff_t sourceStride = 0;
BYTE *pTmpBuffer = nullptr;
if (m_InputPixFmt != LAVPixFmt_YUV444) {
- uint8_t *tmp[4] = {nullptr};
- int tmpStride[4] = {0};
- int scaleStride = FFALIGN(width, 32);
+ uint8_t *tmp[4] = {nullptr};
+ ptrdiff_t tmpStride[4] = {0};
+ ptrdiff_t scaleStride = FFALIGN(width, 32);
pTmpBuffer = (BYTE *)av_malloc(height * scaleStride * 3);
@@ -282,7 +277,7 @@ HRESULT CLAVPixFmtConverter::ConvertToAYUV(const uint8_t* const src[4], const in
tmpStride[3] = 0;
SwsContext *ctx = GetSWSContext(width, height, GetFFInput(), AV_PIX_FMT_YUV444P, SWS_BILINEAR);
- sws_scale(ctx, src, srcStride, 0, height, tmp, tmpStride);
+ sws_scale2(ctx, src, srcStride, 0, height, tmp, tmpStride);
y = tmp[0];
u = tmp[1];
@@ -324,22 +319,22 @@ HRESULT CLAVPixFmtConverter::ConvertToAYUV(const uint8_t* const src[4], const in
return S_OK;
}
-HRESULT CLAVPixFmtConverter::ConvertToPX1X(const uint8_t* const src[4], const int srcStride[4], uint8_t* dst[], int width, int height, const ptrdiff_t dstStride[], int chromaVertical)
+HRESULT CLAVPixFmtConverter::ConvertToPX1X(const uint8_t* const src[4], const ptrdiff_t srcStride[4], uint8_t* dst[], int width, int height, const ptrdiff_t dstStride[], int chromaVertical)
{
const BYTE *y = nullptr;
const BYTE *u = nullptr;
const BYTE *v = nullptr;
- int line, i = 0;
- int sourceStride = 0;
+ ptrdiff_t line, i = 0;
+ ptrdiff_t sourceStride = 0;
int shift = 0;
BYTE *pTmpBuffer = nullptr;
if ((chromaVertical == 1 && m_InputPixFmt != LAVPixFmt_YUV422bX) || (chromaVertical == 2 && m_InputPixFmt != LAVPixFmt_YUV420bX)) {
- uint8_t *tmp[4] = {nullptr};
- int tmpStride[4] = {0};
- int scaleStride = FFALIGN(width, 32) * 2;
+ uint8_t *tmp[4] = {nullptr};
+ ptrdiff_t tmpStride[4] = {0};
+ ptrdiff_t scaleStride = FFALIGN(width, 32) * 2;
pTmpBuffer = (BYTE *)av_malloc(height * scaleStride * 2);
@@ -353,7 +348,7 @@ HRESULT CLAVPixFmtConverter::ConvertToPX1X(const uint8_t* const src[4], const in
tmpStride[3] = 0;
SwsContext *ctx = GetSWSContext(width, height, GetFFInput(), chromaVertical == 1 ? AV_PIX_FMT_YUV422P16LE : AV_PIX_FMT_YUV420P16LE, SWS_BILINEAR);
- sws_scale(ctx, src, srcStride, 0, height, tmp, tmpStride);
+ sws_scale2(ctx, src, srcStride, 0, height, tmp, tmpStride);
y = tmp[0];
u = tmp[1];
@@ -432,20 +427,20 @@ HRESULT CLAVPixFmtConverter::ConvertToPX1X(const uint8_t* const src[4], const in
out += dstStride; \
}
-HRESULT CLAVPixFmtConverter::ConvertToY410(const uint8_t* const src[4], const int srcStride[4], uint8_t* dst[], int width, int height, const ptrdiff_t dstStride[])
+HRESULT CLAVPixFmtConverter::ConvertToY410(const uint8_t* const src[4], const ptrdiff_t srcStride[4], uint8_t* dst[], int width, int height, const ptrdiff_t dstStride[])
{
const int16_t *y = nullptr;
const int16_t *u = nullptr;
const int16_t *v = nullptr;
- int sourceStride = 0;
+ ptrdiff_t sourceStride = 0;
bool b9Bit = false;
BYTE *pTmpBuffer = nullptr;
if (m_InputPixFmt != LAVPixFmt_YUV444bX || m_InBpp > 10) {
- uint8_t *tmp[4] = {nullptr};
- int tmpStride[4] = {0};
- int scaleStride = FFALIGN(width, 32);
+ uint8_t *tmp[4] = {nullptr};
+ ptrdiff_t tmpStride[4] = {0};
+ ptrdiff_t scaleStride = FFALIGN(width, 32);
pTmpBuffer = (BYTE *)av_malloc(height * scaleStride * 6);
@@ -459,7 +454,7 @@ HRESULT CLAVPixFmtConverter::ConvertToY410(const uint8_t* const src[4], const in
tmpStride[3] = 0;
SwsContext *ctx = GetSWSContext(width, height, GetFFInput(), AV_PIX_FMT_YUV444P10LE, SWS_BILINEAR);
- sws_scale(ctx, src, srcStride, 0, height, tmp, tmpStride);
+ sws_scale2(ctx, src, srcStride, 0, height, tmp, tmpStride);
y = (int16_t *)tmp[0];
u = (int16_t *)tmp[1];
@@ -492,19 +487,19 @@ HRESULT CLAVPixFmtConverter::ConvertToY410(const uint8_t* const src[4], const in
return S_OK;
}
-HRESULT CLAVPixFmtConverter::ConvertToY416(const uint8_t* const src[4], const int srcStride[4], uint8_t* dst[], int width, int height, const ptrdiff_t dstStride[])
+HRESULT CLAVPixFmtConverter::ConvertToY416(const uint8_t* const src[4], const ptrdiff_t srcStride[4], uint8_t* dst[], int width, int height, const ptrdiff_t dstStride[])
{
const int16_t *y = nullptr;
const int16_t *u = nullptr;
const int16_t *v = nullptr;
- int sourceStride = 0;
+ ptrdiff_t sourceStride = 0;
BYTE *pTmpBuffer = nullptr;
if (m_InputPixFmt != LAVPixFmt_YUV444bX || m_InBpp != 16) {
- uint8_t *tmp[4] = {nullptr};
- int tmpStride[4] = {0};
- int scaleStride = FFALIGN(width, 32);
+ uint8_t *tmp[4] = {nullptr};
+ ptrdiff_t tmpStride[4] = {0};
+ ptrdiff_t scaleStride = FFALIGN(width, 32);
pTmpBuffer = (BYTE *)av_malloc(height * scaleStride * 6);
@@ -518,7 +513,7 @@ HRESULT CLAVPixFmtConverter::ConvertToY416(const uint8_t* const src[4], const in
tmpStride[3] = 0;
SwsContext *ctx = GetSWSContext(width, height, GetFFInput(), AV_PIX_FMT_YUV444P16LE, SWS_BILINEAR);
- sws_scale(ctx, src, srcStride, 0, height, tmp, tmpStride);
+ sws_scale2(ctx, src, srcStride, 0, height, tmp, tmpStride);
y = (int16_t *)tmp[0];
u = (int16_t *)tmp[1];
@@ -545,20 +540,20 @@ HRESULT CLAVPixFmtConverter::ConvertToY416(const uint8_t* const src[4], const in
return S_OK;
}
-HRESULT CLAVPixFmtConverter::ConvertTov210(const uint8_t* const src[4], const int srcStride[4], uint8_t* dst[], int width, int height, const ptrdiff_t dstStride[])
+HRESULT CLAVPixFmtConverter::ConvertTov210(const uint8_t* const src[4], const ptrdiff_t srcStride[4], uint8_t* dst[], int width, int height, const ptrdiff_t dstStride[])
{
const int16_t *y = nullptr;
const int16_t *u = nullptr;
const int16_t *v = nullptr;
- int srcyStride = 0;
- int srcuvStride = 0;
+ ptrdiff_t srcyStride = 0;
+ ptrdiff_t srcuvStride = 0;
BYTE *pTmpBuffer = nullptr;
if (m_InputPixFmt != LAVPixFmt_YUV422bX || m_InBpp != 10) {
- uint8_t *tmp[4] = {nullptr};
- int tmpStride[4] = {0};
- int scaleStride = FFALIGN(width, 32);
+ uint8_t *tmp[4] = {nullptr};
+ ptrdiff_t tmpStride[4] = {0};
+ ptrdiff_t scaleStride = FFALIGN(width, 32);
pTmpBuffer = (BYTE *)av_malloc(height * scaleStride * 6);
@@ -572,7 +567,7 @@ HRESULT CLAVPixFmtConverter::ConvertTov210(const uint8_t* const src[4], const in
tmpStride[3] = 0;
SwsContext *ctx = GetSWSContext(width, height, GetFFInput(), AV_PIX_FMT_YUV422P10LE, SWS_BILINEAR);
- sws_scale(ctx, src, srcStride, 0, height, tmp, tmpStride);
+ sws_scale2(ctx, src, srcStride, 0, height, tmp, tmpStride);
y = (int16_t *)tmp[0];
u = (int16_t *)tmp[1];
@@ -643,20 +638,20 @@ HRESULT CLAVPixFmtConverter::ConvertTov210(const uint8_t* const src[4], const in
return S_OK;
}
-HRESULT CLAVPixFmtConverter::ConvertTov410(const uint8_t* const src[4], const int srcStride[4], uint8_t* dst[], int width, int height, const ptrdiff_t dstStride[])
+HRESULT CLAVPixFmtConverter::ConvertTov410(const uint8_t* const src[4], const ptrdiff_t srcStride[4], uint8_t* dst[], int width, int height, const ptrdiff_t dstStride[])
{
const int16_t *y = nullptr;
const int16_t *u = nullptr;
const int16_t *v = nullptr;
- int sourceStride = 0;
+ ptrdiff_t sourceStride = 0;
bool b9Bit = false;
BYTE *pTmpBuffer = nullptr;
if (m_InputPixFmt != LAVPixFmt_YUV444bX || m_InBpp > 10) {
- uint8_t *tmp[4] = {nullptr};
- int tmpStride[4] = {0};
- int scaleStride = FFALIGN(width, 32);
+ uint8_t *tmp[4] = {nullptr};
+ ptrdiff_t tmpStride[4] = {0};
+ ptrdiff_t scaleStride = FFALIGN(width, 32);
pTmpBuffer = (BYTE *)av_malloc(height * scaleStride * 6);
@@ -670,7 +665,7 @@ HRESULT CLAVPixFmtConverter::ConvertTov410(const uint8_t* const src[4], const in
tmpStride[3] = 0;
SwsContext *ctx = GetSWSContext(width, height, GetFFInput(), AV_PIX_FMT_YUV444P10LE, SWS_BILINEAR);
- sws_scale(ctx, src, srcStride, 0, height, tmp, tmpStride);
+ sws_scale2(ctx, src, srcStride, 0, height, tmp, tmpStride);
y = (int16_t *)tmp[0];
u = (int16_t *)tmp[1];
diff --git a/decoder/LAVVideo/pixconv/pixconv_internal.h b/decoder/LAVVideo/pixconv/pixconv_internal.h
index ef0feee0..68ef2fc6 100644
--- a/decoder/LAVVideo/pixconv/pixconv_internal.h
+++ b/decoder/LAVVideo/pixconv/pixconv_internal.h
@@ -22,5 +22,6 @@
#include "LAVPixFmtConverter.h"
#include <emmintrin.h>
#include "timer.h"
+#include "Media.h"
extern DECLARE_ALIGNED(16, const uint16_t, dither_8x8_256)[8][8];
diff --git a/decoder/LAVVideo/pixconv/rgb2rgb_unscaled.cpp b/decoder/LAVVideo/pixconv/rgb2rgb_unscaled.cpp
index 23ebbbaa..8dc9c77a 100644
--- a/decoder/LAVVideo/pixconv/rgb2rgb_unscaled.cpp
+++ b/decoder/LAVVideo/pixconv/rgb2rgb_unscaled.cpp
@@ -91,7 +91,7 @@ DECLARE_CONV_FUNC_IMPL(convert_rgb48_rgb)
dstBS[0] = (BYTE *)av_malloc(height * srcStride[0]);
SwsContext *ctx = GetSWSContext(width, height, GetFFInput(), AV_PIX_FMT_BGR48LE, SWS_POINT);
- sws_scale(ctx, src, srcStride, 0, height, dstBS, srcStride);
+ sws_scale2(ctx, src, srcStride, 0, height, dstBS, srcStride);
// Dither to RGB24/32 with SSE2
const uint16_t *rgb = (const uint16_t *)dstBS[0];