git.blender.org/blender.git

-rw-r--r--  intern/ffmpeg/ffmpeg_compat.h                   | 523
-rw-r--r--  source/blender/blenkernel/CMakeLists.txt        |   4
-rw-r--r--  source/blender/blenkernel/intern/writeffmpeg.c  | 346
-rw-r--r--  source/blender/imbuf/CMakeLists.txt             |   6
-rw-r--r--  source/blender/imbuf/intern/IMB_anim.h          |   2
-rw-r--r--  source/blender/imbuf/intern/anim_movie.c        | 161
-rw-r--r--  source/blender/imbuf/intern/indexer.c           | 241
-rw-r--r--  source/blender/imbuf/intern/util.c              |  16
8 files changed, 400 insertions(+), 899 deletions(-)
diff --git a/intern/ffmpeg/ffmpeg_compat.h b/intern/ffmpeg/ffmpeg_compat.h
index 727fd4b9601..1b1e2fad594 100644
--- a/intern/ffmpeg/ffmpeg_compat.h
+++ b/intern/ffmpeg/ffmpeg_compat.h
@@ -22,10 +22,17 @@
#include <libavformat/avformat.h>
-/* check our ffmpeg is new enough, avoids user complaints */
-#if (LIBAVFORMAT_VERSION_MAJOR < 52) || \
- ((LIBAVFORMAT_VERSION_MAJOR == 52) && (LIBAVFORMAT_VERSION_MINOR <= 64))
-# error "FFmpeg 0.7 or newer is needed, Upgrade your FFmpeg or disable it"
+/* Check if our ffmpeg is new enough, avoids user complaints.
+ * Minimum supported version is currently 3.2.0, which means the following library versions:
+ * libavutil > 55.30
+ * libavcodec > 57.60
+ * libavformat > 57.50
+ *
+ * We only check for one of these as they are usually updated in tandem.
+ */
+#if (LIBAVFORMAT_VERSION_MAJOR < 57) || \
+ ((LIBAVFORMAT_VERSION_MAJOR == 57) && (LIBAVFORMAT_VERSION_MINOR <= 50))
+# error "FFmpeg 3.2.0 or newer is needed, Upgrade your FFmpeg or disable it"
#endif
/* end sanity check */
@@ -36,274 +43,6 @@
# define FFMPEG_INLINE static inline
#endif
-#include <libavcodec/avcodec.h>
-#include <libavutil/mathematics.h>
-#include <libavutil/opt.h>
-#include <libavutil/rational.h>
-
-#if (LIBAVFORMAT_VERSION_MAJOR > 52) || \
- ((LIBAVFORMAT_VERSION_MAJOR >= 52) && (LIBAVFORMAT_VERSION_MINOR >= 101))
-# define FFMPEG_HAVE_PARSE_UTILS 1
-# include <libavutil/parseutils.h>
-#endif
-
-#include <libswscale/swscale.h>
-
-#if (LIBAVFORMAT_VERSION_MAJOR > 52) || \
- ((LIBAVFORMAT_VERSION_MAJOR >= 52) && (LIBAVFORMAT_VERSION_MINOR >= 105))
-# define FFMPEG_HAVE_AVIO 1
-#endif
-
-#if (LIBAVCODEC_VERSION_MAJOR > 53) || \
- ((LIBAVCODEC_VERSION_MAJOR == 53) && (LIBAVCODEC_VERSION_MINOR > 1)) || \
- ((LIBAVCODEC_VERSION_MAJOR == 53) && (LIBAVCODEC_VERSION_MINOR == 1) && \
- (LIBAVCODEC_VERSION_MICRO >= 1)) || \
- ((LIBAVCODEC_VERSION_MAJOR == 52) && (LIBAVCODEC_VERSION_MINOR >= 121))
-# define FFMPEG_HAVE_DEFAULT_VAL_UNION 1
-#endif
-
-#if (LIBAVFORMAT_VERSION_MAJOR > 52) || \
- ((LIBAVFORMAT_VERSION_MAJOR >= 52) && (LIBAVFORMAT_VERSION_MINOR >= 101))
-# define FFMPEG_HAVE_AV_DUMP_FORMAT 1
-#endif
-
-#if (LIBAVFORMAT_VERSION_MAJOR > 52) || \
- ((LIBAVFORMAT_VERSION_MAJOR >= 52) && (LIBAVFORMAT_VERSION_MINOR >= 45))
-# define FFMPEG_HAVE_AV_GUESS_FORMAT 1
-#endif
-
-#if (LIBAVCODEC_VERSION_MAJOR > 52) || \
- ((LIBAVCODEC_VERSION_MAJOR >= 52) && (LIBAVCODEC_VERSION_MINOR >= 23))
-# define FFMPEG_HAVE_DECODE_AUDIO3 1
-# define FFMPEG_HAVE_DECODE_VIDEO2 1
-#endif
-
-#if (LIBAVCODEC_VERSION_MAJOR > 52) || \
- ((LIBAVCODEC_VERSION_MAJOR >= 52) && (LIBAVCODEC_VERSION_MINOR >= 64))
-# define FFMPEG_HAVE_AVMEDIA_TYPES 1
-#endif
-
-#if ((LIBAVCODEC_VERSION_MAJOR > 52) || \
- (LIBAVCODEC_VERSION_MAJOR >= 52) && (LIBAVCODEC_VERSION_MINOR >= 29)) && \
- ((LIBSWSCALE_VERSION_MAJOR > 0) || \
- (LIBSWSCALE_VERSION_MAJOR >= 0) && (LIBSWSCALE_VERSION_MINOR >= 10))
-# define FFMPEG_SWSCALE_COLOR_SPACE_SUPPORT
-#endif
-
-#if ((LIBAVCODEC_VERSION_MAJOR > 54) || \
- (LIBAVCODEC_VERSION_MAJOR >= 54) && (LIBAVCODEC_VERSION_MINOR > 14))
-# define FFMPEG_HAVE_CANON_H264_RESOLUTION_FIX
-#endif
-
-#if ((LIBAVCODEC_VERSION_MAJOR > 53) || \
- (LIBAVCODEC_VERSION_MAJOR >= 53) && (LIBAVCODEC_VERSION_MINOR >= 60))
-# define FFMPEG_HAVE_ENCODE_AUDIO2
-#endif
-
-#if ((LIBAVCODEC_VERSION_MAJOR > 53) || \
- (LIBAVCODEC_VERSION_MAJOR >= 53) && (LIBAVCODEC_VERSION_MINOR >= 42))
-# define FFMPEG_HAVE_DECODE_AUDIO4
-#endif
-
-#if ((LIBAVCODEC_VERSION_MAJOR > 54) || \
- (LIBAVCODEC_VERSION_MAJOR >= 54) && (LIBAVCODEC_VERSION_MINOR >= 13))
-# define FFMPEG_HAVE_AVFRAME_SAMPLE_RATE
-#endif
-
-#if ((LIBAVUTIL_VERSION_MAJOR > 51) || \
- (LIBAVUTIL_VERSION_MAJOR == 51) && (LIBAVUTIL_VERSION_MINOR >= 21))
-# define FFMPEG_FFV1_ALPHA_SUPPORTED
-# define FFMPEG_SAMPLE_FMT_S16P_SUPPORTED
-#else
-
-FFMPEG_INLINE
-int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt)
-{
- /* no planar formats in FFmpeg < 0.9 */
- (void)sample_fmt;
- return 0;
-}
-
-#endif
-
-/* XXX TODO Probably fix to correct modern flags in code? Not sure how old FFMPEG we want to
- * support though, so for now this will do. */
-
-#ifndef FF_MIN_BUFFER_SIZE
-# ifdef AV_INPUT_BUFFER_MIN_SIZE
-# define FF_MIN_BUFFER_SIZE AV_INPUT_BUFFER_MIN_SIZE
-# endif
-#endif
-
-#ifndef FF_INPUT_BUFFER_PADDING_SIZE
-# ifdef AV_INPUT_BUFFER_PADDING_SIZE
-# define FF_INPUT_BUFFER_PADDING_SIZE AV_INPUT_BUFFER_PADDING_SIZE
-# endif
-#endif
-
-#ifndef CODEC_FLAG_GLOBAL_HEADER
-# ifdef AV_CODEC_FLAG_GLOBAL_HEADER
-# define CODEC_FLAG_GLOBAL_HEADER AV_CODEC_FLAG_GLOBAL_HEADER
-# endif
-#endif
-
-#ifndef CODEC_FLAG_GLOBAL_HEADER
-# ifdef AV_CODEC_FLAG_GLOBAL_HEADER
-# define CODEC_FLAG_GLOBAL_HEADER AV_CODEC_FLAG_GLOBAL_HEADER
-# endif
-#endif
-
-#ifndef CODEC_FLAG_INTERLACED_DCT
-# ifdef AV_CODEC_FLAG_INTERLACED_DCT
-# define CODEC_FLAG_INTERLACED_DCT AV_CODEC_FLAG_INTERLACED_DCT
-# endif
-#endif
-
-#ifndef CODEC_FLAG_INTERLACED_ME
-# ifdef AV_CODEC_FLAG_INTERLACED_ME
-# define CODEC_FLAG_INTERLACED_ME AV_CODEC_FLAG_INTERLACED_ME
-# endif
-#endif
-
-/* FFmpeg upstream 1.0 is the first who added AV_ prefix. */
-#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(54, 59, 100)
-# define AV_CODEC_ID_NONE CODEC_ID_NONE
-# define AV_CODEC_ID_MPEG4 CODEC_ID_MPEG4
-# define AV_CODEC_ID_MJPEG CODEC_ID_MJPEG
-# define AV_CODEC_ID_DNXHD CODEC_ID_DNXHD
-# define AV_CODEC_ID_MPEG2VIDEO CODEC_ID_MPEG2VIDEO
-# define AV_CODEC_ID_MPEG1VIDEO CODEC_ID_MPEG1VIDEO
-# define AV_CODEC_ID_DVVIDEO CODEC_ID_DVVIDEO
-# define AV_CODEC_ID_THEORA CODEC_ID_THEORA
-# define AV_CODEC_ID_PNG CODEC_ID_PNG
-# define AV_CODEC_ID_QTRLE CODEC_ID_QTRLE
-# define AV_CODEC_ID_FFV1 CODEC_ID_FFV1
-# define AV_CODEC_ID_HUFFYUV CODEC_ID_HUFFYUV
-# define AV_CODEC_ID_H264 CODEC_ID_H264
-# define AV_CODEC_ID_FLV1 CODEC_ID_FLV1
-
-# define AV_CODEC_ID_AAC CODEC_ID_AAC
-# define AV_CODEC_ID_AC3 CODEC_ID_AC3
-# define AV_CODEC_ID_MP3 CODEC_ID_MP3
-# define AV_CODEC_ID_MP2 CODEC_ID_MP2
-# define AV_CODEC_ID_FLAC CODEC_ID_FLAC
-# define AV_CODEC_ID_PCM_U8 CODEC_ID_PCM_U8
-# define AV_CODEC_ID_PCM_S16LE CODEC_ID_PCM_S16LE
-# define AV_CODEC_ID_PCM_S24LE CODEC_ID_PCM_S24LE
-# define AV_CODEC_ID_PCM_S32LE CODEC_ID_PCM_S32LE
-# define AV_CODEC_ID_PCM_F32LE CODEC_ID_PCM_F32LE
-# define AV_CODEC_ID_PCM_F64LE CODEC_ID_PCM_F64LE
-# define AV_CODEC_ID_VORBIS CODEC_ID_VORBIS
-#endif
-
-FFMPEG_INLINE
-int av_get_cropped_height_from_codec(AVCodecContext *pCodecCtx)
-{
- int y = pCodecCtx->height;
-
-#ifndef FFMPEG_HAVE_CANON_H264_RESOLUTION_FIX
- /* really bad hack to remove this dreadfull black bar at the bottom
- with Canon footage and old ffmpeg versions.
- (to fix this properly in older ffmpeg versions one has to write a new
- demuxer...)
-
- see the actual fix here for reference:
-
- http://git.libav.org/?p=libav.git;a=commit;h=30f515091c323da59c0f1b533703dedca2f4b95d
-
- We do our best to apply this only to matching footage.
-*/
- if (pCodecCtx->width == 1920 && pCodecCtx->height == 1088 &&
- pCodecCtx->pix_fmt == PIX_FMT_YUVJ420P && pCodecCtx->codec_id == AV_CODEC_ID_H264) {
- y = 1080;
- }
-#endif
-
- return y;
-}
-
-#if ((LIBAVUTIL_VERSION_MAJOR < 51) || \
- (LIBAVUTIL_VERSION_MAJOR == 51) && (LIBAVUTIL_VERSION_MINOR < 22))
-FFMPEG_INLINE
-int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
-{
- const AVOption *rv = NULL;
- (void)search_flags;
- av_set_string3(obj, name, val, 1, &rv);
- return rv != NULL;
-}
-
-FFMPEG_INLINE
-int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
-{
- const AVOption *rv = NULL;
- (void)search_flags;
- rv = av_set_int(obj, name, val);
- return rv != NULL;
-}
-
-FFMPEG_INLINE
-int av_opt_set_double(void *obj, const char *name, double val, int search_flags)
-{
- const AVOption *rv = NULL;
- (void)search_flags;
- rv = av_set_double(obj, name, val);
- return rv != NULL;
-}
-
-# define AV_OPT_TYPE_INT FF_OPT_TYPE_INT
-# define AV_OPT_TYPE_INT64 FF_OPT_TYPE_INT64
-# define AV_OPT_TYPE_STRING FF_OPT_TYPE_STRING
-# define AV_OPT_TYPE_CONST FF_OPT_TYPE_CONST
-# define AV_OPT_TYPE_DOUBLE FF_OPT_TYPE_DOUBLE
-# define AV_OPT_TYPE_FLOAT FF_OPT_TYPE_FLOAT
-#endif
-
-#if ((LIBAVUTIL_VERSION_MAJOR < 51) || \
- (LIBAVUTIL_VERSION_MAJOR == 51) && (LIBAVUTIL_VERSION_MINOR < 54))
-FFMPEG_INLINE
-enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt)
-{
- if (sample_fmt < 0 || sample_fmt >= AV_SAMPLE_FMT_NB)
- return AV_SAMPLE_FMT_NONE;
- return sample_fmt;
-}
-#endif
-
-#if ((LIBAVCODEC_VERSION_MAJOR < 53) || \
- (LIBAVCODEC_VERSION_MAJOR == 53 && LIBAVCODEC_VERSION_MINOR < 35))
-FFMPEG_INLINE
-int avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVDictionary **options)
-{
- /* TODO: no options are taking into account */
- (void)options;
- return avcodec_open(avctx, codec);
-}
-#endif
-
-#if ((LIBAVFORMAT_VERSION_MAJOR < 53) || \
- (LIBAVFORMAT_VERSION_MAJOR == 53 && LIBAVFORMAT_VERSION_MINOR < 21))
-FFMPEG_INLINE
-AVStream *avformat_new_stream(AVFormatContext *s, AVCodec *c)
-{
- /* TODO: no codec is taking into account */
- (void)c;
- return av_new_stream(s, 0);
-}
-
-FFMPEG_INLINE
-int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
-{
- /* TODO: no options are taking into account */
- (void)options;
- return av_find_stream_info(ic);
-}
-#endif
-
-#if ((LIBAVFORMAT_VERSION_MAJOR > 53) || \
- ((LIBAVFORMAT_VERSION_MAJOR == 53) && (LIBAVFORMAT_VERSION_MINOR > 32)) || \
- ((LIBAVFORMAT_VERSION_MAJOR == 53) && (LIBAVFORMAT_VERSION_MINOR == 24) && \
- (LIBAVFORMAT_VERSION_MICRO >= 100)))
FFMPEG_INLINE
void my_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
{
@@ -323,103 +62,12 @@ void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
{
my_update_cur_dts(s, ref_st, timestamp);
}
-#endif
-
-#if ((LIBAVCODEC_VERSION_MAJOR < 54) || \
- (LIBAVCODEC_VERSION_MAJOR == 54 && LIBAVCODEC_VERSION_MINOR < 28))
-FFMPEG_INLINE
-void avcodec_free_frame(AVFrame **frame)
-{
- /* don't need to do anything with old AVFrame
- * since it does not have malloced members */
- (void)frame;
-}
-#endif
-
-#if ((LIBAVCODEC_VERSION_MAJOR > 54) || \
- (LIBAVCODEC_VERSION_MAJOR >= 54) && (LIBAVCODEC_VERSION_MINOR >= 13))
-# define FFMPEG_HAVE_AVFRAME_SAMPLE_RATE
-#endif
-
-#if ((LIBAVCODEC_VERSION_MAJOR > 54) || \
- (LIBAVCODEC_VERSION_MAJOR == 54 && LIBAVCODEC_VERSION_MINOR >= 13))
-# define FFMPEG_HAVE_FRAME_CHANNEL_LAYOUT
-#endif
-
-#ifndef FFMPEG_HAVE_AVIO
-# define AVIO_FLAG_WRITE URL_WRONLY
-# define avio_open url_fopen
-# define avio_tell url_ftell
-# define avio_close url_fclose
-# define avio_size url_fsize
-#endif
-
-/* There are some version in between, which have avio_... functions but no
- * AVIO_FLAG_... */
-#ifndef AVIO_FLAG_WRITE
-# define AVIO_FLAG_WRITE URL_WRONLY
-#endif
-
-#ifndef AV_PKT_FLAG_KEY
-# define AV_PKT_FLAG_KEY PKT_FLAG_KEY
-#endif
-
-#ifndef FFMPEG_HAVE_AV_DUMP_FORMAT
-# define av_dump_format dump_format
-#endif
-
-#ifndef FFMPEG_HAVE_AV_GUESS_FORMAT
-# define av_guess_format guess_format
-#endif
-
-#ifndef FFMPEG_HAVE_PARSE_UTILS
-# define av_parse_video_rate av_parse_video_frame_rate
-#endif
-
-#ifdef FFMPEG_HAVE_DEFAULT_VAL_UNION
-# define FFMPEG_DEF_OPT_VAL_INT(OPT) OPT->default_val.i64
-# define FFMPEG_DEF_OPT_VAL_DOUBLE(OPT) OPT->default_val.dbl
-#else
-# define FFMPEG_DEF_OPT_VAL_INT(OPT) OPT->default_val
-# define FFMPEG_DEF_OPT_VAL_DOUBLE(OPT) OPT->default_val
-#endif
-
-#ifndef FFMPEG_HAVE_AVMEDIA_TYPES
-# define AVMEDIA_TYPE_VIDEO CODEC_TYPE_VIDEO
-# define AVMEDIA_TYPE_AUDIO CODEC_TYPE_AUDIO
-#endif
-
-#ifndef FFMPEG_HAVE_DECODE_AUDIO3
-FFMPEG_INLINE
-int avcodec_decode_audio3(AVCodecContext *avctx,
- int16_t *samples,
- int *frame_size_ptr,
- AVPacket *avpkt)
-{
- return avcodec_decode_audio2(avctx, samples, frame_size_ptr, avpkt->data, avpkt->size);
-}
-#endif
-
-#ifndef FFMPEG_HAVE_DECODE_VIDEO2
-FFMPEG_INLINE
-int avcodec_decode_video2(AVCodecContext *avctx,
- AVFrame *picture,
- int *got_picture_ptr,
- AVPacket *avpkt)
-{
- return avcodec_decode_video(avctx, picture, got_picture_ptr, avpkt->data, avpkt->size);
-}
-#endif
FFMPEG_INLINE
int64_t av_get_pts_from_frame(AVFormatContext *avctx, AVFrame *picture)
{
int64_t pts;
-#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(55, 34, 100)
pts = picture->pts;
-#else
- pts = picture->pkt_pts;
-#endif
if (pts == AV_NOPTS_VALUE) {
pts = picture->pkt_dts;
@@ -432,124 +80,14 @@ int64_t av_get_pts_from_frame(AVFormatContext *avctx, AVFrame *picture)
return pts;
}
-/* obsolete constant formerly defined in FFMpeg libavcodec/avcodec.h */
-#ifndef AVCODEC_MAX_AUDIO_FRAME_SIZE
-# define AVCODEC_MAX_AUDIO_FRAME_SIZE 192000 // 1 second of 48khz 32bit audio
-#endif
-
-#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(54, 1, 0)
-FFMPEG_INLINE
-int avcodec_encode_video2(AVCodecContext *avctx,
- AVPacket *pkt,
- const AVFrame *frame,
- int *got_output)
-{
- int outsize, ret;
-
- ret = av_new_packet(pkt, avctx->width * avctx->height * 7 + 10000);
- if (ret < 0)
- return ret;
-
- outsize = avcodec_encode_video(avctx, pkt->data, pkt->size, frame);
- if (outsize <= 0) {
- *got_output = 0;
- av_free_packet(pkt);
- }
- else {
- *got_output = 1;
- av_shrink_packet(pkt, outsize);
- if (avctx->coded_frame) {
- pkt->pts = avctx->coded_frame->pts;
- if (avctx->coded_frame->key_frame)
- pkt->flags |= AV_PKT_FLAG_KEY;
- }
- }
-
- return outsize >= 0 ? 0 : outsize;
-}
-
-#endif
-
-#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 17, 0)
-FFMPEG_INLINE
-void avformat_close_input(AVFormatContext **ctx)
-{
- av_close_input_file(*ctx);
- *ctx = NULL;
-}
-#endif
-
-#if LIBAVUTIL_VERSION_INT < AV_VERSION_INT(52, 8, 0)
-FFMPEG_INLINE
-AVFrame *av_frame_alloc(void)
-{
- return avcodec_alloc_frame();
-}
-
-FFMPEG_INLINE
-void av_frame_free(AVFrame **frame)
-{
- av_freep(frame);
-}
-#endif
-
-FFMPEG_INLINE
-const char *av_get_metadata_key_value(AVDictionary *metadata, const char *key)
-{
- if (metadata == NULL) {
- return NULL;
- }
- AVDictionaryEntry *tag = NULL;
- while ((tag = av_dict_get(metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) {
- if (!strcmp(tag->key, key)) {
- return tag->value;
- }
- }
- return NULL;
-}
-
-FFMPEG_INLINE
-bool av_check_encoded_with_ffmpeg(AVFormatContext *ctx)
-{
- const char *encoder = av_get_metadata_key_value(ctx->metadata, "ENCODER");
- if (encoder != NULL && !strncmp(encoder, "Lavf", 4)) {
- return true;
- }
- return false;
-}
-
-#if LIBAVUTIL_VERSION_INT < AV_VERSION_INT(51, 32, 0)
-# define AV_OPT_SEARCH_FAKE_OBJ 0
-#endif
-
-#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(54, 59, 100)
-# define FFMPEG_HAVE_DEPRECATED_FLAGS2
-#endif
-
-/* Since FFmpeg-1.1 this constant have AV_ prefix. */
-#if LIBAVUTIL_VERSION_INT < AV_VERSION_INT(52, 3, 100)
-# define AV_PIX_FMT_BGR32 PIX_FMT_BGR32
-# define AV_PIX_FMT_YUV422P PIX_FMT_YUV422P
-# define AV_PIX_FMT_BGRA PIX_FMT_BGRA
-# define AV_PIX_FMT_ARGB PIX_FMT_ARGB
-# define AV_PIX_FMT_RGBA PIX_FMT_RGBA
-#endif
-
-/* New API from FFmpeg-2.0 which soon became recommended one. */
-#if LIBAVUTIL_VERSION_INT < AV_VERSION_INT(52, 38, 100)
-# define av_frame_alloc avcodec_alloc_frame
-# define av_frame_free avcodec_free_frame
-# define av_frame_unref avcodec_get_frame_defaults
-#endif
-
-#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 24, 102)
+/* --- Deinterlace code block begin --- */
/* NOTE: The code in this block is from FFmpeg 2.6.4, which is licensed under the LGPL. */
-# define MAX_NEG_CROP 1024
+#define MAX_NEG_CROP 1024
-# define times4(x) x, x, x, x
-# define times256(x) times4(times4(times4(times4(times4(x)))))
+#define times4(x) x, x, x, x
+#define times256(x) times4(times4(times4(times4(times4(x)))))
static const uint8_t ff_compat_crop_tab[256 + 2 * MAX_NEG_CROP] = {
times256(0x00), 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A,
@@ -575,8 +113,8 @@ static const uint8_t ff_compat_crop_tab[256 + 2 * MAX_NEG_CROP] = {
0xEF, 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA,
0xFB, 0xFC, 0xFD, 0xFE, 0xFF, times256(0xFF)};
-# undef times4
-# undef times256
+#undef times4
+#undef times256
/* filter parameters: [-1 4 2 4 -1] // 8 */
FFMPEG_INLINE
@@ -668,8 +206,9 @@ int deinterlace_bottom_field_inplace(uint8_t *src1, int src_wrap, int width, int
uint8_t *src_m1, *src_0, *src_p1, *src_p2;
int y;
uint8_t *buf = (uint8_t *)av_malloc(width);
- if (!buf)
+ if (!buf) {
return AVERROR(ENOMEM);
+ }
src_m1 = src1;
memcpy(buf, src_m1, width);
@@ -689,24 +228,21 @@ int deinterlace_bottom_field_inplace(uint8_t *src1, int src_wrap, int width, int
return 0;
}
-# ifdef __GNUC__
-# pragma GCC diagnostic push
-# pragma GCC diagnostic ignored "-Wdeprecated-declarations"
-# endif
-
FFMPEG_INLINE
-int avpicture_deinterlace(
- AVPicture *dst, const AVPicture *src, enum AVPixelFormat pix_fmt, int width, int height)
+int av_image_deinterlace(
+ AVFrame *dst, const AVFrame *src, enum AVPixelFormat pix_fmt, int width, int height)
{
int i, ret;
if (pix_fmt != AV_PIX_FMT_YUV420P && pix_fmt != AV_PIX_FMT_YUVJ420P &&
pix_fmt != AV_PIX_FMT_YUV422P && pix_fmt != AV_PIX_FMT_YUVJ422P &&
pix_fmt != AV_PIX_FMT_YUV444P && pix_fmt != AV_PIX_FMT_YUV411P &&
- pix_fmt != AV_PIX_FMT_GRAY8)
+ pix_fmt != AV_PIX_FMT_GRAY8) {
return -1;
- if ((width & 3) != 0 || (height & 3) != 0)
+ }
+ if ((width & 3) != 0 || (height & 3) != 0) {
return -1;
+ }
for (i = 0; i < 3; i++) {
if (i == 1) {
@@ -732,8 +268,9 @@ int avpicture_deinterlace(
}
if (src == dst) {
ret = deinterlace_bottom_field_inplace(dst->data[i], dst->linesize[i], width, height);
- if (ret < 0)
+ if (ret < 0) {
return ret;
+ }
}
else {
deinterlace_bottom_field(
@@ -743,10 +280,6 @@ int avpicture_deinterlace(
return 0;
}
-# ifdef __GNUC__
-# pragma GCC diagnostic pop
-# endif
-
-#endif
+/* --- Deinterlace code block end --- */
#endif
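
For orientation (this note is not part of the patch): av_image_deinterlace() above is the renamed, retained form of the old avpicture_deinterlace() wrapper. A minimal usage sketch, with codec_ctx, src_frame, dst_frame and filter_y as placeholder names, mirrors the call that anim_movie.c makes further down:

    if (av_image_deinterlace(dst_frame, src_frame, codec_ctx->pix_fmt,
                             codec_ctx->width, codec_ctx->height) < 0) {
      /* Unsupported pixel format or odd size: fall back to filtered scaling. */
      filter_y = true;
    }
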
diff --git a/source/blender/blenkernel/CMakeLists.txt b/source/blender/blenkernel/CMakeLists.txt
index 0944bbed2f3..acd9d54cfbe 100644
--- a/source/blender/blenkernel/CMakeLists.txt
+++ b/source/blender/blenkernel/CMakeLists.txt
@@ -592,10 +592,6 @@ if(WITH_CODEC_FFMPEG)
${FFMPEG_LIBRARIES}
)
add_definitions(-DWITH_FFMPEG)
-
- remove_strict_c_flags_file(
- intern/writeffmpeg.c
- )
endif()
if(WITH_PYTHON)
diff --git a/source/blender/blenkernel/intern/writeffmpeg.c b/source/blender/blenkernel/intern/writeffmpeg.c
index e5550cee124..5d03278f3c1 100644
--- a/source/blender/blenkernel/intern/writeffmpeg.c
+++ b/source/blender/blenkernel/intern/writeffmpeg.c
@@ -56,6 +56,7 @@
# include <libavcodec/avcodec.h>
# include <libavformat/avformat.h>
# include <libavutil/imgutils.h>
+# include <libavutil/opt.h>
# include <libavutil/rational.h>
# include <libavutil/samplefmt.h>
# include <libswscale/swscale.h>
@@ -80,6 +81,8 @@ typedef struct FFMpegContext {
int ffmpeg_preset; /* see eFFMpegPreset */
AVFormatContext *outfile;
+ AVCodecContext *video_codec;
+ AVCodecContext *audio_codec;
AVStream *video_stream;
AVStream *audio_stream;
AVFrame *current_frame; /* Image frame in output pixel format. */
@@ -91,10 +94,6 @@ typedef struct FFMpegContext {
uint8_t *audio_input_buffer;
uint8_t *audio_deinterleave_buffer;
int audio_input_samples;
-# ifndef FFMPEG_HAVE_ENCODE_AUDIO2
- uint8_t *audio_output_buffer;
- int audio_outbuf_size;
-# endif
double audio_time;
bool audio_deinterleave;
int audio_sample_size;
@@ -141,33 +140,22 @@ static int request_float_audio_buffer(int codec_id)
}
# ifdef WITH_AUDASPACE
+
static int write_audio_frame(FFMpegContext *context)
{
- AVCodecContext *c = NULL;
- AVPacket pkt;
AVFrame *frame = NULL;
- int got_output = 0;
-
- c = context->audio_stream->codec;
-
- av_init_packet(&pkt);
- pkt.size = 0;
- pkt.data = NULL;
+ AVCodecContext *c = context->audio_codec;
AUD_Device_read(
context->audio_mixdown_device, context->audio_input_buffer, context->audio_input_samples);
context->audio_time += (double)context->audio_input_samples / (double)c->sample_rate;
-# ifdef FFMPEG_HAVE_ENCODE_AUDIO2
frame = av_frame_alloc();
- av_frame_unref(frame);
frame->pts = context->audio_time / av_q2d(c->time_base);
frame->nb_samples = context->audio_input_samples;
frame->format = c->sample_fmt;
frame->channels = c->channels;
-# ifdef FFMPEG_HAVE_FRAME_CHANNEL_LAYOUT
frame->channel_layout = c->channel_layout;
-# endif
if (context->audio_deinterleave) {
int channel, i;
@@ -195,61 +183,49 @@ static int write_audio_frame(FFMpegContext *context)
context->audio_input_samples * c->channels * context->audio_sample_size,
1);
- if (avcodec_encode_audio2(c, &pkt, frame, &got_output) < 0) {
- // XXX error("Error writing audio packet");
- return -1;
- }
+ int success = 0;
- if (!got_output) {
- av_frame_free(&frame);
- return 0;
+ int ret = avcodec_send_frame(c, frame);
+ if (ret < 0) {
+ /* Can't send frame to encoder. This shouldn't happen. */
+ fprintf(stderr, "Can't send audio frame: %s\n", av_err2str(ret));
+ success = -1;
}
-# else
- pkt.size = avcodec_encode_audio(c,
- context->audio_output_buffer,
- context->audio_outbuf_size,
- (short *)context->audio_input_buffer);
- if (pkt.size < 0) {
- // XXX error("Error writing audio packet");
- return -1;
- }
+ AVPacket *pkt = av_packet_alloc();
- pkt.data = context->audio_output_buffer;
- got_output = 1;
-# endif
+ while (ret >= 0) {
- if (got_output) {
- if (pkt.pts != AV_NOPTS_VALUE) {
- pkt.pts = av_rescale_q(pkt.pts, c->time_base, context->audio_stream->time_base);
+ ret = avcodec_receive_packet(c, pkt);
+ if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
+ break;
}
- if (pkt.dts != AV_NOPTS_VALUE) {
- pkt.dts = av_rescale_q(pkt.dts, c->time_base, context->audio_stream->time_base);
+ if (ret < 0) {
+ fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
+ success = -1;
}
- if (pkt.duration > 0) {
- pkt.duration = av_rescale_q(pkt.duration, c->time_base, context->audio_stream->time_base);
+
+ av_packet_rescale_ts(pkt, c->time_base, context->audio_stream->time_base);
+ if (pkt->duration > 0) {
+ pkt->duration = av_rescale_q(pkt->duration, c->time_base, context->audio_stream->time_base);
}
- pkt.stream_index = context->audio_stream->index;
+ pkt->stream_index = context->audio_stream->index;
- pkt.flags |= AV_PKT_FLAG_KEY;
+ pkt->flags |= AV_PKT_FLAG_KEY;
- if (av_interleaved_write_frame(context->outfile, &pkt) != 0) {
- fprintf(stderr, "Error writing audio packet!\n");
- if (frame) {
- av_frame_free(&frame);
- }
- return -1;
+ int write_ret = av_interleaved_write_frame(context->outfile, pkt);
+ if (write_ret != 0) {
+ fprintf(stderr, "Error writing audio packet: %s\n", av_err2str(write_ret));
+ success = -1;
+ break;
}
-
- av_free_packet(&pkt);
}
- if (frame) {
- av_frame_free(&frame);
- }
+ av_packet_free(&pkt);
+ av_frame_free(&frame);
- return 0;
+ return success;
}
# endif /* #ifdef WITH_AUDASPACE */
@@ -265,14 +241,15 @@ static AVFrame *alloc_picture(int pix_fmt, int width, int height)
if (!f) {
return NULL;
}
- size = avpicture_get_size(pix_fmt, width, height);
+ size = av_image_get_buffer_size(pix_fmt, width, height, 1);
/* allocate the actual picture buffer */
buf = MEM_mallocN(size, "AVFrame buffer");
if (!buf) {
free(f);
return NULL;
}
- avpicture_fill((AVPicture *)f, buf, pix_fmt, width, height);
+
+ av_image_fill_arrays(f->data, f->linesize, buf, pix_fmt, width, height, 1);
f->format = pix_fmt;
f->width = width;
f->height = height;
@@ -342,58 +319,57 @@ static const char **get_file_extensions(int format)
}
/* Write a frame to the output file */
-static int write_video_frame(
- FFMpegContext *context, const RenderData *rd, int cfra, AVFrame *frame, ReportList *reports)
+static int write_video_frame(FFMpegContext *context, int cfra, AVFrame *frame, ReportList *reports)
{
- int got_output;
int ret, success = 1;
- AVCodecContext *c = context->video_stream->codec;
- AVPacket packet = {0};
+ AVPacket *packet = av_packet_alloc();
- av_init_packet(&packet);
+ AVCodecContext *c = context->video_codec;
frame->pts = cfra;
- ret = avcodec_encode_video2(c, &packet, frame, &got_output);
+ ret = avcodec_send_frame(c, frame);
+ if (ret < 0) {
+ /* Can't send frame to encoder. This shouldn't happen. */
+ fprintf(stderr, "Can't send video frame: %s\n", av_err2str(ret));
+ success = -1;
+ }
- if (ret >= 0 && got_output) {
- if (packet.pts != AV_NOPTS_VALUE) {
- packet.pts = av_rescale_q(packet.pts, c->time_base, context->video_stream->time_base);
- PRINT("Video Frame PTS: %d\n", (int)packet.pts);
- }
- else {
- PRINT("Video Frame PTS: not set\n");
- }
- if (packet.dts != AV_NOPTS_VALUE) {
- packet.dts = av_rescale_q(packet.dts, c->time_base, context->video_stream->time_base);
- PRINT("Video Frame DTS: %d\n", (int)packet.dts);
+ while (ret >= 0) {
+ ret = avcodec_receive_packet(c, packet);
+
+ if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
+ /* No more packets available. */
+ break;
}
- else {
- PRINT("Video Frame DTS: not set\n");
+ if (ret < 0) {
+ fprintf(stderr, "Error encoding frame: %s\n", av_err2str(ret));
+ break;
}
- packet.stream_index = context->video_stream->index;
- ret = av_interleaved_write_frame(context->outfile, &packet);
- success = (ret == 0);
- }
- else if (ret < 0) {
- success = 0;
+ packet->stream_index = context->video_stream->index;
+ av_packet_rescale_ts(packet, c->time_base, context->video_stream->time_base);
+ if (av_interleaved_write_frame(context->outfile, packet) != 0) {
+ success = -1;
+ break;
+ }
}
if (!success) {
BKE_report(reports, RPT_ERROR, "Error writing frame");
+ PRINT("Error writing frame: %s\n", av_err2str(ret));
}
+ av_packet_free(&packet);
+
return success;
}
/* read and encode a frame of audio from the buffer */
-static AVFrame *generate_video_frame(FFMpegContext *context,
- const uint8_t *pixels,
- ReportList *reports)
+static AVFrame *generate_video_frame(FFMpegContext *context, const uint8_t *pixels)
{
- AVCodecContext *c = context->video_stream->codec;
- int height = c->height;
+ AVCodecParameters *codec = context->video_stream->codecpar;
+ int height = codec->height;
AVFrame *rgb_frame;
if (context->img_convert_frame != NULL) {
@@ -438,7 +414,7 @@ static AVFrame *generate_video_frame(FFMpegContext *context,
(const uint8_t *const *)rgb_frame->data,
rgb_frame->linesize,
0,
- c->height,
+ codec->height,
context->current_frame->data,
context->current_frame->linesize);
}
@@ -446,9 +422,7 @@ static AVFrame *generate_video_frame(FFMpegContext *context,
return context->current_frame;
}
-static void set_ffmpeg_property_option(AVCodecContext *c,
- IDProperty *prop,
- AVDictionary **dictionary)
+static void set_ffmpeg_property_option(IDProperty *prop, AVDictionary **dictionary)
{
char name[128];
char *param;
@@ -536,7 +510,7 @@ static void set_ffmpeg_properties(RenderData *rd,
for (curr = prop->data.group.first; curr; curr = curr->next) {
if (ffmpeg_proprty_valid(c, prop_name, curr)) {
- set_ffmpeg_property_option(c, curr, dictionary);
+ set_ffmpeg_property_option(curr, dictionary);
}
}
}
@@ -553,7 +527,6 @@ static AVStream *alloc_video_stream(FFMpegContext *context,
int error_size)
{
AVStream *st;
- AVCodecContext *c;
AVCodec *codec;
AVDictionary *opts = NULL;
@@ -567,7 +540,8 @@ static AVStream *alloc_video_stream(FFMpegContext *context,
/* Set up the codec context */
- c = st->codec;
+ context->video_codec = avcodec_alloc_context3(NULL);
+ AVCodecContext *c = context->video_codec;
c->codec_id = codec_id;
c->codec_type = AVMEDIA_TYPE_VIDEO;
@@ -650,11 +624,9 @@ static AVStream *alloc_video_stream(FFMpegContext *context,
}
}
- /* Deprecated and not doing anything since July 2015, deleted in recent ffmpeg */
- // c->me_method = ME_EPZS;
-
codec = avcodec_find_encoder(c->codec_id);
if (!codec) {
+ avcodec_free_context(&c);
return NULL;
}
@@ -714,7 +686,7 @@ static AVStream *alloc_video_stream(FFMpegContext *context,
if ((of->oformat->flags & AVFMT_GLOBALHEADER)) {
PRINT("Using global header\n");
- c->flags |= CODEC_FLAG_GLOBAL_HEADER;
+ c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
/* xasp & yasp got float lately... */
@@ -742,6 +714,7 @@ static AVStream *alloc_video_stream(FFMpegContext *context,
if (avcodec_open2(c, codec, &opts) < 0) {
BLI_strncpy(error, IMB_ffmpeg_last_error(), error_size);
av_dict_free(&opts);
+ avcodec_free_context(&c);
return NULL;
}
av_dict_free(&opts);
@@ -769,6 +742,8 @@ static AVStream *alloc_video_stream(FFMpegContext *context,
NULL);
}
+ avcodec_parameters_from_context(st->codecpar, c);
+
return st;
}
@@ -780,7 +755,6 @@ static AVStream *alloc_audio_stream(FFMpegContext *context,
int error_size)
{
AVStream *st;
- AVCodecContext *c;
AVCodec *codec;
AVDictionary *opts = NULL;
@@ -792,7 +766,8 @@ static AVStream *alloc_audio_stream(FFMpegContext *context,
}
st->id = 1;
- c = st->codec;
+ context->audio_codec = avcodec_alloc_context3(NULL);
+ AVCodecContext *c = context->audio_codec;
c->thread_count = BLI_system_thread_count();
c->thread_type = FF_THREAD_SLICE;
@@ -804,7 +779,6 @@ static AVStream *alloc_audio_stream(FFMpegContext *context,
c->sample_fmt = AV_SAMPLE_FMT_S16;
c->channels = rd->ffcodecdata.audio_channels;
-# ifdef FFMPEG_HAVE_FRAME_CHANNEL_LAYOUT
switch (rd->ffcodecdata.audio_channels) {
case FFM_CHANNELS_MONO:
c->channel_layout = AV_CH_LAYOUT_MONO;
@@ -822,7 +796,6 @@ static AVStream *alloc_audio_stream(FFMpegContext *context,
c->channel_layout = AV_CH_LAYOUT_7POINT1;
break;
}
-# endif
if (request_float_audio_buffer(codec_id)) {
/* mainly for AAC codec which is experimental */
@@ -833,6 +806,7 @@ static AVStream *alloc_audio_stream(FFMpegContext *context,
codec = avcodec_find_encoder(c->codec_id);
if (!codec) {
// XXX error("Couldn't find a valid audio codec");
+ avcodec_free_context(&c);
return NULL;
}
@@ -844,13 +818,13 @@ static AVStream *alloc_audio_stream(FFMpegContext *context,
* Float samples in particular are not always supported. */
const enum AVSampleFormat *p = codec->sample_fmts;
for (; *p != -1; p++) {
- if (*p == st->codec->sample_fmt) {
+ if (*p == c->sample_fmt) {
break;
}
}
if (*p == -1) {
/* sample format incompatible with codec. Defaulting to a format known to work */
- st->codec->sample_fmt = codec->sample_fmts[0];
+ c->sample_fmt = codec->sample_fmts[0];
}
}
@@ -859,18 +833,18 @@ static AVStream *alloc_audio_stream(FFMpegContext *context,
int best = 0;
int best_dist = INT_MAX;
for (; *p; p++) {
- int dist = abs(st->codec->sample_rate - *p);
+ int dist = abs(c->sample_rate - *p);
if (dist < best_dist) {
best_dist = dist;
best = *p;
}
}
/* best is the closest supported sample rate (same as selected if best_dist == 0) */
- st->codec->sample_rate = best;
+ c->sample_rate = best;
}
if (of->oformat->flags & AVFMT_GLOBALHEADER) {
- c->flags |= CODEC_FLAG_GLOBAL_HEADER;
+ c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
set_ffmpeg_properties(rd, c, "audio", &opts);
@@ -879,32 +853,25 @@ static AVStream *alloc_audio_stream(FFMpegContext *context,
// XXX error("Couldn't initialize audio codec");
BLI_strncpy(error, IMB_ffmpeg_last_error(), error_size);
av_dict_free(&opts);
+ avcodec_free_context(&c);
return NULL;
}
av_dict_free(&opts);
/* need to prevent floating point exception when using vorbis audio codec,
* initialize this value in the same way as it's done in FFmpeg itself (sergey) */
- st->codec->time_base.num = 1;
- st->codec->time_base.den = st->codec->sample_rate;
-
-# ifndef FFMPEG_HAVE_ENCODE_AUDIO2
- context->audio_outbuf_size = FF_MIN_BUFFER_SIZE;
-# endif
+ c->time_base.num = 1;
+ c->time_base.den = c->sample_rate;
if (c->frame_size == 0) {
/* Used to be if ((c->codec_id >= CODEC_ID_PCM_S16LE) && (c->codec_id <= CODEC_ID_PCM_DVD))
* not sure if that is needed anymore, so let's try out if there are any
* complaints regarding some FFmpeg versions users might have. */
- context->audio_input_samples = FF_MIN_BUFFER_SIZE * 8 / c->bits_per_coded_sample / c->channels;
+ context->audio_input_samples = AV_INPUT_BUFFER_MIN_SIZE * 8 / c->bits_per_coded_sample /
+ c->channels;
}
else {
context->audio_input_samples = c->frame_size;
-# ifndef FFMPEG_HAVE_ENCODE_AUDIO2
- if (c->frame_size * c->channels * sizeof(int16_t) * 4 > context->audio_outbuf_size) {
- context->audio_outbuf_size = c->frame_size * c->channels * sizeof(int16_t) * 4;
- }
-# endif
}
context->audio_deinterleave = av_sample_fmt_is_planar(c->sample_fmt);
@@ -913,10 +880,6 @@ static AVStream *alloc_audio_stream(FFMpegContext *context,
context->audio_input_buffer = (uint8_t *)av_malloc(context->audio_input_samples * c->channels *
context->audio_sample_size);
-# ifndef FFMPEG_HAVE_ENCODE_AUDIO2
- context->audio_output_buffer = (uint8_t *)av_malloc(context->audio_outbuf_size);
-# endif
-
if (context->audio_deinterleave) {
context->audio_deinterleave_buffer = (uint8_t *)av_malloc(
context->audio_input_samples * c->channels * context->audio_sample_size);
@@ -924,6 +887,8 @@ static AVStream *alloc_audio_stream(FFMpegContext *context,
context->audio_time = 0.0f;
+ avcodec_parameters_from_context(st->codecpar, c);
+
return st;
}
/* essential functions -- start, append, end */
@@ -949,7 +914,7 @@ static void ffmpeg_dict_set_float(AVDictionary **dict, const char *key, float va
static void ffmpeg_add_metadata_callback(void *data,
const char *propname,
char *propvalue,
- int len)
+ int UNUSED(len))
{
AVDictionary **metadata = (AVDictionary **)data;
av_dict_set(metadata, propname, propvalue, 0);
@@ -1040,7 +1005,7 @@ static int start_ffmpeg_impl(FFMpegContext *context,
fmt->audio_codec = context->ffmpeg_audio_codec;
- BLI_strncpy(of->filename, name, sizeof(of->filename));
+ of->url = av_strdup(name);
/* set the codec to the user's selection */
switch (context->ffmpeg_type) {
case FFMPEG_AVI:
@@ -1105,9 +1070,11 @@ static int start_ffmpeg_impl(FFMpegContext *context,
if (!context->video_stream) {
if (error[0]) {
BKE_report(reports, RPT_ERROR, error);
+ PRINT("Video stream error: %s\n", error);
}
else {
BKE_report(reports, RPT_ERROR, "Error initializing video stream");
+ PRINT("Error initializing video stream");
}
goto fail;
}
@@ -1119,9 +1086,11 @@ static int start_ffmpeg_impl(FFMpegContext *context,
if (!context->audio_stream) {
if (error[0]) {
BKE_report(reports, RPT_ERROR, error);
+ PRINT("Audio stream error: %s\n", error);
}
else {
BKE_report(reports, RPT_ERROR, "Error initializing audio stream");
+ PRINT("Error initializing audio stream");
}
goto fail;
}
@@ -1129,6 +1098,7 @@ static int start_ffmpeg_impl(FFMpegContext *context,
if (!(fmt->flags & AVFMT_NOFILE)) {
if (avio_open(&of->pb, name, AVIO_FLAG_WRITE) < 0) {
BKE_report(reports, RPT_ERROR, "Could not open file for writing");
+ PRINT("Could not open file for writing\n");
goto fail;
}
}
@@ -1138,10 +1108,12 @@ static int start_ffmpeg_impl(FFMpegContext *context,
&of->metadata, context->stamp_data, ffmpeg_add_metadata_callback, false);
}
- if (avformat_write_header(of, NULL) < 0) {
+ int ret = avformat_write_header(of, NULL);
+ if (ret < 0) {
BKE_report(reports,
RPT_ERROR,
"Could not initialize streams, probably unsupported codec combination");
+ PRINT("Could not write media header: %s\n", av_err2str(ret));
goto fail;
}
@@ -1156,13 +1128,11 @@ fail:
avio_close(of->pb);
}
- if (context->video_stream && context->video_stream->codec) {
- avcodec_close(context->video_stream->codec);
+ if (context->video_stream) {
context->video_stream = NULL;
}
- if (context->audio_stream && context->audio_stream->codec) {
- avcodec_close(context->audio_stream->codec);
+ if (context->audio_stream) {
context->audio_stream = NULL;
}
@@ -1190,46 +1160,36 @@ fail:
*/
static void flush_ffmpeg(FFMpegContext *context)
{
- int ret = 0;
+ AVCodecContext *c = context->video_codec;
+ AVPacket *packet = av_packet_alloc();
- AVCodecContext *c = context->video_stream->codec;
- /* get the delayed frames */
- while (1) {
- int got_output;
- AVPacket packet = {0};
- av_init_packet(&packet);
+ avcodec_send_frame(c, NULL);
- ret = avcodec_encode_video2(c, &packet, NULL, &got_output);
- if (ret < 0) {
- fprintf(stderr, "Error encoding delayed frame %d\n", ret);
+ /* Get the remaining delayed packets from the encoder. */
+ int ret = 1;
+ while (ret >= 0) {
+ ret = avcodec_receive_packet(c, packet);
+
+ if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
+ /* No more packets to flush. */
break;
}
- if (!got_output) {
+ if (ret < 0) {
+ fprintf(stderr, "Error encoding delayed frame: %s\n", av_err2str(ret));
break;
}
- if (packet.pts != AV_NOPTS_VALUE) {
- packet.pts = av_rescale_q(packet.pts, c->time_base, context->video_stream->time_base);
- PRINT("Video Frame PTS: %d\n", (int)packet.pts);
- }
- else {
- PRINT("Video Frame PTS: not set\n");
- }
- if (packet.dts != AV_NOPTS_VALUE) {
- packet.dts = av_rescale_q(packet.dts, c->time_base, context->video_stream->time_base);
- PRINT("Video Frame DTS: %d\n", (int)packet.dts);
- }
- else {
- PRINT("Video Frame DTS: not set\n");
- }
- packet.stream_index = context->video_stream->index;
- ret = av_interleaved_write_frame(context->outfile, &packet);
- if (ret != 0) {
- fprintf(stderr, "Error writing delayed frame %d\n", ret);
+ packet->stream_index = context->video_stream->index;
+ av_packet_rescale_ts(packet, c->time_base, context->video_stream->time_base);
+
+ int write_ret = av_interleaved_write_frame(context->outfile, packet);
+ if (write_ret != 0) {
+ fprintf(stderr, "Error writing delayed frame: %s\n", av_err2str(write_ret));
break;
}
}
- avcodec_flush_buffers(context->video_stream->codec);
+
+ av_packet_free(&packet);
}
/* **********************************************************************
@@ -1327,7 +1287,8 @@ int BKE_ffmpeg_start(void *context_v,
success = start_ffmpeg_impl(context, rd, rectx, recty, suffix, reports);
# ifdef WITH_AUDASPACE
if (context->audio_stream) {
- AVCodecContext *c = context->audio_stream->codec;
+ AVCodecContext *c = context->audio_codec;
+
AUD_DeviceSpecs specs;
specs.channels = c->channels;
@@ -1354,10 +1315,6 @@ int BKE_ffmpeg_start(void *context_v,
specs.rate = rd->ffcodecdata.audio_mixrate;
context->audio_mixdown_device = BKE_sound_mixdown(
scene, specs, preview ? rd->psfra : rd->sfra, rd->ffcodecdata.audio_volume);
-# ifdef FFMPEG_CODEC_TIME_BASE
- c->time_base.den = specs.rate;
- c->time_base.num = 1;
-# endif
}
# endif
return success;
@@ -1398,8 +1355,8 @@ int BKE_ffmpeg_append(void *context_v,
// write_audio_frames(frame / (((double)rd->frs_sec) / rd->frs_sec_base));
if (context->video_stream) {
- avframe = generate_video_frame(context, (unsigned char *)pixels, reports);
- success = (avframe && write_video_frame(context, rd, frame - start_frame, avframe, reports));
+ avframe = generate_video_frame(context, (unsigned char *)pixels);
+ success = (avframe && write_video_frame(context, frame - start_frame, avframe, reports));
if (context->ffmpeg_autosplit) {
if (avio_tell(context->outfile->pb) > FFMPEG_AUTOSPLIT_SIZE) {
@@ -1430,7 +1387,7 @@ static void end_ffmpeg_impl(FFMpegContext *context, int is_autosplit)
}
# endif
- if (context->video_stream && context->video_stream->codec) {
+ if (context->video_stream) {
PRINT("Flushing delayed frames...\n");
flush_ffmpeg(context);
}
@@ -1441,14 +1398,12 @@ static void end_ffmpeg_impl(FFMpegContext *context, int is_autosplit)
/* Close the video codec */
- if (context->video_stream != NULL && context->video_stream->codec != NULL) {
- avcodec_close(context->video_stream->codec);
+ if (context->video_stream != NULL) {
PRINT("zero video stream %p\n", context->video_stream);
context->video_stream = NULL;
}
- if (context->audio_stream != NULL && context->audio_stream->codec != NULL) {
- avcodec_close(context->audio_stream->codec);
+ if (context->audio_stream != NULL) {
context->audio_stream = NULL;
}
@@ -1467,6 +1422,16 @@ static void end_ffmpeg_impl(FFMpegContext *context, int is_autosplit)
avio_close(context->outfile->pb);
}
}
+
+ if (context->video_codec != NULL) {
+ avcodec_free_context(&context->video_codec);
+ context->video_codec = NULL;
+ }
+ if (context->audio_codec != NULL) {
+ avcodec_free_context(&context->audio_codec);
+ context->audio_codec = NULL;
+ }
+
if (context->outfile != NULL) {
avformat_free_context(context->outfile);
context->outfile = NULL;
@@ -1475,12 +1440,6 @@ static void end_ffmpeg_impl(FFMpegContext *context, int is_autosplit)
av_free(context->audio_input_buffer);
context->audio_input_buffer = NULL;
}
-# ifndef FFMPEG_HAVE_ENCODE_AUDIO2
- if (context->audio_output_buffer != NULL) {
- av_free(context->audio_output_buffer);
- context->audio_output_buffer = NULL;
- }
-# endif
if (context->audio_deinterleave_buffer != NULL) {
av_free(context->audio_deinterleave_buffer);
@@ -1560,12 +1519,12 @@ static IDProperty *BKE_ffmpeg_property_add(RenderData *rd,
switch (o->type) {
case AV_OPT_TYPE_INT:
case AV_OPT_TYPE_INT64:
- val.i = FFMPEG_DEF_OPT_VAL_INT(o);
+ val.i = o->default_val.i64;
idp_type = IDP_INT;
break;
case AV_OPT_TYPE_DOUBLE:
case AV_OPT_TYPE_FLOAT:
- val.f = FFMPEG_DEF_OPT_VAL_DOUBLE(o);
+ val.f = o->default_val.dbl;
idp_type = IDP_FLOAT;
break;
case AV_OPT_TYPE_STRING:
@@ -1707,16 +1666,9 @@ static void ffmpeg_set_expert_options(RenderData *rd)
BKE_ffmpeg_property_add_string(rd, "video", "trellis:0");
BKE_ffmpeg_property_add_string(rd, "video", "weightb:1");
-# ifdef FFMPEG_HAVE_DEPRECATED_FLAGS2
- BKE_ffmpeg_property_add_string(rd, "video", "flags2:dct8x8");
- BKE_ffmpeg_property_add_string(rd, "video", "directpred:3");
- BKE_ffmpeg_property_add_string(rd, "video", "flags2:fastpskip");
- BKE_ffmpeg_property_add_string(rd, "video", "flags2:wpred");
-# else
BKE_ffmpeg_property_add_string(rd, "video", "8x8dct:1");
BKE_ffmpeg_property_add_string(rd, "video", "fast-pskip:1");
BKE_ffmpeg_property_add_string(rd, "video", "wpredp:2");
-# endif
}
else if (codec_id == AV_CODEC_ID_DNXHD) {
if (rd->ffcodecdata.flags & FFMPEG_LOSSLESS_OUTPUT) {
@@ -1871,14 +1823,12 @@ bool BKE_ffmpeg_alpha_channel_is_supported(const RenderData *rd)
{
int codec = rd->ffcodecdata.codec;
-# ifdef FFMPEG_FFV1_ALPHA_SUPPORTED
- /* Visual Studio 2019 doesn't like #ifdef within ELEM(). */
- if (codec == AV_CODEC_ID_FFV1) {
- return true;
- }
-# endif
-
- return ELEM(codec, AV_CODEC_ID_QTRLE, AV_CODEC_ID_PNG, AV_CODEC_ID_VP9, AV_CODEC_ID_HUFFYUV);
+ return ELEM(codec,
+ AV_CODEC_ID_FFV1,
+ AV_CODEC_ID_QTRLE,
+ AV_CODEC_ID_PNG,
+ AV_CODEC_ID_VP9,
+ AV_CODEC_ID_HUFFYUV);
}
void *BKE_ffmpeg_context_create(void)
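
Aside (not part of the patch): the recurring change in writeffmpeg.c above is the move from avcodec_encode_video2()/avcodec_encode_audio2() to the send/receive encoder API available since FFmpeg 3.2. A condensed sketch of that loop, with fmt_ctx, stream and codec_ctx as placeholder names and error handling abbreviated:

    static int encode_and_write(AVFormatContext *fmt_ctx, AVStream *stream,
                                AVCodecContext *codec_ctx, AVFrame *frame)
    {
      /* Passing frame == NULL puts the encoder into draining (flush) mode. */
      AVPacket *pkt = av_packet_alloc();
      int ret = avcodec_send_frame(codec_ctx, frame);

      while (ret >= 0) {
        ret = avcodec_receive_packet(codec_ctx, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
          ret = 0; /* Encoder needs more input or is fully drained: not an error. */
          break;
        }
        if (ret < 0) {
          break; /* Genuine encoding error. */
        }
        /* Rescale timestamps from codec to stream time base, then mux. */
        av_packet_rescale_ts(pkt, codec_ctx->time_base, stream->time_base);
        pkt->stream_index = stream->index;
        ret = av_interleaved_write_frame(fmt_ctx, pkt);
      }

      av_packet_free(&pkt);
      return ret;
    }
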
diff --git a/source/blender/imbuf/CMakeLists.txt b/source/blender/imbuf/CMakeLists.txt
index 7ce795280a3..be0e364c85f 100644
--- a/source/blender/imbuf/CMakeLists.txt
+++ b/source/blender/imbuf/CMakeLists.txt
@@ -166,12 +166,6 @@ if(WITH_CODEC_FFMPEG)
${OPENJPEG_LIBRARIES}
)
add_definitions(-DWITH_FFMPEG)
-
- remove_strict_c_flags_file(
- intern/anim_movie.c
- intern/indexer.c
- intern/util.c
- )
endif()
if(WITH_IMAGE_DDS)
diff --git a/source/blender/imbuf/intern/IMB_anim.h b/source/blender/imbuf/intern/IMB_anim.h
index 1239d3881de..7d7864306a1 100644
--- a/source/blender/imbuf/intern/IMB_anim.h
+++ b/source/blender/imbuf/intern/IMB_anim.h
@@ -135,7 +135,7 @@ struct anim {
struct ImBuf *last_frame;
int64_t last_pts;
int64_t next_pts;
- AVPacket next_packet;
+ AVPacket *next_packet;
#endif
char index_dir[768];
diff --git a/source/blender/imbuf/intern/anim_movie.c b/source/blender/imbuf/intern/anim_movie.c
index 432b62d172a..cdf6ca5c181 100644
--- a/source/blender/imbuf/intern/anim_movie.c
+++ b/source/blender/imbuf/intern/anim_movie.c
@@ -79,6 +79,7 @@
# include <libavcodec/avcodec.h>
# include <libavformat/avformat.h>
+# include <libavutil/imgutils.h>
# include <libavutil/rational.h>
# include <libswscale/swscale.h>
@@ -519,12 +520,10 @@ static int startffmpeg(struct anim *anim)
double frs_den;
int streamcount;
-# ifdef FFMPEG_SWSCALE_COLOR_SPACE_SUPPORT
/* The following for color space determination */
int srcRange, dstRange, brightness, contrast, saturation;
int *table;
const int *inv_table;
-# endif
if (anim == NULL) {
return (-1);
@@ -547,7 +546,7 @@ static int startffmpeg(struct anim *anim)
video_stream_index = -1;
for (i = 0; i < pFormatCtx->nb_streams; i++) {
- if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
+ if (pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
if (streamcount > 0) {
streamcount--;
continue;
@@ -563,16 +562,17 @@ static int startffmpeg(struct anim *anim)
}
video_stream = pFormatCtx->streams[video_stream_index];
- pCodecCtx = video_stream->codec;
/* Find the decoder for the video stream */
- pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
+ pCodec = avcodec_find_decoder(video_stream->codecpar->codec_id);
if (pCodec == NULL) {
avformat_close_input(&pFormatCtx);
return -1;
}
- pCodecCtx->workaround_bugs = 1;
+ pCodecCtx = avcodec_alloc_context3(NULL);
+ avcodec_parameters_to_context(pCodecCtx, video_stream->codecpar);
+ pCodecCtx->workaround_bugs = FF_BUG_AUTODETECT;
if (pCodec->capabilities & AV_CODEC_CAP_AUTO_THREADS) {
pCodecCtx->thread_count = 0;
@@ -593,7 +593,7 @@ static int startffmpeg(struct anim *anim)
return -1;
}
if (pCodecCtx->pix_fmt == AV_PIX_FMT_NONE) {
- avcodec_close(anim->pCodecCtx);
+ avcodec_free_context(&anim->pCodecCtx);
avformat_close_input(&pFormatCtx);
return -1;
}
@@ -639,7 +639,7 @@ static int startffmpeg(struct anim *anim)
anim->params = 0;
anim->x = pCodecCtx->width;
- anim->y = av_get_cropped_height_from_codec(pCodecCtx);
+ anim->y = pCodecCtx->height;
anim->pFormatCtx = pFormatCtx;
anim->pCodecCtx = pCodecCtx;
@@ -654,7 +654,8 @@ static int startffmpeg(struct anim *anim)
anim->last_frame = 0;
anim->last_pts = -1;
anim->next_pts = -1;
- anim->next_packet.stream_index = -1;
+ anim->next_packet = av_packet_alloc();
+ anim->next_packet->stream_index = -1;
anim->pFrame = av_frame_alloc();
anim->pFrameComplete = false;
@@ -668,8 +669,9 @@ static int startffmpeg(struct anim *anim)
if (av_frame_get_buffer(anim->pFrameRGB, 32) < 0) {
fprintf(stderr, "Could not allocate frame data.\n");
- avcodec_close(anim->pCodecCtx);
+ avcodec_free_context(&anim->pCodecCtx);
avformat_close_input(&anim->pFormatCtx);
+ av_packet_free(&anim->next_packet);
av_frame_free(&anim->pFrameRGB);
av_frame_free(&anim->pFrameDeinterlaced);
av_frame_free(&anim->pFrame);
@@ -678,10 +680,11 @@ static int startffmpeg(struct anim *anim)
}
}
- if (avpicture_get_size(AV_PIX_FMT_RGBA, anim->x, anim->y) != anim->x * anim->y * 4) {
+ if (av_image_get_buffer_size(AV_PIX_FMT_RGBA, anim->x, anim->y, 1) != anim->x * anim->y * 4) {
fprintf(stderr, "ffmpeg has changed alloc scheme ... ARGHHH!\n");
- avcodec_close(anim->pCodecCtx);
+ avcodec_free_context(&anim->pCodecCtx);
avformat_close_input(&anim->pFormatCtx);
+ av_packet_free(&anim->next_packet);
av_frame_free(&anim->pFrameRGB);
av_frame_free(&anim->pFrameDeinterlaced);
av_frame_free(&anim->pFrame);
@@ -690,14 +693,17 @@ static int startffmpeg(struct anim *anim)
}
if (anim->ib_flags & IB_animdeinterlace) {
- avpicture_fill((AVPicture *)anim->pFrameDeinterlaced,
- MEM_callocN(avpicture_get_size(anim->pCodecCtx->pix_fmt,
- anim->pCodecCtx->width,
- anim->pCodecCtx->height),
- "ffmpeg deinterlace"),
- anim->pCodecCtx->pix_fmt,
- anim->pCodecCtx->width,
- anim->pCodecCtx->height);
+ av_image_fill_arrays(anim->pFrameDeinterlaced->data,
+ anim->pFrameDeinterlaced->linesize,
+ MEM_callocN(av_image_get_buffer_size(anim->pCodecCtx->pix_fmt,
+ anim->pCodecCtx->width,
+ anim->pCodecCtx->height,
+ 1),
+ "ffmpeg deinterlace"),
+ anim->pCodecCtx->pix_fmt,
+ anim->pCodecCtx->width,
+ anim->pCodecCtx->height,
+ 1);
}
if (pCodecCtx->has_b_frames) {
@@ -720,8 +726,9 @@ static int startffmpeg(struct anim *anim)
if (!anim->img_convert_ctx) {
fprintf(stderr, "Can't transform color space??? Bailing out...\n");
- avcodec_close(anim->pCodecCtx);
+ avcodec_free_context(&anim->pCodecCtx);
avformat_close_input(&anim->pFormatCtx);
+ av_packet_free(&anim->next_packet);
av_frame_free(&anim->pFrameRGB);
av_frame_free(&anim->pFrameDeinterlaced);
av_frame_free(&anim->pFrame);
@@ -729,7 +736,6 @@ static int startffmpeg(struct anim *anim)
return -1;
}
-# ifdef FFMPEG_SWSCALE_COLOR_SPACE_SUPPORT
/* Try do detect if input has 0-255 YCbCR range (JFIF Jpeg MotionJpeg) */
if (!sws_getColorspaceDetails(anim->img_convert_ctx,
(int **)&inv_table,
@@ -756,7 +762,6 @@ static int startffmpeg(struct anim *anim)
else {
fprintf(stderr, "Warning: Could not set libswscale colorspace details.\n");
}
-# endif
return 0;
}
@@ -795,11 +800,11 @@ static void ffmpeg_postprocess(struct anim *anim)
input->data[3]);
if (anim->ib_flags & IB_animdeinterlace) {
- if (avpicture_deinterlace((AVPicture *)anim->pFrameDeinterlaced,
- (const AVPicture *)anim->pFrame,
- anim->pCodecCtx->pix_fmt,
- anim->pCodecCtx->width,
- anim->pCodecCtx->height) < 0) {
+ if (av_image_deinterlace(anim->pFrameDeinterlaced,
+ anim->pFrame,
+ anim->pCodecCtx->pix_fmt,
+ anim->pCodecCtx->width,
+ anim->pCodecCtx->height) < 0) {
filter_y = true;
}
else {
@@ -808,11 +813,13 @@ static void ffmpeg_postprocess(struct anim *anim)
}
if (!need_aligned_ffmpeg_buffer(anim)) {
- avpicture_fill((AVPicture *)anim->pFrameRGB,
- (unsigned char *)ibuf->rect,
- AV_PIX_FMT_RGBA,
- anim->x,
- anim->y);
+ av_image_fill_arrays(anim->pFrameRGB->data,
+ anim->pFrameRGB->linesize,
+ (unsigned char *)ibuf->rect,
+ AV_PIX_FMT_RGBA,
+ anim->x,
+ anim->y,
+ 1);
}
# if defined(__x86_64__) || defined(_M_X64)
@@ -903,82 +910,70 @@ static int ffmpeg_decode_video_frame(struct anim *anim)
av_log(anim->pFormatCtx, AV_LOG_DEBUG, " DECODE VIDEO FRAME\n");
- if (anim->next_packet.stream_index == anim->videoStream) {
- av_free_packet(&anim->next_packet);
- anim->next_packet.stream_index = -1;
+ if (anim->next_packet->stream_index == anim->videoStream) {
+ av_packet_unref(anim->next_packet);
+ anim->next_packet->stream_index = -1;
}
- while ((rval = av_read_frame(anim->pFormatCtx, &anim->next_packet)) >= 0) {
+ while ((rval = av_read_frame(anim->pFormatCtx, anim->next_packet)) >= 0) {
av_log(anim->pFormatCtx,
AV_LOG_DEBUG,
"%sREAD: strID=%d (VID: %d) dts=%" PRId64 " pts=%" PRId64 " %s\n",
- (anim->next_packet.stream_index == anim->videoStream) ? "->" : " ",
- anim->next_packet.stream_index,
+ (anim->next_packet->stream_index == anim->videoStream) ? "->" : " ",
+ anim->next_packet->stream_index,
anim->videoStream,
- (anim->next_packet.dts == AV_NOPTS_VALUE) ? -1 : (int64_t)anim->next_packet.dts,
- (anim->next_packet.pts == AV_NOPTS_VALUE) ? -1 : (int64_t)anim->next_packet.pts,
- (anim->next_packet.flags & AV_PKT_FLAG_KEY) ? " KEY" : "");
- if (anim->next_packet.stream_index == anim->videoStream) {
+ (anim->next_packet->dts == AV_NOPTS_VALUE) ? -1 : (int64_t)anim->next_packet->dts,
+ (anim->next_packet->pts == AV_NOPTS_VALUE) ? -1 : (int64_t)anim->next_packet->pts,
+ (anim->next_packet->flags & AV_PKT_FLAG_KEY) ? " KEY" : "");
+ if (anim->next_packet->stream_index == anim->videoStream) {
anim->pFrameComplete = 0;
- avcodec_decode_video2(
- anim->pCodecCtx, anim->pFrame, &anim->pFrameComplete, &anim->next_packet);
+ avcodec_send_packet(anim->pCodecCtx, anim->next_packet);
+ anim->pFrameComplete = avcodec_receive_frame(anim->pCodecCtx, anim->pFrame) == 0;
if (anim->pFrameComplete) {
anim->next_pts = av_get_pts_from_frame(anim->pFormatCtx, anim->pFrame);
av_log(anim->pFormatCtx,
AV_LOG_DEBUG,
- " FRAME DONE: next_pts=%" PRId64 " pkt_pts=%" PRId64 ", guessed_pts=%" PRId64 "\n",
+ " FRAME DONE: next_pts=%" PRId64 ", guessed_pts=%" PRId64 "\n",
(anim->pFrame->pts == AV_NOPTS_VALUE) ? -1 : (int64_t)anim->pFrame->pts,
- (anim->pFrame->pkt_pts == AV_NOPTS_VALUE) ? -1 : (int64_t)anim->pFrame->pkt_pts,
(int64_t)anim->next_pts);
break;
}
}
- av_free_packet(&anim->next_packet);
- anim->next_packet.stream_index = -1;
+ av_packet_unref(anim->next_packet);
+ anim->next_packet->stream_index = -1;
}
if (rval == AVERROR_EOF) {
- /* this sets size and data fields to zero,
- * which is necessary to decode the remaining data
- * in the decoder engine after EOF. It also prevents a memory
- * leak, since av_read_frame spills out a full size packet even
- * on EOF... (and: it's safe to call on NULL packets) */
-
- av_free_packet(&anim->next_packet);
-
- anim->next_packet.size = 0;
- anim->next_packet.data = 0;
-
+ /* Flush any remaining frames out of the decoder. */
anim->pFrameComplete = 0;
- avcodec_decode_video2(
- anim->pCodecCtx, anim->pFrame, &anim->pFrameComplete, &anim->next_packet);
+ avcodec_send_packet(anim->pCodecCtx, NULL);
+ anim->pFrameComplete = avcodec_receive_frame(anim->pCodecCtx, anim->pFrame) == 0;
if (anim->pFrameComplete) {
anim->next_pts = av_get_pts_from_frame(anim->pFormatCtx, anim->pFrame);
av_log(anim->pFormatCtx,
AV_LOG_DEBUG,
- " FRAME DONE (after EOF): next_pts=%" PRId64 " pkt_pts=%" PRId64
- ", guessed_pts=%" PRId64 "\n",
+ " FRAME DONE (after EOF): next_pts=%" PRId64 ", guessed_pts=%" PRId64 "\n",
(anim->pFrame->pts == AV_NOPTS_VALUE) ? -1 : (int64_t)anim->pFrame->pts,
- (anim->pFrame->pkt_pts == AV_NOPTS_VALUE) ? -1 : (int64_t)anim->pFrame->pkt_pts,
(int64_t)anim->next_pts);
rval = 0;
}
}
if (rval < 0) {
- anim->next_packet.stream_index = -1;
+ av_packet_unref(anim->next_packet);
+ anim->next_packet->stream_index = -1;
av_log(anim->pFormatCtx,
AV_LOG_ERROR,
" DECODE READ FAILED: av_read_frame() "
- "returned error: %d\n",
- rval);
+ "returned error: %s\n",
+ av_err2str(rval));
}
return (rval >= 0);
@@ -1154,7 +1149,7 @@ static int ffmpeg_generic_seek_workaround(struct anim *anim, int64_t requested_p
/* Read first video stream packet. */
AVPacket read_packet = {0};
while (av_read_frame(anim->pFormatCtx, &read_packet) >= 0) {
- if (anim->next_packet.stream_index == anim->videoStream) {
+ if (anim->next_packet->stream_index == anim->videoStream) {
break;
}
}
@@ -1246,9 +1241,9 @@ static void ffmpeg_seek_and_decode(struct anim *anim, int position, struct anim_
anim->next_pts = -1;
- if (anim->next_packet.stream_index == anim->videoStream) {
- av_free_packet(&anim->next_packet);
- anim->next_packet.stream_index = -1;
+ if (anim->next_packet->stream_index == anim->videoStream) {
+ av_packet_unref(anim->next_packet);
+ anim->next_packet->stream_index = -1;
}
/* memset(anim->pFrame, ...) ?? */
@@ -1351,32 +1346,30 @@ static void free_anim_ffmpeg(struct anim *anim)
}
if (anim->pCodecCtx) {
- avcodec_close(anim->pCodecCtx);
+ avcodec_free_context(&anim->pCodecCtx);
avformat_close_input(&anim->pFormatCtx);
+ av_packet_free(&anim->next_packet);
- /* Special case here: pFrame could share pointers with codec,
- * so in order to avoid double-free we don't use av_frame_free()
- * to free the frame.
- *
- * Could it be a bug in FFmpeg?
- */
- av_free(anim->pFrame);
+ av_frame_free(&anim->pFrame);
if (!need_aligned_ffmpeg_buffer(anim)) {
/* If there's no need for own aligned buffer it means that FFmpeg's
* frame shares the same buffer as temporary ImBuf. In this case we
* should not free the buffer when freeing the FFmpeg buffer.
*/
- avpicture_fill((AVPicture *)anim->pFrameRGB, NULL, AV_PIX_FMT_RGBA, anim->x, anim->y);
+ av_image_fill_arrays(anim->pFrameRGB->data,
+ anim->pFrameRGB->linesize,
+ NULL,
+ AV_PIX_FMT_RGBA,
+ anim->x,
+ anim->y,
+ 1);
}
av_frame_free(&anim->pFrameRGB);
av_frame_free(&anim->pFrameDeinterlaced);
sws_freeContext(anim->img_convert_ctx);
IMB_freeImBuf(anim->last_frame);
- if (anim->next_packet.stream_index != -1) {
- av_free_packet(&anim->next_packet);
- }
}
anim->duration_in_frames = 0;
}
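
Aside (not part of the patch): the decode path in anim_movie.c is the mirror image, trading avcodec_decode_video2() for avcodec_send_packet()/avcodec_receive_frame(). A reduced sketch with placeholder names:

    static int decode_next_frame(AVFormatContext *fmt_ctx, AVCodecContext *codec_ctx,
                                 int video_stream_index, AVFrame *out)
    {
      AVPacket *pkt = av_packet_alloc();
      int got_frame = 0;

      /* Feed packets of the selected stream until the decoder emits a frame. */
      while (!got_frame && av_read_frame(fmt_ctx, pkt) >= 0) {
        if (pkt->stream_index == video_stream_index) {
          if (avcodec_send_packet(codec_ctx, pkt) >= 0) {
            got_frame = (avcodec_receive_frame(codec_ctx, out) == 0);
          }
        }
        av_packet_unref(pkt);
      }

      if (!got_frame) {
        /* After EOF, drain frames still buffered inside the decoder. */
        avcodec_send_packet(codec_ctx, NULL);
        got_frame = (avcodec_receive_frame(codec_ctx, out) == 0);
      }

      av_packet_free(&pkt);
      return got_frame ? 0 : AVERROR_EOF;
    }
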
diff --git a/source/blender/imbuf/intern/indexer.c b/source/blender/imbuf/intern/indexer.c
index ef9f6d861a3..11ce77e3091 100644
--- a/source/blender/imbuf/intern/indexer.c
+++ b/source/blender/imbuf/intern/indexer.c
@@ -48,6 +48,7 @@
#ifdef WITH_FFMPEG
# include "ffmpeg_compat.h"
+# include <libavutil/imgutils.h>
#endif
static const char magic[] = "BlenMIdx";
@@ -488,14 +489,14 @@ static struct proxy_output_ctx *alloc_proxy_output_ffmpeg(
rv->of = avformat_alloc_context();
rv->of->oformat = av_guess_format("avi", NULL, NULL);
- BLI_strncpy(rv->of->filename, fname, sizeof(rv->of->filename));
+ rv->of->url = av_strdup(fname);
- fprintf(stderr, "Starting work on proxy: %s\n", rv->of->filename);
+ fprintf(stderr, "Starting work on proxy: %s\n", rv->of->url);
rv->st = avformat_new_stream(rv->of, NULL);
rv->st->id = 0;
- rv->c = rv->st->codec;
+ rv->c = avcodec_alloc_context3(NULL);
rv->c->codec_type = AVMEDIA_TYPE_VIDEO;
rv->c->codec_id = AV_CODEC_ID_H264;
rv->c->width = width;
@@ -513,7 +514,9 @@ static struct proxy_output_ctx *alloc_proxy_output_ffmpeg(
fprintf(stderr,
"No ffmpeg encoder available? "
"Proxy not built!\n");
- av_free(rv->of);
+ avcodec_free_context(&rv->c);
+ avformat_free_context(rv->of);
+ MEM_freeN(rv);
return NULL;
}
@@ -524,7 +527,7 @@ static struct proxy_output_ctx *alloc_proxy_output_ffmpeg(
rv->c->pix_fmt = AV_PIX_FMT_YUVJ420P;
}
- rv->c->sample_aspect_ratio = rv->st->sample_aspect_ratio = st->codec->sample_aspect_ratio;
+ rv->c->sample_aspect_ratio = rv->st->sample_aspect_ratio = st->sample_aspect_ratio;
rv->c->time_base.den = 25;
rv->c->time_base.num = 1;
@@ -557,34 +560,54 @@ static struct proxy_output_ctx *alloc_proxy_output_ffmpeg(
}
if (rv->of->flags & AVFMT_GLOBALHEADER) {
- rv->c->flags |= CODEC_FLAG_GLOBAL_HEADER;
+ rv->c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
- if (avio_open(&rv->of->pb, fname, AVIO_FLAG_WRITE) < 0) {
+ avcodec_parameters_from_context(rv->st->codecpar, rv->c);
+
+ int ret = avio_open(&rv->of->pb, fname, AVIO_FLAG_WRITE);
+
+ if (ret < 0) {
fprintf(stderr,
- "Couldn't open outputfile! "
- "Proxy not built!\n");
- av_free(rv->of);
- return 0;
+ "Couldn't open IO: %s\n"
+ "Proxy not built!\n",
+ av_err2str(ret));
+ avcodec_free_context(&rv->c);
+ avformat_free_context(rv->of);
+ MEM_freeN(rv);
+ return NULL;
}
- avcodec_open2(rv->c, rv->codec, &codec_opts);
+ ret = avcodec_open2(rv->c, rv->codec, &codec_opts);
+ if (ret < 0) {
+ fprintf(stderr,
+ "Couldn't open codec: %s\n"
+ "Proxy not built!\n",
+ av_err2str(ret));
+ avcodec_free_context(&rv->c);
+ avformat_free_context(rv->of);
+ MEM_freeN(rv);
+ return NULL;
+ }
- rv->orig_height = av_get_cropped_height_from_codec(st->codec);
+ rv->orig_height = st->codecpar->height;
- if (st->codec->width != width || st->codec->height != height ||
- st->codec->pix_fmt != rv->c->pix_fmt) {
+ if (st->codecpar->width != width || st->codecpar->height != height ||
+ st->codecpar->format != rv->c->pix_fmt) {
rv->frame = av_frame_alloc();
- avpicture_fill((AVPicture *)rv->frame,
- MEM_mallocN(avpicture_get_size(rv->c->pix_fmt, round_up(width, 16), height),
- "alloc proxy output frame"),
- rv->c->pix_fmt,
- round_up(width, 16),
- height);
-
- rv->sws_ctx = sws_getContext(st->codec->width,
+ av_image_fill_arrays(
+ rv->frame->data,
+ rv->frame->linesize,
+ MEM_mallocN(av_image_get_buffer_size(rv->c->pix_fmt, round_up(width, 16), height, 1),
+ "alloc proxy output frame"),
+ rv->c->pix_fmt,
+ round_up(width, 16),
+ height,
+ 1);
+
+ rv->sws_ctx = sws_getContext(st->codecpar->width,
rv->orig_height,
- st->codec->pix_fmt,
+ st->codecpar->format,
width,
height,
rv->c->pix_fmt,
@@ -594,26 +617,30 @@ static struct proxy_output_ctx *alloc_proxy_output_ffmpeg(
NULL);
}
- if (avformat_write_header(rv->of, NULL) < 0) {
+ ret = avformat_write_header(rv->of, NULL);
+ if (ret < 0) {
fprintf(stderr,
- "Couldn't set output parameters? "
- "Proxy not built!\n");
- av_free(rv->of);
- return 0;
+ "Couldn't write header: %s\n"
+ "Proxy not built!\n",
+ av_err2str(ret));
+
+ if (rv->frame) {
+ av_frame_free(&rv->frame);
+ }
+
+ avcodec_free_context(&rv->c);
+ avformat_free_context(rv->of);
+ MEM_freeN(rv);
+ return NULL;
}
return rv;
}
-static int add_to_proxy_output_ffmpeg(struct proxy_output_ctx *ctx, AVFrame *frame)
+static void add_to_proxy_output_ffmpeg(struct proxy_output_ctx *ctx, AVFrame *frame)
{
- AVPacket packet = {0};
- int ret, got_output;
-
- av_init_packet(&packet);
-
if (!ctx) {
- return 0;
+ return;
}
if (ctx->sws_ctx && frame &&
@@ -633,35 +660,46 @@ static int add_to_proxy_output_ffmpeg(struct proxy_output_ctx *ctx, AVFrame *fra
frame->pts = ctx->cfra++;
}
- ret = avcodec_encode_video2(ctx->c, &packet, frame, &got_output);
+ int ret = avcodec_send_frame(ctx->c, frame);
if (ret < 0) {
- fprintf(stderr, "Error encoding proxy frame %d for '%s'\n", ctx->cfra - 1, ctx->of->filename);
- return 0;
+ /* Can't send frame to encoder. This shouldn't happen. */
+ fprintf(stderr, "Can't send video frame: %s\n", av_err2str(ret));
+ return;
}
+ AVPacket *packet = av_packet_alloc();
+
+ while (ret >= 0) {
+ ret = avcodec_receive_packet(ctx->c, packet);
- if (got_output) {
- if (packet.pts != AV_NOPTS_VALUE) {
- packet.pts = av_rescale_q(packet.pts, ctx->c->time_base, ctx->st->time_base);
+ if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
+ /* No more packets to flush. */
+ break;
}
- if (packet.dts != AV_NOPTS_VALUE) {
- packet.dts = av_rescale_q(packet.dts, ctx->c->time_base, ctx->st->time_base);
+ if (ret < 0) {
+ fprintf(stderr,
+ "Error encoding proxy frame %d for '%s': %s\n",
+ ctx->cfra - 1,
+ ctx->of->url,
+ av_err2str(ret));
+ break;
}
- packet.stream_index = ctx->st->index;
+ packet->stream_index = ctx->st->index;
+ av_packet_rescale_ts(packet, ctx->c->time_base, ctx->st->time_base);
- if (av_interleaved_write_frame(ctx->of, &packet) != 0) {
+ int write_ret = av_interleaved_write_frame(ctx->of, packet);
+ if (write_ret != 0) {
fprintf(stderr,
"Error writing proxy frame %d "
- "into '%s'\n",
+ "into '%s': %s\n",
ctx->cfra - 1,
- ctx->of->filename);
- return 0;
+ ctx->of->url,
+ av_err2str(write_ret));
+ break;
}
-
- return 1;
}
- return 0;
+ av_packet_free(&packet);
}
static void free_proxy_output_ffmpeg(struct proxy_output_ctx *ctx, int rollback)
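avcodec_encode_video2() is gone; encoding is now a send/receive loop in which one submitted frame may produce zero or several packets, and passing frame == NULL drains the encoder. A generic version of the loop the hunk above introduces (function name is illustrative):

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <stdio.h>

/* Encode one frame (or flush with frame == NULL) and mux every packet it produces. */
static int encode_and_write(AVFormatContext *of, AVStream *st, AVCodecContext *c, AVFrame *frame)
{
  int ret = avcodec_send_frame(c, frame);
  if (ret < 0) {
    fprintf(stderr, "Cannot send frame to encoder: %s\n", av_err2str(ret));
    return ret;
  }

  AVPacket *pkt = av_packet_alloc();
  while (ret >= 0) {
    ret = avcodec_receive_packet(c, pkt);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
      ret = 0; /* Encoder needs more input, or draining has finished. */
      break;
    }
    if (ret < 0) {
      fprintf(stderr, "Encode error: %s\n", av_err2str(ret));
      break;
    }
    pkt->stream_index = st->index;
    av_packet_rescale_ts(pkt, c->time_base, st->time_base); /* Encoder -> muxer time base. */
    ret = av_interleaved_write_frame(of, pkt); /* The muxer takes ownership of the packet data. */
  }
  av_packet_free(&pkt);
  return ret;
}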
@@ -674,15 +712,15 @@ static void free_proxy_output_ffmpeg(struct proxy_output_ctx *ctx, int rollback)
}
if (!rollback) {
- while (add_to_proxy_output_ffmpeg(ctx, NULL)) {
- }
+ /* Flush the remaining packets. */
+ add_to_proxy_output_ffmpeg(ctx, NULL);
}
avcodec_flush_buffers(ctx->c);
av_write_trailer(ctx->of);
- avcodec_close(ctx->c);
+ avcodec_free_context(&ctx->c);
if (ctx->of->oformat) {
if (!(ctx->of->oformat->flags & AVFMT_NOFILE)) {
@@ -777,7 +815,7 @@ static IndexBuildContext *index_ffmpeg_create_context(struct anim *anim,
/* Find the video stream */
context->videoStream = -1;
for (i = 0; i < context->iFormatCtx->nb_streams; i++) {
- if (context->iFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
+ if (context->iFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
if (streamcount > 0) {
streamcount--;
continue;
@@ -794,9 +832,8 @@ static IndexBuildContext *index_ffmpeg_create_context(struct anim *anim,
}
context->iStream = context->iFormatCtx->streams[context->videoStream];
- context->iCodecCtx = context->iStream->codec;
- context->iCodec = avcodec_find_decoder(context->iCodecCtx->codec_id);
+ context->iCodec = avcodec_find_decoder(context->iStream->codecpar->codec_id);
if (context->iCodec == NULL) {
avformat_close_input(&context->iFormatCtx);
@@ -804,7 +841,9 @@ static IndexBuildContext *index_ffmpeg_create_context(struct anim *anim,
return NULL;
}
- context->iCodecCtx->workaround_bugs = 1;
+ context->iCodecCtx = avcodec_alloc_context3(NULL);
+ avcodec_parameters_to_context(context->iCodecCtx, context->iStream->codecpar);
+ context->iCodecCtx->workaround_bugs = FF_BUG_AUTODETECT;
if (context->iCodec->capabilities & AV_CODEC_CAP_AUTO_THREADS) {
context->iCodecCtx->thread_count = 0;
@@ -822,19 +861,19 @@ static IndexBuildContext *index_ffmpeg_create_context(struct anim *anim,
if (avcodec_open2(context->iCodecCtx, context->iCodec, NULL) < 0) {
avformat_close_input(&context->iFormatCtx);
+ avcodec_free_context(&context->iCodecCtx);
MEM_freeN(context);
return NULL;
}
for (i = 0; i < num_proxy_sizes; i++) {
if (proxy_sizes_in_use & proxy_sizes[i]) {
- context->proxy_ctx[i] = alloc_proxy_output_ffmpeg(
- anim,
- context->iStream,
- proxy_sizes[i],
- context->iCodecCtx->width * proxy_fac[i],
- av_get_cropped_height_from_codec(context->iCodecCtx) * proxy_fac[i],
- quality);
+ context->proxy_ctx[i] = alloc_proxy_output_ffmpeg(anim,
+ context->iStream,
+ proxy_sizes[i],
+ context->iCodecCtx->width * proxy_fac[i],
+ context->iCodecCtx->height * proxy_fac[i],
+ quality);
if (!context->proxy_ctx[i]) {
proxy_sizes_in_use &= ~proxy_sizes[i];
}
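The decoder side gets the same treatment: instead of borrowing the stream's (now removed) context, the indexer allocates one, fills it from codecpar, and opens it. A condensed sketch of that sequence (helper name is illustrative):

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

/* Open a decoder for `stream`; returns NULL on failure. */
static AVCodecContext *open_stream_decoder(const AVStream *stream)
{
  const AVCodec *codec = avcodec_find_decoder(stream->codecpar->codec_id);
  if (codec == NULL) {
    return NULL;
  }
  AVCodecContext *ctx = avcodec_alloc_context3(codec);
  if (ctx == NULL) {
    return NULL;
  }
  /* Copy width, height, pixel format, extradata, ... from the demuxer's parameters. */
  if (avcodec_parameters_to_context(ctx, stream->codecpar) < 0) {
    avcodec_free_context(&ctx);
    return NULL;
  }
  ctx->pkt_timebase = stream->time_base; /* Lets the decoder interpret packet timestamps. */
  if (avcodec_open2(ctx, codec, NULL) < 0) {
    avcodec_free_context(&ctx);
    return NULL;
  }
  return ctx;
}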
@@ -873,7 +912,7 @@ static void index_rebuild_ffmpeg_finish(FFmpegIndexBuilderContext *context, int
}
}
- avcodec_close(context->iCodecCtx);
+ avcodec_free_context(&context->iCodecCtx);
avformat_close_input(&context->iFormatCtx);
MEM_freeN(context);
@@ -938,23 +977,18 @@ static int index_rebuild_ffmpeg(FFmpegIndexBuilderContext *context,
short *do_update,
float *progress)
{
- AVFrame *in_frame = 0;
- AVPacket next_packet;
+ AVFrame *in_frame = av_frame_alloc();
+ AVPacket *next_packet = av_packet_alloc();
uint64_t stream_size;
- memset(&next_packet, 0, sizeof(AVPacket));
-
- in_frame = av_frame_alloc();
-
stream_size = avio_size(context->iFormatCtx->pb);
context->frame_rate = av_q2d(av_guess_frame_rate(context->iFormatCtx, context->iStream, NULL));
context->pts_time_base = av_q2d(context->iStream->time_base);
- while (av_read_frame(context->iFormatCtx, &next_packet) >= 0) {
- int frame_finished = 0;
+ while (av_read_frame(context->iFormatCtx, next_packet) >= 0) {
float next_progress =
- (float)((int)floor(((double)next_packet.pos) * 100 / ((double)stream_size) + 0.5)) / 100;
+ (float)((int)floor(((double)next_packet->pos) * 100 / ((double)stream_size) + 0.5)) / 100;
if (*progress != next_progress) {
*progress = next_progress;
@@ -962,50 +996,59 @@ static int index_rebuild_ffmpeg(FFmpegIndexBuilderContext *context,
}
if (*stop) {
- av_free_packet(&next_packet);
break;
}
- if (next_packet.stream_index == context->videoStream) {
- if (next_packet.flags & AV_PKT_FLAG_KEY) {
+ if (next_packet->stream_index == context->videoStream) {
+ if (next_packet->flags & AV_PKT_FLAG_KEY) {
context->last_seek_pos = context->seek_pos;
context->last_seek_pos_dts = context->seek_pos_dts;
- context->seek_pos = next_packet.pos;
- context->seek_pos_dts = next_packet.dts;
- context->seek_pos_pts = next_packet.pts;
+ context->seek_pos = next_packet->pos;
+ context->seek_pos_dts = next_packet->dts;
+ context->seek_pos_pts = next_packet->pts;
}
- avcodec_decode_video2(context->iCodecCtx, in_frame, &frame_finished, &next_packet);
- }
+ int ret = avcodec_send_packet(context->iCodecCtx, next_packet);
+ while (ret >= 0) {
+ ret = avcodec_receive_frame(context->iCodecCtx, in_frame);
- if (frame_finished) {
- index_rebuild_ffmpeg_proc_decoded_frame(context, &next_packet, in_frame);
+ if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
+ /* No more frames to flush. */
+ break;
+ }
+ if (ret < 0) {
+ fprintf(stderr, "Error decoding proxy frame: %s\n", av_err2str(ret));
+ break;
+ }
+ index_rebuild_ffmpeg_proc_decoded_frame(context, next_packet, in_frame);
+ }
}
- av_free_packet(&next_packet);
}
/* process pictures still stuck in decoder engine after EOF
- * according to ffmpeg docs using 0-size packets.
+ * according to ffmpeg docs using NULL packets.
*
* At least, if we haven't already stopped... */
- /* this creates the 0-size packet and prevents a memory leak. */
- av_free_packet(&next_packet);
-
if (!*stop) {
- int frame_finished;
-
- do {
- frame_finished = 0;
+ int ret = avcodec_send_packet(context->iCodecCtx, NULL);
- avcodec_decode_video2(context->iCodecCtx, in_frame, &frame_finished, &next_packet);
+ while (ret >= 0) {
+ ret = avcodec_receive_frame(context->iCodecCtx, in_frame);
- if (frame_finished) {
- index_rebuild_ffmpeg_proc_decoded_frame(context, &next_packet, in_frame);
+ if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
+ /* No more frames to flush. */
+ break;
}
- } while (frame_finished);
+ if (ret < 0) {
+ fprintf(stderr, "Error flushing proxy frame: %s\n", av_err2str(ret));
+ break;
+ }
+ index_rebuild_ffmpeg_proc_decoded_frame(context, next_packet, in_frame);
+ }
}
+ av_packet_free(&next_packet);
av_free(in_frame);
return 1;
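avcodec_decode_video2() and its frame_finished flag are replaced by avcodec_send_packet()/avcodec_receive_frame(); sending a NULL packet switches the decoder into draining mode so frames still buffered inside it are returned after EOF, which is what the flush loop above does. A generic version with a placeholder per-frame callback (names are illustrative):

#include <libavcodec/avcodec.h>
#include <stdio.h>

typedef void (*frame_handler)(AVFrame *frame, void *user_data);

/* Decode every frame produced by `pkt`; call once with pkt == NULL at EOF to drain. */
static int decode_packet(AVCodecContext *ctx, const AVPacket *pkt, AVFrame *frame,
                         frame_handler on_frame, void *user_data)
{
  int ret = avcodec_send_packet(ctx, pkt);
  if (ret < 0) {
    fprintf(stderr, "Cannot send packet to decoder: %s\n", av_err2str(ret));
    return ret;
  }
  while (ret >= 0) {
    ret = avcodec_receive_frame(ctx, frame);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
      return 0; /* Decoder needs more input, or draining has finished. */
    }
    if (ret < 0) {
      fprintf(stderr, "Decode error: %s\n", av_err2str(ret));
      return ret;
    }
    on_frame(frame, user_data); /* A single packet can yield several frames. */
  }
  return ret;
}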
diff --git a/source/blender/imbuf/intern/util.c b/source/blender/imbuf/intern/util.c
index 64dad5de902..fabf6c5c3bd 100644
--- a/source/blender/imbuf/intern/util.c
+++ b/source/blender/imbuf/intern/util.c
@@ -245,7 +245,6 @@ static void ffmpeg_log_callback(void *ptr, int level, const char *format, va_lis
void IMB_ffmpeg_init(void)
{
- av_register_all();
avdevice_register_all();
ffmpeg_last_error[0] = '\0';
@@ -269,7 +268,6 @@ static int isffmpeg(const char *filepath)
unsigned int i;
int videoStream;
AVCodec *pCodec;
- AVCodecContext *pCodecCtx;
if (BLI_path_extension_check_n(filepath,
".swf",
@@ -310,8 +308,8 @@ static int isffmpeg(const char *filepath)
/* Find the first video stream */
videoStream = -1;
for (i = 0; i < pFormatCtx->nb_streams; i++) {
- if (pFormatCtx->streams[i] && pFormatCtx->streams[i]->codec &&
- (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)) {
+ if (pFormatCtx->streams[i] && pFormatCtx->streams[i]->codecpar &&
+ (pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)) {
videoStream = i;
break;
}
@@ -322,21 +320,15 @@ static int isffmpeg(const char *filepath)
return 0;
}
- pCodecCtx = pFormatCtx->streams[videoStream]->codec;
+ AVCodecParameters *codec_par = pFormatCtx->streams[videoStream]->codecpar;
/* Find the decoder for the video stream */
- pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
+ pCodec = avcodec_find_decoder(codec_par->codec_id);
if (pCodec == NULL) {
avformat_close_input(&pFormatCtx);
return 0;
}
- if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
- avformat_close_input(&pFormatCtx);
- return 0;
- }
-
- avcodec_close(pCodecCtx);
avformat_close_input(&pFormatCtx);
return 1;
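With codec parameters available on every stream, the probe no longer has to open (and then close) a decoder at all; checking codecpar->codec_type and whether a decoder exists is enough. An equivalent standalone probe (function name is illustrative):

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

/* Return 1 when `path` contains a video stream FFmpeg has a decoder for, 0 otherwise. */
static int has_decodable_video(const char *path)
{
  AVFormatContext *fmt_ctx = NULL;
  int found = 0;

  if (avformat_open_input(&fmt_ctx, path, NULL, NULL) != 0) {
    return 0;
  }
  if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
    avformat_close_input(&fmt_ctx);
    return 0;
  }
  for (unsigned int i = 0; i < fmt_ctx->nb_streams; i++) {
    const AVCodecParameters *par = fmt_ctx->streams[i]->codecpar;
    /* No decoder is opened; the parameters alone identify the stream type and codec. */
    if (par->codec_type == AVMEDIA_TYPE_VIDEO && avcodec_find_decoder(par->codec_id) != NULL) {
      found = 1;
      break;
    }
  }
  avformat_close_input(&fmt_ctx);
  return found;
}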