git.blender.org/blender.git
author     Sebastian Parborg <darkdefende@gmail.com>  2021-05-07 18:10:03 +0300
committer  Sebastian Parborg <darkdefende@gmail.com>  2021-05-07 18:12:25 +0300
commit     8d6264ea12bfac0912c7249f00af2ac8e3409ed1 (patch)
tree       c3a819ace2123a05fe873f122f719f1e59e5fedd /source/blender/blenkernel/intern
parent     5bfd5e77b7825c4a76299dee7ea3e64b4ec95e56 (diff)
Cleanup: Remove deprecated variables and function calls from our ffmpeg code

There needs to be more cleanup for ffmpeg 4.5 (ffmpeg master branch). However, this now compiles on ffmpeg 4.4 without any deprecation warnings.

Reviewed By: Sergey, Richard Antalik

Differential Revision: http://developer.blender.org/D10338
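For context, the bulk of this patch replaces the removed avcodec_encode_video2()/avcodec_encode_audio2() calls with FFmpeg's send/receive encoding API. The code below is a minimal standalone sketch of that loop, condensed from the pattern the patch uses in write_video_frame(), write_audio_frame() and flush_ffmpeg(); the helper name encode_and_write() is illustrative only and does not appear in the patch.

#include <stdio.h>

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/error.h>

/* Illustrative helper (not from the patch): run one send/receive encode
 * cycle and mux the resulting packets. Passing frame == NULL puts the
 * encoder into draining mode, which is how the new flush_ffmpeg() gets
 * the delayed frames out at the end of the stream. */
static int encode_and_write(AVFormatContext *oc,
                            AVCodecContext *c,
                            AVStream *st,
                            AVFrame *frame)
{
  int success = 0;

  int ret = avcodec_send_frame(c, frame);
  if (ret < 0) {
    fprintf(stderr, "Can't send frame to encoder: %s\n", av_err2str(ret));
    return -1;
  }

  AVPacket *pkt = av_packet_alloc();

  while (ret >= 0) {
    ret = avcodec_receive_packet(c, pkt);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
      /* Encoder needs more input, or is fully drained. */
      break;
    }
    if (ret < 0) {
      fprintf(stderr, "Error encoding frame: %s\n", av_err2str(ret));
      success = -1;
      break;
    }

    /* Packets come back in the codec time base; rescale to the stream
     * time base before muxing. */
    av_packet_rescale_ts(pkt, c->time_base, st->time_base);
    pkt->stream_index = st->index;

    if (av_interleaved_write_frame(oc, pkt) != 0) {
      success = -1;
      break;
    }
  }

  av_packet_free(&pkt);
  return success;
}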
Diffstat (limited to 'source/blender/blenkernel/intern')
-rw-r--r--  source/blender/blenkernel/intern/writeffmpeg.c  |  346
1 file changed, 148 insertions, 198 deletions
diff --git a/source/blender/blenkernel/intern/writeffmpeg.c b/source/blender/blenkernel/intern/writeffmpeg.c
index e5550cee124..5d03278f3c1 100644
--- a/source/blender/blenkernel/intern/writeffmpeg.c
+++ b/source/blender/blenkernel/intern/writeffmpeg.c
@@ -56,6 +56,7 @@
# include <libavcodec/avcodec.h>
# include <libavformat/avformat.h>
# include <libavutil/imgutils.h>
+# include <libavutil/opt.h>
# include <libavutil/rational.h>
# include <libavutil/samplefmt.h>
# include <libswscale/swscale.h>
@@ -80,6 +81,8 @@ typedef struct FFMpegContext {
int ffmpeg_preset; /* see eFFMpegPreset */
AVFormatContext *outfile;
+ AVCodecContext *video_codec;
+ AVCodecContext *audio_codec;
AVStream *video_stream;
AVStream *audio_stream;
AVFrame *current_frame; /* Image frame in output pixel format. */
@@ -91,10 +94,6 @@ typedef struct FFMpegContext {
uint8_t *audio_input_buffer;
uint8_t *audio_deinterleave_buffer;
int audio_input_samples;
-# ifndef FFMPEG_HAVE_ENCODE_AUDIO2
- uint8_t *audio_output_buffer;
- int audio_outbuf_size;
-# endif
double audio_time;
bool audio_deinterleave;
int audio_sample_size;
@@ -141,33 +140,22 @@ static int request_float_audio_buffer(int codec_id)
}
# ifdef WITH_AUDASPACE
+
static int write_audio_frame(FFMpegContext *context)
{
- AVCodecContext *c = NULL;
- AVPacket pkt;
AVFrame *frame = NULL;
- int got_output = 0;
-
- c = context->audio_stream->codec;
-
- av_init_packet(&pkt);
- pkt.size = 0;
- pkt.data = NULL;
+ AVCodecContext *c = context->audio_codec;
AUD_Device_read(
context->audio_mixdown_device, context->audio_input_buffer, context->audio_input_samples);
context->audio_time += (double)context->audio_input_samples / (double)c->sample_rate;
-# ifdef FFMPEG_HAVE_ENCODE_AUDIO2
frame = av_frame_alloc();
- av_frame_unref(frame);
frame->pts = context->audio_time / av_q2d(c->time_base);
frame->nb_samples = context->audio_input_samples;
frame->format = c->sample_fmt;
frame->channels = c->channels;
-# ifdef FFMPEG_HAVE_FRAME_CHANNEL_LAYOUT
frame->channel_layout = c->channel_layout;
-# endif
if (context->audio_deinterleave) {
int channel, i;
@@ -195,61 +183,49 @@ static int write_audio_frame(FFMpegContext *context)
context->audio_input_samples * c->channels * context->audio_sample_size,
1);
- if (avcodec_encode_audio2(c, &pkt, frame, &got_output) < 0) {
- // XXX error("Error writing audio packet");
- return -1;
- }
+ int success = 0;
- if (!got_output) {
- av_frame_free(&frame);
- return 0;
+ int ret = avcodec_send_frame(c, frame);
+ if (ret < 0) {
+ /* Can't send frame to encoder. This shouldn't happen. */
+ fprintf(stderr, "Can't send audio frame: %s\n", av_err2str(ret));
+ success = -1;
}
-# else
- pkt.size = avcodec_encode_audio(c,
- context->audio_output_buffer,
- context->audio_outbuf_size,
- (short *)context->audio_input_buffer);
- if (pkt.size < 0) {
- // XXX error("Error writing audio packet");
- return -1;
- }
+ AVPacket *pkt = av_packet_alloc();
- pkt.data = context->audio_output_buffer;
- got_output = 1;
-# endif
+ while (ret >= 0) {
- if (got_output) {
- if (pkt.pts != AV_NOPTS_VALUE) {
- pkt.pts = av_rescale_q(pkt.pts, c->time_base, context->audio_stream->time_base);
+ ret = avcodec_receive_packet(c, pkt);
+ if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
+ break;
}
- if (pkt.dts != AV_NOPTS_VALUE) {
- pkt.dts = av_rescale_q(pkt.dts, c->time_base, context->audio_stream->time_base);
+ if (ret < 0) {
+ fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
+ success = -1;
}
- if (pkt.duration > 0) {
- pkt.duration = av_rescale_q(pkt.duration, c->time_base, context->audio_stream->time_base);
+
+ av_packet_rescale_ts(pkt, c->time_base, context->audio_stream->time_base);
+ if (pkt->duration > 0) {
+ pkt->duration = av_rescale_q(pkt->duration, c->time_base, context->audio_stream->time_base);
}
- pkt.stream_index = context->audio_stream->index;
+ pkt->stream_index = context->audio_stream->index;
- pkt.flags |= AV_PKT_FLAG_KEY;
+ pkt->flags |= AV_PKT_FLAG_KEY;
- if (av_interleaved_write_frame(context->outfile, &pkt) != 0) {
- fprintf(stderr, "Error writing audio packet!\n");
- if (frame) {
- av_frame_free(&frame);
- }
- return -1;
+ int write_ret = av_interleaved_write_frame(context->outfile, pkt);
+ if (write_ret != 0) {
+ fprintf(stderr, "Error writing audio packet: %s\n", av_err2str(write_ret));
+ success = -1;
+ break;
}
-
- av_free_packet(&pkt);
}
- if (frame) {
- av_frame_free(&frame);
- }
+ av_packet_free(&pkt);
+ av_frame_free(&frame);
- return 0;
+ return success;
}
# endif /* #ifdef WITH_AUDASPACE */
@@ -265,14 +241,15 @@ static AVFrame *alloc_picture(int pix_fmt, int width, int height)
if (!f) {
return NULL;
}
- size = avpicture_get_size(pix_fmt, width, height);
+ size = av_image_get_buffer_size(pix_fmt, width, height, 1);
/* allocate the actual picture buffer */
buf = MEM_mallocN(size, "AVFrame buffer");
if (!buf) {
free(f);
return NULL;
}
- avpicture_fill((AVPicture *)f, buf, pix_fmt, width, height);
+
+ av_image_fill_arrays(f->data, f->linesize, buf, pix_fmt, width, height, 1);
f->format = pix_fmt;
f->width = width;
f->height = height;
@@ -342,58 +319,57 @@ static const char **get_file_extensions(int format)
}
/* Write a frame to the output file */
-static int write_video_frame(
- FFMpegContext *context, const RenderData *rd, int cfra, AVFrame *frame, ReportList *reports)
+static int write_video_frame(FFMpegContext *context, int cfra, AVFrame *frame, ReportList *reports)
{
- int got_output;
int ret, success = 1;
- AVCodecContext *c = context->video_stream->codec;
- AVPacket packet = {0};
+ AVPacket *packet = av_packet_alloc();
- av_init_packet(&packet);
+ AVCodecContext *c = context->video_codec;
frame->pts = cfra;
- ret = avcodec_encode_video2(c, &packet, frame, &got_output);
+ ret = avcodec_send_frame(c, frame);
+ if (ret < 0) {
+ /* Can't send frame to encoder. This shouldn't happen. */
+ fprintf(stderr, "Can't send video frame: %s\n", av_err2str(ret));
+ success = -1;
+ }
- if (ret >= 0 && got_output) {
- if (packet.pts != AV_NOPTS_VALUE) {
- packet.pts = av_rescale_q(packet.pts, c->time_base, context->video_stream->time_base);
- PRINT("Video Frame PTS: %d\n", (int)packet.pts);
- }
- else {
- PRINT("Video Frame PTS: not set\n");
- }
- if (packet.dts != AV_NOPTS_VALUE) {
- packet.dts = av_rescale_q(packet.dts, c->time_base, context->video_stream->time_base);
- PRINT("Video Frame DTS: %d\n", (int)packet.dts);
+ while (ret >= 0) {
+ ret = avcodec_receive_packet(c, packet);
+
+ if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
+ /* No more packets available. */
+ break;
}
- else {
- PRINT("Video Frame DTS: not set\n");
+ if (ret < 0) {
+ fprintf(stderr, "Error encoding frame: %s\n", av_err2str(ret));
+ break;
}
- packet.stream_index = context->video_stream->index;
- ret = av_interleaved_write_frame(context->outfile, &packet);
- success = (ret == 0);
- }
- else if (ret < 0) {
- success = 0;
+ packet->stream_index = context->video_stream->index;
+ av_packet_rescale_ts(packet, c->time_base, context->video_stream->time_base);
+ if (av_interleaved_write_frame(context->outfile, packet) != 0) {
+ success = -1;
+ break;
+ }
}
if (!success) {
BKE_report(reports, RPT_ERROR, "Error writing frame");
+ PRINT("Error writing frame: %s\n", av_err2str(ret));
}
+ av_packet_free(&packet);
+
return success;
}
/* read and encode a frame of audio from the buffer */
-static AVFrame *generate_video_frame(FFMpegContext *context,
- const uint8_t *pixels,
- ReportList *reports)
+static AVFrame *generate_video_frame(FFMpegContext *context, const uint8_t *pixels)
{
- AVCodecContext *c = context->video_stream->codec;
- int height = c->height;
+ AVCodecParameters *codec = context->video_stream->codecpar;
+ int height = codec->height;
AVFrame *rgb_frame;
if (context->img_convert_frame != NULL) {
@@ -438,7 +414,7 @@ static AVFrame *generate_video_frame(FFMpegContext *context,
(const uint8_t *const *)rgb_frame->data,
rgb_frame->linesize,
0,
- c->height,
+ codec->height,
context->current_frame->data,
context->current_frame->linesize);
}
@@ -446,9 +422,7 @@ static AVFrame *generate_video_frame(FFMpegContext *context,
return context->current_frame;
}
-static void set_ffmpeg_property_option(AVCodecContext *c,
- IDProperty *prop,
- AVDictionary **dictionary)
+static void set_ffmpeg_property_option(IDProperty *prop, AVDictionary **dictionary)
{
char name[128];
char *param;
@@ -536,7 +510,7 @@ static void set_ffmpeg_properties(RenderData *rd,
for (curr = prop->data.group.first; curr; curr = curr->next) {
if (ffmpeg_proprty_valid(c, prop_name, curr)) {
- set_ffmpeg_property_option(c, curr, dictionary);
+ set_ffmpeg_property_option(curr, dictionary);
}
}
}
@@ -553,7 +527,6 @@ static AVStream *alloc_video_stream(FFMpegContext *context,
int error_size)
{
AVStream *st;
- AVCodecContext *c;
AVCodec *codec;
AVDictionary *opts = NULL;
@@ -567,7 +540,8 @@ static AVStream *alloc_video_stream(FFMpegContext *context,
/* Set up the codec context */
- c = st->codec;
+ context->video_codec = avcodec_alloc_context3(NULL);
+ AVCodecContext *c = context->video_codec;
c->codec_id = codec_id;
c->codec_type = AVMEDIA_TYPE_VIDEO;
@@ -650,11 +624,9 @@ static AVStream *alloc_video_stream(FFMpegContext *context,
}
}
- /* Deprecated and not doing anything since July 2015, deleted in recent ffmpeg */
- // c->me_method = ME_EPZS;
-
codec = avcodec_find_encoder(c->codec_id);
if (!codec) {
+ avcodec_free_context(&c);
return NULL;
}
@@ -714,7 +686,7 @@ static AVStream *alloc_video_stream(FFMpegContext *context,
if ((of->oformat->flags & AVFMT_GLOBALHEADER)) {
PRINT("Using global header\n");
- c->flags |= CODEC_FLAG_GLOBAL_HEADER;
+ c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
/* xasp & yasp got float lately... */
@@ -742,6 +714,7 @@ static AVStream *alloc_video_stream(FFMpegContext *context,
if (avcodec_open2(c, codec, &opts) < 0) {
BLI_strncpy(error, IMB_ffmpeg_last_error(), error_size);
av_dict_free(&opts);
+ avcodec_free_context(&c);
return NULL;
}
av_dict_free(&opts);
@@ -769,6 +742,8 @@ static AVStream *alloc_video_stream(FFMpegContext *context,
NULL);
}
+ avcodec_parameters_from_context(st->codecpar, c);
+
return st;
}
@@ -780,7 +755,6 @@ static AVStream *alloc_audio_stream(FFMpegContext *context,
int error_size)
{
AVStream *st;
- AVCodecContext *c;
AVCodec *codec;
AVDictionary *opts = NULL;
@@ -792,7 +766,8 @@ static AVStream *alloc_audio_stream(FFMpegContext *context,
}
st->id = 1;
- c = st->codec;
+ context->audio_codec = avcodec_alloc_context3(NULL);
+ AVCodecContext *c = context->audio_codec;
c->thread_count = BLI_system_thread_count();
c->thread_type = FF_THREAD_SLICE;
@@ -804,7 +779,6 @@ static AVStream *alloc_audio_stream(FFMpegContext *context,
c->sample_fmt = AV_SAMPLE_FMT_S16;
c->channels = rd->ffcodecdata.audio_channels;
-# ifdef FFMPEG_HAVE_FRAME_CHANNEL_LAYOUT
switch (rd->ffcodecdata.audio_channels) {
case FFM_CHANNELS_MONO:
c->channel_layout = AV_CH_LAYOUT_MONO;
@@ -822,7 +796,6 @@ static AVStream *alloc_audio_stream(FFMpegContext *context,
c->channel_layout = AV_CH_LAYOUT_7POINT1;
break;
}
-# endif
if (request_float_audio_buffer(codec_id)) {
/* mainly for AAC codec which is experimental */
@@ -833,6 +806,7 @@ static AVStream *alloc_audio_stream(FFMpegContext *context,
codec = avcodec_find_encoder(c->codec_id);
if (!codec) {
// XXX error("Couldn't find a valid audio codec");
+ avcodec_free_context(&c);
return NULL;
}
@@ -844,13 +818,13 @@ static AVStream *alloc_audio_stream(FFMpegContext *context,
* Float samples in particular are not always supported. */
const enum AVSampleFormat *p = codec->sample_fmts;
for (; *p != -1; p++) {
- if (*p == st->codec->sample_fmt) {
+ if (*p == c->sample_fmt) {
break;
}
}
if (*p == -1) {
/* sample format incompatible with codec. Defaulting to a format known to work */
- st->codec->sample_fmt = codec->sample_fmts[0];
+ c->sample_fmt = codec->sample_fmts[0];
}
}
@@ -859,18 +833,18 @@ static AVStream *alloc_audio_stream(FFMpegContext *context,
int best = 0;
int best_dist = INT_MAX;
for (; *p; p++) {
- int dist = abs(st->codec->sample_rate - *p);
+ int dist = abs(c->sample_rate - *p);
if (dist < best_dist) {
best_dist = dist;
best = *p;
}
}
/* best is the closest supported sample rate (same as selected if best_dist == 0) */
- st->codec->sample_rate = best;
+ c->sample_rate = best;
}
if (of->oformat->flags & AVFMT_GLOBALHEADER) {
- c->flags |= CODEC_FLAG_GLOBAL_HEADER;
+ c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
set_ffmpeg_properties(rd, c, "audio", &opts);
@@ -879,32 +853,25 @@ static AVStream *alloc_audio_stream(FFMpegContext *context,
// XXX error("Couldn't initialize audio codec");
BLI_strncpy(error, IMB_ffmpeg_last_error(), error_size);
av_dict_free(&opts);
+ avcodec_free_context(&c);
return NULL;
}
av_dict_free(&opts);
/* need to prevent floating point exception when using vorbis audio codec,
* initialize this value in the same way as it's done in FFmpeg itself (sergey) */
- st->codec->time_base.num = 1;
- st->codec->time_base.den = st->codec->sample_rate;
-
-# ifndef FFMPEG_HAVE_ENCODE_AUDIO2
- context->audio_outbuf_size = FF_MIN_BUFFER_SIZE;
-# endif
+ c->time_base.num = 1;
+ c->time_base.den = c->sample_rate;
if (c->frame_size == 0) {
/* Used to be if ((c->codec_id >= CODEC_ID_PCM_S16LE) && (c->codec_id <= CODEC_ID_PCM_DVD))
* not sure if that is needed anymore, so let's try out if there are any
* complaints regarding some FFmpeg versions users might have. */
- context->audio_input_samples = FF_MIN_BUFFER_SIZE * 8 / c->bits_per_coded_sample / c->channels;
+ context->audio_input_samples = AV_INPUT_BUFFER_MIN_SIZE * 8 / c->bits_per_coded_sample /
+ c->channels;
}
else {
context->audio_input_samples = c->frame_size;
-# ifndef FFMPEG_HAVE_ENCODE_AUDIO2
- if (c->frame_size * c->channels * sizeof(int16_t) * 4 > context->audio_outbuf_size) {
- context->audio_outbuf_size = c->frame_size * c->channels * sizeof(int16_t) * 4;
- }
-# endif
}
context->audio_deinterleave = av_sample_fmt_is_planar(c->sample_fmt);
@@ -913,10 +880,6 @@ static AVStream *alloc_audio_stream(FFMpegContext *context,
context->audio_input_buffer = (uint8_t *)av_malloc(context->audio_input_samples * c->channels *
context->audio_sample_size);
-# ifndef FFMPEG_HAVE_ENCODE_AUDIO2
- context->audio_output_buffer = (uint8_t *)av_malloc(context->audio_outbuf_size);
-# endif
-
if (context->audio_deinterleave) {
context->audio_deinterleave_buffer = (uint8_t *)av_malloc(
context->audio_input_samples * c->channels * context->audio_sample_size);
@@ -924,6 +887,8 @@ static AVStream *alloc_audio_stream(FFMpegContext *context,
context->audio_time = 0.0f;
+ avcodec_parameters_from_context(st->codecpar, c);
+
return st;
}
/* essential functions -- start, append, end */
@@ -949,7 +914,7 @@ static void ffmpeg_dict_set_float(AVDictionary **dict, const char *key, float va
static void ffmpeg_add_metadata_callback(void *data,
const char *propname,
char *propvalue,
- int len)
+ int UNUSED(len))
{
AVDictionary **metadata = (AVDictionary **)data;
av_dict_set(metadata, propname, propvalue, 0);
@@ -1040,7 +1005,7 @@ static int start_ffmpeg_impl(FFMpegContext *context,
fmt->audio_codec = context->ffmpeg_audio_codec;
- BLI_strncpy(of->filename, name, sizeof(of->filename));
+ of->url = av_strdup(name);
/* set the codec to the user's selection */
switch (context->ffmpeg_type) {
case FFMPEG_AVI:
@@ -1105,9 +1070,11 @@ static int start_ffmpeg_impl(FFMpegContext *context,
if (!context->video_stream) {
if (error[0]) {
BKE_report(reports, RPT_ERROR, error);
+ PRINT("Video stream error: %s\n", error);
}
else {
BKE_report(reports, RPT_ERROR, "Error initializing video stream");
+ PRINT("Error initializing video stream");
}
goto fail;
}
@@ -1119,9 +1086,11 @@ static int start_ffmpeg_impl(FFMpegContext *context,
if (!context->audio_stream) {
if (error[0]) {
BKE_report(reports, RPT_ERROR, error);
+ PRINT("Audio stream error: %s\n", error);
}
else {
BKE_report(reports, RPT_ERROR, "Error initializing audio stream");
+ PRINT("Error initializing audio stream");
}
goto fail;
}
@@ -1129,6 +1098,7 @@ static int start_ffmpeg_impl(FFMpegContext *context,
if (!(fmt->flags & AVFMT_NOFILE)) {
if (avio_open(&of->pb, name, AVIO_FLAG_WRITE) < 0) {
BKE_report(reports, RPT_ERROR, "Could not open file for writing");
+ PRINT("Could not open file for writing\n");
goto fail;
}
}
@@ -1138,10 +1108,12 @@ static int start_ffmpeg_impl(FFMpegContext *context,
&of->metadata, context->stamp_data, ffmpeg_add_metadata_callback, false);
}
- if (avformat_write_header(of, NULL) < 0) {
+ int ret = avformat_write_header(of, NULL);
+ if (ret < 0) {
BKE_report(reports,
RPT_ERROR,
"Could not initialize streams, probably unsupported codec combination");
+ PRINT("Could not write media header: %s\n", av_err2str(ret));
goto fail;
}
@@ -1156,13 +1128,11 @@ fail:
avio_close(of->pb);
}
- if (context->video_stream && context->video_stream->codec) {
- avcodec_close(context->video_stream->codec);
+ if (context->video_stream) {
context->video_stream = NULL;
}
- if (context->audio_stream && context->audio_stream->codec) {
- avcodec_close(context->audio_stream->codec);
+ if (context->audio_stream) {
context->audio_stream = NULL;
}
@@ -1190,46 +1160,36 @@ fail:
*/
static void flush_ffmpeg(FFMpegContext *context)
{
- int ret = 0;
+ AVCodecContext *c = context->video_codec;
+ AVPacket *packet = av_packet_alloc();
- AVCodecContext *c = context->video_stream->codec;
- /* get the delayed frames */
- while (1) {
- int got_output;
- AVPacket packet = {0};
- av_init_packet(&packet);
+ avcodec_send_frame(c, NULL);
- ret = avcodec_encode_video2(c, &packet, NULL, &got_output);
- if (ret < 0) {
- fprintf(stderr, "Error encoding delayed frame %d\n", ret);
+ /* Get the packets frames. */
+ int ret = 1;
+ while (ret >= 0) {
+ ret = avcodec_receive_packet(c, packet);
+
+ if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
+ /* No more packets to flush. */
break;
}
- if (!got_output) {
+ if (ret < 0) {
+ fprintf(stderr, "Error encoding delayed frame: %s\n", av_err2str(ret));
break;
}
- if (packet.pts != AV_NOPTS_VALUE) {
- packet.pts = av_rescale_q(packet.pts, c->time_base, context->video_stream->time_base);
- PRINT("Video Frame PTS: %d\n", (int)packet.pts);
- }
- else {
- PRINT("Video Frame PTS: not set\n");
- }
- if (packet.dts != AV_NOPTS_VALUE) {
- packet.dts = av_rescale_q(packet.dts, c->time_base, context->video_stream->time_base);
- PRINT("Video Frame DTS: %d\n", (int)packet.dts);
- }
- else {
- PRINT("Video Frame DTS: not set\n");
- }
- packet.stream_index = context->video_stream->index;
- ret = av_interleaved_write_frame(context->outfile, &packet);
- if (ret != 0) {
- fprintf(stderr, "Error writing delayed frame %d\n", ret);
+ packet->stream_index = context->video_stream->index;
+ av_packet_rescale_ts(packet, c->time_base, context->video_stream->time_base);
+
+ int write_ret = av_interleaved_write_frame(context->outfile, packet);
+ if (write_ret != 0) {
+ fprintf(stderr, "Error writing delayed frame: %s\n", av_err2str(write_ret));
break;
}
}
- avcodec_flush_buffers(context->video_stream->codec);
+
+ av_packet_free(&packet);
}
/* **********************************************************************
@@ -1327,7 +1287,8 @@ int BKE_ffmpeg_start(void *context_v,
success = start_ffmpeg_impl(context, rd, rectx, recty, suffix, reports);
# ifdef WITH_AUDASPACE
if (context->audio_stream) {
- AVCodecContext *c = context->audio_stream->codec;
+ AVCodecContext *c = context->audio_codec;
+
AUD_DeviceSpecs specs;
specs.channels = c->channels;
@@ -1354,10 +1315,6 @@ int BKE_ffmpeg_start(void *context_v,
specs.rate = rd->ffcodecdata.audio_mixrate;
context->audio_mixdown_device = BKE_sound_mixdown(
scene, specs, preview ? rd->psfra : rd->sfra, rd->ffcodecdata.audio_volume);
-# ifdef FFMPEG_CODEC_TIME_BASE
- c->time_base.den = specs.rate;
- c->time_base.num = 1;
-# endif
}
# endif
return success;
@@ -1398,8 +1355,8 @@ int BKE_ffmpeg_append(void *context_v,
// write_audio_frames(frame / (((double)rd->frs_sec) / rd->frs_sec_base));
if (context->video_stream) {
- avframe = generate_video_frame(context, (unsigned char *)pixels, reports);
- success = (avframe && write_video_frame(context, rd, frame - start_frame, avframe, reports));
+ avframe = generate_video_frame(context, (unsigned char *)pixels);
+ success = (avframe && write_video_frame(context, frame - start_frame, avframe, reports));
if (context->ffmpeg_autosplit) {
if (avio_tell(context->outfile->pb) > FFMPEG_AUTOSPLIT_SIZE) {
@@ -1430,7 +1387,7 @@ static void end_ffmpeg_impl(FFMpegContext *context, int is_autosplit)
}
# endif
- if (context->video_stream && context->video_stream->codec) {
+ if (context->video_stream) {
PRINT("Flushing delayed frames...\n");
flush_ffmpeg(context);
}
@@ -1441,14 +1398,12 @@ static void end_ffmpeg_impl(FFMpegContext *context, int is_autosplit)
/* Close the video codec */
- if (context->video_stream != NULL && context->video_stream->codec != NULL) {
- avcodec_close(context->video_stream->codec);
+ if (context->video_stream != NULL) {
PRINT("zero video stream %p\n", context->video_stream);
context->video_stream = NULL;
}
- if (context->audio_stream != NULL && context->audio_stream->codec != NULL) {
- avcodec_close(context->audio_stream->codec);
+ if (context->audio_stream != NULL) {
context->audio_stream = NULL;
}
@@ -1467,6 +1422,16 @@ static void end_ffmpeg_impl(FFMpegContext *context, int is_autosplit)
avio_close(context->outfile->pb);
}
}
+
+ if (context->video_codec != NULL) {
+ avcodec_free_context(&context->video_codec);
+ context->video_codec = NULL;
+ }
+ if (context->audio_codec != NULL) {
+ avcodec_free_context(&context->audio_codec);
+ context->audio_codec = NULL;
+ }
+
if (context->outfile != NULL) {
avformat_free_context(context->outfile);
context->outfile = NULL;
@@ -1475,12 +1440,6 @@ static void end_ffmpeg_impl(FFMpegContext *context, int is_autosplit)
av_free(context->audio_input_buffer);
context->audio_input_buffer = NULL;
}
-# ifndef FFMPEG_HAVE_ENCODE_AUDIO2
- if (context->audio_output_buffer != NULL) {
- av_free(context->audio_output_buffer);
- context->audio_output_buffer = NULL;
- }
-# endif
if (context->audio_deinterleave_buffer != NULL) {
av_free(context->audio_deinterleave_buffer);
@@ -1560,12 +1519,12 @@ static IDProperty *BKE_ffmpeg_property_add(RenderData *rd,
switch (o->type) {
case AV_OPT_TYPE_INT:
case AV_OPT_TYPE_INT64:
- val.i = FFMPEG_DEF_OPT_VAL_INT(o);
+ val.i = o->default_val.i64;
idp_type = IDP_INT;
break;
case AV_OPT_TYPE_DOUBLE:
case AV_OPT_TYPE_FLOAT:
- val.f = FFMPEG_DEF_OPT_VAL_DOUBLE(o);
+ val.f = o->default_val.dbl;
idp_type = IDP_FLOAT;
break;
case AV_OPT_TYPE_STRING:
@@ -1707,16 +1666,9 @@ static void ffmpeg_set_expert_options(RenderData *rd)
BKE_ffmpeg_property_add_string(rd, "video", "trellis:0");
BKE_ffmpeg_property_add_string(rd, "video", "weightb:1");
-# ifdef FFMPEG_HAVE_DEPRECATED_FLAGS2
- BKE_ffmpeg_property_add_string(rd, "video", "flags2:dct8x8");
- BKE_ffmpeg_property_add_string(rd, "video", "directpred:3");
- BKE_ffmpeg_property_add_string(rd, "video", "flags2:fastpskip");
- BKE_ffmpeg_property_add_string(rd, "video", "flags2:wpred");
-# else
BKE_ffmpeg_property_add_string(rd, "video", "8x8dct:1");
BKE_ffmpeg_property_add_string(rd, "video", "fast-pskip:1");
BKE_ffmpeg_property_add_string(rd, "video", "wpredp:2");
-# endif
}
else if (codec_id == AV_CODEC_ID_DNXHD) {
if (rd->ffcodecdata.flags & FFMPEG_LOSSLESS_OUTPUT) {
@@ -1871,14 +1823,12 @@ bool BKE_ffmpeg_alpha_channel_is_supported(const RenderData *rd)
{
int codec = rd->ffcodecdata.codec;
-# ifdef FFMPEG_FFV1_ALPHA_SUPPORTED
- /* Visual Studio 2019 doesn't like #ifdef within ELEM(). */
- if (codec == AV_CODEC_ID_FFV1) {
- return true;
- }
-# endif
-
- return ELEM(codec, AV_CODEC_ID_QTRLE, AV_CODEC_ID_PNG, AV_CODEC_ID_VP9, AV_CODEC_ID_HUFFYUV);
+ return ELEM(codec,
+ AV_CODEC_ID_FFV1,
+ AV_CODEC_ID_QTRLE,
+ AV_CODEC_ID_PNG,
+ AV_CODEC_ID_VP9,
+ AV_CODEC_ID_HUFFYUV);
}
void *BKE_ffmpeg_context_create(void)