diff options
Diffstat (limited to 'source/blender/blenkernel/intern/writeffmpeg.c')
-rw-r--r-- | source/blender/blenkernel/intern/writeffmpeg.c | 44 |
1 files changed, 23 insertions, 21 deletions
diff --git a/source/blender/blenkernel/intern/writeffmpeg.c b/source/blender/blenkernel/intern/writeffmpeg.c index 9b3103a638b..323da7473b5 100644 --- a/source/blender/blenkernel/intern/writeffmpeg.c +++ b/source/blender/blenkernel/intern/writeffmpeg.c @@ -149,7 +149,6 @@ static int write_audio_frame(FFMpegContext *context) AUD_Device_read( context->audio_mixdown_device, context->audio_input_buffer, context->audio_input_samples); - context->audio_time += (double)context->audio_input_samples / (double)c->sample_rate; frame = av_frame_alloc(); frame->pts = context->audio_time / av_q2d(c->time_base); @@ -184,7 +183,7 @@ static int write_audio_frame(FFMpegContext *context) context->audio_input_samples * c->channels * context->audio_sample_size, 1); - int success = 0; + int success = 1; int ret = avcodec_send_frame(c, frame); if (ret < 0) { @@ -369,7 +368,7 @@ static int write_video_frame(FFMpegContext *context, int cfra, AVFrame *frame, R return success; } -/* read and encode a frame of audio from the buffer */ +/* read and encode a frame of video from the buffer */ static AVFrame *generate_video_frame(FFMpegContext *context, const uint8_t *pixels) { AVCodecParameters *codec = context->video_stream->codecpar; @@ -741,7 +740,7 @@ static AVStream *alloc_video_stream(FFMpegContext *context, } } - if ((of->oformat->flags & AVFMT_GLOBALHEADER)) { + if (of->oformat->flags & AVFMT_GLOBALHEADER) { PRINT("Using global header\n"); c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; } @@ -1226,9 +1225,8 @@ fail: * parameter. 
* </p> */ -static void flush_ffmpeg(FFMpegContext *context) +static void flush_ffmpeg(AVCodecContext *c, AVStream *stream, AVFormatContext *outfile) { - AVCodecContext *c = context->video_codec; AVPacket *packet = av_packet_alloc(); avcodec_send_frame(c, NULL); @@ -1247,13 +1245,13 @@ static void flush_ffmpeg(FFMpegContext *context) break; } - packet->stream_index = context->video_stream->index; - av_packet_rescale_ts(packet, c->time_base, context->video_stream->time_base); + packet->stream_index = stream->index; + av_packet_rescale_ts(packet, c->time_base, stream->time_base); # ifdef FFMPEG_USE_DURATION_WORKAROUND - my_guess_pkt_duration(context->outfile, context->video_stream, packet); + my_guess_pkt_duration(outfile, stream, packet); # endif - int write_ret = av_interleaved_write_frame(context->outfile, packet); + int write_ret = av_interleaved_write_frame(outfile, packet); if (write_ret != 0) { fprintf(stderr, "Error writing delayed frame: %s\n", av_err2str(write_ret)); break; } @@ -1396,12 +1394,13 @@ static void end_ffmpeg_impl(FFMpegContext *context, int is_autosplit); # ifdef WITH_AUDASPACE static void write_audio_frames(FFMpegContext *context, double to_pts) { - int finished = 0; + AVCodecContext *c = context->audio_codec; - while (context->audio_stream && !finished) { - if ((context->audio_time >= to_pts) || (write_audio_frame(context))) { - finished = 1; + while (context->audio_stream) { + if ((context->audio_time >= to_pts) || !write_audio_frame(context)) { + break; } + context->audio_time += (double)context->audio_input_samples / (double)c->sample_rate; } } # endif @@ -1422,9 +1421,6 @@ int BKE_ffmpeg_append(void *context_v, PRINT("Writing frame %i, render width=%d, render height=%d\n", frame, rectx, recty); - /* why is this done before writing the video frame and again at end_ffmpeg? 
*/ - // write_audio_frames(frame / (((double)rd->frs_sec) / rd->frs_sec_base)); - if (context->video_stream) { avframe = generate_video_frame(context, (unsigned char *)pixels); success = (avframe && write_video_frame(context, frame - start_frame, avframe, reports)); @@ -1439,8 +1435,9 @@ int BKE_ffmpeg_append(void *context_v, } # ifdef WITH_AUDASPACE - write_audio_frames(context, - (frame - start_frame) / (((double)rd->frs_sec) / (double)rd->frs_sec_base)); + /* Add +1 frame because we want to encode audio up until the next video frame. */ + write_audio_frames( + context, (frame - start_frame + 1) / (((double)rd->frs_sec) / (double)rd->frs_sec_base)); # endif return success; } @@ -1461,8 +1458,13 @@ static void end_ffmpeg_impl(FFMpegContext *context, int is_autosplit) # endif if (context->video_stream) { - PRINT("Flushing delayed frames...\n"); - flush_ffmpeg(context); + PRINT("Flushing delayed video frames...\n"); + flush_ffmpeg(context->video_codec, context->video_stream, context->outfile); + } + + if (context->audio_stream) { + PRINT("Flushing delayed audio frames...\n"); + flush_ffmpeg(context->audio_codec, context->audio_stream, context->outfile); } if (context->outfile) { |