git.blender.org/blender.git
 source/blender/blenkernel/intern/writeffmpeg.c | 26 +++++++++++++++++---------
 1 file changed, 17 insertions(+), 9 deletions(-)
diff --git a/source/blender/blenkernel/intern/writeffmpeg.c b/source/blender/blenkernel/intern/writeffmpeg.c
index a20c918c517..035e56993f9 100644
--- a/source/blender/blenkernel/intern/writeffmpeg.c
+++ b/source/blender/blenkernel/intern/writeffmpeg.c
@@ -87,6 +87,7 @@ typedef struct FFMpegContext {
AVStream *video_stream;
AVStream *audio_stream;
AVFrame *current_frame; /* Image frame in output pixel format. */
+ int video_time;
/* Image frame in Blender's own pixel format, may need conversion to the output pixel format. */
AVFrame *img_convert_frame;
@@ -96,6 +97,7 @@ typedef struct FFMpegContext {
uint8_t *audio_deinterleave_buffer;
int audio_input_samples;
double audio_time;
+ double audio_time_total;
bool audio_deinterleave;
int audio_sample_size;
@@ -318,14 +320,15 @@ static const char **get_file_extensions(int format)
}
/* Write a frame to the output file */
-static int write_video_frame(FFMpegContext *context, int cfra, AVFrame *frame, ReportList *reports)
+static int write_video_frame(FFMpegContext *context, AVFrame *frame, ReportList *reports)
{
int ret, success = 1;
AVPacket *packet = av_packet_alloc();
AVCodecContext *c = context->video_codec;
- frame->pts = cfra;
+ frame->pts = context->video_time;
+ context->video_time++;
ret = avcodec_send_frame(c, frame);
if (ret < 0) {
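
The change above replaces the scene-relative frame number (cfra) with an internal counter, so PTS values start at zero and stay contiguous for every output file, including the extra files created by autosplit. For illustration only, a minimal standalone sketch of the same pattern against FFmpeg's send/receive API; the helper name and signature are hypothetical, not part of this patch:

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

/* Hypothetical helper: encode one frame with a monotonically increasing PTS. */
static int encode_frame_with_counter(
    AVFormatContext *fmt, AVCodecContext *c, AVStream *st, AVFrame *frame, int64_t *video_time)
{
  frame->pts = (*video_time)++; /* PTS comes from the counter, not the scene frame. */

  int ret = avcodec_send_frame(c, frame);
  while (ret >= 0) {
    AVPacket *pkt = av_packet_alloc();
    ret = avcodec_receive_packet(c, pkt);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
      av_packet_free(&pkt);
      return 0; /* Encoder wants more input, or is fully flushed. */
    }
    if (ret < 0) {
      av_packet_free(&pkt);
      return ret;
    }
    av_packet_rescale_ts(pkt, c->time_base, st->time_base);
    pkt->stream_index = st->index;
    ret = av_interleaved_write_frame(fmt, pkt);
    av_packet_free(&pkt);
  }
  return ret;
}
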
@@ -804,6 +807,8 @@ static AVStream *alloc_video_stream(FFMpegContext *context,
avcodec_parameters_from_context(st->codecpar, c);
+ context->video_time = 0;
+
return st;
}
@@ -1397,9 +1402,10 @@ static void write_audio_frames(FFMpegContext *context, double to_pts)
AVCodecContext *c = context->audio_codec;
while (context->audio_stream) {
- if ((context->audio_time >= to_pts) || !write_audio_frame(context)) {
+ if ((context->audio_time_total >= to_pts) || !write_audio_frame(context)) {
break;
}
+ context->audio_time_total += (double)context->audio_input_samples / (double)c->sample_rate;
context->audio_time += (double)context->audio_input_samples / (double)c->sample_rate;
}
}
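
A worked example of the accounting above, with assumed values that are not part of this patch: at 1024 samples per encoded chunk and a 48000 Hz sample rate, each iteration advances both clocks by 1024 / 48000 ≈ 0.0213 s. audio_time is apparently reset whenever a new output file is started, while audio_time_total keeps accumulating, so the loop now tests a render-wide clock against to_pts:

#include <stdio.h>

/* Assumed values, for illustration only. */
int main(void)
{
  const int audio_input_samples = 1024; /* samples per write_audio_frame() call */
  const int sample_rate = 48000;        /* c->sample_rate */
  const double step = (double)audio_input_samples / (double)sample_rate;
  printf("each chunk advances both audio clocks by %.6f s\n", step); /* 0.021333 */
  return 0;
}
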
@@ -1423,22 +1429,23 @@ int BKE_ffmpeg_append(void *context_v,
if (context->video_stream) {
avframe = generate_video_frame(context, (unsigned char *)pixels);
- success = (avframe && write_video_frame(context, frame - start_frame, avframe, reports));
+ success = (avframe && write_video_frame(context, avframe, reports));
+# ifdef WITH_AUDASPACE
+ /* Add +1 frame because we want to encode audio up until the next video frame. */
+ write_audio_frames(
+ context, (frame - start_frame + 1) / (((double)rd->frs_sec) / (double)rd->frs_sec_base));
+# endif
if (context->ffmpeg_autosplit) {
if (avio_tell(context->outfile->pb) > FFMPEG_AUTOSPLIT_SIZE) {
end_ffmpeg_impl(context, true);
context->ffmpeg_autosplit_count++;
+
success &= start_ffmpeg_impl(context, rd, rectx, recty, suffix, reports);
}
}
}
-# ifdef WITH_AUDASPACE
- /* Add +1 frame because we want to encode audio up until the next video frame. */
- write_audio_frames(
- context, (frame - start_frame + 1) / (((double)rd->frs_sec) / (double)rd->frs_sec_base));
-# endif
return success;
}
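
Since frs_sec / frs_sec_base is the frame rate, the second argument above is a time in seconds, and the +1 extends the audio up to the start of the next video frame. Moving the call inside the video branch and ahead of the autosplit check presumably guarantees that a segment's audio is flushed before end_ffmpeg_impl() can close that file. A worked example with assumed values (not from this patch):

#include <stdio.h>

int main(void)
{
  const int frame = 10, start_frame = 1;                 /* assumed scene frames */
  const double frs_sec = 30000.0, frs_sec_base = 1001.0; /* 30000/1001 = 29.97 fps */
  const double to_pts = (frame - start_frame + 1) / (frs_sec / frs_sec_base);
  printf("encode audio up to %.4f s\n", to_pts); /* 10 / 29.97 ≈ 0.3337 s */
  return 0;
}
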
@@ -1881,6 +1888,7 @@ void *BKE_ffmpeg_context_create(void)
context->ffmpeg_autosplit_count = 0;
context->ffmpeg_preview = false;
context->stamp_data = NULL;
+ context->audio_time_total = 0.0;
return context;
}