git.blender.org/blender.git

author     Sergey Sharybin <sergey.vfx@gmail.com>  2016-02-16 14:32:42 +0300
committer  Sergey Sharybin <sergey.vfx@gmail.com>  2016-02-16 14:34:15 +0300
commit     0b03785eb5d46dfc053d7f8ef4924219fe7759c5 (patch)
tree       799459f58c2b69c243ee22811e6241d651832885 /source/blender/blenkernel
parent     462b6dd919ba98738c76d0bba013c77b587c3577 (diff)
Make Blender compilable with FFmpeg-3.0
While it's not something we'll be using for the official release, it's nice to support new libraries at least on an "it compiles" level, so there aren't that many frustrated developers around. Nexyon, please have a look into the Audaspace changes :)
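For context, FFmpeg 3.0 removed several long-deprecated libavcodec entry points (avcodec_alloc_frame, avcodec_get_frame_defaults, avcodec_free_frame) and the unprefixed PIX_FMT_* pixel-format aliases, so the patch below switches to their libavutil replacements. A minimal compatibility sketch, not part of this commit, of how the renamed frame helpers could be wrapped behind a version check if both old and new FFmpeg still had to build; the 55.28.1 threshold and the compat_* names are illustrative assumptions:

#include <libavcodec/avcodec.h>

/* Hypothetical shim, for illustration only: pick the frame helpers that the
 * detected libavcodec actually provides. The 55.28.1 cutover below is an
 * approximation of when av_frame_alloc()/av_frame_free() became available. */
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55, 28, 1)
#  define COMPAT_HAVE_AV_FRAME_API 1
#endif

static AVFrame *compat_frame_alloc(void)
{
#ifdef COMPAT_HAVE_AV_FRAME_API
    return av_frame_alloc();                 /* new API, used by this patch */
#else
    AVFrame *frame = avcodec_alloc_frame();  /* removed in FFmpeg 3.0 */
    if (frame)
        avcodec_get_frame_defaults(frame);   /* mirrors the pre-patch call pattern */
    return frame;
#endif
}

static void compat_frame_free(AVFrame **frame)
{
#ifdef COMPAT_HAVE_AV_FRAME_API
    av_frame_free(frame);       /* new API, also NULLs the pointer */
#else
    avcodec_free_frame(frame);  /* removed in FFmpeg 3.0 */
#endif
}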
Diffstat (limited to 'source/blender/blenkernel')
-rw-r--r--  source/blender/blenkernel/intern/writeffmpeg.c | 38
1 file changed, 19 insertions(+), 19 deletions(-)
diff --git a/source/blender/blenkernel/intern/writeffmpeg.c b/source/blender/blenkernel/intern/writeffmpeg.c
index 35fd94d0747..edeccf472c8 100644
--- a/source/blender/blenkernel/intern/writeffmpeg.c
+++ b/source/blender/blenkernel/intern/writeffmpeg.c
@@ -138,8 +138,8 @@ static int write_audio_frame(FFMpegContext *context)
context->audio_time += (double) context->audio_input_samples / (double) c->sample_rate;
#ifdef FFMPEG_HAVE_ENCODE_AUDIO2
- frame = avcodec_alloc_frame();
- avcodec_get_frame_defaults(frame);
+ frame = av_frame_alloc();
+ av_frame_unref(frame);
frame->pts = context->audio_time / av_q2d(c->time_base);
frame->nb_samples = context->audio_input_samples;
frame->format = c->sample_fmt;
@@ -172,7 +172,7 @@ static int write_audio_frame(FFMpegContext *context)
}
if (!got_output) {
- avcodec_free_frame(&frame);
+ av_frame_free(&frame);
return 0;
}
#else
@@ -202,7 +202,7 @@ static int write_audio_frame(FFMpegContext *context)
if (av_interleaved_write_frame(context->outfile, &pkt) != 0) {
fprintf(stderr, "Error writing audio packet!\n");
if (frame)
- avcodec_free_frame(&frame);
+ av_frame_free(&frame);
return -1;
}
@@ -210,7 +210,7 @@ static int write_audio_frame(FFMpegContext *context)
}
if (frame)
- avcodec_free_frame(&frame);
+ av_frame_free(&frame);
return 0;
}
@@ -224,7 +224,7 @@ static AVFrame *alloc_picture(int pix_fmt, int width, int height)
int size;
/* allocate space for the struct */
- f = avcodec_alloc_frame();
+ f = av_frame_alloc();
if (!f) return NULL;
size = avpicture_get_size(pix_fmt, width, height);
/* allocate the actual picture buffer */
@@ -363,8 +363,8 @@ static AVFrame *generate_video_frame(FFMpegContext *context, uint8_t *pixels, Re
int height = c->height;
AVFrame *rgb_frame;
- if (c->pix_fmt != PIX_FMT_BGR32) {
- rgb_frame = alloc_picture(PIX_FMT_BGR32, width, height);
+ if (c->pix_fmt != AV_PIX_FMT_BGR32) {
+ rgb_frame = alloc_picture(AV_PIX_FMT_BGR32, width, height);
if (!rgb_frame) {
BKE_report(reports, RPT_ERROR, "Could not allocate temporary frame");
return NULL;
@@ -414,14 +414,14 @@ static AVFrame *generate_video_frame(FFMpegContext *context, uint8_t *pixels, Re
}
}
- if (c->pix_fmt != PIX_FMT_BGR32) {
+ if (c->pix_fmt != AV_PIX_FMT_BGR32) {
sws_scale(context->img_convert_ctx, (const uint8_t *const *) rgb_frame->data,
rgb_frame->linesize, 0, c->height,
context->current_frame->data, context->current_frame->linesize);
delete_picture(rgb_frame);
}
- context->current_frame->format = PIX_FMT_BGR32;
+ context->current_frame->format = AV_PIX_FMT_BGR32;
context->current_frame->width = width;
context->current_frame->height = height;
@@ -586,12 +586,12 @@ static AVStream *alloc_video_stream(FFMpegContext *context, RenderData *rd, int
}
else {
/* makes HuffYUV happy ... */
- c->pix_fmt = PIX_FMT_YUV422P;
+ c->pix_fmt = AV_PIX_FMT_YUV422P;
}
if (context->ffmpeg_type == FFMPEG_XVID) {
/* arghhhh ... */
- c->pix_fmt = PIX_FMT_YUV420P;
+ c->pix_fmt = AV_PIX_FMT_YUV420P;
c->codec_tag = (('D' << 24) + ('I' << 16) + ('V' << 8) + 'X');
}
@@ -604,26 +604,26 @@ static AVStream *alloc_video_stream(FFMpegContext *context, RenderData *rd, int
/* Keep lossless encodes in the RGB domain. */
if (codec_id == AV_CODEC_ID_HUFFYUV) {
if (rd->im_format.planes == R_IMF_PLANES_RGBA) {
- c->pix_fmt = PIX_FMT_BGRA;
+ c->pix_fmt = AV_PIX_FMT_BGRA;
}
else {
- c->pix_fmt = PIX_FMT_RGB32;
+ c->pix_fmt = AV_PIX_FMT_RGB32;
}
}
if (codec_id == AV_CODEC_ID_FFV1) {
- c->pix_fmt = PIX_FMT_RGB32;
+ c->pix_fmt = AV_PIX_FMT_RGB32;
}
if (codec_id == AV_CODEC_ID_QTRLE) {
if (rd->im_format.planes == R_IMF_PLANES_RGBA) {
- c->pix_fmt = PIX_FMT_ARGB;
+ c->pix_fmt = AV_PIX_FMT_ARGB;
}
}
if (codec_id == AV_CODEC_ID_PNG) {
if (rd->im_format.planes == R_IMF_PLANES_RGBA) {
- c->pix_fmt = PIX_FMT_RGBA;
+ c->pix_fmt = AV_PIX_FMT_RGBA;
}
}
@@ -661,8 +661,8 @@ static AVStream *alloc_video_stream(FFMpegContext *context, RenderData *rd, int
context->current_frame = alloc_picture(c->pix_fmt, c->width, c->height);
- context->img_convert_ctx = sws_getContext(c->width, c->height, PIX_FMT_BGR32, c->width, c->height, c->pix_fmt, SWS_BICUBIC,
- NULL, NULL, NULL);
+ context->img_convert_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_BGR32, c->width, c->height, c->pix_fmt, SWS_BICUBIC,
+ NULL, NULL, NULL);
return st;
}
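For reference, a small standalone sketch (not from the patch) of the FFmpeg 3.0 frame lifecycle that writeffmpeg.c now relies on; the function name and frame dimensions are illustrative, and it assumes FFmpeg 3.0 headers:

#include <libavutil/frame.h>
#include <libavutil/pixfmt.h>

/* Illustration only: allocate an AVFrame with the new API, set the same kind
 * of fields the patch sets, and release it. av_frame_alloc() already fills in
 * default field values, and av_frame_free() frees the frame and NULLs the
 * pointer, which is why the old avcodec_* helpers are no longer needed. */
static int example_frame_roundtrip(void)
{
    AVFrame *frame = av_frame_alloc();
    if (!frame)
        return -1;

    frame->format = AV_PIX_FMT_BGR32;  /* unprefixed PIX_FMT_* aliases are gone in 3.0 */
    frame->width  = 64;
    frame->height = 64;

    av_frame_free(&frame);
    return 0;
}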