diff options
author | James Almer <jamrial@gmail.com> | 2017-04-30 04:01:03 +0300 |
---|---|---|
committer | James Almer <jamrial@gmail.com> | 2017-05-05 23:30:35 +0300 |
commit | c4b08c8a4e54b752641d0792d9a73e16e62a0bbc (patch) | |
tree | 58145bd7ee6aea2d76ed1daf87d2c188e0007071 /libavcodec/hevcdec.c | |
parent | f52fbf4f3ed02a7d872d8a102006f29b4421f360 (diff) |
avcodec/hevcdec: remove HEVCContext usage from hevc_sei
Based on the H264 SEI implementation.
Reviewed-by: Hendrik Leppkes <h.leppkes@gmail.com>
Reviewed-by: Aaron Levinson <alevinsn@aracnet.com>
Signed-off-by: James Almer <jamrial@gmail.com>
Diffstat (limited to 'libavcodec/hevcdec.c')
-rw-r--r-- | libavcodec/hevcdec.c | 90 |
1 file changed, 46 insertions, 44 deletions
diff --git a/libavcodec/hevcdec.c b/libavcodec/hevcdec.c index 2fb08d81d2..2a02edab28 100644 --- a/libavcodec/hevcdec.c +++ b/libavcodec/hevcdec.c @@ -2559,18 +2559,18 @@ static int set_side_data(HEVCContext *s) { AVFrame *out = s->ref->frame; - if (s->sei_frame_packing_present && - s->frame_packing_arrangement_type >= 3 && - s->frame_packing_arrangement_type <= 5 && - s->content_interpretation_type > 0 && - s->content_interpretation_type < 3) { + if (s->sei.frame_packing.present && + s->sei.frame_packing.arrangement_type >= 3 && + s->sei.frame_packing.arrangement_type <= 5 && + s->sei.frame_packing.content_interpretation_type > 0 && + s->sei.frame_packing.content_interpretation_type < 3) { AVStereo3D *stereo = av_stereo3d_create_side_data(out); if (!stereo) return AVERROR(ENOMEM); - switch (s->frame_packing_arrangement_type) { + switch (s->sei.frame_packing.arrangement_type) { case 3: - if (s->quincunx_subsampling) + if (s->sei.frame_packing.quincunx_subsampling) stereo->type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX; else stereo->type = AV_STEREO3D_SIDEBYSIDE; @@ -2583,13 +2583,14 @@ static int set_side_data(HEVCContext *s) break; } - if (s->content_interpretation_type == 2) + if (s->sei.frame_packing.content_interpretation_type == 2) stereo->flags = AV_STEREO3D_FLAG_INVERT; } - if (s->sei_display_orientation_present && - (s->sei_anticlockwise_rotation || s->sei_hflip || s->sei_vflip)) { - double angle = s->sei_anticlockwise_rotation * 360 / (double) (1 << 16); + if (s->sei.display_orientation.present && + (s->sei.display_orientation.anticlockwise_rotation || + s->sei.display_orientation.hflip || s->sei.display_orientation.vflip)) { + double angle = s->sei.display_orientation.anticlockwise_rotation * 360 / (double) (1 << 16); AVFrameSideData *rotation = av_frame_new_side_data(out, AV_FRAME_DATA_DISPLAYMATRIX, sizeof(int32_t) * 9); @@ -2598,16 +2599,17 @@ static int set_side_data(HEVCContext *s) av_display_rotation_set((int32_t *)rotation->data, angle); 
av_display_matrix_flip((int32_t *)rotation->data, - s->sei_hflip, s->sei_vflip); + s->sei.display_orientation.hflip, + s->sei.display_orientation.vflip); } // Decrement the mastering display flag when IRAP frame has no_rasl_output_flag=1 // so the side data persists for the entire coded video sequence. - if (s->sei_mastering_display_info_present > 0 && + if (s->sei.mastering_display.present > 0 && IS_IRAP(s) && s->no_rasl_output_flag) { - s->sei_mastering_display_info_present--; + s->sei.mastering_display.present--; } - if (s->sei_mastering_display_info_present) { + if (s->sei.mastering_display.present) { // HEVC uses a g,b,r ordering, which we convert to a more natural r,g,b const int mapping[3] = {2, 0, 1}; const int chroma_den = 50000; @@ -2620,19 +2622,19 @@ static int set_side_data(HEVCContext *s) for (i = 0; i < 3; i++) { const int j = mapping[i]; - metadata->display_primaries[i][0].num = s->display_primaries[j][0]; + metadata->display_primaries[i][0].num = s->sei.mastering_display.display_primaries[j][0]; metadata->display_primaries[i][0].den = chroma_den; - metadata->display_primaries[i][1].num = s->display_primaries[j][1]; + metadata->display_primaries[i][1].num = s->sei.mastering_display.display_primaries[j][1]; metadata->display_primaries[i][1].den = chroma_den; } - metadata->white_point[0].num = s->white_point[0]; + metadata->white_point[0].num = s->sei.mastering_display.white_point[0]; metadata->white_point[0].den = chroma_den; - metadata->white_point[1].num = s->white_point[1]; + metadata->white_point[1].num = s->sei.mastering_display.white_point[1]; metadata->white_point[1].den = chroma_den; - metadata->max_luminance.num = s->max_mastering_luminance; + metadata->max_luminance.num = s->sei.mastering_display.max_luminance; metadata->max_luminance.den = luma_den; - metadata->min_luminance.num = s->min_mastering_luminance; + metadata->min_luminance.num = s->sei.mastering_display.min_luminance; metadata->min_luminance.den = luma_den; 
metadata->has_luminance = 1; metadata->has_primaries = 1; @@ -2653,31 +2655,31 @@ static int set_side_data(HEVCContext *s) } // Decrement the mastering display flag when IRAP frame has no_rasl_output_flag=1 // so the side data persists for the entire coded video sequence. - if (s->sei_content_light_present > 0 && + if (s->sei.content_light.present > 0 && IS_IRAP(s) && s->no_rasl_output_flag) { - s->sei_content_light_present--; + s->sei.content_light.present--; } - if (s->sei_content_light_present) { + if (s->sei.content_light.present) { AVContentLightMetadata *metadata = av_content_light_metadata_create_side_data(out); if (!metadata) return AVERROR(ENOMEM); - metadata->MaxCLL = s->max_content_light_level; - metadata->MaxFALL = s->max_pic_average_light_level; + metadata->MaxCLL = s->sei.content_light.max_content_light_level; + metadata->MaxFALL = s->sei.content_light.max_pic_average_light_level; av_log(s->avctx, AV_LOG_DEBUG, "Content Light Level Metadata:\n"); av_log(s->avctx, AV_LOG_DEBUG, "MaxCLL=%d, MaxFALL=%d\n", metadata->MaxCLL, metadata->MaxFALL); } - if (s->a53_caption) { + if (s->sei.a53_caption.a53_caption) { AVFrameSideData* sd = av_frame_new_side_data(out, AV_FRAME_DATA_A53_CC, - s->a53_caption_size); + s->sei.a53_caption.a53_caption_size); if (sd) - memcpy(sd->data, s->a53_caption, s->a53_caption_size); - av_freep(&s->a53_caption); - s->a53_caption_size = 0; + memcpy(sd->data, s->sei.a53_caption.a53_caption, s->sei.a53_caption.a53_caption_size); + av_freep(&s->sei.a53_caption.a53_caption); + s->sei.a53_caption.a53_caption_size = 0; s->avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS; } @@ -2772,7 +2774,7 @@ static int decode_nal_unit(HEVCContext *s, const H2645NAL *nal) break; case HEVC_NAL_SEI_PREFIX: case HEVC_NAL_SEI_SUFFIX: - ret = ff_hevc_decode_nal_sei(s); + ret = ff_hevc_decode_nal_sei(gb, s->avctx, &s->sei, &s->ps, s->nal_unit_type); if (ret < 0) goto fail; break; @@ -2966,7 +2968,7 @@ static int verify_md5(HEVCContext *s, AVFrame *frame) 
int h = (i == 1 || i == 2) ? (height >> desc->log2_chroma_h) : height; uint8_t md5[16]; - av_md5_init(s->md5_ctx); + av_md5_init(s->sei.picture_hash.md5_ctx); for (j = 0; j < h; j++) { const uint8_t *src = frame->data[i] + j * frame->linesize[i]; #if HAVE_BIGENDIAN @@ -2976,11 +2978,11 @@ static int verify_md5(HEVCContext *s, AVFrame *frame) src = s->checksum_buf; } #endif - av_md5_update(s->md5_ctx, src, w << pixel_shift); + av_md5_update(s->sei.picture_hash.md5_ctx, src, w << pixel_shift); } - av_md5_final(s->md5_ctx, md5); + av_md5_final(s->sei.picture_hash.md5_ctx, md5); - if (!memcmp(md5, s->md5[i], 16)) { + if (!memcmp(md5, s->sei.picture_hash.md5[i], 16)) { av_log (s->avctx, AV_LOG_DEBUG, "plane %d - correct ", i); print_md5(s->avctx, AV_LOG_DEBUG, md5); av_log (s->avctx, AV_LOG_DEBUG, "; "); @@ -2988,7 +2990,7 @@ static int verify_md5(HEVCContext *s, AVFrame *frame) av_log (s->avctx, AV_LOG_ERROR, "mismatching checksum of plane %d - ", i); print_md5(s->avctx, AV_LOG_ERROR, md5); av_log (s->avctx, AV_LOG_ERROR, " != "); - print_md5(s->avctx, AV_LOG_ERROR, s->md5[i]); + print_md5(s->avctx, AV_LOG_ERROR, s->sei.picture_hash.md5[i]); av_log (s->avctx, AV_LOG_ERROR, "\n"); return AVERROR_INVALIDDATA; } @@ -3061,7 +3063,7 @@ static int hevc_decode_frame(AVCodecContext *avctx, void *data, int *got_output, } else { /* verify the SEI checksum */ if (avctx->err_recognition & AV_EF_CRCCHECK && s->is_decoded && - s->is_md5) { + s->sei.picture_hash.is_md5) { ret = verify_md5(s, s->ref->frame); if (ret < 0 && avctx->err_recognition & AV_EF_EXPLODE) { ff_hevc_unref_frame(s, s->ref, ~0); @@ -3069,7 +3071,7 @@ static int hevc_decode_frame(AVCodecContext *avctx, void *data, int *got_output, } } } - s->is_md5 = 0; + s->sei.picture_hash.is_md5 = 0; if (s->is_decoded) { av_log(avctx, AV_LOG_DEBUG, "Decoded frame with POC %d.\n", s->poc); @@ -3132,7 +3134,7 @@ static av_cold int hevc_decode_free(AVCodecContext *avctx) pic_arrays_free(s); - av_freep(&s->md5_ctx); + 
av_freep(&s->sei.picture_hash.md5_ctx); av_freep(&s->cabac_state); @@ -3207,8 +3209,8 @@ static av_cold int hevc_init_context(AVCodecContext *avctx) s->max_ra = INT_MAX; - s->md5_ctx = av_md5_alloc(); - if (!s->md5_ctx) + s->sei.picture_hash.md5_ctx = av_md5_alloc(); + if (!s->sei.picture_hash.md5_ctx) goto fail; ff_bswapdsp_init(&s->bdsp); @@ -3216,7 +3218,7 @@ static av_cold int hevc_init_context(AVCodecContext *avctx) s->context_initialized = 1; s->eos = 0; - ff_hevc_reset_sei(s); + ff_hevc_reset_sei(&s->sei); return 0; @@ -3313,7 +3315,7 @@ static av_cold int hevc_decode_init(AVCodecContext *avctx) return ret; s->enable_parallel_tiles = 0; - s->picture_struct = 0; + s->sei.picture_timing.picture_struct = 0; s->eos = 1; atomic_init(&s->wpp_err, 0); |