
github.com/FFmpeg/FFmpeg.git
author     Clément Bœsch <u@pkh.me>    2017-03-25 14:10:13 +0300
committer  Clément Bœsch <u@pkh.me>    2017-03-27 22:38:21 +0300
commit     1c9f4b507888ac94c7d9f7a6ac9edfe6880fa821 (patch)
tree       915625969a0bb69ef30124675632f97456039f03 /libavcodec/vp9.h
parent     487ca38e8bc416239f49b9b7768814fa7be82b5f (diff)
lavc/vp9: split into vp9{block,data,mvs}
This is following Libav layout to ease merges.
Diffstat (limited to 'libavcodec/vp9.h')
 -rw-r--r--  libavcodec/vp9.h  280
 1 file changed, 280 insertions(+), 0 deletions(-)
diff --git a/libavcodec/vp9.h b/libavcodec/vp9.h
index 89b1bd3e4c..0097385e1c 100644
--- a/libavcodec/vp9.h
+++ b/libavcodec/vp9.h
@@ -24,8 +24,13 @@
#ifndef AVCODEC_VP9_H
#define AVCODEC_VP9_H
+#include <stddef.h>
#include <stdint.h>
+#include "libavutil/buffer.h"
+#include "libavutil/internal.h"
+
+#include "avcodec.h"
#include "thread.h"
#include "vp56.h"
@@ -118,11 +123,143 @@ enum CompPredMode {
PRED_SWITCHABLE,
};
+enum MVJoint {
+ MV_JOINT_ZERO,
+ MV_JOINT_H,
+ MV_JOINT_V,
+ MV_JOINT_HV,
+};
+
+typedef struct ProbContext {
+ uint8_t y_mode[4][9];
+ uint8_t uv_mode[10][9];
+ uint8_t filter[4][2];
+ uint8_t mv_mode[7][3];
+ uint8_t intra[4];
+ uint8_t comp[5];
+ uint8_t single_ref[5][2];
+ uint8_t comp_ref[5];
+ uint8_t tx32p[2][3];
+ uint8_t tx16p[2][2];
+ uint8_t tx8p[2];
+ uint8_t skip[3];
+ uint8_t mv_joint[3];
+ struct {
+ uint8_t sign;
+ uint8_t classes[10];
+ uint8_t class0;
+ uint8_t bits[10];
+ uint8_t class0_fp[2][3];
+ uint8_t fp[3];
+ uint8_t class0_hp;
+ uint8_t hp;
+ } mv_comp[2];
+ uint8_t partition[4][4][3];
+} ProbContext;
+
+typedef void (*vp9_mc_func)(uint8_t *dst, ptrdiff_t dst_stride,
+ const uint8_t *ref, ptrdiff_t ref_stride,
+ int h, int mx, int my);
+typedef void (*vp9_scaled_mc_func)(uint8_t *dst, ptrdiff_t dst_stride,
+ const uint8_t *ref, ptrdiff_t ref_stride,
+ int h, int mx, int my, int dx, int dy);
+
+typedef struct VP9DSPContext {
+ /*
+ * dimension 1: 0=4x4, 1=8x8, 2=16x16, 3=32x32
+ * dimension 2: intra prediction modes
+ *
+ * dst/left/top is aligned by transform-size (i.e. 4, 8, 16 or 32 pixels)
+ * stride is aligned by 16 pixels
+ * top[-1] is top/left; top[4,7] is top-right for 4x4
+ */
+ // FIXME(rbultje) maybe replace left/top pointers with HAVE_TOP/
+ // HAVE_LEFT/HAVE_TOPRIGHT flags instead, and then handle it in-place?
+ // also needs to fit in with what H.264/VP8/etc do
+ void (*intra_pred[N_TXFM_SIZES][N_INTRA_PRED_MODES])(uint8_t *dst,
+ ptrdiff_t stride,
+ const uint8_t *left,
+ const uint8_t *top);
+
+ /*
+ * dimension 1: 0=4x4, 1=8x8, 2=16x16, 3=32x32, 4=lossless (3-4=dct only)
+ * dimension 2: 0=dct/dct, 1=dct/adst, 2=adst/dct, 3=adst/adst
+ *
+ * dst is aligned by transform-size (i.e. 4, 8, 16 or 32 pixels)
+ * stride is aligned by 16 pixels
+ * block is 16-byte aligned
+ * eob indicates the position (+1) of the last non-zero coefficient,
+ * in scan-order. This can be used to write faster versions, e.g. a
+ * dc-only 4x4/8x8/16x16/32x32, or a 4x4-only (eob<10) 8x8/16x16/32x32,
+ * etc.
+ */
+ // FIXME also write idct_add_block() versions for whole (inter) pred
+ // blocks, so we can do 2 4x4s at once
+ void (*itxfm_add[N_TXFM_SIZES + 1][N_TXFM_TYPES])(uint8_t *dst,
+ ptrdiff_t stride,
+ int16_t *block, int eob);
+
+ /*
+ * dimension 1: width of filter (0=4, 1=8, 2=16)
+ * dimension 2: 0=col-edge filter (h), 1=row-edge filter (v)
+ *
+ * dst/stride are aligned by 8
+ */
+ void (*loop_filter_8[3][2])(uint8_t *dst, ptrdiff_t stride,
+ int mb_lim, int lim, int hev_thr);
+
+ /*
+ * dimension 1: 0=col-edge filter (h), 1=row-edge filter (v)
+ *
+ * The width of filter is assumed to be 16; dst/stride are aligned by 16
+ */
+ void (*loop_filter_16[2])(uint8_t *dst, ptrdiff_t stride,
+ int mb_lim, int lim, int hev_thr);
+
+ /*
+ * dimension 1/2: width of filter (0=4, 1=8) for each filter half
+ * dimension 3: 0=col-edge filter (h), 1=row-edge filter (v)
+ *
+ * dst/stride are aligned by operation size
+ * this basically calls loop_filter[d1][d3][0](), followed by
+ * loop_filter[d2][d3][0]() on the next 8 pixels
+ * mb_lim/lim/hev_thr contain two values in the lowest two bytes of the
+ * integer.
+ */
+ // FIXME perhaps a mix4 that operates on 32px (for AVX2)
+ void (*loop_filter_mix2[2][2][2])(uint8_t *dst, ptrdiff_t stride,
+ int mb_lim, int lim, int hev_thr);
+
+ /*
+ * dimension 1: hsize (0: 64, 1: 32, 2: 16, 3: 8, 4: 4)
+ * dimension 2: filter type (0: smooth, 1: regular, 2: sharp, 3: bilin)
+ * dimension 3: averaging type (0: put, 1: avg)
+ * dimension 4: x subpel interpolation (0: none, 1: 8tap/bilin)
+ * dimension 5: y subpel interpolation (0: none, 1: 8tap/bilin)
+ *
+ * dst/stride are aligned by hsize
+ */
+ vp9_mc_func mc[5][4][2][2][2];
+
+ /*
+ * for scalable MC, first 3 dimensions identical to above, the other two
+ * don't exist since it changes per stepsize.
+ */
+ vp9_scaled_mc_func smc[5][4][2];
+} VP9DSPContext;
+
+
struct VP9mvrefPair {
VP56mv mv[2];
int8_t ref[2];
};
+struct VP9Filter {
+ uint8_t level[8 * 8];
+ uint8_t /* bit=col */ mask[2 /* 0=y, 1=uv */][2 /* 0=col, 1=row */]
+ [8 /* rows */][4 /* 0=16, 1=8, 2=4, 3=inner4 */];
+};
+
typedef struct VP9Frame {
ThreadFrame tf;
AVBufferRef *extradata;
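
The comments in the VP9DSPContext added by the hunk above describe how its function tables are indexed, but no call site appears in this header. The following is a minimal illustrative sketch, not code from this commit: the helper names and variable names are assumptions, the include is assumed, and only the indexing scheme (mc by half-size/filter/put-avg/subpel flags, itxfm_add forwarding the eob hint so implementations can take a dc-only path) reflects the comments above. TX_8X8 and DCT_DCT are enum values already defined in this header.

#include "vp9.h"

/* Illustrative only (not part of this commit): index the mc table as
 * described in the comments above. All names here are assumptions. */
static void example_mc_call(const VP9DSPContext *dsp,
                            uint8_t *dst, ptrdiff_t dst_stride,
                            const uint8_t *ref, ptrdiff_t ref_stride,
                            int hsize,  /* 0:64 1:32 2:16 3:8 4:4             */
                            int filter, /* 0:smooth 1:regular 2:sharp 3:bilin */
                            int avg,    /* 0:put 1:avg                        */
                            int h, int mx, int my)
{
    /* dimensions 4 and 5 select a subpel path only when an offset exists */
    dsp->mc[hsize][filter][avg][!!mx][!!my](dst, dst_stride,
                                            ref, ref_stride, h, mx, my);
}

/* Illustrative only: forward the eob hint so the DSP function can branch
 * to a dc-only (eob == 1) or otherwise sparse fast path internally. */
static void example_itxfm_call(const VP9DSPContext *dsp,
                               uint8_t *dst, ptrdiff_t stride,
                               int16_t *block, int eob)
{
    dsp->itxfm_add[TX_8X8][DCT_DCT](dst, stride, block, eob);
}
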
@@ -209,4 +346,147 @@ typedef struct VP9SharedContext {
VP9Frame frames[3];
} VP9SharedContext;
+typedef struct VP9Block {
+ uint8_t seg_id, intra, comp, ref[2], mode[4], uvmode, skip;
+ enum FilterMode filter;
+ VP56mv mv[4 /* b_idx */][2 /* ref */];
+ enum BlockSize bs;
+ enum TxfmMode tx, uvtx;
+ enum BlockLevel bl;
+ enum BlockPartition bp;
+} VP9Block;
+
+typedef struct VP9Context {
+ VP9SharedContext s;
+
+ VP9DSPContext dsp;
+ VideoDSPContext vdsp;
+ GetBitContext gb;
+ VP56RangeCoder c;
+ VP56RangeCoder *c_b;
+ unsigned c_b_size;
+ VP9Block *b_base, *b;
+ int pass;
+ int row, row7, col, col7;
+ uint8_t *dst[3];
+ ptrdiff_t y_stride, uv_stride;
+
+ uint8_t ss_h, ss_v;
+ uint8_t last_bpp, bpp_index, bytesperpixel;
+ uint8_t last_keyframe;
+ // sb_cols/rows, rows/cols and last_fmt are used for allocating all internal
+ // arrays, and are thus per-thread. w/h and gf_fmt are synced between threads
+ // and are therefore per-stream. pix_fmt represents the value in the header
+ // of the currently processed frame.
+ int w, h;
+ enum AVPixelFormat pix_fmt, last_fmt, gf_fmt;
+ unsigned sb_cols, sb_rows, rows, cols;
+ ThreadFrame next_refs[8];
+
+ struct {
+ uint8_t lim_lut[64];
+ uint8_t mblim_lut[64];
+ } filter_lut;
+ unsigned tile_row_start, tile_row_end, tile_col_start, tile_col_end;
+ struct {
+ ProbContext p;
+ uint8_t coef[4][2][2][6][6][3];
+ } prob_ctx[4];
+ struct {
+ ProbContext p;
+ uint8_t coef[4][2][2][6][6][11];
+ } prob;
+ struct {
+ unsigned y_mode[4][10];
+ unsigned uv_mode[10][10];
+ unsigned filter[4][3];
+ unsigned mv_mode[7][4];
+ unsigned intra[4][2];
+ unsigned comp[5][2];
+ unsigned single_ref[5][2][2];
+ unsigned comp_ref[5][2];
+ unsigned tx32p[2][4];
+ unsigned tx16p[2][3];
+ unsigned tx8p[2][2];
+ unsigned skip[3][2];
+ unsigned mv_joint[4];
+ struct {
+ unsigned sign[2];
+ unsigned classes[11];
+ unsigned class0[2];
+ unsigned bits[10][2];
+ unsigned class0_fp[2][4];
+ unsigned fp[4];
+ unsigned class0_hp[2];
+ unsigned hp[2];
+ } mv_comp[2];
+ unsigned partition[4][4][4];
+ unsigned coef[4][2][2][6][6][3];
+ unsigned eob[4][2][2][6][6][2];
+ } counts;
+
+ // contextual (left/above) cache
+ DECLARE_ALIGNED(16, uint8_t, left_y_nnz_ctx)[16];
+ DECLARE_ALIGNED(16, uint8_t, left_mode_ctx)[16];
+ DECLARE_ALIGNED(16, VP56mv, left_mv_ctx)[16][2];
+ DECLARE_ALIGNED(16, uint8_t, left_uv_nnz_ctx)[2][16];
+ DECLARE_ALIGNED(8, uint8_t, left_partition_ctx)[8];
+ DECLARE_ALIGNED(8, uint8_t, left_skip_ctx)[8];
+ DECLARE_ALIGNED(8, uint8_t, left_txfm_ctx)[8];
+ DECLARE_ALIGNED(8, uint8_t, left_segpred_ctx)[8];
+ DECLARE_ALIGNED(8, uint8_t, left_intra_ctx)[8];
+ DECLARE_ALIGNED(8, uint8_t, left_comp_ctx)[8];
+ DECLARE_ALIGNED(8, uint8_t, left_ref_ctx)[8];
+ DECLARE_ALIGNED(8, uint8_t, left_filter_ctx)[8];
+ uint8_t *above_partition_ctx;
+ uint8_t *above_mode_ctx;
+ // FIXME maybe merge some of the below in a flags field?
+ uint8_t *above_y_nnz_ctx;
+ uint8_t *above_uv_nnz_ctx[2];
+ uint8_t *above_skip_ctx; // 1bit
+ uint8_t *above_txfm_ctx; // 2bit
+ uint8_t *above_segpred_ctx; // 1bit
+ uint8_t *above_intra_ctx; // 1bit
+ uint8_t *above_comp_ctx; // 1bit
+ uint8_t *above_ref_ctx; // 2bit
+ uint8_t *above_filter_ctx;
+ VP56mv (*above_mv_ctx)[2];
+
+ // whole-frame cache
+ uint8_t *intra_pred_data[3];
+ struct VP9Filter *lflvl;
+ DECLARE_ALIGNED(32, uint8_t, edge_emu_buffer)[135 * 144 * 2];
+
+ // block reconstruction intermediates
+ int block_alloc_using_2pass;
+ int16_t *block_base, *block, *uvblock_base[2], *uvblock[2];
+ uint8_t *eob_base, *uveob_base[2], *eob, *uveob[2];
+ struct { int x, y; } min_mv, max_mv;
+ DECLARE_ALIGNED(32, uint8_t, tmp_y)[64 * 64 * 2];
+ DECLARE_ALIGNED(32, uint8_t, tmp_uv)[2][64 * 64 * 2];
+ uint16_t mvscale[3][2];
+ uint8_t mvstep[3][2];
+} VP9Context;
+
+extern const int16_t ff_vp9_subpel_filters[3][16][8];
+
+void ff_vp9dsp_init(VP9DSPContext *dsp, int bpp, int bitexact);
+
+void ff_vp9dsp_init_8(VP9DSPContext *dsp);
+void ff_vp9dsp_init_10(VP9DSPContext *dsp);
+void ff_vp9dsp_init_12(VP9DSPContext *dsp);
+
+void ff_vp9dsp_init_aarch64(VP9DSPContext *dsp, int bpp);
+void ff_vp9dsp_init_arm(VP9DSPContext *dsp, int bpp);
+void ff_vp9dsp_init_x86(VP9DSPContext *dsp, int bpp, int bitexact);
+void ff_vp9dsp_init_mips(VP9DSPContext *dsp, int bpp);
+
+void ff_vp9_fill_mv(VP9Context *s, VP56mv *mv, int mode, int sb);
+
+void ff_vp9_adapt_probs(VP9Context *s);
+
+void ff_vp9_decode_block(AVCodecContext *ctx, int row, int col,
+ struct VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff,
+ enum BlockLevel bl, enum BlockPartition bp);
+
#endif /* AVCODEC_VP9_H */
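
As a usage note for the prototypes declared above, here is a minimal sketch, not taken from this commit, of how a caller might (re)initialize the DSP contexts when the coded bit depth changes. The helper name update_dsp and its AVCodecContext plumbing are assumptions; ff_vp9dsp_init(), ff_videodsp_init(), and the s->dsp, s->vdsp and s->last_bpp fields come from the declarations in this header.

#include "vp9.h"
#include "videodsp.h"

/* Sketch only: re-select per-bit-depth function pointers when bpp changes.
 * The helper name and its call site are assumptions, not part of this commit. */
static void update_dsp(VP9Context *s, AVCodecContext *avctx, int bpp)
{
    if (bpp != s->last_bpp) {
        ff_vp9dsp_init(&s->dsp, bpp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
        ff_videodsp_init(&s->vdsp, bpp);
        s->last_bpp = bpp;
    }
}
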