github.com/mpc-hc/mpc-hc.git

author    povaddict <povaddict@users.sourceforge.net>  2010-02-10 02:16:44 +0300
committer povaddict <povaddict@users.sourceforge.net>  2010-02-10 02:16:44 +0300
commit    726a91b12a7524e45e7a901c9e4883af5b1bffe6 (patch)
tree      f5d25e3b2e84c92f4901280c73d5d3d7e6c3cd19 /src/filters/transform/MPCVideoDec/ffmpeg/libavcodec
parent    02183f6e47ad4ea1057de9950482f291f2ae4290 (diff)
Rename several directories to use MixedCase instead of lowercase.
They now mostly match the case used in #includes, and they're consistent with the names of the .h files they contain.

git-svn-id: https://mpc-hc.svn.sourceforge.net/svnroot/mpc-hc/trunk@1648 10f7b99b-c216-0410-bff0-8a66a9350fd8
Diffstat (limited to 'src/filters/transform/MPCVideoDec/ffmpeg/libavcodec')
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/CompilatorVersion.c  42
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/aac_ac3_parser.c  94
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/aac_ac3_parser.h  65
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3.c  281
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3.h  187
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3_parser.c  207
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3_parser.h  52
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3dec.c  1461
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3dec.h  206
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3dec_data.c  111
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3dec_data.h  35
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3tab.c  305
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3tab.h  62
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/adpcm.c  881
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/allcodecs.c  181
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/amr_float/interf_dec.c  880
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/amr_float/interf_dec.h  55
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/amr_float/interf_enc.h  58
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/amr_float/interf_rom.h  1802
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/amr_float/rom_dec.h  13311
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/amr_float/sp_dec.c  6167
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/amr_float/sp_dec.h  81
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/amr_float/sp_enc.h  74
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/amr_float/typedef.h  22
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/audioconvert.h  115
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/avcodec.h  3117
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/bitstream.c  294
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/bytestream.h  71
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/cabac.c  168
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/cabac.h  588
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/cook.c  1239
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/cookdata.h  563
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/corepng.c  195
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/cscd.c  264
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/cyuv.c  191
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/dsputil.c  4775
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/dsputil.h  975
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/dxva.h  1963
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/eac3dec.c  606
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/eac3dec_data.c  1095
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/eac3dec_data.h  35
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/error_resilience.c  1043
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/eval.h  96
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/faandct.c  232
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/faandct.h  39
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/faanidct.c  168
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/faanidct.h  32
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/fft.c  359
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/flv.h  30
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/flvdec.c  135
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/get_bits.h  712
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/golomb.c  173
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/golomb.h  541
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h261.c  54
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h261.h  51
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h261_parser.c  90
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h261data.h  164
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h261dec.c  656
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h263.c  385
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h263.h  246
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h263_parser.c  62
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h263_parser.h  29
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h263data.h  299
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h263dec.c  717
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264.c  3280
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264.h  1540
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_cabac.c  1788
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_cavlc.c  1037
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_direct.c  465
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_dxva.c  1102
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_loopfilter.c  751
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_mvpred.h  235
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_parser.c  234
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_parser.h  39
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_ps.c  544
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_refs.c  700
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_sei.c  206
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264data.h  262
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264idct.c  218
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264pred.c  1175
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264pred.h  88
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/huffman.c  109
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/huffman.h  42
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/imgconvert.c  1112
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/imgconvert.h  38
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/intelh263dec.c  133
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/internal.h  36
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/intrax8.c  789
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/intrax8.h  57
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/intrax8dsp.c  432
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/intrax8huf.h  918
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ituh263dec.c  1131
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/jpegls.c  89
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/jpegls.h  111
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/jpeglsdec.c  381
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/jpeglsdec.h  41
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/jrevdct.c  1157
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/libamr.c  186
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mathops.h  129
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mdct.c  235
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mdct_tablegen.c  61
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mdct_tablegen.h  59
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mjpeg.c  145
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mjpeg.h  155
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mjpegdec.c  1494
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mjpegdec.h  120
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mjpegenc.h  60
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mlp.c  115
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mlp.h  127
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mlp_parser.c  301
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mlp_parser.h  59
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mlpdec.c  1171
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mlpdsp.c  67
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpc_helper.c  93
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg12.c  2527
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg12.h  61
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg12data.c  366
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg12data.h  56
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg12decdata.h  124
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg4data.h  376
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg4video.c  171
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg4video.h  197
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg4video_parser.c  63
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg4video_parser.h  34
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg4videodec.c  2253
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpegvideo.c  2012
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpegvideo.h  821
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpegvideo_common.h  901
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/msmpeg4.c  1990
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/msmpeg4.h  62
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/msmpeg4data.c  2001
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/msmpeg4data.h  86
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/nellymoser.c  226
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/nellymoser.h  57
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/nellymoserdec.c  218
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/options.c  115
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/parser.c  343
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/parser.h  71
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/put_bits.h  343
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rangecoder.h  141
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ratecontrol.h  106
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rectangle.h  124
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rl.h  86
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv10.c  739
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv30.c  284
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv30data.h  181
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv30dsp.c  291
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv34.c  1535
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv34.h  130
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv34data.h  148
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv34vlc.h  4054
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv40.c  684
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv40data.h  115
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv40dsp.c  353
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv40vlc2.h  706
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/simple_idct.c  582
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/simple_idct.h  46
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/sp5x.h  334
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/sp5xdec.c  222
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/svq1.c  44
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/svq1.h  62
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/svq1_cb.h  1525
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/svq1_vlc.h  283
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/svq1dec.c  848
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/svq3.c  1084
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/tableprint.c  76
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/tableprint.h  60
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/unary.h  56
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/utils.c  825
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vc1.c  1031
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vc1.h  372
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vc1_dxva.c  103
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vc1acdata.h  592
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vc1data.c  645
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vc1data.h  157
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vc1dec.c  3331
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vc1dsp.c  666
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp3.c  2436
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp3data.h  3181
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp3dsp.c  255
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp5.c  302
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp56.c  699
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp56.h  270
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp56data.c  66
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp56data.h  252
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp5data.h  175
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp6.c  662
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp6data.h  308
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp6dsp.c  59
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/w32thread.c  168
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/wmv2.c  159
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/wmv2.h  58
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/wmv2dec.c  502
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/cpuid.c  125
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/dsputil_h264_template_mmx.c  304
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/dsputil_h264_template_ssse3.c  208
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/dsputil_mmx.c  2938
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/dsputil_mmx.h  159
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/dsputil_mmx_avg_template.c  896
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/dsputil_mmx_qns_template.c  101
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/dsputil_mmx_rnd_template.c  590
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/dsputil_yasm.asm  423
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/fdct_mmx.c  580
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/fft.c  48
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/fft.h  36
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/fft_3dn.c  23
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/fft_3dn2.c  174
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/fft_mmx.asm  475
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/fft_sse.c  203
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/h264_deblock_sse2.asm  759
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/h264_i386.h  153
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/h264_idct_sse2.asm  54
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/h264dsp_mmx.c  2325
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/idct_mmx.c  605
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/idct_mmx_xvid.c  525
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/idct_sse2_xvid.c  395
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/idct_xvid.h  39
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/mathops.h  81
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/mlpdsp.c  181
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/mmx.h  267
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/mpegvideo_mmx.c  660
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/mpegvideo_mmx_template.c  376
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/rv40dsp_mmx.c  61
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/simple_idct_mmx.c  1296
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/vc1dsp_mmx.c  744
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/vp3dsp_mmx.c  396
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/vp3dsp_mmx.h  35
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/vp3dsp_sse2.c  186
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/vp3dsp_sse2.h  31
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/vp6dsp_mmx.c  108
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/vp6dsp_mmx.h  30
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/vp6dsp_sse2.c  98
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/vp6dsp_sse2.h  30
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/x86inc.asm  625
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/x86util.asm  515
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/xiph.c  63
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/xiph.h  43
237 files changed, 139615 insertions, 0 deletions
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/CompilatorVersion.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/CompilatorVersion.c
new file mode 100644
index 000000000..cbf3c9c3f
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/CompilatorVersion.c
@@ -0,0 +1,42 @@
+/*
+ * This file is part of Media Player Classic HomeCinema.
+ *
+ * MPC-HC is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * MPC-HC is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifdef _MSC_VER
+ #if (_MSC_VER == 1500)
+ #if (_MSC_FULL_VER >= 150030729)
+ char FfmpegCompiler[] = "MSVC 2008 SP1";
+ #else
+ char FfmpegCompiler[] = "MSVC 2008";
+ #endif
+ #elif (_MSC_VER < 1500)
+ #error Compiler is not supported!
+ #endif
+
+ char* GetFfmpegCompiler()
+ {
+ return FfmpegCompiler;
+ }
+#else // _MSC_VER
+ #include <stdio.h>
+ static char g_Gcc_Compiler[20];
+ char* GetFfmpegCompiler()
+ {
+ sprintf (g_Gcc_Compiler, "GCC %d.%d.%d", __GNUC__, __GNUC_MINOR__, __GNUC_PATCHLEVEL__);
+ return g_Gcc_Compiler;
+ }
+#endif //_MSC_VER
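
GetFfmpegCompiler() simply hands back a static string identifying the compiler the ffmpeg portion was built with. A minimal usage sketch (the main() below is illustrative, not part of this commit; it only assumes the translation unit above is linked in):

#include <stdio.h>

char* GetFfmpegCompiler(void);   /* provided by CompilatorVersion.c */

int main(void)
{
    /* prints e.g. "MSVC 2008 SP1" or "GCC 4.4.1", depending on the build */
    printf("ffmpeg compiled with: %s\n", GetFfmpegCompiler());
    return 0;
}
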
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/aac_ac3_parser.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/aac_ac3_parser.c
new file mode 100644
index 000000000..839608875
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/aac_ac3_parser.c
@@ -0,0 +1,94 @@
+/*
+ * Common AAC and AC-3 parser
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2003 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "parser.h"
+#include "aac_ac3_parser.h"
+
+int ff_aac_ac3_parse(AVCodecParserContext *s1,
+ AVCodecContext *avctx,
+ const uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size)
+{
+ AACAC3ParseContext *s = s1->priv_data;
+ ParseContext *pc = &s->pc;
+ int len, i;
+ int new_frame_start;
+
+get_next:
+ i=END_NOT_FOUND;
+ if(s->remaining_size <= buf_size){
+ if(s->remaining_size && !s->need_next_header){
+ i= s->remaining_size;
+ s->remaining_size = 0;
+ }else{ //we need a header first
+ len=0;
+ for(i=s->remaining_size; i<buf_size; i++){
+ s->state = (s->state<<8) + buf[i];
+ if((len=s->sync(s->state, s, &s->need_next_header, &new_frame_start)))
+ break;
+ }
+ if(len<=0){
+ i=END_NOT_FOUND;
+ }else{
+ s->state=0;
+ i-= s->header_size -1;
+ s->remaining_size = len;
+ if(!new_frame_start || pc->index+i<=0){
+ s->remaining_size += i;
+ goto get_next;
+ }
+ }
+ }
+ }
+
+ if(ff_combine_frame(pc, i, &buf, &buf_size)<0){
+ s->remaining_size -= FFMIN(s->remaining_size, buf_size);
+ *poutbuf = NULL;
+ *poutbuf_size = 0;
+ return buf_size;
+ }
+
+ *poutbuf = buf;
+ *poutbuf_size = buf_size;
+
+ /* update codec info */
+ avctx->sample_rate = s->sample_rate;
+ if(s->codec_id)
+ avctx->codec_id = s->codec_id;
+
+ /* allow downmixing to stereo (or mono for AC-3) */
+ if(avctx->request_channels > 0 &&
+ avctx->request_channels < s->channels &&
+ (avctx->request_channels <= 2 ||
+ (avctx->request_channels == 1 &&
+ (avctx->codec_id == CODEC_ID_AC3 ||
+ avctx->codec_id == CODEC_ID_EAC3)))) {
+ avctx->channels = avctx->request_channels;
+ } else if (avctx->codec_id != CODEC_ID_AAC || s->channels) {
+ avctx->channels = s->channels;
+ avctx->channel_layout = s->channel_layout;
+ }
+ avctx->bit_rate = s->bit_rate;
+ avctx->frame_size = s->samples;
+
+ return i;
+}
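
The loop above finds a frame boundary by shifting one byte at a time into the 64-bit s->state window and asking the codec-specific sync() callback whether the newest header_size bytes form a valid header. The standalone sketch below strips that down to matching the 16-bit AC-3 sync word 0x0B77 in the same kind of sliding window; find_sync_words() is a made-up helper for illustration only, and a real parser (see ac3_sync() in ac3_parser.c further down) validates the full header rather than just the sync word.

#include <stdint.h>
#include <stdio.h>

static void find_sync_words(const uint8_t *buf, int buf_size)
{
    uint64_t state = 0;
    int i;

    for (i = 0; i < buf_size; i++) {
        state = (state << 8) | buf[i];   /* same sliding-window update as ff_aac_ac3_parse() */
        /* For AC-3 the header is the last 7 bytes of the window, so the
         * 16-bit sync word sits in bits 55..40 of state. */
        if (((state >> 40) & 0xFFFF) == 0x0B77)
            printf("candidate 7-byte header starts at offset %d\n", i - 6);
    }
}

int main(void)
{
    /* toy input with the AC-3 sync word 0x0B77 placed at offset 2 */
    const uint8_t buf[] = { 0x00, 0x11, 0x0B, 0x77, 0x50, 0x3D, 0xE0, 0x63, 0x04, 0x40 };
    find_sync_words(buf, (int)sizeof(buf));
    return 0;
}
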
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/aac_ac3_parser.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/aac_ac3_parser.h
new file mode 100644
index 000000000..b19d3d222
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/aac_ac3_parser.h
@@ -0,0 +1,65 @@
+/*
+ * Common AAC and AC-3 parser prototypes
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2003 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_AAC_AC3_PARSER_H
+#define AVCODEC_AAC_AC3_PARSER_H
+
+#include <stdint.h>
+#include "avcodec.h"
+#include "parser.h"
+
+typedef enum {
+ AAC_AC3_PARSE_ERROR_SYNC = -1,
+ AAC_AC3_PARSE_ERROR_BSID = -2,
+ AAC_AC3_PARSE_ERROR_SAMPLE_RATE = -3,
+ AAC_AC3_PARSE_ERROR_FRAME_SIZE = -4,
+ AAC_AC3_PARSE_ERROR_FRAME_TYPE = -5,
+ AAC_AC3_PARSE_ERROR_CRC = -6,
+ AAC_AC3_PARSE_ERROR_CHANNEL_CFG = -7,
+} AACAC3ParseError;
+
+typedef struct AACAC3ParseContext {
+ ParseContext pc;
+ int frame_size;
+ int header_size;
+ int (*sync)(uint64_t state, struct AACAC3ParseContext *hdr_info,
+ int *need_next_header, int *new_frame_start);
+
+ int channels;
+ int sample_rate;
+ int bit_rate;
+ int samples;
+ int64_t channel_layout;
+
+ int remaining_size;
+ uint64_t state;
+
+ int need_next_header;
+ enum CodecID codec_id;
+} AACAC3ParseContext;
+
+int ff_aac_ac3_parse(AVCodecParserContext *s1,
+ AVCodecContext *avctx,
+ const uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size);
+
+#endif /* AVCODEC_AAC_AC3_PARSER_H */
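
The negative AACAC3ParseError codes above are what ff_ac3_parse_header() reports on failure (its documentation in ac3_parser.h, further down, spells out the first four). A small sketch of turning them into messages; describe_parse_error() is a hypothetical helper, not part of libavcodec, and the literals simply mirror the enum values:

#include <stdio.h>

static const char *describe_parse_error(int err)
{
    switch (err) {
    case -1: return "sync word mismatch";            /* AAC_AC3_PARSE_ERROR_SYNC        */
    case -2: return "invalid bsid (version)";        /* AAC_AC3_PARSE_ERROR_BSID        */
    case -3: return "invalid sample rate code";      /* AAC_AC3_PARSE_ERROR_SAMPLE_RATE */
    case -4: return "invalid frame size";            /* AAC_AC3_PARSE_ERROR_FRAME_SIZE  */
    case -5: return "reserved/invalid frame type";   /* AAC_AC3_PARSE_ERROR_FRAME_TYPE  */
    case -6: return "CRC mismatch";                  /* AAC_AC3_PARSE_ERROR_CRC         */
    case -7: return "invalid channel configuration"; /* AAC_AC3_PARSE_ERROR_CHANNEL_CFG */
    default: return err < 0 ? "unknown error" : "no error";
    }
}

int main(void)
{
    printf("%s\n", describe_parse_error(-2));
    return 0;
}
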
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3.c
new file mode 100644
index 000000000..255ec9081
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3.c
@@ -0,0 +1,281 @@
+/*
+ * Common code between the AC-3 encoder and decoder
+ * Copyright (c) 2000 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/ac3.c
+ * Common code between the AC-3 encoder and decoder.
+ */
+
+#include "avcodec.h"
+#include "ac3.h"
+#include "get_bits.h"
+
+#if CONFIG_HARDCODED_TABLES
+
+/**
+ * Starting frequency coefficient bin for each critical band.
+ */
+static const uint8_t band_start_tab[51] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 31,
+ 34, 37, 40, 43, 46, 49, 55, 61, 67, 73,
+ 79, 85, 97, 109, 121, 133, 157, 181, 205, 229, 253
+};
+
+/**
+ * Maps each frequency coefficient bin to the critical band that contains it.
+ */
+static const uint8_t bin_to_band_tab[253] = {
+ 0,
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 28, 28, 29, 29, 29, 30, 30, 30,
+ 31, 31, 31, 32, 32, 32, 33, 33, 33, 34, 34, 34,
+ 35, 35, 35, 35, 35, 35, 36, 36, 36, 36, 36, 36,
+ 37, 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, 38,
+ 39, 39, 39, 39, 39, 39, 40, 40, 40, 40, 40, 40,
+ 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
+ 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
+ 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43,
+ 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44,
+ 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45,
+ 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45,
+ 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46,
+ 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46,
+ 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
+ 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
+ 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
+ 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
+ 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49
+};
+
+#else /* CONFIG_HARDCODED_TABLES */
+static uint8_t band_start_tab[51];
+static uint8_t bin_to_band_tab[253];
+#endif
+
+static inline int calc_lowcomp1(int a, int b0, int b1, int c)
+{
+ if ((b0 + 256) == b1) {
+ a = c;
+ } else if (b0 > b1) {
+ a = FFMAX(a - 64, 0);
+ }
+ return a;
+}
+
+static inline int calc_lowcomp(int a, int b0, int b1, int bin)
+{
+ if (bin < 7) {
+ return calc_lowcomp1(a, b0, b1, 384);
+ } else if (bin < 20) {
+ return calc_lowcomp1(a, b0, b1, 320);
+ } else {
+ return FFMAX(a - 128, 0);
+ }
+}
+
+void ff_ac3_bit_alloc_calc_psd(int8_t *exp, int start, int end, int16_t *psd,
+ int16_t *band_psd)
+{
+ int bin, band;
+
+ /* exponent mapping to PSD */
+ for (bin = start; bin < end; bin++) {
+ psd[bin]=(3072 - (exp[bin] << 7));
+ }
+
+ /* PSD integration */
+ bin = start;
+ band = bin_to_band_tab[start];
+ do {
+ int v = psd[bin++];
+ int band_end = FFMIN(band_start_tab[band+1], end);
+ for (; bin < band_end; bin++) {
+ int max = FFMAX(v, psd[bin]);
+ /* logadd */
+ int adr = FFMIN(max - ((v + psd[bin] + 1) >> 1), 255);
+ v = max + ff_ac3_log_add_tab[adr];
+ }
+ band_psd[band++] = v;
+ } while (end > band_start_tab[band]);
+}
+
+int ff_ac3_bit_alloc_calc_mask(AC3BitAllocParameters *s, int16_t *band_psd,
+ int start, int end, int fast_gain, int is_lfe,
+ int dba_mode, int dba_nsegs, uint8_t *dba_offsets,
+ uint8_t *dba_lengths, uint8_t *dba_values,
+ int16_t *mask)
+{
+ int16_t excite[50]; /* excitation */
+ int band;
+ int band_start, band_end, begin, end1;
+ int lowcomp, fastleak, slowleak;
+
+ /* excitation function */
+ band_start = bin_to_band_tab[start];
+ band_end = bin_to_band_tab[end-1] + 1;
+
+ if (band_start == 0) {
+ lowcomp = 0;
+ lowcomp = calc_lowcomp1(lowcomp, band_psd[0], band_psd[1], 384);
+ excite[0] = band_psd[0] - fast_gain - lowcomp;
+ lowcomp = calc_lowcomp1(lowcomp, band_psd[1], band_psd[2], 384);
+ excite[1] = band_psd[1] - fast_gain - lowcomp;
+ begin = 7;
+ for (band = 2; band < 7; band++) {
+ if (!(is_lfe && band == 6))
+ lowcomp = calc_lowcomp1(lowcomp, band_psd[band], band_psd[band+1], 384);
+ fastleak = band_psd[band] - fast_gain;
+ slowleak = band_psd[band] - s->slow_gain;
+ excite[band] = fastleak - lowcomp;
+ if (!(is_lfe && band == 6)) {
+ if (band_psd[band] <= band_psd[band+1]) {
+ begin = band + 1;
+ break;
+ }
+ }
+ }
+
+ end1 = FFMIN(band_end, 22);
+ for (band = begin; band < end1; band++) {
+ if (!(is_lfe && band == 6))
+ lowcomp = calc_lowcomp(lowcomp, band_psd[band], band_psd[band+1], band);
+ fastleak = FFMAX(fastleak - s->fast_decay, band_psd[band] - fast_gain);
+ slowleak = FFMAX(slowleak - s->slow_decay, band_psd[band] - s->slow_gain);
+ excite[band] = FFMAX(fastleak - lowcomp, slowleak);
+ }
+ begin = 22;
+ } else {
+ /* coupling channel */
+ begin = band_start;
+ fastleak = (s->cpl_fast_leak << 8) + 768;
+ slowleak = (s->cpl_slow_leak << 8) + 768;
+ }
+
+ for (band = begin; band < band_end; band++) {
+ fastleak = FFMAX(fastleak - s->fast_decay, band_psd[band] - fast_gain);
+ slowleak = FFMAX(slowleak - s->slow_decay, band_psd[band] - s->slow_gain);
+ excite[band] = FFMAX(fastleak, slowleak);
+ }
+
+ /* compute masking curve */
+
+ for (band = band_start; band < band_end; band++) {
+ int tmp = s->db_per_bit - band_psd[band];
+ if (tmp > 0) {
+ excite[band] += tmp >> 2;
+ }
+ mask[band] = FFMAX(ff_ac3_hearing_threshold_tab[band >> s->sr_shift][s->sr_code], excite[band]);
+ }
+
+ /* delta bit allocation */
+
+ if (dba_mode == DBA_REUSE || dba_mode == DBA_NEW) {
+ int i, seg, delta;
+ if (dba_nsegs >= 8)
+ return -1;
+ band = 0;
+ for (seg = 0; seg < dba_nsegs; seg++) {
+ band += dba_offsets[seg];
+ if (band >= 50 || dba_lengths[seg] > 50-band)
+ return -1;
+ if (dba_values[seg] >= 4) {
+ delta = (dba_values[seg] - 3) << 7;
+ } else {
+ delta = (dba_values[seg] - 4) << 7;
+ }
+ for (i = 0; i < dba_lengths[seg]; i++) {
+ mask[band++] += delta;
+ }
+ }
+ }
+ return 0;
+}
+
+void ff_ac3_bit_alloc_calc_bap(int16_t *mask, int16_t *psd, int start, int end,
+ int snr_offset, int floor,
+ const uint8_t *bap_tab, uint8_t *bap)
+{
+ int bin, band;
+
+ /* special case, if snr offset is -960, set all bap's to zero */
+ if (snr_offset == -960) {
+ memset(bap, 0, 256);
+ return;
+ }
+
+ bin = start;
+ band = bin_to_band_tab[start];
+ do {
+ int m = (FFMAX(mask[band] - snr_offset - floor, 0) & 0x1FE0) + floor;
+ int band_end = FFMIN(band_start_tab[band+1], end);
+ for (; bin < band_end; bin++) {
+ int address = av_clip((psd[bin] - m) >> 5, 0, 63);
+ bap[bin] = bap_tab[address];
+ }
+ } while (end > band_start_tab[band++]);
+}
+
+/* AC-3 bit allocation. The algorithm is the one described in the AC-3
+ spec. */
+void ac3_parametric_bit_allocation(AC3BitAllocParameters *s, uint8_t *bap,
+ int8_t *exp, int start, int end,
+ int snr_offset, int fast_gain, int is_lfe,
+ int dba_mode, int dba_nsegs,
+ uint8_t *dba_offsets, uint8_t *dba_lengths,
+ uint8_t *dba_values)
+{
+ int16_t psd[256]; /* scaled exponents */
+ int16_t band_psd[50]; /* interpolated exponents */
+ int16_t mask[50]; /* masking value */
+
+ ff_ac3_bit_alloc_calc_psd(exp, start, end, psd, band_psd);
+
+ ff_ac3_bit_alloc_calc_mask(s, band_psd, start, end, fast_gain, is_lfe,
+ dba_mode, dba_nsegs, dba_offsets, dba_lengths,
+ dba_values, mask);
+
+ ff_ac3_bit_alloc_calc_bap(mask, psd, start, end, snr_offset, s->floor,
+ ff_ac3_bap_tab, bap);
+}
+
+/**
+ * Initializes some tables.
+ * note: This function must remain thread safe because it is called by the
+ * AVParser init code.
+ */
+av_cold void ac3_common_init(void)
+{
+#if !CONFIG_HARDCODED_TABLES
+ /* compute bndtab and masktab from bandsz */
+ int bin = 0, band;
+ for (band = 0; band < 50; band++) {
+ int band_end = bin + ff_ac3_critical_band_size_tab[band];
+ band_start_tab[band] = bin;
+ while (bin < band_end)
+ bin_to_band_tab[bin++] = band;
+ }
+ band_start_tab[50] = bin;
+#endif /* !CONFIG_HARDCODED_TABLES */
+}
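
The first stage above maps each coefficient exponent directly to a log power estimate, psd = 3072 - (exp << 7); per the documentation in ac3.h below, 128 PSD units correspond to 6 dB, so each exponent step (a halving of amplitude) lowers the estimate by 6 dB. A small standalone illustration of that mapping (the main() and printout are not part of the decoder):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    int exp;
    for (exp = 0; exp <= 24; exp += 8) {
        int16_t psd = 3072 - (exp << 7);          /* same mapping as ff_ac3_bit_alloc_calc_psd() */
        double  db  = (psd - 3072) * 6.0 / 128.0; /* relative to exp == 0, using 128 units = 6 dB */
        printf("exp=%2d  psd=%4d  (%5.0f dB)\n", exp, psd, db);
    }
    return 0;
}
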
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3.h
new file mode 100644
index 000000000..4a4d3709b
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3.h
@@ -0,0 +1,187 @@
+/*
+ * Common code between the AC-3 encoder and decoder
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/ac3.h
+ * Common code between the AC-3 encoder and decoder.
+ */
+
+#ifndef AVCODEC_AC3_H
+#define AVCODEC_AC3_H
+
+#include "ac3tab.h"
+
+#define AC3_MAX_CODED_FRAME_SIZE 3840 /* in bytes */
+#define AC3_MAX_CHANNELS 6 /* including LFE channel */
+
+#define NB_BLOCKS 6 /* number of PCM blocks inside an AC-3 frame */
+#define AC3_FRAME_SIZE (NB_BLOCKS * 256)
+
+/* exponent encoding strategy */
+#define EXP_REUSE 0
+#define EXP_NEW 1
+
+#define EXP_D15 1
+#define EXP_D25 2
+#define EXP_D45 3
+
+/** Delta bit allocation strategy */
+typedef enum {
+ DBA_REUSE = 0,
+ DBA_NEW,
+ DBA_NONE,
+ DBA_RESERVED
+} AC3DeltaStrategy;
+
+/** Channel mode (audio coding mode) */
+typedef enum {
+ AC3_CHMODE_DUALMONO = 0,
+ AC3_CHMODE_MONO,
+ AC3_CHMODE_STEREO,
+ AC3_CHMODE_3F,
+ AC3_CHMODE_2F1R,
+ AC3_CHMODE_3F1R,
+ AC3_CHMODE_2F2R,
+ AC3_CHMODE_3F2R
+} AC3ChannelMode;
+
+typedef struct AC3BitAllocParameters {
+ int sr_code;
+ int sr_shift;
+ int slow_gain, slow_decay, fast_decay, db_per_bit, floor;
+ int cpl_fast_leak, cpl_slow_leak;
+} AC3BitAllocParameters;
+
+/**
+ * @struct AC3HeaderInfo
+ * Coded AC-3 header values up to the lfeon element, plus derived values.
+ */
+typedef struct {
+ /** @defgroup coded Coded elements
+ * @{
+ */
+ uint16_t sync_word;
+ uint16_t crc1;
+ uint8_t sr_code;
+ uint8_t bitstream_id;
+ uint8_t channel_mode;
+ uint8_t lfe_on;
+ uint8_t frame_type;
+ int substreamid; ///< substream identification
+ int center_mix_level; ///< Center mix level index
+ int surround_mix_level; ///< Surround mix level index
+ uint16_t channel_map;
+ int num_blocks; ///< number of audio blocks
+ /** @} */
+
+ /** @defgroup derived Derived values
+ * @{
+ */
+ uint8_t sr_shift;
+ uint16_t sample_rate;
+ uint32_t bit_rate;
+ uint8_t channels;
+ uint16_t frame_size;
+ int64_t channel_layout;
+ /** @} */
+} AC3HeaderInfo;
+
+typedef enum {
+ EAC3_FRAME_TYPE_INDEPENDENT = 0,
+ EAC3_FRAME_TYPE_DEPENDENT,
+ EAC3_FRAME_TYPE_AC3_CONVERT,
+ EAC3_FRAME_TYPE_RESERVED
+} EAC3FrameType;
+
+void ac3_common_init(void);
+
+/**
+ * Calculates the log power-spectral density of the input signal.
+ * This gives a rough estimate of signal power in the frequency domain by using
+ * the spectral envelope (exponents). The psd is also separately grouped
+ * into critical bands for use in calculating the masking curve.
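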
+ * 128 units in psd = -6 dB. The dbknee parameter in AC3BitAllocParameters
+ * determines the reference level.
+ *
+ * @param[in] exp frequency coefficient exponents
+ * @param[in] start starting bin location
+ * @param[in] end ending bin location
+ * @param[out] psd signal power for each frequency bin
+ * @param[out] band_psd signal power for each critical band
+ */
+void ff_ac3_bit_alloc_calc_psd(int8_t *exp, int start, int end, int16_t *psd,
+ int16_t *band_psd);
+
+/**
+ * Calculates the masking curve.
+ * First, the excitation is calculated using parameters in s and the signal
+ * power in each critical band. The excitation is compared with a predefined
+ * hearing threshold table to produce the masking curve. If delta bit
+ * allocation information is provided, it is used for adjusting the masking
+ * curve, usually to give a closer match to a better psychoacoustic model.
+ *
+ * @param[in] s adjustable bit allocation parameters
+ * @param[in] band_psd signal power for each critical band
+ * @param[in] start starting bin location
+ * @param[in] end ending bin location
+ * @param[in] fast_gain fast gain (estimated signal-to-mask ratio)
+ * @param[in] is_lfe whether or not the channel being processed is the LFE
+ * @param[in] dba_mode delta bit allocation mode (none, reuse, or new)
+ * @param[in] dba_nsegs number of delta segments
+ * @param[in] dba_offsets location offsets for each segment
+ * @param[in] dba_lengths length of each segment
+ * @param[in] dba_values delta bit allocation for each segment
+ * @param[out] mask calculated masking curve
+ * @return returns 0 for success, non-zero for error
+ */
+int ff_ac3_bit_alloc_calc_mask(AC3BitAllocParameters *s, int16_t *band_psd,
+ int start, int end, int fast_gain, int is_lfe,
+ int dba_mode, int dba_nsegs, uint8_t *dba_offsets,
+ uint8_t *dba_lengths, uint8_t *dba_values,
+ int16_t *mask);
+
+/**
+ * Calculates bit allocation pointers.
+ * The SNR is the difference between the masking curve and the signal. AC-3
+ * uses this value for each frequency bin to allocate bits. The snroffset
+ * parameter is a global adjustment to the SNR for all bins.
+ *
+ * @param[in] mask masking curve
+ * @param[in] psd signal power for each frequency bin
+ * @param[in] start starting bin location
+ * @param[in] end ending bin location
+ * @param[in] snr_offset SNR adjustment
+ * @param[in] floor noise floor
+ * @param[in] bap_tab look-up table for bit allocation pointers
+ * @param[out] bap bit allocation pointers
+ */
+void ff_ac3_bit_alloc_calc_bap(int16_t *mask, int16_t *psd, int start, int end,
+ int snr_offset, int floor,
+ const uint8_t *bap_tab, uint8_t *bap);
+
+void ac3_parametric_bit_allocation(AC3BitAllocParameters *s, uint8_t *bap,
+ int8_t *exp, int start, int end,
+ int snr_offset, int fast_gain, int is_lfe,
+ int dba_mode, int dba_nsegs,
+ uint8_t *dba_offsets, uint8_t *dba_lengths,
+ uint8_t *dba_values);
+
+#endif /* AVCODEC_AC3_H */
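
As the header parser in ac3_parser.c (next in this diff) shows, the decoded channel count is the full-bandwidth count for the coded channel_mode plus one if the LFE channel is present: channels = ff_ac3_channels_tab[channel_mode] + lfe_on. The sketch below restates the per-mode counts from the AC-3 acmod definition (ff_ac3_channels_tab itself lives in ac3tab.c, which is not shown in this section), so treat the table as an assumption rather than a quotation:

#include <stdio.h>

/* Full-bandwidth channels per audio coding mode (acmod), following the
 * AC-3 spec; mirrors what ff_ac3_channels_tab is expected to contain. */
static const unsigned char channels_per_mode[8] = {
    2, /* AC3_CHMODE_DUALMONO (1+1) */
    1, /* AC3_CHMODE_MONO           */
    2, /* AC3_CHMODE_STEREO         */
    3, /* AC3_CHMODE_3F             */
    3, /* AC3_CHMODE_2F1R           */
    4, /* AC3_CHMODE_3F1R           */
    4, /* AC3_CHMODE_2F2R           */
    5, /* AC3_CHMODE_3F2R           */
};

int main(void)
{
    int channel_mode = 7;  /* AC3_CHMODE_3F2R */
    int lfe_on       = 1;
    /* same derivation as ff_ac3_parse_header(): fbw channels + LFE */
    printf("channels = %d\n", channels_per_mode[channel_mode] + lfe_on);  /* prints 6 */
    return 0;
}
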
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3_parser.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3_parser.c
new file mode 100644
index 000000000..6331b64da
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3_parser.c
@@ -0,0 +1,207 @@
+/*
+ * AC-3 parser
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2003 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "parser.h"
+#include "ac3_parser.h"
+#include "aac_ac3_parser.h"
+#include "get_bits.h"
+
+
+#define AC3_HEADER_SIZE 7
+
+
+static const uint8_t eac3_blocks[4] = {
+ 1, 2, 3, 6
+};
+
+
+int ff_ac3_parse_header(GetBitContext *gbc, AC3HeaderInfo *hdr)
+{
+ int frame_size_code;
+
+ memset(hdr, 0, sizeof(*hdr));
+
+ hdr->sync_word = get_bits(gbc, 16);
+ if(hdr->sync_word != 0x0B77)
+ return AAC_AC3_PARSE_ERROR_SYNC;
+
+ /* read ahead to bsid to distinguish between AC-3 and E-AC-3 */
+ hdr->bitstream_id = show_bits_long(gbc, 29) & 0x1F;
+ if(hdr->bitstream_id > 16)
+ return AAC_AC3_PARSE_ERROR_BSID;
+
+ hdr->num_blocks = 6;
+
+ /* set default mix levels */
+ hdr->center_mix_level = 1; // -4.5dB
+ hdr->surround_mix_level = 1; // -6.0dB
+
+ if(hdr->bitstream_id <= 10) {
+ /* Normal AC-3 */
+ hdr->crc1 = get_bits(gbc, 16);
+ hdr->sr_code = get_bits(gbc, 2);
+ if(hdr->sr_code == 3)
+ return AAC_AC3_PARSE_ERROR_SAMPLE_RATE;
+
+ frame_size_code = get_bits(gbc, 6);
+ if(frame_size_code > 37)
+ return AAC_AC3_PARSE_ERROR_FRAME_SIZE;
+
+ skip_bits(gbc, 5); // skip bsid, already got it
+
+ skip_bits(gbc, 3); // skip bitstream mode
+ hdr->channel_mode = get_bits(gbc, 3);
+
+ if(hdr->channel_mode == AC3_CHMODE_STEREO) {
+ skip_bits(gbc, 2); // skip dsurmod
+ } else {
+ if((hdr->channel_mode & 1) && hdr->channel_mode != AC3_CHMODE_MONO)
+ hdr->center_mix_level = get_bits(gbc, 2);
+ if(hdr->channel_mode & 4)
+ hdr->surround_mix_level = get_bits(gbc, 2);
+ }
+ hdr->lfe_on = get_bits1(gbc);
+
+ hdr->sr_shift = FFMAX(hdr->bitstream_id, 8) - 8;
+ hdr->sample_rate = ff_ac3_sample_rate_tab[hdr->sr_code] >> hdr->sr_shift;
+ hdr->bit_rate = (ff_ac3_bitrate_tab[frame_size_code>>1] * 1000) >> hdr->sr_shift;
+ hdr->channels = ff_ac3_channels_tab[hdr->channel_mode] + hdr->lfe_on;
+ hdr->frame_size = ff_ac3_frame_size_tab[frame_size_code][hdr->sr_code] * 2;
+ hdr->frame_type = EAC3_FRAME_TYPE_AC3_CONVERT; //EAC3_FRAME_TYPE_INDEPENDENT;
+ hdr->substreamid = 0;
+ } else {
+ /* Enhanced AC-3 */
+ hdr->crc1 = 0;
+ hdr->frame_type = get_bits(gbc, 2);
+ if(hdr->frame_type == EAC3_FRAME_TYPE_RESERVED)
+ return AAC_AC3_PARSE_ERROR_FRAME_TYPE;
+
+ hdr->substreamid = get_bits(gbc, 3);
+
+ hdr->frame_size = (get_bits(gbc, 11) + 1) << 1;
+ if(hdr->frame_size < AC3_HEADER_SIZE)
+ return AAC_AC3_PARSE_ERROR_FRAME_SIZE;
+
+ hdr->sr_code = get_bits(gbc, 2);
+ if (hdr->sr_code == 3) {
+ int sr_code2 = get_bits(gbc, 2);
+ if(sr_code2 == 3)
+ return AAC_AC3_PARSE_ERROR_SAMPLE_RATE;
+ hdr->sample_rate = ff_ac3_sample_rate_tab[sr_code2] / 2;
+ hdr->sr_shift = 1;
+ } else {
+ hdr->num_blocks = eac3_blocks[get_bits(gbc, 2)];
+ hdr->sample_rate = ff_ac3_sample_rate_tab[hdr->sr_code];
+ hdr->sr_shift = 0;
+ }
+
+ hdr->channel_mode = get_bits(gbc, 3);
+ hdr->lfe_on = get_bits1(gbc);
+
+ hdr->bit_rate = (uint32_t)(8.0 * hdr->frame_size * hdr->sample_rate /
+ (hdr->num_blocks * 256.0));
+ hdr->channels = ff_ac3_channels_tab[hdr->channel_mode] + hdr->lfe_on;
+ }
+ hdr->channel_layout = ff_ac3_channel_layout_tab[hdr->channel_mode];
+ if (hdr->lfe_on)
+ hdr->channel_layout |= CH_LOW_FREQUENCY;
+
+ return 0;
+}
+
+int ff_ac3_parse_header_full(GetBitContext *gbc, AC3HeaderInfo *hdr){
+ int ret, i;
+ ret = ff_ac3_parse_header(gbc, hdr);
+ if(!ret){
+ if(hdr->bitstream_id>10){
+ /* Enhanced AC-3 */
+ skip_bits(gbc, 5); // skip bitstream id
+
+ /* skip dialog normalization and compression gain */
+ for (i = 0; i < (hdr->channel_mode ? 1 : 2); i++) {
+ skip_bits(gbc, 5); // skip dialog normalization
+ if (get_bits1(gbc)) {
+ skip_bits(gbc, 8); //skip Compression gain word
+ }
+ }
+ /* dependent stream channel map */
+ if (hdr->frame_type == EAC3_FRAME_TYPE_DEPENDENT && get_bits1(gbc)) {
+ hdr->channel_map = get_bits(gbc, 16); //custom channel map
+ return 0;
+ }
+ }
+ //default channel map based on acmod and lfeon
+ hdr->channel_map = ff_eac3_default_chmap[hdr->channel_mode];
+ if(hdr->lfe_on)
+ hdr->channel_map |= AC3_CHMAP_LFE;
+ }
+ return ret;
+}
+
+static int ac3_sync(uint64_t state, AACAC3ParseContext *hdr_info,
+ int *need_next_header, int *new_frame_start)
+{
+ int err;
+ union {
+ uint64_t u64;
+ uint8_t u8[8];
+ } tmp = { be2me_64(state) };
+ AC3HeaderInfo hdr;
+ GetBitContext gbc;
+
+ init_get_bits(&gbc, tmp.u8+8-AC3_HEADER_SIZE, 54);
+ err = ff_ac3_parse_header(&gbc, &hdr);
+
+ if(err < 0)
+ return 0;
+
+ hdr_info->sample_rate = hdr.sample_rate;
+ hdr_info->bit_rate = hdr.bit_rate;
+ hdr_info->channels = hdr.channels;
+ hdr_info->channel_layout = hdr.channel_layout;
+ hdr_info->samples = hdr.num_blocks * 256;
+ if(hdr.bitstream_id>10)
+ hdr_info->codec_id = CODEC_ID_EAC3;
+ else
+ hdr_info->codec_id = CODEC_ID_AC3;
+
+ *need_next_header = (hdr.frame_type != EAC3_FRAME_TYPE_AC3_CONVERT);
+ *new_frame_start = (hdr.frame_type != EAC3_FRAME_TYPE_DEPENDENT);
+ return hdr.frame_size;
+}
+
+static av_cold int ac3_parse_init(AVCodecParserContext *s1)
+{
+ AACAC3ParseContext *s = s1->priv_data;
+ s->header_size = AC3_HEADER_SIZE;
+ s->sync = ac3_sync;
+ return 0;
+}
+
+
+AVCodecParser ac3_parser = {
+ { CODEC_ID_AC3, CODEC_ID_EAC3 },
+ sizeof(AACAC3ParseContext),
+ ac3_parse_init,
+ ff_aac_ac3_parse,
+ ff_parse_close,
+};
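
For Enhanced AC-3 the bit rate is not looked up in a table; ff_ac3_parse_header() derives it from the coded frame size as bit_rate = 8.0 * frame_size * sample_rate / (num_blocks * 256.0). A worked example with made-up but plausible numbers, not taken from a real stream:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint16_t frame_size  = 1536;   /* bytes, i.e. (frmsiz + 1) << 1 with frmsiz = 767 */
    int      sample_rate = 48000;  /* Hz */
    int      num_blocks  = 6;      /* 256 audio samples per block */

    /* same formula as the E-AC-3 branch of ff_ac3_parse_header() */
    unsigned bit_rate = (unsigned)(8.0 * frame_size * sample_rate /
                                   (num_blocks * 256.0));
    printf("bit rate = %u bps\n", bit_rate);   /* prints 384000 (384 kbps) */
    return 0;
}
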
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3_parser.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3_parser.h
new file mode 100644
index 000000000..70ce4de7a
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3_parser.h
@@ -0,0 +1,52 @@
+/*
+ * AC-3 parser prototypes
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2003 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_AC3_PARSER_H
+#define AVCODEC_AC3_PARSER_H
+
+#include "ac3.h"
+#include "get_bits.h"
+
+/**
+ * Parses AC-3 frame header.
+ * Parses the header up to the lfeon element, which is the first 52 or 54 bits
+ * depending on the audio coding mode.
+ * @param gbc[in] BitContext containing the first 54 bits of the frame.
+ * @param hdr[out] Pointer to struct where header info is written.
+ * @return Returns 0 on success, -1 if there is a sync word mismatch,
+ * -2 if the bsid (version) element is invalid, -3 if the fscod (sample rate)
+ * element is invalid, or -4 if the frmsizecod (bit rate) element is invalid.
+ */
+int ff_ac3_parse_header(GetBitContext *gbc, AC3HeaderInfo *hdr);
+
+/**
+ * Parses AC-3 frame header and sets channel_map
+ * Parses the header up to the lfeon (channel_map in E-AC-3)
+ * element, which is the first 52, 54 or 104 bits depending
+ * on the audio coding mode.
+ * @param gbc[in] BitContext containing the first 54 bits of the frame.
+ * @param hdr[out] Pointer to struct where header info is written.
+ * @return value returned by ff_ac3_parse_header
+ */
+int ff_ac3_parse_header_full(GetBitContext *gbc, AC3HeaderInfo *hdr);
+
+#endif /* AVCODEC_AC3_PARSER_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3dec.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3dec.c
new file mode 100644
index 000000000..36ca31fad
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3dec.c
@@ -0,0 +1,1461 @@
+/*
+ * AC-3 Audio Decoder
+ * This code was developed as part of Google Summer of Code 2006.
+ * E-AC-3 support was added as part of Google Summer of Code 2007.
+ *
+ * Copyright (c) 2006 Kartikey Mahendra BHATT (bhattkm at gmail dot com)
+ * Copyright (c) 2007-2008 Bartlomiej Wolowiec <bartek.wolowiec@gmail.com>
+ * Copyright (c) 2007 Justin Ruggles <justin.ruggles@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdio.h>
+#include <stddef.h>
+#include <math.h>
+#include <string.h>
+
+#include "libavutil/crc.h"
+#include "internal.h"
+#include "aac_ac3_parser.h"
+#include "ac3_parser.h"
+#include "ac3dec.h"
+#include "ac3dec_data.h"
+
+/** Large enough for maximum possible frame size when the specification limit is ignored */
+#define AC3_FRAME_BUFFER_SIZE 32768
+
+/**
+ * table for ungrouping 3 values in 7 bits.
+ * used for exponents and bap=2 mantissas
+ */
+static uint8_t ungroup_3_in_7_bits_tab[128][3];
+
+
+/** tables for ungrouping mantissas */
+static int b1_mantissas[32][3];
+static int b2_mantissas[128][3];
+static int b3_mantissas[8];
+static int b4_mantissas[128][2];
+static int b5_mantissas[16];
+
+/**
+ * Quantization table: levels for symmetric. bits for asymmetric.
+ * reference: Table 7.18 Mapping of bap to Quantizer
+ */
+static const uint8_t quantization_tab[16] = {
+ 0, 3, 5, 7, 11, 15,
+ 5, 6, 7, 8, 9, 10, 11, 12, 14, 16
+};
+
+/** dynamic range table. converts codes to scale factors. */
+static float dynamic_range_tab[256];
+
+/** Adjustments in dB gain */
+#define LEVEL_PLUS_3DB 1.4142135623730950
+#define LEVEL_PLUS_1POINT5DB 1.1892071150027209
+#define LEVEL_MINUS_1POINT5DB 0.8408964152537145
+#define LEVEL_MINUS_3DB 0.7071067811865476
+#define LEVEL_MINUS_4POINT5DB 0.5946035575013605
+#define LEVEL_MINUS_6DB 0.5000000000000000
+#define LEVEL_MINUS_9DB 0.3535533905932738
+#define LEVEL_ZERO 0.0000000000000000
+#define LEVEL_ONE 1.0000000000000000
+
+static const float gain_levels[9] = {
+ LEVEL_PLUS_3DB,
+ LEVEL_PLUS_1POINT5DB,
+ LEVEL_ONE,
+ LEVEL_MINUS_1POINT5DB,
+ LEVEL_MINUS_3DB,
+ LEVEL_MINUS_4POINT5DB,
+ LEVEL_MINUS_6DB,
+ LEVEL_ZERO,
+ LEVEL_MINUS_9DB
+};
+
+/**
+ * Table for center mix levels
+ * reference: Section 5.4.2.4 cmixlev
+ */
+static const uint8_t center_levels[4] = { 4, 5, 6, 5 };
+
+/**
+ * Table for surround mix levels
+ * reference: Section 5.4.2.5 surmixlev
+ */
+static const uint8_t surround_levels[4] = { 4, 6, 7, 6 };
+
+/**
+ * Table for default stereo downmixing coefficients
+ * reference: Section 7.8.2 Downmixing Into Two Channels
+ */
+static const uint8_t ac3_default_coeffs[8][5][2] = {
+ { { 2, 7 }, { 7, 2 }, },
+ { { 4, 4 }, },
+ { { 2, 7 }, { 7, 2 }, },
+ { { 2, 7 }, { 5, 5 }, { 7, 2 }, },
+ { { 2, 7 }, { 7, 2 }, { 6, 6 }, },
+ { { 2, 7 }, { 5, 5 }, { 7, 2 }, { 8, 8 }, },
+ { { 2, 7 }, { 7, 2 }, { 6, 7 }, { 7, 6 }, },
+ { { 2, 7 }, { 5, 5 }, { 7, 2 }, { 6, 7 }, { 7, 6 }, },
+};
+
+/**
+ * Symmetrical Dequantization
+ * reference: Section 7.3.3 Expansion of Mantissas for Symmetrical Quantization
+ * Tables 7.19 to 7.23
+ */
+static inline int
+symmetric_dequant(int code, int levels)
+{
+ return ((code - (levels >> 1)) << 24) / levels;
+}
+
+/*
+ * Initialize tables at runtime.
+ */
+static av_cold void ac3_tables_init(void)
+{
+ int i;
+
+ /* generate table for ungrouping 3 values in 7 bits
+ reference: Section 7.1.3 Exponent Decoding */
+ for(i=0; i<128; i++) {
+ ungroup_3_in_7_bits_tab[i][0] = i / 25;
+ ungroup_3_in_7_bits_tab[i][1] = (i % 25) / 5;
+ ungroup_3_in_7_bits_tab[i][2] = (i % 25) % 5;
+ }
+
+ /* generate grouped mantissa tables
+ reference: Section 7.3.5 Ungrouping of Mantissas */
+ for(i=0; i<32; i++) {
+ /* bap=1 mantissas */
+ b1_mantissas[i][0] = symmetric_dequant(ff_ac3_ungroup_3_in_5_bits_tab[i][0], 3);
+ b1_mantissas[i][1] = symmetric_dequant(ff_ac3_ungroup_3_in_5_bits_tab[i][1], 3);
+ b1_mantissas[i][2] = symmetric_dequant(ff_ac3_ungroup_3_in_5_bits_tab[i][2], 3);
+ }
+ for(i=0; i<128; i++) {
+ /* bap=2 mantissas */
+ b2_mantissas[i][0] = symmetric_dequant(ungroup_3_in_7_bits_tab[i][0], 5);
+ b2_mantissas[i][1] = symmetric_dequant(ungroup_3_in_7_bits_tab[i][1], 5);
+ b2_mantissas[i][2] = symmetric_dequant(ungroup_3_in_7_bits_tab[i][2], 5);
+
+ /* bap=4 mantissas */
+ b4_mantissas[i][0] = symmetric_dequant(i / 11, 11);
+ b4_mantissas[i][1] = symmetric_dequant(i % 11, 11);
+ }
+ /* generate ungrouped mantissa tables
+ reference: Tables 7.21 and 7.23 */
+ for(i=0; i<7; i++) {
+ /* bap=3 mantissas */
+ b3_mantissas[i] = symmetric_dequant(i, 7);
+ }
+ for(i=0; i<15; i++) {
+ /* bap=5 mantissas */
+ b5_mantissas[i] = symmetric_dequant(i, 15);
+ }
+
+ /* generate dynamic range table
+ reference: Section 7.7.1 Dynamic Range Control */
+ for(i=0; i<256; i++) {
+ int v = (i >> 5) - ((i >> 7) << 3) - 5;
+ dynamic_range_tab[i] = powf(2.0f, v) * ((i & 0x1F) | 0x20);
+ }
+}
+
+
+/**
+ * AVCodec initialization
+ */
+static av_cold int ac3_decode_init(AVCodecContext *avctx)
+{
+ AC3DecodeContext *s = avctx->priv_data;
+ s->avctx = avctx;
+
+ ac3_common_init();
+ ac3_tables_init();
+ ff_mdct_init(&s->imdct_256, 8, 1, 1.0);
+ ff_mdct_init(&s->imdct_512, 9, 1, 1.0);
+ ff_kbd_window_init(s->window, 5.0, 256);
+ dsputil_init(&s->dsp, avctx);
+ av_lfg_init(&s->dith_state, 0);
+
+ /* set bias values for float to int16 conversion */
+ if(s->dsp.float_to_int16_interleave == ff_float_to_int16_interleave_c) {
+ s->add_bias = 385.0f;
+ s->mul_bias = 1.0f;
+ } else {
+ s->add_bias = 0.0f;
+ s->mul_bias = 32767.0f;
+ }
+
+ /* allow downmixing to stereo or mono */
+ if (avctx->channels > 0 && avctx->request_channels > 0 &&
+ avctx->request_channels < avctx->channels &&
+ avctx->request_channels <= 2) {
+ avctx->channels = avctx->request_channels;
+ }
+ s->downmixed = 1;
+
+ /* allocate context input buffer */
+ if (avctx->error_recognition >= FF_ER_CAREFUL) {
+ s->input_buffer = av_mallocz(AC3_FRAME_BUFFER_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
+ if (!s->input_buffer)
+ return AVERROR_NOMEM;
+ }
+
+ avctx->sample_fmt = SAMPLE_FMT_S16;
+ return 0;
+}
+
+/**
+ * Parse the 'sync info' and 'bit stream info' from the AC-3 bitstream.
+ * GetBitContext within AC3DecodeContext must point to
+ * the start of the synchronized AC-3 bitstream.
+ */
+static int ac3_parse_header(AC3DecodeContext *s)
+{
+ GetBitContext *gbc = &s->gbc;
+ int i;
+
+ /* read the rest of the bsi. read twice for dual mono mode. */
+ i = !(s->channel_mode);
+ do {
+ skip_bits(gbc, 5); // skip dialog normalization
+ if (get_bits1(gbc))
+ skip_bits(gbc, 8); //skip compression
+ if (get_bits1(gbc))
+ skip_bits(gbc, 8); //skip language code
+ if (get_bits1(gbc))
+ skip_bits(gbc, 7); //skip audio production information
+ } while (i--);
+
+ skip_bits(gbc, 2); //skip copyright bit and original bitstream bit
+
+ /* skip the timecodes (or extra bitstream information for Alternate Syntax)
+ TODO: read & use the xbsi1 downmix levels */
+ if (get_bits1(gbc))
+ skip_bits(gbc, 14); //skip timecode1 / xbsi1
+ if (get_bits1(gbc))
+ skip_bits(gbc, 14); //skip timecode2 / xbsi2
+
+ /* skip additional bitstream info */
+ if (get_bits1(gbc)) {
+ i = get_bits(gbc, 6);
+ do {
+ skip_bits(gbc, 8);
+ } while(i--);
+ }
+
+ return 0;
+}
+
+/**
+ * Common function to parse AC-3 or E-AC-3 frame header
+ */
+static int parse_frame_header(AC3DecodeContext *s)
+{
+ AC3HeaderInfo hdr;
+ int err;
+
+ err = ff_ac3_parse_header(&s->gbc, &hdr);
+ if(err)
+ return err;
+
+ /* get decoding parameters from header info */
+ s->bit_alloc_params.sr_code = hdr.sr_code;
+ s->channel_mode = hdr.channel_mode;
+ s->channel_layout = hdr.channel_layout;
+ s->lfe_on = hdr.lfe_on;
+ s->bit_alloc_params.sr_shift = hdr.sr_shift;
+ s->sample_rate = hdr.sample_rate;
+ s->bit_rate = hdr.bit_rate;
+ s->channels = hdr.channels;
+ s->fbw_channels = s->channels - s->lfe_on;
+ s->lfe_ch = s->fbw_channels + 1;
+ s->frame_size = hdr.frame_size;
+ s->center_mix_level = hdr.center_mix_level;
+ s->surround_mix_level = hdr.surround_mix_level;
+ s->num_blocks = hdr.num_blocks;
+ s->frame_type = hdr.frame_type;
+ s->substreamid = hdr.substreamid;
+
+ if(s->lfe_on) {
+ s->start_freq[s->lfe_ch] = 0;
+ s->end_freq[s->lfe_ch] = 7;
+ s->num_exp_groups[s->lfe_ch] = 2;
+ s->channel_in_cpl[s->lfe_ch] = 0;
+ }
+
+ if (hdr.bitstream_id <= 10) {
+ s->eac3 = 0;
+ s->snr_offset_strategy = 2;
+ s->block_switch_syntax = 1;
+ s->dither_flag_syntax = 1;
+ s->bit_allocation_syntax = 1;
+ s->fast_gain_syntax = 0;
+ s->first_cpl_leak = 0;
+ s->dba_syntax = 1;
+ s->skip_syntax = 1;
+ memset(s->channel_uses_aht, 0, sizeof(s->channel_uses_aht));
+ return ac3_parse_header(s);
+ } else if (CONFIG_EAC3_DECODER) {
+ s->eac3 = 1;
+ return ff_eac3_parse_header(s);
+ } else {
+ av_log(s->avctx, AV_LOG_ERROR, "E-AC-3 support not compiled in\n");
+ return -1;
+ }
+}
+
+/**
+ * Set stereo downmixing coefficients based on frame header info.
+ * reference: Section 7.8.2 Downmixing Into Two Channels
+ */
+static void set_downmix_coeffs(AC3DecodeContext *s)
+{
+ int i;
+ float cmix = gain_levels[center_levels[s->center_mix_level]];
+ float smix = gain_levels[surround_levels[s->surround_mix_level]];
+ float norm0, norm1;
+
+ for(i=0; i<s->fbw_channels; i++) {
+ s->downmix_coeffs[i][0] = gain_levels[ac3_default_coeffs[s->channel_mode][i][0]];
+ s->downmix_coeffs[i][1] = gain_levels[ac3_default_coeffs[s->channel_mode][i][1]];
+ }
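+    /* acmod 3/0, 3/1 and 3/2 (odd values > 1) place the center channel at index 1 */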
+ if(s->channel_mode > 1 && s->channel_mode & 1) {
+ s->downmix_coeffs[1][0] = s->downmix_coeffs[1][1] = cmix;
+ }
+ if(s->channel_mode == AC3_CHMODE_2F1R || s->channel_mode == AC3_CHMODE_3F1R) {
+ int nf = s->channel_mode - 2;
+ s->downmix_coeffs[nf][0] = s->downmix_coeffs[nf][1] = smix * LEVEL_MINUS_3DB;
+ }
+ if(s->channel_mode == AC3_CHMODE_2F2R || s->channel_mode == AC3_CHMODE_3F2R) {
+ int nf = s->channel_mode - 4;
+ s->downmix_coeffs[nf][0] = s->downmix_coeffs[nf+1][1] = smix;
+ }
+
+ /* renormalize */
+ norm0 = norm1 = 0.0;
+ for(i=0; i<s->fbw_channels; i++) {
+ norm0 += s->downmix_coeffs[i][0];
+ norm1 += s->downmix_coeffs[i][1];
+ }
+ norm0 = 1.0f / norm0;
+ norm1 = 1.0f / norm1;
+ for(i=0; i<s->fbw_channels; i++) {
+ s->downmix_coeffs[i][0] *= norm0;
+ s->downmix_coeffs[i][1] *= norm1;
+ }
+
+ if(s->output_mode == AC3_CHMODE_MONO) {
+ for(i=0; i<s->fbw_channels; i++)
+ s->downmix_coeffs[i][0] = (s->downmix_coeffs[i][0] + s->downmix_coeffs[i][1]) * LEVEL_MINUS_3DB;
+ }
+}
+
+/**
+ * Decode the grouped exponents according to exponent strategy.
+ * reference: Section 7.1.3 Exponent Decoding
+ */
+static int decode_exponents(GetBitContext *gbc, int exp_strategy, int ngrps,
+ uint8_t absexp, int8_t *dexps)
+{
+ int i, j, grp, group_size;
+ int dexp[256];
+ int expacc, prevexp;
+
+ /* unpack groups */
+ group_size = exp_strategy + (exp_strategy == EXP_D45);
+ for(grp=0,i=0; grp<ngrps; grp++) {
+ expacc = get_bits(gbc, 7);
+ dexp[i++] = ungroup_3_in_7_bits_tab[expacc][0];
+ dexp[i++] = ungroup_3_in_7_bits_tab[expacc][1];
+ dexp[i++] = ungroup_3_in_7_bits_tab[expacc][2];
+ }
+
+ /* convert to absolute exps and expand groups */
+ prevexp = absexp;
+ for(i=0,j=0; i<ngrps*3; i++) {
+ prevexp += dexp[i] - 2;
+ if (prevexp > 24U)
+ return -1;
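+        /* intentional fall-through: writes group_size copies of the same exponent */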
+ switch (group_size) {
+ case 4: dexps[j++] = prevexp;
+ dexps[j++] = prevexp;
+ case 2: dexps[j++] = prevexp;
+ case 1: dexps[j++] = prevexp;
+ }
+ }
+ return 0;
+}
+
+/**
+ * Generate transform coefficients for each coupled channel in the coupling
+ * range using the coupling coefficients and coupling coordinates.
+ * reference: Section 7.4.3 Coupling Coordinate Format
+ */
+static void calc_transform_coeffs_cpl(AC3DecodeContext *s)
+{
+ int bin, band, ch;
+
+ bin = s->start_freq[CPL_CH];
+ for (band = 0; band < s->num_cpl_bands; band++) {
+ int band_start = bin;
+ int band_end = bin + s->cpl_band_sizes[band];
+ for (ch = 1; ch <= s->fbw_channels; ch++) {
+ if (s->channel_in_cpl[ch]) {
+ int cpl_coord = s->cpl_coords[ch][band] << 5;
+ for (bin = band_start; bin < band_end; bin++) {
+ s->fixed_coeffs[ch][bin] = MULH(s->fixed_coeffs[CPL_CH][bin] << 4, cpl_coord);
+ }
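+                /* in 2/0 mode, a set phase flag inverts the right channel in this band */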
+ if (ch == 2 && s->phase_flags[band]) {
+ for (bin = band_start; bin < band_end; bin++)
+ s->fixed_coeffs[2][bin] = -s->fixed_coeffs[2][bin];
+ }
+ }
+ }
+ bin = band_end;
+ }
+}
+
+/**
+ * Grouped mantissas for 3-level, 5-level and 11-level quantization
+ */
+typedef struct {
+ int b1_mant[2];
+ int b2_mant[2];
+ int b4_mant;
+ int b1;
+ int b2;
+ int b4;
+} mant_groups;
+
+/**
+ * Decode the transform coefficients for a particular channel
+ * reference: Section 7.3 Quantization and Decoding of Mantissas
+ */
+static void ac3_decode_transform_coeffs_ch(AC3DecodeContext *s, int ch_index, mant_groups *m)
+{
+ int start_freq = s->start_freq[ch_index];
+ int end_freq = s->end_freq[ch_index];
+ uint8_t *baps = s->bap[ch_index];
+ int8_t *exps = s->dexps[ch_index];
+ int *coeffs = s->fixed_coeffs[ch_index];
+ int dither = (ch_index == CPL_CH) || s->dither_flag[ch_index];
+ GetBitContext *gbc = &s->gbc;
+ int freq;
+
+ for(freq = start_freq; freq < end_freq; freq++){
+ int bap = baps[freq];
+ int mantissa;
+ switch(bap){
+ case 0:
+ if (dither)
+ mantissa = (av_lfg_get(&s->dith_state) & 0x7FFFFF) - 0x400000;
+ else
+ mantissa = 0;
+ break;
+ case 1:
+ if(m->b1){
+ m->b1--;
+ mantissa = m->b1_mant[m->b1];
+ }
+ else{
+ int bits = get_bits(gbc, 5);
+ mantissa = b1_mantissas[bits][0];
+ m->b1_mant[1] = b1_mantissas[bits][1];
+ m->b1_mant[0] = b1_mantissas[bits][2];
+ m->b1 = 2;
+ }
+ break;
+ case 2:
+ if(m->b2){
+ m->b2--;
+ mantissa = m->b2_mant[m->b2];
+ }
+ else{
+ int bits = get_bits(gbc, 7);
+ mantissa = b2_mantissas[bits][0];
+ m->b2_mant[1] = b2_mantissas[bits][1];
+ m->b2_mant[0] = b2_mantissas[bits][2];
+ m->b2 = 2;
+ }
+ break;
+ case 3:
+ mantissa = b3_mantissas[get_bits(gbc, 3)];
+ break;
+ case 4:
+ if(m->b4){
+ m->b4 = 0;
+ mantissa = m->b4_mant;
+ }
+ else{
+ int bits = get_bits(gbc, 7);
+ mantissa = b4_mantissas[bits][0];
+ m->b4_mant = b4_mantissas[bits][1];
+ m->b4 = 1;
+ }
+ break;
+ case 5:
+ mantissa = b5_mantissas[get_bits(gbc, 4)];
+ break;
+ default: /* 6 to 15 */
+ mantissa = get_bits(gbc, quantization_tab[bap]);
+ /* Shift mantissa and sign-extend it. */
+ mantissa = (mantissa << (32-quantization_tab[bap]))>>8;
+ break;
+ }
+ coeffs[freq] = mantissa >> exps[freq];
+ }
+}
+
+/**
+ * Remove random dithering from coupling range coefficients with zero-bit
+ * mantissas for coupled channels which do not use dithering.
+ * reference: Section 7.3.4 Dither for Zero Bit Mantissas (bap=0)
+ */
+static void remove_dithering(AC3DecodeContext *s) {
+ int ch, i;
+
+ for(ch=1; ch<=s->fbw_channels; ch++) {
+ if(!s->dither_flag[ch] && s->channel_in_cpl[ch]) {
+ for(i = s->start_freq[CPL_CH]; i<s->end_freq[CPL_CH]; i++) {
+ if(!s->bap[CPL_CH][i])
+ s->fixed_coeffs[ch][i] = 0;
+ }
+ }
+ }
+}
+
+static void decode_transform_coeffs_ch(AC3DecodeContext *s, int blk, int ch,
+ mant_groups *m)
+{
+ if (!s->channel_uses_aht[ch]) {
+ ac3_decode_transform_coeffs_ch(s, ch, m);
+ } else {
+ /* if AHT is used, mantissas for all blocks are encoded in the first
+ block of the frame. */
+ int bin;
+ if (!blk && CONFIG_EAC3_DECODER)
+ ff_eac3_decode_transform_coeffs_aht_ch(s, ch);
+ for (bin = s->start_freq[ch]; bin < s->end_freq[ch]; bin++) {
+ s->fixed_coeffs[ch][bin] = s->pre_mantissa[ch][bin][blk] >> s->dexps[ch][bin];
+ }
+ }
+}
+
+/**
+ * Decode the transform coefficients.
+ */
+static void decode_transform_coeffs(AC3DecodeContext *s, int blk)
+{
+ int ch, end;
+ int got_cplchan = 0;
+ mant_groups m;
+
+ m.b1 = m.b2 = m.b4 = 0;
+
+ for (ch = 1; ch <= s->channels; ch++) {
+ /* transform coefficients for full-bandwidth channel */
+ decode_transform_coeffs_ch(s, blk, ch, &m);
+        /* transform coefficients for the coupling channel come right after the
+           coefficients for the first coupled channel */
+ if (s->channel_in_cpl[ch]) {
+ if (!got_cplchan) {
+ decode_transform_coeffs_ch(s, blk, CPL_CH, &m);
+ calc_transform_coeffs_cpl(s);
+ got_cplchan = 1;
+ }
+ end = s->end_freq[CPL_CH];
+ } else {
+ end = s->end_freq[ch];
+ }
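+        /* zero the coefficients above the channel (or coupling channel) end frequency */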
+ do
+ s->fixed_coeffs[ch][end] = 0;
+ while(++end < 256);
+ }
+
+ /* zero the dithered coefficients for appropriate channels */
+ remove_dithering(s);
+}
+
+/**
+ * Stereo rematrixing.
+ * reference: Section 7.5.4 Rematrixing : Decoding Technique
+ */
+static void do_rematrixing(AC3DecodeContext *s)
+{
+ int bnd, i;
+ int end, bndend;
+
+ end = FFMIN(s->end_freq[1], s->end_freq[2]);
+
+ for(bnd=0; bnd<s->num_rematrixing_bands; bnd++) {
+ if(s->rematrixing_flags[bnd]) {
+ bndend = FFMIN(end, ff_ac3_rematrix_band_tab[bnd+1]);
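+            /* de-matrix the transmitted sum/difference pair: ch1 = a + b, ch2 = a - b */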
+ for(i=ff_ac3_rematrix_band_tab[bnd]; i<bndend; i++) {
+ int tmp0 = s->fixed_coeffs[1][i];
+ s->fixed_coeffs[1][i] += s->fixed_coeffs[2][i];
+ s->fixed_coeffs[2][i] = tmp0 - s->fixed_coeffs[2][i];
+ }
+ }
+ }
+}
+
+/**
+ * Inverse MDCT Transform.
+ * Convert frequency domain coefficients to time-domain audio samples.
+ * reference: Section 7.9.4 Transformation Equations
+ */
+static inline void do_imdct(AC3DecodeContext *s, int channels)
+{
+ int ch;
+ float add_bias = s->add_bias;
+ if(s->out_channels==1 && channels>1)
+ add_bias *= LEVEL_MINUS_3DB; // compensate for the gain in downmix
+
+ for (ch=1; ch<=channels; ch++) {
+ if (s->block_switch[ch]) {
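+            /* short blocks: two half-length IMDCTs, on the even- and odd-indexed coefficients */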
+ int i;
+ float *x = s->tmp_output+128;
+ for(i=0; i<128; i++)
+ x[i] = s->transform_coeffs[ch][2*i];
+ ff_imdct_half(&s->imdct_256, s->tmp_output, x);
+ s->dsp.vector_fmul_window(s->output[ch-1], s->delay[ch-1], s->tmp_output, s->window, add_bias, 128);
+ for(i=0; i<128; i++)
+ x[i] = s->transform_coeffs[ch][2*i+1];
+ ff_imdct_half(&s->imdct_256, s->delay[ch-1], x);
+ } else {
+ ff_imdct_half(&s->imdct_512, s->tmp_output, s->transform_coeffs[ch]);
+ s->dsp.vector_fmul_window(s->output[ch-1], s->delay[ch-1], s->tmp_output, s->window, add_bias, 128);
+ memcpy(s->delay[ch-1], s->tmp_output+128, 128*sizeof(float));
+ }
+ }
+}
+
+/**
+ * Downmix the output to mono or stereo.
+ */
+void ff_ac3_downmix_c(float (*samples)[256], float (*matrix)[2], int out_ch, int in_ch, int len)
+{
+ int i, j;
+ float v0, v1;
+ if(out_ch == 2) {
+ for(i=0; i<len; i++) {
+ v0 = v1 = 0.0f;
+ for(j=0; j<in_ch; j++) {
+ v0 += samples[j][i] * matrix[j][0];
+ v1 += samples[j][i] * matrix[j][1];
+ }
+ samples[0][i] = v0;
+ samples[1][i] = v1;
+ }
+ } else if(out_ch == 1) {
+ for(i=0; i<len; i++) {
+ v0 = 0.0f;
+ for(j=0; j<in_ch; j++)
+ v0 += samples[j][i] * matrix[j][0];
+ samples[0][i] = v0;
+ }
+ }
+}
+
+/**
+ * Upmix delay samples from stereo to original channel layout.
+ */
+static void ac3_upmix_delay(AC3DecodeContext *s)
+{
+ int channel_data_size = sizeof(s->delay[0]);
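+    /* note: cases fall through intentionally so every affected channel is cleared or copied */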
+ switch(s->channel_mode) {
+ case AC3_CHMODE_DUALMONO:
+ case AC3_CHMODE_STEREO:
+ /* upmix mono to stereo */
+ memcpy(s->delay[1], s->delay[0], channel_data_size);
+ break;
+ case AC3_CHMODE_2F2R:
+ memset(s->delay[3], 0, channel_data_size);
+ case AC3_CHMODE_2F1R:
+ memset(s->delay[2], 0, channel_data_size);
+ break;
+ case AC3_CHMODE_3F2R:
+ memset(s->delay[4], 0, channel_data_size);
+ case AC3_CHMODE_3F1R:
+ memset(s->delay[3], 0, channel_data_size);
+ case AC3_CHMODE_3F:
+ memcpy(s->delay[2], s->delay[1], channel_data_size);
+ memset(s->delay[1], 0, channel_data_size);
+ break;
+ }
+}
+
+/**
+ * Decode band structure for coupling, spectral extension, or enhanced coupling.
+ * The band structure defines how many subbands are in each band. For each
+ * subband in the range, 1 means it is combined with the previous band, and 0
+ * means that it starts a new band.
+ *
+ * @param[in] gbc bit reader context
+ * @param[in] blk block number
+ * @param[in] eac3 flag to indicate E-AC-3
+ * @param[in] ecpl flag to indicate enhanced coupling
+ * @param[in] start_subband subband number for start of range
+ * @param[in] end_subband subband number for end of range
+ * @param[in] default_band_struct default band structure table
+ * @param[out] num_bands number of bands (optionally NULL)
+ * @param[out] band_sizes array containing the number of bins in each band (optionally NULL)
+ */
+static void decode_band_structure(GetBitContext *gbc, int blk, int eac3,
+ int ecpl, int start_subband, int end_subband,
+ const uint8_t *default_band_struct,
+ int *num_bands, uint8_t *band_sizes)
+{
+ int subbnd, bnd, n_subbands, n_bands=0;
+ uint8_t bnd_sz[22];
+ uint8_t coded_band_struct[22];
+ const uint8_t *band_struct;
+
+ n_subbands = end_subband - start_subband;
+
+ /* decode band structure from bitstream or use default */
+ if (!eac3 || get_bits1(gbc)) {
+ for (subbnd = 0; subbnd < n_subbands - 1; subbnd++) {
+ coded_band_struct[subbnd] = get_bits1(gbc);
+ }
+ band_struct = coded_band_struct;
+ } else if (!blk) {
+ band_struct = &default_band_struct[start_subband+1];
+ } else {
+ /* no change in band structure */
+ return;
+ }
+
+ /* calculate number of bands and band sizes based on band structure.
+ note that the first 4 subbands in enhanced coupling span only 6 bins
+ instead of 12. */
+ if (num_bands || band_sizes ) {
+ n_bands = n_subbands;
+ bnd_sz[0] = ecpl ? 6 : 12;
+ for (bnd = 0, subbnd = 1; subbnd < n_subbands; subbnd++) {
+ int subbnd_size = (ecpl && subbnd < 4) ? 6 : 12;
+ if (band_struct[subbnd-1]) {
+ n_bands--;
+ bnd_sz[bnd] += subbnd_size;
+ } else {
+ bnd_sz[++bnd] = subbnd_size;
+ }
+ }
+ }
+
+ /* set optional output params */
+ if (num_bands)
+ *num_bands = n_bands;
+ if (band_sizes)
+ memcpy(band_sizes, bnd_sz, n_bands);
+}
+
+/**
+ * Decode a single audio block from the AC-3 bitstream.
+ */
+static int decode_audio_block(AC3DecodeContext *s, int blk)
+{
+ int fbw_channels = s->fbw_channels;
+ int channel_mode = s->channel_mode;
+ int i, bnd, seg, ch;
+ int different_transforms;
+ int downmix_output;
+ int cpl_in_use;
+ GetBitContext *gbc = &s->gbc;
+ uint8_t bit_alloc_stages[AC3_MAX_CHANNELS];
+
+ memset(bit_alloc_stages, 0, AC3_MAX_CHANNELS);
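+    /* each entry counts how many of the 3 bit allocation stages must be re-run for that channel */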
+
+ /* block switch flags */
+ different_transforms = 0;
+ if (s->block_switch_syntax) {
+ for (ch = 1; ch <= fbw_channels; ch++) {
+ s->block_switch[ch] = get_bits1(gbc);
+ if(ch > 1 && s->block_switch[ch] != s->block_switch[1])
+ different_transforms = 1;
+ }
+ }
+
+ /* dithering flags */
+ if (s->dither_flag_syntax) {
+ for (ch = 1; ch <= fbw_channels; ch++) {
+ s->dither_flag[ch] = get_bits1(gbc);
+ }
+ }
+
+ /* dynamic range */
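+    /* one value normally, two for dual mono (one per program) */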
+ i = !(s->channel_mode);
+ do {
+ if(get_bits1(gbc)) {
+ s->dynamic_range[i] = ((dynamic_range_tab[get_bits(gbc, 8)]-1.0) *
+ s->avctx->drc_scale)+1.0;
+ } else if(blk == 0) {
+ s->dynamic_range[i] = 1.0f;
+ }
+ } while(i--);
+
+ /* spectral extension strategy */
+ if (s->eac3 && (!blk || get_bits1(gbc))) {
+ s->spx_in_use = get_bits1(gbc);
+ if (s->spx_in_use) {
+ int begf, endf;
+ int spx_end_subband;
+
+ /* determine which channels use spx */
+ if (s->channel_mode == AC3_CHMODE_MONO) {
+ s->channel_in_spx[1] = 1;
+ } else {
+ for (ch = 1; ch <= fbw_channels; ch++)
+ s->channel_in_spx[ch] = get_bits1(gbc);
+ }
+
+ s->spx_copy_start_freq = get_bits(gbc, 2) * 12 + 25;
+ begf = get_bits(gbc, 3);
+ endf = get_bits(gbc, 3);
+ s->spx_start_subband = begf < 6 ? begf+2 : 2*begf-3;
+ spx_end_subband = endf < 4 ? endf+5 : 2*endf+3;
+ if (s->spx_start_subband >= spx_end_subband) {
+ av_log(s->avctx, AV_LOG_ERROR, "invalid spectral extension range (%d >= %d)\n",
+ s->spx_start_subband, spx_end_subband);
+ return -1;
+ }
+ s->spx_start_freq = s->spx_start_subband * 12 + 25;
+ s->spx_end_freq = spx_end_subband * 12 + 25;
+ if (s->spx_copy_start_freq >= s->spx_start_freq) {
+ av_log(s->avctx, AV_LOG_ERROR, "invalid spectral extension copy start bin (%d >= %d)\n",
+ s->spx_copy_start_freq, s->spx_start_freq);
+ return -1;
+ }
+ decode_band_structure(gbc, blk, s->eac3, 0,
+ s->spx_start_subband, spx_end_subband,
+ ff_eac3_default_spx_band_struct,
+ &s->num_spx_bands, s->spx_band_sizes);
+ } else {
+ for (ch = 1; ch <= fbw_channels; ch++) {
+ s->channel_in_spx[ch] = 0;
+ s->first_spx_coords[ch] = 1;
+ }
+ }
+ }
+
+ /* spectral extension coordinates */
+ if (s->spx_in_use) {
+ for (ch = 1; ch <= fbw_channels; ch++) {
+ if (s->channel_in_spx[ch]) {
+ if (s->first_spx_coords[ch] || get_bits1(gbc)) {
+ int bin;
+ float spx_blend;
+ int master_spx_coord;
+ s->first_spx_coords[ch] = 0;
+ spx_blend = get_bits(gbc, 5) / 32.0f;
+ master_spx_coord = get_bits(gbc, 2) * 3;
+ bin = s->spx_start_freq;
+ for (bnd = 0; bnd < s->num_spx_bands; bnd++) {
+ int bandsize;
+ int spx_coord_exp, spx_coord_mant;
+ float nratio, sblend, nblend, spx_coord;
+
+ /* calculate blending factors */
+ bandsize = s->spx_band_sizes[bnd];
+ nratio = ((float)((bin + (bandsize >> 1))) / s->spx_end_freq) - spx_blend;
+ nratio = av_clipf(nratio, 0.0f, 1.0f);
+ nblend = sqrt( nratio);
+ sblend = sqrt(1.0f - nratio);
+ nblend *= 1.73205077648f; // scale noise to give unity variance
+ bin += bandsize;
+
+ /* decode spx coordinates */
+ spx_coord_exp = get_bits(gbc, 4);
+ spx_coord_mant = get_bits(gbc, 2);
+ if (spx_coord_exp == 15)
+ spx_coord = spx_coord_mant / 4.0f;
+ else
+ spx_coord = (spx_coord_mant + 4) / 8.0f;
+ spx_coord /= (float)(1 << (spx_coord_exp + master_spx_coord));
+
+ /* multiply noise and signal blending factors by spx coordinate */
+ s->spx_noise_blend [ch][bnd] = nblend * spx_coord * 32.0f;
+ s->spx_signal_blend[ch][bnd] = sblend * spx_coord * 32.0f;
+ }
+ }
+ } else {
+ s->first_spx_coords[ch] = 1;
+ }
+ }
+ }
+
+ /* coupling strategy */
+ if (s->eac3 ? s->cpl_strategy_exists[blk] : get_bits1(gbc)) {
+ memset(bit_alloc_stages, 3, AC3_MAX_CHANNELS);
+ if (!s->eac3)
+ s->cpl_in_use[blk] = get_bits1(gbc);
+ if (s->cpl_in_use[blk]) {
+ /* coupling in use */
+ int cpl_start_subband, cpl_end_subband;
+
+ if (channel_mode < AC3_CHMODE_STEREO) {
+ av_log(s->avctx, AV_LOG_ERROR, "coupling not allowed in mono or dual-mono\n");
+ return -1;
+ }
+
+ /* check for enhanced coupling */
+ if (s->eac3 && get_bits1(gbc)) {
+ /* TODO: parse enhanced coupling strategy info */
+ av_log_missing_feature(s->avctx, "Enhanced coupling", 1);
+ return -1;
+ }
+
+ /* determine which channels are coupled */
+ if (s->eac3 && s->channel_mode == AC3_CHMODE_STEREO) {
+ s->channel_in_cpl[1] = 1;
+ s->channel_in_cpl[2] = 1;
+ } else {
+ for (ch = 1; ch <= fbw_channels; ch++)
+ s->channel_in_cpl[ch] = get_bits1(gbc);
+ }
+
+ /* phase flags in use */
+ if (channel_mode == AC3_CHMODE_STEREO)
+ s->phase_flags_in_use = get_bits1(gbc);
+
+ /* coupling frequency range */
+ cpl_start_subband = get_bits(gbc, 4);
+ cpl_end_subband = s->spx_in_use ? s->spx_start_subband - 1 :
+ get_bits(gbc, 4) + 3;
+ if (cpl_start_subband >= cpl_end_subband) {
+ av_log(s->avctx, AV_LOG_ERROR, "invalid coupling range (%d >= %d)\n",
+ cpl_start_subband, cpl_end_subband);
+ return -1;
+ }
+ s->start_freq[CPL_CH] = cpl_start_subband * 12 + 37;
+ s->end_freq[CPL_CH] = cpl_end_subband * 12 + 37;
+
+ decode_band_structure(gbc, blk, s->eac3, 0, cpl_start_subband,
+ cpl_end_subband,
+ ff_eac3_default_cpl_band_struct,
+ &s->num_cpl_bands, s->cpl_band_sizes);
+ } else {
+ /* coupling not in use */
+ for (ch = 1; ch <= fbw_channels; ch++) {
+ s->channel_in_cpl[ch] = 0;
+ s->first_cpl_coords[ch] = 1;
+ }
+ s->first_cpl_leak = s->eac3;
+ s->phase_flags_in_use = 0;
+ }
+ } else if (!s->eac3) {
+ if(!blk) {
+ av_log(s->avctx, AV_LOG_ERROR, "new coupling strategy must be present in block 0\n");
+ return -1;
+ } else {
+ s->cpl_in_use[blk] = s->cpl_in_use[blk-1];
+ }
+ }
+ cpl_in_use = s->cpl_in_use[blk];
+
+ /* coupling coordinates */
+ if (cpl_in_use) {
+ int cpl_coords_exist = 0;
+
+ for (ch = 1; ch <= fbw_channels; ch++) {
+ if (s->channel_in_cpl[ch]) {
+ if ((s->eac3 && s->first_cpl_coords[ch]) || get_bits1(gbc)) {
+ int master_cpl_coord, cpl_coord_exp, cpl_coord_mant;
+ s->first_cpl_coords[ch] = 0;
+ cpl_coords_exist = 1;
+ master_cpl_coord = 3 * get_bits(gbc, 2);
+ for (bnd = 0; bnd < s->num_cpl_bands; bnd++) {
+ cpl_coord_exp = get_bits(gbc, 4);
+ cpl_coord_mant = get_bits(gbc, 4);
+ if (cpl_coord_exp == 15)
+ s->cpl_coords[ch][bnd] = cpl_coord_mant << 22;
+ else
+ s->cpl_coords[ch][bnd] = (cpl_coord_mant + 16) << 21;
+ s->cpl_coords[ch][bnd] >>= (cpl_coord_exp + master_cpl_coord);
+ }
+ } else if (!blk) {
+ av_log(s->avctx, AV_LOG_ERROR, "new coupling coordinates must be present in block 0\n");
+ return -1;
+ }
+ } else {
+ /* channel not in coupling */
+ s->first_cpl_coords[ch] = 1;
+ }
+ }
+ /* phase flags */
+ if (channel_mode == AC3_CHMODE_STEREO && cpl_coords_exist) {
+ for (bnd = 0; bnd < s->num_cpl_bands; bnd++) {
+ s->phase_flags[bnd] = s->phase_flags_in_use? get_bits1(gbc) : 0;
+ }
+ }
+ }
+
+ /* stereo rematrixing strategy and band structure */
+ if (channel_mode == AC3_CHMODE_STEREO) {
+ if ((s->eac3 && !blk) || get_bits1(gbc)) {
+ s->num_rematrixing_bands = 4;
+ if (cpl_in_use && s->start_freq[CPL_CH] <= 61) {
+ s->num_rematrixing_bands -= 1 + (s->start_freq[CPL_CH] == 37);
+ } else if (s->spx_in_use && s->spx_start_freq <= 61) {
+ s->num_rematrixing_bands -= 1;
+ }
+ for(bnd=0; bnd<s->num_rematrixing_bands; bnd++)
+ s->rematrixing_flags[bnd] = get_bits1(gbc);
+ } else if (!blk) {
+ av_log(s->avctx, AV_LOG_WARNING, "Warning: new rematrixing strategy not present in block 0\n");
+ s->num_rematrixing_bands = 0;
+ }
+ }
+
+ /* exponent strategies for each channel */
+ for (ch = !cpl_in_use; ch <= s->channels; ch++) {
+ if (!s->eac3)
+ s->exp_strategy[blk][ch] = get_bits(gbc, 2 - (ch == s->lfe_ch));
+ if(s->exp_strategy[blk][ch] != EXP_REUSE)
+ bit_alloc_stages[ch] = 3;
+ }
+
+ /* channel bandwidth */
+ for (ch = 1; ch <= fbw_channels; ch++) {
+ s->start_freq[ch] = 0;
+ if (s->exp_strategy[blk][ch] != EXP_REUSE) {
+ int group_size;
+ int prev = s->end_freq[ch];
+ if (s->channel_in_cpl[ch])
+ s->end_freq[ch] = s->start_freq[CPL_CH];
+ else if (s->channel_in_spx[ch])
+ s->end_freq[ch] = s->spx_start_freq;
+ else {
+ int bandwidth_code = get_bits(gbc, 6);
+ if (bandwidth_code > 60) {
+ av_log(s->avctx, AV_LOG_ERROR, "bandwidth code = %d > 60\n", bandwidth_code);
+ return -1;
+ }
+ s->end_freq[ch] = bandwidth_code * 3 + 73;
+ }
+ group_size = 3 << (s->exp_strategy[blk][ch] - 1);
+ s->num_exp_groups[ch] = (s->end_freq[ch]+group_size-4) / group_size;
+ if(blk > 0 && s->end_freq[ch] != prev)
+ memset(bit_alloc_stages, 3, AC3_MAX_CHANNELS);
+ }
+ }
+ if (cpl_in_use && s->exp_strategy[blk][CPL_CH] != EXP_REUSE) {
+ s->num_exp_groups[CPL_CH] = (s->end_freq[CPL_CH] - s->start_freq[CPL_CH]) /
+ (3 << (s->exp_strategy[blk][CPL_CH] - 1));
+ }
+
+ /* decode exponents for each channel */
+ for (ch = !cpl_in_use; ch <= s->channels; ch++) {
+ if (s->exp_strategy[blk][ch] != EXP_REUSE) {
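+            /* 4-bit absolute exponent, doubled for the coupling channel (ch == 0) */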
+ s->dexps[ch][0] = get_bits(gbc, 4) << !ch;
+ if (decode_exponents(gbc, s->exp_strategy[blk][ch],
+ s->num_exp_groups[ch], s->dexps[ch][0],
+ &s->dexps[ch][s->start_freq[ch]+!!ch])) {
+ av_log(s->avctx, AV_LOG_ERROR, "exponent out-of-range\n");
+ return -1;
+ }
+ if(ch != CPL_CH && ch != s->lfe_ch)
+ skip_bits(gbc, 2); /* skip gainrng */
+ }
+ }
+
+ /* bit allocation information */
+ if (s->bit_allocation_syntax) {
+ if (get_bits1(gbc)) {
+ s->bit_alloc_params.slow_decay = ff_ac3_slow_decay_tab[get_bits(gbc, 2)] >> s->bit_alloc_params.sr_shift;
+ s->bit_alloc_params.fast_decay = ff_ac3_fast_decay_tab[get_bits(gbc, 2)] >> s->bit_alloc_params.sr_shift;
+ s->bit_alloc_params.slow_gain = ff_ac3_slow_gain_tab[get_bits(gbc, 2)];
+ s->bit_alloc_params.db_per_bit = ff_ac3_db_per_bit_tab[get_bits(gbc, 2)];
+ s->bit_alloc_params.floor = ff_ac3_floor_tab[get_bits(gbc, 3)];
+ for(ch=!cpl_in_use; ch<=s->channels; ch++)
+ bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 2);
+ } else if (!blk) {
+ av_log(s->avctx, AV_LOG_ERROR, "new bit allocation info must be present in block 0\n");
+ return -1;
+ }
+ }
+
+ /* signal-to-noise ratio offsets and fast gains (signal-to-mask ratios) */
+ if(!s->eac3 || !blk){
+ if(s->snr_offset_strategy && get_bits1(gbc)) {
+ int snr = 0;
+ int csnr;
+ csnr = (get_bits(gbc, 6) - 15) << 4;
+ for (i = ch = !cpl_in_use; ch <= s->channels; ch++) {
+ /* snr offset */
+ if (ch == i || s->snr_offset_strategy == 2)
+ snr = (csnr + get_bits(gbc, 4)) << 2;
+ /* run at least last bit allocation stage if snr offset changes */
+ if(blk && s->snr_offset[ch] != snr) {
+ bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 1);
+ }
+ s->snr_offset[ch] = snr;
+
+ /* fast gain (normal AC-3 only) */
+ if (!s->eac3) {
+ int prev = s->fast_gain[ch];
+ s->fast_gain[ch] = ff_ac3_fast_gain_tab[get_bits(gbc, 3)];
+ /* run last 2 bit allocation stages if fast gain changes */
+ if(blk && prev != s->fast_gain[ch])
+ bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 2);
+ }
+ }
+ } else if (!s->eac3 && !blk) {
+ av_log(s->avctx, AV_LOG_ERROR, "new snr offsets must be present in block 0\n");
+ return -1;
+ }
+ }
+
+ /* fast gain (E-AC-3 only) */
+ if (s->fast_gain_syntax && get_bits1(gbc)) {
+ for (ch = !cpl_in_use; ch <= s->channels; ch++) {
+ int prev = s->fast_gain[ch];
+ s->fast_gain[ch] = ff_ac3_fast_gain_tab[get_bits(gbc, 3)];
+ /* run last 2 bit allocation stages if fast gain changes */
+ if(blk && prev != s->fast_gain[ch])
+ bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 2);
+ }
+ } else if (s->eac3 && !blk) {
+ for (ch = !cpl_in_use; ch <= s->channels; ch++)
+ s->fast_gain[ch] = ff_ac3_fast_gain_tab[4];
+ }
+
+ /* E-AC-3 to AC-3 converter SNR offset */
+ if (s->frame_type == EAC3_FRAME_TYPE_INDEPENDENT && get_bits1(gbc)) {
+ skip_bits(gbc, 10); // skip converter snr offset
+ }
+
+ /* coupling leak information */
+ if (cpl_in_use) {
+ if (s->first_cpl_leak || get_bits1(gbc)) {
+ int fl = get_bits(gbc, 3);
+ int sl = get_bits(gbc, 3);
+ /* run last 2 bit allocation stages for coupling channel if
+ coupling leak changes */
+ if(blk && (fl != s->bit_alloc_params.cpl_fast_leak ||
+ sl != s->bit_alloc_params.cpl_slow_leak)) {
+ bit_alloc_stages[CPL_CH] = FFMAX(bit_alloc_stages[CPL_CH], 2);
+ }
+ s->bit_alloc_params.cpl_fast_leak = fl;
+ s->bit_alloc_params.cpl_slow_leak = sl;
+ } else if (!s->eac3 && !blk) {
+ av_log(s->avctx, AV_LOG_ERROR, "new coupling leak info must be present in block 0\n");
+ return -1;
+ }
+ s->first_cpl_leak = 0;
+ }
+
+ /* delta bit allocation information */
+ if (s->dba_syntax && get_bits1(gbc)) {
+ /* delta bit allocation exists (strategy) */
+ for (ch = !cpl_in_use; ch <= fbw_channels; ch++) {
+ s->dba_mode[ch] = get_bits(gbc, 2);
+ if (s->dba_mode[ch] == DBA_RESERVED) {
+ av_log(s->avctx, AV_LOG_ERROR, "delta bit allocation strategy reserved\n");
+ return -1;
+ }
+ bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 2);
+ }
+ /* channel delta offset, len and bit allocation */
+ for (ch = !cpl_in_use; ch <= fbw_channels; ch++) {
+ if (s->dba_mode[ch] == DBA_NEW) {
+ s->dba_nsegs[ch] = get_bits(gbc, 3);
+ for (seg = 0; seg <= s->dba_nsegs[ch]; seg++) {
+ s->dba_offsets[ch][seg] = get_bits(gbc, 5);
+ s->dba_lengths[ch][seg] = get_bits(gbc, 4);
+ s->dba_values[ch][seg] = get_bits(gbc, 3);
+ }
+ /* run last 2 bit allocation stages if new dba values */
+ bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 2);
+ }
+ }
+ } else if(blk == 0) {
+ for(ch=0; ch<=s->channels; ch++) {
+ s->dba_mode[ch] = DBA_NONE;
+ }
+ }
+
+ /* Bit allocation */
+ for(ch=!cpl_in_use; ch<=s->channels; ch++) {
+ if(bit_alloc_stages[ch] > 2) {
+ /* Exponent mapping into PSD and PSD integration */
+ ff_ac3_bit_alloc_calc_psd(s->dexps[ch],
+ s->start_freq[ch], s->end_freq[ch],
+ s->psd[ch], s->band_psd[ch]);
+ }
+ if(bit_alloc_stages[ch] > 1) {
+ /* Compute excitation function, Compute masking curve, and
+ Apply delta bit allocation */
+ if (ff_ac3_bit_alloc_calc_mask(&s->bit_alloc_params, s->band_psd[ch],
+ s->start_freq[ch], s->end_freq[ch],
+ s->fast_gain[ch], (ch == s->lfe_ch),
+ s->dba_mode[ch], s->dba_nsegs[ch],
+ s->dba_offsets[ch], s->dba_lengths[ch],
+ s->dba_values[ch], s->mask[ch])) {
+ av_log(s->avctx, AV_LOG_ERROR, "error in bit allocation\n");
+ return -1;
+ }
+ }
+ if(bit_alloc_stages[ch] > 0) {
+ /* Compute bit allocation */
+ const uint8_t *bap_tab = s->channel_uses_aht[ch] ?
+ ff_eac3_hebap_tab : ff_ac3_bap_tab;
+ ff_ac3_bit_alloc_calc_bap(s->mask[ch], s->psd[ch],
+ s->start_freq[ch], s->end_freq[ch],
+ s->snr_offset[ch],
+ s->bit_alloc_params.floor,
+ bap_tab, s->bap[ch]);
+ }
+ }
+
+ /* unused dummy data */
+ if (s->skip_syntax && get_bits1(gbc)) {
+ int skipl = get_bits(gbc, 9);
+ while(skipl--)
+ skip_bits(gbc, 8);
+ }
+
+ /* unpack the transform coefficients
+ this also uncouples channels if coupling is in use. */
+ decode_transform_coeffs(s, blk);
+
+ /* TODO: generate enhanced coupling coordinates and uncouple */
+
+ /* recover coefficients if rematrixing is in use */
+ if(s->channel_mode == AC3_CHMODE_STEREO)
+ do_rematrixing(s);
+
+ /* apply scaling to coefficients (headroom, dynrng) */
+ for(ch=1; ch<=s->channels; ch++) {
+ float gain = s->mul_bias / 4194304.0f;
+ if(s->channel_mode == AC3_CHMODE_DUALMONO) {
+ gain *= s->dynamic_range[2-ch];
+ } else {
+ gain *= s->dynamic_range[0];
+ }
+ s->dsp.int32_to_float_fmul_scalar(s->transform_coeffs[ch], s->fixed_coeffs[ch], gain, 256);
+ }
+
+ /* apply spectral extension to high frequency bins */
+ if (s->spx_in_use)
+ ff_eac3_apply_spectral_extension(s);
+
+ /* downmix and MDCT. order depends on whether block switching is used for
+ any channel in this block. this is because coefficients for the long
+ and short transforms cannot be mixed. */
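+    /* no matrix downmix is needed when the only difference is dropping the LFE channel */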
+ downmix_output = s->channels != s->out_channels &&
+ !((s->output_mode & AC3_OUTPUT_LFEON) &&
+ s->fbw_channels == s->out_channels);
+ if(different_transforms) {
+ /* the delay samples have already been downmixed, so we upmix the delay
+ samples in order to reconstruct all channels before downmixing. */
+ if(s->downmixed) {
+ s->downmixed = 0;
+ ac3_upmix_delay(s);
+ }
+
+ do_imdct(s, s->channels);
+
+ if(downmix_output) {
+ s->dsp.ac3_downmix(s->output, s->downmix_coeffs, s->out_channels, s->fbw_channels, 256);
+ }
+ } else {
+ if(downmix_output) {
+ s->dsp.ac3_downmix(s->transform_coeffs+1, s->downmix_coeffs, s->out_channels, s->fbw_channels, 256);
+ }
+
+ if(downmix_output && !s->downmixed) {
+ s->downmixed = 1;
+ s->dsp.ac3_downmix(s->delay, s->downmix_coeffs, s->out_channels, s->fbw_channels, 128);
+ }
+
+ do_imdct(s, s->out_channels);
+ }
+
+ return 0;
+}
+
+/**
+ * Decode a single AC-3 frame.
+ */
+static int ac3_decode_frame(AVCodecContext * avctx, void *data, int *data_size,
+ const uint8_t *buf, int buf_size)
+{
+ AC3DecodeContext *s = avctx->priv_data;
+ int16_t *out_samples = (int16_t *)data;
+ int blk, ch, err;
+ const uint8_t *channel_map;
+ const float *output[AC3_MAX_CHANNELS];
+
+ /* initialize the GetBitContext with the start of valid AC-3 Frame */
+ if (s->input_buffer) {
+ /* copy input buffer to decoder context to avoid reading past the end
+ of the buffer, which can be caused by a damaged input stream. */
+ memcpy(s->input_buffer, buf, FFMIN(buf_size, AC3_FRAME_BUFFER_SIZE));
+ init_get_bits(&s->gbc, s->input_buffer, buf_size * 8);
+ } else {
+ init_get_bits(&s->gbc, buf, buf_size * 8);
+ }
+
+ /* parse the syncinfo */
+ *data_size = 0;
+ err = parse_frame_header(s);
+
+ /* check that reported frame size fits in input buffer */
+ if(!err && s->frame_size > buf_size) {
+ av_log(avctx, AV_LOG_ERROR, "incomplete frame\n");
+ err = AAC_AC3_PARSE_ERROR_FRAME_SIZE;
+ }
+
+ /* check for crc mismatch */
+ if(err != AAC_AC3_PARSE_ERROR_FRAME_SIZE && avctx->error_recognition >= FF_ER_CAREFUL) {
+ if(av_crc(av_crc_get_table(AV_CRC_16_ANSI), 0, &buf[2], s->frame_size-2)) {
+ av_log(avctx, AV_LOG_ERROR, "frame CRC mismatch\n");
+ err = AAC_AC3_PARSE_ERROR_CRC;
+ }
+ }
+
+ if(err && err != AAC_AC3_PARSE_ERROR_CRC) {
+ switch(err) {
+ case AAC_AC3_PARSE_ERROR_SYNC:
+ av_log(avctx, AV_LOG_ERROR, "frame sync error\n");
+ return -1;
+ case AAC_AC3_PARSE_ERROR_BSID:
+ av_log(avctx, AV_LOG_ERROR, "invalid bitstream id\n");
+ break;
+ case AAC_AC3_PARSE_ERROR_SAMPLE_RATE:
+ av_log(avctx, AV_LOG_ERROR, "invalid sample rate\n");
+ break;
+ case AAC_AC3_PARSE_ERROR_FRAME_SIZE:
+ av_log(avctx, AV_LOG_ERROR, "invalid frame size\n");
+ break;
+ case AAC_AC3_PARSE_ERROR_FRAME_TYPE:
+ /* skip frame if CRC is ok. otherwise use error concealment. */
+ /* TODO: add support for substreams and dependent frames */
+ if(s->frame_type == EAC3_FRAME_TYPE_DEPENDENT || s->substreamid) {
+ av_log(avctx, AV_LOG_ERROR, "unsupported frame type : skipping frame\n");
+ return s->frame_size;
+ } else {
+ av_log(avctx, AV_LOG_ERROR, "invalid frame type\n");
+ }
+ break;
+ default:
+ av_log(avctx, AV_LOG_ERROR, "invalid header\n");
+ break;
+ }
+ }
+
+ /* if frame is ok, set audio parameters */
+ if (!err) {
+ avctx->sample_rate = s->sample_rate;
+ avctx->bit_rate = s->bit_rate;
+
+ /* channel config */
+ s->out_channels = s->channels;
+ s->output_mode = s->channel_mode;
+ if(s->lfe_on)
+ s->output_mode |= AC3_OUTPUT_LFEON;
+ if (avctx->request_channels > 0 && avctx->request_channels <= 2 &&
+ avctx->request_channels < s->channels) {
+ s->out_channels = avctx->request_channels;
+ s->output_mode = avctx->request_channels == 1 ? AC3_CHMODE_MONO : AC3_CHMODE_STEREO;
+ s->channel_layout = ff_ac3_channel_layout_tab[s->output_mode];
+ }
+ avctx->channels = s->out_channels;
+ avctx->channel_layout = s->channel_layout;
+
+ /* set downmixing coefficients if needed */
+ if(s->channels != s->out_channels && !((s->output_mode & AC3_OUTPUT_LFEON) &&
+ s->fbw_channels == s->out_channels)) {
+ set_downmix_coeffs(s);
+ }
+ } else if (!s->out_channels) {
+ s->out_channels = avctx->channels;
+ if(s->out_channels < s->channels)
+ s->output_mode = s->out_channels == 1 ? AC3_CHMODE_MONO : AC3_CHMODE_STEREO;
+ }
+
+ /* decode the audio blocks */
+ channel_map = ff_ac3_dec_channel_map[s->output_mode & ~AC3_OUTPUT_LFEON][s->lfe_on];
+ for (ch = 0; ch < s->out_channels; ch++)
+ output[ch] = s->output[channel_map[ch]];
+ for (blk = 0; blk < s->num_blocks; blk++) {
+ if (!err && decode_audio_block(s, blk)) {
+ av_log(avctx, AV_LOG_ERROR, "error decoding the audio block\n");
+ err = 1;
+ }
+ s->dsp.float_to_int16_interleave(out_samples, output, 256, s->out_channels);
+ out_samples += 256 * s->out_channels;
+ }
+ *data_size = s->num_blocks * 256 * avctx->channels * sizeof (int16_t);
+ return s->frame_size;
+}
+
+/**
+ * Uninitialize the AC-3 decoder.
+ */
+static av_cold int ac3_decode_end(AVCodecContext *avctx)
+{
+ AC3DecodeContext *s = avctx->priv_data;
+ ff_mdct_end(&s->imdct_512);
+ ff_mdct_end(&s->imdct_256);
+
+ av_freep(&s->input_buffer);
+
+ return 0;
+}
+
+AVCodec ac3_decoder = {
+ /*.name = */"ac3",
+ /*.type = */CODEC_TYPE_AUDIO,
+ /*.id = */CODEC_ID_AC3,
+ /*.priv_data_size = */sizeof (AC3DecodeContext),
+ /*.init = */ac3_decode_init,
+ /*.encode = */NULL,
+ /*.close = */ac3_decode_end,
+ /*.decode = */ac3_decode_frame,
+ /*.capabilities = */0,
+ /*.next = */NULL,
+ /*.flush = */NULL,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name = */NULL_IF_CONFIG_SMALL("ATSC A/52A (AC-3)"),
+};
+
+#if CONFIG_EAC3_DECODER
+AVCodec eac3_decoder = {
+ /*.name = */"eac3",
+ /*.type = */CODEC_TYPE_AUDIO,
+ /*.id = */CODEC_ID_EAC3,
+ /*.priv_data_size = */sizeof (AC3DecodeContext),
+ /*.init = */ac3_decode_init,
+ /*.encode = */NULL,
+ /*.close = */ac3_decode_end,
+ /*.decode = */ac3_decode_frame,
+ /*.capabilities = */0,
+ /*.next = */NULL,
+ /*.flush = */NULL,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name = */NULL_IF_CONFIG_SMALL("ATSC A/52B (AC-3, E-AC-3)"),
+};
+#endif
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3dec.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3dec.h
new file mode 100644
index 000000000..e982d4aca
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3dec.h
@@ -0,0 +1,206 @@
+/*
+ * Common code between the AC-3 and E-AC-3 decoders
+ * Copyright (c) 2007 Bartlomiej Wolowiec <bartek.wolowiec@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/ac3dec.h
+ * Common code between the AC-3 and E-AC-3 decoders.
+ */
+
+#ifndef AVCODEC_AC3DEC_H
+#define AVCODEC_AC3DEC_H
+
+#include "libavutil/lfg.h"
+#include "ac3.h"
+#include "get_bits.h"
+#include "dsputil.h"
+
+/* override ac3.h to include coupling channel */
+#undef AC3_MAX_CHANNELS
+#define AC3_MAX_CHANNELS 7
+#define CPL_CH 0
+
+#define AC3_OUTPUT_LFEON 8
+
+#define AC3_MAX_COEFS 256
+#define AC3_BLOCK_SIZE 256
+#define MAX_BLOCKS 6
+#define SPX_MAX_BANDS 17
+
+typedef struct {
+ AVCodecContext *avctx; ///< parent context
+ GetBitContext gbc; ///< bitstream reader
+ uint8_t *input_buffer; ///< temp buffer to prevent overread
+
+///@defgroup bsi bit stream information
+///@{
+ int frame_type; ///< frame type (strmtyp)
+ int substreamid; ///< substream identification
+ int frame_size; ///< current frame size, in bytes
+ int bit_rate; ///< stream bit rate, in bits-per-second
+ int sample_rate; ///< sample frequency, in Hz
+ int num_blocks; ///< number of audio blocks
+ int channel_mode; ///< channel mode (acmod)
+ int channel_layout; ///< channel layout
+ int lfe_on; ///< lfe channel in use
+ int channel_map; ///< custom channel map
+ int center_mix_level; ///< Center mix level index
+ int surround_mix_level; ///< Surround mix level index
+ int eac3; ///< indicates if current frame is E-AC-3
+///@}
+
+///@defgroup audfrm frame syntax parameters
+ int snr_offset_strategy; ///< SNR offset strategy (snroffststr)
+ int block_switch_syntax; ///< block switch syntax enabled (blkswe)
+ int dither_flag_syntax; ///< dither flag syntax enabled (dithflage)
+ int bit_allocation_syntax; ///< bit allocation model syntax enabled (bamode)
+ int fast_gain_syntax; ///< fast gain codes enabled (frmfgaincode)
+ int dba_syntax; ///< delta bit allocation syntax enabled (dbaflde)
+ int skip_syntax; ///< skip field syntax enabled (skipflde)
+///@}
+
+///@defgroup cpl standard coupling
+ int cpl_in_use[MAX_BLOCKS]; ///< coupling in use (cplinu)
+ int cpl_strategy_exists[MAX_BLOCKS]; ///< coupling strategy exists (cplstre)
+ int channel_in_cpl[AC3_MAX_CHANNELS]; ///< channel in coupling (chincpl)
+ int phase_flags_in_use; ///< phase flags in use (phsflginu)
+ int phase_flags[18]; ///< phase flags (phsflg)
+ int num_cpl_bands; ///< number of coupling bands (ncplbnd)
+ uint8_t cpl_band_sizes[18]; ///< number of coeffs in each coupling band
+ int firstchincpl; ///< first channel in coupling
+ int first_cpl_coords[AC3_MAX_CHANNELS]; ///< first coupling coordinates states (firstcplcos)
+ int cpl_coords[AC3_MAX_CHANNELS][18]; ///< coupling coordinates (cplco)
+///@}
+
+///@defgroup spx spectral extension
+///@{
+ int spx_in_use; ///< spectral extension in use (spxinu)
+ uint8_t channel_in_spx[AC3_MAX_CHANNELS]; ///< channel in spectral extension (chinspx)
+ int8_t spx_atten_code[AC3_MAX_CHANNELS]; ///< spx attenuation code (spxattencod)
+ int spx_start_subband; ///< spx beginning frequency band (spxbegf)
+ int spx_start_freq; ///< spx start frequency bin
+ int spx_end_freq; ///< spx end frequency bin
+ int spx_copy_start_freq; ///< spx starting frequency for copying (copystartmant)
+ int num_spx_bands; ///< number of spx bands (nspxbnds)
+ uint8_t spx_band_struct[SPX_MAX_BANDS]; ///< spectral extension band structure (spxbndstrc)
+ uint8_t spx_band_sizes[SPX_MAX_BANDS]; ///< number of bins in each band (spxbndsztab)
+ uint8_t first_spx_coords[AC3_MAX_CHANNELS]; ///< first spx coordinates states (firstspxcos)
+ float spx_noise_blend[AC3_MAX_CHANNELS][SPX_MAX_BANDS]; ///< spx noise blending factor (nblendfact)
+ float spx_signal_blend[AC3_MAX_CHANNELS][SPX_MAX_BANDS];///< spx signal blending factor (sblendfact)
+///@}
+
+///@defgroup aht adaptive hybrid transform
+ int channel_uses_aht[AC3_MAX_CHANNELS]; ///< channel AHT in use (chahtinu)
+ int pre_mantissa[AC3_MAX_CHANNELS][AC3_MAX_COEFS][MAX_BLOCKS]; ///< pre-IDCT mantissas
+///@}
+
+///@defgroup channel channel
+ int fbw_channels; ///< number of full-bandwidth channels
+ int channels; ///< number of total channels
+ int lfe_ch; ///< index of LFE channel
+ float downmix_coeffs[AC3_MAX_CHANNELS][2]; ///< stereo downmix coefficients
+ int downmixed; ///< indicates if coeffs are currently downmixed
+ int output_mode; ///< output channel configuration
+ int out_channels; ///< number of output channels
+///@}
+
+///@defgroup dynrng dynamic range
+ float dynamic_range[2]; ///< dynamic range
+///@}
+
+///@defgroup bandwidth bandwidth
+ int start_freq[AC3_MAX_CHANNELS]; ///< start frequency bin (strtmant)
+ int end_freq[AC3_MAX_CHANNELS]; ///< end frequency bin (endmant)
+///@}
+
+///@defgroup rematrixing rematrixing
+ int num_rematrixing_bands; ///< number of rematrixing bands (nrematbnd)
+ int rematrixing_flags[4]; ///< rematrixing flags (rematflg)
+///@}
+
+///@defgroup exponents exponents
+ int num_exp_groups[AC3_MAX_CHANNELS]; ///< Number of exponent groups (nexpgrp)
+ int8_t dexps[AC3_MAX_CHANNELS][AC3_MAX_COEFS]; ///< decoded exponents
+ int exp_strategy[MAX_BLOCKS][AC3_MAX_CHANNELS]; ///< exponent strategies (expstr)
+///@}
+
+///@defgroup bitalloc bit allocation
+ AC3BitAllocParameters bit_alloc_params; ///< bit allocation parameters
+ int first_cpl_leak; ///< first coupling leak state (firstcplleak)
+ int snr_offset[AC3_MAX_CHANNELS]; ///< signal-to-noise ratio offsets (snroffst)
+ int fast_gain[AC3_MAX_CHANNELS]; ///< fast gain values/SMR's (fgain)
+ uint8_t bap[AC3_MAX_CHANNELS][AC3_MAX_COEFS]; ///< bit allocation pointers
+ int16_t psd[AC3_MAX_CHANNELS][AC3_MAX_COEFS]; ///< scaled exponents
+ int16_t band_psd[AC3_MAX_CHANNELS][50]; ///< interpolated exponents
+ int16_t mask[AC3_MAX_CHANNELS][50]; ///< masking curve values
+ int dba_mode[AC3_MAX_CHANNELS]; ///< delta bit allocation mode
+ int dba_nsegs[AC3_MAX_CHANNELS]; ///< number of delta segments
+ uint8_t dba_offsets[AC3_MAX_CHANNELS][8]; ///< delta segment offsets
+ uint8_t dba_lengths[AC3_MAX_CHANNELS][8]; ///< delta segment lengths
+ uint8_t dba_values[AC3_MAX_CHANNELS][8]; ///< delta values for each segment
+///@}
+
+///@defgroup dithering zero-mantissa dithering
+ int dither_flag[AC3_MAX_CHANNELS]; ///< dither flags (dithflg)
+ AVLFG dith_state; ///< for dither generation
+///@}
+
+///@defgroup imdct IMDCT
+ int block_switch[AC3_MAX_CHANNELS]; ///< block switch flags (blksw)
+ FFTContext imdct_512; ///< for 512 sample IMDCT
+ FFTContext imdct_256; ///< for 256 sample IMDCT
+///@}
+
+///@defgroup opt optimization
+ DSPContext dsp; ///< for optimization
+ float add_bias; ///< offset for float_to_int16 conversion
+ float mul_bias; ///< scaling for float_to_int16 conversion
+///@}
+
+///@defgroup arrays aligned arrays
+    DECLARE_ALIGNED_16(int, fixed_coeffs)[AC3_MAX_CHANNELS][AC3_MAX_COEFS];       ///< fixed-point transform coefficients
+ DECLARE_ALIGNED_16(float, transform_coeffs)[AC3_MAX_CHANNELS][AC3_MAX_COEFS]; ///< transform coefficients
+ DECLARE_ALIGNED_16(float, delay)[AC3_MAX_CHANNELS][AC3_BLOCK_SIZE]; ///< delay - added to the next block
+ DECLARE_ALIGNED_16(float, window)[AC3_BLOCK_SIZE]; ///< window coefficients
+ DECLARE_ALIGNED_16(float, tmp_output)[AC3_BLOCK_SIZE]; ///< temporary storage for output before windowing
+ DECLARE_ALIGNED_16(float, output)[AC3_MAX_CHANNELS][AC3_BLOCK_SIZE]; ///< output after imdct transform and windowing
+///@}
+} AC3DecodeContext;
+
+/**
+ * Parse the E-AC-3 frame header.
+ * This parses both the bit stream info and audio frame header.
+ */
+int ff_eac3_parse_header(AC3DecodeContext *s);
+
+/**
+ * Decode mantissas in a single channel for the entire frame.
+ * This is used when AHT mode is enabled.
+ */
+void ff_eac3_decode_transform_coeffs_aht_ch(AC3DecodeContext *s, int ch);
+
+/**
+ * Apply spectral extension to each channel by copying lower frequency
+ * coefficients to higher frequency bins and applying side information to
+ * approximate the original high frequency signal.
+ */
+void ff_eac3_apply_spectral_extension(AC3DecodeContext *s);
+
+#endif /* AVCODEC_AC3DEC_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3dec_data.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3dec_data.c
new file mode 100644
index 000000000..57bed8de6
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3dec_data.c
@@ -0,0 +1,111 @@
+/*
+ * AC-3 and E-AC-3 decoder tables
+ * Copyright (c) 2007 Bartlomiej Wolowiec <bartek.wolowiec@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/ac3dec_data.c
+ * Tables taken directly from the AC-3 spec.
+ */
+
+#include "ac3dec_data.h"
+#include "ac3.h"
+
+/**
+ * Table used to ungroup 3 values stored in 5 bits.
+ * Used by bap=1 mantissas and GAQ.
+ * ff_ac3_ungroup_3_in_5_bits_tab[i] = { i/9, (i%9)/3, (i%9)%3 }
+ */
+const uint8_t ff_ac3_ungroup_3_in_5_bits_tab[32][3] = {
+ { 0, 0, 0 }, { 0, 0, 1 }, { 0, 0, 2 }, { 0, 1, 0 },
+ { 0, 1, 1 }, { 0, 1, 2 }, { 0, 2, 0 }, { 0, 2, 1 },
+ { 0, 2, 2 }, { 1, 0, 0 }, { 1, 0, 1 }, { 1, 0, 2 },
+ { 1, 1, 0 }, { 1, 1, 1 }, { 1, 1, 2 }, { 1, 2, 0 },
+ { 1, 2, 1 }, { 1, 2, 2 }, { 2, 0, 0 }, { 2, 0, 1 },
+ { 2, 0, 2 }, { 2, 1, 0 }, { 2, 1, 1 }, { 2, 1, 2 },
+ { 2, 2, 0 }, { 2, 2, 1 }, { 2, 2, 2 }, { 3, 0, 0 },
+ { 3, 0, 1 }, { 3, 0, 2 }, { 3, 1, 0 }, { 3, 1, 1 }
+};
+
+/**
+ * Table of bin locations for rematrixing bands
+ * reference: Section 7.5.2 Rematrixing : Frequency Band Definitions
+ */
+const uint8_t ff_ac3_rematrix_band_tab[5] = { 13, 25, 37, 61, 253 };
+
+const uint8_t ff_eac3_hebap_tab[64] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 8,
+ 8, 8, 9, 9, 9, 10, 10, 10, 10, 11,
+ 11, 11, 11, 12, 12, 12, 12, 13, 13, 13,
+ 13, 14, 14, 14, 14, 15, 15, 15, 15, 16,
+ 16, 16, 16, 17, 17, 17, 17, 18, 18, 18,
+ 18, 18, 18, 18, 18, 19, 19, 19, 19, 19,
+ 19, 19, 19, 19,
+};
+
+/**
+ * Table E2.16 Default Coupling Banding Structure
+ */
+const uint8_t ff_eac3_default_cpl_band_struct[18] =
+{ 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1 };
+
+/**
+ * Table E2.15 Default Spectral Extension Banding Structure
+ */
+const uint8_t ff_eac3_default_spx_band_struct[17] =
+{ 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 };
+
+/**
+ * Table E.25: Spectral Extension Attenuation Table
+ * ff_eac3_spx_atten_tab[code][bin]=pow(2.0,(bin+1)*(code+1)/-15.0);
+ */
+const float ff_eac3_spx_atten_tab[32][3] = {
+ { 0.954841603910416503f, 0.911722488558216804f, 0.870550563296124125f },
+ { 0.911722488558216804f, 0.831237896142787758f, 0.757858283255198995f },
+ { 0.870550563296124125f, 0.757858283255198995f, 0.659753955386447100f },
+ { 0.831237896142787758f, 0.690956439983888004f, 0.574349177498517438f },
+ { 0.793700525984099792f, 0.629960524947436595f, 0.500000000000000000f },
+ { 0.757858283255198995f, 0.574349177498517438f, 0.435275281648062062f },
+ { 0.723634618720189082f, 0.523647061410313364f, 0.378929141627599553f },
+ { 0.690956439983888004f, 0.477420801955208307f, 0.329876977693223550f },
+ { 0.659753955386447100f, 0.435275281648062062f, 0.287174588749258719f },
+ { 0.629960524947436595f, 0.396850262992049896f, 0.250000000000000000f },
+ { 0.601512518041058319f, 0.361817309360094541f, 0.217637640824031003f },
+ { 0.574349177498517438f, 0.329876977693223550f, 0.189464570813799776f },
+ { 0.548412489847312945f, 0.300756259020529160f, 0.164938488846611775f },
+ { 0.523647061410313364f, 0.274206244923656473f, 0.143587294374629387f },
+ { 0.500000000000000000f, 0.250000000000000000f, 0.125000000000000000f },
+ { 0.477420801955208307f, 0.227930622139554201f, 0.108818820412015502f },
+ { 0.455861244279108402f, 0.207809474035696939f, 0.094732285406899888f },
+ { 0.435275281648062062f, 0.189464570813799776f, 0.082469244423305887f },
+ { 0.415618948071393879f, 0.172739109995972029f, 0.071793647187314694f },
+ { 0.396850262992049896f, 0.157490131236859149f, 0.062500000000000000f },
+ { 0.378929141627599553f, 0.143587294374629387f, 0.054409410206007751f },
+ { 0.361817309360094541f, 0.130911765352578369f, 0.047366142703449930f },
+ { 0.345478219991944002f, 0.119355200488802049f, 0.041234622211652958f },
+ { 0.329876977693223550f, 0.108818820412015502f, 0.035896823593657347f },
+ { 0.314980262473718298f, 0.099212565748012460f, 0.031250000000000000f },
+ { 0.300756259020529160f, 0.090454327340023621f, 0.027204705103003875f },
+ { 0.287174588749258719f, 0.082469244423305887f, 0.023683071351724965f },
+ { 0.274206244923656473f, 0.075189064755132290f, 0.020617311105826479f },
+ { 0.261823530705156682f, 0.068551561230914118f, 0.017948411796828673f },
+ { 0.250000000000000000f, 0.062500000000000000f, 0.015625000000000000f },
+ { 0.238710400977604098f, 0.056982655534888536f, 0.013602352551501938f },
+ { 0.227930622139554201f, 0.051952368508924235f, 0.011841535675862483f }
+};
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3dec_data.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3dec_data.h
new file mode 100644
index 000000000..877ea0bde
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3dec_data.h
@@ -0,0 +1,35 @@
+/*
+ * AC-3 and E-AC-3 decoder tables
+ * Copyright (c) 2007 Bartlomiej Wolowiec <bartek.wolowiec@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_AC3DEC_DATA_H
+#define AVCODEC_AC3DEC_DATA_H
+
+#include <stdint.h>
+
+extern const uint8_t ff_ac3_ungroup_3_in_5_bits_tab[32][3];
+extern const uint8_t ff_ac3_rematrix_band_tab[5];
+
+extern const uint8_t ff_eac3_hebap_tab[64];
+extern const uint8_t ff_eac3_default_cpl_band_struct[18];
+extern const uint8_t ff_eac3_default_spx_band_struct[17];
+extern const float ff_eac3_spx_atten_tab[32][3];
+
+#endif /* AVCODEC_AC3DEC_DATA_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3tab.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3tab.c
new file mode 100644
index 000000000..0ee330589
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3tab.c
@@ -0,0 +1,305 @@
+/*
+ * AC-3 tables
+ * copyright (c) 2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/ac3tab.c
+ * tables taken directly from the AC-3 spec.
+ */
+
+#include "avcodec.h"
+#include "ac3tab.h"
+
+/**
+ * Possible frame sizes.
+ * from ATSC A/52 Table 5.18 Frame Size Code Table.
+ */
+const uint16_t ff_ac3_frame_size_tab[38][3] = {
+ { 64, 69, 96 },
+ { 64, 70, 96 },
+ { 80, 87, 120 },
+ { 80, 88, 120 },
+ { 96, 104, 144 },
+ { 96, 105, 144 },
+ { 112, 121, 168 },
+ { 112, 122, 168 },
+ { 128, 139, 192 },
+ { 128, 140, 192 },
+ { 160, 174, 240 },
+ { 160, 175, 240 },
+ { 192, 208, 288 },
+ { 192, 209, 288 },
+ { 224, 243, 336 },
+ { 224, 244, 336 },
+ { 256, 278, 384 },
+ { 256, 279, 384 },
+ { 320, 348, 480 },
+ { 320, 349, 480 },
+ { 384, 417, 576 },
+ { 384, 418, 576 },
+ { 448, 487, 672 },
+ { 448, 488, 672 },
+ { 512, 557, 768 },
+ { 512, 558, 768 },
+ { 640, 696, 960 },
+ { 640, 697, 960 },
+ { 768, 835, 1152 },
+ { 768, 836, 1152 },
+ { 896, 975, 1344 },
+ { 896, 976, 1344 },
+ { 1024, 1114, 1536 },
+ { 1024, 1115, 1536 },
+ { 1152, 1253, 1728 },
+ { 1152, 1254, 1728 },
+ { 1280, 1393, 1920 },
+ { 1280, 1394, 1920 },
+};
+
+/**
+ * Maps audio coding mode (acmod) to number of full-bandwidth channels.
+ * from ATSC A/52 Table 5.8 Audio Coding Mode
+ */
+const uint8_t ff_ac3_channels_tab[8] = {
+ 2, 1, 2, 3, 3, 4, 4, 5
+};
+
+/**
+ * Maps audio coding mode (acmod) to channel layout mask.
+ */
+const uint16_t ff_ac3_channel_layout_tab[8] = {
+ CH_LAYOUT_STEREO,
+ CH_LAYOUT_MONO,
+ CH_LAYOUT_STEREO,
+ CH_LAYOUT_SURROUND,
+ CH_LAYOUT_2_1,
+ CH_LAYOUT_4POINT0,
+ CH_LAYOUT_2_2,
+ CH_LAYOUT_5POINT0
+};
+
+#define COMMON_CHANNEL_MAP \
+ { { 0, 1, }, { 0, 1, 2, } },\
+ { { 0, }, { 0, 1, } },\
+ { { 0, 1, }, { 0, 1, 2, } },\
+ { { 0, 2, 1, }, { 0, 2, 1, 3, } },\
+ { { 0, 1, 2, }, { 0, 1, 3, 2, } },\
+ { { 0, 2, 1, 3, }, { 0, 2, 1, 4, 3, } },
+
+/**
+ * Table to remap channels from SMPTE order to AC-3 order.
+ * [channel_mode][lfe][ch]
+ */
+const uint8_t ff_ac3_enc_channel_map[8][2][6] = {
+ COMMON_CHANNEL_MAP
+ { { 0, 1, 2, 3, }, { 0, 1, 3, 4, 2, } },
+ { { 0, 2, 1, 3, 4, }, { 0, 2, 1, 4, 5, 3 } },
+};
+
+/**
+ * Table to remap channels from AC-3 order to SMPTE order.
+ * [channel_mode][lfe][ch]
+ */
+const uint8_t ff_ac3_dec_channel_map[8][2][6] = {
+ COMMON_CHANNEL_MAP
+ { { 0, 1, 2, 3, }, { 0, 1, 4, 2, 3, } },
+ { { 0, 2, 1, 3, 4, }, { 0, 2, 1, 5, 3, 4 } },
+};
+
+/* possible frequencies */
+const uint16_t ff_ac3_sample_rate_tab[3] = { 48000, 44100, 32000 };
+
+/* possible bitrates */
+const uint16_t ff_ac3_bitrate_tab[19] = {
+ 32, 40, 48, 56, 64, 80, 96, 112, 128,
+ 160, 192, 224, 256, 320, 384, 448, 512, 576, 640
+};
+
+/* AC-3 MDCT window */
+const int16_t ff_ac3_window[256] = {
+ 4, 7, 12, 16, 21, 28, 34, 42,
+ 51, 61, 72, 84, 97, 111, 127, 145,
+ 164, 184, 207, 231, 257, 285, 315, 347,
+ 382, 419, 458, 500, 544, 591, 641, 694,
+ 750, 810, 872, 937, 1007, 1079, 1155, 1235,
+ 1318, 1406, 1497, 1593, 1692, 1796, 1903, 2016,
+ 2132, 2253, 2379, 2509, 2644, 2783, 2927, 3076,
+ 3230, 3389, 3552, 3721, 3894, 4072, 4255, 4444,
+ 4637, 4835, 5038, 5246, 5459, 5677, 5899, 6127,
+ 6359, 6596, 6837, 7083, 7334, 7589, 7848, 8112,
+ 8380, 8652, 8927, 9207, 9491, 9778,10069,10363,
+10660,10960,11264,11570,11879,12190,12504,12820,
+13138,13458,13780,14103,14427,14753,15079,15407,
+15735,16063,16392,16720,17049,17377,17705,18032,
+18358,18683,19007,19330,19651,19970,20287,20602,
+20914,21225,21532,21837,22139,22438,22733,23025,
+23314,23599,23880,24157,24430,24699,24964,25225,
+25481,25732,25979,26221,26459,26691,26919,27142,
+27359,27572,27780,27983,28180,28373,28560,28742,
+28919,29091,29258,29420,29577,29729,29876,30018,
+30155,30288,30415,30538,30657,30771,30880,30985,
+31086,31182,31274,31363,31447,31528,31605,31678,
+31747,31814,31877,31936,31993,32046,32097,32145,
+32190,32232,32272,32310,32345,32378,32409,32438,
+32465,32490,32513,32535,32556,32574,32592,32608,
+32623,32636,32649,32661,32671,32681,32690,32698,
+32705,32712,32718,32724,32729,32733,32737,32741,
+32744,32747,32750,32752,32754,32756,32757,32759,
+32760,32761,32762,32763,32764,32764,32765,32765,
+32766,32766,32766,32766,32767,32767,32767,32767,
+32767,32767,32767,32767,32767,32767,32767,32767,
+32767,32767,32767,32767,32767,32767,32767,32767,
+};
+
+const uint8_t ff_ac3_log_add_tab[260]= {
+0x40,0x3f,0x3e,0x3d,0x3c,0x3b,0x3a,0x39,0x38,0x37,
+0x36,0x35,0x34,0x34,0x33,0x32,0x31,0x30,0x2f,0x2f,
+0x2e,0x2d,0x2c,0x2c,0x2b,0x2a,0x29,0x29,0x28,0x27,
+0x26,0x26,0x25,0x24,0x24,0x23,0x23,0x22,0x21,0x21,
+0x20,0x20,0x1f,0x1e,0x1e,0x1d,0x1d,0x1c,0x1c,0x1b,
+0x1b,0x1a,0x1a,0x19,0x19,0x18,0x18,0x17,0x17,0x16,
+0x16,0x15,0x15,0x15,0x14,0x14,0x13,0x13,0x13,0x12,
+0x12,0x12,0x11,0x11,0x11,0x10,0x10,0x10,0x0f,0x0f,
+0x0f,0x0e,0x0e,0x0e,0x0d,0x0d,0x0d,0x0d,0x0c,0x0c,
+0x0c,0x0c,0x0b,0x0b,0x0b,0x0b,0x0a,0x0a,0x0a,0x0a,
+0x0a,0x09,0x09,0x09,0x09,0x09,0x08,0x08,0x08,0x08,
+0x08,0x08,0x07,0x07,0x07,0x07,0x07,0x07,0x06,0x06,
+0x06,0x06,0x06,0x06,0x06,0x06,0x05,0x05,0x05,0x05,
+0x05,0x05,0x05,0x05,0x04,0x04,0x04,0x04,0x04,0x04,
+0x04,0x04,0x04,0x04,0x04,0x03,0x03,0x03,0x03,0x03,
+0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x02,
+0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,
+0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x01,0x01,
+0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,
+0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,
+0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+};
+
+const uint16_t ff_ac3_hearing_threshold_tab[50][3]= {
+{ 0x04d0,0x04f0,0x0580 },
+{ 0x04d0,0x04f0,0x0580 },
+{ 0x0440,0x0460,0x04b0 },
+{ 0x0400,0x0410,0x0450 },
+{ 0x03e0,0x03e0,0x0420 },
+{ 0x03c0,0x03d0,0x03f0 },
+{ 0x03b0,0x03c0,0x03e0 },
+{ 0x03b0,0x03b0,0x03d0 },
+{ 0x03a0,0x03b0,0x03c0 },
+{ 0x03a0,0x03a0,0x03b0 },
+{ 0x03a0,0x03a0,0x03b0 },
+{ 0x03a0,0x03a0,0x03b0 },
+{ 0x03a0,0x03a0,0x03a0 },
+{ 0x0390,0x03a0,0x03a0 },
+{ 0x0390,0x0390,0x03a0 },
+{ 0x0390,0x0390,0x03a0 },
+{ 0x0380,0x0390,0x03a0 },
+{ 0x0380,0x0380,0x03a0 },
+{ 0x0370,0x0380,0x03a0 },
+{ 0x0370,0x0380,0x03a0 },
+{ 0x0360,0x0370,0x0390 },
+{ 0x0360,0x0370,0x0390 },
+{ 0x0350,0x0360,0x0390 },
+{ 0x0350,0x0360,0x0390 },
+{ 0x0340,0x0350,0x0380 },
+{ 0x0340,0x0350,0x0380 },
+{ 0x0330,0x0340,0x0380 },
+{ 0x0320,0x0340,0x0370 },
+{ 0x0310,0x0320,0x0360 },
+{ 0x0300,0x0310,0x0350 },
+{ 0x02f0,0x0300,0x0340 },
+{ 0x02f0,0x02f0,0x0330 },
+{ 0x02f0,0x02f0,0x0320 },
+{ 0x02f0,0x02f0,0x0310 },
+{ 0x0300,0x02f0,0x0300 },
+{ 0x0310,0x0300,0x02f0 },
+{ 0x0340,0x0320,0x02f0 },
+{ 0x0390,0x0350,0x02f0 },
+{ 0x03e0,0x0390,0x0300 },
+{ 0x0420,0x03e0,0x0310 },
+{ 0x0460,0x0420,0x0330 },
+{ 0x0490,0x0450,0x0350 },
+{ 0x04a0,0x04a0,0x03c0 },
+{ 0x0460,0x0490,0x0410 },
+{ 0x0440,0x0460,0x0470 },
+{ 0x0440,0x0440,0x04a0 },
+{ 0x0520,0x0480,0x0460 },
+{ 0x0800,0x0630,0x0440 },
+{ 0x0840,0x0840,0x0450 },
+{ 0x0840,0x0840,0x04e0 },
+};
+
+const uint8_t ff_ac3_bap_tab[64]= {
+ 0, 1, 1, 1, 1, 1, 2, 2, 3, 3,
+ 3, 4, 4, 5, 5, 6, 6, 6, 6, 7,
+ 7, 7, 7, 8, 8, 8, 8, 9, 9, 9,
+ 9, 10, 10, 10, 10, 11, 11, 11, 11, 12,
+ 12, 12, 12, 13, 13, 13, 13, 14, 14, 14,
+ 14, 14, 14, 14, 14, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15,
+};
+
+const uint8_t ff_ac3_slow_decay_tab[4]={
+ 0x0f, 0x11, 0x13, 0x15,
+};
+
+const uint8_t ff_ac3_fast_decay_tab[4]={
+ 0x3f, 0x53, 0x67, 0x7b,
+};
+
+const uint16_t ff_ac3_slow_gain_tab[4]= {
+ 0x540, 0x4d8, 0x478, 0x410,
+};
+
+const uint16_t ff_ac3_db_per_bit_tab[4]= {
+ 0x000, 0x700, 0x900, 0xb00,
+};
+
+const int16_t ff_ac3_floor_tab[8]= {
+ 0x2f0, 0x2b0, 0x270, 0x230, 0x1f0, 0x170, 0x0f0, 0xf800,
+};
+
+const uint16_t ff_ac3_fast_gain_tab[8]= {
+ 0x080, 0x100, 0x180, 0x200, 0x280, 0x300, 0x380, 0x400,
+};
+
+const uint8_t ff_ac3_critical_band_size_tab[50]={
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 3,
+ 3, 6, 6, 6, 6, 6, 6, 12, 12, 12, 12, 24, 24, 24, 24, 24
+};
+/**
+ * Default channel map for a dependent substream defined by acmod
+ */
+const uint16_t ff_eac3_default_chmap[8] = {
+ AC3_CHMAP_L | AC3_CHMAP_R, // FIXME Ch1+Ch2
+ AC3_CHMAP_C,
+ AC3_CHMAP_L | AC3_CHMAP_R,
+ AC3_CHMAP_L | AC3_CHMAP_C | AC3_CHMAP_R,
+ AC3_CHMAP_L | AC3_CHMAP_R | AC3_CHMAP_C_SUR,
+ AC3_CHMAP_L | AC3_CHMAP_C | AC3_CHMAP_R | AC3_CHMAP_C_SUR,
+ AC3_CHMAP_L | AC3_CHMAP_R | AC3_CHMAP_L_SUR | AC3_CHMAP_R_SUR,
+ AC3_CHMAP_L | AC3_CHMAP_C | AC3_CHMAP_R | AC3_CHMAP_L_SUR | AC3_CHMAP_R_SUR
+};
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3tab.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3tab.h
new file mode 100644
index 000000000..d3880d0a2
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ac3tab.h
@@ -0,0 +1,62 @@
+/*
+ * AC-3 tables
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_AC3TAB_H
+#define AVCODEC_AC3TAB_H
+
+#include "libavutil/common.h"
+
+extern const uint16_t ff_ac3_frame_size_tab[38][3];
+extern const uint8_t ff_ac3_channels_tab[8];
+extern const uint16_t ff_ac3_channel_layout_tab[8];
+extern const uint8_t ff_ac3_enc_channel_map[8][2][6];
+extern const uint8_t ff_ac3_dec_channel_map[8][2][6];
+extern const uint16_t ff_ac3_sample_rate_tab[3];
+extern const uint16_t ff_ac3_bitrate_tab[19];
+extern const int16_t ff_ac3_window[256];
+extern const uint8_t ff_ac3_log_add_tab[260];
+extern const uint16_t ff_ac3_hearing_threshold_tab[50][3];
+extern const uint8_t ff_ac3_bap_tab[64];
+extern const uint8_t ff_ac3_slow_decay_tab[4];
+extern const uint8_t ff_ac3_fast_decay_tab[4];
+extern const uint16_t ff_ac3_slow_gain_tab[4];
+extern const uint16_t ff_ac3_db_per_bit_tab[4];
+extern const int16_t ff_ac3_floor_tab[8];
+extern const uint16_t ff_ac3_fast_gain_tab[8];
+extern const uint8_t ff_ac3_critical_band_size_tab[50];
+extern const uint16_t ff_eac3_default_chmap[8];
+
+/** Custom channel map locations bitmask
+ * Other channels described in documentation:
+ * Lc/Rc pair, Lrs/Rrs pair, Ts, Lsd/Rsd pair,
+ * Lw/Rw pair, Lvh/Rvh pair, Cvh, Reserved, LFE2
+ */
+enum CustomChannelMapLocation{
+ AC3_CHMAP_L= 1<<(15-0),
+ AC3_CHMAP_C= 1<<(15-1),
+ AC3_CHMAP_R= 1<<(15-2),
+ AC3_CHMAP_L_SUR= 1<<(15-3),
+ AC3_CHMAP_R_SUR = 1<<(15-4),
+ AC3_CHMAP_C_SUR= 1<<(15-7),
+ AC3_CHMAP_LFE = 1<<(15-15)
+};
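[Editor's note] A small illustration of how this mask is meant to be read; the helper below is hypothetical and not part of the patch. A decoder can test whether a particular location is present in a custom channel map value such as the entries of ff_eac3_default_chmap.

    #include <stdint.h>

    /* Hypothetical helper: non-zero when the given location bit
     * (e.g. AC3_CHMAP_C_SUR) is set in a custom channel map value.
     * Note that some documented locations describe channel pairs, so a
     * plain popcount of the mask would not give a channel count. */
    static int chmap_has_location(uint16_t chmap, uint16_t location)
    {
        return (chmap & location) != 0;
    }

For example, chmap_has_location(ff_eac3_default_chmap[6], AC3_CHMAP_L_SUR) is non-zero, since coding mode 6 (2/2) includes both surround locations.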
+
+#endif /* AVCODEC_AC3TAB_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/adpcm.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/adpcm.c
new file mode 100644
index 000000000..365da9cbb
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/adpcm.c
@@ -0,0 +1,881 @@
+/*
+ * ADPCM codecs
+ * Copyright (c) 2001-2003 The ffmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avcodec.h"
+#include "get_bits.h"
+#include "put_bits.h"
+#include "bytestream.h"
+
+/**
+ * @file libavcodec/adpcm.c
+ * ADPCM codecs.
+ * First version by Francois Revol (revol@free.fr)
+ * Fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
+ * by Mike Melanson (melanson@pcisys.net)
+ * CD-ROM XA ADPCM codec by BERO
+ * EA ADPCM decoder by Robin Kay (komadori@myrealbox.com)
+ *
+ * Features and limitations:
+ *
+ * Reference documents:
+ * http://www.pcisys.net/~melanson/codecs/simpleaudio.html
+ * http://www.geocities.com/SiliconValley/8682/aud3.txt
+ * http://openquicktime.sourceforge.net/plugins.htm
+ * XAnim sources (xa_codec.c) http://www.rasnaimaging.com/people/lapus/download.html
+ * http://www.cs.ucla.edu/~leec/mediabench/applications.html
+ * SoX source code http://home.sprynet.com/~cbagwell/sox.html
+ *
+ * CD-ROM XA:
+ * http://ku-www.ss.titech.ac.jp/~yatsushi/xaadpcm.html
+ * vagpack & depack http://homepages.compuserve.de/bITmASTER32/psx-index.html
+ * readstr http://www.geocities.co.jp/Playtown/2004/
+ */
+
+#define BLKSIZE 1024
+
+/* step_table[] and index_table[] are from the ADPCM reference source */
+/* This is the index table: */
+static const int index_table[16] = {
+ -1, -1, -1, -1, 2, 4, 6, 8,
+ -1, -1, -1, -1, 2, 4, 6, 8,
+};
+
+/**
+ * This is the step table. Note that many programs use slight deviations from
+ * this table, but such deviations are negligible:
+ */
+static const int step_table[89] = {
+ 7, 8, 9, 10, 11, 12, 13, 14, 16, 17,
+ 19, 21, 23, 25, 28, 31, 34, 37, 41, 45,
+ 50, 55, 60, 66, 73, 80, 88, 97, 107, 118,
+ 130, 143, 157, 173, 190, 209, 230, 253, 279, 307,
+ 337, 371, 408, 449, 494, 544, 598, 658, 724, 796,
+ 876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878, 2066,
+ 2272, 2499, 2749, 3024, 3327, 3660, 4026, 4428, 4871, 5358,
+ 5894, 6484, 7132, 7845, 8630, 9493, 10442, 11487, 12635, 13899,
+ 15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767
+};
+
+/* These are for MS-ADPCM */
+/* AdaptationTable[], AdaptCoeff1[], and AdaptCoeff2[] are from libsndfile */
+static const int AdaptationTable[] = {
+ 230, 230, 230, 230, 307, 409, 512, 614,
+ 768, 614, 512, 409, 307, 230, 230, 230
+};
+
+/** Divided by 4 to fit in 8-bit integers */
+static const uint8_t AdaptCoeff1[] = {
+ 64, 128, 0, 48, 60, 115, 98
+};
+
+/** Divided by 4 to fit in 8-bit integers */
+static const int8_t AdaptCoeff2[] = {
+ 0, -64, 0, 16, 0, -52, -58
+};
+
+/* These are for CD-ROM XA ADPCM */
+static const int xa_adpcm_table[5][2] = {
+ { 0, 0 },
+ { 60, 0 },
+ { 115, -52 },
+ { 98, -55 },
+ { 122, -60 }
+};
+
+static const int ea_adpcm_table[] = {
+ 0, 240, 460, 392, 0, 0, -208, -220, 0, 1,
+ 3, 4, 7, 8, 10, 11, 0, -1, -3, -4
+};
+
+// padded to zero where table size is less than 16
+static const int swf_index_tables[4][16] = {
+ /*2*/ { -1, 2 },
+ /*3*/ { -1, -1, 2, 4 },
+ /*4*/ { -1, -1, -1, -1, 2, 4, 6, 8 },
+ /*5*/ { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }
+};
+
+static const int yamaha_indexscale[] = {
+ 230, 230, 230, 230, 307, 409, 512, 614,
+ 230, 230, 230, 230, 307, 409, 512, 614
+};
+
+static const int yamaha_difflookup[] = {
+ 1, 3, 5, 7, 9, 11, 13, 15,
+ -1, -3, -5, -7, -9, -11, -13, -15
+};
+
+/* end of tables */
+
+typedef struct ADPCMChannelStatus {
+ int predictor;
+ short int step_index;
+ int step;
+ /* for encoding */
+ int prev_sample;
+
+ /* MS version */
+ short sample1;
+ short sample2;
+ int coeff1;
+ int coeff2;
+ int idelta;
+} ADPCMChannelStatus;
+
+typedef struct ADPCMContext {
+ ADPCMChannelStatus status[2];
+} ADPCMContext;
+
+static av_cold int adpcm_decode_init(AVCodecContext * avctx)
+{
+ ADPCMContext *c = avctx->priv_data;
+ unsigned int max_channels = 2;
+
+ if(avctx->channels > max_channels){
+ return -1;
+ }
+
+ switch(avctx->codec->id) {
+ case CODEC_ID_ADPCM_CT:
+ c->status[0].step = c->status[1].step = 511;
+ break;
+ case CODEC_ID_ADPCM_IMA_WS:
+ if (avctx->extradata && avctx->extradata_size == 2 * 4) {
+ c->status[0].predictor = AV_RL32(avctx->extradata);
+ c->status[1].predictor = AV_RL32(avctx->extradata + 4);
+ }
+ break;
+ default:
+ break;
+ }
+ avctx->sample_fmt = SAMPLE_FMT_S16;
+ return 0;
+}
+
+static inline short adpcm_ima_expand_nibble(ADPCMChannelStatus *c, char nibble, int shift)
+{
+ int step_index;
+ int predictor;
+ int sign, delta, diff, step;
+
+ step = step_table[c->step_index];
+ step_index = c->step_index + index_table[(unsigned)nibble];
+ if (step_index < 0) step_index = 0;
+ else if (step_index > 88) step_index = 88;
+
+ sign = nibble & 8;
+ delta = nibble & 7;
+    /* perform direct multiplication instead of the series of conditional adds
+     * used by the reference ADPCM implementation, since modern CPUs can do the
+     * mults quickly enough (see the sketch after this function) */
+ diff = ((2 * delta + 1) * step) >> shift;
+ predictor = c->predictor;
+ if (sign) predictor -= diff;
+ else predictor += diff;
+
+ c->predictor = av_clip_int16(predictor);
+ c->step_index = step_index;
+
+ return (short)c->predictor;
+}
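[Editor's note] To spell out the comment inside the function above, here is a hedged sketch, not part of the patch, of the reference-style nibble-to-difference computation that the closed-form multiplication replaces, for the usual shift of 3. The two forms can differ by a couple of least-significant bits because the reference version truncates each shifted term separately.

    /* Reference IMA ADPCM difference: start from step/8 and conditionally add
     * step, step/2 and step/4 depending on the three magnitude bits.
     * ((2*delta + 1) * step) >> 3 computes essentially the same value. */
    static int ima_diff_reference(int delta, int step)
    {
        int diff = step >> 3;
        if (delta & 4) diff += step;
        if (delta & 2) diff += step >> 1;
        if (delta & 1) diff += step >> 2;
        return diff;
    }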
+
+static inline short adpcm_ms_expand_nibble(ADPCMChannelStatus *c, char nibble)
+{
+ int predictor;
+
+ predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
+ predictor += (signed)((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
+
+ c->sample2 = c->sample1;
+ c->sample1 = av_clip_int16(predictor);
+ c->idelta = (AdaptationTable[(int)nibble] * c->idelta) >> 8;
+ if (c->idelta < 16) c->idelta = 16;
+
+ return c->sample1;
+}
+
+static inline short adpcm_ct_expand_nibble(ADPCMChannelStatus *c, char nibble)
+{
+ int sign, delta, diff;
+ int new_step;
+
+ sign = nibble & 8;
+ delta = nibble & 7;
+    /* perform direct multiplication instead of the series of conditional adds
+     * used by the reference ADPCM implementation, since modern CPUs can do the
+     * mults quickly enough */
+ diff = ((2 * delta + 1) * c->step) >> 3;
+    /* predictor update is not so trivial: the predictor is multiplied by 254/256 before updating */
+ c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff);
+ c->predictor = av_clip_int16(c->predictor);
+ /* calculate new step and clamp it to range 511..32767 */
+ new_step = (AdaptationTable[nibble & 7] * c->step) >> 8;
+ c->step = av_clip(new_step, 511, 32767);
+
+ return (short)c->predictor;
+}
+
+static inline short adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, char nibble, int size, int shift)
+{
+ int sign, delta, diff;
+
+ sign = nibble & (1<<(size-1));
+ delta = nibble & ((1<<(size-1))-1);
+ diff = delta << (7 + c->step + shift);
+
+ /* clamp result */
+ c->predictor = av_clip(c->predictor + (sign ? -diff : diff), -16384,16256);
+
+ /* calculate new step */
+ if (delta >= (2*size - 3) && c->step < 3)
+ c->step++;
+ else if (delta == 0 && c->step > 0)
+ c->step--;
+
+ return (short) c->predictor;
+}
+
+static inline short adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, unsigned char nibble)
+{
+ if(!c->step) {
+ c->predictor = 0;
+ c->step = 127;
+ }
+
+ c->predictor += (c->step * yamaha_difflookup[nibble]) / 8;
+ c->predictor = av_clip_int16(c->predictor);
+ c->step = (c->step * yamaha_indexscale[nibble]) >> 8;
+ c->step = av_clip(c->step, 127, 24567);
+ return c->predictor;
+}
+
+static void xa_decode(short *out, const unsigned char *in,
+ ADPCMChannelStatus *left, ADPCMChannelStatus *right, int inc)
+{
+ int i, j;
+ int shift,filter,f0,f1;
+ int s_1,s_2;
+ int d,s,t;
+
+ for(i=0;i<4;i++) {
+
+ shift = 12 - (in[4+i*2] & 15);
+ filter = in[4+i*2] >> 4;
+ f0 = xa_adpcm_table[filter][0];
+ f1 = xa_adpcm_table[filter][1];
+
+ s_1 = left->sample1;
+ s_2 = left->sample2;
+
+ for(j=0;j<28;j++) {
+ d = in[16+i+j*4];
+
+ t = (signed char)(d<<4)>>4;
+ s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6);
+ s_2 = s_1;
+ s_1 = av_clip_int16(s);
+ *out = s_1;
+ out += inc;
+ }
+
+ if (inc==2) { /* stereo */
+ left->sample1 = s_1;
+ left->sample2 = s_2;
+ s_1 = right->sample1;
+ s_2 = right->sample2;
+ out = out + 1 - 28*2;
+ }
+
+ shift = 12 - (in[5+i*2] & 15);
+ filter = in[5+i*2] >> 4;
+
+ f0 = xa_adpcm_table[filter][0];
+ f1 = xa_adpcm_table[filter][1];
+
+ for(j=0;j<28;j++) {
+ d = in[16+i+j*4];
+
+ t = (signed char)d >> 4;
+ s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6);
+ s_2 = s_1;
+ s_1 = av_clip_int16(s);
+ *out = s_1;
+ out += inc;
+ }
+
+ if (inc==2) { /* stereo */
+ right->sample1 = s_1;
+ right->sample2 = s_2;
+ out -= 1;
+ } else {
+ left->sample1 = s_1;
+ left->sample2 = s_2;
+ }
+ }
+}
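[Editor's note] A hedged sizing note on the framing xa_decode expects; the helper name is made up. Each 128-byte CD-ROM XA sound group consists of a 16-byte parameter header followed by 112 data bytes and decodes to 4 units x 2 nibble streams x 28 samples = 224 output samples, which matches the samples += 28 * 8 advance in the CODEC_ID_ADPCM_XA case further below.

    /* Hypothetical helper: total 16-bit output samples produced from
     * buf_size bytes of CD-ROM XA ADPCM data (mono or stereo-interleaved). */
    static int xa_output_samples(int buf_size)
    {
        return (buf_size / 128) * 28 * 8;
    }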
+
+
+/* DK3 ADPCM support macro */
+#define DK3_GET_NEXT_NIBBLE() \
+ if (decode_top_nibble_next) \
+ { \
+ nibble = last_byte >> 4; \
+ decode_top_nibble_next = 0; \
+ } \
+ else \
+ { \
+ last_byte = *src++; \
+ if (src >= buf + buf_size) break; \
+ nibble = last_byte & 0x0F; \
+ decode_top_nibble_next = 1; \
+ }
+
+static int adpcm_decode_frame(AVCodecContext *avctx,
+ void *data, int *data_size,
+ const uint8_t *buf, int buf_size)
+{
+ ADPCMContext *c = avctx->priv_data;
+ ADPCMChannelStatus *cs;
+ int n, m, channel, i;
+ int block_predictor[2];
+ short *samples;
+ short *samples_end;
+ const uint8_t *src;
+ int st; /* stereo */
+
+ /* DK3 ADPCM accounting variables */
+ unsigned char last_byte = 0;
+ unsigned char nibble;
+ int decode_top_nibble_next = 0;
+ int diff_channel;
+
+ /* EA ADPCM state variables */
+ uint32_t samples_in_chunk;
+ int32_t previous_left_sample, previous_right_sample;
+ int32_t current_left_sample, current_right_sample;
+ int32_t next_left_sample, next_right_sample;
+ int32_t coeff1l, coeff2l, coeff1r, coeff2r;
+ uint8_t shift_left, shift_right;
+ int count1, count2;
+
+ if (!buf_size)
+ return 0;
+
+    // should protect all 4-bit ADPCM variants
+    // 8 is needed for CODEC_ID_ADPCM_IMA_WAV with 2 channels
+ if(*data_size/4 < buf_size + 8)
+ return -1;
+
+ samples = data;
+ samples_end= samples + *data_size/2;
+ *data_size= 0;
+ src = buf;
+
+ st = avctx->channels == 2 ? 1 : 0;
+
+ switch(avctx->codec->id) {
+ case CODEC_ID_ADPCM_IMA_QT:
+ n = buf_size - 2*avctx->channels;
+ for (channel = 0; channel < avctx->channels; channel++) {
+ cs = &(c->status[channel]);
+ /* (pppppp) (piiiiiii) */
+
+ /* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */
+ cs->predictor = (*src++) << 8;
+ cs->predictor |= (*src & 0x80);
+ cs->predictor &= 0xFF80;
+
+ /* sign extension */
+ if(cs->predictor & 0x8000)
+ cs->predictor -= 0x10000;
+
+ cs->predictor = av_clip_int16(cs->predictor);
+
+ cs->step_index = (*src++) & 0x7F;
+
+ if (cs->step_index > 88){
+ av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n", cs->step_index);
+ cs->step_index = 88;
+ }
+
+ cs->step = step_table[cs->step_index];
+
+ samples = (short*)data + channel;
+
+            for(m=32; n>0 && m>0; n--, m--) { /* in QuickTime, IMA is encoded by chunks of 34 bytes (=64 samples) */
+ *samples = adpcm_ima_expand_nibble(cs, src[0] & 0x0F, 3);
+ samples += avctx->channels;
+ *samples = adpcm_ima_expand_nibble(cs, src[0] >> 4 , 3);
+ samples += avctx->channels;
+ src ++;
+ }
+ }
+ if (st)
+ samples--;
+ break;
+ case CODEC_ID_ADPCM_IMA_WAV:
+ if (avctx->block_align != 0 && buf_size > avctx->block_align)
+ buf_size = avctx->block_align;
+
+//        samples_per_block= (block_align-4*channels)*8 / (bits_per_sample * channels) + 1;
+
+ for(i=0; i<avctx->channels; i++){
+ cs = &(c->status[i]);
+ cs->predictor = *samples++ = (int16_t)bytestream_get_le16(&src);
+
+ cs->step_index = *src++;
+ if (cs->step_index > 88){
+ av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n", cs->step_index);
+ cs->step_index = 88;
+ }
+ if (*src++) av_log(avctx, AV_LOG_ERROR, "unused byte should be null but is %d!!\n", src[-1]); /* unused */
+ }
+
+ while(src < buf + buf_size){
+ for(m=0; m<4; m++){
+ for(i=0; i<=st; i++)
+ *samples++ = adpcm_ima_expand_nibble(&c->status[i], src[4*i] & 0x0F, 3);
+ for(i=0; i<=st; i++)
+ *samples++ = adpcm_ima_expand_nibble(&c->status[i], src[4*i] >> 4 , 3);
+ src++;
+ }
+ src += 4*st;
+ }
+ break;
+ case CODEC_ID_ADPCM_4XM:
+ cs = &(c->status[0]);
+ c->status[0].predictor= (int16_t)bytestream_get_le16(&src);
+ if(st){
+ c->status[1].predictor= (int16_t)bytestream_get_le16(&src);
+ }
+ c->status[0].step_index= (int16_t)bytestream_get_le16(&src);
+ if(st){
+ c->status[1].step_index= (int16_t)bytestream_get_le16(&src);
+ }
+ if (cs->step_index < 0) cs->step_index = 0;
+ if (cs->step_index > 88) cs->step_index = 88;
+
+ m= (buf_size - (src - buf))>>st;
+ for(i=0; i<m; i++) {
+ *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[i] & 0x0F, 4);
+ if (st)
+ *samples++ = adpcm_ima_expand_nibble(&c->status[1], src[i+m] & 0x0F, 4);
+ *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[i] >> 4, 4);
+ if (st)
+ *samples++ = adpcm_ima_expand_nibble(&c->status[1], src[i+m] >> 4, 4);
+ }
+
+ src += m<<st;
+
+ break;
+ case CODEC_ID_ADPCM_MS:
+ if (avctx->block_align != 0 && buf_size > avctx->block_align)
+ buf_size = avctx->block_align;
+ n = buf_size - 7 * avctx->channels;
+ if (n < 0)
+ return -1;
+ block_predictor[0] = av_clip(*src++, 0, 6);
+ block_predictor[1] = 0;
+ if (st)
+ block_predictor[1] = av_clip(*src++, 0, 6);
+ c->status[0].idelta = (int16_t)bytestream_get_le16(&src);
+ if (st){
+ c->status[1].idelta = (int16_t)bytestream_get_le16(&src);
+ }
+ c->status[0].coeff1 = AdaptCoeff1[block_predictor[0]];
+ c->status[0].coeff2 = AdaptCoeff2[block_predictor[0]];
+ c->status[1].coeff1 = AdaptCoeff1[block_predictor[1]];
+ c->status[1].coeff2 = AdaptCoeff2[block_predictor[1]];
+
+ c->status[0].sample1 = bytestream_get_le16(&src);
+ if (st) c->status[1].sample1 = bytestream_get_le16(&src);
+ c->status[0].sample2 = bytestream_get_le16(&src);
+ if (st) c->status[1].sample2 = bytestream_get_le16(&src);
+
+ *samples++ = c->status[0].sample2;
+ if (st) *samples++ = c->status[1].sample2;
+ *samples++ = c->status[0].sample1;
+ if (st) *samples++ = c->status[1].sample1;
+ for(;n>0;n--) {
+ *samples++ = adpcm_ms_expand_nibble(&c->status[0 ], src[0] >> 4 );
+ *samples++ = adpcm_ms_expand_nibble(&c->status[st], src[0] & 0x0F);
+ src ++;
+ }
+ break;
+ case CODEC_ID_ADPCM_IMA_DK4:
+ if (avctx->block_align != 0 && buf_size > avctx->block_align)
+ buf_size = avctx->block_align;
+
+ c->status[0].predictor = (int16_t)bytestream_get_le16(&src);
+ c->status[0].step_index = *src++;
+ src++;
+ *samples++ = c->status[0].predictor;
+ if (st) {
+ c->status[1].predictor = (int16_t)bytestream_get_le16(&src);
+ c->status[1].step_index = *src++;
+ src++;
+ *samples++ = c->status[1].predictor;
+ }
+ while (src < buf + buf_size) {
+
+ /* take care of the top nibble (always left or mono channel) */
+ *samples++ = adpcm_ima_expand_nibble(&c->status[0],
+ src[0] >> 4, 3);
+
+ /* take care of the bottom nibble, which is right sample for
+ * stereo, or another mono sample */
+ if (st)
+ *samples++ = adpcm_ima_expand_nibble(&c->status[1],
+ src[0] & 0x0F, 3);
+ else
+ *samples++ = adpcm_ima_expand_nibble(&c->status[0],
+ src[0] & 0x0F, 3);
+
+ src++;
+ }
+ break;
+ case CODEC_ID_ADPCM_IMA_DK3:
+ if (avctx->block_align != 0 && buf_size > avctx->block_align)
+ buf_size = avctx->block_align;
+
+ if(buf_size + 16 > (samples_end - samples)*3/8)
+ return -1;
+
+ c->status[0].predictor = (int16_t)AV_RL16(src + 10);
+ c->status[1].predictor = (int16_t)AV_RL16(src + 12);
+ c->status[0].step_index = src[14];
+ c->status[1].step_index = src[15];
+ /* sign extend the predictors */
+ src += 16;
+ diff_channel = c->status[1].predictor;
+
+ /* the DK3_GET_NEXT_NIBBLE macro issues the break statement when
+ * the buffer is consumed */
+ while (1) {
+
+ /* for this algorithm, c->status[0] is the sum channel and
+ * c->status[1] is the diff channel */
+
+ /* process the first predictor of the sum channel */
+ DK3_GET_NEXT_NIBBLE();
+ adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
+
+ /* process the diff channel predictor */
+ DK3_GET_NEXT_NIBBLE();
+ adpcm_ima_expand_nibble(&c->status[1], nibble, 3);
+
+ /* process the first pair of stereo PCM samples */
+ diff_channel = (diff_channel + c->status[1].predictor) / 2;
+ *samples++ = c->status[0].predictor + c->status[1].predictor;
+ *samples++ = c->status[0].predictor - c->status[1].predictor;
+
+ /* process the second predictor of the sum channel */
+ DK3_GET_NEXT_NIBBLE();
+ adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
+
+ /* process the second pair of stereo PCM samples */
+ diff_channel = (diff_channel + c->status[1].predictor) / 2;
+ *samples++ = c->status[0].predictor + c->status[1].predictor;
+ *samples++ = c->status[0].predictor - c->status[1].predictor;
+ }
+ break;
+ case CODEC_ID_ADPCM_IMA_WS:
+ /* no per-block initialization; just start decoding the data */
+ while (src < buf + buf_size) {
+
+ if (st) {
+ *samples++ = adpcm_ima_expand_nibble(&c->status[0],
+ src[0] >> 4 , 3);
+ *samples++ = adpcm_ima_expand_nibble(&c->status[1],
+ src[0] & 0x0F, 3);
+ } else {
+ *samples++ = adpcm_ima_expand_nibble(&c->status[0],
+ src[0] >> 4 , 3);
+ *samples++ = adpcm_ima_expand_nibble(&c->status[0],
+ src[0] & 0x0F, 3);
+ }
+
+ src++;
+ }
+ break;
+ case CODEC_ID_ADPCM_XA:
+ while (buf_size >= 128) {
+ xa_decode(samples, src, &c->status[0], &c->status[1],
+ avctx->channels);
+ src += 128;
+ samples += 28 * 8;
+ buf_size -= 128;
+ }
+ break;
+ case CODEC_ID_ADPCM_EA:
+ if (buf_size < 4 || AV_RL32(src) >= ((buf_size - 12) * 2)) {
+ src += buf_size;
+ break;
+ }
+ samples_in_chunk = AV_RL32(src);
+ src += 4;
+ current_left_sample = (int16_t)bytestream_get_le16(&src);
+ previous_left_sample = (int16_t)bytestream_get_le16(&src);
+ current_right_sample = (int16_t)bytestream_get_le16(&src);
+ previous_right_sample = (int16_t)bytestream_get_le16(&src);
+
+ for (count1 = 0; count1 < samples_in_chunk/28;count1++) {
+ coeff1l = ea_adpcm_table[ *src >> 4 ];
+ coeff2l = ea_adpcm_table[(*src >> 4 ) + 4];
+ coeff1r = ea_adpcm_table[*src & 0x0F];
+ coeff2r = ea_adpcm_table[(*src & 0x0F) + 4];
+ src++;
+
+ shift_left = (*src >> 4 ) + 8;
+ shift_right = (*src & 0x0F) + 8;
+ src++;
+
+ for (count2 = 0; count2 < 28; count2++) {
+ next_left_sample = (int32_t)((*src & 0xF0) << 24) >> shift_left;
+ next_right_sample = (int32_t)((*src & 0x0F) << 28) >> shift_right;
+ src++;
+
+ next_left_sample = (next_left_sample +
+ (current_left_sample * coeff1l) +
+ (previous_left_sample * coeff2l) + 0x80) >> 8;
+ next_right_sample = (next_right_sample +
+ (current_right_sample * coeff1r) +
+ (previous_right_sample * coeff2r) + 0x80) >> 8;
+
+ previous_left_sample = current_left_sample;
+ current_left_sample = av_clip_int16(next_left_sample);
+ previous_right_sample = current_right_sample;
+ current_right_sample = av_clip_int16(next_right_sample);
+ *samples++ = (unsigned short)current_left_sample;
+ *samples++ = (unsigned short)current_right_sample;
+ }
+ }
+
+ if (src - buf == buf_size - 2)
+ src += 2; // Skip terminating 0x0000
+
+ break;
+ case CODEC_ID_ADPCM_IMA_AMV:
+ case CODEC_ID_ADPCM_IMA_SMJPEG:
+ c->status[0].predictor = (int16_t)bytestream_get_le16(&src);
+ c->status[0].step_index = bytestream_get_le16(&src);
+
+ if (avctx->codec->id == CODEC_ID_ADPCM_IMA_AMV)
+ src+=4;
+
+ while (src < buf + buf_size) {
+ char hi, lo;
+ lo = *src & 0x0F;
+ hi = *src >> 4;
+
+ if (avctx->codec->id == CODEC_ID_ADPCM_IMA_AMV)
+ FFSWAP(char, hi, lo);
+
+ *samples++ = adpcm_ima_expand_nibble(&c->status[0],
+ lo, 3);
+ *samples++ = adpcm_ima_expand_nibble(&c->status[0],
+ hi, 3);
+ src++;
+ }
+ break;
+ case CODEC_ID_ADPCM_CT:
+ while (src < buf + buf_size) {
+ if (st) {
+ *samples++ = adpcm_ct_expand_nibble(&c->status[0],
+ src[0] >> 4);
+ *samples++ = adpcm_ct_expand_nibble(&c->status[1],
+ src[0] & 0x0F);
+ } else {
+ *samples++ = adpcm_ct_expand_nibble(&c->status[0],
+ src[0] >> 4);
+ *samples++ = adpcm_ct_expand_nibble(&c->status[0],
+ src[0] & 0x0F);
+ }
+ src++;
+ }
+ break;
+ case CODEC_ID_ADPCM_SBPRO_4:
+ case CODEC_ID_ADPCM_SBPRO_3:
+ case CODEC_ID_ADPCM_SBPRO_2:
+ if (!c->status[0].step_index) {
+ /* the first byte is a raw sample */
+ *samples++ = 128 * (*src++ - 0x80);
+ if (st)
+ *samples++ = 128 * (*src++ - 0x80);
+ c->status[0].step_index = 1;
+ }
+ if (avctx->codec->id == CODEC_ID_ADPCM_SBPRO_4) {
+ while (src < buf + buf_size) {
+ *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
+ src[0] >> 4, 4, 0);
+ *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
+ src[0] & 0x0F, 4, 0);
+ src++;
+ }
+ } else if (avctx->codec->id == CODEC_ID_ADPCM_SBPRO_3) {
+ while (src < buf + buf_size && samples + 2 < samples_end) {
+ *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
+ src[0] >> 5 , 3, 0);
+ *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
+ (src[0] >> 2) & 0x07, 3, 0);
+ *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
+ src[0] & 0x03, 2, 0);
+ src++;
+ }
+ } else {
+ while (src < buf + buf_size && samples + 3 < samples_end) {
+ *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
+ src[0] >> 6 , 2, 2);
+ *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
+ (src[0] >> 4) & 0x03, 2, 2);
+ *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
+ (src[0] >> 2) & 0x03, 2, 2);
+ *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
+ src[0] & 0x03, 2, 2);
+ src++;
+ }
+ }
+ break;
+ case CODEC_ID_ADPCM_SWF:
+ {
+ GetBitContext gb;
+ const int *table;
+ int k0, signmask, nb_bits, count;
+ int size = buf_size*8;
+
+ init_get_bits(&gb, buf, size);
+
+ //read bits & initial values
+ nb_bits = get_bits(&gb, 2)+2;
+ //av_log(NULL,AV_LOG_INFO,"nb_bits: %d\n", nb_bits);
+ table = swf_index_tables[nb_bits-2];
+ k0 = 1 << (nb_bits-2);
+ signmask = 1 << (nb_bits-1);
+
+ while (get_bits_count(&gb) <= size - 22*avctx->channels) {
+ for (i = 0; i < avctx->channels; i++) {
+ *samples++ = c->status[i].predictor = get_sbits(&gb, 16);
+ c->status[i].step_index = get_bits(&gb, 6);
+ }
+
+ for (count = 0; get_bits_count(&gb) <= size - nb_bits*avctx->channels && count < 4095; count++) {
+ int i;
+
+ for (i = 0; i < avctx->channels; i++) {
+ // similar to IMA adpcm
+ int delta = get_bits(&gb, nb_bits);
+ int step = step_table[c->status[i].step_index];
+ long vpdiff = 0; // vpdiff = (delta+0.5)*step/4
+ int k = k0;
+
+ do {
+ if (delta & k)
+ vpdiff += step;
+ step >>= 1;
+ k >>= 1;
+ } while(k);
+ vpdiff += step;
+
+ if (delta & signmask)
+ c->status[i].predictor -= vpdiff;
+ else
+ c->status[i].predictor += vpdiff;
+
+ c->status[i].step_index += table[delta & (~signmask)];
+
+ c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
+ c->status[i].predictor = av_clip_int16(c->status[i].predictor);
+
+ *samples++ = c->status[i].predictor;
+ if (samples >= samples_end) {
+ av_log(avctx, AV_LOG_ERROR, "allocated output buffer is too small\n");
+ return -1;
+ }
+ }
+ }
+ }
+ src += buf_size;
+ break;
+ }
+ case CODEC_ID_ADPCM_YAMAHA:
+ while (src < buf + buf_size) {
+ if (st) {
+ *samples++ = adpcm_yamaha_expand_nibble(&c->status[0],
+ src[0] & 0x0F);
+ *samples++ = adpcm_yamaha_expand_nibble(&c->status[1],
+ src[0] >> 4 );
+ } else {
+ *samples++ = adpcm_yamaha_expand_nibble(&c->status[0],
+ src[0] & 0x0F);
+ *samples++ = adpcm_yamaha_expand_nibble(&c->status[0],
+ src[0] >> 4 );
+ }
+ src++;
+ }
+ break;
+
+ default:
+ return -1;
+ }
+ *data_size = (uint8_t *)samples - (uint8_t *)data;
+ return src - buf;
+}
+
+#if CONFIG_DECODERS
+#define ADPCM_DECODER(id,name,long_name_) \
+AVCodec name ## _decoder = { \
+ #name, \
+ CODEC_TYPE_AUDIO, \
+ id, \
+ sizeof(ADPCMContext), \
+ adpcm_decode_init, \
+ NULL, \
+ NULL, \
+ adpcm_decode_frame, \
+ /*.capabilities = */0, \
+ /*.next = */NULL, \
+ /*.flush = */NULL, \
+ /*.supported_framerates = */NULL, \
+ /*.pix_fmts = */NULL, \
+ /*.long_name = */NULL_IF_CONFIG_SMALL(long_name_), \
+};
+#else
+#define ADPCM_DECODER(id,name,long_name_)
+#endif
+
+/* Note: Do not forget to add new entries to the Makefile as well. */
+ADPCM_DECODER(CODEC_ID_ADPCM_4XM, adpcm_4xm, "ADPCM 4X Movie");
+ADPCM_DECODER(CODEC_ID_ADPCM_CT, adpcm_ct, "ADPCM Creative Technology");
+ADPCM_DECODER(CODEC_ID_ADPCM_EA, adpcm_ea, "ADPCM Electronic Arts");
+ADPCM_DECODER(CODEC_ID_ADPCM_IMA_AMV, adpcm_ima_amv, "ADPCM IMA AMV");
+ADPCM_DECODER(CODEC_ID_ADPCM_IMA_DK3, adpcm_ima_dk3, "ADPCM IMA Duck DK3");
+ADPCM_DECODER(CODEC_ID_ADPCM_IMA_DK4, adpcm_ima_dk4, "ADPCM IMA Duck DK4");
+ADPCM_DECODER(CODEC_ID_ADPCM_IMA_QT, adpcm_ima_qt, "ADPCM IMA QuickTime");
+ADPCM_DECODER(CODEC_ID_ADPCM_IMA_SMJPEG, adpcm_ima_smjpeg, "ADPCM IMA Loki SDL MJPEG");
+ADPCM_DECODER(CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav, "ADPCM IMA WAV");
+ADPCM_DECODER(CODEC_ID_ADPCM_IMA_WS, adpcm_ima_ws, "ADPCM IMA Westwood");
+ADPCM_DECODER(CODEC_ID_ADPCM_MS, adpcm_ms, "ADPCM Microsoft");
+ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_2, adpcm_sbpro_2, "ADPCM Sound Blaster Pro 2-bit");
+ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_3, adpcm_sbpro_3, "ADPCM Sound Blaster Pro 2.6-bit");
+ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_4, adpcm_sbpro_4, "ADPCM Sound Blaster Pro 4-bit");
+ADPCM_DECODER(CODEC_ID_ADPCM_SWF, adpcm_swf, "ADPCM Shockwave Flash");
+ADPCM_DECODER(CODEC_ID_ADPCM_XA, adpcm_xa, "ADPCM CDROM XA");
+ADPCM_DECODER(CODEC_ID_ADPCM_YAMAHA, adpcm_yamaha, "ADPCM Yamaha");
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/allcodecs.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/allcodecs.c
new file mode 100644
index 000000000..b3a10e214
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/allcodecs.c
@@ -0,0 +1,181 @@
+/*
+ * Provides registration of all codecs, parsers and bitstream filters for libavcodec.
+ * Copyright (c) 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/allcodecs.c
+ * Provides registration of all codecs, parsers and bitstream filters for libavcodec.
+ */
+
+#include "avcodec.h"
+
+#define REGISTER_ENCODER(X,x) { \
+ extern AVCodec x##_encoder; \
+ if(CONFIG_##X##_ENCODER) avcodec_register(&x##_encoder); }
+#define REGISTER_DECODER(X,x) { \
+ extern AVCodec x##_decoder; \
+ if(CONFIG_##X##_DECODER) avcodec_register(&x##_decoder); }
+#define REGISTER_ENCDEC(X,x) REGISTER_ENCODER(X,x); REGISTER_DECODER(X,x)
+
+#define REGISTER_PARSER(X,x) { \
+ extern AVCodecParser x##_parser; \
+ if(CONFIG_##X##_PARSER) av_register_codec_parser(&x##_parser); }
+
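[Editor's note] For reference, a hand-written sketch of what one registration line expands to after the preprocessor; treat it as illustrative. REGISTER_DECODER(AC3, ac3) becomes a block that declares the codec object and registers it only when the corresponding CONFIG_ flag is non-zero, so the compiler can drop the reference entirely for disabled codecs.

    {
        extern AVCodec ac3_decoder;
        if (CONFIG_AC3_DECODER)
            avcodec_register(&ac3_decoder);
    }

In this tree, codecs that are not built are additionally commented out in the list below.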
+void avcodec_register_all(void)
+{
+ static int initialized;
+
+ if (initialized)
+ return;
+ initialized = 1;
+
+ /* video codecs */
+ //REGISTER_DECODER (AASC, aasc);
+ REGISTER_DECODER (AMV, amv);
+ //REGISTER_DECODER (ASV1, asv1);
+ //REGISTER_DECODER (ASV2, asv2);
+ //REGISTER_DECODER (AVS, avs);
+ //REGISTER_DECODER (CAVS, cavs);
+ //REGISTER_DECODER (CINEPAK, cinepak);
+ //REGISTER_DECODER (COREPNG, corepng);
+ //REGISTER_DECODER (CSCD, cscd);
+ //REGISTER_DECODER (CYUV, cyuv);
+ //REGISTER_ENCDEC (DVVIDEO, dvvideo);
+ //REGISTER_DECODER (EIGHTBPS, eightbps);
+ //REGISTER_ENCDEC (FFV1, ffv1);
+ //REGISTER_ENCDEC (FFVHUFF, ffvhuff);
+ REGISTER_DECODER (FLV, flv);
+ //REGISTER_DECODER (FRAPS, fraps);
+ //REGISTER_ENCDEC (H261, h261);
+ REGISTER_DECODER (H263, h263);
+ //REGISTER_DECODER (H263I, h263i);
+ //REGISTER_ENCODER (H263P, h263p);
+ REGISTER_DECODER (H264, h264);
+ //REGISTER_ENCDEC (HUFFYUV, huffyuv);
+ //REGISTER_DECODER (INDEO2, indeo2);
+ //REGISTER_DECODER (INDEO3, indeo3);
+ //REGISTER_DECODER (JPEGLS, jpegls);
+ //REGISTER_ENCODER (LJPEG, ljpeg);
+ //REGISTER_DECODER (LOCO, loco);
+ //REGISTER_ENCDEC (MJPEG, mjpeg);
+ //REGISTER_DECODER (MJPEGB, mjpegb);
+ //REGISTER_ENCDEC (MPEG1VIDEO, mpeg1video);
+ REGISTER_DECODER (MPEG2VIDEO, mpeg2video);
+ REGISTER_DECODER (MPEG4, mpeg4);
+ //REGISTER_DECODER (MPEGVIDEO, mpegvideo);
+ REGISTER_DECODER (MSMPEG4V1, msmpeg4v1);
+ REGISTER_DECODER (MSMPEG4V2, msmpeg4v2);
+ REGISTER_DECODER (MSMPEG4V3, msmpeg4v3);
+ //REGISTER_DECODER (MSRLE, msrle);
+ //REGISTER_DECODER (MSVIDEO1, msvideo1);
+ //REGISTER_DECODER (MSZH, mszh);
+ //REGISTER_ENCDEC (PNG, png);
+ //REGISTER_DECODER (QPEG, qpeg);
+ //REGISTER_DECODER (QTRLE, qtrle);
+ //REGISTER_DECODER (RPZA, rpza);
+ REGISTER_DECODER (RV10, rv10);
+ REGISTER_DECODER (RV20, rv20);
+ REGISTER_DECODER (RV30, rv30);
+ REGISTER_DECODER (RV40, rv40);
+ //REGISTER_DECODER (SP5X, sp5x);
+ REGISTER_DECODER (SVQ1, svq1);
+ REGISTER_DECODER (SVQ3, svq3);
+ REGISTER_DECODER (THEORA, theora);
+ //REGISTER_DECODER (TRUEMOTION1, truemotion1);
+ //REGISTER_DECODER (TRUEMOTION2, truemotion2);
+ //REGISTER_DECODER (TSCC, tscc);
+ //REGISTER_DECODER (ULTI, ulti);
+ REGISTER_DECODER (VC1, vc1);
+ //REGISTER_DECODER (VCR1, vcr1);
+ REGISTER_DECODER (VP3, vp3);
+ REGISTER_DECODER (VP5, vp5);
+ REGISTER_DECODER (VP6, vp6);
+ REGISTER_DECODER (VP6A, vp6a);
+ REGISTER_DECODER (VP6F, vp6f);
+ REGISTER_DECODER (WMV1, wmv1);
+ REGISTER_DECODER (WMV2, wmv2);
+ REGISTER_DECODER (WMV3, wmv3);
+ //REGISTER_DECODER (WNV1, wnv1);
+ //REGISTER_DECODER (XL, xl);
+ //REGISTER_DECODER (ZLIB, zlib);
+ //REGISTER_DECODER (ZMBV, zmbv);
+
+ /* audio codecs */
+ //REGISTER_DECODER (AAC, aac);
+ REGISTER_DECODER (AC3, ac3);
+ //REGISTER_DECODER (ATRAC3, atrac3);
+ //REGISTER_DECODER (COOK, cook);
+ //REGISTER_DECODER (DCA, dca);
+ REGISTER_DECODER (EAC3, eac3);
+ //REGISTER_DECODER (FLAC, flac);
+ //REGISTER_DECODER (IMC, imc);
+ //REGISTER_DECODER (MACE3, mace3);
+ //REGISTER_DECODER (MACE6, mace6);
+ REGISTER_DECODER (MLP, mlp);
+ //REGISTER_DECODER (MP1, mp1);
+ //REGISTER_DECODER (MP2, mp2);
+ //REGISTER_DECODER (MP3, mp3);
+ //REGISTER_DECODER (MSGSM, msgsm);
+ REGISTER_DECODER (NELLYMOSER, nellymoser);
+ //REGISTER_DECODER (QDM2, qdm2);
+ //REGISTER_DECODER (RA_144, ra_144);
+ //REGISTER_DECODER (RA_288, ra_288);
+ REGISTER_DECODER (TRUEHD, truehd);
+ //REGISTER_DECODER (TRUESPEECH, truespeech);
+ //REGISTER_DECODER (TTA, tta);
+ //REGISTER_DECODER (VORBIS, vorbis);
+ //REGISTER_DECODER (WMAV1, wmav1);
+ //REGISTER_DECODER (WMAV2, wmav2);
+
+ /* pcm codecs */
+ //REGISTER_DECODER (PCM_ALAW, pcm_alaw);
+ //REGISTER_DECODER (PCM_MULAW,pcm_mulaw);
+
+ /* ADPCM codecs */
+ REGISTER_DECODER (ADPCM_4XM, adpcm_4xm);
+ REGISTER_DECODER (ADPCM_CT, adpcm_ct);
+ REGISTER_DECODER (ADPCM_EA, adpcm_ea);
+ //REGISTER_DECODER (ADPCM_G726, adpcm_g726);
+ REGISTER_DECODER (ADPCM_IMA_AMV, adpcm_ima_amv);
+ REGISTER_DECODER (ADPCM_IMA_DK3, adpcm_ima_dk3);
+ REGISTER_DECODER (ADPCM_IMA_DK4, adpcm_ima_dk4);
+ REGISTER_DECODER (ADPCM_IMA_QT, adpcm_ima_qt);
+ REGISTER_DECODER (ADPCM_IMA_SMJPEG, adpcm_ima_smjpeg);
+ REGISTER_DECODER (ADPCM_IMA_WAV, adpcm_ima_wav);
+ REGISTER_DECODER (ADPCM_IMA_WS, adpcm_ima_ws);
+ REGISTER_DECODER (ADPCM_MS, adpcm_ms);
+ REGISTER_DECODER (ADPCM_SBPRO_2, adpcm_sbpro_2);
+ REGISTER_DECODER (ADPCM_SBPRO_3, adpcm_sbpro_3);
+ REGISTER_DECODER (ADPCM_SBPRO_4, adpcm_sbpro_4);
+ REGISTER_DECODER (ADPCM_SWF, adpcm_swf);
+ REGISTER_DECODER (ADPCM_XA, adpcm_xa);
+ REGISTER_DECODER (ADPCM_YAMAHA, adpcm_yamaha);
+
+ /* external libraries */
+ REGISTER_DECODER (LIBAMR_NB, libamr_nb);
+
+ /* parsers */
+ //REGISTER_PARSER (AAC, aac);
+ REGISTER_PARSER (AC3, ac3);
+ //REGISTER_PARSER (DCA, dca);
+ REGISTER_PARSER (MLP, mlp);
+ //REGISTER_PARSER (MPEGAUDIO, mpegaudio);
+}
+
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/amr_float/interf_dec.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/amr_float/interf_dec.c
new file mode 100644
index 000000000..7b8dbfc93
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/amr_float/interf_dec.c
@@ -0,0 +1,880 @@
+/*
+ * ===================================================================
+ * TS 26.104
+ * REL-5 V5.4.0 2004-03
+ * REL-6 V6.1.0 2004-03
+ * 3GPP AMR Floating-point Speech Codec
+ * ===================================================================
+ *
+ */
+
+/*
+ * interf_dec.c
+ *
+ *
+ * Project:
+ * AMR Floating-Point Codec
+ *
+ * Contains:
+ *    This module provides the means to convert a 3GPP or ETSI
+ *    bitstream into AMR parameters
+ */
+
+/*
+ * include files
+ */
+#include <stdlib.h>
+#include <stdio.h>
+#include <memory.h>
+#include "typedef.h"
+#include "sp_dec.h"
+#include "interf_rom.h"
+#include "rom_dec.h"
+
+/*
+ * definition of constants
+ */
+#define EHF_MASK 0x0008 /* encoder homing frame pattern */
+typedef struct
+{
+    int reset_flag_old;            /* previous was homing frame */
+    enum RXFrameType prev_ft;      /* previous frame type */
+    enum Mode prev_mode;           /* previous mode */
+    void *decoder_State;           /* points to the decoder state */
+} dec_interface_State;
+
+#ifdef ETSI
+
+
+/*
+ * Bin2Int
+ *
+ *
+ * Parameters:
+ * no_of_bits I: number of bits associated with value
+ *    bits I: address where the bits are read from
+ *
+ * Function:
+ *    Read the given number of bits from the array and convert them to an integer.
+ *
+ * Returns:
+ * value
+ */
+static Word16 Bin2Int( Word16 no_of_bits, Word16 *bitstream )
+{
+ Word32 value, i, bit;
+
+
+ value = 0;
+
+ for ( i = 0; i < no_of_bits; i++ ) {
+ value = value << 1;
+ bit = *bitstream++;
+
+ if ( bit == 0x1 )
+ value = value + 1;
+ }
+ return( Word16 )( value );
+}
+
+
+/*
+ * Bits2Prm
+ *
+ *
+ * Parameters:
+ * mode I: AMR mode
+ * bits I: serial bits
+ * param O: AMR parameters
+ *
+ * Function:
+ * Retrieves the vector of encoder parameters from
+ * the received serial bits in a frame.
+ *
+ * Returns:
+ * void
+ */
+static void Bits2Prm( enum Mode mode, Word16 bits[], Word16 prm[] )
+{
+ Word32 i;
+
+
+ switch ( mode ) {
+ case MR122:
+ for ( i = 0; i < PRMNO_MR122; i++ ) {
+ prm[i] = Bin2Int( bitno_MR122[i], bits );
+ bits += bitno_MR122[i];
+ }
+ break;
+
+ case MR102:
+ for ( i = 0; i < PRMNO_MR102; i++ ) {
+ prm[i] = Bin2Int( bitno_MR102[i], bits );
+ bits += bitno_MR102[i];
+ }
+ break;
+
+ case MR795:
+ for ( i = 0; i < PRMNO_MR795; i++ ) {
+ prm[i] = Bin2Int( bitno_MR795[i], bits );
+ bits += bitno_MR795[i];
+ }
+ break;
+
+ case MR74:
+ for ( i = 0; i < PRMNO_MR74; i++ ) {
+ prm[i] = Bin2Int( bitno_MR74[i], bits );
+ bits += bitno_MR74[i];
+ }
+ break;
+
+ case MR67:
+ for ( i = 0; i < PRMNO_MR67; i++ ) {
+ prm[i] = Bin2Int( bitno_MR67[i], bits );
+ bits += bitno_MR67[i];
+ }
+ break;
+
+ case MR59:
+ for ( i = 0; i < PRMNO_MR59; i++ ) {
+ prm[i] = Bin2Int( bitno_MR59[i], bits );
+ bits += bitno_MR59[i];
+ }
+ break;
+
+ case MR515:
+ for ( i = 0; i < PRMNO_MR515; i++ ) {
+ prm[i] = Bin2Int( bitno_MR515[i], bits );
+ bits += bitno_MR515[i];
+ }
+ break;
+
+ case MR475:
+ for ( i = 0; i < PRMNO_MR475; i++ ) {
+ prm[i] = Bin2Int( bitno_MR475[i], bits );
+ bits += bitno_MR475[i];
+ }
+ break;
+
+ case MRDTX:
+ for ( i = 0; i < PRMNO_MRDTX; i++ ) {
+ prm[i] = Bin2Int( bitno_MRDTX[i], bits );
+ bits += bitno_MRDTX[i];
+ }
+ break;
+ }
+ return;
+}
+
+#else
+
+#ifndef IF2
+
+/*
+ * DecoderMMS
+ *
+ *
+ * Parameters:
+ * param O: AMR parameters
+ * stream I: input bitstream
+ * frame_type O: frame type
+ * speech_mode O: speech mode in DTX
+ *
+ * Function:
+ * AMR file storage format frame to decoder parameters
+ *
+ * Returns:
+ * mode used mode
+ */
+enum Mode DecoderMMS( Word16 *param, UWord8 *stream, enum RXFrameType
+ *frame_type, enum Mode *speech_mode, Word16 *q_bit )
+{
+ enum Mode mode;
+ Word32 j;
+ Word16 *mask;
+
+
+ memset( param, 0, PRMNO_MR122 <<1 );
+ *q_bit = 0x01 & (*stream >> 2);
+ mode = 0x0F & (*stream >> 3);
+ stream++;
+
+ if ( mode == MRDTX ) {
+ mask = order_MRDTX;
+
+ for ( j = 1; j < 36; j++ ) {
+ if ( *stream & 0x80 )
+ param[ * mask] = ( short )( param[ * mask] + *( mask + 1 ) );
+ mask += 2;
+
+ if ( j % 8 )
+ *stream <<= 1;
+ else
+ stream++;
+ }
+
+ /* get SID type bit */
+
+ *frame_type = RX_SID_FIRST;
+ if (*stream & 0x80)
+ *frame_type = RX_SID_UPDATE;
+
+ /* since there is update, use it */
+ /* *frame_type = RX_SID_UPDATE; */
+
+ /* speech mode indicator */
+        *speech_mode = (*stream >> 4) & 0x07;   /* bitwise mask: the speech mode indicator is 3 bits */
+
+ }
+ else if ( mode == 15 ) {
+ *frame_type = RX_NO_DATA;
+ }
+ else if ( mode == MR475 ) {
+ mask = order_MR475;
+
+ for ( j = 1; j < 96; j++ ) {
+ if ( *stream & 0x80 )
+ param[ * mask] = ( short )( param[ * mask] + *( mask + 1 ) );
+ mask += 2;
+
+ if ( j % 8 )
+ *stream <<= 1;
+ else
+ stream++;
+ }
+ *frame_type = RX_SPEECH_GOOD;
+ }
+ else if ( mode == MR515 ) {
+ mask = order_MR515;
+
+ for ( j = 1; j < 104; j++ ) {
+ if ( *stream & 0x80 )
+ param[ * mask] = ( short )( param[ * mask] + *( mask + 1 ) );
+ mask += 2;
+
+ if ( j % 8 )
+ *stream <<= 1;
+ else
+ stream++;
+ }
+ *frame_type = RX_SPEECH_GOOD;
+ }
+ else if ( mode == MR59 ) {
+ mask = order_MR59;
+
+ for ( j = 1; j < 119; j++ ) {
+ if ( *stream & 0x80 )
+ param[ * mask] = ( short )( param[ * mask] + *( mask + 1 ) );
+ mask += 2;
+
+ if ( j % 8 )
+ *stream <<= 1;
+ else
+ stream++;
+ }
+ *frame_type = RX_SPEECH_GOOD;
+ }
+ else if ( mode == MR67 ) {
+ mask = order_MR67;
+
+ for ( j = 1; j < 135; j++ ) {
+ if ( *stream & 0x80 )
+ param[ * mask] = ( short )( param[ * mask] + *( mask + 1 ) );
+ mask += 2;
+
+ if ( j % 8 )
+ *stream <<= 1;
+ else
+ stream++;
+ }
+ *frame_type = RX_SPEECH_GOOD;
+ }
+ else if ( mode == MR74 ) {
+ mask = order_MR74;
+
+ for ( j = 1; j < 149; j++ ) {
+ if ( *stream & 0x80 )
+ param[ * mask] = ( short )( param[ * mask] + *( mask + 1 ) );
+ mask += 2;
+
+ if ( j % 8 )
+ *stream <<= 1;
+ else
+ stream++;
+ }
+ *frame_type = RX_SPEECH_GOOD;
+ }
+ else if ( mode == MR795 ) {
+ mask = order_MR795;
+
+ for ( j = 1; j < 160; j++ ) {
+ if ( *stream & 0x80 )
+ param[ * mask] = ( short )( param[ * mask] + *( mask + 1 ) );
+ mask += 2;
+
+ if ( j % 8 )
+ *stream <<= 1;
+ else
+ stream++;
+ }
+ *frame_type = RX_SPEECH_GOOD;
+ }
+ else if ( mode == MR102 ) {
+ mask = order_MR102;
+
+ for ( j = 1; j < 205; j++ ) {
+ if ( *stream & 0x80 )
+ param[ * mask] = ( short )( param[ * mask] + *( mask + 1 ) );
+ mask += 2;
+
+ if ( j % 8 )
+ *stream <<= 1;
+ else
+ stream++;
+ }
+ *frame_type = RX_SPEECH_GOOD;
+ }
+ else if ( mode == MR122 ) {
+ mask = order_MR122;
+
+ for ( j = 1; j < 245; j++ ) {
+ if ( *stream & 0x80 )
+ param[ * mask] = ( short )( param[ * mask] + *( mask + 1 ) );
+ mask += 2;
+
+ if ( j % 8 )
+ *stream <<= 1;
+ else
+ stream++;
+ }
+ *frame_type = RX_SPEECH_GOOD;
+ }
+ else
+ *frame_type = RX_SPEECH_BAD;
+ return mode;
+}
+
+#else
+
+/*
+ * Decoder3GPP
+ *
+ *
+ * Parameters:
+ * param O: AMR parameters
+ * stream I: input bitstream
+ * frame_type O: frame type
+ * speech_mode O: speech mode in DTX
+ *
+ * Function:
+ *    Converts a 3GPP IF2 frame into decoder parameters
+ *
+ * Returns:
+ * mode used mode
+ */
+enum Mode Decoder3GPP( Word16 *param, UWord8 *stream, enum RXFrameType
+ *frame_type, enum Mode *speech_mode )
+{
+ enum Mode mode;
+ Word32 j;
+ Word16 *mask;
+
+
+ memset( param, 0, PRMNO_MR122 <<1 );
+ mode = 0xF & *stream;
+ *stream >>= 4;
+
+ if ( mode == MRDTX ) {
+ mask = order_MRDTX;
+
+ for ( j = 5; j < 40; j++ ) {
+ if ( *stream & 0x1 )
+ param[ * mask] = ( short )( param[ * mask] + *( mask + 1 ) );
+ mask += 2;
+
+ if ( j % 8 )
+ *stream >>= 1;
+ else
+ stream++;
+ }
+
+ /* get SID type bit */
+
+ *frame_type = RX_SID_FIRST;
+ if (*stream)
+ *frame_type = RX_SID_UPDATE;
+
+ /* since there is update, use it */
+ /* *frame_type = RX_SID_UPDATE; */
+ stream++;
+
+ /* speech mode indicator */
+ *speech_mode = *stream;
+ }
+ else if ( mode == 15 ) {
+ *frame_type = RX_NO_DATA;
+ }
+ else if ( mode == MR475 ) {
+ mask = order_MR475;
+
+ for ( j = 5; j < 100; j++ ) {
+ if ( *stream & 0x1 )
+ param[ * mask] = ( short )( param[ * mask] + *( mask + 1 ) );
+ mask += 2;
+
+ if ( j % 8 )
+ *stream >>= 1;
+ else
+ stream++;
+ }
+ *frame_type = RX_SPEECH_GOOD;
+ }
+ else if ( mode == MR515 ) {
+ mask = order_MR515;
+
+ for ( j = 5; j < 108; j++ ) {
+ if ( *stream & 0x1 )
+ param[ * mask] = ( short )( param[ * mask] + *( mask + 1 ) );
+ mask += 2;
+
+ if ( j % 8 )
+ *stream >>= 1;
+ else
+ stream++;
+ }
+ *frame_type = RX_SPEECH_GOOD;
+ }
+ else if ( mode == MR59 ) {
+ mask = order_MR59;
+
+ for ( j = 5; j < 123; j++ ) {
+ if ( *stream & 0x1 )
+ param[ * mask] = ( short )( param[ * mask] + *( mask + 1 ) );
+ mask += 2;
+
+ if ( j % 8 )
+ *stream >>= 1;
+ else
+ stream++;
+ }
+ *frame_type = RX_SPEECH_GOOD;
+ }
+ else if ( mode == MR67 ) {
+ mask = order_MR67;
+
+ for ( j = 5; j < 139; j++ ) {
+ if ( *stream & 0x1 )
+ param[ * mask] = ( short )( param[ * mask] + *( mask + 1 ) );
+ mask += 2;
+
+ if ( j % 8 )
+ *stream >>= 1;
+ else
+ stream++;
+ }
+ *frame_type = RX_SPEECH_GOOD;
+ }
+ else if ( mode == MR74 ) {
+ mask = order_MR74;
+
+ for ( j = 5; j < 153; j++ ) {
+ if ( *stream & 0x1 )
+ param[ * mask] = ( short )( param[ * mask] + *( mask + 1 ) );
+ mask += 2;
+
+ if ( j % 8 )
+ *stream >>= 1;
+ else
+ stream++;
+ }
+ *frame_type = RX_SPEECH_GOOD;
+ }
+ else if ( mode == MR795 ) {
+ mask = order_MR795;
+
+ for ( j = 5; j < 164; j++ ) {
+ if ( *stream & 0x1 )
+ param[ * mask] = ( short )( param[ * mask] + *( mask + 1 ) );
+ mask += 2;
+
+ if ( j % 8 )
+ *stream >>= 1;
+ else
+ stream++;
+ }
+ *frame_type = RX_SPEECH_GOOD;
+ }
+ else if ( mode == MR102 ) {
+ mask = order_MR102;
+
+ for ( j = 5; j < 209; j++ ) {
+ if ( *stream & 0x1 )
+ param[ * mask] = ( short )( param[ * mask] + *( mask + 1 ) );
+ mask += 2;
+
+ if ( j % 8 )
+ *stream >>= 1;
+ else
+ stream++;
+ }
+ *frame_type = RX_SPEECH_GOOD;
+ }
+ else if ( mode == MR122 ) {
+ mask = order_MR122;
+
+ for ( j = 5; j < 249; j++ ) {
+ if ( *stream & 0x1 )
+ param[ * mask] = ( short )( param[ * mask] + *( mask + 1 ) );
+ mask += 2;
+
+ if ( j % 8 )
+ *stream >>= 1;
+ else
+ stream++;
+ }
+ *frame_type = RX_SPEECH_GOOD;
+ }
+ else
+ *frame_type = RX_SPEECH_BAD;
+ return mode;
+}
+#endif
+#endif
+
+/*
+ * Decoder_Interface_reset
+ *
+ *
+ * Parameters:
+ * st O: state struct
+ *
+ * Function:
+ * Reset homing frame counter
+ *
+ * Returns:
+ * void
+ */
+void Decoder_Interface_reset( dec_interface_State *st )
+{
+ st->reset_flag_old = 1;
+ st->prev_ft = RX_SPEECH_GOOD;
+ st->prev_mode = MR475; /* minimum bitrate */
+}
+
+
+/*
+ * Decoder_Interface_init
+ *
+ *
+ * Parameters:
+ * void
+ *
+ * Function:
+ *    Allocates and initializes the state memory
+ *
+ * Returns:
+ * success : pointer to structure
+ * failure : NULL
+ */
+void * Decoder_Interface_init( void )
+{
+ dec_interface_State * s;
+
+ /* allocate memory */
+ if ( ( s = ( dec_interface_State * ) malloc( sizeof( dec_interface_State ) ) ) ==
+ NULL ) {
+ fprintf( stderr, "Decoder_Interface_init: "
+ "can not malloc state structure\n" );
+ return NULL;
+ }
+ s->decoder_State = Speech_Decode_Frame_init( );
+
+ if ( s->decoder_State == NULL ) {
+ free( s );
+ return NULL;
+ }
+ Decoder_Interface_reset( s );
+ return s;
+}
+
+
+/*
+ * Decoder_Interface_exit
+ *
+ *
+ * Parameters:
+ * state I: state structure
+ *
+ * Function:
+ *    Frees the memory used for the decoder state
+ *
+ * Returns:
+ * Void
+ */
+void Decoder_Interface_exit( void *state )
+{
+ dec_interface_State * s;
+ s = ( dec_interface_State * )state;
+
+ /* free memory */
+ Speech_Decode_Frame_exit(s->decoder_State );
+ free( s );
+ s = NULL;
+ state = NULL;
+}
+
+
+/*
+ * Decoder_Interface_Decode
+ *
+ *
+ * Parameters:
+ * st B: state structure
+ * bits I: bit stream
+ * synth O: synthesized speech
+ * bfi I: bad frame indicator
+ *
+ * Function:
+ * Decode bit stream to synthesized speech
+ *
+ * Returns:
+ * Void
+ */
+void Decoder_Interface_Decode( void *st,
+
+#ifndef ETSI
+ UWord8 *bits,
+
+#else
+ Word16 *bits,
+#endif
+
+ Word16 *synth, int bfi)
+{
+ enum Mode mode; /* AMR mode */
+
+#ifndef ETSI
+ enum Mode speech_mode = MR475; /* speech mode */
+#endif
+
+ Word16 prm[PRMNO_MR122]; /* AMR parameters */
+
+ enum RXFrameType frame_type; /* frame type */
+ dec_interface_State * s; /* pointer to structure */
+
+ const Word16 *homing; /* pointer to homing frame */
+ Word16 homingSize; /* frame size for homing frame */
+ Word32 i; /* counter */
+ Word32 resetFlag = 1; /* homing frame */
+
+#ifndef ETSI
+#ifndef IF2
+ Word16 q_bit;
+#endif
+#endif
+
+ s = ( dec_interface_State * )st;
+
+#ifndef ETSI
+
+ /*
+ * extract mode information and frametype,
+ * octets to parameters
+ */
+#ifdef IF2
+ mode = Decoder3GPP( prm, bits, &frame_type, &speech_mode );
+#else
+ mode = DecoderMMS( prm, bits, &frame_type, &speech_mode, &q_bit );
+ if (!bfi) bfi = 1 - q_bit;
+#endif
+
+ if ( bfi == 1 ) {
+ if ( mode <= MR122 ) {
+ frame_type = RX_SPEECH_BAD;
+ }
+ else if ( frame_type != RX_NO_DATA ) {
+ frame_type = RX_SID_BAD;
+ mode = s->prev_mode;
+ }
+ } else {
+ if ( frame_type == RX_SID_FIRST || frame_type == RX_SID_UPDATE) {
+ mode = speech_mode;
+ }
+ else if ( frame_type == RX_NO_DATA ) {
+ mode = s->prev_mode;
+ }
+ /*
+ * if no mode information
+ * guess one from the previous frame
+ */
+ if ( frame_type == RX_SPEECH_BAD ) {
+ mode = s->prev_mode;
+ if ( s->prev_ft >= RX_SID_FIRST ) {
+ frame_type = RX_SID_BAD;
+ }
+ }
+ }
+#else
+ bfi = 0;
+ frame_type = bits[0];
+
+ switch ( frame_type ) {
+ case 0:
+ frame_type = RX_SPEECH_GOOD;
+ mode = bits[245];
+ Bits2Prm( mode, &bits[1], prm );
+ break;
+
+ case 1:
+ frame_type = RX_SID_FIRST;
+ mode = bits[245];
+ break;
+
+ case 2:
+ frame_type = RX_SID_UPDATE;
+ mode = bits[245];
+ Bits2Prm( MRDTX, &bits[1], prm );
+ break;
+
+ case 3:
+ frame_type = RX_NO_DATA;
+ mode = s->prev_mode;
+ break;
+ }
+#endif
+
+ /* test for homing frame */
+ if ( s->reset_flag_old == 1 ) {
+ switch ( mode ) {
+ case MR122:
+ homing = dhf_MR122;
+ homingSize = 18;
+ break;
+
+ case MR102:
+ homing = dhf_MR102;
+ homingSize = 12;
+ break;
+
+ case MR795:
+ homing = dhf_MR795;
+ homingSize = 8;
+ break;
+
+ case MR74:
+ homing = dhf_MR74;
+ homingSize = 7;
+ break;
+
+ case MR67:
+ homing = dhf_MR67;
+ homingSize = 7;
+ break;
+
+ case MR59:
+ homing = dhf_MR59;
+ homingSize = 7;
+ break;
+
+ case MR515:
+ homing = dhf_MR515;
+ homingSize = 7;
+ break;
+
+ case MR475:
+ homing = dhf_MR475;
+ homingSize = 7;
+ break;
+
+ default:
+ homing = NULL;
+ homingSize = 0;
+ break;
+ }
+
+ for ( i = 0; i < homingSize; i++ ) {
+ resetFlag = prm[i] ^ homing[i];
+
+ if ( resetFlag )
+ break;
+ }
+ }
+
+ if ( ( resetFlag == 0 ) && ( s->reset_flag_old != 0 ) ) {
+ for ( i = 0; i < 160; i++ ) {
+ synth[i] = EHF_MASK;
+ }
+ }
+ else
+ Speech_Decode_Frame( s->decoder_State, mode, prm, frame_type, synth );
+
+ if ( s->reset_flag_old == 0 ) {
+ /* check whole frame */
+ switch ( mode ) {
+ case MR122:
+ homing = dhf_MR122;
+ homingSize = PRMNO_MR122;
+ break;
+
+ case MR102:
+ homing = dhf_MR102;
+ homingSize = PRMNO_MR102;
+ break;
+
+ case MR795:
+ homing = dhf_MR795;
+ homingSize = PRMNO_MR795;
+ break;
+
+ case MR74:
+ homing = dhf_MR74;
+ homingSize = PRMNO_MR74;
+ break;
+
+ case MR67:
+ homing = dhf_MR67;
+ homingSize = PRMNO_MR67;
+ break;
+
+ case MR59:
+ homing = dhf_MR59;
+ homingSize = PRMNO_MR59;
+ break;
+
+ case MR515:
+ homing = dhf_MR515;
+ homingSize = PRMNO_MR515;
+ break;
+
+ case MR475:
+ homing = dhf_MR475;
+ homingSize = PRMNO_MR475;
+ break;
+
+ default:
+ homing = NULL;
+ homingSize = 0;
+ }
+
+ for ( i = 0; i < homingSize; i++ ) {
+ resetFlag = prm[i] ^ homing[i];
+
+ if ( resetFlag )
+ break;
+ }
+ }
+
+ /* reset decoder if current frame is a homing frame */
+ if ( resetFlag == 0 ) {
+ Speech_Decode_Frame_reset( s->decoder_State );
+ }
+ s->reset_flag_old = !resetFlag;
+ s->prev_ft = frame_type;
+ s->prev_mode = mode;
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/amr_float/interf_dec.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/amr_float/interf_dec.h
new file mode 100644
index 000000000..3f4653379
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/amr_float/interf_dec.h
@@ -0,0 +1,55 @@
+/*
+ * ===================================================================
+ * TS 26.104
+ * REL-5 V5.4.0 2004-03
+ * REL-6 V6.1.0 2004-03
+ * 3GPP AMR Floating-point Speech Codec
+ * ===================================================================
+ *
+ */
+
+/*
+ * interf_dec.h
+ *
+ *
+ * Project:
+ * AMR Floating-Point Codec
+ *
+ * Contains:
+ * Defines interface to AMR decoder
+ *
+ */
+
+#ifndef _interf_dec_h_
+#define _interf_dec_h_
+
+/*
+ * Function prototypes
+ */
+/*
+ * Conversion from packed bitstream to encoded parameters
+ * Decoding parameters to speech
+ */
+void Decoder_Interface_Decode( void *st,
+
+#ifndef ETSI
+ unsigned char *bits,
+
+#else
+ short *bits,
+#endif
+
+ short *synth, int bfi );
+
+/*
+ * Reserve and init. memory
+ */
+void *Decoder_Interface_init( void );
+
+/*
+ * Exit and free memory
+ */
+void Decoder_Interface_exit( void *state );
+
+#endif
+
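Editor's note: the header above is the complete public decoder API (init, per-frame decode, exit). As orientation only, here is a minimal usage sketch for the non-ETSI (octet-aligned) build; it is not part of this patch. The frame pointers, bad-frame flags and output buffer are assumed to come from the caller, and each call yields 160 samples of 8 kHz PCM (20 ms), as the implementation above shows.

#include "interf_dec.h"

/* Sketch only -- not part of this patch. */
static void amr_decode_frames(unsigned char *const *frames, const int *bfi,
                              int nframes, short *pcm_out)
{
    void *state = Decoder_Interface_init();          /* reserve and init. memory */
    int   i;

    for (i = 0; i < nframes; i++)                    /* one 20 ms frame per call */
        Decoder_Interface_Decode(state, frames[i], pcm_out + i * 160, bfi[i]);

    Decoder_Interface_exit(state);                   /* free memory */
}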
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/amr_float/interf_enc.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/amr_float/interf_enc.h
new file mode 100644
index 000000000..6b89f9a8a
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/amr_float/interf_enc.h
@@ -0,0 +1,58 @@
+/*
+ * ===================================================================
+ * TS 26.104
+ * REL-5 V5.4.0 2004-03
+ * REL-6 V6.1.0 2004-03
+ * 3GPP AMR Floating-point Speech Codec
+ * ===================================================================
+ *
+ */
+
+/*
+ * interf_enc.h
+ *
+ *
+ * Project:
+ * AMR Floating-Point Codec
+ *
+ * Contains:
+ * Defines interface to AMR encoder
+ *
+ */
+
+#ifndef _interf_enc_h_
+#define _interf_enc_h_
+
+/*
+ * include files
+ */
+#include"sp_enc.h"
+
+/*
+ * Function prototypes
+ */
+/*
+ * Encodes one frame of speech
+ * Returns packed octets
+ */
+int Encoder_Interface_Encode( void *st, enum Mode mode, short *speech,
+
+#ifndef ETSI
+ unsigned char *serial, /* max size 31 bytes */
+
+#else
+ short *serial, /* size 500 bytes */
+#endif
+
+ int forceSpeech ); /* use speech mode */
+
+/*
+ * Reserve and init. memory
+ */
+void *Encoder_Interface_init( int dtx );
+
+/*
+ * Exit and free memory
+ */
+void Encoder_Interface_exit( void *state );
+#endif
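Editor's note: for symmetry, a single-frame sketch of the encoder side of this interface (again assuming the non-ETSI/octet build and not part of this patch). The caller supplies one 20 ms frame of 8 kHz, 16-bit PCM and an output buffer at least as large as the 31-byte maximum documented above; the int return value is read here as the number of packed octets, per the "Returns packed octets" note.

#include "interf_enc.h"

/* Sketch only -- not part of this patch. */
static int amr_encode_frame(short speech[160], unsigned char *serial)
{
    void *state  = Encoder_Interface_init(0);                 /* DTX disabled     */
    int   octets = Encoder_Interface_Encode(state, MR122,     /* 12.2 kbit/s mode */
                                            speech, serial,
                                            0);               /* no forceSpeech   */
    Encoder_Interface_exit(state);
    return octets;
}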
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/amr_float/interf_rom.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/amr_float/interf_rom.h
new file mode 100644
index 000000000..349f9d4d0
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/amr_float/interf_rom.h
@@ -0,0 +1,1802 @@
+/*
+ * ===================================================================
+ * TS 26.104
+ * REL-5 V5.4.0 2004-03
+ * REL-6 V6.1.0 2004-03
+ * 3GPP AMR Floating-point Speech Codec
+ * ===================================================================
+ *
+ */
+
+/*
+ * interf_rom.h
+ *
+ *
+ * Project:
+ * AMR Floating-Point Codec
+ *
+ * Contains:
+ * Tables: Subjective importance
+ * Homing frames
+ *
+ *
+ */
+
+#ifndef _interf_rom_h_
+#define _interf_rom_h_
+
+/*
+ * include files
+ */
+#include"typedef.h"
+
+/*
+ * definition of constants
+ */
+
+/* number of parameters */
+#define PRMNO_MR475 17
+#define PRMNO_MR515 19
+#define PRMNO_MR59 19
+#define PRMNO_MR67 19
+#define PRMNO_MR74 19
+#define PRMNO_MR795 23
+#define PRMNO_MR102 39
+#define PRMNO_MR122 57
+#define PRMNO_MRDTX 5
+
+/*
+ * tables
+ */
+#ifndef IF2
+#ifndef ETSI
+static const UWord8 block_size[16]={ 13, 14, 16, 18, 20, 21, 27, 32,
+ 6 , 0 , 0 , 0 , 0 , 0 , 0 , 1 };
+
+static const UWord8 toc_byte[16]={0x04, 0x0C, 0x14, 0x1C, 0x24, 0x2C, 0x34, 0x3C,
+ 0x44, 0x4C, 0x54, 0x5C, 0x64, 0x6C, 0x74, 0x7C};
+#endif
+#else
+/* One encoded frame (bytes) */
+static const UWord8 block_size[16]={ 13, 14, 16, 18, 19, 21, 26, 31,
+ 5 , 0 , 0 , 0 , 0 , 0 , 0 , 1 };
+#endif
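/*
 * Editor's note (illustrative only, not part of this patch): in the
 * non-IF2/non-ETSI branch the two tables above appear to follow the
 * octet-aligned storage/MMS framing -- toc_byte[mode] places the 4-bit
 * frame type in bits 3..6 of the ToC octet with the quality bit set, and
 * block_size[type] then gives the total octet count of such a frame,
 * ToC included.  A reader of that format could size frames roughly so:
 */
static int amr_storage_frame_octets(unsigned char toc)
{
    int frame_type = (toc >> 3) & 0x0F;   /* same field toc_byte[] encodes  */
    return block_size[frame_type];        /* 0 for the reserved frame types */
}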
+
+/* Subjective importance of the speech encoded bits */
+static Word16 order_MR475[] =
+{
+ 0, 0x80,
+ 0, 0x40,
+ 0, 0x20,
+ 0, 0x10,
+ 0, 0x8,
+ 0, 0x4,
+ 0, 0x2,
+ 0, 0x1,
+ 1, 0x80,
+ 1, 0x40,
+ 1, 0x20,
+ 1, 0x10,
+ 1, 0x8,
+ 1, 0x4,
+ 1, 0x2,
+ 1, 0x1,
+ 3, 0x80,
+ 3, 0x40,
+ 3, 0x20,
+ 3, 0x10,
+ 3, 0x8,
+ 3, 0x4,
+ 7, 0x8,
+ 7, 0x4,
+ 10, 0x8,
+ 10, 0x4,
+ 14, 0x8,
+ 14, 0x4,
+ 6, 0x1,
+ 6, 0x2,
+ 6, 0x4,
+ 6, 0x8,
+ 13, 0x1,
+ 13, 0x2,
+ 13, 0x4,
+ 13, 0x8,
+ 2, 0x20,
+ 2, 0x10,
+ 2, 0x4,
+ 2, 0x1,
+ 13, 0x10,
+ 13, 0x20,
+ 13, 0x40,
+ 13, 0x80,
+ 3, 0x2,
+ 3, 0x1,
+ 6, 0x10,
+ 6, 0x20,
+ 6, 0x40,
+ 6, 0x80,
+ 5, 0x2,
+ 5, 0x1,
+ 2, 0x40,
+ 2, 0x8,
+ 2, 0x2,
+ 7, 0x2,
+ 7, 0x1,
+ 9, 0x2,
+ 9, 0x1,
+ 10, 0x2,
+ 10, 0x1,
+ 12, 0x2,
+ 12, 0x1,
+ 14, 0x2,
+ 14, 0x1,
+ 16, 0x2,
+ 16, 0x1,
+ 4, 0x20,
+ 4, 0x10,
+ 4, 0x4,
+ 4, 0x2,
+ 8, 0x20,
+ 8, 0x10,
+ 8, 0x4,
+ 8, 0x2,
+ 11, 0x20,
+ 11, 0x10,
+ 11, 0x4,
+ 11, 0x2,
+ 15, 0x20,
+ 15, 0x10,
+ 15, 0x4,
+ 15, 0x2,
+ 4, 0x8,
+ 8, 0x8,
+ 11, 0x8,
+ 15, 0x8,
+ 4, 0x1,
+ 8, 0x1,
+ 11, 0x1,
+ 15, 0x1,
+ 4, 0x40,
+ 8, 0x40,
+ 11, 0x40,
+ 15, 0x40
+};
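/*
 * Editor's note (illustrative only, not part of this patch): each order_*
 * table above is read as consecutive (parameter index, bit mask) pairs,
 * one pair per transmitted bit, listed from subjectively most important to
 * least important -- order_MR475[] holds 95 pairs, one per speech bit of
 * the 4.75 kbit/s frame.  The octet packing/unpacking helpers in
 * interf_dec.c/interf_enc.c presumably walk such a table bit by bit,
 * roughly as in this sketch (prm[] assumed zeroed by the caller,
 * MSB-first bit order assumed):
 */
static void unpack_by_order(const Word16 *order, int nbits,
                            const unsigned char *packed, Word16 *prm)
{
    int i;
    for (i = 0; i < nbits; i++)
        if (packed[i >> 3] & (0x80 >> (i & 7)))       /* is bit i set?        */
            prm[order[2 * i]] |= order[2 * i + 1];    /* set that bit of prm  */
}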
+static Word16 order_MR515[] =
+{
+ 0, 0x1,
+ 0, 0x2,
+ 0, 0x4,
+ 0, 0x8,
+ 0, 0x10,
+ 0, 0x20,
+ 0, 0x40,
+ 0, 0x80,
+ 1, 0x1,
+ 1, 0x2,
+ 1, 0x4,
+ 1, 0x8,
+ 1, 0x10,
+ 1, 0x20,
+ 1, 0x40,
+ 1, 0x80,
+ 3, 0x80,
+ 3, 0x40,
+ 3, 0x20,
+ 3, 0x10,
+ 3, 0x8,
+ 7, 0x8,
+ 11, 0x8,
+ 15, 0x8,
+ 6, 0x1,
+ 6, 0x2,
+ 6, 0x4,
+ 10, 0x1,
+ 10, 0x2,
+ 10, 0x4,
+ 14, 0x1,
+ 14, 0x2,
+ 14, 0x4,
+ 18, 0x1,
+ 18, 0x2,
+ 18, 0x4,
+ 6, 0x8,
+ 10, 0x8,
+ 14, 0x8,
+ 18, 0x8,
+ 3, 0x4,
+ 7, 0x4,
+ 11, 0x4,
+ 15, 0x4,
+ 2, 0x10,
+ 6, 0x10,
+ 10, 0x10,
+ 14, 0x10,
+ 18, 0x10,
+ 3, 0x2,
+ 7, 0x2,
+ 11, 0x2,
+ 2, 0x20,
+ 2, 0x4,
+ 2, 0x1,
+ 6, 0x20,
+ 10, 0x20,
+ 14, 0x20,
+ 18, 0x20,
+ 2, 0x2,
+ 3, 0x1,
+ 7, 0x1,
+ 11, 0x1,
+ 15, 0x2,
+ 2, 0x8,
+ 2, 0x40,
+ 15, 0x1,
+ 5, 0x1,
+ 5, 0x2,
+ 9, 0x1,
+ 9, 0x2,
+ 13, 0x1,
+ 4, 0x4,
+ 8, 0x4,
+ 12, 0x4,
+ 16, 0x4,
+ 13, 0x2,
+ 17, 0x1,
+ 17, 0x2,
+ 4, 0x2,
+ 8, 0x2,
+ 12, 0x2,
+ 16, 0x2,
+ 4, 0x20,
+ 8, 0x20,
+ 4, 0x10,
+ 8, 0x10,
+ 12, 0x20,
+ 12, 0x10,
+ 16, 0x20,
+ 16, 0x10,
+ 4, 0x40,
+ 8, 0x40,
+ 12, 0x40,
+ 16, 0x40,
+ 4, 0x1,
+ 8, 0x1,
+ 12, 0x1,
+ 16, 0x1,
+ 4, 0x8,
+ 8, 0x8,
+ 12, 0x8,
+ 16, 0x8
+};
+static Word16 order_MR59[] =
+{
+ 0, 0x80,
+ 0, 0x40,
+ 0, 0x8,
+ 0, 0x4,
+ 0, 0x10,
+ 0, 0x2,
+ 0, 0x1,
+ 0, 0x20,
+ 1, 0x8,
+ 1, 0x2,
+ 1, 0x100,
+ 1, 0x80,
+ 1, 0x20,
+ 1, 0x10,
+ 1, 0x4,
+ 1, 0x40,
+ 1, 0x1,
+ 3, 0x20,
+ 11, 0x20,
+ 3, 0x10,
+ 11, 0x10,
+ 3, 0x40,
+ 11, 0x40,
+ 3, 0x80,
+ 11, 0x80,
+ 3, 0x8,
+ 11, 0x8,
+ 7, 0x8,
+ 15, 0x8,
+ 6, 0x1,
+ 10, 0x1,
+ 14, 0x1,
+ 18, 0x1,
+ 3, 0x4,
+ 11, 0x4,
+ 7, 0x4,
+ 15, 0x4,
+ 6, 0x2,
+ 10, 0x2,
+ 14, 0x2,
+ 18, 0x2,
+ 7, 0x2,
+ 15, 0x2,
+ 3, 0x2,
+ 11, 0x2,
+ 3, 0x1,
+ 11, 0x1,
+ 6, 0x4,
+ 10, 0x4,
+ 14, 0x4,
+ 18, 0x4,
+ 6, 0x8,
+ 10, 0x8,
+ 14, 0x8,
+ 18, 0x8,
+ 6, 0x10,
+ 10, 0x10,
+ 14, 0x10,
+ 18, 0x10,
+ 2, 0x40,
+ 2, 0x10,
+ 2, 0x4,
+ 2, 0x8,
+ 2, 0x80,
+ 2, 0x100,
+ 2, 0x20,
+ 2, 0x2,
+ 17, 0x1,
+ 5, 0x2,
+ 13, 0x2,
+ 17, 0x2,
+ 9, 0x2,
+ 9, 0x1,
+ 5, 0x1,
+ 13, 0x1,
+ 2, 0x1,
+ 6, 0x20,
+ 10, 0x20,
+ 14, 0x20,
+ 18, 0x20,
+ 7, 0x1,
+ 15, 0x1,
+ 4, 0x4,
+ 8, 0x4,
+ 12, 0x4,
+ 16, 0x4,
+ 4, 0x8,
+ 8, 0x8,
+ 12, 0x8,
+ 16, 0x8,
+ 4, 0x40,
+ 8, 0x40,
+ 12, 0x40,
+ 16, 0x40,
+ 4, 0x80,
+ 8, 0x80,
+ 12, 0x80,
+ 16, 0x80,
+ 4, 0x100,
+ 8, 0x100,
+ 12, 0x100,
+ 16, 0x100,
+ 4, 0x1,
+ 8, 0x1,
+ 12, 0x1,
+ 16, 0x1,
+ 4, 0x2,
+ 8, 0x2,
+ 12, 0x2,
+ 16, 0x2,
+ 4, 0x10,
+ 8, 0x10,
+ 12, 0x10,
+ 16, 0x10,
+ 4, 0x20,
+ 8, 0x20,
+ 12, 0x20,
+ 16, 0x20
+};
+static Word16 order_MR67[] =
+{
+ 0, 0x80,
+ 0, 0x40,
+ 0, 0x8,
+ 0, 0x10,
+ 0, 0x4,
+ 0, 0x2,
+ 1, 0x8,
+ 0, 0x1,
+ 0, 0x20,
+ 1, 0x100,
+ 1, 0x80,
+ 1, 0x20,
+ 1, 0x2,
+ 1, 0x10,
+ 1, 0x4,
+ 1, 0x40,
+ 3, 0x20,
+ 11, 0x20,
+ 3, 0x10,
+ 11, 0x10,
+ 3, 0x40,
+ 11, 0x40,
+ 3, 0x80,
+ 11, 0x80,
+ 3, 0x8,
+ 11, 0x8,
+ 1, 0x1,
+ 7, 0x8,
+ 15, 0x8,
+ 7, 0x4,
+ 15, 0x4,
+ 3, 0x4,
+ 11, 0x4,
+ 7, 0x2,
+ 15, 0x2,
+ 6, 0x40,
+ 10, 0x40,
+ 14, 0x40,
+ 18, 0x40,
+ 3, 0x2,
+ 11, 0x2,
+ 6, 0x8,
+ 10, 0x8,
+ 14, 0x8,
+ 18, 0x8,
+ 6, 0x4,
+ 10, 0x4,
+ 14, 0x4,
+ 18, 0x4,
+ 7, 0x1,
+ 15, 0x1,
+ 3, 0x1,
+ 11, 0x1,
+ 2, 0x40,
+ 2, 0x4,
+ 6, 0x2,
+ 10, 0x2,
+ 14, 0x2,
+ 18, 0x2,
+ 2, 0x10,
+ 2, 0x8,
+ 2, 0x80,
+ 2, 0x100,
+ 2, 0x20,
+ 2, 0x2,
+ 2, 0x1,
+ 6, 0x10,
+ 10, 0x10,
+ 14, 0x10,
+ 18, 0x10,
+ 5, 0x1,
+ 9, 0x1,
+ 13, 0x1,
+ 17, 0x1,
+ 6, 0x1,
+ 10, 0x1,
+ 14, 0x1,
+ 18, 0x1,
+ 5, 0x2,
+ 9, 0x2,
+ 13, 0x2,
+ 17, 0x2,
+ 18, 0x20,
+ 14, 0x20,
+ 10, 0x20,
+ 6, 0x20,
+ 5, 0x4,
+ 9, 0x4,
+ 13, 0x4,
+ 17, 0x4,
+ 4, 0x4,
+ 8, 0x4,
+ 12, 0x4,
+ 16, 0x4,
+ 4, 0x20,
+ 8, 0x20,
+ 12, 0x20,
+ 16, 0x20,
+ 4, 0x40,
+ 8, 0x40,
+ 12, 0x40,
+ 16, 0x40,
+ 4, 0x200,
+ 8, 0x200,
+ 12, 0x200,
+ 16, 0x200,
+ 4, 0x400,
+ 8, 0x400,
+ 12, 0x400,
+ 16, 0x400,
+ 4, 0x1,
+ 8, 0x1,
+ 12, 0x1,
+ 16, 0x1,
+ 4, 0x2,
+ 8, 0x2,
+ 12, 0x2,
+ 16, 0x2,
+ 4, 0x8,
+ 8, 0x8,
+ 12, 0x8,
+ 16, 0x8,
+ 4, 0x10,
+ 8, 0x10,
+ 12, 0x10,
+ 16, 0x10,
+ 4, 0x80,
+ 8, 0x80,
+ 12, 0x80,
+ 16, 0x80,
+ 4, 0x100,
+ 8, 0x100,
+ 12, 0x100,
+ 16, 0x100
+};
+static Word16 order_MR74[] =
+{
+ 0, 0x80,
+ 0, 0x40,
+ 0, 0x20,
+ 0, 0x10,
+ 0, 0x8,
+ 0, 0x4,
+ 0, 0x2,
+ 0, 0x1,
+ 1, 0x100,
+ 1, 0x80,
+ 1, 0x40,
+ 1, 0x20,
+ 1, 0x10,
+ 1, 0x8,
+ 1, 0x4,
+ 1, 0x2,
+ 1, 0x1,
+ 3, 0x80,
+ 11, 0x80,
+ 3, 0x40,
+ 11, 0x40,
+ 3, 0x20,
+ 11, 0x20,
+ 3, 0x10,
+ 11, 0x10,
+ 3, 0x8,
+ 11, 0x8,
+ 6, 0x40,
+ 10, 0x40,
+ 14, 0x40,
+ 18, 0x40,
+ 6, 0x20,
+ 10, 0x20,
+ 14, 0x20,
+ 18, 0x20,
+ 6, 0x8,
+ 10, 0x8,
+ 14, 0x8,
+ 18, 0x8,
+ 6, 0x4,
+ 10, 0x4,
+ 14, 0x4,
+ 18, 0x4,
+ 7, 0x10,
+ 15, 0x10,
+ 7, 0x8,
+ 15, 0x8,
+ 2, 0x10,
+ 2, 0x8,
+ 2, 0x4,
+ 2, 0x100,
+ 2, 0x80,
+ 2, 0x40,
+ 3, 0x4,
+ 7, 0x4,
+ 11, 0x4,
+ 15, 0x4,
+ 6, 0x2,
+ 10, 0x2,
+ 14, 0x2,
+ 18, 0x2,
+ 2, 0x20,
+ 2, 0x2,
+ 2, 0x1,
+ 5, 0x1,
+ 9, 0x1,
+ 13, 0x1,
+ 17, 0x1,
+ 6, 0x1,
+ 10, 0x1,
+ 14, 0x1,
+ 18, 0x1,
+ 5, 0x2,
+ 9, 0x2,
+ 13, 0x2,
+ 17, 0x2,
+ 5, 0x4,
+ 9, 0x4,
+ 6, 0x10,
+ 10, 0x10,
+ 14, 0x10,
+ 18, 0x10,
+ 13, 0x4,
+ 17, 0x4,
+ 5, 0x8,
+ 9, 0x8,
+ 13, 0x8,
+ 17, 0x8,
+ 3, 0x2,
+ 3, 0x1,
+ 7, 0x2,
+ 7, 0x1,
+ 11, 0x2,
+ 11, 0x1,
+ 15, 0x2,
+ 15, 0x1,
+ 4, 0x20,
+ 4, 0x10,
+ 4, 0x8,
+ 4, 0x4,
+ 4, 0x2,
+ 4, 0x1,
+ 8, 0x20,
+ 8, 0x10,
+ 8, 0x8,
+ 8, 0x4,
+ 8, 0x2,
+ 8, 0x1,
+ 12, 0x20,
+ 12, 0x10,
+ 12, 0x8,
+ 12, 0x4,
+ 12, 0x2,
+ 12, 0x1,
+ 16, 0x20,
+ 16, 0x10,
+ 16, 0x8,
+ 16, 0x4,
+ 16, 0x2,
+ 16, 0x1,
+ 4, 0x1000,
+ 8, 0x1000,
+ 12, 0x1000,
+ 16, 0x1000,
+ 4, 0x800,
+ 8, 0x800,
+ 12, 0x800,
+ 16, 0x800,
+ 4, 0x400,
+ 8, 0x400,
+ 12, 0x400,
+ 16, 0x400,
+ 4, 0x200,
+ 8, 0x200,
+ 12, 0x200,
+ 16, 0x200,
+ 4, 0x100,
+ 8, 0x100,
+ 12, 0x100,
+ 16, 0x100,
+ 4, 0x80,
+ 8, 0x80,
+ 12, 0x80,
+ 16, 0x80,
+ 4, 0x40,
+ 8, 0x40,
+ 12, 0x40,
+ 16, 0x40
+};
+static Word16 order_MR795[] =
+{
+ 0, 0x1,
+ 0, 0x2,
+ 0, 0x4,
+ 0, 0x8,
+ 0, 0x10,
+ 0, 0x20,
+ 0, 0x40,
+ 1, 0x8,
+ 1, 0x2,
+ 1, 0x100,
+ 1, 0x80,
+ 1, 0x20,
+ 1, 0x10,
+ 1, 0x4,
+ 1, 0x40,
+ 1, 0x1,
+ 2, 0x40,
+ 2, 0x10,
+ 2, 0x4,
+ 2, 0x8,
+ 2, 0x80,
+ 2, 0x100,
+ 2, 0x20,
+ 7, 0x10,
+ 12, 0x10,
+ 17, 0x10,
+ 22, 0x10,
+ 7, 0x8,
+ 12, 0x8,
+ 17, 0x8,
+ 22, 0x8,
+ 7, 0x4,
+ 12, 0x4,
+ 17, 0x4,
+ 22, 0x4,
+ 6, 0x8,
+ 11, 0x8,
+ 16, 0x8,
+ 21, 0x8,
+ 6, 0x4,
+ 11, 0x4,
+ 16, 0x4,
+ 21, 0x4,
+ 3, 0x80,
+ 13, 0x80,
+ 3, 0x40,
+ 13, 0x40,
+ 3, 0x20,
+ 13, 0x20,
+ 3, 0x10,
+ 13, 0x10,
+ 3, 0x8,
+ 13, 0x8,
+ 8, 0x20,
+ 18, 0x20,
+ 8, 0x10,
+ 18, 0x10,
+ 8, 0x8,
+ 18, 0x8,
+ 7, 0x2,
+ 12, 0x2,
+ 17, 0x2,
+ 22, 0x2,
+ 3, 0x4,
+ 13, 0x4,
+ 8, 0x4,
+ 18, 0x4,
+ 0, 0x80,
+ 0, 0x100,
+ 2, 0x2,
+ 2, 0x1,
+ 3, 0x2,
+ 13, 0x2,
+ 3, 0x1,
+ 13, 0x1,
+ 8, 0x2,
+ 18, 0x2,
+ 8, 0x1,
+ 18, 0x1,
+ 6, 0x2,
+ 11, 0x2,
+ 16, 0x2,
+ 21, 0x2,
+ 7, 0x1,
+ 12, 0x1,
+ 17, 0x1,
+ 22, 0x1,
+ 6, 0x1,
+ 11, 0x1,
+ 16, 0x1,
+ 21, 0x1,
+ 15, 0x1,
+ 15, 0x2,
+ 15, 0x4,
+ 4, 0x2,
+ 9, 0x2,
+ 14, 0x2,
+ 19, 0x2,
+ 4, 0x10,
+ 9, 0x10,
+ 14, 0x10,
+ 19, 0x10,
+ 4, 0x80,
+ 9, 0x80,
+ 14, 0x80,
+ 19, 0x80,
+ 4, 0x800,
+ 9, 0x800,
+ 14, 0x800,
+ 19, 0x800,
+ 15, 0x8,
+ 20, 0x1,
+ 20, 0x2,
+ 20, 0x4,
+ 20, 0x8,
+ 10, 0x1,
+ 10, 0x2,
+ 10, 0x4,
+ 10, 0x8,
+ 5, 0x1,
+ 5, 0x2,
+ 5, 0x4,
+ 5, 0x8,
+ 4, 0x1,
+ 4, 0x4,
+ 4, 0x8,
+ 4, 0x20,
+ 4, 0x100,
+ 4, 0x1000,
+ 9, 0x1,
+ 9, 0x4,
+ 9, 0x8,
+ 9, 0x20,
+ 9, 0x100,
+ 9, 0x1000,
+ 14, 0x1,
+ 14, 0x4,
+ 14, 0x8,
+ 14, 0x20,
+ 14, 0x100,
+ 14, 0x1000,
+ 19, 0x1,
+ 19, 0x4,
+ 19, 0x8,
+ 19, 0x20,
+ 19, 0x100,
+ 19, 0x1000,
+ 4, 0x40,
+ 9, 0x40,
+ 14, 0x40,
+ 19, 0x40,
+ 4, 0x400,
+ 9, 0x400,
+ 14, 0x400,
+ 19, 0x400,
+ 4, 0x200,
+ 9, 0x200,
+ 14, 0x200,
+ 19, 0x200,
+ 0, 0x1,
+ 0, 0x2,
+ 0, 0x4,
+ 0, 0x8,
+ 0, 0x10,
+ 0, 0x20,
+ 0, 0x40,
+ 1, 0x8,
+ 1, 0x2,
+ 1, 0x100,
+ 1, 0x80,
+ 1, 0x20,
+ 1, 0x10,
+ 1, 0x4,
+ 1, 0x40,
+ 1, 0x1,
+ 2, 0x40,
+ 2, 0x10,
+ 2, 0x4,
+ 2, 0x8,
+ 2, 0x80,
+ 2, 0x100,
+ 2, 0x20,
+ 7, 0x10,
+ 12, 0x10,
+ 17, 0x10,
+ 22, 0x10,
+ 7, 0x8,
+ 12, 0x8,
+ 17, 0x8,
+ 22, 0x8,
+ 7, 0x4,
+ 12, 0x4,
+ 17, 0x4,
+ 22, 0x4,
+ 6, 0x8,
+ 11, 0x8,
+ 16, 0x8,
+ 21, 0x8,
+ 6, 0x4,
+ 11, 0x4,
+ 16, 0x4,
+ 21, 0x4,
+ 3, 0x80,
+ 13, 0x80,
+ 3, 0x40,
+ 13, 0x40,
+ 3, 0x20,
+ 13, 0x20,
+ 3, 0x10,
+ 13, 0x10,
+ 3, 0x8,
+ 13, 0x8,
+ 8, 0x20,
+ 18, 0x20,
+ 8, 0x10,
+ 18, 0x10,
+ 8, 0x8,
+ 18, 0x8,
+ 7, 0x2,
+ 12, 0x2,
+ 17, 0x2,
+ 22, 0x2,
+ 3, 0x4,
+ 13, 0x4,
+ 8, 0x4,
+ 18, 0x4,
+ 0, 0x80,
+ 0, 0x100,
+ 2, 0x2,
+ 2, 0x1,
+ 3, 0x2,
+ 13, 0x2,
+ 3, 0x1,
+ 13, 0x1,
+ 8, 0x2,
+ 18, 0x2,
+ 8, 0x1,
+ 18, 0x1,
+ 6, 0x2,
+ 11, 0x2,
+ 16, 0x2,
+ 21, 0x2,
+ 7, 0x1,
+ 12, 0x1,
+ 17, 0x1,
+ 22, 0x1,
+ 6, 0x1,
+ 11, 0x1,
+ 16, 0x1,
+ 21, 0x1,
+ 15, 0x1,
+ 15, 0x2,
+ 15, 0x4,
+ 4, 0x2,
+ 9, 0x2,
+ 14, 0x2,
+ 19, 0x2,
+ 4, 0x10,
+ 9, 0x10,
+ 14, 0x10,
+ 19, 0x10,
+ 4, 0x80,
+ 9, 0x80,
+ 14, 0x80,
+ 19, 0x80,
+ 4, 0x800,
+ 9, 0x800,
+ 14, 0x800,
+ 19, 0x800,
+ 15, 0x8,
+ 20, 0x1,
+ 20, 0x2,
+ 20, 0x4,
+ 20, 0x8,
+ 10, 0x1,
+ 10, 0x2,
+ 10, 0x4,
+ 10, 0x8,
+ 5, 0x1,
+ 5, 0x2,
+ 5, 0x4,
+ 5, 0x8,
+ 4, 0x1,
+ 4, 0x4,
+ 4, 0x8,
+ 4, 0x20,
+ 4, 0x100,
+ 4, 0x1000,
+ 9, 0x1,
+ 9, 0x4,
+ 9, 0x8,
+ 9, 0x20,
+ 9, 0x100,
+ 9, 0x1000,
+ 14, 0x1,
+ 14, 0x4,
+ 14, 0x8,
+ 14, 0x20,
+ 14, 0x100,
+ 14, 0x1000,
+ 19, 0x1,
+ 19, 0x4,
+ 19, 0x8,
+ 19, 0x20,
+ 19, 0x100,
+ 19, 0x1000,
+ 4, 0x40,
+ 9, 0x40,
+ 14, 0x40,
+ 19, 0x40,
+ 4, 0x400,
+ 9, 0x400,
+ 14, 0x400,
+ 19, 0x400,
+ 4, 0x200,
+ 9, 0x200,
+ 14, 0x200,
+ 19, 0x200
+};
+static Word16 order_MR102[] =
+{
+ 0, 0x1,
+ 0, 0x2,
+ 0, 0x4,
+ 0, 0x8,
+ 0, 0x10,
+ 0, 0x20,
+ 0, 0x40,
+ 0, 0x80,
+ 1, 0x1,
+ 1, 0x2,
+ 1, 0x4,
+ 1, 0x8,
+ 1, 0x10,
+ 1, 0x20,
+ 1, 0x40,
+ 1, 0x80,
+ 1, 0x100,
+ 3, 0x80,
+ 3, 0x40,
+ 3, 0x20,
+ 3, 0x10,
+ 3, 0x8,
+ 3, 0x4,
+ 21, 0x80,
+ 21, 0x40,
+ 21, 0x20,
+ 21, 0x10,
+ 21, 0x8,
+ 21, 0x4,
+ 12, 0x10,
+ 12, 0x8,
+ 30, 0x10,
+ 30, 0x8,
+ 11, 0x40,
+ 11, 0x8,
+ 11, 0x4,
+ 20, 0x40,
+ 20, 0x8,
+ 20, 0x4,
+ 29, 0x40,
+ 29, 0x8,
+ 29, 0x4,
+ 38, 0x40,
+ 38, 0x8,
+ 38, 0x4,
+ 3, 0x2,
+ 3, 0x1,
+ 21, 0x2,
+ 21, 0x1,
+ 12, 0x4,
+ 12, 0x2,
+ 30, 0x4,
+ 30, 0x2,
+ 11, 0x20,
+ 20, 0x20,
+ 29, 0x20,
+ 38, 0x20,
+ 2, 0x40,
+ 2, 0x4,
+ 2, 0x10,
+ 2, 0x8,
+ 2, 0x80,
+ 2, 0x100,
+ 2, 0x20,
+ 2, 0x2,
+ 2, 0x1,
+ 7, 0x1,
+ 6, 0x1,
+ 5, 0x1,
+ 4, 0x1,
+ 16, 0x1,
+ 15, 0x1,
+ 14, 0x1,
+ 13, 0x1,
+ 25, 0x1,
+ 24, 0x1,
+ 23, 0x1,
+ 22, 0x1,
+ 34, 0x1,
+ 33, 0x1,
+ 32, 0x1,
+ 31, 0x1,
+ 11, 0x2,
+ 11, 0x10,
+ 11, 0x1,
+ 20, 0x2,
+ 20, 0x10,
+ 20, 0x1,
+ 29, 0x2,
+ 29, 0x10,
+ 29, 0x1,
+ 38, 0x2,
+ 38, 0x10,
+ 38, 0x1,
+ 12, 0x1,
+ 30, 0x1,
+ 17, 0x200,
+ 17, 0x100,
+ 18, 0x100,
+ 18, 0x200,
+ 18, 0x80,
+ 17, 0x80,
+ 18, 0x20,
+ 17, 0x20,
+ 17, 0x40,
+ 18, 0x40,
+ 19, 0x40,
+ 19, 0x20,
+ 18, 0x10,
+ 19, 0x8,
+ 17, 0x10,
+ 19, 0x10,
+ 17, 0x8,
+ 18, 0x8,
+ 26, 0x200,
+ 26, 0x100,
+ 27, 0x100,
+ 27, 0x200,
+ 27, 0x80,
+ 26, 0x80,
+ 27, 0x20,
+ 26, 0x20,
+ 26, 0x40,
+ 27, 0x40,
+ 28, 0x40,
+ 28, 0x20,
+ 27, 0x10,
+ 28, 0x8,
+ 26, 0x10,
+ 28, 0x10,
+ 26, 0x8,
+ 27, 0x8,
+ 35, 0x200,
+ 35, 0x100,
+ 36, 0x100,
+ 36, 0x200,
+ 36, 0x80,
+ 35, 0x80,
+ 36, 0x20,
+ 35, 0x20,
+ 35, 0x40,
+ 36, 0x40,
+ 37, 0x40,
+ 37, 0x20,
+ 36, 0x10,
+ 37, 0x8,
+ 35, 0x10,
+ 37, 0x10,
+ 35, 0x8,
+ 36, 0x8,
+ 8, 0x200,
+ 8, 0x100,
+ 9, 0x100,
+ 9, 0x200,
+ 9, 0x80,
+ 8, 0x80,
+ 9, 0x20,
+ 8, 0x20,
+ 8, 0x40,
+ 9, 0x40,
+ 10, 0x40,
+ 10, 0x20,
+ 9, 0x10,
+ 10, 0x8,
+ 8, 0x10,
+ 10, 0x10,
+ 8, 0x8,
+ 9, 0x8,
+ 37, 0x4,
+ 35, 0x1,
+ 36, 0x1,
+ 37, 0x1,
+ 35, 0x4,
+ 37, 0x2,
+ 35, 0x2,
+ 36, 0x4,
+ 36, 0x2,
+ 28, 0x4,
+ 26, 0x1,
+ 27, 0x1,
+ 28, 0x1,
+ 26, 0x4,
+ 28, 0x2,
+ 26, 0x2,
+ 27, 0x4,
+ 27, 0x2,
+ 19, 0x4,
+ 17, 0x1,
+ 18, 0x1,
+ 19, 0x1,
+ 17, 0x4,
+ 19, 0x2,
+ 17, 0x2,
+ 18, 0x4,
+ 18, 0x2,
+ 10, 0x4,
+ 8, 0x1,
+ 9, 0x1,
+ 10, 0x1,
+ 8, 0x4,
+ 10, 0x2,
+ 8, 0x2,
+ 9, 0x4,
+ 9, 0x2
+};
+static Word16 order_MR122[] =
+{
+ 0, 0x40,
+ 0, 0x20,
+ 0, 0x10,
+ 0, 0x8,
+ 0, 0x4,
+ 0, 0x2,
+ 0, 0x1,
+ 1, 0x80,
+ 1, 0x40,
+ 1, 0x20,
+ 1, 0x10,
+ 1, 0x8,
+ 1, 0x4,
+ 1, 0x2,
+ 1, 0x1,
+ 2, 0x1,
+ 2, 0x100,
+ 2, 0x80,
+ 2, 0x40,
+ 2, 0x20,
+ 2, 0x10,
+ 2, 0x8,
+ 2, 0x4,
+ 2, 0x2,
+ 3, 0x80,
+ 3, 0x40,
+ 3, 0x20,
+ 3, 0x10,
+ 3, 0x8,
+ 5, 0x100,
+ 31, 0x100,
+ 5, 0x80,
+ 31, 0x80,
+ 5, 0x40,
+ 31, 0x40,
+ 5, 0x20,
+ 31, 0x20,
+ 5, 0x10,
+ 31, 0x10,
+ 5, 0x8,
+ 31, 0x8,
+ 5, 0x4,
+ 31, 0x4,
+ 5, 0x2,
+ 31, 0x2,
+ 5, 0x1,
+ 31, 0x1,
+ 6, 0x8,
+ 19, 0x8,
+ 32, 0x8,
+ 45, 0x8,
+ 6, 0x4,
+ 19, 0x4,
+ 32, 0x4,
+ 45, 0x4,
+ 6, 0x2,
+ 19, 0x2,
+ 32, 0x2,
+ 45, 0x2,
+ 17, 0x10,
+ 30, 0x10,
+ 43, 0x10,
+ 56, 0x10,
+ 17, 0x8,
+ 30, 0x8,
+ 43, 0x8,
+ 56, 0x8,
+ 17, 0x4,
+ 30, 0x4,
+ 43, 0x4,
+ 56, 0x4,
+ 18, 0x20,
+ 44, 0x20,
+ 18, 0x10,
+ 44, 0x10,
+ 18, 0x8,
+ 44, 0x8,
+ 18, 0x4,
+ 44, 0x4,
+ 18, 0x2,
+ 44, 0x2,
+ 3, 0x4,
+ 3, 0x2,
+ 3, 0x1,
+ 4, 0x20,
+ 4, 0x10,
+ 4, 0x8,
+ 4, 0x4,
+ 6, 0x1,
+ 19, 0x1,
+ 32, 0x1,
+ 45, 0x1,
+ 17, 0x2,
+ 30, 0x2,
+ 43, 0x2,
+ 56, 0x2,
+ 7, 0x8,
+ 20, 0x8,
+ 33, 0x8,
+ 46, 0x8,
+ 8, 0x8,
+ 21, 0x8,
+ 34, 0x8,
+ 47, 0x8,
+ 17, 0x1,
+ 30, 0x1,
+ 43, 0x1,
+ 56, 0x1,
+ 9, 0x8,
+ 22, 0x8,
+ 35, 0x8,
+ 48, 0x8,
+ 10, 0x8,
+ 23, 0x8,
+ 36, 0x8,
+ 49, 0x8,
+ 11, 0x8,
+ 24, 0x8,
+ 37, 0x8,
+ 50, 0x8,
+ 4, 0x2,
+ 4, 0x1,
+ 7, 0x1,
+ 7, 0x2,
+ 7, 0x4,
+ 8, 0x1,
+ 8, 0x2,
+ 8, 0x4,
+ 9, 0x1,
+ 9, 0x2,
+ 9, 0x4,
+ 10, 0x1,
+ 10, 0x2,
+ 10, 0x4,
+ 11, 0x1,
+ 11, 0x2,
+ 11, 0x4,
+ 20, 0x1,
+ 20, 0x2,
+ 20, 0x4,
+ 21, 0x1,
+ 21, 0x2,
+ 21, 0x4,
+ 22, 0x1,
+ 22, 0x2,
+ 22, 0x4,
+ 23, 0x1,
+ 23, 0x2,
+ 23, 0x4,
+ 24, 0x1,
+ 24, 0x2,
+ 24, 0x4,
+ 33, 0x1,
+ 33, 0x2,
+ 33, 0x4,
+ 34, 0x1,
+ 34, 0x2,
+ 34, 0x4,
+ 35, 0x1,
+ 35, 0x2,
+ 35, 0x4,
+ 36, 0x1,
+ 36, 0x2,
+ 36, 0x4,
+ 37, 0x1,
+ 37, 0x2,
+ 37, 0x4,
+ 46, 0x1,
+ 46, 0x2,
+ 46, 0x4,
+ 47, 0x1,
+ 47, 0x2,
+ 47, 0x4,
+ 48, 0x1,
+ 48, 0x2,
+ 48, 0x4,
+ 49, 0x1,
+ 49, 0x2,
+ 49, 0x4,
+ 50, 0x1,
+ 50, 0x2,
+ 50, 0x4,
+ 12, 0x1,
+ 12, 0x2,
+ 12, 0x4,
+ 13, 0x1,
+ 13, 0x2,
+ 13, 0x4,
+ 14, 0x1,
+ 14, 0x2,
+ 14, 0x4,
+ 15, 0x1,
+ 15, 0x2,
+ 15, 0x4,
+ 16, 0x1,
+ 16, 0x2,
+ 16, 0x4,
+ 25, 0x1,
+ 25, 0x2,
+ 25, 0x4,
+ 26, 0x1,
+ 26, 0x2,
+ 26, 0x4,
+ 27, 0x1,
+ 27, 0x2,
+ 27, 0x4,
+ 28, 0x1,
+ 28, 0x2,
+ 28, 0x4,
+ 29, 0x1,
+ 29, 0x2,
+ 29, 0x4,
+ 38, 0x1,
+ 38, 0x2,
+ 38, 0x4,
+ 39, 0x1,
+ 39, 0x2,
+ 39, 0x4,
+ 40, 0x1,
+ 40, 0x2,
+ 40, 0x4,
+ 41, 0x1,
+ 41, 0x2,
+ 41, 0x4,
+ 42, 0x1,
+ 42, 0x2,
+ 42, 0x4,
+ 51, 0x1,
+ 51, 0x2,
+ 51, 0x4,
+ 52, 0x1,
+ 52, 0x2,
+ 52, 0x4,
+ 53, 0x1,
+ 53, 0x2,
+ 53, 0x4,
+ 54, 0x1,
+ 54, 0x2,
+ 54, 0x4,
+ 55, 0x1,
+ 55, 0x2,
+ 55, 0x4,
+ 18, 0x1,
+ 44, 0x1
+};
+static Word16 order_MRDTX[] =
+{
+ 0, 0x4,
+ 0, 0x2,
+ 0, 0x1,
+ 1, 0x80,
+ 1, 0x40,
+ 1, 0x20,
+ 1, 0x10,
+ 1, 0x8,
+ 1, 0x4,
+ 1, 0x2,
+ 1, 0x1,
+ 2, 0x100,
+ 2, 0x80,
+ 2, 0x40,
+ 2, 0x20,
+ 2, 0x10,
+ 2, 0x8,
+ 2, 0x4,
+ 2, 0x2,
+ 2, 0x1,
+ 3, 0x100,
+ 3, 0x80,
+ 3, 0x40,
+ 3, 0x20,
+ 3, 0x10,
+ 3, 0x8,
+ 3, 0x4,
+ 3, 0x2,
+ 3, 0x1,
+ 4, 0x20,
+ 4, 0x10,
+ 4, 0x8,
+ 4, 0x4,
+ 4, 0x2,
+ 4, 0x1
+};
+
+/* Homing frames for the decoder */
+static const Word16 dhf_MR475[PRMNO_MR475] =
+{
+ 0x00F8,
+ 0x009D,
+ 0x001C,
+ 0x0066,
+ 0x0000,
+ 0x0003,
+ 0x0028,
+ 0x000F,
+ 0x0038,
+ 0x0001,
+ 0x000F,
+ 0x0031,
+ 0x0002,
+ 0x0008,
+ 0x000F,
+ 0x0026,
+ 0x0003
+};
+static const Word16 dhf_MR515[PRMNO_MR515] =
+{
+ 0x00F8,
+ 0x009D,
+ 0x001C,
+ 0x0066,
+ 0x0000,
+ 0x0003,
+ 0x0037,
+ 0x000F,
+ 0x0000,
+ 0x0003,
+ 0x0005,
+ 0x000F,
+ 0x0037,
+ 0x0003,
+ 0x0037,
+ 0x000F,
+ 0x0023,
+ 0x0003,
+ 0x001F
+};
+static const Word16 dhf_MR59[PRMNO_MR59] =
+{
+ 0x00F8,
+ 0x00E3,
+ 0x002F,
+ 0x00BD,
+ 0x0000,
+ 0x0003,
+ 0x0037,
+ 0x000F,
+ 0x0001,
+ 0x0003,
+ 0x000F,
+ 0x0060,
+ 0x00F9,
+ 0x0003,
+ 0x0037,
+ 0x000F,
+ 0x0000,
+ 0x0003,
+ 0x0037
+};
+static const Word16 dhf_MR67[PRMNO_MR67] =
+{
+ 0x00F8,
+ 0x00E3,
+ 0x002F,
+ 0x00BD,
+ 0x0002,
+ 0x0007,
+ 0x0000,
+ 0x000F,
+ 0x0098,
+ 0x0007,
+ 0x0061,
+ 0x0060,
+ 0x05C5,
+ 0x0007,
+ 0x0000,
+ 0x000F,
+ 0x0318,
+ 0x0007,
+ 0x0000
+};
+static const Word16 dhf_MR74[PRMNO_MR74] =
+{
+ 0x00F8,
+ 0x00E3,
+ 0x002F,
+ 0x00BD,
+ 0x0006,
+ 0x000F,
+ 0x0000,
+ 0x001B,
+ 0x0208,
+ 0x000F,
+ 0x0062,
+ 0x0060,
+ 0x1BA6,
+ 0x000F,
+ 0x0000,
+ 0x001B,
+ 0x0006,
+ 0x000F,
+ 0x0000
+};
+static const Word16 dhf_MR795[PRMNO_MR795] =
+{
+ 0x00C2,
+ 0x00E3,
+ 0x002F,
+ 0x00BD,
+ 0x0006,
+ 0x000F,
+ 0x000A,
+ 0x0000,
+ 0x0039,
+ 0x1C08,
+ 0x0007,
+ 0x000A,
+ 0x000B,
+ 0x0063,
+ 0x11A6,
+ 0x000F,
+ 0x0001,
+ 0x0000,
+ 0x0039,
+ 0x09A0,
+ 0x000F,
+ 0x0002,
+ 0x0001
+};
+static const Word16 dhf_MR102[PRMNO_MR102] =
+{
+ 0x00F8,
+ 0x00E3,
+ 0x002F,
+ 0x0045,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x001B,
+ 0x0000,
+ 0x0001,
+ 0x0000,
+ 0x0001,
+ 0x0326,
+ 0x00CE,
+ 0x007E,
+ 0x0051,
+ 0x0062,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x015A,
+ 0x0359,
+ 0x0076,
+ 0x0000,
+ 0x001B,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x017C,
+ 0x0215,
+ 0x0038,
+ 0x0030
+};
+static const Word16 dhf_MR122[PRMNO_MR122] =
+{
+ 0x0004,
+ 0x002A,
+ 0x00DB,
+ 0x0096,
+ 0x002A,
+ 0x0156,
+ 0x000B,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0036,
+ 0x000B,
+ 0x0000,
+ 0x000F,
+ 0x000E,
+ 0x000C,
+ 0x000D,
+ 0x0000,
+ 0x0001,
+ 0x0005,
+ 0x0007,
+ 0x0001,
+ 0x0008,
+ 0x0024,
+ 0x0000,
+ 0x0001,
+ 0x0000,
+ 0x0005,
+ 0x0006,
+ 0x0001,
+ 0x0002,
+ 0x0004,
+ 0x0007,
+ 0x0004,
+ 0x0002,
+ 0x0003,
+ 0x0036,
+ 0x000B,
+ 0x0000,
+ 0x0002,
+ 0x0004,
+ 0x0000,
+ 0x0003,
+ 0x0006,
+ 0x0001,
+ 0x0007,
+ 0x0006,
+ 0x0005,
+ 0x0000
+};
+
+
+/* parameter sizes (# of bits), one table per mode */
+static const Word16 bitno_MR475[PRMNO_MR475] =
+{
+ 8, 8, 7, /* LSP VQ */
+ 8, 7, 2, 8, /* first subframe */
+ 4, 7, 2, /* second subframe */
+ 4, 7, 2, 8, /* third subframe */
+ 4, 7, 2 /* fourth subframe */
+};
+static const Word16 bitno_MR515[PRMNO_MR515] =
+{
+ 8, 8, 7, /* LSP VQ */
+ 8, 7, 2, 6, /* first subframe */
+ 4, 7, 2, 6, /* second subframe */
+ 4, 7, 2, 6, /* third subframe */
+ 4, 7, 2, 6 /* fourth subframe */
+};
+static const Word16 bitno_MR59[PRMNO_MR59] =
+{
+ 8, 9, 9, /* LSP VQ */
+ 8, 9, 2, 6, /* first subframe */
+ 4, 9, 2, 6, /* second subframe */
+ 8, 9, 2, 6, /* third subframe */
+ 4, 9, 2, 6 /* fourth subframe */
+};
+static const Word16 bitno_MR67[PRMNO_MR67] =
+{
+ 8, 9, 9, /* LSP VQ */
+ 8, 11, 3, 7, /* first subframe */
+ 4, 11, 3, 7, /* second subframe */
+ 8, 11, 3, 7, /* third subframe */
+ 4, 11, 3, 7 /* fourth subframe */
+};
+static const Word16 bitno_MR74[PRMNO_MR74] =
+{
+ 8, 9, 9, /* LSP VQ */
+ 8, 13, 4, 7, /* first subframe */
+ 5, 13, 4, 7, /* second subframe */
+ 8, 13, 4, 7, /* third subframe */
+ 5, 13, 4, 7 /* fourth subframe */
+};
+static const Word16 bitno_MR795[PRMNO_MR795] =
+{
+ 9, 9, 9, /* LSP VQ */
+ 8, 13, 4, 4, 5, /* first subframe */
+ 6, 13, 4, 4, 5, /* second subframe */
+ 8, 13, 4, 4, 5, /* third subframe */
+ 6, 13, 4, 4, 5 /* fourth subframe */
+};
+static const Word16 bitno_MR102[PRMNO_MR102] =
+{
+ 8, 9, 9, /* LSP VQ */
+ 8, 1, 1, 1, 1, 10, 10, 7, 7, /* first subframe */
+ 5, 1, 1, 1, 1, 10, 10, 7, 7, /* second subframe */
+ 8, 1, 1, 1, 1, 10, 10, 7, 7, /* third subframe */
+ 5, 1, 1, 1, 1, 10, 10, 7, 7 /* fourth subframe */
+};
+static const Word16 bitno_MR122[PRMNO_MR122] =
+{
+ 7, 8, 9, 8, 6, /* LSP VQ */
+ 9, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 5, /* first subframe */
+ 6, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 5, /* second subframe */
+ 9, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 5, /* third subframe */
+ 6, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 5 /* fourth subframe */
+};
+static const Word16 bitno_MRDTX[PRMNO_MRDTX] =
+{
+ 3, 8, 9, 9, 6
+};
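/*
 * Editor's note (illustrative only, not part of this patch): summing one of
 * the bitno_* tables above gives the speech bits per 20 ms frame for that
 * mode, e.g. 95 bits for MR475 and 244 bits for MR122, matching the
 * 4.75 and 12.2 kbit/s rates.
 */
static int amr_bits_per_frame(const Word16 *bitno, int prmno)
{
    int i, bits = 0;
    for (i = 0; i < prmno; i++)
        bits += bitno[i];
    return bits;    /* amr_bits_per_frame(bitno_MR122, PRMNO_MR122) == 244 */
}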
+
+#endif
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/amr_float/rom_dec.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/amr_float/rom_dec.h
new file mode 100644
index 000000000..49e2155e3
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/amr_float/rom_dec.h
@@ -0,0 +1,13311 @@
+/*
+ * ===================================================================
+ * TS 26.104
+ * R99 V3.5.0 2003-03
+ * REL-4 V4.4.0 2003-03
+ * REL-5 V5.1.0 2003-03
+ * 3GPP AMR Floating-point Speech Codec
+ * ===================================================================
+ *
+ */
+
+/*
+ * rom_dec.h
+ *
+ *
+ * Project:
+ * AMR Floating-Point Codec
+ *
+ * Contains:
+ * This file contains all the tables needed by AMR decoder functions.
+ *
+ */
+
+#ifndef _ROM_DEC_H_
+#define _ROM_DEC_H_
+
+/*
+ * include files
+ */
+#include"typedef.h"
+#include"interf_rom.h"
+
+/*
+ * definition of constants
+ */
+#define M 10 /* Order of LP filter */
+#define MP1 (M+1) /* Order of LP filter + 1 */
+#define L_WINDOW 240 /* Window size in LP analysis */
+#define L_NEXT 40 /* Overhead in LP analysis */
+#define LTPG_MEM_SIZE 5 /* number of stored past LTP coding gains + 1 */
+#define N_FRAME 7 /* old pitch gains in average calculation */
+#define DTX_HIST_SIZE 8 /* DTX history size */
+#define L_TOTAL 320 /* Total size of speech buffer. */
+#define L_FRAME 160 /* Frame size */
+#define L_FRAME_BY2 80 /* Frame size divided by 2 */
+#define L_SUBFR 40 /* Subframe size */
+#define L_CODE 40 /* codevector length */
+#define PIT_MAX 143 /* Maximum pitch lag */
+#define PIT_MIN 20 /* Minimum pitch lag */
+#define PIT_MIN_MR122 18 /* Minimum pitch lag (MR122 mode) */
+#define L_INTERPOL (10+1) /* Length of filter for interpolation */
+#define NPRED 4 /* number of prediction taps */
+#define SHARPMIN 0 /* Minimum value of pitch sharpening */
+#define MAX_PRM_SIZE 57 /* max. num. of params */
+#define L_INTER_SRCH 4 /* Length of filter for CL LTP search interpolation */
+#define GP_CLIP 0.95F /* Pitch gain clipping */
+#define UP_SAMP_MAX 6
+#define NB_TRACK 5 /* number of tracks */
+#define NB_TRACK_MR102 4 /* number of tracks mode mr102 */
+#define STEP 5 /* codebook step size */
+#define STEP_MR102 4 /* codebook step size mode mr102 */
+#define NC M/2 /* Order of LP filter divided by 2 */
+
+/* vad */
+#define COMPLEN 9 /* Number of sub-bands used by VAD */
+#define L_ENERGYHIST 60
+#define L_CBGAINHIST 7
+#define PHDGAINMEMSIZE 5
+#define MIN_ENERGY -14336 /* 14 Q10 */
+#define MIN_ENERGY_MR122 -2381 /* 14 / (20*log10(2)) Q10 */
+#define PN_INITIAL_SEED 0x70816958L /* Pseudo noise generator seed value */
+#define MIN_16 (Word16)-32768
+#define MAX_16 (Word16)0x7fff
+#define MAX_32 (Word32)0x7fffffffL
+#define EXPCONST 5243 /* 0.16 in Q15 */
+#define DTX_MAX_EMPTY_THRESH 50
+#define DTX_ELAPSED_FRAMES_THRESH (24 + 7 -1)
+#define LSF_GAP 205 /* Minimum distance between LSF after quantization; 50 Hz = 205 */
+#define LSP_PRED_FAC_MR122 21299 /* MR122 LSP prediction factor (0.65 Q15) */
+#define POS_CODE 8191
+#define NEG_CODE 8191
+#define NMAX 9 /* largest N used in median calculation */
+#define MEAN_ENER_MR122 783741L /* 36/(20*log10(2)) (Q17) */
+#define SHARPMAX 13017 /* Maximum value of pitch sharpening */
+#define FRAMEENERGYLIMIT 17578 /* 150 */
+#define LOWERNOISELIMIT 20 /* 5 */
+#define UPPERNOISELIMIT 1953 /* 50 */
+#define AZ_SIZE (4*M+4) /* Size of array of LP filters in 4 subfr.s */
+#define AGC_FAC 29491 /* Factor for automatic gain control 0.9 */
+#define PHDGAINMEMSIZE 5
+#define PHDTHR1LTP 9830 /* 0.6 in Q14 */
+#define PHDTHR2LTP 14746 /* 0.9 in Q14 */
+#define ONFACTPLUS1 16384 /* 2.0 in Q13 */
+#define ONLENGTH 2
+#define DTX_HANG_CONST 7 /* yields eight frames of SP HANGOVER */
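/*
 * Editor's note (illustrative only, not part of this patch): the "Qn"
 * annotations above mean the constant is the real value scaled by 2^n and
 * rounded, e.g. EXPCONST = round(0.16 * 2^15) = 5243 (Q15) and
 * PHDTHR1LTP = round(0.6 * 2^14) = 9830 (Q14).  A helper in that spirit,
 * for non-negative values:
 */
#define Q_FROM_DOUBLE(x, n)  ((Word16)((x) * (double)(1 << (n)) + 0.5))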
+
+/* number of parameters */
+#define PRMNO_MR475 17
+#define PRMNO_MR515 19
+#define PRMNO_MR59 19
+#define PRMNO_MR67 19
+#define PRMNO_MR74 19
+#define PRMNO_MR795 23
+#define PRMNO_MR102 39
+#define PRMNO_MR122 57
+#define PRMNO_MRDTX 5
+
+/*
+ * tables
+ */
+
+/* level adjustment for different modes Q11 */
+static const Word16 dtx_log_en_adjust[9] =
+{
+ - 1023, /* MR475 */
+ - 878, /* MR515 */
+ - 732, /* MR59 */
+ - 586, /* MR67 */
+ - 440, /* MR74 */
+ - 294, /* MR795 */
+ - 148, /* MR102 */
+ 0, /* MR122 */
+ 0 /* MRDTX */
+};
+
+/* attenuation factors for codebook gain */
+static const Word32 cdown[7] =
+ {
+ 32767,
+ 32112,
+ 32112,
+ 32112,
+ 32112,
+ 32112,
+ 22937
+ };
+
+/* attenuation factors for adaptive codebook gain */
+static const Word32 pdown[7] =
+ {
+ 32767,
+ 32112,
+ 32112,
+ 26214,
+ 9830,
+ 6553,
+ 6553
+ };
+
+/* algebraic code book gain MA predictor coefficients */
+static const Word32 pred[NPRED] =
+{
+ 5571,
+ 4751,
+ 2785,
+ 1556
+};
+
+/* algebraic code book gain MA predictor coefficients (MR122) */
+static const Word32 pred_MR122[NPRED] =
+{
+ 44,
+ 37,
+ 22,
+ 12
+};
+
+static const Word32 gamma4_gamma3_MR122[M] =
+{
+ 22938,
+ 16057,
+ 11240,
+ 7868,
+ 5508,
+ 3856,
+ 2699,
+ 1889,
+ 1322,
+ 925
+};
+static const Word32 gamma3[M] =
+{
+ 18022,
+ 9912,
+ 5451,
+ 2998,
+ 1649,
+ 907,
+ 499,
+ 274,
+ 151,
+ 83
+};
+static const Word32 gamma4_MR122[M] =
+{
+ 24576,
+ 18432,
+ 13824,
+ 10368,
+ 7776,
+ 5832,
+ 4374,
+ 3281,
+ 2461,
+ 1846
+};
+
+
+/* adaptive codebook gain quantization table (MR122, MR795) */
+#define NB_QUA_PITCH 16
+static const Word32 qua_gain_pitch[NB_QUA_PITCH] =
+{
+ 0,
+ 3277,
+ 6556,
+ 8192,
+ 9830,
+ 11469,
+ 12288,
+ 13107,
+ 13926,
+ 14746,
+ 15565,
+ 16384,
+ 17203,
+ 18022,
+ 18842,
+ 19661
+};
+
+/* fixed codebook gain quantization table (MR122, MR795) */
+#define NB_QUA_CODE 32
+static const Word32 qua_gain_code[NB_QUA_CODE * 3] =
+{
+/* gain factor (g_fac) and quantized energy error (qua_ener_MR122, qua_ener)
+ * are stored:
+ *
+ * qua_ener_MR122 = log2(g_fac) (not the rounded floating point value, but
+ * the value the original EFR algorithm
+ * calculates from g_fac [using Log2])
+ * qua_ener = 20*log10(g_fac); (rounded floating point value)
+ *
+ *
+ * g_fac (Q11),
+ * qua_ener_MR122 (Q10),
+ * qua_ener (Q10)
+ */
+ 159,
+ - 3776,
+ - 22731,
+ 206,
+ - 3394,
+ - 20428,
+ 268,
+ - 3005,
+ - 18088,
+ 349,
+ - 2615,
+ - 15739,
+ 419,
+ - 2345,
+ - 14113,
+ 482,
+ - 2138,
+ - 12867,
+ 554,
+ - 1932,
+ - 11629,
+ 637,
+ - 1726,
+ - 10387,
+ 733,
+ - 1518,
+ - 9139,
+ 842,
+ - 1314,
+ - 7906,
+ 969,
+ - 1106,
+ - 6656,
+ 1114,
+ - 900,
+ - 5416,
+ 1281,
+ - 694,
+ - 4173,
+ 1473,
+ - 487,
+ - 2931,
+ 1694,
+ - 281,
+ - 1688,
+ 1948,
+ - 75,
+ - 445,
+ 2241,
+ 133,
+ 801,
+ 2577,
+ 339,
+ 2044,
+ 2963,
+ 545,
+ 3285,
+ 3408,
+ 752,
+ 4530,
+ 3919,
+ 958,
+ 5772,
+ 4507,
+ 1165,
+ 7016,
+ 5183,
+ 1371,
+ 8259,
+ 5960,
+ 1577,
+ 9501,
+ 6855,
+ 1784,
+ 10745,
+ 7883,
+ 1991,
+ 11988,
+ 9065,
+ 2197,
+ 13231,
+ 10425,
+ 2404,
+ 14474,
+ 12510,
+ 2673,
+ 16096,
+ 16263,
+ 3060,
+ 18429,
+ 21142,
+ 3448,
+ 20763,
+ 27485,
+ 3836,
+ 23097
+};
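/*
 * Editor's note (illustrative only, not part of this patch): as the comment
 * at the head of the table documents, qua_gain_code[] stores three values
 * per quantizer index, so entry 'index' is read as follows:
 */
static void read_qua_gain_code(int index, Word32 *g_fac,
                               Word32 *qua_ener_MR122, Word32 *qua_ener)
{
    const Word32 *p = &qua_gain_code[index * 3];

    *g_fac          = p[0];    /* gain factor, Q11       */
    *qua_ener_MR122 = p[1];    /* log2(g_fac), Q10       */
    *qua_ener       = p[2];    /* 20*log10(g_fac), Q10   */
}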
+
+/* gray coding table */
+static const Word8 gray[8] =
+{
+ 0,
+ 1,
+ 3,
+ 2,
+ 6,
+ 4,
+ 5,
+ 7
+};
+
+/* gray decoding table */
+static const Word32 dgray[8] =
+{
+ 0,
+ 1,
+ 3,
+ 2,
+ 5,
+ 6,
+ 4,
+ 7
+};
+
+/* table[i] = sqrt((i+16)*2^-6) * 2^15, i.e. sqrt(x) scaled Q15 */
+static const Word32 sqrt_table[49] =
+{
+ 16384,
+ 16888,
+ 17378,
+ 17854,
+ 18318,
+ 18770,
+ 19212,
+ 19644,
+ 20066,
+ 20480,
+ 20886,
+ 21283,
+ 21674,
+ 22058,
+ 22435,
+ 22806,
+ 23170,
+ 23530,
+ 23884,
+ 24232,
+ 24576,
+ 24915,
+ 25249,
+ 25580,
+ 25905,
+ 26227,
+ 26545,
+ 26859,
+ 27170,
+ 27477,
+ 27780,
+ 28081,
+ 28378,
+ 28672,
+ 28963,
+ 29251,
+ 29537,
+ 29819,
+ 30099,
+ 30377,
+ 30652,
+ 30924,
+ 31194,
+ 31462,
+ 31727,
+ 31991,
+ 32252,
+ 32511,
+ 32767
+};
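/*
 * Editor's note (illustrative only, not part of this patch): the entries
 * above follow the formula in the comment -- e.g. i = 0 gives
 * sqrt(16/64) * 2^15 = 16384 and i = 20 gives sqrt(36/64) * 2^15 = 24576 --
 * with the final entry saturated to the Q15 maximum (32768 -> 32767).
 * A generator sketch:
 */
#include <math.h>
static void build_sqrt_table(Word32 tbl[49])
{
    int i;
    for (i = 0; i < 49; i++) {
        Word32 v = (Word32)(sqrt((i + 16) / 64.0) * 32768.0 + 0.5);
        tbl[i] = (v > 32767) ? 32767 : v;    /* clamp the last entry */
    }
}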
+
+static const Word32 inv_sqrt_table[49] =
+{
+ 32767,
+ 31790,
+ 30894,
+ 30070,
+ 29309,
+ 28602,
+ 27945,
+ 27330,
+ 26755,
+ 26214,
+ 25705,
+ 25225,
+ 24770,
+ 24339,
+ 23930,
+ 23541,
+ 23170,
+ 22817,
+ 22479,
+ 22155,
+ 21845,
+ 21548,
+ 21263,
+ 20988,
+ 20724,
+ 20470,
+ 20225,
+ 19988,
+ 19760,
+ 19539,
+ 19326,
+ 19119,
+ 18919,
+ 18725,
+ 18536,
+ 18354,
+ 18176,
+ 18004,
+ 17837,
+ 17674,
+ 17515,
+ 17361,
+ 17211,
+ 17064,
+ 16921,
+ 16782,
+ 16646,
+ 16514,
+ 16384
+};
+/* table used in base 2 logarithm computation */
+static const Word32 log2_table[33] =
+{
+ 0,
+ 1455,
+ 2866,
+ 4236,
+ 5568,
+ 6863,
+ 8124,
+ 9352,
+ 10549,
+ 11716,
+ 12855,
+ 13967,
+ 15054,
+ 16117,
+ 17156,
+ 18172,
+ 19167,
+ 20142,
+ 21097,
+ 22033,
+ 22951,
+ 23852,
+ 24735,
+ 25603,
+ 26455,
+ 27291,
+ 28113,
+ 28922,
+ 29716,
+ 30497,
+ 31266,
+ 32023,
+ 32767
+};
+
+/* table used in 2 to the power computation */
+static const Word32 pow2_table[33] =
+{
+ 16384,
+ 16743,
+ 17109,
+ 17484,
+ 17867,
+ 18258,
+ 18658,
+ 19066,
+ 19484,
+ 19911,
+ 20347,
+ 20792,
+ 21247,
+ 21713,
+ 22188,
+ 22674,
+ 23170,
+ 23678,
+ 24196,
+ 24726,
+ 25268,
+ 25821,
+ 26386,
+ 26964,
+ 27554,
+ 28158,
+ 28774,
+ 29405,
+ 30048,
+ 30706,
+ 31379,
+ 32066,
+ 32767
+};
+
+/* table of cos(x) */
+static const Word32 cos_table[65] =
+{
+ 32767,
+ 32729,
+ 32610,
+ 32413,
+ 32138,
+ 31786,
+ 31357,
+ 30853,
+ 30274,
+ 29622,
+ 28899,
+ 28106,
+ 27246,
+ 26320,
+ 25330,
+ 24279,
+ 23170,
+ 22006,
+ 20788,
+ 19520,
+ 18205,
+ 16846,
+ 15447,
+ 14010,
+ 12540,
+ 11039,
+ 9512,
+ 7962,
+ 6393,
+ 4808,
+ 3212,
+ 1608,
+ 0,
+ - 1608,
+ - 3212,
+ - 4808,
+ - 6393,
+ - 7962,
+ - 9512,
+ - 11039,
+ - 12540,
+ - 14010,
+ - 15447,
+ - 16846,
+ - 18205,
+ - 19520,
+ - 20788,
+ - 22006,
+ - 23170,
+ - 24279,
+ - 25330,
+ - 26320,
+ - 27246,
+ - 28106,
+ - 28899,
+ - 29622,
+ - 30274,
+ - 30853,
+ - 31357,
+ - 31786,
+ - 32138,
+ - 32413,
+ - 32610,
+ - 32729,
+ - 32768
+};
+
+/* slope used to compute y = acos(x) */
+static const Word32 acos_slope[64] =
+{
+ - 26887,
+ - 8812,
+ - 5323,
+ - 3813,
+ - 2979,
+ - 2444,
+ - 2081,
+ - 1811,
+ - 1608,
+ - 1450,
+ - 1322,
+ - 1219,
+ - 1132,
+ - 1059,
+ - 998,
+ - 946,
+ - 901,
+ - 861,
+ - 827,
+ - 797,
+ - 772,
+ - 750,
+ - 730,
+ - 713,
+ - 699,
+ - 687,
+ - 677,
+ - 668,
+ - 662,
+ - 657,
+ - 654,
+ - 652,
+ - 652,
+ - 654,
+ - 657,
+ - 662,
+ - 668,
+ - 677,
+ - 687,
+ - 699,
+ - 713,
+ - 730,
+ - 750,
+ - 772,
+ - 797,
+ - 827,
+ - 861,
+ - 901,
+ - 946,
+ - 998,
+ - 1059,
+ - 1132,
+ - 1219,
+ - 1322,
+ - 1450,
+ - 1608,
+ - 1811,
+ - 2081,
+ - 2444,
+ - 2979,
+ - 3813,
+ - 5323,
+ - 8812,
+ - 26887
+};
+
+/* All impulse responses are in Q15 */
+/* phase dispersion impulse response (MR795) */
+static const Word32 ph_imp_low_MR795[] =
+{
+ 26777,
+ 801,
+ 2505,
+ - 683,
+ - 1382,
+ 582,
+ 604,
+ - 1274,
+ 3511,
+ - 5894,
+ 4534,
+ - 499,
+ - 1940,
+ 3011,
+ - 5058,
+ 5614,
+ - 1990,
+ - 1061,
+ - 1459,
+ 4442,
+ - 700,
+ - 5335,
+ 4609,
+ 452,
+ - 589,
+ - 3352,
+ 2953,
+ 1267,
+ - 1212,
+ - 2590,
+ 1731,
+ 3670,
+ - 4475,
+ - 975,
+ 4391,
+ - 2537,
+ 949,
+ - 1363,
+ - 979,
+ 5734
+};
+/* phase dispersion impulse response (MR795) */
+static const Word32 ph_imp_mid_MR795[] =
+{
+ 30274,
+ 3831,
+ - 4036,
+ 2972,
+ - 1048,
+ - 1002,
+ 2477,
+ - 3043,
+ 2815,
+ - 2231,
+ 1753,
+ - 1611,
+ 1714,
+ - 1775,
+ 1543,
+ - 1008,
+ 429,
+ - 169,
+ 472,
+ - 1264,
+ 2176,
+ - 2706,
+ 2523,
+ - 1621,
+ 344,
+ 826,
+ - 1529,
+ 1724,
+ - 1657,
+ 1701,
+ - 2063,
+ 2644,
+ - 3060,
+ 2897,
+ - 1978,
+ 557,
+ 780,
+ - 1369,
+ 842,
+ 655
+};
+
+/* phase dispersion impulse response (MR475 - MR67) */
+static const Word32 ph_imp_low[] =
+{
+ 14690,
+ 11518,
+ 1268,
+ - 2761,
+ - 5671,
+ 7514,
+ - 35,
+ - 2807,
+ - 3040,
+ 4823,
+ 2952,
+ - 8424,
+ 3785,
+ 1455,
+ 2179,
+ - 8637,
+ 8051,
+ - 2103,
+ - 1454,
+ 777,
+ 1108,
+ - 2385,
+ 2254,
+ - 363,
+ - 674,
+ - 2103,
+ 6046,
+ - 5681,
+ 1072,
+ 3123,
+ - 5058,
+ 5312,
+ - 2329,
+ - 3728,
+ 6924,
+ - 3889,
+ 675,
+ - 1775,
+ 29,
+ 10145
+};
+
+/* phase dispersion impulse response (MR475 - MR67) */
+static const Word32 ph_imp_mid[] =
+{
+ 30274,
+ 3831,
+ - 4036,
+ 2972,
+ - 1048,
+ - 1002,
+ 2477,
+ - 3043,
+ 2815,
+ - 2231,
+ 1753,
+ - 1611,
+ 1714,
+ - 1775,
+ 1543,
+ - 1008,
+ 429,
+ - 169,
+ 472,
+ - 1264,
+ 2176,
+ - 2706,
+ 2523,
+ - 1621,
+ 344,
+ 826,
+ - 1529,
+ 1724,
+ - 1657,
+ 1701,
+ - 2063,
+ 2644,
+ - 3060,
+ 2897,
+ - 1978,
+ 557,
+ 780,
+ - 1369,
+ 842,
+ 655
+};
+
+/* initialization table for the MA predictor in DTX */
+#define PAST_RQ_INIT_SIZE 8
+
+/* initialization table for MA predictor in DTX mode */
+static const Word32 past_rq_init[80] =
+{
+ - 258,
+ - 318,
+ - 439,
+ - 634,
+ - 656,
+ - 773,
+ - 711,
+ - 502,
+ - 268,
+ - 193,
+ - 2,
+ 125,
+ 122,
+ - 39,
+ - 9,
+ 105,
+ 129,
+ 283,
+ 372,
+ 575,
+ - 277,
+ - 324,
+ - 197,
+ - 487,
+ - 445,
+ - 362,
+ - 292,
+ - 27,
+ 177,
+ 543,
+ 342,
+ 517,
+ 516,
+ 130,
+ 27,
+ - 104,
+ - 120,
+ - 140,
+ - 74,
+ - 56,
+ - 564,
+ - 943,
+ - 1520,
+ - 965,
+ - 814,
+ - 526,
+ - 322,
+ - 2,
+ 159,
+ 657,
+ - 312,
+ - 284,
+ - 386,
+ - 597,
+ - 493,
+ - 526,
+ - 418,
+ - 229,
+ 105,
+ 449,
+ - 557,
+ - 870,
+ - 1075,
+ - 919,
+ - 950,
+ - 752,
+ - 709,
+ - 316,
+ 62,
+ 486,
+ - 314,
+ - 191,
+ - 203,
+ - 330,
+ - 160,
+ - 103,
+ - 51,
+ 131,
+ 338,
+ 515
+};
+
+#define ALPHA 29491
+#define ONE_ALPHA 3277
+/* LSF means (not in MR122) */
+static const Word32 mean_lsf_3[10] =
+{
+ 1546,
+ 2272,
+ 3778,
+ 5488,
+ 6972,
+ 8382,
+ 10047,
+ 11229,
+ 12766,
+ 13714
+};
+
+#define ALPHA_122 31128
+#define ONE_ALPHA_122 1639
+/* LSF means -> normalized frequency domain */
+static const Word32 mean_lsf_5[10] =
+{
+ 1384,
+ 2077,
+ 3420,
+ 5108,
+ 6742,
+ 8122,
+ 9863,
+ 11092,
+ 12714,
+ 13701
+};
+
+/* LSF prediction factors (not in MR122) */
+static const Word32 pred_fac[10] =
+{
+ 9556,
+ 10769,
+ 12571,
+ 13292,
+ 14381,
+ 11651,
+ 10588,
+ 9767,
+ 8593,
+ 6484
+};
+#define DICO1_SIZE_3 256
+#define DICO2_SIZE_3 512
+#define DICO3_SIZE_3 512
+
+/* 1st LSF quantizer (not in MR122 and MR795) */
+static const Word32 dico1_lsf_3[] =
+{
+ 6,
+ 82,
+ - 131,
+ 154,
+ - 56,
+ - 735,
+ 183,
+ - 65,
+ - 265,
+ 9,
+ - 210,
+ - 361,
+ 113,
+ 718,
+ 1817,
+ 1010,
+ 1214,
+ 1573,
+ 857,
+ 1333,
+ 2276,
+ 827,
+ 1568,
+ 1933,
+ 717,
+ 1989,
+ 2206,
+ 838,
+ 1172,
+ 1823,
+ 721,
+ 1000,
+ 2154,
+ 286,
+ 476,
+ 1509,
+ - 247,
+ - 531,
+ 230,
+ 147,
+ - 82,
+ 569,
+ 26,
+ - 177,
+ - 944,
+ - 27,
+ - 273,
+ 692,
+ - 164,
+ - 264,
+ - 183,
+ 224,
+ 790,
+ 1039,
+ 899,
+ 946,
+ 601,
+ 485,
+ 771,
+ 1150,
+ 524,
+ 677,
+ 903,
+ - 140,
+ 375,
+ 778,
+ 410,
+ 676,
+ 429,
+ 301,
+ 530,
+ 1009,
+ 719,
+ 646,
+ 38,
+ 226,
+ 367,
+ 40,
+ 145,
+ - 45,
+ - 505,
+ 290,
+ 121,
+ - 121,
+ 302,
+ 127,
+ 166,
+ - 124,
+ - 383,
+ - 956,
+ - 358,
+ - 455,
+ - 977,
+ 715,
+ 878,
+ 894,
+ 978,
+ 923,
+ 211,
+ 477,
+ 272,
+ 64,
+ 188,
+ - 78,
+ 17,
+ - 143,
+ - 65,
+ 38,
+ 643,
+ 586,
+ 621,
+ - 134,
+ - 426,
+ - 651,
+ 347,
+ 545,
+ 2820,
+ 1188,
+ 2726,
+ 2442,
+ 142,
+ - 80,
+ 1735,
+ 283,
+ 130,
+ 461,
+ - 262,
+ - 399,
+ - 1145,
+ - 411,
+ 155,
+ 430,
+ 329,
+ 375,
+ 779,
+ 53,
+ - 226,
+ - 139,
+ - 129,
+ - 236,
+ 1682,
+ 285,
+ 744,
+ 1327,
+ 738,
+ 697,
+ 1664,
+ 312,
+ 409,
+ 266,
+ 325,
+ 720,
+ 135,
+ 1,
+ 221,
+ 453,
+ 8,
+ 203,
+ 145,
+ 299,
+ 640,
+ 760,
+ 29,
+ 468,
+ 638,
+ 103,
+ 429,
+ 379,
+ 420,
+ 954,
+ 932,
+ 1326,
+ 1210,
+ 1258,
+ 704,
+ 1012,
+ 1152,
+ - 166,
+ - 444,
+ - 266,
+ - 316,
+ - 130,
+ - 376,
+ 191,
+ 1151,
+ 1904,
+ - 240,
+ - 543,
+ - 1260,
+ - 112,
+ 268,
+ 1207,
+ 70,
+ 1062,
+ 1583,
+ 278,
+ 1360,
+ 1574,
+ - 258,
+ - 272,
+ - 768,
+ 19,
+ 563,
+ 2240,
+ - 3,
+ - 265,
+ 135,
+ - 295,
+ - 591,
+ - 388,
+ 140,
+ 354,
+ - 206,
+ - 260,
+ - 504,
+ - 795,
+ - 433,
+ - 718,
+ - 1319,
+ 109,
+ 331,
+ 962,
+ - 429,
+ - 87,
+ 652,
+ - 296,
+ 426,
+ 1019,
+ - 239,
+ 775,
+ 851,
+ 489,
+ 1334,
+ 1073,
+ - 334,
+ - 332,
+ 25,
+ 543,
+ 1206,
+ 1807,
+ 326,
+ 61,
+ 727,
+ 578,
+ 849,
+ 1405,
+ - 208,
+ - 277,
+ 329,
+ - 152,
+ 64,
+ 669,
+ - 434,
+ - 678,
+ - 727,
+ - 454,
+ - 71,
+ 251,
+ 605,
+ 480,
+ 254,
+ - 482,
+ 11,
+ 996,
+ - 289,
+ 395,
+ 486,
+ 722,
+ 1049,
+ 1440,
+ - 30,
+ - 316,
+ - 786,
+ - 106,
+ - 115,
+ - 619,
+ 861,
+ 1474,
+ 1412,
+ 1055,
+ 1366,
+ 1184,
+ 812,
+ 1237,
+ 925,
+ 42,
+ - 251,
+ - 576,
+ 342,
+ 141,
+ - 454,
+ - 168,
+ - 80,
+ 1359,
+ - 342,
+ - 656,
+ - 1763,
+ 100,
+ 821,
+ 725,
+ 990,
+ 747,
+ 800,
+ 332,
+ 440,
+ 568,
+ 663,
+ 379,
+ 852,
+ 112,
+ 165,
+ - 369,
+ 597,
+ 910,
+ 282,
+ - 8,
+ 834,
+ 1281,
+ - 352,
+ 572,
+ 695,
+ 462,
+ 2246,
+ 1806,
+ 345,
+ 190,
+ 1374,
+ 416,
+ 915,
+ 2166,
+ 168,
+ - 82,
+ 280,
+ - 516,
+ - 446,
+ 840,
+ 47,
+ 533,
+ 44,
+ - 362,
+ - 711,
+ - 1143,
+ 22,
+ 193,
+ 1472,
+ - 85,
+ 233,
+ 1813,
+ - 62,
+ 579,
+ 1504,
+ 550,
+ 944,
+ 1749,
+ 723,
+ 650,
+ 1148,
+ 972,
+ 884,
+ 1395,
+ - 425,
+ 643,
+ 0,
+ 1000,
+ 952,
+ 1098,
+ 249,
+ 1446,
+ 672,
+ - 334,
+ - 87,
+ 2172,
+ - 554,
+ 1882,
+ 2672,
+ 140,
+ 1826,
+ 1853,
+ 920,
+ 1749,
+ 2590,
+ 1076,
+ 1933,
+ 2038,
+ - 137,
+ - 443,
+ - 1555,
+ 1269,
+ 1174,
+ 468,
+ - 493,
+ - 122,
+ 1521,
+ - 451,
+ 1033,
+ 1214,
+ 482,
+ 1695,
+ 1118,
+ 815,
+ 649,
+ 384,
+ - 446,
+ - 692,
+ 107,
+ - 319,
+ - 605,
+ - 118,
+ - 207,
+ - 505,
+ 525,
+ - 468,
+ - 12,
+ 2736,
+ 75,
+ 1934,
+ 1305,
+ 880,
+ 2358,
+ 2267,
+ 1285,
+ 1575,
+ 2004,
+ - 48,
+ - 304,
+ - 1186,
+ - 435,
+ - 461,
+ - 251,
+ - 366,
+ - 404,
+ - 547,
+ - 289,
+ - 605,
+ - 597,
+ - 538,
+ - 810,
+ - 165,
+ - 120,
+ 3,
+ 356,
+ 639,
+ 1241,
+ 1502,
+ 96,
+ 177,
+ 750,
+ - 435,
+ - 585,
+ - 1174,
+ - 356,
+ 109,
+ - 79,
+ - 485,
+ 288,
+ 2005,
+ 9,
+ 1116,
+ 731,
+ 880,
+ 2134,
+ 946,
+ - 265,
+ 1585,
+ 1065,
+ 1157,
+ 1210,
+ 843,
+ - 498,
+ - 668,
+ 431,
+ 374,
+ 321,
+ - 229,
+ 1440,
+ 2101,
+ 1381,
+ 449,
+ 461,
+ 1155,
+ - 105,
+ 39,
+ - 384,
+ - 263,
+ 367,
+ 182,
+ - 371,
+ - 660,
+ 773,
+ - 188,
+ 1151,
+ 971,
+ 1333,
+ 1632,
+ 1435,
+ 774,
+ 1267,
+ 1221,
+ - 482,
+ - 832,
+ - 1489,
+ - 237,
+ - 210,
+ 860,
+ 890,
+ 1615,
+ 1064,
+ 472,
+ 1062,
+ 1192,
+ 185,
+ 1077,
+ 989,
+ - 568,
+ - 992,
+ - 1704,
+ - 449,
+ - 902,
+ - 2043,
+ - 142,
+ - 377,
+ - 458,
+ - 210,
+ - 554,
+ - 1029,
+ - 11,
+ 1133,
+ 2265,
+ - 329,
+ - 675,
+ - 893,
+ - 250,
+ 657,
+ 1187,
+ 519,
+ 1510,
+ 1779,
+ 520,
+ 539,
+ 1403,
+ 527,
+ 1421,
+ 1302,
+ - 563,
+ - 871,
+ - 1248,
+ - 147,
+ - 463,
+ 879,
+ - 76,
+ 2334,
+ 2840,
+ 563,
+ 2573,
+ 2385,
+ 632,
+ 1926,
+ 2920,
+ 719,
+ 2023,
+ 1840,
+ - 545,
+ - 723,
+ 1108,
+ 129,
+ - 125,
+ 884,
+ 1417,
+ 1632,
+ 925,
+ - 94,
+ 1566,
+ 1751,
+ - 341,
+ 1533,
+ 1551,
+ 591,
+ 395,
+ - 274,
+ - 76,
+ 981,
+ 2831,
+ 153,
+ 2985,
+ 1844,
+ 1032,
+ 2565,
+ 2749,
+ 1508,
+ 2832,
+ 1879,
+ 791,
+ 1199,
+ 538,
+ - 190,
+ - 453,
+ 1489,
+ - 278,
+ - 548,
+ 1158,
+ - 245,
+ 1941,
+ 2044,
+ 1024,
+ 1560,
+ 1650,
+ 512,
+ 253,
+ 466,
+ - 62,
+ - 323,
+ 1151,
+ - 473,
+ - 376,
+ 507,
+ - 433,
+ 1380,
+ 2162,
+ 899,
+ 1943,
+ 1445,
+ 134,
+ 704,
+ 440,
+ 460,
+ 525,
+ - 28,
+ - 450,
+ 279,
+ 1338,
+ 0,
+ 971,
+ 252,
+ - 445,
+ - 627,
+ - 991,
+ - 348,
+ - 602,
+ - 1424,
+ 398,
+ 712,
+ 1656,
+ - 107,
+ 314,
+ - 178,
+ 93,
+ 2226,
+ 2238,
+ 518,
+ 849,
+ 656,
+ - 462,
+ - 711,
+ - 447,
+ 174,
+ - 34,
+ 1191,
+ - 119,
+ 42,
+ 1005,
+ - 372,
+ 274,
+ 758,
+ 1036,
+ 2352,
+ 1838,
+ 675,
+ 1724,
+ 1498,
+ 430,
+ 1286,
+ 2133,
+ - 129,
+ - 439,
+ 0,
+ - 373,
+ 800,
+ 2144,
+ 6,
+ 1587,
+ 2478,
+ 478,
+ 596,
+ 2128,
+ - 428,
+ - 736,
+ 1505,
+ 385,
+ 178,
+ 980,
+ 139,
+ 449,
+ 1225,
+ - 526,
+ - 842,
+ - 982,
+ 145,
+ 1554,
+ 1242,
+ 623,
+ 1448,
+ 656,
+ 349,
+ 1016,
+ 1482,
+ 31,
+ - 280,
+ 415,
+ - 316,
+ 724,
+ 1641,
+ 360,
+ 1058,
+ 556,
+ - 436,
+ - 358,
+ 1201,
+ - 355,
+ 1123,
+ 1939,
+ 401,
+ 1584,
+ 2248,
+ - 527,
+ - 1012,
+ 355,
+ 233,
+ 238,
+ 2233,
+ - 550,
+ - 897,
+ - 639,
+ - 365,
+ - 501,
+ 1957,
+ 389,
+ 1860,
+ 1621,
+ 162,
+ 1132,
+ 1264,
+ - 237,
+ 1174,
+ 1390,
+ - 640,
+ - 411,
+ 116,
+ - 228,
+ 1694,
+ 2298,
+ 1639,
+ 2186,
+ 2267,
+ 562,
+ 1273,
+ 2658,
+ 323,
+ 338,
+ 1774,
+ 578,
+ 1107,
+ 852,
+ 22,
+ 594,
+ 934,
+ - 143,
+ 718,
+ 446
+};
+
+/* 2nd LSF quantizer (not in MR122) */
+static const Word32 dico2_lsf_3[] =
+{
+ 50,
+ 71,
+ - 9,
+ - 338,
+ - 698,
+ - 1407,
+ 102,
+ - 138,
+ - 820,
+ - 310,
+ - 469,
+ - 1147,
+ 414,
+ 67,
+ - 267,
+ 1060,
+ 814,
+ 1441,
+ 1548,
+ 1360,
+ 1272,
+ 1754,
+ 1895,
+ 1661,
+ 2019,
+ 2133,
+ 1820,
+ 1808,
+ 2318,
+ 1845,
+ 644,
+ - 93,
+ 454,
+ 858,
+ 329,
+ - 136,
+ 489,
+ - 258,
+ - 128,
+ - 198,
+ - 745,
+ - 41,
+ - 52,
+ - 265,
+ - 985,
+ 346,
+ 137,
+ 479,
+ - 1741,
+ - 748,
+ - 684,
+ - 1163,
+ - 1725,
+ - 367,
+ - 895,
+ - 1145,
+ - 784,
+ - 488,
+ - 946,
+ - 968,
+ - 85,
+ - 390,
+ - 725,
+ 215,
+ - 340,
+ - 171,
+ 1020,
+ 916,
+ 1969,
+ 564,
+ 179,
+ 746,
+ 662,
+ 977,
+ 1734,
+ 887,
+ 622,
+ 914,
+ 939,
+ 856,
+ 1165,
+ 309,
+ 688,
+ 803,
+ 917,
+ 161,
+ 570,
+ 118,
+ - 20,
+ - 283,
+ - 816,
+ - 42,
+ 204,
+ - 1228,
+ - 325,
+ - 462,
+ - 963,
+ - 202,
+ - 143,
+ - 988,
+ - 484,
+ - 361,
+ - 702,
+ - 978,
+ - 477,
+ - 302,
+ - 790,
+ - 1188,
+ - 100,
+ - 786,
+ - 1088,
+ - 1054,
+ - 947,
+ - 1684,
+ - 202,
+ - 843,
+ - 782,
+ - 1039,
+ - 1378,
+ - 901,
+ - 624,
+ - 110,
+ - 85,
+ 356,
+ 213,
+ - 10,
+ - 493,
+ 364,
+ 774,
+ 425,
+ 822,
+ 479,
+ - 83,
+ 557,
+ 520,
+ - 992,
+ - 1560,
+ - 572,
+ - 603,
+ - 741,
+ - 26,
+ - 502,
+ - 638,
+ - 903,
+ 209,
+ 306,
+ 147,
+ - 316,
+ - 593,
+ - 596,
+ - 85,
+ - 211,
+ - 225,
+ - 918,
+ - 529,
+ 117,
+ 233,
+ - 439,
+ - 738,
+ 1101,
+ 751,
+ 633,
+ 1457,
+ 1716,
+ 1511,
+ 1765,
+ 1457,
+ 910,
+ 1122,
+ 1156,
+ 849,
+ 1354,
+ 868,
+ 470,
+ - 871,
+ - 1150,
+ - 1796,
+ - 871,
+ - 861,
+ - 992,
+ - 118,
+ 155,
+ 212,
+ - 1051,
+ - 849,
+ - 606,
+ - 1117,
+ - 1849,
+ - 2750,
+ - 1019,
+ - 1427,
+ - 1869,
+ 370,
+ - 184,
+ - 414,
+ 959,
+ 493,
+ 104,
+ 958,
+ 1039,
+ 543,
+ 154,
+ 653,
+ 201,
+ 1249,
+ 507,
+ 150,
+ 663,
+ 503,
+ 230,
+ 623,
+ 777,
+ 675,
+ 659,
+ 88,
+ - 110,
+ 843,
+ 244,
+ 224,
+ 382,
+ 541,
+ 302,
+ 724,
+ 433,
+ 666,
+ 1166,
+ 734,
+ 341,
+ - 138,
+ 20,
+ - 397,
+ - 1183,
+ - 424,
+ - 46,
+ - 321,
+ - 352,
+ - 124,
+ 1333,
+ 1021,
+ 1080,
+ 262,
+ 366,
+ 723,
+ 922,
+ 283,
+ - 551,
+ 31,
+ - 636,
+ - 611,
+ - 689,
+ - 697,
+ - 415,
+ - 952,
+ - 779,
+ - 201,
+ - 1329,
+ - 598,
+ - 359,
+ - 953,
+ - 1285,
+ 166,
+ 493,
+ 305,
+ 221,
+ 846,
+ 703,
+ 610,
+ 840,
+ 936,
+ 774,
+ - 723,
+ - 1324,
+ - 1261,
+ - 357,
+ - 1025,
+ - 1388,
+ - 1096,
+ - 1376,
+ - 365,
+ - 1416,
+ - 1881,
+ - 608,
+ - 1798,
+ - 1727,
+ - 674,
+ - 545,
+ - 1173,
+ - 703,
+ 678,
+ 786,
+ 148,
+ - 123,
+ 696,
+ 1288,
+ 644,
+ 350,
+ - 10,
+ 414,
+ 614,
+ 15,
+ 137,
+ 344,
+ - 211,
+ - 814,
+ - 1512,
+ - 819,
+ - 391,
+ - 930,
+ - 588,
+ 47,
+ - 591,
+ - 898,
+ - 909,
+ - 1097,
+ - 163,
+ - 1272,
+ - 1167,
+ - 157,
+ - 1464,
+ - 1525,
+ - 389,
+ - 1274,
+ - 1188,
+ - 624,
+ 671,
+ 213,
+ 454,
+ 124,
+ - 274,
+ - 525,
+ - 729,
+ - 496,
+ - 152,
+ - 1344,
+ 122,
+ 135,
+ - 2905,
+ - 589,
+ - 394,
+ - 1728,
+ 441,
+ - 50,
+ 1476,
+ 904,
+ 787,
+ 316,
+ 236,
+ - 440,
+ - 347,
+ 217,
+ 413,
+ - 911,
+ - 917,
+ 121,
+ - 455,
+ - 932,
+ 202,
+ - 92,
+ - 465,
+ - 375,
+ 488,
+ 390,
+ 474,
+ 876,
+ 729,
+ 316,
+ - 1815,
+ - 1312,
+ - 669,
+ 87,
+ 962,
+ 432,
+ 563,
+ - 249,
+ - 1058,
+ 250,
+ 285,
+ 1105,
+ 1141,
+ 427,
+ 696,
+ - 1038,
+ - 1664,
+ - 1582,
+ - 948,
+ 346,
+ 160,
+ - 309,
+ - 272,
+ - 858,
+ 670,
+ 624,
+ 1250,
+ - 944,
+ - 408,
+ - 666,
+ - 606,
+ - 320,
+ - 384,
+ - 492,
+ 230,
+ 65,
+ 334,
+ - 50,
+ - 16,
+ - 16,
+ - 690,
+ - 1397,
+ 1791,
+ 1716,
+ 1399,
+ 2478,
+ 2063,
+ 1404,
+ 1245,
+ 1471,
+ 1426,
+ - 382,
+ - 1037,
+ - 2,
+ 173,
+ - 398,
+ 1145,
+ 1491,
+ 2024,
+ 1801,
+ 772,
+ 1274,
+ 1506,
+ 1429,
+ 1735,
+ 2001,
+ 1079,
+ 1218,
+ 1273,
+ - 1154,
+ - 1851,
+ - 1329,
+ - 808,
+ - 1133,
+ - 1096,
+ - 451,
+ - 1033,
+ - 1722,
+ 65,
+ 578,
+ - 84,
+ - 1476,
+ - 2434,
+ - 1778,
+ - 765,
+ - 1366,
+ - 494,
+ - 218,
+ - 594,
+ - 931,
+ 337,
+ - 236,
+ 562,
+ 2357,
+ 2662,
+ 1938,
+ 1489,
+ 1276,
+ 874,
+ 189,
+ 358,
+ 374,
+ - 1519,
+ - 2281,
+ - 2346,
+ - 967,
+ - 1271,
+ - 2095,
+ - 628,
+ - 1188,
+ - 1542,
+ 1661,
+ 1043,
+ 546,
+ 565,
+ 1061,
+ 732,
+ - 64,
+ - 836,
+ - 434,
+ - 436,
+ - 96,
+ 203,
+ 1078,
+ 1216,
+ 1636,
+ 907,
+ 1534,
+ 986,
+ 326,
+ 965,
+ 845,
+ 142,
+ - 84,
+ 197,
+ 470,
+ 2379,
+ 1570,
+ 1133,
+ 470,
+ 1214,
+ 395,
+ 1376,
+ 1200,
+ 1125,
+ 1042,
+ 348,
+ - 543,
+ - 1234,
+ - 376,
+ - 215,
+ - 181,
+ 481,
+ - 1947,
+ - 1621,
+ - 210,
+ - 750,
+ - 1185,
+ 390,
+ 29,
+ - 399,
+ 27,
+ 820,
+ 1236,
+ 755,
+ 695,
+ 979,
+ 409,
+ - 174,
+ 1197,
+ 1035,
+ 912,
+ 1356,
+ 1846,
+ - 992,
+ - 1437,
+ 484,
+ - 1485,
+ - 1700,
+ 208,
+ - 412,
+ 1204,
+ 1432,
+ - 271,
+ 896,
+ 1144,
+ - 416,
+ 1777,
+ 1434,
+ - 1696,
+ - 2644,
+ - 204,
+ - 1789,
+ - 1551,
+ 1033,
+ - 1656,
+ - 1559,
+ 1303,
+ - 1253,
+ - 1589,
+ 1081,
+ - 669,
+ - 1095,
+ - 66,
+ - 682,
+ 320,
+ - 345,
+ 659,
+ 305,
+ 1069,
+ - 1292,
+ - 804,
+ - 19,
+ - 1635,
+ - 1291,
+ 29,
+ - 1683,
+ - 497,
+ 71,
+ - 287,
+ - 7,
+ - 100,
+ - 494,
+ - 962,
+ - 237,
+ 852,
+ 1881,
+ 1740,
+ - 1217,
+ - 1387,
+ 227,
+ - 660,
+ 302,
+ 373,
+ 96,
+ 1087,
+ 1257,
+ - 1074,
+ - 1669,
+ 160,
+ 485,
+ 2076,
+ 1798,
+ - 934,
+ - 220,
+ 552,
+ - 596,
+ - 612,
+ 237,
+ 336,
+ 1720,
+ 879,
+ 643,
+ 629,
+ 434,
+ 1267,
+ 522,
+ 1633,
+ 15,
+ 244,
+ - 441,
+ 1475,
+ 717,
+ 184,
+ 1819,
+ 1590,
+ 1709,
+ 988,
+ 261,
+ 937,
+ 2093,
+ 2345,
+ 1520,
+ 2139,
+ 1858,
+ 1606,
+ - 577,
+ - 579,
+ - 1203,
+ - 956,
+ 135,
+ - 488,
+ - 464,
+ 51,
+ - 338,
+ - 629,
+ - 348,
+ - 723,
+ 1146,
+ 2073,
+ 1442,
+ 2192,
+ 1466,
+ 911,
+ - 1444,
+ - 1572,
+ - 2278,
+ 1400,
+ 710,
+ 1297,
+ 1335,
+ 633,
+ 928,
+ 1434,
+ 2194,
+ 2594,
+ 2422,
+ 2204,
+ 1881,
+ 982,
+ 2242,
+ 1854,
+ 380,
+ 792,
+ 1145,
+ - 63,
+ - 539,
+ 414,
+ - 252,
+ - 964,
+ - 314,
+ - 1261,
+ - 683,
+ - 780,
+ - 831,
+ - 526,
+ - 1005,
+ - 1666,
+ - 1135,
+ - 424,
+ - 1611,
+ - 452,
+ - 299,
+ 1268,
+ 1048,
+ 642,
+ 1147,
+ 853,
+ 856,
+ - 675,
+ - 336,
+ 139,
+ 2268,
+ 1343,
+ 1418,
+ 29,
+ 768,
+ 797,
+ - 1224,
+ 423,
+ 564,
+ - 1318,
+ - 1082,
+ 245,
+ - 1302,
+ - 812,
+ 573,
+ - 1298,
+ - 1617,
+ 646,
+ - 968,
+ 834,
+ 723,
+ 993,
+ 1652,
+ 2027,
+ - 191,
+ - 817,
+ 432,
+ 662,
+ 60,
+ 198,
+ 626,
+ 997,
+ 1330,
+ 1648,
+ 1963,
+ 1289,
+ - 1597,
+ - 93,
+ - 45,
+ - 1088,
+ 37,
+ - 84,
+ 1653,
+ 2607,
+ 2337,
+ 1065,
+ 2040,
+ 2377,
+ 1139,
+ 2326,
+ 2118,
+ 859,
+ 357,
+ 1510,
+ 664,
+ 1227,
+ 1099,
+ 479,
+ 1360,
+ 912,
+ 1897,
+ 1754,
+ 2019,
+ 1168,
+ 1909,
+ 1784,
+ 399,
+ 34,
+ 256,
+ - 593,
+ - 304,
+ - 1053,
+ 547,
+ 1694,
+ 1407,
+ 647,
+ - 99,
+ - 341,
+ 1492,
+ 1647,
+ 1190,
+ 38,
+ - 644,
+ - 212,
+ 395,
+ 846,
+ 222,
+ - 704,
+ - 765,
+ - 716,
+ - 724,
+ - 1964,
+ - 2804,
+ - 150,
+ 291,
+ - 82,
+ 1233,
+ 1459,
+ 1007,
+ - 140,
+ - 155,
+ 153,
+ 439,
+ 297,
+ 1568,
+ - 1529,
+ - 410,
+ - 636,
+ 1536,
+ 455,
+ - 237,
+ - 1328,
+ - 139,
+ - 260,
+ 531,
+ 554,
+ 868,
+ 269,
+ 1264,
+ 606,
+ - 233,
+ 883,
+ 463,
+ 742,
+ 600,
+ - 120,
+ - 73,
+ 421,
+ 212,
+ - 439,
+ - 58,
+ 804,
+ - 1286,
+ - 1241,
+ 728,
+ 294,
+ - 490,
+ 50,
+ - 591,
+ - 905,
+ - 1254,
+ 42,
+ - 687,
+ 147,
+ - 25,
+ 273,
+ 596,
+ - 311,
+ 1213,
+ 601,
+ - 754,
+ 849,
+ 584,
+ 429,
+ 607,
+ 587,
+ - 602,
+ - 166,
+ 461,
+ - 796,
+ - 823,
+ 777,
+ 1380,
+ 910,
+ 1755,
+ 119,
+ 1417,
+ 972,
+ - 219,
+ - 880,
+ - 1596,
+ - 1049,
+ - 1010,
+ 438,
+ - 713,
+ - 1379,
+ 78,
+ 0,
+ - 447,
+ - 1179,
+ - 1136,
+ - 1319,
+ - 1573,
+ 2248,
+ 1767,
+ 1309,
+ 946,
+ 1583,
+ 1432,
+ 1150,
+ 482,
+ 436,
+ - 469,
+ - 1108,
+ 618,
+ - 447,
+ - 966,
+ 1088,
+ - 1252,
+ - 1515,
+ - 114,
+ - 1104,
+ - 2008,
+ - 579,
+ 210,
+ 613,
+ 497,
+ - 1975,
+ - 1437,
+ 642,
+ - 1269,
+ - 856,
+ 1011,
+ - 1646,
+ - 1185,
+ 1063,
+ - 1555,
+ - 672,
+ 1204,
+ - 1692,
+ - 1114,
+ 623,
+ - 979,
+ - 1326,
+ - 1277,
+ 539,
+ - 147,
+ 894,
+ - 1354,
+ - 897,
+ - 434,
+ 888,
+ 475,
+ 428,
+ 153,
+ - 384,
+ 338,
+ - 1492,
+ - 511,
+ 359,
+ - 974,
+ - 1115,
+ - 470,
+ 105,
+ - 550,
+ 677,
+ - 937,
+ - 1145,
+ 877,
+ 380,
+ - 260,
+ 210,
+ 1685,
+ 924,
+ 1256,
+ 1775,
+ 1190,
+ 1095,
+ 1419,
+ 631,
+ 533,
+ 627,
+ 299,
+ - 347,
+ - 411,
+ - 534,
+ 647,
+ - 650,
+ 29,
+ - 595,
+ - 378,
+ - 1367,
+ 1563,
+ 1402,
+ 1121,
+ 1465,
+ 1089,
+ 1410,
+ 648,
+ - 2096,
+ - 1090,
+ - 6,
+ 311,
+ - 194,
+ - 869,
+ - 639,
+ - 831,
+ 416,
+ - 1162,
+ - 1224,
+ 1349,
+ - 1247,
+ - 941,
+ 1813,
+ - 2193,
+ - 1987,
+ 453,
+ - 619,
+ - 1367,
+ - 956,
+ - 1606,
+ - 1972,
+ - 1507,
+ - 1175,
+ - 1057,
+ - 1104,
+ - 377,
+ 601,
+ 201,
+ 1876,
+ 825,
+ 374,
+ - 430,
+ - 1323,
+ 29,
+ - 1397,
+ - 1249,
+ - 1331,
+ - 1007,
+ - 1504,
+ 960,
+ - 1401,
+ - 2009,
+ 197,
+ - 1379,
+ - 1949,
+ - 236,
+ - 1077,
+ 123,
+ 422,
+ 615,
+ 1269,
+ 546,
+ - 306,
+ 1526,
+ 904,
+ 1194,
+ 1788,
+ 1177,
+ - 626,
+ - 884,
+ - 1526,
+ 199,
+ 766,
+ 1504,
+ - 1065,
+ 862,
+ 197,
+ - 1034,
+ - 1773,
+ - 887,
+ - 800,
+ 145,
+ 599,
+ - 1134,
+ - 519,
+ 626,
+ - 1205,
+ - 1926,
+ 500,
+ - 910,
+ - 1041,
+ - 1395,
+ - 1476,
+ - 1567,
+ - 969,
+ - 523,
+ 842,
+ 34,
+ 1794,
+ 646,
+ 862,
+ - 1207,
+ - 1888,
+ - 1002,
+ - 78,
+ - 9,
+ - 672,
+ 1044,
+ 759,
+ 80,
+ - 600,
+ 1139,
+ 1019,
+ 57,
+ 2000,
+ 1422,
+ - 833,
+ 1414,
+ 1121,
+ - 1202,
+ 1630,
+ 1260,
+ - 461,
+ 1420,
+ 1244,
+ 1537,
+ 975,
+ 253,
+ - 283,
+ 324,
+ - 359,
+ 599,
+ - 195,
+ 106,
+ 588,
+ 62,
+ - 587,
+ - 757,
+ 645,
+ 205,
+ 51,
+ 1201,
+ 758,
+ - 1209,
+ 673,
+ - 390,
+ - 624,
+ 1581,
+ 941,
+ - 151,
+ 1023,
+ 735,
+ 2820,
+ 1301,
+ 690,
+ - 302,
+ 524,
+ - 99,
+ - 900,
+ - 1588,
+ - 1189,
+ 1084,
+ 251,
+ 238,
+ 2014,
+ 1792,
+ 1010,
+ 1245,
+ 1633,
+ 1741,
+ - 1227,
+ - 1540,
+ - 1208,
+ - 621,
+ 456,
+ - 109,
+ 40,
+ - 65,
+ 788,
+ - 805,
+ - 699,
+ - 1350,
+ - 583,
+ 904,
+ 832,
+ - 801,
+ 532,
+ 594,
+ 1972,
+ 1408,
+ 1351,
+ - 1177,
+ - 1880,
+ - 2114,
+ - 773,
+ 568,
+ 948,
+ - 1015,
+ 1079,
+ 1260,
+ - 1111,
+ 482,
+ - 130,
+ 1778,
+ 1044,
+ 780,
+ - 1491,
+ 245,
+ 912,
+ - 316,
+ - 1141,
+ - 917,
+ - 536,
+ - 1442,
+ - 2346,
+ - 785,
+ - 1546,
+ - 1988,
+ - 2003,
+ 257,
+ 909,
+ - 1849,
+ - 633,
+ - 1209,
+ - 1538,
+ - 1918,
+ - 1054,
+ 1606,
+ 2239,
+ 1576,
+ - 567,
+ - 1500,
+ - 1544,
+ - 1279,
+ 195,
+ 1369,
+ - 817,
+ 293,
+ 1219,
+ - 525,
+ 630,
+ 1197,
+ - 1698,
+ - 2425,
+ - 1840,
+ - 303,
+ 731,
+ 747,
+ - 1169,
+ - 251,
+ 269,
+ - 950,
+ - 75,
+ 1684,
+ - 1182,
+ - 453,
+ 1005,
+ - 1599,
+ 585,
+ 378,
+ - 2075,
+ - 571,
+ - 427,
+ - 529,
+ - 1159,
+ - 1171,
+ - 283,
+ - 205,
+ - 564,
+ - 796,
+ 1246,
+ 717,
+ 2277,
+ 927,
+ 539,
+ - 454,
+ 559,
+ 440,
+ - 717,
+ 1460,
+ 1615,
+ - 1030,
+ 1052,
+ 1610,
+ - 1169,
+ - 138,
+ 847,
+ 226,
+ 39,
+ - 612,
+ - 1251,
+ - 106,
+ - 729,
+ - 651,
+ 968,
+ 1302,
+ - 714,
+ - 636,
+ 1727,
+ 353,
+ 1069,
+ 410,
+ - 798,
+ - 156,
+ 1099,
+ - 574,
+ 918,
+ 446,
+ - 1310,
+ 1012,
+ 466,
+ 1408,
+ 1591,
+ 765,
+ 1429,
+ 1380,
+ 1757,
+ 1949,
+ 1956,
+ 2378,
+ 1578,
+ 2047,
+ 2148,
+ 916,
+ 98,
+ - 7,
+ 1893,
+ 1418,
+ 2141,
+ 348,
+ 1405,
+ 1579,
+ 152,
+ 1134,
+ 1801,
+ - 267,
+ 154,
+ 1395,
+ - 1166,
+ 469,
+ 1054,
+ - 1142,
+ - 405,
+ - 1073,
+ - 1341,
+ - 2264,
+ - 1581,
+ - 364,
+ 869,
+ 1706,
+ - 1162,
+ 549,
+ 1550,
+ - 1225,
+ - 1932,
+ - 1666,
+ - 1485,
+ - 1977,
+ - 2055,
+ - 1727,
+ - 906,
+ - 98,
+ - 1897,
+ 233,
+ 1492,
+ 892,
+ 108,
+ - 331,
+ - 1728,
+ - 1170,
+ - 1700,
+ - 1060,
+ 1980,
+ 1790,
+ - 1070,
+ - 1741,
+ - 1909,
+ - 11,
+ 1539,
+ 1317,
+ - 1600,
+ 94,
+ 497,
+ 421,
+ 443,
+ - 197,
+ - 1578,
+ - 349,
+ - 994,
+ - 599,
+ - 539,
+ 1140,
+ - 965,
+ - 1419,
+ - 129,
+ - 1341,
+ 175,
+ - 447,
+ - 375,
+ 1311,
+ 2055,
+ - 371,
+ - 650,
+ - 307,
+ - 1073,
+ 605,
+ 365,
+ - 2057,
+ - 113,
+ 430,
+ 652,
+ 914,
+ 967,
+ - 1012,
+ - 1586,
+ - 2323,
+ 1505,
+ 1248,
+ 559,
+ 262,
+ - 486,
+ - 401,
+ - 1727,
+ 1342,
+ 1546,
+ 50,
+ 56,
+ 432,
+ - 330,
+ 119,
+ - 604,
+ - 1517,
+ - 1080,
+ - 810,
+ 946,
+ 1127,
+ 1055,
+ - 1400,
+ - 1703,
+ - 1712,
+ - 1270,
+ - 704,
+ - 1317,
+ 807,
+ 1821,
+ 1143,
+ 2760,
+ 1606,
+ 2171,
+ 1120,
+ 409,
+ - 150,
+ - 147,
+ 404,
+ 959,
+ 2439,
+ 1911,
+ 2189,
+ - 906,
+ - 141,
+ - 866,
+ - 904,
+ - 142,
+ - 458,
+ - 557,
+ - 708,
+ - 1679,
+ - 830,
+ - 1431,
+ - 1583,
+ - 1842,
+ - 1346,
+ - 1086,
+ - 1604,
+ - 272,
+ 915,
+ - 1196,
+ 772,
+ 1056,
+ - 638,
+ - 1234,
+ - 1897,
+ - 500,
+ - 81,
+ - 822,
+ - 1289,
+ - 1613,
+ - 735,
+ - 117,
+ 785,
+ 168,
+ - 1090,
+ 1133,
+ 922,
+ - 1096,
+ - 746,
+ 1384,
+ 287,
+ - 547,
+ - 1063,
+ - 1376,
+ - 2201,
+ - 1204,
+ - 2176,
+ - 1570,
+ - 1757,
+ - 1511,
+ - 2241,
+ - 771,
+ - 1737,
+ 1099,
+ 830,
+ - 1588,
+ 724,
+ 1243,
+ - 1542,
+ 693,
+ 805,
+ - 1690,
+ - 240,
+ 1665,
+ - 1700,
+ - 4,
+ - 668,
+ 2149,
+ 816,
+ 1042,
+ - 818,
+ - 1841,
+ 22,
+ - 764,
+ - 507,
+ 449,
+ - 1151,
+ - 617,
+ 289,
+ - 843,
+ - 1596,
+ - 240,
+ 498,
+ - 234,
+ - 657,
+ - 752,
+ 480,
+ 1678,
+ - 319,
+ - 481,
+ 193,
+ - 811,
+ 171,
+ - 119,
+ - 2128,
+ - 202,
+ - 848,
+ 1717,
+ 1140,
+ 1700
+};
+
+/* 3rd LSF quantizer (not in MR122, MR515 and MR475) */
+static const Word32 dico3_lsf_3[] =
+{
+ 67,
+ - 17,
+ 66,
+ - 12,
+ - 1690,
+ - 581,
+ - 104,
+ - 272,
+ - 1076,
+ - 1186,
+ - 1845,
+ - 376,
+ - 1140,
+ - 926,
+ - 420,
+ - 58,
+ - 259,
+ - 656,
+ - 1134,
+ - 553,
+ 1788,
+ 1227,
+ 455,
+ 129,
+ 462,
+ 441,
+ - 240,
+ - 528,
+ 840,
+ 514,
+ 130,
+ - 75,
+ 1114,
+ 623,
+ 153,
+ 216,
+ 1068,
+ 564,
+ - 6,
+ - 276,
+ 1119,
+ 727,
+ 190,
+ - 68,
+ 704,
+ 306,
+ 119,
+ - 264,
+ 329,
+ 61,
+ - 100,
+ 156,
+ 364,
+ 123,
+ 183,
+ - 208,
+ - 171,
+ - 123,
+ 220,
+ - 65,
+ - 306,
+ - 62,
+ 402,
+ 17,
+ - 660,
+ - 938,
+ - 266,
+ 0,
+ 385,
+ 235,
+ 276,
+ 285,
+ 320,
+ 268,
+ - 336,
+ - 200,
+ - 724,
+ 17,
+ - 84,
+ 381,
+ - 544,
+ 429,
+ 494,
+ 519,
+ - 117,
+ 288,
+ 304,
+ 329,
+ 643,
+ 157,
+ 701,
+ 508,
+ 1200,
+ 625,
+ 796,
+ 608,
+ 998,
+ 421,
+ 492,
+ 632,
+ 1204,
+ 780,
+ 446,
+ 132,
+ 1257,
+ 844,
+ 547,
+ 449,
+ 829,
+ 658,
+ 541,
+ 470,
+ 1132,
+ 1258,
+ 918,
+ 639,
+ 547,
+ 51,
+ 423,
+ 279,
+ 9,
+ 392,
+ 83,
+ 94,
+ 542,
+ 543,
+ 229,
+ - 147,
+ - 198,
+ 129,
+ 194,
+ - 185,
+ - 863,
+ - 1321,
+ - 302,
+ 30,
+ - 597,
+ - 629,
+ - 19,
+ 114,
+ - 900,
+ - 1081,
+ 466,
+ 353,
+ - 1483,
+ - 1573,
+ 15,
+ - 143,
+ - 1708,
+ - 2059,
+ - 751,
+ 196,
+ - 1876,
+ - 2067,
+ - 642,
+ - 258,
+ - 2335,
+ - 1470,
+ - 450,
+ - 564,
+ - 584,
+ - 186,
+ - 872,
+ - 414,
+ - 1805,
+ - 988,
+ - 1125,
+ - 1310,
+ - 726,
+ - 1129,
+ 28,
+ 169,
+ - 1039,
+ - 864,
+ - 718,
+ - 246,
+ 484,
+ 36,
+ - 233,
+ - 49,
+ 265,
+ 67,
+ 289,
+ 467,
+ 178,
+ 543,
+ 810,
+ 540,
+ 84,
+ 282,
+ 672,
+ 703,
+ - 975,
+ - 777,
+ 129,
+ 287,
+ - 938,
+ - 227,
+ 955,
+ 595,
+ - 1617,
+ - 289,
+ 836,
+ 649,
+ - 1847,
+ - 215,
+ 1106,
+ 718,
+ - 2034,
+ - 1085,
+ 650,
+ 440,
+ - 2101,
+ - 529,
+ 907,
+ 575,
+ - 2011,
+ - 336,
+ 670,
+ 204,
+ - 2389,
+ - 692,
+ 360,
+ 137,
+ - 2156,
+ - 2204,
+ - 9,
+ 280,
+ - 266,
+ 119,
+ 39,
+ 193,
+ 78,
+ - 59,
+ - 120,
+ 226,
+ - 975,
+ - 858,
+ - 781,
+ - 1095,
+ - 619,
+ - 413,
+ - 451,
+ - 842,
+ - 1216,
+ - 1321,
+ - 813,
+ - 883,
+ - 1376,
+ - 1615,
+ - 394,
+ - 428,
+ - 737,
+ - 1113,
+ - 549,
+ - 790,
+ - 880,
+ - 975,
+ - 967,
+ - 642,
+ - 985,
+ - 886,
+ - 1273,
+ - 1361,
+ - 473,
+ - 804,
+ - 1401,
+ - 1407,
+ 160,
+ - 265,
+ - 919,
+ - 275,
+ - 248,
+ - 250,
+ - 718,
+ - 380,
+ 97,
+ - 103,
+ - 375,
+ - 229,
+ - 415,
+ - 193,
+ - 135,
+ - 555,
+ 628,
+ 361,
+ 119,
+ 216,
+ 579,
+ 364,
+ 391,
+ 209,
+ 634,
+ 522,
+ - 154,
+ - 148,
+ 526,
+ 389,
+ 170,
+ 33,
+ 105,
+ 267,
+ 64,
+ 380,
+ - 1503,
+ - 1000,
+ - 30,
+ - 369,
+ - 1070,
+ 58,
+ 647,
+ 223,
+ - 1520,
+ - 291,
+ 621,
+ 307,
+ - 1531,
+ 156,
+ 762,
+ 404,
+ - 2029,
+ 141,
+ 734,
+ 499,
+ - 1849,
+ - 650,
+ 306,
+ 512,
+ - 187,
+ - 104,
+ - 59,
+ 438,
+ 134,
+ - 230,
+ 156,
+ - 186,
+ - 61,
+ - 260,
+ - 16,
+ 10,
+ - 569,
+ - 3,
+ - 421,
+ - 297,
+ - 1725,
+ - 521,
+ - 346,
+ 178,
+ - 1362,
+ - 59,
+ - 44,
+ 157,
+ - 2146,
+ - 461,
+ - 470,
+ - 349,
+ - 2170,
+ - 1,
+ - 369,
+ - 121,
+ - 1579,
+ - 373,
+ - 900,
+ - 1015,
+ - 1117,
+ - 591,
+ - 613,
+ - 784,
+ - 561,
+ 122,
+ - 75,
+ - 449,
+ - 4,
+ - 171,
+ - 123,
+ - 372,
+ 192,
+ 168,
+ - 76,
+ - 132,
+ 252,
+ - 107,
+ 340,
+ 210,
+ 392,
+ 509,
+ 272,
+ 181,
+ - 109,
+ 145,
+ 218,
+ 119,
+ - 416,
+ - 263,
+ 485,
+ 265,
+ - 181,
+ - 8,
+ - 286,
+ 226,
+ - 244,
+ - 218,
+ 69,
+ - 290,
+ - 158,
+ 191,
+ - 1,
+ - 64,
+ - 592,
+ - 90,
+ 213,
+ - 96,
+ 255,
+ 435,
+ 178,
+ - 80,
+ - 369,
+ - 18,
+ - 33,
+ - 80,
+ - 42,
+ 415,
+ 140,
+ - 222,
+ 1143,
+ 651,
+ 649,
+ 329,
+ 767,
+ 556,
+ 249,
+ 235,
+ 948,
+ 413,
+ 442,
+ 279,
+ 141,
+ 339,
+ 356,
+ 557,
+ - 470,
+ - 170,
+ 99,
+ 237,
+ - 569,
+ - 800,
+ 352,
+ 565,
+ 282,
+ 473,
+ 470,
+ 332,
+ - 199,
+ - 690,
+ - 1284,
+ - 917,
+ - 193,
+ - 426,
+ - 800,
+ - 1122,
+ - 26,
+ - 371,
+ - 490,
+ - 193,
+ 637,
+ 595,
+ 519,
+ 330,
+ 408,
+ - 115,
+ 79,
+ 12,
+ 477,
+ 87,
+ - 103,
+ - 376,
+ - 666,
+ - 347,
+ - 277,
+ - 291,
+ - 510,
+ - 481,
+ 169,
+ 297,
+ - 829,
+ - 738,
+ - 205,
+ - 171,
+ - 320,
+ - 540,
+ 328,
+ 283,
+ - 859,
+ - 958,
+ 442,
+ - 2,
+ 556,
+ 686,
+ 130,
+ 56,
+ 1383,
+ 1012,
+ 755,
+ 427,
+ 612,
+ 741,
+ 628,
+ 553,
+ - 339,
+ - 796,
+ 134,
+ 277,
+ - 633,
+ - 1085,
+ - 2,
+ - 246,
+ - 880,
+ - 1035,
+ - 1607,
+ - 1064,
+ - 994,
+ - 474,
+ - 1138,
+ - 488,
+ - 414,
+ - 795,
+ 73,
+ - 206,
+ - 8,
+ - 139,
+ 439,
+ 204,
+ - 176,
+ - 578,
+ 23,
+ 131,
+ - 269,
+ - 757,
+ - 191,
+ 245,
+ - 109,
+ - 338,
+ 112,
+ 316,
+ 120,
+ - 406,
+ - 118,
+ 611,
+ - 180,
+ - 186,
+ - 645,
+ 115,
+ - 173,
+ 34,
+ - 518,
+ - 489,
+ - 151,
+ 61,
+ - 583,
+ - 844,
+ 220,
+ - 138,
+ - 681,
+ - 1020,
+ 391,
+ - 17,
+ - 598,
+ - 321,
+ 157,
+ - 295,
+ 129,
+ 155,
+ - 926,
+ - 875,
+ - 987,
+ 285,
+ 241,
+ - 83,
+ - 125,
+ - 125,
+ 620,
+ 597,
+ 432,
+ 92,
+ 393,
+ 78,
+ 409,
+ 61,
+ - 393,
+ - 739,
+ - 413,
+ - 748,
+ 83,
+ 54,
+ 361,
+ 27,
+ - 1084,
+ 130,
+ - 337,
+ - 694,
+ - 1565,
+ 297,
+ 318,
+ - 19,
+ - 1873,
+ 36,
+ 51,
+ - 317,
+ - 2323,
+ - 246,
+ 231,
+ - 84,
+ - 2306,
+ - 783,
+ 40,
+ - 179,
+ - 2233,
+ - 930,
+ - 474,
+ - 462,
+ - 754,
+ - 86,
+ - 288,
+ - 626,
+ - 2411,
+ - 455,
+ - 63,
+ 171,
+ - 1099,
+ - 1094,
+ - 26,
+ - 143,
+ - 1193,
+ - 455,
+ - 406,
+ - 381,
+ - 605,
+ - 210,
+ - 96,
+ - 51,
+ - 580,
+ - 476,
+ - 276,
+ - 15,
+ - 1195,
+ - 634,
+ - 1203,
+ - 881,
+ - 378,
+ - 221,
+ - 669,
+ - 952,
+ 594,
+ 178,
+ - 403,
+ - 676,
+ 763,
+ 327,
+ 601,
+ 290,
+ 172,
+ 300,
+ 203,
+ 157,
+ - 56,
+ - 336,
+ 356,
+ 24,
+ - 228,
+ - 296,
+ - 259,
+ - 29,
+ - 186,
+ 263,
+ 416,
+ 14,
+ - 353,
+ 373,
+ - 12,
+ - 216,
+ 257,
+ 96,
+ 174,
+ 57,
+ - 1526,
+ - 616,
+ - 954,
+ - 499,
+ - 497,
+ - 152,
+ - 333,
+ 125,
+ 105,
+ 200,
+ 179,
+ - 97,
+ - 331,
+ - 224,
+ 765,
+ 697,
+ 760,
+ 256,
+ 301,
+ 59,
+ 455,
+ - 85,
+ 204,
+ 288,
+ - 514,
+ 240,
+ 251,
+ - 109,
+ 256,
+ 417,
+ - 34,
+ - 413,
+ 101,
+ 430,
+ 384,
+ 156,
+ - 31,
+ - 10,
+ 206,
+ 426,
+ 589,
+ 145,
+ 143,
+ 71,
+ 808,
+ 906,
+ 333,
+ 349,
+ 986,
+ 938,
+ 589,
+ 331,
+ 1300,
+ 824,
+ 187,
+ 509,
+ 1062,
+ 653,
+ 379,
+ 466,
+ 1462,
+ 937,
+ 401,
+ 274,
+ 787,
+ 861,
+ 265,
+ 2,
+ 609,
+ 553,
+ 28,
+ 305,
+ 926,
+ 340,
+ 106,
+ 386,
+ 241,
+ - 267,
+ - 147,
+ 225,
+ - 178,
+ - 534,
+ 347,
+ 502,
+ - 643,
+ - 381,
+ 397,
+ 30,
+ - 651,
+ - 733,
+ - 435,
+ 398,
+ - 407,
+ - 726,
+ - 484,
+ - 248,
+ - 789,
+ - 914,
+ - 438,
+ - 476,
+ - 498,
+ - 390,
+ 75,
+ - 295,
+ - 964,
+ - 590,
+ - 606,
+ 150,
+ - 121,
+ - 49,
+ - 155,
+ - 78,
+ 935,
+ 550,
+ 389,
+ 38,
+ - 321,
+ 127,
+ 424,
+ 315,
+ - 285,
+ - 113,
+ 283,
+ 259,
+ 658,
+ 203,
+ 322,
+ 486,
+ 903,
+ 505,
+ 748,
+ 417,
+ 611,
+ 423,
+ 555,
+ 512,
+ 239,
+ - 83,
+ - 578,
+ - 19,
+ - 339,
+ - 731,
+ 349,
+ 13,
+ - 934,
+ - 1399,
+ - 114,
+ - 360,
+ 107,
+ 692,
+ 182,
+ 90,
+ - 1243,
+ - 1538,
+ - 1551,
+ - 725,
+ - 568,
+ - 903,
+ - 1363,
+ - 525,
+ - 517,
+ - 853,
+ - 861,
+ - 1004,
+ - 168,
+ - 690,
+ - 835,
+ 63,
+ - 137,
+ - 556,
+ - 547,
+ 144,
+ - 286,
+ - 817,
+ 485,
+ 319,
+ - 147,
+ - 408,
+ 526,
+ 246,
+ - 347,
+ - 434,
+ 297,
+ - 28,
+ - 290,
+ - 471,
+ - 1110,
+ - 1285,
+ - 460,
+ - 359,
+ - 988,
+ - 794,
+ 1347,
+ 1299,
+ 690,
+ 523,
+ 1216,
+ 1068,
+ 1094,
+ 757,
+ 825,
+ 1140,
+ 752,
+ 494,
+ 1252,
+ 1365,
+ 1195,
+ 898,
+ 521,
+ 1053,
+ 532,
+ 432,
+ - 334,
+ - 216,
+ - 313,
+ - 263,
+ - 160,
+ 52,
+ - 472,
+ - 155,
+ 127,
+ 136,
+ - 380,
+ 44,
+ 851,
+ 410,
+ - 162,
+ - 489,
+ 123,
+ - 255,
+ - 796,
+ - 667,
+ 1090,
+ 917,
+ 789,
+ 493,
+ 1397,
+ 1197,
+ 558,
+ 202,
+ - 51,
+ - 118,
+ - 342,
+ - 701,
+ 83,
+ 108,
+ - 42,
+ - 441,
+ 61,
+ 95,
+ 287,
+ 256,
+ - 27,
+ 89,
+ 524,
+ 531,
+ 351,
+ 227,
+ 592,
+ 545,
+ 697,
+ 155,
+ - 164,
+ 307,
+ 638,
+ 274,
+ - 489,
+ - 50,
+ 754,
+ 240,
+ - 166,
+ - 124,
+ - 116,
+ - 579,
+ - 1212,
+ - 63,
+ 190,
+ - 295,
+ - 1040,
+ - 1296,
+ 147,
+ - 376,
+ - 177,
+ - 113,
+ 841,
+ 1241,
+ 1051,
+ 668,
+ 2,
+ 293,
+ 551,
+ 304,
+ - 1096,
+ - 953,
+ - 248,
+ 376,
+ - 750,
+ - 965,
+ 87,
+ 516,
+ - 275,
+ - 516,
+ 689,
+ 391,
+ - 379,
+ - 643,
+ 876,
+ 594,
+ - 390,
+ - 1013,
+ - 645,
+ 573,
+ - 107,
+ - 568,
+ - 689,
+ - 826,
+ - 1025,
+ - 27,
+ - 328,
+ - 203,
+ 861,
+ 749,
+ 548,
+ 233,
+ - 1660,
+ - 1043,
+ 451,
+ 108,
+ - 660,
+ - 620,
+ 430,
+ 236,
+ 21,
+ - 396,
+ - 1158,
+ - 631,
+ 1372,
+ 1298,
+ 967,
+ 577,
+ 1125,
+ 1125,
+ 589,
+ 454,
+ - 323,
+ - 865,
+ - 467,
+ 153,
+ - 468,
+ - 699,
+ - 804,
+ - 509,
+ - 392,
+ - 718,
+ - 204,
+ - 35,
+ - 603,
+ - 1093,
+ - 567,
+ - 162,
+ - 505,
+ - 1004,
+ - 102,
+ 350,
+ 219,
+ 224,
+ 423,
+ 252,
+ 395,
+ 591,
+ 608,
+ 363,
+ - 746,
+ - 96,
+ 373,
+ 172,
+ 171,
+ 295,
+ 714,
+ 339,
+ 233,
+ 77,
+ 107,
+ 277,
+ 157,
+ 153,
+ - 499,
+ - 356,
+ 1547,
+ 1073,
+ 576,
+ 494,
+ - 292,
+ - 339,
+ - 504,
+ - 592,
+ - 903,
+ - 72,
+ - 619,
+ - 481,
+ - 1594,
+ - 1117,
+ - 567,
+ - 254,
+ - 793,
+ - 507,
+ - 564,
+ - 291,
+ - 492,
+ - 532,
+ 502,
+ 560,
+ - 382,
+ 427,
+ 600,
+ 230,
+ - 227,
+ 477,
+ 251,
+ 75,
+ 285,
+ 842,
+ 813,
+ 476,
+ - 1310,
+ - 1333,
+ 186,
+ 377,
+ - 587,
+ - 917,
+ 643,
+ 381,
+ - 1186,
+ - 553,
+ 411,
+ 82,
+ - 1127,
+ - 820,
+ - 174,
+ - 540,
+ - 604,
+ 119,
+ 543,
+ 205,
+ - 380,
+ 657,
+ 909,
+ 567,
+ 112,
+ - 298,
+ - 374,
+ 114,
+ - 857,
+ - 251,
+ 56,
+ 159,
+ 401,
+ 345,
+ - 34,
+ - 140,
+ - 111,
+ - 607,
+ 41,
+ 614,
+ 355,
+ - 114,
+ - 77,
+ 474,
+ 578,
+ 56,
+ 1450,
+ 924,
+ 1098,
+ 1420,
+ 741,
+ 400,
+ 246,
+ 22,
+ 588,
+ 313,
+ - 121,
+ 327,
+ 831,
+ 472,
+ - 1138,
+ - 608,
+ 856,
+ 552,
+ - 1241,
+ - 1072,
+ 638,
+ 600,
+ - 358,
+ 254,
+ - 333,
+ - 303,
+ - 646,
+ 739,
+ 358,
+ 74,
+ 1226,
+ 1671,
+ 1221,
+ 849,
+ 2241,
+ 1624,
+ 983,
+ 636,
+ 1841,
+ 1477,
+ 749,
+ 384,
+ 350,
+ 263,
+ 87,
+ 128,
+ - 1902,
+ - 941,
+ - 144,
+ - 64,
+ - 1734,
+ - 255,
+ 288,
+ - 31,
+ - 2644,
+ - 1238,
+ 366,
+ 235,
+ - 1643,
+ - 1092,
+ - 1344,
+ - 304,
+ - 541,
+ - 1075,
+ - 1116,
+ 123,
+ - 1178,
+ - 252,
+ - 816,
+ - 180,
+ - 1016,
+ 533,
+ 565,
+ 233,
+ - 487,
+ - 430,
+ - 188,
+ 334,
+ 867,
+ 1236,
+ 534,
+ 171,
+ - 1590,
+ - 1607,
+ 635,
+ 630,
+ - 2196,
+ 310,
+ 924,
+ 412,
+ - 2358,
+ - 328,
+ 956,
+ 529,
+ - 2639,
+ - 377,
+ 630,
+ 278,
+ - 2602,
+ 317,
+ 799,
+ 299,
+ - 2406,
+ 133,
+ 340,
+ 31,
+ - 2156,
+ - 1468,
+ 131,
+ 125,
+ - 1184,
+ - 490,
+ - 139,
+ 46,
+ - 744,
+ 447,
+ 891,
+ 564,
+ 67,
+ - 451,
+ 646,
+ 604,
+ - 553,
+ - 429,
+ - 876,
+ 396,
+ 162,
+ - 66,
+ 1305,
+ 915,
+ 479,
+ 579,
+ 1088,
+ 794,
+ 450,
+ 278,
+ 566,
+ 324,
+ - 1057,
+ - 154,
+ 148,
+ - 177,
+ - 2545,
+ 168,
+ 1070,
+ 592,
+ - 2351,
+ - 42,
+ 819,
+ 345,
+ - 2344,
+ - 707,
+ 721,
+ 250,
+ - 2175,
+ - 1497,
+ - 309,
+ 122,
+ - 78,
+ - 73,
+ 120,
+ 173,
+ - 4,
+ 262,
+ - 263,
+ - 261,
+ - 431,
+ - 64,
+ - 405,
+ - 732,
+ - 2609,
+ 116,
+ - 83,
+ - 193,
+ - 1525,
+ - 944,
+ - 477,
+ - 725,
+ - 508,
+ 307,
+ 170,
+ 172,
+ 832,
+ 417,
+ 832,
+ 686,
+ - 225,
+ 177,
+ 894,
+ 818,
+ - 482,
+ - 389,
+ 1279,
+ 1039,
+ - 383,
+ 201,
+ - 350,
+ 40,
+ 730,
+ 635,
+ 226,
+ 526,
+ 503,
+ 462,
+ 338,
+ 398,
+ 535,
+ 714,
+ 40,
+ - 282,
+ 1482,
+ 1471,
+ 1085,
+ 731,
+ 1561,
+ 1072,
+ 909,
+ 693,
+ 1419,
+ 1282,
+ 889,
+ 879,
+ 1153,
+ 728,
+ 1186,
+ 840,
+ - 226,
+ 1130,
+ 949,
+ 689,
+ - 494,
+ - 986,
+ - 1556,
+ - 128,
+ - 568,
+ - 721,
+ - 713,
+ - 26,
+ 317,
+ 524,
+ 70,
+ 135,
+ - 405,
+ - 865,
+ - 1766,
+ - 652,
+ - 174,
+ - 801,
+ 885,
+ 773,
+ - 153,
+ - 91,
+ 1099,
+ 751,
+ - 506,
+ - 1149,
+ 853,
+ 646,
+ 241,
+ 782,
+ 519,
+ 539,
+ 1853,
+ 1700,
+ 1101,
+ 684,
+ - 1249,
+ - 1486,
+ - 464,
+ 188,
+ - 893,
+ - 1409,
+ - 1312,
+ - 341,
+ - 135,
+ 438,
+ - 175,
+ 18,
+ 1111,
+ 976,
+ 319,
+ 208,
+ - 1430,
+ - 1768,
+ 83,
+ 458,
+ - 530,
+ - 1000,
+ 307,
+ 129,
+ - 840,
+ - 15,
+ - 29,
+ - 356,
+ - 911,
+ - 924,
+ - 1147,
+ - 242,
+ - 119,
+ - 528,
+ 127,
+ - 133,
+ - 761,
+ - 765,
+ 190,
+ - 83,
+ - 315,
+ 895,
+ 522,
+ 231,
+ - 222,
+ 102,
+ - 63,
+ - 428,
+ 316,
+ 699,
+ 379,
+ 70,
+ 25,
+ 716,
+ 314,
+ - 108,
+ 507,
+ 874,
+ 566,
+ 238,
+ 108,
+ 941,
+ 519,
+ 195,
+ 425,
+ - 60,
+ - 427,
+ 257,
+ 139,
+ - 103,
+ - 630,
+ 446,
+ 334,
+ 370,
+ 412,
+ 48,
+ - 172,
+ - 690,
+ - 283,
+ 557,
+ 187,
+ - 286,
+ 158,
+ 483,
+ 140,
+ 270,
+ - 344,
+ - 631,
+ 924,
+ 579,
+ - 116,
+ 132,
+ 142,
+ 466,
+ - 68,
+ - 64,
+ 230,
+ - 145,
+ - 302,
+ - 542,
+ - 803,
+ - 912,
+ 1018,
+ 737,
+ - 773,
+ 1015,
+ 630,
+ 297,
+ - 2596,
+ 95,
+ 445,
+ 336,
+ - 2122,
+ 491,
+ 510,
+ 191,
+ - 1253,
+ 161,
+ - 2,
+ - 324,
+ - 1450,
+ - 633,
+ - 712,
+ - 105,
+ - 842,
+ - 254,
+ - 411,
+ 100,
+ - 640,
+ - 290,
+ 1010,
+ 763,
+ - 650,
+ 313,
+ 1169,
+ 730,
+ 140,
+ 505,
+ 1030,
+ 766,
+ 772,
+ 287,
+ 1067,
+ 823,
+ 495,
+ 749,
+ 305,
+ 323,
+ - 164,
+ 462,
+ 78,
+ 399,
+ - 342,
+ - 874,
+ 69,
+ 597,
+ - 16,
+ 620,
+ 621,
+ 337,
+ - 138,
+ - 444,
+ - 265,
+ 218,
+ 84,
+ - 450,
+ 953,
+ 666,
+ - 222,
+ - 803,
+ 541,
+ 604,
+ - 921,
+ - 1376,
+ 244,
+ 116,
+ - 841,
+ - 723,
+ 630,
+ 588,
+ 140,
+ 663,
+ 294,
+ 368,
+ 935,
+ 1046,
+ 881,
+ 759,
+ 1746,
+ 1464,
+ 916,
+ 628,
+ 436,
+ 963,
+ 281,
+ 1,
+ - 119,
+ 74,
+ 542,
+ 213,
+ 1,
+ - 567,
+ 301,
+ 241,
+ 260,
+ 435,
+ 222,
+ 396,
+ 936,
+ 957,
+ 1108,
+ 703,
+ 510,
+ 506,
+ 808,
+ 478,
+ 601,
+ 694,
+ 960,
+ 620,
+ 972,
+ 741,
+ 980,
+ 600,
+ 834,
+ 717,
+ 767,
+ 684,
+ 643,
+ 972,
+ 935,
+ 638,
+ 501,
+ 661,
+ 720,
+ 851,
+ - 105,
+ - 632,
+ - 303,
+ - 117,
+ - 429,
+ 130,
+ 789,
+ 442,
+ - 522,
+ - 188,
+ 704,
+ 373,
+ - 759,
+ 42,
+ 814,
+ 523,
+ - 531,
+ - 1137,
+ 373,
+ 578,
+ - 682,
+ - 1203,
+ - 455,
+ 285,
+ - 1163,
+ - 1577,
+ - 1098,
+ 44,
+ 81,
+ - 82,
+ 712,
+ 363,
+ 477,
+ 246,
+ 954,
+ 622,
+ 1604,
+ 1622,
+ 1277,
+ 891,
+ 1409,
+ 859,
+ 924,
+ 892,
+ 774,
+ 1041,
+ 947,
+ 1142,
+ 40,
+ - 546,
+ - 75,
+ 288,
+ - 616,
+ - 106,
+ - 697,
+ - 26,
+ - 169,
+ - 160,
+ - 891,
+ - 739,
+ - 279,
+ - 384,
+ - 1029,
+ - 350,
+ 1781,
+ 1308,
+ 1046,
+ 816,
+ 1580,
+ 1533,
+ 1472,
+ 1178,
+ 1505,
+ 1076,
+ 1216,
+ 899,
+ 890,
+ 904,
+ 564,
+ 654,
+ 920,
+ 692,
+ 1021,
+ 856,
+ - 493,
+ 132,
+ 177,
+ 505,
+ 71,
+ 195,
+ - 28,
+ 97,
+ 456,
+ 351,
+ - 164,
+ 88,
+ 439,
+ 278,
+ - 40,
+ 350,
+ 1395,
+ 949,
+ 234,
+ - 95,
+ - 805,
+ - 472,
+ 38,
+ - 163,
+ 367,
+ - 98,
+ 489,
+ 523,
+ 1025,
+ 1178,
+ 1212,
+ 906,
+ 319,
+ 1314,
+ 814,
+ 461,
+ - 123,
+ - 543,
+ - 804,
+ 447,
+ - 748,
+ - 324,
+ - 897,
+ - 1127,
+ - 737,
+ - 501,
+ - 789,
+ - 713,
+ 715,
+ 777,
+ 1239,
+ 922,
+ 1949,
+ 1939,
+ 1368,
+ 865,
+ 730,
+ 880,
+ 758,
+ 388,
+ - 871,
+ 454,
+ 17,
+ - 251,
+ - 381,
+ - 810,
+ - 1583,
+ 239,
+ - 521,
+ - 966,
+ - 792,
+ 259,
+ - 890,
+ - 1358,
+ - 770,
+ - 73,
+ 166,
+ 349,
+ - 212,
+ 323,
+ - 840,
+ - 301,
+ 473,
+ 435,
+ - 679,
+ - 464,
+ 728,
+ 351,
+ - 156,
+ - 199,
+ 667,
+ 432,
+ 29,
+ - 252,
+ 415,
+ 480,
+ - 731,
+ - 379,
+ 145,
+ 559,
+ - 528,
+ - 631,
+ - 1158,
+ - 159,
+ 445,
+ 273,
+ 123,
+ 639,
+ 373,
+ - 126,
+ 800,
+ 568,
+ 84,
+ - 162,
+ 720,
+ 712,
+ - 830,
+ - 536,
+ - 185,
+ 222,
+ 408,
+ 452,
+ 501,
+ 771,
+ - 897,
+ - 1355,
+ - 67,
+ 442,
+ - 792,
+ - 1406,
+ 566,
+ 602,
+ 167,
+ - 326,
+ 509,
+ 330,
+ - 95,
+ - 626,
+ - 730,
+ - 344,
+ 1668,
+ 1217,
+ 779,
+ 455,
+ 1316,
+ 828,
+ 584,
+ 719,
+ 404,
+ - 31,
+ 1013,
+ 789,
+ 89,
+ 107,
+ 891,
+ 549,
+ 871,
+ 1581,
+ 917,
+ 671,
+ 866,
+ 1479,
+ 1289,
+ 854,
+ 391,
+ 1068,
+ 1122,
+ 812,
+ 78,
+ - 562,
+ 345,
+ 563,
+ 429,
+ - 103,
+ 417,
+ 787,
+ - 122,
+ - 437,
+ 411,
+ 788,
+ - 913,
+ - 417,
+ 602,
+ 754,
+ - 226,
+ - 16,
+ 151,
+ 760,
+ - 700,
+ 118,
+ - 104,
+ - 14,
+ - 1128,
+ 48,
+ 284,
+ 393,
+ - 390,
+ - 419,
+ - 639,
+ - 116,
+ - 910,
+ 306,
+ 316,
+ - 13,
+ 1207,
+ 984,
+ 821,
+ 669,
+ - 1195,
+ - 693,
+ 140,
+ - 213,
+ - 884,
+ - 416,
+ - 199,
+ - 558,
+ - 616,
+ 245,
+ - 404,
+ - 664,
+ 262,
+ 56,
+ - 617,
+ - 724,
+ - 85,
+ - 491,
+ - 320,
+ - 656,
+ - 570,
+ - 831,
+ - 129,
+ - 528,
+ - 1506,
+ - 63,
+ - 367,
+ - 385,
+ - 358,
+ - 321,
+ 4,
+ 51,
+ - 366,
+ - 214,
+ 319,
+ 511,
+ 146,
+ 671,
+ - 17,
+ - 291,
+ - 110,
+ 464,
+ - 139,
+ - 496,
+ - 202,
+ 220,
+ - 312,
+ - 631,
+ - 660,
+ - 73,
+ - 655,
+ - 820,
+ - 662,
+ - 653,
+ - 1288,
+ - 857,
+ - 430,
+ - 953,
+ - 959,
+ - 264,
+ - 49,
+ - 468,
+ - 72,
+ - 381,
+ - 350,
+ - 563,
+ - 193,
+ - 407,
+ 55,
+ - 408,
+ - 803,
+ 11,
+ - 309,
+ 649,
+ 188,
+ - 198,
+ - 512,
+ 461,
+ - 79,
+ - 458,
+ - 1318,
+ - 263,
+ - 134,
+ - 523,
+ - 1657,
+ - 435,
+ - 495,
+ - 765,
+ 57,
+ - 347,
+ - 414,
+ 434,
+ - 1141,
+ - 242,
+ - 664,
+ - 857,
+ 34,
+ - 68,
+ - 707,
+ - 338
+};
+#define MR515_3_SIZE 128
+
+/* 3rd LSF quantizer (MR515 and MR475) */
+static const Word32 mr515_3_lsf[] =
+{
+ 419,
+ 163,
+ - 30,
+ - 262,
+ - 455,
+ - 789,
+ - 1430,
+ - 721,
+ 1006,
+ 664,
+ 269,
+ 25,
+ 619,
+ 260,
+ 183,
+ 96,
+ - 968,
+ - 1358,
+ - 388,
+ 135,
+ - 693,
+ 835,
+ 456,
+ 154,
+ 1105,
+ 703,
+ 569,
+ 363,
+ 1625,
+ 1326,
+ 985,
+ 748,
+ - 220,
+ 219,
+ 76,
+ - 208,
+ - 1455,
+ - 1662,
+ 49,
+ 149,
+ - 964,
+ - 172,
+ - 752,
+ - 336,
+ 625,
+ 209,
+ - 250,
+ - 66,
+ - 1017,
+ - 838,
+ - 2,
+ 317,
+ - 2168,
+ - 1485,
+ - 138,
+ 123,
+ - 1876,
+ - 2099,
+ - 521,
+ 85,
+ - 967,
+ - 366,
+ - 695,
+ - 881,
+ - 921,
+ - 1011,
+ - 763,
+ - 949,
+ - 124,
+ - 256,
+ - 352,
+ - 660,
+ 178,
+ 463,
+ 354,
+ 304,
+ - 1744,
+ - 591,
+ - 282,
+ 79,
+ - 2249,
+ 175,
+ 867,
+ 499,
+ - 138,
+ - 180,
+ - 181,
+ - 21,
+ - 2291,
+ - 1241,
+ - 460,
+ - 520,
+ - 771,
+ 451,
+ - 10,
+ - 308,
+ 271,
+ - 65,
+ 4,
+ 214,
+ - 279,
+ - 435,
+ - 43,
+ - 348,
+ - 670,
+ 35,
+ - 65,
+ - 211,
+ 806,
+ 535,
+ 85,
+ 297,
+ 57,
+ 239,
+ 722,
+ 493,
+ 225,
+ 661,
+ 840,
+ 547,
+ - 540,
+ - 376,
+ 14,
+ 349,
+ 469,
+ 721,
+ 331,
+ 162,
+ - 544,
+ - 752,
+ - 62,
+ - 10,
+ 398,
+ - 88,
+ 724,
+ 701,
+ - 19,
+ - 533,
+ - 94,
+ 601,
+ 136,
+ - 71,
+ - 681,
+ - 747,
+ - 166,
+ - 344,
+ 261,
+ - 50,
+ 161,
+ - 52,
+ 485,
+ 337,
+ - 1675,
+ 50,
+ 190,
+ - 93,
+ - 2282,
+ - 231,
+ - 194,
+ - 82,
+ - 95,
+ - 595,
+ - 154,
+ 128,
+ 894,
+ 501,
+ 588,
+ 457,
+ - 345,
+ 206,
+ 122,
+ 110,
+ - 631,
+ - 227,
+ - 569,
+ 3,
+ 408,
+ 239,
+ 397,
+ 226,
+ - 197,
+ - 2,
+ 128,
+ 491,
+ 1281,
+ 904,
+ 292,
+ 215,
+ 538,
+ 306,
+ 259,
+ 509,
+ - 677,
+ - 1047,
+ 13,
+ 321,
+ - 679,
+ - 588,
+ - 358,
+ - 212,
+ - 558,
+ 243,
+ 646,
+ 479,
+ 486,
+ 342,
+ 634,
+ 532,
+ 107,
+ 802,
+ 331,
+ 136,
+ - 112,
+ - 398,
+ - 1031,
+ - 286,
+ - 326,
+ - 705,
+ 288,
+ 272,
+ 1299,
+ 1144,
+ 1178,
+ 860,
+ - 423,
+ 121,
+ - 385,
+ - 148,
+ - 295,
+ - 302,
+ - 834,
+ - 819,
+ 16,
+ - 24,
+ - 201,
+ - 476,
+ 555,
+ 91,
+ - 245,
+ 294,
+ - 38,
+ - 379,
+ - 962,
+ - 1221,
+ - 1191,
+ - 1518,
+ - 273,
+ - 395,
+ - 390,
+ - 1013,
+ - 645,
+ 573,
+ - 1843,
+ - 1030,
+ 505,
+ 468,
+ 744,
+ 947,
+ 609,
+ 493,
+ - 689,
+ - 1172,
+ - 628,
+ - 135,
+ - 1026,
+ 195,
+ 411,
+ 196,
+ 1582,
+ 1147,
+ 575,
+ 337,
+ - 1239,
+ - 777,
+ - 648,
+ - 142,
+ 595,
+ 825,
+ 967,
+ 735,
+ - 1206,
+ - 970,
+ - 81,
+ - 342,
+ - 745,
+ 13,
+ - 72,
+ 375,
+ 454,
+ 19,
+ 1407,
+ 921,
+ - 1647,
+ - 172,
+ 861,
+ 562,
+ 928,
+ 1537,
+ 1063,
+ 740,
+ - 2472,
+ - 952,
+ 264,
+ 82,
+ - 502,
+ - 965,
+ - 1334,
+ 123,
+ 867,
+ 1236,
+ 534,
+ 171,
+ - 2320,
+ - 460,
+ 780,
+ 363,
+ - 1190,
+ - 617,
+ 252,
+ - 61,
+ - 174,
+ 34,
+ 1011,
+ 788,
+ - 2333,
+ 247,
+ 423,
+ 153,
+ - 16,
+ - 355,
+ 262,
+ 449,
+ - 1576,
+ - 1073,
+ - 544,
+ - 371,
+ - 615,
+ - 305,
+ 1051,
+ 805,
+ 687,
+ 528,
+ 6,
+ - 182,
+ 935,
+ 875,
+ 1002,
+ 809,
+ 199,
+ 257,
+ 126,
+ 76,
+ - 584,
+ - 1138,
+ 599,
+ 556,
+ - 1105,
+ - 1391,
+ - 1591,
+ - 519,
+ - 977,
+ - 1325,
+ 108,
+ 347,
+ - 722,
+ - 975,
+ 365,
+ 101,
+ - 145,
+ 681,
+ 249,
+ - 153,
+ 0,
+ - 334,
+ - 570,
+ 159,
+ 412,
+ 285,
+ - 336,
+ - 617,
+ - 953,
+ - 966,
+ 887,
+ 689,
+ - 1251,
+ 84,
+ - 185,
+ - 398,
+ - 592,
+ 433,
+ 1044,
+ 653,
+ 85,
+ 329,
+ - 40,
+ 361,
+ - 433,
+ - 705,
+ 466,
+ 574,
+ - 154,
+ 654,
+ 592,
+ 290,
+ - 167,
+ 72,
+ 349,
+ 175,
+ 674,
+ 297,
+ 977,
+ 720,
+ 1235,
+ 1204,
+ 757,
+ 488,
+ - 400,
+ - 269,
+ 538,
+ 372,
+ - 1350,
+ - 1387,
+ - 1194,
+ - 91,
+ 1262,
+ 876,
+ 775,
+ 700,
+ - 599,
+ - 38,
+ - 430,
+ - 722,
+ 1976,
+ 1630,
+ 991,
+ 608,
+ 111,
+ 276,
+ - 226,
+ - 96,
+ - 947,
+ - 388,
+ - 11,
+ - 7,
+ - 303,
+ - 531,
+ - 839,
+ 338,
+ 1734,
+ 1710,
+ 1405,
+ 1013,
+ - 516,
+ - 855,
+ - 645,
+ 210,
+ - 688,
+ - 416,
+ 513,
+ 230,
+ - 822,
+ - 637,
+ - 1146,
+ - 320,
+ - 952,
+ - 658,
+ - 694,
+ 183,
+ - 114,
+ - 623,
+ 818,
+ 674,
+ - 191,
+ - 204,
+ 731,
+ 635,
+ 51,
+ 1221,
+ 883,
+ 576,
+ - 954,
+ - 431,
+ 826,
+ 598,
+ - 342,
+ - 755,
+ - 900,
+ - 407,
+ - 1126,
+ - 354,
+ - 206,
+ - 512,
+ - 547,
+ - 810,
+ - 357,
+ - 620,
+ 66,
+ 515,
+ - 73,
+ - 410,
+ - 872,
+ - 945,
+ - 1444,
+ - 1227,
+ 191,
+ - 17,
+ - 544,
+ - 231,
+ - 1540,
+ - 544,
+ - 901,
+ - 886
+};
+#define MR795_1_SIZE 512
+
+/* 1st LSF quantizer (MR795) */
+static const Word32 mr795_1_lsf[] =
+{
+ - 890,
+ - 1550,
+ - 2541,
+ - 819,
+ - 970,
+ 175,
+ - 826,
+ - 1234,
+ - 762,
+ - 599,
+ - 22,
+ 634,
+ - 811,
+ - 987,
+ - 902,
+ - 323,
+ 203,
+ 26,
+ - 383,
+ - 235,
+ - 781,
+ - 399,
+ 1262,
+ 906,
+ - 932,
+ - 1399,
+ - 1380,
+ - 624,
+ 93,
+ 87,
+ - 414,
+ - 539,
+ - 691,
+ 37,
+ 633,
+ 510,
+ - 387,
+ - 476,
+ - 1330,
+ 399,
+ 66,
+ 263,
+ - 407,
+ - 49,
+ - 335,
+ - 417,
+ 1041,
+ 1865,
+ - 779,
+ - 1089,
+ - 1440,
+ - 746,
+ - 858,
+ 832,
+ - 581,
+ - 759,
+ - 371,
+ - 673,
+ - 506,
+ 2088,
+ - 560,
+ - 634,
+ - 1179,
+ 271,
+ 241,
+ 14,
+ - 438,
+ - 244,
+ - 397,
+ 463,
+ 1202,
+ 1047,
+ - 606,
+ - 797,
+ - 1438,
+ - 51,
+ - 323,
+ 481,
+ - 224,
+ - 584,
+ - 527,
+ 494,
+ 881,
+ 682,
+ - 433,
+ - 306,
+ - 1002,
+ 554,
+ 659,
+ 222,
+ 171,
+ - 160,
+ - 353,
+ 681,
+ 1798,
+ 1565,
+ - 852,
+ - 1181,
+ - 1695,
+ - 336,
+ - 666,
+ 114,
+ - 581,
+ - 756,
+ - 744,
+ - 195,
+ 375,
+ 497,
+ - 465,
+ - 804,
+ - 1098,
+ 154,
+ 282,
+ - 131,
+ - 50,
+ - 191,
+ - 719,
+ 323,
+ 732,
+ 1542,
+ - 722,
+ - 819,
+ - 1404,
+ 105,
+ - 250,
+ 185,
+ - 178,
+ - 502,
+ - 742,
+ 321,
+ 510,
+ 1111,
+ - 323,
+ - 567,
+ - 966,
+ 127,
+ 484,
+ 338,
+ - 160,
+ 52,
+ - 338,
+ 732,
+ 1367,
+ 1554,
+ - 626,
+ - 802,
+ - 1696,
+ - 286,
+ - 586,
+ 676,
+ - 695,
+ - 343,
+ - 370,
+ - 490,
+ 295,
+ 1893,
+ - 630,
+ - 574,
+ - 1014,
+ - 80,
+ 645,
+ - 69,
+ - 6,
+ - 318,
+ - 364,
+ 782,
+ 1450,
+ 1038,
+ - 313,
+ - 733,
+ - 1395,
+ 120,
+ 60,
+ 477,
+ - 264,
+ - 585,
+ - 123,
+ 711,
+ 1245,
+ 633,
+ - 91,
+ - 355,
+ - 1016,
+ 771,
+ 758,
+ 261,
+ 253,
+ 81,
+ - 474,
+ 930,
+ 2215,
+ 1720,
+ - 808,
+ - 1099,
+ - 1925,
+ - 560,
+ - 782,
+ 169,
+ - 804,
+ - 1074,
+ - 188,
+ - 626,
+ - 55,
+ 1405,
+ - 694,
+ - 716,
+ - 1194,
+ - 660,
+ 354,
+ 329,
+ - 514,
+ - 55,
+ - 543,
+ 366,
+ 1033,
+ 1182,
+ - 658,
+ - 959,
+ - 1357,
+ - 55,
+ - 184,
+ 93,
+ - 605,
+ - 286,
+ - 662,
+ 404,
+ 449,
+ 827,
+ - 286,
+ - 350,
+ - 1263,
+ 628,
+ 306,
+ 227,
+ - 16,
+ 147,
+ - 623,
+ 186,
+ 923,
+ 2146,
+ - 674,
+ - 890,
+ - 1606,
+ - 443,
+ - 228,
+ 339,
+ - 369,
+ - 790,
+ - 409,
+ 231,
+ 86,
+ 1469,
+ - 448,
+ - 581,
+ - 1061,
+ 594,
+ 450,
+ - 177,
+ - 124,
+ - 170,
+ - 447,
+ 671,
+ 1159,
+ 1404,
+ - 476,
+ - 667,
+ - 1511,
+ - 77,
+ - 138,
+ 716,
+ - 177,
+ - 372,
+ - 381,
+ 451,
+ 934,
+ 915,
+ - 250,
+ - 432,
+ - 822,
+ 272,
+ 828,
+ 446,
+ 26,
+ 19,
+ - 31,
+ 698,
+ 1692,
+ 2168,
+ - 646,
+ - 977,
+ - 1924,
+ - 179,
+ - 473,
+ 268,
+ - 379,
+ - 745,
+ - 691,
+ 11,
+ 127,
+ 1033,
+ - 488,
+ - 917,
+ - 825,
+ 61,
+ 323,
+ 135,
+ 147,
+ - 145,
+ - 686,
+ 685,
+ 786,
+ 1682,
+ - 506,
+ - 848,
+ - 1297,
+ 35,
+ 90,
+ 222,
+ - 23,
+ - 346,
+ - 670,
+ 455,
+ 591,
+ 1287,
+ - 203,
+ - 593,
+ - 1086,
+ 652,
+ 352,
+ 437,
+ 39,
+ 63,
+ - 457,
+ 841,
+ 1265,
+ 2105,
+ - 520,
+ - 882,
+ - 1584,
+ - 328,
+ - 711,
+ 1421,
+ - 596,
+ - 342,
+ - 70,
+ 209,
+ 173,
+ 1928,
+ - 423,
+ - 598,
+ - 921,
+ 421,
+ 605,
+ - 38,
+ - 2,
+ - 245,
+ - 127,
+ 896,
+ 1969,
+ 1135,
+ - 379,
+ - 518,
+ - 1579,
+ 173,
+ 118,
+ 753,
+ - 55,
+ - 381,
+ - 52,
+ 985,
+ 1021,
+ 753,
+ - 2,
+ - 291,
+ - 891,
+ 753,
+ 992,
+ 423,
+ 264,
+ 131,
+ - 196,
+ 895,
+ 2274,
+ 2543,
+ - 635,
+ - 1088,
+ - 2499,
+ - 529,
+ - 982,
+ 526,
+ - 764,
+ - 830,
+ - 548,
+ - 436,
+ 316,
+ 599,
+ - 675,
+ - 940,
+ - 746,
+ - 57,
+ 236,
+ - 11,
+ - 201,
+ - 81,
+ - 798,
+ 16,
+ 845,
+ 1558,
+ - 737,
+ - 985,
+ - 1212,
+ - 468,
+ 17,
+ 290,
+ - 279,
+ - 584,
+ - 700,
+ 183,
+ 822,
+ 705,
+ - 265,
+ - 492,
+ - 1187,
+ 421,
+ 152,
+ 468,
+ - 390,
+ 166,
+ - 268,
+ 39,
+ 1550,
+ 1868,
+ - 635,
+ - 966,
+ - 1571,
+ - 453,
+ - 492,
+ 910,
+ - 284,
+ - 1027,
+ - 75,
+ - 181,
+ - 133,
+ 1852,
+ - 445,
+ - 624,
+ - 1174,
+ 420,
+ 367,
+ - 49,
+ - 389,
+ - 212,
+ - 169,
+ 707,
+ 1073,
+ 1208,
+ - 539,
+ - 710,
+ - 1449,
+ 83,
+ - 163,
+ 484,
+ - 236,
+ - 543,
+ - 355,
+ 338,
+ 1175,
+ 814,
+ - 246,
+ - 309,
+ - 958,
+ 606,
+ 760,
+ 60,
+ 166,
+ - 8,
+ - 163,
+ - 306,
+ 1849,
+ 2563,
+ - 747,
+ - 1025,
+ - 1783,
+ - 419,
+ - 446,
+ 209,
+ - 718,
+ - 566,
+ - 534,
+ - 506,
+ 693,
+ 857,
+ - 463,
+ - 697,
+ - 1082,
+ 325,
+ 431,
+ - 206,
+ - 15,
+ - 8,
+ - 763,
+ 545,
+ 919,
+ 1518,
+ - 611,
+ - 783,
+ - 1313,
+ 256,
+ - 55,
+ 208,
+ - 165,
+ - 348,
+ - 662,
+ 321,
+ 680,
+ 930,
+ - 326,
+ - 429,
+ - 951,
+ 484,
+ 446,
+ 570,
+ - 197,
+ 72,
+ - 73,
+ 909,
+ 1455,
+ 1741,
+ - 563,
+ - 737,
+ - 1974,
+ - 124,
+ - 416,
+ 718,
+ - 478,
+ - 404,
+ - 314,
+ - 16,
+ 446,
+ 1636,
+ - 551,
+ - 537,
+ - 750,
+ - 58,
+ 638,
+ 214,
+ 55,
+ - 185,
+ - 271,
+ 1148,
+ 1301,
+ 1212,
+ - 483,
+ - 671,
+ - 1264,
+ 117,
+ 285,
+ 543,
+ - 204,
+ - 391,
+ - 111,
+ 513,
+ 1538,
+ 854,
+ - 114,
+ - 190,
+ - 978,
+ 877,
+ 595,
+ 464,
+ 260,
+ 260,
+ - 311,
+ 748,
+ 2283,
+ 2216,
+ - 517,
+ - 945,
+ - 2171,
+ - 326,
+ - 708,
+ 378,
+ - 812,
+ - 691,
+ - 232,
+ - 560,
+ 687,
+ 1409,
+ - 732,
+ - 690,
+ - 836,
+ - 359,
+ 645,
+ 386,
+ - 265,
+ 62,
+ - 678,
+ 145,
+ 1644,
+ 1208,
+ - 555,
+ - 988,
+ - 1233,
+ - 78,
+ 14,
+ 114,
+ - 327,
+ - 358,
+ - 489,
+ 392,
+ 677,
+ 697,
+ - 201,
+ - 236,
+ - 1140,
+ 693,
+ 449,
+ 178,
+ - 243,
+ 256,
+ - 433,
+ 611,
+ 1385,
+ 2456,
+ - 612,
+ - 901,
+ - 1464,
+ - 307,
+ - 17,
+ 499,
+ - 315,
+ - 667,
+ - 254,
+ 256,
+ 428,
+ 1463,
+ - 486,
+ - 422,
+ - 1056,
+ 655,
+ 370,
+ 18,
+ - 102,
+ - 185,
+ - 276,
+ 755,
+ 1578,
+ 1335,
+ - 488,
+ - 603,
+ - 1418,
+ 182,
+ - 93,
+ 870,
+ - 73,
+ - 458,
+ - 348,
+ 835,
+ 862,
+ 957,
+ - 282,
+ - 333,
+ - 746,
+ 547,
+ 839,
+ 428,
+ 273,
+ - 89,
+ 13,
+ 940,
+ 1708,
+ 2576,
+ - 418,
+ - 1084,
+ - 1758,
+ - 44,
+ - 358,
+ 259,
+ - 497,
+ - 643,
+ - 560,
+ 99,
+ 557,
+ 961,
+ - 421,
+ - 766,
+ - 917,
+ 295,
+ 326,
+ 184,
+ 175,
+ 15,
+ - 626,
+ 532,
+ 878,
+ 1981,
+ - 443,
+ - 768,
+ - 1275,
+ 221,
+ 156,
+ 268,
+ 39,
+ - 363,
+ - 505,
+ 695,
+ 772,
+ 1140,
+ - 162,
+ - 459,
+ - 912,
+ 709,
+ 444,
+ 658,
+ 25,
+ 303,
+ - 312,
+ 1268,
+ 1410,
+ 1715,
+ - 297,
+ - 766,
+ - 1836,
+ - 263,
+ - 108,
+ 1070,
+ - 406,
+ - 13,
+ - 129,
+ 57,
+ 438,
+ 2734,
+ - 374,
+ - 487,
+ - 835,
+ 304,
+ 696,
+ 164,
+ 104,
+ - 235,
+ 5,
+ 1611,
+ 1900,
+ 1399,
+ - 229,
+ - 582,
+ - 1325,
+ 405,
+ 192,
+ 817,
+ - 87,
+ - 438,
+ 111,
+ 1028,
+ 1199,
+ 993,
+ 68,
+ - 175,
+ - 934,
+ 1033,
+ 1117,
+ 451,
+ 478,
+ 200,
+ - 248,
+ 2127,
+ 2696,
+ 2042,
+ - 835,
+ - 1323,
+ - 2131,
+ - 799,
+ - 692,
+ 466,
+ - 812,
+ - 1032,
+ - 469,
+ - 622,
+ 288,
+ 920,
+ - 701,
+ - 841,
+ - 1070,
+ - 411,
+ 512,
+ 8,
+ - 390,
+ - 91,
+ - 744,
+ - 30,
+ 1043,
+ 1161,
+ - 822,
+ - 1148,
+ - 1156,
+ - 294,
+ - 46,
+ 110,
+ - 411,
+ - 374,
+ - 678,
+ 214,
+ 531,
+ 668,
+ - 406,
+ - 420,
+ - 1194,
+ 487,
+ 232,
+ 303,
+ - 318,
+ 91,
+ - 472,
+ 123,
+ 1232,
+ 2445,
+ - 722,
+ - 952,
+ - 1495,
+ - 738,
+ - 675,
+ 1332,
+ - 543,
+ - 606,
+ - 211,
+ - 95,
+ - 98,
+ 1508,
+ - 549,
+ - 514,
+ - 1193,
+ 473,
+ 211,
+ 73,
+ - 288,
+ - 112,
+ - 389,
+ 537,
+ 1332,
+ 1258,
+ - 567,
+ - 755,
+ - 1545,
+ 71,
+ - 283,
+ 632,
+ - 170,
+ - 481,
+ - 493,
+ 681,
+ 1002,
+ 817,
+ - 356,
+ - 331,
+ - 877,
+ 419,
+ 706,
+ 346,
+ 241,
+ - 34,
+ - 326,
+ 377,
+ 1950,
+ 1883,
+ - 727,
+ - 1075,
+ - 1625,
+ - 233,
+ - 543,
+ 116,
+ - 524,
+ - 806,
+ - 585,
+ - 73,
+ 478,
+ 729,
+ - 288,
+ - 925,
+ - 1143,
+ 173,
+ 447,
+ - 52,
+ 68,
+ - 229,
+ - 606,
+ 449,
+ 529,
+ 1797,
+ - 591,
+ - 875,
+ - 1363,
+ 183,
+ - 144,
+ 324,
+ - 103,
+ - 452,
+ - 666,
+ 623,
+ 488,
+ 1176,
+ - 238,
+ - 511,
+ - 1004,
+ 326,
+ 552,
+ 458,
+ 136,
+ 108,
+ - 319,
+ 626,
+ 1343,
+ 1883,
+ - 490,
+ - 646,
+ - 1730,
+ - 186,
+ - 449,
+ 984,
+ - 738,
+ - 76,
+ - 170,
+ - 550,
+ 755,
+ 2560,
+ - 496,
+ - 510,
+ - 947,
+ 210,
+ 694,
+ - 52,
+ 84,
+ - 322,
+ - 199,
+ 1090,
+ 1625,
+ 1224,
+ - 376,
+ - 603,
+ - 1396,
+ 343,
+ 74,
+ 632,
+ - 175,
+ - 502,
+ - 32,
+ 972,
+ 1332,
+ 734,
+ 52,
+ - 295,
+ - 1113,
+ 1065,
+ 918,
+ 160,
+ 393,
+ 107,
+ - 397,
+ 1214,
+ 2649,
+ 1741,
+ - 632,
+ - 1201,
+ - 1891,
+ - 719,
+ - 277,
+ 353,
+ - 651,
+ - 880,
+ - 122,
+ - 211,
+ 209,
+ 1338,
+ - 562,
+ - 714,
+ - 1059,
+ - 208,
+ 388,
+ 159,
+ - 320,
+ - 61,
+ - 551,
+ 293,
+ 1092,
+ 1443,
+ - 648,
+ - 865,
+ - 1253,
+ - 49,
+ - 143,
+ 305,
+ - 401,
+ - 227,
+ - 585,
+ 561,
+ 532,
+ 927,
+ - 117,
+ - 443,
+ - 1188,
+ 507,
+ 436,
+ 292,
+ - 79,
+ 233,
+ - 458,
+ 671,
+ 1025,
+ 2396,
+ - 633,
+ - 842,
+ - 1525,
+ - 308,
+ - 286,
+ 640,
+ - 373,
+ - 621,
+ - 407,
+ 418,
+ 253,
+ 1305,
+ - 315,
+ - 581,
+ - 1137,
+ 572,
+ 685,
+ - 281,
+ 61,
+ - 68,
+ - 371,
+ 991,
+ 1101,
+ 1498,
+ - 493,
+ - 683,
+ - 1362,
+ - 47,
+ 164,
+ 704,
+ - 256,
+ - 314,
+ - 268,
+ 631,
+ 949,
+ 1052,
+ - 118,
+ - 348,
+ - 833,
+ 68,
+ 1180,
+ 568,
+ 152,
+ 117,
+ 34,
+ 1113,
+ 1902,
+ 2239,
+ - 601,
+ - 959,
+ - 1706,
+ - 143,
+ - 489,
+ 480,
+ - 332,
+ - 655,
+ - 574,
+ 54,
+ 353,
+ 1192,
+ - 462,
+ - 652,
+ - 796,
+ 150,
+ 549,
+ 112,
+ 195,
+ - 111,
+ - 515,
+ 679,
+ 1108,
+ 1647,
+ - 558,
+ - 749,
+ - 1217,
+ - 9,
+ 272,
+ 341,
+ - 53,
+ - 265,
+ - 535,
+ 489,
+ 843,
+ 1298,
+ - 120,
+ - 482,
+ - 1032,
+ 632,
+ 543,
+ 408,
+ 179,
+ 306,
+ - 526,
+ 1124,
+ 1464,
+ 2244,
+ - 417,
+ - 786,
+ - 1562,
+ - 224,
+ - 384,
+ 1364,
+ - 377,
+ - 459,
+ - 25,
+ 385,
+ 489,
+ 2174,
+ - 332,
+ - 651,
+ - 829,
+ 544,
+ 553,
+ 61,
+ 22,
+ - 113,
+ - 89,
+ 1128,
+ 1725,
+ 1524,
+ - 216,
+ - 373,
+ - 1653,
+ 161,
+ 316,
+ 908,
+ - 165,
+ - 222,
+ - 67,
+ 1362,
+ 1175,
+ 789,
+ 73,
+ - 252,
+ - 767,
+ 738,
+ 932,
+ 616,
+ 362,
+ 246,
+ - 126,
+ 787,
+ 2654,
+ 3027,
+ - 691,
+ - 1106,
+ - 2190,
+ - 565,
+ - 588,
+ 524,
+ - 590,
+ - 979,
+ - 490,
+ - 263,
+ 397,
+ 982,
+ - 577,
+ - 837,
+ - 945,
+ - 22,
+ 435,
+ - 49,
+ - 190,
+ - 118,
+ - 629,
+ - 88,
+ 1240,
+ 1513,
+ - 636,
+ - 1051,
+ - 1019,
+ - 291,
+ 189,
+ 259,
+ - 257,
+ - 470,
+ - 629,
+ 145,
+ 945,
+ 894,
+ - 326,
+ - 364,
+ - 1094,
+ 543,
+ 260,
+ 630,
+ - 202,
+ 189,
+ - 209,
+ 357,
+ 1379,
+ 2091,
+ - 569,
+ - 1075,
+ - 1449,
+ - 714,
+ - 239,
+ 919,
+ - 420,
+ - 705,
+ - 84,
+ - 109,
+ - 114,
+ 2407,
+ - 413,
+ - 529,
+ - 1177,
+ 482,
+ 368,
+ 131,
+ - 186,
+ - 72,
+ - 131,
+ 861,
+ 1255,
+ 1220,
+ - 611,
+ - 658,
+ - 1341,
+ 227,
+ - 121,
+ 631,
+ - 176,
+ - 489,
+ - 218,
+ 745,
+ 1175,
+ 957,
+ - 321,
+ - 148,
+ - 936,
+ 671,
+ 966,
+ 216,
+ 340,
+ - 3,
+ - 143,
+ 469,
+ 1848,
+ 2437,
+ - 729,
+ - 961,
+ - 1683,
+ - 213,
+ - 254,
+ 321,
+ - 511,
+ - 438,
+ - 521,
+ - 126,
+ 725,
+ 903,
+ - 340,
+ - 685,
+ - 1032,
+ 316,
+ 480,
+ 20,
+ 23,
+ - 89,
+ - 551,
+ 353,
+ 1051,
+ 1789,
+ - 544,
+ - 757,
+ - 1364,
+ 298,
+ - 25,
+ 436,
+ - 100,
+ - 392,
+ - 519,
+ 467,
+ 754,
+ 1078,
+ - 210,
+ - 398,
+ - 1078,
+ 620,
+ 658,
+ 630,
+ 33,
+ 147,
+ - 178,
+ 921,
+ 1687,
+ 1921,
+ - 325,
+ - 528,
+ - 1978,
+ 2,
+ - 285,
+ 910,
+ - 371,
+ - 490,
+ - 230,
+ 0,
+ 597,
+ 2010,
+ - 496,
+ - 395,
+ - 834,
+ 37,
+ 945,
+ 245,
+ 181,
+ - 160,
+ - 144,
+ 1481,
+ 1373,
+ 1357,
+ - 355,
+ - 601,
+ - 1270,
+ 298,
+ 322,
+ 672,
+ - 193,
+ - 336,
+ 77,
+ 1089,
+ 1533,
+ 922,
+ 177,
+ - 39,
+ - 1125,
+ 996,
+ 781,
+ 536,
+ 456,
+ 366,
+ - 432,
+ 1415,
+ 2440,
+ 2279,
+ - 466,
+ - 758,
+ - 2325,
+ - 303,
+ - 509,
+ 387,
+ - 727,
+ - 557,
+ 66,
+ - 145,
+ 643,
+ 1248,
+ - 544,
+ - 676,
+ - 916,
+ - 225,
+ 862,
+ 588,
+ - 152,
+ 40,
+ - 533,
+ 423,
+ 1423,
+ 1558,
+ - 572,
+ - 843,
+ - 1145,
+ - 128,
+ 85,
+ 461,
+ - 238,
+ - 257,
+ - 584,
+ 605,
+ 748,
+ 861,
+ 24,
+ - 202,
+ - 1409,
+ 797,
+ 487,
+ 303,
+ - 181,
+ 364,
+ - 182,
+ 616,
+ 1378,
+ 2942,
+ - 494,
+ - 852,
+ - 1441,
+ - 292,
+ 61,
+ 812,
+ - 84,
+ - 723,
+ - 182,
+ 555,
+ 532,
+ 1506,
+ - 365,
+ - 493,
+ - 1057,
+ 822,
+ 588,
+ 11,
+ - 14,
+ - 18,
+ - 230,
+ 1001,
+ 1401,
+ 1451,
+ - 474,
+ - 569,
+ - 1292,
+ 302,
+ 62,
+ 1062,
+ - 70,
+ - 376,
+ - 222,
+ 982,
+ 974,
+ 1149,
+ - 196,
+ - 234,
+ - 795,
+ 479,
+ 1098,
+ 499,
+ 362,
+ 58,
+ 70,
+ 1147,
+ 2069,
+ 2857,
+ - 487,
+ - 878,
+ - 1824,
+ 73,
+ - 288,
+ 348,
+ - 358,
+ - 500,
+ - 508,
+ 199,
+ 721,
+ 1242,
+ - 78,
+ - 697,
+ - 795,
+ 361,
+ 536,
+ 196,
+ 374,
+ 110,
+ - 735,
+ 847,
+ 1051,
+ 1896,
+ - 366,
+ - 713,
+ - 1182,
+ 315,
+ 320,
+ 429,
+ 72,
+ - 215,
+ - 450,
+ 759,
+ 886,
+ 1363,
+ - 30,
+ - 428,
+ - 834,
+ 861,
+ 627,
+ 796,
+ 118,
+ 468,
+ - 279,
+ 1355,
+ 1883,
+ 1893,
+ - 188,
+ - 642,
+ - 1612,
+ 63,
+ - 175,
+ 1198,
+ - 418,
+ - 211,
+ 51,
+ 414,
+ 587,
+ 2601,
+ - 234,
+ - 557,
+ - 858,
+ 424,
+ 889,
+ 222,
+ 136,
+ - 101,
+ 83,
+ 1413,
+ 2278,
+ 1383,
+ - 84,
+ - 445,
+ - 1389,
+ 414,
+ 313,
+ 1045,
+ 29,
+ - 343,
+ 65,
+ 1552,
+ 1647,
+ 980,
+ 183,
+ - 91,
+ - 829,
+ 1273,
+ 1413,
+ 360,
+ 553,
+ 272,
+ - 107,
+ 1587,
+ 3149,
+ 2603
+};
+#define DICO1_SIZE_5 128
+#define DICO2_SIZE_5 256
+#define DICO3_SIZE_5 256
+#define DICO4_SIZE_5 256
+#define DICO5_SIZE_5 64
+
+/* 1st LSF quantizer (MR122) */
+static const Word32 dico1_lsf_5[DICO1_SIZE_5 * 4] =
+{
+ - 451,
+ - 1065,
+ - 529,
+ - 1305,
+ - 450,
+ - 756,
+ - 497,
+ - 863,
+ - 384,
+ - 619,
+ - 413,
+ - 669,
+ - 317,
+ - 538,
+ - 331,
+ - 556,
+ - 414,
+ - 508,
+ - 424,
+ - 378,
+ - 274,
+ - 324,
+ - 434,
+ - 614,
+ - 226,
+ - 500,
+ - 232,
+ - 514,
+ - 263,
+ - 377,
+ - 298,
+ - 410,
+ - 151,
+ - 710,
+ - 174,
+ - 818,
+ - 149,
+ - 412,
+ - 156,
+ - 429,
+ - 288,
+ - 462,
+ - 186,
+ - 203,
+ - 170,
+ - 302,
+ - 191,
+ - 321,
+ - 131,
+ - 147,
+ - 297,
+ - 395,
+ - 228,
+ - 214,
+ - 245,
+ - 192,
+ - 67,
+ - 316,
+ - 71,
+ - 327,
+ - 104,
+ - 205,
+ - 94,
+ - 183,
+ - 143,
+ - 38,
+ - 193,
+ - 95,
+ 16,
+ - 76,
+ - 124,
+ - 248,
+ 23,
+ - 237,
+ 24,
+ - 244,
+ 18,
+ - 136,
+ 44,
+ - 111,
+ - 33,
+ - 24,
+ - 25,
+ 0,
+ 149,
+ 19,
+ 23,
+ - 143,
+ 158,
+ - 169,
+ 174,
+ - 181,
+ 133,
+ - 55,
+ 165,
+ - 26,
+ 111,
+ 84,
+ 98,
+ 75,
+ 87,
+ 183,
+ - 115,
+ - 11,
+ - 8,
+ 130,
+ 11,
+ 170,
+ 254,
+ 77,
+ 205,
+ 17,
+ 183,
+ 112,
+ 262,
+ 194,
+ 202,
+ 287,
+ 95,
+ 189,
+ - 42,
+ - 105,
+ 234,
+ 179,
+ 39,
+ 186,
+ 163,
+ 345,
+ 332,
+ 199,
+ 299,
+ 161,
+ - 54,
+ 285,
+ - 78,
+ 281,
+ - 133,
+ 141,
+ - 182,
+ 111,
+ 249,
+ 341,
+ 271,
+ 364,
+ 93,
+ 403,
+ 75,
+ 391,
+ 92,
+ 510,
+ - 138,
+ 220,
+ - 185,
+ - 29,
+ - 34,
+ 361,
+ - 115,
+ 320,
+ 3,
+ 554,
+ 99,
+ 286,
+ 218,
+ 591,
+ - 245,
+ 406,
+ - 268,
+ 453,
+ 0,
+ 580,
+ 25,
+ 606,
+ 275,
+ 532,
+ 148,
+ 450,
+ - 73,
+ 739,
+ - 285,
+ 518,
+ - 288,
+ 94,
+ - 203,
+ 674,
+ - 140,
+ - 74,
+ 205,
+ 714,
+ - 114,
+ 299,
+ 176,
+ 923,
+ 182,
+ 557,
+ 240,
+ 705,
+ - 16,
+ 513,
+ 485,
+ 593,
+ 293,
+ 384,
+ 451,
+ 617,
+ - 38,
+ 50,
+ 563,
+ 529,
+ 303,
+ 209,
+ 459,
+ 363,
+ 433,
+ 452,
+ 450,
+ 454,
+ 367,
+ 606,
+ 477,
+ 741,
+ 432,
+ 353,
+ 368,
+ 267,
+ 361,
+ 716,
+ 273,
+ 583,
+ 453,
+ 166,
+ 510,
+ 172,
+ 201,
+ 629,
+ 274,
+ 191,
+ 568,
+ 639,
+ 302,
+ 298,
+ 634,
+ 387,
+ 643,
+ 350,
+ 587,
+ 560,
+ 612,
+ 565,
+ 600,
+ 788,
+ 487,
+ 672,
+ 512,
+ 1015,
+ 321,
+ 333,
+ 357,
+ 854,
+ - 125,
+ 413,
+ 474,
+ 712,
+ 17,
+ - 151,
+ 564,
+ 285,
+ 270,
+ - 241,
+ 971,
+ 889,
+ 489,
+ 220,
+ 510,
+ 896,
+ 549,
+ 924,
+ 327,
+ 825,
+ 290,
+ 911,
+ 540,
+ 1108,
+ 158,
+ 805,
+ 199,
+ 957,
+ 511,
+ 730,
+ 100,
+ 874,
+ 13,
+ 791,
+ 435,
+ 632,
+ 676,
+ 972,
+ 249,
+ 900,
+ 467,
+ 1218,
+ 781,
+ 1074,
+ 585,
+ 785,
+ - 23,
+ 669,
+ 267,
+ 1043,
+ 619,
+ 1084,
+ 615,
+ 1145,
+ 622,
+ 905,
+ 916,
+ 1049,
+ 80,
+ 331,
+ 584,
+ 1075,
+ 89,
+ 639,
+ 988,
+ 961,
+ 770,
+ 720,
+ 798,
+ 699,
+ 492,
+ 447,
+ 899,
+ 627,
+ 271,
+ 1188,
+ 725,
+ 1333,
+ 87,
+ 603,
+ 832,
+ 1603,
+ 616,
+ 1127,
+ 890,
+ 1505,
+ 1000,
+ 1156,
+ 866,
+ 1009,
+ 995,
+ 827,
+ 1149,
+ 858,
+ 817,
+ 1450,
+ 773,
+ 1320,
+ 500,
+ 1389,
+ 312,
+ 1153,
+ - 20,
+ 1084,
+ 64,
+ 1283,
+ 2,
+ 1172,
+ 399,
+ 1869,
+ 514,
+ 1706,
+ 502,
+ 1636,
+ 886,
+ 1522,
+ 416,
+ 600,
+ 1131,
+ 1350,
+ 1275,
+ 1390,
+ 889,
+ 1795,
+ 914,
+ 1766,
+ 227,
+ 1183,
+ 1250,
+ 1826,
+ 505,
+ 1854,
+ 919,
+ 2353,
+ - 199,
+ 431,
+ 152,
+ 1735,
+ - 213,
+ - 28,
+ 392,
+ 1334,
+ - 153,
+ - 52,
+ 978,
+ 1151,
+ - 323,
+ - 400,
+ 813,
+ 1703,
+ - 136,
+ 84,
+ 1449,
+ 2015,
+ - 331,
+ - 143,
+ - 137,
+ 1192,
+ - 256,
+ 534,
+ - 157,
+ 1031,
+ - 307,
+ - 439,
+ 542,
+ 731,
+ - 329,
+ - 420,
+ - 97,
+ 616,
+ - 362,
+ - 168,
+ - 322,
+ 366,
+ - 247,
+ - 110,
+ - 211,
+ 89,
+ - 196,
+ - 309,
+ 20,
+ 59,
+ - 364,
+ - 463,
+ - 286,
+ 89,
+ - 336,
+ 175,
+ - 432,
+ 141,
+ - 379,
+ - 190,
+ - 434,
+ - 196,
+ - 79,
+ 150,
+ - 278,
+ - 227,
+ - 280,
+ 166,
+ - 555,
+ - 422,
+ - 155,
+ 541,
+ - 366,
+ 54,
+ - 29,
+ - 83,
+ - 301,
+ - 774,
+ 186,
+ 628,
+ - 397,
+ - 264,
+ 242,
+ 293,
+ - 197,
+ - 585,
+ 124,
+ 410,
+ 53,
+ - 133,
+ 10,
+ 340,
+ - 570,
+ - 1065,
+ 65,
+ - 446,
+ 68,
+ - 493,
+ 383,
+ 937,
+ - 357,
+ - 711,
+ - 359,
+ - 250,
+ - 677,
+ - 1068,
+ 292,
+ - 26,
+ 363,
+ 6,
+ 607,
+ 1313,
+ - 127,
+ - 10,
+ 1513,
+ 1886,
+ 713,
+ 972,
+ 1469,
+ 2181,
+ 1443,
+ 2016
+};
+
+/* 2nd LSF quantizer (MR122) */
+static const Word32 dico2_lsf_5[DICO2_SIZE_5 * 4] =
+{
+ - 1631,
+ - 1600,
+ - 1796,
+ - 2290,
+ - 1027,
+ - 1770,
+ - 1100,
+ - 2025,
+ - 1277,
+ - 1388,
+ - 1367,
+ - 1534,
+ - 947,
+ - 1461,
+ - 972,
+ - 1524,
+ - 999,
+ - 1222,
+ - 1020,
+ - 1172,
+ - 815,
+ - 987,
+ - 992,
+ - 1371,
+ - 1216,
+ - 1006,
+ - 1289,
+ - 1094,
+ - 744,
+ - 1268,
+ - 755,
+ - 1293,
+ - 862,
+ - 923,
+ - 905,
+ - 984,
+ - 678,
+ - 1051,
+ - 685,
+ - 1050,
+ - 1087,
+ - 985,
+ - 1062,
+ - 679,
+ - 989,
+ - 641,
+ - 1127,
+ - 976,
+ - 762,
+ - 654,
+ - 890,
+ - 806,
+ - 833,
+ - 1091,
+ - 706,
+ - 629,
+ - 621,
+ - 806,
+ - 640,
+ - 812,
+ - 775,
+ - 634,
+ - 779,
+ - 543,
+ - 996,
+ - 565,
+ - 1075,
+ - 580,
+ - 546,
+ - 611,
+ - 572,
+ - 619,
+ - 760,
+ - 290,
+ - 879,
+ - 526,
+ - 823,
+ - 462,
+ - 795,
+ - 253,
+ - 553,
+ - 415,
+ - 589,
+ - 439,
+ - 533,
+ - 340,
+ - 692,
+ - 935,
+ - 505,
+ - 772,
+ - 702,
+ - 1131,
+ - 263,
+ - 306,
+ - 971,
+ - 483,
+ - 445,
+ - 74,
+ - 555,
+ - 548,
+ - 614,
+ - 129,
+ - 693,
+ - 234,
+ - 396,
+ - 246,
+ - 475,
+ - 250,
+ - 265,
+ - 404,
+ - 376,
+ - 514,
+ - 417,
+ - 510,
+ - 300,
+ - 313,
+ - 334,
+ - 664,
+ - 463,
+ - 814,
+ - 386,
+ - 704,
+ - 337,
+ - 615,
+ - 234,
+ - 201,
+ - 233,
+ - 239,
+ - 167,
+ - 567,
+ - 203,
+ - 619,
+ - 147,
+ - 415,
+ - 115,
+ - 352,
+ - 166,
+ - 750,
+ - 171,
+ - 761,
+ - 270,
+ - 879,
+ - 264,
+ - 903,
+ - 367,
+ - 744,
+ 43,
+ - 475,
+ 14,
+ - 653,
+ 43,
+ - 670,
+ 11,
+ - 448,
+ - 59,
+ - 521,
+ - 126,
+ - 119,
+ - 155,
+ - 613,
+ - 42,
+ - 863,
+ - 27,
+ - 931,
+ 136,
+ - 483,
+ 183,
+ - 468,
+ 55,
+ - 298,
+ 55,
+ - 304,
+ 313,
+ - 609,
+ 313,
+ - 720,
+ 322,
+ - 167,
+ 100,
+ - 541,
+ - 3,
+ - 119,
+ - 111,
+ - 187,
+ 233,
+ - 236,
+ 260,
+ - 234,
+ 26,
+ - 165,
+ 134,
+ - 45,
+ - 40,
+ - 549,
+ 360,
+ - 203,
+ 378,
+ - 388,
+ 450,
+ - 383,
+ 275,
+ 20,
+ 182,
+ - 103,
+ 246,
+ - 111,
+ 431,
+ 37,
+ 462,
+ - 146,
+ 487,
+ - 157,
+ - 284,
+ - 59,
+ 503,
+ - 184,
+ 24,
+ 53,
+ - 3,
+ 54,
+ 122,
+ 259,
+ 333,
+ 66,
+ 484,
+ 104,
+ 436,
+ 68,
+ 195,
+ 116,
+ 190,
+ 206,
+ 269,
+ - 9,
+ 482,
+ 352,
+ 382,
+ 285,
+ 399,
+ 277,
+ 452,
+ 256,
+ 69,
+ 186,
+ 13,
+ 297,
+ - 13,
+ 259,
+ - 95,
+ 30,
+ 56,
+ 394,
+ 196,
+ 425,
+ 205,
+ 456,
+ 281,
+ 577,
+ 15,
+ 191,
+ 375,
+ 290,
+ 407,
+ 576,
+ - 56,
+ 227,
+ 544,
+ 405,
+ 0,
+ 549,
+ - 92,
+ 528,
+ - 229,
+ 351,
+ - 245,
+ 338,
+ - 362,
+ 435,
+ 167,
+ 527,
+ - 75,
+ 302,
+ 91,
+ 824,
+ 129,
+ 599,
+ 496,
+ 679,
+ 186,
+ 749,
+ 153,
+ 737,
+ - 281,
+ 600,
+ - 348,
+ 615,
+ - 236,
+ 769,
+ 41,
+ 881,
+ 38,
+ 890,
+ - 220,
+ 841,
+ - 357,
+ 883,
+ - 393,
+ 903,
+ - 634,
+ 474,
+ - 444,
+ 850,
+ - 175,
+ 678,
+ - 493,
+ 242,
+ - 519,
+ 785,
+ - 714,
+ 582,
+ - 541,
+ 366,
+ - 543,
+ 434,
+ - 597,
+ 500,
+ - 765,
+ 222,
+ - 702,
+ 917,
+ - 743,
+ 962,
+ - 869,
+ 501,
+ - 899,
+ 548,
+ - 379,
+ 200,
+ - 435,
+ 157,
+ - 819,
+ 214,
+ - 861,
+ 157,
+ - 614,
+ 40,
+ - 632,
+ 94,
+ - 883,
+ - 54,
+ - 741,
+ 516,
+ - 501,
+ 298,
+ - 614,
+ - 171,
+ - 870,
+ - 161,
+ - 865,
+ - 23,
+ - 818,
+ 93,
+ - 1015,
+ - 267,
+ - 662,
+ - 359,
+ - 549,
+ 2,
+ - 442,
+ - 121,
+ - 377,
+ 0,
+ - 227,
+ 33,
+ - 414,
+ - 126,
+ - 129,
+ 212,
+ - 934,
+ 34,
+ - 1082,
+ - 282,
+ - 1119,
+ - 268,
+ - 710,
+ - 825,
+ - 420,
+ - 191,
+ - 1076,
+ - 928,
+ - 917,
+ - 93,
+ - 628,
+ - 358,
+ 97,
+ 7,
+ - 206,
+ - 393,
+ - 101,
+ 24,
+ - 203,
+ 38,
+ - 168,
+ 83,
+ - 599,
+ - 423,
+ - 279,
+ 426,
+ - 700,
+ 118,
+ - 75,
+ 206,
+ - 981,
+ - 673,
+ - 680,
+ 417,
+ - 367,
+ 37,
+ - 279,
+ 474,
+ - 129,
+ - 318,
+ 319,
+ 296,
+ - 626,
+ - 39,
+ 343,
+ 602,
+ - 696,
+ - 39,
+ - 303,
+ 940,
+ 104,
+ 233,
+ - 380,
+ 137,
+ - 36,
+ 269,
+ - 75,
+ - 214,
+ 120,
+ 43,
+ - 529,
+ - 477,
+ 459,
+ 164,
+ - 202,
+ - 229,
+ - 49,
+ - 167,
+ 609,
+ 792,
+ 98,
+ - 220,
+ 915,
+ 148,
+ 293,
+ 283,
+ 869,
+ 91,
+ 575,
+ 394,
+ 326,
+ - 78,
+ 717,
+ 67,
+ 365,
+ - 323,
+ 616,
+ - 36,
+ 731,
+ 27,
+ 619,
+ 238,
+ 632,
+ 273,
+ 448,
+ 99,
+ 801,
+ 476,
+ 869,
+ 273,
+ 685,
+ 64,
+ 789,
+ 72,
+ 1021,
+ 217,
+ 793,
+ 459,
+ 734,
+ 360,
+ 646,
+ 480,
+ 360,
+ 322,
+ 429,
+ 464,
+ 638,
+ 430,
+ 756,
+ 363,
+ 1000,
+ 404,
+ 683,
+ 528,
+ 602,
+ 615,
+ 655,
+ 413,
+ 946,
+ 687,
+ 937,
+ 602,
+ 904,
+ 604,
+ 555,
+ 737,
+ 786,
+ 662,
+ 467,
+ 654,
+ 362,
+ 589,
+ 929,
+ 710,
+ 498,
+ 478,
+ 415,
+ 420,
+ 693,
+ 883,
+ 813,
+ 683,
+ 781,
+ 925,
+ 913,
+ 939,
+ 726,
+ 732,
+ 491,
+ 853,
+ 531,
+ 948,
+ 734,
+ 963,
+ 315,
+ 808,
+ 761,
+ 755,
+ 1144,
+ 760,
+ 655,
+ 1076,
+ 826,
+ 1057,
+ 1091,
+ 838,
+ 1003,
+ 808,
+ 1047,
+ 1133,
+ 659,
+ 1101,
+ 992,
+ 1050,
+ 1074,
+ 1075,
+ 971,
+ 694,
+ 1226,
+ 1054,
+ 571,
+ 841,
+ 884,
+ 1404,
+ 1379,
+ 1096,
+ 1080,
+ 861,
+ 1231,
+ 735,
+ 1284,
+ 760,
+ 1272,
+ 991,
+ 1367,
+ 1053,
+ 1257,
+ 700,
+ 1050,
+ 534,
+ 988,
+ 453,
+ 1264,
+ 599,
+ 1140,
+ 679,
+ 1621,
+ 815,
+ 1384,
+ 521,
+ 1317,
+ 393,
+ 1564,
+ 805,
+ 1448,
+ 686,
+ 1068,
+ 648,
+ 875,
+ 307,
+ 1083,
+ 361,
+ 1047,
+ 317,
+ 1417,
+ 964,
+ 675,
+ 571,
+ 1152,
+ 79,
+ 1114,
+ - 47,
+ 1530,
+ 311,
+ 1721,
+ 314,
+ 1166,
+ 689,
+ 514,
+ - 94,
+ 349,
+ 282,
+ 1412,
+ 328,
+ 1025,
+ 487,
+ - 65,
+ 57,
+ 805,
+ 970,
+ 36,
+ 62,
+ 769,
+ - 263,
+ 791,
+ - 346,
+ 637,
+ 699,
+ - 137,
+ 620,
+ 534,
+ 541,
+ - 735,
+ 194,
+ 711,
+ 300,
+ - 268,
+ - 863,
+ 926,
+ 769,
+ - 708,
+ - 428,
+ 506,
+ 174,
+ - 892,
+ - 630,
+ 435,
+ 547,
+ - 1435,
+ - 258,
+ 621,
+ 471,
+ - 1018,
+ - 1368,
+ - 393,
+ 521,
+ - 920,
+ - 686,
+ - 25,
+ 20,
+ - 982,
+ - 1156,
+ 340,
+ 9,
+ - 1558,
+ - 1135,
+ - 352,
+ 48,
+ - 1579,
+ - 402,
+ - 887,
+ 6,
+ - 1156,
+ - 888,
+ - 548,
+ - 352,
+ - 1643,
+ - 1168,
+ - 159,
+ 610,
+ - 2024,
+ - 963,
+ - 225,
+ 193,
+ - 1656,
+ - 1960,
+ - 245,
+ - 493,
+ - 964,
+ - 1680,
+ - 936,
+ - 635,
+ - 1299,
+ - 1744,
+ - 1388,
+ - 604,
+ - 1540,
+ - 835,
+ - 1397,
+ - 135,
+ - 1588,
+ - 290,
+ - 1670,
+ - 712,
+ - 2011,
+ - 1632,
+ - 1663,
+ - 27,
+ - 2258,
+ - 811,
+ - 1157,
+ 184,
+ - 1265,
+ 189,
+ - 1367,
+ 586,
+ - 2011,
+ 201,
+ - 790,
+ 712,
+ - 1210,
+ 3,
+ - 1033,
+ 808,
+ - 1251,
+ 830,
+ - 111,
+ 635,
+ - 1636,
+ 447,
+ - 463,
+ - 949,
+ - 445,
+ - 928,
+ - 504,
+ - 1162,
+ - 501,
+ - 1211,
+ 144,
+ - 351,
+ - 372,
+ - 1052,
+ - 283,
+ - 1059,
+ - 279,
+ - 1123,
+ - 575,
+ - 1438,
+ - 587,
+ - 1614,
+ - 935,
+ - 984,
+ 229,
+ 690,
+ - 921,
+ - 719,
+ - 403,
+ 1362,
+ - 685,
+ - 465,
+ 874,
+ 397,
+ - 509,
+ - 46,
+ 317,
+ 1334,
+ - 485,
+ 456,
+ 813,
+ 439,
+ - 411,
+ 339,
+ 898,
+ 1067,
+ - 425,
+ 46,
+ 1441,
+ 497,
+ - 909,
+ - 800,
+ 1465,
+ 1046,
+ - 254,
+ - 321,
+ 1430,
+ 1165,
+ 68,
+ 350,
+ 1034,
+ 666,
+ 370,
+ 11,
+ 1311,
+ 790,
+ 143,
+ 232,
+ 1041,
+ 1562,
+ - 114,
+ 663,
+ 1616,
+ 1078,
+ 454,
+ 579,
+ 1275,
+ 1040,
+ - 76,
+ 909,
+ 752,
+ 1067,
+ 153,
+ 512,
+ 348,
+ 1214,
+ 614,
+ 385,
+ 1843,
+ 808,
+ 269,
+ 1034,
+ 203,
+ 1086,
+ 652,
+ 1017,
+ 1783,
+ 1130,
+ 429,
+ 1327,
+ 387,
+ 1384,
+ - 49,
+ 1183,
+ - 72,
+ 1215,
+ - 416,
+ 1001,
+ 544,
+ 1749,
+ - 352,
+ 1223,
+ - 502,
+ 1199,
+ - 589,
+ 569,
+ - 227,
+ 1630,
+ - 142,
+ 1578,
+ - 230,
+ 1715,
+ - 714,
+ 1288,
+ - 838,
+ 1398,
+ 1131,
+ 1357,
+ - 208,
+ 1232,
+ 437,
+ 965,
+ - 929,
+ 818,
+ 811,
+ 1410,
+ 859,
+ 1507,
+ 164,
+ 1212,
+ 1387,
+ 1793,
+ 484,
+ 1874,
+ 456,
+ 2063,
+ 996,
+ 1170,
+ 1326,
+ 1402,
+ 1316,
+ 1360,
+ 1135,
+ 1262,
+ 1234,
+ 1618,
+ 1361,
+ 1768,
+ 1421,
+ 1227,
+ 1584,
+ 1347,
+ 854,
+ 672,
+ 1685,
+ 1566,
+ 1139,
+ 1270,
+ 2016,
+ 1825,
+ 1773,
+ 1581,
+ 1532,
+ 1460,
+ 1487,
+ 946,
+ 1659,
+ 1021,
+ 1744,
+ 1212,
+ 1392,
+ 977,
+ 1772,
+ 1161,
+ 1826,
+ 1164,
+ 1718,
+ 1429,
+ 1973,
+ 1591,
+ 1185,
+ 864,
+ 2132,
+ 1061,
+ 1799,
+ 814,
+ 1838,
+ 757,
+ 2104,
+ 1315,
+ 2054,
+ 1258,
+ 2113,
+ 915,
+ 2331,
+ 930,
+ 1467,
+ 1147,
+ 2590,
+ 1439,
+ 2245,
+ 1744,
+ 2090,
+ 1620,
+ 2358,
+ 1454,
+ 2666,
+ 1506,
+ 1876,
+ 1837,
+ 2070,
+ 1975,
+ 1739,
+ 1577,
+ 682,
+ 1289,
+ 1584,
+ 2045,
+ 1454,
+ 2098,
+ 2498,
+ 2004,
+ 2711,
+ 2066,
+ 726,
+ 1588,
+ 2756,
+ 2336,
+ 228,
+ 847,
+ 2456,
+ 1659,
+ 36,
+ 301,
+ 1942,
+ 1957,
+ - 446,
+ - 96,
+ 2154,
+ 1396,
+ 1533,
+ 1101,
+ 14,
+ 608,
+ - 923,
+ - 732,
+ 1383,
+ 1982,
+ 1345,
+ 952,
+ - 680,
+ 321,
+ 1281,
+ 1268,
+ - 1594,
+ 365,
+ 941,
+ 946,
+ - 1737,
+ - 822,
+ 2374,
+ 2787,
+ 1821,
+ 2788
+};
+
+/* 3rd LSF quantizer (MR122) */
+static const Word32 dico3_lsf_5[DICO3_SIZE_5 * 4] =
+{
+ - 1812,
+ - 2275,
+ - 1879,
+ - 2537,
+ - 1640,
+ - 1848,
+ - 1695,
+ - 2004,
+ - 1220,
+ - 1912,
+ - 1221,
+ - 2106,
+ - 1559,
+ - 1588,
+ - 1573,
+ - 1556,
+ - 1195,
+ - 1615,
+ - 1224,
+ - 1727,
+ - 1359,
+ - 1151,
+ - 1616,
+ - 1948,
+ - 1274,
+ - 1391,
+ - 1305,
+ - 1403,
+ - 1607,
+ - 1179,
+ - 1676,
+ - 1311,
+ - 1443,
+ - 1478,
+ - 1367,
+ - 898,
+ - 1256,
+ - 1059,
+ - 1331,
+ - 1134,
+ - 982,
+ - 1133,
+ - 1149,
+ - 1504,
+ - 1080,
+ - 1308,
+ - 1020,
+ - 1183,
+ - 980,
+ - 1486,
+ - 967,
+ - 1495,
+ - 988,
+ - 922,
+ - 1047,
+ - 1077,
+ - 838,
+ - 1179,
+ - 858,
+ - 1222,
+ - 1131,
+ - 1041,
+ - 1064,
+ - 767,
+ - 872,
+ - 1157,
+ - 701,
+ - 880,
+ - 706,
+ - 906,
+ - 774,
+ - 1016,
+ - 578,
+ - 1080,
+ - 801,
+ - 1478,
+ - 591,
+ - 1111,
+ - 592,
+ - 1146,
+ - 713,
+ - 1388,
+ - 640,
+ - 1376,
+ - 597,
+ - 1059,
+ - 416,
+ - 903,
+ - 686,
+ - 832,
+ - 661,
+ - 708,
+ - 444,
+ - 868,
+ - 490,
+ - 921,
+ - 374,
+ - 776,
+ - 619,
+ - 1170,
+ - 585,
+ - 549,
+ - 769,
+ - 795,
+ - 435,
+ - 659,
+ - 530,
+ - 741,
+ - 498,
+ - 837,
+ - 357,
+ - 597,
+ - 279,
+ - 871,
+ - 243,
+ - 887,
+ - 282,
+ - 665,
+ - 280,
+ - 667,
+ - 165,
+ - 560,
+ - 394,
+ - 903,
+ - 362,
+ - 410,
+ - 448,
+ - 583,
+ - 409,
+ - 574,
+ - 313,
+ - 357,
+ - 637,
+ - 548,
+ - 570,
+ - 436,
+ - 896,
+ - 504,
+ - 382,
+ - 757,
+ - 58,
+ - 481,
+ - 165,
+ - 618,
+ - 191,
+ - 374,
+ - 234,
+ - 382,
+ - 222,
+ - 683,
+ - 25,
+ - 480,
+ - 418,
+ - 359,
+ - 730,
+ - 353,
+ - 324,
+ - 157,
+ - 432,
+ - 322,
+ - 394,
+ - 303,
+ - 284,
+ - 104,
+ - 601,
+ - 289,
+ - 556,
+ - 196,
+ - 588,
+ - 150,
+ - 659,
+ - 608,
+ - 473,
+ - 24,
+ - 68,
+ - 448,
+ - 474,
+ - 8,
+ - 506,
+ - 45,
+ - 748,
+ - 184,
+ - 844,
+ - 252,
+ - 901,
+ - 91,
+ - 584,
+ - 97,
+ - 652,
+ 138,
+ - 764,
+ - 131,
+ - 678,
+ - 12,
+ - 670,
+ 165,
+ - 259,
+ - 3,
+ - 840,
+ - 107,
+ - 909,
+ 37,
+ - 992,
+ 44,
+ - 854,
+ - 415,
+ - 839,
+ 13,
+ - 1001,
+ - 271,
+ - 1026,
+ - 309,
+ - 798,
+ - 478,
+ - 832,
+ - 488,
+ - 943,
+ 168,
+ - 1112,
+ - 387,
+ - 1185,
+ - 101,
+ - 1183,
+ - 40,
+ - 941,
+ - 316,
+ - 1030,
+ - 770,
+ - 1044,
+ - 625,
+ - 1081,
+ - 538,
+ - 1224,
+ - 299,
+ - 1312,
+ - 436,
+ - 1197,
+ - 663,
+ - 1167,
+ - 161,
+ - 1216,
+ - 690,
+ - 1237,
+ - 831,
+ - 1432,
+ - 720,
+ - 1403,
+ - 493,
+ - 898,
+ - 740,
+ - 922,
+ - 801,
+ - 1102,
+ - 402,
+ - 1579,
+ - 964,
+ - 1061,
+ - 638,
+ - 1269,
+ - 1438,
+ - 1499,
+ - 934,
+ - 1502,
+ - 895,
+ - 1598,
+ - 564,
+ - 1723,
+ - 717,
+ - 606,
+ - 597,
+ - 1166,
+ - 1085,
+ - 1369,
+ - 468,
+ - 1946,
+ - 1493,
+ - 1838,
+ - 953,
+ - 1932,
+ - 931,
+ - 1499,
+ - 188,
+ - 1635,
+ - 421,
+ - 1457,
+ - 338,
+ - 1448,
+ - 22,
+ - 1942,
+ - 422,
+ - 2006,
+ - 249,
+ - 496,
+ - 114,
+ - 1910,
+ - 755,
+ - 1289,
+ 174,
+ - 1451,
+ - 109,
+ - 482,
+ - 257,
+ - 1221,
+ - 508,
+ - 1617,
+ 151,
+ - 1694,
+ 208,
+ - 654,
+ 107,
+ - 1651,
+ 29,
+ - 1141,
+ 279,
+ - 1215,
+ 306,
+ - 1228,
+ - 506,
+ - 730,
+ - 175,
+ - 1236,
+ - 101,
+ - 969,
+ 551,
+ - 870,
+ 278,
+ - 823,
+ 315,
+ - 563,
+ 376,
+ - 1051,
+ 228,
+ - 507,
+ 280,
+ - 599,
+ 281,
+ - 758,
+ 253,
+ - 305,
+ 379,
+ - 755,
+ - 134,
+ - 611,
+ 660,
+ - 824,
+ 536,
+ - 817,
+ 646,
+ - 413,
+ 49,
+ - 341,
+ 177,
+ - 453,
+ 526,
+ - 482,
+ 589,
+ - 71,
+ 339,
+ - 657,
+ 264,
+ - 244,
+ 295,
+ - 237,
+ 315,
+ - 387,
+ 569,
+ - 506,
+ - 9,
+ - 377,
+ 14,
+ - 160,
+ 661,
+ - 216,
+ 40,
+ - 308,
+ - 46,
+ 95,
+ 214,
+ - 242,
+ 167,
+ - 86,
+ 192,
+ - 56,
+ 27,
+ - 76,
+ 31,
+ 36,
+ 309,
+ - 106,
+ - 182,
+ - 113,
+ 74,
+ - 441,
+ - 22,
+ 23,
+ 139,
+ 81,
+ - 11,
+ 44,
+ 15,
+ - 87,
+ - 137,
+ - 118,
+ - 207,
+ - 158,
+ - 58,
+ 272,
+ - 92,
+ - 156,
+ - 441,
+ 8,
+ - 136,
+ 128,
+ - 221,
+ 101,
+ - 218,
+ 40,
+ - 197,
+ - 76,
+ - 456,
+ 9,
+ - 445,
+ 33,
+ - 423,
+ 226,
+ 60,
+ 73,
+ - 222,
+ 156,
+ - 399,
+ 280,
+ - 318,
+ 245,
+ - 341,
+ 166,
+ - 499,
+ 339,
+ - 190,
+ 327,
+ - 219,
+ 325,
+ - 137,
+ - 89,
+ - 596,
+ 100,
+ - 627,
+ 144,
+ - 677,
+ 487,
+ 28,
+ 252,
+ - 391,
+ 214,
+ - 41,
+ 282,
+ - 28,
+ 99,
+ - 286,
+ 331,
+ 49,
+ 459,
+ - 388,
+ 565,
+ - 369,
+ 436,
+ 28,
+ 336,
+ - 9,
+ 397,
+ - 167,
+ 618,
+ 34,
+ 596,
+ - 17,
+ 561,
+ - 140,
+ 299,
+ 79,
+ 522,
+ 125,
+ 203,
+ 2,
+ 244,
+ 288,
+ 255,
+ 211,
+ 175,
+ 82,
+ 596,
+ 187,
+ 517,
+ 108,
+ 381,
+ 255,
+ 365,
+ 297,
+ 497,
+ 352,
+ 327,
+ - 82,
+ 25,
+ 210,
+ 371,
+ 245,
+ 261,
+ 3,
+ 545,
+ 449,
+ 140,
+ 294,
+ 44,
+ 295,
+ 212,
+ 347,
+ 244,
+ 494,
+ 331,
+ 528,
+ 201,
+ 307,
+ 349,
+ 411,
+ 613,
+ 284,
+ 614,
+ 413,
+ 464,
+ 322,
+ 624,
+ 397,
+ 97,
+ 200,
+ - 160,
+ 384,
+ 149,
+ 362,
+ 495,
+ 525,
+ 269,
+ 585,
+ 33,
+ 491,
+ - 121,
+ 433,
+ 427,
+ 611,
+ 498,
+ 516,
+ 171,
+ 443,
+ 497,
+ 666,
+ 440,
+ 275,
+ 566,
+ 575,
+ 146,
+ 639,
+ 155,
+ 670,
+ - 33,
+ 173,
+ 212,
+ 696,
+ - 166,
+ 601,
+ - 191,
+ 695,
+ - 489,
+ 503,
+ 175,
+ 742,
+ 214,
+ 476,
+ 372,
+ 1083,
+ 578,
+ 530,
+ 586,
+ 777,
+ 425,
+ 874,
+ 315,
+ 841,
+ 374,
+ 848,
+ - 165,
+ 565,
+ 35,
+ 991,
+ - 39,
+ 1062,
+ 329,
+ 712,
+ 786,
+ 840,
+ 645,
+ 795,
+ 661,
+ 676,
+ 571,
+ 918,
+ 632,
+ 1079,
+ 673,
+ 817,
+ 318,
+ 388,
+ 874,
+ 1012,
+ 564,
+ 848,
+ 880,
+ 620,
+ 557,
+ 479,
+ 671,
+ 453,
+ 692,
+ 468,
+ 840,
+ 642,
+ 844,
+ 645,
+ 506,
+ 428,
+ 897,
+ 567,
+ 837,
+ 387,
+ 962,
+ 499,
+ 691,
+ 561,
+ 939,
+ 926,
+ 783,
+ 296,
+ 790,
+ 268,
+ 1028,
+ 530,
+ 874,
+ 329,
+ 548,
+ 143,
+ 675,
+ 291,
+ 503,
+ 66,
+ 1041,
+ 359,
+ 786,
+ 97,
+ 805,
+ 33,
+ 837,
+ 470,
+ 511,
+ 49,
+ 1092,
+ 327,
+ 1174,
+ 323,
+ 3,
+ 242,
+ 872,
+ 474,
+ 689,
+ 429,
+ 1329,
+ 678,
+ 1042,
+ 620,
+ 1109,
+ 664,
+ 321,
+ 193,
+ 889,
+ 950,
+ 1153,
+ 874,
+ 893,
+ 635,
+ 877,
+ 862,
+ 948,
+ 913,
+ 1293,
+ 665,
+ 1320,
+ 639,
+ 997,
+ 793,
+ 1402,
+ 1030,
+ 1176,
+ 1012,
+ 1110,
+ 959,
+ 1410,
+ 925,
+ 1403,
+ 915,
+ 543,
+ 862,
+ 1116,
+ 1222,
+ 835,
+ 1190,
+ 835,
+ 1190,
+ 959,
+ 1148,
+ 1147,
+ 1376,
+ 1300,
+ 1193,
+ 1415,
+ 1231,
+ 1335,
+ 1341,
+ 746,
+ 1092,
+ 1711,
+ 1283,
+ 1389,
+ 1073,
+ 1334,
+ 1566,
+ 1153,
+ 1475,
+ 1645,
+ 1137,
+ 1825,
+ 1220,
+ 1056,
+ 1382,
+ 1521,
+ 1730,
+ 1632,
+ 1545,
+ 1620,
+ 1542,
+ 855,
+ 1596,
+ 865,
+ 1667,
+ 693,
+ 885,
+ 1716,
+ 1519,
+ 1167,
+ 1296,
+ 2209,
+ 1760,
+ 1952,
+ 1493,
+ 2020,
+ 1482,
+ 1534,
+ 1866,
+ 1694,
+ 2008,
+ 1566,
+ 748,
+ 1761,
+ 825,
+ 294,
+ 1392,
+ 1084,
+ 2058,
+ 621,
+ 1315,
+ 365,
+ 1287,
+ 198,
+ 1028,
+ 488,
+ 1408,
+ 249,
+ 403,
+ 1014,
+ 1561,
+ 324,
+ 363,
+ 1645,
+ 1044,
+ 193,
+ 367,
+ 2034,
+ 1859,
+ - 251,
+ 579,
+ 750,
+ 994,
+ - 243,
+ 30,
+ 1325,
+ 879,
+ - 28,
+ - 169,
+ 624,
+ 917,
+ - 453,
+ 159,
+ 186,
+ 1370,
+ - 614,
+ 6,
+ 537,
+ 392,
+ - 94,
+ - 291,
+ 781,
+ 229,
+ - 128,
+ - 298,
+ 245,
+ 491,
+ - 701,
+ - 648,
+ 972,
+ 789,
+ - 501,
+ - 640,
+ 178,
+ 255,
+ - 365,
+ - 390,
+ - 255,
+ 317,
+ - 958,
+ - 294,
+ - 191,
+ 228,
+ - 775,
+ - 447,
+ 157,
+ - 237,
+ - 657,
+ - 720,
+ - 407,
+ 92,
+ - 117,
+ - 611,
+ 334,
+ - 230,
+ - 679,
+ - 1084,
+ - 144,
+ - 317,
+ - 901,
+ - 861,
+ - 738,
+ - 360,
+ - 85,
+ - 727,
+ - 90,
+ - 787,
+ 100,
+ - 22,
+ - 391,
+ - 263,
+ - 56,
+ - 73,
+ - 337,
+ - 754,
+ 5,
+ - 189,
+ - 706,
+ - 624,
+ 89,
+ - 344,
+ - 135,
+ - 1113,
+ - 353,
+ - 237,
+ - 684,
+ - 1135,
+ - 275,
+ - 1102,
+ - 269,
+ - 1203,
+ 152,
+ 145,
+ - 722,
+ - 1232,
+ 49,
+ 80,
+ - 1248,
+ - 776,
+ - 248,
+ 391,
+ - 732,
+ - 547,
+ 469,
+ 218,
+ - 255,
+ - 864,
+ 69,
+ 366,
+ - 166,
+ - 485,
+ - 688,
+ 191,
+ - 1212,
+ - 1196,
+ - 170,
+ - 169,
+ - 1308,
+ - 1631,
+ 321,
+ 470,
+ - 1419,
+ - 1243,
+ - 64,
+ 272,
+ - 1361,
+ - 248,
+ 492,
+ 565,
+ - 721,
+ - 609,
+ 195,
+ 485,
+ - 573,
+ - 133,
+ 427,
+ 202,
+ - 171,
+ - 118,
+ 199,
+ 575,
+ 2,
+ - 31,
+ 694,
+ 755,
+ - 1366,
+ - 39,
+ 552,
+ 557,
+ - 489,
+ 271,
+ 680,
+ 537,
+ 13,
+ - 453,
+ 855,
+ 954,
+ - 133,
+ - 52,
+ - 81,
+ 738,
+ - 1169,
+ 637,
+ 1055,
+ 1059,
+ - 95,
+ 676,
+ 1259,
+ 1081,
+ 489,
+ 305,
+ - 449,
+ 954,
+ - 534,
+ 996,
+ - 969,
+ 866,
+ - 1058,
+ 1059,
+ - 1294,
+ 618,
+ - 1416,
+ 617,
+ - 458,
+ 1366,
+ - 159,
+ 1821,
+ - 774,
+ - 528,
+ - 14,
+ 1110,
+ - 1202,
+ - 901,
+ - 772,
+ 433,
+ - 1256,
+ - 1255,
+ - 1011,
+ - 302,
+ - 602,
+ - 585,
+ - 759,
+ - 1618,
+ - 760,
+ - 1549,
+ - 840,
+ - 1921,
+ - 816,
+ - 539,
+ - 1769,
+ - 2235,
+ - 227,
+ - 36,
+ - 2034,
+ - 1831,
+ - 2107,
+ - 1126,
+ - 2471,
+ - 1816,
+ - 1470,
+ 252,
+ - 2701,
+ - 415,
+ - 571,
+ - 467,
+ 1509,
+ 1554,
+ 2180,
+ 1975,
+ 2326,
+ 2020
+};
+
+/* 4th LSF quantizer (MR122) */
+static const Word32 dico4_lsf_5[DICO4_SIZE_5 * 4] =
+{
+ - 1857,
+ - 1681,
+ - 1857,
+ - 1755,
+ - 2056,
+ - 1150,
+ - 2134,
+ - 1654,
+ - 1619,
+ - 1099,
+ - 1704,
+ - 1131,
+ - 1345,
+ - 1608,
+ - 1359,
+ - 1638,
+ - 1338,
+ - 1293,
+ - 1325,
+ - 1265,
+ - 1664,
+ - 1649,
+ - 1487,
+ - 851,
+ - 1346,
+ - 1832,
+ - 1413,
+ - 2188,
+ - 1282,
+ - 681,
+ - 1785,
+ - 1649,
+ - 966,
+ - 1082,
+ - 1183,
+ - 1676,
+ - 1054,
+ - 1073,
+ - 1142,
+ - 1158,
+ - 1207,
+ - 744,
+ - 1274,
+ - 997,
+ - 934,
+ - 1383,
+ - 927,
+ - 1416,
+ - 1010,
+ - 1305,
+ - 783,
+ - 955,
+ - 1049,
+ - 900,
+ - 993,
+ - 817,
+ - 737,
+ - 823,
+ - 972,
+ - 1189,
+ - 738,
+ - 1094,
+ - 738,
+ - 1154,
+ - 784,
+ - 801,
+ - 810,
+ - 786,
+ - 892,
+ - 520,
+ - 1000,
+ - 818,
+ - 644,
+ - 965,
+ - 577,
+ - 882,
+ - 541,
+ - 694,
+ - 671,
+ - 917,
+ - 595,
+ - 642,
+ - 646,
+ - 615,
+ - 956,
+ - 621,
+ - 925,
+ - 515,
+ - 727,
+ - 483,
+ - 815,
+ - 485,
+ - 840,
+ - 578,
+ - 440,
+ - 713,
+ - 578,
+ - 325,
+ - 657,
+ - 670,
+ - 386,
+ - 570,
+ - 441,
+ - 666,
+ - 514,
+ - 787,
+ - 392,
+ - 529,
+ - 522,
+ - 453,
+ - 487,
+ - 423,
+ - 616,
+ - 585,
+ - 617,
+ - 157,
+ - 662,
+ - 268,
+ - 680,
+ - 348,
+ - 322,
+ - 323,
+ - 632,
+ - 444,
+ - 304,
+ - 430,
+ - 332,
+ - 458,
+ - 277,
+ - 468,
+ - 659,
+ - 793,
+ - 319,
+ - 636,
+ - 227,
+ - 554,
+ - 373,
+ - 347,
+ - 334,
+ - 210,
+ - 456,
+ - 192,
+ - 530,
+ - 242,
+ - 216,
+ - 198,
+ - 366,
+ - 370,
+ - 338,
+ - 161,
+ - 409,
+ - 748,
+ - 107,
+ - 380,
+ - 294,
+ - 643,
+ - 223,
+ - 665,
+ - 234,
+ - 741,
+ - 141,
+ - 496,
+ - 130,
+ - 510,
+ - 139,
+ - 327,
+ - 172,
+ - 305,
+ - 306,
+ - 580,
+ - 164,
+ - 263,
+ - 262,
+ - 172,
+ - 67,
+ - 402,
+ 31,
+ - 366,
+ - 10,
+ - 436,
+ - 86,
+ - 527,
+ 71,
+ - 377,
+ - 22,
+ - 609,
+ - 12,
+ - 678,
+ - 67,
+ - 319,
+ 63,
+ - 191,
+ 35,
+ - 181,
+ - 39,
+ - 242,
+ 126,
+ - 167,
+ - 140,
+ - 544,
+ 155,
+ - 297,
+ 174,
+ - 297,
+ 38,
+ - 8,
+ 117,
+ - 380,
+ 197,
+ - 452,
+ 240,
+ - 522,
+ 223,
+ - 103,
+ 110,
+ - 187,
+ 87,
+ - 155,
+ 169,
+ - 47,
+ 157,
+ 26,
+ - 83,
+ - 100,
+ 128,
+ 80,
+ 209,
+ - 62,
+ 6,
+ 7,
+ 22,
+ 5,
+ 318,
+ - 20,
+ 248,
+ - 45,
+ - 200,
+ - 63,
+ 156,
+ - 69,
+ 250,
+ - 183,
+ 369,
+ - 126,
+ - 113,
+ - 76,
+ - 142,
+ - 122,
+ - 64,
+ - 254,
+ - 31,
+ 35,
+ - 177,
+ - 71,
+ - 7,
+ 171,
+ 93,
+ 27,
+ 108,
+ 212,
+ - 330,
+ - 209,
+ - 123,
+ - 70,
+ - 279,
+ 95,
+ - 96,
+ 20,
+ - 188,
+ - 61,
+ - 314,
+ 87,
+ - 300,
+ - 78,
+ - 354,
+ - 134,
+ 11,
+ 122,
+ - 140,
+ 122,
+ - 275,
+ 152,
+ - 293,
+ 140,
+ - 82,
+ 138,
+ - 321,
+ - 111,
+ - 480,
+ - 156,
+ - 359,
+ 76,
+ - 254,
+ - 40,
+ - 635,
+ - 96,
+ - 522,
+ 79,
+ - 507,
+ 8,
+ - 268,
+ 303,
+ - 539,
+ 68,
+ - 446,
+ 61,
+ - 522,
+ 306,
+ 111,
+ 189,
+ - 435,
+ 122,
+ - 379,
+ 166,
+ - 571,
+ - 398,
+ - 632,
+ - 74,
+ - 747,
+ - 95,
+ - 455,
+ 194,
+ - 952,
+ 83,
+ - 798,
+ 192,
+ - 755,
+ 192,
+ - 781,
+ - 162,
+ - 619,
+ 234,
+ - 663,
+ - 297,
+ - 488,
+ - 109,
+ - 964,
+ - 132,
+ - 838,
+ - 68,
+ - 843,
+ 58,
+ - 1112,
+ - 86,
+ - 805,
+ - 299,
+ - 944,
+ - 253,
+ - 778,
+ - 50,
+ - 965,
+ - 549,
+ - 352,
+ - 98,
+ - 992,
+ - 343,
+ - 1117,
+ - 315,
+ - 1117,
+ - 307,
+ - 1155,
+ - 374,
+ - 637,
+ - 230,
+ - 1166,
+ - 43,
+ - 1299,
+ - 100,
+ - 925,
+ - 393,
+ - 1274,
+ - 600,
+ - 689,
+ - 130,
+ - 1479,
+ - 312,
+ - 1321,
+ - 254,
+ - 1464,
+ - 442,
+ - 1292,
+ - 613,
+ - 1261,
+ - 503,
+ - 1501,
+ - 368,
+ - 1322,
+ 26,
+ - 1432,
+ - 66,
+ - 1743,
+ - 161,
+ - 1644,
+ - 467,
+ - 1760,
+ - 548,
+ - 1393,
+ - 568,
+ - 1556,
+ - 871,
+ - 1495,
+ - 1034,
+ - 1387,
+ - 571,
+ - 1917,
+ - 528,
+ - 1783,
+ - 123,
+ - 1897,
+ - 231,
+ - 2054,
+ - 323,
+ - 2052,
+ - 906,
+ - 1976,
+ - 567,
+ - 1917,
+ - 620,
+ - 2047,
+ - 989,
+ - 1077,
+ - 370,
+ - 2031,
+ - 704,
+ - 2355,
+ - 749,
+ - 2740,
+ - 1089,
+ - 1909,
+ 159,
+ - 2012,
+ 248,
+ - 626,
+ - 123,
+ - 2339,
+ - 962,
+ - 669,
+ - 408,
+ - 1379,
+ - 1174,
+ - 452,
+ - 364,
+ - 1044,
+ - 735,
+ - 132,
+ 183,
+ - 1620,
+ - 752,
+ - 547,
+ - 307,
+ - 777,
+ - 1261,
+ - 98,
+ 41,
+ - 880,
+ - 1091,
+ - 257,
+ 97,
+ - 1602,
+ - 1833,
+ 31,
+ - 26,
+ - 644,
+ - 561,
+ - 180,
+ - 546,
+ - 385,
+ - 1095,
+ - 410,
+ - 802,
+ - 414,
+ - 827,
+ - 457,
+ - 970,
+ - 490,
+ - 1109,
+ - 215,
+ - 916,
+ - 144,
+ - 937,
+ - 493,
+ - 1269,
+ - 517,
+ - 1507,
+ 181,
+ 101,
+ - 332,
+ - 889,
+ - 836,
+ - 937,
+ - 559,
+ - 429,
+ - 629,
+ - 547,
+ - 183,
+ - 337,
+ - 545,
+ - 82,
+ - 250,
+ - 286,
+ 5,
+ - 132,
+ - 348,
+ - 252,
+ - 293,
+ - 472,
+ - 158,
+ 100,
+ - 29,
+ 197,
+ - 236,
+ - 424,
+ - 861,
+ - 213,
+ - 140,
+ - 7,
+ - 427,
+ - 443,
+ 187,
+ - 97,
+ - 684,
+ - 736,
+ - 293,
+ 258,
+ - 368,
+ - 152,
+ - 150,
+ 392,
+ - 609,
+ 175,
+ - 142,
+ 299,
+ - 138,
+ 152,
+ - 119,
+ 329,
+ - 486,
+ - 52,
+ 293,
+ 198,
+ - 183,
+ 117,
+ 175,
+ 331,
+ - 58,
+ - 274,
+ 231,
+ 300,
+ - 288,
+ 330,
+ - 305,
+ 372,
+ - 111,
+ 409,
+ - 9,
+ 423,
+ 83,
+ 256,
+ 67,
+ 367,
+ - 19,
+ 248,
+ 91,
+ 113,
+ - 35,
+ 406,
+ - 191,
+ 154,
+ 238,
+ 296,
+ 5,
+ 197,
+ 141,
+ 221,
+ 313,
+ 198,
+ 211,
+ 421,
+ 244,
+ 334,
+ 88,
+ 426,
+ - 243,
+ 454,
+ 202,
+ 552,
+ - 5,
+ 403,
+ 291,
+ 185,
+ 219,
+ 301,
+ 251,
+ 138,
+ 128,
+ 69,
+ 197,
+ 288,
+ - 140,
+ - 61,
+ 188,
+ 361,
+ 197,
+ 598,
+ 442,
+ 273,
+ 290,
+ 143,
+ 472,
+ 482,
+ 157,
+ 370,
+ 415,
+ 321,
+ 372,
+ 385,
+ 402,
+ 552,
+ 155,
+ 24,
+ 550,
+ 263,
+ - 11,
+ 21,
+ 360,
+ 227,
+ 147,
+ - 254,
+ 424,
+ 97,
+ 366,
+ - 13,
+ 375,
+ 141,
+ 449,
+ 232,
+ 396,
+ 507,
+ 474,
+ 272,
+ 701,
+ 324,
+ 362,
+ - 47,
+ 587,
+ 148,
+ 543,
+ 69,
+ 400,
+ - 51,
+ 561,
+ 59,
+ 220,
+ - 10,
+ 352,
+ 147,
+ 206,
+ 211,
+ 653,
+ 185,
+ 563,
+ 297,
+ 565,
+ 284,
+ 594,
+ 121,
+ 766,
+ 192,
+ 398,
+ 118,
+ 642,
+ 434,
+ 233,
+ 264,
+ 481,
+ 467,
+ 129,
+ - 165,
+ 699,
+ 239,
+ 90,
+ 26,
+ 342,
+ 474,
+ - 55,
+ 27,
+ 388,
+ 94,
+ - 172,
+ 0,
+ 725,
+ 379,
+ - 60,
+ 337,
+ 370,
+ 465,
+ 95,
+ 319,
+ 806,
+ 595,
+ 78,
+ 260,
+ 497,
+ 851,
+ 210,
+ 560,
+ 458,
+ 574,
+ - 464,
+ 202,
+ 497,
+ 625,
+ - 202,
+ 152,
+ 48,
+ 712,
+ - 20,
+ 566,
+ 100,
+ 715,
+ 455,
+ 468,
+ 411,
+ 605,
+ 319,
+ 646,
+ 195,
+ 615,
+ 401,
+ 538,
+ 680,
+ 739,
+ 201,
+ 667,
+ 434,
+ 954,
+ 454,
+ 425,
+ 646,
+ 491,
+ 606,
+ 681,
+ 416,
+ 508,
+ 497,
+ 822,
+ 426,
+ 815,
+ 660,
+ 647,
+ 628,
+ 716,
+ 697,
+ 466,
+ 618,
+ 457,
+ 685,
+ 460,
+ 365,
+ 309,
+ 721,
+ 567,
+ 836,
+ 601,
+ 609,
+ 300,
+ 825,
+ 459,
+ 943,
+ 687,
+ 681,
+ 533,
+ 915,
+ 598,
+ 591,
+ 243,
+ 876,
+ 451,
+ 874,
+ 420,
+ 786,
+ 317,
+ 732,
+ 220,
+ 922,
+ 317,
+ 1108,
+ 367,
+ 531,
+ 466,
+ 1028,
+ 649,
+ 1053,
+ 615,
+ 1034,
+ 553,
+ 829,
+ 602,
+ 1021,
+ 799,
+ 927,
+ 803,
+ 878,
+ 763,
+ 799,
+ 496,
+ 1373,
+ 773,
+ 585,
+ 770,
+ 803,
+ 930,
+ 1099,
+ 793,
+ 1222,
+ 862,
+ 1209,
+ 895,
+ 1025,
+ 727,
+ 772,
+ 845,
+ 1172,
+ 1115,
+ 867,
+ 1021,
+ 830,
+ 1013,
+ 841,
+ 910,
+ 506,
+ 703,
+ 1239,
+ 1077,
+ 620,
+ 819,
+ 1196,
+ 1083,
+ 1155,
+ 1081,
+ 1142,
+ 907,
+ 1547,
+ 1121,
+ 1309,
+ 648,
+ 1343,
+ 612,
+ 1484,
+ 988,
+ 1479,
+ 937,
+ 985,
+ 1328,
+ 955,
+ 1341,
+ 429,
+ 910,
+ 841,
+ 1338,
+ 564,
+ 1179,
+ 412,
+ 1156,
+ 1427,
+ 1320,
+ 1434,
+ 1330,
+ 640,
+ 760,
+ 1726,
+ 1410,
+ 190,
+ 555,
+ 1073,
+ 1005,
+ 426,
+ 257,
+ 839,
+ 980,
+ 235,
+ 231,
+ 1520,
+ 1167,
+ 109,
+ 293,
+ 1014,
+ 1569,
+ 305,
+ 142,
+ 1148,
+ 539,
+ - 291,
+ - 108,
+ 1213,
+ 972,
+ 22,
+ - 216,
+ 667,
+ 828,
+ - 482,
+ 438,
+ 453,
+ 1431,
+ - 581,
+ - 422,
+ 789,
+ 387,
+ - 358,
+ - 454,
+ 174,
+ 780,
+ - 36,
+ - 372,
+ 390,
+ - 134,
+ - 629,
+ 160,
+ - 306,
+ 751,
+ - 1258,
+ - 331,
+ 177,
+ 522,
+ - 248,
+ 574,
+ - 251,
+ 639,
+ - 531,
+ 407,
+ - 596,
+ 394,
+ - 419,
+ 789,
+ - 617,
+ 801,
+ - 986,
+ 399,
+ - 857,
+ 727,
+ - 7,
+ 518,
+ - 703,
+ 310,
+ - 1143,
+ - 24,
+ - 1002,
+ 287,
+ - 960,
+ 363,
+ - 1299,
+ 312,
+ - 1534,
+ 245,
+ - 1557,
+ 305,
+ 28,
+ 153,
+ - 859,
+ - 175,
+ - 33,
+ 332,
+ - 1398,
+ - 154,
+ 212,
+ 410,
+ - 593,
+ - 197,
+ - 1092,
+ - 704,
+ - 904,
+ - 65,
+ 282,
+ 367,
+ - 918,
+ - 686,
+ 345,
+ 93,
+ - 258,
+ - 357,
+ 696,
+ 644,
+ - 693,
+ - 28,
+ 448,
+ 493,
+ - 273,
+ 193,
+ 527,
+ 546,
+ - 243,
+ - 513,
+ 384,
+ - 136,
+ 273,
+ - 353,
+ 512,
+ - 142,
+ 537,
+ - 198,
+ 941,
+ 750,
+ 83,
+ 248,
+ 578,
+ 861,
+ - 56,
+ 592,
+ 842,
+ 44,
+ 892,
+ 24,
+ 33,
+ 890,
+ - 16,
+ 982,
+ 831,
+ 1398,
+ 1535,
+ 1898,
+ 1716,
+ 1376,
+ 1948,
+ 1465
+};
+
+/* 5th LSF quantizer (MR122) */
+static const Word32 dico5_lsf_5[DICO5_SIZE_5 * 4] =
+{
+ - 1002,
+ - 929,
+ - 1096,
+ - 1203,
+ - 641,
+ - 931,
+ - 604,
+ - 961,
+ - 779,
+ - 673,
+ - 835,
+ - 788,
+ - 416,
+ - 664,
+ - 458,
+ - 766,
+ - 652,
+ - 521,
+ - 662,
+ - 495,
+ - 1023,
+ - 509,
+ - 1023,
+ - 428,
+ - 444,
+ - 552,
+ - 368,
+ - 449,
+ - 479,
+ - 211,
+ - 1054,
+ - 903,
+ - 316,
+ - 249,
+ - 569,
+ - 591,
+ - 569,
+ - 275,
+ - 541,
+ - 191,
+ - 716,
+ - 188,
+ - 842,
+ - 264,
+ - 333,
+ - 248,
+ - 318,
+ - 228,
+ - 275,
+ 1,
+ - 567,
+ - 228,
+ - 115,
+ - 221,
+ - 238,
+ - 374,
+ - 197,
+ - 507,
+ - 222,
+ - 579,
+ - 258,
+ - 432,
+ - 61,
+ - 244,
+ - 345,
+ 2,
+ - 338,
+ 39,
+ - 215,
+ - 169,
+ - 58,
+ 0,
+ - 56,
+ - 6,
+ - 203,
+ - 131,
+ 1,
+ - 186,
+ - 5,
+ - 211,
+ 6,
+ - 380,
+ 11,
+ - 418,
+ - 116,
+ 131,
+ - 134,
+ 113,
+ 89,
+ - 4,
+ 71,
+ - 2,
+ - 19,
+ - 192,
+ 262,
+ 24,
+ 189,
+ 151,
+ - 133,
+ - 109,
+ 186,
+ - 153,
+ 166,
+ - 219,
+ 37,
+ 139,
+ 193,
+ 171,
+ 337,
+ 124,
+ 158,
+ - 61,
+ 141,
+ 226,
+ - 13,
+ 190,
+ 231,
+ 34,
+ 354,
+ 109,
+ 316,
+ 201,
+ 244,
+ 164,
+ 330,
+ - 85,
+ 390,
+ - 84,
+ 254,
+ 327,
+ 257,
+ 335,
+ 491,
+ 147,
+ 476,
+ 105,
+ 54,
+ 77,
+ 437,
+ 370,
+ 421,
+ 314,
+ 449,
+ 342,
+ 329,
+ 126,
+ 673,
+ 292,
+ 571,
+ 388,
+ 243,
+ 193,
+ 653,
+ 320,
+ 621,
+ 280,
+ 194,
+ 380,
+ 517,
+ 581,
+ 45,
+ 323,
+ 111,
+ 422,
+ 489,
+ 395,
+ 734,
+ 534,
+ 622,
+ 546,
+ 486,
+ 502,
+ 318,
+ 572,
+ 189,
+ 550,
+ 385,
+ 422,
+ - 157,
+ 153,
+ - 125,
+ 382,
+ - 197,
+ 386,
+ - 263,
+ 334,
+ 228,
+ 697,
+ - 188,
+ 1,
+ 51,
+ 297,
+ - 507,
+ 213,
+ - 376,
+ 397,
+ - 24,
+ 255,
+ - 547,
+ 89,
+ - 502,
+ - 94,
+ 387,
+ 179,
+ - 620,
+ 68,
+ - 684,
+ 112,
+ - 642,
+ - 350,
+ - 260,
+ 172,
+ - 438,
+ - 324,
+ 264,
+ 648,
+ - 964,
+ - 4,
+ - 1121,
+ 7,
+ - 134,
+ 134,
+ - 1133,
+ - 306,
+ 143,
+ 96,
+ - 420,
+ - 497,
+ - 1221,
+ - 350,
+ - 1527,
+ - 685,
+ - 161,
+ 72,
+ 873,
+ 691,
+ 732,
+ 283,
+ 921,
+ 353,
+ 334,
+ 475,
+ 1095,
+ 821,
+ 864,
+ 524,
+ 843,
+ 497,
+ 714,
+ 711,
+ 788,
+ 750,
+ 1076,
+ 714,
+ 1204,
+ 753
+};
+
+
+
+/* Scaling factors for the lsp variability operation */
+static const Word16 lsf_hist_mean_scale[M] =
+{
+ 20000,
+ 20000,
+ 20000,
+ 20000,
+ 20000,
+ 18000,
+ 16384,
+ 8192,
+ 0,
+ 0
+};
+
+/*
+ * The tables contain the following data:
+ *
+ * g_pitch (Q14),
+ * g_fac (Q12), (g_code = g_code0*g_fac),
+ * qua_ener_MR122 (Q10), (log2(g_fac))
+ * qua_ener (Q10) (20*log10(g_fac))
+ *
+ * The log2() and log10() values are calculated on the fixed point value
+ * (g_fac Q12) and not on the original floating point value of g_fac
+ * to make the quantizer/MA predictor use corresponding values.
+ */
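+
+/*
+ * Editor's note: the block below is an illustrative sketch, not part of the
+ * 3GPP reference source, and is compiled out. It only restates the relation
+ * documented above in floating point: g_fac is Q12 (4096 == 1.0) and the
+ * energy columns are Q10. For example, the low-rate entry with g_fac = 655
+ * gives roughly -2708 (log2) and -16305 (20*log10), matching
+ * table_gain_lowrates further below.
+ */
+#if 0
+#include <math.h>   /* only needed for this sketch */
+
+static void qua_ener_from_g_fac( double g_fac_q12,
+                                 double *qua_ener_MR122_q10,
+                                 double *qua_ener_q10 )
+{
+   double g_fac = g_fac_q12 / 4096.0;                            /* Q12 -> linear */
+
+   *qua_ener_MR122_q10 = ( log( g_fac ) / log( 2.0 ) ) * 1024.0; /* log2, Q10 */
+   *qua_ener_q10 = 20.0 * log10( g_fac ) * 1024.0;               /* dB,   Q10 */
+}
+#endif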
+
+#define MR475_VQ_SIZE 256
+
+/* The table contains the following data:
+ *
+ * g_pitch(0) (Q14) for sub-
+ * g_fac(0) (Q12) frame 0 and 2
+ * g_pitch(1) (Q14) for sub-
+ * g_fac(1) (Q12) frame 1 and 3
+ *
+ */
+static const Word32 table_gain_MR475[MR475_VQ_SIZE * 4] =
+{
+/*
+ * g_pit(0),
+ * g_fac(0),
+ * g_pit(1),
+ * g_fac(1)
+ */ 812,
+ 128,
+ 542,
+ 140,
+ 2873,
+ 1135,
+ 2266,
+ 3402,
+ 2067,
+ 563,
+ 12677,
+ 647,
+ 4132,
+ 1798,
+ 5601,
+ 5285,
+ 7689,
+ 374,
+ 3735,
+ 441,
+ 10912,
+ 2638,
+ 11807,
+ 2494,
+ 20490,
+ 797,
+ 5218,
+ 675,
+ 6724,
+ 8354,
+ 5282,
+ 1696,
+ 1488,
+ 428,
+ 5882,
+ 452,
+ 5332,
+ 4072,
+ 3583,
+ 1268,
+ 2469,
+ 901,
+ 15894,
+ 1005,
+ 14982,
+ 3271,
+ 10331,
+ 4858,
+ 3635,
+ 2021,
+ 2596,
+ 835,
+ 12360,
+ 4892,
+ 12206,
+ 1704,
+ 13432,
+ 1604,
+ 9118,
+ 2341,
+ 3968,
+ 1538,
+ 5479,
+ 9936,
+ 3795,
+ 417,
+ 1359,
+ 414,
+ 3640,
+ 1569,
+ 7995,
+ 3541,
+ 11405,
+ 645,
+ 8552,
+ 635,
+ 4056,
+ 1377,
+ 16608,
+ 6124,
+ 11420,
+ 700,
+ 2007,
+ 607,
+ 12415,
+ 1578,
+ 11119,
+ 4654,
+ 13680,
+ 1708,
+ 11990,
+ 1229,
+ 7996,
+ 7297,
+ 13231,
+ 5715,
+ 2428,
+ 1159,
+ 2073,
+ 1941,
+ 6218,
+ 6121,
+ 3546,
+ 1804,
+ 8925,
+ 1802,
+ 8679,
+ 1580,
+ 13935,
+ 3576,
+ 13313,
+ 6237,
+ 6142,
+ 1130,
+ 5994,
+ 1734,
+ 14141,
+ 4662,
+ 11271,
+ 3321,
+ 12226,
+ 1551,
+ 13931,
+ 3015,
+ 5081,
+ 10464,
+ 9444,
+ 6706,
+ 1689,
+ 683,
+ 1436,
+ 1306,
+ 7212,
+ 3933,
+ 4082,
+ 2713,
+ 7793,
+ 704,
+ 15070,
+ 802,
+ 6299,
+ 5212,
+ 4337,
+ 5357,
+ 6676,
+ 541,
+ 6062,
+ 626,
+ 13651,
+ 3700,
+ 11498,
+ 2408,
+ 16156,
+ 716,
+ 12177,
+ 751,
+ 8065,
+ 11489,
+ 6314,
+ 2256,
+ 4466,
+ 496,
+ 7293,
+ 523,
+ 10213,
+ 3833,
+ 8394,
+ 3037,
+ 8403,
+ 966,
+ 14228,
+ 1880,
+ 8703,
+ 5409,
+ 16395,
+ 4863,
+ 7420,
+ 1979,
+ 6089,
+ 1230,
+ 9371,
+ 4398,
+ 14558,
+ 3363,
+ 13559,
+ 2873,
+ 13163,
+ 1465,
+ 5534,
+ 1678,
+ 13138,
+ 14771,
+ 7338,
+ 600,
+ 1318,
+ 548,
+ 4252,
+ 3539,
+ 10044,
+ 2364,
+ 10587,
+ 622,
+ 13088,
+ 669,
+ 14126,
+ 3526,
+ 5039,
+ 9784,
+ 15338,
+ 619,
+ 3115,
+ 590,
+ 16442,
+ 3013,
+ 15542,
+ 4168,
+ 15537,
+ 1611,
+ 15405,
+ 1228,
+ 16023,
+ 9299,
+ 7534,
+ 4976,
+ 1990,
+ 1213,
+ 11447,
+ 1157,
+ 12512,
+ 5519,
+ 9475,
+ 2644,
+ 7716,
+ 2034,
+ 13280,
+ 2239,
+ 16011,
+ 5093,
+ 8066,
+ 6761,
+ 10083,
+ 1413,
+ 5002,
+ 2347,
+ 12523,
+ 5975,
+ 15126,
+ 2899,
+ 18264,
+ 2289,
+ 15827,
+ 2527,
+ 16265,
+ 10254,
+ 14651,
+ 11319,
+ 1797,
+ 337,
+ 3115,
+ 397,
+ 3510,
+ 2928,
+ 4592,
+ 2670,
+ 7519,
+ 628,
+ 11415,
+ 656,
+ 5946,
+ 2435,
+ 6544,
+ 7367,
+ 8238,
+ 829,
+ 4000,
+ 863,
+ 10032,
+ 2492,
+ 16057,
+ 3551,
+ 18204,
+ 1054,
+ 6103,
+ 1454,
+ 5884,
+ 7900,
+ 18752,
+ 3468,
+ 1864,
+ 544,
+ 9198,
+ 683,
+ 11623,
+ 4160,
+ 4594,
+ 1644,
+ 3158,
+ 1157,
+ 15953,
+ 2560,
+ 12349,
+ 3733,
+ 17420,
+ 5260,
+ 6106,
+ 2004,
+ 2917,
+ 1742,
+ 16467,
+ 5257,
+ 16787,
+ 1680,
+ 17205,
+ 1759,
+ 4773,
+ 3231,
+ 7386,
+ 6035,
+ 14342,
+ 10012,
+ 4035,
+ 442,
+ 4194,
+ 458,
+ 9214,
+ 2242,
+ 7427,
+ 4217,
+ 12860,
+ 801,
+ 11186,
+ 825,
+ 12648,
+ 2084,
+ 12956,
+ 6554,
+ 9505,
+ 996,
+ 6629,
+ 985,
+ 10537,
+ 2502,
+ 15289,
+ 5006,
+ 12602,
+ 2055,
+ 15484,
+ 1653,
+ 16194,
+ 6921,
+ 14231,
+ 5790,
+ 2626,
+ 828,
+ 5615,
+ 1686,
+ 13663,
+ 5778,
+ 3668,
+ 1554,
+ 11313,
+ 2633,
+ 9770,
+ 1459,
+ 14003,
+ 4733,
+ 15897,
+ 6291,
+ 6278,
+ 1870,
+ 7910,
+ 2285,
+ 16978,
+ 4571,
+ 16576,
+ 3849,
+ 15248,
+ 2311,
+ 16023,
+ 3244,
+ 14459,
+ 17808,
+ 11847,
+ 2763,
+ 1981,
+ 1407,
+ 1400,
+ 876,
+ 4335,
+ 3547,
+ 4391,
+ 4210,
+ 5405,
+ 680,
+ 17461,
+ 781,
+ 6501,
+ 5118,
+ 8091,
+ 7677,
+ 7355,
+ 794,
+ 8333,
+ 1182,
+ 15041,
+ 3160,
+ 14928,
+ 3039,
+ 20421,
+ 880,
+ 14545,
+ 852,
+ 12337,
+ 14708,
+ 6904,
+ 1920,
+ 4225,
+ 933,
+ 8218,
+ 1087,
+ 10659,
+ 4084,
+ 10082,
+ 4533,
+ 2735,
+ 840,
+ 20657,
+ 1081,
+ 16711,
+ 5966,
+ 15873,
+ 4578,
+ 10871,
+ 2574,
+ 3773,
+ 1166,
+ 14519,
+ 4044,
+ 20699,
+ 2627,
+ 15219,
+ 2734,
+ 15274,
+ 2186,
+ 6257,
+ 3226,
+ 13125,
+ 19480,
+ 7196,
+ 930,
+ 2462,
+ 1618,
+ 4515,
+ 3092,
+ 13852,
+ 4277,
+ 10460,
+ 833,
+ 17339,
+ 810,
+ 16891,
+ 2289,
+ 15546,
+ 8217,
+ 13603,
+ 1684,
+ 3197,
+ 1834,
+ 15948,
+ 2820,
+ 15812,
+ 5327,
+ 17006,
+ 2438,
+ 16788,
+ 1326,
+ 15671,
+ 8156,
+ 11726,
+ 8556,
+ 3762,
+ 2053,
+ 9563,
+ 1317,
+ 13561,
+ 6790,
+ 12227,
+ 1936,
+ 8180,
+ 3550,
+ 13287,
+ 1778,
+ 16299,
+ 6599,
+ 16291,
+ 7758,
+ 8521,
+ 2551,
+ 7225,
+ 2645,
+ 18269,
+ 7489,
+ 16885,
+ 2248,
+ 17882,
+ 2884,
+ 17265,
+ 3328,
+ 9417,
+ 20162,
+ 11042,
+ 8320,
+ 1286,
+ 620,
+ 1431,
+ 583,
+ 5993,
+ 2289,
+ 3978,
+ 3626,
+ 5144,
+ 752,
+ 13409,
+ 830,
+ 5553,
+ 2860,
+ 11764,
+ 5908,
+ 10737,
+ 560,
+ 5446,
+ 564,
+ 13321,
+ 3008,
+ 11946,
+ 3683,
+ 19887,
+ 798,
+ 9825,
+ 728,
+ 13663,
+ 8748,
+ 7391,
+ 3053,
+ 2515,
+ 778,
+ 6050,
+ 833,
+ 6469,
+ 5074,
+ 8305,
+ 2463,
+ 6141,
+ 1865,
+ 15308,
+ 1262,
+ 14408,
+ 4547,
+ 13663,
+ 4515,
+ 3137,
+ 2983,
+ 2479,
+ 1259,
+ 15088,
+ 4647,
+ 15382,
+ 2607,
+ 14492,
+ 2392,
+ 12462,
+ 2537,
+ 7539,
+ 2949,
+ 12909,
+ 12060,
+ 5468,
+ 684,
+ 3141,
+ 722,
+ 5081,
+ 1274,
+ 12732,
+ 4200,
+ 15302,
+ 681,
+ 7819,
+ 592,
+ 6534,
+ 2021,
+ 16478,
+ 8737,
+ 13364,
+ 882,
+ 5397,
+ 899,
+ 14656,
+ 2178,
+ 14741,
+ 4227,
+ 14270,
+ 1298,
+ 13929,
+ 2029,
+ 15477,
+ 7482,
+ 15815,
+ 4572,
+ 2521,
+ 2013,
+ 5062,
+ 1804,
+ 5159,
+ 6582,
+ 7130,
+ 3597,
+ 10920,
+ 1611,
+ 11729,
+ 1708,
+ 16903,
+ 3455,
+ 16268,
+ 6640,
+ 9306,
+ 1007,
+ 9369,
+ 2106,
+ 19182,
+ 5037,
+ 12441,
+ 4269,
+ 15919,
+ 1332,
+ 15357,
+ 3512,
+ 11898,
+ 14141,
+ 16101,
+ 6854,
+ 2010,
+ 737,
+ 3779,
+ 861,
+ 11454,
+ 2880,
+ 3564,
+ 3540,
+ 9057,
+ 1241,
+ 12391,
+ 896,
+ 8546,
+ 4629,
+ 11561,
+ 5776,
+ 8129,
+ 589,
+ 8218,
+ 588,
+ 18728,
+ 3755,
+ 12973,
+ 3149,
+ 15729,
+ 758,
+ 16634,
+ 754,
+ 15222,
+ 11138,
+ 15871,
+ 2208,
+ 4673,
+ 610,
+ 10218,
+ 678,
+ 15257,
+ 4146,
+ 5729,
+ 3327,
+ 8377,
+ 1670,
+ 19862,
+ 2321,
+ 15450,
+ 5511,
+ 14054,
+ 5481,
+ 5728,
+ 2888,
+ 7580,
+ 1346,
+ 14384,
+ 5325,
+ 16236,
+ 3950,
+ 15118,
+ 3744,
+ 15306,
+ 1435,
+ 14597,
+ 4070,
+ 12301,
+ 15696,
+ 7617,
+ 1699,
+ 2170,
+ 884,
+ 4459,
+ 4567,
+ 18094,
+ 3306,
+ 12742,
+ 815,
+ 14926,
+ 907,
+ 15016,
+ 4281,
+ 15518,
+ 8368,
+ 17994,
+ 1087,
+ 2358,
+ 865,
+ 16281,
+ 3787,
+ 15679,
+ 4596,
+ 16356,
+ 1534,
+ 16584,
+ 2210,
+ 16833,
+ 9697,
+ 15929,
+ 4513,
+ 3277,
+ 1085,
+ 9643,
+ 2187,
+ 11973,
+ 6068,
+ 9199,
+ 4462,
+ 8955,
+ 1629,
+ 10289,
+ 3062,
+ 16481,
+ 5155,
+ 15466,
+ 7066,
+ 13678,
+ 2543,
+ 5273,
+ 2277,
+ 16746,
+ 6213,
+ 16655,
+ 3408,
+ 20304,
+ 3363,
+ 18688,
+ 1985,
+ 14172,
+ 12867,
+ 15154,
+ 15703,
+ 4473,
+ 1020,
+ 1681,
+ 886,
+ 4311,
+ 4301,
+ 8952,
+ 3657,
+ 5893,
+ 1147,
+ 11647,
+ 1452,
+ 15886,
+ 2227,
+ 4582,
+ 6644,
+ 6929,
+ 1205,
+ 6220,
+ 799,
+ 12415,
+ 3409,
+ 15968,
+ 3877,
+ 19859,
+ 2109,
+ 9689,
+ 2141,
+ 14742,
+ 8830,
+ 14480,
+ 2599,
+ 1817,
+ 1238,
+ 7771,
+ 813,
+ 19079,
+ 4410,
+ 5554,
+ 2064,
+ 3687,
+ 2844,
+ 17435,
+ 2256,
+ 16697,
+ 4486,
+ 16199,
+ 5388,
+ 8028,
+ 2763,
+ 3405,
+ 2119,
+ 17426,
+ 5477,
+ 13698,
+ 2786,
+ 19879,
+ 2720,
+ 9098,
+ 3880,
+ 18172,
+ 4833,
+ 17336,
+ 12207,
+ 5116,
+ 996,
+ 4935,
+ 988,
+ 9888,
+ 3081,
+ 6014,
+ 5371,
+ 15881,
+ 1667,
+ 8405,
+ 1183,
+ 15087,
+ 2366,
+ 19777,
+ 7002,
+ 11963,
+ 1562,
+ 7279,
+ 1128,
+ 16859,
+ 1532,
+ 15762,
+ 5381,
+ 14708,
+ 2065,
+ 20105,
+ 2155,
+ 17158,
+ 8245,
+ 17911,
+ 6318,
+ 5467,
+ 1504,
+ 4100,
+ 2574,
+ 17421,
+ 6810,
+ 5673,
+ 2888,
+ 16636,
+ 3382,
+ 8975,
+ 1831,
+ 20159,
+ 4737,
+ 19550,
+ 7294,
+ 6658,
+ 2781,
+ 11472,
+ 3321,
+ 19397,
+ 5054,
+ 18878,
+ 4722,
+ 16439,
+ 2373,
+ 20430,
+ 4386,
+ 11353,
+ 26526,
+ 11593,
+ 3068,
+ 2866,
+ 1566,
+ 5108,
+ 1070,
+ 9614,
+ 4915,
+ 4939,
+ 3536,
+ 7541,
+ 878,
+ 20717,
+ 851,
+ 6938,
+ 4395,
+ 16799,
+ 7733,
+ 10137,
+ 1019,
+ 9845,
+ 964,
+ 15494,
+ 3955,
+ 15459,
+ 3430,
+ 18863,
+ 982,
+ 20120,
+ 963,
+ 16876,
+ 12887,
+ 14334,
+ 4200,
+ 6599,
+ 1220,
+ 9222,
+ 814,
+ 16942,
+ 5134,
+ 5661,
+ 4898,
+ 5488,
+ 1798,
+ 20258,
+ 3962,
+ 17005,
+ 6178,
+ 17929,
+ 5929,
+ 9365,
+ 3420,
+ 7474,
+ 1971,
+ 19537,
+ 5177,
+ 19003,
+ 3006,
+ 16454,
+ 3788,
+ 16070,
+ 2367,
+ 8664,
+ 2743,
+ 9445,
+ 26358,
+ 10856,
+ 1287,
+ 3555,
+ 1009,
+ 5606,
+ 3622,
+ 19453,
+ 5512,
+ 12453,
+ 797,
+ 20634,
+ 911,
+ 15427,
+ 3066,
+ 17037,
+ 10275,
+ 18883,
+ 2633,
+ 3913,
+ 1268,
+ 19519,
+ 3371,
+ 18052,
+ 5230,
+ 19291,
+ 1678,
+ 19508,
+ 3172,
+ 18072,
+ 10754,
+ 16625,
+ 6845,
+ 3134,
+ 2298,
+ 10869,
+ 2437,
+ 15580,
+ 6913,
+ 12597,
+ 3381,
+ 11116,
+ 3297,
+ 16762,
+ 2424,
+ 18853,
+ 6715,
+ 17171,
+ 9887,
+ 12743,
+ 2605,
+ 8937,
+ 3140,
+ 19033,
+ 7764,
+ 18347,
+ 3880,
+ 20475,
+ 3682,
+ 19602,
+ 3380,
+ 13044,
+ 19373,
+ 10526,
+ 23124
+};
+
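+/*
+ * Editor's note: illustrative sketch, not part of the 3GPP reference source,
+ * compiled out. Each of the MR475_VQ_SIZE codebook entries occupies four
+ * consecutive words in the layout documented above the table, so a decoder
+ * would typically address one entry as shown here (function and parameter
+ * names are hypothetical).
+ */
+#if 0
+static void mr475_gains_from_index( Word32 index,
+                                    Word32 *g_pit_02, Word32 *g_fac_02,
+                                    Word32 *g_pit_13, Word32 *g_fac_13 )
+{
+   const Word32 *p = &table_gain_MR475[index << 2];   /* index * 4 */
+
+   *g_pit_02 = p[0];   /* g_pitch(0), Q14, subframes 0 and 2 */
+   *g_fac_02 = p[1];   /* g_fac(0),   Q12, subframes 0 and 2 */
+   *g_pit_13 = p[2];   /* g_pitch(1), Q14, subframes 1 and 3 */
+   *g_fac_13 = p[3];   /* g_fac(1),   Q12, subframes 1 and 3 */
+}
+#endif
+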
+
+/* table used in 'high' rates: MR67 MR74 */
+#define VQ_SIZE_HIGHRATES 128
+static const Word32 table_gain_highrates[VQ_SIZE_HIGHRATES * 4] =
+{
+/*
+ * Note: every 4th value (qua_ener) contains the original value from IS641
+ * to ensure bit-exactness; however, these are not exactly the
+ * rounded values of (20*log10(g_fac))
+ */ /*
+ * g_pit,
+ * g_fac,
+ * qua_ener_MR122,
+ * qua_ener
+ */ 577,
+ 662,
+ - 2692,
+ - 16214,
+ 806,
+ 1836,
+ - 1185,
+ - 7135,
+ 3109,
+ 1052,
+ - 2008,
+ - 12086,
+ 4181,
+ 1387,
+ - 1600,
+ - 9629,
+ 2373,
+ 1425,
+ - 1560,
+ - 9394,
+ 3248,
+ 1985,
+ - 1070,
+ - 6442,
+ 1827,
+ 2320,
+ - 840,
+ - 5056,
+ 941,
+ 3314,
+ - 313,
+ - 1885,
+ 2351,
+ 2977,
+ - 471,
+ - 2838,
+ 3616,
+ 2420,
+ - 777,
+ - 4681,
+ 3451,
+ 3096,
+ - 414,
+ - 2490,
+ 2955,
+ 4301,
+ 72,
+ 434,
+ 1848,
+ 4500,
+ 139,
+ 836,
+ 3884,
+ 5416,
+ 413,
+ 2484,
+ 1187,
+ 7210,
+ 835,
+ 5030,
+ 3083,
+ 9000,
+ 1163,
+ 7002,
+ 7384,
+ 883,
+ - 2267,
+ - 13647,
+ 5962,
+ 1506,
+ - 1478,
+ - 8900,
+ 5155,
+ 2134,
+ - 963,
+ - 5800,
+ 7944,
+ 2009,
+ - 1052,
+ - 6335,
+ 6507,
+ 2250,
+ - 885,
+ - 5327,
+ 7670,
+ 2752,
+ - 588,
+ - 3537,
+ 5952,
+ 3016,
+ - 452,
+ - 2724,
+ 4898,
+ 3764,
+ - 125,
+ - 751,
+ 6989,
+ 3588,
+ - 196,
+ - 1177,
+ 8174,
+ 3978,
+ - 43,
+ - 260,
+ 6064,
+ 4404,
+ 107,
+ 645,
+ 7709,
+ 5087,
+ 320,
+ 1928,
+ 5523,
+ 6021,
+ 569,
+ 3426,
+ 7769,
+ 7126,
+ 818,
+ 4926,
+ 6060,
+ 7938,
+ 977,
+ 5885,
+ 5594,
+ 11487,
+ 1523,
+ 9172,
+ 10581,
+ 1356,
+ - 1633,
+ - 9831,
+ 9049,
+ 1597,
+ - 1391,
+ - 8380,
+ 9794,
+ 2035,
+ - 1033,
+ - 6220,
+ 8946,
+ 2415,
+ - 780,
+ - 4700,
+ 10296,
+ 2584,
+ - 681,
+ - 4099,
+ 9407,
+ 2734,
+ - 597,
+ - 3595,
+ 8700,
+ 3218,
+ - 356,
+ - 2144,
+ 9757,
+ 3395,
+ - 277,
+ - 1669,
+ 10177,
+ 3892,
+ - 75,
+ - 454,
+ 9170,
+ 4528,
+ 148,
+ 891,
+ 10152,
+ 5004,
+ 296,
+ 1781,
+ 9114,
+ 5735,
+ 497,
+ 2993,
+ 10500,
+ 6266,
+ 628,
+ 3782,
+ 10110,
+ 7631,
+ 919,
+ 5534,
+ 8844,
+ 8727,
+ 1117,
+ 6728,
+ 8956,
+ 12496,
+ 1648,
+ 9921,
+ 12924,
+ 976,
+ - 2119,
+ - 12753,
+ 11435,
+ 1755,
+ - 1252,
+ - 7539,
+ 12138,
+ 2328,
+ - 835,
+ - 5024,
+ 11388,
+ 2368,
+ - 810,
+ - 4872,
+ 10700,
+ 3064,
+ - 429,
+ - 2580,
+ 12332,
+ 2861,
+ - 530,
+ - 3192,
+ 11722,
+ 3327,
+ - 307,
+ - 1848,
+ 11270,
+ 3700,
+ - 150,
+ - 904,
+ 10861,
+ 4413,
+ 110,
+ 663,
+ 12082,
+ 4533,
+ 150,
+ 902,
+ 11283,
+ 5205,
+ 354,
+ 2132,
+ 11960,
+ 6305,
+ 637,
+ 3837,
+ 11167,
+ 7534,
+ 900,
+ 5420,
+ 12128,
+ 8329,
+ 1049,
+ 6312,
+ 10969,
+ 10777,
+ 1429,
+ 8604,
+ 10300,
+ 17376,
+ 2135,
+ 12853,
+ 13899,
+ 1681,
+ - 1316,
+ - 7921,
+ 12580,
+ 2045,
+ - 1026,
+ - 6179,
+ 13265,
+ 2439,
+ - 766,
+ - 4610,
+ 14033,
+ 2989,
+ - 465,
+ - 2802,
+ 13452,
+ 3098,
+ - 413,
+ - 2482,
+ 12396,
+ 3658,
+ - 167,
+ - 1006,
+ 13510,
+ 3780,
+ - 119,
+ - 713,
+ 12880,
+ 4272,
+ 62,
+ 374,
+ 13533,
+ 4861,
+ 253,
+ 1523,
+ 12667,
+ 5457,
+ 424,
+ 2552,
+ 13854,
+ 6106,
+ 590,
+ 3551,
+ 13031,
+ 6483,
+ 678,
+ 4084,
+ 13557,
+ 7721,
+ 937,
+ 5639,
+ 12957,
+ 9311,
+ 1213,
+ 7304,
+ 13714,
+ 11551,
+ 1532,
+ 9221,
+ 12591,
+ 15206,
+ 1938,
+ 11667,
+ 15113,
+ 1540,
+ - 1445,
+ - 8700,
+ 15072,
+ 2333,
+ - 832,
+ - 5007,
+ 14527,
+ 2511,
+ - 723,
+ - 4352,
+ 14692,
+ 3199,
+ - 365,
+ - 2197,
+ 15382,
+ 3560,
+ - 207,
+ - 1247,
+ 14133,
+ 3960,
+ - 50,
+ - 300,
+ 15102,
+ 4236,
+ 50,
+ 298,
+ 14332,
+ 4824,
+ 242,
+ 1454,
+ 14846,
+ 5451,
+ 422,
+ 2542,
+ 15306,
+ 6083,
+ 584,
+ 3518,
+ 14329,
+ 6888,
+ 768,
+ 4623,
+ 15060,
+ 7689,
+ 930,
+ 5602,
+ 14406,
+ 9426,
+ 1231,
+ 7413,
+ 15387,
+ 9741,
+ 1280,
+ 7706,
+ 14824,
+ 14271,
+ 1844,
+ 11102,
+ 13600,
+ 24939,
+ 2669,
+ 16067,
+ 16396,
+ 1969,
+ - 1082,
+ - 6517,
+ 16817,
+ 2832,
+ - 545,
+ - 3283,
+ 15713,
+ 2843,
+ - 539,
+ - 3248,
+ 16104,
+ 3336,
+ - 303,
+ - 1825,
+ 16384,
+ 3963,
+ - 49,
+ - 294,
+ 16940,
+ 4579,
+ 165,
+ 992,
+ 15711,
+ 4599,
+ 171,
+ 1030,
+ 16222,
+ 5448,
+ 421,
+ 2537,
+ 16832,
+ 6382,
+ 655,
+ 3945,
+ 15745,
+ 7141,
+ 821,
+ 4944,
+ 16326,
+ 7469,
+ 888,
+ 5343,
+ 16611,
+ 8624,
+ 1100,
+ 6622,
+ 17028,
+ 10418,
+ 1379,
+ 8303,
+ 15905,
+ 11817,
+ 1565,
+ 9423,
+ 16878,
+ 14690,
+ 1887,
+ 11360,
+ 16515,
+ 20870,
+ 2406,
+ 14483,
+ 18142,
+ 2083,
+ - 999,
+ - 6013,
+ 19401,
+ 3178,
+ - 375,
+ - 2257,
+ 17508,
+ 3426,
+ - 264,
+ - 1589,
+ 20054,
+ 4027,
+ - 25,
+ - 151,
+ 18069,
+ 4249,
+ 54,
+ 326,
+ 18952,
+ 5066,
+ 314,
+ 1890,
+ 17711,
+ 5402,
+ 409,
+ 2461,
+ 19835,
+ 6192,
+ 610,
+ 3676,
+ 17950,
+ 7014,
+ 795,
+ 4784,
+ 21318,
+ 7877,
+ 966,
+ 5816,
+ 17910,
+ 9289,
+ 1210,
+ 7283,
+ 19144,
+ 9290,
+ 1210,
+ 7284,
+ 20517,
+ 11381,
+ 1510,
+ 9089,
+ 18075,
+ 14485,
+ 1866,
+ 11234,
+ 19999,
+ 17882,
+ 2177,
+ 13108,
+ 18842,
+ 32764,
+ 3072,
+ 18494
+};
+
+/* table used in 'low' rates: MR475, MR515, MR59 */
+#define VQ_SIZE_LOWRATES 64
+static const Word32 table_gain_lowrates[VQ_SIZE_LOWRATES * 4] =
+{
+/*
+ * g_pit,
+ * g_fac,
+ * qua_ener_MR122,
+ * qua_ener
+ */ 10813,
+ 28753,
+ 2879,
+ 17333,
+ 20480,
+ 2785,
+ - 570,
+ - 3431,
+ 18841,
+ 6594,
+ 703,
+ 4235,
+ 6225,
+ 7413,
+ 876,
+ 5276,
+ 17203,
+ 10444,
+ 1383,
+ 8325,
+ 21626,
+ 1269,
+ - 1731,
+ - 10422,
+ 21135,
+ 4423,
+ 113,
+ 683,
+ 11304,
+ 1556,
+ - 1430,
+ - 8609,
+ 19005,
+ 12820,
+ 1686,
+ 10148,
+ 17367,
+ 2498,
+ - 731,
+ - 4398,
+ 17858,
+ 4833,
+ 244,
+ 1472,
+ 9994,
+ 2498,
+ - 731,
+ - 4398,
+ 17530,
+ 7864,
+ 964,
+ 5802,
+ 14254,
+ 1884,
+ - 1147,
+ - 6907,
+ 15892,
+ 3153,
+ - 387,
+ - 2327,
+ 6717,
+ 1802,
+ - 1213,
+ - 7303,
+ 18186,
+ 20193,
+ 2357,
+ 14189,
+ 18022,
+ 3031,
+ - 445,
+ - 2678,
+ 16711,
+ 5857,
+ 528,
+ 3181,
+ 8847,
+ 4014,
+ - 30,
+ - 180,
+ 15892,
+ 8970,
+ 1158,
+ 6972,
+ 18022,
+ 1392,
+ - 1594,
+ - 9599,
+ 16711,
+ 4096,
+ 0,
+ 0,
+ 8192,
+ 655,
+ - 2708,
+ - 16305,
+ 15237,
+ 13926,
+ 1808,
+ 10884,
+ 14254,
+ 3112,
+ - 406,
+ - 2444,
+ 14090,
+ 4669,
+ 193,
+ 1165,
+ 5406,
+ 2703,
+ - 614,
+ - 3697,
+ 13434,
+ 6553,
+ 694,
+ 4180,
+ 12451,
+ 901,
+ - 2237,
+ - 13468,
+ 12451,
+ 2662,
+ - 637,
+ - 3833,
+ 3768,
+ 655,
+ - 2708,
+ - 16305,
+ 14745,
+ 23511,
+ 2582,
+ 15543,
+ 19169,
+ 2457,
+ - 755,
+ - 4546,
+ 20152,
+ 5079,
+ 318,
+ 1913,
+ 6881,
+ 4096,
+ 0,
+ 0,
+ 20480,
+ 8560,
+ 1089,
+ 6556,
+ 19660,
+ 737,
+ - 2534,
+ - 15255,
+ 19005,
+ 4259,
+ 58,
+ 347,
+ 7864,
+ 2088,
+ - 995,
+ - 5993,
+ 11468,
+ 12288,
+ 1623,
+ 9771,
+ 15892,
+ 1474,
+ - 1510,
+ - 9090,
+ 15728,
+ 4628,
+ 180,
+ 1086,
+ 9175,
+ 1433,
+ - 1552,
+ - 9341,
+ 16056,
+ 7004,
+ 793,
+ 4772,
+ 14827,
+ 737,
+ - 2534,
+ - 15255,
+ 15073,
+ 2252,
+ - 884,
+ - 5321,
+ 5079,
+ 1228,
+ - 1780,
+ - 10714,
+ 13271,
+ 17326,
+ 2131,
+ 12827,
+ 16547,
+ 2334,
+ - 831,
+ - 5002,
+ 15073,
+ 5816,
+ 518,
+ 3118,
+ 3932,
+ 3686,
+ - 156,
+ - 938,
+ 14254,
+ 8601,
+ 1096,
+ 6598,
+ 16875,
+ 778,
+ - 2454,
+ - 14774,
+ 15073,
+ 3809,
+ - 107,
+ - 646,
+ 6062,
+ 614,
+ - 2804,
+ - 16879,
+ 9338,
+ 9256,
+ 1204,
+ 7251,
+ 13271,
+ 1761,
+ - 1247,
+ - 7508,
+ 13271,
+ 3522,
+ - 223,
+ - 1343,
+ 2457,
+ 1966,
+ - 1084,
+ - 6529,
+ 11468,
+ 5529,
+ 443,
+ 2668,
+ 10485,
+ 737,
+ - 2534,
+ - 15255,
+ 11632,
+ 3194,
+ - 367,
+ - 2212,
+ 1474,
+ 778,
+ - 2454,
+ - 14774
+};
+
+static const Word32 inter6[61] =
+{
+ 29443,
+ 28346,
+ 25207,
+ 20449,
+ 14701,
+ 8693,
+ 3143,
+ - 1352,
+ - 4402,
+ - 5865,
+ - 5850,
+ - 4673,
+ - 2783,
+ - 672,
+ 1211,
+ 2536,
+ 3130,
+ 2991,
+ 2259,
+ 1170,
+ 0,
+ - 1001,
+ - 1652,
+ - 1868,
+ - 1666,
+ - 1147,
+ - 464,
+ 218,
+ 756,
+ 1060,
+ 1099,
+ 904,
+ 550,
+ 135,
+ - 245,
+ - 514,
+ - 634,
+ - 602,
+ - 451,
+ - 231,
+ 0,
+ 191,
+ 308,
+ 340,
+ 296,
+ 198,
+ 78,
+ - 36,
+ - 120,
+ - 163,
+ - 165,
+ - 132,
+ - 79,
+ - 19,
+ 34,
+ 73,
+ 91,
+ 89,
+ 70,
+ 38,
+ 0
+};
+
+/*
+ * window for non-MR122 modes; uses 40 samples lookahead
+ * used only in Build_CN_param
+ */
+static const Word32 window_200_40[L_WINDOW] = {
+ 2621, 2623, 2629, 2638, 2651, 2668, 2689, 2713, 2741, 2772,
+ 2808, 2847, 2890, 2936, 2986, 3040, 3097, 3158, 3223, 3291,
+ 3363, 3438, 3517, 3599, 3685, 3774, 3867, 3963, 4063, 4166,
+ 4272, 4382, 4495, 4611, 4731, 4853, 4979, 5108, 5240, 5376,
+ 5514, 5655, 5800, 5947, 6097, 6250, 6406, 6565, 6726, 6890,
+ 7057, 7227, 7399, 7573, 7750, 7930, 8112, 8296, 8483, 8672,
+ 8863, 9057, 9252, 9450, 9650, 9852, 10055, 10261, 10468, 10677,
+ 10888, 11101, 11315, 11531, 11748, 11967, 12187, 12409, 12632, 12856,
+ 13082, 13308, 13536, 13764, 13994, 14225, 14456, 14688, 14921, 15155,
+ 15389, 15624, 15859, 16095, 16331, 16568, 16805, 17042, 17279, 17516,
+ 17754, 17991, 18228, 18465, 18702, 18939, 19175, 19411, 19647, 19882,
+ 20117, 20350, 20584, 20816, 21048, 21279, 21509, 21738, 21967, 22194,
+ 22420, 22644, 22868, 23090, 23311, 23531, 23749, 23965, 24181, 24394,
+ 24606, 24816, 25024, 25231, 25435, 25638, 25839, 26037, 26234, 26428,
+ 26621, 26811, 26999, 27184, 27368, 27548, 27727, 27903, 28076, 28247,
+ 28415, 28581, 28743, 28903, 29061, 29215, 29367, 29515, 29661, 29804,
+ 29944, 30081, 30214, 30345, 30472, 30597, 30718, 30836, 30950, 31062,
+ 31170, 31274, 31376, 31474, 31568, 31659, 31747, 31831, 31911, 31988,
+ 32062, 32132, 32198, 32261, 32320, 32376, 32428, 32476, 32521, 32561,
+ 32599, 32632, 32662, 32688, 32711, 32729, 32744, 32755, 32763, 32767,
+ 32767, 32741, 32665, 32537, 32359, 32129, 31850, 31521, 31143, 30716,
+ 30242, 29720, 29151, 28538, 27879, 27177, 26433, 25647, 24821, 23957,
+ 23055, 22117, 21145, 20139, 19102, 18036, 16941, 15820, 14674, 13505,
+ 12315, 11106, 9879, 8637, 7381, 6114, 4838, 3554, 2264, 971};
+
+
+
+
+/* comparison optimization tables */
+/* definition of bad speech */
+static const UWord8 table_speech_bad[9] = {0, 0, 1, 1, 0, 0, 0, 1, 0};
+static const UWord8 table_SID[9] = {0, 0, 0, 0, 1, 1, 1, 0, 0};
+static const UWord8 table_DTX[9] = {0, 0, 0, 0, 1, 1, 1, 1, 0};
+static const UWord8 table_mute[9] = {0, 0, 0, 0, 1, 0, 1, 1, 0};
+
+/* track start positions for fixed codebook routines */
+static const Word8 startPos[16] =
+ {
+ 0,
+ 2,
+ 0,
+ 3,
+ 0,
+ 2,
+ 0,
+ 3,
+ 1,
+ 3,
+ 2,
+ 4,
+ 1,
+ 4,
+ 1,
+ 4
+ };
+
+
+
+
+
+
+
+#endif
+
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/amr_float/sp_dec.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/amr_float/sp_dec.c
new file mode 100644
index 000000000..dae5a01cd
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/amr_float/sp_dec.c
@@ -0,0 +1,6167 @@
+/*
+ * ===================================================================
+ * TS 26.104
+ * REL-5 V5.4.0 2004-03
+ * REL-6 V6.1.0 2004-03
+ * 3GPP AMR Floating-point Speech Codec
+ * ===================================================================
+ *
+ */
+
+/*
+ * sp_dec.c
+ *
+ *
+ * Project:
+ * AMR Floating-Point Codec
+ *
+ * Contains:
+ * This module contains all the functions needed for decoding AMR
+ * encoder parameters into 16-bit speech samples
+ *
+ */
+/*
+ * include files
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <memory.h>
+#include <math.h>
+#include "sp_dec.h"
+#include "rom_dec.h"
+
+/*
+ * Declare structure types
+ */
+enum DTXStateType
+{
+ SPEECH = 0, DTX, DTX_MUTE
+};
+
+/*
+ * Decoder memory structure
+ */
+typedef struct
+{
+ /* history vector of past synthesis speech energy */
+ Word32 frameEnergyHist[L_ENERGYHIST];
+
+
+ /* state flags */
+ Word16 bgHangover; /* counter; number of frames after last speech frame */
+
+
+}Bgn_scdState;
+typedef struct
+{
+ Word32 hangCount; /* counter; */
+ /* history vector of past synthesis speech energy */
+ Word32 cbGainHistory[L_CBGAINHIST];
+ Word16 hangVar; /* counter; */
+
+}Cb_gain_averageState;
+typedef struct
+{
+ Word32 lsp_meanSave[M]; /* Averaged LSPs saved for efficiency */
+
+
+}lsp_avgState;
+typedef struct
+{
+ Word32 past_r_q[M]; /* Past quantized prediction error, Q15 */
+ Word32 past_lsf_q[M]; /* Past dequantized lsfs, Q15 */
+
+
+}D_plsfState;
+typedef struct
+{
+ Word32 pbuf[5];
+ Word32 past_gain_pit;
+ Word32 prev_gp;
+
+
+}ec_gain_pitchState;
+typedef struct
+{
+ Word32 gbuf[5];
+ Word32 past_gain_code;
+ Word32 prev_gc;
+
+
+}ec_gain_codeState;
+typedef struct
+{
+ /*
+ * normal MA predictor memory, Q10
+ * (contains 20*log10(quaErr))
+ */
+ Word32 past_qua_en[4];
+
+
+ /*
+ * MA predictor memory for MR122 mode, Q10
+ * (contains log2(quaErr))
+ */
+ Word32 past_qua_en_MR122[4];
+
+
+}gc_predState;
+typedef struct
+{
+ Word32 gainMem[PHDGAINMEMSIZE];
+ Word32 prevCbGain;
+ Word32 prevState;
+ Word16 lockFull;
+ Word16 onset;
+
+
+}ph_dispState;
+typedef struct
+{
+ enum DTXStateType dtxGlobalState; /* contains previous state */
+
+ Word32 log_en;
+ Word32 old_log_en;
+ Word32 pn_seed_rx;
+ Word32 lsp[M];
+ Word32 lsp_old[M];
+ Word32 lsf_hist[M * DTX_HIST_SIZE];
+ Word32 lsf_hist_mean[M * DTX_HIST_SIZE];
+ Word32 log_en_hist[DTX_HIST_SIZE];
+ Word32 true_sid_period_inv;
+ Word16 since_last_sid;
+ Word16 lsf_hist_ptr;
+ Word16 log_pg_mean;
+ Word16 log_en_hist_ptr;
+ Word16 log_en_adjust;
+ Word16 dtxHangoverCount;
+ Word16 decAnaElapsedCount;
+ Word16 sid_frame;
+ Word16 valid_data;
+ Word16 dtxHangoverAdded;
+
+
+ /* updated in main decoder */
+ Word16 data_updated; /* marker to know if CNI data is ever renewed */
+
+
+}dtx_decState;
+typedef struct
+{
+ Word32 past_gain;
+
+
+}agcState;
+typedef struct
+{
+ /* Excitation vector */
+ Word32 old_exc[L_SUBFR + PIT_MAX + L_INTERPOL];
+ Word32 *exc;
+ Word32 lsp_old[M];
+
+
+ /* Filter's memory */
+ Word32 mem_syn[M];
+
+
+ /* pitch sharpening */
+ Word32 sharp;
+ Word32 old_T0;
+
+
+ /* Variable holding received ltpLag, used in background noise and BFI */
+ Word32 T0_lagBuff;
+
+
+ /* Variables for the source characteristic detector (SCD) */
+ Word32 inBackgroundNoise;
+ Word32 voicedHangover;
+ Word32 ltpGainHistory[9];
+
+
+ /* Memories for bad frame handling */
+ Word32 excEnergyHist[9];
+ Word16 prev_bf;
+ Word16 prev_pdf;
+ Word16 state;
+ Word16 nodataSeed;
+
+
+ Bgn_scdState * background_state;
+ Cb_gain_averageState * Cb_gain_averState;
+ lsp_avgState * lsp_avg_st;
+ D_plsfState * lsfState;
+ ec_gain_pitchState * ec_gain_p_st;
+ ec_gain_codeState * ec_gain_c_st;
+ gc_predState * pred_state;
+ ph_dispState * ph_disp_st;
+ dtx_decState * dtxDecoderState;
+}Decoder_amrState;
+typedef struct
+{
+ Word32 res2[L_SUBFR];
+ Word32 mem_syn_pst[M];
+ Word32 synth_buf[M + L_FRAME];
+ Word32 preemph_state_mem_pre;
+ agcState * agc_state;
+}Post_FilterState;
+typedef struct
+{
+ Word32 y2_hi;
+ Word32 y2_lo;
+ Word32 y1_hi;
+ Word32 y1_lo;
+ Word32 x0;
+ Word32 x1;
+
+
+}Post_ProcessState;
+typedef struct
+{
+ Decoder_amrState * decoder_amrState;
+ Post_FilterState * post_state;
+ Post_ProcessState * postHP_state;
+}Speech_Decode_FrameState;
+
+
+/*
+ * Decoder_amr_reset
+ *
+ *
+ * Parameters:
+ * state B: state structure
+ * mode I: AMR mode
+ *
+ * Function:
+ * Resets state memory
+ *
+ * Returns:
+ * void
+ */
+static void Decoder_amr_reset( Decoder_amrState *state, enum Mode mode )
+{
+ Word32 i;
+
+ /* Cb_gain_average_reset */
+ memset(state->Cb_gain_averState->cbGainHistory, 0, L_CBGAINHIST << 2);
+ state->Cb_gain_averState->hangVar = 0;
+ state->Cb_gain_averState->hangCount= 0;
+
+ /* Initialize static pointer */
+ state->exc = state->old_exc + PIT_MAX + L_INTERPOL;
+
+ /* Static vectors to zero */
+ memset( state->old_exc, 0, ( PIT_MAX + L_INTERPOL )<<2 );
+
+ if ( mode != MRDTX )
+ memset( state->mem_syn, 0, M <<2 );
+
+ /* initialize pitch sharpening */
+ state->sharp = SHARPMIN;
+ state->old_T0 = 40;
+
+ /* Initialize state->lsp_old [] */
+ if ( mode != MRDTX ) {
+ state->lsp_old[0] = 30000;
+ state->lsp_old[1] = 26000;
+ state->lsp_old[2] = 21000;
+ state->lsp_old[3] = 15000;
+ state->lsp_old[4] = 8000;
+ state->lsp_old[5] = 0;
+ state->lsp_old[6] = -8000;
+ state->lsp_old[7] = -15000;
+ state->lsp_old[8] = -21000;
+ state->lsp_old[9] = -26000;
+ }
+
+ /* Initialize memories of bad frame handling */
+ state->prev_bf = 0;
+ state->prev_pdf = 0;
+ state->state = 0;
+ state->T0_lagBuff = 40;
+ state->inBackgroundNoise = 0;
+ state->voicedHangover = 0;
+
+ if ( mode != MRDTX )
+ memset( state->excEnergyHist, 0, 9 <<2 );
+ memset( state->ltpGainHistory, 0, 9 <<2 );
+
+ if ( mode != MRDTX ) {
+ state->lsp_avg_st->lsp_meanSave[0] = 1384;
+ state->lsp_avg_st->lsp_meanSave[1] = 2077;
+ state->lsp_avg_st->lsp_meanSave[2] = 3420;
+ state->lsp_avg_st->lsp_meanSave[3] = 5108;
+ state->lsp_avg_st->lsp_meanSave[4] = 6742;
+ state->lsp_avg_st->lsp_meanSave[5] = 8122;
+ state->lsp_avg_st->lsp_meanSave[6] = 9863;
+ state->lsp_avg_st->lsp_meanSave[7] = 11092;
+ state->lsp_avg_st->lsp_meanSave[8] = 12714;
+ state->lsp_avg_st->lsp_meanSave[9] = 13701;
+ }
+ memset( state->lsfState->past_r_q, 0, M <<2 );
+
+ /* Past dequantized lsfs */
+ state->lsfState->past_lsf_q[0] = 1384;
+ state->lsfState->past_lsf_q[1] = 2077;
+ state->lsfState->past_lsf_q[2] = 3420;
+ state->lsfState->past_lsf_q[3] = 5108;
+ state->lsfState->past_lsf_q[4] = 6742;
+ state->lsfState->past_lsf_q[5] = 8122;
+ state->lsfState->past_lsf_q[6] = 9863;
+ state->lsfState->past_lsf_q[7] = 11092;
+ state->lsfState->past_lsf_q[8] = 12714;
+ state->lsfState->past_lsf_q[9] = 13701;
+
+ for ( i = 0; i < 5; i++ )
+ state->ec_gain_p_st->pbuf[i] = 1640;
+ state->ec_gain_p_st->past_gain_pit = 0;
+ state->ec_gain_p_st->prev_gp = 16384;
+
+ for ( i = 0; i < 5; i++ )
+ state->ec_gain_c_st->gbuf[i] = 1;
+ state->ec_gain_c_st->past_gain_code = 0;
+ state->ec_gain_c_st->prev_gc = 1;
+
+ if ( mode != MRDTX ) {
+ for ( i = 0; i < NPRED; i++ ) {
+ state->pred_state->past_qua_en[i] = MIN_ENERGY;
+ state->pred_state->past_qua_en_MR122[i] = MIN_ENERGY_MR122;
+ }
+ }
+ state->nodataSeed = 21845;
+
+ /* Static vectors to zero */
+ memset( state->background_state->frameEnergyHist, 0, L_ENERGYHIST <<2 );
+
+ /* Initialize hangover handling */
+ state->background_state->bgHangover = 0;
+
+ /* phDispReset */
+ memset( state->ph_disp_st->gainMem, 0, PHDGAINMEMSIZE <<2 );
+ state->ph_disp_st->prevState = 0;
+ state->ph_disp_st->prevCbGain = 0;
+ state->ph_disp_st->lockFull = 0;
+ state->ph_disp_st->onset = 0; /* assume no onset in start */
+
+ if ( mode != MRDTX ) {
+ state->dtxDecoderState->since_last_sid = 0;
+ state->dtxDecoderState->true_sid_period_inv = 8192;
+ state->dtxDecoderState->log_en = 3500;
+ state->dtxDecoderState->old_log_en = 3500;
+
+ /* low level noise for better performance in DTX handover cases*/
+ state->dtxDecoderState->pn_seed_rx = PN_INITIAL_SEED;
+
+ /* Initialize state->lsp [] */
+ state->dtxDecoderState->lsp[0] = 30000;
+ state->dtxDecoderState->lsp[1] = 26000;
+ state->dtxDecoderState->lsp[2] = 21000;
+ state->dtxDecoderState->lsp[3] = 15000;
+ state->dtxDecoderState->lsp[4] = 8000;
+ state->dtxDecoderState->lsp[5] = 0;
+ state->dtxDecoderState->lsp[6] = -8000;
+ state->dtxDecoderState->lsp[7] = -15000;
+ state->dtxDecoderState->lsp[8] = -21000;
+ state->dtxDecoderState->lsp[9] = -26000;
+
+ /* Initialize state->lsp_old [] */
+ state->dtxDecoderState->lsp_old[0] = 30000;
+ state->dtxDecoderState->lsp_old[1] = 26000;
+ state->dtxDecoderState->lsp_old[2] = 21000;
+ state->dtxDecoderState->lsp_old[3] = 15000;
+ state->dtxDecoderState->lsp_old[4] = 8000;
+ state->dtxDecoderState->lsp_old[5] = 0;
+ state->dtxDecoderState->lsp_old[6] = -8000;
+ state->dtxDecoderState->lsp_old[7] = -15000;
+ state->dtxDecoderState->lsp_old[8] = -21000;
+ state->dtxDecoderState->lsp_old[9] = -26000;
+ state->dtxDecoderState->lsf_hist_ptr = 0;
+ state->dtxDecoderState->log_pg_mean = 0;
+ state->dtxDecoderState->log_en_hist_ptr = 0;
+
+ /* initialize decoder lsf history */
+ state->dtxDecoderState->lsf_hist[0] = 1384;
+ state->dtxDecoderState->lsf_hist[1] = 2077;
+ state->dtxDecoderState->lsf_hist[2] = 3420;
+ state->dtxDecoderState->lsf_hist[3] = 5108;
+ state->dtxDecoderState->lsf_hist[4] = 6742;
+ state->dtxDecoderState->lsf_hist[5] = 8122;
+ state->dtxDecoderState->lsf_hist[6] = 9863;
+ state->dtxDecoderState->lsf_hist[7] = 11092;
+ state->dtxDecoderState->lsf_hist[8] = 12714;
+ state->dtxDecoderState->lsf_hist[9] = 13701;
+
+ for ( i = 1; i < DTX_HIST_SIZE; i++ ) {
+ memcpy( &state->dtxDecoderState->lsf_hist[M * i], &state->
+ dtxDecoderState->lsf_hist[0], M <<2 );
+ }
+ memset( state->dtxDecoderState->lsf_hist_mean, 0, M * DTX_HIST_SIZE <<2 );
+
+ /* initialize decoder log frame energy */
+ for ( i = 0; i < DTX_HIST_SIZE; i++ ) {
+ state->dtxDecoderState->log_en_hist[i] = state->dtxDecoderState->log_en
+ ;
+ }
+ state->dtxDecoderState->log_en_adjust = 0;
+ state->dtxDecoderState->dtxHangoverCount = DTX_HANG_CONST;
+ state->dtxDecoderState->decAnaElapsedCount = 31;
+ state->dtxDecoderState->sid_frame = 0;
+ state->dtxDecoderState->valid_data = 0;
+ state->dtxDecoderState->dtxHangoverAdded = 0;
+ state->dtxDecoderState->dtxGlobalState = DTX;
+ state->dtxDecoderState->data_updated = 0;
+ }
+ return;
+}
+
+
+/*
+ * rx_dtx_handler
+ *
+ *
+ * Parameters:
+ * st->dtxGlobalState I: DTX state
+ * st->since_last_sid B: Frames after last SID frame
+ * st->data_updated I: SID update flag
+ * st->decAnaElapsedCount B: counter that synchronizes with the GSMEFR txDtx machine
+ * st->dtxHangoverAdded B: DTX hangover
+ * st->sid_frame O: SID frame indicator
+ * st->valid_data O: Valid data indicator
+ * frame_type O: Frame type
+ *
+ * Function:
+ * Find the new DTX state
+ *
+ * Returns:
+ * DTXStateType DTX, DTX_MUTE or SPEECH
+ */
+static enum DTXStateType rx_dtx_handler( dtx_decState *st, enum RXFrameType frame_type )
+{
+ enum DTXStateType newState;
+ enum DTXStateType encState;
+
+ /* DTX if SID frame or previously in DTX{_MUTE} and (NO_RX OR BAD_SPEECH) */
+ if ( table_SID[frame_type] | ( ( st->dtxGlobalState != SPEECH ) &
+ table_speech_bad[frame_type] ) ) {
+ newState = DTX;
+
+ /* stay in mute for these input types */
+ if ( ( st->dtxGlobalState == DTX_MUTE ) & table_mute[frame_type] ) {
+ newState = DTX_MUTE;
+ }
+
+ /*
+ * evaluate if noise parameters are too old
+ * since_last_sid is reset when CN parameters have been updated
+ */
+ st->since_last_sid += 1;
+
+ /* no update of sid parameters in DTX for a long while */
+ if ((frame_type != RX_SID_UPDATE) & ( st->since_last_sid > DTX_MAX_EMPTY_THRESH )) {
+ newState = DTX_MUTE;
+ }
+ }
+ else {
+ newState = SPEECH;
+ st->since_last_sid = 0;
+ }
+
+ /*
+ * reset the decAnaElapsed counter when receiving CNI data for the first
+ * time, to be robust against counter mismatch after handover;
+ * this might slightly delay the bwd CNI analysis in the new decoder.
+ */
+ if ( ( st->data_updated == 0 ) & ( frame_type == RX_SID_UPDATE ) ) {
+ st->decAnaElapsedCount = 0;
+ }
+
+ /*
+ * update the SPE-SPD DTX hangover synchronization
+ * to know when SPE has added dtx hangover
+ */
+ st->decAnaElapsedCount += 1;
+ st->dtxHangoverAdded = 0;
+ encState = SPEECH;
+
+ if ( table_DTX[frame_type] ) {
+ encState = DTX;
+ if( ( frame_type == RX_NO_DATA ) & ( newState == SPEECH ) ) {
+ encState = SPEECH;
+ }
+ }
+
+ if ( encState == SPEECH ) {
+ st->dtxHangoverCount = DTX_HANG_CONST;
+ }
+ else {
+ if ( st->decAnaElapsedCount > DTX_ELAPSED_FRAMES_THRESH ) {
+ st->dtxHangoverAdded = 1;
+ st->decAnaElapsedCount = 0;
+ st->dtxHangoverCount = 0;
+ }
+ else if ( st->dtxHangoverCount == 0 ) {
+ st->decAnaElapsedCount = 0;
+ }
+ else {
+ st->dtxHangoverCount -= 1;
+ }
+ }
+
+ if ( newState != SPEECH ) {
+ /*
+ * DTX or DTX_MUTE
+ * CN data is not in a first SID, first SIDs are marked as SID_BAD
+ * but will do backwards analysis if a hangover period has been added
+ * according to the state machine above
+ */
+ st->sid_frame = 0;
+ st->valid_data = 0;
+
+ if ( frame_type == RX_SID_FIRST ) {
+ st->sid_frame = 1;
+ }
+ else if ( frame_type == RX_SID_UPDATE ) {
+ st->sid_frame = 1;
+ st->valid_data = 1;
+ }
+ else if ( frame_type == RX_SID_BAD ) {
+ st->sid_frame = 1;
+
+ /* use old data */
+ st->dtxHangoverAdded = 0;
+ }
+ }
+
+ /* newState is used by both SPEECH AND DTX synthesis routines */
+ return newState;
+}
+
+
+/*
+ * Lsf_lsp
+ *
+ *
+ * Parameters:
+ * lsf I: vector of LSFs
+ * lsp O: vector of LSPs
+ *
+ * Function:
+ * Transformation lsf to lsp, order M
+ *
+ * Returns:
+ * void
+ */
+static void Lsf_lsp( Word32 lsf[], Word32 lsp[] )
+{
+ Word32 i, ind, offset, tmp;
+
+
+ for ( i = 0; i < M; i++ ) {
+ /* ind = b8-b15 of lsf[i] */
+ ind = lsf[i] >> 8;
+
+ /* offset = b0-b7 of lsf[i] */
+ offset = lsf[i] & 0x00ff;
+
+ /* lsp[i] = table[ind]+ ((table[ind+1]-table[ind])*offset) / 256 */
+ tmp = ( ( cos_table[ind+1]-cos_table[ind] )*offset ) << 1;
+ lsp[i] = cos_table[ind] + ( tmp >> 9 );
+ }
+ return;
+}
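+
+/*
+ * Editor's note: illustrative sketch, not part of the 3GPP reference source,
+ * compiled out. Assuming lsf[] holds the normalized frequency in Q15
+ * (0..16384 covering 0..0.5), the table lookup plus linear interpolation
+ * above approximates lsp = cos(2*pi*f), the inverse of the relation quoted
+ * in Lsp_lsf further below.
+ */
+#if 0
+#include <math.h>   /* only needed for this sketch */
+
+static double lsf_to_lsp_reference( double lsf_q15 )
+{
+   double f = lsf_q15 / 32768.0;                /* Q15 -> normalized frequency */
+
+   return cos( 2.0 * 3.14159265358979 * f );    /* result in (-1, 1]           */
+}
+#endif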
+
+
+/*
+ * D_plsf_3
+ *
+ *
+ * Parameters:
+ * st->past_lsf_q I: Past dequantized LSFs
+ * st->past_r_q B: past quantized residual
+ * mode I: AMR mode
+ * bfi B: bad frame indicator
+ * indice I: quantization indices of 3 submatrices, Q0
+ * lsp1_q O: quantized 1st LSP vector
+ *
+ * Function:
+ * Decodes the LSP parameters using the received quantization indices.
+ * 1st order MA prediction and split by 3 vector quantization (split-VQ)
+ *
+ * Returns:
+ * void
+ */
+static void D_plsf_3( D_plsfState *st, enum Mode mode, Word16 bfi, Word16 *
+ indice, Word32 *lsp1_q )
+{
+ Word32 lsf1_r[M], lsf1_q[M];
+ Word32 i, index, temp;
+ const Word32 *p_cb1, *p_cb2, *p_cb3, *p_dico;
+
+
+ /* if bad frame */
+ if ( bfi != 0 ) {
+ /* use the past LSFs slightly shifted towards their mean */
+ for ( i = 0; i < M; i++ ) {
+ /* lsfi_q[i] = ALPHA*past_lsf_q[i] + ONE_ALPHA*meanLsf[i]; */
+ lsf1_q[i] = ( ( st->past_lsf_q[i] * ALPHA ) >> 15 ) + ( ( mean_lsf_3[i]
+ * ONE_ALPHA ) >> 15 );
+ }
+
+ /* estimate past quantized residual to be used in next frame */
+ if ( mode != MRDTX ) {
+ for ( i = 0; i < M; i++ ) {
+ /* temp = meanLsf[i] + pastR2_q[i] * pred_fac; */
+ temp = mean_lsf_3[i] + ( ( st->past_r_q[i] * pred_fac[i] ) >> 15 );
+ st->past_r_q[i] = lsf1_q[i] - temp;
+ }
+ }
+ else {
+ for ( i = 0; i < M; i++ ) {
+ /* temp = meanLsf[i] + pastR2_q[i]; */
+ temp = mean_lsf_3[i] + st->past_r_q[i];
+ st->past_r_q[i] = lsf1_q[i] - temp;
+ }
+ }
+ }
+
+ /* if good LSFs received */
+ else {
+ if ( ( mode == MR475 ) | ( mode == MR515 ) ) {
+ /* MR475, MR515 */
+ p_cb1 = dico1_lsf_3;
+ p_cb2 = dico2_lsf_3;
+ p_cb3 = mr515_3_lsf;
+ }
+ else if ( mode == MR795 ) {
+ /* MR795 */
+ p_cb1 = mr795_1_lsf;
+ p_cb2 = dico2_lsf_3;
+ p_cb3 = dico3_lsf_3;
+ }
+ else {
+ /* MR59, MR67, MR74, MR102, MRDTX */
+ p_cb1 = dico1_lsf_3;
+ p_cb2 = dico2_lsf_3;
+ p_cb3 = dico3_lsf_3;
+ }
+
+ /* decode prediction residuals from 3 received indices */
+ index = *indice++;
+ p_dico = &p_cb1[index + index + index];
+ index = *indice++;
+ lsf1_r[0] = *p_dico++;
+ lsf1_r[1] = *p_dico++;
+ lsf1_r[2] = *p_dico++;
+
+ if ( ( mode == MR475 ) | ( mode == MR515 ) ) {
+ /* MR475, MR515 only using every second entry */
+ index = index << 1;
+ }
+ p_dico = &p_cb2[index + index + index];
+ index = *indice++;
+ lsf1_r[3] = *p_dico++;
+ lsf1_r[4] = *p_dico++;
+ lsf1_r[5] = *p_dico++;
+ p_dico = &p_cb3[index << 2];
+ lsf1_r[6] = *p_dico++;
+ lsf1_r[7] = *p_dico++;
+ lsf1_r[8] = *p_dico++;
+ lsf1_r[9] = *p_dico++;
+
+ /* Compute quantized LSFs and update the past quantized residual */
+ if ( mode != MRDTX ) {
+ for ( i = 0; i < M; i++ ) {
+ lsf1_q[i] = lsf1_r[i] + ( mean_lsf_3[i] + ( ( st->past_r_q[i] *
+ pred_fac[i] ) >> 15 ) );
+ }
+ memcpy( st->past_r_q, lsf1_r, M <<2 );
+ }
+ else {
+ for ( i = 0; i < M; i++ ) {
+ lsf1_q[i] = lsf1_r[i] + ( mean_lsf_3[i] + st->past_r_q[i] );
+ }
+ memcpy( st->past_r_q, lsf1_r, M <<2 );
+ }
+ }
+
+ /* verify that the LSFs have a minimum distance of LSF_GAP Hz */
+ temp = LSF_GAP;
+
+ for ( i = 0; i < M; i++ ) {
+ if ( lsf1_q[i] < temp ) {
+ lsf1_q[i] = temp;
+ }
+ temp = lsf1_q[i] + LSF_GAP;
+ }
+ memcpy( st->past_lsf_q, lsf1_q, M <<2 );
+
+ /* convert LSFs to the cosine domain */
+ Lsf_lsp( lsf1_q, lsp1_q );
+ return;
+}
+
+
+/*
+ * pseudonoise
+ *
+ *
+ * Parameters:
+ * shift_reg B: Old CN generator shift register state
+ * no_bits I: Number of bits
+ *
+ * Function:
+ * pseudonoise
+ *
+ * Returns:
+ * noise_bits
+ */
+static Word32 pseudonoise( Word32 *shift_reg, Word32 no_bits )
+{
+ Word32 noise_bits, Sn, i;
+ Word32 s_reg;
+
+
+ s_reg = *shift_reg;
+ noise_bits = 0;
+
+ for ( i = 0; i < no_bits; i++ ) {
+ /* State n == 31 */
+ Sn = s_reg & 0x00000001L;
+
+ /* State n == 3 */
+ if ( s_reg & 0x10000000L ) {
+ Sn = Sn ^ 0x1L;
+ }
+ else {
+ Sn = Sn ^ 0x0L;
+ }
+ noise_bits = ( noise_bits << 1 ) | ( s_reg & 1 );
+ s_reg = s_reg >> 1;
+
+ if ( Sn & 1 ) {
+ s_reg = s_reg | 0x40000000L;
+ }
+ }
+ *shift_reg = s_reg;
+ return noise_bits;
+}
+
+
+/*
+ * Lsp_lsf
+ *
+ *
+ * Parameters:
+ * lsp I: LSP vector (range: -1<=val<1)
+ * lsf O: LSF vector
+ *
+ * Function:
+ * Transformation lsp to lsf, LPC order M
+ * lsf[i] = arccos(lsp[i])/(2*pi)
+ *
+ * Returns:
+ * void
+ */
+static void Lsp_lsf( Word32 lsp[], Word32 lsf[] )
+{
+ Word32 i, ind = 63; /* begin at end of table -1 */
+
+
+ for ( i = M - 1; i >= 0; i-- ) {
+ /* find value in table that is just greater than lsp[i] */
+ while ( cos_table[ind] < lsp[i] ) {
+ ind--;
+ }
+ lsf[i] = ( ( ( ( lsp[i] - cos_table[ind] ) * acos_slope[ind] ) + 0x800 )
+ >> 12 ) + ( ind << 8 );
+ }
+ return;
+}
+
+
+/*
+ * Reorder_lsf
+ *
+ *
+ * Parameters:
+ * lsf B: vector of LSFs (range: 0<=val<=0.5)
+ * min_dist I: minimum required distance
+ *
+ * Function:
+ * Make sure that the LSFs are properly ordered and keep a certain minimum
+ * distance between adjacent LSFs. LPC order = M.
+ *
+ * Returns:
+ * void
+ */
+static void Reorder_lsf( Word32 *lsf, Word32 min_dist )
+{
+ Word32 lsf_min, i;
+
+
+ lsf_min = min_dist;
+
+ for ( i = 0; i < M; i++ ) {
+ if ( lsf[i] < lsf_min ) {
+ lsf[i] = lsf_min;
+ }
+ lsf_min = lsf[i] + min_dist;
+ }
+}
+
+/* VC5.0 Global optimization does not work with this function */
+#if _MSC_VER == 1100
+#pragma optimize( "g", off )
+#endif
+/*
+ * Get_lsp_pol
+ *
+ *
+ * Parameters:
+ * lsp I: line spectral frequencies
+ * f O: polynomial F1(z) or F2(z)
+ *
+ * Function:
+ * Find the polynomial F1(z) or F2(z) from the LSPs.
+ *
+ * F1(z) = product ( 1 - 2 lsp[i] z^-1 + z^-2 )
+ * i=0,2,4,6,8
+ * F2(z) = product ( 1 - 2 lsp[i] z^-1 + z^-2 )
+ * i=1,3,5,7,9
+ *
+ * where lsp[] is the LSP vector in the cosine domain.
+ *
+ * The expansion is performed using the following recursion:
+ *
+ * f[0] = 1
+ * b = -2.0 * lsp[0]
+ * f[1] = b
+ * for i=2 to 5 do
+ * b = -2.0 * lsp[2*i-2];
+ * f[i] = b*f[i-1] + 2.0*f[i-2];
+ * for j=i-1 down to 2 do
+ * f[j] = f[j] + b*f[j-1] + f[j-2];
+ * f[1] = f[1] + b;
+ *
+ * Returns:
+ * void
+ */
+static void Get_lsp_pol( Word32 *lsp, Word32 *f )
+{
+ volatile Word32 f0, f1, f2, f3, f4, f5;
+ Word32 l1, l2, l3, l4;
+
+
+ /* f[0] = 1.0; */
+ f0 = 16777216L;
+
+ /* f1 = *lsp * -1024; */
+ f1 = -lsp[0] << 10;
+ l1 = lsp[2];
+ l2 = lsp[4];
+ l3 = lsp[6];
+ l4 = lsp[8];
+ f2 = f0 << 1;
+ f2 -= ( ( ( f1 >> 16 ) * l1 ) + ( ( ( f1 & 0xFFFE ) * l1 ) >> 16 ) ) << 2;
+ f1 -= l1 << 10;
+ f3 = f1 << 1;
+ f3 -= ( ( ( f2 >> 16 ) * l2 ) + ( ( ( f2 & 0xFFFE ) * l2 ) >> 16 ) ) << 2;
+ f2 += f0;
+ f2 -= ( ( ( f1 >> 16 ) * l2 ) + ( ( ( f1 & 0xFFFE ) * l2 ) >> 16 ) ) << 2;
+ f1 -= l2 << 10;
+ f4 = f2 << 1;
+ f4 -= ( ( ( f3 >> 16 ) * l3 ) + ( ( ( f3 & 0xFFFE ) * l3 ) >> 16 ) ) << 2;
+ f3 += f1;
+ f3 -= ( ( ( f2 >> 16 ) * l3 ) + ( ( ( f2 & 0xFFFE ) * l3 ) >> 16 ) ) << 2;
+ f2 += f0;
+ f2 -= ( ( ( f1 >> 16 ) * l3 ) + ( ( ( f1 & 0xFFFE ) * l3 ) >> 16 ) ) << 2;
+ f1 -= l3 << 10;
+ f5 = f3 << 1;
+ f5 -= ( ( ( f4 >> 16 ) * l4 ) + ( ( ( f4 & 0xFFFE ) * l4 ) >> 16 ) ) << 2;
+ f4 += f2;
+ f4 -= ( ( ( f3 >> 16 ) * l4 ) + ( ( ( f3 & 0xFFFE ) * l4 ) >> 16 ) ) << 2;
+ f3 += f1;
+ f3 -= ( ( ( f2 >> 16 ) * l4 ) + ( ( ( f2 & 0xFFFE ) * l4 ) >> 16 ) ) << 2;
+ f2 += f0;
+ f2 -= ( ( ( f1 >> 16 ) * l4 ) + ( ( ( f1 & 0xFFFE ) * l4 ) >> 16 ) ) << 2;
+ f1 -= l4 << 10;
+ f[0] = f0;
+ f[1] = f1;
+ f[2] = f2;
+ f[3] = f3;
+ f[4] = f4;
+ f[5] = f5;
+ return;
+}
+#if _MSC_VER == 1100
+#pragma optimize( "", on )
+#endif
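+
+/*
+ * Editor's note: illustrative sketch, not part of the 3GPP reference source,
+ * compiled out. This writes out, in floating point, the recursion quoted in
+ * the Get_lsp_pol comment; the fixed-point routine above unrolls the same
+ * computation in Q-format arithmetic.
+ */
+#if 0
+static void get_lsp_pol_reference( const double lsp[], double f[] )
+{
+   double b;
+   int i, j;
+
+   f[0] = 1.0;
+   b = -2.0 * lsp[0];
+   f[1] = b;
+
+   for ( i = 2; i <= 5; i++ ) {
+      b = -2.0 * lsp[2 * i - 2];
+      f[i] = b * f[i - 1] + 2.0 * f[i - 2];
+
+      for ( j = i - 1; j >= 2; j-- ) {
+         f[j] += b * f[j - 1] + f[j - 2];
+      }
+      f[1] += b;
+   }
+}
+#endif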
+
+
+/*
+ * Lsp_Az
+ *
+ *
+ * Parameters:
+ * lsp I: Line spectral frequencies
+ * a O: Predictor coefficients
+ *
+ * Function:
+ * Converts from the line spectral pairs (LSP) to LP coefficients,
+ * for a 10th order filter.
+ *
+ * Find the coefficients of F1(z) and F2(z)
+ * Multiply F1(z) by 1+z^{-1} and F2(z) by 1-z^{-1}
+ * A(z) = ( F1(z) + F2(z) ) / 2
+ *
+ * Returns:
+ * void
+ */
+static void Lsp_Az( Word32 lsp[], Word32 a[] )
+{
+ Word32 f1[6], f2[6];
+ Word32 T0, i, j;
+
+
+ Get_lsp_pol( &lsp[0], f1 );
+ Get_lsp_pol( &lsp[1], f2 );
+
+ for ( i = 5; i > 0; i-- ) {
+ f1[i] += f1[i - 1];
+ f2[i] -= f2[i - 1];
+ }
+ a[0] = 4096;
+
+ for ( i = 1, j = 10; i <= 5; i++, j-- ) {
+ T0 = f1[i] + f2[i];
+ a[i] = (Word16)(T0 >> 13); /* emulate fixed point bug */
+ if ( ( T0 & 4096 ) != 0 ) {
+ a[i]++;
+ }
+ T0 = f1[i] - f2[i];
+ a[j] = (Word16)(T0 >> 13); /* emulate fixed point bug */
+
+ if ( ( T0 & 4096 ) != 0 ) {
+ a[j]++;
+ }
+ }
+ return;
+}
+
+
+/*
+ * A_Refl
+ *
+ *
+ * Parameters:
+ * a I: Direct form coefficients
+ * refl O: Reflection coefficients
+ *
+ * Function:
+ * Converts from the direct form coefficients to reflection coefficients
+ *
+ * Returns:
+ * void
+ */
+static void A_Refl( Word32 a[], Word32 refl[] )
+{
+ /* local variables */
+ int normShift;
+ Word32 aState[M], bState[M];
+ Word32 normProd, acc, temp, mult, scale, i, j;
+
+
+ /* initialize states */
+ memcpy( aState, a, M <<2 );
+
+ /* backward Levinson recursion */
+ for ( i = M - 1; i >= 0; i-- ) {
+ if ( labs( aState[i] ) >= 4096 ) {
+ goto ExitRefl;
+ }
+ refl[i] = aState[i] << 3;
+ temp = ( refl[i] * refl[i] ) << 1;
+ acc = ( MAX_32 - temp );
+ normShift=0;
+ if (acc != 0){
+ temp = acc;
+ while (!(temp & 0x40000000))
+ {
+ normShift++;
+ temp = temp << 1;
+ }
+ }
+ else{
+ normShift = 0;
+ }
+ scale = 15 - normShift;
+ acc = ( acc << normShift );
+ temp = ( acc + ( Word32 )0x00008000L );
+
+ if ( temp > 0 ) {
+ normProd = temp >> 16;
+ mult = 0x20000000L / normProd;
+ }
+ else
+ mult = 16384;
+
+ for ( j = 0; j < i; j++ ) {
+ acc = aState[j] << 16;
+ acc -= ( refl[i] * aState[i - j - 1] ) << 1;
+ temp = ( acc + ( Word32 )0x00008000L ) >> 16;
+ temp = ( mult * temp ) << 1;
+
+ if ( scale > 0 ) {
+ if ( ( temp & ( ( Word32 )1 << ( scale - 1 ) ) ) != 0 ) {
+ temp = ( temp >> scale ) + 1;
+ }
+ else
+ temp = ( temp >> scale );
+ }
+ else
+ temp = ( temp >> scale );
+
+ if ( labs( temp ) > 32767 ) {
+ goto ExitRefl;
+ }
+ bState[j] = temp;
+ }
+ memcpy( aState, bState, i <<2 );
+ }
+ return;
+ExitRefl:
+ memset( refl, 0, M <<2 );
+}
+
+
+/*
+ * Log2_norm
+ *
+ *
+ * Parameters:
+ * x I: input value
+ * exp I: exponent
+ * exponent O: Integer part of Log2. (range: 0<=val<=30)
+ * fraction O: Fractional part of Log2. (range: 0<=val<1)
+ *
+ * Function:
+ * Computes log2
+ *
+ * Computes log2(L_x, exp), where L_x is positive and
+ * normalized, and exp is the normalisation exponent
+ * If L_x is negative or zero, the result is 0.
+ *
+ * The function Log2(L_x) is approximated by a table and linear
+ * interpolation. The following steps are used to compute Log2(L_x)
+ *
+ * exponent = 30-normExponent
+ * i = b25-b31 of L_x; 32<=i<=63 (because of normalization).
+ * a = b10-b24
+ * i -=32
+ * fraction = table[i]<<16 - (table[i] - table[i+1]) * a * 2
+ *
+ * Returns:
+ * void
+ */
+static void Log2_norm( Word32 x, Word32 exp, Word32 *exponent, Word32 *
+ fraction )
+{
+ Word32 y, i, a;
+
+
+ if ( x <= 0 ) {
+ *exponent = 0;
+ *fraction = 0;
+ return;
+ }
+
+ /* Extract b25-b31 */
+ i = x >> 25;
+ i = i - 32;
+
+ /* Extract b10-b24 of fraction */
+ a = x >> 9;
+ a = a & 0xFFFE; /* 2a */
+
+ /* fraction */
+ y = ( log2_table[i] << 16 ) - a * ( log2_table[i] - log2_table[i + 1] );
+ *fraction = y >> 16;
+ *exponent = 30 - exp;
+ return;
+}
+
+
+/*
+ * Log2
+ *
+ *
+ * Parameters:
+ * x I: input value
+ * exponent O: Integer part of Log2. (range: 0<=val<=30)
+ * fraction O: Fractional part of Log2. (range: 0<=val<1)
+ *
+ * Function:
+ * Computes log2(L_x)
+ * If x is negative or zero, the result is 0.
+ *
+ * Returns:
+ * void
+ */
+static void Log2( Word32 x, Word32 *exponent, Word32 *fraction )
+{
+ int tmp, exp=0;
+
+ if (x != 0){
+ tmp = x;
+ while (!((tmp & 0x80000000) ^ ((tmp & 0x40000000) << 1)))
+ {
+ exp++;
+ tmp = tmp << 1;
+ }
+ }
+ Log2_norm( x <<exp, exp, exponent, fraction );
+}
+
+
+/*
+ * Pow2
+ *
+ *
+ * Parameters:
+ * exponent I: Integer part. (range: 0<=val<=30)
+ * fraction I: Fractional part. (range: 0.0<=val<1.0)
+ *
+ * Function:
+ * pow(2.0, exponent.fraction)
+ *
+ * The function Pow2(L_x) is approximated by a table and linear interpolation.
+ *
+ * i = bit10-b15 of fraction, 0 <= i <= 31
+ * a = biT0-b9 of fraction
+ * x = table[i]<<16 - (table[i] - table[i+1]) * a * 2
+ * x = L_x >> (30-exponent) (with rounding)
+ *
+ * Returns:
+ * result (range: 0<=val<=0x7fffffff)
+ */
+static Word32 Pow2( Word32 exponent, Word32 fraction )
+{
+ Word32 i, a, tmp, x, exp;
+
+
+ /* Extract b10-b16 of fraction */
+ i = fraction >> 10;
+
+ /* Extract b0-b9 of fraction */
+ a = ( fraction << 5 ) & 0x7fff;
+
+ /* table[i] << 16 */
+ x = pow2_table[i] << 16;
+
+ /* table[i] - table[i+1] */
+ tmp = pow2_table[i] - pow2_table[i + 1];
+
+ /* L_x -= tmp*a*2 */
+ x -= ( tmp * a ) << 1;
+
+ if ( exponent >= -1 ) {
+ exp = ( 30 - exponent );
+
+ /* Rounding */
+ if ( ( x & ( ( Word32 )1 << ( exp - 1 ) ) ) != 0 ) {
+ x = ( x >> exp ) + 1;
+ }
+ else
+ x = x >> exp;
+ }
+ else
+ x = 0;
+ return( x );
+}
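+
+/*
+ * Editor's note: illustrative sketch, not part of the 3GPP reference source,
+ * compiled out. Pow2 inverts Log2 in this representation: Log2 splits the
+ * value into an integer exponent plus a Q15 fraction, and Pow2 rebuilds it.
+ * For example Log2(4096) gives exponent = 12, fraction = 0, and Pow2(12, 0)
+ * returns 4096; for non powers of two the round trip is exact only up to
+ * table and rounding error.
+ */
+#if 0
+static Word32 log2_pow2_roundtrip( Word32 x )
+{
+   Word32 exponent, fraction;
+
+   Log2( x, &exponent, &fraction );   /* log2(x) ~ exponent + fraction/32768 */
+   return Pow2( exponent, fraction ); /* ~ x, up to table/rounding error     */
+}
+#endif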
+
+
+/*
+ * Build_CN_code
+ *
+ *
+ * Parameters:
+ * seed B: Old CN generator shift register state
+ * cod O: Generated CN fixed codebook vector
+ *
+ * Function:
+ * Generate CN fixed codebook vector
+ *
+ * Returns:
+ * void
+ */
+static void Build_CN_code( Word32 *seed, Word32 cod[] )
+{
+ Word32 i, j, k;
+
+
+ memset( cod, 0, L_SUBFR <<2 );
+
+ for ( k = 0; k < 10; k++ ) {
+ i = pseudonoise( seed, 2 ); /* generate pulse position */
+ i = ( i * 20 ) >> 1;
+ i = ( i + k );
+ j = pseudonoise( seed, 1 ); /* generate sign */
+
+ if ( j > 0 ) {
+ cod[i] = 4096;
+ }
+ else {
+ cod[i] = -4096;
+ }
+ }
+ return;
+}
+
+
+/*
+ * Build_CN_param
+ *
+ *
+ * Parameters:
+ * seed B: Old CN generator shift register state
+ * nParam I: number of params
+ * paramSizeTable I: size of params
+ * parm O: CN Generated params
+ *
+ * Function:
+ * Generate parameters for comfort noise generation
+ *
+ * Returns:
+ * void
+ */
+static void Build_CN_param( Word16 *seed, enum Mode mode, Word16 parm[] )
+{
+ Word32 i;
+ const Word32 *p;
+
+
+ *seed = ( Word16 )( ( *seed * 31821 ) + 13849L );
+ p = &window_200_40[ * seed & 0x7F];
+
+ switch ( mode ) {
+ case MR122:
+ for ( i = 0; i < PRMNO_MR122; i++ ) {
+ parm[i] = ( Word16 )( *p++ & ~( 0xFFFF << bitno_MR122[i] ) );
+ }
+ break;
+
+ case MR102:
+ for ( i = 0; i < PRMNO_MR102; i++ ) {
+ parm[i] = ( Word16 )( *p++ & ~( 0xFFFF << bitno_MR102[i] ) );
+ }
+ break;
+
+ case MR795:
+ for ( i = 0; i < PRMNO_MR795; i++ ) {
+ parm[i] = ( Word16 )( *p++ & ~( 0xFFFF << bitno_MR795[i] ) );
+ }
+ break;
+
+ case MR74:
+ for ( i = 0; i < PRMNO_MR74; i++ ) {
+ parm[i] = ( Word16 )( *p++ & ~( 0xFFFF << bitno_MR74[i] ) );
+ }
+ break;
+
+ case MR67:
+ for ( i = 0; i < PRMNO_MR67; i++ ) {
+ parm[i] = ( Word16 )( *p++ & ~( 0xFFFF << bitno_MR67[i] ) );
+ }
+ break;
+
+ case MR59:
+ for ( i = 0; i < PRMNO_MR59; i++ ) {
+ parm[i] = ( Word16 )( *p++ & ~( 0xFFFF << bitno_MR59[i] ) );
+ }
+ break;
+
+ case MR515:
+ for ( i = 0; i < PRMNO_MR515; i++ ) {
+ parm[i] = ( Word16 )( *p++ & ~( 0xFFFF << bitno_MR515[i] ) );
+ }
+ break;
+
+ case MR475:
+ for ( i = 0; i < PRMNO_MR475; i++ ) {
+ parm[i] = ( Word16 )( *p++ & ~( 0xFFFF << bitno_MR475[i] ) );
+ }
+ break;
+ }
+}
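+
+/*
+ * Illustrative note (not part of the reference code): for every mode the
+ * masking above, parm[i] = *p++ & ~( 0xFFFF << bitno_XXX[i] ), keeps only
+ * the lowest bitno_XXX[i] bits of each word read from the seed-indexed
+ * table, so each comfort-noise parameter receives exactly as many
+ * pseudo-random bits as the corresponding speech parameter carries.
+ */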
+
+
+/*
+ * Syn_filt
+ *
+ *
+ * Parameters:
+ * a I: prediction coefficients [M+1]
+ * x I: input signal
+ * y O: output signal
+ * lg I: size of filtering
+ * mem B: memory associated with this filtering
+ * update I: 0=no update, 1=update of memory.
+ *
+ * Function:
+ * Perform synthesis filtering through 1/A(z).
+ *
+ * Returns:
+ *    1 if an overflow/saturation occurred during filtering, 0 otherwise
+ */
+static Word32 Syn_filt( Word32 a[], Word32 x[], Word32 y[], Word32 lg, Word32 mem[]
+ , Word32 update )
+{
+ Word32 tmp[50]; /* malloc is slow */
+ Word32 s, a0, overflow = 0;
+ Word32 *yy, *yy_limit;
+
+
+ /* Copy mem[] to yy[] */
+ memcpy( tmp, mem, 40 );
+ yy = tmp + M;
+ yy_limit = yy + lg;
+ a0 = a[0];
+
+ /* Do the filtering. */
+ while ( yy < yy_limit ) {
+
+ s = *x++ * a0;
+ s -= yy[-1] * a[1];
+ s -= yy[-2] * a[2];
+ s -= yy[-3] * a[3];
+ s -= yy[-4] * a[4];
+ s -= yy[-5] * a[5];
+ s -= yy[-6] * a[6];
+ s -= yy[-7] * a[7];
+ s -= yy[-8] * a[8];
+ s -= yy[-9] * a[9];
+ s -= yy[-10] * a[10];
+ if ( labs( s ) < 0x7ffffff )
+ *yy = ( s + 0x800L ) >> 12;
+ else if ( s > 0 ) {
+ *yy = 32767;
+ overflow = 1;
+ }
+ else {
+ *yy = -32768;
+ overflow = 1;
+ }
+ yy++;
+ }
+ memcpy( y, &tmp[M], lg <<2 );
+
+ /* Update of memory if update==1 */
+ if ( update ) {
+ memcpy( mem, &y[lg - M], 40 );
+ }
+ return overflow;
+}
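+
+/*
+ * Illustrative note (not part of the reference code): with a[] in Q12 the
+ * loop above implements the direct-form recursion
+ *
+ *    y[n] = ( a[0]*x[n] - sum_{j=1..10} a[j]*y[n-j] + 0x800 ) >> 12
+ *
+ * clipped to the 16-bit range [-32768, 32767] when the accumulator gets too
+ * large; the returned flag reports whether such clipping occurred.
+ */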
+
+/*
+ * Syn_filt_overflow
+ *
+ *
+ * Parameters:
+ * a I: prediction coefficients [M+1]
+ * x I: input signal
+ * y O: output signal
+ * lg I: size of filtering
+ * mem B: memory associated with this filtering
+ * update I: 0=no update, 1=update of memory.
+ *
+ * Function:
+ * Perform synthesis filtering through 1/A(z).
+ * Saturate after every multiplication.
+ * Returns:
+ * void
+ */
+static void Syn_filt_overflow( Word32 a[], Word32 x[], Word32 y[], Word32 lg, Word32 mem[]
+ , Word32 update )
+{
+ Word32 tmp[50]; /* malloc is slow */
+ Word32 i, j, s, a0;
+ Word32 *yy;
+
+
+ /* Copy mem[] to yy[] */
+ memcpy( tmp, mem, 40 );
+ yy = tmp + M;
+ a0 = a[0];
+
+ /* Do the filtering. */
+ for ( i = 0; i < lg; i++ ) {
+ s = x[i] * a0;
+
+ for ( j = 1; j <= M; j++ ) {
+ s -= a[j] * yy[ - j];
+ if (s > 1073741823){
+ s = 1073741823;
+ }
+ else if ( s < -1073741824) {
+ s = -1073741824;
+ }
+ }
+
+ if ( labs( s ) < 0x7FFE800 )
+ *yy = ( s + 0x800L ) >> 12;
+ else if ( s > 0 ) {
+ *yy = 32767;
+ }
+ else {
+ *yy = -32768;
+ }
+ yy++;
+ }
+ memcpy( y, &tmp[M], lg <<2 );
+
+ /* Update of memory if update==1 */
+ if ( update ) {
+ memcpy( mem, &y[lg - M], 40 );
+ }
+ return;
+}
+
+/*
+ * dtx_dec
+ *
+ *
+ * Parameters:
+ * st B: DTX state struct
+ * mem_syn I: AMR decoder state
+ * lsfState B: LSF state struct
+ * pred_state->past_qua_en O: table of past quantized energies
+ * pred_state->past_qua_en_MR122 O: table of past quantized energies MR122
+ *    averState->hangVar            O: hangover variable
+ *    averState->hangCount          O: hangover counter
+ * new_state I: new DTX state
+ * mode I: AMR mode
+ * parm I: vector of synthesis parameters
+ * synth O: synthesised speech
+ * A_t O: decoded LP filter in 4 subframes
+ *
+ * Function:
+ * DTX
+ *
+ * Returns:
+ * void
+ */
+static void dtx_dec( dtx_decState *st, Word32 *mem_syn, D_plsfState *lsfState,
+ gc_predState *pred_state, Cb_gain_averageState *averState, enum
+ DTXStateType new_state, enum Mode mode, Word16 parm[], Word32 synth[],
+ Word32 A_t[] )
+{
+ Word32 ex[L_SUBFR], acoeff[11], acoeff_variab[M + 1], lsp_int[M];
+ Word32 refl[M], lsf[M], lsf_int[M], lsf_int_variab[M], lsp_int_variab[M];
+ Word32 i, j, int_fac, log_en_int, pred_err, log_pg_e, log_pg_m, log_pg;
+ Word32 negative, lsf_mean, lsf_variab_index, lsf_variab_factor, ptr;
+ Word16 log_en_index, log_en_int_e, log_en_int_m, level, ma_pred_init,
+ tmp_int_length;
+
+
+ if ( ( st->dtxHangoverAdded != 0 ) & ( st->sid_frame != 0 ) ) {
+ /*
+ * sidFirst after dtx hangover period
+ * or sidUpd after dtxhangover
+ */
+ /* set log_en_adjust to correct value */
+ st->log_en_adjust = dtx_log_en_adjust[mode];
+ ptr = st->lsf_hist_ptr + M;
+
+ if ( ptr == 80 ) {
+ ptr = 0;
+ }
+ memcpy( &st->lsf_hist[ptr], &st->lsf_hist[st->lsf_hist_ptr], M <<2 );
+ ptr = st->log_en_hist_ptr + 1;
+
+ if ( ptr == DTX_HIST_SIZE ) {
+ ptr = 0;
+ }
+ st->log_en_hist[ptr] = st->log_en_hist[st->log_en_hist_ptr]; /* Q11 */
+
+ /*
+ * compute mean log energy and lsp
+ * from decoded signal (SID_FIRST)
+ */
+ st->log_en = 0;
+ memset( lsf, 0, M <<2 );
+
+ /* average energy and lsp */
+ for ( i = 0; i < DTX_HIST_SIZE; i++ ) {
+ st->log_en = st->log_en + ( st->log_en_hist[i] >> 3 );
+
+ for ( j = 0; j < M; j++ ) {
+ lsf[j] += st->lsf_hist[i * M + j];
+ }
+ }
+
+ for ( j = 0; j < M; j++ ) {
+ lsf[j] = lsf[j] >> 3; /* divide by 8 */
+ }
+ Lsf_lsp( lsf, st->lsp );
+
+ /*
+ * make log_en speech coder mode independent
+ * added again later before synthesis
+ */
+ st->log_en = st->log_en - st->log_en_adjust;
+
+ /* compute lsf variability vector */
+ memcpy( st->lsf_hist_mean, st->lsf_hist, 80 <<2 );
+
+ for ( i = 0; i < M; i++ ) {
+ lsf_mean = 0;
+
+ /* compute mean lsf */
+ for ( j = 0; j < 8; j++ ) {
+ lsf_mean += st->lsf_hist_mean[i + j * M];
+ }
+ lsf_mean = lsf_mean >> 3;
+
+ /*
+ * subtract mean and limit to within reasonable limits
+ * moreover the upper lsf's are attenuated
+ */
+ for ( j = 0; j < 8; j++ ) {
+ /* subtract mean */
+ st->lsf_hist_mean[i + j * M] = st->lsf_hist_mean[i + j * M] -
+ lsf_mean;
+
+ /* attenuate deviation from mean, especially for upper lsf's */
+ st->lsf_hist_mean[i + j * M] = ( st->lsf_hist_mean[i + j * M] *
+ lsf_hist_mean_scale[i] ) >> 15;
+
+ /* limit the deviation */
+ if ( st->lsf_hist_mean[i + j * M] < 0 ) {
+ negative = 1;
+ }
+ else {
+ negative = 0;
+ }
+ st->lsf_hist_mean[i + j * M] = labs( st->lsf_hist_mean[i + j * M] );
+
+ /* apply soft limit */
+ if ( st->lsf_hist_mean[i + j * M] > 655 ) {
+ st->lsf_hist_mean[i + j * M] = 655 + ( ( st->lsf_hist_mean[i + j
+ * M] - 655 ) >> 2 );
+ }
+
+ /* apply hard limit */
+ if ( st->lsf_hist_mean[i + j * M] > 1310 ) {
+ st->lsf_hist_mean[i + j * M] = 1310;
+ }
+
+ if ( negative != 0 ) {
+ st->lsf_hist_mean[i + j * M] = -st->lsf_hist_mean[i + j * M];
+ }
+ }
+ }
+ }
+
+ if ( st->sid_frame != 0 ) {
+ /*
+ * Set old SID parameters, always shift
+ * even if there is no new valid_data
+ */
+ memcpy( st->lsp_old, st->lsp, M <<2 );
+ st->old_log_en = st->log_en;
+
+ if ( st->valid_data != 0 ) /* new data available (no CRC) */ {
+         /* Compute the interpolation factor. Since the division only works
+          * for values of since_last_sid < 32, the interpolation is limited
+          * to 32 frames
+ */
+ tmp_int_length = st->since_last_sid;
+ st->since_last_sid = 0;
+
+ if ( tmp_int_length > 32 ) {
+ tmp_int_length = 32;
+ }
+
+ if ( tmp_int_length >= 2 ) {
+ st->true_sid_period_inv = 0x2000000 / ( tmp_int_length
+ << 10 );
+ }
+ else {
+            st->true_sid_period_inv = 16384; /* 0.5 in Q15 */
+ }
+ memcpy( lsfState->past_r_q, &past_rq_init[parm[0] * M], M <<2 );
+ D_plsf_3( lsfState, MRDTX, 0, &parm[1], st->lsp );
+
+ /* reset for next speech frame */
+ memset( lsfState->past_r_q, 0, M <<2 );
+ log_en_index = parm[4];
+
+ /* Q11 and divide by 4 */
+ st->log_en = ( Word16 )( log_en_index << 9 );
+
+ /* Subtract 2.5 in Q11 */
+ st->log_en = ( Word16 )( st->log_en - 5120 );
+
+ /* Index 0 is reserved for silence */
+ if ( log_en_index == 0 ) {
+ st->log_en = MIN_16;
+ }
+
+ /*
+ * no interpolation at startup after coder reset
+ * or when SID_UPD has been received right after SPEECH
+ */
+ if ( ( st->data_updated == 0 ) || ( st->dtxGlobalState == SPEECH ) ) {
+ memcpy( st->lsp_old, st->lsp, M <<2 );
+ st->old_log_en = st->log_en;
+ }
+ } /* endif valid_data */
+
+ /* initialize gain predictor memory of other modes */
+ ma_pred_init = ( Word16 )( ( st->log_en >> 1 ) - 9000 );
+
+ if ( ma_pred_init > 0 ) {
+ ma_pred_init = 0;
+ }
+
+ if ( ma_pred_init < - 14436 ) {
+ ma_pred_init = -14436;
+ }
+ pred_state->past_qua_en[0] = ma_pred_init;
+ pred_state->past_qua_en[1] = ma_pred_init;
+ pred_state->past_qua_en[2] = ma_pred_init;
+ pred_state->past_qua_en[3] = ma_pred_init;
+
+ /* past_qua_en for other modes than MR122 */
+ ma_pred_init = ( Word16 )( ( 5443*ma_pred_init ) >> 15 );
+
+ /* scale down by factor 20*log10(2) in Q15 */
+ pred_state->past_qua_en_MR122[0] = ma_pred_init;
+ pred_state->past_qua_en_MR122[1] = ma_pred_init;
+ pred_state->past_qua_en_MR122[2] = ma_pred_init;
+ pred_state->past_qua_en_MR122[3] = ma_pred_init;
+ } /* endif sid_frame */
+
+ /*
+ * CN generation
+ * recompute level adjustment factor Q11
+    * st->log_en_adjust = 0.9*st->log_en_adjust +
+    *                     0.1*dtx_log_en_adjust[mode]
+ */
+ st->log_en_adjust = ( Word16 )( ( ( st->log_en_adjust * 29491 ) >> 15 ) + ( (
+ ( dtx_log_en_adjust[mode] << 5 ) * 3277 ) >> 20 ) );
+
+ /* Interpolate SID info */
+ /* Q10 */
+ if ( st->since_last_sid > 30 )
+ int_fac = 32767;
+ else
+ int_fac = ( Word16 )( (st->since_last_sid + 1) << 10 );
+
+ /* Q10 * Q15 -> Q10 */
+ int_fac = ( int_fac * st->true_sid_period_inv ) >> 15;
+
+ /* Maximize to 1.0 in Q10 */
+ if ( int_fac > 1024 ) {
+ int_fac = 1024;
+ }
+
+ /* Q10 -> Q14 */
+ int_fac = ( Word16 )( int_fac << 4 );
+
+ /* Q14 * Q11->Q26 */
+ log_en_int = ( int_fac * st->log_en ) << 1;
+
+ for ( i = 0; i < M; i++ ) {
+ /* Q14 * Q15 -> Q14 */
+ lsp_int[i] = ( int_fac * st->lsp[i] ) >> 15;
+ }
+
+ /* 1-k in Q14 */
+ int_fac = 16384 - int_fac;
+
+ /* (Q14 * Q11 -> Q26) + Q26 -> Q26 */
+ log_en_int += ( int_fac * st->old_log_en ) << 1;
+
+ for ( i = 0; i < M; i++ ) {
+ /* Q14 + (Q14 * Q15 -> Q14) -> Q14 */
+ lsp_int[i] = lsp_int[i] + ( ( int_fac * st->lsp_old[i] ) >> 15 );
+
+ /* Q14 -> Q15 */
+ lsp_int[i] = lsp_int[i] << 1;
+ }
+
+ /* compute the amount of lsf variability */
+ /* -0.6 in Q12 */
+ lsf_variab_factor = st->log_pg_mean - 2457;
+
+ /* *0.3 Q12*Q15 -> Q12 */
+ lsf_variab_factor = 4096 - ( ( lsf_variab_factor * 9830 ) >> 15 );
+
+ /* limit to values between 0..1 in Q12 */
+ if ( lsf_variab_factor >= 4096 ) {
+ lsf_variab_factor = 32767;
+ }
+ else if ( lsf_variab_factor < 0 ) {
+ lsf_variab_factor = 0;
+ }
+ else
+ lsf_variab_factor = lsf_variab_factor << 3; /* -> Q15 */
+
+ /* get index of vector to do variability with */
+ lsf_variab_index = pseudonoise( &st->pn_seed_rx, 3 );
+
+ /* convert to lsf */
+ Lsp_lsf( lsp_int, lsf_int );
+
+ /* apply lsf variability */
+ memcpy( lsf_int_variab, lsf_int, M <<2 );
+
+ for ( i = 0; i < M; i++ ) {
+ lsf_int_variab[i] = lsf_int_variab[i] + ( ( lsf_variab_factor * st->
+ lsf_hist_mean[i + lsf_variab_index * M] ) >> 15 );
+ }
+
+ /* make sure that LSP's are ordered */
+ Reorder_lsf( lsf_int, LSF_GAP );
+ Reorder_lsf( lsf_int_variab, LSF_GAP );
+
+ /* copy lsf to speech decoders lsf state */
+ memcpy( lsfState->past_lsf_q, lsf_int, M <<2 );
+
+ /* convert to lsp */
+ Lsf_lsp( lsf_int, lsp_int );
+ Lsf_lsp( lsf_int_variab, lsp_int_variab );
+
+   /* Compute acoeffs (Q12). acoeff is used for level
+    * normalization and Post_Filter, acoeff_variab is
+    * used for the synthesis filter.
+    * By doing this we make sure that the level
+    * in high frequencies does not jump up and down
+ */
+ Lsp_Az( lsp_int, acoeff );
+ Lsp_Az( lsp_int_variab, acoeff_variab );
+
+ /* For use in Post_Filter */
+ memcpy( &A_t[0], acoeff, MP1 <<2 );
+ memcpy( &A_t[MP1], acoeff, MP1 <<2 );
+ memcpy( &A_t[MP1 <<1], acoeff, MP1 <<2 );
+ memcpy( &A_t[MP1 + MP1 + MP1], acoeff, MP1 <<2 );
+
+ /* Compute reflection coefficients Q15 */
+ A_Refl( &acoeff[1], refl );
+
+ /* Compute prediction error in Q15 */
+ /* 0.99997 in Q15 */
+ pred_err = MAX_16;
+
+ for ( i = 0; i < M; i++ ) {
+ pred_err = ( pred_err * ( MAX_16 - ( ( refl[i] * refl[i] ) >> 15 ) ) ) >>
+ 15;
+ }
+
+ /* compute logarithm of prediction gain */
+ Log2( pred_err, &log_pg_e, &log_pg_m );
+
+ /* convert exponent and mantissa to Word16 Q12 */
+ /* Q12 */
+ log_pg = ( log_pg_e - 15 ) << 12;
+ /* saturate */
+ if (log_pg < -32768) {
+ log_pg = -32768;
+ }
+ log_pg = ( -( log_pg + ( log_pg_m >> 3 ) ) ) >> 1;
+ st->log_pg_mean = ( Word16 )( ( ( 29491*st->log_pg_mean ) >> 15 ) + ( ( 3277
+ * log_pg ) >> 15 ) );
+
+ /* Compute interpolated log energy */
+ /* Q26 -> Q16 */
+ log_en_int = log_en_int >> 10;
+
+ /* Add 4 in Q16 */
+ log_en_int += 262144L;
+
+ /* subtract prediction gain */
+ log_en_int = log_en_int - ( log_pg << 4 );
+
+ /* adjust level to speech coder mode */
+ log_en_int += st->log_en_adjust << 5;
+ log_en_int_e = ( Word16 )( log_en_int >> 16 );
+ log_en_int_m = ( Word16 )( ( log_en_int - ( log_en_int_e << 16 ) ) >> 1 );
+
+ /* Q4 */
+ level = ( Word16 )( Pow2( log_en_int_e, log_en_int_m ) );
+
+ for ( i = 0; i < 4; i++ ) {
+ /* Compute innovation vector */
+ Build_CN_code( &st->pn_seed_rx, ex );
+
+ for ( j = 0; j < L_SUBFR; j++ ) {
+ ex[j] = ( level * ex[j] ) >> 15;
+ }
+
+ /* Synthesize */
+ Syn_filt( acoeff_variab, ex, &synth[i * L_SUBFR], L_SUBFR, mem_syn, 1 );
+ } /* next i */
+
+ /* reset codebook averaging variables */
+ averState->hangVar = 20;
+ averState->hangCount = 0;
+
+ if ( new_state == DTX_MUTE ) {
+ /*
+ * mute comfort noise as it has been quite a long time since
+ * last SID update was performed
+ */
+ Word32 num, denom;
+
+
+ tmp_int_length = st->since_last_sid;
+
+ if ( tmp_int_length > 32 ) {
+ tmp_int_length = 32;
+ }
+
+ if ( tmp_int_length == 1 ) {
+ st->true_sid_period_inv = MAX_16;
+ }
+ else {
+ num = 1024;
+ denom = ( tmp_int_length << 10 );
+ st->true_sid_period_inv = 0;
+
+ for ( i = 0; i < 15; i++ ) {
+ st->true_sid_period_inv <<= 1;
+ num <<= 1;
+
+ if ( num >= denom ) {
+ num = num - denom;
+ st->true_sid_period_inv += 1;
+ }
+ }
+ }
+ st->since_last_sid = 0;
+ memcpy( st->lsp_old, st->lsp, M << 2 );
+ st->old_log_en = st->log_en;
+
+ /* subtract 1/8 in Q11 i.e -6/8 dB */
+ st->log_en = st->log_en - 256;
+ if (st->log_en < -32768) st->log_en = -32768;
+ }
+
+ /*
+ * reset interpolation length timer
+ * if data has been updated.
+ */
+ if ( ( st->sid_frame != 0 ) & ( ( st->valid_data != 0 ) || ( ( st->valid_data
+ == 0 ) & ( st->dtxHangoverAdded != 0 ) ) ) ) {
+ st->since_last_sid = 0;
+ st->data_updated = 1;
+ }
+ return;
+}
+
+
+/*
+ * lsp_avg
+ *
+ *
+ * Parameters:
+ * st->lsp_meanSave B: LSP averages
+ * lsp I: LSPs
+ *
+ * Function:
+ * Calculate the LSP averages
+ *
+ * Returns:
+ * void
+ */
+static void lsp_avg( lsp_avgState *st, Word32 *lsp )
+{
+ Word32 i, tmp;
+
+
+ for ( i = 0; i < M; i++ ) {
+ /* mean = 0.84*mean */
+ tmp = ( st->lsp_meanSave[i] << 16 );
+ tmp -= ( EXPCONST * st->lsp_meanSave[i] ) << 1;
+
+ /* Add 0.16 of newest LSPs to mean */
+ tmp += ( EXPCONST * lsp[i] ) << 1;
+
+ /* Save means */
+ tmp += 0x00008000L;
+ st->lsp_meanSave[i] = tmp >> 16;
+ }
+ return;
+}
+
+
+/*
+ * Int_lpc_1and3
+ *
+ *
+ * Parameters:
+ * lsp_old I: LSP vector at the 4th subfr. of past frame [M]
+ * lsp_mid I: LSP vector at the 2nd subframe of present frame [M]
+ * lsp_new I: LSP vector at the 4th subframe of present frame [M]
+ * Az O: interpolated LP parameters in subframes 1 and 3
+ * [AZ_SIZE]
+ *
+ * Function:
+ * Interpolates the LSPs and converts to LPC parameters
+ * to get a different LP filter in each subframe.
+ *
+ * The 20 ms speech frame is divided into 4 subframes.
+ * The LSPs are quantized and transmitted at the 2nd and
+ * 4th subframes (twice per frame) and interpolated at the
+ * 1st and 3rd subframe.
+ *
+ * Returns:
+ * void
+ */
+static void Int_lpc_1and3( Word32 lsp_old[], Word32 lsp_mid[], Word32 lsp_new[],
+ Word32 Az[] )
+{
+ Word32 lsp[M];
+ Word32 i;
+
+
+ /* lsp[i] = lsp_mid[i] * 0.5 + lsp_old[i] * 0.5 */
+ for ( i = 0; i < 10; i++ ) {
+ lsp[i] = ( lsp_mid[i] >> 1 ) + ( lsp_old[i] >> 1 );
+ }
+
+ /* Subframe 1 */
+ Lsp_Az( lsp, Az );
+ Az += MP1;
+
+ /* Subframe 2 */
+ Lsp_Az( lsp_mid, Az );
+ Az += MP1;
+
+ for ( i = 0; i < 10; i++ ) {
+ lsp[i] = ( lsp_mid[i] >> 1 ) + ( lsp_new[i] >> 1 );
+ }
+
+ /* Subframe 3 */
+ Lsp_Az( lsp, Az );
+ Az += MP1;
+
+ /* Subframe 4 */
+ Lsp_Az( lsp_new, Az );
+ return;
+}
+
+
+/*
+ * Int_lpc_1to3
+ *
+ *
+ * Parameters:
+ * lsp_old I: LSP vector at the 4th subframe of past frame [M]
+ * lsp_new I: LSP vector at the 4th subframe of present frame [M]
+ * Az O: interpolated LP parameters in all subframes
+ * [AZ_SIZE]
+ *
+ * Function:
+ * Interpolates the LSPs and converts to LPC parameters to get a different
+ * LP filter in each subframe.
+ *
+ * The 20 ms speech frame is divided into 4 subframes.
+ * The LSPs are quantized and transmitted at the 4th
+ * subframes (once per frame) and interpolated at the
+ * 1st, 2nd and 3rd subframe.
+ *
+ * Returns:
+ * void
+ */
+static void Int_lpc_1to3( Word32 lsp_old[], Word32 lsp_new[], Word32 Az[] )
+{
+ Word32 lsp[M];
+ Word32 i;
+
+
+ for ( i = 0; i < 10; i++ ) {
+ lsp[i] = ( lsp_new[i] >> 2 ) + ( lsp_old[i] - ( lsp_old[i] >> 2 ) );
+ }
+
+ /* Subframe 1 */
+ Lsp_Az( lsp, Az );
+ Az += MP1;
+
+ for ( i = 0; i < 10; i++ ) {
+ lsp[i] = ( lsp_old[i] >> 1 ) + ( lsp_new[i] >> 1 );
+ }
+
+ /* Subframe 2 */
+ Lsp_Az( lsp, Az );
+ Az += MP1;
+
+ for ( i = 0; i < 10; i++ ) {
+ lsp[i] = ( lsp_old[i] >> 2 ) + ( lsp_new[i] - ( lsp_new[i] >> 2 ) );
+ }
+
+ /* Subframe 3 */
+ Lsp_Az( lsp, Az );
+ Az += MP1;
+
+ /* Subframe 4 */
+ Lsp_Az( lsp_new, Az );
+ return;
+}
+
+
+/*
+ * D_plsf_5
+ *
+ *
+ * Parameters:
+ *    st->past_lsf_q   I: Past dequantized LSFs
+ * st->past_r_q B: past quantized residual
+ * bfi B: bad frame indicator
+ *    indice           I: quantization indices of 5 submatrices, Q0
+ * lsp1_q O: quantized 1st LSP vector
+ * lsp2_q O: quantized 2nd LSP vector
+ *
+ * Function:
+ * Decodes the 2 sets of LSP parameters in a frame
+ * using the received quantization indices.
+ *
+ * Returns:
+ * void
+ */
+static void D_plsf_5( D_plsfState *st, Word16 bfi, Word16 *indice, Word32 *lsp1_q
+ , Word32 *lsp2_q )
+{
+ Word32 lsf1_r[M], lsf2_r[M], lsf1_q[M], lsf2_q[M];
+ Word32 i, temp1, temp2, sign;
+ const Word32 *p_dico;
+
+
+ /* if bad frame */
+ if ( bfi != 0 ) {
+ /* use the past LSFs slightly shifted towards their mean */
+ for ( i = 0; i < M; i += 2 ) {
+ /* lsfi_q[i] = ALPHA*st->past_lsf_q[i] + ONE_ALPHA*meanLsf[i]; */
+ lsf1_q[i] = ( ( st->past_lsf_q[i] * ALPHA_122 ) >> 15 ) + ( ( mean_lsf_5[i]
+ * ONE_ALPHA_122 ) >> 15 );
+ lsf1_q[i + 1] = ( ( st->past_lsf_q[i + 1] * ALPHA_122 ) >> 15 ) + ( (
+ mean_lsf_5[i + 1] * ONE_ALPHA_122 ) >> 15 );
+ }
+ memcpy( lsf2_q, lsf1_q, M <<2 );
+
+ /* estimate past quantized residual to be used in next frame */
+ for ( i = 0; i < M; i += 2 ) {
+         /* temp = meanLsf[i] + st->past_r_q[i] * LSP_PRED_FAC_MR122; */
+ temp1 = mean_lsf_5[i] + ( ( st->past_r_q[i] * LSP_PRED_FAC_MR122 ) >>
+ 15 );
+ temp2 = mean_lsf_5[i + 1] +( ( st->past_r_q[i + 1] *LSP_PRED_FAC_MR122
+ ) >> 15 );
+ st->past_r_q[i] = lsf2_q[i] - temp1;
+ st->past_r_q[i + 1] = lsf2_q[i + 1] -temp2;
+ }
+ }
+
+ /* if good LSFs received */
+ else {
+ /* decode prediction residuals from 5 received indices */
+ p_dico = &dico1_lsf_5[indice[0] << 2];
+ lsf1_r[0] = *p_dico++;
+ lsf1_r[1] = *p_dico++;
+ lsf2_r[0] = *p_dico++;
+ lsf2_r[1] = *p_dico++;
+ p_dico = &dico2_lsf_5[indice[1] << 2];
+ lsf1_r[2] = *p_dico++;
+ lsf1_r[3] = *p_dico++;
+ lsf2_r[2] = *p_dico++;
+ lsf2_r[3] = *p_dico++;
+ sign = ( Word16 )( indice[2] & 1 );
+ i = indice[2] >> 1;
+ p_dico = &dico3_lsf_5[i << 2];
+
+ if ( sign == 0 ) {
+ lsf1_r[4] = *p_dico++;
+ lsf1_r[5] = *p_dico++;
+ lsf2_r[4] = *p_dico++;
+ lsf2_r[5] = *p_dico++;
+ }
+ else {
+ lsf1_r[4] = ( Word16 )( -( *p_dico++ ) );
+ lsf1_r[5] = ( Word16 )( -( *p_dico++ ) );
+ lsf2_r[4] = ( Word16 )( -( *p_dico++ ) );
+ lsf2_r[5] = ( Word16 )( -( *p_dico++ ) );
+ }
+ p_dico = &dico4_lsf_5[( indice[3]<<2 )];
+ lsf1_r[6] = *p_dico++;
+ lsf1_r[7] = *p_dico++;
+ lsf2_r[6] = *p_dico++;
+ lsf2_r[7] = *p_dico++;
+ p_dico = &dico5_lsf_5[( indice[4]<<2 )];
+ lsf1_r[8] = *p_dico++;
+ lsf1_r[9] = *p_dico++;
+ lsf2_r[8] = *p_dico++;
+ lsf2_r[9] = *p_dico++;
+
+ /* Compute quantized LSFs and update the past quantized residual */
+ for ( i = 0; i < M; i++ ) {
+ temp1 = mean_lsf_5[i] + ( ( st->past_r_q[i] * LSP_PRED_FAC_MR122 ) >>
+ 15 );
+ lsf1_q[i] = lsf1_r[i] + temp1;
+ lsf2_q[i] = lsf2_r[i] + temp1;
+ st->past_r_q[i] = lsf2_r[i];
+ }
+ }
+
+ /* verification that LSFs have minimum distance of LSF_GAP Hz */
+ Reorder_lsf( lsf1_q, LSF_GAP );
+ Reorder_lsf( lsf2_q, LSF_GAP );
+ memcpy( st->past_lsf_q, lsf2_q, M <<2 );
+
+ /* convert LSFs to the cosine domain */
+ Lsf_lsp( lsf1_q, lsp1_q );
+ Lsf_lsp( lsf2_q, lsp2_q );
+ return;
+}
+
+
+/*
+ * Dec_lag3
+ *
+ *
+ * Parameters:
+ * index I: received pitch index
+ * t0_min I: minimum of search range
+ * t0_max I: maximum of search range
+ * i_subfr I: subframe flag
+ * T0_prev I: integer pitch delay of last subframe used
+ * in 2nd and 4th subframes
+ * T0 O: integer part of pitch lag
+ *    T0_frac     O: fractional part of pitch lag
+ *    flag4       I: flag for encoding with 4 bits
+ *
+ * Function:
+ * Decoding of fractional pitch lag with 1/3 resolution.
+ * Extract the integer and fraction parts of the pitch lag from
+ * the received adaptive codebook index.
+ *
+ * The fractional lag in 1st and 3rd subframes is encoded with 8 bits
+ * while that in 2nd and 4th subframes is relatively encoded with 4, 5
+ * and 6 bits depending on the mode.
+ *
+ * Returns:
+ * void
+ */
+static void Dec_lag3( Word32 index, Word32 t0_min, Word32 t0_max, Word32 i_subfr
+ , Word32 T0_prev, Word32 *T0, Word32 *T0_frac, Word32 flag4 )
+{
+ Word32 i, tmp_lag;
+
+
+ /* if 1st or 3rd subframe */
+ if ( i_subfr == 0 ) {
+ if ( index < 197 ) {
+ *T0 = ( ( ( index + 2 ) * 10923 ) >> 15 ) + 19;
+ i = *T0 + *T0 + *T0;
+ *T0_frac = ( index - i ) + 58;
+ }
+ else {
+ *T0 = index - 112;
+ *T0_frac = 0;
+ }
+ }
+
+ /* 2nd or 4th subframe */
+ else {
+ if ( flag4 == 0 ) {
+ /* 'normal' decoding: either with 5 or 6 bit resolution */
+ i = ( ( ( index + 2 ) * 10923 ) >> 15 ) - 1;
+ *T0 = i + t0_min;
+ i = i + i + i;
+ *T0_frac = ( index - 2 ) - i;
+ }
+ else {
+ /* decoding with 4 bit resolution */
+ tmp_lag = T0_prev;
+
+ if ( ( tmp_lag - t0_min ) > 5 )
+ tmp_lag = t0_min + 5;
+
+ if ( ( t0_max - tmp_lag ) > 4 )
+ tmp_lag = t0_max - 4;
+
+ if ( index < 4 ) {
+ i = ( tmp_lag - 5 );
+ *T0 = i + index;
+ *T0_frac = 0;
+ }
+ else {
+ if ( index < 12 ) {
+ i = ( ( ( index - 5 ) * 10923 ) >> 15 ) - 1;
+ *T0 = i + tmp_lag;
+ i = i + i + i;
+ *T0_frac = ( index - 9 ) - i;
+ }
+ else {
+ i = ( index - 12 ) + tmp_lag;
+ *T0 = i + 1;
+ *T0_frac = 0;
+ }
+ }
+ } /* end if (decoding with 4 bit resolution) */
+ }
+ return;
+}
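+
+/*
+ * Illustrative note (not part of the reference code): for a 1st/3rd subframe
+ * (i_subfr == 0) the branch above maps index 0, 1, 2, ... onto pitch lags
+ * 19 1/3, 19 2/3, 20, ... in steps of 1/3, e.g.
+ *
+ *    index == 0   =>   *T0 == 19, *T0_frac ==  1   (lag 19 1/3)
+ *    index == 1   =>   *T0 == 20, *T0_frac == -1   (lag 19 2/3)
+ */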
+
+
+/*
+ * Pred_lt_3or6_40
+ *
+ *
+ * Parameters:
+ * exc B: excitation buffer
+ * T0 I: integer pitch lag
+ * frac I: fraction of lag
+ * flag3 I: if set, upsampling rate = 3 (6 otherwise)
+ *
+ * Function:
+ * Compute the result of long term prediction with fractional
+ * interpolation of resolution 1/3 or 1/6. (Interpolated past excitation).
+ *
+ * Once the fractional pitch lag is determined,
+ * the adaptive codebook vector v(n) is computed by interpolating
+ * the past excitation signal u(n) at the given integer delay k
+ * and phase (fraction) :
+ *
+ * 9 9
+ * v(n) = SUM[ u(n-k-i) * b60(t+i*6) ] + SUM[ u(n-k+1+i) * b60(6-t+i*6) ],
+ * i=0 i=0
+ * n = 0, ...,39, t = 0, ...,5.
+ *
+ * The interpolation filter b60 is based on a Hamming windowed sin(x)/x
+ *    function truncated at ± 59 and padded with zeros at ± 60 (b60(60)=0).
+ * The filter has a cut-off frequency (-3 dB) at 3 600 Hz in
+ * the over-sampled domain.
+ *
+ * Returns:
+ * void
+ */
+static void Pred_lt_3or6_40( Word32 exc[], Word32 T0, Word32 frac, Word32 flag3 )
+{
+ Word32 s, i;
+ Word32 *x0, *x1, *x2;
+ const Word32 *c1, *c2;
+
+
+ x0 = &exc[ - T0];
+ frac = -frac;
+
+ if ( flag3 != 0 ) {
+ frac <<= 1; /* inter_3l[k] = inter6[2*k] -> k' = 2*k */
+ }
+
+ if ( frac < 0 ) {
+ frac += 6;
+ x0--;
+ }
+ c1 = &inter6[frac];
+ c2 = &inter6[6 - frac];
+
+ for ( i = 0; i < 40; i++ ) {
+ x1 = x0++;
+ x2 = x0;
+ s = x1[0] * c1[0];
+ s += x1[ - 1] * c1[6];
+ s += x1[ - 2] * c1[12];
+ s += x1[ - 3] * c1[18];
+ s += x1[ - 4] * c1[24];
+ s += x1[ - 5] * c1[30];
+ s += x1[ - 6] * c1[36];
+ s += x1[ - 7] * c1[42];
+ s += x1[ - 8] * c1[48];
+ s += x1[ - 9] * c1[54];
+ s += x2[0] * c2[0];
+ s += x2[1] * c2[6];
+ s += x2[2] * c2[12];
+ s += x2[3] * c2[18];
+ s += x2[4] * c2[24];
+ s += x2[5] * c2[30];
+ s += x2[6] * c2[36];
+ s += x2[7] * c2[42];
+ s += x2[8] * c2[48];
+ s += x2[9] * c2[54];
+ exc[i] = ( s + 0x4000 ) >> 15;
+
+ }
+}
+
+
+/*
+ * Dec_lag6
+ *
+ *
+ * Parameters:
+ * index I: received pitch index
+ * pit_min I: minimum pitch lag
+ * pit_max I: maximum pitch lag
+ * i_subfr I: subframe flag
+ * T0 B: integer part of pitch lag
+ * T0_frac O : fractional part of pitch lag
+ *
+ * Function:
+ * Decoding of fractional pitch lag with 1/6 resolution.
+ * Extract the integer and fraction parts of the pitch lag from
+ * the received adaptive codebook index.
+ *
+ * The fractional lag in 1st and 3rd subframes is encoded with 9 bits
+ * while that in 2nd and 4th subframes is relatively encoded with 6 bits.
+ * Note that in relative encoding only 61 values are used. If the
+ * decoder receives 61, 62, or 63 as the relative pitch index, it means
+ * that a transmission error occurred. In this case, the pitch lag from
+ * previous subframe (actually from previous frame) is used.
+ *
+ * Returns:
+ * void
+ */
+static void Dec_lag6( Word32 index, Word32 pit_min, Word32 pit_max, Word32
+ i_subfr, Word32 *T0, Word32 *T0_frac )
+{
+ Word32 t0_min, t0_max, i;
+
+
+ /* if 1st or 3rd subframe */
+ if ( i_subfr == 0 ) {
+ if ( index < 463 ) {
+ /* T0 = (index+5)/6 + 17 */
+ *T0 = ( index + 5 ) / 6 + 17;
+ i = *T0 + *T0 + *T0;
+
+ /* *T0_frac = index - T0*6 + 105 */
+ *T0_frac = ( index - ( i + i ) ) + 105;
+ }
+ else {
+ *T0 = index - 368;
+ *T0_frac = 0;
+ }
+ }
+
+ /* second or fourth subframe */
+ else {
+ /* find t0_min and t0_max for 2nd (or 4th) subframe */
+ t0_min = *T0 - 5;
+
+ if ( t0_min < pit_min ) {
+ t0_min = pit_min;
+ }
+ t0_max = t0_min + 9;
+
+ if ( t0_max > pit_max ) {
+ t0_max = pit_max;
+ t0_min = t0_max - 9;
+ }
+
+ /* i = (index+5)/6 - 1 */
+ i = ( index + 5 ) / 6 - 1;
+ *T0 = i + t0_min;
+ i = i + i + i;
+ *T0_frac = ( index - 3 ) - ( i + i );
+ }
+}
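+
+/*
+ * Illustrative note (not part of the reference code): for a 1st/3rd subframe
+ * (i_subfr == 0) the branch above maps index 0, 1, 2, ... onto pitch lags
+ * starting at 17 3/6 in steps of 1/6, e.g.
+ *
+ *    index == 0   =>   *T0 == 17, *T0_frac ==  3   (lag 17 3/6)
+ *    index == 1   =>   *T0 == 18, *T0_frac == -2   (lag 17 4/6)
+ */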
+
+
+/*
+ * decompress10
+ *
+ *
+ * Parameters:
+ * MSBs I: MSB part of the index
+ * LSBs I: LSB part of the index
+ * index1 I: index for first pos in posIndex
+ * index2 I: index for second pos in posIndex
+ * index3 I: index for third pos in posIndex
+ * pos_indx O: position of 3 pulses (decompressed)
+ * Function:
+ * Decompression of the linear codeword
+ *
+ * Returns:
+ * void
+ */
+static void decompress10( Word32 MSBs, Word32 LSBs, Word32 index1, Word32 index2
+ , Word32 index3, Word32 pos_indx[] )
+{
+ Word32 divMSB;
+
+ if (MSBs > 124)
+ {
+ MSBs = 124;
+ }
+ /*
+ * pos_indx[index1] = ((MSBs-25*(MSBs/25))%5)*2 + (LSBs-4*(LSBs/4))%2;
+ * pos_indx[index2] = ((MSBs-25*(MSBs/25))/5)*2 + (LSBs-4*(LSBs/4))/2;
+ * pos_indx[index3] = (MSBs/25)*2 + LSBs/4;
+ */
+ divMSB = MSBs / 25;
+ pos_indx[index1] = ( ( ( MSBs - 25 * ( divMSB ) ) % 5 ) << 1 ) + ( LSBs & 0x1
+ );
+ pos_indx[index2] = ( ( ( MSBs - 25 * ( divMSB ) ) / 5 ) << 1 ) + ( ( LSBs &
+ 0x2 ) >> 1 );
+ pos_indx[index3] = ( divMSB << 1 ) + ( LSBs >> 2 );
+ return;
+}
+
+
+/*
+ * decompress_codewords
+ *
+ *
+ * Parameters:
+ * indx I: position of 8 pulses (compressed)
+ * pos_indx O: position index of 8 pulses (position only)
+ *
+ * Function:
+ *    Decompression of the linear codewords to 4+3 indices;
+ *    one bit from each pulse is made robust to errors by
+ * minimizing the phase shift of a bit error.
+ *
+ * i0,i4,i1 => one index (7+3) bits, 3 LSBs more robust
+ * i2,i6,i5 => one index (7+3) bits, 3 LSBs more robust
+ *    i3,i7    => one index (5+2) bits, 2-3 LSBs more robust
+ *
+ * Returns:
+ * void
+ */
+static void decompress_codewords( Word16 indx[], Word32 pos_indx[] )
+{
+ Word32 ia, ib, MSBs, LSBs, MSBs0_24, tmp;
+
+
+ /*
+ * First index: 10x10x10 -> 2x5x2x5x2x5-> 125x2x2x2 -> 7+1x3 bits
+ * MSBs = indx[NB_TRACK]/8;
+ * LSBs = indx[NB_TRACK]%8;
+ */
+ MSBs = *indx >> 3;
+ LSBs = *indx & 0x7;
+ decompress10( MSBs, LSBs, 0, 4, 1, pos_indx );
+
+ /*
+ * Second index: 10x10x10 -> 2x5x2x5x2x5-> 125x2x2x2 -> 7+1x3 bits
+ * MSBs = indx[NB_TRACK+1]/8;
+ * LSBs = indx[NB_TRACK+1]%8;
+ */
+ MSBs = indx[1] >> 3;
+ LSBs = indx[1] & 0x7;
+ decompress10( MSBs, LSBs, 2, 6, 5, pos_indx );
+
+ /*
+ * Third index: 10x10 -> 2x5x2x5-> 25x2x2 -> 5+1x2 bits
+ * MSBs = indx[NB_TRACK+2]/4;
+ * LSBs = indx[NB_TRACK+2]%4;
+ * MSBs0_24 = (MSBs*25+12)/32;
+ * if ((MSBs0_24/5)%2==1)
+ * pos_indx[3] = (4-(MSBs0_24%5))*2 + LSBs%2;
+ * else
+ * pos_indx[3] = (MSBs0_24%5)*2 + LSBs%2;
+ * pos_indx[7] = (MSBs0_24/5)*2 + LSBs/2;
+ */
+ MSBs = indx[2] >> 2;
+ LSBs = indx[2] & 0x3;
+ MSBs0_24 = ( ( ( MSBs * 25 ) + 12 ) >> 5 );
+ tmp = ( MSBs0_24 * 6554 ) >> 15;
+ ia = tmp & 0x1;
+ ib = ( MSBs0_24 - ( tmp * 5 ) );
+
+ if ( ia == 1 ) {
+ ib = 4 - ib;
+ }
+ pos_indx[3] = ( ib << 1 ) + ( LSBs & 0x1 );
+ pos_indx[7] = ( tmp << 1 ) + ( LSBs >> 1 );
+}
+
+
+/*
+ * decode_2i40_9bits
+ *
+ *
+ * Parameters:
+ * subNr I: subframe number
+ * sign I: signs of 2 pulses
+ * index I: Positions of the 2 pulses
+ * cod O: algebraic (fixed) codebook excitation
+ *
+ * Function:
+ * Algebraic codebook decoder
+ *
+ * Returns:
+ * void
+ */
+static void decode_2i40_9bits( Word32 subNr, Word32 sign, Word32 index, Word32
+ cod[] )
+{
+ Word32 pos[2];
+ Word32 i, j, k;
+
+
+ /* Decode the positions */
+ /* table bit is the MSB */
+ j = ( index & 64 ) >> 6;
+ i = index & 7;
+
+ /* pos0 =i*5+startPos[j*8+subNr*2] */
+ i = ( i + ( i << 2 ) );
+ k = startPos[( j <<3 )+( subNr << 1 )];
+ pos[0] = i + k;
+ index = index >> 3;
+ i = index & 7;
+
+ /* pos1 =i*5+startPos[j*8+subNr*2+1] */
+ i = ( i + ( i << 2 ) );
+ k = startPos[( ( j <<3 )+ ( subNr <<1 ) ) + 1];
+ pos[1] = ( Word16 )( i + k );
+
+ /* decode the signs and build the codeword */
+ memset( cod, 0, L_SUBFR <<2 );
+
+ for ( j = 0; j < 2; j++ ) {
+ i = sign & 1;
+ sign = sign >> 1;
+
+ if ( i != 0 ) {
+ cod[pos[j]] = 8191; /* +1.0 */
+ }
+ else {
+ cod[pos[j]] = -8192; /* -1.0 */
+ }
+ }
+ return;
+}
+
+
+/*
+ * decode_2i40_11bits
+ *
+ *
+ * Parameters:
+ * sign I: signs of 2 pulses
+ * index I: Positions of the 2 pulses
+ * cod O: algebraic (fixed) codebook excitation
+ *
+ * Function:
+ * Algebraic codebook decoder
+ *
+ * Returns:
+ * void
+ */
+static void decode_2i40_11bits( Word32 sign, Word32 index, Word32 cod[] )
+{
+ Word32 pos[2];
+ Word32 i, j;
+
+
+ /* Decode the positions */
+ j = index & 1;
+ index = index >> 1;
+ i = index & 7;
+
+ /* pos0 =i*5+1+j*2 */
+ i = ( i + ( i << 2 ) );
+ i = ( i + 1 );
+ j = ( j << 1 );
+ pos[0] = i + j;
+ index = index >> 3;
+ j = index & 3;
+ index = index >> 2;
+ i = index & 7;
+
+ if ( j == 3 ) {
+ /* pos1 =i*5+4 */
+ i = ( i + ( i << 2 ) );
+ pos[1] = i + 4;
+ }
+ else {
+ /* pos1 =i*5+j */
+ i = ( i + ( i << 2 ) );
+ pos[1] = i + j;
+ }
+
+ /* decode the signs and build the codeword */
+ memset( cod, 0, L_SUBFR <<2 );
+
+ for ( j = 0; j < 2; j++ ) {
+ i = sign & 1;
+ sign = sign >> 1;
+
+ if ( i != 0 ) {
+ cod[pos[j]] = 8191; /* +1.0 */
+ }
+ else {
+ cod[pos[j]] = -8192; /* -1.0 */
+ }
+ }
+ return;
+}
+
+
+/*
+ * decode_3i40_14bits
+ *
+ *
+ * Parameters:
+ * sign I: signs of 3 pulses
+ * index I: Positions of the 3 pulses
+ * cod O: algebraic (fixed) codebook excitation
+ *
+ * Function:
+ * Algebraic codebook decoder
+ *
+ * Returns:
+ * void
+ */
+static void decode_3i40_14bits( Word32 sign, Word32 index, Word32 cod[] )
+{
+ Word32 pos[3];
+ Word32 i, j;
+
+
+ /* Decode the positions */
+ i = index & 7;
+
+ /* pos0 =i*5 */
+ pos[0] = i + ( i << 2 );
+ index = index >> 3;
+ j = index & 1;
+ index = index >> 1;
+ i = index & 7;
+
+ /* pos1 =i*5+1+j*2 */
+ i = ( i + ( i << 2 ) );
+ i = ( i + 1 );
+ j = ( j << 1 );
+ pos[1] = i + j;
+ index = index >> 3;
+ j = index & 1;
+ index = index >> 1;
+ i = index & 7;
+
+ /* pos2 =i*5+2+j*2 */
+ i = ( i + ( i << 2 ) );
+ i = ( i + 2 );
+ j = ( j << 1 );
+ pos[2] = i + j;
+
+ /* decode the signs and build the codeword */
+ memset( cod, 0, L_SUBFR <<2 );
+
+ for ( j = 0; j < 3; j++ ) {
+ i = sign & 1;
+ sign = sign >> 1;
+
+ if ( i > 0 ) {
+ cod[pos[j]] = 8191; /* +1.0 */
+ }
+ else {
+ cod[pos[j]] = -8192; /* -1.0 */
+ }
+ }
+ return;
+}
+
+
+/*
+ * decode_4i40_17bits
+ *
+ *
+ * Parameters:
+ * sign I: signs of 4 pulses
+ * index I: Positions of the 4 pulses
+ * cod O: algebraic (fixed) codebook excitation
+ *
+ * Function:
+ * Algebraic codebook decoder
+ *
+ * Returns:
+ * void
+ */
+static void decode_4i40_17bits( Word32 sign, Word32 index, Word32 cod[] )
+{
+ Word32 pos[4];
+ Word32 i, j;
+
+
+ /* Decode the positions */
+ i = index & 7;
+ i = dgray[i];
+
+ /* pos0 =i*5 */
+ pos[0] = i + ( i << 2 );
+ index = index >> 3;
+ i = index & 7;
+ i = dgray[i];
+
+ /* pos1 =i*5+1 */
+ i = ( i + ( i << 2 ) );
+ pos[1] = i + 1;
+ index = index >> 3;
+ i = index & 7;
+ i = dgray[i];
+
+   /* pos2 =i*5+2 */
+ i = ( i + ( i << 2 ) );
+ pos[2] = i + 2;
+ index = index >> 3;
+ j = index & 1;
+ index = index >> 1;
+ i = index & 7;
+ i = dgray[i];
+
+ /* pos3 =i*5+3+j */
+ i = ( i + ( i << 2 ) );
+ i = ( i + 3 );
+ pos[3] = i + j;
+
+ /* decode the signs and build the codeword */
+ memset( cod, 0, L_SUBFR <<2 );
+
+ for ( j = 0; j < 4; j++ ) {
+ i = sign & 1;
+ sign = sign >> 1;
+
+ if ( i != 0 ) {
+ cod[pos[j]] = 8191;
+ }
+ else {
+ cod[pos[j]] = -8192;
+ }
+ }
+ return;
+}
+
+
+/*
+ * decode_8i40_31bits
+ *
+ *
+ * Parameters:
+ * index I: index of 8 pulses (sign+position)
+ * cod O: algebraic (fixed) codebook excitation
+ *
+ * Function:
+ * Algebraic codebook decoder
+ *
+ * Returns:
+ * void
+ */
+static void decode_8i40_31bits( Word16 index[], Word32 cod[] )
+{
+ Word32 linear_codewords[8];
+ Word32 i, j, pos1, pos2, sign;
+
+
+ memset( cod, 0, L_CODE <<2 );
+ decompress_codewords( &index[NB_TRACK_MR102], linear_codewords );
+
+ /* decode the positions and signs of pulses and build the codeword */
+ for ( j = 0; j < NB_TRACK_MR102; j++ ) {
+ /* compute index i */
+ i = linear_codewords[j];
+ i <<= 2;
+
+ /* position of pulse "j" */
+ pos1 = i + j;
+
+ if ( index[j] == 0 ) {
+ sign = POS_CODE; /* +1.0 */
+ }
+ else {
+ sign = -NEG_CODE; /* -1.0 */
+ }
+
+ /* compute index i */
+ i = linear_codewords[j + 4];
+ i = i << 2;
+
+ /* position of pulse "j+4" */
+ pos2 = i + j;
+ cod[pos1] = sign;
+
+ if ( pos2 < pos1 ) {
+ sign = -( sign );
+ }
+ cod[pos2] = cod[pos2] + sign;
+ }
+ return;
+}
+
+
+/*
+ * decode_10i40_35bits
+ *
+ *
+ * Parameters:
+ * index I: index of 10 pulses (sign+position)
+ * cod O: algebraic (fixed) codebook excitation
+ *
+ * Function:
+ * Algebraic codebook decoder
+ *
+ * Returns:
+ * void
+ */
+static void decode_10i40_35bits( Word16 index[], Word32 cod[] )
+{
+ Word32 i, j, pos1, pos2, sign, tmp;
+
+
+ memset( cod, 0, L_CODE <<2 );
+
+ /* decode the positions and signs of pulses and build the codeword */
+ for ( j = 0; j < 5; j++ ) {
+ /* compute index i */
+ tmp = index[j];
+ i = tmp & 7;
+ i = dgray[i];
+ i = ( i * 5 );
+
+ /* position of pulse "j" */
+ pos1 = ( i + j );
+ i = ( tmp >> 3 ) & 1;
+
+ if ( i == 0 ) {
+ sign = 4096; /* +1.0 */
+ }
+ else {
+ sign = -4096; /* -1.0 */
+ }
+
+ /* compute index i */
+ i = index[j + 5] & 7;
+ i = dgray[i];
+ i = i * 5;
+
+ /* position of pulse "j+5" */
+ pos2 = ( i + j );
+ cod[pos1] = sign;
+
+ if ( pos2 < pos1 ) {
+ sign = -( sign );
+ }
+ cod[pos2] = cod[pos2] + sign;
+ }
+ return;
+}
+
+
+/*
+ * gmed_n
+ *
+ *
+ * Parameters:
+ * ind I: values
+ * n I: The number of gains (odd)
+ *
+ * Function:
+ * Calculates N-point median.
+ *
+ * Returns:
+ * index of the median value
+ */
+static Word32 gmed_n( Word32 ind[], Word32 n )
+{
+ Word32 tmp[NMAX], tmp2[NMAX];
+ Word32 max, medianIndex, i, j, ix = 0;
+
+
+ for ( i = 0; i < n; i++ ) {
+ tmp2[i] = ind[i];
+ }
+
+ for ( i = 0; i < n; i++ ) {
+ max = -32767;
+
+ for ( j = 0; j < n; j++ ) {
+ if ( tmp2[j] >= max ) {
+ max = tmp2[j];
+ ix = j;
+ }
+ }
+ tmp2[ix] = -32768;
+ tmp[i] = ix;
+ }
+ medianIndex = tmp[( n >>1 )];
+ return( ind[medianIndex] );
+}
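+
+/*
+ * Illustrative note (not part of the reference code): the selection sort above
+ * records the descending index order of the values, so the middle entry of
+ * tmp[] indexes the median of the original array, e.g.
+ *
+ *    Word32 g[3] = { 9, 3, 7 };
+ *    gmed_n( g, 3 );          => returns 7, the median
+ */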
+
+
+/*
+ * ec_gain_pitch
+ *
+ *
+ * Parameters:
+ * st->pbuf I: last five gains
+ * st->past_gain_pit I: past gain
+ * state I: state of the state machine
+ * gain_pitch O: pitch gain
+ *
+ * Function:
+ *    Calculates the pitch gain from previous values.
+ *
+ * Returns:
+ * void
+ */
+static void ec_gain_pitch( ec_gain_pitchState *st, Word16 state, Word32 *
+ gain_pitch )
+{
+ Word32 tmp;
+
+
+ /* calculate median of last five gains */
+ tmp = gmed_n( st->pbuf, 5 );
+
+ /* new gain = minimum(median, past_gain) * pdown[state] */
+ if ( tmp > st->past_gain_pit ) {
+ tmp = st->past_gain_pit;
+ }
+ *gain_pitch = ( tmp * pdown[state] ) >> 15;
+}
+
+
+/*
+ * d_gain_pitch
+ *
+ *
+ * Parameters:
+ * mode I: AMR mode
+ * index I: index of quantization
+ *
+ * Function:
+ * Decodes the pitch gain using the received index
+ *
+ * Returns:
+ * gain
+ */
+static Word32 d_gain_pitch( enum Mode mode, Word32 index )
+{
+ Word32 gain;
+
+
+ if ( mode == MR122 ) {
+ /* clear 2 LSBits */
+ gain = ( qua_gain_pitch[index] >> 2 ) << 2;
+ }
+ else {
+ gain = qua_gain_pitch[index];
+ }
+ return gain;
+}
+
+
+/*
+ * ec_gain_pitch_update
+ *
+ *
+ * Parameters:
+ * st->prev_gp B: previous pitch gain
+ * st->past_gain_pit O: past gain
+ * st->pbuf B: past gain buffer
+ * bfi I: bad frame indicator
+ * prev_bf I: previous frame was bad
+ * gain_pitch B: pitch gain
+ *
+ * Function:
+ * Update the pitch gain concealment state
+ * Limit gain_pitch if the previous frame was bad
+ *
+ * Returns:
+ *    void
+ */
+static void ec_gain_pitch_update( ec_gain_pitchState *st, Word32 bfi,
+ Word32 prev_bf, Word32 *gain_pitch )
+{
+ if ( bfi == 0 ) {
+ if ( prev_bf != 0 ) {
+ if ( *gain_pitch > st->prev_gp ) {
+ *gain_pitch = st->prev_gp;
+ }
+ }
+ st->prev_gp = *gain_pitch;
+ }
+ st->past_gain_pit = *gain_pitch;
+
+ /* if (st->past_gain_pit > 1.0) */
+ if ( st->past_gain_pit > 16384 ) {
+ st->past_gain_pit = 16384;
+ }
+ st->pbuf[0] = st->pbuf[1];
+ st->pbuf[1] = st->pbuf[2];
+ st->pbuf[2] = st->pbuf[3];
+ st->pbuf[3] = st->pbuf[4];
+ st->pbuf[4] = st->past_gain_pit;
+}
+
+
+/*
+ * gc_pred (366)
+ *
+ *
+ * Parameters:
+ * st->past_qua_en I: MA predictor
+ * st->past_qua_en_MR122 I: MA predictor MR122
+ * mode I: AMR mode
+ * code I: innovative codebook vector
+ * exp_gcode0 O: predicted gain factor (exponent)
+ * frac_gcode0 O: predicted gain factor (fraction)
+ * exp_en I: innovation energy (MR795) (exponent)
+ * frac_en I: innovation energy (MR795) (fraction)
+ *
+ * Function:
+ * MA prediction of the innovation energy
+ *
+ * Mean removed innovation energy (dB) in subframe n
+ * N-1
+ *            E(n) = 10*log(gc*gc * SUM[code(i) * code(i)]/N) - EMean
+ * i=0
+ * N=40
+ *
+ * Mean innovation energy (dB)
+ * N-1
+ *            Ei(n) = 10*log(SUM[code(i) * code(i)]/N)
+ * i=0
+ *
+ * Predicted energy
+ * 4
+ * Ep(n) = SUM[b(i) * R(n-i)]
+ * i=1
+ * b = [0.68 0.58 0.34 0.19]
+ * R(k) is quantified prediction error at subframe k
+ *
+ * E_Mean = 36 dB (MR122)
+ *
+ * Predicted gain gc is found by
+ *
+ * gc = POW[10, 0.05 * (Ep(n) + EMean - Ei)]
+ *
+ * Returns:
+ * void
+ */
+static void gc_pred( gc_predState *st, enum Mode mode, Word32 *code, Word32 *
+ exp_gcode0, Word32 *frac_gcode0, Word32 *exp_en, Word32 *frac_en )
+{
+ Word32 exp, frac, ener_code = 0, i = 0;
+
+
+ /* energy of code:
+ * ener_code = sum(code[i]^2)
+ */
+ while ( i < L_SUBFR ) {
+ ener_code += code[i] * code[i];
+ i++;
+ }
+
+ if ( ( 0x3fffffff <= ener_code ) | ( ener_code < 0 ) )
+ ener_code = MAX_32;
+ else
+ ener_code <<= 1;
+
+ if ( mode == MR122 ) {
+ Word32 ener;
+
+
+ /* ener_code = ener_code / lcode; lcode = 40; 1/40 = 26214 Q20 */
+ ener_code = ( ( ener_code + 0x00008000L ) >> 16 ) * 52428;
+
+ /* Q9 * Q20 -> Q30 */
+ /* energy of code:
+ * ener_code(Q17) = 10 * Log10(energy) / constant
+ * = 1/2 * Log2(energy)
+ * constant = 20*Log10(2)
+ */
+ /* ener_code = 1/2 * Log2(ener_code); Note: Log2=log2+30 */
+ Log2( ener_code, &exp, &frac );
+ ener_code = ( ( exp - 30 ) << 16 ) + ( frac << 1 );
+
+ /* Q16 for log(), ->Q17 for 1/2 log() */
+ /*
+ * predicted energy:
+ * ener(Q24) = (Emean + sum{pred[i]*pastEn[i]})/constant
+ * = MEAN_ENER + sum(pred[i]*past_qua_en[i])
+ * constant = 20*Log10(2)
+ */
+ ener = 0;
+ i = 0;
+
+ while ( i < 4 ) {
+ ener += st->past_qua_en_MR122[i] * pred_MR122[i];
+ i++;
+ }
+ ener <<= 1;
+ ener += MEAN_ENER_MR122;
+
+ /*
+ * predicted codebook gain
+
+ * gc0 = Pow10( (ener*constant - ener_code*constant) / 20 )
+ * = Pow2(ener-ener_code)
+ * = Pow2(int(d)+frac(d))
+ */
+ ener = ( ener - ener_code ) >> 1; /* Q16 */
+ *exp_gcode0 = ener >> 16;
+ *frac_gcode0 = ( ener >> 1 ) - ( *exp_gcode0 << 15 );
+ }
+
+ /* all modes except 12.2 */
+ else {
+ Word32 tmp, gcode0;
+ int exp_code;
+
+
+ /*
+       * Compute: meansEner - 10log10(ener_code / L_SUBFR)
+ */
+ exp_code=0;
+ if (ener_code != 0){
+ while (!(ener_code & 0x40000000))
+ {
+ exp_code++;
+ ener_code = ener_code << 1;
+ }
+ }
+
+ /* Log2 = log2 + 27 */
+ Log2_norm( ener_code, exp_code, &exp, &frac );
+
+ /* fact = 10/log2(10) = 3.01 = 24660 Q13 */
+ /* Q0.Q15 * Q13 -> Q14 */
+ tmp = ( exp * ( -49320 ) ) + ( ( ( frac * ( -24660 ) ) >> 15 ) << 1 );
+
+ /*
+ * tmp = meansEner - 10log10(ener_code/L_SUBFR)
+ * = meansEner - 10log10(ener_code) + 10log10(L_SUBFR)
+ * = K - fact * Log2(ener_code)
+ * = K - fact * log2(ener_code) - fact*27
+ *
+ * ==> K = meansEner + fact*27 + 10log10(L_SUBFR)
+ *
+ * meansEner = 33 = 540672 Q14 (MR475, MR515, MR59)
+ * meansEner = 28.75 = 471040 Q14 (MR67)
+ * meansEner = 30 = 491520 Q14 (MR74)
+ * meansEner = 36 = 589824 Q14 (MR795)
+ * meansEner = 33 = 540672 Q14 (MR102)
+ * 10log10(L_SUBFR) = 16.02 = 262481.51 Q14
+ * fact * 27 = 1331640 Q14
+ * -----------------------------------------
+ * (MR475, MR515, MR59) K = 2134793.51 Q14 ~= 16678 * 64 * 2
+ * (MR67) K = 2065161.51 Q14 ~= 32268 * 32 * 2
+ * (MR74) K = 2085641.51 Q14 ~= 32588 * 32 * 2
+ * (MR795) K = 2183945.51 Q14 ~= 17062 * 64 * 2
+ * (MR102) K = 2134793.51 Q14 ~= 16678 * 64 * 2
+ */
+ if ( mode == MR102 ) {
+ /* mean = 33 dB */
+ tmp += 2134784; /* Q14 */
+ }
+ else if ( mode == MR795 ) {
+ /* mean = 36 dB */
+ tmp += 2183936; /* Q14 */
+
+ /*
+ * ener_code = <xn xn> * 2^27*2^exp_code
+ * frac_en = ener_code / 2^16
+ * = <xn xn> * 2^11*2^exp_code
+ * <xn xn> = <xn xn>*2^11*2^exp * 2^exp_en
+ * := frac_en * 2^exp_en
+ *
+ * ==> exp_en = -11-exp_code;
+ */
+ *frac_en = ener_code >> 16;
+ *exp_en = -11 - exp_code;
+ }
+ else if ( mode == MR74 ) {
+ /* mean = 30 dB */
+ tmp += 2085632; /* Q14 */
+ }
+ else if ( mode == MR67 ) {
+ /* mean = 28.75 dB */
+ tmp += 2065152; /* Q14 */
+ }
+ else /* MR59, MR515, MR475 */ {
+ /* mean = 33 dB */
+ tmp += 2134784; /* Q14 */
+ }
+
+ /*
+ * Compute gcode0
+ * = Sum(i=0,3) pred[i]*past_qua_en[i] - ener_code + meanEner
+ */
+ tmp = tmp << 9; /* Q23 */
+
+ /* Q13 * Q10 -> Q23 */
+ i = 0;
+
+ while ( i < 4 ) {
+ tmp += pred[i] * st->past_qua_en[i];
+ i++;
+ }
+ gcode0 = tmp >> 15; /* Q8 */
+
+ /*
+ * gcode0 = pow(10.0, gcode0/20)
+ * = pow(2, 3.3219*gcode0/20)
+ * = pow(2, 0.166*gcode0)
+ */
+ /* 5439 Q15 = 0.165985 */
+ /* (correct: 1/(20*log10(2)) 0.166096 = 5443 Q15) */
+ /* For IS641 bitexactness */
+ if ( mode == MR74 ) {
+ /* Q8 * Q15 -> Q24 */
+ tmp = gcode0 * 10878;
+ }
+ else {
+ /* Q8 * Q15 -> Q24 */
+ tmp = gcode0 * 10886;
+ }
+ tmp = tmp >> 9; /* -> Q15 */
+
+ /* -> Q0.Q15 */
+ *exp_gcode0 = tmp >> 15;
+ *frac_gcode0 = tmp - ( *exp_gcode0 * 32768 );
+ }
+}
+
+
+/*
+ * gc_pred_update
+ *
+ *
+ * Parameters:
+ * st->past_qua_en B: MA predictor
+ * st->past_qua_en_MR122 B: MA predictor MR122
+ * qua_ener_MR122 I: quantized energy for update (log2(quaErr))
+ * qua_ener I: quantized energy for update (20*log10(quaErr))
+ *
+ * Function:
+ * Update MA predictor with last quantized energy
+ *
+ * Returns:
+ * void
+ */
+static void gc_pred_update( gc_predState *st, Word32 qua_ener_MR122,
+ Word32 qua_ener )
+{
+ Word32 i;
+
+
+ for ( i = 3; i > 0; i-- ) {
+ st->past_qua_en[i] = st->past_qua_en[i - 1];
+ st->past_qua_en_MR122[i] = st->past_qua_en_MR122[i - 1];
+ }
+ st->past_qua_en_MR122[0] = qua_ener_MR122; /* log2 (quaErr), Q10 */
+ st->past_qua_en[0] = qua_ener; /* 20*log10(quaErr), Q10 */
+}
+
+
+/*
+ * Dec_gain
+ *
+ *
+ * Parameters:
+ * pred_state->past_qua_en B: MA predictor
+ * pred_state->past_qua_en_MR122 B: MA predictor MR122
+ * mode I: AMR mode
+ * index I: index of quantization
+ * code I: Innovative vector
+ * evenSubfr I: Flag for even subframes
+ * gain_pit O: Pitch gain
+ * gain_cod O: Code gain
+ *
+ * Function:
+ * Decode the pitch and codebook gains
+ *
+ * Returns:
+ * void
+ */
+static void Dec_gain( gc_predState *pred_state, enum Mode mode, Word32 index,
+ Word32 code[], Word32 evenSubfr, Word32 *gain_pit, Word32 *gain_cod )
+{
+ Word32 frac, gcode0, exp, qua_ener, qua_ener_MR122, g_code, tmp;
+ const Word32 *p;
+
+
+ /* Read the quantized gains (table depends on mode) */
+ index = index << 2;
+
+ if ( ( mode == MR102 ) || ( mode == MR74 ) || ( mode == MR67 ) ) {
+ p = &table_gain_highrates[index];
+ *gain_pit = *p++;
+ g_code = *p++;
+ qua_ener_MR122 = *p++;
+ qua_ener = *p;
+ }
+ else {
+ if ( mode == MR475 ) {
+ index = index + ( ( 1 - evenSubfr ) << 1 );
+ p = &table_gain_MR475[index];
+ *gain_pit = *p++;
+ g_code = *p++;
+
+ /*
+ * calculate predictor update values (not stored in 4.75
+ * quantizer table to save space):
+ * qua_ener = log2(g)
+ * qua_ener_MR122 = 20*log10(g)
+ */
+ /* Log2(x Q12) = log2(x) + 12 */
+ Log2( g_code, &exp, &frac );
+ exp = exp - 12;
+ tmp = frac >> 5;
+
+ if ( ( frac & ( ( Word16 )1 << 4 ) ) != 0 ) {
+ tmp++;
+ }
+ qua_ener_MR122 = tmp + ( exp << 10 );
+
+ /* 24660 Q12 ~= 6.0206 = 20*log10(2) */
+ tmp = exp * 49320;
+ tmp += ( ( ( frac * 24660 ) >> 15 ) << 1 );
+
+ /* Q12 * Q0 = Q13 -> Q10 */
+ qua_ener = ( ( tmp << 13 ) + 0x00008000L ) >> 16;
+ }
+ else {
+ p = &table_gain_lowrates[index];
+ *gain_pit = *p++;
+ g_code = *p++;
+ qua_ener_MR122 = *p++;
+ qua_ener = *p;
+ }
+ }
+
+ /*
+ * predict codebook gain
+ * gc0 = Pow2(int(d)+frac(d))
+ * = 2^exp + 2^frac
+ * gcode0 (Q14) = 2^14*2^frac = gc0 * 2^(14-exp)
+ */
+ gc_pred( pred_state, mode, code, &exp, &frac, NULL, NULL );
+ gcode0 = Pow2( 14, frac );
+
+ /*
+ * read quantized gains, update table of past quantized energies
+ * st->past_qua_en(Q10) = 20 * Log10(gFac) / constant
+ * = Log2(gFac)
+ * = qua_ener
+ * constant = 20*Log10(2)
+ */
+ if ( exp < 11 ) {
+ *gain_cod = ( g_code * gcode0 ) >> ( 25 - exp );
+ }
+ else {
+ tmp = ( ( g_code * gcode0 ) << ( exp - 9 ) );
+
+ if ( ( tmp >> ( exp - 9 ) ) != ( g_code * gcode0 ) ) {
+ *gain_cod = 0x7FFF;
+ }
+ else {
+ *gain_cod = tmp >> 16;
+ }
+ }
+
+ /* update table of past quantized energies */
+ gc_pred_update( pred_state, qua_ener_MR122, qua_ener );
+ return;
+}
+
+
+/*
+ * gc_pred_average_limited
+ *
+ *
+ * Parameters:
+ * st->past_qua_en I: MA predictor
+ * st->past_qua_en_MR122 I: MA predictor MR122
+ *    ener_avg_MR122   O: averaged quantized energy (log2(quaErr))
+ * ener_avg O: averaged quantized energy (20*log10(quaErr))
+ *
+ * Function:
+ * Compute average limited quantized energy
+ * Returns:
+ * void
+ */
+static void gc_pred_average_limited( gc_predState *st, Word32 *ener_avg_MR122,
+ Word32 *ener_avg )
+{
+ Word32 av_pred_en, i;
+
+
+ /* do average in MR122 mode (log2() domain) */
+ av_pred_en = 0;
+
+ for ( i = 0; i < NPRED; i++ ) {
+ av_pred_en = ( av_pred_en + st->past_qua_en_MR122[i] );
+ }
+
+ /* av_pred_en = 0.25*av_pred_en */
+ av_pred_en = ( av_pred_en * 8192 ) >> 15;
+
+ /* if (av_pred_en < -14/(20Log10(2))) av_pred_en = .. */
+ if ( av_pred_en < MIN_ENERGY_MR122 ) {
+ av_pred_en = MIN_ENERGY_MR122;
+ }
+ *ener_avg_MR122 = ( Word16 )av_pred_en;
+
+ /* do average for other modes (20*log10() domain) */
+ av_pred_en = 0;
+
+ for ( i = 0; i < NPRED; i++ ) {
+ av_pred_en = ( av_pred_en + st->past_qua_en[i] );
+ if (av_pred_en < -32768)
+ av_pred_en = -32768;
+ else if (av_pred_en > 32767)
+ av_pred_en = 32767;
+ }
+
+ /* av_pred_en = 0.25*av_pred_en */
+ av_pred_en = ( av_pred_en * 8192 ) >> 15;
+
+ *ener_avg = av_pred_en;
+}
+
+
+/*
+ * ec_gain_code
+ *
+ *
+ * Parameters:
+ * st->gbuf I: last five gains
+ * st->past_gain_code I: past gain
+ * pred_state B: MA predictor state
+ * state I: state of the state machine
+ * gain_code O: decoded innovation gain
+ *
+ * Function:
+ * Conceal the codebook gain
+ *
+ * Returns:
+ * void
+ */
+static void ec_gain_code( ec_gain_codeState *st, gc_predState *pred_state,
+ Word16 state, Word32 *gain_code )
+{
+ Word32 tmp, qua_ener_MR122, qua_ener;
+
+
+ /* calculate median of last five gain values */
+ tmp = gmed_n( st->gbuf, 5 );
+
+ /* new gain = minimum(median, past_gain) * cdown[state] */
+ if ( tmp > st->past_gain_code ) {
+ tmp = st->past_gain_code;
+ }
+ tmp = ( tmp * cdown[state] ) >> 15;
+ *gain_code = tmp;
+
+ /*
+ * update table of past quantized energies with average of
+ * current values
+ */
+ gc_pred_average_limited( pred_state, &qua_ener_MR122, &qua_ener );
+ gc_pred_update( pred_state, qua_ener_MR122, qua_ener );
+}
+
+
+/*
+ * ec_gain_code_update
+ *
+ *
+ * Parameters:
+ * st->gbuf B: last five gains
+ * st->past_gain_code O: past gain
+ *    st->prev_gc          B: previous gain
+ *    bfi                  I: bad frame indicator
+ * prev_bf I: previous frame bad indicator
+ * gain_code O: decoded innovation gain
+ *
+ * Function:
+ * Update the codebook gain concealment state
+ *
+ * Returns:
+ * void
+ */
+static void ec_gain_code_update( ec_gain_codeState *st, Word16 bfi,
+ Word16 prev_bf, Word32 *gain_code )
+{
+ /* limit gain_code by previous good gain if previous frame was bad */
+ if ( bfi == 0 ) {
+ if ( prev_bf != 0 ) {
+ if ( *gain_code > st->prev_gc ) {
+ *gain_code = st->prev_gc;
+ }
+ }
+ st->prev_gc = *gain_code;
+ }
+
+ /* update EC states: previous gain, gain buffer */
+ st->past_gain_code = *gain_code;
+ st->gbuf[0] = st->gbuf[1];
+ st->gbuf[1] = st->gbuf[2];
+ st->gbuf[2] = st->gbuf[3];
+ st->gbuf[3] = st->gbuf[4];
+ st->gbuf[4] = *gain_code;
+ return;
+}
+
+
+/*
+ * d_gain_code
+ *
+ *
+ * Parameters:
+ * pred_state B: MA predictor state
+ * mode I: AMR mode (MR795 or MR122)
+ * index I: received quantization index
+ * code I: innovation codevector
+ * gain_code O: decoded innovation gain
+ *
+ * Function:
+ * Decode the fixed codebook gain using the received index
+ *
+ * Returns:
+ * void
+ */
+static void d_gain_code( gc_predState *pred_state, enum Mode mode, Word32 index,
+ Word32 code[], Word32 *gain_code )
+{
+ Word32 g_code0, exp, frac, qua_ener_MR122, qua_ener;
+ Word32 exp_inn_en, frac_inn_en, tmp, tmp2, i;
+ const Word32 *p;
+
+
+ /*
+ * Decode codebook gain
+ */
+ gc_pred( pred_state, mode, code, &exp, &frac, &exp_inn_en, &frac_inn_en );
+ p = &qua_gain_code[( ( index + index )+ index )];
+
+ /* Different scalings between MR122 and the other modes */
+ if ( mode == MR122 ) {
+ /* predicted gain */
+ g_code0 = Pow2( exp, frac );
+
+ if ( g_code0 <= 2047 )
+ g_code0 = g_code0 << 4;
+ else
+ g_code0 = 32767;
+ *gain_code = ( ( g_code0 * *p++ ) >> 15 ) << 1;
+ if (*gain_code & 0xFFFF8000)
+ *gain_code = 32767;
+
+ }
+ else {
+ g_code0 = Pow2( 14, frac );
+ tmp = ( *p++ * g_code0 ) << 1;
+ exp = 9 - exp;
+
+ if ( exp > 0 ) {
+ tmp = tmp >> exp;
+ }
+ else {
+ for (i = exp; i < 0; i++) {
+ tmp2 = tmp << 1;
+ if ((tmp ^ tmp2) & 0x80000000) {
+ tmp = (tmp & 0x80000000) ? 0x80000000 : 0x7FFFFFFF;
+ break;
+ }
+ else {
+ tmp = tmp2;
+ }
+ }
+ }
+ *gain_code = tmp >> 16;
+ if (*gain_code & 0xFFFF8000)
+ *gain_code = 32767;
+ }
+
+ /*
+ * update table of past quantized energies
+ */
+ qua_ener_MR122 = *p++;
+ qua_ener = *p++;
+ gc_pred_update( pred_state, qua_ener_MR122, qua_ener );
+ return;
+}
+
+
+/*
+ * Int_lsf
+ *
+ *
+ * Parameters:
+ * lsf_old I: LSF vector at the 4th subframe of past frame
+ * lsf_new I: LSF vector at the 4th subframe of present frame
+ * i_subfr I: current subframe
+ * lsf_out O: interpolated LSF parameters for current subframe
+ *
+ * Function:
+ * Interpolates the LSFs for selected subframe
+ *
+ * The LSFs are interpolated at the 1st, 2nd and 3rd
+ *    subframe and only forwarded at the 4th subframe.
+ *
+ * sf1: 3/4 F0 + 1/4 F1
+ * sf2: 1/2 F0 + 1/2 F1
+ * sf3: 1/4 F0 + 3/4 F1
+ * sf4: F1
+ *
+ * Returns:
+ * void
+ */
+static void Int_lsf( Word32 lsf_old[], Word32 lsf_new[], int i_subfr, Word32
+ lsf_out[] )
+{
+ Word32 i;
+
+
+ switch ( i_subfr ) {
+ case 0:
+ for ( i = 0; i < 10; i++ ) {
+ lsf_out[i] = lsf_old[i] - ( lsf_old[i] >> 2 ) + ( lsf_new[i] >> 2 );
+ }
+ break;
+
+ case 40:
+ for ( i = 0; i < 10; i++ ) {
+ lsf_out[i] = ( lsf_old[i] >> 1 ) + ( lsf_new[i] >> 1 );
+ }
+ break;
+
+ case 80:
+ for ( i = 0; i < 10; i++ ) {
+ lsf_out[i] = ( lsf_old[i] >> 2 ) - ( lsf_new[i] >> 2 ) +
+ lsf_new[i];
+ }
+ break;
+
+ case 120:
+ memcpy( lsf_out, lsf_new, M <<2 );
+ break;
+ }
+}
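+
+/*
+ * Illustrative note (not part of the reference code): the shift arithmetic in
+ * the cases above is just the weighting listed in the header, e.g. for
+ * i_subfr == 0
+ *
+ *    lsf_old - (lsf_old >> 2) + (lsf_new >> 2)  =  3/4*lsf_old + 1/4*lsf_new
+ */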
+
+
+/*
+ * Cb_gain_average
+ *
+ *
+ * Parameters:
+ * st->cbGainHistory B: codebook gain history
+ * st->hangCount B: hangover counter
+ * mode I: AMR mode
+ * gain_code I: codebook gain
+ * lsp I: The LSP for the current frame
+ * lspAver I: The average of LSP for 8 frames
+ * bfi I: bad frame indication
+ * prev_bf I: previous bad frame indication
+ * pdfi I: potential degraded bad frame indication
+ * prev_pdf I: previous potential degraded bad frame indication
+ * inBackgroundNoise I: background noise decision
+ * voicedHangover I: number of frames after last voiced frame
+ *
+ * Function:
+ *    Compute a mixed codebook gain, used to make the codebook gain
+ *    smoother in background noise.
+ *
+ * Returns:
+ *    mixed codebook gain
+ */
+static Word32 Cb_gain_average( Cb_gain_averageState *st, enum Mode mode, Word32
+ gain_code, Word32 lsp[], Word32 lspAver[], Word16 bfi, Word16 prev_bf,
+ Word16 pdfi, Word16 prev_pdf, Word32 inBackgroundNoise, Word32
+ voicedHangover )
+{
+ Word32 tmp[M];
+ Word32 i, cbGainMix, tmp_diff, bgMix, cbGainMean, sum, diff, tmp1, tmp2;
+ int shift1, shift2, shift;
+
+
+ /* set correct cbGainMix for MR74, MR795, MR122 */
+ cbGainMix = gain_code;
+
+ /*
+    * Store list of CB gains needed in the CB gain averaging
+ */
+ st->cbGainHistory[0] = st->cbGainHistory[1];
+ st->cbGainHistory[1] = st->cbGainHistory[2];
+ st->cbGainHistory[2] = st->cbGainHistory[3];
+ st->cbGainHistory[3] = st->cbGainHistory[4];
+ st->cbGainHistory[4] = st->cbGainHistory[5];
+ st->cbGainHistory[5] = st->cbGainHistory[6];
+ st->cbGainHistory[6] = gain_code;
+
+ /* compute lsp difference */
+ for ( i = 0; i < M; i++ ) {
+ tmp1 = labs( lspAver[i]- lsp[i] );
+ shift1 = 0;
+ if (tmp1 != 0){
+ while (!(tmp1 & 0x2000))
+ {
+ shift1++;
+ tmp1 = tmp1 << 1;
+ }
+ }
+ tmp2 = lspAver[i];
+ shift2 = 0;
+ if (tmp2 != 0){
+ while (!(tmp2 & 0x4000))
+ {
+ shift2++;
+ tmp2 = tmp2 << 1;
+ }
+ }
+ tmp[i] = ( tmp1 << 15 ) / tmp2;
+ shift = 2 + shift1 - shift2;
+
+ if ( shift >= 0 ) {
+ tmp[i] = tmp[i] >> shift;
+ }
+ else {
+ tmp[i] = tmp[i] << -( shift );
+ }
+ }
+ diff = *tmp + tmp[1] + tmp[2] + tmp[3] + tmp[4] + tmp[5] + tmp[6] + tmp[7] +
+ tmp[8] + tmp[9];
+
+ /* saturate */
+ if ( diff > 32767 ) {
+ diff = 32767;
+ }
+
+ /* Compute hangover */
+ st->hangVar += 1;
+
+ if ( diff <= 5325 ) {
+ st->hangVar = 0;
+ }
+
+ if ( st->hangVar > 10 ) {
+ /* Speech period, reset hangover variable */
+ st->hangCount = 0;
+ }
+
+ /* Compute mix constant (bgMix) */
+ bgMix = 8192;
+
+ /* MR475, MR515, MR59, MR67, MR102 */
+ if ( ( mode <= MR67 ) | ( mode == MR102 ) ) {
+ /* disable mix if too short time since */
+ if ( ( st->hangCount >= 40 ) & ( diff <= 5325 ) ) /* 0.65 in Q13 */ {
+ /* if errors and presumed noise make smoothing probability stronger */
+ if ( ( ( ( ( pdfi != 0 ) & ( prev_pdf != 0 ) ) | ( bfi != 0 ) | (
+ prev_bf != 0 ) ) & ( ( voicedHangover > 1 ) ) & (
+ inBackgroundNoise != 0 ) & ( mode < MR67 ) ) ) {
+ /* bgMix = min(0.25, max(0.0, diff-0.55)) / 0.25; */
+ tmp_diff = diff - 4506; /* 0.55 in Q13 */
+
+ /* max(0.0, diff-0.55) */
+ tmp1 = 0;
+
+ if ( tmp_diff > 0 ) {
+ tmp1 = tmp_diff;
+ }
+
+ /* min(0.25, tmp1) */
+ if ( 2048 >= tmp1 ) {
+ bgMix = tmp1 << 2;
+ }
+ }
+ else {
+ /* bgMix = min(0.25, max(0.0, diff-0.40)) / 0.25; */
+ tmp_diff = diff - 3277; /* 0.4 in Q13 */
+
+ /* max(0.0, diff-0.40) */
+ tmp1 = 0;
+
+ if ( tmp_diff > 0 ) {
+ tmp1 = tmp_diff;
+ }
+
+ /* min(0.25, tmp1) */
+ if ( 2048 >= tmp1 ) {
+ bgMix = tmp1 << 2;
+ }
+ }
+ }
+
+ /*
+ * Smoothen the cb gain trajectory
+ * smoothing depends on mix constant bgMix
+ */
+ sum = st->cbGainHistory[2] + st->cbGainHistory[3] + st->cbGainHistory[4] +
+ st->cbGainHistory[5] + st->cbGainHistory[6];
+
+ if ( sum > 163822 ) {
+ cbGainMean = 32767;
+ }
+ else {
+ cbGainMean = ( 3277 * sum + 0x00002000L ) >> 14; /* Q1 */
+ }
+
+ /* more smoothing in error and bg noise (NB no DFI used here) */
+ if ( ( ( bfi != 0 ) | ( prev_bf != 0 ) ) & ( inBackgroundNoise != 0 ) & (
+ mode < MR67 ) ) {
+ sum = 9362 * ( st->cbGainHistory[0] + st->cbGainHistory[1] + st->
+ cbGainHistory[2] + st->cbGainHistory[3] + st->cbGainHistory[4] +
+ st->cbGainHistory[5] + st->cbGainHistory[6] );
+ cbGainMean = ( sum + 0x00008000L ) >> 16; /* Q1 */
+ }
+
+ /* cbGainMix = bgMix*cbGainMix + (1-bgMix)*cbGainMean; */
+ sum = bgMix * cbGainMix; /* sum in Q14 */
+ sum += cbGainMean << 13;
+ sum -= bgMix * cbGainMean;
+ cbGainMix = ( sum + 0x00001000L ) >> 13;
+
+ /* Q1 */
+ }
+ st->hangCount += 1;
+ if (st->hangCount & 0x80000000)
+ st->hangCount = 40;
+ return cbGainMix;
+}
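The fixed-point arithmetic at the end of Cb_gain_average implements the commented formula directly; the floating-point sketch below shows the same smoothing, with bg_mix standing for bgMix scaled out of Q13 (8192 == 1.0) and cb_gain_mean for the history average. It is only an illustration, not part of the codec code.

    /* Sketch: cbGainMix = bgMix*cbGainMix + (1-bgMix)*cbGainMean */
    static double cb_gain_smooth(double cb_gain, double cb_gain_mean, double bg_mix)
    {
        /* bg_mix == 1.0 keeps the decoded gain; smaller values pull it toward the mean */
        return bg_mix * cb_gain + (1.0 - bg_mix) * cb_gain_mean;
    }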
+
+
+/*
+ * ph_disp
+ *
+ *
+ * Parameters:
+ * state->gainMem B: LTP gain memory
+ * state->prevCbGain B: Codebook gain memory
+ * mode I: AMR mode
+ * x B: LTP excitation signal -> total excitation signal
+ * cbGain I: Codebook gain
+ * ltpGain I: LTP gain
+ * inno B: Innovation vector
+ * pitch_fac I: pitch factor used to scale the LTP excitation
+ * tmp_shift I: shift factor applied to sum of scaled LTP ex & innov.
+ * before rounding
+ *
+ * Function:
+ * Adaptive phase dispersion; forming of total excitation
+ *
+ *
+ * Returns:
+ * void
+ */
+static void ph_disp( ph_dispState *state, enum Mode mode, Word32 x[],
+ Word32 cbGain, Word32 ltpGain, Word32 inno[],
+ Word32 pitch_fac, Word32 tmp_shift)
+{
+ Word32 inno_sav[L_SUBFR], ps_poss[L_SUBFR];
+ Word32 i, i1, impNr, temp1, temp2, j, nze, nPulse, ppos;
+ const Word32 *ph_imp; /* Pointer to phase dispersion filter */
+
+
+ /* Update LTP gain memory */
+ state->gainMem[4] = state->gainMem[3];
+ state->gainMem[3] = state->gainMem[2];
+ state->gainMem[2] = state->gainMem[1];
+ state->gainMem[1] = state->gainMem[0];
+ state->gainMem[0] = ltpGain;
+
+ /* basic adaption of phase dispersion */
+ /* no dispersion */
+ impNr = 2;
+
+ /* if (ltpGain < 0.9) */
+ if ( ltpGain < PHDTHR2LTP ) {
+ /* maximum dispersion */
+ impNr = 0;
+
+ /* if (ltpGain > 0.6) */
+ if ( ltpGain > PHDTHR1LTP ) {
+ /* medium dispersion */
+ impNr = 1;
+ }
+ }
+
+ /* onset indicator */
+ /* onset = (cbGain > onFact * cbGainMem[0]) */
+ temp1 = ( ( state->prevCbGain * ONFACTPLUS1 ) + 0x1000 ) >> 13;
+
+ if ( cbGain > temp1 ) {
+ state->onset = ONLENGTH;
+ }
+ else {
+ if ( state->onset > 0 ) {
+ state->onset--;
+ }
+ }
+
+ /*
+ * if not onset, check ltpGain buffer and use max phase dispersion if
+ * half or more of the ltpGain-parameters say so
+ */
+ if ( state->onset == 0 ) {
+ /* Check LTP gain memory and set filter accordingly */
+ i1 = 0;
+
+ for ( i = 0; i < PHDGAINMEMSIZE; i++ ) {
+ if ( state->gainMem[i] < PHDTHR1LTP ) {
+ i1++;
+ }
+ }
+
+ if ( i1 > 2 ) {
+ impNr = 0;
+ }
+ }
+
+ /* Restrict decrease in phase dispersion to one step if not onset */
+ if ( ( impNr > ( state->prevState + 1 ) ) & ( state->onset == 0 ) ) {
+ impNr--;
+ }
+
+ /* if onset, use one step less phase dispersion */
+ if ( ( impNr<2 )&( state->onset>0 ) ) {
+ impNr++;
+ }
+
+ /* disable for very low levels */
+ if ( cbGain < 10 ) {
+ impNr = 2;
+ }
+
+ if ( state->lockFull == 1 ) {
+ impNr = 0;
+ }
+
+ /* update static memory */
+ state->prevState = impNr;
+ state->prevCbGain = cbGain;
+
+ /*
+ * do phase dispersion for all modes but 12.2, 10.2 and 7.4;
+ * don't modify the innovation if impNr >=2 (= no phase disp)
+ */
+ if ( ( mode != MR122 ) & ( mode != MR102 ) & ( mode != MR74 ) & ( impNr < 2 )
+ ) {
+ /*
+ * track pulse positions, save innovation,
+ * and initialize new innovation
+ */
+ nze = 0;
+
+ for ( i = 0; i < L_SUBFR; i++ ) {
+ if ( inno[i] != 0 ) {
+ ps_poss[nze] = i;
+ nze++;
+ }
+ }
+ memcpy( inno_sav, inno, L_SUBFR <<2 );
+ memset( inno, 0, L_SUBFR <<2 );
+
+ /* Choose filter corresponding to codec mode and dispersion criterion */
+ ph_imp = ph_imp_mid;
+
+ if ( impNr == 0 ) {
+ ph_imp = ph_imp_low;
+ }
+
+ if ( mode == MR795 ) {
+ ph_imp = ph_imp_mid_MR795;
+
+ if ( impNr == 0 ) {
+ ph_imp = ph_imp_low_MR795;
+ }
+ }
+
+ /* Do phase dispersion of innovation */
+ for ( nPulse = 0; nPulse < nze; nPulse++ ) {
+ ppos = ps_poss[nPulse];
+
+ /* circular convolution with impulse response */
+ j = 0;
+
+ for ( i = ppos; i < L_SUBFR; i++ ) {
+ /* inno[i1] += inno_sav[ppos] * ph_imp[i1-ppos] */
+ temp1 = ( inno_sav[ppos] * ph_imp[j++] ) >> 15;
+ inno[i] = inno[i] + temp1;
+ }
+
+ for ( i = 0; i < ppos; i++ ) {
+ /* inno[i] += inno_sav[ppos] * ph_imp[L_SUBFR-ppos+i] */
+ temp1 = ( inno_sav[ppos] * ph_imp[j++] ) >> 15;
+ inno[i] = inno[i] + temp1;
+ }
+ }
+ }
+
+ /*
+ * compute total excitation for synthesis part of decoder
+ * (using modified innovation if phase dispersion is active)
+ */
+ for ( i = 0; i < L_SUBFR; i++ ) {
+ /* x[i] = gain_pit*x[i] + cbGain*code[i]; */
+ temp1 = x[i] * pitch_fac + inno[i] * cbGain;
+ temp2 = temp1 << tmp_shift;
+ x[i] = ( temp2 + 0x4000 ) >> 15;
+ if (labs(x[i]) > 32767)
+ {
+ if ((temp1 ^ temp2) & 0x80000000) {
+ x[i] = (temp1 & 0x80000000) ? -32768: 32767;
+ }
+ else {
+ x[i] = (temp2 & 0x80000000) ? -32768: 32767;
+ }
+ }
+ }
+ return;
+}
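The two inner loops over ppos above form a circular convolution of the saved innovation pulses with the selected impulse response. A floating-point sketch of that operation (names are illustrative; out[] is assumed zero-initialized, as the memset above guarantees):

    static void circular_disperse(const double *pulses, const double *imp,
                                  double *out, int n)
    {
        /* each nonzero pulse at position p contributes imp[] rotated to start at p */
        for (int p = 0; p < n; p++) {
            if (pulses[p] == 0.0)
                continue;
            for (int k = 0; k < n; k++)
                out[(p + k) % n] += pulses[p] * imp[k];
        }
    }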
+
+
+/*
+ * sqrt_l_exp
+ *
+ *
+ * Parameters:
+ * x I: input value
+ * exp O: right shift to be applied to result
+ *
+ * Function:
+ * Sqrt with exponent value.
+ *
+ * y = sqrt(x)
+ * x = f * 2^-e, 0.5 <= f < 1 (normalization)
+ * y = sqrt(f) * 2^(-e/2)
+ *
+ * a) e = 2k --> y = sqrt(f) * 2^-k
+ * (k = e div 2, 0.707 <= sqrt(f) < 1)
+ * b) e = 2k+1 --> y = sqrt(f/2) * 2^-k
+ * (k = e div 2, 0.5 <= sqrt(f/2) < 0.707)
+ *
+ *
+ * Returns:
+ * y output value
+ */
+static Word32 sqrt_l_exp( Word32 x, Word32 *exp )
+{
+ Word32 y, a, i, tmp;
+ int e;
+
+
+ if ( x <= ( Word32 )0 ) {
+ *exp = 0;
+ return( Word32 )0;
+ }
+ e=0;
+ if (x != 0){
+ tmp = x;
+ while (!(tmp & 0x40000000))
+ {
+ e++;
+ tmp = tmp << 1;
+ }
+ }
+ e = e & 0xFFFE;
+ x = ( x << e );
+ *exp = ( Word16 )e;
+ x = ( x >> 9 );
+ i = ( Word16 )( x >> 16 );
+ x = ( x >> 1 );
+ a = x & ( Word16 )0x7fff;
+ i = ( i - 16 );
+ y = ( sqrt_table[i] << 16 );
+ tmp = ( sqrt_table[i] - sqrt_table[i + 1] );
+ y -= ( tmp * a ) << 1;
+ return( y );
+}
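The normalization described in the comment can be checked against a plain floating-point version; this sketch uses the standard frexp()/pow() calls (note the opposite sign convention for the exponent) and is only a reference, not the fixed-point table lookup:

    #include <math.h>

    /* valid for x > 0; the codec routine returns 0 for x <= 0 */
    static double sqrt_via_normalization(double x)
    {
        int e;
        double f = frexp(x, &e);             /* x = f * 2^e, 0.5 <= f < 1 */
        return sqrt(f) * pow(2.0, 0.5 * e);  /* sqrt(x) = sqrt(f) * 2^(e/2) */
    }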
+
+
+/*
+ * Ex_ctrl
+ *
+ *
+ * Parameters:
+ * excitation B: Current subframe excitation
+ * excEnergy I: Exc. Energy, sqrt(totEx*totEx)
+ * exEnergyHist I: History of subframe energies
+ * voicedHangover I: number of frames after last voiced frame
+ * prevBFI I: previous bad frame indicator
+ * carefulFlag I: restrict dynamics of the scaling
+ *
+ * Function:
+ * Conceal drops in excitation energy when in background noise
+ * by scaling the excitation towards the average energy level
+ *
+ * Returns:
+ * always 0
+ */
+static Word16 Ex_ctrl( Word32 excitation[], Word32 excEnergy, Word32
+ exEnergyHist[], Word32 voicedHangover, Word16 prevBFI, Word16 carefulFlag
+ )
+{
+ Word32 i, testEnergy, scaleFactor, avgEnergy, prevEnergy, T0;
+ int exp;
+
+
+ /* get target level */
+ avgEnergy = gmed_n( exEnergyHist, 9 );
+ prevEnergy = ( exEnergyHist[7] + exEnergyHist[8] ) >> 1;
+
+ if ( exEnergyHist[8] < prevEnergy ) {
+ prevEnergy = exEnergyHist[8];
+ }
+
+ /* upscaling to avoid too rapid energy rises for some cases */
+ if ( ( excEnergy<avgEnergy )&( excEnergy>5 ) ) {
+ /* testEnergy = 4*prevEnergy; */
+ testEnergy = prevEnergy << 2;
+
+ if ( ( voicedHangover < 7 ) || prevBFI != 0 ) {
+ /* testEnergy = 3*prevEnergy */
+ testEnergy = testEnergy - prevEnergy;
+ }
+
+ if ( avgEnergy > testEnergy ) {
+ avgEnergy = testEnergy;
+ }
+
+ /* scaleFactor=avgEnergy/excEnergy in Q0 */
+ exp=0;
+ if (excEnergy != 0){
+ while (!(excEnergy & 0x4000))
+ {
+ exp++;
+ excEnergy = excEnergy << 1;
+ }
+ }
+ excEnergy = 536838144 / excEnergy;
+ T0 = ( avgEnergy * excEnergy ) << 1;
+ T0 = ( T0 >> ( 20 - exp ) );
+
+ if ( T0 > 32767 ) {
+ /* saturate */
+ T0 = 32767;
+ }
+ scaleFactor = T0;
+
+ /* test if scaleFactor > 3.0 */
+ if ( ( carefulFlag != 0 ) & ( scaleFactor > 3072 ) ) {
+ scaleFactor = 3072;
+ }
+
+ /* scale the excitation by scaleFactor */
+ for ( i = 0; i < L_SUBFR; i++ ) {
+ T0 = ( scaleFactor * excitation[i] ) << 1;
+ T0 = ( T0 >> 11 );
+ excitation[i] = T0;
+ }
+ }
+ return 0;
+}
+
+
+/*
+ * Inv_sqrt
+ *
+ *
+ * Parameters:
+ * x I: input value
+ *
+ * Function:
+ * 1/sqrt(x)
+ *
+ * Returns:
+ * y 1/sqrt(x)
+ */
+static Word32 Inv_sqrt( Word32 x )
+{
+ int i, a, tmp, exp;
+ Word32 y;
+
+
+ if ( x <= ( Word32 )0 )
+ return( ( Word32 )0x3fffffffL );
+ exp=0;
+ while (!(x & 0x40000000))
+ {
+ exp++;
+ x = x << 1;
+ }
+
+ /* x is normalized */
+ exp = ( 30 - exp );
+
+ /* If exponent even -> shift right */
+ if ( ( exp & 1 ) == 0 ) {
+ x = ( x >> 1 );
+ }
+ exp = ( exp >> 1 );
+ exp = ( exp + 1 );
+ x = ( x >> 9 );
+
+ /* Extract b25-b31 */
+ i = ( Word16 )( x >> 16 );
+
+ /* Extract b10-b24 */
+ x = ( x >> 1 );
+ a = x & ( Word16 )0x7fff;
+ i = ( i - 16 );
+
+ /* table[i] << 16 */
+ y = inv_sqrt_table[i] << 16;
+
+ /* table[i] - table[i+1]) */
+ tmp = ( inv_sqrt_table[i] - inv_sqrt_table[i + 1] );
+
+ /* y -= tmp*a*2 */
+ y -= ( tmp * a ) << 1;
+
+ /* denormalization */
+ y = ( y >> exp );
+ return( y );
+}
+
+
+/*
+ * energy_old
+ *
+ *
+ * Parameters:
+ * in I: input value
+ *
+ * Function:
+ * Energy of signal
+ *
+ * Returns:
+ * Energy
+ */
+static Word32 energy_old( Word32 in[] )
+{
+ Word32 temp, i, sum = 0;
+
+
+ for ( i = 0; i < L_SUBFR; i += 8 ) {
+ temp = in[i] >> 2;
+ sum += temp * temp;
+ temp = in[i + 1] >> 2;
+ sum += temp * temp;
+ temp = in[i + 2] >> 2;
+ sum += temp * temp;
+ temp = in[i + 3] >> 2;
+ sum += temp * temp;
+ temp = in[i + 4] >> 2;
+ sum += temp * temp;
+ temp = in[i + 5] >> 2;
+ sum += temp * temp;
+ temp = in[i + 6] >> 2;
+ sum += temp * temp;
+ temp = in[i + 7] >> 2;
+ sum += temp * temp;
+ }
+
+ if ( sum & 0xC0000000 ) {
+ return 0x7FFFFFFF;
+ }
+ return( sum << 1 );
+}
+
+
+/*
+ * energy_new
+ *
+ *
+ * Parameters:
+ * in I: input value
+ *
+ * Function:
+ * Energy of signal
+ *
+ * Returns:
+ * Energy
+ */
+static Word32 energy_new( Word32 in[] )
+{
+ Word32 i, s = 0, overflow = 0;
+
+ s += in[0] * in[0];
+ for ( i = 1; i < L_SUBFR; i += 3 ) {
+ s += in[i] * in[i];
+ s += in[i + 1] *in[i + 1];
+ s += in[i + 2] * in[i + 2];
+
+
+ if ( s & 0xC0000000 ) {
+ overflow = 1;
+ break;
+ }
+ }
+
+ /* check for overflow */
+ if ( overflow ) {
+ s = energy_old( in );
+ }
+ else {
+ s = ( s >> 3 );
+ }
+ return s;
+}
+
+
+/*
+ * agc2
+ *
+ *
+ * Parameters:
+ * sig_in I: reference (unscaled) excitation signal
+ * sig_out B: excitation signal to be scaled
+ *
+ * Function:
+ * Scales the excitation on a subframe basis
+ *
+ * Returns:
+ * void
+ */
+static void agc2( Word32 *sig_in, Word32 *sig_out )
+{
+ Word32 s;
+ int i, exp;
+ Word16 gain_in, gain_out, g0;
+
+
+ /* calculate gain_out with exponent */
+ s = energy_new( sig_out );
+
+ if ( s == 0 ) {
+ return;
+ }
+ exp=0;
+ while (!(s & 0x20000000))
+ {
+ exp++;
+ s = s << 1;
+ }
+
+ gain_out = ( Word16 )( ( s + 0x00008000L ) >> 16 );
+
+ /* calculate gain_in with exponent */
+ s = energy_new( sig_in );
+
+ if ( s == 0 ) {
+ g0 = 0;
+ }
+ else {
+ i = 0;
+ while (!(s & 0x40000000))
+ {
+ i++;
+ s = s << 1;
+ }
+
+ if ( s < 0x7fff7fff )
+ gain_in = ( Word16 )( ( s + 0x00008000L ) >> 16 );
+ else
+ gain_in = 32767;
+ exp = ( exp - i );
+
+ /*
+ * g0 = sqrt(gain_in/gain_out);
+ */
+ /* s = gain_out / gain_in */
+ s = ( gain_out << 15 ) / gain_in;
+ s = ( s << 7 );
+
+ if ( exp > 0 )
+ s = ( s >> exp );
+ else
+ s = ( s << ( -exp ) );
+ s = Inv_sqrt( s );
+ g0 = ( Word16 )( ( ( s << 9 ) + 0x00008000L ) >> 16 );
+ }
+
+ /* sig_out(n) = gain(n) * sig_out(n) */
+ for ( i = 0; i < L_SUBFR; i++ ) {
+ sig_out[i] = ( sig_out[i] * g0 ) >> 12;
+ }
+ return;
+}
+
+
+/*
+ * Bgn_scd
+ *
+ *
+ * Parameters:
+ * st->frameEnergyHist B: Frame Energy memory
+ * st->bgHangover B: Background hangover counter
+ * ltpGainHist I: LTP gain history
+ * speech I: synthesis speech frame
+ * voicedHangover O: number of frames after last voiced frame
+ *
+ * Function:
+ * Characterize synthesis speech and detect background noise
+ *
+ * Returns:
+ * inbgNoise background noise decision; 0 = no bgn, 1 = bgn
+ */
+static Word16 Bgn_scd( Bgn_scdState *st, Word32 ltpGainHist[], Word32 speech[],
+ Word32 *voicedHangover )
+{
+ Word32 temp, ltpLimit, frame_energyMin, currEnergy, noiseFloor, maxEnergy,
+ maxEnergyLastPart, s, i;
+ Word16 prevVoiced, inbgNoise;
+
+
+ /*
+ * Update the inBackgroundNoise flag (valid for use in the next frame if BFI);
+ * it works as an energy detector floating on top of the signal,
+ * not as good as a VAD.
+ */
+ s = 0;
+
+ for ( i = 0; i < L_FRAME; i++ ) {
+ s += speech[i] * speech[i];
+ }
+
+ if ( (s < 0xFFFFFFF) & (s >= 0) )
+ currEnergy = s >> 13;
+ else
+ currEnergy = 32767;
+ frame_energyMin = 32767;
+
+ for ( i = 0; i < L_ENERGYHIST; i++ ) {
+ if ( st->frameEnergyHist[i] < frame_energyMin )
+ frame_energyMin = st->frameEnergyHist[i];
+ }
+
+ /* Frame Energy Margin of 16 */
+ noiseFloor = frame_energyMin << 4;
+ maxEnergy = st->frameEnergyHist[0];
+
+ for ( i = 1; i < L_ENERGYHIST - 4; i++ ) {
+ if ( maxEnergy < st->frameEnergyHist[i] ) {
+ maxEnergy = st->frameEnergyHist[i];
+ }
+ }
+ maxEnergyLastPart = st->frameEnergyHist[2 * L_ENERGYHIST / 3];
+
+ for ( i = 2 * L_ENERGYHIST / 3 + 1; i < L_ENERGYHIST; i++ ) {
+ if ( maxEnergyLastPart < st->frameEnergyHist[i] ) {
+ maxEnergyLastPart = st->frameEnergyHist[i];
+ }
+ }
+
+ /* false */
+ inbgNoise = 0;
+
+ /*
+ * Do not consider silence as noise
+ * Do not consider continuous high volume as noise
+ * Or if the current noise level is very low
+ * Mark as noise if under current noise limit
+ * OR if the maximum energy is below the upper limit
+ */
+ if ( ( maxEnergy> LOWERNOISELIMIT )&( currEnergy<FRAMEENERGYLIMIT )&(
+ currEnergy>LOWERNOISELIMIT ) & ( ( currEnergy < noiseFloor ) || (
+ maxEnergyLastPart < UPPERNOISELIMIT ) ) ) {
+ if ( ( st->bgHangover + 1 ) > 30 ) {
+ st->bgHangover = 30;
+ }
+ else {
+ st->bgHangover += 1;
+ }
+ }
+ else {
+ st->bgHangover = 0;
+ }
+
+ /* make final decision about frame state, act somewhat cautiously */
+ if ( st->bgHangover > 1 )
+ inbgNoise = 1; /* true */
+
+ for ( i = 0; i < L_ENERGYHIST - 1; i++ ) {
+ st->frameEnergyHist[i] = st->frameEnergyHist[i + 1];
+ }
+ st->frameEnergyHist[L_ENERGYHIST - 1] = currEnergy;
+
+ /*
+ * prepare for voicing decision;
+ * tighten the threshold after some time in noise
+ */
+ ltpLimit = 13926; /* 0.85 Q14 */
+
+ if ( st->bgHangover > 8 ) {
+ ltpLimit = 15565; /* 0.95 Q14 */
+ }
+
+ if ( st->bgHangover > 15 ) {
+ ltpLimit = 16383; /* 1.00 Q14 */
+ }
+
+ /* weak sort of voicing indication. */
+ prevVoiced = 0; /* false */
+
+ if ( gmed_n( &ltpGainHist[4], 5 ) > ltpLimit ) {
+ prevVoiced = 1; /* true */
+ }
+
+ if ( st->bgHangover > 20 ) {
+ if ( gmed_n( ltpGainHist, 9 ) > ltpLimit ) {
+ prevVoiced = 1; /* true */
+ }
+ else {
+ prevVoiced = 0; /* false */
+ }
+ }
+
+ if ( prevVoiced ) {
+ *voicedHangover = 0;
+ }
+ else {
+ temp = *voicedHangover + 1;
+
+ if ( temp > 10 ) {
+ *voicedHangover = 10;
+ }
+ else {
+ *voicedHangover = temp;
+ }
+ }
+ return inbgNoise;
+}
+
+
+/*
+ * dtx_dec_activity_update
+ *
+ *
+ * Parameters:
+ * st->lsf_hist_ptr B: LSF history pointer
+ * st->lsf_hist B: LSF history
+ * lsf I: lsf
+ * frame I: noise frame
+ *
+ * Function:
+ * Update LSF history and compute log energy.
+ *
+ * Returns:
+ * void
+ */
+static void dtx_dec_activity_update( dtx_decState *st, Word32 lsf[], Word32
+ frame[] )
+{
+ Word32 frame_en;
+ Word32 log_en_e, log_en_m, log_en, i;
+
+
+ /* update LSF history */
+ st->lsf_hist_ptr += M;
+
+ if ( st->lsf_hist_ptr == 80 ) {
+ st->lsf_hist_ptr = 0;
+ }
+ memcpy( &st->lsf_hist[st->lsf_hist_ptr], lsf, M <<2 );
+
+ /* compute log energy based on frame energy */
+ frame_en = 0; /* Q0 */
+
+ for ( i = 0; (i < L_FRAME); i ++ ) {
+ frame_en += frame[i] * frame[i];
+ if (frame_en & 0x80000000)
+ break;
+ }
+
+ log_en = (frame_en & 0xC0000000) ? 0x7FFFFFFE: (Word32)frame_en << 1;
+
+ Log2( log_en , &log_en_e, &log_en_m );
+
+ /* convert exponent and mantissa to Word16 Q10 */
+ log_en = log_en_e << 10; /* Q10 */
+ log_en = log_en + ( log_en_m >> 5 );
+
+ /* divide by L_FRAME, i.e. subtract log2(L_FRAME) = 7.32193 */
+ log_en = log_en - 8521;
+
+ /*
+ * insert into log energy buffer, no division by two as
+ * log_en in decoder is Q11
+ */
+ st->log_en_hist_ptr += 1;
+
+ if ( st->log_en_hist_ptr == DTX_HIST_SIZE ) {
+ st->log_en_hist_ptr = 0;
+ }
+ st->log_en_hist[st->log_en_hist_ptr] = log_en; /* Q11 */
+}
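In floating point, the quantity stored above is roughly the average log2 energy of the frame; the sketch below shows that shape (L_FRAME is the 160-sample AMR frame; the small floor is only there to keep log2 defined for an all-zero frame):

    #include <math.h>

    static double frame_log_energy(const double *frame, int n)
    {
        double en = 1e-9;                    /* floor for an all-zero frame */
        for (int i = 0; i < n; i++)
            en += frame[i] * frame[i];
        return log2(en) - log2((double)n);   /* the "divide by L_FRAME" step */
    }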
+
+
+/*
+ * Decoder_amr
+ *
+ *
+ * Parameters:
+ * st B: State variables
+ * mode I: AMR mode
+ * parm I: vector of synthesis parameters
+ * frame_type I: received frame type
+ * synth O: synthesis speech
+ * A_t O: decoded LP filter in 4 subframes
+ *
+ * Function:
+ * Speech decoder routine
+ *
+ * Returns:
+ * void
+ */
+static void Decoder_amr( Decoder_amrState *st, enum Mode mode, Word16 parm[],
+ enum RXFrameType frame_type, Word32 synth[], Word32 A_t[] )
+{
+ /* LSPs */
+ Word32 lsp_new[M];
+ Word32 lsp_mid[M];
+
+
+ /* LSFs */
+ Word32 prev_lsf[M];
+ Word32 lsf_i[M];
+
+
+ /* Algebraic codevector */
+ Word32 code[L_SUBFR];
+
+
+ /* excitation */
+ Word32 excp[L_SUBFR];
+ Word32 exc_enhanced[L_SUBFR];
+
+
+ /* Scalars */
+ Word32 i, i_subfr, overflow, T0_frac, index, temp, temp2, subfrNr, excEnergy;
+ Word32 gain_code, gain_code_mix, pit_sharp, pit_flag, pitch_fac, t0_min, t0_max;
+ Word32 gain_pit = 0, evenSubfr = 0, T0 = 0, index_mr475 = 0;
+ Word32 *Az; /* Pointer on A_t */
+ Word16 flag4, carefulFlag;
+ Word16 delta_frc_low, delta_frc_range, tmp_shift;
+ Word16 bfi = 0, pdfi = 0;
+ /* bad frame indication flag, potential degraded bad frame flag */
+
+
+ enum DTXStateType newDTXState; /* SPEECH , DTX, DTX_MUTE */
+
+ /* find the new DTX state SPEECH OR DTX */
+ newDTXState = rx_dtx_handler( st->dtxDecoderState, frame_type );
+
+ /* DTX actions */
+ if ( newDTXState != SPEECH ) {
+ Decoder_amr_reset( st, MRDTX );
+ dtx_dec( st->dtxDecoderState, st->mem_syn, st->lsfState, st->pred_state,
+ st->Cb_gain_averState, newDTXState, mode, parm, synth, A_t );
+
+ /* update average lsp */
+ Lsf_lsp( st->lsfState->past_lsf_q, st->lsp_old );
+ lsp_avg( st->lsp_avg_st, st->lsfState->past_lsf_q );
+ goto theEnd;
+ }
+
+ /* SPEECH action state machine */
+ if ( table_speech_bad[frame_type] ) {
+ bfi = 1;
+
+ if ( frame_type != RX_SPEECH_BAD ) {
+ Build_CN_param( &st->nodataSeed, mode, parm );
+ }
+ }
+ else if ( frame_type == RX_SPEECH_DEGRADED ) {
+ pdfi = 1;
+ }
+
+ if ( bfi != 0 ) {
+ st->state += 1;
+ }
+ else if ( st->state == 6 ) {
+ st->state = 5;
+ }
+ else {
+ st->state = 0;
+ }
+
+ if ( st->state > 6 ) {
+ st->state = 6;
+ }
+
+ /*
+ * If this frame is the first speech frame after CNI period,
+ * set the BFH state machine to an appropriate state depending
+ * on whether there was DTX muting before start of speech or not
+ * If there was DTX muting, the first speech frame is muted.
+ * If there was no DTX muting, the first speech frame is not
+ * muted. The BFH state machine starts from state 5, however, to
+ * keep the audible noise resulting from a SID frame which is
+ * erroneously interpreted as a good speech frame as small as
+ * possible (the decoder output in this case is quickly muted)
+ */
+ if ( st->dtxDecoderState->dtxGlobalState == DTX ) {
+ st->state = 5;
+ st->prev_bf = 0;
+ }
+ else if ( st->dtxDecoderState->dtxGlobalState == DTX_MUTE ) {
+ st->state = 5;
+ st->prev_bf = 1;
+ }
+
+ /* save old LSFs for CB gain smoothing */
+ memcpy( prev_lsf, st->lsfState->past_lsf_q, M <<2 );
+
+ /*
+ * decode LSF parameters and generate interpolated lpc coefficients
+ * for the 4 subframes
+ */
+ if ( mode != MR122 ) {
+ D_plsf_3( st->lsfState, mode, bfi, parm, lsp_new );
+
+ /* Advance synthesis parameters pointer */
+ parm += 3;
+ Int_lpc_1to3( st->lsp_old, lsp_new, A_t );
+ }
+ else {
+ D_plsf_5( st->lsfState, bfi, parm, lsp_mid, lsp_new );
+
+ /* Advance synthesis parameters pointer */
+ parm += 5;
+ Int_lpc_1and3( st->lsp_old, lsp_mid, lsp_new, A_t );
+ }
+
+ /* update the LSPs for the next frame */
+ memcpy( st->lsp_old, lsp_new, M <<2 );
+
+ /*
+ * Loop for every subframe in the analysis frame
+ *
+ * The subframe size is L_SUBFR and the loop is repeated
+ * L_FRAME/L_SUBFR times *
+ * - decode the pitch delay
+ * - decode algebraic code
+ * - decode pitch and codebook gains
+ * - find the excitation and compute synthesis speech
+ */
+ /* pointer to interpolated LPC parameters */
+ Az = A_t;
+ evenSubfr = 0;
+ subfrNr = -1;
+
+ for ( i_subfr = 0; i_subfr < L_FRAME; i_subfr += L_SUBFR ) {
+ subfrNr += 1;
+ evenSubfr = 1 - evenSubfr;
+
+ /* flag for first and third subframe */
+ pit_flag = i_subfr;
+
+ if ( i_subfr == L_FRAME_BY2 ) {
+ if ( ( mode != MR475 ) & ( mode != MR515 ) ) {
+ pit_flag = 0;
+ }
+ }
+
+ /* pitch index */
+ index = *parm++;
+
+ /*
+ * decode pitch lag and find adaptive codebook vector.
+ */
+ if ( mode != MR122 ) {
+ /*
+ * flag4 indicates encoding with 4 bit resolution;
+ * this is needed for mode MR475, MR515, MR59 and MR67
+ */
+ flag4 = 0;
+
+ if ( ( mode == MR475 ) || ( mode == MR515 ) || ( mode == MR59 ) || (
+ mode == MR67 ) ) {
+ flag4 = 1;
+ }
+
+ /*
+ * get ranges for the t0_min and t0_max
+ * only needed in delta decoding
+ */
+ delta_frc_low = 5;
+ delta_frc_range = 9;
+
+ if ( mode == MR795 ) {
+ delta_frc_low = 10;
+ delta_frc_range = 19;
+ }
+ t0_min = st->old_T0 - delta_frc_low;
+
+ if ( t0_min < PIT_MIN ) {
+ t0_min = PIT_MIN;
+ }
+ t0_max = t0_min + delta_frc_range;
+
+ if ( t0_max > PIT_MAX ) {
+ t0_max = PIT_MAX;
+ t0_min = t0_max - delta_frc_range;
+ }
+ Dec_lag3( index, t0_min, t0_max, pit_flag, st->old_T0, &T0, &T0_frac,
+ flag4 );
+ st->T0_lagBuff = T0;
+
+ if ( bfi != 0 ) {
+ if ( st->old_T0 < PIT_MAX ) {
+ /* Graceful pitch degradation */
+ st->old_T0 += 1;
+ }
+ T0 = st->old_T0;
+ T0_frac = 0;
+
+ if ( ( st->inBackgroundNoise != 0 ) & ( st->voicedHangover > 4 ) & (
+ ( mode == MR475 ) || ( mode == MR515 ) || ( mode == MR59 ) ) )
+ {
+ T0 = st->T0_lagBuff;
+ }
+ }
+ Pred_lt_3or6_40( st->exc, T0, T0_frac, 1 );
+ }
+ else {
+ Dec_lag6( index, PIT_MIN_MR122, PIT_MAX, pit_flag, &T0, &T0_frac );
+
+ if ( ( bfi != 0 ) || ( ( pit_flag != 0 ) & ( index > 60 ) ) ) {
+ st->T0_lagBuff = T0;
+ T0 = st->old_T0;
+ T0_frac = 0;
+ }
+ Pred_lt_3or6_40( st->exc, T0, T0_frac, 0 );
+ }
+
+ /*
+ * (MR122 only: Decode pitch gain.)
+ * Decode innovative codebook.
+ * set pitch sharpening factor
+ */
+ /* MR475, MR515 */
+ if ( ( mode == MR475 ) || ( mode == MR515 ) ) {
+ /* index of position */
+ index = *parm++;
+
+ /* signs */
+ i = *parm++;
+ decode_2i40_9bits( subfrNr, i, index, code );
+ pit_sharp = st->sharp << 1;
+ }
+
+ /* MR59 */
+ else if ( mode == MR59 ) {
+ /* index of position */
+ index = *parm++;
+
+ /* signs */
+ i = *parm++;
+ decode_2i40_11bits( i, index, code );
+ pit_sharp = st->sharp << 1;
+ }
+
+ /* MR67 */
+ else if ( mode == MR67 ) {
+ /* index of position */
+ index = *parm++;
+
+ /* signs */
+ i = *parm++;
+ decode_3i40_14bits( i, index, code );
+ pit_sharp = st->sharp << 1;
+ }
+
+ /* MR74, MR795 */
+ else if ( mode <= MR795 ) {
+ /* index of position */
+ index = *parm++;
+
+ /* signs */
+ i = *parm++;
+ decode_4i40_17bits( i, index, code );
+ pit_sharp = st->sharp << 1;
+ }
+
+ /* MR102 */
+ else if ( mode == MR102 ) {
+ decode_8i40_31bits( parm, code );
+ parm += 7;
+ pit_sharp = st->sharp << 1;
+ }
+
+ /* MR122 */
+ else {
+ index = *parm++;
+
+ if ( bfi != 0 ) {
+ ec_gain_pitch( st->ec_gain_p_st, st->state, &gain_pit );
+ }
+ else {
+ gain_pit = d_gain_pitch( mode, index );
+ }
+ ec_gain_pitch_update( st->ec_gain_p_st, bfi, st->prev_bf, &gain_pit );
+ decode_10i40_35bits( parm, code );
+ parm += 10;
+
+ /*
+ * pit_sharp = gain_pit;
+ * if (pit_sharp > 1.0) pit_sharp = 1.0;
+ */
+ pit_sharp = gain_pit;
+
+ if ( pit_sharp > 16383 )
+ pit_sharp = 32767;
+ else
+ pit_sharp *= 2;
+ }
+
+ /*
+ * Add the pitch contribution to code[].
+ */
+ for ( i = T0; i < L_SUBFR; i++ ) {
+ temp = ( code[i - T0] * pit_sharp ) >> 15;
+ code[i] = code[i] + temp;
+ }
+
+ /*
+ * Decode codebook gain (MR122) or both pitch
+ * gain and codebook gain (all others)
+ * Update pitch sharpening "sharp" with quantized gain_pit
+ */
+ if ( mode == MR475 ) {
+ /* read and decode pitch and code gain */
+ if ( evenSubfr != 0 ) {
+ /* index of gain(s) */
+ index_mr475 = *parm++;
+ }
+
+ if ( bfi == 0 ) {
+ Dec_gain( st->pred_state, mode, index_mr475, code, evenSubfr, &
+ gain_pit, &gain_code );
+ }
+ else {
+ ec_gain_pitch( st->ec_gain_p_st, st->state, &gain_pit );
+ ec_gain_code( st->ec_gain_c_st, st->pred_state, st->state, &
+ gain_code );
+ }
+ ec_gain_pitch_update( st->ec_gain_p_st, bfi, st->prev_bf, &gain_pit );
+ ec_gain_code_update( st->ec_gain_c_st, bfi, st->prev_bf, &gain_code );
+ pit_sharp = gain_pit;
+
+ if ( pit_sharp > SHARPMAX ) {
+ pit_sharp = SHARPMAX;
+ }
+ }
+ else if ( ( mode <= MR74 ) || ( mode == MR102 ) ) {
+ /* read and decode pitch and code gain */
+ /* index of gain(s) */
+ index = *parm++;
+
+ if ( bfi == 0 ) {
+ Dec_gain( st->pred_state, mode, index, code, evenSubfr, &gain_pit, &
+ gain_code );
+ }
+ else {
+ ec_gain_pitch( st->ec_gain_p_st, st->state, &gain_pit );
+ ec_gain_code( st->ec_gain_c_st, st->pred_state, st->state, &
+ gain_code );
+ }
+ ec_gain_pitch_update( st->ec_gain_p_st, bfi, st->prev_bf, &gain_pit );
+ ec_gain_code_update( st->ec_gain_c_st, bfi, st->prev_bf, &gain_code );
+ pit_sharp = gain_pit;
+
+ if ( pit_sharp > SHARPMAX ) {
+ pit_sharp = SHARPMAX;
+ }
+
+ if ( mode == MR102 ) {
+ if ( st->old_T0 > ( L_SUBFR + 5 ) ) {
+ pit_sharp = pit_sharp >> 2;
+ }
+ }
+ }
+ else {
+ /* read and decode pitch gain */
+ /* index of gain(s) */
+ index = *parm++;
+
+ if ( mode == MR795 ) {
+ /* decode pitch gain */
+ if ( bfi != 0 ) {
+ ec_gain_pitch( st->ec_gain_p_st, st->state, &gain_pit );
+ }
+ else {
+ gain_pit = d_gain_pitch( mode, index );
+ }
+ ec_gain_pitch_update( st->ec_gain_p_st, bfi, st->prev_bf, &gain_pit
+ );
+
+ /* read and decode code gain */
+ index = *parm++;
+
+ if ( bfi == 0 ) {
+ d_gain_code( st->pred_state, mode, index, code, &gain_code );
+ }
+ else {
+ ec_gain_code( st->ec_gain_c_st, st->pred_state, st->state, &
+ gain_code );
+ }
+ ec_gain_code_update( st->ec_gain_c_st, bfi, st->prev_bf, &gain_code
+ );
+ pit_sharp = gain_pit;
+
+ if ( pit_sharp > SHARPMAX ) {
+ pit_sharp = SHARPMAX;
+ }
+ }
+ else { /* MR122 */
+
+ if ( bfi == 0 ) {
+ d_gain_code( st->pred_state, mode, index, code, &gain_code );
+ }
+ else {
+ ec_gain_code( st->ec_gain_c_st, st->pred_state, st->state, &
+ gain_code );
+ }
+ ec_gain_code_update( st->ec_gain_c_st, bfi, st->prev_bf, &gain_code
+ );
+ pit_sharp = gain_pit;
+ }
+ }
+
+ /*
+ * store pitch sharpening for next subframe
+ * (for modes which use the previous pitch gain for
+ * pitch sharpening in the search phase)
+ * do not update sharpening in even subframes for MR475
+ */
+ if ( ( mode != MR475 ) || evenSubfr == 0 ) {
+ st->sharp = gain_pit;
+
+ if ( st->sharp > SHARPMAX ) {
+ st->sharp = SHARPMAX;
+ }
+ }
+
+ if ( pit_sharp > 16383 )
+ pit_sharp = 32767;
+ else
+ pit_sharp *= 2;
+
+ if ( pit_sharp > 16384 ) {
+ for ( i = 0; i < L_SUBFR; i++ ) {
+ temp = ( st->exc[i] * pit_sharp ) >> 15;
+ temp2 = ( temp * gain_pit ) << 1;
+
+ if ( mode == MR122 ) {
+ temp2 = ( temp2 >> 1 );
+ }
+ excp[i] = ( temp2 + 0x00008000L ) >> 16;
+ }
+ }
+
+ /*
+ * Store list of LTP gains needed in the source
+ * characteristic detector (SCD)
+ */
+ if ( bfi == 0 ) {
+ for (i = 0; i < 8; i++){
+ st->ltpGainHistory[i] = st->ltpGainHistory[i+1];
+ }
+ st->ltpGainHistory[8] = gain_pit;
+ }
+
+
+ /*
+ * Limit gain_pit if in background noise and BFI
+ * for MR475, MR515, MR59
+ */
+ if ( ( st->prev_bf != 0 || bfi != 0 ) & ( st->inBackgroundNoise != 0 ) & (
+ ( mode == MR475 ) || ( mode == MR515 ) || ( mode == MR59 ) ) ) {
+ /* if (gain_pit > 0.75) in Q14*/
+ if ( gain_pit > 12288 )
+ /* gain_pit = (gain_pit-0.75)/2.0 + 0.75; */
+ gain_pit = ( ( gain_pit - 12288 ) >> 1 ) + 12288;
+
+ /* if (gain_pit > 0.90) in Q14*/
+ if ( gain_pit > 14745 ) {
+ gain_pit = 14745;
+ }
+ }
+
+ /*
+ * Calculate CB mixed gain
+ */
+ Int_lsf( prev_lsf, st->lsfState->past_lsf_q, i_subfr, lsf_i );
+ gain_code_mix = Cb_gain_average( st->Cb_gain_averState, mode, gain_code,
+ lsf_i, st->lsp_avg_st->lsp_meanSave, bfi, st->prev_bf, pdfi, st->
+ prev_pdf, st->inBackgroundNoise, st->voicedHangover );
+
+ /* make sure that MR74, MR795, MR122 have original codeGain*/
+ /* MR74, MR795, MR122 */
+ if ( ( mode > MR67 ) & ( mode != MR102 ) ) {
+ gain_code_mix = gain_code;
+ }
+
+ /*
+ * Find the total excitation.
+ * Find synthesis speech corresponding to st->exc[].
+ */
+ /* MR475, MR515, MR59, MR67, MR74, MR795, MR102*/
+ if ( mode <= MR102 ) {
+ pitch_fac = gain_pit;
+ tmp_shift = 1;
+ }
+
+ /* MR122 */
+ else {
+ pitch_fac = gain_pit >> 1;
+ tmp_shift = 2;
+ }
+
+ /*
+ * copy unscaled LTP excitation to exc_enhanced (used in phase
+ * dispersion below) and compute total excitation for LTP feedback
+ */
+ memcpy( exc_enhanced, st->exc, L_SUBFR <<2 );
+
+ for ( i = 0; i < L_SUBFR; i++ ) {
+ /* st->exc[i] = gain_pit*st->exc[i] + gain_code*code[i]; */
+ temp = ( st->exc[i] * pitch_fac ) + ( code[i] * gain_code );
+ temp2 = ( temp << tmp_shift );
+ if (((temp2 >> 1) ^ temp2) & 0x40000000) {
+ if ((temp ^ temp2) & 0x80000000) {
+ temp2 = (temp & 0x80000000) ? (-1073741824L) : 1073725439;
+ }
+ else {
+ temp2 = (temp2 & 0x80000000) ? (-1073741824L) : 1073725439;
+ }
+ }
+ st->exc[i] = ( temp2 + 0x00004000L ) >> 15;
+ }
+ /*
+ * Adaptive phase dispersion
+ */
+
+ /* free phase dispersion adaption */
+ st->ph_disp_st->lockFull = 0;
+
+ if ( ( ( mode == MR475 ) || ( mode == MR515 ) || ( mode == MR59 ) ) & ( st
+ ->voicedHangover > 3 ) & ( st->inBackgroundNoise != 0 ) & ( bfi != 0
+ ) ) {
+ /*
+ * Always Use full Phase Disp.
+ * if error in bg noise
+ */
+ st->ph_disp_st->lockFull = 1;
+ }
+
+ /*
+ * apply phase dispersion to innovation (if enabled) and
+ * compute total excitation for synthesis part
+ */
+ ph_disp( st->ph_disp_st, mode, exc_enhanced, gain_code_mix, gain_pit, code
+ , pitch_fac, tmp_shift );
+
+ /*
+ * The excitation control module is active during BFI.
+ * Conceal drops in signal energy if in bg noise.
+ */
+ temp2 = 0;
+
+ for ( i = 0; i < L_SUBFR; i++ ) {
+ temp2 += ( exc_enhanced[i] * exc_enhanced[i] );
+ }
+
+ if ( temp2 > 0x3FFFFFFF ) {
+ excEnergy = 11584;
+ }
+ else {
+ temp2 = sqrt_l_exp( temp2, &temp );
+ temp2 = ( temp2 >> ( ( temp >> 1 ) + 15 ) );
+ excEnergy = temp2 >> 2;
+ }
+
+ if ( ( ( mode == MR475 ) || ( mode == MR515 ) || ( mode == MR59 ) ) & ( st
+ ->voicedHangover > 5 ) & ( st->inBackgroundNoise != 0 ) & ( st->
+ state < 4 ) & ( ( ( pdfi != 0 ) & ( st->prev_pdf != 0 ) ) || bfi !=
+ 0 || st->prev_bf != 0 ) ) {
+ carefulFlag = 0;
+
+ if ( ( pdfi != 0 ) & ( bfi == 0 ) ) {
+ carefulFlag = 1;
+ }
+ Ex_ctrl( exc_enhanced, excEnergy, st->excEnergyHist, st->voicedHangover
+ , st->prev_bf, carefulFlag );
+ }
+
+ if ( ( st->inBackgroundNoise != 0 ) & ( bfi != 0 || st->prev_bf != 0 ) & (
+ st->state < 4 ) ) {; /* do nothing! */
+ }
+ else {
+ /* Update energy history for all modes */
+ for (i = 0; i < 8; i++){
+ st->excEnergyHist[i] = st->excEnergyHist[i+1];
+ }
+ st->excEnergyHist[8] = excEnergy;
+ }
+
+ /*
+ * Excitation control module end.
+ */
+ if ( pit_sharp > 16384 ) {
+ for ( i = 0; i < L_SUBFR; i++ ) {
+ excp[i] = excp[i] + exc_enhanced[i];
+ if (labs(excp[i]) > 32767)
+ excp[i] = (excp[i] & 0x80000000) ? -32768 : 32767;
+ }
+ agc2( exc_enhanced, excp );
+ overflow = Syn_filt( Az, excp, &synth[i_subfr], L_SUBFR, st->mem_syn, 0
+ );
+ }
+ else {
+ overflow = Syn_filt( Az, exc_enhanced, &synth[i_subfr], L_SUBFR, st->
+ mem_syn, 0 );
+ }
+
+ if ( overflow ) {
+ for ( i = 0; i < PIT_MAX + L_INTERPOL + L_SUBFR; i++ ) {
+ st->old_exc[i] = st->old_exc[i] >> 2;
+ }
+
+ for ( i = 0; i < L_SUBFR; i++ ) {
+ exc_enhanced[i] = exc_enhanced[i] >> 2;
+ }
+ Syn_filt_overflow( Az, exc_enhanced, &synth[i_subfr], L_SUBFR, st->mem_syn, 1 );
+ }
+ else {
+ memcpy( st->mem_syn, &synth[i_subfr + 30], 40 );
+ }
+
+ /*
+ * Update signal for next frame.
+ * -> shift to the left by L_SUBFR st->exc[]
+ */
+ memcpy( &st->old_exc[0], &st->old_exc[L_SUBFR], ( PIT_MAX + L_INTERPOL )<<
+ 2 );
+
+ /* interpolated LPC parameters for next subframe */
+ Az += MP1;
+
+ /* store T0 for next subframe */
+ st->old_T0 = T0;
+ }
+
+ /*
+ * Call the Source Characteristic Detector which updates
+ * st->inBackgroundNoise and st->voicedHangover.
+ */
+ st->inBackgroundNoise = Bgn_scd( st->background_state, &( st->ltpGainHistory[
+ 0] ), &( synth[0] ), &( st->voicedHangover ) );
+ dtx_dec_activity_update( st->dtxDecoderState, st->lsfState->past_lsf_q, synth
+ );
+
+ /* store bfi for next subframe */
+ st->prev_bf = bfi;
+ st->prev_pdf = pdfi;
+
+ /*
+ * Calculate the LSF averages on the eight
+ * previous frames
+ */
+ lsp_avg( st->lsp_avg_st, st->lsfState->past_lsf_q );
+theEnd:
+ st->dtxDecoderState->dtxGlobalState = newDTXState;
+ return;
+}
+
+
+/*
+ * Residu40
+ *
+ *
+ * Parameters:
+ * a I: prediction coefficients
+ * x I: speech signal
+ * y O: residual signal
+ *
+ * Function:
+ * The LP residual is computed by filtering the input
+ * speech through the LP inverse filter a(z)
+ *
+ * Returns:
+ * void
+ */
+static void Residu40( Word32 a[], Word32 x[], Word32 y[] )
+{
+ Word32 s, i, j;
+
+
+ for ( i = 0; i < 40; i++ ) {
+ s = a[0] * x[i] + a[1] * x[i - 1] + a[2] * x[i - 2] + a[3] * x[i - 3];
+ s += a[4] * x[i - 4] + a[5] * x[i - 5] + a[6] * x[i - 6] + a[7] * x[i - 7]
+ ;
+ s += a[8] * x[i - 8] + a[9] * x[i - 9] + a[10] * x[i - 10];
+ y[i] = ( s + 0x800 ) >> 12;
+ if (abs(y[i]) > 32767){
+ /* go to safe mode */
+ for (i = 0; i < 40; i++) {
+ s = a[0] * x[i];
+ for (j = 1; j <= 10; j++) {
+ s += a[j] * x[i - j];
+ if (s > 1073741823){
+ s = 1073741823;
+ }
+ else if ( s < -1073741824) {
+ s = -1073741824;
+ }
+ }
+ y[i] = ( s + 0x800 ) >> 12;
+ if (abs(y[i]) > 32767)
+ y[i] = (y[i] & 0x80000000) ? -32768 : 32767;
+ }
+ return;
+ }
+
+ }
+ return;
+}
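Ignoring the Q-format rounding and saturation, the routine above is a plain 10th-order FIR over past samples; a floating-point sketch (x[] must carry M = 10 samples of history before index 0, as the fixed-point code assumes):

    static void lp_residual(const double *a, const double *x, double *y, int n)
    {
        for (int i = 0; i < n; i++) {
            double s = 0.0;
            for (int j = 0; j <= 10; j++)    /* a[0..10]: interpolated LP coefficients */
                s += a[j] * x[i - j];
            y[i] = s;
        }
    }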
+
+
+/*
+ * agc
+ *
+ *
+ * Parameters:
+ * st->past_gain B: gain memory
+ * sig_in I: Post_Filter input signal
+ * sig_out B: Post_Filter output signal
+ * agc_fac I: AGC factor
+ *
+ * Function:
+ * Scales the Post_Filter output on a subframe basis
+ *
+ * Returns:
+ * void
+ */
+static void agc( agcState *st, Word32 *sig_in, Word32 *sig_out, Word16 agc_fac )
+{
+ Word32 s, gain_in, gain_out, g0, gain;
+ int exp, i;
+
+
+ /* calculate gain_out with exponent */
+ s = energy_new( sig_out );
+
+ if ( s == 0 ) {
+ st->past_gain = 0;
+ return;
+ }
+ exp=0;
+ i = s;
+ while (!(i & 0x40000000))
+ {
+ exp++;
+ i = i << 1;
+ }
+ exp -=1;
+ if (exp & 0x80000000) {
+ s >>= 1;
+ }
+ else {
+ s <<= exp;
+ }
+ gain_out = ( s + 0x00008000L ) >> 16;
+
+ /* calculate gain_in with exponent */
+ s = energy_new( sig_in );
+
+ if ( s == 0 ) {
+ g0 = 0;
+ }
+ else {
+ i=0;
+ while (!(s & 0x40000000))
+ {
+ i++;
+ s = s << 1;
+ }
+ s = s + 0x00008000L;
+
+ if ( s >= 0 )
+ gain_in = s >> 16;
+ else
+ gain_in = 32767;
+ exp = ( exp - i );
+
+ /*
+ * g0 = (1-agc_fac) * sqrt(gain_in/gain_out);
+ */
+ /* s = gain_out / gain_in */
+ s = ( gain_out << 15 ) / gain_in;
+ exp = 7 - exp;
+
+ if ( exp > 0 ){
+ if (exp > 31)
+ {
+ if(s){
+ s = 2147483647;
+ }
+ }
+ else {
+ s = s << exp ;
+ }
+ }
+ else
+ s = ( s >> ( -exp ) );
+ if (s < 0)
+ s = 2147483647;
+ s = Inv_sqrt( s );
+ i = ( ( s << 9 ) + 0x00008000L ) >> 16;
+ if (i & 0xFFFF8000)
+ i = 32767;
+
+ /* g0 = i * (1-agc_fac) */
+ g0 = ( i * ( 32767 - agc_fac ) ) >> 15;
+ }
+
+ /*
+ * compute gain[n] = agc_fac * gain[n-1] + (1-agc_fac) * sqrt(gain_in/gain_out)
+ * sig_out[n] = gain[n] * sig_out[n]
+ */
+ gain = st->past_gain;
+
+ for ( i = 0; i < L_SUBFR; i++ ) {
+ gain = ( gain * agc_fac ) >> 15;
+ gain = gain + g0;
+ sig_out[i] = ( sig_out[i] * gain ) >> 12;
+ if (labs(sig_out[i]) > 32767)
+ sig_out[i] = (sig_out[i] & 0x8000000) ? -32768 : 32767;
+ }
+ st->past_gain = gain;
+ return;
+}
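Stripped of the fixed-point normalization, agc() is a one-pole smoother toward the target gain sqrt(energy_in / energy_out), applied sample by sample; a floating-point sketch of the same shape (names are illustrative):

    #include <math.h>

    static void agc_float(const double *sig_in, double *sig_out, int n,
                          double agc_fac, double *past_gain)
    {
        double e_in = 0.0, e_out = 0.0, g0, gain = *past_gain;

        for (int i = 0; i < n; i++) {
            e_in  += sig_in[i]  * sig_in[i];
            e_out += sig_out[i] * sig_out[i];
        }
        if (e_out == 0.0) {                  /* nothing to scale, as in the code above */
            *past_gain = 0.0;
            return;
        }
        g0 = (e_in > 0.0) ? (1.0 - agc_fac) * sqrt(e_in / e_out) : 0.0;

        for (int i = 0; i < n; i++) {
            /* gain[n] = agc_fac*gain[n-1] + (1-agc_fac)*sqrt(e_in/e_out) */
            gain = agc_fac * gain + g0;
            sig_out[i] *= gain;
        }
        *past_gain = gain;
    }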
+
+
+/*
+ * Post_Filter
+ *
+ *
+ * Parameters:
+ * st B: post filter states
+ * mode I: AMR mode
+ * syn B: synthesis speech
+ * Az_4 I: interpolated LPC parameters in all subfr.
+ *
+ * Function:
+ * Post_Filtering of synthesis speech.
+ *
+ * inverse filtering of syn[] through A(z/0.7) to get res2[]
+ * tilt compensation filtering; 1 - MU*k*z^-1
+ * synthesis filtering through 1/A(z/0.75)
+ * adaptive gain control
+ *
+ * Returns:
+ * void
+ */
+static void Post_Filter( Post_FilterState *st, enum Mode mode, Word32 *syn,
+ Word32 *Az_4 )
+{
+ Word32 h[22], Ap3[MP1], Ap4[MP1]; /* bandwidth expanded LP parameters */
+ Word32 tmp, i_subfr, i, temp1, temp2, overflow = 0;
+ Word32 *Az, *p1, *p2, *syn_work = &st->synth_buf[M];
+ const Word32 *pgamma3 = &gamma3[0];
+ const Word32 *pgamma4 = &gamma4_gamma3_MR122[0];
+
+
+ /*
+ * Post filtering
+ */
+ memcpy( syn_work, syn, L_FRAME <<2 );
+ Az = Az_4;
+
+ if ( ( mode == MR122 ) || ( mode == MR102 ) ) {
+ pgamma3 = &gamma4_gamma3_MR122[0];
+ pgamma4 = &gamma4_MR122[0];
+ }
+
+ for ( i_subfr = 0; i_subfr < L_FRAME; i_subfr += L_SUBFR ) {
+ /* Find weighted filter coefficients Ap3[] and Ap4[] */
+ Ap3[0] = Az[0];
+ Ap4[0] = Az[0];
+
+ for ( i = 1; i <= 10; i++ ) {
+ Ap3[i] = ( Az[i] * pgamma3[i - 1] +0x4000 ) >> 15;
+ Ap4[i] = ( Az[i] * pgamma4[i - 1] +0x4000 ) >> 15;
+ }
+
+ /* filtering of synthesis speech by A(z/0.7) to find res2[] */
+ Residu40( Ap3, &syn_work[i_subfr], st->res2 );
+
+ /* tilt compensation filter */
+ /* impulse response of A(z/0.7)/A(z/0.75) */
+ memcpy( h, Ap3, MP1 <<2 );
+ memset( &h[M +1], 0, ( 22 - M - 1 )<<2 );
+ Syn_filt( Ap4, h, h, 22, &h[M +1], 0 );
+
+ /* 1st correlation of h[] */
+ tmp = 16777216 + h[1] * h[1];
+
+ for ( i = 2; i < 22; i++ ) {
+ tmp += h[i] * h[i];
+ if (tmp > 0x3FFF8000)
+ break;
+ }
+ temp1 = tmp >> 15;
+ if (temp1 & 0xFFFF8000)
+ temp1 = 32767;
+
+ tmp = h[0] * h[1];
+
+ for ( i = 1; i < 21; i++ ) {
+ tmp += h[i] * h[i + 1];
+ if (abs(tmp) > 1073741823)
+ tmp = 1073741823;
+ }
+ temp2 = tmp >> 15;
+
+ if ( temp2 <= 0 ) {
+ temp2 = 0;
+ }
+ else {
+ tmp = temp2 * 26214;
+ temp2 = ( tmp & 0xffff8000 ) / temp1;
+ }
+
+ /* preemphasis */
+ p1 = st->res2 + 39;
+ p2 = p1 - 1;
+ tmp = *p1;
+
+ do {
+ *p1 = *p1 - ( ( temp2 * *p2-- ) >> 15 );
+ if (abs(*p1) > 32767) {
+ *p1 = (*p1 & 0x80000000) ? -32768 : 32767;
+ }
+ p1--;
+ *p1 = *p1 - ( ( temp2 * *p2-- ) >> 15 );
+ if (abs(*p1) > 32767) {
+ *p1 = (*p1 & 0x80000000) ? -32768 : 32767;
+ }
+ p1--;
+ *p1 = *p1 - ( ( temp2 * *p2-- ) >> 15 );
+ if (abs(*p1) > 32767) {
+ *p1 = (*p1 & 0x80000000) ? -32768 : 32767;
+ }
+ p1--;
+ } while( p1 > st->res2 );
+ *p1 = *p1 - ( ( temp2 * st->preemph_state_mem_pre ) >> 15 );
+ if (abs(*p1) > 32767) {
+ *p1 = (*p1 & 0x80000000) ? -32768 : 32767;
+ }
+ st->preemph_state_mem_pre = tmp;
+
+ /* filtering through 1/A(z/0.75) */
+ overflow = Syn_filt( Ap4, st->res2, &syn[i_subfr], L_SUBFR, st->mem_syn_pst, 0 );
+ if (overflow){
+ Syn_filt_overflow( Ap4, st->res2, &syn[i_subfr], L_SUBFR, st->mem_syn_pst, 1 );
+ overflow = 0;
+ }
+ else {
+ memcpy(st->mem_syn_pst, &syn[i_subfr + 30], 40);
+ }
+
+ /* scale output to input */
+ agc( st->agc_state, &syn_work[i_subfr], &syn[i_subfr], AGC_FAC );
+ Az += MP1;
+ }
+
+ /* update syn_work[] buffer */
+ memcpy( &syn_work[- M], &syn_work[L_FRAME - M], M <<2 );
+ return;
+}
+
+
+/*
+ * Post_Process
+ *
+ *
+ * Parameters:
+ * st B: post filter states
+ * signal B: signal
+ *
+ * Function:
+ * Postprocessing of input speech.
+ *
+ * 2nd order high pass filtering with cut off frequency at 60 Hz.
+ * Multiplication of output by two.
+ *
+ *
+ * Returns:
+ * void
+ */
+ static void Post_Process( Post_ProcessState *st, Word32 signal[] )
+ {
+ Word32 x2, tmp, i = 0;
+ Word32 mask = 0x40000000;
+
+ do {
+ x2 = st->x1;
+ st->x1 = st->x0;
+ st->x0 = signal[i];
+
+ /*
+ * y[i] = b[0]*x[i]*2 + b[1]*x[i-1]*2 + b[2]*x[i-2]/2
+ * + a[1]*y[i-1] + a[2] * y[i-2];
+ */
+ tmp = ( st->y1_hi * 15836) + ( ( ( st->y1_lo * 15836 ) & ( Word32 )0xffff8000 ) >> 15);
+ tmp += (st->y2_hi * -7667) + ( ( ( st->y2_lo * ( -7667 ) ) & ( Word32 )0xffff8000 ) >> 15);
+ tmp += st->x0 * 7699;
+ tmp += st->x1 * -15398;
+ if ( ( (tmp >> 1) ^ tmp ) & mask)
+ tmp = (tmp & 0x80000000) ? -1073741824 : 1073741823;
+
+ tmp += x2 * 7699;
+ if ( ( (tmp >> 1) ^ tmp ) & mask)
+ tmp = (tmp & 0x80000000) ? -1073741824 : 1073741823;
+
+ tmp = tmp << 1;
+ if ( ( (tmp >> 1) ^ tmp ) & mask)
+ tmp = (tmp & 0x80000000) ? -1073741824 : 1073741823;
+
+ tmp = tmp << 1;
+ if ( ( (tmp >> 1) ^ tmp ) & mask)
+ tmp = (tmp & 0x80000000) ? -1073741824 : 1073741823;
+
+ if ( labs( tmp ) < 536862720 ) {
+ signal[i++] = ( tmp + 0x00002000L ) >> 14;
+ }
+ else if ( tmp > 0 ) {
+ signal[i++] = 32767;
+ }
+ else {
+ signal[i++] = -32768;
+ }
+ st->y2_hi = st->y1_hi;
+ st->y2_lo = st->y1_lo;
+ st->y1_hi = tmp >> 15;
+ st->y1_lo = ( ( tmp << 1 ) - ( st->y1_hi << 16 ) ) >> 1;
+ } while( i < 160 );
+ return;
+}
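The recurrence in the comment is a standard second-order (biquad) section applied in place; the floating-point sketch below shows that shape. The b[]/a[] arrays here stand for the 60 Hz high-pass coefficients in general and are not the literal Q-format constants (15836, -7667, 7699, ...) used above.

    static void hp_biquad(double *x, int n, const double b[3], const double a[2],
                          double xm[2], double ym[2])
    {
        for (int i = 0; i < n; i++) {
            /* y[i] = b0*x[i] + b1*x[i-1] + b2*x[i-2] + a1*y[i-1] + a2*y[i-2] */
            double y = b[0] * x[i] + b[1] * xm[0] + b[2] * xm[1]
                     + a[0] * ym[0] + a[1] * ym[1];
            xm[1] = xm[0];  xm[0] = x[i];
            ym[1] = ym[0];  ym[0] = y;
            x[i] = y;                        /* filter in place, as Post_Process does */
        }
    }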
+
+
+/*
+ * Speech_Decode_Frame
+ *
+ *
+ * Parameters:
+ * st B: decoder memory
+ * mode I: AMR mode
+ * parm I: speech parameters
+ * frame_type I: Frame type
+ * synth O: synthesis speech
+
+ * Function:
+ * Decode one frame
+ *
+ * Returns:
+ * void
+ */
+void Speech_Decode_Frame( void *st, enum Mode mode, Word16 *parm, enum
+ RXFrameType frame_type, Word16 *synth )
+{
+ Word32 Az_dec[AZ_SIZE]; /* Decoded Az for post-filter in 4 subframes*/
+ Word32 synth_speech[L_FRAME];
+ Word32 i;
+
+ /* Synthesis */
+ Decoder_amr( ( ( Speech_Decode_FrameState * ) st )->decoder_amrState, mode,
+ parm, frame_type, synth_speech, Az_dec );
+ Post_Filter( ( ( Speech_Decode_FrameState * ) st )->post_state, mode,
+ synth_speech, Az_dec );
+
+ /* post HP filter, and 15->16 bits */
+ Post_Process( ( ( Speech_Decode_FrameState * ) st )->postHP_state,
+ synth_speech );
+
+ for ( i = 0; i < L_FRAME; i++ ) {
+#ifndef NO13BIT
+ /* Truncate to 13 bits */
+ synth[i] = ( Word16 )( synth_speech[i] & 0xfff8 );
+#else
+ synth[i] = ( Word16 )( synth_speech[i] );
+#endif
+ }
+
+ return;
+}
+
+
+/*
+ * Decoder_amr_exit
+ *
+ *
+ * Parameters:
+ * state I: state structure
+ *
+ * Function:
+ * Frees the memory used for the state structure
+ *
+ * Returns:
+ * Void
+ */
+static void Decoder_amr_exit( Decoder_amrState **state )
+{
+ if ( state == NULL || *state == NULL )
+ return;
+ free( ( *state )->lsfState );
+ free( ( *state )->ec_gain_p_st );
+ free( ( *state )->ec_gain_c_st );
+ free( ( *state )->pred_state );
+ free( ( *state )->background_state );
+ free( ( *state )->ph_disp_st );
+ free( ( *state )->Cb_gain_averState );
+ free( ( *state )->lsp_avg_st );
+ free( ( *state )->dtxDecoderState );
+
+ /* deallocate memory */
+ free( *state );
+ *state = NULL;
+ return;
+}
+
+
+/*
+ * Post_Filter_exit
+ *
+ *
+ * Parameters:
+ * state I: state structure
+ *
+ * Function:
+ * Frees the memory used for the state structure
+ *
+ * Returns:
+ * Void
+ */
+static void Post_Filter_exit( Post_FilterState **state )
+{
+ if ( state == NULL || *state == NULL )
+ return;
+ free( ( *state )->agc_state );
+
+ /* deallocate memory */
+ free( *state );
+ *state = NULL;
+ return;
+}
+
+
+/*
+ * Post_Process_reset
+ *
+ *
+ * Parameters:
+ * state B: state structure
+ *
+ * Function:
+ * Resets state memory
+ *
+ * Returns:
+ * -1 failure
+ */
+static int Post_Process_reset( Post_ProcessState *state )
+{
+ if ( ( Post_ProcessState * )state == NULL ) {
+ fprintf( stderr, "Post_Process_reset: invalid parameter\n" );
+ return-1;
+ }
+ state->y2_hi = 0;
+ state->y2_lo = 0;
+ state->y1_hi = 0;
+ state->y1_lo = 0;
+ state->x0 = 0;
+ state->x1 = 0;
+ return 0;
+}
+
+
+/*
+ * Post_Process_exit
+ *
+ *
+ * Parameters:
+ * state I: state structure
+ *
+ * Function:
+ * Frees the memory used for the state structure
+ *
+ * Returns:
+ * Void
+ */
+static void Post_Process_exit( Post_ProcessState **state )
+{
+ if ( state == NULL || *state == NULL )
+ return;
+
+ /* deallocate memory */
+ free( *state );
+ *state = NULL;
+ return;
+}
+
+
+/*
+ * Decoder_amr_init
+ *
+ *
+ * Parameters:
+ * state O: state structure
+ *
+ * Function:
+ * Allocates state memory and initializes state memory
+ *
+ * Returns:
+ * 0 on success, -1 on failure
+ */
+static int Decoder_amr_init( Decoder_amrState **state )
+{
+ Decoder_amrState * s;
+
+ if ( ( Decoder_amrState * )state == NULL ) {
+ fprintf( stderr, "Decoder_amr_init: invalid parameter\n" );
+ return-1;
+ }
+ *state = NULL;
+
+ /* allocate memory */
+ if ( ( s = ( Decoder_amrState * ) malloc( sizeof( Decoder_amrState ) ) ) ==
+ NULL ) {
+ fprintf( stderr, "Decoder_amr_init: can not malloc state structure\n" );
+ return-1;
+ }
+
+ /* DPlsf_init */
+ /* allocate memory */
+ if ( ( s->lsfState = ( D_plsfState * ) malloc( sizeof( D_plsfState ) ) ) ==
+ NULL ) {
+ free(s);
+ fprintf( stderr, "DPlsf_init: can not malloc state structure\n" );
+ return-1;
+ }
+
+ /* ecGainPitchInit */
+ /* allocate memory */
+ if ( ( s->ec_gain_p_st = ( ec_gain_pitchState * ) malloc( sizeof(
+ ec_gain_pitchState ) ) ) == NULL ) {
+ free(s->lsfState);
+ free(s);
+ fprintf( stderr, "ecGainPitchInit: can not malloc state structure\n" );
+ return-1;
+ }
+
+ /* ecGainCodeInit */
+ /* allocate memory */
+ if ( ( s->ec_gain_c_st = ( ec_gain_codeState * ) malloc( sizeof(
+ ec_gain_codeState ) ) ) == NULL ) {
+ free(s->ec_gain_p_st);
+ free(s->lsfState);
+ free(s);
+ fprintf( stderr, "ecGainCodeInit: can not malloc state structure\n" );
+ return-1;
+ }
+
+ /* gcPredInit */
+ /* allocate memory */
+ if ( ( s->pred_state = ( gc_predState * ) malloc( sizeof( gc_predState ) ) )
+ == NULL ) {
+ free(s->ec_gain_c_st);
+ free(s->ec_gain_p_st);
+ free(s->lsfState);
+ free(s);
+ fprintf( stderr, "gcPredInit: can not malloc state structure\n" );
+ return-1;
+ }
+
+ /* Cb_gain_averageInit */
+ /* allocate memory */
+ if ( ( s->Cb_gain_averState = ( Cb_gain_averageState * ) malloc( sizeof(
+ Cb_gain_averageState ) ) ) == NULL ) {
+ free(s->pred_state);
+ free(s->ec_gain_c_st);
+ free(s->ec_gain_p_st);
+ free(s->lsfState);
+ free(s);
+ fprintf( stderr, "Cb_gain_averageInit: can not malloc state structure\n" );
+ return-1;
+ }
+ memset( s->Cb_gain_averState->cbGainHistory, 0, L_CBGAINHIST <<2 );
+
+ /* Initialize hangover handling */
+ s->Cb_gain_averState->hangVar = 0;
+ s->Cb_gain_averState->hangCount = 0;
+
+ /* lsp_avgInit */
+ /* allocate memory */
+ if ( ( s->lsp_avg_st = ( lsp_avgState * ) malloc( sizeof( lsp_avgState ) ) )
+ == NULL ) {
+ free(s->Cb_gain_averState);
+ free(s->pred_state);
+ free(s->ec_gain_c_st);
+ free(s->ec_gain_p_st);
+ free(s->lsfState);
+ free(s);
+ fprintf( stderr, "lsp_avgInit: can not malloc state structure\n" );
+ return-1;
+ }
+
+ /* Bgn_scdInit */
+ /* allocate memory */
+ if ( ( s->background_state = ( Bgn_scdState * ) malloc( sizeof( Bgn_scdState
+ ) ) ) == NULL ) {
+ free(s->lsp_avg_st);
+ free(s->Cb_gain_averState);
+ free(s->pred_state);
+ free(s->ec_gain_c_st);
+ free(s->ec_gain_p_st);
+ free(s->lsfState);
+ free(s);
+ fprintf( stderr, "Bgn_scdInit: can not malloc state structure\n" );
+ return-1;
+ }
+
+ /* phDispInit */
+ /* allocate memory */
+ if ( ( s->ph_disp_st = ( ph_dispState * ) malloc( sizeof( ph_dispState ) ) )
+ == NULL ) {
+ free(s->background_state);
+ free(s->lsp_avg_st);
+ free(s->Cb_gain_averState);
+ free(s->pred_state);
+ free(s->ec_gain_c_st);
+ free(s->ec_gain_p_st);
+ free(s->lsfState);
+ free(s);
+ fprintf( stderr, "phDispInit: can not malloc state structure\n" );
+ return-1;
+ }
+
+ /* dtxDecInit */
+ /* allocate memory */
+ if ( ( s->dtxDecoderState = ( dtx_decState * ) malloc( sizeof( dtx_decState )
+ ) ) == NULL ) {
+ free(s->ph_disp_st);
+ free(s->background_state);
+ free(s->lsp_avg_st);
+ free(s->Cb_gain_averState);
+ free(s->pred_state);
+ free(s->ec_gain_c_st);
+ free(s->ec_gain_p_st);
+ free(s->lsfState);
+ free(s);
+ fprintf( stderr, "dtxDecInit: can not malloc state structure\n" );
+ return-1;
+ }
+ Decoder_amr_reset( s, 0 );
+ *state = s;
+ return 0;
+}
+
+
+/*
+ * Post_Filter_reset
+ *
+ *
+ * Parameters:
+ * state B: state structure
+ *
+ * Function:
+ * Resets state memory
+ *
+ * Returns:
+ * 0 on success, -1 on failure
+ */
+static int Post_Filter_reset( Post_FilterState *state )
+{
+ if ( ( Post_FilterState * )state == NULL ) {
+ fprintf( stderr, "Post_Filter_reset: invalid parameter\n" );
+ return-1;
+ }
+ state->preemph_state_mem_pre = 0;
+ state->agc_state->past_gain = 4096;
+ memset( state->mem_syn_pst, 0, M <<2 );
+ memset( state->res2, 0, L_SUBFR <<2 );
+ memset( state->synth_buf, 0, ( L_FRAME + M )<<2 );
+ return 0;
+}
+
+
+/*
+ * Post_Filter_init
+ *
+ *
+ * Parameters:
+ * state O: state structure
+ *
+ * Function:
+ * Allocates state memory and initializes state memory
+ *
+ * Returns:
+ * 0 on success, -1 on failure
+ */
+static int Post_Filter_init( Post_FilterState **state )
+{
+ Post_FilterState * s;
+
+ if ( ( Post_FilterState * )state == NULL ) {
+ fprintf( stderr, "F057:invalid parameter\n" );
+ return-1;
+ }
+ *state = NULL;
+
+ /* allocate memory */
+ if ( ( s = ( Post_FilterState * ) malloc( sizeof( Post_FilterState ) ) ) ==
+ NULL ) {
+ fprintf( stderr, "F057:can not malloc filter structure\n" );
+ return-1;
+ }
+ s->agc_state = NULL;
+
+ /* allocate memory */
+ if ( ( s->agc_state = ( agcState * ) malloc( sizeof( agcState ) ) ) == NULL )
+ {
+ free(s);
+ fprintf( stderr, "agcInit: can not malloc state structure\n" );
+ return-1;
+ }
+ Post_Filter_reset( s );
+ *state = s;
+ return 0;
+}
+
+
+/*
+ * Post_Process_init
+ *
+ *
+ * Parameters:
+ * state O: state structure
+ *
+ * Function:
+ * Allocates state memory and initializes state memory
+ *
+ * Returns:
+ * 0 on success, -1 on failure
+ */
+static int Post_Process_init( Post_ProcessState **state )
+{
+ Post_ProcessState * s;
+
+ if ( ( Post_ProcessState * )state == NULL ) {
+ fprintf( stderr, "Post_Process_init: invalid parameter\n" );
+ return-1;
+ }
+ *state = NULL;
+
+ /* allocate memory */
+ if ( ( s = ( Post_ProcessState * ) malloc( sizeof( Post_ProcessState ) ) ) ==
+ NULL ) {
+ fprintf( stderr, "Post_Process_init: can not malloc state structure\n" );
+ return-1;
+ }
+ Post_Process_reset( s );
+ *state = s;
+ return 0;
+}
+
+
+/*
+ * Speech_Decode_Frame_exit
+ *
+ *
+ * Parameters:
+ * state I: state structure
+ *
+ * Function:
+ * Frees the memory used for the state structure
+ *
+ * Returns:
+ * Void
+ */
+void Speech_Decode_Frame_exit( void **st )
+{
+ if ( (( Speech_Decode_FrameState * )( st )) == NULL )
+ return;
+ Decoder_amr_exit( &( ( ( Speech_Decode_FrameState * ) st )->decoder_amrState
+ ) );
+ Post_Filter_exit( &( ( ( Speech_Decode_FrameState * ) st )->post_state ) );
+ Post_Process_exit( &( ( ( Speech_Decode_FrameState * ) st )->postHP_state ) )
+ ;
+
+ /* deallocate memory */
+ free( (( Speech_Decode_FrameState * )st) );
+ return;
+}
+
+
+/*
+ * Speech_Decode_Frame_reset
+ *
+ *
+ * Parameters:
+ * state B: state structure
+ *
+ * Function:
+ * Resets state memory
+ *
+ * Returns:
+ * 0 on success, -1 on failure
+ */
+int Speech_Decode_Frame_reset( void **st )
+{
+ Speech_Decode_FrameState * state;
+
+ if ( st == NULL || *st == NULL )
+ return (-1);
+ state = ( Speech_Decode_FrameState * )st;
+ Decoder_amr_reset( state->decoder_amrState, ( enum Mode ) 0 );
+ Post_Filter_reset( state->post_state );
+ Post_Process_reset( state->postHP_state );
+ return 0;
+}
+
+
+/*
+ * Speech_Decode_Frame_init
+ *
+ *
+ * Parameters:
+ * state O: state structure
+ *
+ * Function:
+ * Allocates state memory and initializes state memory
+ *
+ * Returns:
+ * pointer to the decoder state on success, NULL on failure
+ */
+void * Speech_Decode_Frame_init( )
+{
+ Speech_Decode_FrameState * s;
+
+ /* allocate memory */
+ if ( ( s = ( Speech_Decode_FrameState * ) malloc( sizeof(
+ Speech_Decode_FrameState ) ) ) == NULL ) {
+ fprintf( stderr, "Speech_Decode_Frame_init: can not malloc state "
+ "structure\n" );
+ return NULL;
+ }
+ s->decoder_amrState = NULL;
+ s->post_state = NULL;
+ s->postHP_state = NULL;
+
+ if ( Decoder_amr_init( &s->decoder_amrState ) || Post_Filter_init( &s->
+ post_state ) || Post_Process_init( &s->postHP_state ) ) {
+ Speech_Decode_Frame_exit( ( void ** )( &s ) );
+ return NULL;
+ }
+ return s;
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/amr_float/sp_dec.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/amr_float/sp_dec.h
new file mode 100644
index 000000000..bd7ec3471
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/amr_float/sp_dec.h
@@ -0,0 +1,81 @@
+/*
+ * ===================================================================
+ * TS 26.104
+ * REL-5 V5.4.0 2004-03
+ * REL-6 V6.1.0 2004-03
+ * 3GPP AMR Floating-point Speech Codec
+ * ===================================================================
+ *
+ */
+
+/*
+ * sp_dec.h
+ *
+ *
+ * Project:
+ * AMR Floating-Point Codec
+ *
+ * Contains:
+ * Defines interface to AMR decoder
+ *
+ */
+#ifndef _SP_DEC_H_
+#define _SP_DEC_H_
+/*
+ * definition of enumerated types
+ */
+
+/*
+ * definition of modes for decoder
+ */
+enum Mode { MR475 = 0,
+ MR515,
+ MR59,
+ MR67,
+ MR74,
+ MR795,
+ MR102,
+ MR122,
+ MRDTX,
+ N_MODES /* number of (SPC) modes */
+};
+
+/* Declaration of received frame types */
+enum RXFrameType { RX_SPEECH_GOOD = 0,
+ RX_SPEECH_DEGRADED,
+ RX_ONSET,
+ RX_SPEECH_BAD,
+ RX_SID_FIRST,
+ RX_SID_UPDATE,
+ RX_SID_BAD,
+ RX_NO_DATA,
+ RX_N_FRAMETYPES /* number of frame types */
+};
+
+/*
+ * Function prototypes
+ */
+
+/*
+ * initialize one instance of the speech decoder
+ */
+void* Speech_Decode_Frame_init ();
+
+/*
+ * free status struct
+ */
+void Speech_Decode_Frame_exit (void **st);
+
+/*
+ * Decodes one frame from encoded parameters
+ */
+void Speech_Decode_Frame (void *st, enum Mode mode, short *serial,
+ enum RXFrameType frame_type, short *synth);
+
+/*
+ * reset speech decoder
+ */
+int Speech_Decode_Frame_reset (void **st);
+
+#endif
+
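A hypothetical calling sketch for the interface declared above; the size of parm[] and the choice of mode and frame type are illustrative only, and obtaining encoded parameters for a real bitstream is outside this header's scope.

    #include "sp_dec.h"

    int main(void)
    {
        void *dec = Speech_Decode_Frame_init();
        short parm[64] = { 0 };   /* encoded parameters for one frame (size illustrative) */
        short synth[160];         /* one 20 ms frame of decoded 8 kHz speech */

        if (dec == NULL)
            return 1;
        Speech_Decode_Frame(dec, MR122, parm, RX_SPEECH_GOOD, synth);
        Speech_Decode_Frame_exit(&dec);
        return 0;
    }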
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/amr_float/sp_enc.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/amr_float/sp_enc.h
new file mode 100644
index 000000000..25f9e6480
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/amr_float/sp_enc.h
@@ -0,0 +1,74 @@
+/*
+ * ===================================================================
+ * TS 26.104
+ * REL-5 V5.4.0 2004-03
+ * REL-6 V6.1.0 2004-03
+ * 3GPP AMR Floating-point Speech Codec
+ * ===================================================================
+ *
+ */
+
+/*
+ * sp_enc.h
+ *
+ *
+ * Project:
+ * AMR Floating-Point Codec
+ *
+ * Contains:
+ * Defines interface to AMR encoder
+ *
+ */
+#ifndef _SP_ENC_H
+#define _SP_ENC_H
+
+/*
+ * include files
+ */
+#include "typedef.h"
+
+/*
+ * definition of modes for encoder
+ */
+enum Mode { MR475 = 0,
+ MR515,
+ MR59,
+ MR67,
+ MR74,
+ MR795,
+ MR102,
+ MR122,
+ MRDTX
+};
+
+/*
+ * Function prototypes
+ */
+
+/*
+ * initialize one instance of the speech encoder
+ * Stores pointer to filter status struct in *st. This pointer has to
+ * be passed to Speech_Encode_Frame in each call.
+ * returns 0 on success
+ */
+void *Speech_Encode_Frame_init (int dtx);
+/*
+ * reset speech encoder (i.e. set state memory to zero)
+ * returns 0 on success
+ */
+int Speech_Encode_Frame_reset(void *st, int dtx);
+
+/*
+ * de-initialize speech encoder (i.e. free status struct)
+ * stores NULL in *st
+ */
+void Speech_Encode_Frame_exit (void **st);
+
+/*
+ * Encodes one speech frame
+ * Returns analysis parameters
+ */
+void Speech_Encode_Frame (void *st, enum Mode mode, short *newSpeech,
+ short *prm, enum Mode *usedMode);
+
+#endif
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/amr_float/typedef.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/amr_float/typedef.h
new file mode 100644
index 000000000..b25d92baf
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/amr_float/typedef.h
@@ -0,0 +1,22 @@
+/*
+ * ===================================================================
+ * TS 26.104
+ * REL-5 V5.4.0 2004-03
+ * REL-6 V6.1.0 2004-03
+ * 3GPP AMR Floating-point Speech Codec
+ * ===================================================================
+ *
+ */
+/* This is valid for PC */
+
+#ifndef _TYPEDEF_H
+#define _TYPEDEF_H
+
+typedef char Word8;
+typedef unsigned char UWord8;
+typedef short Word16;
+typedef long Word32;
+typedef float Float32;
+typedef double Float64;
+
+#endif
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/audioconvert.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/audioconvert.h
new file mode 100644
index 000000000..46ba3d392
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/audioconvert.h
@@ -0,0 +1,115 @@
+/*
+ * audio conversion
+ * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2008 Peter Ross
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_AUDIOCONVERT_H
+#define AVCODEC_AUDIOCONVERT_H
+
+/**
+ * @file libavcodec/audioconvert.h
+ * Audio format conversion routines
+ */
+
+
+#include "avcodec.h"
+
+
+/**
+ * Generate string corresponding to the sample format with
+ * number sample_fmt, or a header if sample_fmt is negative.
+ *
+ * @param[in] buf the buffer to write the string into
+ * @param[in] buf_size the size of buf
+ * @param[in] sample_fmt the sample format whose info string should be printed, or
+ * a negative value to print the corresponding header.
+ * Meaningful sample format values range from 0 to SAMPLE_FMT_NB-1.
+ */
+void avcodec_sample_fmt_string(char *buf, int buf_size, int sample_fmt);
+
+/**
+ * @return NULL on error
+ */
+const char *avcodec_get_sample_fmt_name(int sample_fmt);
+
+/**
+ * @return SAMPLE_FMT_NONE on error
+ */
+enum SampleFormat avcodec_get_sample_fmt(const char* name);
+
+/**
+ * @return NULL on error
+ */
+const char *avcodec_get_channel_name(int channel_id);
+
+/**
+ * Return description of channel layout
+ */
+void avcodec_get_channel_layout_string(char *buf, int buf_size, int nb_channels, int64_t channel_layout);
+
+/**
+ * Guess the channel layout
+ * @param nb_channels
+ * @param codec_id Codec identifier, or CODEC_ID_NONE if unknown
+ * @param fmt_name Format name, or NULL if unknown
+ * @return Channel layout mask
+ */
+int64_t avcodec_guess_channel_layout(int nb_channels, enum CodecID codec_id, const char *fmt_name);
+
+/**
+ * @return the number of channels in the channel layout.
+ */
+int avcodec_channel_layout_num_channels(int64_t channel_layout);
+
+struct AVAudioConvert;
+typedef struct AVAudioConvert AVAudioConvert;
+
+/**
+ * Create an audio sample format converter context
+ * @param out_fmt Output sample format
+ * @param out_channels Number of output channels
+ * @param in_fmt Input sample format
+ * @param in_channels Number of input channels
+ * @param[in] matrix Channel mixing matrix (of dimension in_channels*out_channels). Set to NULL to ignore.
+ * @param flags See FF_MM_xx
+ * @return NULL on error
+ */
+AVAudioConvert *av_audio_convert_alloc(enum SampleFormat out_fmt, int out_channels,
+ enum SampleFormat in_fmt, int in_channels,
+ const float *matrix, int flags);
+
+/**
+ * Free audio sample format converter context
+ */
+void av_audio_convert_free(AVAudioConvert *ctx);
+
+/**
+ * Convert between audio sample formats
+ * @param[in] out array of output buffers for each channel. Set to NULL to skip processing of the given channel.
+ * @param[in] out_stride distance between consecutive output samples (measured in bytes)
+ * @param[in] in array of input buffers for each channel
+ * @param[in] in_stride distance between consecutive input samples (measured in bytes)
+ * @param len number of samples to convert (per channel)
+ */
+int av_audio_convert(AVAudioConvert *ctx,
+ void * const out[6], const int out_stride[6],
+ const void * const in[6], const int in_stride[6], int len);
+
+#endif /* AVCODEC_AUDIOCONVERT_H */
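The converter declared above is used in three steps: allocate a context for a fixed in/out format pair, feed it per-channel buffer arrays, and free it. A minimal mono float-to-S16 sketch using only the functions declared in this header (the non-negative-on-success return convention is an assumption, since the header does not document it):

#include <stdint.h>
#include "audioconvert.h"

static int float_to_s16_sketch(int16_t *dst, const float *src, int nb_samples)
{
    AVAudioConvert *ctx = av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                 SAMPLE_FMT_FLT, 1,
                                                 NULL /* no mixing matrix */, 0);
    void *out[6]      = { dst };
    int out_stride[6] = { sizeof(*dst) };  /* 2 bytes between output samples */
    const void *in[6] = { src };
    int in_stride[6]  = { sizeof(*src) };  /* 4 bytes between input samples */
    int ret;

    if (!ctx)
        return -1;
    ret = av_audio_convert(ctx, out, out_stride, in, in_stride, nb_samples);
    av_audio_convert_free(ctx);
    return ret;
}
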
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/avcodec.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/avcodec.h
new file mode 100644
index 000000000..c539552c3
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/avcodec.h
@@ -0,0 +1,3117 @@
+/*
+ * copyright (c) 2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_AVCODEC_H
+#define AVCODEC_AVCODEC_H
+
+#ifdef __cplusplus
+ #define FF_EXPORT extern "C"
+#else
+ #define FF_EXPORT
+#endif
+
+/**
+ * @file libavcodec/avcodec.h
+ * external API header
+ */
+
+
+#include "ffcodecs.h"
+#include "ffImgfmt.h"
+
+#ifdef HAVE_AV_CONFIG_H
+#include "libavutil/common.h"
+#endif
+
+#include <errno.h>
+#include "libavutil/avutil.h"
+
+#define LIBAVCODEC_VERSION_MAJOR 52
+#define LIBAVCODEC_VERSION_MINOR 48
+#define LIBAVCODEC_VERSION_MICRO 0
+
+#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
+ LIBAVCODEC_VERSION_MINOR, \
+ LIBAVCODEC_VERSION_MICRO)
+#define LIBAVCODEC_VERSION AV_VERSION(LIBAVCODEC_VERSION_MAJOR, \
+ LIBAVCODEC_VERSION_MINOR, \
+ LIBAVCODEC_VERSION_MICRO)
+#define LIBAVCODEC_BUILD LIBAVCODEC_VERSION_INT
+
+#define LIBAVCODEC_IDENT "Lavc" AV_STRINGIFY(LIBAVCODEC_VERSION)
+
+#define AV_NOPTS_VALUE INT64_C(0x8000000000000000)
+#define AV_TIME_BASE 1000000
+static const AVRational AV_TIME_BASE_Q={1, AV_TIME_BASE};
+
+enum CodecType {
+ CODEC_TYPE_UNKNOWN = -1,
+ CODEC_TYPE_VIDEO,
+ CODEC_TYPE_AUDIO,
+ CODEC_TYPE_DATA,
+ CODEC_TYPE_SUBTITLE,
+ CODEC_TYPE_ATTACHMENT,
+ CODEC_TYPE_NB
+};
+
+/**
+ * Needed for CorePNG
+ */
+enum CorePNGFrameType {
+ SAMPLE_I,
+ SAMPLE_P
+};
+
+/**
+ * all in native-endian format
+ */
+enum SampleFormat {
+ SAMPLE_FMT_NONE = -1,
+ SAMPLE_FMT_U8, ///< unsigned 8 bits
+ SAMPLE_FMT_S16, ///< signed 16 bits
+ SAMPLE_FMT_S32, ///< signed 32 bits
+ SAMPLE_FMT_FLT, ///< float
+ SAMPLE_FMT_DBL, ///< double
+ SAMPLE_FMT_NB ///< Number of sample formats. DO NOT USE if dynamically linking to libavcodec
+};
+
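/* Illustrative sketch (not an FFmpeg API): the formats above have fixed
 * per-sample sizes, which is handy when sizing buffers. */
static inline int sample_fmt_bytes_example(enum SampleFormat fmt)
{
    switch (fmt) {
    case SAMPLE_FMT_U8:  return 1;
    case SAMPLE_FMT_S16: return 2;
    case SAMPLE_FMT_S32: return 4;
    case SAMPLE_FMT_FLT: return 4;
    case SAMPLE_FMT_DBL: return 8;
    default:             return 0;   /* SAMPLE_FMT_NONE or unknown */
    }
}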
+/* Audio channel masks */
+#define CH_FRONT_LEFT 0x00000001
+#define CH_FRONT_RIGHT 0x00000002
+#define CH_FRONT_CENTER 0x00000004
+#define CH_LOW_FREQUENCY 0x00000008
+#define CH_BACK_LEFT 0x00000010
+#define CH_BACK_RIGHT 0x00000020
+#define CH_FRONT_LEFT_OF_CENTER 0x00000040
+#define CH_FRONT_RIGHT_OF_CENTER 0x00000080
+#define CH_BACK_CENTER 0x00000100
+#define CH_SIDE_LEFT 0x00000200
+#define CH_SIDE_RIGHT 0x00000400
+#define CH_TOP_CENTER 0x00000800
+#define CH_TOP_FRONT_LEFT 0x00001000
+#define CH_TOP_FRONT_CENTER 0x00002000
+#define CH_TOP_FRONT_RIGHT 0x00004000
+#define CH_TOP_BACK_LEFT 0x00008000
+#define CH_TOP_BACK_CENTER 0x00010000
+#define CH_TOP_BACK_RIGHT 0x00020000
+#define CH_STEREO_LEFT 0x20000000 ///< Stereo downmix.
+#define CH_STEREO_RIGHT 0x40000000 ///< See CH_STEREO_LEFT.
+
+/** Channel mask value used for AVCodecContext.request_channel_layout
+ to indicate that the user requests the channel order of the decoder output
+ to be the native codec channel order. */
+#define CH_LAYOUT_NATIVE 0x8000000000000000LL
+
+/* Audio channel convenience macros */
+#define CH_LAYOUT_MONO (CH_FRONT_CENTER)
+#define CH_LAYOUT_STEREO (CH_FRONT_LEFT|CH_FRONT_RIGHT)
+#define CH_LAYOUT_2_1 (CH_LAYOUT_STEREO|CH_BACK_CENTER)
+#define CH_LAYOUT_SURROUND (CH_LAYOUT_STEREO|CH_FRONT_CENTER)
+#define CH_LAYOUT_4POINT0 (CH_LAYOUT_SURROUND|CH_BACK_CENTER)
+#define CH_LAYOUT_2_2 (CH_LAYOUT_STEREO|CH_SIDE_LEFT|CH_SIDE_RIGHT)
+#define CH_LAYOUT_QUAD (CH_LAYOUT_STEREO|CH_BACK_LEFT|CH_BACK_RIGHT)
+#define CH_LAYOUT_5POINT0 (CH_LAYOUT_SURROUND|CH_SIDE_LEFT|CH_SIDE_RIGHT)
+#define CH_LAYOUT_5POINT1 (CH_LAYOUT_5POINT0|CH_LOW_FREQUENCY)
+#define CH_LAYOUT_5POINT0_BACK (CH_LAYOUT_SURROUND|CH_BACK_LEFT|CH_BACK_RIGHT)
+#define CH_LAYOUT_5POINT1_BACK (CH_LAYOUT_5POINT0_BACK|CH_LOW_FREQUENCY)
+#define CH_LAYOUT_7POINT0 (CH_LAYOUT_5POINT0|CH_BACK_LEFT|CH_BACK_RIGHT)
+#define CH_LAYOUT_7POINT1 (CH_LAYOUT_5POINT1|CH_BACK_LEFT|CH_BACK_RIGHT)
+#define CH_LAYOUT_7POINT1_WIDE (CH_LAYOUT_5POINT1_BACK|\
+ CH_FRONT_LEFT_OF_CENTER|CH_FRONT_RIGHT_OF_CENTER)
+#define CH_LAYOUT_STEREO_DOWNMIX (CH_STEREO_LEFT|CH_STEREO_RIGHT)
+
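/* Illustrative sketch: a channel layout is a 64-bit mask of the CH_* bits
 * above, so its channel count is simply the number of set bits.
 * avcodec_channel_layout_num_channels() in audioconvert.h provides this;
 * the loop below only shows what such a helper has to do. */
static inline int channels_in_layout_example(int64_t channel_layout)
{
    uint64_t mask = (uint64_t)channel_layout;
    int count = 0;
    while (mask) {
        count += (int)(mask & 1);
        mask >>= 1;
    }
    return count;   /* e.g. CH_LAYOUT_5POINT1 -> 6 */
}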
+/* in bytes */
+#define AVCODEC_MAX_AUDIO_FRAME_SIZE 192000 // 1 second of 48khz 32bit audio
+
+/**
+ * Required number of additionally allocated bytes at the end of the input bitstream for decoding.
+ * This is mainly needed because some optimized bitstream readers read
+ * 32 or 64 bit at once and could read over the end.<br>
+ * Note: If the first 23 bits of the additional bytes are not 0, then damaged
+ * MPEG bitstreams could cause overread and segfault.
+ */
+#define FF_INPUT_BUFFER_PADDING_SIZE 8
+
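/* Illustrative sketch (assumes av_malloc() from libavutil and memset() from
 * <string.h>): per the note above, input buffers should be over-allocated by
 * FF_INPUT_BUFFER_PADDING_SIZE and the padding zeroed. */
static uint8_t *alloc_padded_input_buffer_example(int payload_size)
{
    uint8_t *buf = av_malloc(payload_size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (buf)
        memset(buf + payload_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
    return buf;
}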
+/**
+ * minimum encoding buffer size
+ * Used to avoid some checks during header writing.
+ */
+#define FF_MIN_BUFFER_SIZE 16384
+
+
+/**
+ * motion estimation type.
+ */
+enum Motion_Est_ID {
+ ME_ZERO = 1, ///< no search, that is use 0,0 vector whenever one is needed
+ ME_FULL,
+ ME_LOG,
+ ME_PHODS,
+ ME_EPZS, ///< enhanced predictive zonal search
+ ME_X1, ///< reserved for experiments
+ ME_HEX, ///< hexagon based search
+ ME_UMH, ///< uneven multi-hexagon search
+ ME_ITER, ///< iterative search
+ ME_TESA, ///< transformed exhaustive search algorithm
+};
+
+enum AVDiscard{
+ /* We leave some space between them for extensions (drop some
+ * keyframes for intra-only or drop just some bidir frames). */
+ AVDISCARD_NONE =-16, ///< discard nothing
+ AVDISCARD_DEFAULT= 0, ///< discard useless packets like 0 size packets in avi
+ AVDISCARD_NONREF = 8, ///< discard all non reference
+ AVDISCARD_BIDIR = 16, ///< discard all bidirectional frames
+ AVDISCARD_NONKEY = 32, ///< discard all frames except keyframes
+ AVDISCARD_ALL = 48, ///< discard all
+};
+
+enum AVColorPrimaries{
+ AVCOL_PRI_BT709 =1, ///< also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP177 Annex B
+ AVCOL_PRI_UNSPECIFIED=2,
+ AVCOL_PRI_BT470M =4,
+ AVCOL_PRI_BT470BG =5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
+ AVCOL_PRI_SMPTE170M =6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
+ AVCOL_PRI_SMPTE240M =7, ///< functionally identical to above
+ AVCOL_PRI_FILM =8,
+ AVCOL_PRI_NB , ///< Not part of ABI
+};
+
+enum AVColorTransferCharacteristic{
+ AVCOL_TRC_BT709 =1, ///< also ITU-R BT1361
+ AVCOL_TRC_UNSPECIFIED=2,
+ AVCOL_TRC_GAMMA22 =4, ///< also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM
+ AVCOL_TRC_GAMMA28 =5, ///< also ITU-R BT470BG
+ AVCOL_TRC_NB , ///< Not part of ABI
+};
+
+enum AVColorSpace{
+ AVCOL_SPC_RGB =0,
+ AVCOL_SPC_BT709 =1, ///< also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
+ AVCOL_SPC_UNSPECIFIED=2,
+ AVCOL_SPC_FCC =4,
+ AVCOL_SPC_BT470BG =5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
+ AVCOL_SPC_SMPTE170M =6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
+ AVCOL_SPC_SMPTE240M =7,
+ AVCOL_SPC_NB , ///< Not part of ABI
+};
+
+enum AVColorRange{
+ AVCOL_RANGE_UNSPECIFIED=0,
+ AVCOL_RANGE_MPEG =1, ///< the normal 219*2^(n-8) "MPEG" YUV ranges
+ AVCOL_RANGE_JPEG =2, ///< the normal 2^n-1 "JPEG" YUV ranges
+ AVCOL_RANGE_NB , ///< Not part of ABI
+};
+
+/**
+ * X X 3 4 X X are luma samples,
+ * 1 2 1-6 are possible chroma positions
+ * X X 5 6 X 0 is undefined/unknown position
+ */
+enum AVChromaLocation{
+ AVCHROMA_LOC_UNSPECIFIED=0,
+ AVCHROMA_LOC_LEFT =1, ///< mpeg2/4, h264 default
+ AVCHROMA_LOC_CENTER =2, ///< mpeg1, jpeg, h263
+ AVCHROMA_LOC_TOPLEFT =3, ///< DV
+ AVCHROMA_LOC_TOP =4,
+ AVCHROMA_LOC_BOTTOMLEFT =5,
+ AVCHROMA_LOC_BOTTOM =6,
+ AVCHROMA_LOC_NB , ///< Not part of ABI
+};
+
+/**
+ * H.264 VUI colour primaries (matrix_coefficients)
+ */
+typedef enum {
+ YCbCr_RGB_coeff_GBR = 0,
+ YCbCr_RGB_coeff_ITUR_BT_709 = 1,
+ YCbCr_RGB_coeff_Unspecified = 2,
+ YCbCr_RGB_coeff_Reserved = 3,
+ YCbCr_RGB_coeff_US_Federal_Regulations_2003_73_682 = 4,
+ YCbCr_RGB_coeff_ITUR_BT_601_625 = 5,
+ YCbCr_RGB_coeff_ITUR_BT_601_525 = 6,
+ YCbCr_RGB_coeff_SMPTE240M = 7,
+ YCbCr_RGB_coeff_YCgCo = 8
+} YCbCr_RGB_MatrixCoefficientsType;
+
+typedef enum {
+ VIDEO_FULL_RANGE_TV = 0,
+ VIDEO_FULL_RANGE_PC = 1,
+ VIDEO_FULL_RANGE_INVALID = 2
+} VideoFullRangeType;
+
+typedef struct RcOverride{
+ int start_frame;
+ int end_frame;
+ int qscale; // If this is 0 then quality_factor will be used instead.
+ float quality_factor;
+} RcOverride;
+
+#define FF_MAX_B_FRAMES 16
+
+/* encoding support
+ These flags can be passed in AVCodecContext.flags before initialization.
+ Note: Not everything is supported yet.
+*/
+
+#define CODEC_FLAG_QSCALE 0x0002 ///< Use fixed qscale.
+#define CODEC_FLAG_4MV 0x0004 ///< 4 MV per MB allowed / advanced prediction for H.263.
+#define CODEC_FLAG_QPEL 0x0010 ///< Use qpel MC.
+#define CODEC_FLAG_GMC 0x0020 ///< Use GMC.
+#define CODEC_FLAG_MV0 0x0040 ///< Always try a MB with MV=<0,0>.
+#define CODEC_FLAG_PART 0x0080 ///< Use data partitioning.
+/**
+ * The parent program guarantees that the input for B-frame-containing
+ * streams is not written to for at least s->max_b_frames+1 frames; if
+ * this is not set, the input will be copied.
+ */
+#define CODEC_FLAG_INPUT_PRESERVED 0x0100
+#define CODEC_FLAG_PASS1 0x0200 ///< Use internal 2pass ratecontrol in first pass mode.
+#define CODEC_FLAG_PASS2 0x0400 ///< Use internal 2pass ratecontrol in second pass mode.
+#define CODEC_FLAG_EXTERN_HUFF 0x1000 ///< Use external Huffman table (for MJPEG).
+#define CODEC_FLAG_GRAY 0x2000 ///< Only decode/encode grayscale.
+#define CODEC_FLAG_EMU_EDGE 0x4000 ///< Don't draw edges.
+#define CODEC_FLAG_PSNR 0x8000 ///< error[?] variables will be set during encoding.
+#define CODEC_FLAG_TRUNCATED 0x00010000 /** Input bitstream might be truncated at a random
+ location instead of only at frame boundaries. */
+#define CODEC_FLAG_NORMALIZE_AQP 0x00020000 ///< Normalize adaptive quantization.
+#define CODEC_FLAG_INTERLACED_DCT 0x00040000 ///< Use interlaced DCT.
+#define CODEC_FLAG_LOW_DELAY 0x00080000 ///< Force low delay.
+#define CODEC_FLAG_ALT_SCAN 0x00100000 ///< Use alternate scan.
+#define CODEC_FLAG_GLOBAL_HEADER 0x00400000 ///< Place global headers in extradata instead of every keyframe.
+#define CODEC_FLAG_BITEXACT 0x00800000 ///< Use only bitexact stuff (except (I)DCT).
+/* Fx : Flag for h263+ extra options */
+#define CODEC_FLAG_AC_PRED 0x01000000 ///< H.263 advanced intra coding / MPEG-4 AC prediction
+#define CODEC_FLAG_H263P_UMV 0x02000000 ///< unlimited motion vector
+#define CODEC_FLAG_CBP_RD 0x04000000 ///< Use rate distortion optimization for cbp.
+#define CODEC_FLAG_QP_RD 0x08000000 ///< Use rate distortion optimization for qp selection.
+#define CODEC_FLAG_H263P_AIV 0x00000008 ///< H.263 alternative inter VLC
+#define CODEC_FLAG_OBMC 0x00000001 ///< OBMC
+#define CODEC_FLAG_LOOP_FILTER 0x00000800 ///< loop filter
+#define CODEC_FLAG_H263P_SLICE_STRUCT 0x10000000
+#define CODEC_FLAG_INTERLACED_ME 0x20000000 ///< interlaced motion estimation
+#define CODEC_FLAG_SVCD_SCAN_OFFSET 0x40000000 ///< Will reserve space for SVCD scan offset user data.
+#define CODEC_FLAG_CLOSED_GOP 0x80000000
+#define CODEC_FLAG2_FAST 0x00000001 ///< Allow non spec compliant speedup tricks.
+#define CODEC_FLAG2_STRICT_GOP 0x00000002 ///< Strictly enforce GOP size.
+#define CODEC_FLAG2_NO_OUTPUT 0x00000004 ///< Skip bitstream encoding.
+#define CODEC_FLAG2_LOCAL_HEADER 0x00000008 ///< Place global headers at every keyframe instead of in extradata.
+#define CODEC_FLAG2_BPYRAMID 0x00000010 ///< H.264 allow B-frames to be used as references.
+#define CODEC_FLAG2_WPRED 0x00000020 ///< H.264 weighted biprediction for B-frames
+#define CODEC_FLAG2_MIXED_REFS 0x00000040 ///< H.264 one reference per partition, as opposed to one reference per macroblock
+#define CODEC_FLAG2_8X8DCT 0x00000080 ///< H.264 high profile 8x8 transform
+#define CODEC_FLAG2_FASTPSKIP 0x00000100 ///< H.264 fast pskip
+#define CODEC_FLAG2_AUD 0x00000200 ///< H.264 access unit delimiters
+#define CODEC_FLAG2_BRDO 0x00000400 ///< B-frame rate-distortion optimization
+#define CODEC_FLAG2_INTRA_VLC 0x00000800 ///< Use MPEG-2 intra VLC table.
+#define CODEC_FLAG2_MEMC_ONLY 0x00001000 ///< Only do ME/MC (I frames -> ref, P frame -> ME+MC).
+#define CODEC_FLAG2_DROP_FRAME_TIMECODE 0x00002000 ///< timecode is in drop frame format.
+#define CODEC_FLAG2_SKIP_RD 0x00004000 ///< RD optimal MB level residual skipping
+#define CODEC_FLAG2_CHUNKS 0x00008000 ///< Input bitstream might be truncated at a packet boundaries instead of only at frame boundaries.
+#define CODEC_FLAG2_NON_LINEAR_QUANT 0x00010000 ///< Use MPEG-2 nonlinear quantizer.
+#define CODEC_FLAG2_BIT_RESERVOIR 0x00020000 ///< Use a bit reservoir when encoding if possible
+
+/* Unsupported options :
+ * Syntax Arithmetic coding (SAC)
+ * Reference Picture Selection
+ * Independent Segment Decoding */
+/* /Fx */
+/* codec capabilities */
+
+#define CODEC_CAP_DRAW_HORIZ_BAND 0x0001 ///< Decoder can use draw_horiz_band callback.
+/**
+ * Codec uses get_buffer() for allocating buffers.
+ * direct rendering method 1
+ */
+#define CODEC_CAP_DR1 0x0002
+/* If 'parse_only' field is true, then avcodec_parse_frame() can be used. */
+#define CODEC_CAP_PARSE_ONLY 0x0004
+#define CODEC_CAP_TRUNCATED 0x0008
+/* Codec can export data for HW decoding (XvMC). */
+#define CODEC_CAP_HWACCEL 0x0010
+/**
+ * Codec has a nonzero delay and needs to be fed with NULL at the end to get the delayed data.
+ * If this is not set, the codec is guaranteed to never be fed with NULL data.
+ */
+#define CODEC_CAP_DELAY 0x0020
+/**
+ * Codec can be fed a final frame with a smaller size.
+ * This can be used to prevent truncation of the last audio samples.
+ */
+#define CODEC_CAP_SMALL_LAST_FRAME 0x0040
+/**
+ * Codec can export data for HW decoding (VDPAU).
+ */
+#define CODEC_CAP_HWACCEL_VDPAU 0x0080
+/**
+ * Codec can output multiple frames per AVPacket.
+ * Normally demuxers return one frame at a time; demuxers which do not do
+ * so are connected to a parser to split what they return into proper frames.
+ * This flag is reserved for the very rare category of codecs which have a
+ * bitstream that cannot be split into frames without time-consuming
+ * operations like full decoding. Demuxers carrying such bitstreams thus
+ * may return multiple frames in a packet. This has many disadvantages, like
+ * prohibiting stream copy in many cases, so it should only be considered
+ * as a last resort.
+ */
+#define CODEC_CAP_SUBFRAMES 0x0100
+/**
+ * Codec supports frame-based multithreading.
+ */
+#define CODEC_CAP_FRAME_THREADS 0x0200
+
+//The following defines may change, don't expect compatibility if you use them.
+#define MB_TYPE_INTRA4x4 0x0001
+#define MB_TYPE_INTRA16x16 0x0002 //FIXME H.264-specific
+#define MB_TYPE_INTRA_PCM 0x0004 //FIXME H.264-specific
+#define MB_TYPE_16x16 0x0008
+#define MB_TYPE_16x8 0x0010
+#define MB_TYPE_8x16 0x0020
+#define MB_TYPE_8x8 0x0040
+#define MB_TYPE_INTERLACED 0x0080
+#define MB_TYPE_DIRECT2 0x0100 //FIXME
+#define MB_TYPE_ACPRED 0x0200
+#define MB_TYPE_GMC 0x0400
+#define MB_TYPE_SKIP 0x0800
+#define MB_TYPE_P0L0 0x1000
+#define MB_TYPE_P1L0 0x2000
+#define MB_TYPE_P0L1 0x4000
+#define MB_TYPE_P1L1 0x8000
+#define MB_TYPE_L0 (MB_TYPE_P0L0 | MB_TYPE_P1L0)
+#define MB_TYPE_L1 (MB_TYPE_P0L1 | MB_TYPE_P1L1)
+#define MB_TYPE_L0L1 (MB_TYPE_L0 | MB_TYPE_L1)
+#define MB_TYPE_QUANT 0x00010000
+#define MB_TYPE_CBP 0x00020000
+//Note bits 24-31 are reserved for codec specific use (h264 ref0, mpeg1 0mv, ...)
+
+/**
+ * Pan Scan area.
+ * This specifies the area which should be displayed.
+ * Note there may be multiple such areas for one frame.
+ */
+typedef struct AVPanScan{
+ /**
+ * id
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int id;
+
+ /**
+ * width and height in 1/16 pel
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int width;
+ int height;
+
+ /**
+ * position of the top left corner in 1/16 pel for up to 3 fields/frames
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int16_t position[3][2];
+}AVPanScan;
+
+#define FF_COMMON_FRAME \
+ /**\
+ * pointer to the picture planes.\
+ * This might be different from the first allocated byte\
+ * - encoding: \
+ * - decoding: \
+ */\
+ uint8_t *data[4];\
+ int linesize[4];\
+ /**\
+ * pointer to the first allocated byte of the picture. Can be used in get_buffer/release_buffer.\
+ * This isn't used by libavcodec unless the default get/release_buffer() is used.\
+ * - encoding: \
+ * - decoding: \
+ */\
+ uint8_t *base[4];\
+ /**\
+ * 1 -> keyframe, 0-> not\
+ * - encoding: Set by libavcodec.\
+ * - decoding: Set by libavcodec.\
+ */\
+ int key_frame;\
+\
+ /**\
+ * Picture type of the frame, see ?_TYPE below.\
+ * - encoding: Set by libavcodec. for coded_picture (and set by user for input).\
+ * - decoding: Set by libavcodec.\
+ */\
+ int pict_type;\
+\
+ /**\
+ * presentation timestamp in time_base units (time when frame should be shown to user)\
+ * If AV_NOPTS_VALUE then frame_rate = 1/time_base will be assumed.\
+ * - encoding: MUST be set by user.\
+ * - decoding: Set by libavcodec.\
+ */\
+ int64_t pts;\
+\
+ /**\
+ * picture number in bitstream order\
+ * - encoding: set by\
+ * - decoding: Set by libavcodec.\
+ */\
+ int coded_picture_number;\
+ /**\
+ * picture number in display order\
+ * - encoding: set by\
+ * - decoding: Set by libavcodec.\
+ */\
+ int display_picture_number;\
+\
+ /**\
+ * quality (between 1 (good) and FF_LAMBDA_MAX (bad)) \
+ * - encoding: Set by libavcodec. for coded_picture (and set by user for input).\
+ * - decoding: Set by libavcodec.\
+ */\
+ int quality; \
+\
+ /**\
+ * buffer age (1->was last buffer and didn't change, 2->..., ...).\
+ * Set to INT_MAX if the buffer has not been used yet.\
+ * - encoding: unused\
+ * - decoding: MUST be set by get_buffer().\
+ */\
+ int age;\
+\
+ /**\
+ * is this picture used as reference\
+ * The values for this are the same as the MpegEncContext.picture_structure\
+ * variable, that is 1->top field, 2->bottom field, 3->frame/both fields.\
+ * Set to 4 for delayed, non-reference frames.\
+ * - encoding: unused\
+ * - decoding: Set by libavcodec. (before get_buffer() call)).\
+ */\
+ int reference;\
+\
+ /**\
+ * QP table\
+ * - encoding: unused\
+ * - decoding: Set by libavcodec.\
+ */\
+ int8_t *qscale_table;\
+ /**\
+ * QP store stride\
+ * - encoding: unused\
+ * - decoding: Set by libavcodec.\
+ */\
+ int qstride;\
+\
+ /**\
+ * mbskip_table[mb]>=1 if MB didn't change\
+ * stride= mb_width = (width+15)>>4\
+ * - encoding: unused\
+ * - decoding: Set by libavcodec.\
+ */\
+ uint8_t *mbskip_table;\
+\
+ /**\
+ * motion vector table\
+ * @code\
+ * example:\
+ * int mv_sample_log2= 4 - motion_subsample_log2;\
+ * int mb_width= (width+15)>>4;\
+ * int mv_stride= (mb_width << mv_sample_log2) + 1;\
+ * motion_val[direction][x + y*mv_stride][0->mv_x, 1->mv_y];\
+ * @endcode\
+ * - encoding: Set by user.\
+ * - decoding: Set by libavcodec.\
+ */\
+ int16_t (*motion_val[2])[2];\
+\
+ /**\
+ * macroblock type table\
+ * mb_type_base + mb_width + 2\
+ * - encoding: Set by user.\
+ * - decoding: Set by libavcodec.\
+ */\
+ uint32_t *mb_type;\
+\
+ /**\
+ * log2 of the size of the block which a single vector in motion_val represents: \
+ * (4->16x16, 3->8x8, 2-> 4x4, 1-> 2x2)\
+ * - encoding: unused\
+ * - decoding: Set by libavcodec.\
+ */\
+ uint8_t motion_subsample_log2;\
+\
+ /**\
+ * for some private data of the user\
+ * - encoding: unused\
+ * - decoding: Set by user.\
+ */\
+ void *opaque;\
+\
+ /**\
+ * error\
+ * - encoding: Set by libavcodec. if flags&CODEC_FLAG_PSNR.\
+ * - decoding: unused\
+ */\
+ uint64_t error[4];\
+\
+ /**\
+ * type of the buffer (to keep track of who has to deallocate data[*])\
+ * - encoding: Set by the one who allocates it.\
+ * - decoding: Set by the one who allocates it.\
+ * Note: User allocated (direct rendering) & internal buffers cannot coexist currently.\
+ */\
+ int type;\
+ \
+ /**\
+ * When decoding, this signals how much the picture must be delayed.\
+ * extra_delay = repeat_pict / (2*fps)\
+ * - encoding: unused\
+ * - decoding: Set by libavcodec.\
+ */\
+ int repeat_pict;\
+ \
+ /**\
+ * \
+ */\
+ int qscale_type;\
+ \
+ /**\
+ * The content of the picture is interlaced.\
+ * - encoding: Set by user.\
+ * - decoding: Set by libavcodec. (default 0)\
+ */\
+ int interlaced_frame;\
+ \
+ /**\
+ * If the content is interlaced, is top field displayed first.\
+ * - encoding: Set by user.\
+ * - decoding: Set by libavcodec.\
+ */\
+ int top_field_first;\
+ \
+ /**\
+ * Pan scan.\
+ * - encoding: Set by user.\
+ * - decoding: Set by libavcodec.\
+ */\
+ AVPanScan *pan_scan;\
+ \
+ /**\
+ * Tell user application that palette has changed from previous frame.\
+ * - encoding: ??? (no palette-enabled encoder yet)\
+ * - decoding: Set by libavcodec. (default 0).\
+ */\
+ int palette_has_changed;\
+ \
+ /**\
+ * codec suggestion on buffer type if != 0\
+ * - encoding: unused\
+ * - decoding: Set by libavcodec. (before get_buffer() call)).\
+ */\
+ int buffer_hints;\
+\
+ /**\
+ * DCT coefficients\
+ * - encoding: unused\
+ * - decoding: Set by libavcodec.\
+ */\
+ short *dct_coeff;\
+\
+ /**\
+ * motion reference frame index\
+ * - encoding: Set by user.\
+ * - decoding: Set by libavcodec.\
+ */\
+ int8_t *ref_index[2];\
+\
+ /**\
+ * reordered opaque 64bit number (generally a PTS) from AVCodecContext.reordered_opaque\
+ * output in AVFrame.reordered_opaque\
+ * - encoding: unused\
+ * - decoding: Read by user.\
+ */\
+ int64_t reordered_opaque;\
+ int64_t reordered_opaque2; /* ffdshow custom code */\
+ int64_t reordered_opaque3; /* ffdshow custom code */\
+\
+ /* ffdshow custom code */\
+ int mb_width,mb_height,mb_stride,b8_stride;\
+ int num_sprite_warping_points,real_sprite_warping_points;\
+ int play_flags;\
+\
+ /* ffmpeg-mt */\
+ struct AVCodecContext *owner;\
+ void *thread_opaque;\
+\
+ /* ffdshow custom stuffs (begin) */\
+\
+ int h264_poc_decoded;\
+ int h264_poc_outputed;\
+ int h264_frame_num_decoded;\
+ int h264_max_frame_num;\
+\
+ int mpeg2_sequence_end_flag;\
+\
+ /**\
+ * video_full_range_flag\
+ * - encoding: unused\
+ * - decoding: Set by libavcodec. -1: invalid, 0: TV (16-235) 1: PC (1-254)\
+ */\
+ VideoFullRangeType video_full_range_flag;\
+ /**\
+ * YCbCr_RGB_matrix_coefficients\
+ * - encoding: unused\
+ * - decoding: Set by libavcodec.\
+ */\
+ YCbCr_RGB_MatrixCoefficientsType YCbCr_RGB_matrix_coefficients;
+ /* ffdshow custom stuffs (end) */
+
+#define FF_QSCALE_TYPE_MPEG1 0
+#define FF_QSCALE_TYPE_MPEG2 1
+#define FF_QSCALE_TYPE_H264 2
+
+#define FF_BUFFER_TYPE_INTERNAL 1
+#define FF_BUFFER_TYPE_USER 2 ///< direct rendering buffers (image is (de)allocated by user)
+#define FF_BUFFER_TYPE_SHARED 4 ///< Buffer from somewhere else; don't deallocate image (data/base), all other tables are not shared.
+#define FF_BUFFER_TYPE_COPY 8 ///< Just a (modified) copy of some other buffer, don't deallocate anything.
+
+
+#define FF_I_TYPE 1 ///< Intra
+#define FF_P_TYPE 2 ///< Predicted
+#define FF_B_TYPE 3 ///< Bi-dir predicted
+#define FF_S_TYPE 4 ///< S(GMC)-VOP MPEG4
+#define FF_SI_TYPE 5 ///< Switching Intra
+#define FF_SP_TYPE 6 ///< Switching Predicted
+#define FF_BI_TYPE 7
+
+#define FF_BUFFER_HINTS_VALID 0x01 // Buffer hints value is meaningful (if 0 ignore).
+#define FF_BUFFER_HINTS_READABLE 0x02 // Codec will read from buffer.
+#define FF_BUFFER_HINTS_PRESERVE 0x04 // User must not alter buffer content.
+#define FF_BUFFER_HINTS_REUSABLE 0x08 // Codec will reuse the buffer (update).
+
+/**
+ * Audio Video Frame.
+ * New fields can be added to the end of FF_COMMON_FRAME with minor version
+ * bumps.
+ * Removal, reordering and changes to existing fields require a major
+ * version bump. No fields should be added into AVFrame before or after
+ * FF_COMMON_FRAME!
+ * sizeof(AVFrame) must not be used outside libav*.
+ */
+typedef struct AVFrame {
+ FF_COMMON_FRAME
+} AVFrame;
+
+/**
+ * main external API structure.
+ * New fields can be added to the end with minor version bumps.
+ * Removal, reordering and changes to existing fields require a major
+ * version bump.
+ * sizeof(AVCodecContext) must not be used outside libav*.
+ */
+struct TlibavcodecExt;
+typedef struct AVCodecContext {
+ /**
+ * information on struct for av_log
+ * - set by avcodec_alloc_context
+ */
+ const AVClass *av_class;
+ /**
+ * the average bitrate
+ * - encoding: Set by user; unused for constant quantizer encoding.
+ * - decoding: Set by libavcodec. 0 or some bitrate if this info is available in the stream.
+ */
+ int bit_rate;
+
+ /**
+ * number of bits the bitstream is allowed to diverge from the reference.
+ * the reference can be CBR (for CBR pass1) or VBR (for pass2)
+ * - encoding: Set by user; unused for constant quantizer encoding.
+ * - decoding: unused
+ */
+ int bit_rate_tolerance;
+
+ /**
+ * CODEC_FLAG_*.
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int flags;
+
+ /**
+ * Some codecs need additional format info. It is stored here.
+ * If any muxer uses this then ALL demuxers/parsers AND encoders for the
+ * specific codec MUST set it correctly otherwise stream copy breaks.
+ * In general, use of this field by muxers is not recommended.
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec. (FIXME: Is this OK?)
+ */
+ int sub_id;
+
+ /**
+ * Motion estimation algorithm used for video coding.
+ * 1 (zero), 2 (full), 3 (log), 4 (phods), 5 (epzs), 6 (x1), 7 (hex),
+ * 8 (umh), 9 (iter), 10 (tesa) [7, 8, 10 are x264 specific, 9 is snow specific]
+ * - encoding: MUST be set by user.
+ * - decoding: unused
+ */
+ int me_method;
+
+ /**
+ * some codecs need / can use extradata like Huffman tables.
+ * mjpeg: Huffman tables
+ * rv10: additional flags
+ * mpeg4: global headers (they can be in the bitstream or here)
+ * The allocated memory should be FF_INPUT_BUFFER_PADDING_SIZE bytes larger
+ * than extradata_size to avoid problems if it is read with the bitstream reader.
+ * The bytewise contents of extradata must not depend on the architecture or CPU endianness.
+ * - encoding: Set/allocated/freed by libavcodec.
+ * - decoding: Set/allocated/freed by user.
+ */
+ const uint8_t *extradata;
+ int extradata_size;
+
+ /**
+ * This is the fundamental unit of time (in seconds) in terms
+ * of which frame timestamps are represented. For fixed-fps content,
+ * timebase should be 1/framerate and timestamp increments should be
+ * identically 1.
+ * - encoding: MUST be set by user.
+ * - decoding: Set by libavcodec.
+ */
+ AVRational time_base;
+
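/* Illustrative sketch: a timestamp in time_base units is converted to seconds
 * with the rational alone (libavutil's av_q2d() performs the same division). */
static inline double pts_to_seconds_example(int64_t pts, AVRational time_base)
{
    if (pts == AV_NOPTS_VALUE)
        return 0.0;                 /* unknown timestamp */
    return pts * (double)time_base.num / (double)time_base.den;
}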
+ /* video only */
+ /**
+ * picture width / height.
+ * - encoding: MUST be set by user.
+ * - decoding: Set by libavcodec.
+ * Note: For compatibility it is possible to set this instead of
+ * coded_width/height before decoding.
+ */
+ int width, height;
+
+#define FF_ASPECT_EXTENDED 15
+
+ /**
+ * the number of pictures in a group of pictures, or 0 for intra_only
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int gop_size;
+
+ /**
+ * Pixel format, see PIX_FMT_xxx.
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ enum PixelFormat pix_fmt;
+
+ /**
+ * Frame rate emulation. If not zero, the lower layer (i.e. format handler)
+ * has to read frames at native frame rate.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int rate_emu;
+
+ /**
+ * If non NULL, 'draw_horiz_band' is called by the libavcodec
+ * decoder to draw a horizontal band. It improves cache usage. Not
+ * all codecs can do that. You must check the codec capabilities
+ * beforehand.
+ * The function is also used by hardware acceleration APIs.
+ * It is called at least once during frame decoding to pass
+ * the data needed for hardware render.
+ * In that mode instead of pixel data, AVFrame points to
+ * a structure specific to the acceleration API. The application
+ * reads the structure and can change some fields to indicate progress
+ * or mark state.
+ * - encoding: unused
+ * - decoding: Set by user.
+ * @param height the height of the slice
+ * @param y the y position of the slice
+ * @param type 1->top field, 2->bottom field, 3->frame
+ * @param offset offset into the AVFrame.data from which the slice should be read
+ */
+ void (*draw_horiz_band)(struct AVCodecContext *s,
+ const AVFrame *src, int offset[4],
+ int y, int type, int height);
+
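/* Illustrative sketch: the shape of a draw_horiz_band callback matching the
 * pointer declared above. A real implementation would copy rows
 * [y, y + height) of each plane from src->data[] (plus offset[]) to the
 * display surface; this stub only shows the signature and parameters. */
static void draw_horiz_band_example(struct AVCodecContext *s,
                                    const AVFrame *src, int offset[4],
                                    int y, int type, int height)
{
    (void)s; (void)src; (void)offset; (void)y; (void)type; (void)height;
}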
+ /* audio only */
+ int sample_rate; ///< samples per second
+ int channels; ///< number of audio channels
+
+ /**
+ * audio sample format
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ enum SampleFormat sample_fmt; ///< sample format
+
+ /* The following data should not be initialized. */
+ /**
+ * Samples per packet, initialized when calling 'init'.
+ */
+ int frame_size;
+ int frame_number; ///< audio or video frame number
+#if LIBAVCODEC_VERSION_MAJOR < 53
+ int real_pict_num; ///< Returns the real picture number of previous encoded frame.
+#endif
+
+ /**
+ * Number of frames the decoded output will be delayed relative to
+ * the encoded input.
+ * - encoding: Set by libavcodec.
+ * - decoding: unused
+ */
+ int delay;
+
+ /* - encoding parameters */
+ float qcompress; ///< amount of qscale change between easy & hard scenes (0.0-1.0)
+ float qblur; ///< amount of qscale smoothing over time (0.0-1.0)
+
+ /**
+ * minimum quantizer
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int qmin;
+
+ /**
+ * maximum quantizer
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int qmax;
+
+ /**
+ * maximum quantizer difference between frames
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int max_qdiff;
+
+ /**
+ * maximum number of B-frames between non-B-frames
+ * Note: The output will be delayed by max_b_frames+1 relative to the input.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int max_b_frames;
+
+ /**
+ * qscale factor between IP and B-frames
+ * If > 0 then the last P-frame quantizer will be used (q= lastp_q*factor+offset).
+ * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset).
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float b_quant_factor;
+
+ /** obsolete FIXME remove */
+ int rc_strategy;
+#define FF_RC_STRATEGY_XVID 1
+
+ int b_frame_strategy;
+
+ /**
+ * hurry up amount
+ * - encoding: unused
+ * - decoding: Set by user. 1-> Skip B-frames, 2-> Skip IDCT/dequant too, 5-> Skip everything except header
+ * @deprecated Deprecated in favor of skip_idct and skip_frame.
+ */
+ int hurry_up;
+
+ struct AVCodec *codec;
+
+ void *priv_data;
+
+ int rtp_payload_size; /* The size of the RTP payload: the coder will */
+ /* do its best to deliver a chunk with size */
+ /* below rtp_payload_size, the chunk will start */
+ /* with a start code on some codecs like H.263. */
+ /* This doesn't take account of any particular */
+ /* headers inside the transmitted RTP payload. */
+
+
+ /* The RTP callback: This function is called */
+ /* every time the encoder has a packet to send. */
+ /* It depends on the encoder if the data starts */
+ /* with a Start Code (it should). H.263 does. */
+ /* mb_nb contains the number of macroblocks */
+ /* encoded in the RTP payload. */
+ void (*rtp_callback)(struct AVCodecContext *avctx, void *data, int size, int mb_nb);
+
+ /* statistics, used for 2-pass encoding */
+ int mv_bits;
+ int header_bits;
+ int i_tex_bits;
+ int p_tex_bits;
+ int i_count;
+ int p_count;
+ int skip_count;
+ int misc_bits;
+
+ /**
+ * number of bits used for the previously encoded frame
+ * - encoding: Set by libavcodec.
+ * - decoding: unused
+ */
+ int frame_bits;
+
+ /**
+ * Private data of the user, can be used to carry app specific stuff.
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ struct TlibavcodecExt *opaque;
+
+ char codec_name[32];
+ enum CodecType codec_type; /* see CODEC_TYPE_xxx */
+ enum CodecID codec_id; /* see CODEC_ID_xxx */
+
+ /**
+ * fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
+ * This is used to work around some encoder bugs.
+ * A demuxer should set this to what is stored in the field used to identify the codec.
+ * If there are multiple such fields in a container then the demuxer should choose the one
+ * which maximizes the information about the used codec.
+ * If the codec tag field in a container is larger than 32 bits then the demuxer should
+ * remap the longer ID to 32 bits with a table or other structure. Alternatively a new
+ * extra_codec_tag + size could be added but for this a clear advantage must be demonstrated
+ * first.
+ * - encoding: Set by user, if not then the default based on codec_id will be used.
+ * - decoding: Set by user, will be converted to uppercase by libavcodec during init.
+ */
+ unsigned int codec_tag;
+
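/* Illustrative sketch: packing a fourcc exactly as the comment above
 * describes ("ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A'). libavutil's
 * MKTAG macro does the same; this local macro is only for illustration. */
#define EXAMPLE_FOURCC(a, b, c, d) \
    ((unsigned int)(a) | ((unsigned int)(b) << 8) | \
     ((unsigned int)(c) << 16) | ((unsigned int)(d) << 24))
/* e.g. avctx->codec_tag = EXAMPLE_FOURCC('X', 'V', 'I', 'D'); */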
+ /**
+ * Work around bugs in encoders which sometimes cannot be detected automatically.
+ * - encoding: Set by user
+ * - decoding: Set by user
+ */
+ int workaround_bugs;
+#define FF_BUG_AUTODETECT 1 ///< autodetection
+#define FF_BUG_OLD_MSMPEG4 2
+#define FF_BUG_XVID_ILACE 4
+#define FF_BUG_UMP4 8
+#define FF_BUG_NO_PADDING 16
+#define FF_BUG_AMV 32
+#define FF_BUG_AC_VLC 0 ///< Will be removed, libavcodec can now handle these non-compliant files by default.
+#define FF_BUG_QPEL_CHROMA 64
+#define FF_BUG_STD_QPEL 128
+#define FF_BUG_QPEL_CHROMA2 256
+#define FF_BUG_DIRECT_BLOCKSIZE 512
+#define FF_BUG_EDGE 1024
+#define FF_BUG_HPEL_CHROMA 2048
+#define FF_BUG_DC_CLIP 4096
+#define FF_BUG_MS 8192 ///< Work around various bugs in Microsoft's broken decoders.
+//#define FF_BUG_FAKE_SCALABILITY 16 //Autodetection should work 100%.
+
+ /**
+ * luma single coefficient elimination threshold
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int luma_elim_threshold;
+
+ /**
+ * chroma single coeff elimination threshold
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int chroma_elim_threshold;
+
+ /**
+ * strictly follow the standard (MPEG4, ...).
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ * Setting this to STRICT or higher means the encoder and decoder will
+ * generally do stupid things, whereas setting it to unofficial or lower
+ * means the encoder might use things that are not supported by all
+ * spec-compliant decoders. Decoders make no distinction between normal,
+ * unofficial and experimental, that is, they always try to decode things
+ * when they can, unless they are explicitly asked to behave stupidly
+ * (= strictly conform to the specs).
+ */
+ int strict_std_compliance;
+#define FF_COMPLIANCE_VERY_STRICT 2 ///< Strictly conform to an older, more strict version of the spec or reference software.
+#define FF_COMPLIANCE_STRICT 1 ///< Strictly conform to all the things in the spec no matter what consequences.
+#define FF_COMPLIANCE_NORMAL 0
+#define FF_COMPLIANCE_INOFFICIAL -1 ///< Allow inofficial extensions.
+#define FF_COMPLIANCE_EXPERIMENTAL -2 ///< Allow nonstandardized experimental things.
+
+ /**
+ * qscale offset between IP and B-frames
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float b_quant_offset;
+
+ /**
+ * Error recognition; higher values will detect more errors but may
+ * misdetect some more or less valid parts as errors.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int error_recognition;
+#define FF_ER_CAREFUL 1
+#define FF_ER_COMPLIANT 2
+#define FF_ER_AGGRESSIVE 3
+#define FF_ER_VERY_AGGRESSIVE 4
+
+ /**
+ * Called at the beginning of each frame to get a buffer for it.
+ * If pic.reference is set then the frame will be read later by libavcodec.
+ * avcodec_align_dimensions() should be used to find the required width and
+ * height, as they normally need to be rounded up to the next multiple of 16.
+ * if CODEC_CAP_DR1 is not set then get_buffer() must call
+ * avcodec_default_get_buffer() instead of providing buffers allocated by
+ * some other means.
+ * - encoding: unused
+ * - decoding: Set by libavcodec, user can override.
+ */
+ int (*get_buffer)(struct AVCodecContext *c, AVFrame *pic);
+
+ /**
+ * Called to release buffers which were allocated with get_buffer.
+ * A released buffer can be reused in get_buffer().
+ * pic.data[*] must be set to NULL.
+ * - encoding: unused
+ * - decoding: Set by libavcodec, user can override.
+ */
+ void (*release_buffer)(struct AVCodecContext *c, AVFrame *pic);
+
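/* Illustrative sketch: overriding callbacks that simply defer to the library
 * defaults (avcodec_default_get_buffer() is named in the comment above;
 * avcodec_default_release_buffer() is assumed to be its counterpart). A real
 * direct-rendering client would fill pic->data[]/linesize[] itself and set
 * pic->type to FF_BUFFER_TYPE_USER. */
static int get_buffer_example(struct AVCodecContext *c, AVFrame *pic)
{
    return avcodec_default_get_buffer(c, pic);
}
static void release_buffer_example(struct AVCodecContext *c, AVFrame *pic)
{
    avcodec_default_release_buffer(c, pic);
}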
+ /**
+ * Size of the frame reordering buffer in the decoder.
+ * For MPEG-2 it is 1 IPB or 0 low delay IP.
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec.
+ */
+ int has_b_frames;
+
+ /**
+ * number of bytes per packet if constant and known or 0
+ * Used by some WAV based audio codecs.
+ */
+ int block_align;
+
+ int parse_only; /* - decoding only: If true, only parsing is done
+ (function avcodec_parse_frame()). The frame
+ data is returned. Only MPEG codecs support this now. */
+
+ /**
+ * 0-> h263 quant 1-> mpeg quant
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mpeg_quant;
+
+ /**
+ * pass1 encoding statistics output buffer
+ * - encoding: Set by libavcodec.
+ * - decoding: unused
+ */
+ char *stats_out;
+
+ /**
+ * pass2 encoding statistics input buffer
+ * Concatenated stuff from stats_out of pass1 should be placed here.
+ * - encoding: Allocated/set/freed by user.
+ * - decoding: unused
+ */
+ char *stats_in;
+
+ /**
+ * ratecontrol qmin qmax limiting method
+ * 0-> clipping, 1-> use a nice continuous function to limit qscale within qmin/qmax.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float rc_qsquish;
+
+ float rc_qmod_amp;
+ int rc_qmod_freq;
+
+ /**
+ * ratecontrol override, see RcOverride
+ * - encoding: Allocated/set/freed by user.
+ * - decoding: unused
+ */
+ RcOverride *rc_override;
+ int rc_override_count;
+
+ /**
+ * rate control equation
+ * - encoding: Set by user
+ * - decoding: unused
+ */
+ const char *rc_eq;
+
+ /**
+ * maximum bitrate
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int rc_max_rate;
+
+ /**
+ * minimum bitrate
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int rc_min_rate;
+
+ /**
+ * decoder bitstream buffer size
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int rc_buffer_size;
+ float rc_buffer_aggressivity;
+
+ /**
+ * qscale factor between P and I-frames
+ * If > 0 then the last p frame quantizer will be used (q= lastp_q*factor+offset).
+ * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset).
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float i_quant_factor;
+
+ /**
+ * qscale offset between P and I-frames
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float i_quant_offset;
+
+ /**
+ * initial complexity for pass1 ratecontrol
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float rc_initial_cplx;
+
+ /**
+ * DCT algorithm, see FF_DCT_* below
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int dct_algo;
+#define FF_DCT_AUTO 0
+#define FF_DCT_FASTINT 1
+#define FF_DCT_INT 2
+#define FF_DCT_MMX 3
+#define FF_DCT_MLIB 4
+#define FF_DCT_ALTIVEC 5
+#define FF_DCT_FAAN 6
+
+ /**
+ * luminance masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float lumi_masking;
+
+ /**
+ * temporary complexity masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float temporal_cplx_masking;
+
+ /**
+ * spatial complexity masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float spatial_cplx_masking;
+
+ /**
+ * p block masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float p_masking;
+
+ /**
+ * darkness masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float dark_masking;
+
+ /**
+ * IDCT algorithm, see FF_IDCT_* below.
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int idct_algo;
+/*
+ * idct_algo is set by ffdshow.ax.
+ * It uses the list "idctNames[]" found in Tlibavcodec.cpp.
+ * The indexes of the items in that list should match with the values below.
+ */
+#define FF_IDCT_AUTO 0
+#define FF_IDCT_LIBMPEG2MMX 1
+#define FF_IDCT_SIMPLEMMX 2
+#define FF_IDCT_XVIDMMX 3
+#define FF_IDCT_SIMPLE 4
+#define FF_IDCT_INT 5
+#define FF_IDCT_FAAN 6
+#define FF_IDCT_H264 7
+#define FF_IDCT_VP3 8
+#define FF_IDCT_CAVS 9
+#define FF_IDCT_WMV2 10
+
+ /**
+ * slice count
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by user (or 0).
+ */
+ int slice_count;
+ /**
+ * slice offsets in the frame in bytes
+ * - encoding: Set/allocated by libavcodec.
+ * - decoding: Set/allocated by user (or NULL).
+ */
+ int *slice_offset;
+
+ /**
+ * error concealment flags
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int error_concealment;
+#define FF_EC_GUESS_MVS 1
+#define FF_EC_DEBLOCK 2
+
+ /**
+ * dsp_mask can be used to disable unwanted CPU features
+ * (i.e. MMX, SSE, ...)
+ *
+ * With the FORCE flag you may instead enable the given CPU features.
+ * (Dangerous: usable in case of misdetection; improper usage, however, will
+ * result in a program crash.)
+ */
+ unsigned dsp_mask;
+#define FF_MM_FORCE 0x80000000 /* Force usage of selected flags (OR) */
+ /* lower 16 bits - CPU features */
+#define FF_MM_MMX 0x0001 ///< standard MMX
+#define FF_MM_3DNOW 0x0004 ///< AMD 3DNOW
+#define FF_MM_MMXEXT 0x0002 ///< SSE integer functions or AMD MMX ext
+#define FF_MM_MMX2 0x0002 ///< SSE integer functions or AMD MMX ext
+#define FF_MM_SSE 0x0008 ///< SSE functions
+#define FF_MM_SSE2 0x0010 ///< PIV SSE2 functions
+#define FF_MM_3DNOWEXT 0x0020 ///< AMD 3DNowExt
+#define FF_MM_SSE3 0x0040 ///< Prescott SSE3 functions
+#define FF_MM_SSSE3 0x0080 ///< Conroe SSSE3 functions
+#define FF_MM_SSE4 0x0100 ///< Penryn SSE4.1 functions
+#define FF_MM_SSE42 0x0200 ///< Nehalem SSE4.2 functions
+
+ /**
+ * bits per sample/pixel from the demuxer (needed for huffyuv).
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by user.
+ */
+ int bits_per_coded_sample;
+
+ /**
+ * prediction method (needed for huffyuv)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int prediction_method;
+#define FF_PRED_LEFT 0
+#define FF_PRED_PLANE 1
+#define FF_PRED_MEDIAN 2
+
+ /**
+ * sample aspect ratio (0 if unknown)
+ * That is the width of a pixel divided by the height of the pixel.
+ * Numerator and denominator must be relatively prime and smaller than 256 for some video standards.
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ AVRational sample_aspect_ratio;
+
+ /* ffdshow custom code
+ * Regarding MPEG-2, there are currently two ways of encoding SAR.
+ * Of course one is wrong. However, a considerable number of videos are encoded the wrong way.
+ * We set the spec-compliant value in sample_aspect_ratio and
+ * the non-compliant value in sample_aspect_ratio2.
+ */
+ AVRational sample_aspect_ratio2;
+
+ /**
+ * the picture in the bitstream
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec.
+ */
+ AVFrame *coded_frame;
+
+ /**
+ * debug
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int debug;
+#define FF_DEBUG_PICT_INFO 1
+#define FF_DEBUG_RC 2
+#define FF_DEBUG_BITSTREAM 4
+#define FF_DEBUG_MB_TYPE 8
+#define FF_DEBUG_QP 16
+#define FF_DEBUG_MV 32
+#define FF_DEBUG_DCT_COEFF 0x00000040
+#define FF_DEBUG_SKIP 0x00000080
+#define FF_DEBUG_STARTCODE 0x00000100
+#define FF_DEBUG_PTS 0x00000200
+#define FF_DEBUG_ER 0x00000400
+#define FF_DEBUG_MMCO 0x00000800
+#define FF_DEBUG_BUGS 0x00001000
+#define FF_DEBUG_VIS_QP 0x00002000
+#define FF_DEBUG_VIS_MB_TYPE 0x00004000
+#define FF_DEBUG_BUFFERS 0x00008000
+
+ /**
+ * debug
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int debug_mv;
+#define FF_DEBUG_VIS_MV_P_FOR 0x00000001 //visualize forward predicted MVs of P frames
+#define FF_DEBUG_VIS_MV_B_FOR 0x00000002 //visualize forward predicted MVs of B frames
+#define FF_DEBUG_VIS_MV_B_BACK 0x00000004 //visualize backward predicted MVs of B frames
+
+ /**
+ * error
+ * - encoding: Set by libavcodec if flags&CODEC_FLAG_PSNR.
+ * - decoding: unused
+ */
+ uint64_t error[4];
+
+ /**
+ * minimum MB quantizer
+ * - encoding: unused
+ * - decoding: unused
+ */
+ int mb_qmin;
+
+ /**
+ * maximum MB quantizer
+ * - encoding: unused
+ * - decoding: unused
+ */
+ int mb_qmax;
+
+ /**
+ * motion estimation comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_cmp;
+ /**
+ * subpixel motion estimation comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_sub_cmp;
+ /**
+ * macroblock comparison function (not supported yet)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_cmp;
+ /**
+ * interlaced DCT comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int ildct_cmp;
+#define FF_CMP_SAD 0
+#define FF_CMP_SSE 1
+#define FF_CMP_SATD 2
+#define FF_CMP_DCT 3
+#define FF_CMP_PSNR 4
+#define FF_CMP_BIT 5
+#define FF_CMP_RD 6
+#define FF_CMP_ZERO 7
+#define FF_CMP_VSAD 8
+#define FF_CMP_VSSE 9
+#define FF_CMP_NSSE 10
+#define FF_CMP_W53 11
+#define FF_CMP_W97 12
+#define FF_CMP_DCTMAX 13
+#define FF_CMP_DCT264 14
+#define FF_CMP_CHROMA 256
+
+ /**
+ * ME diamond size & shape
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int dia_size;
+
+ /**
+ * amount of previous MV predictors (2a+1 x 2a+1 square)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int last_predictor_count;
+
+ /**
+ * prepass for motion estimation
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int pre_me;
+
+ /**
+ * motion estimation prepass comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_pre_cmp;
+
+ /**
+ * ME prepass diamond size & shape
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int pre_dia_size;
+
+ /**
+ * subpel ME quality
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_subpel_quality;
+
+ /**
+ * callback to negotiate the pixelFormat
+ * @param fmt the list of formats which are supported by the codec;
+ * it is terminated by -1 (as 0 is a valid format) and the formats are ordered by quality.
+ * The first is always the native one.
+ * @return the chosen format
+ * - encoding: unused
+ * - decoding: Set by user, if not set the native format will be chosen.
+ */
+ enum PixelFormat (*get_format)(struct AVCodecContext *s, const enum PixelFormat * fmt);
+
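/* Illustrative sketch: a get_format callback that picks the codec's native
 * format, which the comment above says is always first in the -1 terminated
 * list (equivalent to leaving get_format at its default). */
static enum PixelFormat get_format_example(struct AVCodecContext *s,
                                           const enum PixelFormat *fmt)
{
    (void)s;
    return fmt[0];
}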
+ /**
+ * DTG active format information (additional aspect ratio
+ * information only used in DVB MPEG-2 transport streams)
+ * 0 if not set.
+ *
+ * - encoding: unused
+ * - decoding: Set by decoder.
+ */
+ int dtg_active_format;
+#define FF_DTG_AFD_SAME 8
+#define FF_DTG_AFD_4_3 9
+#define FF_DTG_AFD_16_9 10
+#define FF_DTG_AFD_14_9 11
+#define FF_DTG_AFD_4_3_SP_14_9 13
+#define FF_DTG_AFD_16_9_SP_14_9 14
+#define FF_DTG_AFD_SP_4_3 15
+
+ /**
+ * maximum motion estimation search range in subpel units
+ * If 0 then no limit.
+ *
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_range;
+
+ /**
+ * intra quantizer bias
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int intra_quant_bias;
+#define FF_DEFAULT_QUANT_BIAS 999999
+
+ /**
+ * inter quantizer bias
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int inter_quant_bias;
+
+ /**
+ * color table ID
+ * - encoding: unused
+ * - decoding: Which clrtable should be used for 8bit RGB images.
+ * Tables have to be stored somewhere. FIXME
+ */
+ int color_table_id;
+
+ /**
+ * internal_buffer count
+ * Don't touch, used by libavcodec default_get_buffer().
+ */
+ int internal_buffer_count;
+
+ /**
+ * internal_buffers
+ * Don't touch, used by libavcodec default_get_buffer().
+ */
+ void *internal_buffer;
+
+#define FF_LAMBDA_SHIFT 7
+#define FF_LAMBDA_SCALE (1<<FF_LAMBDA_SHIFT)
+#define FF_QP2LAMBDA 118 ///< factor to convert from H.263 QP to lambda
+#define FF_LAMBDA_MAX (256*128-1)
+
+#define FF_QUALITY_SCALE FF_LAMBDA_SCALE //FIXME maybe remove
+ /**
+ * Global quality for codecs which cannot change it per frame.
+ * This should be proportional to MPEG-1/2/4 qscale.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int global_quality;
+
+#define FF_CODER_TYPE_VLC 0
+#define FF_CODER_TYPE_AC 1
+#define FF_CODER_TYPE_RAW 2
+#define FF_CODER_TYPE_RLE 3
+#define FF_CODER_TYPE_DEFLATE 4
+ /**
+ * coder type
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int coder_type;
+
+ /**
+ * context model
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int context_model;
+#if 0
+ /**
+ *
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ uint8_t * (*realloc)(struct AVCodecContext *s, uint8_t *buf, int buf_size);
+#endif
+
+ /**
+ * slice flags
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int slice_flags;
+#define SLICE_FLAG_CODED_ORDER 0x0001 ///< draw_horiz_band() is called in coded order instead of display
+#define SLICE_FLAG_ALLOW_FIELD 0x0002 ///< allow draw_horiz_band() with field slices (MPEG2 field pics)
+#define SLICE_FLAG_ALLOW_PLANE 0x0004 ///< allow draw_horiz_band() with 1 component at a time (SVQ1)
+
+ /**
+ * macroblock decision mode
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_decision;
+#define FF_MB_DECISION_SIMPLE 0 ///< uses mb_cmp
+#define FF_MB_DECISION_BITS 1 ///< chooses the one which needs the fewest bits
+#define FF_MB_DECISION_RD 2 ///< rate distortion
+
+ /**
+ * custom intra quantization matrix
+ * - encoding: Set by user, can be NULL.
+ * - decoding: Set by libavcodec.
+ */
+ uint16_t *intra_matrix;
+
+ /**
+ * custom inter quantization matrix
+ * - encoding: Set by user, can be NULL.
+ * - decoding: Set by libavcodec.
+ */
+ uint16_t *inter_matrix;
+
+ /**
+ * fourcc from the AVI stream header (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
+ * This is used to work around some encoder bugs.
+ * - encoding: unused
+ * - decoding: Set by user, will be converted to uppercase by libavcodec during init.
+ */
+ unsigned int stream_codec_tag;
+
+ /**
+ * scene change detection threshold
+ * 0 is default, larger means fewer detected scene changes.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int scenechange_threshold;
+
+ /**
+ * minimum Lagrange multiplier
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int lmin;
+
+ /**
+ * maximum Lagrange multiplier
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int lmax;
+
+ /**
+ * palette control structure
+ * - encoding: ??? (no palette-enabled encoder yet)
+ * - decoding: Set by user.
+ */
+ struct AVPaletteControl *palctrl;
+
+ /**
+ * noise reduction strength
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int noise_reduction;
+
+ /**
+ * Called at the beginning of a frame to get a reusable buffer for it.
+ * Buffer type (size, hints) must be the same. libavcodec won't check it.
+ * libavcodec will pass the previous buffer in pic; the function should return
+ * the same buffer or a new buffer with the old frame "painted" into it.
+ * If pic.data[0] == NULL it must behave like get_buffer().
+ * if CODEC_CAP_DR1 is not set then reget_buffer() must call
+ * avcodec_default_reget_buffer() instead of providing buffers allocated by
+ * some other means.
+ * - encoding: unused
+ * - decoding: Set by libavcodec, user can override
+ */
+ int (*reget_buffer)(struct AVCodecContext *c, AVFrame *pic);
+
+ /**
+ * Number of bits which should be loaded into the rc buffer before decoding starts.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int rc_initial_buffer_occupancy;
+
+ /**
+ *
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int inter_threshold;
+
+ /**
+ * CODEC_FLAG2_*
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int flags2;
+
+ /**
+ * Simulates errors in the bitstream to test error concealment.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int error_rate;
+
+ /**
+ * MP3 antialias algorithm, see FF_AA_* below.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int antialias_algo;
+#define FF_AA_AUTO 0
+#define FF_AA_FASTINT 1 //not implemented yet
+#define FF_AA_INT 2
+#define FF_AA_FLOAT 3
+ /**
+ * quantizer noise shaping
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int quantizer_noise_shaping;
+
+ /**
+ * thread count
+ * is used to decide how many independent tasks should be passed to execute()
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int thread_count;
+
+ /**
+ * The codec may call this to execute several independent things.
+ * It will return only after finishing all tasks.
+ * The user may replace this with some multithreaded implementation,
+ * the default implementation will execute the parts serially.
+ * @param count the number of things to execute
+ * - encoding: Set by libavcodec, user can override.
+ * - decoding: Set by libavcodec, user can override.
+ */
+ int (*execute)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size);
+
+ /**
+ * thread opaque
+ * Can be used by execute() to store some per AVCodecContext stuff.
+ * - encoding: set by execute()
+ * - decoding: set by execute()
+ */
+ void *thread_opaque;
+
+ /**
+ * Motion estimation threshold below which no motion estimation is
+ * performed, but instead the user specified motion vectors are used.
+ *
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_threshold;
+
+ /**
+ * Macroblock threshold below which the user specified macroblock types will be used.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_threshold;
+
+ /**
+ * precision of the intra DC coefficient - 8
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int intra_dc_precision;
+
+ /**
+ * noise vs. sse weight for the nsse comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int nsse_weight;
+
+ /**
+ * Number of macroblock rows at the top which are skipped.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int skip_top;
+
+ /**
+ * Number of macroblock rows at the bottom which are skipped.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int skip_bottom;
+
+ /**
+ * profile
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int profile;
+#define FF_PROFILE_UNKNOWN -99
+#define FF_PROFILE_AAC_MAIN 0
+#define FF_PROFILE_AAC_LOW 1
+#define FF_PROFILE_AAC_SSR 2
+#define FF_PROFILE_AAC_LTP 3
+
+ /**
+ * level
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int level;
+#define FF_LEVEL_UNKNOWN -99
+
+ /**
+ * low resolution decoding, 1-> 1/2 size, 2->1/4 size
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int lowres;
+
+ /**
+ * Bitstream width / height, may be different from width/height if lowres
+ * or other things are used.
+ * - encoding: unused
+ * - decoding: Set by user before init if known. Codec should override / dynamically change if needed.
+ */
+ int coded_width, coded_height;
+
+ /**
+ * frame skip threshold
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int frame_skip_threshold;
+
+ /**
+ * frame skip factor
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int frame_skip_factor;
+
+ /**
+ * frame skip exponent
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int frame_skip_exp;
+
+ /**
+ * frame skip comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int frame_skip_cmp;
+
+ /**
+ * Border processing masking, raises the quantizer for mbs on the borders
+ * of the picture.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float border_masking;
+
+ /**
+ * minimum MB Lagrange multiplier
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_lmin;
+
+ /**
+ * maximum MB Lagrange multiplier
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_lmax;
+
+ /**
+ *
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_penalty_compensation;
+
+ /**
+ *
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ enum AVDiscard skip_loop_filter;
+
+ /**
+ *
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ enum AVDiscard skip_idct;
+
+ /**
+ *
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ enum AVDiscard skip_frame;
+
+ /**
+ *
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int bidir_refine;
+
+ /**
+ *
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int brd_scale;
+
+ /**
+ * constant rate factor - quality-based VBR - values ~correspond to qps
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float crf;
+
+ /**
+ * constant quantization parameter rate control method
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int cqp;
+
+ /**
+ * minimum GOP size
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int keyint_min;
+
+ /**
+ * number of reference frames
+ * - encoding: Set by user.
+ * - decoding: Set by lavc.
+ */
+ int refs;
+
+ /**
+ * chroma qp offset from luma
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int chromaoffset;
+
+ /**
+ * Influences how often B-frames are used.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int bframebias;
+
+ /**
+ * trellis RD quantization
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int trellis;
+
+ /**
+ * Reduce fluctuations in qp (before curve compression).
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float complexityblur;
+
+ /**
+ * in-loop deblocking filter alphac0 parameter
+ * alpha is in the range -6...6
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int deblockalpha;
+
+ /**
+ * in-loop deblocking filter beta parameter
+ * beta is in the range -6...6
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int deblockbeta;
+
+ /**
+ * macroblock subpartition sizes to consider - p8x8, p4x4, b8x8, i8x8, i4x4
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int partitions;
+#define X264_PART_I4X4 0x001 /* Analyze i4x4 */
+#define X264_PART_I8X8 0x002 /* Analyze i8x8 (requires 8x8 transform) */
+#define X264_PART_P8X8 0x010 /* Analyze p16x8, p8x16 and p8x8 */
+#define X264_PART_P4X4 0x020 /* Analyze p8x4, p4x8, p4x4 */
+#define X264_PART_B8X8 0x100 /* Analyze b16x8, b8x16 and b8x8 */
+
+ /**
+ * direct MV prediction mode - 0 (none), 1 (spatial), 2 (temporal), 3 (auto)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int directpred;
+
+ /**
+ * Audio cutoff bandwidth (0 means "automatic")
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int cutoff;
+
+ /**
+ * Multiplied by qscale for each frame and added to scene_change_score.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int scenechange_factor;
+
+ /**
+ *
+ * Note: Value depends upon the compare function used for fullpel ME.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mv0_threshold;
+
+ /**
+ * Adjusts sensitivity of b_frame_strategy 1.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int b_sensitivity;
+
+ /**
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int compression_level;
+#define FF_COMPRESSION_DEFAULT -1
+
+ /**
+ * Sets whether to use LPC mode - used by FLAC encoder.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int use_lpc;
+
+ /**
+ * LPC coefficient precision - used by FLAC encoder
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int lpc_coeff_precision;
+
+ /**
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int min_prediction_order;
+
+ /**
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int max_prediction_order;
+
+ /**
+ * search method for selecting prediction order
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int prediction_order_method;
+
+ /**
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int min_partition_order;
+
+ /**
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int max_partition_order;
+
+ /**
+ * GOP timecode frame start number, in non-drop-frame format
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int64_t timecode_frame_start;
+
+#if LIBAVCODEC_VERSION_MAJOR < 53
+ /**
+ * Decoder should decode to this many channels if it can (0 for default)
+ * - encoding: unused
+ * - decoding: Set by user.
+ * @deprecated Deprecated in favor of request_channel_layout.
+ */
+ int request_channels;
+#endif
+
+ /**
+ * Percentage of dynamic range compression to be applied by the decoder.
+ * The default value is 1.0, corresponding to full compression.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ float drc_scale;
+
+ /**
+ * opaque 64bit number (generally a PTS) that will be reordered and
+ * output in AVFrame.reordered_opaque
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int64_t reordered_opaque;
+ int64_t reordered_opaque2; /* ffdshow custom code */
+ int64_t reordered_opaque3; /* ffdshow custom code */
+
+ int is_copy; /* ffmpeg-mt */
+ int thread_type; /* ffmpeg-mt */
+#define FF_THREAD_FRAME 1 //< Decode more than one frame at once
+#define FF_THREAD_SLICE 2 //< Decode more than one part of a single frame at once
+#define FF_THREAD_DEFAULT 3 //< Use both if possible.
+
+ int active_thread_type; /* ffmpeg-mt */
+ /**
+ * Bits per sample/pixel of internal libavcodec pixel/sample format.
+ * This field is applicable only when sample_fmt is SAMPLE_FMT_S32.
+ * - encoding: set by user.
+ * - decoding: set by libavcodec.
+ */
+ int bits_per_raw_sample;
+
+ /**
+ * Audio channel layout.
+ * - encoding: set by user.
+ * - decoding: set by libavcodec.
+ */
+ int64_t channel_layout;
+
+ /**
+ * Request decoder to use this channel layout if it can (0 for default)
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int64_t request_channel_layout;
+
+ /**
+ * Ratecontrol attempts to use, at maximum, <value> of what can be used without an underflow.
+ * - encoding: Set by user.
+ * - decoding: unused.
+ */
+ float rc_max_available_vbv_use;
+
+ /**
+ * Ratecontrol attempts to use, at least, <value> times the amount needed to prevent a vbv overflow.
+ * - encoding: Set by user.
+ * - decoding: unused.
+ */
+ float rc_min_vbv_overflow_use;
+
+ /**
+ * Hardware accelerator in use
+ * - encoding: unused.
+ * - decoding: Set by libavcodec
+ */
+ struct AVHWAccel *hwaccel;
+
+ /**
+ * For some codecs, the time base is closer to the field rate than the frame rate.
+ * Most notably, H.264 and MPEG-2 specify time_base as half of frame duration
+ * if no telecine is used ...
+ *
+ * Set to time_base ticks per frame. Default 1, e.g., H.264/MPEG-2 set it to 2.
+ */
+ int ticks_per_frame;
+
+ /**
+ * Chromaticity coordinates of the source primaries.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorPrimaries color_primaries;
+
+ /**
+ * Color Transfer Characteristic.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorTransferCharacteristic color_trc;
+
+ /**
+ * YUV colorspace type.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorSpace colorspace;
+
+ /**
+ * MPEG vs JPEG YUV range.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorRange color_range;
+
+ /**
+ * This defines the location of chroma samples.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVChromaLocation chroma_sample_location;
+
+ /**
+ * The codec may call this to execute several independent things.
+ * It will return only after finishing all tasks.
+ * The user may replace this with some multithreaded implementation,
+ * the default implementation will execute the parts serially.
+ * Also see avcodec_thread_init and e.g. the --enable-pthread configure option.
+ * @param c context passed also to func
+ * @param count the number of things to execute
+ * @param arg2 argument passed unchanged to func
+ * @param ret return values of executed functions, must have space for "count" values. May be NULL.
+ * @param func function that will be called count times, with jobnr from 0 to count-1.
+ * threadnr will be in the range 0 to c->thread_count-1 (< MAX_THREADS), and no
+ * two instances of func executing at the same time will have the same threadnr.
+ * @return always 0 currently, but code should handle a future improvement where, if any
+ * call to func returns < 0, no further calls to func are made and < 0 is returned.
+ * - encoding: Set by libavcodec, user can override.
+ * - decoding: Set by libavcodec, user can override.
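+ *
+ * A serial reference sketch matching this contract (illustrative only; the
+ * default implementation provided by libavcodec may differ in detail):
+ * @code
+ * static int my_execute2(struct AVCodecContext *c,
+ *                        int (*func)(struct AVCodecContext *c2, void *arg,
+ *                                    int jobnr, int threadnr),
+ *                        void *arg2, int *ret, int count)
+ * {
+ *     int i;
+ *     for (i = 0; i < count; i++) {
+ *         int r = func(c, arg2, i, 0); // single "thread": threadnr == 0
+ *         if (ret)
+ *             ret[i] = r;
+ *     }
+ *     return 0;
+ * }
+ * @endcode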
+ */
+ int (*execute2)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count);
+
+ /* ffdshow custom stuff (begin) */
+
+ /**
+ * minimum and maximum quantizer for I frames. If 0, derived from qmin, i_quant_factor, i_quant_offset
+ * - encoding: set by user.
+ * - decoding: unused
+ */
+ int qmin_i,qmax_i;
+
+ /**
+ * minimum and maximum quantizer for B frames. If 0, derived from qmin, b_quant_factor, b_quant_offset
+ * - encoding: set by user.
+ * - decoding: unused
+ */
+ int qmin_b,qmax_b;
+
+ float postgain;
+ int ac3mode,ac3lfe;
+ int ac3channels[6];
+ int nal_length_size;
+ int vorbis_header_size[3];
+ int64_t granulepos;
+ int64_t *parserRtStart;
+ void (*handle_user_data)(struct AVCodecContext *c,const uint8_t *buf,int buf_size);
+ int h264_has_to_drop_first_non_ref; // Workaround Haali's media splitter (http://forum.doom9.org/showthread.php?p=1226434#post1226434)
+
+ enum CorePNGFrameType corepng_frame_type;
+
+ /**
+ * Force 4:3 or 16:9 as DAR (MPEG-2 only)
+ * - encoding: unused.
+ * - decoding: Set by user.
+ */
+ int isDVD;
+
+ /* ffdshow custom stuff (end) */
+} AVCodecContext;
+
+/**
+ * AVHWAccel.
+ */
+typedef struct AVHWAccel {
+ /**
+ * Name of the hardware accelerated codec.
+ * The name is globally unique among encoders and among decoders (but an
+ * encoder and a decoder can share the same name).
+ */
+ const char *name;
+
+ /**
+ * Type of codec implemented by the hardware accelerator.
+ *
+ * See CODEC_TYPE_xxx
+ */
+ enum CodecType type;
+
+ /**
+ * Codec implemented by the hardware accelerator.
+ *
+ * See CODEC_ID_xxx
+ */
+ enum CodecID id;
+
+ /**
+ * Supported pixel format.
+ *
+ * Only hardware accelerated formats are supported here.
+ */
+ enum PixelFormat pix_fmt;
+
+ /**
+ * Hardware accelerated codec capabilities.
+ * see FF_HWACCEL_CODEC_CAP_*
+ */
+ int capabilities;
+
+ struct AVHWAccel *next;
+
+ /**
+ * Called at the beginning of each frame or field picture.
+ *
+ * Meaningful frame information (codec specific) is guaranteed to
+ * be parsed at this point. This function is mandatory.
+ *
+ * Note that buf can be NULL along with buf_size set to 0.
+ * Otherwise, this means the whole frame is available at this point.
+ *
+ * @param avctx the codec context
+ * @param buf the frame data buffer base
+ * @param buf_size the size of the frame in bytes
+ * @return zero if successful, a negative value otherwise
+ */
+ int (*start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size);
+
+ /**
+ * Callback for each slice.
+ *
+ * Meaningful slice information (codec specific) is guaranteed to
+ * be parsed at this point. This function is mandatory.
+ *
+ * @param avctx the codec context
+ * @param buf the slice data buffer base
+ * @param buf_size the size of the slice in bytes
+ * @return zero if successful, a negative value otherwise
+ */
+ int (*decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size);
+
+ /**
+ * Called at the end of each frame or field picture.
+ *
+ * The whole picture is parsed at this point and can now be sent
+ * to the hardware accelerator. This function is mandatory.
+ *
+ * @param avctx the codec context
+ * @return zero if successful, a negative value otherwise
+ */
+ int (*end_frame)(AVCodecContext *avctx);
+
+ /**
+ * Size of HW accelerator private data.
+ *
+ * Private data is allocated with av_mallocz() before
+ * AVCodecContext.get_buffer() and deallocated after
+ * AVCodecContext.release_buffer().
+ */
+ int priv_data_size;
+} AVHWAccel;
+
+/**
+ * AVCodec.
+ */
+typedef struct AVCodec {
+ /**
+ * Name of the codec implementation.
+ * The name is globally unique among encoders and among decoders (but an
+ * encoder and a decoder can share the same name).
+ * This is the primary way to find a codec from the user perspective.
+ */
+ const char *name;
+ enum CodecType type;
+ enum CodecID id;
+ int priv_data_size;
+ int (*init)(AVCodecContext *);
+ int (*encode)(AVCodecContext *, uint8_t *buf, int buf_size, void *data);
+ int (*close)(AVCodecContext *);
+ int (*decode)(AVCodecContext *, void *outdata, int *outdata_size,
+ const uint8_t *buf, int buf_size);
+ /**
+ * Codec capabilities.
+ * see CODEC_CAP_*
+ */
+ int capabilities;
+ struct AVCodec *next;
+ /**
+ * Flush buffers.
+ * Will be called when seeking
+ */
+ void (*flush)(AVCodecContext *);
+ const AVRational *supported_framerates; ///< array of supported framerates, or NULL if any, array is terminated by {0,0}
+ const enum PixelFormat *pix_fmts; ///< array of supported pixel formats, or NULL if unknown, array is terminated by -1
+ /**
+ * Descriptive name for the codec, meant to be more human readable than name.
+ * You should use the NULL_IF_CONFIG_SMALL() macro to define it.
+ */
+ const char *long_name;
+ const int *supported_samplerates; ///< array of supported audio samplerates, or NULL if unknown, array is terminated by 0
+ const enum SampleFormat *sample_fmts; ///< array of supported sample formats, or NULL if unknown, array is terminated by -1
+ const int64_t *channel_layouts; ///< array of supported channel layouts, or NULL if unknown, array is terminated by 0
+} AVCodec;
+
+/**
+ * Four components are given, that's all.
+ * The last component is alpha.
+ */
+typedef struct AVPicture {
+ uint8_t *data[4];
+ int linesize[4]; ///< number of bytes per line
+} AVPicture;
+
+void avcodec_get_chroma_sub_sample(enum PixelFormat pix_fmt, int *h_shift, int *v_shift);
+void avcodec_set_dimensions(AVCodecContext *s, int width, int height);
+
+/* external high level API */
+
+/**
+ * If c is NULL, returns the first registered codec,
+ * if c is non-NULL, returns the next registered codec after c,
+ * or NULL if c is the last one.
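+ *
+ * A minimal iteration sketch (illustrative only; assumes the codecs were
+ * already registered, e.g. via avcodec_register_all()):
+ * @code
+ * AVCodec *c = NULL;
+ * while ((c = av_codec_next(c)))
+ *     printf("%s\n", c->name);
+ * @endcode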
+ */
+AVCodec *av_codec_next(AVCodec *c);
+
+/**
+ * Returns the LIBAVCODEC_VERSION_INT constant.
+ */
+unsigned avcodec_version(void);
+
+/**
+ * Initializes libavcodec.
+ *
+ * @warning This function must be called before any other libavcodec
+ * function.
+ */
+FF_EXPORT void avcodec_init(void);
+
+/**
+ * Register the codec codec and initialize libavcodec.
+ *
+ * @see avcodec_init()
+ */
+void avcodec_register(AVCodec *codec);
+
+/**
+ * Finds a registered encoder with a matching codec ID.
+ *
+ * @param id CodecID of the requested encoder
+ * @return An encoder if one was found, NULL otherwise.
+ */
+AVCodec *avcodec_find_encoder(enum CodecID id);
+
+/**
+ * Finds a registered encoder with the specified name.
+ *
+ * @param name name of the requested encoder
+ * @return An encoder if one was found, NULL otherwise.
+ */
+AVCodec *avcodec_find_encoder_by_name(const char *name);
+
+/**
+ * Finds a registered decoder with a matching codec ID.
+ *
+ * @param id CodecID of the requested decoder
+ * @return A decoder if one was found, NULL otherwise.
+ */
+FF_EXPORT AVCodec *avcodec_find_decoder(enum CodecID id);
+
+/**
+ * Finds a registered decoder with the specified name.
+ *
+ * @param name name of the requested decoder
+ * @return A decoder if one was found, NULL otherwise.
+ */
+AVCodec *avcodec_find_decoder_by_name(const char *name);
+void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode);
+
+/**
+ * Sets the fields of the given AVCodecContext to default values.
+ *
+ * @param s The AVCodecContext of which the fields should be set to default values.
+ */
+void avcodec_get_context_defaults(AVCodecContext *s);
+
+/**
+ * Allocates an AVCodecContext and sets its fields to default values. The
+ * resulting struct can be deallocated by simply calling av_free().
+ *
+ * @return An AVCodecContext filled with default values or NULL on failure.
+ * @see avcodec_get_context_defaults
+ */
+FF_EXPORT AVCodecContext *avcodec_alloc_context(void);
+
+/**
+ * Sets the fields of the given AVFrame to default values.
+ *
+ * @param pic The AVFrame of which the fields should be set to default values.
+ */
+void avcodec_get_frame_defaults(AVFrame *pic);
+
+/**
+ * Allocates an AVFrame and sets its fields to default values. The resulting
+ * struct can be deallocated by simply calling av_free().
+ *
+ * @return An AVFrame filled with default values or NULL on failure.
+ * @see avcodec_get_frame_defaults
+ */
+FF_EXPORT AVFrame *avcodec_alloc_frame(void);
+
+FF_EXPORT int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic);
+FF_EXPORT void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic);
+FF_EXPORT int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic);
+void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height);
+
+/**
+ * Checks if the given dimension of a picture is valid, meaning that all
+ * bytes of the picture can be addressed with a signed int.
+ *
+ * @param[in] w Width of the picture.
+ * @param[in] h Height of the picture.
+ * @return Zero if valid, a negative value if invalid.
+ */
+int avcodec_check_dimensions(void *av_log_ctx, unsigned int w, unsigned int h);
+enum PixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum PixelFormat * fmt);
+
+FF_EXPORT int avcodec_thread_init(AVCodecContext *s, int thread_count);
+FF_EXPORT void avcodec_thread_free(AVCodecContext *s);
+int avcodec_thread_execute(AVCodecContext *s, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size);
+int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size);
+int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int, int),void *arg, int *ret, int count);
+const char* avcodec_get_current_idct(AVCodecContext *avctx);
+void avcodec_get_encoder_info(AVCodecContext *avctx,int *xvid_build,int *divx_version,int *divx_build,int *lavc_build);
+//FIXME func typedef
+
+/**
+ * Initializes the AVCodecContext to use the given AVCodec. Prior to using this
+ * function the context has to be allocated.
+ *
+ * The functions avcodec_find_decoder_by_name(), avcodec_find_encoder_by_name(),
+ * avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for
+ * retrieving a codec.
+ *
+ * @warning This function is not thread safe!
+ *
+ * @code
+ * avcodec_register_all();
+ * codec = avcodec_find_decoder(CODEC_ID_H264);
+ * if (!codec)
+ * exit(1);
+ *
+ * context = avcodec_alloc_context();
+ *
+ * if (avcodec_open(context, codec) < 0)
+ * exit(1);
+ * @endcode
+ *
+ * @param avctx The context which will be set up to use the given codec.
+ * @param codec The codec to use within the context.
+ * @return zero on success, a negative value on error
+ * @see avcodec_alloc_context, avcodec_find_decoder, avcodec_find_encoder
+ */
+FF_EXPORT int avcodec_open(AVCodecContext *avctx, AVCodec *codec);
+
+/**
+ * @deprecated Use avcodec_decode_audio2() instead.
+ */
+attribute_deprecated int avcodec_decode_audio(AVCodecContext *avctx, int16_t *samples,
+ int *frame_size_ptr,
+ const uint8_t *buf, int buf_size);
+
+/**
+ * Decodes an audio frame from buf into samples.
+ * The avcodec_decode_audio2() function decodes an audio frame from the input
+ * buffer buf of size buf_size. To decode it, it makes use of the
+ * audio codec which was coupled with avctx using avcodec_open(). The
+ * resulting decoded frame is stored in output buffer samples. If no frame
+ * could be decompressed, frame_size_ptr is zero. Otherwise, it is the
+ * decompressed frame size in bytes.
+ *
+ * @warning You must set frame_size_ptr to the allocated size of the
+ * output buffer before calling avcodec_decode_audio2().
+ *
+ * @warning The input buffer must be FF_INPUT_BUFFER_PADDING_SIZE larger than
+ * the actual read bytes because some optimized bitstream readers read 32 or 64
+ * bits at once and could read over the end.
+ *
+ * @warning The end of the input buffer buf should be set to 0 to ensure that
+ * no overreading happens for damaged MPEG streams.
+ *
+ * @note You might have to align the input buffer buf and output buffer
+ * samples. The alignment requirements depend on the CPU: On some CPUs it isn't
+ * necessary at all, on others it won't work at all if not aligned and on others
+ * it will work but it will have an impact on performance. In practice, the
+ * bitstream should have 4 byte alignment at minimum and all sample data should
+ * be 16 byte aligned unless the CPU doesn't need it (AltiVec and SSE do). If
+ * the linesize is not a multiple of 16 then there's no sense in aligning the
+ * start of the buffer to 16.
+ *
+ * @param avctx the codec context
+ * @param[out] samples the output buffer
+ * @param[in,out] frame_size_ptr the output buffer size in bytes
+ * @param[in] buf the input buffer
+ * @param[in] buf_size the input buffer size in bytes
+ * @return On error a negative value is returned, otherwise the number of bytes
+ * used or zero if no frame could be decompressed.
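+ *
+ * A decoding sketch (illustrative only; inbuf/inbuf_len and consume_audio()
+ * are placeholders, and the output buffer uses the usual
+ * AVCODEC_MAX_AUDIO_FRAME_SIZE bound):
+ * @code
+ * int16_t samples[AVCODEC_MAX_AUDIO_FRAME_SIZE / 2];
+ * int frame_size = sizeof(samples); // allocated output size in bytes
+ * int used = avcodec_decode_audio2(avctx, samples, &frame_size,
+ *                                  inbuf, inbuf_len); // inbuf needs padding
+ * if (used < 0)
+ *     return used;                  // decoding error
+ * if (frame_size > 0)
+ *     consume_audio(samples, frame_size); // frame_size bytes were decoded
+ * @endcode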
+ */
+FF_EXPORT int avcodec_decode_audio2(AVCodecContext *avctx, int16_t *samples,
+ int *frame_size_ptr,
+ const uint8_t *buf, int buf_size);
+
+/**
+ * Decodes a video frame from buf into picture.
+ * The avcodec_decode_video() function decodes a video frame from the input
+ * buffer buf of size buf_size. To decode it, it makes use of the
+ * video codec which was coupled with avctx using avcodec_open(). The
+ * resulting decoded frame is stored in picture.
+ *
+ * @warning The input buffer must be FF_INPUT_BUFFER_PADDING_SIZE larger than
+ * the actual read bytes because some optimized bitstream readers read 32 or 64
+ * bits at once and could read over the end.
+ *
+ * @warning The end of the input buffer buf should be set to 0 to ensure that
+ * no overreading happens for damaged MPEG streams.
+ *
+ * @note You might have to align the input buffer buf and output buffer
+ * samples. The alignment requirements depend on the CPU: on some CPUs it isn't
+ * necessary at all, on others it won't work at all if not aligned and on others
+ * it will work but it will have an impact on performance. In practice, the
+ * bitstream should have 4 byte alignment at minimum and all sample data should
+ * be 16 byte aligned unless the CPU doesn't need it (AltiVec and SSE do). If
+ * the linesize is not a multiple of 16 then there's no sense in aligning the
+ * start of the buffer to 16.
+ *
+ * @note Some codecs have a delay between input and output; these need to be
+ * fed with buf=NULL, buf_size=0 at the end to return the remaining frames.
+ *
+ * @param avctx the codec context
+ * @param[out] picture The AVFrame in which the decoded video frame will be stored.
+ * @param[in] buf the input buffer
+ * @param[in] buf_size the size of the input buffer in bytes
+ * @param[in,out] got_picture_ptr Zero if no frame could be decompressed, otherwise, it is nonzero.
+ * @return On error a negative value is returned, otherwise the number of bytes
+ * used or zero if no frame could be decompressed.
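+ *
+ * A decoding sketch (illustrative only; inbuf/inbuf_len and render() are
+ * placeholders, and error handling is kept minimal):
+ * @code
+ * AVFrame *frame = avcodec_alloc_frame();
+ * int got_picture = 0;
+ * int used = avcodec_decode_video(avctx, frame, &got_picture,
+ *                                 inbuf, inbuf_len); // inbuf needs padding
+ * if (used < 0)
+ *     return used;   // decoding error
+ * if (got_picture)
+ *     render(frame); // frame->data[]/frame->linesize[] describe the picture
+ * @endcode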
+ */
+FF_EXPORT int avcodec_decode_video(AVCodecContext *avctx, AVFrame *picture,
+ int *got_picture_ptr,
+ const uint8_t *buf, int buf_size);
+
+
+int avcodec_parse_frame(AVCodecContext *avctx, uint8_t **pdata,
+ int *data_size_ptr,
+ uint8_t *buf, int buf_size);
+
+/**
+ * Encodes an audio frame from samples into buf.
+ *
+ * @note The output buffer should be at least FF_MIN_BUFFER_SIZE bytes large.
+ * However, for PCM audio the user will know how much space is needed
+ * because it depends on the value passed in buf_size as described
+ * below. In that case a lower value can be used.
+ *
+ * @param avctx the codec context
+ * @param[out] buf the output buffer
+ * @param[in] buf_size the output buffer size
+ * @param[in] samples the input buffer containing the samples
+ * The number of samples read from this buffer is frame_size*channels,
+ * both of which are defined in avctx.
+ * For PCM audio the number of samples read from samples is equal to
+ * buf_size * input_sample_size / output_sample_size.
+ * @return On error a negative value is returned, on success zero or the number
+ * of bytes used to encode the data read from the input buffer.
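+ *
+ * An encoding sketch (illustrative only; outbuf, outbuf_size and outfile are
+ * placeholders, and samples must hold frame_size*channels input samples):
+ * @code
+ * int out_size = avcodec_encode_audio(avctx, outbuf, outbuf_size, samples);
+ * if (out_size > 0)
+ *     fwrite(outbuf, 1, out_size, outfile); // write the encoded packet
+ * @endcode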
+ */
+int avcodec_encode_audio(AVCodecContext *avctx, uint8_t *buf, int buf_size,
+ const short *samples);
+
+/**
+ * Encodes a video frame from pict into buf.
+ * The input picture should be
+ * stored using a specific format, namely avctx.pix_fmt.
+ *
+ * @param avctx the codec context
+ * @param[out] buf the output buffer for the bitstream of encoded frame
+ * @param[in] buf_size the size of the output buffer in bytes
+ * @param[in] pict the input picture to encode
+ * @return On error a negative value is returned, on success zero or the number
+ * of bytes used from the output buffer.
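+ *
+ * An encoding sketch (illustrative only; outbuf, outbuf_size and outfile are
+ * placeholders):
+ * @code
+ * int out_size = avcodec_encode_video(avctx, outbuf, outbuf_size, picture);
+ * if (out_size > 0)
+ *     fwrite(outbuf, 1, out_size, outfile); // write the encoded frame
+ * @endcode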
+ */
+int avcodec_encode_video(AVCodecContext *avctx, uint8_t *buf, int buf_size,
+ const AVFrame *pict);
+
+FF_EXPORT int avcodec_close(AVCodecContext *avctx);
+
+/**
+ * Register all the codecs, parsers and bitstream filters which were enabled at
+ * configuration time. If you do not call this function you can select exactly
+ * which formats you want to support, by using the individual registration
+ * functions.
+ *
+ * @see avcodec_register
+ * @see av_register_codec_parser
+ * @see av_register_bitstream_filter
+ */
+FF_EXPORT void avcodec_register_all(void);
+
+/**
+ * Flush buffers, should be called when seeking or when switching to a different stream.
+ */
+FF_EXPORT void avcodec_flush_buffers(AVCodecContext *avctx);
+
+void avcodec_default_free_buffers(AVCodecContext *s);
+
+/* misc useful functions */
+
+/**
+ * Returns a single letter to describe the given picture type pict_type.
+ *
+ * @param[in] pict_type the picture type
+ * @return A single character representing the picture type.
+ */
+char av_get_pict_type_char(int pict_type);
+
+/**
+ * Returns codec bits per sample.
+ *
+ * @param[in] codec_id the codec
+ * @return Number of bits per sample or zero if unknown for the given codec.
+ */
+int av_get_bits_per_sample(enum CodecID codec_id);
+
+/**
+ * Returns sample format bits per sample.
+ *
+ * @param[in] sample_fmt the sample format
+ * @return Number of bits per sample or zero if unknown for the given sample format.
+ */
+int av_get_bits_per_sample_format(enum SampleFormat sample_fmt);
+
+/* frame parsing */
+typedef struct AVCodecParserContext {
+ void *priv_data;
+ struct AVCodecParser *parser;
+ int64_t frame_offset; /* offset of the current frame */
+ int64_t cur_offset; /* current offset
+ (incremented by each av_parser_parse()) */
+ int64_t next_frame_offset; /* offset of the next frame */
+ /* video info */
+ int pict_type; /* XXX: Put it back in AVCodecContext. */
+ /**
+ * This field is used for proper frame duration computation in lavf.
+ * It signals how much longer the duration of the current frame
+ * is compared to the normal frame duration.
+ *
+ * frame_duration = (2 + repeat_pict) / (2*fps)
+ *
+ * It is used by codecs like H.264 to display telecined material.
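+ *
+ * For example, repeat_pict == 1 gives frame_duration = 3 / (2*fps), i.e. the
+ * frame is displayed for 1.5 normal frame periods.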
+ *
+ * @note This field can also be set to -1 for half-frame duration in case
+ * of field pictures.
+ */
+ int repeat_pict; /* XXX: Put it back in AVCodecContext. */
+ int64_t pts; /* pts of the current frame */
+ int64_t dts; /* dts of the current frame */
+
+ /* private data */
+ int64_t last_pts;
+ int64_t last_dts;
+ int fetch_timestamp;
+
+#define AV_PARSER_PTS_NB 4
+ int cur_frame_start_index;
+ int64_t cur_frame_offset[AV_PARSER_PTS_NB];
+ int64_t cur_frame_pts[AV_PARSER_PTS_NB];
+ int64_t cur_frame_dts[AV_PARSER_PTS_NB];
+
+ int flags;
+#define PARSER_FLAG_COMPLETE_FRAMES 0x0001
+
+ int64_t offset; ///< byte offset from starting packet start
+ int64_t cur_frame_end[AV_PARSER_PTS_NB];
+
+ /*!
+ * Set by parser to 1 for key frames and 0 for non-key frames.
+ * It is initialized to -1, so if the parser doesn't set this flag,
+ * old-style fallback using FF_I_TYPE picture type as key frames
+ * will be used.
+ */
+ int key_frame;
+
+ /**
+ * Time difference in stream time base units from the pts of this
+ * packet to the point at which the output from the decoder has converged
+ * independent from the availability of previous frames. That is, the
+ * frames are virtually identical no matter if decoding started from
+ * the very first frame or from this keyframe.
+ * Is AV_NOPTS_VALUE if unknown.
+ * This field is not the display duration of the current frame.
+ *
+ * The purpose of this field is to allow seeking in streams that have no
+ * keyframes in the conventional sense. It corresponds to the
+ * recovery point SEI in H.264 and match_time_delta in NUT. It is also
+ * essential for some types of subtitle streams to ensure that all
+ * subtitles are correctly displayed after seeking.
+ */
+ int64_t convergence_duration;
+
+ // Timestamp generation support:
+ /**
+ * Synchronization point for start of timestamp generation.
+ *
+ * Set to >0 for sync point, 0 for no sync point and <0 for undefined
+ * (default).
+ *
+ * For example, this corresponds to presence of H.264 buffering period
+ * SEI message.
+ */
+ int dts_sync_point;
+
+ /**
+ * Offset of the current timestamp against last timestamp sync point in
+ * units of AVCodecContext.time_base.
+ *
+ * Set to INT_MIN when dts_sync_point unused. Otherwise, it must
+ * contain a valid timestamp offset.
+ *
+ * Note that the timestamp of a sync point usually has a nonzero
+ * dts_ref_dts_delta, which refers to the previous sync point. The offset of
+ * the next frame after a timestamp sync point will usually be 1.
+ *
+ * For example, this corresponds to H.264 cpb_removal_delay.
+ */
+ int dts_ref_dts_delta;
+
+ /**
+ * Presentation delay of current frame in units of AVCodecContext.time_base.
+ *
+ * Set to INT_MIN when dts_sync_point unused. Otherwise, it must
+ * contain valid non-negative timestamp delta (presentation time of a frame
+ * must not lie in the past).
+ *
+ * This delay represents the difference between decoding and presentation
+ * time of the frame.
+ *
+ * For example, this corresponds to H.264 dpb_output_delay.
+ */
+ int pts_dts_delta;
+
+ /**
+ * Position of the packet in file.
+ *
+ * Analogous to cur_frame_pts/dts
+ */
+ int64_t cur_frame_pos[AV_PARSER_PTS_NB];
+
+ /**
+ * Byte position of currently parsed frame in stream.
+ */
+ int64_t pos;
+
+ /**
+ * Previous frame byte position.
+ */
+ int64_t last_pos;
+} AVCodecParserContext;
+
+typedef struct AVCodecParser {
+ int codec_ids[5]; /* several codec IDs are permitted */
+ int priv_data_size;
+ int (*parser_init)(AVCodecParserContext *s);
+ int (*parser_parse)(AVCodecParserContext *s,
+ AVCodecContext *avctx,
+ const uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size);
+ void (*parser_close)(AVCodecParserContext *s);
+ int (*split)(AVCodecContext *avctx, const uint8_t *buf, int buf_size);
+ struct AVCodecParser *next;
+} AVCodecParser;
+
+AVCodecParser *av_parser_next(AVCodecParser *c);
+
+void av_register_codec_parser(AVCodecParser *parser);
+FF_EXPORT AVCodecParserContext *av_parser_init(int codec_id);
+
+#if LIBAVCODEC_VERSION_MAJOR < 53
+attribute_deprecated
+int av_parser_parse(AVCodecParserContext *s,
+ AVCodecContext *avctx,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size,
+ int64_t pts, int64_t dts);
+#endif
+
+/**
+ * Parse a packet.
+ *
+ * @param s parser context.
+ * @param avctx codec context.
+ * @param poutbuf set to pointer to parsed buffer or NULL if not yet finished.
+ * @param poutbuf_size set to size of parsed buffer or zero if not yet finished.
+ * @param buf input buffer.
+ * @param buf_size input length, to signal EOF, this should be 0 (so that the last frame can be output).
+ * @param pts input presentation timestamp.
+ * @param dts input decoding timestamp.
+ * @param pos input byte position in stream.
+ * @return the number of bytes of the input bitstream used.
+ *
+ * Example:
+ * @code
+ * while(in_len){
+ * len = av_parser_parse2(myparser, avctx, &data, &size,
+ * in_data, in_len,
+ * pts, dts, pos);
+ * in_data += len;
+ * in_len -= len;
+ *
+ * if(size)
+ * decode_frame(data, size);
+ * }
+ * @endcode
+ */
+int av_parser_parse2(AVCodecParserContext *s,
+ AVCodecContext *avctx,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size,
+ int64_t pts, int64_t dts,
+ int64_t pos);
+
+int av_parser_change(AVCodecParserContext *s,
+ AVCodecContext *avctx,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size, int keyframe);
+FF_EXPORT void av_parser_close(AVCodecParserContext *s);
+
+/* memory */
+FF_EXPORT void av_free(void *ptr);
+
+/**
+ * Reallocates the given block if it is not large enough, otherwise it
+ * does nothing.
+ *
+ * @see av_realloc
+ */
+void *av_fast_realloc(void *ptr, unsigned int *size, unsigned int min_size);
+
+/**
+ * Allocates a buffer, reusing the given one if large enough.
+ *
+ * Contrary to av_fast_realloc the current buffer contents might not be
+ * preserved and on error the old buffer is freed, thus no special
+ * handling to avoid memleaks is necessary.
+ *
+ * @param ptr pointer to pointer to already allocated buffer, overwritten with pointer to new buffer
+ * @param size size of the buffer *ptr points to
+ * @param min_size minimum size of *ptr buffer after returning, *ptr will be NULL and
+ * *size 0 if an error occurred.
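+ *
+ * A usage sketch (illustrative only; needed_size is a placeholder):
+ * @code
+ * uint8_t *buf = NULL;
+ * unsigned int buf_size = 0;
+ * av_fast_malloc(&buf, &buf_size, needed_size);
+ * if (!buf)
+ *     return AVERROR(ENOMEM); // allocation failed, buf_size is now 0
+ * @endcode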
+ */
+void av_fast_malloc(void *ptr, unsigned int *size, unsigned int min_size);
+
+/* for static data only */
+
+/**
+ * Frees all static arrays and resets their pointers to 0.
+ * Call this function to release all statically allocated tables.
+ *
+ * @deprecated. Code which uses av_free_static is broken/misdesigned
+ * and should correctly use static arrays
+ *
+ */
+attribute_deprecated void av_free_static(void);
+
+/**
+ * Copy image 'src' to 'dst'.
+ */
+void av_picture_copy(AVPicture *dst, const AVPicture *src,
+ enum PixelFormat pix_fmt, int width, int height);
+
+/**
+ * Crop image top and left side.
+ */
+int av_picture_crop(AVPicture *dst, const AVPicture *src,
+ enum PixelFormat pix_fmt, int top_band, int left_band);
+
+/**
+ * Pad image.
+ */
+int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width, enum PixelFormat pix_fmt,
+ int padtop, int padbottom, int padleft, int padright, int *color);
+
+unsigned int av_xiphlacing(unsigned char *s, unsigned int v);
+
+/* error handling */
+#if EINVAL > 0
+#define AVERROR(e) (-(e)) /**< Returns a negative error code from a POSIX error code, to return from library functions. */
+#define AVUNERROR(e) (-(e)) /**< Returns a POSIX error code from a library function error return value. */
+#else
+/* Some platforms have E* and errno already negated. */
+#define AVERROR(e) (e)
+#define AVUNERROR(e) (e)
+#endif
+#define AVERROR_UNKNOWN AVERROR(EINVAL) /**< unknown error */
+#define AVERROR_IO AVERROR(EIO) /**< I/O error */
+#define AVERROR_NUMEXPECTED AVERROR(EDOM) /**< Number syntax expected in filename. */
+#define AVERROR_INVALIDDATA AVERROR(EINVAL) /**< invalid data found */
+#define AVERROR_NOMEM AVERROR(ENOMEM) /**< not enough memory */
+#define AVERROR_NOFMT AVERROR(EILSEQ) /**< unknown format */
+#define AVERROR_NOTSUPP AVERROR(ENOSYS) /**< Operation not supported. */
+#define AVERROR_NOENT AVERROR(ENOENT) /**< No such file or directory. */
+#define AVERROR_EOF AVERROR(EPIPE) /**< End of file. */
+#define AVERROR_PATCHWELCOME -MKTAG('P','A','W','E') /**< Not yet implemented in FFmpeg. Patches welcome. */
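+
+/*
+ * Illustrative use of the AVERROR() helper (example only, not part of the
+ * original header): library functions return negated POSIX codes on platforms
+ * where E* values are positive, so callers build and compare errors through
+ * the macro. handle_out_of_memory() is a placeholder.
+ *
+ *     if (!buf)
+ *         return AVERROR(EINVAL);   // signal "invalid argument"
+ *     ...
+ *     if (err == AVERROR(ENOMEM))
+ *         handle_out_of_memory();   // compare against a specific error
+ */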
+
+/**
+ * Logs a generic warning message about a missing feature. This function is
+ * intended to be used internally by FFmpeg (libavcodec, libavformat, etc.)
+ * only, and would normally not be used by applications.
+ * @param[in] avc a pointer to an arbitrary struct of which the first field is
+ * a pointer to an AVClass struct
+ * @param[in] feature string containing the name of the missing feature
+ * @param[in] want_sample indicates if samples are wanted which exhibit this feature.
+ * If want_sample is non-zero, additional verbiage will be added to the log
+ * message which tells the user how to report samples to the development
+ * mailing list.
+ */
+void av_log_missing_feature(void *avc, const char *feature, int want_sample);
+
+/**
+ * Logs a generic warning message asking for a sample. This function is
+ * intended to be used internally by FFmpeg (libavcodec, libavformat, etc.)
+ * only, and would normally not be used by applications.
+ * @param[in] avc a pointer to an arbitrary struct of which the first field is
+ * a pointer to an AVClass struct
+ * @param[in] msg string containing an optional message, or NULL if no message
+ */
+void av_log_ask_for_sample(void *avc, const char *msg);
+
+/**
+ * Lock operation used by lockmgr
+ */
+enum AVLockOp {
+ AV_LOCK_CREATE, ///< Create a mutex
+ AV_LOCK_OBTAIN, ///< Lock the mutex
+ AV_LOCK_RELEASE, ///< Unlock the mutex
+ AV_LOCK_DESTROY, ///< Free mutex resources
+};
+
+/**
+ * Register a user provided lock manager supporting the operations
+ * specified by AVLockOp. mutex points to a (void *) where the
+ * lockmgr should store/get a pointer to a user allocated mutex. It's
+ * NULL upon AV_LOCK_CREATE and != NULL for all other ops.
+ *
+ * @param cb User defined callback. Note: FFmpeg may invoke calls to this
+ * callback during the call to av_lockmgr_register().
+ * Thus, the application must be prepared to handle that.
+ * If cb is set to NULL the lockmgr will be unregistered.
+ * Also note that during unregistration the previously registered
+ * lockmgr callback may also be invoked.
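+ *
+ * A pthread-based sketch of such a callback (illustrative only; assumes POSIX
+ * threads are available and keeps error handling minimal):
+ * @code
+ * static int my_lockmgr(void **mutex, enum AVLockOp op)
+ * {
+ *     pthread_mutex_t **m = (pthread_mutex_t **)mutex;
+ *     switch (op) {
+ *     case AV_LOCK_CREATE:
+ *         *m = malloc(sizeof(**m));
+ *         return !*m || pthread_mutex_init(*m, NULL);
+ *     case AV_LOCK_OBTAIN:
+ *         return !!pthread_mutex_lock(*m);
+ *     case AV_LOCK_RELEASE:
+ *         return !!pthread_mutex_unlock(*m);
+ *     case AV_LOCK_DESTROY:
+ *         if (*m) {
+ *             pthread_mutex_destroy(*m);
+ *             free(*m);
+ *         }
+ *         return 0;
+ *     }
+ *     return 1;
+ * }
+ *
+ * av_lockmgr_register(my_lockmgr);
+ * @endcode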
+ */
+int av_lockmgr_register(int (*cb)(void **mutex, enum AVLockOp op));
+
+/**
+ * ffdshow custom stuff
+ *
+ * @param[out] recovery_frame_cnt Valid only if GDR.
+ * @return 0: no recovery point, 1:I-frame 2:Recovery Point SEI (GDR), 3:IDR, -1:error
+ */
+int avcodec_h264_search_recovery_point(AVCodecContext *avctx,
+ const uint8_t *buf, int buf_size, int *recovery_frame_cnt);
+
+/* Media Player Classic - Homecinema specific functions */
+FF_EXPORT int FFGetChannelMap(struct AVCodecContext* avctx);
+FF_EXPORT void* FF_aligned_malloc(size_t size, size_t alignment);
+FF_EXPORT void FF_aligned_free(void* mem_ptr);
+
+#endif /* AVCODEC_AVCODEC_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/bitstream.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/bitstream.c
new file mode 100644
index 000000000..4337c84b3
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/bitstream.c
@@ -0,0 +1,294 @@
+/*
+ * Common bit i/o utils
+ * Copyright (c) 2000, 2001 Fabrice Bellard
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * alternative bitstream reader & writer by Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/bitstream.c
+ * bitstream api.
+ */
+
+#include "avcodec.h"
+#include "get_bits.h"
+#include "put_bits.h"
+
+const uint8_t ff_log2_run[32]={
+ 0, 0, 0, 0, 1, 1, 1, 1,
+ 2, 2, 2, 2, 3, 3, 3, 3,
+ 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 9,10,11,12,13,14,15
+};
+
+void align_put_bits(PutBitContext *s)
+{
+#ifdef ALT_BITSTREAM_WRITER
+ put_bits(s,( - s->index) & 7,0);
+#else
+ put_bits(s,s->bit_left & 7,0);
+#endif
+}
+
+void ff_put_string(PutBitContext *pb, const char *string, int terminate_string)
+{
+ while(*string){
+ put_bits(pb, 8, *string);
+ string++;
+ }
+ if(terminate_string)
+ put_bits(pb, 8, 0);
+}
+
+void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
+{
+ int words= length>>4;
+ int bits= length&15;
+ int i;
+
+ if(length==0) return;
+
+ if(CONFIG_SMALL || words < 16 || put_bits_count(pb)&7){
+ for(i=0; i<words; i++) put_bits(pb, 16, AV_RB16(src + 2*i));
+ }else{
+ for(i=0; put_bits_count(pb)&31; i++)
+ put_bits(pb, 8, src[i]);
+ flush_put_bits(pb);
+ memcpy(put_bits_ptr(pb), src+i, 2*words-i);
+ skip_put_bytes(pb, 2*words-i);
+ }
+
+ put_bits(pb, bits, AV_RB16(src + 2*words)>>(16-bits));
+}
+
+/* VLC decoding */
+
+//#define DEBUG_VLC
+
+#define GET_DATA(v, table, i, wrap, size) \
+{\
+ const uint8_t *ptr = (const uint8_t *)table + i * wrap;\
+ switch(size) {\
+ case 1:\
+ v = *(const uint8_t *)ptr;\
+ break;\
+ case 2:\
+ v = *(const uint16_t *)ptr;\
+ break;\
+ default:\
+ v = *(const uint32_t *)ptr;\
+ break;\
+ }\
+}
+
+
+static int alloc_table(VLC *vlc, int size, int use_static)
+{
+ int index;
+ index = vlc->table_size;
+ vlc->table_size += size;
+ if (vlc->table_size > vlc->table_allocated) {
+ if(use_static)
+ abort(); //can't do anything, init_vlc() is used with too little memory
+ vlc->table_allocated += (1 << vlc->bits);
+ vlc->table = av_realloc(vlc->table,
+ sizeof(VLC_TYPE) * 2 * vlc->table_allocated);
+ if (!vlc->table)
+ return -1;
+ }
+ return index;
+}
+
+static int build_table(VLC *vlc, int table_nb_bits,
+ int nb_codes,
+ const void *bits, int bits_wrap, int bits_size,
+ const void *codes, int codes_wrap, int codes_size,
+ const void *symbols, int symbols_wrap, int symbols_size,
+ uint32_t code_prefix, int n_prefix, int flags)
+{
+ int i, j, k, n, table_size, table_index, nb, n1, index, code_prefix2, symbol;
+ uint32_t code;
+ VLC_TYPE (*table)[2];
+
+ table_size = 1 << table_nb_bits;
+ table_index = alloc_table(vlc, table_size, flags & INIT_VLC_USE_NEW_STATIC);
+#ifdef DEBUG_VLC
+ av_log(NULL,AV_LOG_DEBUG,"new table index=%d size=%d code_prefix=%x n=%d\n",
+ table_index, table_size, code_prefix, n_prefix);
+#endif
+ if (table_index < 0)
+ return -1;
+ table = &vlc->table[table_index];
+
+ for(i=0;i<table_size;i++) {
+ table[i][1] = 0; //bits
+ table[i][0] = -1; //codes
+ }
+
+ /* first pass: map codes and compute auxiliary table sizes */
+ for(i=0;i<nb_codes;i++) {
+ GET_DATA(n, bits, i, bits_wrap, bits_size);
+ GET_DATA(code, codes, i, codes_wrap, codes_size);
+ /* we accept tables with holes */
+ if (n <= 0)
+ continue;
+ if (!symbols)
+ symbol = i;
+ else
+ GET_DATA(symbol, symbols, i, symbols_wrap, symbols_size);
+#if defined(DEBUG_VLC) && 0
+ av_log(NULL,AV_LOG_DEBUG,"i=%d n=%d code=0x%x\n", i, n, code);
+#endif
+ /* if code matches the prefix, it is in the table */
+ n -= n_prefix;
+ if(flags & INIT_VLC_LE)
+ code_prefix2= code & (n_prefix>=32 ? 0xffffffff : (1 << n_prefix)-1);
+ else
+ code_prefix2= code >> n;
+ if (n > 0 && code_prefix2 == code_prefix) {
+ if (n <= table_nb_bits) {
+ /* no need to add another table */
+ j = (code << (table_nb_bits - n)) & (table_size - 1);
+ nb = 1 << (table_nb_bits - n);
+ for(k=0;k<nb;k++) {
+ if(flags & INIT_VLC_LE)
+ j = (code >> n_prefix) + (k<<n);
+#ifdef DEBUG_VLC
+ av_log(NULL, AV_LOG_DEBUG, "%4x: code=%d n=%d\n",
+ j, i, n);
+#endif
+ if (table[j][1] /*bits*/ != 0) {
+ av_log(NULL, AV_LOG_ERROR, "incorrect codes\n");
+ return -1;
+ }
+ table[j][1] = n; //bits
+ table[j][0] = symbol;
+ j++;
+ }
+ } else {
+ n -= table_nb_bits;
+ j = (code >> ((flags & INIT_VLC_LE) ? n_prefix : n)) & ((1 << table_nb_bits) - 1);
+#ifdef DEBUG_VLC
+ av_log(NULL,AV_LOG_DEBUG,"%4x: n=%d (subtable)\n",
+ j, n);
+#endif
+ /* compute table size */
+ n1 = -table[j][1]; //bits
+ if (n > n1)
+ n1 = n;
+ table[j][1] = -n1; //bits
+ }
+ }
+ }
+
+ /* second pass: fill auxiliary tables recursively */
+ for(i=0;i<table_size;i++) {
+ n = table[i][1]; //bits
+ if (n < 0) {
+ n = -n;
+ if (n > table_nb_bits) {
+ n = table_nb_bits;
+ table[i][1] = -n; //bits
+ }
+ index = build_table(vlc, n, nb_codes,
+ bits, bits_wrap, bits_size,
+ codes, codes_wrap, codes_size,
+ symbols, symbols_wrap, symbols_size,
+ (flags & INIT_VLC_LE) ? (code_prefix | (i << n_prefix)) : ((code_prefix << table_nb_bits) | i),
+ n_prefix + table_nb_bits, flags);
+ if (index < 0)
+ return -1;
+ /* note: realloc has been done, so reload tables */
+ table = &vlc->table[table_index];
+ table[i][0] = index; //code
+ }
+ }
+ return table_index;
+}
+
+
+/* Build VLC decoding tables suitable for use with get_vlc().
+
+ 'nb_bits' sets the decoding table size (2^nb_bits entries). The
+ bigger it is, the faster the decoding. But it should not be too
+ big to save memory and L1 cache. '9' is a good compromise.
+
+ 'nb_codes' : number of VLC codes
+
+ 'bits' : table which gives the size (in bits) of each vlc code.
+
+ 'codes' : table which gives the bit pattern of each vlc code.
+
+ 'symbols' : table which gives the values to be returned from get_vlc().
+
+ 'xxx_wrap' : gives the number of bytes between each entry of the
+ 'bits' or 'codes' tables.
+
+ 'xxx_size' : gives the number of bytes of each entry of the 'bits'
+ or 'codes' tables.
+
+ 'wrap' and 'size' make it possible to use any memory configuration and types
+ (byte/word/long) to store the 'bits', 'codes', and 'symbols' tables.
+
+ 'use_static' should be set to 1 for tables which should be freed
+ with av_free_static(), 0 if free_vlc() will be used.
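+
+ A construction sketch (illustrative only): four codes of lengths 1, 2, 3
+ and 3 bits forming the prefix code 0, 10, 110, 111, read via byte tables
+ (wrap and size of 1):
+
+     static const uint8_t my_bits[4]  = { 1, 2, 3, 3 };
+     static const uint8_t my_codes[4] = { 0, 2, 6, 7 };
+     VLC vlc;
+     init_vlc_sparse(&vlc, 3, 4,
+                     my_bits,  1, 1,
+                     my_codes, 1, 1,
+                     NULL, 0, 0, 0);
+     ...
+     free_vlc(&vlc);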
+*/
+int init_vlc_sparse(VLC *vlc, int nb_bits, int nb_codes,
+ const void *bits, int bits_wrap, int bits_size,
+ const void *codes, int codes_wrap, int codes_size,
+ const void *symbols, int symbols_wrap, int symbols_size,
+ int flags)
+{
+ vlc->bits = nb_bits;
+ if(flags & INIT_VLC_USE_NEW_STATIC){
+ if(vlc->table_size && vlc->table_size == vlc->table_allocated){
+ return 0;
+ }else if(vlc->table_size){
+ abort(); // fatal error, we are called on a partially initialized table
+ }
+ }else {
+ vlc->table = NULL;
+ vlc->table_allocated = 0;
+ vlc->table_size = 0;
+ }
+
+#ifdef DEBUG_VLC
+ av_log(NULL,AV_LOG_DEBUG,"build table nb_codes=%d\n", nb_codes);
+#endif
+
+ if (build_table(vlc, nb_bits, nb_codes,
+ bits, bits_wrap, bits_size,
+ codes, codes_wrap, codes_size,
+ symbols, symbols_wrap, symbols_size,
+ 0, 0, flags) < 0) {
+ av_freep(&vlc->table);
+ return -1;
+ }
+ if((flags & INIT_VLC_USE_NEW_STATIC) && vlc->table_size != vlc->table_allocated)
+ av_log(NULL, AV_LOG_ERROR, "needed %d had %d\n", vlc->table_size, vlc->table_allocated);
+ return 0;
+}
+
+
+void free_vlc(VLC *vlc)
+{
+ av_freep(&vlc->table);
+}
+
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/bytestream.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/bytestream.h
new file mode 100644
index 000000000..b56f6ce74
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/bytestream.h
@@ -0,0 +1,71 @@
+/*
+ * Bytestream functions
+ * copyright (c) 2006 Baptiste Coudurier <baptiste.coudurier@free.fr>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_BYTESTREAM_H
+#define AVCODEC_BYTESTREAM_H
+
+#include <string.h>
+#include "libavutil/common.h"
+#include "libavutil/intreadwrite.h"
+
+#define DEF_T(type, name, bytes, read, write) \
+static av_always_inline type bytestream_get_ ## name(const uint8_t **b){\
+ (*b) += bytes;\
+ return read(*b - bytes);\
+}\
+static av_always_inline void bytestream_put_ ##name(uint8_t **b, const type value){\
+ write(*b, value);\
+ (*b) += bytes;\
+}
+
+#define DEF(name, bytes, read, write) \
+ DEF_T(unsigned int, name, bytes, read, write)
+#define DEF64(name, bytes, read, write) \
+ DEF_T(uint64_t, name, bytes, read, write)
+
+DEF64(le64, 8, AV_RL64, AV_WL64)
+DEF (le32, 4, AV_RL32, AV_WL32)
+DEF (le24, 3, AV_RL24, AV_WL24)
+DEF (le16, 2, AV_RL16, AV_WL16)
+DEF64(be64, 8, AV_RB64, AV_WB64)
+DEF (be32, 4, AV_RB32, AV_WB32)
+DEF (be24, 3, AV_RB24, AV_WB24)
+DEF (be16, 2, AV_RB16, AV_WB16)
+DEF (byte, 1, AV_RB8 , AV_WB8 )
+
+#undef DEF
+#undef DEF64
+#undef DEF_T
+
+static av_always_inline unsigned int bytestream_get_buffer(const uint8_t **b, uint8_t *dst, unsigned int size)
+{
+ memcpy(dst, *b, size);
+ (*b) += size;
+ return size;
+}
+
+static av_always_inline void bytestream_put_buffer(uint8_t **b, const uint8_t *src, unsigned int size)
+{
+ memcpy(*b, src, size);
+ (*b) += size;
+}
+
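+/*
+ * Usage sketch (illustrative only, not part of the original header): each
+ * generated helper reads or writes one value and advances the cursor. 'pkt'
+ * is a placeholder input buffer.
+ *
+ *     const uint8_t *in = pkt;
+ *     unsigned int tag  = bytestream_get_le32(&in);
+ *     unsigned int size = bytestream_get_be16(&in);
+ *     // 'in' now points 6 bytes past the start of pkt
+ */
+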
+#endif /* AVCODEC_BYTESTREAM_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/cabac.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/cabac.c
new file mode 100644
index 000000000..cc0a7c225
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/cabac.c
@@ -0,0 +1,168 @@
+/*
+ * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/cabac.c
+ * Context Adaptive Binary Arithmetic Coder.
+ */
+
+#include <string.h>
+
+#include "libavutil/common.h"
+#include "get_bits.h"
+#include "cabac.h"
+
+static const uint8_t lps_range[64][4]= {
+{128,176,208,240}, {128,167,197,227}, {128,158,187,216}, {123,150,178,205},
+{116,142,169,195}, {111,135,160,185}, {105,128,152,175}, {100,122,144,166},
+{ 95,116,137,158}, { 90,110,130,150}, { 85,104,123,142}, { 81, 99,117,135},
+{ 77, 94,111,128}, { 73, 89,105,122}, { 69, 85,100,116}, { 66, 80, 95,110},
+{ 62, 76, 90,104}, { 59, 72, 86, 99}, { 56, 69, 81, 94}, { 53, 65, 77, 89},
+{ 51, 62, 73, 85}, { 48, 59, 69, 80}, { 46, 56, 66, 76}, { 43, 53, 63, 72},
+{ 41, 50, 59, 69}, { 39, 48, 56, 65}, { 37, 45, 54, 62}, { 35, 43, 51, 59},
+{ 33, 41, 48, 56}, { 32, 39, 46, 53}, { 30, 37, 43, 50}, { 29, 35, 41, 48},
+{ 27, 33, 39, 45}, { 26, 31, 37, 43}, { 24, 30, 35, 41}, { 23, 28, 33, 39},
+{ 22, 27, 32, 37}, { 21, 26, 30, 35}, { 20, 24, 29, 33}, { 19, 23, 27, 31},
+{ 18, 22, 26, 30}, { 17, 21, 25, 28}, { 16, 20, 23, 27}, { 15, 19, 22, 25},
+{ 14, 18, 21, 24}, { 14, 17, 20, 23}, { 13, 16, 19, 22}, { 12, 15, 18, 21},
+{ 12, 14, 17, 20}, { 11, 14, 16, 19}, { 11, 13, 15, 18}, { 10, 12, 15, 17},
+{ 10, 12, 14, 16}, { 9, 11, 13, 15}, { 9, 11, 12, 14}, { 8, 10, 12, 14},
+{ 8, 9, 11, 13}, { 7, 9, 11, 12}, { 7, 9, 10, 12}, { 7, 8, 10, 11},
+{ 6, 8, 9, 11}, { 6, 7, 9, 10}, { 6, 7, 8, 9}, { 2, 2, 2, 2},
+};
+
+uint8_t ff_h264_mlps_state[4*64];
+uint8_t ff_h264_lps_range[4*2*64];
+uint8_t ff_h264_lps_state[2*64];
+uint8_t ff_h264_mps_state[2*64];
+
+static const uint8_t mps_state[64]= {
+ 1, 2, 3, 4, 5, 6, 7, 8,
+ 9,10,11,12,13,14,15,16,
+ 17,18,19,20,21,22,23,24,
+ 25,26,27,28,29,30,31,32,
+ 33,34,35,36,37,38,39,40,
+ 41,42,43,44,45,46,47,48,
+ 49,50,51,52,53,54,55,56,
+ 57,58,59,60,61,62,62,63,
+};
+
+static const uint8_t lps_state[64]= {
+ 0, 0, 1, 2, 2, 4, 4, 5,
+ 6, 7, 8, 9, 9,11,11,12,
+ 13,13,15,15,16,16,18,18,
+ 19,19,21,21,22,22,23,24,
+ 24,25,26,26,27,27,28,29,
+ 29,30,30,30,31,32,32,33,
+ 33,33,34,34,35,35,35,36,
+ 36,36,37,37,37,38,38,63,
+};
+
+const uint8_t ff_h264_norm_shift[512]= {
+ 9,8,7,7,6,6,6,6,5,5,5,5,5,5,5,5,
+ 4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
+ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+ 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
+ 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
+ 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
+ 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+};
+
+/**
+ *
+ * @param buf_size size of buf in bytes
+ */
+void ff_init_cabac_encoder(CABACContext *c, uint8_t *buf, int buf_size){
+ init_put_bits(&c->pb, buf, buf_size);
+
+ c->low= 0;
+ c->range= 0x1FE;
+ c->outstanding_count= 0;
+#ifdef STRICT_LIMITS
+ c->sym_count =0;
+#endif
+
+ c->pb.bit_left++; //avoids firstBitFlag
+}
+
+/**
+ *
+ * @param buf_size size of buf in bytes
+ */
+void ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size){
+ c->bytestream_start=
+ c->bytestream= buf;
+ c->bytestream_end= buf + buf_size;
+
+#if CABAC_BITS == 16
+ c->low = (*c->bytestream++)<<18;
+ c->low+= (*c->bytestream++)<<10;
+#else
+ c->low = (*c->bytestream++)<<10;
+#endif
+ c->low+= ((*c->bytestream++)<<2) + 2;
+ c->range= 0x1FE;
+}
+
+void ff_init_cabac_states(CABACContext *c){
+ int i, j;
+
+ for(i=0; i<64; i++){
+ for(j=0; j<4; j++){ //FIXME check if this is worth the 1 shift we save
+ ff_h264_lps_range[j*2*64+2*i+0]=
+ ff_h264_lps_range[j*2*64+2*i+1]= lps_range[i][j];
+ }
+
+ ff_h264_mlps_state[128+2*i+0]=
+ ff_h264_mps_state[2*i+0]= 2*mps_state[i]+0;
+ ff_h264_mlps_state[128+2*i+1]=
+ ff_h264_mps_state[2*i+1]= 2*mps_state[i]+1;
+
+ if( i ){
+#ifdef BRANCHLESS_CABAC_DECODER
+ ff_h264_mlps_state[128-2*i-1]= 2*lps_state[i]+0;
+ ff_h264_mlps_state[128-2*i-2]= 2*lps_state[i]+1;
+ }else{
+ ff_h264_mlps_state[128-2*i-1]= 1;
+ ff_h264_mlps_state[128-2*i-2]= 0;
+#else
+ ff_h264_lps_state[2*i+0]= 2*lps_state[i]+0;
+ ff_h264_lps_state[2*i+1]= 2*lps_state[i]+1;
+ }else{
+ ff_h264_lps_state[2*i+0]= 1;
+ ff_h264_lps_state[2*i+1]= 0;
+#endif
+ }
+ }
+}
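+
+/* Illustrative sketch of how the packed tables built above are read; the
+ * helper name range_lps_ref is hypothetical and the block is kept disabled.
+ * A context state s is stored as 2*probState + valMPS, and
+ * ff_h264_lps_range[2*(range&0xC0) + s] equals lps_range[probState][(range>>6)&3]. */
+#if 0
+static int range_lps_ref(int range, int prob_state, int val_mps)
+{
+    int s = 2*prob_state + val_mps;
+    return ff_h264_lps_range[2*(range & 0xC0) + s];
+}
+#endif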
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/cabac.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/cabac.h
new file mode 100644
index 000000000..16b0dc079
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/cabac.h
@@ -0,0 +1,588 @@
+/*
+ * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/cabac.h
+ * Context Adaptive Binary Arithmetic Coder.
+ */
+
+#ifndef AVCODEC_CABAC_H
+#define AVCODEC_CABAC_H
+
+#include "put_bits.h"
+
+//#undef NDEBUG
+#include <assert.h>
+#include "libavutil/x86_cpu.h"
+
+#define CABAC_BITS 16
+#define CABAC_MASK ((1<<CABAC_BITS)-1)
+#define BRANCHLESS_CABAC_DECODER 1
+//#define ARCH_X86_DISABLED 1
+
+typedef struct CABACContext{
+ int low;
+ int range;
+ int outstanding_count;
+#ifdef STRICT_LIMITS
+ int symCount;
+#endif
+ const uint8_t *bytestream_start;
+ const uint8_t *bytestream;
+ const uint8_t *bytestream_end;
+ PutBitContext pb;
+}CABACContext;
+
+extern uint8_t ff_h264_mlps_state[4*64];
+extern uint8_t ff_h264_lps_range[4*2*64]; ///< rangeTabLPS
+extern uint8_t ff_h264_mps_state[2*64]; ///< transIdxMPS
+extern uint8_t ff_h264_lps_state[2*64]; ///< transIdxLPS
+extern const uint8_t ff_h264_norm_shift[512];
+
+
+void ff_init_cabac_encoder(CABACContext *c, uint8_t *buf, int buf_size);
+void ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size);
+void ff_init_cabac_states(CABACContext *c);
+
+
+static inline void put_cabac_bit(CABACContext *c, int b){
+ put_bits(&c->pb, 1, b);
+ for(;c->outstanding_count; c->outstanding_count--){
+ put_bits(&c->pb, 1, 1-b);
+ }
+}
+
+static inline void renorm_cabac_encoder(CABACContext *c){
+ while(c->range < 0x100){
+ //FIXME optimize
+ if(c->low<0x100){
+ put_cabac_bit(c, 0);
+ }else if(c->low<0x200){
+ c->outstanding_count++;
+ c->low -= 0x100;
+ }else{
+ put_cabac_bit(c, 1);
+ c->low -= 0x200;
+ }
+
+ c->range+= c->range;
+ c->low += c->low;
+ }
+}
+
+static void refill(CABACContext *c){
+#if CABAC_BITS == 16
+ c->low+= (c->bytestream[0]<<9) + (c->bytestream[1]<<1);
+#else
+ c->low+= c->bytestream[0]<<1;
+#endif
+ c->low -= CABAC_MASK;
+ c->bytestream+= CABAC_BITS/8;
+}
+
+#if ! ( ARCH_X86 && HAVE_7REGS && HAVE_EBX_AVAILABLE && !defined(BROKEN_RELOCATIONS) )
+static void refill2(CABACContext *c){
+ int i, x;
+
+ x= c->low ^ (c->low-1);
+ i= 7 - ff_h264_norm_shift[x>>(CABAC_BITS-1)];
+
+ x= -CABAC_MASK;
+
+#if CABAC_BITS == 16
+ x+= (c->bytestream[0]<<9) + (c->bytestream[1]<<1);
+#else
+ x+= c->bytestream[0]<<1;
+#endif
+
+ c->low += x<<i;
+ c->bytestream+= CABAC_BITS/8;
+}
+#endif
+
+static inline void renorm_cabac_decoder(CABACContext *c){
+ while(c->range < 0x100){
+ c->range+= c->range;
+ c->low+= c->low;
+ if(!(c->low & CABAC_MASK))
+ refill(c);
+ }
+}
+
+static inline void renorm_cabac_decoder_once(CABACContext *c){
+#ifdef ARCH_X86_DISABLED
+ int temp;
+#if 0
+ //P3:683 athlon:475
+ __asm__(
+ "lea -0x100(%0), %2 \n\t"
+ "shr $31, %2 \n\t" //FIXME 31->63 for x86-64
+ "shl %%cl, %0 \n\t"
+ "shl %%cl, %1 \n\t"
+ : "+r"(c->range), "+r"(c->low), "+c"(temp)
+ );
+#elif 0
+ //P3:680 athlon:474
+ __asm__(
+ "cmp $0x100, %0 \n\t"
+ "setb %%cl \n\t" //FIXME 31->63 for x86-64
+ "shl %%cl, %0 \n\t"
+ "shl %%cl, %1 \n\t"
+ : "+r"(c->range), "+r"(c->low), "+c"(temp)
+ );
+#elif 1
+ int temp2;
+ //P3:665 athlon:517
+ __asm__(
+ "lea -0x100(%0), %%eax \n\t"
+ "cltd \n\t"
+ "mov %0, %%eax \n\t"
+ "and %%edx, %0 \n\t"
+ "and %1, %%edx \n\t"
+ "add %%eax, %0 \n\t"
+ "add %%edx, %1 \n\t"
+ : "+r"(c->range), "+r"(c->low), "+a"(temp), "+d"(temp2)
+ );
+#elif 0
+ int temp2;
+ //P3:673 athlon:509
+ __asm__(
+ "cmp $0x100, %0 \n\t"
+ "sbb %%edx, %%edx \n\t"
+ "mov %0, %%eax \n\t"
+ "and %%edx, %0 \n\t"
+ "and %1, %%edx \n\t"
+ "add %%eax, %0 \n\t"
+ "add %%edx, %1 \n\t"
+ : "+r"(c->range), "+r"(c->low), "+a"(temp), "+d"(temp2)
+ );
+#else
+ int temp2;
+ //P3:677 athlon:511
+ __asm__(
+ "cmp $0x100, %0 \n\t"
+ "lea (%0, %0), %%eax \n\t"
+ "lea (%1, %1), %%edx \n\t"
+ "cmovb %%eax, %0 \n\t"
+ "cmovb %%edx, %1 \n\t"
+ : "+r"(c->range), "+r"(c->low), "+a"(temp), "+d"(temp2)
+ );
+#endif
+#else
+ //P3:675 athlon:476
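+    // shift is 1 when range < 0x100 (renormalization needed), 0 otherwise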
+ int shift= (uint32_t)(c->range - 0x100)>>31;
+ c->range<<= shift;
+ c->low <<= shift;
+#endif
+ if(!(c->low & CABAC_MASK))
+ refill(c);
+}
+
+static av_always_inline int get_cabac_inline(CABACContext *c, uint8_t * const state){
+ //FIXME gcc generates duplicate load/stores for c->low and c->range
+#define LOW "0"
+#define RANGE "4"
+#if ARCH_X86_64
+#define BYTESTART "16"
+/* rename BYTE because GCC doesn't like it */
+#define FFBYTE "24"
+#define BYTEEND "32"
+#else
+#define BYTESTART "12"
+/* rename BYTE because GCC doesn't like it */
+#define FFBYTE "16"
+#define BYTEEND "20"
+#endif
+#if ARCH_X86 && HAVE_7REGS && HAVE_EBX_AVAILABLE && !defined(BROKEN_RELOCATIONS)
+ int bit;
+
+#ifndef BRANCHLESS_CABAC_DECODER
+ __asm__ volatile(
+ "movzbl (%1), %0 \n\t"
+ "movl "RANGE "(%2), %%ebx \n\t"
+ "movl "RANGE "(%2), %%edx \n\t"
+ "andl $0xC0, %%ebx \n\t"
+ "movzbl "MANGLE(ff_h264_lps_range)"(%0, %%ebx, 2), %%esi\n\t"
+ "movl "LOW "(%2), %%ebx \n\t"
+//eax:state ebx:low, edx:range, esi:RangeLPS
+ "subl %%esi, %%edx \n\t"
+ "movl %%edx, %%ecx \n\t"
+ "shll $17, %%ecx \n\t"
+ "cmpl %%ecx, %%ebx \n\t"
+ " ja 1f \n\t"
+
+#if 1
+ //athlon:4067 P3:4110
+ "lea -0x100(%%edx), %%ecx \n\t"
+ "shr $31, %%ecx \n\t"
+ "shl %%cl, %%edx \n\t"
+ "shl %%cl, %%ebx \n\t"
+#else
+ //athlon:4057 P3:4130
+ "cmp $0x100, %%edx \n\t" //FIXME avoidable
+ "setb %%cl \n\t"
+ "shl %%cl, %%edx \n\t"
+ "shl %%cl, %%ebx \n\t"
+#endif
+ "movzbl "MANGLE(ff_h264_mps_state)"(%0), %%ecx \n\t"
+ "movb %%cl, (%1) \n\t"
+//eax:state ebx:low, edx:range, esi:RangeLPS
+ "test %%bx, %%bx \n\t"
+ " jnz 2f \n\t"
+ "mov "FFBYTE "(%2), %%"REG_S" \n\t"
+ "subl $0xFFFF, %%ebx \n\t"
+ "movzwl (%%"REG_S"), %%ecx \n\t"
+ "bswap %%ecx \n\t"
+ "shrl $15, %%ecx \n\t"
+ "add $2, %%"REG_S" \n\t"
+ "addl %%ecx, %%ebx \n\t"
+ "mov %%"REG_S", "FFBYTE "(%2) \n\t"
+ "jmp 2f \n\t"
+ "1: \n\t"
+//eax:state ebx:low, edx:range, esi:RangeLPS
+ "subl %%ecx, %%ebx \n\t"
+ "movl %%esi, %%edx \n\t"
+ "movzbl " MANGLE(ff_h264_norm_shift) "(%%esi), %%ecx \n\t"
+ "shll %%cl, %%ebx \n\t"
+ "shll %%cl, %%edx \n\t"
+ "movzbl "MANGLE(ff_h264_lps_state)"(%0), %%ecx \n\t"
+ "movb %%cl, (%1) \n\t"
+ "add $1, %0 \n\t"
+ "test %%bx, %%bx \n\t"
+ " jnz 2f \n\t"
+
+ "mov "FFBYTE "(%2), %%"REG_c" \n\t"
+ "movzwl (%%"REG_c"), %%esi \n\t"
+ "bswap %%esi \n\t"
+ "shrl $15, %%esi \n\t"
+ "subl $0xFFFF, %%esi \n\t"
+ "add $2, %%"REG_c" \n\t"
+ "mov %%"REG_c", "FFBYTE "(%2) \n\t"
+
+ "leal -1(%%ebx), %%ecx \n\t"
+ "xorl %%ebx, %%ecx \n\t"
+ "shrl $15, %%ecx \n\t"
+ "movzbl " MANGLE(ff_h264_norm_shift) "(%%ecx), %%ecx \n\t"
+ "neg %%ecx \n\t"
+ "add $7, %%ecx \n\t"
+
+ "shll %%cl , %%esi \n\t"
+ "addl %%esi, %%ebx \n\t"
+ "2: \n\t"
+ "movl %%edx, "RANGE "(%2) \n\t"
+ "movl %%ebx, "LOW "(%2) \n\t"
+ :"=&a"(bit) //FIXME this is fragile gcc either runs out of registers or miscompiles it (for example if "+a"(bit) or "+m"(*state) is used
+ :"r"(state), "r"(c)
+ : "%"REG_c, "%ebx", "%edx", "%"REG_S, "memory"
+ );
+ bit&=1;
+#else /* BRANCHLESS_CABAC_DECODER */
+
+
+#if HAVE_FAST_CMOV
+#define BRANCHLESS_GET_CABAC_UPDATE(ret, cabac, statep, low, lowword, range, tmp, tmpbyte)\
+ "mov "tmp" , %%ecx \n\t"\
+ "shl $17 , "tmp" \n\t"\
+ "cmp "low" , "tmp" \n\t"\
+ "cmova %%ecx , "range" \n\t"\
+ "sbb %%ecx , %%ecx \n\t"\
+ "and %%ecx , "tmp" \n\t"\
+ "sub "tmp" , "low" \n\t"\
+ "xor %%ecx , "ret" \n\t"
+#else /* HAVE_FAST_CMOV */
+#define BRANCHLESS_GET_CABAC_UPDATE(ret, cabac, statep, low, lowword, range, tmp, tmpbyte)\
+ "mov "tmp" , %%ecx \n\t"\
+ "shl $17 , "tmp" \n\t"\
+ "sub "low" , "tmp" \n\t"\
+ "sar $31 , "tmp" \n\t" /*lps_mask*/\
+ "sub %%ecx , "range" \n\t" /*RangeLPS - range*/\
+ "and "tmp" , "range" \n\t" /*(RangeLPS - range)&lps_mask*/\
+ "add %%ecx , "range" \n\t" /*new range*/\
+ "shl $17 , %%ecx \n\t"\
+ "and "tmp" , %%ecx \n\t"\
+ "sub %%ecx , "low" \n\t"\
+ "xor "tmp" , "ret" \n\t"
+#endif /* HAVE_FAST_CMOV */
+
+
+#define BRANCHLESS_GET_CABAC(ret, cabac, statep, low, lowword, range, tmp, tmpbyte)\
+ "movzbl "statep" , "ret" \n\t"\
+ "mov "range" , "tmp" \n\t"\
+ "and $0xC0 , "range" \n\t"\
+ "movzbl "MANGLE(ff_h264_lps_range)"("ret", "range", 2), "range" \n\t"\
+ "sub "range" , "tmp" \n\t"\
+ BRANCHLESS_GET_CABAC_UPDATE(ret, cabac, statep, low, lowword, range, tmp, tmpbyte)\
+ "movzbl " MANGLE(ff_h264_norm_shift) "("range"), %%ecx \n\t"\
+ "shl %%cl , "range" \n\t"\
+ "movzbl "MANGLE(ff_h264_mlps_state)"+128("ret"), "tmp" \n\t"\
+ "mov "tmpbyte" , "statep" \n\t"\
+ "shl %%cl , "low" \n\t"\
+ "test "lowword" , "lowword" \n\t"\
+ " jnz 1f \n\t"\
+ "mov "FFBYTE"("cabac"), %%"REG_c" \n\t"\
+ "movzwl (%%"REG_c") , "tmp" \n\t"\
+ "bswap "tmp" \n\t"\
+ "shr $15 , "tmp" \n\t"\
+ "sub $0xFFFF , "tmp" \n\t"\
+ "add $2 , %%"REG_c" \n\t"\
+ "mov %%"REG_c" , "FFBYTE "("cabac") \n\t"\
+ "lea -1("low") , %%ecx \n\t"\
+ "xor "low" , %%ecx \n\t"\
+ "shr $15 , %%ecx \n\t"\
+ "movzbl " MANGLE(ff_h264_norm_shift) "(%%ecx), %%ecx \n\t"\
+ "neg %%ecx \n\t"\
+ "add $7 , %%ecx \n\t"\
+ "shl %%cl , "tmp" \n\t"\
+ "add "tmp" , "low" \n\t"\
+ "1: \n\t"
+
+ __asm__ volatile(
+ "movl "RANGE "(%2), %%esi \n\t"
+ "movl "LOW "(%2), %%ebx \n\t"
+ BRANCHLESS_GET_CABAC("%0", "%2", "(%1)", "%%ebx", "%%bx", "%%esi", "%%edx", "%%dl")
+ "movl %%esi, "RANGE "(%2) \n\t"
+ "movl %%ebx, "LOW "(%2) \n\t"
+
+ :"=&a"(bit)
+ :"r"(state), "r"(c)
+ : "%"REG_c, "%ebx", "%edx", "%esi", "memory"
+ );
+ bit&=1;
+#endif /* BRANCHLESS_CABAC_DECODER */
+#else /* ARCH_X86 && HAVE_7REGS && HAVE_EBX_AVAILABLE && !defined(BROKEN_RELOCATIONS) */
+ int s = *state;
+ int RangeLPS= ff_h264_lps_range[2*(c->range&0xC0) + s];
+ int bit, lps_mask av_unused;
+
+ c->range -= RangeLPS;
+#ifndef BRANCHLESS_CABAC_DECODER
+ if(c->low < (c->range<<(CABAC_BITS+1))){
+ bit= s&1;
+ *state= ff_h264_mps_state[s];
+ renorm_cabac_decoder_once(c);
+ }else{
+ bit= ff_h264_norm_shift[RangeLPS];
+ c->low -= (c->range<<(CABAC_BITS+1));
+ *state= ff_h264_lps_state[s];
+ c->range = RangeLPS<<bit;
+ c->low <<= bit;
+ bit= (s&1)^1;
+
+ if(!(c->low & CABAC_MASK)){
+ refill2(c);
+ }
+ }
+#else /* BRANCHLESS_CABAC_DECODER */
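+    /* (c->range<<(CABAC_BITS+1)) - c->low is negative exactly when low exceeds
+     * the scaled MPS range, so lps_mask is all ones on the LPS path and zero
+     * on the MPS path; it selects the corresponding updates without a branch. */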
+ lps_mask= ((c->range<<(CABAC_BITS+1)) - c->low)>>31;
+
+ c->low -= (c->range<<(CABAC_BITS+1)) & lps_mask;
+ c->range += (RangeLPS - c->range) & lps_mask;
+
+ s^=lps_mask;
+ *state= (ff_h264_mlps_state+128)[s];
+ bit= s&1;
+
+ lps_mask= ff_h264_norm_shift[c->range];
+ c->range<<= lps_mask;
+ c->low <<= lps_mask;
+ if(!(c->low & CABAC_MASK))
+ refill2(c);
+#endif /* BRANCHLESS_CABAC_DECODER */
+#endif /* ARCH_X86 && HAVE_7REGS && HAVE_EBX_AVAILABLE && !defined(BROKEN_RELOCATIONS) */
+ return bit;
+}
+
+static int av_noinline av_unused get_cabac_noinline(CABACContext *c, uint8_t * const state){
+ return get_cabac_inline(c,state);
+}
+
+static int av_unused get_cabac(CABACContext *c, uint8_t * const state){
+ return get_cabac_inline(c,state);
+}
+
+static int av_unused get_cabac_bypass(CABACContext *c){
+#if 0 //not faster
+ int bit;
+ __asm__ volatile(
+ "movl "RANGE "(%1), %%ebx \n\t"
+ "movl "LOW "(%1), %%eax \n\t"
+ "shl $17, %%ebx \n\t"
+ "add %%eax, %%eax \n\t"
+ "sub %%ebx, %%eax \n\t"
+ "cltd \n\t"
+ "and %%edx, %%ebx \n\t"
+ "add %%ebx, %%eax \n\t"
+ "test %%ax, %%ax \n\t"
+ " jnz 1f \n\t"
+ "movl "FFBYTE "(%1), %%"REG_b" \n\t"
+ "subl $0xFFFF, %%eax \n\t"
+ "movzwl (%%"REG_b"), %%ecx \n\t"
+ "bswap %%ecx \n\t"
+ "shrl $15, %%ecx \n\t"
+ "addl $2, %%"REG_b" \n\t"
+ "addl %%ecx, %%eax \n\t"
+ "movl %%"REG_b", "FFBYTE "(%1) \n\t"
+ "1: \n\t"
+ "movl %%eax, "LOW "(%1) \n\t"
+
+ :"=&d"(bit)
+ :"r"(c)
+ : "%eax", "%"REG_b, "%ecx", "memory"
+ );
+ return bit+1;
+#else
+ int range;
+ c->low += c->low;
+
+ if(!(c->low & CABAC_MASK))
+ refill(c);
+
+ range= c->range<<(CABAC_BITS+1);
+ if(c->low < range){
+ return 0;
+ }else{
+ c->low -= range;
+ return 1;
+ }
+#endif
+}
+
+
+static av_always_inline int get_cabac_bypass_sign(CABACContext *c, int val){
+#if ARCH_X86 && HAVE_EBX_AVAILABLE
+ __asm__ volatile(
+ "movl "RANGE "(%1), %%ebx \n\t"
+ "movl "LOW "(%1), %%eax \n\t"
+ "shl $17, %%ebx \n\t"
+ "add %%eax, %%eax \n\t"
+ "sub %%ebx, %%eax \n\t"
+ "cltd \n\t"
+ "and %%edx, %%ebx \n\t"
+ "add %%ebx, %%eax \n\t"
+ "xor %%edx, %%ecx \n\t"
+ "sub %%edx, %%ecx \n\t"
+ "test %%ax, %%ax \n\t"
+ " jnz 1f \n\t"
+ "mov "FFBYTE "(%1), %%"REG_b" \n\t"
+ "subl $0xFFFF, %%eax \n\t"
+ "movzwl (%%"REG_b"), %%edx \n\t"
+ "bswap %%edx \n\t"
+ "shrl $15, %%edx \n\t"
+ "add $2, %%"REG_b" \n\t"
+ "addl %%edx, %%eax \n\t"
+ "mov %%"REG_b", "FFBYTE "(%1) \n\t"
+ "1: \n\t"
+ "movl %%eax, "LOW "(%1) \n\t"
+
+ :"+c"(val)
+ :"r"(c)
+ : "%eax", "%"REG_b, "%edx", "memory"
+ );
+ return val;
+#else
+ int range, mask;
+ c->low += c->low;
+
+ if(!(c->low & CABAC_MASK))
+ refill(c);
+
+ range= c->range<<(CABAC_BITS+1);
+ c->low -= range;
+ mask= c->low >> 31;
+ range &= mask;
+ c->low += range;
+ return (val^mask)-mask;
+#endif
+}
+
+/**
+ *
+ * @return the number of bytes read or 0 if no end
+ */
+static int av_unused get_cabac_terminate(CABACContext *c){
+ c->range -= 2;
+ if(c->low < c->range<<(CABAC_BITS+1)){
+ renorm_cabac_decoder_once(c);
+ return 0;
+ }else{
+ return c->bytestream - c->bytestream_start;
+ }
+}
+
+#if 0
+/**
+ * Get (truncated) unary binarization.
+ */
+static int get_cabac_u(CABACContext *c, uint8_t * state, int max, int max_index, int truncated){
+ int i;
+
+ for(i=0; i<max; i++){
+ if(get_cabac(c, state)==0)
+ return i;
+
+ if(i< max_index) state++;
+ }
+
+ return truncated ? max : -1;
+}
+
+/**
+ * Get unary exponential Golomb k-th order binarization.
+ */
+static int get_cabac_ueg(CABACContext *c, uint8_t * state, int max, int is_signed, int k, int max_index){
+ int i, v;
+ int m= 1<<k;
+
+ if(get_cabac(c, state)==0)
+ return 0;
+
+ if(0 < max_index) state++;
+
+ for(i=1; i<max; i++){
+ if(get_cabac(c, state)==0){
+ if(is_signed && get_cabac_bypass(c)){
+ return -i;
+ }else
+ return i;
+ }
+
+ if(i < max_index) state++;
+ }
+
+ while(get_cabac_bypass(c)){
+ i+= m;
+ m+= m;
+ }
+
+ v=0;
+ while(m>>=1){
+ v+= v + get_cabac_bypass(c);
+ }
+ i += v;
+
+ if(is_signed && get_cabac_bypass(c)){
+ return -i;
+ }else
+ return i;
+}
+#endif /* 0 */
+
+#endif /* AVCODEC_CABAC_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/cook.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/cook.c
new file mode 100644
index 000000000..58a7cc74b
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/cook.c
@@ -0,0 +1,1239 @@
+/*
+ * COOK compatible decoder
+ * Copyright (c) 2003 Sascha Sommer
+ * Copyright (c) 2005 Benjamin Larsson
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/cook.c
+ * Cook compatible decoder. Bastardization of the G.722.1 standard.
+ * This decoder handles RealNetworks, RealAudio G2 data.
+ * Cook is identified by the codec name cook in RM files.
+ *
+ * To use this decoder, a calling application must supply the extradata
+ * bytes provided from the RM container; 8+ bytes for mono streams and
+ * 16+ for stereo streams (maybe more).
+ *
+ * Codec technicalities (all of this assumes a buffer length of 1024):
+ * Cook combines several different techniques to achieve its compression.
+ * In the time domain the buffer is divided into 8 pieces and quantized; if
+ * two neighboring pieces have different quantization indices, a smooth
+ * quantization curve is used to get a smooth overlap between the pieces.
+ * To get to the transform domain Cook uses a modulated lapped transform.
+ * The transform domain has 50 subbands with 20 elements each, which
+ * means at most 50*20=1000 coefficients are used out of the 1024
+ * available.
+ */
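+
+/* Layout implied by the description above (assuming the usual 1024-sample
+ * frame): MLT coefficient k belongs to subband k/20, and the gain envelope
+ * splits the time domain frame into 8 windows of samples_per_channel/8
+ * samples each; see decode_vectors() and interpolate_float() below. */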
+
+#include <math.h>
+#include <stddef.h>
+#include <stdio.h>
+
+#include "libavutil/lfg.h"
+#include "libavutil/random_seed.h"
+#include "avcodec.h"
+#include "get_bits.h"
+#include "dsputil.h"
+#include "bytestream.h"
+
+#include "cookdata.h"
+
+/* the different Cook versions */
+#define MONO 0x1000001
+#define STEREO 0x1000002
+#define JOINT_STEREO 0x1000003
+#define MC_COOK 0x2000000 //multichannel Cook, not supported
+
+#define SUBBAND_SIZE 20
+#define MAX_SUBPACKETS 5
+
+typedef struct {
+ int *now;
+ int *previous;
+} cook_gains;
+
+typedef struct {
+ int ch_idx;
+ int size;
+ int num_channels;
+ int cookversion;
+ int samples_per_frame;
+ int subbands;
+ int js_subband_start;
+ int js_vlc_bits;
+ int samples_per_channel;
+ int log2_numvector_size;
+ unsigned int channel_mask;
+ VLC ccpl; ///< channel coupling
+ int joint_stereo;
+ int bits_per_subpacket;
+ int bits_per_subpdiv;
+ int total_subbands;
+ int numvector_size; ///< 1 << log2_numvector_size;
+
+ float mono_previous_buffer1[1024];
+ float mono_previous_buffer2[1024];
+ /** gain buffers */
+ cook_gains gains1;
+ cook_gains gains2;
+ int gain_1[9];
+ int gain_2[9];
+ int gain_3[9];
+ int gain_4[9];
+} COOKSubpacket;
+
+typedef struct cook {
+ /*
+ * The following 5 functions provide the lowlevel arithmetic on
+ * the internal audio buffers.
+ */
+ void (* scalar_dequant)(struct cook *q, int index, int quant_index,
+ int* subband_coef_index, int* subband_coef_sign,
+ float* mlt_p);
+
+ void (* decouple) (struct cook *q,
+ COOKSubpacket *p,
+ int subband,
+ float f1, float f2,
+ float *decode_buffer,
+ float *mlt_buffer1, float *mlt_buffer2);
+
+ void (* imlt_window) (struct cook *q, float *buffer1,
+ cook_gains *gains_ptr, float *previous_buffer);
+
+ void (* interpolate) (struct cook *q, float* buffer,
+ int gain_index, int gain_index_next);
+
+ void (* saturate_output) (struct cook *q, int chan, int16_t *out);
+
+ AVCodecContext* avctx;
+ GetBitContext gb;
+ /* stream data */
+ int nb_channels;
+ int bit_rate;
+ int sample_rate;
+ int num_vectors;
+ int samples_per_channel;
+ /* states */
+ AVLFG random_state;
+
+ /* transform data */
+ FFTContext mdct_ctx;
+ float* mlt_window;
+
+ /* VLC data */
+ VLC envelope_quant_index[13];
+ VLC sqvh[7]; //scalar quantization
+
+ /* generatable tables and related variables */
+ int gain_size_factor;
+ float gain_table[23];
+
+ /* data buffers */
+
+ uint8_t* decoded_bytes_buffer;
+ DECLARE_ALIGNED_16(float,mono_mdct_output)[2048];
+ float decode_buffer_1[1024];
+ float decode_buffer_2[1024];
+ float decode_buffer_0[1060]; /* static allocation for joint decode */
+
+ const float *cplscales[5];
+ int num_subpackets;
+ COOKSubpacket subpacket[MAX_SUBPACKETS];
+} COOKContext;
+
+static float pow2tab[127];
+static float rootpow2tab[127];
+
+/*************** init functions ***************/
+
+/* table generator */
+static av_cold void init_pow2table(void){
+ int i;
+ for (i=-63 ; i<64 ; i++){
+ pow2tab[63+i]= pow(2, i);
+ rootpow2tab[63+i]=sqrt(pow(2, i));
+ }
+}
+
+/* table generator */
+static av_cold void init_gain_table(COOKContext *q) {
+ int i;
+ q->gain_size_factor = q->samples_per_channel/8;
+ for (i=0 ; i<23 ; i++) {
+ q->gain_table[i] = pow(pow2tab[i+52] ,
+ (1.0/(double)q->gain_size_factor));
+ }
+}
+
+
+static av_cold int init_cook_vlc_tables(COOKContext *q) {
+ int i, result;
+
+ result = 0;
+ for (i=0 ; i<13 ; i++) {
+ result |= init_vlc (&q->envelope_quant_index[i], 9, 24,
+ envelope_quant_index_huffbits[i], 1, 1,
+ envelope_quant_index_huffcodes[i], 2, 2, 0);
+ }
+ av_log(q->avctx,AV_LOG_DEBUG,"sqvh VLC init\n");
+ for (i=0 ; i<7 ; i++) {
+ result |= init_vlc (&q->sqvh[i], vhvlcsize_tab[i], vhsize_tab[i],
+ cvh_huffbits[i], 1, 1,
+ cvh_huffcodes[i], 2, 2, 0);
+ }
+
+ for(i=0;i<q->num_subpackets;i++){
+ if (q->subpacket[i].joint_stereo==1){
+ result |= init_vlc (&q->subpacket[i].ccpl, 6, (1<<q->subpacket[i].js_vlc_bits)-1,
+ ccpl_huffbits[q->subpacket[i].js_vlc_bits-2], 1, 1,
+ ccpl_huffcodes[q->subpacket[i].js_vlc_bits-2], 2, 2, 0);
+ av_log(q->avctx,AV_LOG_DEBUG,"subpacket %i Joint-stereo VLC used.\n",i);
+ }
+ }
+
+ av_log(q->avctx,AV_LOG_DEBUG,"VLC tables initialized.\n");
+ return result;
+}
+
+static av_cold int init_cook_mlt(COOKContext *q) {
+ int j;
+ int mlt_size = q->samples_per_channel;
+
+ if ((q->mlt_window = av_malloc(sizeof(float)*mlt_size)) == 0)
+ return -1;
+
+ /* Initialize the MLT window: simple sine window. */
+ ff_sine_window_init(q->mlt_window, mlt_size);
+ for(j=0 ; j<mlt_size ; j++)
+ q->mlt_window[j] *= sqrt(2.0 / q->samples_per_channel);
+
+ /* Initialize the MDCT. */
+ if (ff_mdct_init(&q->mdct_ctx, av_log2(mlt_size)+1, 1, 1.0)) {
+ av_free(q->mlt_window);
+ return -1;
+ }
+ av_log(q->avctx,AV_LOG_DEBUG,"MDCT initialized, order = %d.\n",
+ av_log2(mlt_size)+1);
+
+ return 0;
+}
+
+static const float *maybe_reformat_buffer32 (COOKContext *q, const float *ptr, int n)
+{
+    return ptr;
+}
+
+static av_cold void init_cplscales_table (COOKContext *q) {
+ int i;
+ for (i=0;i<5;i++)
+ q->cplscales[i] = maybe_reformat_buffer32 (q, cplscales[i], (1<<(i+2))-1);
+}
+
+/*************** init functions end ***********/
+
+/**
+ * Cook input data decoding: every 32 bits are XORed with 0x37c511f2.
+ * Why? No idea; probably some checksum/error-detection scheme.
+ *
+ * Out buffer size: extra bytes are needed to cope with
+ * padding/misalignment.
+ * Subpackets passed to the decoder can contain two, consecutive
+ * half-subpackets, of identical but arbitrary size.
+ * 1234 1234 1234 1234 extraA extraB
+ * Case 1: AAAA BBBB 0 0
+ * Case 2: AAAA ABBB BB-- 3 3
+ * Case 3: AAAA AABB BBBB 2 2
+ * Case 4: AAAA AAAB BBBB BB-- 1 5
+ *
+ * Nice way to waste CPU cycles.
+ *
+ * @param inbuffer pointer to byte array of indata
+ * @param out pointer to byte array of outdata
+ * @param bytes number of bytes
+ */
+#define DECODE_BYTES_PAD1(bytes) (3 - ((bytes)+3) % 4)
+#define DECODE_BYTES_PAD2(bytes) ((bytes) % 4 + DECODE_BYTES_PAD1(2 * (bytes)))
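+/* DECODE_BYTES_PAD1(bytes) is the number of bytes (0..3) needed to round
+ * "bytes" up to a multiple of 4, matching the 32-bit word loop in
+ * decode_bytes() below. */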
+
+static inline int decode_bytes(const uint8_t* inbuffer, uint8_t* out, int bytes){
+ int i, off;
+ uint32_t c;
+ const uint32_t* buf;
+ uint32_t* obuf = (uint32_t*) out;
+ /* FIXME: 64 bit platforms would be able to do 64 bits at a time.
+ * I'm too lazy though, should be something like
+ * for(i=0 ; i<bitamount/64 ; i++)
+ * (int64_t)out[i] = 0x37c511f237c511f2^be2me_64(int64_t)in[i]);
+ * Buffer alignment needs to be checked. */
+
+ off = (intptr_t)inbuffer & 3;
+ buf = (const uint32_t*) (inbuffer - off);
+ c = be2me_32((0x37c511f2 >> (off*8)) | (0x37c511f2 << (32-(off*8))));
+ bytes += 3 + off;
+ for (i = 0; i < bytes/4; i++)
+ obuf[i] = c ^ buf[i];
+
+ return off;
+}
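+
+/* Byte-wise reference of the descrambling above, kept disabled; the name
+ * decode_bytes_ref is illustrative only. Every input byte is XORed with the
+ * big-endian pattern 37 c5 11 f2 repeated every four bytes. Unlike
+ * decode_bytes(), this writes the result without the word-alignment offset
+ * that the function above returns. */
+#if 0
+static void decode_bytes_ref(const uint8_t *in, uint8_t *out, int bytes)
+{
+    static const uint8_t pat[4] = { 0x37, 0xc5, 0x11, 0xf2 };
+    int i;
+    for (i = 0; i < bytes; i++)
+        out[i] = in[i] ^ pat[i & 3];
+}
+#endif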
+
+/**
+ * Cook uninit
+ */
+
+static av_cold int cook_decode_close(AVCodecContext *avctx)
+{
+ int i;
+ COOKContext *q = avctx->priv_data;
+ av_log(avctx,AV_LOG_DEBUG, "Deallocating memory.\n");
+
+ /* Free allocated memory buffers. */
+ av_free(q->mlt_window);
+ av_free(q->decoded_bytes_buffer);
+
+ /* Free the transform. */
+ ff_mdct_end(&q->mdct_ctx);
+
+ /* Free the VLC tables. */
+ for (i=0 ; i<13 ; i++) {
+ free_vlc(&q->envelope_quant_index[i]);
+ }
+ for (i=0 ; i<7 ; i++) {
+ free_vlc(&q->sqvh[i]);
+ }
+ for (i=0 ; i<q->num_subpackets ; i++) {
+ free_vlc(&q->subpacket[i].ccpl);
+ }
+
+ av_log(avctx,AV_LOG_DEBUG,"Memory deallocated.\n");
+
+ return 0;
+}
+
+/**
+ * Fill the gain array for the time domain quantization.
+ *
+ * @param gb         pointer to the GetBitContext
+ * @param gaininfo   array of 9 gain indexes to fill
+ */
+
+static void decode_gain_info(GetBitContext *gb, int *gaininfo)
+{
+ int i, n;
+
+ while (get_bits1(gb)) {}
+ n = get_bits_count(gb) - 1; //amount of elements*2 to update
+
+ i = 0;
+ while (n--) {
+ int index = get_bits(gb, 3);
+ int gain = get_bits1(gb) ? get_bits(gb, 4) - 7 : -1;
+
+ while (i <= index) gaininfo[i++] = gain;
+ }
+ while (i <= 8) gaininfo[i++] = 0;
+}
+
+/**
+ * Create the quant index table needed for the envelope.
+ *
+ * @param q pointer to the COOKContext
+ * @param quant_index_table pointer to the array
+ */
+
+static void decode_envelope(COOKContext *q, COOKSubpacket *p, int* quant_index_table) {
+ int i,j, vlc_index;
+
+ quant_index_table[0]= get_bits(&q->gb,6) - 6; //This is used later in categorize
+
+ for (i=1 ; i < p->total_subbands ; i++){
+ vlc_index=i;
+ if (i >= p->js_subband_start * 2) {
+ vlc_index-=p->js_subband_start;
+ } else {
+ vlc_index/=2;
+ if(vlc_index < 1) vlc_index = 1;
+ }
+ if (vlc_index>13) vlc_index = 13; //the VLC tables >13 are identical to No. 13
+
+ j = get_vlc2(&q->gb, q->envelope_quant_index[vlc_index-1].table,
+ q->envelope_quant_index[vlc_index-1].bits,2);
+ quant_index_table[i] = quant_index_table[i-1] + j - 12; //differential encoding
+ }
+}
+
+/**
+ * Calculate the category and category_index vector.
+ *
+ * @param q pointer to the COOKContext
+ * @param quant_index_table pointer to the array
+ * @param category pointer to the category array
+ * @param category_index pointer to the category_index array
+ */
+
+static void categorize(COOKContext *q, COOKSubpacket *p, int* quant_index_table,
+ int* category, int* category_index){
+ int exp_idx, bias, tmpbias1, tmpbias2, bits_left, num_bits, index, v, i, j;
+ int exp_index2[102];
+ int exp_index1[102];
+
+ int tmp_categorize_array[128*2];
+ int tmp_categorize_array1_idx=p->numvector_size;
+ int tmp_categorize_array2_idx=p->numvector_size;
+
+ bits_left = p->bits_per_subpacket - get_bits_count(&q->gb);
+
+ if(bits_left > q->samples_per_channel) {
+ bits_left = q->samples_per_channel +
+ ((bits_left - q->samples_per_channel)*5)/8;
+ //av_log(q->avctx, AV_LOG_ERROR, "bits_left = %d\n",bits_left);
+ }
+
+ memset(&exp_index1,0,102*sizeof(int));
+ memset(&exp_index2,0,102*sizeof(int));
+ memset(&tmp_categorize_array,0,128*2*sizeof(int));
+
+ bias=-32;
+
+ /* Estimate bias. */
+ for (i=32 ; i>0 ; i=i/2){
+ num_bits = 0;
+ index = 0;
+ for (j=p->total_subbands ; j>0 ; j--){
+ exp_idx = av_clip((i - quant_index_table[index] + bias) / 2, 0, 7);
+ index++;
+ num_bits+=expbits_tab[exp_idx];
+ }
+ if(num_bits >= bits_left - 32){
+ bias+=i;
+ }
+ }
+
+ /* Calculate total number of bits. */
+ num_bits=0;
+ for (i=0 ; i<p->total_subbands ; i++) {
+ exp_idx = av_clip((bias - quant_index_table[i]) / 2, 0, 7);
+ num_bits += expbits_tab[exp_idx];
+ exp_index1[i] = exp_idx;
+ exp_index2[i] = exp_idx;
+ }
+ tmpbias1 = tmpbias2 = num_bits;
+
+ for (j = 1 ; j < p->numvector_size ; j++) {
+ if (tmpbias1 + tmpbias2 > 2*bits_left) { /* ---> */
+ int max = -999999;
+ index=-1;
+ for (i=0 ; i<p->total_subbands ; i++){
+ if (exp_index1[i] < 7) {
+ v = (-2*exp_index1[i]) - quant_index_table[i] + bias;
+ if ( v >= max) {
+ max = v;
+ index = i;
+ }
+ }
+ }
+ if(index==-1)break;
+ tmp_categorize_array[tmp_categorize_array1_idx++] = index;
+ tmpbias1 -= expbits_tab[exp_index1[index]] -
+ expbits_tab[exp_index1[index]+1];
+ ++exp_index1[index];
+ } else { /* <--- */
+ int min = 999999;
+ index=-1;
+ for (i=0 ; i<p->total_subbands ; i++){
+ if(exp_index2[i] > 0){
+ v = (-2*exp_index2[i])-quant_index_table[i]+bias;
+ if ( v < min) {
+ min = v;
+ index = i;
+ }
+ }
+ }
+ if(index == -1)break;
+ tmp_categorize_array[--tmp_categorize_array2_idx] = index;
+ tmpbias2 -= expbits_tab[exp_index2[index]] -
+ expbits_tab[exp_index2[index]-1];
+ --exp_index2[index];
+ }
+ }
+
+ for(i=0 ; i<p->total_subbands ; i++)
+ category[i] = exp_index2[i];
+
+ for(i=0 ; i<p->numvector_size-1 ; i++)
+ category_index[i] = tmp_categorize_array[tmp_categorize_array2_idx++];
+
+}
+
+
+/**
+ * Expand the category vector.
+ *
+ * @param q pointer to the COOKContext
+ * @param category pointer to the category array
+ * @param category_index pointer to the category_index array
+ */
+
+static inline void expand_category(COOKContext *q, int* category,
+ int* category_index){
+ int i;
+ for(i=0 ; i<q->num_vectors ; i++){
+ ++category[category_index[i]];
+ }
+}
+
+/**
+ * The real requantization of the mltcoefs
+ *
+ * @param q pointer to the COOKContext
+ * @param index index
+ * @param quant_index quantisation index
+ * @param subband_coef_index array of indexes to quant_centroid_tab
+ * @param subband_coef_sign signs of coefficients
+ * @param mlt_p pointer into the mlt buffer
+ */
+
+static void scalar_dequant_float(COOKContext *q, int index, int quant_index,
+ int* subband_coef_index, int* subband_coef_sign,
+ float* mlt_p){
+ int i;
+ float f1;
+
+ for(i=0 ; i<SUBBAND_SIZE ; i++) {
+ if (subband_coef_index[i]) {
+ f1 = quant_centroid_tab[index][subband_coef_index[i]];
+ if (subband_coef_sign[i]) f1 = -f1;
+ } else {
+ /* noise coding if subband_coef_index[i] == 0 */
+ f1 = dither_tab[index];
+ if (av_lfg_get(&q->random_state) < 0x80000000) f1 = -f1;
+ }
+ mlt_p[i] = f1 * rootpow2tab[quant_index+63];
+ }
+}
+/**
+ * Unpack the subband_coef_index and subband_coef_sign vectors.
+ *
+ * @param q pointer to the COOKContext
+ * @param category pointer to the category array
+ * @param subband_coef_index array of indexes to quant_centroid_tab
+ * @param subband_coef_sign signs of coefficients
+ */
+
+static int unpack_SQVH(COOKContext *q, COOKSubpacket *p, int category, int* subband_coef_index,
+ int* subband_coef_sign) {
+ int i,j;
+ int vlc, vd ,tmp, result;
+
+ vd = vd_tab[category];
+ result = 0;
+ for(i=0 ; i<vpr_tab[category] ; i++){
+ vlc = get_vlc2(&q->gb, q->sqvh[category].table, q->sqvh[category].bits, 3);
+ if (p->bits_per_subpacket < get_bits_count(&q->gb)){
+ vlc = 0;
+ result = 1;
+ }
+ for(j=vd-1 ; j>=0 ; j--){
+ tmp = (vlc * invradix_tab[category])/0x100000;
+ subband_coef_index[vd*i+j] = vlc - tmp * (kmax_tab[category]+1);
+ vlc = tmp;
+ }
+ for(j=0 ; j<vd ; j++){
+ if (subband_coef_index[i*vd + j]) {
+ if(get_bits_count(&q->gb) < p->bits_per_subpacket){
+ subband_coef_sign[i*vd+j] = get_bits1(&q->gb);
+ } else {
+ result=1;
+ subband_coef_sign[i*vd+j]=0;
+ }
+ } else {
+ subband_coef_sign[i*vd+j]=0;
+ }
+ }
+ }
+ return result;
+}
+
+
+/**
+ * Fill the mlt_buffer with mlt coefficients.
+ *
+ * @param q pointer to the COOKContext
+ * @param category pointer to the category array
+ * @param quant_index_table pointer to the array
+ * @param mlt_buffer pointer to mlt coefficients
+ */
+
+
+static void decode_vectors(COOKContext* q, COOKSubpacket* p, int* category,
+ int *quant_index_table, float* mlt_buffer){
+ /* A zero in this table means that the subband coefficient is
+ random noise coded. */
+ int subband_coef_index[SUBBAND_SIZE];
+    /* A zero in this table means that the subband coefficient is a
+       positive multiplier. */
+ int subband_coef_sign[SUBBAND_SIZE];
+ int band, j;
+ int index=0;
+
+ for(band=0 ; band<p->total_subbands ; band++){
+ index = category[band];
+ if(category[band] < 7){
+ if(unpack_SQVH(q, p, category[band], subband_coef_index, subband_coef_sign)){
+ index=7;
+ for(j=0 ; j<p->total_subbands ; j++) category[band+j]=7;
+ }
+ }
+ if(index>=7) {
+ memset(subband_coef_index, 0, sizeof(subband_coef_index));
+ memset(subband_coef_sign, 0, sizeof(subband_coef_sign));
+ }
+ q->scalar_dequant(q, index, quant_index_table[band],
+ subband_coef_index, subband_coef_sign,
+ &mlt_buffer[band * SUBBAND_SIZE]);
+ }
+
+ if(p->total_subbands*SUBBAND_SIZE >= q->samples_per_channel){
+ return;
+ } /* FIXME: should this be removed, or moved into loop above? */
+}
+
+
+/**
+ * function for decoding mono data
+ *
+ * @param q pointer to the COOKContext
+ * @param mlt_buffer pointer to mlt coefficients
+ */
+
+static void mono_decode(COOKContext *q, COOKSubpacket *p, float* mlt_buffer) {
+
+ int category_index[128];
+ int quant_index_table[102];
+ int category[128];
+
+ memset(&category, 0, 128*sizeof(int));
+ memset(&category_index, 0, 128*sizeof(int));
+
+ decode_envelope(q, p, quant_index_table);
+ q->num_vectors = get_bits(&q->gb,p->log2_numvector_size);
+ categorize(q, p, quant_index_table, category, category_index);
+ expand_category(q, category, category_index);
+ decode_vectors(q, p, category, quant_index_table, mlt_buffer);
+}
+
+
+/**
+ * the actual requantization of the timedomain samples
+ *
+ * @param q pointer to the COOKContext
+ * @param buffer pointer to the timedomain buffer
+ * @param gain_index index for the block multiplier
+ * @param gain_index_next index for the next block multiplier
+ */
+
+static void interpolate_float(COOKContext *q, float* buffer,
+ int gain_index, int gain_index_next){
+ int i;
+ float fc1, fc2;
+ fc1 = pow2tab[gain_index+63];
+
+ if(gain_index == gain_index_next){ //static gain
+ for(i=0 ; i<q->gain_size_factor ; i++){
+ buffer[i]*=fc1;
+ }
+ return;
+ } else { //smooth gain
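+        /* gain_table[11 + d] == pow(2.0, d / (double)gain_size_factor), so
+         * multiplying fc1 by fc2 once per sample ramps the gain geometrically
+         * from 2^gain_index to 2^gain_index_next across the window. */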
+ fc2 = q->gain_table[11 + (gain_index_next-gain_index)];
+ for(i=0 ; i<q->gain_size_factor ; i++){
+ buffer[i]*=fc1;
+ fc1*=fc2;
+ }
+ return;
+ }
+}
+
+/**
+ * Apply transform window, overlap buffers.
+ *
+ * @param q pointer to the COOKContext
+ * @param buffer1           pointer to the MLT coefficients
+ * @param gains_ptr current and previous gains
+ * @param previous_buffer pointer to the previous buffer to be used for overlapping
+ */
+
+static void imlt_window_float (COOKContext *q, float *buffer1,
+ cook_gains *gains_ptr, float *previous_buffer)
+{
+ const float fc = pow2tab[gains_ptr->previous[0] + 63];
+ int i;
+    /* The weird thing here is that the two halves of the time domain
+     * buffer are swapped. Also, the newest data, which we save away for
+     * the next frame, has the wrong sign. Hence the subtraction below.
+     * Almost sounds like a complex conjugate/reverse data/FFT effect.
+     */
+
+ /* Apply window and overlap */
+ for(i = 0; i < q->samples_per_channel; i++){
+ buffer1[i] = buffer1[i] * fc * q->mlt_window[i] -
+ previous_buffer[i] * q->mlt_window[q->samples_per_channel - 1 - i];
+ }
+}
+
+/**
+ * The modulated lapped transform, this takes transform coefficients
+ * and transforms them into timedomain samples.
+ * Apply transform window, overlap buffers, apply gain profile
+ * and buffer management.
+ *
+ * @param q pointer to the COOKContext
+ * @param inbuffer pointer to the mltcoefficients
+ * @param gains_ptr current and previous gains
+ * @param previous_buffer pointer to the previous buffer to be used for overlapping
+ */
+
+static void imlt_gain(COOKContext *q, float *inbuffer,
+ cook_gains *gains_ptr, float* previous_buffer)
+{
+ float *buffer0 = q->mono_mdct_output;
+ float *buffer1 = q->mono_mdct_output + q->samples_per_channel;
+ int i;
+
+ /* Inverse modified discrete cosine transform */
+ ff_imdct_calc(&q->mdct_ctx, q->mono_mdct_output, inbuffer);
+
+ q->imlt_window (q, buffer1, gains_ptr, previous_buffer);
+
+ /* Apply gain profile */
+ for (i = 0; i < 8; i++) {
+ if (gains_ptr->now[i] || gains_ptr->now[i + 1])
+ q->interpolate(q, &buffer1[q->gain_size_factor * i],
+ gains_ptr->now[i], gains_ptr->now[i + 1]);
+ }
+
+ /* Save away the current to be previous block. */
+ memcpy(previous_buffer, buffer0, sizeof(float)*q->samples_per_channel);
+}
+
+
+/**
+ * function for getting the jointstereo coupling information
+ *
+ * @param q pointer to the COOKContext
+ * @param decouple_tab decoupling array
+ *
+ */
+
+static void decouple_info(COOKContext *q, COOKSubpacket *p, int* decouple_tab){
+ int length, i;
+
+ if(get_bits1(&q->gb)) {
+ if(cplband[p->js_subband_start] > cplband[p->subbands-1]) return;
+
+ length = cplband[p->subbands-1] - cplband[p->js_subband_start] + 1;
+ for (i=0 ; i<length ; i++) {
+ decouple_tab[cplband[p->js_subband_start] + i] = get_vlc2(&q->gb, p->ccpl.table, p->ccpl.bits, 2);
+ }
+ return;
+ }
+
+ if(cplband[p->js_subband_start] > cplband[p->subbands-1]) return;
+
+ length = cplband[p->subbands-1] - cplband[p->js_subband_start] + 1;
+ for (i=0 ; i<length ; i++) {
+ decouple_tab[cplband[p->js_subband_start] + i] = get_bits(&q->gb, p->js_vlc_bits);
+ }
+ return;
+}
+
+/*
+ * function decouples a pair of signals from a single signal via multiplication.
+ *
+ * @param q pointer to the COOKContext
+ * @param subband index of the current subband
+ * @param f1 multiplier for channel 1 extraction
+ * @param f2 multiplier for channel 2 extraction
+ * @param decode_buffer input buffer
+ * @param mlt_buffer1 pointer to left channel mlt coefficients
+ * @param mlt_buffer2 pointer to right channel mlt coefficients
+ */
+static void decouple_float (COOKContext *q,
+ COOKSubpacket *p,
+ int subband,
+ float f1, float f2,
+ float *decode_buffer,
+ float *mlt_buffer1, float *mlt_buffer2)
+{
+ int j, tmp_idx;
+ for (j=0 ; j<SUBBAND_SIZE ; j++) {
+ tmp_idx = ((p->js_subband_start + subband)*SUBBAND_SIZE)+j;
+ mlt_buffer1[SUBBAND_SIZE*subband + j] = f1 * decode_buffer[tmp_idx];
+ mlt_buffer2[SUBBAND_SIZE*subband + j] = f2 * decode_buffer[tmp_idx];
+ }
+}
+
+/**
+ * function for decoding joint stereo data
+ *
+ * @param q pointer to the COOKContext
+ * @param mlt_buffer1 pointer to left channel mlt coefficients
+ * @param mlt_buffer2 pointer to right channel mlt coefficients
+ */
+
+static void joint_decode(COOKContext *q, COOKSubpacket *p, float* mlt_buffer1,
+ float* mlt_buffer2) {
+ int i,j;
+ int decouple_tab[SUBBAND_SIZE];
+ float *decode_buffer = q->decode_buffer_0;
+ int idx, cpl_tmp;
+ float f1,f2;
+ const float* cplscale;
+
+ memset(decouple_tab, 0, sizeof(decouple_tab));
+ memset(decode_buffer, 0, sizeof(decode_buffer));
+
+ /* Make sure the buffers are zeroed out. */
+ memset(mlt_buffer1,0, 1024*sizeof(float));
+ memset(mlt_buffer2,0, 1024*sizeof(float));
+ decouple_info(q, p, decouple_tab);
+ mono_decode(q, p, decode_buffer);
+
+ /* The two channels are stored interleaved in decode_buffer. */
+ for (i=0 ; i<p->js_subband_start ; i++) {
+ for (j=0 ; j<SUBBAND_SIZE ; j++) {
+ mlt_buffer1[i*20+j] = decode_buffer[i*40+j];
+ mlt_buffer2[i*20+j] = decode_buffer[i*40+20+j];
+ }
+ }
+
+ /* When we reach js_subband_start (the higher frequencies)
+ the coefficients are stored in a coupling scheme. */
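+    /* decouple_tab selects a pair of complementary scale factors (f1, f2)
+       from the cplscales table; q->decouple() multiplies each shared
+       coefficient by f1 for one channel and f2 for the other. */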
+ idx = (1 << p->js_vlc_bits) - 1;
+ for (i=p->js_subband_start ; i<p->subbands ; i++) {
+ cpl_tmp = cplband[i];
+ idx -=decouple_tab[cpl_tmp];
+ cplscale = q->cplscales[p->js_vlc_bits-2]; //choose decoupler table
+ f1 = cplscale[decouple_tab[cpl_tmp]];
+ f2 = cplscale[idx-1];
+ q->decouple (q, p, i, f1, f2, decode_buffer, mlt_buffer1, mlt_buffer2);
+ idx = (1 << p->js_vlc_bits) - 1;
+ }
+}
+
+/**
+ * First part of subpacket decoding:
+ * decode raw stream bytes and read gain info.
+ *
+ * @param q pointer to the COOKContext
+ * @param inbuffer pointer to raw stream data
+ * @param gain_ptr array of current/prev gain pointers
+ */
+
+static inline void
+decode_bytes_and_gain(COOKContext *q, COOKSubpacket *p, const uint8_t *inbuffer,
+ cook_gains *gains_ptr)
+{
+ int offset;
+
+ offset = decode_bytes(inbuffer, q->decoded_bytes_buffer,
+ p->bits_per_subpacket/8);
+ init_get_bits(&q->gb, q->decoded_bytes_buffer + offset,
+ p->bits_per_subpacket);
+ decode_gain_info(&q->gb, gains_ptr->now);
+
+ /* Swap current and previous gains */
+ FFSWAP(int *, gains_ptr->now, gains_ptr->previous);
+}
+
+/**
+ * Saturate the output signal to signed 16bit integers.
+ *
+ * @param q pointer to the COOKContext
+ * @param chan channel to saturate
+ * @param out pointer to the output vector
+ */
+static void
+saturate_output_float (COOKContext *q, int chan, int16_t *out)
+{
+ int j;
+ float *output = q->mono_mdct_output + q->samples_per_channel;
+ /* Clip and convert floats to 16 bits.
+ */
+ for (j = 0; j < q->samples_per_channel; j++) {
+ out[chan + q->nb_channels * j] =
+ av_clip_int16(lrintf(output[j]));
+ }
+}
+
+/**
+ * Final part of subpacket decoding:
+ * Apply modulated lapped transform, gain compensation,
+ * clip and convert to integer.
+ *
+ * @param q pointer to the COOKContext
+ * @param decode_buffer pointer to the mlt coefficients
+ * @param gain_ptr array of current/prev gain pointers
+ * @param previous_buffer pointer to the previous buffer to be used for overlapping
+ * @param out pointer to the output buffer
+ * @param chan 0: left or single channel, 1: right channel
+ */
+
+static inline void
+mlt_compensate_output(COOKContext *q, float *decode_buffer,
+ cook_gains *gains, float *previous_buffer,
+ int16_t *out, int chan)
+{
+ imlt_gain(q, decode_buffer, gains, previous_buffer);
+ q->saturate_output (q, chan, out);
+}
+
+
+/**
+ * Cook subpacket decoding. This function returns one decoded subpacket,
+ * usually 1024 samples per channel.
+ *
+ * @param q                 pointer to the COOKContext
+ * @param p                 pointer to the COOKSubpacket to decode
+ * @param inbuffer          pointer to the input buffer
+ * @param outbuffer         pointer to the output buffer
+ */
+
+
+static void decode_subpacket(COOKContext *q, COOKSubpacket* p, const uint8_t *inbuffer, int16_t *outbuffer) {
+ int sub_packet_size = p->size;
+ /* packet dump */
+// for (i=0 ; i<sub_packet_size ; i++) {
+// av_log(q->avctx, AV_LOG_ERROR, "%02x", inbuffer[i]);
+// }
+// av_log(q->avctx, AV_LOG_ERROR, "\n");
+ memset(q->decode_buffer_1,0,sizeof(q->decode_buffer_1));
+ decode_bytes_and_gain(q, p, inbuffer, &p->gains1);
+
+ if (p->joint_stereo) {
+ joint_decode(q, p, q->decode_buffer_1, q->decode_buffer_2);
+ } else {
+ mono_decode(q, p, q->decode_buffer_1);
+
+ if (p->num_channels == 2) {
+ decode_bytes_and_gain(q, p, inbuffer + sub_packet_size/2, &p->gains2);
+ mono_decode(q, p, q->decode_buffer_2);
+ }
+ }
+
+ mlt_compensate_output(q, q->decode_buffer_1, &p->gains1,
+ p->mono_previous_buffer1, outbuffer, p->ch_idx);
+
+ if (p->num_channels == 2) {
+ if (p->joint_stereo) {
+ mlt_compensate_output(q, q->decode_buffer_2, &p->gains1,
+ p->mono_previous_buffer2, outbuffer, p->ch_idx + 1);
+ } else {
+ mlt_compensate_output(q, q->decode_buffer_2, &p->gains2,
+ p->mono_previous_buffer2, outbuffer, p->ch_idx + 1);
+ }
+ }
+
+}
+
+
+/**
+ * Cook frame decoding
+ *
+ * @param avctx pointer to the AVCodecContext
+ */
+
+static int cook_decode_frame(AVCodecContext *avctx,
+ void *data, int *data_size,
+ const uint8_t *buf, int buf_size) {
+ COOKContext *q = avctx->priv_data;
+ int i;
+ int offset = 0;
+ int chidx = 0;
+
+ if (buf_size < avctx->block_align)
+ return buf_size;
+
+ /* estimate subpacket sizes */
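+    /* Subpackets 1..N-1 store their size, divided by two, in the last N-1
+       bytes of the block; subpacket 0 gets the remainder, minus one byte
+       per trailing size field. */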
+ q->subpacket[0].size = avctx->block_align;
+
+ for(i=1;i<q->num_subpackets;i++){
+ q->subpacket[i].size = 2 * buf[avctx->block_align - q->num_subpackets + i];
+ q->subpacket[0].size -= q->subpacket[i].size + 1;
+ if (q->subpacket[0].size < 0) {
+ av_log(avctx,AV_LOG_DEBUG,"frame subpacket size total > avctx->block_align!\n");
+ return -1;
+ }
+ }
+
+    /* decode subpackets */
+ *data_size = 0;
+ for(i=0;i<q->num_subpackets;i++){
+ q->subpacket[i].bits_per_subpacket = (q->subpacket[i].size*8)>>q->subpacket[i].bits_per_subpdiv;
+ q->subpacket[i].ch_idx = chidx;
+ av_log(avctx,AV_LOG_DEBUG,"subpacket[%i] size %i js %i %i block_align %i\n",i,q->subpacket[i].size,q->subpacket[i].joint_stereo,offset,avctx->block_align);
+ decode_subpacket(q, &q->subpacket[i], buf + offset, (int16_t*)data);
+ offset += q->subpacket[i].size;
+ chidx += q->subpacket[i].num_channels;
+ av_log(avctx,AV_LOG_DEBUG,"subpacket[%i] %i %i\n",i,q->subpacket[i].size * 8,get_bits_count(&q->gb));
+ }
+ *data_size = sizeof(int16_t) * q->nb_channels * q->samples_per_channel;
+
+ /* Discard the first two frames: no valid audio. */
+ if (avctx->frame_number < 2) *data_size = 0;
+
+ return avctx->block_align;
+}
+
+static av_cold int cook_count_channels(unsigned int mask){
+ int i;
+ int channels = 0;
+ for(i = 0;i<32;i++){
+ if(mask & (1<<i))
+ ++channels;
+ }
+ return channels;
+}
+
+/**
+ * Cook initialization
+ *
+ * @param avctx pointer to the AVCodecContext
+ */
+
+static av_cold int cook_decode_init(AVCodecContext *avctx)
+{
+ COOKContext *q = avctx->priv_data;
+ const uint8_t *edata_ptr = avctx->extradata;
+ const uint8_t *edata_ptr_end = edata_ptr + avctx->extradata_size;
+ int extradata_size = avctx->extradata_size;
+ int s = 0;
+ unsigned int channel_mask = 0;
+ q->avctx = avctx;
+
+ /* Take care of the codec specific extradata. */
+ if (extradata_size <= 0) {
+ av_log(avctx,AV_LOG_ERROR,"Necessary extradata missing!\n");
+ return -1;
+ }
+ av_log(avctx,AV_LOG_DEBUG,"codecdata_length=%d\n",avctx->extradata_size);
+
+ /* Take data from the AVCodecContext (RM container). */
+ q->sample_rate = avctx->sample_rate;
+ q->nb_channels = avctx->channels;
+ q->bit_rate = avctx->bit_rate;
+
+ /* Initialize RNG. */
+ av_lfg_init(&q->random_state, 0);
+
+ while(edata_ptr < edata_ptr_end){
+ /* 8 for mono, 16 for stereo, ? for multichannel
+ Swap to right endianness so we don't need to care later on. */
+ if (extradata_size >= 8){
+ q->subpacket[s].cookversion = bytestream_get_be32(&edata_ptr);
+ q->subpacket[s].samples_per_frame = bytestream_get_be16(&edata_ptr);
+ q->subpacket[s].subbands = bytestream_get_be16(&edata_ptr);
+ extradata_size -= 8;
+ }
+ if (avctx->extradata_size >= 8){
+ bytestream_get_be32(&edata_ptr); //Unknown unused
+ q->subpacket[s].js_subband_start = bytestream_get_be16(&edata_ptr);
+ q->subpacket[s].js_vlc_bits = bytestream_get_be16(&edata_ptr);
+ extradata_size -= 8;
+ }
+
+ /* Initialize extradata related variables. */
+ q->subpacket[s].samples_per_channel = q->subpacket[s].samples_per_frame / q->nb_channels;
+ q->subpacket[s].bits_per_subpacket = avctx->block_align * 8;
+
+ /* Initialize default data states. */
+ q->subpacket[s].log2_numvector_size = 5;
+ q->subpacket[s].total_subbands = q->subpacket[s].subbands;
+ q->subpacket[s].num_channels = 1;
+
+ /* Initialize version-dependent variables */
+
+ av_log(avctx,AV_LOG_DEBUG,"subpacket[%i].cookversion=%x\n",s,q->subpacket[s].cookversion);
+ q->subpacket[s].joint_stereo = 0;
+ switch (q->subpacket[s].cookversion) {
+ case MONO:
+ if (q->nb_channels != 1) {
+ av_log(avctx,AV_LOG_ERROR,"Container channels != 1, report sample!\n");
+ return -1;
+ }
+ av_log(avctx,AV_LOG_DEBUG,"MONO\n");
+ break;
+ case STEREO:
+ if (q->nb_channels != 1) {
+ q->subpacket[s].bits_per_subpdiv = 1;
+ q->subpacket[s].num_channels = 2;
+ }
+ av_log(avctx,AV_LOG_DEBUG,"STEREO\n");
+ break;
+ case JOINT_STEREO:
+ if (q->nb_channels != 2) {
+ av_log(avctx,AV_LOG_ERROR,"Container channels != 2, report sample!\n");
+ return -1;
+ }
+ av_log(avctx,AV_LOG_DEBUG,"JOINT_STEREO\n");
+ if (avctx->extradata_size >= 16){
+ q->subpacket[s].total_subbands = q->subpacket[s].subbands + q->subpacket[s].js_subband_start;
+ q->subpacket[s].joint_stereo = 1;
+ q->subpacket[s].num_channels = 2;
+ }
+ if (q->subpacket[s].samples_per_channel > 256) {
+ q->subpacket[s].log2_numvector_size = 6;
+ }
+ if (q->subpacket[s].samples_per_channel > 512) {
+ q->subpacket[s].log2_numvector_size = 7;
+ }
+ break;
+ case MC_COOK:
+ av_log(avctx,AV_LOG_DEBUG,"MULTI_CHANNEL\n");
+ if(extradata_size >= 4)
+ channel_mask |= q->subpacket[s].channel_mask = bytestream_get_be32(&edata_ptr);
+
+ if(cook_count_channels(q->subpacket[s].channel_mask) > 1){
+ q->subpacket[s].total_subbands = q->subpacket[s].subbands + q->subpacket[s].js_subband_start;
+ q->subpacket[s].joint_stereo = 1;
+ q->subpacket[s].num_channels = 2;
+ q->subpacket[s].samples_per_channel = q->subpacket[s].samples_per_frame >> 1;
+
+ if (q->subpacket[s].samples_per_channel > 256) {
+ q->subpacket[s].log2_numvector_size = 6;
+ }
+ if (q->subpacket[s].samples_per_channel > 512) {
+ q->subpacket[s].log2_numvector_size = 7;
+ }
+ }else
+ q->subpacket[s].samples_per_channel = q->subpacket[s].samples_per_frame;
+
+ break;
+ default:
+ av_log(avctx,AV_LOG_ERROR,"Unknown Cook version, report sample!\n");
+ return -1;
+ break;
+ }
+
+ if(s > 1 && q->subpacket[s].samples_per_channel != q->samples_per_channel) {
+ av_log(avctx,AV_LOG_ERROR,"different number of samples per channel!\n");
+ return -1;
+ } else
+ q->samples_per_channel = q->subpacket[0].samples_per_channel;
+
+
+ /* Initialize variable relations */
+ q->subpacket[s].numvector_size = (1 << q->subpacket[s].log2_numvector_size);
+
+        /* Try to catch some obviously faulty streams, otherwise it might be exploitable */
+ if (q->subpacket[s].total_subbands > 53) {
+ av_log(avctx,AV_LOG_ERROR,"total_subbands > 53, report sample!\n");
+ return -1;
+ }
+
+ if ((q->subpacket[s].js_vlc_bits > 6) || (q->subpacket[s].js_vlc_bits < 0)) {
+ av_log(avctx,AV_LOG_ERROR,"js_vlc_bits = %d, only >= 0 and <= 6 allowed!\n",q->subpacket[s].js_vlc_bits);
+ return -1;
+ }
+
+ if (q->subpacket[s].subbands > 50) {
+ av_log(avctx,AV_LOG_ERROR,"subbands > 50, report sample!\n");
+ return -1;
+ }
+ q->subpacket[s].gains1.now = q->subpacket[s].gain_1;
+ q->subpacket[s].gains1.previous = q->subpacket[s].gain_2;
+ q->subpacket[s].gains2.now = q->subpacket[s].gain_3;
+ q->subpacket[s].gains2.previous = q->subpacket[s].gain_4;
+
+ q->num_subpackets++;
+ s++;
+ if (s > MAX_SUBPACKETS) {
+ av_log(avctx,AV_LOG_ERROR,"Too many subpackets > 5, report file!\n");
+ return -1;
+ }
+ }
+ /* Generate tables */
+ init_pow2table();
+ init_gain_table(q);
+ init_cplscales_table(q);
+
+ if (init_cook_vlc_tables(q) != 0)
+ return -1;
+
+
+ if(avctx->block_align >= UINT_MAX/2)
+ return -1;
+
+    /* Pad the data buffer with
+       DECODE_BYTES_PAD1 or DECODE_BYTES_PAD2 bytes for decode_bytes(), and
+       FF_INPUT_BUFFER_PADDING_SIZE bytes for the bitstream reader. */
+ q->decoded_bytes_buffer =
+ av_mallocz(avctx->block_align
+ + DECODE_BYTES_PAD1(avctx->block_align)
+ + FF_INPUT_BUFFER_PADDING_SIZE);
+ if (q->decoded_bytes_buffer == NULL)
+ return -1;
+
+ /* Initialize transform. */
+ if ( init_cook_mlt(q) != 0 )
+ return -1;
+
+ /* Initialize COOK signal arithmetic handling */
+    q->scalar_dequant = scalar_dequant_float;
+    q->decouple = decouple_float;
+    q->imlt_window = imlt_window_float;
+    q->interpolate = interpolate_float;
+    q->saturate_output = saturate_output_float;
+
+    /* Try to catch some obviously faulty streams, otherwise it might be exploitable */
+    if (q->samples_per_channel != 256 && q->samples_per_channel != 512 && q->samples_per_channel != 1024) {
+        av_log(avctx,AV_LOG_ERROR,"unknown amount of samples_per_channel = %d, report sample!\n",q->samples_per_channel);
+        return -1;
+    }
+
+ avctx->sample_fmt = SAMPLE_FMT_S16;
+ if (channel_mask)
+ avctx->channel_layout = channel_mask;
+ else
+ avctx->channel_layout = (avctx->channels==2) ? CH_LAYOUT_STEREO : CH_LAYOUT_MONO;
+
+ return 0;
+}
+
+
+AVCodec cook_decoder =
+{
+ /*.name = */"cook",
+ /*.type = */CODEC_TYPE_AUDIO,
+ /*.id = */CODEC_ID_COOK,
+ /*.priv_data_size = */sizeof(COOKContext),
+ /*.init =*/ cook_decode_init,
+ /*.encode = */ NULL,
+ /*.close = */cook_decode_close,
+ /*.decode = */cook_decode_frame,
+ /*.capabilities = */0,
+ /*.next = */NULL,
+ /*.flush = */NULL,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name = */NULL_IF_CONFIG_SMALL("COOK"),
+};
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/cookdata.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/cookdata.h
new file mode 100644
index 000000000..a0175f04a
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/cookdata.h
@@ -0,0 +1,563 @@
+/*
+ * COOK compatible decoder data
+ * Copyright (c) 2003 Sascha Sommer
+ * Copyright (c) 2005 Benjamin Larsson
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file cookdata.h
+ * Cook AKA RealAudio G2 compatible decoder data
+ */
+
+#ifndef AVCODEC_COOKDATA_H
+#define AVCODEC_COOKDATA_H
+
+#include <stdint.h>
+
+/* various data tables */
+
+static const int expbits_tab[8] = {
+ 52,47,43,37,29,22,16,0,
+};
+
+static const float dither_tab[8] = {
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.176777, 0.25, 0.707107,
+};
+
+static const float quant_centroid_tab[7][14] = {
+ { 0.000, 0.392, 0.761, 1.120, 1.477, 1.832, 2.183, 2.541, 2.893, 3.245, 3.598, 3.942, 4.288, 4.724 },
+ { 0.000, 0.544, 1.060, 1.563, 2.068, 2.571, 3.072, 3.562, 4.070, 4.620, 0.000, 0.000, 0.000, 0.000 },
+ { 0.000, 0.746, 1.464, 2.180, 2.882, 3.584, 4.316, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000 },
+ { 0.000, 1.006, 2.000, 2.993, 3.985, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000 },
+ { 0.000, 1.321, 2.703, 3.983, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000 },
+ { 0.000, 1.657, 3.491, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000 },
+ { 0.000, 1.964, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000 }
+};
+
+static const int invradix_tab[7] = {
+ 74899, 104858, 149797, 209716, 262144, 349526, 524288,
+};
+
+static const int kmax_tab[7] = {
+ 13, 9, 6, 4, 3, 2, 1,
+};
+
+static const int vd_tab[7] = {
+ 2, 2, 2, 4, 4, 5, 5,
+};
+
+static const int vpr_tab[7] = {
+ 10, 10, 10, 5, 5, 4, 4,
+};
+
+
+
+/* VLC data */
+
+static const int vhsize_tab[7] = {
+ 191, 97, 48, 607, 246, 230, 32,
+};
+
+static const int vhvlcsize_tab[7] = {
+ 8, 7, 7, 10, 9, 9, 6,
+};
+
+static const uint8_t envelope_quant_index_huffbits[13][24] = {
+ { 4, 6, 5, 5, 4, 4, 4, 4, 4, 4, 3, 3, 3, 4, 5, 7, 8, 9, 11, 11, 12, 12, 12, 12 },
+ { 10, 8, 6, 5, 5, 4, 3, 3, 3, 3, 3, 3, 4, 5, 7, 9, 11, 12, 13, 15, 15, 15, 16, 16 },
+ { 12, 10, 8, 6, 5, 4, 4, 4, 4, 4, 4, 3, 3, 3, 4, 4, 5, 5, 7, 9, 11, 13, 14, 14 },
+ { 13, 10, 9, 9, 7, 7, 5, 5, 4, 3, 3, 3, 3, 3, 4, 4, 4, 5, 7, 9, 11, 13, 13, 13 },
+ { 12, 13, 10, 8, 6, 6, 5, 5, 4, 4, 3, 3, 3, 3, 3, 4, 5, 5, 6, 7, 9, 11, 14, 14 },
+ { 12, 11, 9, 8, 8, 7, 5, 4, 4, 3, 3, 3, 3, 3, 4, 4, 5, 5, 7, 8, 10, 13, 14, 14 },
+ { 15, 16, 15, 12, 10, 8, 6, 5, 4, 3, 3, 3, 2, 3, 4, 5, 5, 7, 9, 11, 13, 16, 16, 16 },
+ { 14, 14, 11, 10, 9, 7, 7, 5, 5, 4, 3, 3, 2, 3, 3, 4, 5, 7, 9, 9, 12, 14, 15, 15 },
+ { 9, 9, 9, 8, 7, 6, 5, 4, 3, 3, 3, 3, 3, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 13 },
+ { 14, 12, 10, 8, 6, 6, 5, 4, 3, 3, 3, 3, 3, 3, 4, 5, 6, 8, 8, 9, 11, 14, 14, 14 },
+ { 13, 10, 9, 8, 6, 6, 5, 4, 4, 4, 3, 3, 2, 3, 4, 5, 6, 8, 9, 9, 11, 12, 14, 14 },
+ { 16, 13, 12, 11, 9, 6, 5, 5, 4, 4, 4, 3, 2, 3, 3, 4, 5, 7, 8, 10, 14, 16, 16, 16 },
+ { 13, 14, 14, 14, 10, 8, 7, 7, 5, 4, 3, 3, 2, 3, 3, 4, 5, 5, 7, 9, 11, 14, 14, 14 },
+};
+
+static const uint16_t envelope_quant_index_huffcodes[13][24] = {
+ {0x0006, 0x003e, 0x001c, 0x001d, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x0000, 0x0001,
+ 0x0002, 0x000d, 0x001e, 0x007e, 0x00fe, 0x01fe, 0x07fc, 0x07fd, 0x0ffc, 0x0ffd, 0x0ffe, 0x0fff},
+ {0x03fe, 0x00fe, 0x003e, 0x001c, 0x001d, 0x000c, 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005,
+ 0x000d, 0x001e, 0x007e, 0x01fe, 0x07fe, 0x0ffe, 0x1ffe, 0x7ffc, 0x7ffd, 0x7ffe, 0xfffe, 0xffff},
+ {0x0ffe, 0x03fe, 0x00fe, 0x003e, 0x001c, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x0000,
+ 0x0001, 0x0002, 0x000c, 0x000d, 0x001d, 0x001e, 0x007e, 0x01fe, 0x07fe, 0x1ffe, 0x3ffe, 0x3fff},
+ {0x1ffc, 0x03fe, 0x01fc, 0x01fd, 0x007c, 0x007d, 0x001c, 0x001d, 0x000a, 0x0000, 0x0001, 0x0002,
+ 0x0003, 0x0004, 0x000b, 0x000c, 0x000d, 0x001e, 0x007e, 0x01fe, 0x07fe, 0x1ffd, 0x1ffe, 0x1fff},
+ {0x0ffe, 0x1ffe, 0x03fe, 0x00fe, 0x003c, 0x003d, 0x001a, 0x001b, 0x000a, 0x000b, 0x0000, 0x0001,
+ 0x0002, 0x0003, 0x0004, 0x000c, 0x001c, 0x001d, 0x003e, 0x007e, 0x01fe, 0x07fe, 0x3ffe, 0x3fff},
+ {0x0ffe, 0x07fe, 0x01fe, 0x00fc, 0x00fd, 0x007c, 0x001c, 0x000a, 0x000b, 0x0000, 0x0001, 0x0002,
+ 0x0003, 0x0004, 0x000c, 0x000d, 0x001d, 0x001e, 0x007d, 0x00fe, 0x03fe, 0x1ffe, 0x3ffe, 0x3fff},
+ {0x7ffc, 0xfffc, 0x7ffd, 0x0ffe, 0x03fe, 0x00fe, 0x003e, 0x001c, 0x000c, 0x0002, 0x0003, 0x0004,
+ 0x0000, 0x0005, 0x000d, 0x001d, 0x001e, 0x007e, 0x01fe, 0x07fe, 0x1ffe, 0xfffd, 0xfffe, 0xffff},
+ {0x3ffc, 0x3ffd, 0x07fe, 0x03fe, 0x01fc, 0x007c, 0x007d, 0x001c, 0x001d, 0x000c, 0x0002, 0x0003,
+ 0x0000, 0x0004, 0x0005, 0x000d, 0x001e, 0x007e, 0x01fd, 0x01fe, 0x0ffe, 0x3ffe, 0x7ffe, 0x7fff},
+ {0x01fc, 0x01fd, 0x01fe, 0x00fc, 0x007c, 0x003c, 0x001c, 0x000c, 0x0000, 0x0001, 0x0002, 0x0003,
+ 0x0004, 0x0005, 0x000d, 0x001d, 0x003d, 0x007d, 0x00fd, 0x03fe, 0x07fe, 0x0ffe, 0x1ffe, 0x1fff},
+ {0x3ffc, 0x0ffe, 0x03fe, 0x00fc, 0x003c, 0x003d, 0x001c, 0x000c, 0x0000, 0x0001, 0x0002, 0x0003,
+ 0x0004, 0x0005, 0x000d, 0x001d, 0x003e, 0x00fd, 0x00fe, 0x01fe, 0x07fe, 0x3ffd, 0x3ffe, 0x3fff},
+ {0x1ffe, 0x03fe, 0x01fc, 0x00fc, 0x003c, 0x003d, 0x001c, 0x000a, 0x000b, 0x000c, 0x0002, 0x0003,
+ 0x0000, 0x0004, 0x000d, 0x001d, 0x003e, 0x00fd, 0x01fd, 0x01fe, 0x07fe, 0x0ffe, 0x3ffe, 0x3fff},
+ {0xfffc, 0x1ffe, 0x0ffe, 0x07fe, 0x01fe, 0x003e, 0x001c, 0x001d, 0x000a, 0x000b, 0x000c, 0x0002,
+ 0x0000, 0x0003, 0x0004, 0x000d, 0x001e, 0x007e, 0x00fe, 0x03fe, 0x3ffe, 0xfffd, 0xfffe, 0xffff},
+ {0x1ffc, 0x3ffa, 0x3ffb, 0x3ffc, 0x03fe, 0x00fe, 0x007c, 0x007d, 0x001c, 0x000c, 0x0002, 0x0003,
+ 0x0000, 0x0004, 0x0005, 0x000d, 0x001d, 0x001e, 0x007e, 0x01fe, 0x07fe, 0x3ffd, 0x3ffe, 0x3fff},
+};
+
+
+static const uint8_t cvh_huffbits0[191] = {
+ 1, 4, 6, 6, 7, 7, 8, 8, 8, 9, 9, 10,
+ 11, 11, 4, 5, 6, 7, 7, 8, 8, 9, 9, 9,
+ 9, 10, 11, 11, 5, 6, 7, 8, 8, 9, 9, 9,
+ 9, 10, 10, 10, 11, 12, 6, 7, 8, 9, 9, 9,
+ 9, 10, 10, 10, 10, 11, 12, 13, 7, 7, 8, 9,
+ 9, 9, 10, 10, 10, 10, 11, 11, 12, 13, 8, 8,
+ 9, 9, 9, 10, 10, 10, 10, 11, 11, 12, 13, 14,
+ 8, 8, 9, 9, 10, 10, 11, 11, 11, 12, 12, 13,
+ 13, 15, 8, 8, 9, 9, 10, 10, 11, 11, 11, 12,
+ 12, 13, 14, 15, 9, 9, 9, 10, 10, 10, 11, 11,
+ 12, 13, 12, 14, 15, 16, 9, 9, 10, 10, 10, 10,
+ 11, 12, 12, 14, 14, 16, 16, 0, 9, 9, 10, 10,
+ 11, 11, 12, 13, 13, 14, 14, 15, 0, 0, 10, 10,
+ 10, 11, 11, 12, 12, 13, 15, 15, 16, 0, 0, 0,
+ 11, 11, 11, 12, 13, 13, 13, 15, 16, 16, 0, 0,
+ 0, 0, 11, 11, 12, 13, 13, 14, 15, 16, 16,
+};
+
+static const uint16_t cvh_huffcodes0[191] = {
+ 0x0000,0x0008,0x002c,0x002d,0x0062,0x0063,0x00d4,0x00d5,0x00d6,0x01c6,0x01c7,0x03ca,
+ 0x07d6,0x07d7,0x0009,0x0014,0x002e,0x0064,0x0065,0x00d7,0x00d8,0x01c8,0x01c9,0x01ca,
+ 0x01cb,0x03cb,0x07d8,0x07d9,0x0015,0x002f,0x0066,0x00d9,0x00da,0x01cc,0x01cd,0x01ce,
+ 0x01cf,0x03cc,0x03cd,0x03ce,0x07da,0x0fe4,0x0030,0x0067,0x00db,0x01d0,0x01d1,0x01d2,
+ 0x01d3,0x03cf,0x03d0,0x03d1,0x03d2,0x07db,0x0fe5,0x1fea,0x0068,0x0069,0x00dc,0x01d4,
+ 0x01d5,0x01d6,0x03d3,0x03d4,0x03d5,0x03d6,0x07dc,0x07dd,0x0fe6,0x1feb,0x00dd,0x00de,
+ 0x01d7,0x01d8,0x01d9,0x03d7,0x03d8,0x03d9,0x03da,0x07de,0x07df,0x0fe7,0x1fec,0x3ff2,
+ 0x00df,0x00e0,0x01da,0x01db,0x03db,0x03dc,0x07e0,0x07e1,0x07e2,0x0fe8,0x0fe9,0x1fed,
+ 0x1fee,0x7ff4,0x00e1,0x00e2,0x01dc,0x01dd,0x03dd,0x03de,0x07e3,0x07e4,0x07e5,0x0fea,
+ 0x0feb,0x1fef,0x3ff3,0x7ff5,0x01de,0x01df,0x01e0,0x03df,0x03e0,0x03e1,0x07e6,0x07e7,
+ 0x0fec,0x1ff0,0x0fed,0x3ff4,0x7ff6,0xfff8,0x01e1,0x01e2,0x03e2,0x03e3,0x03e4,0x03e5,
+ 0x07e8,0x0fee,0x0fef,0x3ff5,0x3ff6,0xfff9,0xfffa,0xfffa,0x01e3,0x01e4,0x03e6,0x03e7,
+ 0x07e9,0x07ea,0x0ff0,0x1ff1,0x1ff2,0x3ff7,0x3ff8,0x7ff7,0x7ff7,0xfffa,0x03e8,0x03e9,
+ 0x03ea,0x07eb,0x07ec,0x0ff1,0x0ff2,0x1ff3,0x7ff8,0x7ff9,0xfffb,0x3ff8,0x7ff7,0x7ff7,
+ 0x07ed,0x07ee,0x07ef,0x0ff3,0x1ff4,0x1ff5,0x1ff6,0x7ffa,0xfffc,0xfffd,0xfffb,0xfffb,
+ 0x3ff8,0x7ff7,0x07f0,0x07f1,0x0ff4,0x1ff7,0x1ff8,0x3ff9,0x7ffb,0xfffe,0xffff,
+};
+
+
+static const uint8_t cvh_huffbits1[97] = {
+ 1, 4, 5, 6, 7, 8, 8, 9, 10, 10, 4, 5,
+ 6, 7, 7, 8, 8, 9, 9, 11, 5, 5, 6, 7,
+ 8, 8, 9, 9, 10, 11, 6, 6, 7, 8, 8, 9,
+ 9, 10, 11, 12, 7, 7, 8, 8, 9, 9, 10, 11,
+ 11, 13, 8, 8, 8, 9, 9, 10, 10, 11, 12, 14,
+ 8, 8, 8, 9, 10, 11, 11, 12, 13, 15, 9, 9,
+ 9, 10, 11, 12, 12, 14, 14, 0, 9, 9, 9, 10,
+ 11, 12, 14, 16, 0, 0, 10, 10, 11, 12, 13, 14,
+ 16,
+};
+
+
+static const uint16_t cvh_huffcodes1[97] = {
+ 0x0000,0x0008,0x0014,0x0030,0x006a,0x00e2,0x00e3,0x01e4,0x03ec,0x03ed,0x0009,0x0015,
+ 0x0031,0x006b,0x006c,0x00e4,0x00e5,0x01e5,0x01e6,0x07f0,0x0016,0x0017,0x0032,0x006d,
+ 0x00e6,0x00e7,0x01e7,0x01e8,0x03ee,0x07f1,0x0033,0x0034,0x006e,0x00e8,0x00e9,0x01e9,
+ 0x01ea,0x03ef,0x07f2,0x0ff6,0x006f,0x0070,0x00ea,0x00eb,0x01eb,0x01ec,0x03f0,0x07f3,
+ 0x07f4,0x1ffa,0x00ec,0x00ed,0x00ee,0x01ed,0x01ee,0x03f1,0x03f2,0x07f5,0x0ff7,0x3ffa,
+ 0x00ef,0x00f0,0x00f1,0x01ef,0x03f3,0x07f6,0x07f7,0x0ff8,0x1ffb,0x7ffe,0x01f0,0x01f1,
+ 0x01f2,0x03f4,0x07f8,0x0ff9,0x0ffa,0x3ffb,0x3ffc,0x0000,0x01f3,0x01f4,0x01f5,0x03f5,
+ 0x07f9,0x0ffb,0x3ffd,0xfffe,0x0000,0x0000,0x03f6,0x03f7,0x07fa,0x0ffc,0x1ffc,0x3ffe,
+ 0xffff,
+};
+
+static const uint8_t cvh_huffbits2[48] = {
+ 1, 4, 5, 7, 8, 9, 10, 3, 4, 5, 7, 8,
+ 9, 10, 5, 5, 6, 7, 8, 10, 10, 7, 6, 7,
+ 8, 9, 10, 12, 8, 8, 8, 9, 10, 12, 14, 8,
+ 9, 9, 10, 11, 15, 16, 9, 10, 11, 12, 13, 16,
+};
+
+static const uint16_t cvh_huffcodes2[48] = {
+ 0x0000,0x000a,0x0018,0x0074,0x00f2,0x01f4,0x03f6,0x0004,0x000b,0x0019,0x0075,0x00f3,
+ 0x01f5,0x03f7,0x001a,0x001b,0x0038,0x0076,0x00f4,0x03f8,0x03f9,0x0077,0x0039,0x0078,
+ 0x00f5,0x01f6,0x03fa,0x0ffc,0x00f6,0x00f7,0x00f8,0x01f7,0x03fb,0x0ffd,0x3ffe,0x00f9,
+ 0x01f8,0x01f9,0x03fc,0x07fc,0x7ffe,0xfffe,0x01fa,0x03fd,0x07fd,0x0ffe,0x1ffe,0xffff,
+};
+
+static const uint8_t cvh_huffbits3[607] = {
+ 2, 4, 6, 8, 10, 5, 5, 6, 8, 10, 7, 8,
+ 8, 10, 12, 9, 9, 10, 12, 15, 10, 11, 13, 16,
+ 16, 5, 6, 8, 10, 11, 5, 6, 8, 10, 12, 7,
+ 7, 8, 10, 13, 9, 9, 10, 12, 15, 12, 11, 13,
+ 16, 16, 7, 9, 10, 12, 15, 7, 8, 10, 12, 13,
+ 9, 9, 11, 13, 16, 11, 11, 12, 14, 16, 12, 12,
+ 14, 16, 0, 9, 11, 12, 16, 16, 9, 10, 13, 15,
+ 16, 10, 11, 12, 16, 16, 13, 13, 16, 16, 16, 16,
+ 16, 15, 16, 0, 11, 13, 16, 16, 15, 11, 13, 15,
+ 16, 16, 13, 13, 16, 16, 0, 14, 16, 16, 16, 0,
+ 16, 16, 0, 0, 0, 4, 6, 8, 10, 13, 6, 6,
+ 8, 10, 13, 9, 8, 10, 12, 16, 10, 10, 11, 15,
+ 16, 13, 12, 14, 16, 16, 5, 6, 8, 11, 13, 6,
+ 6, 8, 10, 13, 8, 8, 9, 11, 14, 10, 10, 12,
+ 12, 16, 13, 12, 13, 15, 16, 7, 8, 9, 12, 16,
+ 7, 8, 10, 12, 14, 9, 9, 10, 13, 16, 11, 10,
+ 12, 15, 16, 13, 13, 16, 16, 0, 9, 11, 13, 16,
+ 16, 9, 10, 12, 15, 16, 10, 11, 13, 16, 16, 13,
+ 12, 16, 16, 16, 16, 16, 16, 16, 0, 11, 13, 16,
+ 16, 16, 11, 13, 16, 16, 16, 12, 13, 15, 16, 0,
+ 16, 16, 16, 16, 0, 16, 16, 0, 0, 0, 6, 8,
+ 11, 13, 16, 8, 8, 10, 12, 16, 11, 10, 11, 13,
+ 16, 12, 13, 13, 15, 16, 16, 16, 14, 16, 0, 6,
+ 8, 10, 13, 16, 8, 8, 10, 12, 16, 10, 10, 11,
+ 13, 16, 13, 12, 13, 16, 16, 14, 14, 14, 16, 0,
+ 8, 9, 11, 13, 16, 8, 9, 11, 16, 14, 10, 10,
+ 12, 15, 16, 12, 12, 13, 16, 16, 15, 16, 16, 16,
+ 0, 10, 12, 15, 16, 16, 10, 12, 12, 14, 16, 12,
+ 12, 13, 16, 16, 14, 15, 16, 16, 0, 16, 16, 16,
+ 0, 0, 12, 15, 15, 16, 0, 13, 13, 16, 16, 0,
+ 14, 16, 16, 16, 0, 16, 16, 16, 0, 0, 0, 0,
+ 0, 0, 0, 8, 10, 13, 15, 16, 10, 11, 13, 16,
+ 16, 13, 13, 14, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 0, 8, 10, 11, 15, 16, 9, 10, 12,
+ 16, 16, 12, 12, 15, 16, 16, 16, 14, 16, 16, 16,
+ 16, 16, 16, 16, 0, 9, 11, 14, 16, 16, 10, 11,
+ 13, 16, 16, 14, 13, 14, 16, 16, 16, 15, 15, 16,
+ 0, 16, 16, 16, 0, 0, 11, 13, 16, 16, 16, 11,
+ 13, 15, 16, 16, 13, 16, 16, 16, 0, 16, 16, 16,
+ 16, 0, 16, 16, 0, 0, 0, 15, 16, 16, 16, 0,
+ 14, 16, 16, 16, 0, 16, 16, 16, 0, 0, 16, 16,
+ 0, 0, 0, 0, 0, 0, 0, 0, 9, 13, 16, 16,
+ 16, 11, 13, 16, 16, 16, 14, 15, 16, 16, 0, 15,
+ 16, 16, 16, 0, 16, 16, 0, 0, 0, 9, 13, 15,
+ 15, 16, 12, 13, 14, 16, 16, 16, 15, 16, 16, 0,
+ 16, 16, 16, 16, 0, 16, 16, 0, 0, 0, 11, 13,
+ 15, 16, 0, 12, 14, 16, 16, 0, 16, 16, 16, 16,
+ 0, 16, 16, 16, 0, 0, 0, 0, 0, 0, 0, 16,
+ 16, 16, 16, 0, 16, 16, 16, 16, 0, 16, 16, 16,
+ 0, 0, 16, 16, 0, 0, 0, 0, 0, 0, 0, 0,
+ 16, 16, 0, 0, 0, 16, 16,
+};
+
+
+static const uint16_t cvh_huffcodes3[607] = {
+ 0x0000,0x0004,0x0022,0x00c6,0x03b0,0x000c,0x000d,0x0023,0x00c7,0x03b1,0x005c,0x00c8,
+ 0x00c9,0x03b2,0x0fa4,0x01c2,0x01c3,0x03b3,0x0fa5,0x7f72,0x03b4,0x07b2,0x1f9a,0xff24,
+ 0xff25,0x000e,0x0024,0x00ca,0x03b5,0x07b3,0x000f,0x0025,0x00cb,0x03b6,0x0fa6,0x005d,
+ 0x005e,0x00cc,0x03b7,0x1f9b,0x01c4,0x01c5,0x03b8,0x0fa7,0x7f73,0x0fa8,0x07b4,0x1f9c,
+ 0xff26,0xff27,0x005f,0x01c6,0x03b9,0x0fa9,0x7f74,0x0060,0x00cd,0x03ba,0x0faa,0x1f9d,
+ 0x01c7,0x01c8,0x07b5,0x1f9e,0xff28,0x07b6,0x07b7,0x0fab,0x3fa2,0xff29,0x0fac,0x0fad,
+ 0x3fa3,0xff2a,0x3fa2,0x01c9,0x07b8,0x0fae,0xff2b,0xff2c,0x01ca,0x03bb,0x1f9f,0x7f75,
+ 0xff2d,0x03bc,0x07b9,0x0faf,0xff2e,0xff2f,0x1fa0,0x1fa1,0xff30,0xff31,0xff32,0xff33,
+ 0xff34,0x7f76,0xff35,0xff31,0x07ba,0x1fa2,0xff36,0xff37,0x7f77,0x07bb,0x1fa3,0x7f78,
+ 0xff38,0xff39,0x1fa4,0x1fa5,0xff3a,0xff3b,0xff2e,0x3fa4,0xff3c,0xff3d,0xff3e,0xff31,
+ 0xff3f,0xff40,0xff30,0xff31,0xff31,0x0005,0x0026,0x00ce,0x03bd,0x1fa6,0x0027,0x0028,
+ 0x00cf,0x03be,0x1fa7,0x01cb,0x00d0,0x03bf,0x0fb0,0xff41,0x03c0,0x03c1,0x07bc,0x7f79,
+ 0xff42,0x1fa8,0x0fb1,0x3fa5,0xff43,0xff44,0x0010,0x0029,0x00d1,0x07bd,0x1fa9,0x002a,
+ 0x002b,0x00d2,0x03c2,0x1faa,0x00d3,0x00d4,0x01cc,0x07be,0x3fa6,0x03c3,0x03c4,0x0fb2,
+ 0x0fb3,0xff45,0x1fab,0x0fb4,0x1fac,0x7f7a,0xff46,0x0061,0x00d5,0x01cd,0x0fb5,0xff47,
+ 0x0062,0x00d6,0x03c5,0x0fb6,0x3fa7,0x01ce,0x01cf,0x03c6,0x1fad,0xff48,0x07bf,0x03c7,
+ 0x0fb7,0x7f7b,0xff49,0x1fae,0x1faf,0xff4a,0xff4b,0x7f7b,0x01d0,0x07c0,0x1fb0,0xff4c,
+ 0xff4d,0x01d1,0x03c8,0x0fb8,0x7f7c,0xff4e,0x03c9,0x07c1,0x1fb1,0xff4f,0xff50,0x1fb2,
+ 0x0fb9,0xff51,0xff52,0xff53,0xff54,0xff55,0xff56,0xff57,0xff52,0x07c2,0x1fb3,0xff58,
+ 0xff59,0xff5a,0x07c3,0x1fb4,0xff5b,0xff5c,0xff5d,0x0fba,0x1fb5,0x7f7d,0xff5e,0xff4f,
+ 0xff5f,0xff60,0xff61,0xff62,0xff52,0xff63,0xff64,0xff51,0xff52,0xff52,0x002c,0x00d7,
+ 0x07c4,0x1fb6,0xff65,0x00d8,0x00d9,0x03ca,0x0fbb,0xff66,0x07c5,0x03cb,0x07c6,0x1fb7,
+ 0xff67,0x0fbc,0x1fb8,0x1fb9,0x7f7e,0xff68,0xff69,0xff6a,0x3fa8,0xff6b,0x7f7e,0x002d,
+ 0x00da,0x03cc,0x1fba,0xff6c,0x00db,0x00dc,0x03cd,0x0fbd,0xff6d,0x03ce,0x03cf,0x07c7,
+ 0x1fbb,0xff6e,0x1fbc,0x0fbe,0x1fbd,0xff6f,0xff70,0x3fa9,0x3faa,0x3fab,0xff71,0xff6f,
+ 0x00dd,0x01d2,0x07c8,0x1fbe,0xff72,0x00de,0x01d3,0x07c9,0xff73,0x3fac,0x03d0,0x03d1,
+ 0x0fbf,0x7f7f,0xff74,0x0fc0,0x0fc1,0x1fbf,0xff75,0xff76,0x7f80,0xff77,0xff78,0xff79,
+ 0xff75,0x03d2,0x0fc2,0x7f81,0xff7a,0xff7b,0x03d3,0x0fc3,0x0fc4,0x3fad,0xff7c,0x0fc5,
+ 0x0fc6,0x1fc0,0xff7d,0xff7e,0x3fae,0x7f82,0xff7f,0xff80,0xff80,0xff81,0xff82,0xff83,
+ 0xff80,0xff80,0x0fc7,0x7f83,0x7f84,0xff84,0xff7a,0x1fc1,0x1fc2,0xff85,0xff86,0x3fad,
+ 0x3faf,0xff87,0xff88,0xff89,0xff7d,0xff8a,0xff8b,0xff8c,0xff80,0xff80,0x3fae,0x7f82,
+ 0xff7f,0xff80,0xff80,0x00df,0x03d4,0x1fc3,0x7f85,0xff8d,0x03d5,0x07ca,0x1fc4,0xff8e,
+ 0xff8f,0x1fc5,0x1fc6,0x3fb0,0xff90,0xff91,0xff92,0xff93,0xff94,0xff95,0xff96,0xff97,
+ 0xff98,0xff99,0xff9a,0xff95,0x00e0,0x03d6,0x07cb,0x7f86,0xff9b,0x01d4,0x03d7,0x0fc8,
+ 0xff9c,0xff9d,0x0fc9,0x0fca,0x7f87,0xff9e,0xff9f,0xffa0,0x3fb1,0xffa1,0xffa2,0xffa3,
+ 0xffa4,0xffa5,0xffa6,0xffa7,0xffa2,0x01d5,0x07cc,0x3fb2,0xffa8,0xffa9,0x03d8,0x07cd,
+ 0x1fc7,0xffaa,0xffab,0x3fb3,0x1fc8,0x3fb4,0xffac,0xffad,0xffae,0x7f88,0x7f89,0xffaf,
+ 0xffaf,0xffb0,0xffb1,0xffb2,0xffaf,0xffaf,0x07ce,0x1fc9,0xffb3,0xffb4,0xffb5,0x07cf,
+ 0x1fca,0x7f8a,0xffb6,0xffb7,0x1fcb,0xffb8,0xffb9,0xffba,0xffba,0xffbb,0xffbc,0xffbd,
+ 0xffbe,0xffbe,0xffbf,0xffc0,0xffbd,0xffbe,0xffbe,0x7f8b,0xffc1,0xffc2,0xffc3,0xffb4,
+ 0x3fb5,0xffc4,0xffc5,0xffc6,0xffb6,0xffc7,0xffc8,0xffc9,0xffba,0xffba,0xffca,0xffcb,
+ 0xffbd,0xffbe,0xffbe,0xffbb,0xffbc,0xffbd,0xffbe,0xffbe,0x01d6,0x1fcc,0xffcc,0xffcd,
+ 0xffce,0x07d0,0x1fcd,0xffcf,0xffd0,0xffd1,0x3fb6,0x7f8c,0xffd2,0xffd3,0xff90,0x7f8d,
+ 0xffd4,0xffd5,0xffd6,0xff95,0xffd7,0xffd8,0xff94,0xff95,0xff95,0x01d7,0x1fce,0x7f8e,
+ 0x7f8f,0xffd9,0x0fcb,0x1fcf,0x3fb7,0xffda,0xffdb,0xffdc,0x7f90,0xffdd,0xffde,0xff9e,
+ 0xffdf,0xffe0,0xffe1,0xffe2,0xffa2,0xffe3,0xffe4,0xffa1,0xffa2,0xffa2,0x07d1,0x1fd0,
+ 0x7f91,0xffe5,0xffa8,0x0fcc,0x3fb8,0xffe6,0xffe7,0xffaa,0xffe8,0xffe9,0xffea,0xffeb,
+ 0xffac,0xffec,0xffed,0xffee,0xffaf,0xffaf,0xffae,0x7f88,0x7f89,0xffaf,0xffaf,0xffef,
+ 0xfff0,0xfff1,0xfff2,0xffb4,0xfff3,0xfff4,0xfff5,0xfff6,0xffb6,0xfff7,0xfff8,0xfff9,
+ 0xffba,0xffba,0xfffa,0xfffb,0xffbd,0xffbe,0xffbe,0xffbb,0xffbc,0xffbd,0xffbe,0xffbe,
+ 0xfffc,0xfffd,0xffb3,0xffb4,0xffb4,0xfffe,0xffff,
+};
+
+static const uint8_t cvh_huffbits4[246] = {
+ 2, 4, 7, 10, 4, 5, 7, 10, 7, 8, 10, 14,
+ 11, 11, 15, 15, 4, 5, 9, 12, 5, 5, 8, 12,
+ 8, 7, 10, 15, 11, 11, 15, 15, 7, 9, 12, 15,
+ 8, 8, 12, 15, 10, 10, 13, 15, 14, 14, 15, 0,
+ 11, 13, 15, 15, 11, 13, 15, 15, 14, 15, 15, 0,
+ 15, 15, 0, 0, 4, 5, 9, 13, 5, 6, 9, 13,
+ 9, 9, 11, 15, 14, 13, 15, 15, 4, 6, 9, 12,
+ 5, 6, 9, 13, 9, 8, 11, 15, 13, 12, 15, 15,
+ 7, 9, 12, 15, 7, 8, 11, 15, 10, 10, 14, 15,
+ 14, 15, 15, 0, 10, 12, 15, 15, 11, 13, 15, 15,
+ 15, 15, 15, 0, 15, 15, 0, 0, 6, 9, 13, 14,
+ 8, 9, 12, 15, 12, 12, 15, 15, 15, 15, 15, 0,
+ 7, 9, 13, 15, 8, 9, 12, 15, 11, 12, 15, 15,
+ 15, 15, 15, 0, 9, 11, 15, 15, 9, 11, 15, 15,
+ 14, 14, 15, 0, 15, 15, 0, 0, 14, 15, 15, 0,
+ 14, 15, 15, 0, 15, 15, 0, 0, 0, 0, 0, 0,
+ 9, 12, 15, 15, 12, 13, 15, 15, 15, 15, 15, 0,
+ 15, 15, 0, 0, 10, 12, 15, 15, 12, 14, 15, 15,
+ 15, 15, 15, 0, 15, 15, 0, 0, 14, 15, 15, 0,
+ 15, 15, 15, 0, 15, 15, 0, 0, 0, 0, 0, 0,
+ 15, 15, 0, 0, 15, 15,
+};
+
+
+static const uint16_t cvh_huffcodes4[246] = {
+ 0x0000,0x0004,0x006c,0x03e6,0x0005,0x0012,0x006d,0x03e7,0x006e,0x00e8,0x03e8,0x3fc4,
+ 0x07e0,0x07e1,0x7fa4,0x7fa5,0x0006,0x0013,0x01e2,0x0fda,0x0014,0x0015,0x00e9,0x0fdb,
+ 0x00ea,0x006f,0x03e9,0x7fa6,0x07e2,0x07e3,0x7fa7,0x7fa8,0x0070,0x01e3,0x0fdc,0x7fa9,
+ 0x00eb,0x00ec,0x0fdd,0x7faa,0x03ea,0x03eb,0x1fd6,0x7fab,0x3fc5,0x3fc6,0x7fac,0x1fd6,
+ 0x07e4,0x1fd7,0x7fad,0x7fae,0x07e5,0x1fd8,0x7faf,0x7fb0,0x3fc7,0x7fb1,0x7fb2,0x1fd6,
+ 0x7fb3,0x7fb4,0x1fd6,0x1fd6,0x0007,0x0016,0x01e4,0x1fd9,0x0017,0x0032,0x01e5,0x1fda,
+ 0x01e6,0x01e7,0x07e6,0x7fb5,0x3fc8,0x1fdb,0x7fb6,0x7fb7,0x0008,0x0033,0x01e8,0x0fde,
+ 0x0018,0x0034,0x01e9,0x1fdc,0x01ea,0x00ed,0x07e7,0x7fb8,0x1fdd,0x0fdf,0x7fb9,0x7fba,
+ 0x0071,0x01eb,0x0fe0,0x7fbb,0x0072,0x00ee,0x07e8,0x7fbc,0x03ec,0x03ed,0x3fc9,0x7fbd,
+ 0x3fca,0x7fbe,0x7fbf,0x3fc9,0x03ee,0x0fe1,0x7fc0,0x7fc1,0x07e9,0x1fde,0x7fc2,0x7fc3,
+ 0x7fc4,0x7fc5,0x7fc6,0x3fc9,0x7fc7,0x7fc8,0x3fc9,0x3fc9,0x0035,0x01ec,0x1fdf,0x3fcb,
+ 0x00ef,0x01ed,0x0fe2,0x7fc9,0x0fe3,0x0fe4,0x7fca,0x7fcb,0x7fcc,0x7fcd,0x7fce,0x7fca,
+ 0x0073,0x01ee,0x1fe0,0x7fcf,0x00f0,0x01ef,0x0fe5,0x7fd0,0x07ea,0x0fe6,0x7fd1,0x7fd2,
+ 0x7fd3,0x7fd4,0x7fd5,0x7fd1,0x01f0,0x07eb,0x7fd6,0x7fd7,0x01f1,0x07ec,0x7fd8,0x7fd9,
+ 0x3fcc,0x3fcd,0x7fda,0x7fda,0x7fdb,0x7fdc,0x7fda,0x7fda,0x3fce,0x7fdd,0x7fde,0x7fd6,
+ 0x3fcf,0x7fdf,0x7fe0,0x7fd8,0x7fe1,0x7fe2,0x7fda,0x7fda,0x3fcc,0x3fcd,0x7fda,0x7fda,
+ 0x01f2,0x0fe7,0x7fe3,0x7fe4,0x0fe8,0x1fe1,0x7fe5,0x7fe6,0x7fe7,0x7fe8,0x7fe9,0x7fca,
+ 0x7fea,0x7feb,0x7fca,0x7fca,0x03ef,0x0fe9,0x7fec,0x7fed,0x0fea,0x3fd0,0x7fee,0x7fef,
+ 0x7ff0,0x7ff1,0x7ff2,0x7fd1,0x7ff3,0x7ff4,0x7fd1,0x7fd1,0x3fd1,0x7ff5,0x7ff6,0x7fd6,
+ 0x7ff7,0x7ff8,0x7ff9,0x7fd8,0x7ffa,0x7ffb,0x7fda,0x7fda,0x3fcc,0x3fcd,0x7fda,0x7fda,
+ 0x7ffc,0x7ffd,0x7fd6,0x7fd6,0x7ffe,0x7fff,
+};
+
+
+static const uint8_t cvh_huffbits5[230] = {
+ 2, 4, 8, 4, 5, 9, 9, 10, 14, 4, 6, 11,
+ 5, 6, 12, 10, 11, 15, 9, 11, 15, 10, 13, 15,
+ 14, 15, 0, 4, 6, 12, 6, 7, 12, 12, 12, 15,
+ 5, 7, 13, 6, 7, 13, 12, 13, 15, 10, 12, 15,
+ 11, 13, 15, 15, 15, 0, 8, 13, 15, 11, 12, 15,
+ 15, 15, 0, 10, 13, 15, 12, 15, 15, 15, 15, 0,
+ 15, 15, 0, 15, 15, 0, 0, 0, 0, 4, 5, 11,
+ 5, 7, 12, 11, 12, 15, 6, 7, 13, 7, 8, 14,
+ 12, 14, 15, 11, 13, 15, 12, 13, 15, 15, 15, 0,
+ 5, 6, 13, 7, 8, 15, 12, 14, 15, 6, 8, 14,
+ 7, 8, 15, 14, 15, 15, 12, 12, 15, 12, 13, 15,
+ 15, 15, 0, 9, 13, 15, 12, 13, 15, 15, 15, 0,
+ 11, 13, 15, 13, 13, 15, 15, 15, 0, 14, 15, 0,
+ 15, 15, 0, 0, 0, 0, 8, 10, 15, 11, 12, 15,
+ 15, 15, 0, 10, 12, 15, 12, 13, 15, 15, 15, 0,
+ 14, 15, 0, 15, 15, 0, 0, 0, 0, 8, 12, 15,
+ 12, 13, 15, 15, 15, 0, 11, 13, 15, 13, 15, 15,
+ 15, 15, 0, 15, 15, 0, 15, 15, 0, 0, 0, 0,
+ 14, 15, 0, 15, 15, 0, 0, 0, 0, 15, 15, 0,
+ 15, 15,
+};
+
+
+
+static const uint16_t cvh_huffcodes5[230] = {
+ 0x0000,0x0004,0x00f0,0x0005,0x0012,0x01f0,0x01f1,0x03e8,0x3fce,0x0006,0x0030,0x07de,
+ 0x0013,0x0031,0x0fd2,0x03e9,0x07df,0x7fb0,0x01f2,0x07e0,0x7fb1,0x03ea,0x1fd2,0x7fb2,
+ 0x3fcf,0x7fb3,0x0031,0x0007,0x0032,0x0fd3,0x0033,0x0070,0x0fd4,0x0fd5,0x0fd6,0x7fb4,
+ 0x0014,0x0071,0x1fd3,0x0034,0x0072,0x1fd4,0x0fd7,0x1fd5,0x7fb5,0x03eb,0x0fd8,0x7fb6,
+ 0x07e1,0x1fd6,0x7fb7,0x7fb8,0x7fb9,0x0072,0x00f1,0x1fd7,0x7fba,0x07e2,0x0fd9,0x7fbb,
+ 0x7fbc,0x7fbd,0x0070,0x03ec,0x1fd8,0x7fbe,0x0fda,0x7fbf,0x7fc0,0x7fc1,0x7fc2,0x0072,
+ 0x7fc3,0x7fc4,0x0071,0x7fc5,0x7fc6,0x0072,0x0034,0x0072,0x0072,0x0008,0x0015,0x07e3,
+ 0x0016,0x0073,0x0fdb,0x07e4,0x0fdc,0x7fc7,0x0035,0x0074,0x1fd9,0x0075,0x00f2,0x3fd0,
+ 0x0fdd,0x3fd1,0x7fc8,0x07e5,0x1fda,0x7fc9,0x0fde,0x1fdb,0x7fca,0x7fcb,0x7fcc,0x00f2,
+ 0x0017,0x0036,0x1fdc,0x0076,0x00f3,0x7fcd,0x0fdf,0x3fd2,0x7fce,0x0037,0x00f4,0x3fd3,
+ 0x0077,0x00f5,0x7fcf,0x3fd4,0x7fd0,0x7fd1,0x0fe0,0x0fe1,0x7fd2,0x0fe2,0x1fdd,0x7fd3,
+ 0x7fd4,0x7fd5,0x00f5,0x01f3,0x1fde,0x7fd6,0x0fe3,0x1fdf,0x7fd7,0x7fd8,0x7fd9,0x00f3,
+ 0x07e6,0x1fe0,0x7fda,0x1fe1,0x1fe2,0x7fdb,0x7fdc,0x7fdd,0x00f5,0x3fd5,0x7fde,0x00f4,
+ 0x7fdf,0x7fe0,0x00f5,0x0077,0x00f5,0x00f5,0x00f6,0x03ed,0x7fe1,0x07e7,0x0fe4,0x7fe2,
+ 0x7fe3,0x7fe4,0x0073,0x03ee,0x0fe5,0x7fe5,0x0fe6,0x1fe3,0x7fe6,0x7fe7,0x7fe8,0x00f2,
+ 0x3fd6,0x7fe9,0x0074,0x7fea,0x7feb,0x00f2,0x0075,0x00f2,0x00f2,0x00f7,0x0fe7,0x7fec,
+ 0x0fe8,0x1fe4,0x7fed,0x7fee,0x7fef,0x00f3,0x07e8,0x1fe5,0x7ff0,0x1fe6,0x7ff1,0x7ff2,
+ 0x7ff3,0x7ff4,0x00f5,0x7ff5,0x7ff6,0x00f4,0x7ff7,0x7ff8,0x00f5,0x0077,0x00f5,0x00f5,
+ 0x3fd7,0x7ff9,0x0036,0x7ffa,0x7ffb,0x00f3,0x0076,0x00f3,0x00f3,0x7ffc,0x7ffd,0x0000,
+ 0x7ffe,0x7fff,
+};
+
+
+static const uint8_t cvh_huffbits6[32] = {
+ 1, 4, 4, 6, 4, 6, 6, 8, 4, 6, 6, 8,
+ 6, 9, 8, 10, 4, 6, 7, 8, 6, 9, 8, 11,
+ 6, 9, 8, 10, 8, 10, 9, 11,
+};
+
+static const uint16_t cvh_huffcodes6[32] = {
+ 0x0000,0x0008,0x0009,0x0034,0x000a,0x0035,0x0036,0x00f6,0x000b,0x0037,0x0038,0x00f7,
+ 0x0039,0x01fa,0x00f8,0x03fc,0x000c,0x003a,0x007a,0x00f9,0x003b,0x01fb,0x00fa,0x07fe,
+ 0x003c,0x01fc,0x00fb,0x03fd,0x00fc,0x03fe,0x01fd,0x07ff,
+};
+
+static const uint16_t* const cvh_huffcodes[7] = {
+ cvh_huffcodes0, cvh_huffcodes1, cvh_huffcodes2, cvh_huffcodes3,
+ cvh_huffcodes4, cvh_huffcodes5, cvh_huffcodes6,
+};
+
+static const uint8_t* const cvh_huffbits[7] = {
+ cvh_huffbits0, cvh_huffbits1, cvh_huffbits2, cvh_huffbits3,
+ cvh_huffbits4, cvh_huffbits5, cvh_huffbits6,
+};
+
+
+static const uint16_t ccpl_huffcodes2[3] = {
+ 0x02,0x00,0x03,
+};
+
+static const uint16_t ccpl_huffcodes3[7] = {
+ 0x3e,0x1e,0x02,0x00,0x06,0x0e,0x3f,
+};
+
+static const uint16_t ccpl_huffcodes4[15] = {
+ 0xfc,0xfd,0x7c,0x3c,0x1c,0x0c,0x04,0x00,0x05,0x0d,0x1d,0x3d,
+ 0x7d,0xfe,0xff,
+};
+
+static const uint16_t ccpl_huffcodes5[31] = {
+ 0x03f8,0x03f9,0x03fa,0x03fb,0x01f8,0x01f9,0x00f8,0x00f9,0x0078,0x0079,0x0038,0x0039,
+ 0x0018,0x0019,0x0004,0x0000,0x0005,0x001a,0x001b,0x003a,0x003b,0x007a,0x007b,0x00fa,
+ 0x00fb,0x01fa,0x01fb,0x03fc,0x03fd,0x03fe,0x03ff,
+};
+
+static const uint16_t ccpl_huffcodes6[63] = {
+ 0x0004,0x0005,0x0005,0x0006,0x0006,0x0007,0x0007,0x0007,0x0007,0x0008,0x0008,0x0008,
+ 0x0008,0x0009,0x0009,0x0009,0x0009,0x000a,0x000a,0x000a,0x000a,0x000a,0x000b,0x000b,
+ 0x000b,0x000b,0x000c,0x000d,0x000e,0x000e,0x0010,0x0000,0x000a,0x0018,0x0019,0x0036,
+ 0x0037,0x0074,0x0075,0x0076,0x0077,0x00f4,0x00f5,0x00f6,0x00f7,0x01f5,0x01f6,0x01f7,
+ 0x01f8,0x03f6,0x03f7,0x03f8,0x03f9,0x03fa,0x07fa,0x07fb,0x07fc,0x07fd,0x0ffd,0x1ffd,
+ 0x3ffd,0x3ffe,0xffff,
+};
+
+static const uint8_t ccpl_huffbits2[3] = {
+ 2,1,2,
+};
+
+static const uint8_t ccpl_huffbits3[7] = {
+ 6,5,2,1,3,4,6,
+};
+
+static const uint8_t ccpl_huffbits4[15] = {
+ 8,8,7,6,5,4,3,1,3,4,5,6,7,8,8,
+};
+
+static const uint8_t ccpl_huffbits5[31] = {
+ 10,10,10,10,9,9,8,8,7,7,6,6,
+ 5,5,3,1,3,5,5,6,6,7,7,8,
+ 8,9,9,10,10,10,10,
+};
+
+static const uint8_t ccpl_huffbits6[63] = {
+ 16,15,14,13,12,11,11,11,11,10,10,10,
+ 10,9,9,9,9,9,8,8,8,8,7,7,
+ 7,7,6,6,5,5,3,1,4,5,5,6,
+ 6,7,7,7,7,8,8,8,8,9,9,9,
+ 9,10,10,10,10,10,11,11,11,11,12,13,
+ 14,14,16,
+};
+
+static const uint16_t* const ccpl_huffcodes[5] = {
+ ccpl_huffcodes2,ccpl_huffcodes3,
+ ccpl_huffcodes4,ccpl_huffcodes5,ccpl_huffcodes6
+};
+
+static const uint8_t* const ccpl_huffbits[5] = {
+ ccpl_huffbits2,ccpl_huffbits3,
+ ccpl_huffbits4,ccpl_huffbits5,ccpl_huffbits6
+};
+
+
+//Coupling tables
+
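+// cplband maps each of the 51 subbands to one of 20 coupling bands (values 0-19, read directly from the table below)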
+static const int cplband[51] = {
+ 0,1,2,3,4,5,6,7,8,9,
+ 10,11,11,12,12,13,13,14,14,14,
+ 15,15,15,15,16,16,16,16,16,17,
+ 17,17,17,17,17,18,18,18,18,18,
+ 18,18,19,19,19,19,19,19,19,19,
+ 19,
+};
+
+static const float cplscale2[3] = {
+0.953020632266998,0.70710676908493,0.302905440330505,
+};
+
+static const float cplscale3[7] = {
+0.981279790401459,0.936997592449188,0.875934481620789,0.70710676908493,
+0.482430040836334,0.349335819482803,0.192587479948997,
+};
+
+static const float cplscale4[15] = {
+0.991486728191376,0.973249018192291,0.953020632266998,0.930133521556854,
+0.903453230857849,0.870746195316315,0.826180458068848,0.70710676908493,
+0.563405573368073,0.491732746362686,0.428686618804932,0.367221474647522,
+0.302905440330505,0.229752898216248,0.130207896232605,
+};
+
+static const float cplscale5[31] = {
+0.995926380157471,0.987517595291138,0.978726446628571,0.969505727291107,
+0.95979779958725,0.949531257152557,0.938616216182709,0.926936149597168,
+0.914336204528809,0.900602877140045,0.885426938533783,0.868331849575043,
+0.84851086139679,0.824381768703461,0.791833400726318,0.70710676908493,
+0.610737144947052,0.566034197807312,0.529177963733673,0.495983630418777,
+0.464778542518616,0.434642940759659,0.404955863952637,0.375219136476517,
+0.344963222742081,0.313672333955765,0.280692428350449,0.245068684220314,
+0.205169528722763,0.157508864998817,0.0901700109243393,
+};
+
+static const float cplscale6[63] = {
+0.998005926609039,0.993956744670868,0.989822506904602,0.985598564147949,
+0.981279790401459,0.976860702037811,0.972335040569305,0.967696130275726,
+0.962936460971832,0.958047747612000,0.953020632266998,0.947844684123993,
+0.942508161067963,0.936997592449188,0.931297719478607,0.925390899181366,
+0.919256627559662,0.912870943546295,0.906205296516418,0.899225592613220,
+0.891890347003937,0.884148240089417,0.875934481620789,0.867165684700012,
+0.857730865478516,0.847477376461029,0.836184680461884,0.823513329029083,
+0.808890223503113,0.791194140911102,0.767520070075989,0.707106769084930,
+0.641024887561798,0.611565053462982,0.587959706783295,0.567296981811523,
+0.548448026180267,0.530831515789032,0.514098942279816,0.498019754886627,
+0.482430040836334,0.467206478118896,0.452251672744751,0.437485188245773,
+0.422837972640991,0.408248275518417,0.393658757209778,0.379014074802399,
+0.364258885383606,0.349335819482803,0.334183186292648,0.318732559680939,
+0.302905440330505,0.286608695983887,0.269728302955627,0.252119421958923,
+0.233590632677078,0.213876649737358,0.192587479948997,0.169101938605309,
+0.142307326197624,0.109772264957428,0.0631198287010193,
+};
+
+static const float* const cplscales[5] = {
+ cplscale2, cplscale3, cplscale4, cplscale5, cplscale6,
+};
+
+#endif /* AVCODEC_COOKDATA_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/corepng.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/corepng.c
new file mode 100644
index 000000000..04a9fd685
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/corepng.c
@@ -0,0 +1,195 @@
+/*
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+#include "avcodec.h"
+#include "common.h"
+#include "dsputil.h"
+
+#define PNGFrameType_RGB24 0x01
+#define PNGFrameType_YUY2 0x02
+#define PNGFrameType_YV12 0x03
+
+extern AVCodec png_decoder;
+
+typedef struct CorePNGCodecPrivate {
+ int16_t wSize;
+ int8_t bType;
+} CorePNGCodecPrivate;
+
+typedef struct CorePNGcontext{
+ AVCodecContext *avctx;
+ DSPContext dsp;
+ AVFrame picture,prev_picture;
+ CorePNGCodecPrivate private;
+ uint8_t *buf;int buf_size;
+ int shiftX,shiftY;
+ AVCodecContext *decctx;
+ AVFrame decframe;
+} CorePNGcontext;
+
+static int read_image(CorePNGcontext * const a, uint8_t *ptr, int linesize,int flip)
+{
+ int got_picture=0,ret;
+ a->decframe.data[0]=ptr;
+ a->decframe.linesize[0]=linesize*(flip?-1:1);
+ ret=avcodec_decode_video(a->decctx,&a->decframe,&got_picture,a->buf,a->buf_size);
+ if(ret>0){
+ a->buf+=ret;
+ a->buf_size-=ret;
+ }
+ return 0;
+}
+
+static int decode_frame(AVCodecContext *avctx,
+ void *data, int *data_size,
+ uint8_t *buf, int buf_size)
+{
+ CorePNGcontext * const a = avctx->priv_data;
+ int Bpp,num_planes,swapUV=0;
+ AVFrame temp,*p;
+
+ if (!a->decctx) {
+ a->decctx=avcodec_alloc_context();
+ avcodec_open(a->decctx,&png_decoder);
+ avcodec_get_frame_defaults(&a->decframe);
+ }
+
+ temp= a->picture;
+ a->picture= a->prev_picture;
+ a->prev_picture= temp;
+
+ p= &a->picture;
+ avctx->coded_frame= p;
+
+ avctx->flags |= CODEC_FLAG_EMU_EDGE; // alternatively we would have to use our own buffer management
+
+ if(p->data[0])
+ avctx->release_buffer(avctx, p);
+
+ p->reference= 1;
+ if(avctx->get_buffer(avctx, p) < 0){
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return -1;
+ }
+
+ /* special case for last picture */
+ if (buf_size == 0) {
+ return 0;
+ }
+
+ a->buf=buf;a->buf_size=buf_size;
+
+ switch(a->private.bType){
+ case PNGFrameType_RGB24:
+ read_image(a,a->picture.data[0],a->picture.linesize[0],0);
+ Bpp=3;num_planes=1;
+ break;
+ case PNGFrameType_YUY2:
+ swapUV=1;
+ case PNGFrameType_YV12:
+ read_image(a,a->picture.data[0],a->picture.linesize[0],1);
+ read_image(a,a->picture.data[swapUV?1:2],a->picture.linesize[swapUV?1:2],1);
+ read_image(a,a->picture.data[swapUV?2:1],a->picture.linesize[swapUV?2:1],1);
+ Bpp=1;num_planes=3;
+ break;
+ default:
+ return 0;
+ }
+
+    if(avctx->sample_fmt==SAMPLE_I){ //indicates that this is a keyframe, CorePNG doesn't store this info in the stream itself
+ a->picture.key_frame=1;
+ a->picture.pict_type=FF_I_TYPE;
+ }else{
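+        /* delta frame: the freshly decoded planes hold differences, so the previous picture is added back in below */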
+ int i,shiftX=0,shiftY=0;
+ a->picture.key_frame=0;
+ a->picture.pict_type=FF_P_TYPE;
+ for(i=0;i<num_planes;i++,shiftX=a->shiftX,shiftY=a->shiftY){
+ uint8_t *cur=a->picture.data[i],*prev=a->prev_picture.data[i];
+ int y;
+ for(y=0;y<avctx->height>>shiftY;y++,cur+=a->picture.linesize[i],prev+=a->prev_picture.linesize[i])
+ a->dsp.add_bytes(cur,prev,avctx->width*Bpp>>shiftX);
+ }
+ }
+
+ *data_size = sizeof(AVFrame);
+ *(AVFrame*)data = a->picture;
+
+ return buf_size;
+}
+
+static int decode_init(AVCodecContext *avctx){
+
+ CorePNGcontext * const a = avctx->priv_data;
+
+ dsputil_init(&a->dsp, avctx);
+
+ a->avctx= avctx;
+ a->picture.data[0]=a->prev_picture.data[0]=NULL;
+
+ if(avctx->extradata_size == sizeof(CorePNGCodecPrivate))
+ memcpy(&a->private,avctx->extradata,sizeof(CorePNGCodecPrivate));
+ else{
+ a->private.wSize = sizeof(CorePNGCodecPrivate);
+ a->private.bType = PNGFrameType_RGB24;
+ }
+
+ switch(a->private.bType){
+ case PNGFrameType_RGB24:
+ avctx->pix_fmt = PIX_FMT_BGR24;
+ break;
+ case PNGFrameType_YUY2:
+ avctx->pix_fmt = PIX_FMT_YUV422P;
+ break;
+ case PNGFrameType_YV12:
+ avctx->pix_fmt = PIX_FMT_YUV420P;
+ break;
+ }
+ avctx->has_b_frames=0;
+
+ avcodec_get_chroma_sub_sample(avctx->pix_fmt, &a->shiftX, &a->shiftY);
+
+ a->decctx=NULL;
+
+ return 0;
+}
+
+static int decode_done(AVCodecContext *avctx)
+{
+ CorePNGcontext * const a = avctx->priv_data;
+ if(a->decctx){
+ avcodec_close(a->decctx);
+ av_free(a->decctx);
+ }
+ return 0;
+}
+
+AVCodec corepng_decoder = {
+ "corepng",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_COREPNG,
+ sizeof(CorePNGcontext),
+ decode_init,
+ NULL,
+ decode_done,
+ decode_frame,
+ /*.capabilities = */0,
+ /*.next = */NULL,
+ /*.flush = */NULL,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name = */"CorePNG",
+};
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/cscd.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/cscd.c
new file mode 100644
index 000000000..55df95873
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/cscd.c
@@ -0,0 +1,264 @@
+/*
+ * CamStudio decoder
+ * Copyright (c) 2006 Reimar Doeffinger
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "avcodec.h"
+
+#if CONFIG_ZLIB
+#include <zlib.h>
+#endif
+#include "libavutil/lzo.h"
+
+typedef struct {
+ AVFrame pic;
+ int linelen, height, bpp;
+ unsigned int decomp_size;
+ unsigned char* decomp_buf;
+} CamStudioContext;
+
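+/* The decompressed CamStudio image is stored bottom-up, so these helpers write rows from the last line upward (see the "flip upside down" note in decode_frame). */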
+static void copy_frame_default(AVFrame *f, const uint8_t *src,
+ int linelen, int height) {
+ int i;
+ uint8_t *dst = f->data[0];
+ dst += (height - 1) * f->linesize[0];
+ for (i = height; i; i--) {
+ memcpy(dst, src, linelen);
+ src += linelen;
+ dst -= f->linesize[0];
+ }
+}
+
+static void add_frame_default(AVFrame *f, const uint8_t *src,
+ int linelen, int height) {
+ int i, j;
+ uint8_t *dst = f->data[0];
+ dst += (height - 1) * f->linesize[0];
+ for (i = height; i; i--) {
+ for (j = linelen; j; j--)
+ *dst++ += *src++;
+ dst -= f->linesize[0] + linelen;
+ }
+}
+
+#if !HAVE_BIGENDIAN
+#define copy_frame_16 copy_frame_default
+#define copy_frame_32 copy_frame_default
+#define add_frame_16 add_frame_default
+#define add_frame_32 add_frame_default
+#else
+static void copy_frame_16(AVFrame *f, const uint8_t *src,
+ int linelen, int height) {
+ int i, j;
+ uint8_t *dst = f->data[0];
+ dst += (height - 1) * f->linesize[0];
+ for (i = height; i; i--) {
+ for (j = linelen / 2; j; j--) {
+ dst[0] = src[1];
+ dst[1] = src[0];
+ src += 2;
+ dst += 2;
+ }
+ dst -= f->linesize[0] + linelen;
+ }
+}
+
+static void copy_frame_32(AVFrame *f, const uint8_t *src,
+ int linelen, int height) {
+ int i, j;
+ uint8_t *dst = f->data[0];
+ dst += (height - 1) * f->linesize[0];
+ for (i = height; i; i--) {
+ for (j = linelen / 4; j; j--) {
+ dst[0] = src[3];
+ dst[1] = src[2];
+ dst[2] = src[1];
+ dst[3] = src[0];
+ src += 4;
+ dst += 4;
+ }
+ dst -= f->linesize[0] + linelen;
+ }
+}
+
+static void add_frame_16(AVFrame *f, const uint8_t *src,
+ int linelen, int height) {
+ int i, j;
+ uint8_t *dst = f->data[0];
+ dst += (height - 1) * f->linesize[0];
+ for (i = height; i; i--) {
+ for (j = linelen / 2; j; j--) {
+ dst[0] += src[1];
+ dst[1] += src[0];
+ src += 2;
+ dst += 2;
+ }
+ dst -= f->linesize[0] + linelen;
+ }
+}
+
+static void add_frame_32(AVFrame *f, const uint8_t *src,
+ int linelen, int height) {
+ int i, j;
+ uint8_t *dst = f->data[0];
+ dst += (height - 1) * f->linesize[0];
+ for (i = height; i; i--) {
+ for (j = linelen / 4; j; j--) {
+ dst[0] += src[3];
+ dst[1] += src[2];
+ dst[2] += src[1];
+ dst[3] += src[0];
+ src += 4;
+ dst += 4;
+ }
+ dst -= f->linesize[0] + linelen;
+ }
+}
+#endif
+
+static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
+ const uint8_t *buf, int buf_size) {
+ CamStudioContext *c = avctx->priv_data;
+ AVFrame *picture = data;
+
+ if (buf_size < 2) {
+ av_log(avctx, AV_LOG_ERROR, "coded frame too small\n");
+ return -1;
+ }
+
+ if (c->pic.data[0])
+ avctx->release_buffer(avctx, &c->pic);
+ c->pic.reference = 1;
+ c->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_READABLE |
+ FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
+ if (avctx->get_buffer(avctx, &c->pic) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return -1;
+ }
+
+ // decompress data
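+    // buf[0] layout, as used below: bit 0 = keyframe flag, bits 1-3 = compression method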
+ switch ((buf[0] >> 1) & 7) {
+ case 0: { // lzo compression
+ int outlen = c->decomp_size, inlen = buf_size - 2;
+ if (av_lzo1x_decode(c->decomp_buf, &outlen, &buf[2], &inlen))
+ av_log(avctx, AV_LOG_ERROR, "error during lzo decompression\n");
+ break;
+ }
+ case 1: { // zlib compression
+#if CONFIG_ZLIB
+ unsigned long dlen = c->decomp_size;
+ if (uncompress(c->decomp_buf, &dlen, &buf[2], buf_size - 2) != Z_OK)
+ av_log(avctx, AV_LOG_ERROR, "error during zlib decompression\n");
+ break;
+#else
+ av_log(avctx, AV_LOG_ERROR, "compiled without zlib support\n");
+ return -1;
+#endif
+ }
+ default:
+ av_log(avctx, AV_LOG_ERROR, "unknown compression\n");
+ return -1;
+ }
+
+ // flip upside down, add difference frame
+ if (buf[0] & 1) { // keyframe
+ c->pic.pict_type = FF_I_TYPE;
+ c->pic.key_frame = 1;
+ switch (c->bpp) {
+ case 16:
+ copy_frame_16(&c->pic, c->decomp_buf, c->linelen, c->height);
+ break;
+ case 32:
+ copy_frame_32(&c->pic, c->decomp_buf, c->linelen, c->height);
+ break;
+ default:
+ copy_frame_default(&c->pic, c->decomp_buf, c->linelen, c->height);
+ }
+ } else {
+ c->pic.pict_type = FF_P_TYPE;
+ c->pic.key_frame = 0;
+ switch (c->bpp) {
+ case 16:
+ add_frame_16(&c->pic, c->decomp_buf, c->linelen, c->height);
+ break;
+ case 32:
+ add_frame_32(&c->pic, c->decomp_buf, c->linelen, c->height);
+ break;
+ default:
+ add_frame_default(&c->pic, c->decomp_buf, c->linelen, c->height);
+ }
+ }
+
+ *picture = c->pic;
+ *data_size = sizeof(AVFrame);
+ return buf_size;
+}
+
+static av_cold int decode_init(AVCodecContext *avctx) {
+ CamStudioContext *c = avctx->priv_data;
+ switch (avctx->bits_per_coded_sample) {
+ case 16: avctx->pix_fmt = PIX_FMT_RGB555; break;
+ case 24: avctx->pix_fmt = PIX_FMT_BGR24; break;
+ case 32: avctx->pix_fmt = PIX_FMT_RGB32; break;
+ default:
+ av_log(avctx, AV_LOG_ERROR,
+ "CamStudio codec error: invalid depth %i bpp\n",
+ avctx->bits_per_coded_sample);
+ return 1;
+ }
+ c->bpp = avctx->bits_per_coded_sample;
+ c->pic.data[0] = NULL;
+ c->linelen = avctx->width * avctx->bits_per_coded_sample / 8;
+ c->height = avctx->height;
+ c->decomp_size = c->height * c->linelen;
+ c->decomp_buf = av_malloc(c->decomp_size + AV_LZO_OUTPUT_PADDING);
+ if (!c->decomp_buf) {
+ av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n");
+ return 1;
+ }
+ return 0;
+}
+
+static av_cold int decode_end(AVCodecContext *avctx) {
+ CamStudioContext *c = avctx->priv_data;
+ av_freep(&c->decomp_buf);
+ if (c->pic.data[0])
+ avctx->release_buffer(avctx, &c->pic);
+ return 0;
+}
+
+AVCodec cscd_decoder = {
+ "camstudio",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_CSCD,
+ sizeof(CamStudioContext),
+ decode_init,
+ NULL,
+ decode_end,
+ decode_frame,
+ /*.capabilities = */CODEC_CAP_DR1,
+ /*.next = */NULL,
+ /*.flush = */NULL,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name = */NULL_IF_CONFIG_SMALL("CamStudio"),
+};
+
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/cyuv.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/cyuv.c
new file mode 100644
index 000000000..7f67d922b
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/cyuv.c
@@ -0,0 +1,191 @@
+/*
+ * Creative YUV (CYUV) Video Decoder
+ * by Mike Melanson (melanson@pcisys.net)
+ * based on "Creative YUV (CYUV) stream format for AVI":
+ * http://www.csse.monash.edu.au/~timf/videocodec/cyuv.txt
+ *
+ * Copyright (C) 2003 the ffmpeg project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/cyuv.c
+ * Creative YUV (CYUV) Video Decoder.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "avcodec.h"
+#include "dsputil.h"
+
+
+typedef struct CyuvDecodeContext {
+ AVCodecContext *avctx;
+ int width, height;
+ AVFrame frame;
+} CyuvDecodeContext;
+
+static av_cold int cyuv_decode_init(AVCodecContext *avctx)
+{
+ CyuvDecodeContext *s = avctx->priv_data;
+
+ s->avctx = avctx;
+ s->width = avctx->width;
+ /* width needs to be divisible by 4 for this codec to work */
+ if (s->width & 0x3)
+ return -1;
+ s->height = avctx->height;
+ avctx->pix_fmt = PIX_FMT_YUV411P;
+
+ return 0;
+}
+
+static int cyuv_decode_frame(AVCodecContext *avctx,
+ void *data, int *data_size,
+ const uint8_t *buf, int buf_size)
+{
+ CyuvDecodeContext *s=avctx->priv_data;
+
+ unsigned char *y_plane;
+ unsigned char *u_plane;
+ unsigned char *v_plane;
+ int y_ptr;
+ int u_ptr;
+ int v_ptr;
+
+ /* prediction error tables (make it clear that they are signed values) */
+ const signed char *y_table = (const signed char*)buf + 0;
+ const signed char *u_table = (const signed char*)buf + 16;
+ const signed char *v_table = (const signed char*)buf + 32;
+
+ unsigned char y_pred, u_pred, v_pred;
+ int stream_ptr;
+ unsigned char cur_byte;
+ int pixel_groups;
+
+    /* sanity check the buffer size: a buffer has three 16-byte tables
+     * followed by (height) lines, each using 3 bytes per group of 4
+     * pixels. Thus, the total size of the buffer ought to be:
+     * (3 * 16) + height * (width * 3 / 4) */
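+    /* illustrative worked example (not in the original source): a 320x240 frame gives 48 + 240 * (320 * 3 / 4) = 57648 bytes */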
+ if (buf_size != 48 + s->height * (s->width * 3 / 4)) {
+ av_log(avctx, AV_LOG_ERROR, "got a buffer with %d bytes when %d were expected\n",
+ buf_size, 48 + s->height * (s->width * 3 / 4));
+ return -1;
+ }
+
+ /* pixel data starts 48 bytes in, after 3x16-byte tables */
+ stream_ptr = 48;
+
+ if (s->frame.data[0])
+ avctx->release_buffer(avctx, &s->frame);
+
+ s->frame.buffer_hints = FF_BUFFER_HINTS_VALID;
+ s->frame.reference = 0;
+ if (avctx->get_buffer(avctx, &s->frame) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return -1;
+ }
+
+ y_plane = s->frame.data[0];
+ u_plane = s->frame.data[1];
+ v_plane = s->frame.data[2];
+
+ /* iterate through each line in the height */
+ for (y_ptr = 0, u_ptr = 0, v_ptr = 0;
+ y_ptr < (s->height * s->frame.linesize[0]);
+ y_ptr += s->frame.linesize[0] - s->width,
+ u_ptr += s->frame.linesize[1] - s->width / 4,
+ v_ptr += s->frame.linesize[2] - s->width / 4) {
+
+ /* reset predictors */
+ cur_byte = buf[stream_ptr++];
+ u_plane[u_ptr++] = u_pred = cur_byte & 0xF0;
+ y_plane[y_ptr++] = y_pred = (cur_byte & 0x0F) << 4;
+
+ cur_byte = buf[stream_ptr++];
+ v_plane[v_ptr++] = v_pred = cur_byte & 0xF0;
+ y_pred += y_table[cur_byte & 0x0F];
+ y_plane[y_ptr++] = y_pred;
+
+ cur_byte = buf[stream_ptr++];
+ y_pred += y_table[cur_byte & 0x0F];
+ y_plane[y_ptr++] = y_pred;
+ y_pred += y_table[(cur_byte & 0xF0) >> 4];
+ y_plane[y_ptr++] = y_pred;
+
+ /* iterate through the remaining pixel groups (4 pixels/group) */
+ pixel_groups = s->width / 4 - 1;
+ while (pixel_groups--) {
+
+ cur_byte = buf[stream_ptr++];
+ u_pred += u_table[(cur_byte & 0xF0) >> 4];
+ u_plane[u_ptr++] = u_pred;
+ y_pred += y_table[cur_byte & 0x0F];
+ y_plane[y_ptr++] = y_pred;
+
+ cur_byte = buf[stream_ptr++];
+ v_pred += v_table[(cur_byte & 0xF0) >> 4];
+ v_plane[v_ptr++] = v_pred;
+ y_pred += y_table[cur_byte & 0x0F];
+ y_plane[y_ptr++] = y_pred;
+
+ cur_byte = buf[stream_ptr++];
+ y_pred += y_table[cur_byte & 0x0F];
+ y_plane[y_ptr++] = y_pred;
+ y_pred += y_table[(cur_byte & 0xF0) >> 4];
+ y_plane[y_ptr++] = y_pred;
+
+ }
+ }
+
+ *data_size=sizeof(AVFrame);
+ *(AVFrame*)data= s->frame;
+
+ return buf_size;
+}
+
+static av_cold int cyuv_decode_end(AVCodecContext *avctx)
+{
+ CyuvDecodeContext *s = avctx->priv_data;
+
+ if (s->frame.data[0])
+ avctx->release_buffer(avctx, &s->frame);
+
+ return 0;
+}
+
+#if CONFIG_CYUV_DECODER
+AVCodec cyuv_decoder = {
+ "cyuv",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_CYUV,
+ sizeof(CyuvDecodeContext),
+ cyuv_decode_init,
+ NULL,
+ cyuv_decode_end,
+ cyuv_decode_frame,
+ /*.capabilities = */CODEC_CAP_DR1,
+ /*.next = */NULL,
+ /*.flush = */NULL,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name = */NULL_IF_CONFIG_SMALL("Creative YUV (CYUV)"),
+};
+#endif
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/dsputil.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/dsputil.c
new file mode 100644
index 000000000..6ee66cd74
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/dsputil.c
@@ -0,0 +1,4775 @@
+/*
+ * DSP utils
+ * Copyright (c) 2000, 2001 Fabrice Bellard
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * gmc & q-pel & 32/64 bit based MC by Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/dsputil.c
+ * DSP utils
+ */
+
+#include "avcodec.h"
+#include "dsputil.h"
+#include "simple_idct.h"
+#include "faandct.h"
+#include "faanidct.h"
+#include "mathops.h"
+#include "mpegvideo.h"
+#include "config.h"
+
+/* vorbis.c */
+void vorbis_inverse_coupling(float *mag, float *ang, int blocksize);
+
+/* ac3dec.c */
+void ff_ac3_downmix_c(float (*samples)[256], float (*matrix)[2], int out_ch, int in_ch, int len);
+
+/* pngdec.c */
+void ff_add_png_paeth_prediction(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp);
+
+uint8_t ff_cropTbl[256 + 2 * MAX_NEG_CROP] = {0, };
+uint32_t ff_squareTbl[512] = {0, };
+
+// 0x7f7f7f7f or 0x7f7f7f7f7f7f7f7f or whatever, depending on the cpu's native arithmetic size
+#define pb_7f (~0UL/255 * 0x7f)
+#define pb_80 (~0UL/255 * 0x80)
+
+const uint8_t ff_zigzag_direct[64] = {
+ 0, 1, 8, 16, 9, 2, 3, 10,
+ 17, 24, 32, 25, 18, 11, 4, 5,
+ 12, 19, 26, 33, 40, 48, 41, 34,
+ 27, 20, 13, 6, 7, 14, 21, 28,
+ 35, 42, 49, 56, 57, 50, 43, 36,
+ 29, 22, 15, 23, 30, 37, 44, 51,
+ 58, 59, 52, 45, 38, 31, 39, 46,
+ 53, 60, 61, 54, 47, 55, 62, 63
+};
+
+/* Specific zigzag scan for 248 idct. NOTE that unlike the
+ specification, we interleave the fields */
+const uint8_t ff_zigzag248_direct[64] = {
+ 0, 8, 1, 9, 16, 24, 2, 10,
+ 17, 25, 32, 40, 48, 56, 33, 41,
+ 18, 26, 3, 11, 4, 12, 19, 27,
+ 34, 42, 49, 57, 50, 58, 35, 43,
+ 20, 28, 5, 13, 6, 14, 21, 29,
+ 36, 44, 51, 59, 52, 60, 37, 45,
+ 22, 30, 7, 15, 23, 31, 38, 46,
+ 53, 61, 54, 62, 39, 47, 55, 63,
+};
+
+/* not permutated inverse zigzag_direct + 1 for MMX quantizer */
+DECLARE_ALIGNED_16(uint16_t, inv_zigzag_direct16)[64];
+
+const uint8_t ff_alternate_horizontal_scan[64] = {
+ 0, 1, 2, 3, 8, 9, 16, 17,
+ 10, 11, 4, 5, 6, 7, 15, 14,
+ 13, 12, 19, 18, 24, 25, 32, 33,
+ 26, 27, 20, 21, 22, 23, 28, 29,
+ 30, 31, 34, 35, 40, 41, 48, 49,
+ 42, 43, 36, 37, 38, 39, 44, 45,
+ 46, 47, 50, 51, 56, 57, 58, 59,
+ 52, 53, 54, 55, 60, 61, 62, 63,
+};
+
+const uint8_t ff_alternate_vertical_scan[64] = {
+ 0, 8, 16, 24, 1, 9, 2, 10,
+ 17, 25, 32, 40, 48, 56, 57, 49,
+ 41, 33, 26, 18, 3, 11, 4, 12,
+ 19, 27, 34, 42, 50, 58, 35, 43,
+ 51, 59, 20, 28, 5, 13, 6, 14,
+ 21, 29, 36, 44, 52, 60, 37, 45,
+ 53, 61, 22, 30, 7, 15, 23, 31,
+ 38, 46, 54, 62, 39, 47, 55, 63,
+};
+
+/* a*inverse[b]>>32 == a/b for all 0<=a<=16909558 && 2<=b<=256
+ * for a>16909558, it is an overestimate by less than 1 part in 1<<24 */
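+/* illustrative check (not in the original source): 1000/7 becomes (1000 * ff_inverse[7]) >> 32 = (1000 * 613566757) >> 32 = 142 */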
+const uint32_t ff_inverse[257]={
+ 0, 4294967295U,2147483648U,1431655766, 1073741824, 858993460, 715827883, 613566757,
+ 536870912, 477218589, 429496730, 390451573, 357913942, 330382100, 306783379, 286331154,
+ 268435456, 252645136, 238609295, 226050911, 214748365, 204522253, 195225787, 186737709,
+ 178956971, 171798692, 165191050, 159072863, 153391690, 148102321, 143165577, 138547333,
+ 134217728, 130150525, 126322568, 122713352, 119304648, 116080198, 113025456, 110127367,
+ 107374183, 104755300, 102261127, 99882961, 97612894, 95443718, 93368855, 91382283,
+ 89478486, 87652394, 85899346, 84215046, 82595525, 81037119, 79536432, 78090315,
+ 76695845, 75350304, 74051161, 72796056, 71582789, 70409300, 69273667, 68174085,
+ 67108864, 66076420, 65075263, 64103990, 63161284, 62245903, 61356676, 60492498,
+ 59652324, 58835169, 58040099, 57266231, 56512728, 55778797, 55063684, 54366675,
+ 53687092, 53024288, 52377650, 51746594, 51130564, 50529028, 49941481, 49367441,
+ 48806447, 48258060, 47721859, 47197443, 46684428, 46182445, 45691142, 45210183,
+ 44739243, 44278014, 43826197, 43383509, 42949673, 42524429, 42107523, 41698712,
+ 41297763, 40904451, 40518560, 40139882, 39768216, 39403370, 39045158, 38693400,
+ 38347923, 38008561, 37675152, 37347542, 37025581, 36709123, 36398028, 36092163,
+ 35791395, 35495598, 35204650, 34918434, 34636834, 34359739, 34087043, 33818641,
+ 33554432, 33294321, 33038210, 32786010, 32537632, 32292988, 32051995, 31814573,
+ 31580642, 31350127, 31122952, 30899046, 30678338, 30460761, 30246249, 30034737,
+ 29826162, 29620465, 29417585, 29217465, 29020050, 28825284, 28633116, 28443493,
+ 28256364, 28071682, 27889399, 27709467, 27531842, 27356480, 27183338, 27012373,
+ 26843546, 26676816, 26512144, 26349493, 26188825, 26030105, 25873297, 25718368,
+ 25565282, 25414008, 25264514, 25116768, 24970741, 24826401, 24683721, 24542671,
+ 24403224, 24265352, 24129030, 23994231, 23860930, 23729102, 23598722, 23469767,
+ 23342214, 23216040, 23091223, 22967740, 22845571, 22724695, 22605092, 22486740,
+ 22369622, 22253717, 22139007, 22025474, 21913099, 21801865, 21691755, 21582751,
+ 21474837, 21367997, 21262215, 21157475, 21053762, 20951060, 20849356, 20748635,
+ 20648882, 20550083, 20452226, 20355296, 20259280, 20164166, 20069941, 19976593,
+ 19884108, 19792477, 19701685, 19611723, 19522579, 19434242, 19346700, 19259944,
+ 19173962, 19088744, 19004281, 18920561, 18837576, 18755316, 18673771, 18592933,
+ 18512791, 18433337, 18354562, 18276457, 18199014, 18122225, 18046082, 17970575,
+ 17895698, 17821442, 17747799, 17674763, 17602325, 17530479, 17459217, 17388532,
+ 17318417, 17248865, 17179870, 17111424, 17043522, 16976156, 16909321, 16843010,
+ 16777216
+};
+
+/* Input permutation for the simple_idct_mmx */
+static const uint8_t simple_mmx_permutation[64]={
+ 0x00, 0x08, 0x04, 0x09, 0x01, 0x0C, 0x05, 0x0D,
+ 0x10, 0x18, 0x14, 0x19, 0x11, 0x1C, 0x15, 0x1D,
+ 0x20, 0x28, 0x24, 0x29, 0x21, 0x2C, 0x25, 0x2D,
+ 0x12, 0x1A, 0x16, 0x1B, 0x13, 0x1E, 0x17, 0x1F,
+ 0x02, 0x0A, 0x06, 0x0B, 0x03, 0x0E, 0x07, 0x0F,
+ 0x30, 0x38, 0x34, 0x39, 0x31, 0x3C, 0x35, 0x3D,
+ 0x22, 0x2A, 0x26, 0x2B, 0x23, 0x2E, 0x27, 0x2F,
+ 0x32, 0x3A, 0x36, 0x3B, 0x33, 0x3E, 0x37, 0x3F,
+};
+
+static const uint8_t idct_sse2_row_perm[8] = {0, 4, 1, 5, 2, 6, 3, 7};
+
+void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable){
+ int i;
+ int end;
+
+ st->scantable= src_scantable;
+
+ for(i=0; i<64; i++){
+ int j;
+ j = src_scantable[i];
+ st->permutated[i] = permutation[j];
+ }
+
+ end=-1;
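+    /* raster_end[i] is the highest permuted position occurring among the first i+1 scan entries */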
+ for(i=0; i<64; i++){
+ int j;
+ j = st->permutated[i];
+ if(j>end) end=j;
+ st->raster_end[i]= end;
+ }
+}
+
+static int pix_sum_c(uint8_t * pix, int line_size)
+{
+ int s, i, j;
+
+ s = 0;
+ for (i = 0; i < 16; i++) {
+ for (j = 0; j < 16; j += 8) {
+ s += pix[0];
+ s += pix[1];
+ s += pix[2];
+ s += pix[3];
+ s += pix[4];
+ s += pix[5];
+ s += pix[6];
+ s += pix[7];
+ pix += 8;
+ }
+ pix += line_size - 16;
+ }
+ return s;
+}
+
+static int pix_norm1_c(uint8_t * pix, int line_size)
+{
+ int s, i, j;
+ uint32_t *sq = ff_squareTbl + 256;
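+    /* sq is offset to the centre of ff_squareTbl so that, once the table is initialised, sq[d] == d*d for d in [-256, 255] */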
+
+ s = 0;
+ for (i = 0; i < 16; i++) {
+ for (j = 0; j < 16; j += 8) {
+#if 0
+ s += sq[pix[0]];
+ s += sq[pix[1]];
+ s += sq[pix[2]];
+ s += sq[pix[3]];
+ s += sq[pix[4]];
+ s += sq[pix[5]];
+ s += sq[pix[6]];
+ s += sq[pix[7]];
+#else
+#if LONG_MAX > 2147483647
+ register uint64_t x=*(uint64_t*)pix;
+ s += sq[x&0xff];
+ s += sq[(x>>8)&0xff];
+ s += sq[(x>>16)&0xff];
+ s += sq[(x>>24)&0xff];
+ s += sq[(x>>32)&0xff];
+ s += sq[(x>>40)&0xff];
+ s += sq[(x>>48)&0xff];
+ s += sq[(x>>56)&0xff];
+#else
+ register uint32_t x=*(uint32_t*)pix;
+ s += sq[x&0xff];
+ s += sq[(x>>8)&0xff];
+ s += sq[(x>>16)&0xff];
+ s += sq[(x>>24)&0xff];
+ x=*(uint32_t*)(pix+4);
+ s += sq[x&0xff];
+ s += sq[(x>>8)&0xff];
+ s += sq[(x>>16)&0xff];
+ s += sq[(x>>24)&0xff];
+#endif
+#endif
+ pix += 8;
+ }
+ pix += line_size - 16;
+ }
+ return s;
+}
+
+static void bswap_buf(uint32_t *dst, const uint32_t *src, int w){
+ int i;
+
+ for(i=0; i+8<=w; i+=8){
+ dst[i+0]= bswap_32(src[i+0]);
+ dst[i+1]= bswap_32(src[i+1]);
+ dst[i+2]= bswap_32(src[i+2]);
+ dst[i+3]= bswap_32(src[i+3]);
+ dst[i+4]= bswap_32(src[i+4]);
+ dst[i+5]= bswap_32(src[i+5]);
+ dst[i+6]= bswap_32(src[i+6]);
+ dst[i+7]= bswap_32(src[i+7]);
+ }
+ for(;i<w; i++){
+ dst[i+0]= bswap_32(src[i+0]);
+ }
+}
+
+static int sse4_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h)
+{
+ int s, i;
+ uint32_t *sq = ff_squareTbl + 256;
+
+ s = 0;
+ for (i = 0; i < h; i++) {
+ s += sq[pix1[0] - pix2[0]];
+ s += sq[pix1[1] - pix2[1]];
+ s += sq[pix1[2] - pix2[2]];
+ s += sq[pix1[3] - pix2[3]];
+ pix1 += line_size;
+ pix2 += line_size;
+ }
+ return s;
+}
+
+static int sse8_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h)
+{
+ int s, i;
+ uint32_t *sq = ff_squareTbl + 256;
+
+ s = 0;
+ for (i = 0; i < h; i++) {
+ s += sq[pix1[0] - pix2[0]];
+ s += sq[pix1[1] - pix2[1]];
+ s += sq[pix1[2] - pix2[2]];
+ s += sq[pix1[3] - pix2[3]];
+ s += sq[pix1[4] - pix2[4]];
+ s += sq[pix1[5] - pix2[5]];
+ s += sq[pix1[6] - pix2[6]];
+ s += sq[pix1[7] - pix2[7]];
+ pix1 += line_size;
+ pix2 += line_size;
+ }
+ return s;
+}
+
+static int sse16_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
+{
+ int s, i;
+ uint32_t *sq = ff_squareTbl + 256;
+
+ s = 0;
+ for (i = 0; i < h; i++) {
+ s += sq[pix1[ 0] - pix2[ 0]];
+ s += sq[pix1[ 1] - pix2[ 1]];
+ s += sq[pix1[ 2] - pix2[ 2]];
+ s += sq[pix1[ 3] - pix2[ 3]];
+ s += sq[pix1[ 4] - pix2[ 4]];
+ s += sq[pix1[ 5] - pix2[ 5]];
+ s += sq[pix1[ 6] - pix2[ 6]];
+ s += sq[pix1[ 7] - pix2[ 7]];
+ s += sq[pix1[ 8] - pix2[ 8]];
+ s += sq[pix1[ 9] - pix2[ 9]];
+ s += sq[pix1[10] - pix2[10]];
+ s += sq[pix1[11] - pix2[11]];
+ s += sq[pix1[12] - pix2[12]];
+ s += sq[pix1[13] - pix2[13]];
+ s += sq[pix1[14] - pix2[14]];
+ s += sq[pix1[15] - pix2[15]];
+
+ pix1 += line_size;
+ pix2 += line_size;
+ }
+ return s;
+}
+
+
+/* draw the edges of width 'w' of an image of size width, height */
+//FIXME check that this is ok for mpeg4 interlaced
+static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w)
+{
+ uint8_t *ptr, *last_line;
+ int i;
+
+ last_line = buf + (height - 1) * wrap;
+ for(i=0;i<w;i++) {
+ /* top and bottom */
+ memcpy(buf - (i + 1) * wrap, buf, width);
+ memcpy(last_line + (i + 1) * wrap, last_line, width);
+ }
+ /* left and right */
+ ptr = buf;
+ for(i=0;i<height;i++) {
+ memset(ptr - w, ptr[0], w);
+ memset(ptr + width, ptr[width-1], w);
+ ptr += wrap;
+ }
+ /* corners */
+ for(i=0;i<w;i++) {
+ memset(buf - (i + 1) * wrap - w, buf[0], w); /* top left */
+ memset(buf - (i + 1) * wrap + width, buf[width-1], w); /* top right */
+        memset(last_line + (i + 1) * wrap - w, last_line[0], w); /* bottom left */
+        memset(last_line + (i + 1) * wrap + width, last_line[width-1], w); /* bottom right */
+ }
+}
+
+/**
+ * Copies a rectangular area of samples to a temporary buffer and replicates the border samples.
+ * @param buf destination buffer
+ * @param src source buffer
+ * @param linesize number of bytes between 2 vertically adjacent samples in both the source and destination buffers
+ * @param block_w width of block
+ * @param block_h height of block
+ * @param src_x x coordinate of the top left sample of the block in the source buffer
+ * @param src_y y coordinate of the top left sample of the block in the source buffer
+ * @param w width of the source buffer
+ * @param h height of the source buffer
+ */
+void ff_emulated_edge_mc(uint8_t *buf, uint8_t *src, int linesize, int block_w, int block_h,
+ int src_x, int src_y, int w, int h){
+ int x, y;
+ int start_y, start_x, end_y, end_x;
+
+ if(src_y>= h){
+ src+= (h-1-src_y)*linesize;
+ src_y=h-1;
+ }else if(src_y<=-block_h){
+ src+= (1-block_h-src_y)*linesize;
+ src_y=1-block_h;
+ }
+ if(src_x>= w){
+ src+= (w-1-src_x);
+ src_x=w-1;
+ }else if(src_x<=-block_w){
+ src+= (1-block_w-src_x);
+ src_x=1-block_w;
+ }
+
+ start_y= FFMAX(0, -src_y);
+ start_x= FFMAX(0, -src_x);
+ end_y= FFMIN(block_h, h-src_y);
+ end_x= FFMIN(block_w, w-src_x);
+
+ // copy existing part
+ for(y=start_y; y<end_y; y++){
+ for(x=start_x; x<end_x; x++){
+ buf[x + y*linesize]= src[x + y*linesize];
+ }
+ }
+
+ //top
+ for(y=0; y<start_y; y++){
+ for(x=start_x; x<end_x; x++){
+ buf[x + y*linesize]= buf[x + start_y*linesize];
+ }
+ }
+
+ //bottom
+ for(y=end_y; y<block_h; y++){
+ for(x=start_x; x<end_x; x++){
+ buf[x + y*linesize]= buf[x + (end_y-1)*linesize];
+ }
+ }
+
+ for(y=0; y<block_h; y++){
+ //left
+ for(x=0; x<start_x; x++){
+ buf[x + y*linesize]= buf[start_x + y*linesize];
+ }
+
+ //right
+ for(x=end_x; x<block_w; x++){
+ buf[x + y*linesize]= buf[end_x - 1 + y*linesize];
+ }
+ }
+}
+
+static void get_pixels_c(DCTELEM *restrict block, const uint8_t *pixels, int line_size)
+{
+ int i;
+
+ /* read the pixels */
+ for(i=0;i<8;i++) {
+ block[0] = pixels[0];
+ block[1] = pixels[1];
+ block[2] = pixels[2];
+ block[3] = pixels[3];
+ block[4] = pixels[4];
+ block[5] = pixels[5];
+ block[6] = pixels[6];
+ block[7] = pixels[7];
+ pixels += line_size;
+ block += 8;
+ }
+}
+
+static void diff_pixels_c(DCTELEM *restrict block, const uint8_t *s1,
+ const uint8_t *s2, int stride){
+ int i;
+
+ /* read the pixels */
+ for(i=0;i<8;i++) {
+ block[0] = s1[0] - s2[0];
+ block[1] = s1[1] - s2[1];
+ block[2] = s1[2] - s2[2];
+ block[3] = s1[3] - s2[3];
+ block[4] = s1[4] - s2[4];
+ block[5] = s1[5] - s2[5];
+ block[6] = s1[6] - s2[6];
+ block[7] = s1[7] - s2[7];
+ s1 += stride;
+ s2 += stride;
+ block += 8;
+ }
+}
+
+
+static void put_pixels_clamped_c(const DCTELEM *block, uint8_t *restrict pixels,
+ int line_size)
+{
+ int i;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+
+ /* write the clamped block values */
+ for(i=0;i<8;i++) {
+ pixels[0] = cm[block[0]];
+ pixels[1] = cm[block[1]];
+ pixels[2] = cm[block[2]];
+ pixels[3] = cm[block[3]];
+ pixels[4] = cm[block[4]];
+ pixels[5] = cm[block[5]];
+ pixels[6] = cm[block[6]];
+ pixels[7] = cm[block[7]];
+
+ pixels += line_size;
+ block += 8;
+ }
+}
+
+static void put_pixels_clamped4_c(const DCTELEM *block, uint8_t *restrict pixels,
+ int line_size)
+{
+ int i;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+
+ /* write the clamped block values */
+ for(i=0;i<4;i++) {
+ pixels[0] = cm[block[0]];
+ pixels[1] = cm[block[1]];
+ pixels[2] = cm[block[2]];
+ pixels[3] = cm[block[3]];
+
+ pixels += line_size;
+ block += 8;
+ }
+}
+
+static void put_pixels_clamped2_c(const DCTELEM *block, uint8_t *restrict pixels,
+ int line_size)
+{
+ int i;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+
+ /* write the clamped block values */
+ for(i=0;i<2;i++) {
+ pixels[0] = cm[block[0]];
+ pixels[1] = cm[block[1]];
+
+ pixels += line_size;
+ block += 8;
+ }
+}
+
+static void put_signed_pixels_clamped_c(const DCTELEM *block,
+ uint8_t *restrict pixels,
+ int line_size)
+{
+ int i, j;
+
+ for (i = 0; i < 8; i++) {
+ for (j = 0; j < 8; j++) {
+ if (*block < -128)
+ *pixels = 0;
+ else if (*block > 127)
+ *pixels = 255;
+ else
+ *pixels = (uint8_t)(*block + 128);
+ block++;
+ pixels++;
+ }
+ pixels += (line_size - 8);
+ }
+}
+
+static void add_pixels_clamped_c(const DCTELEM *block, uint8_t *restrict pixels,
+ int line_size)
+{
+ int i;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+
+ /* add the block values to the pixels and clamp */
+ for(i=0;i<8;i++) {
+ pixels[0] = cm[pixels[0] + block[0]];
+ pixels[1] = cm[pixels[1] + block[1]];
+ pixels[2] = cm[pixels[2] + block[2]];
+ pixels[3] = cm[pixels[3] + block[3]];
+ pixels[4] = cm[pixels[4] + block[4]];
+ pixels[5] = cm[pixels[5] + block[5]];
+ pixels[6] = cm[pixels[6] + block[6]];
+ pixels[7] = cm[pixels[7] + block[7]];
+ pixels += line_size;
+ block += 8;
+ }
+}
+
+static void add_pixels_clamped4_c(const DCTELEM *block, uint8_t *restrict pixels,
+ int line_size)
+{
+ int i;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+
+ /* add the block values to the pixels and clamp */
+ for(i=0;i<4;i++) {
+ pixels[0] = cm[pixels[0] + block[0]];
+ pixels[1] = cm[pixels[1] + block[1]];
+ pixels[2] = cm[pixels[2] + block[2]];
+ pixels[3] = cm[pixels[3] + block[3]];
+ pixels += line_size;
+ block += 8;
+ }
+}
+
+static void add_pixels_clamped2_c(const DCTELEM *block, uint8_t *restrict pixels,
+ int line_size)
+{
+ int i;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+
+ /* add the block values to the pixels and clamp */
+ for(i=0;i<2;i++) {
+ pixels[0] = cm[pixels[0] + block[0]];
+ pixels[1] = cm[pixels[1] + block[1]];
+ pixels += line_size;
+ block += 8;
+ }
+}
+
+static void add_pixels8_c(uint8_t *restrict pixels, DCTELEM *block, int line_size)
+{
+ int i;
+ for(i=0;i<8;i++) {
+ pixels[0] += block[0];
+ pixels[1] += block[1];
+ pixels[2] += block[2];
+ pixels[3] += block[3];
+ pixels[4] += block[4];
+ pixels[5] += block[5];
+ pixels[6] += block[6];
+ pixels[7] += block[7];
+ pixels += line_size;
+ block += 8;
+ }
+}
+
+static void add_pixels4_c(uint8_t *restrict pixels, DCTELEM *block, int line_size)
+{
+ int i;
+ for(i=0;i<4;i++) {
+ pixels[0] += block[0];
+ pixels[1] += block[1];
+ pixels[2] += block[2];
+ pixels[3] += block[3];
+ pixels += line_size;
+ block += 4;
+ }
+}
+
+static int sum_abs_dctelem_c(DCTELEM *block)
+{
+ int sum=0, i;
+ for(i=0; i<64; i++)
+ sum+= FFABS(block[i]);
+ return sum;
+}
+
+#if 0
+
+#define PIXOP2(OPNAME, OP) \
+static void OPNAME ## _pixels_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
+{\
+ int i;\
+ for(i=0; i<h; i++){\
+ OP(*((uint64_t*)block), AV_RN64(pixels));\
+ pixels+=line_size;\
+ block +=line_size;\
+ }\
+}\
+\
+static void OPNAME ## _no_rnd_pixels_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
+{\
+ int i;\
+ for(i=0; i<h; i++){\
+ const uint64_t a= AV_RN64(pixels );\
+ const uint64_t b= AV_RN64(pixels+1);\
+ OP(*((uint64_t*)block), (a&b) + (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
+ pixels+=line_size;\
+ block +=line_size;\
+ }\
+}\
+\
+static void OPNAME ## _pixels_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
+{\
+ int i;\
+ for(i=0; i<h; i++){\
+ const uint64_t a= AV_RN64(pixels );\
+ const uint64_t b= AV_RN64(pixels+1);\
+ OP(*((uint64_t*)block), (a|b) - (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
+ pixels+=line_size;\
+ block +=line_size;\
+ }\
+}\
+\
+static void OPNAME ## _no_rnd_pixels_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
+{\
+ int i;\
+ for(i=0; i<h; i++){\
+ const uint64_t a= AV_RN64(pixels );\
+ const uint64_t b= AV_RN64(pixels+line_size);\
+ OP(*((uint64_t*)block), (a&b) + (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
+ pixels+=line_size;\
+ block +=line_size;\
+ }\
+}\
+\
+static void OPNAME ## _pixels_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
+{\
+ int i;\
+ for(i=0; i<h; i++){\
+ const uint64_t a= AV_RN64(pixels );\
+ const uint64_t b= AV_RN64(pixels+line_size);\
+ OP(*((uint64_t*)block), (a|b) - (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
+ pixels+=line_size;\
+ block +=line_size;\
+ }\
+}\
+\
+static void OPNAME ## _pixels_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
+{\
+ int i;\
+ const uint64_t a= AV_RN64(pixels );\
+ const uint64_t b= AV_RN64(pixels+1);\
+ uint64_t l0= (a&0x0303030303030303ULL)\
+ + (b&0x0303030303030303ULL)\
+ + 0x0202020202020202ULL;\
+ uint64_t h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
+ + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
+ uint64_t l1,h1;\
+\
+ pixels+=line_size;\
+ for(i=0; i<h; i+=2){\
+ uint64_t a= AV_RN64(pixels );\
+ uint64_t b= AV_RN64(pixels+1);\
+ l1= (a&0x0303030303030303ULL)\
+ + (b&0x0303030303030303ULL);\
+ h1= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
+ + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
+ OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
+ pixels+=line_size;\
+ block +=line_size;\
+ a= AV_RN64(pixels );\
+ b= AV_RN64(pixels+1);\
+ l0= (a&0x0303030303030303ULL)\
+ + (b&0x0303030303030303ULL)\
+ + 0x0202020202020202ULL;\
+ h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
+ + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
+ OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
+ pixels+=line_size;\
+ block +=line_size;\
+ }\
+}\
+\
+static void OPNAME ## _no_rnd_pixels_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
+{\
+ int i;\
+ const uint64_t a= AV_RN64(pixels );\
+ const uint64_t b= AV_RN64(pixels+1);\
+ uint64_t l0= (a&0x0303030303030303ULL)\
+ + (b&0x0303030303030303ULL)\
+ + 0x0101010101010101ULL;\
+ uint64_t h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
+ + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
+ uint64_t l1,h1;\
+\
+ pixels+=line_size;\
+ for(i=0; i<h; i+=2){\
+ uint64_t a= AV_RN64(pixels );\
+ uint64_t b= AV_RN64(pixels+1);\
+ l1= (a&0x0303030303030303ULL)\
+ + (b&0x0303030303030303ULL);\
+ h1= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
+ + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
+ OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
+ pixels+=line_size;\
+ block +=line_size;\
+ a= AV_RN64(pixels );\
+ b= AV_RN64(pixels+1);\
+ l0= (a&0x0303030303030303ULL)\
+ + (b&0x0303030303030303ULL)\
+ + 0x0101010101010101ULL;\
+ h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
+ + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
+ OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
+ pixels+=line_size;\
+ block +=line_size;\
+ }\
+}\
+\
+CALL_2X_PIXELS(OPNAME ## _pixels16_c , OPNAME ## _pixels_c , 8)\
+CALL_2X_PIXELS(OPNAME ## _pixels16_x2_c , OPNAME ## _pixels_x2_c , 8)\
+CALL_2X_PIXELS(OPNAME ## _pixels16_y2_c , OPNAME ## _pixels_y2_c , 8)\
+CALL_2X_PIXELS(OPNAME ## _pixels16_xy2_c, OPNAME ## _pixels_xy2_c, 8)\
+CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_x2_c , OPNAME ## _no_rnd_pixels_x2_c , 8)\
+CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_y2_c , OPNAME ## _no_rnd_pixels_y2_c , 8)\
+CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_xy2_c, OPNAME ## _no_rnd_pixels_xy2_c, 8)
+
+#define op_avg(a, b) a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEFEFEFEFEULL)>>1) )
+#else // 32 bit variant (the 64 bit code above is disabled)
+
+#define PIXOP2(OPNAME, OP) \
+static void OPNAME ## _pixels2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
+ int i;\
+ for(i=0; i<h; i++){\
+ OP(*((uint16_t*)(block )), AV_RN16(pixels ));\
+ pixels+=line_size;\
+ block +=line_size;\
+ }\
+}\
+static void OPNAME ## _pixels4_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
+ int i;\
+ for(i=0; i<h; i++){\
+ OP(*((uint32_t*)(block )), AV_RN32(pixels ));\
+ pixels+=line_size;\
+ block +=line_size;\
+ }\
+}\
+static void OPNAME ## _pixels8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
+ int i;\
+ for(i=0; i<h; i++){\
+ OP(*((uint32_t*)(block )), AV_RN32(pixels ));\
+ OP(*((uint32_t*)(block+4)), AV_RN32(pixels+4));\
+ pixels+=line_size;\
+ block +=line_size;\
+ }\
+}\
+static inline void OPNAME ## _no_rnd_pixels8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
+ OPNAME ## _pixels8_c(block, pixels, line_size, h);\
+}\
+\
+static inline void OPNAME ## _no_rnd_pixels8_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
+ int src_stride1, int src_stride2, int h){\
+ int i;\
+ for(i=0; i<h; i++){\
+ uint32_t a,b;\
+ a= AV_RN32(&src1[i*src_stride1 ]);\
+ b= AV_RN32(&src2[i*src_stride2 ]);\
+ OP(*((uint32_t*)&dst[i*dst_stride ]), no_rnd_avg32(a, b));\
+ a= AV_RN32(&src1[i*src_stride1+4]);\
+ b= AV_RN32(&src2[i*src_stride2+4]);\
+ OP(*((uint32_t*)&dst[i*dst_stride+4]), no_rnd_avg32(a, b));\
+ }\
+}\
+\
+static inline void OPNAME ## _pixels8_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
+ int src_stride1, int src_stride2, int h){\
+ int i;\
+ for(i=0; i<h; i++){\
+ uint32_t a,b;\
+ a= AV_RN32(&src1[i*src_stride1 ]);\
+ b= AV_RN32(&src2[i*src_stride2 ]);\
+ OP(*((uint32_t*)&dst[i*dst_stride ]), rnd_avg32(a, b));\
+ a= AV_RN32(&src1[i*src_stride1+4]);\
+ b= AV_RN32(&src2[i*src_stride2+4]);\
+ OP(*((uint32_t*)&dst[i*dst_stride+4]), rnd_avg32(a, b));\
+ }\
+}\
+\
+static inline void OPNAME ## _pixels4_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
+ int src_stride1, int src_stride2, int h){\
+ int i;\
+ for(i=0; i<h; i++){\
+ uint32_t a,b;\
+ a= AV_RN32(&src1[i*src_stride1 ]);\
+ b= AV_RN32(&src2[i*src_stride2 ]);\
+ OP(*((uint32_t*)&dst[i*dst_stride ]), rnd_avg32(a, b));\
+ }\
+}\
+\
+static inline void OPNAME ## _pixels2_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
+ int src_stride1, int src_stride2, int h){\
+ int i;\
+ for(i=0; i<h; i++){\
+ uint32_t a,b;\
+ a= AV_RN16(&src1[i*src_stride1 ]);\
+ b= AV_RN16(&src2[i*src_stride2 ]);\
+ OP(*((uint16_t*)&dst[i*dst_stride ]), rnd_avg32(a, b));\
+ }\
+}\
+\
+static inline void OPNAME ## _pixels16_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
+ int src_stride1, int src_stride2, int h){\
+ OPNAME ## _pixels8_l2(dst , src1 , src2 , dst_stride, src_stride1, src_stride2, h);\
+ OPNAME ## _pixels8_l2(dst+8, src1+8, src2+8, dst_stride, src_stride1, src_stride2, h);\
+}\
+\
+static inline void OPNAME ## _no_rnd_pixels16_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
+ int src_stride1, int src_stride2, int h){\
+ OPNAME ## _no_rnd_pixels8_l2(dst , src1 , src2 , dst_stride, src_stride1, src_stride2, h);\
+ OPNAME ## _no_rnd_pixels8_l2(dst+8, src1+8, src2+8, dst_stride, src_stride1, src_stride2, h);\
+}\
+\
+static inline void OPNAME ## _no_rnd_pixels8_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
+ OPNAME ## _no_rnd_pixels8_l2(block, pixels, pixels+1, line_size, line_size, line_size, h);\
+}\
+\
+static inline void OPNAME ## _pixels8_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
+ OPNAME ## _pixels8_l2(block, pixels, pixels+1, line_size, line_size, line_size, h);\
+}\
+\
+static inline void OPNAME ## _no_rnd_pixels8_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
+ OPNAME ## _no_rnd_pixels8_l2(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
+}\
+\
+static inline void OPNAME ## _pixels8_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
+ OPNAME ## _pixels8_l2(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
+}\
+\
+static inline void OPNAME ## _pixels8_l4(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,\
+ int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
+ int i;\
+ for(i=0; i<h; i++){\
+ uint32_t a, b, c, d, l0, l1, h0, h1;\
+ a= AV_RN32(&src1[i*src_stride1]);\
+ b= AV_RN32(&src2[i*src_stride2]);\
+ c= AV_RN32(&src3[i*src_stride3]);\
+ d= AV_RN32(&src4[i*src_stride4]);\
+ l0= (a&0x03030303UL)\
+ + (b&0x03030303UL)\
+ + 0x02020202UL;\
+ h0= ((a&0xFCFCFCFCUL)>>2)\
+ + ((b&0xFCFCFCFCUL)>>2);\
+ l1= (c&0x03030303UL)\
+ + (d&0x03030303UL);\
+ h1= ((c&0xFCFCFCFCUL)>>2)\
+ + ((d&0xFCFCFCFCUL)>>2);\
+ OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
+ a= AV_RN32(&src1[i*src_stride1+4]);\
+ b= AV_RN32(&src2[i*src_stride2+4]);\
+ c= AV_RN32(&src3[i*src_stride3+4]);\
+ d= AV_RN32(&src4[i*src_stride4+4]);\
+ l0= (a&0x03030303UL)\
+ + (b&0x03030303UL)\
+ + 0x02020202UL;\
+ h0= ((a&0xFCFCFCFCUL)>>2)\
+ + ((b&0xFCFCFCFCUL)>>2);\
+ l1= (c&0x03030303UL)\
+ + (d&0x03030303UL);\
+ h1= ((c&0xFCFCFCFCUL)>>2)\
+ + ((d&0xFCFCFCFCUL)>>2);\
+ OP(*((uint32_t*)&dst[i*dst_stride+4]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
+ }\
+}\
+\
+static inline void OPNAME ## _pixels4_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
+ OPNAME ## _pixels4_l2(block, pixels, pixels+1, line_size, line_size, line_size, h);\
+}\
+\
+static inline void OPNAME ## _pixels4_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
+ OPNAME ## _pixels4_l2(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
+}\
+\
+static inline void OPNAME ## _pixels2_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
+ OPNAME ## _pixels2_l2(block, pixels, pixels+1, line_size, line_size, line_size, h);\
+}\
+\
+static inline void OPNAME ## _pixels2_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
+ OPNAME ## _pixels2_l2(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
+}\
+\
+static inline void OPNAME ## _no_rnd_pixels8_l4(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,\
+ int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
+ int i;\
+ for(i=0; i<h; i++){\
+ uint32_t a, b, c, d, l0, l1, h0, h1;\
+ a= AV_RN32(&src1[i*src_stride1]);\
+ b= AV_RN32(&src2[i*src_stride2]);\
+ c= AV_RN32(&src3[i*src_stride3]);\
+ d= AV_RN32(&src4[i*src_stride4]);\
+ l0= (a&0x03030303UL)\
+ + (b&0x03030303UL)\
+ + 0x01010101UL;\
+ h0= ((a&0xFCFCFCFCUL)>>2)\
+ + ((b&0xFCFCFCFCUL)>>2);\
+ l1= (c&0x03030303UL)\
+ + (d&0x03030303UL);\
+ h1= ((c&0xFCFCFCFCUL)>>2)\
+ + ((d&0xFCFCFCFCUL)>>2);\
+ OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
+ a= AV_RN32(&src1[i*src_stride1+4]);\
+ b= AV_RN32(&src2[i*src_stride2+4]);\
+ c= AV_RN32(&src3[i*src_stride3+4]);\
+ d= AV_RN32(&src4[i*src_stride4+4]);\
+ l0= (a&0x03030303UL)\
+ + (b&0x03030303UL)\
+ + 0x01010101UL;\
+ h0= ((a&0xFCFCFCFCUL)>>2)\
+ + ((b&0xFCFCFCFCUL)>>2);\
+ l1= (c&0x03030303UL)\
+ + (d&0x03030303UL);\
+ h1= ((c&0xFCFCFCFCUL)>>2)\
+ + ((d&0xFCFCFCFCUL)>>2);\
+ OP(*((uint32_t*)&dst[i*dst_stride+4]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
+ }\
+}\
+static inline void OPNAME ## _pixels16_l4(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,\
+ int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
+ OPNAME ## _pixels8_l4(dst , src1 , src2 , src3 , src4 , dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
+ OPNAME ## _pixels8_l4(dst+8, src1+8, src2+8, src3+8, src4+8, dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
+}\
+static inline void OPNAME ## _no_rnd_pixels16_l4(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,\
+ int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
+ OPNAME ## _no_rnd_pixels8_l4(dst , src1 , src2 , src3 , src4 , dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
+ OPNAME ## _no_rnd_pixels8_l4(dst+8, src1+8, src2+8, src3+8, src4+8, dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
+}\
+\
+static inline void OPNAME ## _pixels2_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
+{\
+ int i, a0, b0, a1, b1;\
+ a0= pixels[0];\
+ b0= pixels[1] + 2;\
+ a0 += b0;\
+ b0 += pixels[2];\
+\
+ pixels+=line_size;\
+ for(i=0; i<h; i+=2){\
+ a1= pixels[0];\
+ b1= pixels[1];\
+ a1 += b1;\
+ b1 += pixels[2];\
+\
+ block[0]= (a1+a0)>>2; /* FIXME non put */\
+ block[1]= (b1+b0)>>2;\
+\
+ pixels+=line_size;\
+ block +=line_size;\
+\
+ a0= pixels[0];\
+ b0= pixels[1] + 2;\
+ a0 += b0;\
+ b0 += pixels[2];\
+\
+ block[0]= (a1+a0)>>2;\
+ block[1]= (b1+b0)>>2;\
+ pixels+=line_size;\
+ block +=line_size;\
+ }\
+}\
+\
+static inline void OPNAME ## _pixels4_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
+{\
+ int i;\
+ const uint32_t a= AV_RN32(pixels );\
+ const uint32_t b= AV_RN32(pixels+1);\
+ uint32_t l0= (a&0x03030303UL)\
+ + (b&0x03030303UL)\
+ + 0x02020202UL;\
+ uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
+ + ((b&0xFCFCFCFCUL)>>2);\
+ uint32_t l1,h1;\
+\
+ pixels+=line_size;\
+ for(i=0; i<h; i+=2){\
+ uint32_t a= AV_RN32(pixels );\
+ uint32_t b= AV_RN32(pixels+1);\
+ l1= (a&0x03030303UL)\
+ + (b&0x03030303UL);\
+ h1= ((a&0xFCFCFCFCUL)>>2)\
+ + ((b&0xFCFCFCFCUL)>>2);\
+ OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
+ pixels+=line_size;\
+ block +=line_size;\
+ a= AV_RN32(pixels );\
+ b= AV_RN32(pixels+1);\
+ l0= (a&0x03030303UL)\
+ + (b&0x03030303UL)\
+ + 0x02020202UL;\
+ h0= ((a&0xFCFCFCFCUL)>>2)\
+ + ((b&0xFCFCFCFCUL)>>2);\
+ OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
+ pixels+=line_size;\
+ block +=line_size;\
+ }\
+}\
+\
+static inline void OPNAME ## _pixels8_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
+{\
+ int j;\
+ for(j=0; j<2; j++){\
+ int i;\
+ const uint32_t a= AV_RN32(pixels );\
+ const uint32_t b= AV_RN32(pixels+1);\
+ uint32_t l0= (a&0x03030303UL)\
+ + (b&0x03030303UL)\
+ + 0x02020202UL;\
+ uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
+ + ((b&0xFCFCFCFCUL)>>2);\
+ uint32_t l1,h1;\
+\
+ pixels+=line_size;\
+ for(i=0; i<h; i+=2){\
+ uint32_t a= AV_RN32(pixels );\
+ uint32_t b= AV_RN32(pixels+1);\
+ l1= (a&0x03030303UL)\
+ + (b&0x03030303UL);\
+ h1= ((a&0xFCFCFCFCUL)>>2)\
+ + ((b&0xFCFCFCFCUL)>>2);\
+ OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
+ pixels+=line_size;\
+ block +=line_size;\
+ a= AV_RN32(pixels );\
+ b= AV_RN32(pixels+1);\
+ l0= (a&0x03030303UL)\
+ + (b&0x03030303UL)\
+ + 0x02020202UL;\
+ h0= ((a&0xFCFCFCFCUL)>>2)\
+ + ((b&0xFCFCFCFCUL)>>2);\
+ OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
+ pixels+=line_size;\
+ block +=line_size;\
+ }\
+ pixels+=4-line_size*(h+1);\
+ block +=4-line_size*h;\
+ }\
+}\
+\
+static inline void OPNAME ## _no_rnd_pixels8_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
+{\
+ int j;\
+ for(j=0; j<2; j++){\
+ int i;\
+ const uint32_t a= AV_RN32(pixels );\
+ const uint32_t b= AV_RN32(pixels+1);\
+ uint32_t l0= (a&0x03030303UL)\
+ + (b&0x03030303UL)\
+ + 0x01010101UL;\
+ uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
+ + ((b&0xFCFCFCFCUL)>>2);\
+ uint32_t l1,h1;\
+\
+ pixels+=line_size;\
+ for(i=0; i<h; i+=2){\
+ uint32_t a= AV_RN32(pixels );\
+ uint32_t b= AV_RN32(pixels+1);\
+ l1= (a&0x03030303UL)\
+ + (b&0x03030303UL);\
+ h1= ((a&0xFCFCFCFCUL)>>2)\
+ + ((b&0xFCFCFCFCUL)>>2);\
+ OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
+ pixels+=line_size;\
+ block +=line_size;\
+ a= AV_RN32(pixels );\
+ b= AV_RN32(pixels+1);\
+ l0= (a&0x03030303UL)\
+ + (b&0x03030303UL)\
+ + 0x01010101UL;\
+ h0= ((a&0xFCFCFCFCUL)>>2)\
+ + ((b&0xFCFCFCFCUL)>>2);\
+ OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
+ pixels+=line_size;\
+ block +=line_size;\
+ }\
+ pixels+=4-line_size*(h+1);\
+ block +=4-line_size*h;\
+ }\
+}\
+\
+CALL_2X_PIXELS(OPNAME ## _pixels16_c , OPNAME ## _pixels8_c , 8)\
+CALL_2X_PIXELS(OPNAME ## _pixels16_x2_c , OPNAME ## _pixels8_x2_c , 8)\
+CALL_2X_PIXELS(OPNAME ## _pixels16_y2_c , OPNAME ## _pixels8_y2_c , 8)\
+CALL_2X_PIXELS(OPNAME ## _pixels16_xy2_c, OPNAME ## _pixels8_xy2_c, 8)\
+CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_c , OPNAME ## _pixels8_c , 8)\
+CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_x2_c , OPNAME ## _no_rnd_pixels8_x2_c , 8)\
+CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_y2_c , OPNAME ## _no_rnd_pixels8_y2_c , 8)\
+CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_xy2_c, OPNAME ## _no_rnd_pixels8_xy2_c, 8)\
+
+#define op_avg(a, b) a = rnd_avg32(a, b)
+#endif
+#define op_put(a, b) a = b
+
+PIXOP2(avg, op_avg)
+PIXOP2(put, op_put)
+#undef op_avg
+#undef op_put
+
+#define avg2(a,b) ((a+b+1)>>1)
+#define avg4(a,b,c,d) ((a+b+c+d+2)>>2)
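+/* Illustrative check (standalone, not called anywhere) of the packed-byte
+ * average identity behind rnd_avg32/no_rnd_avg32 and op_avg above: per byte,
+ * (a|b) - (((a^b)&0xFE)>>1) == (a+b+1)>>1 and (a&b) + (((a^b)&0xFE)>>1) ==
+ * (a+b)>>1; the 0xFE mask keeps bits from crossing byte lanes when the same
+ * trick is applied to four or eight packed bytes at once. */
+static int check_packed_avg_identity(void)
+{
+ unsigned a, b;
+ for (a = 0; a < 256; a++) {
+ for (b = 0; b < 256; b++) {
+ unsigned rnd = (a | b) - (((a ^ b) & 0xFE) >> 1);
+ unsigned no_rnd = (a & b) + (((a ^ b) & 0xFE) >> 1);
+ if (rnd != ((a + b + 1) >> 1) || no_rnd != ((a + b) >> 1))
+ return 0;
+ }
+ }
+ return 1;
+}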
+
+static void put_no_rnd_pixels16_l2_c(uint8_t *dst, const uint8_t *a, const uint8_t *b, int stride, int h){
+ put_no_rnd_pixels16_l2(dst, a, b, stride, stride, stride, h);
+}
+
+static void put_no_rnd_pixels8_l2_c(uint8_t *dst, const uint8_t *a, const uint8_t *b, int stride, int h){
+ put_no_rnd_pixels8_l2(dst, a, b, stride, stride, stride, h);
+}
+
+static void gmc1_c(uint8_t *dst, uint8_t *src, int stride, int h, int x16, int y16, int rounder)
+{
+ const int A=(16-x16)*(16-y16);
+ const int B=( x16)*(16-y16);
+ const int C=(16-x16)*( y16);
+ const int D=( x16)*( y16);
+ int i;
+
+ for(i=0; i<h; i++)
+ {
+ dst[0]= (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + rounder)>>8;
+ dst[1]= (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + rounder)>>8;
+ dst[2]= (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + rounder)>>8;
+ dst[3]= (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + rounder)>>8;
+ dst[4]= (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5] + rounder)>>8;
+ dst[5]= (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6] + rounder)>>8;
+ dst[6]= (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7] + rounder)>>8;
+ dst[7]= (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8] + rounder)>>8;
+ dst+= stride;
+ src+= stride;
+ }
+}
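+/* Worked example (illustrative, hypothetical helper): for x16 = 4, y16 = 8
+ * the bilinear weights in gmc1_c are A = 12*8 = 96, B = 4*8 = 32,
+ * C = 12*8 = 96 and D = 4*8 = 32, which always sum to 256; that is why the
+ * weighted sum plus 'rounder' is shifted right by 8. One output pixel: */
+static uint8_t gmc1_one_pixel(uint8_t tl, uint8_t tr, uint8_t bl, uint8_t br,
+ int x16, int y16, int rounder)
+{
+ const int A = (16 - x16) * (16 - y16);
+ const int B = x16 * (16 - y16);
+ const int C = (16 - x16) * y16;
+ const int D = x16 * y16;
+ return (uint8_t)((A * tl + B * tr + C * bl + D * br + rounder) >> 8);
+}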
+
+void ff_gmc_c(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
+ int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height)
+{
+ int y, vx, vy;
+ const int s= 1<<shift;
+
+ width--;
+ height--;
+
+ for(y=0; y<h; y++){
+ int x;
+
+ vx= ox;
+ vy= oy;
+ for(x=0; x<8; x++){ //XXX FIXME optimize
+ int src_x, src_y, frac_x, frac_y, index;
+
+ src_x= vx>>16;
+ src_y= vy>>16;
+ frac_x= src_x&(s-1);
+ frac_y= src_y&(s-1);
+ src_x>>=shift;
+ src_y>>=shift;
+
+ if((unsigned)src_x < width){
+ if((unsigned)src_y < height){
+ index= src_x + src_y*stride;
+ dst[y*stride + x]= ( ( src[index ]*(s-frac_x)
+ + src[index +1]* frac_x )*(s-frac_y)
+ + ( src[index+stride ]*(s-frac_x)
+ + src[index+stride+1]* frac_x )* frac_y
+ + r)>>(shift*2);
+ }else{
+ index= src_x + av_clip(src_y, 0, height)*stride;
+ dst[y*stride + x]= ( ( src[index ]*(s-frac_x)
+ + src[index +1]* frac_x )*s
+ + r)>>(shift*2);
+ }
+ }else{
+ if((unsigned)src_y < height){
+ index= av_clip(src_x, 0, width) + src_y*stride;
+ dst[y*stride + x]= ( ( src[index ]*(s-frac_y)
+ + src[index+stride ]* frac_y )*s
+ + r)>>(shift*2);
+ }else{
+ index= av_clip(src_x, 0, width) + av_clip(src_y, 0, height)*stride;
+ dst[y*stride + x]= src[index ];
+ }
+ }
+
+ vx+= dxx;
+ vy+= dyx;
+ }
+ ox += dxy;
+ oy += dyy;
+ }
+}
+
+static inline void put_tpel_pixels_mc00_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
+ switch(width){
+ case 2: put_pixels2_c (dst, src, stride, height); break;
+ case 4: put_pixels4_c (dst, src, stride, height); break;
+ case 8: put_pixels8_c (dst, src, stride, height); break;
+ case 16:put_pixels16_c(dst, src, stride, height); break;
+ }
+}
+
+static inline void put_tpel_pixels_mc10_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
+ int i,j;
+ for (i=0; i < height; i++) {
+ for (j=0; j < width; j++) {
+ dst[j] = (683*(2*src[j] + src[j+1] + 1)) >> 11;
+ }
+ src += stride;
+ dst += stride;
+ }
+}
+
+static inline void put_tpel_pixels_mc20_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
+ int i,j;
+ for (i=0; i < height; i++) {
+ for (j=0; j < width; j++) {
+ dst[j] = (683*(src[j] + 2*src[j+1] + 1)) >> 11;
+ }
+ src += stride;
+ dst += stride;
+ }
+}
+
+static inline void put_tpel_pixels_mc01_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
+ int i,j;
+ for (i=0; i < height; i++) {
+ for (j=0; j < width; j++) {
+ dst[j] = (683*(2*src[j] + src[j+stride] + 1)) >> 11;
+ }
+ src += stride;
+ dst += stride;
+ }
+}
+
+static inline void put_tpel_pixels_mc11_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
+ int i,j;
+ for (i=0; i < height; i++) {
+ for (j=0; j < width; j++) {
+ dst[j] = (2731*(4*src[j] + 3*src[j+1] + 3*src[j+stride] + 2*src[j+stride+1] + 6)) >> 15;
+ }
+ src += stride;
+ dst += stride;
+ }
+}
+
+static inline void put_tpel_pixels_mc12_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
+ int i,j;
+ for (i=0; i < height; i++) {
+ for (j=0; j < width; j++) {
+ dst[j] = (2731*(3*src[j] + 2*src[j+1] + 4*src[j+stride] + 3*src[j+stride+1] + 6)) >> 15;
+ }
+ src += stride;
+ dst += stride;
+ }
+}
+
+static inline void put_tpel_pixels_mc02_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
+ int i,j;
+ for (i=0; i < height; i++) {
+ for (j=0; j < width; j++) {
+ dst[j] = (683*(src[j] + 2*src[j+stride] + 1)) >> 11;
+ }
+ src += stride;
+ dst += stride;
+ }
+}
+
+static inline void put_tpel_pixels_mc21_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
+ int i,j;
+ for (i=0; i < height; i++) {
+ for (j=0; j < width; j++) {
+ dst[j] = (2731*(3*src[j] + 4*src[j+1] + 2*src[j+stride] + 3*src[j+stride+1] + 6)) >> 15;
+ }
+ src += stride;
+ dst += stride;
+ }
+}
+
+static inline void put_tpel_pixels_mc22_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
+ int i,j;
+ for (i=0; i < height; i++) {
+ for (j=0; j < width; j++) {
+ dst[j] = (2731*(2*src[j] + 3*src[j+1] + 3*src[j+stride] + 4*src[j+stride+1] + 6)) >> 15;
+ }
+ src += stride;
+ dst += stride;
+ }
+}
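+/* Illustrative note (hypothetical helpers, not used above): the constants in
+ * the tpel functions are fixed-point reciprocals, 683/2048 ~= 1/3 and
+ * 2731/32768 ~= 1/12, so shifting by 11 or 15 divides weighted sums whose
+ * weights total 3 or 12, with rounding built into the added constant. */
+static int tpel_third(int a, int b) /* ~ (2*a + b) / 3, rounded */
+{
+ return (683 * (2 * a + b + 1)) >> 11;
+}
+static int tpel_twelfth(int p00, int p10, int p01, int p11) /* mc11 weights */
+{
+ return (2731 * (4 * p00 + 3 * p10 + 3 * p01 + 2 * p11 + 6)) >> 15;
+}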
+
+static inline void avg_tpel_pixels_mc00_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
+ switch(width){
+ case 2: avg_pixels2_c (dst, src, stride, height); break;
+ case 4: avg_pixels4_c (dst, src, stride, height); break;
+ case 8: avg_pixels8_c (dst, src, stride, height); break;
+ case 16:avg_pixels16_c(dst, src, stride, height); break;
+ }
+}
+
+static inline void avg_tpel_pixels_mc10_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
+ int i,j;
+ for (i=0; i < height; i++) {
+ for (j=0; j < width; j++) {
+ dst[j] = (dst[j] + ((683*(2*src[j] + src[j+1] + 1)) >> 11) + 1) >> 1;
+ }
+ src += stride;
+ dst += stride;
+ }
+}
+
+static inline void avg_tpel_pixels_mc20_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
+ int i,j;
+ for (i=0; i < height; i++) {
+ for (j=0; j < width; j++) {
+ dst[j] = (dst[j] + ((683*(src[j] + 2*src[j+1] + 1)) >> 11) + 1) >> 1;
+ }
+ src += stride;
+ dst += stride;
+ }
+}
+
+static inline void avg_tpel_pixels_mc01_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
+ int i,j;
+ for (i=0; i < height; i++) {
+ for (j=0; j < width; j++) {
+ dst[j] = (dst[j] + ((683*(2*src[j] + src[j+stride] + 1)) >> 11) + 1) >> 1;
+ }
+ src += stride;
+ dst += stride;
+ }
+}
+
+static inline void avg_tpel_pixels_mc11_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
+ int i,j;
+ for (i=0; i < height; i++) {
+ for (j=0; j < width; j++) {
+ dst[j] = (dst[j] + ((2731*(4*src[j] + 3*src[j+1] + 3*src[j+stride] + 2*src[j+stride+1] + 6)) >> 15) + 1) >> 1;
+ }
+ src += stride;
+ dst += stride;
+ }
+}
+
+static inline void avg_tpel_pixels_mc12_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
+ int i,j;
+ for (i=0; i < height; i++) {
+ for (j=0; j < width; j++) {
+ dst[j] = (dst[j] + ((2731*(3*src[j] + 2*src[j+1] + 4*src[j+stride] + 3*src[j+stride+1] + 6)) >> 15) + 1) >> 1;
+ }
+ src += stride;
+ dst += stride;
+ }
+}
+
+static inline void avg_tpel_pixels_mc02_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
+ int i,j;
+ for (i=0; i < height; i++) {
+ for (j=0; j < width; j++) {
+ dst[j] = (dst[j] + ((683*(src[j] + 2*src[j+stride] + 1)) >> 11) + 1) >> 1;
+ }
+ src += stride;
+ dst += stride;
+ }
+}
+
+static inline void avg_tpel_pixels_mc21_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
+ int i,j;
+ for (i=0; i < height; i++) {
+ for (j=0; j < width; j++) {
+ dst[j] = (dst[j] + ((2731*(3*src[j] + 4*src[j+1] + 2*src[j+stride] + 3*src[j+stride+1] + 6)) >> 15) + 1) >> 1;
+ }
+ src += stride;
+ dst += stride;
+ }
+}
+
+static inline void avg_tpel_pixels_mc22_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
+ int i,j;
+ for (i=0; i < height; i++) {
+ for (j=0; j < width; j++) {
+ dst[j] = (dst[j] + ((2731*(2*src[j] + 3*src[j+1] + 3*src[j+stride] + 4*src[j+stride+1] + 6)) >> 15) + 1) >> 1;
+ }
+ src += stride;
+ dst += stride;
+ }
+}
+#if 0
+#define TPEL_WIDTH(width)\
+static void put_tpel_pixels ## width ## _mc00_c(uint8_t *dst, const uint8_t *src, int stride, int height){\
+ put_tpel_pixels_mc00_c(dst, src, stride, width, height);}\
+static void put_tpel_pixels ## width ## _mc10_c(uint8_t *dst, const uint8_t *src, int stride, int height){\
+ put_tpel_pixels_mc10_c(dst, src, stride, width, height);}\
+static void put_tpel_pixels ## width ## _mc20_c(uint8_t *dst, const uint8_t *src, int stride, int height){\
+ put_tpel_pixels_mc20_c(dst, src, stride, width, height);}\
+static void put_tpel_pixels ## width ## _mc01_c(uint8_t *dst, const uint8_t *src, int stride, int height){\
+ put_tpel_pixels_mc01_c(dst, src, stride, width, height);}\
+static void put_tpel_pixels ## width ## _mc11_c(uint8_t *dst, const uint8_t *src, int stride, int height){\
+ put_tpel_pixels_mc11_c(dst, src, stride, width, height);}\
+static void put_tpel_pixels ## width ## _mc21_c(uint8_t *dst, const uint8_t *src, int stride, int height){\
+ put_tpel_pixels_mc21_c(dst, src, stride, width, height);}\
+static void put_tpel_pixels ## width ## _mc02_c(uint8_t *dst, const uint8_t *src, int stride, int height){\
+ put_tpel_pixels_mc02_c(dst, src, stride, width, height);}\
+static void put_tpel_pixels ## width ## _mc12_c(uint8_t *dst, const uint8_t *src, int stride, int height){\
+ put_tpel_pixels_mc12_c(dst, src, stride, width, height);}\
+static void put_tpel_pixels ## width ## _mc22_c(uint8_t *dst, const uint8_t *src, int stride, int height){\
+ put_tpel_pixels_mc22_c(dst, src, stride, width, height);}
+#endif
+
+#define H264_CHROMA_MC(OPNAME, OP)\
+static void OPNAME ## h264_chroma_mc2_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
+ const int A=(8-x)*(8-y);\
+ const int B=( x)*(8-y);\
+ const int C=(8-x)*( y);\
+ const int D=( x)*( y);\
+ int i;\
+ \
+ assert(x<8 && y<8 && x>=0 && y>=0);\
+\
+ if(D){\
+ for(i=0; i<h; i++){\
+ OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
+ OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
+ dst+= stride;\
+ src+= stride;\
+ }\
+ }else{\
+ const int E= B+C;\
+ const int step= C ? stride : 1;\
+ for(i=0; i<h; i++){\
+ OP(dst[0], (A*src[0] + E*src[step+0]));\
+ OP(dst[1], (A*src[1] + E*src[step+1]));\
+ dst+= stride;\
+ src+= stride;\
+ }\
+ }\
+}\
+\
+static void OPNAME ## h264_chroma_mc4_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
+ const int A=(8-x)*(8-y);\
+ const int B=( x)*(8-y);\
+ const int C=(8-x)*( y);\
+ const int D=( x)*( y);\
+ int i;\
+ \
+ assert(x<8 && y<8 && x>=0 && y>=0);\
+\
+ if(D){\
+ for(i=0; i<h; i++){\
+ OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
+ OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
+ OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3]));\
+ OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4]));\
+ dst+= stride;\
+ src+= stride;\
+ }\
+ }else{\
+ const int E= B+C;\
+ const int step= C ? stride : 1;\
+ for(i=0; i<h; i++){\
+ OP(dst[0], (A*src[0] + E*src[step+0]));\
+ OP(dst[1], (A*src[1] + E*src[step+1]));\
+ OP(dst[2], (A*src[2] + E*src[step+2]));\
+ OP(dst[3], (A*src[3] + E*src[step+3]));\
+ dst+= stride;\
+ src+= stride;\
+ }\
+ }\
+}\
+\
+static void OPNAME ## h264_chroma_mc8_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
+ const int A=(8-x)*(8-y);\
+ const int B=( x)*(8-y);\
+ const int C=(8-x)*( y);\
+ const int D=( x)*( y);\
+ int i;\
+ \
+ assert(x<8 && y<8 && x>=0 && y>=0);\
+\
+ if(D){\
+ for(i=0; i<h; i++){\
+ OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
+ OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
+ OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3]));\
+ OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4]));\
+ OP(dst[4], (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5]));\
+ OP(dst[5], (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6]));\
+ OP(dst[6], (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7]));\
+ OP(dst[7], (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8]));\
+ dst+= stride;\
+ src+= stride;\
+ }\
+ }else{\
+ const int E= B+C;\
+ const int step= C ? stride : 1;\
+ for(i=0; i<h; i++){\
+ OP(dst[0], (A*src[0] + E*src[step+0]));\
+ OP(dst[1], (A*src[1] + E*src[step+1]));\
+ OP(dst[2], (A*src[2] + E*src[step+2]));\
+ OP(dst[3], (A*src[3] + E*src[step+3]));\
+ OP(dst[4], (A*src[4] + E*src[step+4]));\
+ OP(dst[5], (A*src[5] + E*src[step+5]));\
+ OP(dst[6], (A*src[6] + E*src[step+6]));\
+ OP(dst[7], (A*src[7] + E*src[step+7]));\
+ dst+= stride;\
+ src+= stride;\
+ }\
+ }\
+}
+
+#define op_avg(a, b) a = (((a)+(((b) + 32)>>6)+1)>>1)
+#define op_put(a, b) a = (((b) + 32)>>6)
+
+H264_CHROMA_MC(put_ , op_put)
+H264_CHROMA_MC(avg_ , op_avg)
+#undef op_avg
+#undef op_put
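+/* Illustrative sketch (hypothetical helper): one H.264 chroma sample from the
+ * macro above. The weights (8-x)(8-y), x(8-y), (8-x)y and xy always sum to
+ * 64, so op_put adds 32 and shifts right by 6 to round to nearest. */
+static uint8_t h264_chroma_one_sample(uint8_t tl, uint8_t tr, uint8_t bl,
+ uint8_t br, int x, int y)
+{
+ const int A = (8 - x) * (8 - y), B = x * (8 - y);
+ const int C = (8 - x) * y, D = x * y;
+ return (uint8_t)((A * tl + B * tr + C * bl + D * br + 32) >> 6);
+}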
+
+static void put_no_rnd_vc1_chroma_mc8_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){
+ const int A=(8-x)*(8-y);
+ const int B=( x)*(8-y);
+ const int C=(8-x)*( y);
+ const int D=( x)*( y);
+ int i;
+
+ assert(x<8 && y<8 && x>=0 && y>=0);
+
+ for(i=0; i<h; i++)
+ {
+ dst[0] = (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + 32 - 4) >> 6;
+ dst[1] = (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + 32 - 4) >> 6;
+ dst[2] = (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + 32 - 4) >> 6;
+ dst[3] = (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + 32 - 4) >> 6;
+ dst[4] = (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5] + 32 - 4) >> 6;
+ dst[5] = (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6] + 32 - 4) >> 6;
+ dst[6] = (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7] + 32 - 4) >> 6;
+ dst[7] = (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8] + 32 - 4) >> 6;
+ dst+= stride;
+ src+= stride;
+ }
+}
+
+static void avg_no_rnd_vc1_chroma_mc8_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){
+ const int A=(8-x)*(8-y);
+ const int B=( x)*(8-y);
+ const int C=(8-x)*( y);
+ const int D=( x)*( y);
+ int i;
+
+ assert(x<8 && y<8 && x>=0 && y>=0);
+
+ for(i=0; i<h; i++)
+ {
+ dst[0] = avg2(dst[0], ((A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + 32 - 4) >> 6));
+ dst[1] = avg2(dst[1], ((A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + 32 - 4) >> 6));
+ dst[2] = avg2(dst[2], ((A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + 32 - 4) >> 6));
+ dst[3] = avg2(dst[3], ((A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + 32 - 4) >> 6));
+ dst[4] = avg2(dst[4], ((A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5] + 32 - 4) >> 6));
+ dst[5] = avg2(dst[5], ((A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6] + 32 - 4) >> 6));
+ dst[6] = avg2(dst[6], ((A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7] + 32 - 4) >> 6));
+ dst[7] = avg2(dst[7], ((A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8] + 32 - 4) >> 6));
+ dst+= stride;
+ src+= stride;
+ }
+}
+
+#define QPEL_MC(r, OPNAME, RND, OP) \
+static void OPNAME ## mpeg4_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
+ int i;\
+ for(i=0; i<h; i++)\
+ {\
+ OP(dst[0], (src[0]+src[1])*20 - (src[0]+src[2])*6 + (src[1]+src[3])*3 - (src[2]+src[4]));\
+ OP(dst[1], (src[1]+src[2])*20 - (src[0]+src[3])*6 + (src[0]+src[4])*3 - (src[1]+src[5]));\
+ OP(dst[2], (src[2]+src[3])*20 - (src[1]+src[4])*6 + (src[0]+src[5])*3 - (src[0]+src[6]));\
+ OP(dst[3], (src[3]+src[4])*20 - (src[2]+src[5])*6 + (src[1]+src[6])*3 - (src[0]+src[7]));\
+ OP(dst[4], (src[4]+src[5])*20 - (src[3]+src[6])*6 + (src[2]+src[7])*3 - (src[1]+src[8]));\
+ OP(dst[5], (src[5]+src[6])*20 - (src[4]+src[7])*6 + (src[3]+src[8])*3 - (src[2]+src[8]));\
+ OP(dst[6], (src[6]+src[7])*20 - (src[5]+src[8])*6 + (src[4]+src[8])*3 - (src[3]+src[7]));\
+ OP(dst[7], (src[7]+src[8])*20 - (src[6]+src[8])*6 + (src[5]+src[7])*3 - (src[4]+src[6]));\
+ dst+=dstStride;\
+ src+=srcStride;\
+ }\
+}\
+\
+static void OPNAME ## mpeg4_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ const int w=8;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
+ int i;\
+ for(i=0; i<w; i++)\
+ {\
+ const int src0= src[0*srcStride];\
+ const int src1= src[1*srcStride];\
+ const int src2= src[2*srcStride];\
+ const int src3= src[3*srcStride];\
+ const int src4= src[4*srcStride];\
+ const int src5= src[5*srcStride];\
+ const int src6= src[6*srcStride];\
+ const int src7= src[7*srcStride];\
+ const int src8= src[8*srcStride];\
+ OP(dst[0*dstStride], (src0+src1)*20 - (src0+src2)*6 + (src1+src3)*3 - (src2+src4));\
+ OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*6 + (src0+src4)*3 - (src1+src5));\
+ OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*6 + (src0+src5)*3 - (src0+src6));\
+ OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*6 + (src1+src6)*3 - (src0+src7));\
+ OP(dst[4*dstStride], (src4+src5)*20 - (src3+src6)*6 + (src2+src7)*3 - (src1+src8));\
+ OP(dst[5*dstStride], (src5+src6)*20 - (src4+src7)*6 + (src3+src8)*3 - (src2+src8));\
+ OP(dst[6*dstStride], (src6+src7)*20 - (src5+src8)*6 + (src4+src8)*3 - (src3+src7));\
+ OP(dst[7*dstStride], (src7+src8)*20 - (src6+src8)*6 + (src5+src7)*3 - (src4+src6));\
+ dst++;\
+ src++;\
+ }\
+}\
+\
+static void OPNAME ## mpeg4_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
+ int i;\
+ \
+ for(i=0; i<h; i++)\
+ {\
+ OP(dst[ 0], (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]));\
+ OP(dst[ 1], (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]));\
+ OP(dst[ 2], (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]));\
+ OP(dst[ 3], (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]));\
+ OP(dst[ 4], (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]));\
+ OP(dst[ 5], (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]));\
+ OP(dst[ 6], (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]));\
+ OP(dst[ 7], (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]));\
+ OP(dst[ 8], (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]));\
+ OP(dst[ 9], (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]));\
+ OP(dst[10], (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]));\
+ OP(dst[11], (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]));\
+ OP(dst[12], (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]));\
+ OP(dst[13], (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]));\
+ OP(dst[14], (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]));\
+ OP(dst[15], (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]));\
+ dst+=dstStride;\
+ src+=srcStride;\
+ }\
+}\
+\
+static void OPNAME ## mpeg4_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
+ int i;\
+ const int w=16;\
+ for(i=0; i<w; i++)\
+ {\
+ const int src0= src[0*srcStride];\
+ const int src1= src[1*srcStride];\
+ const int src2= src[2*srcStride];\
+ const int src3= src[3*srcStride];\
+ const int src4= src[4*srcStride];\
+ const int src5= src[5*srcStride];\
+ const int src6= src[6*srcStride];\
+ const int src7= src[7*srcStride];\
+ const int src8= src[8*srcStride];\
+ const int src9= src[9*srcStride];\
+ const int src10= src[10*srcStride];\
+ const int src11= src[11*srcStride];\
+ const int src12= src[12*srcStride];\
+ const int src13= src[13*srcStride];\
+ const int src14= src[14*srcStride];\
+ const int src15= src[15*srcStride];\
+ const int src16= src[16*srcStride];\
+ OP(dst[ 0*dstStride], (src0 +src1 )*20 - (src0 +src2 )*6 + (src1 +src3 )*3 - (src2 +src4 ));\
+ OP(dst[ 1*dstStride], (src1 +src2 )*20 - (src0 +src3 )*6 + (src0 +src4 )*3 - (src1 +src5 ));\
+ OP(dst[ 2*dstStride], (src2 +src3 )*20 - (src1 +src4 )*6 + (src0 +src5 )*3 - (src0 +src6 ));\
+ OP(dst[ 3*dstStride], (src3 +src4 )*20 - (src2 +src5 )*6 + (src1 +src6 )*3 - (src0 +src7 ));\
+ OP(dst[ 4*dstStride], (src4 +src5 )*20 - (src3 +src6 )*6 + (src2 +src7 )*3 - (src1 +src8 ));\
+ OP(dst[ 5*dstStride], (src5 +src6 )*20 - (src4 +src7 )*6 + (src3 +src8 )*3 - (src2 +src9 ));\
+ OP(dst[ 6*dstStride], (src6 +src7 )*20 - (src5 +src8 )*6 + (src4 +src9 )*3 - (src3 +src10));\
+ OP(dst[ 7*dstStride], (src7 +src8 )*20 - (src6 +src9 )*6 + (src5 +src10)*3 - (src4 +src11));\
+ OP(dst[ 8*dstStride], (src8 +src9 )*20 - (src7 +src10)*6 + (src6 +src11)*3 - (src5 +src12));\
+ OP(dst[ 9*dstStride], (src9 +src10)*20 - (src8 +src11)*6 + (src7 +src12)*3 - (src6 +src13));\
+ OP(dst[10*dstStride], (src10+src11)*20 - (src9 +src12)*6 + (src8 +src13)*3 - (src7 +src14));\
+ OP(dst[11*dstStride], (src11+src12)*20 - (src10+src13)*6 + (src9 +src14)*3 - (src8 +src15));\
+ OP(dst[12*dstStride], (src12+src13)*20 - (src11+src14)*6 + (src10+src15)*3 - (src9 +src16));\
+ OP(dst[13*dstStride], (src13+src14)*20 - (src12+src15)*6 + (src11+src16)*3 - (src10+src16));\
+ OP(dst[14*dstStride], (src14+src15)*20 - (src13+src16)*6 + (src12+src16)*3 - (src11+src15));\
+ OP(dst[15*dstStride], (src15+src16)*20 - (src14+src16)*6 + (src13+src15)*3 - (src12+src14));\
+ dst++;\
+ src++;\
+ }\
+}\
+\
+static void OPNAME ## qpel8_mc00_c (uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## pixels8_c(dst, src, stride, 8);\
+}\
+\
+static void OPNAME ## qpel8_mc10_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t half[64];\
+ put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8);\
+ OPNAME ## pixels8_l2(dst, src, half, stride, stride, 8, 8);\
+}\
+\
+static void OPNAME ## qpel8_mc20_c(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## mpeg4_qpel8_h_lowpass(dst, src, stride, stride, 8);\
+}\
+\
+static void OPNAME ## qpel8_mc30_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t half[64];\
+ put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8);\
+ OPNAME ## pixels8_l2(dst, src+1, half, stride, stride, 8, 8);\
+}\
+\
+static void OPNAME ## qpel8_mc01_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[16*9];\
+ uint8_t half[64];\
+ copy_block9(full, src, 16, stride, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16);\
+ OPNAME ## pixels8_l2(dst, full, half, stride, 16, 8, 8);\
+}\
+\
+static void OPNAME ## qpel8_mc02_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[16*9];\
+ copy_block9(full, src, 16, stride, 9);\
+ OPNAME ## mpeg4_qpel8_v_lowpass(dst, full, stride, 16);\
+}\
+\
+static void OPNAME ## qpel8_mc03_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[16*9];\
+ uint8_t half[64];\
+ copy_block9(full, src, 16, stride, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16);\
+ OPNAME ## pixels8_l2(dst, full+16, half, stride, 16, 8, 8);\
+}\
+void ff_ ## OPNAME ## qpel8_mc11_old_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[16*9];\
+ uint8_t halfH[72];\
+ uint8_t halfV[64];\
+ uint8_t halfHV[64];\
+ copy_block9(full, src, 16, stride, 9);\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
+ OPNAME ## pixels8_l4(dst, full, halfH, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
+}\
+static void OPNAME ## qpel8_mc11_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[16*9];\
+ uint8_t halfH[72];\
+ uint8_t halfHV[64];\
+ copy_block9(full, src, 16, stride, 9);\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
+ put ## RND ## pixels8_l2(halfH, halfH, full, 8, 8, 16, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
+ OPNAME ## pixels8_l2(dst, halfH, halfHV, stride, 8, 8, 8);\
+}\
+void ff_ ## OPNAME ## qpel8_mc31_old_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[16*9];\
+ uint8_t halfH[72];\
+ uint8_t halfV[64];\
+ uint8_t halfHV[64];\
+ copy_block9(full, src, 16, stride, 9);\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
+ OPNAME ## pixels8_l4(dst, full+1, halfH, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
+}\
+static void OPNAME ## qpel8_mc31_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[16*9];\
+ uint8_t halfH[72];\
+ uint8_t halfHV[64];\
+ copy_block9(full, src, 16, stride, 9);\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
+ put ## RND ## pixels8_l2(halfH, halfH, full+1, 8, 8, 16, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
+ OPNAME ## pixels8_l2(dst, halfH, halfHV, stride, 8, 8, 8);\
+}\
+void ff_ ## OPNAME ## qpel8_mc13_old_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[16*9];\
+ uint8_t halfH[72];\
+ uint8_t halfV[64];\
+ uint8_t halfHV[64];\
+ copy_block9(full, src, 16, stride, 9);\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
+ OPNAME ## pixels8_l4(dst, full+16, halfH+8, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
+}\
+static void OPNAME ## qpel8_mc13_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[16*9];\
+ uint8_t halfH[72];\
+ uint8_t halfHV[64];\
+ copy_block9(full, src, 16, stride, 9);\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
+ put ## RND ## pixels8_l2(halfH, halfH, full, 8, 8, 16, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
+ OPNAME ## pixels8_l2(dst, halfH+8, halfHV, stride, 8, 8, 8);\
+}\
+void ff_ ## OPNAME ## qpel8_mc33_old_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[16*9];\
+ uint8_t halfH[72];\
+ uint8_t halfV[64];\
+ uint8_t halfHV[64];\
+ copy_block9(full, src, 16, stride, 9);\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full , 8, 16, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
+ OPNAME ## pixels8_l4(dst, full+17, halfH+8, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
+}\
+static void OPNAME ## qpel8_mc33_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[16*9];\
+ uint8_t halfH[72];\
+ uint8_t halfHV[64];\
+ copy_block9(full, src, 16, stride, 9);\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
+ put ## RND ## pixels8_l2(halfH, halfH, full+1, 8, 8, 16, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
+ OPNAME ## pixels8_l2(dst, halfH+8, halfHV, stride, 8, 8, 8);\
+}\
+static void OPNAME ## qpel8_mc21_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t halfH[72];\
+ uint8_t halfHV[64];\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
+ OPNAME ## pixels8_l2(dst, halfH, halfHV, stride, 8, 8, 8);\
+}\
+static void OPNAME ## qpel8_mc23_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t halfH[72];\
+ uint8_t halfHV[64];\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
+ OPNAME ## pixels8_l2(dst, halfH+8, halfHV, stride, 8, 8, 8);\
+}\
+void ff_ ## OPNAME ## qpel8_mc12_old_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[16*9];\
+ uint8_t halfH[72];\
+ uint8_t halfV[64];\
+ uint8_t halfHV[64];\
+ copy_block9(full, src, 16, stride, 9);\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
+ OPNAME ## pixels8_l2(dst, halfV, halfHV, stride, 8, 8, 8);\
+}\
+static void OPNAME ## qpel8_mc12_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[16*9];\
+ uint8_t halfH[72];\
+ copy_block9(full, src, 16, stride, 9);\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
+ put ## RND ## pixels8_l2(halfH, halfH, full, 8, 8, 16, 9);\
+ OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
+}\
+void ff_ ## OPNAME ## qpel8_mc32_old_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[16*9];\
+ uint8_t halfH[72];\
+ uint8_t halfV[64];\
+ uint8_t halfHV[64];\
+ copy_block9(full, src, 16, stride, 9);\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
+ OPNAME ## pixels8_l2(dst, halfV, halfHV, stride, 8, 8, 8);\
+}\
+static void OPNAME ## qpel8_mc32_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[16*9];\
+ uint8_t halfH[72];\
+ copy_block9(full, src, 16, stride, 9);\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
+ put ## RND ## pixels8_l2(halfH, halfH, full+1, 8, 8, 16, 9);\
+ OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
+}\
+static void OPNAME ## qpel8_mc22_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t halfH[72];\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
+ OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
+}\
+static void OPNAME ## qpel16_mc00_c (uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## pixels16_c(dst, src, stride, 16);\
+}\
+\
+static void OPNAME ## qpel16_mc10_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t half[256];\
+ put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16);\
+ OPNAME ## pixels16_l2(dst, src, half, stride, stride, 16, 16);\
+}\
+\
+static void OPNAME ## qpel16_mc20_c(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## mpeg4_qpel16_h_lowpass(dst, src, stride, stride, 16);\
+}\
+\
+static void OPNAME ## qpel16_mc30_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t half[256];\
+ put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16);\
+ OPNAME ## pixels16_l2(dst, src+1, half, stride, stride, 16, 16);\
+}\
+\
+static void OPNAME ## qpel16_mc01_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[24*17];\
+ uint8_t half[256];\
+ copy_block17(full, src, 24, stride, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24);\
+ OPNAME ## pixels16_l2(dst, full, half, stride, 24, 16, 16);\
+}\
+\
+static void OPNAME ## qpel16_mc02_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[24*17];\
+ copy_block17(full, src, 24, stride, 17);\
+ OPNAME ## mpeg4_qpel16_v_lowpass(dst, full, stride, 24);\
+}\
+\
+static void OPNAME ## qpel16_mc03_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[24*17];\
+ uint8_t half[256];\
+ copy_block17(full, src, 24, stride, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24);\
+ OPNAME ## pixels16_l2(dst, full+24, half, stride, 24, 16, 16);\
+}\
+void ff_ ## OPNAME ## qpel16_mc11_old_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[24*17];\
+ uint8_t halfH[272];\
+ uint8_t halfV[256];\
+ uint8_t halfHV[256];\
+ copy_block17(full, src, 24, stride, 17);\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
+ OPNAME ## pixels16_l4(dst, full, halfH, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
+}\
+static void OPNAME ## qpel16_mc11_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[24*17];\
+ uint8_t halfH[272];\
+ uint8_t halfHV[256];\
+ copy_block17(full, src, 24, stride, 17);\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
+ put ## RND ## pixels16_l2(halfH, halfH, full, 16, 16, 24, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
+ OPNAME ## pixels16_l2(dst, halfH, halfHV, stride, 16, 16, 16);\
+}\
+void ff_ ## OPNAME ## qpel16_mc31_old_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[24*17];\
+ uint8_t halfH[272];\
+ uint8_t halfV[256];\
+ uint8_t halfHV[256];\
+ copy_block17(full, src, 24, stride, 17);\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
+ OPNAME ## pixels16_l4(dst, full+1, halfH, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
+}\
+static void OPNAME ## qpel16_mc31_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[24*17];\
+ uint8_t halfH[272];\
+ uint8_t halfHV[256];\
+ copy_block17(full, src, 24, stride, 17);\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
+ put ## RND ## pixels16_l2(halfH, halfH, full+1, 16, 16, 24, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
+ OPNAME ## pixels16_l2(dst, halfH, halfHV, stride, 16, 16, 16);\
+}\
+void ff_ ## OPNAME ## qpel16_mc13_old_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[24*17];\
+ uint8_t halfH[272];\
+ uint8_t halfV[256];\
+ uint8_t halfHV[256];\
+ copy_block17(full, src, 24, stride, 17);\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
+ OPNAME ## pixels16_l4(dst, full+24, halfH+16, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
+}\
+static void OPNAME ## qpel16_mc13_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[24*17];\
+ uint8_t halfH[272];\
+ uint8_t halfHV[256];\
+ copy_block17(full, src, 24, stride, 17);\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
+ put ## RND ## pixels16_l2(halfH, halfH, full, 16, 16, 24, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
+ OPNAME ## pixels16_l2(dst, halfH+16, halfHV, stride, 16, 16, 16);\
+}\
+void ff_ ## OPNAME ## qpel16_mc33_old_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[24*17];\
+ uint8_t halfH[272];\
+ uint8_t halfV[256];\
+ uint8_t halfHV[256];\
+ copy_block17(full, src, 24, stride, 17);\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full , 16, 24, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
+ OPNAME ## pixels16_l4(dst, full+25, halfH+16, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
+}\
+static void OPNAME ## qpel16_mc33_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[24*17];\
+ uint8_t halfH[272];\
+ uint8_t halfHV[256];\
+ copy_block17(full, src, 24, stride, 17);\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
+ put ## RND ## pixels16_l2(halfH, halfH, full+1, 16, 16, 24, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
+ OPNAME ## pixels16_l2(dst, halfH+16, halfHV, stride, 16, 16, 16);\
+}\
+static void OPNAME ## qpel16_mc21_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t halfH[272];\
+ uint8_t halfHV[256];\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
+ OPNAME ## pixels16_l2(dst, halfH, halfHV, stride, 16, 16, 16);\
+}\
+static void OPNAME ## qpel16_mc23_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t halfH[272];\
+ uint8_t halfHV[256];\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
+ OPNAME ## pixels16_l2(dst, halfH+16, halfHV, stride, 16, 16, 16);\
+}\
+void ff_ ## OPNAME ## qpel16_mc12_old_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[24*17];\
+ uint8_t halfH[272];\
+ uint8_t halfV[256];\
+ uint8_t halfHV[256];\
+ copy_block17(full, src, 24, stride, 17);\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
+ OPNAME ## pixels16_l2(dst, halfV, halfHV, stride, 16, 16, 16);\
+}\
+static void OPNAME ## qpel16_mc12_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[24*17];\
+ uint8_t halfH[272];\
+ copy_block17(full, src, 24, stride, 17);\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
+ put ## RND ## pixels16_l2(halfH, halfH, full, 16, 16, 24, 17);\
+ OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
+}\
+void ff_ ## OPNAME ## qpel16_mc32_old_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[24*17];\
+ uint8_t halfH[272];\
+ uint8_t halfV[256];\
+ uint8_t halfHV[256];\
+ copy_block17(full, src, 24, stride, 17);\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
+ OPNAME ## pixels16_l2(dst, halfV, halfHV, stride, 16, 16, 16);\
+}\
+static void OPNAME ## qpel16_mc32_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[24*17];\
+ uint8_t halfH[272];\
+ copy_block17(full, src, 24, stride, 17);\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
+ put ## RND ## pixels16_l2(halfH, halfH, full+1, 16, 16, 24, 17);\
+ OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
+}\
+static void OPNAME ## qpel16_mc22_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t halfH[272];\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
+ OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
+}
+
+#define op_avg(a, b) a = (((a)+cm[((b) + 16)>>5]+1)>>1)
+#define op_avg_no_rnd(a, b) a = (((a)+cm[((b) + 15)>>5])>>1)
+#define op_put(a, b) a = cm[((b) + 16)>>5]
+#define op_put_no_rnd(a, b) a = cm[((b) + 15)>>5]
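+/* Note added for exposition (not part of the upstream source): these op_*
+ * macros fold the MPEG-4 qpel filter sum, whose taps appear to add up to 32,
+ * back to 8 bits through the crop table cm[]. The plain variants round with
+ * (+16)>>5, the *_no_rnd variants use +15 for MPEG-4's "no rounding" mode,
+ * and op_avg additionally averages the result with the destination pixel. */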
+
+QPEL_MC(0, put_ , _ , op_put)
+QPEL_MC(1, put_no_rnd_, _no_rnd_, op_put_no_rnd)
+QPEL_MC(0, avg_ , _ , op_avg)
+//QPEL_MC(1, avg_no_rnd , _ , op_avg)
+#undef op_avg
+#undef op_avg_no_rnd
+#undef op_put
+#undef op_put_no_rnd
+
+#if 1
+#define H264_LOWPASS(OPNAME, OP, OP2) \
+static av_unused void OPNAME ## h264_qpel2_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ const int h=2;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
+ int i;\
+ for(i=0; i<h; i++)\
+ {\
+ OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]));\
+ OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]));\
+ dst+=dstStride;\
+ src+=srcStride;\
+ }\
+}\
+\
+static av_unused void OPNAME ## h264_qpel2_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ const int w=2;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
+ int i;\
+ for(i=0; i<w; i++)\
+ {\
+ const int srcB= src[-2*srcStride];\
+ const int srcA= src[-1*srcStride];\
+ const int src0= src[0 *srcStride];\
+ const int src1= src[1 *srcStride];\
+ const int src2= src[2 *srcStride];\
+ const int src3= src[3 *srcStride];\
+ const int src4= src[4 *srcStride];\
+ OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
+ OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
+ dst++;\
+ src++;\
+ }\
+}\
+\
+static av_unused void OPNAME ## h264_qpel2_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
+ const int h=2;\
+ const int w=2;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
+ int i;\
+ src -= 2*srcStride;\
+ for(i=0; i<h+5; i++)\
+ {\
+ tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]);\
+ tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]);\
+ tmp+=tmpStride;\
+ src+=srcStride;\
+ }\
+ tmp -= tmpStride*(h+5-2);\
+ for(i=0; i<w; i++)\
+ {\
+ const int tmpB= tmp[-2*tmpStride];\
+ const int tmpA= tmp[-1*tmpStride];\
+ const int tmp0= tmp[0 *tmpStride];\
+ const int tmp1= tmp[1 *tmpStride];\
+ const int tmp2= tmp[2 *tmpStride];\
+ const int tmp3= tmp[3 *tmpStride];\
+ const int tmp4= tmp[4 *tmpStride];\
+ OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
+ OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
+ dst++;\
+ tmp++;\
+ }\
+}\
+static void OPNAME ## h264_qpel4_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ const int h=4;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
+ int i;\
+ for(i=0; i<h; i++)\
+ {\
+ OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]));\
+ OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]));\
+ OP(dst[2], (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5]));\
+ OP(dst[3], (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6]));\
+ dst+=dstStride;\
+ src+=srcStride;\
+ }\
+}\
+\
+static void OPNAME ## h264_qpel4_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ const int w=4;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
+ int i;\
+ for(i=0; i<w; i++)\
+ {\
+ const int srcB= src[-2*srcStride];\
+ const int srcA= src[-1*srcStride];\
+ const int src0= src[0 *srcStride];\
+ const int src1= src[1 *srcStride];\
+ const int src2= src[2 *srcStride];\
+ const int src3= src[3 *srcStride];\
+ const int src4= src[4 *srcStride];\
+ const int src5= src[5 *srcStride];\
+ const int src6= src[6 *srcStride];\
+ OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
+ OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
+ OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
+ OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
+ dst++;\
+ src++;\
+ }\
+}\
+\
+static void OPNAME ## h264_qpel4_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
+ const int h=4;\
+ const int w=4;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
+ int i;\
+ src -= 2*srcStride;\
+ for(i=0; i<h+5; i++)\
+ {\
+ tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]);\
+ tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]);\
+ tmp[2]= (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5]);\
+ tmp[3]= (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6]);\
+ tmp+=tmpStride;\
+ src+=srcStride;\
+ }\
+ tmp -= tmpStride*(h+5-2);\
+ for(i=0; i<w; i++)\
+ {\
+ const int tmpB= tmp[-2*tmpStride];\
+ const int tmpA= tmp[-1*tmpStride];\
+ const int tmp0= tmp[0 *tmpStride];\
+ const int tmp1= tmp[1 *tmpStride];\
+ const int tmp2= tmp[2 *tmpStride];\
+ const int tmp3= tmp[3 *tmpStride];\
+ const int tmp4= tmp[4 *tmpStride];\
+ const int tmp5= tmp[5 *tmpStride];\
+ const int tmp6= tmp[6 *tmpStride];\
+ OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
+ OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
+ OP2(dst[2*dstStride], (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));\
+ OP2(dst[3*dstStride], (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));\
+ dst++;\
+ tmp++;\
+ }\
+}\
+\
+static void OPNAME ## h264_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ const int h=8;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
+ int i;\
+ for(i=0; i<h; i++)\
+ {\
+ OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3 ]));\
+ OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4 ]));\
+ OP(dst[2], (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5 ]));\
+ OP(dst[3], (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6 ]));\
+ OP(dst[4], (src[4]+src[5])*20 - (src[3 ]+src[6])*5 + (src[2 ]+src[7 ]));\
+ OP(dst[5], (src[5]+src[6])*20 - (src[4 ]+src[7])*5 + (src[3 ]+src[8 ]));\
+ OP(dst[6], (src[6]+src[7])*20 - (src[5 ]+src[8])*5 + (src[4 ]+src[9 ]));\
+ OP(dst[7], (src[7]+src[8])*20 - (src[6 ]+src[9])*5 + (src[5 ]+src[10]));\
+ dst+=dstStride;\
+ src+=srcStride;\
+ }\
+}\
+\
+static void OPNAME ## h264_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ const int w=8;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
+ int i;\
+ for(i=0; i<w; i++)\
+ {\
+ const int srcB= src[-2*srcStride];\
+ const int srcA= src[-1*srcStride];\
+ const int src0= src[0 *srcStride];\
+ const int src1= src[1 *srcStride];\
+ const int src2= src[2 *srcStride];\
+ const int src3= src[3 *srcStride];\
+ const int src4= src[4 *srcStride];\
+ const int src5= src[5 *srcStride];\
+ const int src6= src[6 *srcStride];\
+ const int src7= src[7 *srcStride];\
+ const int src8= src[8 *srcStride];\
+ const int src9= src[9 *srcStride];\
+ const int src10=src[10*srcStride];\
+ OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
+ OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
+ OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
+ OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
+ OP(dst[4*dstStride], (src4+src5)*20 - (src3+src6)*5 + (src2+src7));\
+ OP(dst[5*dstStride], (src5+src6)*20 - (src4+src7)*5 + (src3+src8));\
+ OP(dst[6*dstStride], (src6+src7)*20 - (src5+src8)*5 + (src4+src9));\
+ OP(dst[7*dstStride], (src7+src8)*20 - (src6+src9)*5 + (src5+src10));\
+ dst++;\
+ src++;\
+ }\
+}\
+\
+static void OPNAME ## h264_qpel8_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
+ const int h=8;\
+ const int w=8;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
+ int i;\
+ src -= 2*srcStride;\
+ for(i=0; i<h+5; i++)\
+ {\
+ tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3 ]);\
+ tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4 ]);\
+ tmp[2]= (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5 ]);\
+ tmp[3]= (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6 ]);\
+ tmp[4]= (src[4]+src[5])*20 - (src[3 ]+src[6])*5 + (src[2 ]+src[7 ]);\
+ tmp[5]= (src[5]+src[6])*20 - (src[4 ]+src[7])*5 + (src[3 ]+src[8 ]);\
+ tmp[6]= (src[6]+src[7])*20 - (src[5 ]+src[8])*5 + (src[4 ]+src[9 ]);\
+ tmp[7]= (src[7]+src[8])*20 - (src[6 ]+src[9])*5 + (src[5 ]+src[10]);\
+ tmp+=tmpStride;\
+ src+=srcStride;\
+ }\
+ tmp -= tmpStride*(h+5-2);\
+ for(i=0; i<w; i++)\
+ {\
+ const int tmpB= tmp[-2*tmpStride];\
+ const int tmpA= tmp[-1*tmpStride];\
+ const int tmp0= tmp[0 *tmpStride];\
+ const int tmp1= tmp[1 *tmpStride];\
+ const int tmp2= tmp[2 *tmpStride];\
+ const int tmp3= tmp[3 *tmpStride];\
+ const int tmp4= tmp[4 *tmpStride];\
+ const int tmp5= tmp[5 *tmpStride];\
+ const int tmp6= tmp[6 *tmpStride];\
+ const int tmp7= tmp[7 *tmpStride];\
+ const int tmp8= tmp[8 *tmpStride];\
+ const int tmp9= tmp[9 *tmpStride];\
+ const int tmp10=tmp[10*tmpStride];\
+ OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
+ OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
+ OP2(dst[2*dstStride], (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));\
+ OP2(dst[3*dstStride], (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));\
+ OP2(dst[4*dstStride], (tmp4+tmp5)*20 - (tmp3+tmp6)*5 + (tmp2+tmp7));\
+ OP2(dst[5*dstStride], (tmp5+tmp6)*20 - (tmp4+tmp7)*5 + (tmp3+tmp8));\
+ OP2(dst[6*dstStride], (tmp6+tmp7)*20 - (tmp5+tmp8)*5 + (tmp4+tmp9));\
+ OP2(dst[7*dstStride], (tmp7+tmp8)*20 - (tmp6+tmp9)*5 + (tmp5+tmp10));\
+ dst++;\
+ tmp++;\
+ }\
+}\
+\
+static void OPNAME ## h264_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ OPNAME ## h264_qpel8_v_lowpass(dst , src , dstStride, srcStride);\
+ OPNAME ## h264_qpel8_v_lowpass(dst+8, src+8, dstStride, srcStride);\
+ src += 8*srcStride;\
+ dst += 8*dstStride;\
+ OPNAME ## h264_qpel8_v_lowpass(dst , src , dstStride, srcStride);\
+ OPNAME ## h264_qpel8_v_lowpass(dst+8, src+8, dstStride, srcStride);\
+}\
+\
+static void OPNAME ## h264_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ OPNAME ## h264_qpel8_h_lowpass(dst , src , dstStride, srcStride);\
+ OPNAME ## h264_qpel8_h_lowpass(dst+8, src+8, dstStride, srcStride);\
+ src += 8*srcStride;\
+ dst += 8*dstStride;\
+ OPNAME ## h264_qpel8_h_lowpass(dst , src , dstStride, srcStride);\
+ OPNAME ## h264_qpel8_h_lowpass(dst+8, src+8, dstStride, srcStride);\
+}\
+\
+static void OPNAME ## h264_qpel16_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
+ OPNAME ## h264_qpel8_hv_lowpass(dst , tmp , src , dstStride, tmpStride, srcStride);\
+ OPNAME ## h264_qpel8_hv_lowpass(dst+8, tmp+8, src+8, dstStride, tmpStride, srcStride);\
+ src += 8*srcStride;\
+ dst += 8*dstStride;\
+ OPNAME ## h264_qpel8_hv_lowpass(dst , tmp , src , dstStride, tmpStride, srcStride);\
+ OPNAME ## h264_qpel8_hv_lowpass(dst+8, tmp+8, src+8, dstStride, tmpStride, srcStride);\
+}\
+
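+/* Note added for exposition (not part of the upstream source): the kernels
+ * above implement H.264's 6-tap half-sample filter (1, -5, 20, 20, -5, 1).
+ * The h/v passes round with (+16)>>5 via OP; the hv path keeps the first pass
+ * unclipped in the 16-bit tmp[] buffer and rounds the combined result with
+ * (+512)>>10 via OP2. */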
+#define H264_MC(OPNAME, SIZE) \
+static void OPNAME ## h264_qpel ## SIZE ## _mc00_c (uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## pixels ## SIZE ## _c(dst, src, stride, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc10_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t half[SIZE*SIZE];\
+ put_h264_qpel ## SIZE ## _h_lowpass(half, src, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2(dst, src, half, stride, stride, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc20_c(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## h264_qpel ## SIZE ## _h_lowpass(dst, src, stride, stride);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc30_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t half[SIZE*SIZE];\
+ put_h264_qpel ## SIZE ## _h_lowpass(half, src, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2(dst, src+1, half, stride, stride, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc01_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[SIZE*(SIZE+5)];\
+ uint8_t * const full_mid= full + SIZE*2;\
+ uint8_t half[SIZE*SIZE];\
+ copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
+ put_h264_qpel ## SIZE ## _v_lowpass(half, full_mid, SIZE, SIZE);\
+ OPNAME ## pixels ## SIZE ## _l2(dst, full_mid, half, stride, SIZE, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc02_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[SIZE*(SIZE+5)];\
+ uint8_t * const full_mid= full + SIZE*2;\
+ copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
+ OPNAME ## h264_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc03_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[SIZE*(SIZE+5)];\
+ uint8_t * const full_mid= full + SIZE*2;\
+ uint8_t half[SIZE*SIZE];\
+ copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
+ put_h264_qpel ## SIZE ## _v_lowpass(half, full_mid, SIZE, SIZE);\
+ OPNAME ## pixels ## SIZE ## _l2(dst, full_mid+SIZE, half, stride, SIZE, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc11_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[SIZE*(SIZE+5)];\
+ uint8_t * const full_mid= full + SIZE*2;\
+ uint8_t halfH[SIZE*SIZE];\
+ uint8_t halfV[SIZE*SIZE];\
+ put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
+ copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
+ put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
+ OPNAME ## pixels ## SIZE ## _l2(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc31_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[SIZE*(SIZE+5)];\
+ uint8_t * const full_mid= full + SIZE*2;\
+ uint8_t halfH[SIZE*SIZE];\
+ uint8_t halfV[SIZE*SIZE];\
+ put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
+ copy_block ## SIZE (full, src - stride*2 + 1, SIZE, stride, SIZE + 5);\
+ put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
+ OPNAME ## pixels ## SIZE ## _l2(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc13_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[SIZE*(SIZE+5)];\
+ uint8_t * const full_mid= full + SIZE*2;\
+ uint8_t halfH[SIZE*SIZE];\
+ uint8_t halfV[SIZE*SIZE];\
+ put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride);\
+ copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
+ put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
+ OPNAME ## pixels ## SIZE ## _l2(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc33_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[SIZE*(SIZE+5)];\
+ uint8_t * const full_mid= full + SIZE*2;\
+ uint8_t halfH[SIZE*SIZE];\
+ uint8_t halfV[SIZE*SIZE];\
+ put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride);\
+ copy_block ## SIZE (full, src - stride*2 + 1, SIZE, stride, SIZE + 5);\
+ put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
+ OPNAME ## pixels ## SIZE ## _l2(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc22_c(uint8_t *dst, uint8_t *src, int stride){\
+ int16_t tmp[SIZE*(SIZE+5)];\
+ OPNAME ## h264_qpel ## SIZE ## _hv_lowpass(dst, tmp, src, stride, SIZE, stride);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc21_c(uint8_t *dst, uint8_t *src, int stride){\
+ int16_t tmp[SIZE*(SIZE+5)];\
+ uint8_t halfH[SIZE*SIZE];\
+ uint8_t halfHV[SIZE*SIZE];\
+ put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
+ put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2(dst, halfH, halfHV, stride, SIZE, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc23_c(uint8_t *dst, uint8_t *src, int stride){\
+ int16_t tmp[SIZE*(SIZE+5)];\
+ uint8_t halfH[SIZE*SIZE];\
+ uint8_t halfHV[SIZE*SIZE];\
+ put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride);\
+ put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2(dst, halfH, halfHV, stride, SIZE, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc12_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[SIZE*(SIZE+5)];\
+ uint8_t * const full_mid= full + SIZE*2;\
+ int16_t tmp[SIZE*(SIZE+5)];\
+ uint8_t halfV[SIZE*SIZE];\
+ uint8_t halfHV[SIZE*SIZE];\
+ copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
+ put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
+ put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2(dst, halfV, halfHV, stride, SIZE, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc32_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[SIZE*(SIZE+5)];\
+ uint8_t * const full_mid= full + SIZE*2;\
+ int16_t tmp[SIZE*(SIZE+5)];\
+ uint8_t halfV[SIZE*SIZE];\
+ uint8_t halfHV[SIZE*SIZE];\
+ copy_block ## SIZE (full, src - stride*2 + 1, SIZE, stride, SIZE + 5);\
+ put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
+ put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2(dst, halfV, halfHV, stride, SIZE, SIZE, SIZE);\
+}\
+
+#define op_avg(a, b) a = (((a)+cm[((b) + 16)>>5]+1)>>1)
+//#define op_avg2(a, b) a = (((a)*w1+cm[((b) + 16)>>5]*w2 + o + 64)>>7)
+#define op_put(a, b) a = cm[((b) + 16)>>5]
+#define op2_avg(a, b) a = (((a)+cm[((b) + 512)>>10]+1)>>1)
+#define op2_put(a, b) a = cm[((b) + 512)>>10]
+
+H264_LOWPASS(put_ , op_put, op2_put)
+H264_LOWPASS(avg_ , op_avg, op2_avg)
+H264_MC(put_, 2)
+H264_MC(put_, 4)
+H264_MC(put_, 8)
+H264_MC(put_, 16)
+H264_MC(avg_, 4)
+H264_MC(avg_, 8)
+H264_MC(avg_, 16)
+
+#undef op_avg
+#undef op_put
+#undef op2_avg
+#undef op2_put
+#endif
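+/* Note added for exposition (not part of the upstream source): H264_MC above
+ * expands the 16 quarter-pel positions mc00..mc33 for each block size. mc00
+ * is a plain copy, half-sample positions call the lowpass kernels directly,
+ * and the remaining quarter-sample positions average two neighbouring
+ * half-sample planes with pixels_l2, matching H.264's bilinear
+ * quarter-sample interpolation. */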
+
+#define op_scale1(x) block[x] = av_clip_uint8( (block[x]*weight + offset) >> log2_denom )
+#define op_scale2(x) dst[x] = av_clip_uint8( (src[x]*weights + dst[x]*weightd + offset) >> (log2_denom+1))
+#define H264_WEIGHT(W,H) \
+static void weight_h264_pixels ## W ## x ## H ## _c(uint8_t *block, int stride, int log2_denom, int weight, int offset){ \
+ int y; \
+ offset <<= log2_denom; \
+ if(log2_denom) offset += 1<<(log2_denom-1); \
+ for(y=0; y<H; y++, block += stride){ \
+ op_scale1(0); \
+ op_scale1(1); \
+ if(W==2) continue; \
+ op_scale1(2); \
+ op_scale1(3); \
+ if(W==4) continue; \
+ op_scale1(4); \
+ op_scale1(5); \
+ op_scale1(6); \
+ op_scale1(7); \
+ if(W==8) continue; \
+ op_scale1(8); \
+ op_scale1(9); \
+ op_scale1(10); \
+ op_scale1(11); \
+ op_scale1(12); \
+ op_scale1(13); \
+ op_scale1(14); \
+ op_scale1(15); \
+ } \
+} \
+static void biweight_h264_pixels ## W ## x ## H ## _c(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset){ \
+ int y; \
+ offset = ((offset + 1) | 1) << log2_denom; \
+ for(y=0; y<H; y++, dst += stride, src += stride){ \
+ op_scale2(0); \
+ op_scale2(1); \
+ if(W==2) continue; \
+ op_scale2(2); \
+ op_scale2(3); \
+ if(W==4) continue; \
+ op_scale2(4); \
+ op_scale2(5); \
+ op_scale2(6); \
+ op_scale2(7); \
+ if(W==8) continue; \
+ op_scale2(8); \
+ op_scale2(9); \
+ op_scale2(10); \
+ op_scale2(11); \
+ op_scale2(12); \
+ op_scale2(13); \
+ op_scale2(14); \
+ op_scale2(15); \
+ } \
+}
+
+H264_WEIGHT(16,16)
+H264_WEIGHT(16,8)
+H264_WEIGHT(8,16)
+H264_WEIGHT(8,8)
+H264_WEIGHT(8,4)
+H264_WEIGHT(4,8)
+H264_WEIGHT(4,4)
+H264_WEIGHT(4,2)
+H264_WEIGHT(2,4)
+H264_WEIGHT(2,2)
+
+#undef op_scale1
+#undef op_scale2
+#undef H264_WEIGHT
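+/* Note added for exposition (not part of the upstream source): these are the
+ * H.264 weighted-prediction helpers. weight_* scales each sample as
+ * clip((pix*weight + offset') >> log2_denom), with the caller's offset
+ * pre-shifted and a rounding half added; biweight_* blends two references as
+ * clip((src*weights + dst*weightd + offset'') >> (log2_denom+1)). The
+ * W==2/4/8 "continue" chains let one loop body cover every block width. */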
+
+static void wmv2_mspel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+ int i;
+
+ for(i=0; i<h; i++){
+ dst[0]= cm[(9*(src[0] + src[1]) - (src[-1] + src[2]) + 8)>>4];
+ dst[1]= cm[(9*(src[1] + src[2]) - (src[ 0] + src[3]) + 8)>>4];
+ dst[2]= cm[(9*(src[2] + src[3]) - (src[ 1] + src[4]) + 8)>>4];
+ dst[3]= cm[(9*(src[3] + src[4]) - (src[ 2] + src[5]) + 8)>>4];
+ dst[4]= cm[(9*(src[4] + src[5]) - (src[ 3] + src[6]) + 8)>>4];
+ dst[5]= cm[(9*(src[5] + src[6]) - (src[ 4] + src[7]) + 8)>>4];
+ dst[6]= cm[(9*(src[6] + src[7]) - (src[ 5] + src[8]) + 8)>>4];
+ dst[7]= cm[(9*(src[7] + src[8]) - (src[ 6] + src[9]) + 8)>>4];
+ dst+=dstStride;
+ src+=srcStride;
+ }
+}
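+/* Note added for exposition (not part of the upstream source): the WMV2 mspel
+ * kernels (this horizontal pass and the vertical pass further below) use a
+ * 4-tap (-1, 9, 9, -1)/16 half-sample filter, rounded with (+8)>>4 and
+ * clipped through cm[]. */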
+
+#if CONFIG_CAVS_DECODER
+/* AVS specific */
+void ff_cavsdsp_init(DSPContext* c, AVCodecContext *avctx);
+
+void ff_put_cavs_qpel8_mc00_c(uint8_t *dst, uint8_t *src, int stride) {
+ put_pixels8_c(dst, src, stride, 8);
+}
+void ff_avg_cavs_qpel8_mc00_c(uint8_t *dst, uint8_t *src, int stride) {
+ avg_pixels8_c(dst, src, stride, 8);
+}
+void ff_put_cavs_qpel16_mc00_c(uint8_t *dst, uint8_t *src, int stride) {
+ put_pixels16_c(dst, src, stride, 16);
+}
+void ff_avg_cavs_qpel16_mc00_c(uint8_t *dst, uint8_t *src, int stride) {
+ avg_pixels16_c(dst, src, stride, 16);
+}
+#endif /* CONFIG_CAVS_DECODER */
+
+void ff_mlp_init(DSPContext* c, AVCodecContext *avctx);
+
+#if CONFIG_VC1_DECODER
+/* VC-1 specific */
+void ff_vc1dsp_init(DSPContext* c, AVCodecContext *avctx);
+
+void ff_put_vc1_mspel_mc00_c(uint8_t *dst, uint8_t *src, int stride, int rnd) {
+ put_pixels8_c(dst, src, stride, 8);
+}
+void ff_avg_vc1_mspel_mc00_c(uint8_t *dst, uint8_t *src, int stride, int rnd) {
+ avg_pixels8_c(dst, src, stride, 8);
+}
+#endif /* CONFIG_VC1_DECODER */
+
+void ff_intrax8dsp_init(DSPContext* c, AVCodecContext *avctx);
+
+#if CONFIG_RV30_DECODER
+void ff_rv30dsp_init(DSPContext* c, AVCodecContext *avctx);
+#endif /* CONFIG_RV30_DECODER */
+
+#if CONFIG_RV40_DECODER
+static void put_rv40_qpel16_mc33_c(uint8_t *dst, uint8_t *src, int stride){
+ put_pixels16_xy2_c(dst, src, stride, 16);
+}
+static void avg_rv40_qpel16_mc33_c(uint8_t *dst, uint8_t *src, int stride){
+ avg_pixels16_xy2_c(dst, src, stride, 16);
+}
+static void put_rv40_qpel8_mc33_c(uint8_t *dst, uint8_t *src, int stride){
+ put_pixels8_xy2_c(dst, src, stride, 8);
+}
+static void avg_rv40_qpel8_mc33_c(uint8_t *dst, uint8_t *src, int stride){
+ avg_pixels8_xy2_c(dst, src, stride, 8);
+}
+
+void ff_rv40dsp_init(DSPContext* c, AVCodecContext *avctx);
+#endif /* CONFIG_RV40_DECODER */
+
+static void wmv2_mspel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int w){
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+ int i;
+
+ for(i=0; i<w; i++){
+ const int src_1= src[ -srcStride];
+ const int src0 = src[0 ];
+ const int src1 = src[ srcStride];
+ const int src2 = src[2*srcStride];
+ const int src3 = src[3*srcStride];
+ const int src4 = src[4*srcStride];
+ const int src5 = src[5*srcStride];
+ const int src6 = src[6*srcStride];
+ const int src7 = src[7*srcStride];
+ const int src8 = src[8*srcStride];
+ const int src9 = src[9*srcStride];
+ dst[0*dstStride]= cm[(9*(src0 + src1) - (src_1 + src2) + 8)>>4];
+ dst[1*dstStride]= cm[(9*(src1 + src2) - (src0 + src3) + 8)>>4];
+ dst[2*dstStride]= cm[(9*(src2 + src3) - (src1 + src4) + 8)>>4];
+ dst[3*dstStride]= cm[(9*(src3 + src4) - (src2 + src5) + 8)>>4];
+ dst[4*dstStride]= cm[(9*(src4 + src5) - (src3 + src6) + 8)>>4];
+ dst[5*dstStride]= cm[(9*(src5 + src6) - (src4 + src7) + 8)>>4];
+ dst[6*dstStride]= cm[(9*(src6 + src7) - (src5 + src8) + 8)>>4];
+ dst[7*dstStride]= cm[(9*(src7 + src8) - (src6 + src9) + 8)>>4];
+ src++;
+ dst++;
+ }
+}
+
+static void put_mspel8_mc00_c (uint8_t *dst, uint8_t *src, int stride){
+ put_pixels8_c(dst, src, stride, 8);
+}
+
+static void put_mspel8_mc10_c(uint8_t *dst, uint8_t *src, int stride){
+ uint8_t half[64];
+ wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
+ put_pixels8_l2(dst, src, half, stride, stride, 8, 8);
+}
+
+static void put_mspel8_mc20_c(uint8_t *dst, uint8_t *src, int stride){
+ wmv2_mspel8_h_lowpass(dst, src, stride, stride, 8);
+}
+
+static void put_mspel8_mc30_c(uint8_t *dst, uint8_t *src, int stride){
+ uint8_t half[64];
+ wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
+ put_pixels8_l2(dst, src+1, half, stride, stride, 8, 8);
+}
+
+static void put_mspel8_mc02_c(uint8_t *dst, uint8_t *src, int stride){
+ wmv2_mspel8_v_lowpass(dst, src, stride, stride, 8);
+}
+
+static void put_mspel8_mc12_c(uint8_t *dst, uint8_t *src, int stride){
+ uint8_t halfH[88];
+ uint8_t halfV[64];
+ uint8_t halfHV[64];
+ wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
+ wmv2_mspel8_v_lowpass(halfV, src, 8, stride, 8);
+ wmv2_mspel8_v_lowpass(halfHV, halfH+8, 8, 8, 8);
+ put_pixels8_l2(dst, halfV, halfHV, stride, 8, 8, 8);
+}
+static void put_mspel8_mc32_c(uint8_t *dst, uint8_t *src, int stride){
+ uint8_t halfH[88];
+ uint8_t halfV[64];
+ uint8_t halfHV[64];
+ wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
+ wmv2_mspel8_v_lowpass(halfV, src+1, 8, stride, 8);
+ wmv2_mspel8_v_lowpass(halfHV, halfH+8, 8, 8, 8);
+ put_pixels8_l2(dst, halfV, halfHV, stride, 8, 8, 8);
+}
+static void put_mspel8_mc22_c(uint8_t *dst, uint8_t *src, int stride){
+ uint8_t halfH[88];
+ wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
+ wmv2_mspel8_v_lowpass(dst, halfH+8, stride, 8, 8);
+}
+
+static void h263_v_loop_filter_c(uint8_t *src, int stride, int qscale){
+ if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
+ int x;
+ const int strength= ff_h263_loop_filter_strength[qscale];
+
+ for(x=0; x<8; x++){
+ int d1, d2, ad1;
+ int p0= src[x-2*stride];
+ int p1= src[x-1*stride];
+ int p2= src[x+0*stride];
+ int p3= src[x+1*stride];
+ int d = (p0 - p3 + 4*(p2 - p1)) / 8;
+
+ if (d<-2*strength) d1= 0;
+ else if(d<- strength) d1=-2*strength - d;
+ else if(d< strength) d1= d;
+ else if(d< 2*strength) d1= 2*strength - d;
+ else d1= 0;
+
+ p1 += d1;
+ p2 -= d1;
+ if(p1&256) p1= ~(p1>>31);
+ if(p2&256) p2= ~(p2>>31);
+
+ src[x-1*stride] = p1;
+ src[x+0*stride] = p2;
+
+ ad1= FFABS(d1)>>1;
+
+ d2= av_clip((p0-p3)/4, -ad1, ad1);
+
+ src[x-2*stride] = p0 - d2;
+ src[x+ stride] = p3 + d2;
+ }
+ }
+}
+
+static void h263_h_loop_filter_c(uint8_t *src, int stride, int qscale){
+ if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
+ int y;
+ const int strength= ff_h263_loop_filter_strength[qscale];
+
+ for(y=0; y<8; y++){
+ int d1, d2, ad1;
+ int p0= src[y*stride-2];
+ int p1= src[y*stride-1];
+ int p2= src[y*stride+0];
+ int p3= src[y*stride+1];
+ int d = (p0 - p3 + 4*(p2 - p1)) / 8;
+
+ if (d<-2*strength) d1= 0;
+ else if(d<- strength) d1=-2*strength - d;
+ else if(d< strength) d1= d;
+ else if(d< 2*strength) d1= 2*strength - d;
+ else d1= 0;
+
+ p1 += d1;
+ p2 -= d1;
+ if(p1&256) p1= ~(p1>>31);
+ if(p2&256) p2= ~(p2>>31);
+
+ src[y*stride-1] = p1;
+ src[y*stride+0] = p2;
+
+ ad1= FFABS(d1)>>1;
+
+ d2= av_clip((p0-p3)/4, -ad1, ad1);
+
+ src[y*stride-2] = p0 - d2;
+ src[y*stride+1] = p3 + d2;
+ }
+ }
+}
+
+static void h261_loop_filter_c(uint8_t *src, int stride){
+ int x,y,xy,yz;
+ int temp[64];
+
+ for(x=0; x<8; x++){
+ temp[x ] = 4*src[x ];
+ temp[x + 7*8] = 4*src[x + 7*stride];
+ }
+ for(y=1; y<7; y++){
+ for(x=0; x<8; x++){
+ xy = y * stride + x;
+ yz = y * 8 + x;
+ temp[yz] = src[xy - stride] + 2*src[xy] + src[xy + stride];
+ }
+ }
+
+ for(y=0; y<8; y++){
+ src[ y*stride] = (temp[ y*8] + 2)>>2;
+ src[7+y*stride] = (temp[7+y*8] + 2)>>2;
+ for(x=1; x<7; x++){
+ xy = y * stride + x;
+ yz = y * 8 + x;
+ src[xy] = (temp[yz-1] + 2*temp[yz] + temp[yz+1] + 8)>>4;
+ }
+ }
+}
+
+static av_always_inline av_flatten void h264_loop_filter_luma_c(uint8_t *pix, int xstride, int ystride, int alpha, int beta, int8_t *tc0)
+{
+ int i, d;
+ for( i = 0; i < 4; i++ ) {
+ if( tc0[i] < 0 ) {
+ pix += 4*ystride;
+ continue;
+ }
+ for( d = 0; d < 4; d++ ) {
+ const int p0 = pix[-1*xstride];
+ const int p1 = pix[-2*xstride];
+ const int p2 = pix[-3*xstride];
+ const int q0 = pix[0];
+ const int q1 = pix[1*xstride];
+ const int q2 = pix[2*xstride];
+
+ if( FFABS( p0 - q0 ) < alpha &&
+ FFABS( p1 - p0 ) < beta &&
+ FFABS( q1 - q0 ) < beta ) {
+
+ int tc = tc0[i];
+ int i_delta;
+
+ if( FFABS( p2 - p0 ) < beta ) {
+ if(tc0[i])
+ pix[-2*xstride] = p1 + av_clip( (( p2 + ( ( p0 + q0 + 1 ) >> 1 ) ) >> 1) - p1, -tc0[i], tc0[i] );
+ tc++;
+ }
+ if( FFABS( q2 - q0 ) < beta ) {
+ if(tc0[i])
+ pix[ xstride] = q1 + av_clip( (( q2 + ( ( p0 + q0 + 1 ) >> 1 ) ) >> 1) - q1, -tc0[i], tc0[i] );
+ tc++;
+ }
+
+ i_delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
+ pix[-xstride] = av_clip_uint8( p0 + i_delta ); /* p0' */
+ pix[0] = av_clip_uint8( q0 - i_delta ); /* q0' */
+ }
+ pix += ystride;
+ }
+ }
+}
+static void h264_v_loop_filter_luma_c(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
+{
+ h264_loop_filter_luma_c(pix, stride, 1, alpha, beta, tc0);
+}
+static void h264_h_loop_filter_luma_c(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
+{
+ h264_loop_filter_luma_c(pix, 1, stride, alpha, beta, tc0);
+}
+
+static av_always_inline av_flatten void h264_loop_filter_luma_intra_c(uint8_t *pix, int xstride, int ystride, int alpha, int beta)
+{
+ int d;
+ for( d = 0; d < 16; d++ ) {
+ const int p2 = pix[-3*xstride];
+ const int p1 = pix[-2*xstride];
+ const int p0 = pix[-1*xstride];
+
+ const int q0 = pix[ 0*xstride];
+ const int q1 = pix[ 1*xstride];
+ const int q2 = pix[ 2*xstride];
+
+ if( FFABS( p0 - q0 ) < alpha &&
+ FFABS( p1 - p0 ) < beta &&
+ FFABS( q1 - q0 ) < beta ) {
+
+ if(FFABS( p0 - q0 ) < (( alpha >> 2 ) + 2 )){
+ if( FFABS( p2 - p0 ) < beta)
+ {
+ const int p3 = pix[-4*xstride];
+ /* p0', p1', p2' */
+ pix[-1*xstride] = ( p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4 ) >> 3;
+ pix[-2*xstride] = ( p2 + p1 + p0 + q0 + 2 ) >> 2;
+ pix[-3*xstride] = ( 2*p3 + 3*p2 + p1 + p0 + q0 + 4 ) >> 3;
+ } else {
+ /* p0' */
+ pix[-1*xstride] = ( 2*p1 + p0 + q1 + 2 ) >> 2;
+ }
+ if( FFABS( q2 - q0 ) < beta)
+ {
+ const int q3 = pix[3*xstride];
+ /* q0', q1', q2' */
+ pix[0*xstride] = ( p1 + 2*p0 + 2*q0 + 2*q1 + q2 + 4 ) >> 3;
+ pix[1*xstride] = ( p0 + q0 + q1 + q2 + 2 ) >> 2;
+ pix[2*xstride] = ( 2*q3 + 3*q2 + q1 + q0 + p0 + 4 ) >> 3;
+ } else {
+ /* q0' */
+ pix[0*xstride] = ( 2*q1 + q0 + p1 + 2 ) >> 2;
+ }
+ }else{
+ /* p0', q0' */
+ pix[-1*xstride] = ( 2*p1 + p0 + q1 + 2 ) >> 2;
+ pix[ 0*xstride] = ( 2*q1 + q0 + p1 + 2 ) >> 2;
+ }
+ }
+ pix += ystride;
+ }
+}
+static void h264_v_loop_filter_luma_intra_c(uint8_t *pix, int stride, int alpha, int beta)
+{
+ h264_loop_filter_luma_intra_c(pix, stride, 1, alpha, beta);
+}
+static void h264_h_loop_filter_luma_intra_c(uint8_t *pix, int stride, int alpha, int beta)
+{
+ h264_loop_filter_luma_intra_c(pix, 1, stride, alpha, beta);
+}
+
+static av_always_inline av_flatten void h264_loop_filter_chroma_c(uint8_t *pix, int xstride, int ystride, int alpha, int beta, int8_t *tc0)
+{
+ int i, d;
+ for( i = 0; i < 4; i++ ) {
+ const int tc = tc0[i];
+ if( tc <= 0 ) {
+ pix += 2*ystride;
+ continue;
+ }
+ for( d = 0; d < 2; d++ ) {
+ const int p0 = pix[-1*xstride];
+ const int p1 = pix[-2*xstride];
+ const int q0 = pix[0];
+ const int q1 = pix[1*xstride];
+
+ if( FFABS( p0 - q0 ) < alpha &&
+ FFABS( p1 - p0 ) < beta &&
+ FFABS( q1 - q0 ) < beta ) {
+
+ int delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
+
+ pix[-xstride] = av_clip_uint8( p0 + delta ); /* p0' */
+ pix[0] = av_clip_uint8( q0 - delta ); /* q0' */
+ }
+ pix += ystride;
+ }
+ }
+}
+static void h264_v_loop_filter_chroma_c(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
+{
+ h264_loop_filter_chroma_c(pix, stride, 1, alpha, beta, tc0);
+}
+static void h264_h_loop_filter_chroma_c(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
+{
+ h264_loop_filter_chroma_c(pix, 1, stride, alpha, beta, tc0);
+}
+
+static av_always_inline av_flatten void h264_loop_filter_chroma_intra_c(uint8_t *pix, int xstride, int ystride, int alpha, int beta)
+{
+ int d;
+ for( d = 0; d < 8; d++ ) {
+ const int p0 = pix[-1*xstride];
+ const int p1 = pix[-2*xstride];
+ const int q0 = pix[0];
+ const int q1 = pix[1*xstride];
+
+ if( FFABS( p0 - q0 ) < alpha &&
+ FFABS( p1 - p0 ) < beta &&
+ FFABS( q1 - q0 ) < beta ) {
+
+ pix[-xstride] = ( 2*p1 + p0 + q1 + 2 ) >> 2; /* p0' */
+ pix[0] = ( 2*q1 + q0 + p1 + 2 ) >> 2; /* q0' */
+ }
+ pix += ystride;
+ }
+}
+static void h264_v_loop_filter_chroma_intra_c(uint8_t *pix, int stride, int alpha, int beta)
+{
+ h264_loop_filter_chroma_intra_c(pix, stride, 1, alpha, beta);
+}
+static void h264_h_loop_filter_chroma_intra_c(uint8_t *pix, int stride, int alpha, int beta)
+{
+ h264_loop_filter_chroma_intra_c(pix, 1, stride, alpha, beta);
+}
+
+static inline int pix_abs16_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
+{
+ int s, i;
+
+ s = 0;
+ for(i=0;i<h;i++) {
+ s += abs(pix1[0] - pix2[0]);
+ s += abs(pix1[1] - pix2[1]);
+ s += abs(pix1[2] - pix2[2]);
+ s += abs(pix1[3] - pix2[3]);
+ s += abs(pix1[4] - pix2[4]);
+ s += abs(pix1[5] - pix2[5]);
+ s += abs(pix1[6] - pix2[6]);
+ s += abs(pix1[7] - pix2[7]);
+ s += abs(pix1[8] - pix2[8]);
+ s += abs(pix1[9] - pix2[9]);
+ s += abs(pix1[10] - pix2[10]);
+ s += abs(pix1[11] - pix2[11]);
+ s += abs(pix1[12] - pix2[12]);
+ s += abs(pix1[13] - pix2[13]);
+ s += abs(pix1[14] - pix2[14]);
+ s += abs(pix1[15] - pix2[15]);
+ pix1 += line_size;
+ pix2 += line_size;
+ }
+ return s;
+}
+
+static int pix_abs16_x2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
+{
+ int s, i;
+
+ s = 0;
+ for(i=0;i<h;i++) {
+ s += abs(pix1[0] - avg2(pix2[0], pix2[1]));
+ s += abs(pix1[1] - avg2(pix2[1], pix2[2]));
+ s += abs(pix1[2] - avg2(pix2[2], pix2[3]));
+ s += abs(pix1[3] - avg2(pix2[3], pix2[4]));
+ s += abs(pix1[4] - avg2(pix2[4], pix2[5]));
+ s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
+ s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
+ s += abs(pix1[7] - avg2(pix2[7], pix2[8]));
+ s += abs(pix1[8] - avg2(pix2[8], pix2[9]));
+ s += abs(pix1[9] - avg2(pix2[9], pix2[10]));
+ s += abs(pix1[10] - avg2(pix2[10], pix2[11]));
+ s += abs(pix1[11] - avg2(pix2[11], pix2[12]));
+ s += abs(pix1[12] - avg2(pix2[12], pix2[13]));
+ s += abs(pix1[13] - avg2(pix2[13], pix2[14]));
+ s += abs(pix1[14] - avg2(pix2[14], pix2[15]));
+ s += abs(pix1[15] - avg2(pix2[15], pix2[16]));
+ pix1 += line_size;
+ pix2 += line_size;
+ }
+ return s;
+}
+
+static int pix_abs16_y2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
+{
+ int s, i;
+ uint8_t *pix3 = pix2 + line_size;
+
+ s = 0;
+ for(i=0;i<h;i++) {
+ s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
+ s += abs(pix1[1] - avg2(pix2[1], pix3[1]));
+ s += abs(pix1[2] - avg2(pix2[2], pix3[2]));
+ s += abs(pix1[3] - avg2(pix2[3], pix3[3]));
+ s += abs(pix1[4] - avg2(pix2[4], pix3[4]));
+ s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
+ s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
+ s += abs(pix1[7] - avg2(pix2[7], pix3[7]));
+ s += abs(pix1[8] - avg2(pix2[8], pix3[8]));
+ s += abs(pix1[9] - avg2(pix2[9], pix3[9]));
+ s += abs(pix1[10] - avg2(pix2[10], pix3[10]));
+ s += abs(pix1[11] - avg2(pix2[11], pix3[11]));
+ s += abs(pix1[12] - avg2(pix2[12], pix3[12]));
+ s += abs(pix1[13] - avg2(pix2[13], pix3[13]));
+ s += abs(pix1[14] - avg2(pix2[14], pix3[14]));
+ s += abs(pix1[15] - avg2(pix2[15], pix3[15]));
+ pix1 += line_size;
+ pix2 += line_size;
+ pix3 += line_size;
+ }
+ return s;
+}
+
+static int pix_abs16_xy2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
+{
+ int s, i;
+ uint8_t *pix3 = pix2 + line_size;
+
+ s = 0;
+ for(i=0;i<h;i++) {
+ s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
+ s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2]));
+ s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3]));
+ s += abs(pix1[3] - avg4(pix2[3], pix2[4], pix3[3], pix3[4]));
+ s += abs(pix1[4] - avg4(pix2[4], pix2[5], pix3[4], pix3[5]));
+ s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
+ s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
+ s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));
+ s += abs(pix1[8] - avg4(pix2[8], pix2[9], pix3[8], pix3[9]));
+ s += abs(pix1[9] - avg4(pix2[9], pix2[10], pix3[9], pix3[10]));
+ s += abs(pix1[10] - avg4(pix2[10], pix2[11], pix3[10], pix3[11]));
+ s += abs(pix1[11] - avg4(pix2[11], pix2[12], pix3[11], pix3[12]));
+ s += abs(pix1[12] - avg4(pix2[12], pix2[13], pix3[12], pix3[13]));
+ s += abs(pix1[13] - avg4(pix2[13], pix2[14], pix3[13], pix3[14]));
+ s += abs(pix1[14] - avg4(pix2[14], pix2[15], pix3[14], pix3[15]));
+ s += abs(pix1[15] - avg4(pix2[15], pix2[16], pix3[15], pix3[16]));
+ pix1 += line_size;
+ pix2 += line_size;
+ pix3 += line_size;
+ }
+ return s;
+}
+
+static inline int pix_abs8_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
+{
+ int s, i;
+
+ s = 0;
+ for(i=0;i<h;i++) {
+ s += abs(pix1[0] - pix2[0]);
+ s += abs(pix1[1] - pix2[1]);
+ s += abs(pix1[2] - pix2[2]);
+ s += abs(pix1[3] - pix2[3]);
+ s += abs(pix1[4] - pix2[4]);
+ s += abs(pix1[5] - pix2[5]);
+ s += abs(pix1[6] - pix2[6]);
+ s += abs(pix1[7] - pix2[7]);
+ pix1 += line_size;
+ pix2 += line_size;
+ }
+ return s;
+}
+
+static int pix_abs8_x2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
+{
+ int s, i;
+
+ s = 0;
+ for(i=0;i<h;i++) {
+ s += abs(pix1[0] - avg2(pix2[0], pix2[1]));
+ s += abs(pix1[1] - avg2(pix2[1], pix2[2]));
+ s += abs(pix1[2] - avg2(pix2[2], pix2[3]));
+ s += abs(pix1[3] - avg2(pix2[3], pix2[4]));
+ s += abs(pix1[4] - avg2(pix2[4], pix2[5]));
+ s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
+ s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
+ s += abs(pix1[7] - avg2(pix2[7], pix2[8]));
+ pix1 += line_size;
+ pix2 += line_size;
+ }
+ return s;
+}
+
+static int pix_abs8_y2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
+{
+ int s, i;
+ uint8_t *pix3 = pix2 + line_size;
+
+ s = 0;
+ for(i=0;i<h;i++) {
+ s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
+ s += abs(pix1[1] - avg2(pix2[1], pix3[1]));
+ s += abs(pix1[2] - avg2(pix2[2], pix3[2]));
+ s += abs(pix1[3] - avg2(pix2[3], pix3[3]));
+ s += abs(pix1[4] - avg2(pix2[4], pix3[4]));
+ s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
+ s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
+ s += abs(pix1[7] - avg2(pix2[7], pix3[7]));
+ pix1 += line_size;
+ pix2 += line_size;
+ pix3 += line_size;
+ }
+ return s;
+}
+
+static int pix_abs8_xy2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
+{
+ int s, i;
+ uint8_t *pix3 = pix2 + line_size;
+
+ s = 0;
+ for(i=0;i<h;i++) {
+ s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
+ s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2]));
+ s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3]));
+ s += abs(pix1[3] - avg4(pix2[3], pix2[4], pix3[3], pix3[4]));
+ s += abs(pix1[4] - avg4(pix2[4], pix2[5], pix3[4], pix3[5]));
+ s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
+ s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
+ s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));
+ pix1 += line_size;
+ pix2 += line_size;
+ pix3 += line_size;
+ }
+ return s;
+}
+
+static int nsse16_c(void *v, uint8_t *s1, uint8_t *s2, int stride, int h){
+ MpegEncContext *c = v;
+ int score1=0;
+ int score2=0;
+ int x,y;
+
+ for(y=0; y<h; y++){
+ for(x=0; x<16; x++){
+ score1+= (s1[x ] - s2[x ])*(s1[x ] - s2[x ]);
+ }
+ if(y+1<h){
+ for(x=0; x<15; x++){
+ score2+= FFABS( s1[x ] - s1[x +stride]
+ - s1[x+1] + s1[x+1+stride])
+ -FFABS( s2[x ] - s2[x +stride]
+ - s2[x+1] + s2[x+1+stride]);
+ }
+ }
+ s1+= stride;
+ s2+= stride;
+ }
+
+ if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight;
+ else return score1 + FFABS(score2)*8;
+}
+
+static int nsse8_c(void *v, uint8_t *s1, uint8_t *s2, int stride, int h){
+ MpegEncContext *c = v;
+ int score1=0;
+ int score2=0;
+ int x,y;
+
+ for(y=0; y<h; y++){
+ for(x=0; x<8; x++){
+ score1+= (s1[x ] - s2[x ])*(s1[x ] - s2[x ]);
+ }
+ if(y+1<h){
+ for(x=0; x<7; x++){
+ score2+= FFABS( s1[x ] - s1[x +stride]
+ - s1[x+1] + s1[x+1+stride])
+ -FFABS( s2[x ] - s2[x +stride]
+ - s2[x+1] + s2[x+1+stride]);
+ }
+ }
+ s1+= stride;
+ s2+= stride;
+ }
+
+ if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight;
+ else return score1 + FFABS(score2)*8;
+}
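+/* Note added for exposition (not part of the upstream source): nsse*_c is a
+ * "noise preserving" comparison: plain SSE (score1) plus a penalty, weighted
+ * by avctx->nsse_weight (8 when no context is supplied), for how much the
+ * local 2x2 cross-differences, i.e. the texture, differ between the two
+ * blocks. */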
+
+static int try_8x8basis_c(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale){
+ int i;
+ unsigned int sum=0;
+
+ for(i=0; i<8*8; i++){
+ int b= rem[i] + ((basis[i]*scale + (1<<(BASIS_SHIFT - RECON_SHIFT-1)))>>(BASIS_SHIFT - RECON_SHIFT));
+ int w= weight[i];
+ b>>= RECON_SHIFT;
+ assert(-512<b && b<512);
+
+ sum += (w*b)*(w*b)>>4;
+ }
+ return sum>>2;
+}
+
+static void add_8x8basis_c(int16_t rem[64], int16_t basis[64], int scale){
+ int i;
+
+ for(i=0; i<8*8; i++){
+ rem[i] += (basis[i]*scale + (1<<(BASIS_SHIFT - RECON_SHIFT-1)))>>(BASIS_SHIFT - RECON_SHIFT);
+ }
+}
+
+/**
+ * Permute an 8x8 block.
+ * @param block the block which will be permuted according to the given permutation vector
+ * @param permutation the permutation vector
+ * @param last the last non-zero coefficient in scantable order, used to speed the permutation up
+ * @param scantable the scantable to use; it is only used to speed the permutation up, the block is
+ * not (inverse) permuted to scantable order!
+ */
+void ff_block_permute(DCTELEM *block, uint8_t *permutation, const uint8_t *scantable, int last)
+{
+ int i;
+ DCTELEM temp[64];
+
+ if(last<=0) return;
+ //if(permutation[1]==1) return; //FIXME this is OK but not clean, and it might fail for some permutations
+
+ for(i=0; i<=last; i++){
+ const int j= scantable[i];
+ temp[j]= block[j];
+ block[j]=0;
+ }
+
+ for(i=0; i<=last; i++){
+ const int j= scantable[i];
+ const int perm_j= permutation[j];
+ block[perm_j]= temp[j];
+ }
+}
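+/* Note added for exposition (not part of the upstream source):
+ * ff_block_permute works in two passes: it gathers coefficients 0..last in
+ * scantable order into temp[] while zeroing them in block[], then scatters
+ * each one to permutation[j]. Restricting both passes to `last` is what makes
+ * the scantable purely a speed optimisation, as the comment above notes. */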
+
+static int zero_cmp(void *s, uint8_t *a, uint8_t *b, int stride, int h){
+ return 0;
+}
+
+void ff_set_cmp(DSPContext* c, me_cmp_func *cmp, int type){
+ int i;
+
+ memset(cmp, 0, sizeof(void*)*6);
+
+ for(i=0; i<6; i++){
+ switch(type&0xFF){
+ case FF_CMP_SAD:
+ cmp[i]= c->sad[i];
+ break;
+ case FF_CMP_SATD:
+ cmp[i]= c->hadamard8_diff[i];
+ break;
+ case FF_CMP_SSE:
+ cmp[i]= c->sse[i];
+ break;
+ case FF_CMP_DCT:
+ cmp[i]= c->dct_sad[i];
+ break;
+ case FF_CMP_DCT264:
+ cmp[i]= c->dct264_sad[i];
+ break;
+ case FF_CMP_DCTMAX:
+ cmp[i]= c->dct_max[i];
+ break;
+ case FF_CMP_PSNR:
+ cmp[i]= c->quant_psnr[i];
+ break;
+ case FF_CMP_BIT:
+ cmp[i]= c->bit[i];
+ break;
+ case FF_CMP_RD:
+ cmp[i]= c->rd[i];
+ break;
+ case FF_CMP_VSAD:
+ cmp[i]= c->vsad[i];
+ break;
+ case FF_CMP_VSSE:
+ cmp[i]= c->vsse[i];
+ break;
+ case FF_CMP_ZERO:
+ cmp[i]= zero_cmp;
+ break;
+ case FF_CMP_NSSE:
+ cmp[i]= c->nsse[i];
+ break;
+#if CONFIG_SNOW_ENCODER
+ case FF_CMP_W53:
+ cmp[i]= c->w53[i];
+ break;
+ case FF_CMP_W97:
+ cmp[i]= c->w97[i];
+ break;
+#endif
+ default:
+ av_log(NULL, AV_LOG_ERROR,"internal error in cmp function selection\n");
+ }
+ }
+}
+
+static void clear_block_c(DCTELEM *block)
+{
+ memset(block, 0, sizeof(DCTELEM)*64);
+}
+
+/**
+ * memset(blocks, 0, sizeof(DCTELEM)*6*64)
+ */
+static void clear_blocks_c(DCTELEM *blocks)
+{
+ memset(blocks, 0, sizeof(DCTELEM)*6*64);
+}
+
+static void add_bytes_c(uint8_t *dst, uint8_t *src, int w){
+ long i;
+ for(i=0; i<=w-sizeof(long); i+=sizeof(long)){
+ long a = *(long*)(src+i);
+ long b = *(long*)(dst+i);
+ *(long*)(dst+i) = ((a&pb_7f) + (b&pb_7f)) ^ ((a^b)&pb_80);
+ }
+ for(; i<w; i++)
+ dst[i+0] += src[i+0];
+}
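+/* Note added for exposition (not part of the upstream source): these byte-wise
+ * helpers use a SIMD-within-a-register trick. pb_7f and pb_80 are assumed to
+ * be the all-0x7f / all-0x80 byte masks defined earlier in this file: masking
+ * with pb_7f stops per-byte sums from carrying into the next byte, and the
+ * top bit of each byte is restored via ((a^b)&pb_80), giving sizeof(long)
+ * independent 8-bit additions per machine word. */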
+
+static void add_bytes_l2_c(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
+ long i;
+ for(i=0; i<=w-sizeof(long); i+=sizeof(long)){
+ long a = *(long*)(src1+i);
+ long b = *(long*)(src2+i);
+ *(long*)(dst+i) = ((a&pb_7f) + (b&pb_7f)) ^ ((a^b)&pb_80);
+ }
+ for(; i<w; i++)
+ dst[i] = src1[i]+src2[i];
+}
+
+static void diff_bytes_c(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
+ long i;
+#if !HAVE_FAST_UNALIGNED
+ if((long)src2 & (sizeof(long)-1)){
+ for(i=0; i+7<w; i+=8){
+ dst[i+0] = src1[i+0]-src2[i+0];
+ dst[i+1] = src1[i+1]-src2[i+1];
+ dst[i+2] = src1[i+2]-src2[i+2];
+ dst[i+3] = src1[i+3]-src2[i+3];
+ dst[i+4] = src1[i+4]-src2[i+4];
+ dst[i+5] = src1[i+5]-src2[i+5];
+ dst[i+6] = src1[i+6]-src2[i+6];
+ dst[i+7] = src1[i+7]-src2[i+7];
+ }
+ }else
+#endif
+ for(i=0; i<=w-sizeof(long); i+=sizeof(long)){
+ long a = *(long*)(src1+i);
+ long b = *(long*)(src2+i);
+ *(long*)(dst+i) = ((a|pb_80) - (b&pb_7f)) ^ ((a^b^pb_80)&pb_80);
+ }
+ for(; i<w; i++)
+ dst[i+0] = src1[i+0]-src2[i+0];
+}
+
+static void add_hfyu_median_prediction_c(uint8_t *dst, const uint8_t *src1, const uint8_t *diff, int w, int *left, int *left_top){
+ int i;
+ uint8_t l, lt;
+
+ l= *left;
+ lt= *left_top;
+
+ for(i=0; i<w; i++){
+ l= mid_pred(l, src1[i], (l + src1[i] - lt)&0xFF) + diff[i];
+ lt= src1[i];
+ dst[i]= l;
+ }
+
+ *left= l;
+ *left_top= lt;
+}
+
+static void sub_hfyu_median_prediction_c(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int w, int *left, int *left_top){
+ int i;
+ uint8_t l, lt;
+
+ l= *left;
+ lt= *left_top;
+
+ for(i=0; i<w; i++){
+ const int pred= mid_pred(l, src1[i], (l + src1[i] - lt)&0xFF);
+ lt= src1[i];
+ l= src2[i];
+ dst[i]= l - pred;
+ }
+
+ *left= l;
+ *left_top= lt;
+}
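+/* Note added for exposition (not part of the upstream source): the HuffYUV
+ * median predictor is mid_pred(left, top, left + top - topleft). The add_*
+ * variant reconstructs samples from stored residuals, sub_* produces the
+ * residuals for encoding; both carry left/left_top across calls through the
+ * int pointers. */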
+
+static int add_hfyu_left_prediction_c(uint8_t *dst, const uint8_t *src, int w, int acc){
+ int i;
+
+ for(i=0; i<w-1; i++){
+ acc+= src[i];
+ dst[i]= acc;
+ i++;
+ acc+= src[i];
+ dst[i]= acc;
+ }
+
+ for(; i<w; i++){
+ acc+= src[i];
+ dst[i]= acc;
+ }
+
+ return acc;
+}
+
+#if HAVE_BIGENDIAN
+#define B 3
+#define G 2
+#define R 1
+#define A 0
+#else
+#define B 0
+#define G 1
+#define R 2
+#define A 3
+#endif
+static void add_hfyu_left_prediction_bgr32_c(uint8_t *dst, const uint8_t *src, int w, int *red, int *green, int *blue, int *alpha){
+ int i;
+ int r,g,b,a;
+ r= *red;
+ g= *green;
+ b= *blue;
+ a= *alpha;
+
+ for(i=0; i<w; i++){
+ b+= src[4*i+B];
+ g+= src[4*i+G];
+ r+= src[4*i+R];
+ a+= src[4*i+A];
+
+ dst[4*i+B]= b;
+ dst[4*i+G]= g;
+ dst[4*i+R]= r;
+ dst[4*i+A]= a;
+ }
+
+ *red= r;
+ *green= g;
+ *blue= b;
+ *alpha= a;
+}
+#undef B
+#undef G
+#undef R
+#undef A
+
+#define BUTTERFLY2(o1,o2,i1,i2) \
+o1= (i1)+(i2);\
+o2= (i1)-(i2);
+
+#define BUTTERFLY1(x,y) \
+{\
+ int a,b;\
+ a= x;\
+ b= y;\
+ x= a+b;\
+ y= a-b;\
+}
+
+#define BUTTERFLYA(x,y) (FFABS((x)+(y)) + FFABS((x)-(y)))
+
+static int hadamard8_diff8x8_c(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){
+ int i;
+ int temp[64];
+ int sum=0;
+
+ assert(h==8);
+
+ for(i=0; i<8; i++){
+ //FIXME try pointer walks
+ BUTTERFLY2(temp[8*i+0], temp[8*i+1], src[stride*i+0]-dst[stride*i+0],src[stride*i+1]-dst[stride*i+1]);
+ BUTTERFLY2(temp[8*i+2], temp[8*i+3], src[stride*i+2]-dst[stride*i+2],src[stride*i+3]-dst[stride*i+3]);
+ BUTTERFLY2(temp[8*i+4], temp[8*i+5], src[stride*i+4]-dst[stride*i+4],src[stride*i+5]-dst[stride*i+5]);
+ BUTTERFLY2(temp[8*i+6], temp[8*i+7], src[stride*i+6]-dst[stride*i+6],src[stride*i+7]-dst[stride*i+7]);
+
+ BUTTERFLY1(temp[8*i+0], temp[8*i+2]);
+ BUTTERFLY1(temp[8*i+1], temp[8*i+3]);
+ BUTTERFLY1(temp[8*i+4], temp[8*i+6]);
+ BUTTERFLY1(temp[8*i+5], temp[8*i+7]);
+
+ BUTTERFLY1(temp[8*i+0], temp[8*i+4]);
+ BUTTERFLY1(temp[8*i+1], temp[8*i+5]);
+ BUTTERFLY1(temp[8*i+2], temp[8*i+6]);
+ BUTTERFLY1(temp[8*i+3], temp[8*i+7]);
+ }
+
+ for(i=0; i<8; i++){
+ BUTTERFLY1(temp[8*0+i], temp[8*1+i]);
+ BUTTERFLY1(temp[8*2+i], temp[8*3+i]);
+ BUTTERFLY1(temp[8*4+i], temp[8*5+i]);
+ BUTTERFLY1(temp[8*6+i], temp[8*7+i]);
+
+ BUTTERFLY1(temp[8*0+i], temp[8*2+i]);
+ BUTTERFLY1(temp[8*1+i], temp[8*3+i]);
+ BUTTERFLY1(temp[8*4+i], temp[8*6+i]);
+ BUTTERFLY1(temp[8*5+i], temp[8*7+i]);
+
+ sum +=
+ BUTTERFLYA(temp[8*0+i], temp[8*4+i])
+ +BUTTERFLYA(temp[8*1+i], temp[8*5+i])
+ +BUTTERFLYA(temp[8*2+i], temp[8*6+i])
+ +BUTTERFLYA(temp[8*3+i], temp[8*7+i]);
+ }
+#if 0
+static int maxi=0;
+if(sum>maxi){
+ maxi=sum;
+ printf("MAX:%d\n", maxi);
+}
+#endif
+ return sum;
+}
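+/* Note added for exposition (not part of the upstream source): this is the
+ * SATD metric: an 8x8 Hadamard (butterfly) transform of the src-dst
+ * difference, summing the absolute transform coefficients. The _intra variant
+ * below transforms the source block alone and subtracts a DC/mean term so the
+ * block mean does not dominate the score. */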
+
+static int hadamard8_intra8x8_c(/*MpegEncContext*/ void *s, uint8_t *src, uint8_t *dummy, int stride, int h){
+ int i;
+ int temp[64];
+ int sum=0;
+
+ assert(h==8);
+
+ for(i=0; i<8; i++){
+ //FIXME try pointer walks
+ BUTTERFLY2(temp[8*i+0], temp[8*i+1], src[stride*i+0],src[stride*i+1]);
+ BUTTERFLY2(temp[8*i+2], temp[8*i+3], src[stride*i+2],src[stride*i+3]);
+ BUTTERFLY2(temp[8*i+4], temp[8*i+5], src[stride*i+4],src[stride*i+5]);
+ BUTTERFLY2(temp[8*i+6], temp[8*i+7], src[stride*i+6],src[stride*i+7]);
+
+ BUTTERFLY1(temp[8*i+0], temp[8*i+2]);
+ BUTTERFLY1(temp[8*i+1], temp[8*i+3]);
+ BUTTERFLY1(temp[8*i+4], temp[8*i+6]);
+ BUTTERFLY1(temp[8*i+5], temp[8*i+7]);
+
+ BUTTERFLY1(temp[8*i+0], temp[8*i+4]);
+ BUTTERFLY1(temp[8*i+1], temp[8*i+5]);
+ BUTTERFLY1(temp[8*i+2], temp[8*i+6]);
+ BUTTERFLY1(temp[8*i+3], temp[8*i+7]);
+ }
+
+ for(i=0; i<8; i++){
+ BUTTERFLY1(temp[8*0+i], temp[8*1+i]);
+ BUTTERFLY1(temp[8*2+i], temp[8*3+i]);
+ BUTTERFLY1(temp[8*4+i], temp[8*5+i]);
+ BUTTERFLY1(temp[8*6+i], temp[8*7+i]);
+
+ BUTTERFLY1(temp[8*0+i], temp[8*2+i]);
+ BUTTERFLY1(temp[8*1+i], temp[8*3+i]);
+ BUTTERFLY1(temp[8*4+i], temp[8*6+i]);
+ BUTTERFLY1(temp[8*5+i], temp[8*7+i]);
+
+ sum +=
+ BUTTERFLYA(temp[8*0+i], temp[8*4+i])
+ +BUTTERFLYA(temp[8*1+i], temp[8*5+i])
+ +BUTTERFLYA(temp[8*2+i], temp[8*6+i])
+ +BUTTERFLYA(temp[8*3+i], temp[8*7+i]);
+ }
+
+ sum -= FFABS(temp[8*0] + temp[8*4]); // -mean
+
+ return sum;
+}
+
+static int dct_sad8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){
+ MpegEncContext * const s= (MpegEncContext *)c;
+ DECLARE_ALIGNED_16(uint64_t, aligned_temp)[sizeof(DCTELEM)*64/8];
+ DCTELEM * const temp= (DCTELEM*)aligned_temp;
+
+ assert(h==8);
+
+ s->dsp.diff_pixels(temp, src1, src2, stride);
+ s->dsp.fdct(temp);
+ return s->dsp.sum_abs_dctelem(temp);
+}
+
+#if CONFIG_GPL
+#define DCT8_1D {\
+ const int s07 = SRC(0) + SRC(7);\
+ const int s16 = SRC(1) + SRC(6);\
+ const int s25 = SRC(2) + SRC(5);\
+ const int s34 = SRC(3) + SRC(4);\
+ const int a0 = s07 + s34;\
+ const int a1 = s16 + s25;\
+ const int a2 = s07 - s34;\
+ const int a3 = s16 - s25;\
+ const int d07 = SRC(0) - SRC(7);\
+ const int d16 = SRC(1) - SRC(6);\
+ const int d25 = SRC(2) - SRC(5);\
+ const int d34 = SRC(3) - SRC(4);\
+ const int a4 = d16 + d25 + (d07 + (d07>>1));\
+ const int a5 = d07 - d34 - (d25 + (d25>>1));\
+ const int a6 = d07 + d34 - (d16 + (d16>>1));\
+ const int a7 = d16 - d25 + (d34 + (d34>>1));\
+ DST(0, a0 + a1 ) ;\
+ DST(1, a4 + (a7>>2)) ;\
+ DST(2, a2 + (a3>>1)) ;\
+ DST(3, a5 + (a6>>2)) ;\
+ DST(4, a0 - a1 ) ;\
+ DST(5, a6 - (a5>>2)) ;\
+ DST(6, (a2>>1) - a3 ) ;\
+ DST(7, (a4>>2) - a7 ) ;\
+}
+
+static int dct264_sad8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){
+ MpegEncContext * const s= (MpegEncContext *)c;
+ DCTELEM dct[8][8];
+ int i;
+ int sum=0;
+
+ s->dsp.diff_pixels(dct[0], src1, src2, stride);
+
+#define SRC(x) dct[i][x]
+#define DST(x,v) dct[i][x]= v
+ for( i = 0; i < 8; i++ )
+ DCT8_1D
+#undef SRC
+#undef DST
+
+#define SRC(x) dct[x][i]
+#define DST(x,v) sum += FFABS(v)
+ for( i = 0; i < 8; i++ )
+ DCT8_1D
+#undef SRC
+#undef DST
+ return sum;
+}
+#endif
+
+static int dct_max8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){
+ MpegEncContext * const s= (MpegEncContext *)c;
+ DECLARE_ALIGNED_16(uint64_t, aligned_temp)[sizeof(DCTELEM)*64/8];
+ DCTELEM * const temp= (DCTELEM*)aligned_temp;
+ int sum=0, i;
+
+ assert(h==8);
+
+ s->dsp.diff_pixels(temp, src1, src2, stride);
+ s->dsp.fdct(temp);
+
+ for(i=0; i<64; i++)
+ sum= FFMAX(sum, FFABS(temp[i]));
+
+ return sum;
+}
+
+static int quant_psnr8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){
+ MpegEncContext * const s= (MpegEncContext *)c;
+ DECLARE_ALIGNED_16(uint64_t, aligned_temp)[sizeof(DCTELEM)*64*2/8];
+ DCTELEM * const temp= (DCTELEM*)aligned_temp;
+ DCTELEM * const bak = ((DCTELEM*)aligned_temp)+64;
+ int sum=0, i;
+
+ assert(h==8);
+ s->mb_intra=0;
+
+ s->dsp.diff_pixels(temp, src1, src2, stride);
+
+ memcpy(bak, temp, 64*sizeof(DCTELEM));
+
+ s->block_last_index[0/*FIXME*/]= s->fast_dct_quantize(s, temp, 0/*FIXME*/, s->qscale, &i);
+ s->dct_unquantize_inter(s, temp, 0, s->qscale);
+ ff_simple_idct(temp); //FIXME
+
+ for(i=0; i<64; i++)
+ sum+= (temp[i]-bak[i])*(temp[i]-bak[i]);
+
+ return sum;
+}
+
+static int rd8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){
+ MpegEncContext * const s= (MpegEncContext *)c;
+ const uint8_t *scantable= s->intra_scantable.permutated;
+ DECLARE_ALIGNED_16(uint64_t, aligned_temp)[sizeof(DCTELEM)*64/8];
+ DECLARE_ALIGNED_16(uint64_t, aligned_src1)[8];
+ DECLARE_ALIGNED_16(uint64_t, aligned_src2)[8];
+ DCTELEM * const temp= (DCTELEM*)aligned_temp;
+ uint8_t * const lsrc1 = (uint8_t*)aligned_src1;
+ uint8_t * const lsrc2 = (uint8_t*)aligned_src2;
+ int i, last, run, bits, level, distortion, start_i;
+ const int esc_length= s->ac_esc_length;
+ uint8_t * length;
+ uint8_t * last_length;
+
+ assert(h==8);
+
+ copy_block8(lsrc1, src1, 8, stride, 8);
+ copy_block8(lsrc2, src2, 8, stride, 8);
+
+ s->dsp.diff_pixels(temp, lsrc1, lsrc2, 8);
+
+ s->block_last_index[0/*FIXME*/]= last= s->fast_dct_quantize(s, temp, 0/*FIXME*/, s->qscale, &i);
+
+ bits=0;
+
+ if (s->mb_intra) {
+ start_i = 1;
+ length = s->intra_ac_vlc_length;
+ last_length= s->intra_ac_vlc_last_length;
+ bits+= s->luma_dc_vlc_length[temp[0] + 256]; //FIXME chroma
+ } else {
+ start_i = 0;
+ length = s->inter_ac_vlc_length;
+ last_length= s->inter_ac_vlc_last_length;
+ }
+
+ if(last>=start_i){
+ run=0;
+ for(i=start_i; i<last; i++){
+ int j= scantable[i];
+ level= temp[j];
+
+ if(level){
+ level+=64;
+ if((level&(~127)) == 0){
+ bits+= length[UNI_AC_ENC_INDEX(run, level)];
+ }else
+ bits+= esc_length;
+ run=0;
+ }else
+ run++;
+ }
+ i= scantable[last];
+
+ level= temp[i] + 64;
+
+ assert(level - 64);
+
+ if((level&(~127)) == 0){
+ bits+= last_length[UNI_AC_ENC_INDEX(run, level)];
+ }else
+ bits+= esc_length;
+
+ }
+
+ if(last>=0){
+ if(s->mb_intra)
+ s->dct_unquantize_intra(s, temp, 0, s->qscale);
+ else
+ s->dct_unquantize_inter(s, temp, 0, s->qscale);
+ }
+
+ s->dsp.idct_add(lsrc2, 8, temp);
+
+ distortion= s->dsp.sse[1](NULL, lsrc2, lsrc1, 8, 8);
+
+ return distortion + ((bits*s->qscale*s->qscale*109 + 64)>>7);
+}
+
+static int bit8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){
+ MpegEncContext * const s= (MpegEncContext *)c;
+ const uint8_t *scantable= s->intra_scantable.permutated;
+ DECLARE_ALIGNED_16(uint64_t, aligned_temp)[sizeof(DCTELEM)*64/8];
+ DCTELEM * const temp= (DCTELEM*)aligned_temp;
+ int i, last, run, bits, level, start_i;
+ const int esc_length= s->ac_esc_length;
+ uint8_t * length;
+ uint8_t * last_length;
+
+ assert(h==8);
+
+ s->dsp.diff_pixels(temp, src1, src2, stride);
+
+ s->block_last_index[0/*FIXME*/]= last= s->fast_dct_quantize(s, temp, 0/*FIXME*/, s->qscale, &i);
+
+ bits=0;
+
+ if (s->mb_intra) {
+ start_i = 1;
+ length = s->intra_ac_vlc_length;
+ last_length= s->intra_ac_vlc_last_length;
+ bits+= s->luma_dc_vlc_length[temp[0] + 256]; //FIXME chroma
+ } else {
+ start_i = 0;
+ length = s->inter_ac_vlc_length;
+ last_length= s->inter_ac_vlc_last_length;
+ }
+
+ if(last>=start_i){
+ run=0;
+ for(i=start_i; i<last; i++){
+ int j= scantable[i];
+ level= temp[j];
+
+ if(level){
+ level+=64;
+ if((level&(~127)) == 0){
+ bits+= length[UNI_AC_ENC_INDEX(run, level)];
+ }else
+ bits+= esc_length;
+ run=0;
+ }else
+ run++;
+ }
+ i= scantable[last];
+
+ level= temp[i] + 64;
+
+ assert(level - 64);
+
+ if((level&(~127)) == 0){
+ bits+= last_length[UNI_AC_ENC_INDEX(run, level)];
+ }else
+ bits+= esc_length;
+ }
+
+ return bits;
+}
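+
+/*
+ * Editorial note (not in the original source): in rd8x8_c() and bit8x8_c() above,
+ * each nonzero coefficient level is biased by +64 so that the signed range
+ * [-64, 63] indexes the per-(run,level) VLC length tables directly;
+ * (level & ~127) != 0 then flags any level outside that range, which is charged
+ * at the escape-code length (esc_length) instead of a table lookup.
+ */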
+
+#define VSAD_INTRA(size) \
+static int vsad_intra##size##_c(/*MpegEncContext*/ void *c, uint8_t *s, uint8_t *dummy, int stride, int h){ \
+ int score=0; \
+ int x,y; \
+ \
+ for(y=1; y<h; y++){ \
+ for(x=0; x<size; x+=4){ \
+ score+= FFABS(s[x ] - s[x +stride]) + FFABS(s[x+1] - s[x+1+stride]) \
+ +FFABS(s[x+2] - s[x+2+stride]) + FFABS(s[x+3] - s[x+3+stride]); \
+ } \
+ s+= stride; \
+ } \
+ \
+ return score; \
+}
+VSAD_INTRA(8)
+VSAD_INTRA(16)
+
+static int vsad16_c(/*MpegEncContext*/ void *c, uint8_t *s1, uint8_t *s2, int stride, int h){
+ int score=0;
+ int x,y;
+
+ for(y=1; y<h; y++){
+ for(x=0; x<16; x++){
+ score+= FFABS(s1[x ] - s2[x ] - s1[x +stride] + s2[x +stride]);
+ }
+ s1+= stride;
+ s2+= stride;
+ }
+
+ return score;
+}
+
+#define SQ(a) ((a)*(a))
+#define VSSE_INTRA(size) \
+static int vsse_intra##size##_c(/*MpegEncContext*/ void *c, uint8_t *s, uint8_t *dummy, int stride, int h){ \
+ int score=0; \
+ int x,y; \
+ \
+ for(y=1; y<h; y++){ \
+ for(x=0; x<size; x+=4){ \
+ score+= SQ(s[x ] - s[x +stride]) + SQ(s[x+1] - s[x+1+stride]) \
+ +SQ(s[x+2] - s[x+2+stride]) + SQ(s[x+3] - s[x+3+stride]); \
+ } \
+ s+= stride; \
+ } \
+ \
+ return score; \
+}
+VSSE_INTRA(8)
+VSSE_INTRA(16)
+
+static int vsse16_c(/*MpegEncContext*/ void *c, uint8_t *s1, uint8_t *s2, int stride, int h){
+ int score=0;
+ int x,y;
+
+ for(y=1; y<h; y++){
+ for(x=0; x<16; x++){
+ score+= SQ(s1[x ] - s2[x ] - s1[x +stride] + s2[x +stride]);
+ }
+ s1+= stride;
+ s2+= stride;
+ }
+
+ return score;
+}
+
+static int ssd_int8_vs_int16_c(const int8_t *pix1, const int16_t *pix2,
+ int size){
+ int score=0;
+ int i;
+ for(i=0; i<size; i++)
+ score += (pix1[i]-pix2[i])*(pix1[i]-pix2[i]);
+ return score;
+}
+
+WRAPPER8_16_SQ(hadamard8_diff8x8_c, hadamard8_diff16_c)
+WRAPPER8_16_SQ(hadamard8_intra8x8_c, hadamard8_intra16_c)
+WRAPPER8_16_SQ(dct_sad8x8_c, dct_sad16_c)
+#if CONFIG_GPL
+WRAPPER8_16_SQ(dct264_sad8x8_c, dct264_sad16_c)
+#endif
+WRAPPER8_16_SQ(dct_max8x8_c, dct_max16_c)
+WRAPPER8_16_SQ(quant_psnr8x8_c, quant_psnr16_c)
+WRAPPER8_16_SQ(rd8x8_c, rd16_c)
+WRAPPER8_16_SQ(bit8x8_c, bit16_c)
+
+static void vector_fmul_c(float *dst, const float *src, int len){
+ int i;
+ for(i=0; i<len; i++)
+ dst[i] *= src[i];
+}
+
+static void vector_fmul_reverse_c(float *dst, const float *src0, const float *src1, int len){
+ int i;
+ src1 += len-1;
+ for(i=0; i<len; i++)
+ dst[i] = src0[i] * src1[-i];
+}
+
+static void vector_fmul_add_c(float *dst, const float *src0, const float *src1, const float *src2, int len){
+ int i;
+ for(i=0; i<len; i++)
+ dst[i] = src0[i] * src1[i] + src2[i];
+}
+
+void ff_vector_fmul_window_c(float *dst, const float *src0, const float *src1, const float *win, float add_bias, int len){
+ int i,j;
+ dst += len;
+ win += len;
+ src0+= len;
+ for(i=-len, j=len-1; i<0; i++, j--) {
+ float s0 = src0[i];
+ float s1 = src1[j];
+ float wi = win[i];
+ float wj = win[j];
+ dst[i] = s0*wj - s1*wi + add_bias;
+ dst[j] = s0*wi + s1*wj + add_bias;
+ }
+}
+
+#if CONFIG_AAC_DECODER
+static void vector_fmul_scalar_c(float *dst, const float *src, float mul,
+ int len)
+{
+ int i;
+ for (i = 0; i < len; i++)
+ dst[i] = src[i] * mul;
+}
+
+static void vector_fmul_sv_scalar_2_c(float *dst, const float *src,
+ const float **sv, float mul, int len)
+{
+ int i;
+ for (i = 0; i < len; i += 2, sv++) {
+ dst[i ] = src[i ] * sv[0][0] * mul;
+ dst[i+1] = src[i+1] * sv[0][1] * mul;
+ }
+}
+
+static void vector_fmul_sv_scalar_4_c(float *dst, const float *src,
+ const float **sv, float mul, int len)
+{
+ int i;
+ for (i = 0; i < len; i += 4, sv++) {
+ dst[i ] = src[i ] * sv[0][0] * mul;
+ dst[i+1] = src[i+1] * sv[0][1] * mul;
+ dst[i+2] = src[i+2] * sv[0][2] * mul;
+ dst[i+3] = src[i+3] * sv[0][3] * mul;
+ }
+}
+
+static void sv_fmul_scalar_2_c(float *dst, const float **sv, float mul,
+ int len)
+{
+ int i;
+ for (i = 0; i < len; i += 2, sv++) {
+ dst[i ] = sv[0][0] * mul;
+ dst[i+1] = sv[0][1] * mul;
+ }
+}
+
+static void sv_fmul_scalar_4_c(float *dst, const float **sv, float mul,
+ int len)
+{
+ int i;
+ for (i = 0; i < len; i += 4, sv++) {
+ dst[i ] = sv[0][0] * mul;
+ dst[i+1] = sv[0][1] * mul;
+ dst[i+2] = sv[0][2] * mul;
+ dst[i+3] = sv[0][3] * mul;
+ }
+}
+#endif
+
+static void butterflies_float_c(float *restrict v1, float *restrict v2,
+ int len)
+{
+ int i;
+ for (i = 0; i < len; i++) {
+ float t = v1[i] - v2[i];
+ v1[i] += v2[i];
+ v2[i] = t;
+ }
+}
+
+static float scalarproduct_float_c(const float *v1, const float *v2, int len)
+{
+ float p = 0.0;
+ int i;
+
+ for (i = 0; i < len; i++)
+ p += v1[i] * v2[i];
+
+ return p;
+}
+
+static void int32_to_float_fmul_scalar_c(float *dst, const int *src, float mul, int len){
+ int i;
+ for(i=0; i<len; i++)
+ dst[i] = src[i] * mul;
+}
+
+static av_always_inline int float_to_int16_one(const float *src){
+ int_fast32_t tmp = *(const int32_t*)src;
+ if(tmp & 0xf0000){
+ tmp = (0x43c0ffff - tmp)>>31;
+ // is this faster on some gcc/cpu combinations?
+// if(tmp > 0x43c0ffff) tmp = 0xFFFF;
+// else tmp = 0;
+ }
+ return tmp - 0x8000;
+}
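+
+/*
+ * Editorial sketch (not in the original source): float_to_int16_one() relies on the
+ * IEEE-754 layout of floats in [384.0, 386.0): for those values the low 16 mantissa
+ * bits equal (x - 384.0) * 32768, so subtracting 0x8000 and truncating to int16_t
+ * maps 385.0 -> 0, 384.0 -> -32768, and the (tmp & 0xf0000) branch clamps anything
+ * outside the range. A standalone check, assuming <stdio.h>, <stdint.h> and
+ * <string.h>; memcpy() replaces the pointer cast to keep the sketch well-defined.
+ */
+#if 0
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+static int16_t f2i16(float f)
+{
+    int32_t bits;
+    int_fast32_t tmp;
+    memcpy(&bits, &f, sizeof(bits));
+    tmp = bits;
+    if (tmp & 0xf0000)
+        tmp = (0x43c0ffff - tmp) >> 31; /* out of range: force 0x0000 or 0xffff */
+    return (int16_t)(tmp - 0x8000);
+}
+int main(void)
+{
+    /* expected output: 0 -32768 16384 32767 */
+    printf("%d %d %d %d\n", f2i16(385.0f), f2i16(384.0f), f2i16(385.5f), f2i16(1000.0f));
+    return 0;
+}
+#endif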
+
+void ff_float_to_int16_c(int16_t *dst, const float *src, long len){
+ int i;
+ for(i=0; i<len; i++)
+ dst[i] = float_to_int16_one(src+i);
+}
+
+void ff_float_to_int16_interleave_c(int16_t *dst, const float **src, long len, int channels){
+ int i,j,c;
+ if(channels==2){
+ for(i=0; i<len; i++){
+ dst[2*i] = float_to_int16_one(src[0]+i);
+ dst[2*i+1] = float_to_int16_one(src[1]+i);
+ }
+ }else{
+ for(c=0; c<channels; c++)
+ for(i=0, j=c; i<len; i++, j+=channels)
+ dst[j] = float_to_int16_one(src[c]+i);
+ }
+}
+
+#define W0 2048
+#define W1 2841 /* 2048*sqrt (2)*cos (1*pi/16) */
+#define W2 2676 /* 2048*sqrt (2)*cos (2*pi/16) */
+#define W3 2408 /* 2048*sqrt (2)*cos (3*pi/16) */
+#define W4 2048 /* 2048*sqrt (2)*cos (4*pi/16) */
+#define W5 1609 /* 2048*sqrt (2)*cos (5*pi/16) */
+#define W6 1108 /* 2048*sqrt (2)*cos (6*pi/16) */
+#define W7 565 /* 2048*sqrt (2)*cos (7*pi/16) */
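+
+/*
+ * Editorial sketch (not in the original source): W1..W7 above are
+ * 2048*sqrt(2)*cos(n*pi/16) rounded to the nearest integer; W4 works out to exactly
+ * 2048, which is also used for W0. A standalone check, assuming <stdio.h> and a
+ * <math.h> that provides M_PI:
+ */
+#if 0
+#include <stdio.h>
+#include <math.h>
+int main(void)
+{
+    int n;
+    for (n = 1; n <= 7; n++)
+        printf("W%d = %d\n", n,
+               (int)floor(2048.0 * sqrt(2.0) * cos(n * M_PI / 16.0) + 0.5));
+    return 0; /* prints 2841, 2676, 2408, 2048, 1609, 1108, 565, matching the table */
+}
+#endif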
+
+static void wmv2_idct_row(short * b)
+{
+ int s1,s2;
+ int a0,a1,a2,a3,a4,a5,a6,a7;
+ /*step 1*/
+ a1 = W1*b[1]+W7*b[7];
+ a7 = W7*b[1]-W1*b[7];
+ a5 = W5*b[5]+W3*b[3];
+ a3 = W3*b[5]-W5*b[3];
+ a2 = W2*b[2]+W6*b[6];
+ a6 = W6*b[2]-W2*b[6];
+ a0 = W0*b[0]+W0*b[4];
+ a4 = W0*b[0]-W0*b[4];
+ /*step 2*/
+ s1 = (181*(a1-a5+a7-a3)+128)>>8;//1,3,5,7,
+ s2 = (181*(a1-a5-a7+a3)+128)>>8;
+ /*step 3*/
+ b[0] = (a0+a2+a1+a5 + (1<<7))>>8;
+ b[1] = (a4+a6 +s1 + (1<<7))>>8;
+ b[2] = (a4-a6 +s2 + (1<<7))>>8;
+ b[3] = (a0-a2+a7+a3 + (1<<7))>>8;
+ b[4] = (a0-a2-a7-a3 + (1<<7))>>8;
+ b[5] = (a4-a6 -s2 + (1<<7))>>8;
+ b[6] = (a4+a6 -s1 + (1<<7))>>8;
+ b[7] = (a0+a2-a1-a5 + (1<<7))>>8;
+}
+static void wmv2_idct_col(short * b)
+{
+ int s1,s2;
+ int a0,a1,a2,a3,a4,a5,a6,a7;
+ /*step 1, with extended precision*/
+ a1 = (W1*b[8*1]+W7*b[8*7] + 4)>>3;
+ a7 = (W7*b[8*1]-W1*b[8*7] + 4)>>3;
+ a5 = (W5*b[8*5]+W3*b[8*3] + 4)>>3;
+ a3 = (W3*b[8*5]-W5*b[8*3] + 4)>>3;
+ a2 = (W2*b[8*2]+W6*b[8*6] + 4)>>3;
+ a6 = (W6*b[8*2]-W2*b[8*6] + 4)>>3;
+ a0 = (W0*b[8*0]+W0*b[8*4] )>>3;
+ a4 = (W0*b[8*0]-W0*b[8*4] )>>3;
+ /*step 2*/
+ s1 = (181*(a1-a5+a7-a3)+128)>>8;
+ s2 = (181*(a1-a5-a7+a3)+128)>>8;
+ /*step 3*/
+ b[8*0] = (a0+a2+a1+a5 + (1<<13))>>14;
+ b[8*1] = (a4+a6 +s1 + (1<<13))>>14;
+ b[8*2] = (a4-a6 +s2 + (1<<13))>>14;
+ b[8*3] = (a0-a2+a7+a3 + (1<<13))>>14;
+
+ b[8*4] = (a0-a2-a7-a3 + (1<<13))>>14;
+ b[8*5] = (a4-a6 -s2 + (1<<13))>>14;
+ b[8*6] = (a4+a6 -s1 + (1<<13))>>14;
+ b[8*7] = (a0+a2-a1-a5 + (1<<13))>>14;
+}
+void ff_wmv2_idct_c(short * block){
+ int i;
+
+ for(i=0;i<64;i+=8){
+ wmv2_idct_row(block+i);
+ }
+ for(i=0;i<8;i++){
+ wmv2_idct_col(block+i);
+ }
+}
+/* XXX: these functions should be removed as soon as all IDCTs are
+   converted */
+static void ff_wmv2_idct_put_c(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ ff_wmv2_idct_c(block);
+ put_pixels_clamped_c(block, dest, line_size);
+}
+static void ff_wmv2_idct_add_c(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ ff_wmv2_idct_c(block);
+ add_pixels_clamped_c(block, dest, line_size);
+}
+static void ff_jref_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ j_rev_dct (block);
+ put_pixels_clamped_c(block, dest, line_size);
+}
+static void ff_jref_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ j_rev_dct (block);
+ add_pixels_clamped_c(block, dest, line_size);
+}
+
+static void ff_jref_idct4_put(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ j_rev_dct4 (block);
+ put_pixels_clamped4_c(block, dest, line_size);
+}
+static void ff_jref_idct4_add(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ j_rev_dct4 (block);
+ add_pixels_clamped4_c(block, dest, line_size);
+}
+
+static void ff_jref_idct2_put(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ j_rev_dct2 (block);
+ put_pixels_clamped2_c(block, dest, line_size);
+}
+static void ff_jref_idct2_add(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ j_rev_dct2 (block);
+ add_pixels_clamped2_c(block, dest, line_size);
+}
+
+static void ff_jref_idct1_put(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+
+ dest[0] = cm[(block[0] + 4)>>3];
+}
+static void ff_jref_idct1_add(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+
+ dest[0] = cm[dest[0] + ((block[0] + 4)>>3)];
+}
+
+static void just_return(void *mem av_unused, int stride av_unused, int h av_unused) { return; }
+
+/* init static data */
+av_cold void dsputil_static_init(void)
+{
+ int i;
+
+ for(i=0;i<256;i++) ff_cropTbl[i + MAX_NEG_CROP] = i;
+ for(i=0;i<MAX_NEG_CROP;i++) {
+ ff_cropTbl[i] = 0;
+ ff_cropTbl[i + MAX_NEG_CROP + 256] = 255;
+ }
+
+ for(i=0;i<512;i++) {
+ ff_squareTbl[i] = (i - 256) * (i - 256);
+ }
+
+ for(i=0; i<64; i++) inv_zigzag_direct16[ff_zigzag_direct[i]]= i+1;
+}
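+
+/*
+ * Editorial sketch (not in the original source): after dsputil_static_init(),
+ * ff_cropTbl[x + MAX_NEG_CROP] is a branch-free clamp of x to [0, 255]; this is
+ * what the *_pixels_clamped() and idct_put/idct_add paths rely on. Scalar
+ * reference, valid for x in [-MAX_NEG_CROP, 255 + MAX_NEG_CROP]:
+ */
+#if 0
+static uint8_t crop_ref(int x)
+{
+    if (x < 0)   return 0;
+    if (x > 255) return 255;
+    return (uint8_t)x;
+}
+#endif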
+
+int ff_check_alignment(void){
+ static int did_fail=0;
+ DECLARE_ALIGNED_16(int, aligned);
+
+ if((intptr_t)&aligned & 15){
+ if(!did_fail){
+#if HAVE_MMX
+ av_log(NULL, AV_LOG_ERROR,
+ "Compiler did not align stack variables. Libavcodec has been miscompiled\n"
+ "and may be very slow or crash. This is not a bug in libavcodec,\n"
+ "but in the compiler. You may try recompiling using gcc >= 4.2.\n"
+ "Do not report crashes to FFmpeg developers.\n");
+#endif
+ did_fail=1;
+ }
+ return -1;
+ }
+ return 0;
+}
+
+av_cold void attribute_align_arg dsputil_init(DSPContext* c, AVCodecContext *avctx)
+{
+ int i;
+
+ ff_check_alignment();
+
+#if CONFIG_ENCODERS
+ if(avctx->dct_algo==FF_DCT_FASTINT) {
+ c->fdct = fdct_ifast;
+ c->fdct248 = fdct_ifast248;
+ }
+ else if(avctx->dct_algo==FF_DCT_FAAN) {
+ c->fdct = ff_faandct;
+ c->fdct248 = ff_faandct248;
+ }
+ else {
+ c->fdct = ff_jpeg_fdct_islow; //slow/accurate/default
+ c->fdct248 = ff_fdct248_islow;
+ }
+#endif //CONFIG_ENCODERS
+
+ if(avctx->lowres==1){
+ if(avctx->idct_algo==FF_IDCT_INT || avctx->idct_algo==FF_IDCT_AUTO || !CONFIG_H264_DECODER){
+ c->idct_put= ff_jref_idct4_put;
+ c->idct_add= ff_jref_idct4_add;
+ }else{
+ c->idct_put= ff_h264_lowres_idct_put_c;
+ c->idct_add= ff_h264_lowres_idct_add_c;
+ }
+ c->idct = j_rev_dct4;
+ c->idct_permutation_type= FF_NO_IDCT_PERM;
+ }else if(avctx->lowres==2){
+ c->idct_put= ff_jref_idct2_put;
+ c->idct_add= ff_jref_idct2_add;
+ c->idct = j_rev_dct2;
+ c->idct_permutation_type= FF_NO_IDCT_PERM;
+ }else if(avctx->lowres==3){
+ c->idct_put= ff_jref_idct1_put;
+ c->idct_add= ff_jref_idct1_add;
+ c->idct = j_rev_dct1;
+ c->idct_permutation_type= FF_NO_IDCT_PERM;
+ }else{
+ if(avctx->idct_algo==FF_IDCT_INT){
+ c->idct_put= ff_jref_idct_put;
+ c->idct_add= ff_jref_idct_add;
+ c->idct = j_rev_dct;
+ c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
+ }else if((CONFIG_VP3_DECODER || CONFIG_VP5_DECODER || CONFIG_VP6_DECODER ) &&
+ avctx->idct_algo==FF_IDCT_VP3){
+ c->idct_put= ff_vp3_idct_put_c;
+ c->idct_add= ff_vp3_idct_add_c;
+ c->idct = ff_vp3_idct_c;
+ c->idct_permutation_type= FF_NO_IDCT_PERM;
+ }else if(avctx->idct_algo==FF_IDCT_WMV2){
+ c->idct_put= ff_wmv2_idct_put_c;
+ c->idct_add= ff_wmv2_idct_add_c;
+ c->idct = ff_wmv2_idct_c;
+ c->idct_permutation_type= FF_NO_IDCT_PERM;
+ }else if(avctx->idct_algo==FF_IDCT_FAAN){
+ c->idct_put= ff_faanidct_put;
+ c->idct_add= ff_faanidct_add;
+ c->idct = ff_faanidct;
+ c->idct_permutation_type= FF_NO_IDCT_PERM;
+ }else{ //accurate/default
+ c->idct_put= ff_simple_idct_put;
+ c->idct_add= ff_simple_idct_add;
+ c->idct = ff_simple_idct;
+ c->idct_permutation_type= FF_NO_IDCT_PERM;
+ }
+ }
+
+ if (CONFIG_H264_DECODER) {
+ c->h264_idct_add= ff_h264_idct_add_c;
+ c->h264_idct8_add= ff_h264_idct8_add_c;
+ c->h264_idct_dc_add= ff_h264_idct_dc_add_c;
+ c->h264_idct8_dc_add= ff_h264_idct8_dc_add_c;
+ c->h264_idct_add16 = ff_h264_idct_add16_c;
+ c->h264_idct8_add4 = ff_h264_idct8_add4_c;
+ c->h264_idct_add8 = ff_h264_idct_add8_c;
+ c->h264_idct_add16intra= ff_h264_idct_add16intra_c;
+ }
+
+ c->get_pixels = get_pixels_c;
+ c->diff_pixels = diff_pixels_c;
+ c->put_pixels_clamped = put_pixels_clamped_c;
+ c->put_signed_pixels_clamped = put_signed_pixels_clamped_c;
+ c->add_pixels_clamped = add_pixels_clamped_c;
+ c->add_pixels8 = add_pixels8_c;
+ c->add_pixels4 = add_pixels4_c;
+ c->sum_abs_dctelem = sum_abs_dctelem_c;
+ c->gmc1 = gmc1_c;
+ c->gmc = ff_gmc_c;
+ c->clear_block = clear_block_c;
+ c->clear_blocks = clear_blocks_c;
+ c->pix_sum = pix_sum_c;
+ c->pix_norm1 = pix_norm1_c;
+
+ /* TODO [0] 16 [1] 8 */
+ c->pix_abs[0][0] = pix_abs16_c;
+ c->pix_abs[0][1] = pix_abs16_x2_c;
+ c->pix_abs[0][2] = pix_abs16_y2_c;
+ c->pix_abs[0][3] = pix_abs16_xy2_c;
+ c->pix_abs[1][0] = pix_abs8_c;
+ c->pix_abs[1][1] = pix_abs8_x2_c;
+ c->pix_abs[1][2] = pix_abs8_y2_c;
+ c->pix_abs[1][3] = pix_abs8_xy2_c;
+
+#define dspfunc(PFX, IDX, NUM) \
+ c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## NUM ## _c; \
+ c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## NUM ## _x2_c; \
+ c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## NUM ## _y2_c; \
+ c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## NUM ## _xy2_c
+
+ dspfunc(put, 0, 16);
+ dspfunc(put_no_rnd, 0, 16);
+ dspfunc(put, 1, 8);
+ dspfunc(put_no_rnd, 1, 8);
+ dspfunc(put, 2, 4);
+ dspfunc(put, 3, 2);
+
+ dspfunc(avg, 0, 16);
+ dspfunc(avg_no_rnd, 0, 16);
+ dspfunc(avg, 1, 8);
+ dspfunc(avg_no_rnd, 1, 8);
+ dspfunc(avg, 2, 4);
+ dspfunc(avg, 3, 2);
+#undef dspfunc
+
+ c->put_no_rnd_pixels_l2[0]= put_no_rnd_pixels16_l2_c;
+ c->put_no_rnd_pixels_l2[1]= put_no_rnd_pixels8_l2_c;
+
+ c->put_tpel_pixels_tab[ 0] = put_tpel_pixels_mc00_c;
+ c->put_tpel_pixels_tab[ 1] = put_tpel_pixels_mc10_c;
+ c->put_tpel_pixels_tab[ 2] = put_tpel_pixels_mc20_c;
+ c->put_tpel_pixels_tab[ 4] = put_tpel_pixels_mc01_c;
+ c->put_tpel_pixels_tab[ 5] = put_tpel_pixels_mc11_c;
+ c->put_tpel_pixels_tab[ 6] = put_tpel_pixels_mc21_c;
+ c->put_tpel_pixels_tab[ 8] = put_tpel_pixels_mc02_c;
+ c->put_tpel_pixels_tab[ 9] = put_tpel_pixels_mc12_c;
+ c->put_tpel_pixels_tab[10] = put_tpel_pixels_mc22_c;
+
+ c->avg_tpel_pixels_tab[ 0] = avg_tpel_pixels_mc00_c;
+ c->avg_tpel_pixels_tab[ 1] = avg_tpel_pixels_mc10_c;
+ c->avg_tpel_pixels_tab[ 2] = avg_tpel_pixels_mc20_c;
+ c->avg_tpel_pixels_tab[ 4] = avg_tpel_pixels_mc01_c;
+ c->avg_tpel_pixels_tab[ 5] = avg_tpel_pixels_mc11_c;
+ c->avg_tpel_pixels_tab[ 6] = avg_tpel_pixels_mc21_c;
+ c->avg_tpel_pixels_tab[ 8] = avg_tpel_pixels_mc02_c;
+ c->avg_tpel_pixels_tab[ 9] = avg_tpel_pixels_mc12_c;
+ c->avg_tpel_pixels_tab[10] = avg_tpel_pixels_mc22_c;
+
+#define dspfunc(PFX, IDX, NUM) \
+ c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_c; \
+ c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_c; \
+ c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_c; \
+ c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_c; \
+ c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_c; \
+ c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_c; \
+ c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_c; \
+ c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_c; \
+ c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_c; \
+ c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_c; \
+ c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_c; \
+ c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_c; \
+ c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_c; \
+ c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_c; \
+ c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_c; \
+ c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_c
+
+ dspfunc(put_qpel, 0, 16);
+ dspfunc(put_no_rnd_qpel, 0, 16);
+
+ dspfunc(avg_qpel, 0, 16);
+ /* dspfunc(avg_no_rnd_qpel, 0, 16); */
+
+ dspfunc(put_qpel, 1, 8);
+ dspfunc(put_no_rnd_qpel, 1, 8);
+
+ dspfunc(avg_qpel, 1, 8);
+ /* dspfunc(avg_no_rnd_qpel, 1, 8); */
+
+ dspfunc(put_h264_qpel, 0, 16);
+ dspfunc(put_h264_qpel, 1, 8);
+ dspfunc(put_h264_qpel, 2, 4);
+ dspfunc(put_h264_qpel, 3, 2);
+ dspfunc(avg_h264_qpel, 0, 16);
+ dspfunc(avg_h264_qpel, 1, 8);
+ dspfunc(avg_h264_qpel, 2, 4);
+
+#undef dspfunc
+ c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_c;
+ c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_c;
+ c->put_h264_chroma_pixels_tab[2]= put_h264_chroma_mc2_c;
+ c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_c;
+ c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_c;
+ c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_c;
+ c->put_no_rnd_vc1_chroma_pixels_tab[0]= put_no_rnd_vc1_chroma_mc8_c;
+ c->avg_no_rnd_vc1_chroma_pixels_tab[0]= avg_no_rnd_vc1_chroma_mc8_c;
+
+ c->weight_h264_pixels_tab[0]= weight_h264_pixels16x16_c;
+ c->weight_h264_pixels_tab[1]= weight_h264_pixels16x8_c;
+ c->weight_h264_pixels_tab[2]= weight_h264_pixels8x16_c;
+ c->weight_h264_pixels_tab[3]= weight_h264_pixels8x8_c;
+ c->weight_h264_pixels_tab[4]= weight_h264_pixels8x4_c;
+ c->weight_h264_pixels_tab[5]= weight_h264_pixels4x8_c;
+ c->weight_h264_pixels_tab[6]= weight_h264_pixels4x4_c;
+ c->weight_h264_pixels_tab[7]= weight_h264_pixels4x2_c;
+ c->weight_h264_pixels_tab[8]= weight_h264_pixels2x4_c;
+ c->weight_h264_pixels_tab[9]= weight_h264_pixels2x2_c;
+ c->biweight_h264_pixels_tab[0]= biweight_h264_pixels16x16_c;
+ c->biweight_h264_pixels_tab[1]= biweight_h264_pixels16x8_c;
+ c->biweight_h264_pixels_tab[2]= biweight_h264_pixels8x16_c;
+ c->biweight_h264_pixels_tab[3]= biweight_h264_pixels8x8_c;
+ c->biweight_h264_pixels_tab[4]= biweight_h264_pixels8x4_c;
+ c->biweight_h264_pixels_tab[5]= biweight_h264_pixels4x8_c;
+ c->biweight_h264_pixels_tab[6]= biweight_h264_pixels4x4_c;
+ c->biweight_h264_pixels_tab[7]= biweight_h264_pixels4x2_c;
+ c->biweight_h264_pixels_tab[8]= biweight_h264_pixels2x4_c;
+ c->biweight_h264_pixels_tab[9]= biweight_h264_pixels2x2_c;
+
+ c->draw_edges = draw_edges_c;
+
+#if CONFIG_CAVS_DECODER
+ ff_cavsdsp_init(c,avctx);
+#endif
+
+#if CONFIG_MLP_DECODER || CONFIG_TRUEHD_DECODER
+ ff_mlp_init(c, avctx);
+#endif
+#if CONFIG_VC1_DECODER
+ ff_vc1dsp_init(c,avctx);
+#endif
+#if CONFIG_WMV2_DECODER || CONFIG_VC1_DECODER
+ ff_intrax8dsp_init(c,avctx);
+#endif
+#if CONFIG_RV30_DECODER
+ ff_rv30dsp_init(c,avctx);
+#endif
+#if CONFIG_RV40_DECODER
+ ff_rv40dsp_init(c,avctx);
+ c->put_rv40_qpel_pixels_tab[0][15] = put_rv40_qpel16_mc33_c;
+ c->avg_rv40_qpel_pixels_tab[0][15] = avg_rv40_qpel16_mc33_c;
+ c->put_rv40_qpel_pixels_tab[1][15] = put_rv40_qpel8_mc33_c;
+ c->avg_rv40_qpel_pixels_tab[1][15] = avg_rv40_qpel8_mc33_c;
+#endif
+
+ c->put_mspel_pixels_tab[0]= put_mspel8_mc00_c;
+ c->put_mspel_pixels_tab[1]= put_mspel8_mc10_c;
+ c->put_mspel_pixels_tab[2]= put_mspel8_mc20_c;
+ c->put_mspel_pixels_tab[3]= put_mspel8_mc30_c;
+ c->put_mspel_pixels_tab[4]= put_mspel8_mc02_c;
+ c->put_mspel_pixels_tab[5]= put_mspel8_mc12_c;
+ c->put_mspel_pixels_tab[6]= put_mspel8_mc22_c;
+ c->put_mspel_pixels_tab[7]= put_mspel8_mc32_c;
+
+#define SET_CMP_FUNC(name) \
+ c->name[0]= name ## 16_c;\
+ c->name[1]= name ## 8x8_c;
+
+ SET_CMP_FUNC(hadamard8_diff)
+ c->hadamard8_diff[4]= hadamard8_intra16_c;
+ c->hadamard8_diff[5]= hadamard8_intra8x8_c;
+ SET_CMP_FUNC(dct_sad)
+ SET_CMP_FUNC(dct_max)
+#if CONFIG_GPL
+ SET_CMP_FUNC(dct264_sad)
+#endif
+ c->sad[0]= pix_abs16_c;
+ c->sad[1]= pix_abs8_c;
+ c->sse[0]= sse16_c;
+ c->sse[1]= sse8_c;
+ c->sse[2]= sse4_c;
+ SET_CMP_FUNC(quant_psnr)
+ SET_CMP_FUNC(rd)
+ SET_CMP_FUNC(bit)
+ c->vsad[0]= vsad16_c;
+ c->vsad[4]= vsad_intra16_c;
+ c->vsad[5]= vsad_intra8_c;
+ c->vsse[0]= vsse16_c;
+ c->vsse[4]= vsse_intra16_c;
+ c->vsse[5]= vsse_intra8_c;
+ c->nsse[0]= nsse16_c;
+ c->nsse[1]= nsse8_c;
+#if CONFIG_SNOW_ENCODER
+ c->w53[0]= w53_16_c;
+ c->w53[1]= w53_8_c;
+ c->w97[0]= w97_16_c;
+ c->w97[1]= w97_8_c;
+#endif
+
+ c->ssd_int8_vs_int16 = ssd_int8_vs_int16_c;
+
+ c->add_bytes= add_bytes_c;
+ c->add_bytes_l2= add_bytes_l2_c;
+ c->diff_bytes= diff_bytes_c;
+ c->add_hfyu_median_prediction= add_hfyu_median_prediction_c;
+ c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_c;
+ c->add_hfyu_left_prediction = add_hfyu_left_prediction_c;
+ c->add_hfyu_left_prediction_bgr32 = add_hfyu_left_prediction_bgr32_c;
+ c->bswap_buf= bswap_buf;
+#if CONFIG_PNG_DECODER
+ c->add_png_paeth_prediction= ff_add_png_paeth_prediction;
+#endif
+
+ c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_c;
+ c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_c;
+ c->h264_v_loop_filter_luma_intra= h264_v_loop_filter_luma_intra_c;
+ c->h264_h_loop_filter_luma_intra= h264_h_loop_filter_luma_intra_c;
+ c->h264_v_loop_filter_chroma= h264_v_loop_filter_chroma_c;
+ c->h264_h_loop_filter_chroma= h264_h_loop_filter_chroma_c;
+ c->h264_v_loop_filter_chroma_intra= h264_v_loop_filter_chroma_intra_c;
+ c->h264_h_loop_filter_chroma_intra= h264_h_loop_filter_chroma_intra_c;
+ c->h264_loop_filter_strength= NULL;
+
+ if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
+ c->h263_h_loop_filter= h263_h_loop_filter_c;
+ c->h263_v_loop_filter= h263_v_loop_filter_c;
+ }
+
+ if (CONFIG_VP3_DECODER) {
+ c->vp3_h_loop_filter= ff_vp3_h_loop_filter_c;
+ c->vp3_v_loop_filter= ff_vp3_v_loop_filter_c;
+ }
+ if (CONFIG_VP6_DECODER) {
+ c->vp6_filter_diag4= ff_vp6_filter_diag4_c;
+ }
+
+ c->h261_loop_filter= h261_loop_filter_c;
+
+ c->try_8x8basis= try_8x8basis_c;
+ c->add_8x8basis= add_8x8basis_c;
+
+#if CONFIG_VORBIS_DECODER
+ c->vorbis_inverse_coupling = vorbis_inverse_coupling;
+#endif
+#if CONFIG_AC3_DECODER
+ c->ac3_downmix = ff_ac3_downmix_c;
+#endif
+ c->vector_fmul = vector_fmul_c;
+ c->vector_fmul_reverse = vector_fmul_reverse_c;
+ c->vector_fmul_add = vector_fmul_add_c;
+ c->vector_fmul_window = ff_vector_fmul_window_c;
+ c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_c;
+ c->float_to_int16 = ff_float_to_int16_c;
+ c->float_to_int16_interleave = ff_float_to_int16_interleave_c;
+#if CONFIG_AAC_DECODER
+ c->scalarproduct_float = scalarproduct_float_c;
+#endif
+ c->butterflies_float = butterflies_float_c;
+#if CONFIG_AAC_DECODER
+ c->vector_fmul_scalar = vector_fmul_scalar_c;
+
+ c->vector_fmul_sv_scalar[0] = vector_fmul_sv_scalar_2_c;
+ c->vector_fmul_sv_scalar[1] = vector_fmul_sv_scalar_4_c;
+
+ c->sv_fmul_scalar[0] = sv_fmul_scalar_2_c;
+ c->sv_fmul_scalar[1] = sv_fmul_scalar_4_c;
+#endif
+
+ c->shrink[0]= ff_img_copy_plane;
+ c->shrink[1]= ff_shrink22;
+ c->shrink[2]= ff_shrink44;
+ c->shrink[3]= ff_shrink88;
+
+ c->prefetch= just_return;
+
+ memset(c->put_2tap_qpel_pixels_tab, 0, sizeof(c->put_2tap_qpel_pixels_tab));
+ memset(c->avg_2tap_qpel_pixels_tab, 0, sizeof(c->avg_2tap_qpel_pixels_tab));
+
+ #if HAVE_MMX
+ dsputil_init_mmx(c, avctx);
+ #endif
+
+ for(i=0; i<64; i++){
+ if(!c->put_2tap_qpel_pixels_tab[0][i])
+ c->put_2tap_qpel_pixels_tab[0][i]= c->put_h264_qpel_pixels_tab[0][i];
+ if(!c->avg_2tap_qpel_pixels_tab[0][i])
+ c->avg_2tap_qpel_pixels_tab[0][i]= c->avg_h264_qpel_pixels_tab[0][i];
+ }
+
+ switch(c->idct_permutation_type){
+ case FF_NO_IDCT_PERM:
+ for(i=0; i<64; i++)
+ c->idct_permutation[i]= i;
+ break;
+ case FF_LIBMPEG2_IDCT_PERM:
+ for(i=0; i<64; i++)
+ c->idct_permutation[i]= (i & 0x38) | ((i & 6) >> 1) | ((i & 1) << 2);
+ break;
+ case FF_SIMPLE_IDCT_PERM:
+ for(i=0; i<64; i++)
+ c->idct_permutation[i]= simple_mmx_permutation[i];
+ break;
+ case FF_TRANSPOSE_IDCT_PERM:
+ for(i=0; i<64; i++)
+ c->idct_permutation[i]= ((i&7)<<3) | (i>>3);
+ break;
+ case FF_PARTTRANS_IDCT_PERM:
+ for(i=0; i<64; i++)
+ c->idct_permutation[i]= (i&0x24) | ((i&3)<<3) | ((i>>3)&3);
+ break;
+ case FF_SSE2_IDCT_PERM:
+ for(i=0; i<64; i++)
+ c->idct_permutation[i]= (i&0x38) | idct_sse2_row_perm[i&7];
+ break;
+ default:
+ av_log(avctx, AV_LOG_ERROR, "Internal error, IDCT permutation not set\n");
+ }
+}
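+
+/*
+ * Editorial sketch (not in the original source): the expected call order for the
+ * two init functions above; the helper name is illustrative only.
+ */
+#if 0
+static void example_dsp_setup(DSPContext *dsp, AVCodecContext *avctx)
+{
+    dsputil_static_init();    /* fill ff_cropTbl / ff_squareTbl etc. (idempotent) */
+    dsputil_init(dsp, avctx); /* pick C or MMX/SSE versions and the IDCT
+                                 permutation according to the avctx settings */
+}
+#endif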
+
+// avcodec_get_current_idct, avcodec_get_encoder_info by h.yamagata
+// It is the caller's responsibility to ensure that avctx->priv_data is an MpegEncContext*.
+const char* avcodec_get_current_idct(AVCodecContext *avctx)
+{
+ MpegEncContext *s = avctx->priv_data;
+ DSPContext *c = &s->dsp;
+
+ if (c->idct_put==ff_jref_idct_put)
+ return "Integer (ff_jref_idct)";
+ if (c->idct_put==ff_jref_idct1_put)
+ return "Integer (ff_jref_idct1)";
+    if (c->idct_put==ff_jref_idct2_put)
+        return "Integer (ff_jref_idct2)";
+ if (c->idct_put==ff_jref_idct4_put)
+ return "Integer (ff_jref_idct4)";
+ if (c->idct_put==ff_h264_lowres_idct_put_c)
+ return "H.264 (ff_h264_lowres_idct_c)";
+ if (c->idct_put==ff_vp3_idct_put_c)
+ return "VP3 (ff_vp3_idct_c)";
+ if (c->idct_put==ff_faanidct_put)
+ return "FAAN (ff_faanidct_put)";
+ if (c->idct_put==ff_simple_idct_put)
+ return "Simple IDCT (simple_idct)";
+#if HAVE_MMX
+ return avcodec_get_current_idct_mmx(avctx,c);
+#else
+ return "";
+#endif
+}
+
+// It is the caller's responsibility to ensure that avctx->priv_data is an MpegEncContext*.
+void avcodec_get_encoder_info(AVCodecContext *avctx,int *xvid_build,int *divx_version,int *divx_build,int *lavc_build)
+{
+ MpegEncContext *s = avctx->priv_data;
+ *xvid_build = s->xvid_build;
+ *divx_version = s->divx_version;
+ *divx_build = s->divx_build;
+ *lavc_build = s->lavc_build;
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/dsputil.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/dsputil.h
new file mode 100644
index 000000000..3786102dd
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/dsputil.h
@@ -0,0 +1,975 @@
+/*
+ * DSP utils
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/dsputil.h
+ * DSP utils.
+ * Note: many functions in here may use MMX, which trashes the FPU state; it is
+ * absolutely necessary to call emms_c() between dsp & float/double code.
+ */
+
+#ifndef AVCODEC_DSPUTIL_H
+#define AVCODEC_DSPUTIL_H
+
+#include "libavutil/intreadwrite.h"
+#include "avcodec.h"
+
+
+//#define DEBUG
+/* dct code */
+typedef short DCTELEM;
+typedef int DWTELEM;
+typedef short IDWTELEM;
+
+void fdct_ifast (DCTELEM *data);
+void fdct_ifast248 (DCTELEM *data);
+void ff_jpeg_fdct_islow (DCTELEM *data);
+void ff_fdct248_islow (DCTELEM *data);
+
+void j_rev_dct (DCTELEM *data);
+void j_rev_dct4 (DCTELEM *data);
+void j_rev_dct2 (DCTELEM *data);
+void j_rev_dct1 (DCTELEM *data);
+void ff_wmv2_idct_c(DCTELEM *data);
+
+void ff_fdct_mmx(DCTELEM *block);
+void ff_fdct_mmx2(DCTELEM *block);
+void ff_fdct_sse2(DCTELEM *block);
+
+void ff_h264_idct8_add_c(uint8_t *dst, DCTELEM *block, int stride);
+void ff_h264_idct_add_c(uint8_t *dst, DCTELEM *block, int stride);
+void ff_h264_idct8_dc_add_c(uint8_t *dst, DCTELEM *block, int stride);
+void ff_h264_idct_dc_add_c(uint8_t *dst, DCTELEM *block, int stride);
+void ff_h264_lowres_idct_add_c(uint8_t *dst, int stride, DCTELEM *block);
+void ff_h264_lowres_idct_put_c(uint8_t *dst, int stride, DCTELEM *block);
+void ff_h264_idct_add16_c(uint8_t *dst, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
+void ff_h264_idct_add16intra_c(uint8_t *dst, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
+void ff_h264_idct8_add4_c(uint8_t *dst, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
+void ff_h264_idct_add8_c(uint8_t **dest, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
+
+void ff_vector_fmul_window_c(float *dst, const float *src0, const float *src1,
+ const float *win, float add_bias, int len);
+void ff_float_to_int16_c(int16_t *dst, const float *src, long len);
+void ff_float_to_int16_interleave_c(int16_t *dst, const float **src, long len, int channels);
+
+/* encoding scans */
+extern const uint8_t ff_alternate_horizontal_scan[64];
+extern const uint8_t ff_alternate_vertical_scan[64];
+extern const uint8_t ff_zigzag_direct[64];
+extern const uint8_t ff_zigzag248_direct[64];
+
+/* pixel operations */
+#define MAX_NEG_CROP 1024
+
+/* temporary */
+extern uint32_t ff_squareTbl[512];
+extern uint8_t ff_cropTbl[256 + 2 * MAX_NEG_CROP];
+
+/* VP3 DSP functions */
+void ff_vp3_idct_c(DCTELEM *block/* align 16*/);
+void ff_vp3_idct_put_c(uint8_t *dest/*align 8*/, int line_size, DCTELEM *block/*align 16*/);
+void ff_vp3_idct_add_c(uint8_t *dest/*align 8*/, int line_size, DCTELEM *block/*align 16*/);
+
+void ff_vp3_v_loop_filter_c(uint8_t *src, int stride, int *bounding_values);
+void ff_vp3_h_loop_filter_c(uint8_t *src, int stride, int *bounding_values);
+
+/* VP6 DSP functions */
+void ff_vp6_filter_diag4_c(uint8_t *dst, uint8_t *src, int stride,
+ const int16_t *h_weights, const int16_t *v_weights);
+
+/* 1/2^n downscaling functions from imgconvert.c */
+void ff_img_copy_plane(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height);
+void ff_shrink22(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height);
+void ff_shrink44(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height);
+void ff_shrink88(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height);
+
+void ff_gmc_c(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
+ int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height);
+
+/* minimum alignment rules ;)
+If you notice errors in the alignment requirements, need more alignment for some
+ASM code on some CPU, or need to use a function with less strictly aligned data,
+then send a mail to the ffmpeg-devel mailing list, ...
+
+!warning These alignments might not match reality (attribute((align)) may be
+missing somewhere).
+I (Michael) did not check them; these are just the alignments which I think
+could be reached easily ...
+
+!future video codecs might need functions with less strict alignment
+*/
+
+/*
+void get_pixels_c(DCTELEM *block, const uint8_t *pixels, int line_size);
+void diff_pixels_c(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride);
+void put_pixels_clamped_c(const DCTELEM *block, uint8_t *pixels, int line_size);
+void add_pixels_clamped_c(const DCTELEM *block, uint8_t *pixels, int line_size);
+void clear_blocks_c(DCTELEM *blocks);
+*/
+
+/* add and put pixel (decoding) */
+// blocksizes for op_pixels_func are 8x4, 8x8, 16x8 and 16x16
+// h for op_pixels_func is limited to {width/2, width} but is never larger than 16 and never smaller than 4
+typedef void (*op_pixels_func)(uint8_t *block/*align width (8 or 16)*/, const uint8_t *pixels/*align 1*/, int line_size, int h);
+typedef void (*tpel_mc_func)(uint8_t *block/*align width (8 or 16)*/, const uint8_t *pixels/*align 1*/, int line_size, int w, int h);
+typedef void (*qpel_mc_func)(uint8_t *dst/*align width (8 or 16)*/, uint8_t *src/*align 1*/, int stride);
+typedef void (*h264_chroma_mc_func)(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int srcStride, int h, int x, int y);
+typedef void (*h264_weight_func)(uint8_t *block, int stride, int log2_denom, int weight, int offset);
+typedef void (*h264_biweight_func)(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset);
+
+#define DEF_OLD_QPEL(name)\
+void ff_put_ ## name (uint8_t *dst/*align width (8 or 16)*/, uint8_t *src/*align 1*/, int stride);\
+void ff_put_no_rnd_ ## name (uint8_t *dst/*align width (8 or 16)*/, uint8_t *src/*align 1*/, int stride);\
+void ff_avg_ ## name (uint8_t *dst/*align width (8 or 16)*/, uint8_t *src/*align 1*/, int stride);
+
+DEF_OLD_QPEL(qpel16_mc11_old_c)
+DEF_OLD_QPEL(qpel16_mc31_old_c)
+DEF_OLD_QPEL(qpel16_mc12_old_c)
+DEF_OLD_QPEL(qpel16_mc32_old_c)
+DEF_OLD_QPEL(qpel16_mc13_old_c)
+DEF_OLD_QPEL(qpel16_mc33_old_c)
+DEF_OLD_QPEL(qpel8_mc11_old_c)
+DEF_OLD_QPEL(qpel8_mc31_old_c)
+DEF_OLD_QPEL(qpel8_mc12_old_c)
+DEF_OLD_QPEL(qpel8_mc32_old_c)
+DEF_OLD_QPEL(qpel8_mc13_old_c)
+DEF_OLD_QPEL(qpel8_mc33_old_c)
+
+#define CALL_2X_PIXELS(a, b, n)\
+static void a(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
+ b(block , pixels , line_size, h);\
+ b(block+n, pixels+n, line_size, h);\
+}
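+
+/*
+ * Editorial note (not in the original source): CALL_2X_PIXELS builds a
+ * double-width pixel op out of a narrower one by handling the two halves
+ * independently; e.g. a hypothetical
+ *     CALL_2X_PIXELS(put_pixels16_example, put_pixels8_example, 8)
+ * expands to a function that runs put_pixels8_example() on columns 0..7 and then
+ * again, with both pointers advanced by 8, on columns 8..15.
+ */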
+
+/* motion estimation */
+// h is limited to {width/2, width, 2*width} but is never larger than 16 and never smaller than 2
+// although currently h<4 is not used as functions with width <8 are neither used nor implemented
+typedef int (*me_cmp_func)(void /*MpegEncContext*/ *s, uint8_t *blk1/*align width (8 or 16)*/, uint8_t *blk2/*align 1*/, int line_size, int h)/* __attribute__ ((const))*/;
+
+
+// for snow slices
+typedef struct slice_buffer_s slice_buffer;
+
+/**
+ * Scantable.
+ */
+typedef struct ScanTable{
+ const uint8_t *scantable;
+ uint8_t permutated[64];
+ uint8_t raster_end[64];
+} ScanTable;
+
+void ff_init_scantable(uint8_t *, ScanTable *st, const uint8_t *src_scantable);
+
+void ff_emulated_edge_mc(uint8_t *buf, uint8_t *src, int linesize,
+ int block_w, int block_h,
+ int src_x, int src_y, int w, int h);
+
+/**
+ * DSPContext.
+ */
+typedef struct DSPContext {
+ /* pixel ops : interface with DCT */
+ void (*get_pixels)(DCTELEM *block/*align 16*/, const uint8_t *pixels/*align 8*/, int line_size);
+ void (*diff_pixels)(DCTELEM *block/*align 16*/, const uint8_t *s1/*align 8*/, const uint8_t *s2/*align 8*/, int stride);
+ void (*put_pixels_clamped)(const DCTELEM *block/*align 16*/, uint8_t *pixels/*align 8*/, int line_size);
+ void (*put_signed_pixels_clamped)(const DCTELEM *block/*align 16*/, uint8_t *pixels/*align 8*/, int line_size);
+ void (*add_pixels_clamped)(const DCTELEM *block/*align 16*/, uint8_t *pixels/*align 8*/, int line_size);
+ void (*add_pixels8)(uint8_t *pixels, DCTELEM *block, int line_size);
+ void (*add_pixels4)(uint8_t *pixels, DCTELEM *block, int line_size);
+ int (*sum_abs_dctelem)(DCTELEM *block/*align 16*/);
+ /**
+ * translational global motion compensation.
+ */
+ void (*gmc1)(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int srcStride, int h, int x16, int y16, int rounder);
+ /**
+ * global motion compensation.
+ */
+ void (*gmc )(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int ox, int oy,
+ int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height);
+ void (*clear_block)(DCTELEM *block/*align 16*/);
+ void (*clear_blocks)(DCTELEM *blocks/*align 16*/);
+ int (*pix_sum)(uint8_t * pix, int line_size);
+ int (*pix_norm1)(uint8_t * pix, int line_size);
+// 16x16 8x8 4x4 2x2 16x8 8x4 4x2 8x16 4x8 2x4
+
+ me_cmp_func sad[6]; /* identical to pix_absAxA except additional void * */
+ me_cmp_func sse[6];
+ me_cmp_func hadamard8_diff[6];
+ me_cmp_func dct_sad[6];
+ me_cmp_func quant_psnr[6];
+ me_cmp_func bit[6];
+ me_cmp_func rd[6];
+ me_cmp_func vsad[6];
+ me_cmp_func vsse[6];
+ me_cmp_func nsse[6];
+ me_cmp_func w53[6];
+ me_cmp_func w97[6];
+ me_cmp_func dct_max[6];
+ me_cmp_func dct264_sad[6];
+
+ me_cmp_func me_pre_cmp[6];
+ me_cmp_func me_cmp[6];
+ me_cmp_func me_sub_cmp[6];
+ me_cmp_func mb_cmp[6];
+ me_cmp_func ildct_cmp[6]; //only width 16 used
+ me_cmp_func frame_skip_cmp[6]; //only width 8 used
+
+ int (*ssd_int8_vs_int16)(const int8_t *pix1, const int16_t *pix2,
+ int size);
+
+ /**
+ * Halfpel motion compensation with rounding (a+b+1)>>1.
+ * this is an array[4][4] of motion compensation functions for 4
+ * horizontal blocksizes (8,16) and the 4 halfpel positions<br>
+ * *pixels_tab[ 0->16xH 1->8xH ][ xhalfpel + 2*yhalfpel ]
+ * @param block destination where the result is stored
+ * @param pixels source
+ * @param line_size number of bytes in a horizontal line of block
+ * @param h height
+ */
+ op_pixels_func put_pixels_tab[4][4];
+
+ /**
+ * Halfpel motion compensation with rounding (a+b+1)>>1.
+ * This is an array[4][4] of motion compensation functions for 4
+ * horizontal blocksizes (8,16) and the 4 halfpel positions<br>
+ * *pixels_tab[ 0->16xH 1->8xH ][ xhalfpel + 2*yhalfpel ]
+ * @param block destination into which the result is averaged (a+b+1)>>1
+ * @param pixels source
+ * @param line_size number of bytes in a horizontal line of block
+ * @param h height
+ */
+ op_pixels_func avg_pixels_tab[4][4];
+
+ /**
+ * Halfpel motion compensation with no rounding (a+b)>>1.
+ * this is an array[2][4] of motion compensation functions for 2
+ * horizontal blocksizes (8,16) and the 4 halfpel positions<br>
+ * *pixels_tab[ 0->16xH 1->8xH ][ xhalfpel + 2*yhalfpel ]
+ * @param block destination where the result is stored
+ * @param pixels source
+ * @param line_size number of bytes in a horizontal line of block
+ * @param h height
+ */
+ op_pixels_func put_no_rnd_pixels_tab[4][4];
+
+ /**
+ * Halfpel motion compensation with no rounding (a+b)>>1.
+ * this is an array[2][4] of motion compensation functions for 2
+ * horizontal blocksizes (8,16) and the 4 halfpel positions<br>
+ * *pixels_tab[ 0->16xH 1->8xH ][ xhalfpel + 2*yhalfpel ]
+ * @param block destination into which the result is averaged (a+b)>>1
+ * @param pixels source
+ * @param line_size number of bytes in a horizontal line of block
+ * @param h height
+ */
+ op_pixels_func avg_no_rnd_pixels_tab[4][4];
+
+ void (*put_no_rnd_pixels_l2[2])(uint8_t *block/*align width (8 or 16)*/, const uint8_t *a/*align 1*/, const uint8_t *b/*align 1*/, int line_size, int h);
+
+ /**
+ * Thirdpel motion compensation with rounding (a+b+1)>>1.
+     * this is an array[12] of motion compensation functions for the 9 thirdpel
+ * positions<br>
+ * *pixels_tab[ xthirdpel + 4*ythirdpel ]
+ * @param block destination where the result is stored
+ * @param pixels source
+ * @param line_size number of bytes in a horizontal line of block
+ * @param h height
+ */
+ tpel_mc_func put_tpel_pixels_tab[11]; //FIXME individual func ptr per width?
+ tpel_mc_func avg_tpel_pixels_tab[11]; //FIXME individual func ptr per width?
+
+ qpel_mc_func put_qpel_pixels_tab[2][16];
+ qpel_mc_func avg_qpel_pixels_tab[2][16];
+ qpel_mc_func put_no_rnd_qpel_pixels_tab[2][16];
+ qpel_mc_func avg_no_rnd_qpel_pixels_tab[2][16];
+ qpel_mc_func put_mspel_pixels_tab[8];
+
+ /**
+ * h264 Chroma MC
+ */
+ h264_chroma_mc_func put_h264_chroma_pixels_tab[3];
+ h264_chroma_mc_func avg_h264_chroma_pixels_tab[3];
+ /* This is really one func used in VC-1 decoding */
+ h264_chroma_mc_func put_no_rnd_vc1_chroma_pixels_tab[3];
+ h264_chroma_mc_func avg_no_rnd_vc1_chroma_pixels_tab[3];
+
+ qpel_mc_func put_h264_qpel_pixels_tab[4][16];
+ qpel_mc_func avg_h264_qpel_pixels_tab[4][16];
+
+ qpel_mc_func put_2tap_qpel_pixels_tab[4][16];
+ qpel_mc_func avg_2tap_qpel_pixels_tab[4][16];
+
+ h264_weight_func weight_h264_pixels_tab[10];
+ h264_biweight_func biweight_h264_pixels_tab[10];
+
+ /* AVS specific */
+ qpel_mc_func put_cavs_qpel_pixels_tab[2][16];
+ qpel_mc_func avg_cavs_qpel_pixels_tab[2][16];
+ void (*cavs_filter_lv)(uint8_t *pix, int stride, int alpha, int beta, int tc, int bs1, int bs2);
+ void (*cavs_filter_lh)(uint8_t *pix, int stride, int alpha, int beta, int tc, int bs1, int bs2);
+ void (*cavs_filter_cv)(uint8_t *pix, int stride, int alpha, int beta, int tc, int bs1, int bs2);
+ void (*cavs_filter_ch)(uint8_t *pix, int stride, int alpha, int beta, int tc, int bs1, int bs2);
+ void (*cavs_idct8_add)(uint8_t *dst, DCTELEM *block, int stride);
+
+ me_cmp_func pix_abs[2][4];
+
+ /* huffyuv specific */
+ void (*add_bytes)(uint8_t *dst/*align 16*/, uint8_t *src/*align 16*/, int w);
+ void (*add_bytes_l2)(uint8_t *dst/*align 16*/, uint8_t *src1/*align 16*/, uint8_t *src2/*align 16*/, int w);
+ void (*diff_bytes)(uint8_t *dst/*align 16*/, uint8_t *src1/*align 16*/, uint8_t *src2/*align 1*/,int w);
+ /**
+ * subtract huffyuv's variant of median prediction
+ * note, this might read from src1[-1], src2[-1]
+ */
+ void (*sub_hfyu_median_prediction)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int w, int *left, int *left_top);
+ void (*add_hfyu_median_prediction)(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top);
+ int (*add_hfyu_left_prediction)(uint8_t *dst, const uint8_t *src, int w, int left);
+ void (*add_hfyu_left_prediction_bgr32)(uint8_t *dst, const uint8_t *src, int w, int *red, int *green, int *blue, int *alpha);
+ /* this might write to dst[w] */
+ void (*add_png_paeth_prediction)(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp);
+ void (*bswap_buf)(uint32_t *dst, const uint32_t *src, int w);
+
+ void (*h264_v_loop_filter_luma)(uint8_t *pix/*align 16*/, int stride, int alpha, int beta, int8_t *tc0);
+ void (*h264_h_loop_filter_luma)(uint8_t *pix/*align 4 */, int stride, int alpha, int beta, int8_t *tc0);
+ /* v/h_loop_filter_luma_intra: align 16 */
+ void (*h264_v_loop_filter_luma_intra)(uint8_t *pix, int stride, int alpha, int beta);
+ void (*h264_h_loop_filter_luma_intra)(uint8_t *pix, int stride, int alpha, int beta);
+ void (*h264_v_loop_filter_chroma)(uint8_t *pix/*align 8*/, int stride, int alpha, int beta, int8_t *tc0);
+ void (*h264_h_loop_filter_chroma)(uint8_t *pix/*align 4*/, int stride, int alpha, int beta, int8_t *tc0);
+ void (*h264_v_loop_filter_chroma_intra)(uint8_t *pix/*align 8*/, int stride, int alpha, int beta);
+ void (*h264_h_loop_filter_chroma_intra)(uint8_t *pix/*align 8*/, int stride, int alpha, int beta);
+ // h264_loop_filter_strength: simd only. the C version is inlined in h264.c
+ void (*h264_loop_filter_strength)(int16_t bS[2][4][4], uint8_t nnz[40], int8_t ref[2][40], int16_t mv[2][40][2],
+ int bidir, int edges, int step, int mask_mv0, int mask_mv1, int field);
+
+ void (*h263_v_loop_filter)(uint8_t *src, int stride, int qscale);
+ void (*h263_h_loop_filter)(uint8_t *src, int stride, int qscale);
+
+ void (*h261_loop_filter)(uint8_t *src, int stride);
+
+ void (*x8_v_loop_filter)(uint8_t *src, int stride, int qscale);
+ void (*x8_h_loop_filter)(uint8_t *src, int stride, int qscale);
+
+ void (*vp3_v_loop_filter)(uint8_t *src, int stride, int *bounding_values);
+ void (*vp3_h_loop_filter)(uint8_t *src, int stride, int *bounding_values);
+
+ void (*vp6_filter_diag4)(uint8_t *dst, uint8_t *src, int stride,
+ const int16_t *h_weights,const int16_t *v_weights);
+
+ /* assume len is a multiple of 4, and arrays are 16-byte aligned */
+ void (*vorbis_inverse_coupling)(float *mag, float *ang, int blocksize);
+ void (*ac3_downmix)(float (*samples)[256], float (*matrix)[2], int out_ch, int in_ch, int len);
+ /* assume len is a multiple of 8, and arrays are 16-byte aligned */
+ void (*vector_fmul)(float *dst, const float *src, int len);
+ void (*vector_fmul_reverse)(float *dst, const float *src0, const float *src1, int len);
+ /* assume len is a multiple of 8, and src arrays are 16-byte aligned */
+ void (*vector_fmul_add)(float *dst, const float *src0, const float *src1, const float *src2, int len);
+ /* assume len is a multiple of 4, and arrays are 16-byte aligned */
+ void (*vector_fmul_window)(float *dst, const float *src0, const float *src1, const float *win, float add_bias, int len);
+ /* assume len is a multiple of 8, and arrays are 16-byte aligned */
+ void (*int32_to_float_fmul_scalar)(float *dst, const int *src, float mul, int len);
+ /**
+ * Multiply a vector of floats by a scalar float. Source and
+ * destination vectors must overlap exactly or not at all.
+ * @param dst result vector, 16-byte aligned
+ * @param src input vector, 16-byte aligned
+ * @param mul scalar value
+ * @param len length of vector, multiple of 4
+ */
+ void (*vector_fmul_scalar)(float *dst, const float *src, float mul,
+ int len);
+ /**
+ * Multiply a vector of floats by concatenated short vectors of
+ * floats and by a scalar float. Source and destination vectors
+ * must overlap exactly or not at all.
+ * [0]: short vectors of length 2, 8-byte aligned
+ * [1]: short vectors of length 4, 16-byte aligned
+ * @param dst output vector, 16-byte aligned
+ * @param src input vector, 16-byte aligned
+ * @param sv array of pointers to short vectors
+ * @param mul scalar value
+ * @param len number of elements in src and dst, multiple of 4
+ */
+ void (*vector_fmul_sv_scalar[2])(float *dst, const float *src,
+ const float **sv, float mul, int len);
+ /**
+ * Multiply short vectors of floats by a scalar float, store
+ * concatenated result.
+ * [0]: short vectors of length 2, 8-byte aligned
+ * [1]: short vectors of length 4, 16-byte aligned
+ * @param dst output vector, 16-byte aligned
+ * @param sv array of pointers to short vectors
+ * @param mul scalar value
+ * @param len number of output elements, multiple of 4
+ */
+ void (*sv_fmul_scalar[2])(float *dst, const float **sv,
+ float mul, int len);
+ /**
+ * Calculate the scalar product of two vectors of floats.
+ * @param v1 first vector, 16-byte aligned
+ * @param v2 second vector, 16-byte aligned
+ * @param len length of vectors, multiple of 4
+ */
+ float (*scalarproduct_float)(const float *v1, const float *v2, int len);
+ /**
+ * Calculate the sum and difference of two vectors of floats.
+ * @param v1 first input vector, sum output, 16-byte aligned
+ * @param v2 second input vector, difference output, 16-byte aligned
+ * @param len length of vectors, multiple of 4
+ */
+ void (*butterflies_float)(float *restrict v1, float *restrict v2, int len);
+
+    /* C version: convert floats from the range [384.0,386.0] to ints in [-32768,32767]
+     * simd versions: convert floats from [-32768.0,32767.0] without rescaling; arrays are 16-byte aligned */
+ void (*float_to_int16)(int16_t *dst, const float *src, long len);
+ void (*float_to_int16_interleave)(int16_t *dst, const float **src, long len, int channels);
+
+ /* (I)DCT */
+ void (*fdct)(DCTELEM *block/* align 16*/);
+ void (*fdct248)(DCTELEM *block/* align 16*/);
+
+ /* IDCT really*/
+ void (*idct)(DCTELEM *block/* align 16*/);
+
+ /**
+ * block -> idct -> clip to unsigned 8 bit -> dest.
+ * (-1392, 0, 0, ...) -> idct -> (-174, -174, ...) -> put -> (0, 0, ...)
+ * @param line_size size in bytes of a horizontal line of dest
+ */
+ void (*idct_put)(uint8_t *dest/*align 8*/, int line_size, DCTELEM *block/*align 16*/);
+
+ /**
+ * block -> idct -> add dest -> clip to unsigned 8 bit -> dest.
+ * @param line_size size in bytes of a horizontal line of dest
+ */
+ void (*idct_add)(uint8_t *dest/*align 8*/, int line_size, DCTELEM *block/*align 16*/);
+
+ /**
+ * idct input permutation.
+     * Several optimized IDCTs need a permuted input (relative to the normal order of the
+     * reference IDCT).
+     * This permutation must be performed before idct_put/idct_add; note that normally it can be
+     * merged with the zigzag/alternate scan.<br>
+     * An example to avoid confusion:
+     * - (->decode coeffs -> zigzag reorder -> dequant -> reference idct ->...)
+     * - (x -> reference dct -> reference idct -> x)
+     * - (x -> reference dct -> simple_mmx_perm = idct_permutation -> simple_idct_mmx -> x)
+ * - (->decode coeffs -> zigzag reorder -> simple_mmx_perm -> dequant -> simple_idct_mmx ->...)
+ */
+ uint8_t idct_permutation[64];
+ int idct_permutation_type;
+#define FF_NO_IDCT_PERM 1
+#define FF_LIBMPEG2_IDCT_PERM 2
+#define FF_SIMPLE_IDCT_PERM 3
+#define FF_TRANSPOSE_IDCT_PERM 4
+#define FF_PARTTRANS_IDCT_PERM 5
+#define FF_SSE2_IDCT_PERM 6
+
+ int (*try_8x8basis)(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale);
+ void (*add_8x8basis)(int16_t rem[64], int16_t basis[64], int scale);
+#define BASIS_SHIFT 16
+#define RECON_SHIFT 6
+
+ void (*draw_edges)(uint8_t *buf, int wrap, int width, int height, int w);
+#define EDGE_WIDTH 16
+
+ /* h264 functions */
+ /* NOTE!!! if you implement any of h264_idct8_add, h264_idct8_add4 then you must implement all of them
+ NOTE!!! if you implement any of h264_idct_add, h264_idct_add16, h264_idct_add16intra, h264_idct_add8 then you must implement all of them
+       The reason for the above is that no two functions from the same list may use different permutations.
+ */
+ void (*h264_idct_add)(uint8_t *dst/*align 4*/, DCTELEM *block/*align 16*/, int stride);
+ void (*h264_idct8_add)(uint8_t *dst/*align 8*/, DCTELEM *block/*align 16*/, int stride);
+ void (*h264_idct_dc_add)(uint8_t *dst/*align 4*/, DCTELEM *block/*align 16*/, int stride);
+ void (*h264_idct8_dc_add)(uint8_t *dst/*align 8*/, DCTELEM *block/*align 16*/, int stride);
+ void (*h264_dct)(DCTELEM block[4][4]);
+ void (*h264_idct_add16)(uint8_t *dst/*align 16*/, const int *blockoffset, DCTELEM *block/*align 16*/, int stride, const uint8_t nnzc[6*8]);
+ void (*h264_idct8_add4)(uint8_t *dst/*align 16*/, const int *blockoffset, DCTELEM *block/*align 16*/, int stride, const uint8_t nnzc[6*8]);
+ void (*h264_idct_add8)(uint8_t **dst/*align 16*/, const int *blockoffset, DCTELEM *block/*align 16*/, int stride, const uint8_t nnzc[6*8]);
+ void (*h264_idct_add16intra)(uint8_t *dst/*align 16*/, const int *blockoffset, DCTELEM *block/*align 16*/, int stride, const uint8_t nnzc[6*8]);
+
+ void (*prefetch)(void *mem, int stride, int h);
+
+ void (*shrink[4])(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height);
+
+ /* mlp/truehd functions */
+ void (*mlp_filter_channel)(int32_t *state, const int32_t *coeff,
+ int firorder, int iirorder,
+ unsigned int filter_shift, int32_t mask, int blocksize,
+ int32_t *sample_buffer);
+
+ /* vc1 functions */
+ void (*vc1_inv_trans_8x8)(DCTELEM *b);
+ void (*vc1_inv_trans_8x4)(uint8_t *dest, int line_size, DCTELEM *block);
+ void (*vc1_inv_trans_4x8)(uint8_t *dest, int line_size, DCTELEM *block);
+ void (*vc1_inv_trans_4x4)(uint8_t *dest, int line_size, DCTELEM *block);
+ void (*vc1_inv_trans_8x8_dc)(uint8_t *dest, int line_size, DCTELEM *block);
+ void (*vc1_inv_trans_8x4_dc)(uint8_t *dest, int line_size, DCTELEM *block);
+ void (*vc1_inv_trans_4x8_dc)(uint8_t *dest, int line_size, DCTELEM *block);
+ void (*vc1_inv_trans_4x4_dc)(uint8_t *dest, int line_size, DCTELEM *block);
+ void (*vc1_v_overlap)(uint8_t* src, int stride);
+ void (*vc1_h_overlap)(uint8_t* src, int stride);
+ void (*vc1_v_loop_filter4)(uint8_t *src, int stride, int pq);
+ void (*vc1_h_loop_filter4)(uint8_t *src, int stride, int pq);
+ void (*vc1_v_loop_filter8)(uint8_t *src, int stride, int pq);
+ void (*vc1_h_loop_filter8)(uint8_t *src, int stride, int pq);
+ void (*vc1_v_loop_filter16)(uint8_t *src, int stride, int pq);
+ void (*vc1_h_loop_filter16)(uint8_t *src, int stride, int pq);
+ /* put 8x8 block with bicubic interpolation and quarterpel precision
+ * last argument is actually round value instead of height
+ */
+ op_pixels_func put_vc1_mspel_pixels_tab[16];
+ op_pixels_func avg_vc1_mspel_pixels_tab[16];
+
+ /* intrax8 functions */
+ void (*x8_spatial_compensation[12])(uint8_t *src , uint8_t *dst, int linesize);
+ void (*x8_setup_spatial_compensation)(uint8_t *src, uint8_t *dst, int linesize,
+ int * range, int * sum, int edges);
+
+ /* rv30 functions */
+ qpel_mc_func put_rv30_tpel_pixels_tab[4][16];
+ qpel_mc_func avg_rv30_tpel_pixels_tab[4][16];
+
+ /* rv40 functions */
+ qpel_mc_func put_rv40_qpel_pixels_tab[4][16];
+ qpel_mc_func avg_rv40_qpel_pixels_tab[4][16];
+ h264_chroma_mc_func put_rv40_chroma_pixels_tab[3];
+ h264_chroma_mc_func avg_rv40_chroma_pixels_tab[3];
+} DSPContext;
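+
+/*
+ * Editorial sketch (not in the original source): how a decoder typically drives
+ * this table once dsputil_init() has filled it in. The names below are
+ * illustrative only; block must be 16-byte aligned and its coefficients are
+ * expected to have been stored at block[dsp->idct_permutation[scan_pos]].
+ */
+#if 0
+static void example_render_block(DSPContext *dsp, uint8_t *dest, int line_size,
+                                 DCTELEM block[64])
+{
+    dsp->idct_add(dest, line_size, block); /* block -> idct -> add to dest, clamped */
+    dsp->clear_block(block);               /* leave the block zeroed for reuse */
+}
+#endif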
+
+void dsputil_static_init(void);
+void dsputil_init(DSPContext* p, AVCodecContext *avctx);
+
+int ff_check_alignment(void);
+
+/**
+ * Permute the block according to the permutation.
+ * @param last last non zero element in scantable order
+ */
+void ff_block_permute(DCTELEM *block, uint8_t *permutation, const uint8_t *scantable, int last);
+
+void ff_set_cmp(DSPContext* c, me_cmp_func *cmp, int type);
+
+#define BYTE_VEC32(c) ((c)*0x01010101UL)
+
+static inline uint32_t rnd_avg32(uint32_t a, uint32_t b)
+{
+ return (a | b) - (((a ^ b) & ~BYTE_VEC32(0x01)) >> 1);
+}
+
+static inline uint32_t no_rnd_avg32(uint32_t a, uint32_t b)
+{
+ return (a & b) + (((a ^ b) & ~BYTE_VEC32(0x01)) >> 1);
+}
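+
+/*
+ * Editorial sketch (not in the original source): rnd_avg32()/no_rnd_avg32() average
+ * four packed bytes at once using the identities
+ *     (a+b+1)>>1 == (a|b) - ((a^b)>>1)   and   (a+b)>>1 == (a&b) + ((a^b)>>1);
+ * masking the xor with ~BYTE_VEC32(0x01) clears each byte's LSB before the shift so
+ * no bit leaks into the byte below. Exhaustive per-byte check, assuming the two
+ * helpers above are in scope plus <stdint.h> and <assert.h>:
+ */
+#if 0
+#include <stdint.h>
+#include <assert.h>
+int main(void)
+{
+    unsigned a, b;
+    for (a = 0; a < 256; a++)
+        for (b = 0; b < 256; b++) {
+            uint32_t pa = a * 0x01010101u, pb = b * 0x01010101u;
+            assert((uint8_t)rnd_avg32(pa, pb)    == (uint8_t)((a + b + 1) >> 1));
+            assert((uint8_t)no_rnd_avg32(pa, pb) == (uint8_t)((a + b) >> 1));
+        }
+    return 0;
+}
+#endif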
+
+static inline int get_penalty_factor(int lambda, int lambda2, int type){
+ switch(type&0xFF){
+ default:
+ case FF_CMP_SAD:
+ return lambda>>FF_LAMBDA_SHIFT;
+ case FF_CMP_DCT:
+ return (3*lambda)>>(FF_LAMBDA_SHIFT+1);
+ case FF_CMP_W53:
+ return (4*lambda)>>(FF_LAMBDA_SHIFT);
+ case FF_CMP_W97:
+ return (2*lambda)>>(FF_LAMBDA_SHIFT);
+ case FF_CMP_SATD:
+ case FF_CMP_DCT264:
+ return (2*lambda)>>FF_LAMBDA_SHIFT;
+ case FF_CMP_RD:
+ case FF_CMP_PSNR:
+ case FF_CMP_SSE:
+ case FF_CMP_NSSE:
+ return lambda2>>FF_LAMBDA_SHIFT;
+ case FF_CMP_BIT:
+ return 1;
+ }
+}
+
+/**
+ * Empty the MMX state.
+ * This must be called between any dsp function and float/double code,
+ * for example: sin(); dsp->idct_put(); emms_c(); cos();
+ */
+#define emms_c()
+
+/* should be defined by architectures supporting
+   one or more MultiMedia extensions */
+int mm_support(void);
+extern int mm_flags;
+
+void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx);
+
+#define DECLARE_ALIGNED_16(t, v, ...) DECLARE_ALIGNED(16, t, v)
+#define DECLARE_ALIGNED_8(t, v, ...) DECLARE_ALIGNED(8, t, v)
+
+#if HAVE_MMX
+
+#undef emms_c
+
+static inline void emms(void)
+{
+ #if defined(__INTEL_COMPILER) || defined(_MSC_VER)
+ __asm emms;
+ #else
+ __asm__ volatile ("emms;":::"memory");
+ #endif
+}
+
+
+#define emms_c() \
+{\
+ if (mm_flags & FF_MM_MMX)\
+ emms();\
+}
+
+#else
+
+#define mm_flags 0
+#define mm_support() 0
+
+#endif
+
+#ifndef STRIDE_ALIGN
+# define STRIDE_ALIGN 8
+#endif
+
+/* PSNR */
+void get_psnr(uint8_t *orig_image[3], uint8_t *coded_image[3],
+ int orig_linesize[3], int coded_linesize,
+ AVCodecContext *avctx);
+
+/* FFT computation */
+
+/* NOTE: soon integer code will be added, so you must use the
+ FFTSample type */
+typedef float FFTSample;
+
+typedef struct FFTComplex {
+ FFTSample re, im;
+} FFTComplex;
+
+typedef struct FFTContext {
+ int nbits;
+ int inverse;
+ uint16_t *revtab;
+ FFTComplex *exptab;
+ FFTComplex *exptab1; /* only used by SSE code */
+ FFTComplex *tmp_buf;
+ int mdct_size; /* size of MDCT (i.e. number of input data * 2) */
+ int mdct_bits; /* n = 2^nbits */
+ /* pre/post rotation tables */
+ FFTSample *tcos;
+ FFTSample *tsin;
+ void (*fft_permute)(struct FFTContext *s, FFTComplex *z);
+ void (*fft_calc)(struct FFTContext *s, FFTComplex *z);
+ void (*imdct_calc)(struct FFTContext *s, FFTSample *output, const FFTSample *input);
+ void (*imdct_half)(struct FFTContext *s, FFTSample *output, const FFTSample *input);
+ void (*mdct_calc)(struct FFTContext *s, FFTSample *output, const FFTSample *input);
+ int split_radix;
+ int permutation;
+#define FF_MDCT_PERM_NONE 0
+#define FF_MDCT_PERM_INTERLEAVE 1
+} FFTContext;
+
+#if CONFIG_HARDCODED_TABLES
+#define COSTABLE_CONST const
+#define SINTABLE_CONST const
+#define SINETABLE_CONST const
+#else
+#define COSTABLE_CONST
+#define SINTABLE_CONST
+#define SINETABLE_CONST
+#endif
+
+#define COSTABLE(size) \
+ COSTABLE_CONST DECLARE_ALIGNED_16(FFTSample, ff_cos_##size)[size/2]
+#define SINTABLE(size) \
+ SINTABLE_CONST DECLARE_ALIGNED_16(FFTSample, ff_sin_##size)[size/2]
+#define SINETABLE(size) \
+ SINETABLE_CONST DECLARE_ALIGNED_16(float, ff_sine_##size)[size]
+extern COSTABLE(16);
+extern COSTABLE(32);
+extern COSTABLE(64);
+extern COSTABLE(128);
+extern COSTABLE(256);
+extern COSTABLE(512);
+extern COSTABLE(1024);
+extern COSTABLE(2048);
+extern COSTABLE(4096);
+extern COSTABLE(8192);
+extern COSTABLE(16384);
+extern COSTABLE(32768);
+extern COSTABLE(65536);
+extern COSTABLE_CONST FFTSample* const ff_cos_tabs[17];
+
+/**
+ * Initializes the cosine table in ff_cos_tabs[index].
+ * @param index index in the ff_cos_tabs array of the table to initialize
+ */
+void ff_init_ff_cos_tabs(int index);
+
+extern SINTABLE(16);
+extern SINTABLE(32);
+extern SINTABLE(64);
+extern SINTABLE(128);
+extern SINTABLE(256);
+extern SINTABLE(512);
+extern SINTABLE(1024);
+extern SINTABLE(2048);
+extern SINTABLE(4096);
+extern SINTABLE(8192);
+extern SINTABLE(16384);
+extern SINTABLE(32768);
+extern SINTABLE(65536);
+
+/**
+ * Sets up a complex FFT.
+ * @param nbits log2 of the length of the input array
+ * @param inverse if 0 perform the forward transform, if 1 perform the inverse
+ */
+int ff_fft_init(FFTContext *s, int nbits, int inverse);
+void ff_fft_permute_c(FFTContext *s, FFTComplex *z);
+void ff_fft_calc_c(FFTContext *s, FFTComplex *z);
+
+void ff_fft_init_mmx(FFTContext *s);
+
+/**
+ * Do the permutation needed BEFORE calling ff_fft_calc().
+ */
+static inline void ff_fft_permute(FFTContext *s, FFTComplex *z)
+{
+ s->fft_permute(s, z);
+}
+/**
+ * Do a complex FFT with the parameters defined in ff_fft_init(). The
+ * input data must be permuted beforehand. No 1.0/sqrt(n) normalization is done.
+ */
+static inline void ff_fft_calc(FFTContext *s, FFTComplex *z)
+{
+ s->fft_calc(s, z);
+}
+void ff_fft_end(FFTContext *s);
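Putting the declarations above together, the complex FFT is driven as init, permute, in-place calc, end. A hedged usage sketch, assuming this header is included as "dsputil.h" and that a negative return from ff_fft_init() signals failure; the helper name is invented:

#include "dsputil.h"   /* assumed include path for this header */

/* Forward FFT of (1 << nbits) complex samples, computed in place in z. */
static int run_fft(FFTComplex *z, int nbits)
{
    FFTContext fft;
    if (ff_fft_init(&fft, nbits, 0) < 0)  /* 0 = forward transform */
        return -1;
    ff_fft_permute(&fft, z);              /* reorder input as ff_fft_calc() expects */
    ff_fft_calc(&fft, z);                 /* unnormalized transform, done in place  */
    ff_fft_end(&fft);
    return 0;
}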
+
+/* MDCT computation */
+
+static inline void ff_imdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input)
+{
+ s->imdct_calc(s, output, input);
+}
+static inline void ff_imdct_half(FFTContext *s, FFTSample *output, const FFTSample *input)
+{
+ s->imdct_half(s, output, input);
+}
+
+static inline void ff_mdct_calc(FFTContext *s, FFTSample *output,
+ const FFTSample *input)
+{
+ s->mdct_calc(s, output, input);
+}
+
+/**
+ * Generate a Kaiser-Bessel Derived Window.
+ * @param window pointer to half window
+ * @param alpha determines window shape
+ * @param n size of half window
+ */
+void ff_kbd_window_init(float *window, float alpha, int n);
+
+/**
+ * Generate a sine window.
+ * @param window pointer to half window
+ * @param n size of half window
+ */
+void ff_sine_window_init(float *window, int n);
+/**
+ * Initialize the specified entry of ff_sine_windows.
+ */
+void ff_init_ff_sine_windows(int index);
+extern SINETABLE( 32);
+extern SINETABLE( 64);
+extern SINETABLE( 128);
+extern SINETABLE( 256);
+extern SINETABLE( 512);
+extern SINETABLE(1024);
+extern SINETABLE(2048);
+extern SINETABLE(4096);
+extern SINETABLE_CONST float * const ff_sine_windows[13];
+
+int ff_mdct_init(FFTContext *s, int nbits, int inverse, double scale);
+void ff_imdct_calc_c(FFTContext *s, FFTSample *output, const FFTSample *input);
+void ff_imdct_half_c(FFTContext *s, FFTSample *output, const FFTSample *input);
+void ff_mdct_calc_c(FFTContext *s, FFTSample *output, const FFTSample *input);
+void ff_mdct_end(FFTContext *s);
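The MDCT entry points follow the same init/calc/end pattern. A minimal inverse-MDCT sketch under the same assumptions (include path and error convention); buffer sizes follow mdct_size = 1 << nbits and are left to the caller, and the helper name is invented:

#include "dsputil.h"   /* assumed include path for this header */

/* Inverse MDCT: coefficients in coefs, time-domain samples written to samples. */
static int run_imdct(FFTSample *samples, const FFTSample *coefs, int nbits)
{
    FFTContext mdct;
    if (ff_mdct_init(&mdct, nbits, 1, 1.0) < 0)  /* 1 = inverse, unity scale */
        return -1;
    ff_imdct_calc(&mdct, samples, coefs);        /* dispatches to imdct_calc() */
    ff_mdct_end(&mdct);
    return 0;
}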
+
+/* Real Discrete Fourier Transform */
+
+enum RDFTransformType {
+ RDFT,
+ IRDFT,
+ RIDFT,
+ IRIDFT,
+};
+
+typedef struct {
+ int nbits;
+ int inverse;
+ int sign_convention;
+
+ /* pre/post rotation tables */
+ const FFTSample *tcos;
+ SINTABLE_CONST FFTSample *tsin;
+ FFTContext fft;
+} RDFTContext;
+
+/**
+ * Sets up a real FFT.
+ * @param nbits log2 of the length of the input array
+ * @param trans the type of transform
+ */
+int ff_rdft_init(RDFTContext *s, int nbits, enum RDFTransformType trans);
+void ff_rdft_calc(RDFTContext *s, FFTSample *data);
+void ff_rdft_end(RDFTContext *s);
+
+/* Discrete Cosine Transform */
+
+typedef struct {
+ int nbits;
+ int inverse;
+ FFTSample *data;
+ RDFTContext rdft;
+ const float *costab;
+ FFTSample *csc2;
+} DCTContext;
+
+/**
+ * Sets up (Inverse)DCT.
+ * @param nbits log2 of the length of the input array
+ * @param inverse >0 forward transform, <0 inverse transform
+ */
+int ff_dct_init(DCTContext *s, int nbits, int inverse);
+void ff_dct_calc(DCTContext *s, FFTSample *data);
+void ff_dct_end (DCTContext *s);
+
+#define WRAPPER8_16(name8, name16)\
+static int name16(void /*MpegEncContext*/ *s, uint8_t *dst, uint8_t *src, int stride, int h){\
+ return name8(s, dst , src , stride, h)\
+ +name8(s, dst+8 , src+8 , stride, h);\
+}
+
+#define WRAPPER8_16_SQ(name8, name16)\
+static int name16(void /*MpegEncContext*/ *s, uint8_t *dst, uint8_t *src, int stride, int h){\
+ int score=0;\
+ score +=name8(s, dst , src , stride, 8);\
+ score +=name8(s, dst+8 , src+8 , stride, 8);\
+ if(h==16){\
+ dst += 8*stride;\
+ src += 8*stride;\
+ score +=name8(s, dst , src , stride, 8);\
+ score +=name8(s, dst+8 , src+8 , stride, 8);\
+ }\
+ return score;\
+}
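WRAPPER8_16 and WRAPPER8_16_SQ build a 16-wide comparison function from an existing 8-wide one by summing it over the 8x8 quadrants. A sketch of how such a wrapper can be instantiated, with both function names invented for illustration:

#include <stdint.h>
#include <stdlib.h>
#include "dsputil.h"   /* assumed include path for the wrapper macros */

/* A toy 8-pixel-wide sum of absolute differences with the signature the
 * wrapper macros expect (the void* context argument is unused here). */
static int my_sad8x8(void *s, uint8_t *dst, uint8_t *src, int stride, int h)
{
    int x, y, sum = 0;
    for (y = 0; y < h; y++)
        for (x = 0; x < 8; x++)
            sum += abs(dst[y * stride + x] - src[y * stride + x]);
    return sum;
}

/* Defines my_sad16x16(), which adds up the four 8x8 quadrant scores. */
WRAPPER8_16_SQ(my_sad8x8, my_sad16x16)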
+
+
+static inline void copy_block2(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
+{
+ int i;
+ for(i=0; i<h; i++)
+ {
+ AV_WN16(dst , AV_RN16(src ));
+ dst+=dstStride;
+ src+=srcStride;
+ }
+}
+
+static inline void copy_block4(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
+{
+ int i;
+ for(i=0; i<h; i++)
+ {
+ AV_WN32(dst , AV_RN32(src ));
+ dst+=dstStride;
+ src+=srcStride;
+ }
+}
+
+static inline void copy_block8(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
+{
+ int i;
+ for(i=0; i<h; i++)
+ {
+ AV_WN32(dst , AV_RN32(src ));
+ AV_WN32(dst+4 , AV_RN32(src+4 ));
+ dst+=dstStride;
+ src+=srcStride;
+ }
+}
+
+static inline void copy_block9(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
+{
+ int i;
+ for(i=0; i<h; i++)
+ {
+ AV_WN32(dst , AV_RN32(src ));
+ AV_WN32(dst+4 , AV_RN32(src+4 ));
+ dst[8]= src[8];
+ dst+=dstStride;
+ src+=srcStride;
+ }
+}
+
+static inline void copy_block16(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
+{
+ int i;
+ for(i=0; i<h; i++)
+ {
+ AV_WN32(dst , AV_RN32(src ));
+ AV_WN32(dst+4 , AV_RN32(src+4 ));
+ AV_WN32(dst+8 , AV_RN32(src+8 ));
+ AV_WN32(dst+12, AV_RN32(src+12));
+ dst+=dstStride;
+ src+=srcStride;
+ }
+}
+
+static inline void copy_block17(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
+{
+ int i;
+ for(i=0; i<h; i++)
+ {
+ AV_WN32(dst , AV_RN32(src ));
+ AV_WN32(dst+4 , AV_RN32(src+4 ));
+ AV_WN32(dst+8 , AV_RN32(src+8 ));
+ AV_WN32(dst+12, AV_RN32(src+12));
+ dst[16]= src[16];
+ dst+=dstStride;
+ src+=srcStride;
+ }
+}
+
+const char* avcodec_get_current_idct_mmx(AVCodecContext *avctx,DSPContext *c);
+
+#endif /* AVCODEC_DSPUTIL_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/dxva.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/dxva.h
new file mode 100644
index 000000000..92468f78d
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/dxva.h
@@ -0,0 +1,1963 @@
+//------------------------------------------------------------------------------
+// File: DXVA.h
+//
+// Desc: DirectX Video Acceleration header file.
+//
+// Copyright (c) 1999 - 2002, Microsoft Corporation. All rights reserved.
+//------------------------------------------------------------------------------
+
+
+#ifndef __DIRECTX_VA__
+#define __DIRECTX_VA__
+
+#if _MSC_VER >= 1200
+#pragma warning(push)
+#endif
+
+#pragma warning(disable:4201) // nonstandard extension used: nameless struct/union
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef __DIRECTX_VA_COPP_ONLY
+#define __DIRECTX_VA_DECODER__
+#define __DIRECTX_VA_PROCAMPCONTROL__
+#define __DIRECTX_VA_DEINTERLACE__
+#endif
+
+#ifndef DXVABit
+#define DXVABit(__x) (1 << __x)
+#endif
+
+// -------------------------------------------------------------------------
+//
+// The definitions that follow describe the DirectX Video Acceleration
+// decoding interface.
+// This interface is accessible via the IAMVideoAccelerator interface.
+//
+// -------------------------------------------------------------------------
+//
+#ifndef __DIRECTX_VA_DECODER__
+#define __DIRECTX_VA_DECODER__
+
+DEFINE_GUID(DXVA_ModeNone, 0x1b81be00, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
+DEFINE_GUID(DXVA_ModeH261_A, 0x1b81be01, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
+DEFINE_GUID(DXVA_ModeH261_B, 0x1b81be02, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
+
+DEFINE_GUID(DXVA_ModeH263_A, 0x1b81be03, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
+DEFINE_GUID(DXVA_ModeH263_B, 0x1b81be04, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
+DEFINE_GUID(DXVA_ModeH263_C, 0x1b81be05, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
+DEFINE_GUID(DXVA_ModeH263_D, 0x1b81be06, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
+DEFINE_GUID(DXVA_ModeH263_E, 0x1b81be07, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
+DEFINE_GUID(DXVA_ModeH263_F, 0x1b81be08, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
+
+DEFINE_GUID(DXVA_ModeMPEG1_A, 0x1b81be09, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
+
+DEFINE_GUID(DXVA_ModeMPEG2_A, 0x1b81be0A, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
+DEFINE_GUID(DXVA_ModeMPEG2_B, 0x1b81be0B, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
+DEFINE_GUID(DXVA_ModeMPEG2_C, 0x1b81be0C, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
+DEFINE_GUID(DXVA_ModeMPEG2_D, 0x1b81be0D, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
+
+DEFINE_GUID(DXVA_ModeH264_A, 0x1b81be64, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
+DEFINE_GUID(DXVA_ModeH264_B, 0x1b81be65, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
+DEFINE_GUID(DXVA_ModeH264_C, 0x1b81be66, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
+DEFINE_GUID(DXVA_ModeH264_D, 0x1b81be67, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
+DEFINE_GUID(DXVA_ModeH264_E, 0x1b81be68, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
+DEFINE_GUID(DXVA_ModeH264_F, 0x1b81be69, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
+
+DEFINE_GUID(DXVA_ModeWMV8_A, 0x1b81be80, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
+DEFINE_GUID(DXVA_ModeWMV8_B, 0x1b81be81, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
+
+DEFINE_GUID(DXVA_ModeWMV9_A, 0x1b81be90, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
+DEFINE_GUID(DXVA_ModeWMV9_B, 0x1b81be91, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
+DEFINE_GUID(DXVA_ModeWMV9_C, 0x1b81be94, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
+
+DEFINE_GUID(DXVA_ModeVC1_A, 0x1b81beA0, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
+DEFINE_GUID(DXVA_ModeVC1_B, 0x1b81beA1, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
+DEFINE_GUID(DXVA_ModeVC1_C, 0x1b81beA2, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
+DEFINE_GUID(DXVA_ModeVC1_D, 0x1b81beA3, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
+
+DEFINE_GUID(DXVA_NoEncrypt, 0x1b81beD0, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
+
+#define DXVA_ModeWMV8_PostProc DXVA_ModeWMV8_A
+#define DXVA_ModeWMV8_MoComp DXVA_ModeWMV8_B
+
+#define DXVA_ModeWMV9_PostProc DXVA_ModeWMV9_A
+#define DXVA_ModeWMV9_MoComp DXVA_ModeWMV9_B
+#define DXVA_ModeWMV9_IDCT DXVA_ModeWMV9_C
+
+#define DXVA_ModeVC1_PostProc DXVA_ModeVC1_A
+#define DXVA_ModeVC1_MoComp DXVA_ModeVC1_B
+#define DXVA_ModeVC1_IDCT DXVA_ModeVC1_C
+#define DXVA_ModeVC1_VLD DXVA_ModeVC1_D
+
+#define DXVA_ModeH264_MoComp_NoFGT DXVA_ModeH264_A
+#define DXVA_ModeH264_MoComp_FGT DXVA_ModeH264_B
+#define DXVA_ModeH264_IDCT_NoFGT DXVA_ModeH264_C
+#define DXVA_ModeH264_IDCT_FGT DXVA_ModeH264_D
+#define DXVA_ModeH264_VLD_NoFGT DXVA_ModeH264_E
+#define DXVA_ModeH264_VLD_FGT DXVA_ModeH264_F
+
+#define DXVA_RESTRICTED_MODE_UNRESTRICTED 0xFFFF
+#define DXVA_RESTRICTED_MODE_H261_A 1
+#define DXVA_RESTRICTED_MODE_H261_B 2
+
+#define DXVA_RESTRICTED_MODE_H263_A 3
+#define DXVA_RESTRICTED_MODE_H263_B 4
+#define DXVA_RESTRICTED_MODE_H263_C 5
+#define DXVA_RESTRICTED_MODE_H263_D 6
+#define DXVA_RESTRICTED_MODE_H263_E 7
+#define DXVA_RESTRICTED_MODE_H263_F 8
+
+#define DXVA_RESTRICTED_MODE_MPEG1_A 9
+
+#define DXVA_RESTRICTED_MODE_MPEG2_A 0xA
+#define DXVA_RESTRICTED_MODE_MPEG2_B 0xB
+#define DXVA_RESTRICTED_MODE_MPEG2_C 0xC
+#define DXVA_RESTRICTED_MODE_MPEG2_D 0xD
+
+#define DXVA_RESTRICTED_MODE_H264_A 0x64
+#define DXVA_RESTRICTED_MODE_H264_B 0x65
+#define DXVA_RESTRICTED_MODE_H264_C 0x66
+#define DXVA_RESTRICTED_MODE_H264_D 0x67
+#define DXVA_RESTRICTED_MODE_H264_E 0x68
+#define DXVA_RESTRICTED_MODE_H264_F 0x69
+
+#define DXVA_RESTRICTED_MODE_WMV8_A 0x80
+#define DXVA_RESTRICTED_MODE_WMV8_B 0x81
+
+#define DXVA_RESTRICTED_MODE_WMV9_A 0x90
+#define DXVA_RESTRICTED_MODE_WMV9_B 0x91
+#define DXVA_RESTRICTED_MODE_WMV9_C 0x94
+
+#define DXVA_RESTRICTED_MODE_VC1_A 0xA0
+#define DXVA_RESTRICTED_MODE_VC1_B 0xA1
+#define DXVA_RESTRICTED_MODE_VC1_C 0xA2
+#define DXVA_RESTRICTED_MODE_VC1_D 0xA3
+
+#define DXVA_RESTRICTED_MODE_WMV8_POSTPROC DXVA_RESTRICTED_MODE_WMV8_A
+#define DXVA_RESTRICTED_MODE_WMV8_MOCOMP DXVA_RESTRICTED_MODE_WMV8_B
+
+#define DXVA_RESTRICTED_MODE_WMV9_POSTPROC DXVA_RESTRICTED_MODE_WMV9_A
+#define DXVA_RESTRICTED_MODE_WMV9_MOCOMP DXVA_RESTRICTED_MODE_WMV9_B
+#define DXVA_RESTRICTED_MODE_WMV9_IDCT DXVA_RESTRICTED_MODE_WMV9_C
+
+#define DXVA_RESTRICTED_MODE_VC1_POSTPROC DXVA_RESTRICTED_MODE_VC1_A
+#define DXVA_RESTRICTED_MODE_VC1_MOCOMP DXVA_RESTRICTED_MODE_VC1_B
+#define DXVA_RESTRICTED_MODE_VC1_IDCT DXVA_RESTRICTED_MODE_VC1_C
+#define DXVA_RESTRICTED_MODE_VC1_VLD DXVA_RESTRICTED_MODE_VC1_D
+
+#define DXVA_RESTRICTED_MODE_H264_MOCOMP_NOFGT DXVA_RESTRICTED_MODE_H264_A
+#define DXVA_RESTRICTED_MODE_H264_MOCOMP_FGT DXVA_RESTRICTED_MODE_H264_B
+#define DXVA_RESTRICTED_MODE_H264_IDCT_NOFGT DXVA_RESTRICTED_MODE_H264_C
+#define DXVA_RESTRICTED_MODE_H264_IDCT_FGT DXVA_RESTRICTED_MODE_H264_D
+#define DXVA_RESTRICTED_MODE_H264_VLD_NOFGT DXVA_RESTRICTED_MODE_H264_E
+#define DXVA_RESTRICTED_MODE_H264_VLD_FGT DXVA_RESTRICTED_MODE_H264_F
+
+#define DXVA_COMPBUFFER_TYPE_THAT_IS_NOT_USED 0
+#define DXVA_PICTURE_DECODE_BUFFER 1
+#define DXVA_MACROBLOCK_CONTROL_BUFFER 2
+#define DXVA_RESIDUAL_DIFFERENCE_BUFFER 3
+#define DXVA_DEBLOCKING_CONTROL_BUFFER 4
+#define DXVA_INVERSE_QUANTIZATION_MATRIX_BUFFER 5
+#define DXVA_SLICE_CONTROL_BUFFER 6
+#define DXVA_BITSTREAM_DATA_BUFFER 7
+#define DXVA_AYUV_BUFFER 8
+#define DXVA_IA44_SURFACE_BUFFER 9
+#define DXVA_DPXD_SURFACE_BUFFER 10
+#define DXVA_HIGHLIGHT_BUFFER 11
+#define DXVA_DCCMD_SURFACE_BUFFER 12
+#define DXVA_ALPHA_BLEND_COMBINATION_BUFFER 13
+#define DXVA_PICTURE_RESAMPLE_BUFFER 14
+#define DXVA_READ_BACK_BUFFER 15
+
+/* H.264/AVC Additional buffer types */
+#define DXVA_MOTION_VECTOR_BUFFER 16
+#define DXVA_FILM_GRAIN_BUFFER 17
+
+#define DXVA_NUM_TYPES_COMP_BUFFERS 18
+
+/* values for bDXVA_Func */
+#define DXVA_PICTURE_DECODING_FUNCTION 1
+#define DXVA_ALPHA_BLEND_DATA_LOAD_FUNCTION 2
+#define DXVA_ALPHA_BLEND_COMBINATION_FUNCTION 3
+#define DXVA_PICTURE_RESAMPLE_FUNCTION 4
+#define DXVA_DEBLOCKING_FILTER_FUNCTION 5
+#define DXVA_FILM_GRAIN_SYNTHESIS_FUNCTION 6
+#define DXVA_STATUS_REPORTING_FUNCTION 7
+
+/* values returned from Execute command in absence of read-back */
+#define DXVA_EXECUTE_RETURN_OK 0
+#define DXVA_EXECUTE_RETURN_DATA_ERROR_MINOR 1
+#define DXVA_EXECUTE_RETURN_DATA_ERROR_SIGNIF 2
+#define DXVA_EXECUTE_RETURN_DATA_ERROR_SEVERE 3
+#define DXVA_EXECUTE_RETURN_OTHER_ERROR_SEVERE 4
+
+
+#pragma pack(push, BeforeDXVApacking, 1)
+
+
+typedef struct _DXVA_ConnectMode {
+ GUID guidMode;
+ WORD wRestrictedMode;
+} DXVA_ConnectMode, *LPDXVA_ConnectMode;
+
+
+typedef DWORD DXVA_ConfigQueryOrReplyFunc, *LPDXVA_ConfigQueryOrReplyFunc;
+
+#define DXVA_QUERYORREPLYFUNCFLAG_DECODER_PROBE_QUERY 0xFFFFF1
+#define DXVA_QUERYORREPLYFUNCFLAG_DECODER_LOCK_QUERY 0xFFFFF5
+#define DXVA_QUERYORREPLYFUNCFLAG_ACCEL_PROBE_OK_COPY 0xFFFFF8
+#define DXVA_QUERYORREPLYFUNCFLAG_ACCEL_PROBE_OK_PLUS 0xFFFFF9
+#define DXVA_QUERYORREPLYFUNCFLAG_ACCEL_LOCK_OK_COPY 0xFFFFFC
+#define DXVA_QUERYORREPLYFUNCFLAG_ACCEL_PROBE_FALSE_PLUS 0xFFFFFB
+#define DXVA_QUERYORREPLYFUNCFLAG_ACCEL_LOCK_FALSE_PLUS 0xFFFFFF
+
+#define readDXVA_QueryOrReplyFuncFlag(ptr) ((*(ptr)) >> 8)
+
+#define readDXVA_QueryOrReplyFuncFlag_ACCEL(ptr) (((*(ptr)) >> 11) & 1)
+#define readDXVA_QueryOrReplyFuncFlag_LOCK(ptr) (((*(ptr)) >> 10) & 1)
+#define readDXVA_QueryOrReplyFuncFlag_BAD(ptr) (((*(ptr)) >> 9) & 1)
+#define readDXVA_QueryOrReplyFuncFlag_PLUS(ptr) (((*(ptr)) >> 8) & 1)
+
+#define readDXVA_QueryOrReplyFuncFunc(ptr) ((*(ptr)) & 0xFF)
+
+#define writeDXVA_QueryOrReplyFunc(ptr, flg, fnc) ((*(ptr)) = ((flg) << 8) | (fnc))
+
+#define setDXVA_QueryOrReplyFuncFlag(ptr, flg) ((*(ptr)) |= ((flg) << 8))
+#define setDXVA_QueryOrReplyFuncFunc(ptr, fnc) ((*(ptr)) |= (fnc))
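A DXVA_ConfigQueryOrReplyFunc packs a 24-bit flag field above an 8-bit function code, and the read/write/set macros above are plain shifts and masks over that DWORD. A small illustration, assuming this header is included as "dxva.h" after windows.h; the helper name is invented:

#include <windows.h>
#include "dxva.h"   /* assumed include path for this header */

static DWORD make_probe_query(void)
{
    DXVA_ConfigQueryOrReplyFunc f = 0;
    /* Flag in the upper 24 bits, bDXVA_Func code in the low 8 bits:
     * f becomes 0xFFFFF101 for a decoder probe of the picture-decoding function. */
    writeDXVA_QueryOrReplyFunc(&f, DXVA_QUERYORREPLYFUNCFLAG_DECODER_PROBE_QUERY,
                                   DXVA_PICTURE_DECODING_FUNCTION);
    /* readDXVA_QueryOrReplyFuncFlag(&f) -> 0xFFFFF1,
     * readDXVA_QueryOrReplyFuncFunc(&f) -> 0x01 */
    return f;
}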
+
+typedef DWORD DXVA_EncryptProtocolFunc, *LPDXVA_EncryptProtocolFunc;
+
+#define DXVA_ENCRYPTPROTOCOLFUNCFLAG_HOST 0xFFFF00
+#define DXVA_ENCRYPTPROTOCOLFUNCFLAG_ACCEL 0xFFFF08
+
+#define readDXVA_EncryptProtocolFuncFlag(ptr) ((*(ptr)) >> 8)
+#define readDXVA_EncryptProtocolFuncFlag_ACCEL(ptr) (((*(ptr)) >> 11) & 1)
+
+#define readDXVA_EncryptProtocolFuncFunc(ptr) ((*(ptr)) & 0xFF)
+
+#define writeDXVA_EncryptProtocolFunc(ptr, flg, fnc) ((*(ptr)) = ((flg) << 8) | (fnc))
+
+#define setDXVA_EncryptProtocolFuncFlag(ptr, flg) ((*(ptr)) |= ((flg) << 8))
+#define setDXVA_EncryptProtocolFuncFunc(ptr, fnc) ((*(ptr)) |= (fnc))
+
+typedef struct _DXVA_EncryptProtocolHeader {
+ DXVA_EncryptProtocolFunc dwFunction;
+ DWORD ReservedBits[3];
+ GUID guidEncryptProtocol;
+} DXVA_EncryptProtocolHeader, *LPDXVA_EncryptProtocolHeader;
+
+typedef struct _DXVA_ConfigPictureDecode {
+
+ // Operation Indicated
+ DXVA_ConfigQueryOrReplyFunc dwFunction;
+
+ // Alignment
+ DWORD dwReservedBits[3];
+
+ // Encryption GUIDs
+ GUID guidConfigBitstreamEncryption;
+ GUID guidConfigMBcontrolEncryption;
+ GUID guidConfigResidDiffEncryption;
+
+ // Bitstream Processing Indicator
+ BYTE bConfigBitstreamRaw;
+
+ // Macroblock Control Config
+ BYTE bConfigMBcontrolRasterOrder;
+
+ // Host Resid Diff Config
+ BYTE bConfigResidDiffHost;
+ BYTE bConfigSpatialResid8;
+ BYTE bConfigResid8Subtraction;
+ BYTE bConfigSpatialHost8or9Clipping;
+ BYTE bConfigSpatialResidInterleaved;
+ BYTE bConfigIntraResidUnsigned;
+
+ // Accelerator Resid Diff Config
+ BYTE bConfigResidDiffAccelerator;
+ BYTE bConfigHostInverseScan;
+ BYTE bConfigSpecificIDCT;
+ BYTE bConfig4GroupedCoefs;
+} DXVA_ConfigPictureDecode, *LPDXVA_ConfigPictureDecode;
+
+
+/* Picture Decoding Parameters */
+
+typedef struct _DXVA_PictureParameters {
+
+ WORD wDecodedPictureIndex;
+ WORD wDeblockedPictureIndex;
+
+ WORD wForwardRefPictureIndex;
+ WORD wBackwardRefPictureIndex;
+
+ WORD wPicWidthInMBminus1;
+ WORD wPicHeightInMBminus1;
+
+ BYTE bMacroblockWidthMinus1;
+ BYTE bMacroblockHeightMinus1;
+
+ BYTE bBlockWidthMinus1;
+ BYTE bBlockHeightMinus1;
+
+ BYTE bBPPminus1;
+
+ BYTE bPicStructure;
+ BYTE bSecondField;
+ BYTE bPicIntra;
+ BYTE bPicBackwardPrediction;
+
+ BYTE bBidirectionalAveragingMode;
+ BYTE bMVprecisionAndChromaRelation;
+ BYTE bChromaFormat;
+
+ BYTE bPicScanFixed;
+ BYTE bPicScanMethod;
+ BYTE bPicReadbackRequests;
+
+ BYTE bRcontrol;
+ BYTE bPicSpatialResid8;
+ BYTE bPicOverflowBlocks;
+ BYTE bPicExtrapolation;
+
+ BYTE bPicDeblocked;
+ BYTE bPicDeblockConfined;
+ BYTE bPic4MVallowed;
+ BYTE bPicOBMC;
+ BYTE bPicBinPB;
+ BYTE bMV_RPS;
+
+ BYTE bReservedBits;
+
+ WORD wBitstreamFcodes;
+ WORD wBitstreamPCEelements;
+ BYTE bBitstreamConcealmentNeed;
+ BYTE bBitstreamConcealmentMethod;
+
+} DXVA_PictureParameters, *LPDXVA_PictureParameters;
+
+
+
+/* Picture Resampling */
+
+typedef struct _DXVA_PicResample {
+
+ WORD wPicResampleSourcePicIndex;
+ WORD wPicResampleDestPicIndex;
+
+ WORD wPicResampleRcontrol;
+ BYTE bPicResampleExtrapWidth;
+ BYTE bPicResampleExtrapHeight;
+
+ DWORD dwPicResampleSourceWidth;
+ DWORD dwPicResampleSourceHeight;
+
+ DWORD dwPicResampleDestWidth;
+ DWORD dwPicResampleDestHeight;
+
+ DWORD dwPicResampleFullDestWidth;
+ DWORD dwPicResampleFullDestHeight;
+
+} DXVA_PicResample, *LPDXVA_PicResample;
+
+
+#define DXVA_CHROMA_FORMAT_420 1
+#define DXVA_CHROMA_FORMAT_422 2
+#define DXVA_CHROMA_FORMAT_444 3
+
+#define DXVA_PICTURE_STRUCTURE_TOP_FIELD 1
+#define DXVA_PICTURE_STRUCTURE_BOTTOM_FIELD 2
+#define DXVA_PICTURE_STRUCTURE_FRAME 3
+
+#define DXVA_BIDIRECTIONAL_AVERAGING_MPEG2_ROUND 0
+#define DXVA_BIDIRECTIONAL_AVERAGING_H263_TRUNC 1
+
+#define DXVA_MV_PRECISION_AND_CHROMA_RELATION_MPEG2 0
+#define DXVA_MV_PRECISION_AND_CHROMA_RELATION_H263 1
+#define DXVA_MV_PRECISION_AND_CHROMA_RELATION_H261 2
+
+#define DXVA_SCAN_METHOD_ZIG_ZAG 0
+#define DXVA_SCAN_METHOD_ALTERNATE_VERTICAL 1
+#define DXVA_SCAN_METHOD_ALTERNATE_HORIZONTAL 2
+#define DXVA_SCAN_METHOD_ARBITRARY 3
+
+#define DXVA_BITSTREAM_CONCEALMENT_NEED_UNLIKELY 0
+#define DXVA_BITSTREAM_CONCEALMENT_NEED_MILD 1
+#define DXVA_BITSTREAM_CONCEALMENT_NEED_LIKELY 2
+#define DXVA_BITSTREAM_CONCEALMENT_NEED_SEVERE 3
+
+#define DXVA_BITSTREAM_CONCEALMENT_METHOD_UNSPECIFIED 0
+#define DXVA_BITSTREAM_CONCEALMENT_METHOD_INTRA 1
+#define DXVA_BITSTREAM_CONCEALMENT_METHOD_FORWARD 2
+#define DXVA_BITSTREAM_CONCEALMENT_METHOD_BACKWARD 3
+
+
+/* Buffer Description Data */
+
+typedef struct _DXVA_BufferDescription {
+ DWORD dwTypeIndex;
+ DWORD dwBufferIndex;
+ DWORD dwDataOffset;
+ DWORD dwDataSize;
+ DWORD dwFirstMBaddress;
+ DWORD dwNumMBsInBuffer;
+ DWORD dwWidth;
+ DWORD dwHeight;
+ DWORD dwStride;
+ DWORD dwReservedBits;
+} DXVA_BufferDescription, *LPDXVA_BufferDescription;
+
+
+/* Off-Host IDCT Coefficient Data Structures */
+
+typedef struct _DXVA_TCoef4Group {
+ BYTE TCoefIDX[4];
+ SHORT TCoefValue[4];
+} DXVA_TCoef4Group, *LPDXVA_TCoef4Group;
+
+typedef struct _DXVA_TCoefSingle {
+ WORD wIndexWithEOB;
+ SHORT TCoefValue;
+} DXVA_TCoefSingle, *LPDXVA_TCoefSingle;
+
+/* Macros for Reading EOB and Index Values */
+
+#define readDXVA_TCoefSingleIDX(ptr) ((ptr)->wIndexWithEOB >> 1)
+#define readDXVA_TCoefSingleEOB(ptr) ((ptr)->wIndexWithEOB & 1)
+
+/* Macro for Writing EOB and Index Values */
+
+#define writeDXVA_TCoefSingleIndexWithEOB(ptr, idx, eob) ((ptr)->wIndexWithEOB = ((idx) << 1) | (eob))
+#define setDXVA_TCoefSingleIDX(ptr, idx) ((ptr)->wIndexWithEOB |= ((idx) << 1))
+#define setDXVA_TCoefSingleEOB(ptr) ((ptr)->wIndexWithEOB |= 1)
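DXVA_TCoefSingle keeps the coefficient's scan index in the upper 15 bits of wIndexWithEOB and the end-of-block flag in bit 0, which is all the macros above encode and decode. A short illustration under the same include assumptions; the helper name is invented:

#include <windows.h>
#include "dxva.h"   /* assumed include path for this header */

static DXVA_TCoefSingle last_coef_example(void)
{
    DXVA_TCoefSingle t;
    /* Scan index 63 with the end-of-block flag set: wIndexWithEOB == (63 << 1) | 1 == 0x7F. */
    writeDXVA_TCoefSingleIndexWithEOB(&t, 63, 1);
    t.TCoefValue = -5;   /* the coefficient value itself */
    /* readDXVA_TCoefSingleIDX(&t) -> 63, readDXVA_TCoefSingleEOB(&t) -> 1 */
    return t;
}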
+
+/* Spatial-Domain Residual Difference Blocks */
+
+#define DXVA_USUAL_BLOCK_WIDTH 8
+#define DXVA_USUAL_BLOCK_HEIGHT 8
+#define DXVA_USUAL_BLOCK_SIZE (DXVA_USUAL_BLOCK_WIDTH * DXVA_USUAL_BLOCK_HEIGHT)
+
+typedef SHORT DXVA_Sample16[DXVA_USUAL_BLOCK_SIZE];
+typedef signed char DXVA_Sample8 [DXVA_USUAL_BLOCK_SIZE];
+
+/* Deblocking Filter Control Structure */
+
+typedef BYTE DXVA_DeblockingEdgeControl;
+
+typedef DXVA_DeblockingEdgeControl * LPDXVA_DeblockingEdgeControl;
+
+/* Macros for Reading STRENGTH and FilterOn */
+
+#define readDXVA_EdgeFilterStrength(ptr) ((*(ptr)) >> 1)
+#define readDXVA_EdgeFilterOn(ptr) ((*(ptr)) & 1)
+
+/* Macro for Writing STRENGTH and FilterOn */
+
+#define writeDXVA_DeblockingEdgeControl(ptr, str, fon) ((*(ptr)) = ((str) << 1) | (fon))
+#define setDXVA_EdgeFilterStrength(ptr, str) ((*(ptr)) |= ((str) << 1))
+#define setDXVA_EdgeFilterOn(ptr) ((*(ptr)) |= 1)
+
+
+/* Macroblock Control Command Data Structures */
+
+typedef struct _DXVA_MVvalue {
+ SHORT horz, vert;
+} DXVA_MVvalue, *LPDXVA_MVvalue;
+
+
+/* Inverse Quantization Matrices */
+
+typedef struct _DXVA_QmatrixData {
+ BYTE bNewQmatrix[4]; /* intra Y, inter Y, intra chroma, inter chroma */
+ WORD Qmatrix[4][DXVA_USUAL_BLOCK_WIDTH * DXVA_USUAL_BLOCK_HEIGHT];
+} DXVA_QmatrixData, *LPDXVA_QmatrixData;
+
+
+/* Slice Control Buffer Data */
+
+typedef struct _DXVA_SliceInfo {
+ WORD wHorizontalPosition;
+ WORD wVerticalPosition;
+ DWORD dwSliceBitsInBuffer;
+ DWORD dwSliceDataLocation;
+ BYTE bStartCodeBitOffset;
+ BYTE bReservedBits;
+ WORD wMBbitOffset;
+ WORD wNumberMBsInSlice;
+ WORD wQuantizerScaleCode;
+ WORD wBadSliceChopping;
+} DXVA_SliceInfo, *LPDXVA_SliceInfo;
+
+
+#define DXVA_NumMV_OBMC_off_BinPBwith4MV_off 4
+#define DXVA_NumMV_OBMC_off_BinPBwith4MV_on (4+1)
+#define DXVA_NumMV_OBMC_on__BinPB_off (10)
+#define DXVA_NumMV_OBMC_on__BinPB_on (11) /* not current standards */
+
+#define DXVA_NumBlocksPerMB_420 (4+2+0)
+#define DXVA_NumBlocksPerMB_422 (4+2+2)
+#define DXVA_NumBlocksPerMB_444 (4+4+4)
+
+/* Basic form for I pictures */
+/* Host Residual Differences */
+typedef struct _DXVA_MBctrl_I_HostResidDiff_1 {
+ WORD wMBaddress;
+ WORD wMBtype;
+ DWORD dwMB_SNL;
+ WORD wPatternCode;
+ WORD wPC_Overflow; /* zero if not overflow format */
+ DWORD dwReservedBits2;
+} DXVA_MBctrl_I_HostResidDiff_1;
+
+/* Basic form for I pictures */
+/* Off-Host IDCT, 4:2:0 sampling */
+typedef struct _DXVA_MBctrl_I_OffHostIDCT_1 {
+ WORD wMBaddress;
+ WORD wMBtype;
+ DWORD dwMB_SNL;
+ WORD wPatternCode;
+ BYTE bNumCoef[DXVA_NumBlocksPerMB_420];
+} DXVA_MBctrl_I_OffHostIDCT_1;
+
+/* Basic form for P and B pictures */
+/* Should also be used for concealment MVs in MPEG-2 I pictures */
+/* Without OBMC, without BinPB and 4MV together, without MV RPS */
+/* Host Residual Differences */
+typedef struct _DXVA_MBctrl_P_HostResidDiff_1 {
+ WORD wMBaddress;
+ WORD wMBtype;
+ DWORD dwMB_SNL;
+ WORD wPatternCode;
+ WORD wPC_Overflow; /* zero if not overflow format */
+ DWORD dwReservedBits2;
+ DXVA_MVvalue MVector[DXVA_NumMV_OBMC_off_BinPBwith4MV_off];
+} DXVA_MBctrl_P_HostResidDiff_1;
+
+/* Basic form for P and B pictures */
+/* Without OBMC, without BinPB and 4MV together, without MV RPS */
+/* Off-Host IDCT, 4:2:0 sampling */
+typedef struct _DXVA_MBctrl_P_OffHostIDCT_1 {
+ WORD wMBaddress;
+ WORD wMBtype;
+ DWORD dwMB_SNL;
+ WORD wPatternCode;
+ BYTE bNumCoef[DXVA_NumBlocksPerMB_420];
+ DXVA_MVvalue MVector[DXVA_NumMV_OBMC_off_BinPBwith4MV_off];
+} DXVA_MBctrl_P_OffHostIDCT_1;
+
+/* How to load alpha blending graphic data */
+typedef struct _DXVA_ConfigAlphaLoad {
+
+ // Operation Indicated
+ DXVA_ConfigQueryOrReplyFunc dwFunction;
+
+ // Alignment
+ DWORD dwReservedBits[3];
+
+ BYTE bConfigDataType;
+} DXVA_ConfigAlphaLoad, *LPDXVA_ConfigAlphaLoad;
+
+#define DXVA_CONFIG_DATA_TYPE_IA44 0
+#define DXVA_CONFIG_DATA_TYPE_AI44 1
+#define DXVA_CONFIG_DATA_TYPE_DPXD 2
+#define DXVA_CONFIG_DATA_TYPE_AYUV 3
+
+
+/* How to combine alpha blending graphic data */
+typedef struct _DXVA_ConfigAlphaCombine {
+
+ // Operation Indicated
+ DXVA_ConfigQueryOrReplyFunc dwFunction;
+
+ // Alignment
+ DWORD dwReservedBits[3];
+
+ BYTE bConfigBlendType;
+ BYTE bConfigPictureResizing;
+ BYTE bConfigOnlyUsePicDestRectArea;
+ BYTE bConfigGraphicResizing;
+ BYTE bConfigWholePlaneAlpha;
+
+} DXVA_ConfigAlphaCombine, *LPDXVA_ConfigAlphaCombine;
+
+#define DXVA_CONFIG_BLEND_TYPE_FRONT_BUFFER 0
+#define DXVA_CONFIG_BLEND_TYPE_BACK_HARDWARE 1
+
+
+/* AYUV sample for 16-entry YUV palette or graphic surface */
+
+typedef struct _DXVA_AYUVsample2 {
+ BYTE bCrValue;
+ BYTE bCbValue;
+ BYTE bY_Value;
+ BYTE bSampleAlpha8;
+} DXVA_AYUVsample2, *LPDXVA_AYUVsample2;
+
+/* Macros for IA44 alpha blending surface samples */
+
+typedef BYTE DXVA_IA44sample, *LPDXVA_IA44sample;
+
+#define readDXVA_IA44index(ptr) (((*(ptr)) & 0xF0) >> 4)
+#define readDXVA_IA44alpha(ptr) ((*(ptr)) & 0x0F)
+
+#define writeDXVA_IA44(ptr, idx, alpha) ((*(ptr)) = (((idx) << 4) | (alpha)))
+
+#define setDXVA_IA44index(ptr, idx) ((*(ptr)) |= ((idx) << 4))
+#define setDXVA_IA44alpha(ptr, alpha) ((*(ptr)) |= (alpha))
+
+/* Macros for AI44 alpha blending surface samples */
+
+typedef BYTE DXVA_AI44sample, *LPDXVA_AI44sample;
+
+#define readDXVA_AI44index(ptr) ((*(ptr)) & 0x0F)
+#define readDXVA_AI44alpha(ptr) (((*(ptr)) & 0xF0) >> 4)
+
+#define writeDXVA_AI44(ptr, idx, alpha) ((*(ptr)) = (((alpha) << 4) | (idx)))
+
+#define setDXVA_AI44index(ptr, idx) ((*(ptr)) |= (idx))
+#define setDXVA_AI44alpha(ptr, alpha) ((*(ptr)) |= ((alpha) << 4))
+
+
+/* Highlight data structure */
+
+typedef struct _DXVA_Highlight {
+ WORD wHighlightActive;
+ WORD wHighlightIndices;
+ WORD wHighlightAlphas;
+ RECT HighlightRect;
+} DXVA_Highlight, *LPDXVA_Highlight;
+
+typedef BYTE DXVA_DPXD, *LPDXVA_DPXD;
+typedef WORD DXVA_DCCMD, *LPDXVA_DCCMD;
+
+/* Alpha blend combination */
+
+typedef struct _DXVA_BlendCombination {
+ WORD wPictureSourceIndex;
+ WORD wBlendedDestinationIndex;
+ RECT PictureSourceRect16thPel;
+ RECT PictureDestinationRect;
+ RECT GraphicSourceRect;
+ RECT GraphicDestinationRect;
+ WORD wBlendDelay;
+ BYTE bBlendOn;
+ BYTE bWholePlaneAlpha;
+ DXVA_AYUVsample2 OutsideYUVcolor;
+} DXVA_BlendCombination, *LPDXVA_BlendCombination;
+
+/* H.264/AVC-specific structures */
+
+/* H.264/AVC picture entry data structure */
+typedef struct _DXVA_PicEntry_H264 {
+ union {
+ struct {
+ UCHAR Index7Bits : 7;
+ UCHAR AssociatedFlag : 1;
+ };
+ UCHAR bPicEntry;
+ };
+} DXVA_PicEntry_H264, *LPDXVA_PicEntry_H264; /* 1 byte */
+
+/* H.264/AVC picture parameters structure */
+typedef struct _DXVA_PicParams_H264 {
+ USHORT wFrameWidthInMbsMinus1;
+ USHORT wFrameHeightInMbsMinus1;
+ DXVA_PicEntry_H264 CurrPic; /* flag is bot field flag */
+ UCHAR num_ref_frames;
+
+ union {
+ struct {
+ USHORT field_pic_flag : 1;
+ USHORT MbaffFrameFlag : 1;
+ USHORT residual_colour_transform_flag : 1;
+ USHORT sp_for_switch_flag : 1;
+ USHORT chroma_format_idc : 2;
+ USHORT RefPicFlag : 1;
+ USHORT constrained_intra_pred_flag : 1;
+
+ USHORT weighted_pred_flag : 1;
+ USHORT weighted_bipred_idc : 2;
+ USHORT MbsConsecutiveFlag : 1;
+ USHORT frame_mbs_only_flag : 1;
+ USHORT transform_8x8_mode_flag : 1;
+ USHORT MinLumaBipredSize8x8Flag : 1;
+ USHORT IntraPicFlag : 1;
+ };
+ USHORT wBitFields;
+ };
+ UCHAR bit_depth_luma_minus8;
+ UCHAR bit_depth_chroma_minus8;
+
+ USHORT Reserved16Bits;
+ UINT StatusReportFeedbackNumber;
+
+ DXVA_PicEntry_H264 RefFrameList[16]; /* flag LT */
+ INT CurrFieldOrderCnt[2];
+ INT FieldOrderCntList[16][2];
+
+ CHAR pic_init_qs_minus26;
+ CHAR chroma_qp_index_offset; /* also used for QScb */
+ CHAR second_chroma_qp_index_offset; /* also for QScr */
+ UCHAR ContinuationFlag;
+
+/* remainder for parsing */
+ CHAR pic_init_qp_minus26;
+ UCHAR num_ref_idx_l0_active_minus1;
+ UCHAR num_ref_idx_l1_active_minus1;
+ UCHAR Reserved8BitsA;
+
+ USHORT FrameNumList[16];
+ UINT UsedForReferenceFlags;
+ USHORT NonExistingFrameFlags;
+ USHORT frame_num;
+
+ UCHAR log2_max_frame_num_minus4;
+ UCHAR pic_order_cnt_type;
+ UCHAR log2_max_pic_order_cnt_lsb_minus4;
+ UCHAR delta_pic_order_always_zero_flag;
+
+ UCHAR direct_8x8_inference_flag;
+ UCHAR entropy_coding_mode_flag;
+ UCHAR pic_order_present_flag;
+ UCHAR num_slice_groups_minus1;
+
+ UCHAR slice_group_map_type;
+ UCHAR deblocking_filter_control_present_flag;
+ UCHAR redundant_pic_cnt_present_flag;
+ UCHAR Reserved8BitsB;
+
+ USHORT slice_group_change_rate_minus1;
+
+ UCHAR SliceGroupMap[810]; /* 4b/sgmu, Size BT.601 */
+
+} DXVA_PicParams_H264, *LPDXVA_PicParams_H264;
+
+/* H.264/AVC quantization weighting matrix data structure */
+typedef struct _DXVA_Qmatrix_H264 {
+ UCHAR bScalingLists4x4[6][16];
+ UCHAR bScalingLists8x8[2][64];
+
+} DXVA_Qmatrix_H264, *LPDXVA_Qmatrix_H264;
+
+/* H.264/AVC slice control data structure - short form */
+typedef struct _DXVA_Slice_H264_Short {
+ UINT BSNALunitDataLocation; /* type 1..5 */
+ UINT SliceBytesInBuffer; /* for off-host parse */
+ USHORT wBadSliceChopping; /* for off-host parse */
+} DXVA_Slice_H264_Short, *LPDXVA_Slice_H264_Short;
+
+/* H.264/AVC picture entry data structure - long form */
+typedef struct _DXVA_Slice_H264_Long {
+ UINT BSNALunitDataLocation; /* type 1..5 */
+ UINT SliceBytesInBuffer; /* for off-host parse */
+ USHORT wBadSliceChopping; /* for off-host parse */
+
+ USHORT first_mb_in_slice;
+ USHORT NumMbsForSlice;
+
+ USHORT BitOffsetToSliceData; /* after CABAC alignment */
+
+ UCHAR slice_type;
+ UCHAR luma_log2_weight_denom;
+ UCHAR chroma_log2_weight_denom;
+ UCHAR num_ref_idx_l0_active_minus1;
+ UCHAR num_ref_idx_l1_active_minus1;
+ CHAR slice_alpha_c0_offset_div2;
+ CHAR slice_beta_offset_div2;
+ UCHAR Reserved8Bits;
+ DXVA_PicEntry_H264 RefPicList[2][32]; /* L0 & L1 */
+ SHORT Weights[2][32][3][2]; /* L0 & L1; Y, Cb, Cr */
+ CHAR slice_qs_delta;
+ /* rest off-host parse */
+ CHAR slice_qp_delta;
+ UCHAR redundant_pic_cnt;
+ UCHAR direct_spatial_mv_pred_flag;
+ UCHAR cabac_init_idc;
+ UCHAR disable_deblocking_filter_idc;
+ USHORT slice_id;
+} DXVA_Slice_H264_Long, *LPDXVA_Slice_H264_Long;
+
+/* H.264/AVC macroblock control command data structure */
+typedef struct _DXVA_MBctrl_H264 {
+ union {
+ struct {
+ UINT bSliceID : 8; /* 1 byte */
+ UINT MbType5Bits : 5;
+ UINT IntraMbFlag : 1;
+ UINT mb_field_decoding_flag : 1;
+ UINT transform_size_8x8_flag : 1; /* 2 bytes */
+ UINT HostResidDiff : 1;
+ UINT DcBlockCodedCrFlag : 1;
+ UINT DcBlockCodedCbFlag : 1;
+ UINT DcBlockCodedYFlag : 1;
+ UINT FilterInternalEdgesFlag : 1;
+ UINT FilterLeftMbEdgeFlag : 1;
+ UINT FilterTopMbEdgeFlag : 1;
+ UINT ReservedBit : 1;
+ UINT bMvQuantity : 8; /* 4 bytes */
+ };
+ UINT dwMBtype; /* 4 bytes so far */
+ };
+ USHORT CurrMbAddr; /* 6 bytes so far */
+ USHORT wPatternCode[3];/* YCbCr, 16 4x4 blks, 1b each */
+ /* 12 bytes so far */
+ UCHAR bQpPrime[3]; /* Y, Cb, Cr, need just 7b QpY */
+ UCHAR bMBresidDataQuantity;
+ ULONG dwMBdataLocation; /* offset into resid buffer */
+ /* 20 bytes so far */
+ union {
+ struct {
+/* start here for Intra MB's (9 useful bytes in branch) */
+ USHORT LumaIntraPredModes[4];/* 16 blocks, 4b each */
+ /* 28 bytes so far */
+ union {
+ struct {
+ UCHAR intra_chroma_pred_mode : 2;
+ UCHAR IntraPredAvailFlags : 5;
+ UCHAR ReservedIntraBit : 1;
+ };
+ UCHAR bMbIntraStruct; /* 29 bytes so far */
+ };
+ UCHAR ReservedIntra24Bits[3]; /* 32 bytes total */
+ };
+ struct {
+/* start here for non-Intra MB's (12 bytes in branch) */
+ UCHAR bSubMbShapes; /* 4 subMbs, 2b each */
+ UCHAR bSubMbPredModes; /* 4 subMBs, 2b each */
+ /* 22 bytes so far */
+ USHORT wMvBuffOffset; /* offset into MV buffer */
+ UCHAR bRefPicSelect[2][4]; /* 32 bytes total */
+ };
+ };
+} DXVA_MBctrl_H264, *LPDXVA_MBctrl_H264;
+
+/* H.264/AVC IndexA and IndexB data structure */
+typedef struct _DXVA_DeblockIndexAB_H264 {
+ UCHAR bIndexAinternal; /* 6b - could get from MB CC */
+ UCHAR bIndexBinternal; /* 6b - could get from MB CC */
+
+ UCHAR bIndexAleft0;
+ UCHAR bIndexBleft0;
+
+ UCHAR bIndexAleft1;
+ UCHAR bIndexBleft1;
+
+ UCHAR bIndexAtop0;
+ UCHAR bIndexBtop0;
+
+ UCHAR bIndexAtop1;
+ UCHAR bIndexBtop1;
+} DXVA_DeblockIndexAB_H264, *LPDXVA_DeblockIndexAB_H264;
+ /* 10 bytes in struct */
+
+/* H.264/AVC deblocking filter control data structure */
+typedef struct _DXVA_Deblock_H264 {
+ USHORT CurrMbAddr; /* dup info */ /* 2 bytes so far */
+ union {
+ struct {
+ UCHAR ReservedBit : 1;
+ UCHAR FieldModeCurrentMbFlag : 1; /* dup info */
+ UCHAR FieldModeLeftMbFlag : 1;
+ UCHAR FieldModeAboveMbFlag : 1;
+ UCHAR FilterInternal8x8EdgesFlag : 1;
+ UCHAR FilterInternal4x4EdgesFlag : 1;
+ UCHAR FilterLeftMbEdgeFlag : 1;
+ UCHAR FilterTopMbEdgeFlag : 1;
+ };
+ UCHAR FirstByte;
+ };
+ UCHAR Reserved8Bits; /* 4 bytes so far */
+
+ UCHAR bbSinternalLeftVert; /* 2 bits per bS */
+ UCHAR bbSinternalMidVert;
+
+ UCHAR bbSinternalRightVert;
+ UCHAR bbSinternalTopHorz; /* 8 bytes so far */
+
+ UCHAR bbSinternalMidHorz;
+ UCHAR bbSinternalBotHorz; /* 10 bytes so far */
+
+ USHORT wbSLeft0; /* 4 bits per bS (1 wasted) */
+ USHORT wbSLeft1; /* 4 bits per bS (1 wasted) */
+
+ USHORT wbSTop0; /* 4 bits per bS (1 wasted) */
+ USHORT wbSTop1; /* 4b (2 wasted) 18 bytes so far*/
+
+ DXVA_DeblockIndexAB_H264 IndexAB[3]; /* Y, Cb, Cr */
+
+} DXVA_Deblock_H264, *LPDXVA_Deblock_H264;/* 48 bytes */
+
+/* H.264/AVC film grain characteristics data structure */
+typedef struct _DXVA_FilmGrainCharacteristics {
+
+ USHORT wFrameWidthInMbsMinus1;
+ USHORT wFrameHeightInMbsMinus1;
+
+ DXVA_PicEntry_H264 InPic; /* flag is bot field flag */
+ DXVA_PicEntry_H264 OutPic; /* flag is field pic flag */
+
+ USHORT PicOrderCnt_offset;
+ INT CurrPicOrderCnt;
+ UINT StatusReportFeedbackNumber;
+
+ UCHAR model_id;
+ UCHAR separate_colour_description_present_flag;
+ UCHAR film_grain_bit_depth_luma_minus8;
+ UCHAR film_grain_bit_depth_chroma_minus8;
+
+ UCHAR film_grain_full_range_flag;
+ UCHAR film_grain_colour_primaries;
+ UCHAR film_grain_transfer_characteristics;
+ UCHAR film_grain_matrix_coefficients;
+
+ UCHAR blending_mode_id;
+ UCHAR log2_scale_factor;
+
+ UCHAR comp_model_present_flag[4];
+ UCHAR num_intensity_intervals_minus1[4];
+ UCHAR num_model_values_minus1[4];
+
+ UCHAR intensity_interval_lower_bound[3][16];
+ UCHAR intensity_interval_upper_bound[3][16];
+ SHORT comp_model_value[3][16][8];
+} DXVA_FilmGrainChar_H264, *LPDXVA_FilmGrainChar_H264;
+
+/* H.264/AVC status reporting data structure */
+typedef struct _DXVA_Status_H264 {
+ UINT StatusReportFeedbackNumber;
+ DXVA_PicEntry_H264 CurrPic; /* flag is bot field flag */
+ UCHAR field_pic_flag;
+ UCHAR bDXVA_Func;
+ UCHAR bBufType;
+ UCHAR bStatus;
+ UCHAR bReserved8Bits;
+ USHORT wNumMbsAffected;
+} DXVA_Status_H264, *LPDXVA_Status_H264;
+
+/* VC-1 status reporting data structure */
+typedef struct _DXVA_Status_VC1 {
+ USHORT StatusReportFeedbackNumber;
+ WORD wDecodedPictureIndex;
+ WORD wDeblockedPictureIndex;
+ UCHAR bPicStructure;
+ UCHAR bBufType;
+ UCHAR bStatus;
+ UCHAR bReserved8Bits;
+ USHORT wNumMbsAffected;
+} DXVA_Status_VC1, *LPDXVA_Status_VC1;
+
+
+#pragma pack(push, 16)
+
+
+typedef DXVA_MBctrl_I_HostResidDiff_1 *
+ LPDXVA_MBctrl_I_HostResidDiff_1;
+
+typedef DXVA_MBctrl_I_OffHostIDCT_1 *
+ LPDXVA_MBctrl_I_OffHostIDCT_1;
+
+typedef DXVA_MBctrl_P_HostResidDiff_1 *
+ LPDXVA_MBctrl_P_HostResidDiff_1;
+
+typedef DXVA_MBctrl_P_OffHostIDCT_1 *
+ LPDXVA_MBctrl_P_OffHostIDCT_1;
+
+
+#pragma pack(pop)
+
+/*
+ * Other forms of pictures are constructed in the obvious way
+ * from the above by adjusting the number of residual difference
+ * blocks, the number of motion vectors per macroblock, etc.
+ */
+
+#define readDXVA_MBskipsFollowing(ptr) (((ptr)->dwMB_SNL & 0xFF000000) >> 24)
+#define readDXVA_MBdataLocation(ptr) (((ptr)->dwMB_SNL & 0x00FFFFFF))
+
+#define writeDXVA_MB_SNL(ptr, skips, dloc) ((ptr)->dwMB_SNL = (((skips) << 24) | (dloc)))
+#define setDXVA_MBskipsFollowing(ptr, skips) ((ptr)->dwMB_SNL |= ((skips) << 24))
+#define setDXVA_MBdataLocation(ptr, dloc) ((ptr)->dwMB_SNL |= (dloc))
+
+#define readDXVA_MvertFieldSel_3(ptr) (((ptr)->wMBtype & 0x8000) >> 15)
+#define readDXVA_MvertFieldSel_2(ptr) (((ptr)->wMBtype & 0x4000) >> 14)
+#define readDXVA_MvertFieldSel_1(ptr) (((ptr)->wMBtype & 0x2000) >> 13)
+#define readDXVA_MvertFieldSel_0(ptr) (((ptr)->wMBtype & 0x1000) >> 12)
+#define readDXVA_ReservedBits(ptr) (((ptr)->wMBtype & 0x0800) >> 11)
+#define readDXVA_HostResidDiff(ptr) (((ptr)->wMBtype & 0x0400) >> 10)
+#define readDXVA_MotionType(ptr) (((ptr)->wMBtype & 0x0300) >> 8)
+#define readDXVA_MBscanMethod(ptr) (((ptr)->wMBtype & 0x00C0) >> 6)
+#define readDXVA_FieldResidual(ptr) (((ptr)->wMBtype & 0x0020) >> 5)
+#define readDXVA_H261LoopFilter(ptr) (((ptr)->wMBtype & 0x0010) >> 4)
+#define readDXVA_Motion4MV(ptr) (((ptr)->wMBtype & 0x0008) >> 3)
+#define readDXVA_MotionBackward(ptr) (((ptr)->wMBtype & 0x0004) >> 2)
+#define readDXVA_MotionForward(ptr) (((ptr)->wMBtype & 0x0002) >> 1)
+#define readDXVA_IntraMacroblock(ptr) (((ptr)->wMBtype & 0x0001))
+
+#define setDXVA_MvertFieldSel_3(ptr) ((ptr)->wMBtype |= 0x8000)
+#define setDXVA_MvertFieldSel_2(ptr) ((ptr)->wMBtype |= 0x4000)
+#define setDXVA_MvertFieldSel_1(ptr) ((ptr)->wMBtype |= 0x2000)
+#define setDXVA_MvertFieldSel_0(ptr) ((ptr)->wMBtype |= 0x1000)
+#define setDXVA_ReservedBits(ptr) ((ptr)->wMBtype |= 0x0800)
+#define setDXVA_HostResidDiff(ptr) ((ptr)->wMBtype |= 0x0400)
+#define setDXVA_MotionType(ptr, value) ((ptr)->wMBtype |= ((value) << 8))
+#define setDXVA_MBscanMethod(ptr, value) ((ptr)->wMBtype |= ((value) << 6))
+#define setDXVA_FieldResidual(ptr) ((ptr)->wMBtype |= 0x0020)
+#define setDXVA_H261LoopFilter(ptr) ((ptr)->wMBtype |= 0x0010)
+#define setDXVA_Motion4MV(ptr) ((ptr)->wMBtype |= 0x0008)
+#define setDXVA_MotionBackward(ptr) ((ptr)->wMBtype |= 0x0004)
+#define setDXVA_MotionForward(ptr) ((ptr)->wMBtype |= 0x0002)
+#define setDXVA_IntraMacroblock(ptr) ((ptr)->wMBtype |= 0x0001)
+
+#define readDXVA_Y___0coded(ptr) (((ptr)->wPatternCode & 0x0800) >> 11)
+#define readDXVA_Y___1coded(ptr) (((ptr)->wPatternCode & 0x0400) >> 10)
+#define readDXVA_Y___2coded(ptr) (((ptr)->wPatternCode & 0x0200) >> 9)
+#define readDXVA_Y___3coded(ptr) (((ptr)->wPatternCode & 0x0100) >> 8)
+#define readDXVA_Cb__4coded(ptr) (((ptr)->wPatternCode & 0x0080) >> 7)
+#define readDXVA_Cr__5coded(ptr) (((ptr)->wPatternCode & 0x0040) >> 6)
+#define readDXVA_Cb__6coded(ptr) (((ptr)->wPatternCode & 0x0020) >> 5)
+#define readDXVA_Cr__7coded(ptr) (((ptr)->wPatternCode & 0x0010) >> 4)
+#define readDXVA_Cb__8coded(ptr) (((ptr)->wPatternCode & 0x0008) >> 3)
+#define readDXVA_Cb__9coded(ptr) (((ptr)->wPatternCode & 0x0004) >> 2)
+#define readDXVA_Cr_10coded(ptr) (((ptr)->wPatternCode & 0x0002) >> 1)
+#define readDXVA_Cr_11coded(ptr) (((ptr)->wPatternCode & 0x0001))
+
+#define readDXVA_Y___0oflow(ptr) (((ptr)->wPC_Overflow & 0x0800) >> 11)
+#define readDXVA_Y___1oflow(ptr) (((ptr)->wPC_Overflow & 0x0400) >> 10)
+#define readDXVA_Y___2oflow(ptr) (((ptr)->wPC_Overflow & 0x0200) >> 9)
+#define readDXVA_Y___3oflow(ptr) (((ptr)->wPC_Overflow & 0x0100) >> 8)
+#define readDXVA_Cb__4oflow(ptr) (((ptr)->wPC_Overflow & 0x0080) >> 7)
+#define readDXVA_Cr__5oflow(ptr) (((ptr)->wPC_Overflow & 0x0040) >> 6)
+#define readDXVA_Cb__6oflow(ptr) (((ptr)->wPC_Overflow & 0x0020) >> 5)
+#define readDXVA_Cr__7oflow(ptr) (((ptr)->wPC_Overflow & 0x0010) >> 4)
+#define readDXVA_Cb__8oflow(ptr) (((ptr)->wPC_Overflow & 0x0008) >> 3)
+#define readDXVA_Cb__9oflow(ptr) (((ptr)->wPC_Overflow & 0x0004) >> 2)
+#define readDXVA_Cr_10oflow(ptr) (((ptr)->wPC_Overflow & 0x0002) >> 1)
+#define readDXVA_Cr_11oflow(ptr) (((ptr)->wPC_Overflow & 0x0001))
+
+#pragma pack(pop, BeforeDXVApacking)
+#endif /* __DIRECTX_VA_DECODER__ */
+
+
+// -------------------------------------------------------------------------
+//
+// D3DFORMAT describes a pixel memory layout; the DXVA sample format contains
+// additional information that describes how the pixels should be interpreted.
+//
+// DXVA Extended color data - occupies the SampleFormat DWORD
+// data fields.
+// -------------------------------------------------------------------------
+#ifndef __DIRECTX_VA_SAMPLEFORMAT__
+#define __DIRECTX_VA_SAMPLEFORMAT__
+
+typedef enum _DXVA_SampleFormat {
+ DXVA_SampleFormatMask = 0xFF, // 8 bits used for DXVA Sample format
+ DXVA_SampleUnknown = 0,
+ DXVA_SamplePreviousFrame = 1,
+ DXVA_SampleProgressiveFrame = 2,
+ DXVA_SampleFieldInterleavedEvenFirst = 3,
+ DXVA_SampleFieldInterleavedOddFirst = 4,
+ DXVA_SampleFieldSingleEven = 5,
+ DXVA_SampleFieldSingleOdd = 6,
+ DXVA_SampleSubStream = 7
+} DXVA_SampleFormat;
+
+#define DXVA_ExtractSampleFormat(_sf) ((_sf) & (DXVA_SampleFormatMask))
+
+#define DXVA_ExtractExtColorData(_sf, _Mask, _Shift) \
+ (((_sf) & (_Mask)) >> (_Shift))
+
+#define DXVABitMask(__n) (~((~0) << __n))
+#define DXVA_ExtColorData_ShiftBase 8
+#define DXVAColorMask(__bits,__base) (DXVABitMask(__bits) << (__base))
+
+typedef enum _DXVA_VideoTransferFunction
+{
+ DXVA_VideoTransFuncShift = (DXVA_ExtColorData_ShiftBase + 19),
+ DXVA_VideoTransFuncMask = DXVAColorMask(5, DXVA_VideoTransFuncShift),
+
+ DXVA_VideoTransFunc_Unknown = 0,
+ DXVA_VideoTransFunc_10 = 1,
+ DXVA_VideoTransFunc_18 = 2,
+ DXVA_VideoTransFunc_20 = 3,
+ DXVA_VideoTransFunc_22 = 4,
+ DXVA_VideoTransFunc_22_709 = 5,
+ DXVA_VideoTransFunc_22_240M = 6,
+ DXVA_VideoTransFunc_22_8bit_sRGB = 7,
+ DXVA_VideoTransFunc_28 = 8
+} DXVA_VideoTransferFunction;
+
+typedef enum _DXVA_VideoPrimaries
+{
+ DXVA_VideoPrimariesShift = (DXVA_ExtColorData_ShiftBase + 14),
+ DXVA_VideoPrimariesMask = DXVAColorMask(5, DXVA_VideoPrimariesShift),
+
+ DXVA_VideoPrimaries_Unknown = 0,
+ DXVA_VideoPrimaries_reserved = 1,
+ DXVA_VideoPrimaries_BT709 = 2,
+ DXVA_VideoPrimaries_BT470_2_SysM = 3,
+ DXVA_VideoPrimaries_BT470_2_SysBG = 4,
+ DXVA_VideoPrimaries_SMPTE170M = 5,
+ DXVA_VideoPrimaries_SMPTE240M = 6,
+ DXVA_VideoPrimaries_EBU3213 = 7,
+ DXVA_VideoPrimaries_SMPTE_C = 8
+} DXVA_VideoPrimaries;
+
+typedef enum _DXVA_VideoLighting
+{
+ DXVA_VideoLightingShift = (DXVA_ExtColorData_ShiftBase + 10),
+ DXVA_VideoLightingMask = DXVAColorMask(4, DXVA_VideoLightingShift),
+
+ DXVA_VideoLighting_Unknown = 0,
+ DXVA_VideoLighting_bright = 1,
+ DXVA_VideoLighting_office = 2,
+ DXVA_VideoLighting_dim = 3,
+ DXVA_VideoLighting_dark = 4
+} DXVA_VideoLighting;
+
+typedef enum _DXVA_VideoTransferMatrix
+{
+ DXVA_VideoTransferMatrixShift = (DXVA_ExtColorData_ShiftBase + 7),
+ DXVA_VideoTransferMatrixMask = DXVAColorMask(3, DXVA_VideoTransferMatrixShift),
+
+ DXVA_VideoTransferMatrix_Unknown = 0,
+ DXVA_VideoTransferMatrix_BT709 = 1,
+ DXVA_VideoTransferMatrix_BT601 = 2,
+ DXVA_VideoTransferMatrix_SMPTE240M = 3
+} DXVA_VideoTransferMatrix;
+
+typedef enum _DXVA_NominalRange
+{
+ DXVA_NominalRangeShift = (DXVA_ExtColorData_ShiftBase + 4),
+ DXVA_NominalRangeMask = DXVAColorMask(3, DXVA_NominalRangeShift),
+
+ DXVA_NominalRange_Unknown = 0,
+ DXVA_NominalRange_Normal = 1,
+ DXVA_NominalRange_Wide = 2,
+
+ DXVA_NominalRange_0_255 = 1,
+ DXVA_NominalRange_16_235 = 2,
+ DXVA_NominalRange_48_208 = 3
+} DXVA_NominalRange;
+
+
+typedef enum _DXVA_VideoChromaSubsampling
+{
+ DXVA_VideoChromaSubsamplingShift = (DXVA_ExtColorData_ShiftBase + 0),
+ DXVA_VideoChromaSubsamplingMask = DXVAColorMask(4, DXVA_VideoChromaSubsamplingShift),
+
+ DXVA_VideoChromaSubsampling_Unknown = 0,
+ DXVA_VideoChromaSubsampling_ProgressiveChroma = 0x8,
+ DXVA_VideoChromaSubsampling_Horizontally_Cosited = 0x4,
+ DXVA_VideoChromaSubsampling_Vertically_Cosited = 0x2,
+ DXVA_VideoChromaSubsampling_Vertically_AlignedChromaPlanes = 0x1,
+
+ // 4:2:0 variations
+ DXVA_VideoChromaSubsampling_MPEG2 = DXVA_VideoChromaSubsampling_Horizontally_Cosited |
+ DXVA_VideoChromaSubsampling_Vertically_AlignedChromaPlanes,
+
+ DXVA_VideoChromaSubsampling_MPEG1 = DXVA_VideoChromaSubsampling_Vertically_AlignedChromaPlanes,
+
+ DXVA_VideoChromaSubsampling_DV_PAL = DXVA_VideoChromaSubsampling_Horizontally_Cosited |
+ DXVA_VideoChromaSubsampling_Vertically_Cosited,
+ // 4:4:4, 4:2:2, 4:1:1
+ DXVA_VideoChromaSubsampling_Cosited = DXVA_VideoChromaSubsampling_Horizontally_Cosited |
+ DXVA_VideoChromaSubsampling_Vertically_Cosited |
+ DXVA_VideoChromaSubsampling_Vertically_AlignedChromaPlanes,
+} DXVA_VideoChromaSubsampling;
+
+typedef struct _DXVA_ExtendedFormat
+{
+ UINT SampleFormat : 8; // See DXVA_SampleFormat
+ UINT VideoChromaSubsampling : 4; // See DXVA_VideoChromaSubSampling
+ DXVA_NominalRange NominalRange : 3; // See DXVA_NominalRange
+ DXVA_VideoTransferMatrix VideoTransferMatrix : 3; // See DXVA_VideoTransferMatrix
+ DXVA_VideoLighting VideoLighting : 4; // See DXVA_VideoLighting
+ DXVA_VideoPrimaries VideoPrimaries : 5; // See DXVA_VideoPrimaries
+ DXVA_VideoTransferFunction VideoTransferFunction : 5; // See DXVA_VideoTransferFunction
+} DXVA_ExtendedFormat;
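The extended color data shares one DWORD with the 8-bit sample format: DXVA_ExtendedFormat is the bit-field view of it, and the mask/shift enums above drive DXVA_ExtractExtColorData for the same job. A sketch of decoding such a DWORD, under the same include assumptions and with an invented helper name:

#include <windows.h>
#include "dxva.h"   /* assumed include path for this header */

/* Pull a few fields out of a packed SampleFormat DWORD (for example the one
 * carried by DXVA_VideoSample2 later in this header). */
static void describe_sample_format(DWORD fmt)
{
    DWORD basic  = DXVA_ExtractSampleFormat(fmt);             /* a DXVA_SampleFormat value */
    DWORD matrix = DXVA_ExtractExtColorData(fmt,
                       DXVA_VideoTransferMatrixMask,
                       DXVA_VideoTransferMatrixShift);        /* e.g. DXVA_VideoTransferMatrix_BT709 */
    DWORD range  = DXVA_ExtractExtColorData(fmt,
                       DXVA_NominalRangeMask,
                       DXVA_NominalRangeShift);               /* e.g. DXVA_NominalRange_16_235 */
    (void)basic; (void)matrix; (void)range;                   /* use as needed */
}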
+
+#endif
+
+
+
+// -------------------------------------------------------------------------
+//
+// The definitions that follow describe the video de-interlace interface
+// between the VMR and the graphics device driver. This interface is not
+// accessible via the IAMVideoAccelerator interface.
+//
+// -------------------------------------------------------------------------
+//
+#ifndef __DIRECTX_VA_DEINTERLACE__
+#define __DIRECTX_VA_DEINTERLACE__
+
+typedef LONGLONG REFERENCE_TIME;
+
+DEFINE_GUID(DXVA_DeinterlaceBobDevice,
+ 0x335aa36e,0x7884,0x43a4,0x9c,0x91,0x7f,0x87,0xfa,0xf3,0xe3,0x7e);
+
+DEFINE_GUID(DXVA_DeinterlaceContainerDevice,
+ 0x0e85cb93,0x3046,0x4ff0,0xae,0xcc,0xd5,0x8c,0xb5,0xf0,0x35,0xfd);
+
+
+#if (DIRECT3D_VERSION < 0x0800) || !defined(DIRECT3D_VERSION)
+typedef DWORD D3DFORMAT;
+enum {
+ D3DPOOL_DEFAULT = 0,
+ D3DPOOL_MANAGED = 1,
+ D3DPOOL_SYSTEMMEM = 2,
+ D3DPOOL_SCRATCH = 3,
+ D3DPOOL_LOCALVIDMEM = 4,
+ D3DPOOL_NONLOCALVIDMEM = 5,
+ D3DPOOL_FORCE_DWORD = 0x7fffffff
+};
+#endif
+
+
+// -------------------------------------------------------------------------
+// data structures shared by User mode and Kernel mode.
+// -------------------------------------------------------------------------
+//
+
+typedef struct _DXVA_Frequency {
+ DWORD Numerator;
+ DWORD Denominator;
+} DXVA_Frequency;
+
+typedef struct _DXVA_VideoDesc {
+ DWORD Size;
+ DWORD SampleWidth;
+ DWORD SampleHeight;
+ DWORD SampleFormat; // also contains extended color data
+ D3DFORMAT d3dFormat;
+ DXVA_Frequency InputSampleFreq;
+ DXVA_Frequency OutputFrameFreq;
+} DXVA_VideoDesc, *LPDXVA_VideoDesc;
+
+typedef enum _DXVA_VideoProcessCaps {
+ DXVA_VideoProcess_None = 0x0000,
+ DXVA_VideoProcess_YUV2RGB = 0x0001,
+ DXVA_VideoProcess_StretchX = 0x0002,
+ DXVA_VideoProcess_StretchY = 0x0004,
+ DXVA_VideoProcess_AlphaBlend = 0x0008,
+ DXVA_VideoProcess_SubRects = 0x0010,
+ DXVA_VideoProcess_SubStreams = 0x0020,
+ DXVA_VideoProcess_SubStreamsExtended = 0x0040,
+ DXVA_VideoProcess_YUV2RGBExtended = 0x0080,
+ DXVA_VideoProcess_AlphaBlendExtended = 0x0100
+} DXVA_VideoProcessCaps;
+
+typedef enum _DXVA_DeinterlaceTech {
+
+ // the algorithm is unknown or proprietary
+ DXVA_DeinterlaceTech_Unknown = 0x0000,
+
+ // the algorithm creates the missing lines by repeating
+ // the line either above or below it - this method will look very jaggy and
+ // isn't recommended
+ DXVA_DeinterlaceTech_BOBLineReplicate = 0x0001,
+
+ // The algorithm creates the missing lines by vertically stretching each
+ // video field by a factor of two by averaging two lines
+ DXVA_DeinterlaceTech_BOBVerticalStretch = 0x0002,
+
+ // As above, but using a [-1, 9, 9, -1]/16 filter across four lines.
+ DXVA_DeinterlaceTech_BOBVerticalStretch4Tap = 0x0100,
+
+ // the pixels in the missing line are recreated by a median filtering operation
+ DXVA_DeinterlaceTech_MedianFiltering = 0x0004,
+
+ // the pixels in the missing line are recreated by an edge filter.
+ // In this process, spatial directional filters are applied to determine
+ // the orientation of edges in the picture content, and missing
+ // pixels are created by filtering along (rather than across) the
+ // detected edges.
+ DXVA_DeinterlaceTech_EdgeFiltering = 0x0010,
+
+ // the pixels in the missing line are recreated by switching on a field by
+ // field basis between using either spatial or temporal interpolation
+ // depending on the amount of motion.
+ DXVA_DeinterlaceTech_FieldAdaptive = 0x0020,
+
+ // the pixels in the missing line are recreated by switching on a pixel by pixel
+ // basis between using either spatial or temporal interpolation depending on
+ // the amount of motion.
+ DXVA_DeinterlaceTech_PixelAdaptive = 0x0040,
+
+ // Motion Vector Steering identifies objects within a sequence of video
+ // fields. The missing pixels are recreated after first aligning the
+ // movement axes of the individual objects in the scene to make them
+ // parallel with the time axis.
+ DXVA_DeinterlaceTech_MotionVectorSteered = 0x0080
+
+} DXVA_DeinterlaceTech;
+
+
+typedef struct _DXVA_VideoSample {
+ REFERENCE_TIME rtStart;
+ REFERENCE_TIME rtEnd;
+ DXVA_SampleFormat SampleFormat; // only lower 8 bits used
+ VOID* lpDDSSrcSurface;
+} DXVA_VideoSample, *LPDXVA_VideoSample;
+
+
+
+// -------------------------------------------------------------------------
+// DeinterlaceBltEx declarations
+// -------------------------------------------------------------------------
+//
+
+typedef enum _DXVA_SampleFlags {
+ DXVA_SampleFlagsMask = DXVABit(3)|DXVABit(2)|DXVABit(1)|DXVABit(0),
+
+ DXVA_SampleFlag_Palette_Changed = 0x0001,
+ DXVA_SampleFlag_SrcRect_Changed = 0x0002,
+ DXVA_SampleFlag_DstRect_Changed = 0x0004,
+ DXVA_SampleFlag_ColorData_Changed = 0x0008,
+} DXVA_SampleFlags;
+
+typedef enum _DXVA_DestinationFlags {
+ DXVA_DestinationFlagMask = DXVABit(3)|DXVABit(2)|DXVABit(1)|DXVABit(0),
+
+ DXVA_DestinationFlag_Background_Changed = 0x0001,
+ DXVA_DestinationFlag_TargetRect_Changed = 0x0002,
+ DXVA_DestinationFlag_ColorData_Changed = 0x0004,
+ DXVA_DestinationFlag_Alpha_Changed = 0x0008
+} DXVA_DestinationFlags;
+
+
+
+
+typedef struct _DXVA_VideoSample2 {
+#ifdef _WIN64
+ DWORD Size;
+ DWORD Reserved;
+#endif
+ REFERENCE_TIME rtStart;
+ REFERENCE_TIME rtEnd;
+ DWORD SampleFormat; // cast to DXVA_ExtendedFormat, or use Extract macros
+ DWORD SampleFlags;
+ VOID* lpDDSSrcSurface;
+ RECT rcSrc;
+ RECT rcDst;
+ DXVA_AYUVsample2 Palette[16];
+} DXVA_VideoSample2, *LPDXVA_VideoSample2;
+
+typedef struct _DXVA_DeinterlaceCaps {
+ DWORD Size;
+ DWORD NumPreviousOutputFrames;
+ DWORD InputPool;
+ DWORD NumForwardRefSamples;
+ DWORD NumBackwardRefSamples;
+ D3DFORMAT d3dOutputFormat;
+ DXVA_VideoProcessCaps VideoProcessingCaps;
+ DXVA_DeinterlaceTech DeinterlaceTechnology;
+} DXVA_DeinterlaceCaps, *LPDXVA_DeinterlaceCaps;
+
+
+
+
+// -------------------------------------------------------------------------
+// Data types used with RenderMoComp in kernel mode
+// -------------------------------------------------------------------------
+//
+
+// Function codes for RenderMoComp
+
+#define MAX_DEINTERLACE_SURFACES 32
+
+#ifdef _WIN64
+//
+// These structures are used for thunking 32 bit DeinterlaceBltEx calls on
+// 64 bit drivers.
+//
+typedef struct _DXVA_VideoSample32 {
+ REFERENCE_TIME rtStart;
+ REFERENCE_TIME rtEnd;
+ DWORD SampleFormat;
+ DWORD SampleFlags;
+ DWORD lpDDSSrcSurface; // 32 bit pointer size
+ RECT rcSrc;
+ RECT rcDst;
+ DXVA_AYUVsample2 Palette[16];
+ // DWORD Pad;
+ // 4 bytes of padding added by the compiler to align the struct to 8 bytes.
+} DXVA_VideoSample32;
+
+typedef struct _DXVA_DeinterlaceBltEx32 {
+ DWORD Size;
+ DXVA_AYUVsample2 BackgroundColor;
+ RECT rcTarget;
+ REFERENCE_TIME rtTarget;
+ DWORD NumSourceSurfaces;
+ FLOAT Alpha;
+ DXVA_VideoSample32 Source[MAX_DEINTERLACE_SURFACES];
+ DWORD DestinationFormat;
+ DWORD DestinationFlags;
+} DXVA_DeinterlaceBltEx32;
+#endif
+
+
+typedef struct _DXVA_DeinterlaceBlt {
+ DWORD Size;
+ DWORD Reserved;
+ REFERENCE_TIME rtTarget;
+ RECT DstRect;
+ RECT SrcRect;
+ DWORD NumSourceSurfaces;
+ FLOAT Alpha;
+ DXVA_VideoSample Source[MAX_DEINTERLACE_SURFACES];
+} DXVA_DeinterlaceBlt;
+
+#define DXVA_DeinterlaceBltFnCode 0x01
+// lpInput => DXVA_DeinterlaceBlt*
+// lpOutput => NULL /* not currently used */
+
+
+typedef struct _DXVA_DeinterlaceBltEx {
+ DWORD Size;
+ DXVA_AYUVsample2 BackgroundColor;
+ RECT rcTarget;
+ REFERENCE_TIME rtTarget;
+ DWORD NumSourceSurfaces;
+ FLOAT Alpha;
+ DXVA_VideoSample2 Source[MAX_DEINTERLACE_SURFACES];
+ DWORD DestinationFormat;
+ DWORD DestinationFlags;
+} DXVA_DeinterlaceBltEx;
+
+#define DXVA_DeinterlaceBltExFnCode 0x02
+// lpInput => DXVA_DeinterlaceBltEx*
+// lpOutput => NULL /* not currently used */
+
+
+#define MAX_DEINTERLACE_DEVICE_GUIDS 32
+typedef struct _DXVA_DeinterlaceQueryAvailableModes {
+ DWORD Size;
+ DWORD NumGuids;
+ GUID Guids[MAX_DEINTERLACE_DEVICE_GUIDS];
+} DXVA_DeinterlaceQueryAvailableModes;
+
+#define DXVA_DeinterlaceQueryAvailableModesFnCode 0x01
+// lpInput => DXVA_VideoDesc*
+// lpOutput => DXVA_DeinterlaceQueryAvailableModes*
+
+
+typedef struct _DXVA_DeinterlaceQueryModeCaps {
+ DWORD Size;
+ GUID Guid;
+ DXVA_VideoDesc VideoDesc;
+} DXVA_DeinterlaceQueryModeCaps;
+
+#define DXVA_DeinterlaceQueryModeCapsFnCode 0x02
+// lpInput => DXVA_DeinterlaceQueryModeCaps*
+// lpOutput => DXVA_DeinterlaceCaps*
+
+#endif /* __DIRECTX_VA_DEINTERLACE__ */
+
+
+// -------------------------------------------------------------------------
+//
+// The definitions that follow describe the video ProcAmp interface
+// between the VMR and the graphics device driver. This interface is not
+// accessible via the IAMVideoAccelerator interface.
+//
+// -------------------------------------------------------------------------
+//
+#ifndef __DIRECTX_VA_PROCAMPCONTROL__
+#define __DIRECTX_VA_PROCAMPCONTROL__
+
+DEFINE_GUID(DXVA_ProcAmpControlDevice,
+ 0x9f200913,0x2ffd,0x4056,0x9f,0x1e,0xe1,0xb5,0x08,0xf2,0x2d,0xcf);
+
+typedef enum _DXVA_ProcAmpControlProp {
+ DXVA_ProcAmp_None = 0x0000,
+ DXVA_ProcAmp_Brightness = 0x0001,
+ DXVA_ProcAmp_Contrast = 0x0002,
+ DXVA_ProcAmp_Hue = 0x0004,
+ DXVA_ProcAmp_Saturation = 0x0008
+} DXVA_ProcAmpControlProp;
+
+
+typedef struct _DXVA_ProcAmpControlCaps {
+ DWORD Size;
+ DWORD InputPool;
+ D3DFORMAT d3dOutputFormat;
+ DWORD ProcAmpControlProps;// see DXVA_ProcAmpControlProp
+ DWORD VideoProcessingCaps;// see DXVA_VideoProcessCaps
+} DXVA_ProcAmpControlCaps, *LPDXVA_ProcAmpControlCaps;
+
+#define DXVA_ProcAmpControlQueryCapsFnCode 0x03
+// lpInput => DXVA_VideoDesc*
+// lpOutput => DXVA_ProcAmpControlCaps*
+
+
+typedef struct _DXVA_ProcAmpControlQueryRange {
+ DWORD Size;
+ DXVA_ProcAmpControlProp ProcAmpControlProp;
+ DXVA_VideoDesc VideoDesc;
+} DXVA_ProcAmpControlQueryRange, *LPDXVA_ProcAmpControlQueryRange;
+
+typedef struct _DXVA_VideoPropertyRange {
+ FLOAT MinValue;
+ FLOAT MaxValue;
+ FLOAT DefaultValue;
+ FLOAT StepSize;
+} DXVA_VideoPropertyRange, *LPDXVA_VideoPropertyRange;
+
+#define DXVA_ProcAmpControlQueryRangeFnCode 0x04
+// lpInput => DXVA_ProcAmpControlQueryRange*
+// lpOutput => DXVA_VideoPropertyRange*
+
+
+typedef struct _DXVA_ProcAmpControlBlt {
+ DWORD Size;
+ RECT DstRect;
+ RECT SrcRect;
+ FLOAT Alpha;
+ FLOAT Brightness;
+ FLOAT Contrast;
+ FLOAT Hue;
+ FLOAT Saturation;
+} DXVA_ProcAmpControlBlt;
+
+#define DXVA_ProcAmpControlBltFnCode 0x01
+// lpInput => DXVA_ProcAmpControlBlt*
+// lpOutput => NULL /* not currently used */
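+
+// Illustrative sketch (not part of the DXVA headers): a caller could use the
+// DXVA_VideoPropertyRange returned by the ProcAmpControlQueryRange function to
+// clamp and quantize a requested value before filling DXVA_ProcAmpControlBlt.
+// The helper and variable names below are hypothetical.
+//
+//   FLOAT ClampToRange(FLOAT Value, const DXVA_VideoPropertyRange* pRange)
+//   {
+//       if (Value < pRange->MinValue) Value = pRange->MinValue;
+//       if (Value > pRange->MaxValue) Value = pRange->MaxValue;
+//       if (pRange->StepSize <= 0.0f)      // no step information available
+//           return Value;
+//       // snap to the nearest supported step above MinValue
+//       return pRange->MinValue +
+//              pRange->StepSize * (FLOAT)(int)((Value - pRange->MinValue) /
+//                                              pRange->StepSize + 0.5f);
+//   }
+//
+//   procAmpBlt.Brightness = ClampToRange(requestedBrightness, &brightnessRange);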
+
+#endif /* __DIRECTX_VA_PROCAMPCONTROL__ */
+
+
+// -------------------------------------------------------------------------
+//
+// The definitions that follow describe the Certified Output Protection
+// Protocol between the VMR and the graphics device driver. This interface
+// is not accessible via the IAMVideoAccelerator interface.
+//
+// -------------------------------------------------------------------------
+//
+
+#ifndef __DIRECTX_VA_CERTOUTPUTPROTECT__
+#define __DIRECTX_VA_CERTOUTPUTPROTECT__
+
+
+DEFINE_GUID(DXVA_COPPDevice,
+ 0xd2457add,0x8999,0x45ed,0x8a,0x8a,0xd1,0xaa,0x04,0x7b,0xa4,0xd5);
+
+
+// -------------------------------------------------------------------------
+// COPPGetCertificateLength
+// -------------------------------------------------------------------------
+#define DXVA_COPPGetCertificateLengthFnCode 0x01
+// lpInput => NULL
+// lpOutput => DWORD*
+
+
+// -------------------------------------------------------------------------
+// COPPKeyExchange
+// -------------------------------------------------------------------------
+#define DXVA_COPPKeyExchangeFnCode 0x02
+// lpInputData => NULL
+// lpOutputData => GUID*
+
+
+// -------------------------------------------------------------------------
+// COPPSequenceStart
+// -------------------------------------------------------------------------
+typedef struct _DXVA_COPPSignature {
+ UCHAR Signature[256];
+} DXVA_COPPSignature, *LPDXVA_COPPSignature;
+
+#define DXVA_COPPSequenceStartFnCode 0x03
+// lpInputData => DXVA_COPPSignature*
+// lpOutputData => NULL
+
+
+
+// -------------------------------------------------------------------------
+// COPPCommand
+// -------------------------------------------------------------------------
+typedef struct _DXVA_COPPCommand {
+ GUID macKDI; // 16 bytes
+ GUID guidCommandID; // 16 bytes
+ ULONG dwSequence; // 4 bytes
+ ULONG cbSizeData; // 4 bytes
+ UCHAR CommandData[4056]; // 4056 bytes (4056+4+4+16+16 = 4096)
+} DXVA_COPPCommand, *LPDXVA_COPPCommand;
+
+#define DXVA_COPPCommandFnCode 0x04
+// lpInputData => DXVA_COPPCommand*
+// lpOutputData => NULL
+
+
+DEFINE_GUID(DXVA_COPPSetProtectionLevel,
+ 0x9bb9327c,0x4eb5,0x4727,0x9f,0x00,0xb4,0x2b,0x09,0x19,0xc0,0xda);
+
+typedef struct _DXVA_COPPSetProtectionLevelCmdData {
+ ULONG ProtType;
+ ULONG ProtLevel;
+ ULONG ExtendedInfoChangeMask;
+ ULONG ExtendedInfoData;
+} DXVA_COPPSetProtectionLevelCmdData;
+
+typedef enum _COPP_DPCP_Protection_Level {
+ COPP_DPCP_Level0 = 0,
+ COPP_DPCP_LevelMin = COPP_DPCP_Level0,
+ COPP_DPCP_Level1 = 1,
+ COPP_DPCP_LevelMax = COPP_DPCP_Level1,
+ COPP_DPCP_ForceDWORD = 0x7fffffff
+} COPP_DPCP_Protection_Level;
+
+// Set the HDCP protection level - (0 - 1 DWORD, 4 bytes)
+
+typedef enum _COPP_HDCP_Protection_Level {
+ COPP_HDCP_Level0 = 0,
+ COPP_HDCP_LevelMin = COPP_HDCP_Level0,
+ COPP_HDCP_Level1 = 1,
+ COPP_HDCP_LevelMax = COPP_HDCP_Level1,
+ COPP_HDCP_ForceDWORD = 0x7fffffff
+} COPP_HDCP_Protection_Level;
+
+typedef enum _COPP_CGMSA_Protection_Level {
+ COPP_CGMSA_Disabled = 0,
+ COPP_CGMSA_LevelMin = COPP_CGMSA_Disabled,
+ COPP_CGMSA_CopyFreely = 1,
+ COPP_CGMSA_CopyNoMore = 2,
+ COPP_CGMSA_CopyOneGeneration = 3,
+ COPP_CGMSA_CopyNever = 4,
+ COPP_CGMSA_RedistributionControlRequired = 0x08,
+ COPP_CGMSA_LevelMax = (COPP_CGMSA_RedistributionControlRequired + COPP_CGMSA_CopyNever),
+ COPP_CGMSA_ForceDWORD = 0x7fffffff
+} COPP_CGMSA_Protection_Level;
+
+typedef enum _COPP_ACP_Protection_Level {
+ COPP_ACP_Level0 = 0,
+ COPP_ACP_LevelMin = COPP_ACP_Level0,
+ COPP_ACP_Level1 = 1,
+ COPP_ACP_Level2 = 2,
+ COPP_ACP_Level3 = 3,
+ COPP_ACP_LevelMax = COPP_ACP_Level3,
+ COPP_ACP_ForceDWORD = 0x7fffffff
+} COPP_ACP_Protection_Level;
+
+#define COPP_NoProtectionLevelAvailable -1
+#define COPP_DefaultProtectionLevel 0
+
+
+//
+// Bit flags of possible protection types. Note that it is possible to apply
+// different protection settings to a single connector.
+//
+enum {
+ COPP_ProtectionType_Unknown = 0x80000000,
+ COPP_ProtectionType_None = 0x00000000,
+ COPP_ProtectionType_HDCP = 0x00000001,
+ COPP_ProtectionType_ACP = 0x00000002,
+ COPP_ProtectionType_CGMSA = 0x00000004,
+ COPP_ProtectionType_DPCP = 0x00000010,
+ COPP_ProtectionType_Mask = 0x80000017,
+ COPP_ProtectionType_Reserved = 0x7FFFFFF8
+};
+
+DEFINE_GUID(DXVA_COPPSetSignaling,
+ 0x9a631a5, 0xd684, 0x4c60, 0x8e, 0x4d, 0xd3, 0xbb, 0xf, 0xb, 0xe3, 0xee);
+
+typedef struct _DXVA_COPPSetSignalingCmdData {
+ ULONG ActiveTVProtectionStandard; // See COPP_TVProtectionStandard
+ ULONG AspectRatioChangeMask1;
+ ULONG AspectRatioData1; // See COPP_ImageAspectRatio_EN300294 for ETSI EN 300 294 values
+ ULONG AspectRatioChangeMask2;
+ ULONG AspectRatioData2;
+ ULONG AspectRatioChangeMask3;
+ ULONG AspectRatioData3;
+ ULONG ExtendedInfoChangeMask[4];
+ ULONG ExtendedInfoData[4];
+ ULONG Reserved;
+} DXVA_COPPSetSignalingCmdData;
+
+// Add format enum and data enum
+typedef enum _COPP_TVProtectionStandard {
+ COPP_ProtectionStandard_Unknown = 0x80000000,
+ COPP_ProtectionStandard_None = 0x00000000,
+ COPP_ProtectionStandard_IEC61880_525i = 0x00000001,
+ COPP_ProtectionStandard_IEC61880_2_525i = 0x00000002,
+ COPP_ProtectionStandard_IEC62375_625p = 0x00000004,
+ COPP_ProtectionStandard_EIA608B_525 = 0x00000008,
+ COPP_ProtectionStandard_EN300294_625i = 0x00000010,
+ COPP_ProtectionStandard_CEA805A_TypeA_525p = 0x00000020,
+ COPP_ProtectionStandard_CEA805A_TypeA_750p = 0x00000040,
+ COPP_ProtectionStandard_CEA805A_TypeA_1125i = 0x00000080,
+ COPP_ProtectionStandard_CEA805A_TypeB_525p = 0x00000100,
+ COPP_ProtectionStandard_CEA805A_TypeB_750p = 0x00000200,
+ COPP_ProtectionStandard_CEA805A_TypeB_1125i = 0x00000400,
+ COPP_ProtectionStandard_ARIBTRB15_525i = 0x00000800,
+ COPP_ProtectionStandard_ARIBTRB15_525p = 0x00001000,
+ COPP_ProtectionStandard_ARIBTRB15_750p = 0x00002000,
+ COPP_ProtectionStandard_ARIBTRB15_1125i = 0x00004000,
+ COPP_ProtectionStandard_Mask = 0x80007FFF,
+ COPP_ProtectionStandard_Reserved = 0x7FFF8000
+} COPP_TVProtectionStandard;
+
+#define COPP_ImageAspectRatio_EN300294_Mask 0x00000007
+
+typedef enum _COPP_ImageAspectRatio_EN300294 {
+ COPP_AspectRatio_EN300294_FullFormat4by3 = 0,
+ COPP_AspectRatio_EN300294_Box14by9Center = 1,
+ COPP_AspectRatio_EN300294_Box14by9Top = 2,
+ COPP_AspectRatio_EN300294_Box16by9Center = 3,
+ COPP_AspectRatio_EN300294_Box16by9Top = 4,
+ COPP_AspectRatio_EN300294_BoxGT16by9Center = 5,
+ COPP_AspectRatio_EN300294_FullFormat4by3ProtectedCenter = 6,
+ COPP_AspectRatio_EN300294_FullFormat16by9Anamorphic = 7,
+ COPP_AspectRatio_ForceDWORD = 0x7fffffff
+} COPP_ImageAspectRatio_EN300294;
+
+
+// -------------------------------------------------------------------------
+// COPPQueryStatus
+// -------------------------------------------------------------------------
+typedef struct _DXVA_COPPStatusInput {
+ GUID rApp; // 16 bytes
+ GUID guidStatusRequestID;// 16 bytes
+ ULONG dwSequence; // 4 bytes
+ ULONG cbSizeData; // 4 bytes
+ UCHAR StatusData[4056]; // 4056 bytes (4056+4+4+16+16 = 4096)
+} DXVA_COPPStatusInput, *LPDXVA_COPPStatusInput;
+
+typedef struct _DXVA_COPPStatusOutput {
+ GUID macKDI; // 16 bytes
+ ULONG cbSizeData; // 4 bytes
+ UCHAR COPPStatus[4076]; // 4076 bytes (4076+16+4 = 4096)
+} DXVA_COPPStatusOutput, *LPDXVA_COPPStatusOutput;
+
+typedef enum _COPP_StatusFlags {
+ COPP_StatusNormal = 0x00,
+ COPP_LinkLost = 0x01,
+ COPP_RenegotiationRequired = 0x02,
+ COPP_StatusFlagsReserved = 0xFFFFFFFC
+} COPP_StatusFlags;
+
+typedef struct _DXVA_COPPStatusData {
+ GUID rApp;
+ ULONG dwFlags; // See COPP_StatusFlags above
+ ULONG dwData;
+ ULONG ExtendedInfoValidMask;
+ ULONG ExtendedInfoData;
+} DXVA_COPPStatusData;
+
+typedef struct _DXVA_COPPStatusDisplayData {
+ GUID rApp;
+ ULONG dwFlags; // See COPP_StatusFlags above
+ ULONG DisplayWidth;
+ ULONG DisplayHeight;
+ ULONG Format; // also contains extended color data
+ ULONG d3dFormat;
+ ULONG FreqNumerator;
+ ULONG FreqDenominator;
+} DXVA_COPPStatusDisplayData;
+
+typedef enum _COPP_StatusHDCPFlags {
+ COPP_HDCPRepeater = 0x01,
+ COPP_HDCPFlagsReserved = 0xFFFFFFFE
+} COPP_StatusHDCPFlags;
+
+typedef struct _DXVA_COPPStatusHDCPKeyData {
+ GUID rApp;
+ ULONG dwFlags; // See COPP_StatusFlags above
+ ULONG dwHDCPFlags; // See COPP_StatusHDCPFlags above
+ GUID BKey; // Lower 40 bits
+ GUID Reserved1;
+ GUID Reserved2;
+} DXVA_COPPStatusHDCPKeyData;
+
+
+#define DXVA_COPPQueryStatusFnCode 0x05
+// lpInputData => DXVA_COPPStatusInput*
+// lpOutputData => DXVA_COPPStatusOutput*
+
+
+//
+// Status GUID and enumerations
+//
+DEFINE_GUID(DXVA_COPPQueryConnectorType,
+ 0x81d0bfd5,0x6afe,0x48c2,0x99,0xc0,0x95,0xa0,0x8f,0x97,0xc5,0xda);
+
+typedef enum _COPP_ConnectorType {
+ COPP_ConnectorType_Unknown = -1,
+ COPP_ConnectorType_VGA = 0,
+ COPP_ConnectorType_SVideo = 1,
+ COPP_ConnectorType_CompositeVideo = 2,
+ COPP_ConnectorType_ComponentVideo = 3,
+ COPP_ConnectorType_DVI = 4,
+ COPP_ConnectorType_HDMI = 5,
+ COPP_ConnectorType_LVDS = 6,
+ COPP_ConnectorType_TMDS = 7,
+ COPP_ConnectorType_D_JPN = 8,
+ COPP_ConnectorType_SDI = 9,
+ COPP_ConnectorType_DisplayPortExternal = 10,
+ COPP_ConnectorType_DisplayPortEmbedded = 11,
+ COPP_ConnectorType_UDIExternal = 12,
+ COPP_ConnectorType_UDIEmbedded = 13,
+ COPP_ConnectorType_Internal = 0x80000000, // can be combined with the other connector types
+ COPP_ConnectorType_ForceDWORD = 0x7fffffff /* force 32-bit size enum */
+} COPP_ConnectorType;
+
+DEFINE_GUID(DXVA_COPPQueryProtectionType,
+ 0x38f2a801,0x9a6c,0x48bb,0x91,0x07,0xb6,0x69,0x6e,0x6f,0x17,0x97);
+
+DEFINE_GUID(DXVA_COPPQueryLocalProtectionLevel,
+ 0xb2075857,0x3eda,0x4d5d,0x88,0xdb,0x74,0x8f,0x8c,0x1a,0x05,0x49);
+
+DEFINE_GUID(DXVA_COPPQueryGlobalProtectionLevel,
+ 0x1957210a,0x7766,0x452a,0xb9,0x9a,0xd2,0x7a,0xed,0x54,0xf0,0x3a);
+
+DEFINE_GUID(DXVA_COPPQueryDisplayData,
+ 0xd7bf1ba3,0xad13,0x4f8e,0xaf,0x98,0x0d,0xcb,0x3c,0xa2,0x04,0xcc);
+
+DEFINE_GUID(DXVA_COPPQueryHDCPKeyData,
+ 0xdb59d74, 0xa992, 0x492e, 0xa0, 0xbd, 0xc2, 0x3f, 0xda, 0x56, 0x4e, 0x0);
+
+DEFINE_GUID(DXVA_COPPQueryBusData,
+ 0xc6f4d673, 0x6174, 0x4184, 0x8e, 0x35, 0xf6, 0xdb, 0x52, 0x0, 0xbc, 0xba);
+
+typedef enum _COPP_BusType {
+ COPP_BusType_Unknown = 0,
+ COPP_BusType_PCI = 1,
+ COPP_BusType_PCIX = 2,
+ COPP_BusType_PCIExpress = 3,
+ COPP_BusType_AGP = 4,
+ COPP_BusType_Integrated = 0x80000000, // can be combined with the other bus types
+ COPP_BusType_ForceDWORD = 0x7fffffff /* force 32-bit size enum */
+} COPP_BusType;
+
+DEFINE_GUID(DXVA_COPPQuerySignaling,
+ 0x6629a591, 0x3b79, 0x4cf3, 0x92, 0x4a, 0x11, 0xe8, 0xe7, 0x81, 0x16, 0x71);
+
+typedef struct _DXVA_COPPStatusSignalingCmdData {
+ GUID rApp;
+ ULONG dwFlags; // See COPP_StatusFlags above
+ ULONG AvailableTVProtectionStandards; // See COPP_TVProtectionStandard
+ ULONG ActiveTVProtectionStandard; // See COPP_TVProtectionStandard
+ ULONG TVType;
+ ULONG AspectRatioValidMask1;
+ ULONG AspectRatioData1; // See COPP_AspectRatio_EN300294 for ETSI EN 300 294 values
+ ULONG AspectRatioValidMask2;
+ ULONG AspectRatioData2;
+ ULONG AspectRatioValidMask3;
+ ULONG AspectRatioData3;
+ ULONG ExtendedInfoValidMask[4];
+ ULONG ExtendedInfoData[4];
+} DXVA_COPPStatusSignalingCmdData;
+
+
+#endif /* __DIRECTX_VA_CERTOUTPUTPROTECT__ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#ifdef __DXVA1_DEPRECATED_INTERFACES__
+
+#if !defined(__cplusplus)
+#error C++ compiler required.
+#endif
+
+/* IID_IDirect3DVideoDevice9 */
+DEFINE_GUID(IID_IDirect3DVideoDevice9,
+0x694036ac, 0x542a, 0x4a3a, 0x9a, 0x32, 0x53, 0xbc, 0x20, 0x0, 0x2c, 0x1b);
+
+/* IID_IDirect3DDXVADevice9 */
+DEFINE_GUID(IID_IDirect3DDXVADevice9,
+0x9f00c3d3, 0x5ab6, 0x465f, 0xb9, 0x55, 0x9f, 0xe, 0xbb, 0x2c, 0x56, 0x6);
+
+interface IDirect3DVideoDevice9;
+interface IDirect3DDXVADevice9;
+
+typedef struct _DXVAUncompDataInfo
+{
+ DWORD UncompWidth; /* Width of uncompressed data */
+ DWORD UncompHeight; /* Height of uncompressed data */
+ D3DFORMAT UncompFormat; /* Format of uncompressed data */
+} DXVAUncompDataInfo;
+
+typedef struct _DXVACompBufferInfo
+{
+ DWORD NumCompBuffers; /* Number of buffers reqd for compressed data */
+ DWORD WidthToCreate; /* Width of surface to create */
+ DWORD HeightToCreate; /* Height of surface to create */
+ DWORD BytesToAllocate; /* Total number of bytes used by each surface */
+ DWORD Usage; /* Usage used to create the compressed buffer */
+ D3DPOOL Pool; /* Pool where the compressed buffer belongs */
+ D3DFORMAT Format; /* Format used to create the compressed buffer */
+} DXVACompBufferInfo;
+
+typedef struct _DXVABufferInfo
+{
+ VOID* pCompSurface; /* Pointer to buffer containing compressed data */
+ DWORD DataOffset; /* Offset of relevant data from the beginning of buffer */
+ DWORD DataSize; /* Size of relevant data */
+} DXVABufferInfo;
+
+#undef INTERFACE
+#define INTERFACE IDirect3DVideoDevice9
+
+DECLARE_INTERFACE_(IDirect3DVideoDevice9, IUnknown)
+{
+ /*** IUnknown methods ***/
+ STDMETHOD(QueryInterface)(THIS_ REFIID riid, void** ppvObj) PURE;
+ STDMETHOD_(ULONG,AddRef)(THIS) PURE;
+ STDMETHOD_(ULONG,Release)(THIS) PURE;
+
+ /*** IDirect3DVideoDevice9 methods ***/
+ STDMETHOD(CreateSurface)(THIS_ UINT Width,UINT Height,UINT BackBuffers,
+ D3DFORMAT Format,D3DPOOL Pool,DWORD Usage,
+ IDirect3DSurface9** ppSurface,
+ HANDLE* pSharedHandle) PURE;
+ STDMETHOD(GetDXVACompressedBufferInfo)(THIS_ GUID* pGuid,
+ DXVAUncompDataInfo* pUncompData,
+ DWORD* pNumBuffers,
+ DXVACompBufferInfo* pBufferInfo) PURE;
+ STDMETHOD(GetDXVAGuids)(THIS_ DWORD* pNumGuids,GUID* pGuids) PURE;
+ STDMETHOD(GetDXVAInternalInfo)(THIS_ GUID* pGuid,
+ DXVAUncompDataInfo* pUncompData,
+ DWORD* pMemoryUsed) PURE;
+ STDMETHOD(GetUncompressedDXVAFormats)(THIS_ GUID* pGuid,
+ DWORD* pNumFormats,
+ D3DFORMAT* pFormats) PURE;
+ STDMETHOD(CreateDXVADevice)(THIS_ GUID* pGuid,
+ DXVAUncompDataInfo* pUncompData,
+ LPVOID pData,DWORD DataSize,
+ IDirect3DDXVADevice9** ppDXVADevice) PURE;
+};
+
+#undef INTERFACE
+#define INTERFACE IDirect3DDXVADevice9
+
+DECLARE_INTERFACE_(IDirect3DDXVADevice9, IUnknown)
+{
+ /*** IUnknown methods ***/
+ STDMETHOD(QueryInterface)(THIS_ REFIID riid, void** ppvObj) PURE;
+ STDMETHOD_(ULONG,AddRef)(THIS) PURE;
+ STDMETHOD_(ULONG,Release)(THIS) PURE;
+
+ /*** IDirect3DDXVADevice9 methods ***/
+ STDMETHOD(BeginFrame)(THIS_ IDirect3DSurface9* pDstSurface,
+ DWORD SizeInputData,VOID* pInputData,
+ DWORD* pSizeOutputData,VOID* pOutputData) PURE;
+ STDMETHOD(EndFrame)(THIS_ DWORD SizeMiscData,VOID* pMiscData) PURE;
+ STDMETHOD(Execute)(THIS_ DWORD FunctionNum,VOID* pInputData,
+ DWORD InputSize,VOID* OuputData,DWORD OutputSize,
+ DWORD NumBuffers,DXVABufferInfo* pBufferInfo) PURE;
+ STDMETHOD(QueryStatus)(THIS_ IDirect3DSurface9* pSurface,DWORD Flags) PURE;
+};
+
+#endif /* __DXVA1_DEPRECATED_INTERFACES__ */
+
+#if _MSC_VER >= 1200
+#pragma warning(pop)
+#endif
+
+#endif /* __DIRECTX_VA__ */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/eac3dec.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/eac3dec.c
new file mode 100644
index 000000000..79f3c3b49
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/eac3dec.c
@@ -0,0 +1,606 @@
+/*
+ * E-AC-3 decoder
+ * Copyright (c) 2007 Bartlomiej Wolowiec <bartek.wolowiec@gmail.com>
+ * Copyright (c) 2008 Justin Ruggles
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * There are several features of E-AC-3 that this decoder does not yet support.
+ *
+ * Enhanced Coupling
+ * No known samples exist. If any ever surface, this feature should not be
+ * too difficult to implement.
+ *
+ * Reduced Sample Rates
+ * No known samples exist. The spec also does not give clear information
+ * on how this is to be implemented.
+ *
+ * Dependent Streams
+ * Only the independent stream is currently decoded. Any dependent
+ * streams are skipped. We have only come across two examples of this, and
+ * they are both just test streams, one for HD-DVD and the other for
+ * Blu-ray.
+ *
+ * Transient Pre-noise Processing
+ * This is side information which a decoder should use to reduce artifacts
+ * caused by transients. There are samples which are known to have this
+ * information, but this decoder currently ignores it.
+ */
+
+
+#include "avcodec.h"
+#include "internal.h"
+#include "aac_ac3_parser.h"
+#include "ac3.h"
+#include "ac3_parser.h"
+#include "ac3dec.h"
+#include "ac3dec_data.h"
+#include "eac3dec_data.h"
+
+/** gain adaptive quantization mode */
+typedef enum {
+ EAC3_GAQ_NO =0,
+ EAC3_GAQ_12,
+ EAC3_GAQ_14,
+ EAC3_GAQ_124
+} EAC3GaqMode;
+
+#define EAC3_SR_CODE_REDUCED 3
+
+void ff_eac3_apply_spectral_extension(AC3DecodeContext *s)
+{
+ int bin, bnd, ch, i;
+ uint8_t wrapflag[SPX_MAX_BANDS]={0,}, num_copy_sections, copy_sizes[SPX_MAX_BANDS];
+ float rms_energy[SPX_MAX_BANDS];
+
+ /* Set copy index mapping table. Set wrap flags to apply a notch filter at
+ wrap points later on. */
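+ /* In other words, the copy source walks the range
+ [spx_copy_start_freq, spx_start_freq); whenever it would cross into the
+ extension region it wraps back to spx_copy_start_freq, and the band where
+ the wrap occurs is flagged so the notch filter below can smooth the
+ resulting discontinuity. */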
+ wrapflag[0] = 1;
+ bin = s->spx_copy_start_freq;
+ num_copy_sections = 0;
+ for (bnd = 0; bnd < s->num_spx_bands; bnd++) {
+ int copysize;
+ int bandsize = s->spx_band_sizes[bnd];
+ if ((bin + bandsize) > s->spx_start_freq) {
+ copy_sizes[num_copy_sections++] = bin - s->spx_copy_start_freq;
+ bin = s->spx_copy_start_freq;
+ wrapflag[bnd] = 1;
+ }
+ for (i = 0; i < bandsize; i += copysize) {
+ if (bin == s->spx_start_freq) {
+ copy_sizes[num_copy_sections++] = bin - s->spx_copy_start_freq;
+ bin = s->spx_copy_start_freq;
+ }
+ copysize = FFMIN(bandsize - i, s->spx_start_freq - bin);
+ bin += copysize;
+ }
+ }
+ copy_sizes[num_copy_sections++] = bin - s->spx_copy_start_freq;
+
+ for (ch = 1; ch <= s->fbw_channels; ch++) {
+ if (!s->channel_in_spx[ch])
+ continue;
+
+ /* Copy coeffs from normal bands to extension bands */
+ bin = s->spx_start_freq;
+ for (i = 0; i < num_copy_sections; i++) {
+ memcpy(&s->transform_coeffs[ch][bin],
+ &s->transform_coeffs[ch][s->spx_copy_start_freq],
+ copy_sizes[i]*sizeof(float));
+ bin += copy_sizes[i];
+ }
+
+ /* Calculate RMS energy for each SPX band. */
+ bin = s->spx_start_freq;
+ for (bnd = 0; bnd < s->num_spx_bands; bnd++) {
+ int bandsize = s->spx_band_sizes[bnd];
+ float accum = 0.0f;
+ for (i = 0; i < bandsize; i++) {
+ float coeff = s->transform_coeffs[ch][bin++];
+ accum += coeff * coeff;
+ }
+ rms_energy[bnd] = sqrt(accum / (float)bandsize);
+ }
+
+ /* Apply a notch filter at transitions between normal and extension
+ bands and at all wrap points. */
+ if (s->spx_atten_code[ch] >= 0) {
+ const float *atten_tab = ff_eac3_spx_atten_tab[s->spx_atten_code[ch]];
+ bin = s->spx_start_freq - 2;
+ for (bnd = 0; bnd < s->num_spx_bands; bnd++) {
+ if (wrapflag[bnd]) {
+ float *coeffs = &s->transform_coeffs[ch][bin];
+ coeffs[0] *= atten_tab[0];
+ coeffs[1] *= atten_tab[1];
+ coeffs[2] *= atten_tab[2];
+ coeffs[3] *= atten_tab[1];
+ coeffs[4] *= atten_tab[0];
+ }
+ bin += s->spx_band_sizes[bnd];
+ }
+ }
+
+ /* Apply noise-blended coefficient scaling based on previously
+ calculated RMS energy, blending factors, and SPX coordinates for
+ each band. */
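+ /* The noise term below scales a signed reinterpretation of the 32-bit
+ LFG output down to roughly unit magnitude, then weights it by the
+ per-band noise blend factor and the band RMS energy computed above,
+ while the copied coefficients are weighted by the signal blend factor. */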
+ bin = s->spx_start_freq;
+ for (bnd = 0; bnd < s->num_spx_bands; bnd++) {
+ float nscale = s->spx_noise_blend[ch][bnd] * rms_energy[bnd];
+ float sscale = s->spx_signal_blend[ch][bnd];
+ for (i = 0; i < s->spx_band_sizes[bnd]; i++) {
+ float noise = nscale * (((int)av_lfg_get(&s->dith_state))/(float)(1<<31));
+ s->transform_coeffs[ch][bin] *= sscale;
+ s->transform_coeffs[ch][bin++] += noise;
+ }
+ }
+ }
+}
+
+/** lrint(M_SQRT2*cos(2*M_PI/12)*(1<<23)) */
+#define COEFF_0 10273905LL
+
+/** lrint(M_SQRT2*cos(0*M_PI/12)*(1<<23)) = lrint(M_SQRT2*(1<<23)) */
+#define COEFF_1 11863283LL
+
+/** lrint(M_SQRT2*cos(5*M_PI/12)*(1<<23)) */
+#define COEFF_2 3070444LL
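+/* The three constants above are the IDCT cosine factors scaled by 1<<23
+ (Q23 fixed point); idct6() multiplies by them and shifts right by 23 to
+ keep intermediate results within 24-bit precision. */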
+
+/**
+ * Calculate 6-point IDCT of the pre-mantissas.
+ * All calculations are 24-bit fixed-point.
+ */
+static void idct6(int pre_mant[6])
+{
+ int tmp;
+ int even0, even1, even2, odd0, odd1, odd2;
+
+ odd1 = pre_mant[1] - pre_mant[3] - pre_mant[5];
+
+ even2 = ( pre_mant[2] * COEFF_0) >> 23;
+ tmp = ( pre_mant[4] * COEFF_1) >> 23;
+ odd0 = ((pre_mant[1] + pre_mant[5]) * COEFF_2) >> 23;
+
+ even0 = pre_mant[0] + (tmp >> 1);
+ even1 = pre_mant[0] - tmp;
+
+ tmp = even0;
+ even0 = tmp + even2;
+ even2 = tmp - even2;
+
+ tmp = odd0;
+ odd0 = tmp + pre_mant[1] + pre_mant[3];
+ odd2 = tmp + pre_mant[5] - pre_mant[3];
+
+ pre_mant[0] = even0 + odd0;
+ pre_mant[1] = even1 + odd1;
+ pre_mant[2] = even2 + odd2;
+ pre_mant[3] = even2 - odd2;
+ pre_mant[4] = even1 - odd1;
+ pre_mant[5] = even0 - odd0;
+}
+
+void ff_eac3_decode_transform_coeffs_aht_ch(AC3DecodeContext *s, int ch)
+{
+ int bin, blk, gs;
+ int end_bap, gaq_mode;
+ GetBitContext *gbc = &s->gbc;
+ int gaq_gain[AC3_MAX_COEFS];
+
+ gaq_mode = get_bits(gbc, 2);
+ end_bap = (gaq_mode < 2) ? 12 : 17;
+
+ /* if GAQ gain is used, decode gain codes for bins with hebap between
+ 8 and end_bap */
+ gs = 0;
+ if (gaq_mode == EAC3_GAQ_12 || gaq_mode == EAC3_GAQ_14) {
+ /* read 1-bit GAQ gain codes */
+ for (bin = s->start_freq[ch]; bin < s->end_freq[ch]; bin++) {
+ if (s->bap[ch][bin] > 7 && s->bap[ch][bin] < end_bap)
+ gaq_gain[gs++] = get_bits1(gbc) << (gaq_mode-1);
+ }
+ } else if (gaq_mode == EAC3_GAQ_124) {
+ /* read 1.67-bit GAQ gain codes (3 codes in 5 bits) */
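+ /* Each 5-bit group code packs three gain values (3*3*3 = 27 valid
+ combinations, hence the range check against 26 below);
+ ff_ac3_ungroup_3_in_5_bits_tab splits one group code back into its
+ three gains. */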
+ int gc = 2;
+ for (bin = s->start_freq[ch]; bin < s->end_freq[ch]; bin++) {
+ if (s->bap[ch][bin] > 7 && s->bap[ch][bin] < 17) {
+ if (gc++ == 2) {
+ int group_code = get_bits(gbc, 5);
+ if (group_code > 26) {
+ av_log(s->avctx, AV_LOG_WARNING, "GAQ gain group code out-of-range\n");
+ group_code = 26;
+ }
+ gaq_gain[gs++] = ff_ac3_ungroup_3_in_5_bits_tab[group_code][0];
+ gaq_gain[gs++] = ff_ac3_ungroup_3_in_5_bits_tab[group_code][1];
+ gaq_gain[gs++] = ff_ac3_ungroup_3_in_5_bits_tab[group_code][2];
+ gc = 0;
+ }
+ }
+ }
+ }
+
+ gs=0;
+ for (bin = s->start_freq[ch]; bin < s->end_freq[ch]; bin++) {
+ int hebap = s->bap[ch][bin];
+ int bits = ff_eac3_bits_vs_hebap[hebap];
+ if (!hebap) {
+ /* zero-mantissa dithering */
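+ /* draw a 23-bit value from the LFG and re-center it around zero,
+ giving a uniform dither in [-0x400000, 0x3FFFFF] for bins with no
+ transmitted mantissa */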
+ for (blk = 0; blk < 6; blk++) {
+ s->pre_mantissa[ch][bin][blk] = (av_lfg_get(&s->dith_state) & 0x7FFFFF) - 0x400000;
+ }
+ } else if (hebap < 8) {
+ /* Vector Quantization */
+ int v = get_bits(gbc, bits);
+ for (blk = 0; blk < 6; blk++) {
+ s->pre_mantissa[ch][bin][blk] = ff_eac3_mantissa_vq[hebap][v][blk] << 8;
+ }
+ } else {
+ /* Gain Adaptive Quantization */
+ int gbits, log_gain;
+ if (gaq_mode != EAC3_GAQ_NO && hebap < end_bap) {
+ log_gain = gaq_gain[gs++];
+ } else {
+ log_gain = 0;
+ }
+ gbits = bits - log_gain;
+
+ for (blk = 0; blk < 6; blk++) {
+ int mant = get_sbits(gbc, gbits);
+ if (log_gain && mant == -(1 << (gbits-1))) {
+ /* large mantissa */
+ int b;
+ int mbits = bits - (2 - log_gain);
+ mant = get_sbits(gbc, mbits);
+ mant <<= (23 - (mbits - 1));
+ /* remap mantissa value to correct for asymmetric quantization */
+ if (mant >= 0)
+ b = 1 << (23 - log_gain);
+ else
+ b = ff_eac3_gaq_remap_2_4_b[hebap-8][log_gain-1] << 8;
+ mant += ((ff_eac3_gaq_remap_2_4_a[hebap-8][log_gain-1] * (int64_t)mant) >> 15) + b;
+ } else {
+ /* small mantissa, no GAQ, or Gk=1 */
+ mant <<= 24 - bits;
+ if (!log_gain) {
+ /* remap mantissa value for no GAQ or Gk=1 */
+ mant += (ff_eac3_gaq_remap_1[hebap-8] * (int64_t)mant) >> 15;
+ }
+ }
+ s->pre_mantissa[ch][bin][blk] = mant;
+ }
+ }
+ idct6(s->pre_mantissa[ch][bin]);
+ }
+}
+
+int ff_eac3_parse_header(AC3DecodeContext *s)
+{
+ int i, blk, ch;
+ int ac3_exponent_strategy, parse_aht_info, parse_spx_atten_data;
+ int parse_transient_proc_info;
+ int num_cpl_blocks;
+ GetBitContext *gbc = &s->gbc;
+
+ /* An E-AC-3 stream can have multiple independent streams which the
+ application can select from. Each independent stream can also contain
+ dependent streams which are used to add or replace channels. */
+ if (s->frame_type == EAC3_FRAME_TYPE_DEPENDENT) {
+ av_log_missing_feature(s->avctx, "Dependent substream decoding", 1);
+ return AAC_AC3_PARSE_ERROR_FRAME_TYPE;
+ } else if (s->frame_type == EAC3_FRAME_TYPE_RESERVED) {
+ av_log(s->avctx, AV_LOG_ERROR, "Reserved frame type\n");
+ return AAC_AC3_PARSE_ERROR_FRAME_TYPE;
+ }
+
+ /* The substream id indicates which substream this frame belongs to. Each
+ independent stream has its own substream id, and the dependent streams
+ associated with an independent stream have matching substream ids. */
+ if (s->substreamid) {
+ /* Only decode the substream with id=0; skip any additional substreams. */
+ av_log_missing_feature(s->avctx, "Additional substreams", 1);
+ return AAC_AC3_PARSE_ERROR_FRAME_TYPE;
+ }
+
+ if (s->bit_alloc_params.sr_code == EAC3_SR_CODE_REDUCED) {
+ /* The E-AC-3 specification does not describe how to handle reduced sample
+ rates in bit allocation. The best assumption would be that it is
+ handled like AC-3 DolbyNet, but we cannot be sure until we have a
+ sample which utilizes this feature. */
+ av_log_missing_feature(s->avctx, "Reduced sampling rates", 1);
+ return -1;
+ }
+ skip_bits(gbc, 5); // skip bitstream id
+
+ /* volume control params */
+ for (i = 0; i < (s->channel_mode ? 1 : 2); i++) {
+ skip_bits(gbc, 5); // skip dialog normalization
+ if (get_bits1(gbc)) {
+ skip_bits(gbc, 8); // skip compression gain word
+ }
+ }
+
+ /* dependent stream channel map */
+ if (s->frame_type == EAC3_FRAME_TYPE_DEPENDENT) {
+ if (get_bits1(gbc)) {
+ skip_bits(gbc, 16); // skip custom channel map
+ }
+ }
+
+ /* mixing metadata */
+ if (get_bits1(gbc)) {
+ /* center and surround mix levels */
+ if (s->channel_mode > AC3_CHMODE_STEREO) {
+ skip_bits(gbc, 2); // skip preferred stereo downmix mode
+ if (s->channel_mode & 1) {
+ /* if three front channels exist */
+ skip_bits(gbc, 3); //skip Lt/Rt center mix level
+ s->center_mix_level = get_bits(gbc, 3);
+ }
+ if (s->channel_mode & 4) {
+ /* if a surround channel exists */
+ skip_bits(gbc, 3); //skip Lt/Rt surround mix level
+ s->surround_mix_level = get_bits(gbc, 3);
+ }
+ }
+
+ /* lfe mix level */
+ if (s->lfe_on && get_bits1(gbc)) {
+ // TODO: use LFE mix level
+ skip_bits(gbc, 5); // skip LFE mix level code
+ }
+
+ /* info for mixing with other streams and substreams */
+ if (s->frame_type == EAC3_FRAME_TYPE_INDEPENDENT) {
+ for (i = 0; i < (s->channel_mode ? 1 : 2); i++) {
+ // TODO: apply program scale factor
+ if (get_bits1(gbc)) {
+ skip_bits(gbc, 6); // skip program scale factor
+ }
+ }
+ if (get_bits1(gbc)) {
+ skip_bits(gbc, 6); // skip external program scale factor
+ }
+ /* skip mixing parameter data */
+ switch(get_bits(gbc, 2)) {
+ case 1: skip_bits(gbc, 5); break;
+ case 2: skip_bits(gbc, 12); break;
+ case 3: {
+ int mix_data_size = (get_bits(gbc, 5) + 2) << 3;
+ skip_bits_long(gbc, mix_data_size);
+ break;
+ }
+ }
+ /* skip pan information for mono or dual mono source */
+ if (s->channel_mode < AC3_CHMODE_STEREO) {
+ for (i = 0; i < (s->channel_mode ? 1 : 2); i++) {
+ if (get_bits1(gbc)) {
+ /* note: this is not in the ATSC A/52B specification
+ reference: ETSI TS 102 366 V1.1.1
+ section: E.1.3.1.25 */
+ skip_bits(gbc, 8); // skip pan mean direction index
+ skip_bits(gbc, 6); // skip reserved paninfo bits
+ }
+ }
+ }
+ /* skip mixing configuration information */
+ if (get_bits1(gbc)) {
+ for (blk = 0; blk < s->num_blocks; blk++) {
+ if (s->num_blocks == 1 || get_bits1(gbc)) {
+ skip_bits(gbc, 5);
+ }
+ }
+ }
+ }
+ }
+
+ /* informational metadata */
+ if (get_bits1(gbc)) {
+ skip_bits(gbc, 3); // skip bit stream mode
+ skip_bits(gbc, 2); // skip copyright bit and original bitstream bit
+ if (s->channel_mode == AC3_CHMODE_STEREO) {
+ skip_bits(gbc, 4); // skip Dolby surround and headphone mode
+ }
+ if (s->channel_mode >= AC3_CHMODE_2F2R) {
+ skip_bits(gbc, 2); // skip Dolby surround EX mode
+ }
+ for (i = 0; i < (s->channel_mode ? 1 : 2); i++) {
+ if (get_bits1(gbc)) {
+ skip_bits(gbc, 8); // skip mix level, room type, and A/D converter type
+ }
+ }
+ if (s->bit_alloc_params.sr_code != EAC3_SR_CODE_REDUCED) {
+ skip_bits1(gbc); // skip source sample rate code
+ }
+ }
+
+ /* converter synchronization flag
+ If frames contain fewer than six blocks, this bit should be turned on
+ once every 6 blocks to indicate the start of a frame set.
+ reference: RFC 4598, Section 2.1.3 Frame Sets */
+ if (s->frame_type == EAC3_FRAME_TYPE_INDEPENDENT && s->num_blocks != 6) {
+ skip_bits1(gbc); // skip converter synchronization flag
+ }
+
+ /* original frame size code if this stream was converted from AC-3 */
+ if (s->frame_type == EAC3_FRAME_TYPE_AC3_CONVERT &&
+ (s->num_blocks == 6 || get_bits1(gbc))) {
+ skip_bits(gbc, 6); // skip frame size code
+ }
+
+ /* additional bitstream info */
+ if (get_bits1(gbc)) {
+ int addbsil = get_bits(gbc, 6);
+ for (i = 0; i < addbsil + 1; i++) {
+ skip_bits(gbc, 8); // skip additional bit stream info
+ }
+ }
+
+ /* audio frame syntax flags, strategy data, and per-frame data */
+
+ if (s->num_blocks == 6) {
+ ac3_exponent_strategy = get_bits1(gbc);
+ parse_aht_info = get_bits1(gbc);
+ } else {
+ /* fewer than 6 blocks, so use AC-3-style exponent strategy syntax, and
+ do not use AHT */
+ ac3_exponent_strategy = 1;
+ parse_aht_info = 0;
+ }
+
+ s->snr_offset_strategy = get_bits(gbc, 2);
+ parse_transient_proc_info = get_bits1(gbc);
+
+ s->block_switch_syntax = get_bits1(gbc);
+ if (!s->block_switch_syntax)
+ memset(s->block_switch, 0, sizeof(s->block_switch));
+
+ s->dither_flag_syntax = get_bits1(gbc);
+ if (!s->dither_flag_syntax) {
+ for (ch = 1; ch <= s->fbw_channels; ch++)
+ s->dither_flag[ch] = 1;
+ }
+ s->dither_flag[CPL_CH] = s->dither_flag[s->lfe_ch] = 0;
+
+ s->bit_allocation_syntax = get_bits1(gbc);
+ if (!s->bit_allocation_syntax) {
+ /* set default bit allocation parameters */
+ s->bit_alloc_params.slow_decay = ff_ac3_slow_decay_tab[2];
+ s->bit_alloc_params.fast_decay = ff_ac3_fast_decay_tab[1];
+ s->bit_alloc_params.slow_gain = ff_ac3_slow_gain_tab [1];
+ s->bit_alloc_params.db_per_bit = ff_ac3_db_per_bit_tab[2];
+ s->bit_alloc_params.floor = ff_ac3_floor_tab [7];
+ }
+
+ s->fast_gain_syntax = get_bits1(gbc);
+ s->dba_syntax = get_bits1(gbc);
+ s->skip_syntax = get_bits1(gbc);
+ parse_spx_atten_data = get_bits1(gbc);
+
+ /* coupling strategy occurrence and coupling use per block */
+ num_cpl_blocks = 0;
+ if (s->channel_mode > 1) {
+ for (blk = 0; blk < s->num_blocks; blk++) {
+ s->cpl_strategy_exists[blk] = (!blk || get_bits1(gbc));
+ if (s->cpl_strategy_exists[blk]) {
+ s->cpl_in_use[blk] = get_bits1(gbc);
+ } else {
+ s->cpl_in_use[blk] = s->cpl_in_use[blk-1];
+ }
+ num_cpl_blocks += s->cpl_in_use[blk];
+ }
+ } else {
+ memset(s->cpl_in_use, 0, sizeof(s->cpl_in_use));
+ }
+
+ /* exponent strategy data */
+ if (ac3_exponent_strategy) {
+ /* AC-3-style exponent strategy syntax */
+ for (blk = 0; blk < s->num_blocks; blk++) {
+ for (ch = !s->cpl_in_use[blk]; ch <= s->fbw_channels; ch++) {
+ s->exp_strategy[blk][ch] = get_bits(gbc, 2);
+ }
+ }
+ } else {
+ /* LUT-based exponent strategy syntax */
+ for (ch = !((s->channel_mode > 1) && num_cpl_blocks); ch <= s->fbw_channels; ch++) {
+ int frmchexpstr = get_bits(gbc, 5);
+ for (blk = 0; blk < 6; blk++) {
+ s->exp_strategy[blk][ch] = ff_eac3_frm_expstr[frmchexpstr][blk];
+ }
+ }
+ }
+ /* LFE exponent strategy */
+ if (s->lfe_on) {
+ for (blk = 0; blk < s->num_blocks; blk++) {
+ s->exp_strategy[blk][s->lfe_ch] = get_bits1(gbc);
+ }
+ }
+ /* original exponent strategies if this stream was converted from AC-3 */
+ if (s->frame_type == EAC3_FRAME_TYPE_INDEPENDENT &&
+ (s->num_blocks == 6 || get_bits1(gbc))) {
+ skip_bits(gbc, 5 * s->fbw_channels); // skip converter channel exponent strategy
+ }
+
+ /* determine which channels use AHT */
+ if (parse_aht_info) {
+ /* For AHT to be used, all non-zero blocks must reuse exponents from
+ the first block. Furthermore, for AHT to be used in the coupling
+ channel, all blocks must use coupling and use the same coupling
+ strategy. */
+ s->channel_uses_aht[CPL_CH]=0;
+ for (ch = (num_cpl_blocks != 6); ch <= s->channels; ch++) {
+ int use_aht = 1;
+ for (blk = 1; blk < 6; blk++) {
+ if ((s->exp_strategy[blk][ch] != EXP_REUSE) ||
+ (!ch && s->cpl_strategy_exists[blk])) {
+ use_aht = 0;
+ break;
+ }
+ }
+ s->channel_uses_aht[ch] = use_aht && get_bits1(gbc);
+ }
+ } else {
+ memset(s->channel_uses_aht, 0, sizeof(s->channel_uses_aht));
+ }
+
+ /* per-frame SNR offset */
+ if (!s->snr_offset_strategy) {
+ int csnroffst = (get_bits(gbc, 6) - 15) << 4;
+ int snroffst = (csnroffst + get_bits(gbc, 4)) << 2;
+ for (ch = 0; ch <= s->channels; ch++)
+ s->snr_offset[ch] = snroffst;
+ }
+
+ /* transient pre-noise processing data */
+ if (parse_transient_proc_info) {
+ for (ch = 1; ch <= s->fbw_channels; ch++) {
+ if (get_bits1(gbc)) { // channel in transient processing
+ skip_bits(gbc, 10); // skip transient processing location
+ skip_bits(gbc, 8); // skip transient processing length
+ }
+ }
+ }
+
+ /* spectral extension attenuation data */
+ for (ch = 1; ch <= s->fbw_channels; ch++) {
+ if (parse_spx_atten_data && get_bits1(gbc))
+ s->spx_atten_code[ch] = get_bits(gbc, 5);
+ else
+ s->spx_atten_code[ch] = -1;
+ }
+
+ /* block start information */
+ if (s->num_blocks > 1 && get_bits1(gbc)) {
+ /* reference: Section E2.3.2.27
+ nblkstrtbits = (numblks - 1) * (4 + ceiling(log2(words_per_frame)))
+ The spec does not say what this data is or what it's used for.
+ It is likely the offset of each block within the frame. */
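+ /* Note: s->frame_size is in bytes, so words_per_frame equals
+ s->frame_size / 2 and av_log2(s->frame_size - 2) matches
+ ceiling(log2(words_per_frame)) for any frame of at least two
+ 16-bit words. */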
+ int block_start_bits = (s->num_blocks-1) * (4 + av_log2(s->frame_size-2));
+ skip_bits_long(gbc, block_start_bits);
+ av_log_missing_feature(s->avctx, "Block start info", 1);
+ }
+
+ /* syntax state initialization */
+ for (ch = 1; ch <= s->fbw_channels; ch++) {
+ s->first_spx_coords[ch] = 1;
+ s->first_cpl_coords[ch] = 1;
+ }
+ s->first_cpl_leak = 1;
+
+ return 0;
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/eac3dec_data.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/eac3dec_data.c
new file mode 100644
index 000000000..6c6a55180
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/eac3dec_data.c
@@ -0,0 +1,1095 @@
+/*
+ * E-AC-3 decoder tables
+ * Copyright (c) 2007 Bartlomiej Wolowiec <bartek.wolowiec@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/eac3dec_data.c
+ * Tables taken directly from the E-AC-3 spec.
+ */
+
+#include "eac3dec_data.h"
+#include "ac3.h"
+
+const uint8_t ff_eac3_bits_vs_hebap[20] = {
+ 0, 2, 3, 4, 5, 7, 8, 9, 3, 4,
+ 5, 6, 7, 8, 9, 10, 11, 12, 14, 16,
+};
+
+/**
+ * Table E3.6, Gk=1
+ * No gain (Gk=1) inverse quantization, remapping scale factors
+ * ff_eac3_gaq_remap_1[hebap-8]
+ */
+const int16_t ff_eac3_gaq_remap_1[12] = {
+ 4681, 2185, 1057, 520, 258, 129, 64, 32, 16, 8, 2, 0
+};
+
+/**
+ * Table E3.6, Gk=2 & Gk=4, A
+ * Large mantissa inverse quantization, remapping scale factors
+ * ff_eac3_gaq_remap_2_4_a[hebap-8][Gk=2,4]
+ */
+const int16_t ff_eac3_gaq_remap_2_4_a[9][2] = {
+ { -10923, -4681 },
+ { -14043, -6554 },
+ { -15292, -7399 },
+ { -15855, -7802 },
+ { -16124, -7998 },
+ { -16255, -8096 },
+ { -16320, -8144 },
+ { -16352, -8168 },
+ { -16368, -8180 }
+};
+
+/**
+ * Table E3.6, Gk=2 & Gk=4, B
+ * Large mantissa inverse quantization, negative mantissa remapping offsets
+ * ff_eac3_gaq_remap_2_4_b[hebap-8][Gk=2,4]
+ */
+const int16_t ff_eac3_gaq_remap_2_4_b[9][2] = {
+ { -5461, -1170 },
+ { -11703, -4915 },
+ { -14199, -6606 },
+ { -15327, -7412 },
+ { -15864, -7805 },
+ { -16126, -7999 },
+ { -16255, -8096 },
+ { -16320, -8144 },
+ { -16352, -8168 }
+};
+
+static const int16_t vq_hebap1[4][6] = {
+{ 7167, 4739, 1106, 4269, 10412, 4820},
+{ -5702, -3187, -14483, -1392, -2027, 849},
+{ 633, 6199, 7009, -12779, -2306, -2636},
+{ -1468, -7031, 7592, 10617, -5946, -3062},
+};
+static const int16_t vq_hebap2[8][6] = {
+{ -12073, 608, -7019, 590, 4000, 869},
+{ 6692, 15689, -6178, -9239, -74, 133},
+{ 1855, -989, 20596, -2920, -4475, 225},
+{ -1194, -3901, -821, -6566, -875, -20298},
+{ -2762, -3181, -4094, -5623, -16945, 9765},
+{ 1547, 6839, 1980, 20233, -1071, -4986},
+{ 6221, -17915, -5516, 6266, 358, 1162},
+{ 3753, -1066, 4283, -3227, 15928, 10186},
+};
+static const int16_t vq_hebap3[16][6] = {
+{ -10028, 20779, 10982, -4560, 798, -68},
+{ 11050, 20490, -6617, -5342, -1797, -1631},
+{ 3977, -542, 7118, -1166, 18844, 14678},
+{ -4320, -96, -7295, -492, -22050, -4277},
+{ 2692, 5856, 5530, 21862, -7212, -5325},
+{ -135, -23391, 962, 8115, -644, 382},
+{ -1563, 3400, -3299, 4693, -6892, 22398},
+{ 3535, 3030, 7296, 6214, 20476, -12099},
+{ 57, -6823, 1848, -22349, -5919, 6823},
+{ -821, -3655, -387, -6253, -1735, -22373},
+{ -6046, 1586, -18890, -14392, 9214, 705},
+{ -5716, 264, -17964, 14618, 7921, -337},
+{ -110, 108, 8, 74, -89, -50},
+{ 6612, -1517, 21687, -1658, -7949, -246},
+{ 21667, -6335, -8290, -101, -1349, -22},
+{ -22003, -6476, 7974, 648, 2054, -331},
+};
+static const int16_t vq_hebap4[32][6] = {
+{ 6636, -4593, 14173, -17297, -16523, 864},
+{ 3658, 22540, 104, -1763, -84, 6},
+{ 21580, -17815, -7282, -1575, -2078, -320},
+{ -2233, 10017, -2728, 14938, -13640, -17659},
+{ -1564, -17738, -19161, 13735, 2757, 2951},
+{ 4520, 5510, 7393, 10799, 19231, -13770},
+{ 399, 2976, -1099, 5013, -1159, 22095},
+{ 3624, -2359, 4680, -2238, 22702, 3765},
+{ -4201, -8285, -6810, -12390, -18414, 15382},
+{ -5198, -6869, -10047, -8364, -16022, -20562},
+{ -142, -22671, -368, 4391, -464, -13},
+{ 814, -1118, -1089, -22019, 74, 1553},
+{ -1618, 19222, -17642, -13490, 842, -2309},
+{ 4689, 16490, 20813, -15387, -4164, -3968},
+{ -3308, 11214, -13542, 13599, -19473, 13770},
+{ 1817, 854, 21225, -966, -1643, -268},
+{ -2587, -107, -20154, 376, 1174, -304},
+{ -2919, 453, -5390, 750, -22034, -978},
+{ -19012, 16839, 10000, -3580, 2211, 1459},
+{ 1363, -2658, -33, -4067, 1165, -21985},
+{ -8592, -2760, -17520, -15985, 14897, 1323},
+{ 652, -9331, 3253, -14622, 12181, 19692},
+{ -6361, 5773, -15395, 17291, 16590, -2922},
+{ -661, -601, 1609, 22610, 992, -1045},
+{ 4961, 9107, 11225, 7829, 16320, 18627},
+{ -21872, -1433, 138, 1470, -1891, -196},
+{ -19499, -18203, 11056, -516, 2543, -2249},
+{ -1196, -17574, 20150, 11462, -401, 2619},
+{ 4638, -8154, 11891, -15759, 17615, -14955},
+{ -83, 278, 323, 55, -154, 232},
+{ 7788, 1462, 18395, 15296, -15763, -1131},
+};
+static const int16_t vq_hebap5[128][6] = {
+{ -3394, -19730, 2963, 9590, 4660, 19673},
+{ -15665, -6405, 17671, 3860, -8232, -19429},
+{ 4467, 412, -17873, -8037, 691, -17307},
+{ 3580, 2363, 6886, 3763, 6379, -20522},
+{ -17230, -14133, -1396, -23939, 8373, -12537},
+{ -8073, -21469, -15638, 3214, 8105, -5965},
+{ 4343, 5169, 2683, -16822, -5146, -16558},
+{ 6348, -10668, 12995, -25500, -22090, 4091},
+{ -2880, -8366, -5968, -17158, -2638, 23132},
+{ -5095, -14281, -22371, 21741, 3689, 2961},
+{ -2443, -17739, 25155, 2707, 1594, 7},
+{ -18379, 9010, 4270, 731, -426, -640},
+{ -23695, 24732, 5642, 612, -308, -964},
+{ -767, 1268, 225, 1635, 173, 916},
+{ 5455, 6493, 4902, 10560, 23041, -17140},
+{ 17219, -21054, -18716, 4936, -3420, 3357},
+{ -1390, 15488, -21946, -14611, 1339, 542},
+{ -6866, -2254, -12070, -3075, -19981, -20622},
+{ -1803, 11775, 1343, 8917, 693, 24497},
+{ -21610, 9462, 4681, 9254, -7815, 15904},
+{ -5559, -3018, -9169, -1347, -22547, 12868},
+{ -366, 5076, -1727, 20427, -283, -2923},
+{ -1886, -6313, -939, -2081, -1399, 3513},
+{ -3161, -537, -5075, 11268, 19396, 989},
+{ 2345, 4153, 5769, -4273, 233, -399},
+{ -21894, -1138, -16474, 5902, 5488, -3211},
+{ 10007, -12530, 18829, 20932, -1158, 1790},
+{ -1165, 5014, -1199, 6415, -8418, -21038},
+{ 1892, -3534, 3815, -5846, 16427, 20288},
+{ -2664, -11627, -4147, -18311, -22710, 14848},
+{ 17256, 10419, 7764, 12040, 18956, 2525},
+{ -21419, -18685, -10897, 4368, -7051, 4539},
+{ -1574, 2050, 5760, 24756, 15983, 17678},
+{ -538, -22867, 11067, 10301, 385, 528},
+{ -8465, -3025, -16357, -23237, 16491, 3654},
+{ 5840, 575, 11890, 1947, 25157, 6653},
+{ 6625, -3516, -1964, 3850, -390, -116},
+{ 18005, 20900, 14323, -7621, -10922, 11802},
+{ -4857, -2932, -13334, -7815, 21622, 2267},
+{ -579, -9431, -748, -21321, 12367, 8265},
+{ -8317, 1375, -17847, 2921, 9062, 22046},
+{ 18398, 8635, -1503, -2418, -18295, -14734},
+{ -2987, 15129, -3331, 22300, 13878, -13639},
+{ 5874, -19026, 15587, 11350, -20738, 1971},
+{ 1581, -6955, -21440, 2455, 65, 414},
+{ 515, -4468, -665, -4672, 125, -19222},
+{ 21495, -20301, -1872, -1926, -211, -1022},
+{ 5189, -12250, -1775, -23550, -4546, 5813},
+{ 321, -6331, 14646, 6975, -1773, 867},
+{ -13814, 3180, 7927, 444, 19552, 3146},
+{ -6660, 12252, -1972, 17408, -24280, -12956},
+{ -745, 14356, -1107, 23742, -9631, -18344},
+{ 18284, -7909, -7531, 19118, 7721, -12659},
+{ 1926, 15101, -12848, 2153, 21631, 1864},
+{ -2130, 23416, 17056, -15597, -1544, 87},
+{ 8314, -11824, 14581, -20591, 7891, -2099},
+{ 19600, 22814, -17304, -2040, 285, -3863},
+{ -8214, -18322, 10724, -13744, -13469, -1666},
+{ 14351, 4880, -20034, 964, -4221, -180},
+{ -24598, -16635, 19724, 5925, 4777, 4414},
+{ -2495, 23493, -16141, 2918, -1038, -2010},
+{ 18974, -2540, 13343, 1405, -6194, -1136},
+{ 2489, 13670, 22638, -7311, -129, -2792},
+{ -13962, 16775, 23012, 728, 3397, 162},
+{ 3038, 993, 8774, -21969, -6609, 910},
+{ -12444, -22386, -2626, -5295, 19520, 9872},
+{ -1911, -18274, -18506, -14962, 4760, 7119},
+{ 8298, -2978, 25886, 7660, -7897, 1020},
+{ 6132, 15127, 18757, -24370, -6529, -6627},
+{ 7924, 12125, -9459, -23962, 5502, 937},
+{ -17056, -5373, 2522, 327, 1129, -390},
+{ 15774, 19955, -10380, 11172, -3107, 14853},
+{ -11904, -8091, -17928, -22287, -17237, -6803},
+{ -12862, -2172, -6509, 5927, 12458, -22355},
+{ -497, 322, 1038, -6643, -5404, 20311},
+{ 1083, -22984, -8494, 12130, -762, 2623},
+{ 5067, 19712, -1901, -30, -325, 85},
+{ 987, -5830, 4212, -9030, 9121, -25038},
+{ -7868, 7284, -12292, 12914, -21592, 20941},
+{ -1630, -7694, -2187, -8525, -5604, -25196},
+{ -6668, 388, -22535, 1526, 9082, 193},
+{ -7867, -22308, 5163, 362, 944, -259},
+{ 3824, -11850, 7591, -23176, 25342, 23771},
+{ -10504, 4123, -21111, 21173, 22439, -838},
+{ -4723, 21795, 6184, -122, 1642, -717},
+{ 24504, 19887, -2043, 986, 7, -55},
+{ -27313, -135, 2437, 259, 89, 307},
+{ 24446, -3873, -5391, -820, -2387, 361},
+{ 5529, 5784, 18682, 242, -21896, -4003},
+{ 22304, 4483, 722, -12242, 7570, 15448},
+{ 8673, 3009, 20437, 21108, -21100, -3080},
+{ -1132, 2705, -1825, 5420, -785, 18532},
+{ 16932, -13517, -16509, -14858, -20327, -14221},
+{ 2219, 1380, 21474, -1128, 327, 83},
+{ -2177, 21517, -3856, -14180, -204, -2191},
+{ 953, -9426, 15874, -10710, -3231, 21030},
+{ -421, -1377, 640, -8239, -20976, 2174},
+{ 4309, 18514, -9100, -18319, -15518, 3704},
+{ -5943, 449, -8387, 1075, -22210, -4992},
+{ 2953, 12788, 18285, 1430, 14937, 21731},
+{ -2913, 401, -4739, -20105, 1699, -1147},
+{ 3449, 5241, 8853, 22134, -7547, 1451},
+{ -2154, 8584, 18120, -15614, 19319, -5991},
+{ 3501, 2841, 5897, 6397, 8630, 23018},
+{ 2467, 2956, 379, 5703, -22047, -2189},
+{ -16963, -594, 18822, -5295, 1640, 774},
+{ 2896, -1424, 3586, -2292, 19910, -1822},
+{ -18575, 21219, -14001, -12573, 16466, 635},
+{ -1998, -19314, -16527, 12208, -16576, -7854},
+{ -9674, 1012, -21645, 2883, -12712, 2321},
+{ -1005, 471, -3629, 8045, -11087, 25533},
+{ 4141, -21472, -2673, 756, -663, -523},
+{ 6490, 8531, 19289, 18949, 6092, -9347},
+{ 16965, 24599, 14024, 10072, -536, -10438},
+{ -8147, 2145, -23028, -17073, 5451, -4401},
+{ -14873, 20520, -18303, -9717, -11885, -17831},
+{ -2290, -14120, 2070, 22467, 1671, 725},
+{ -8538, 14629, 3521, -20577, 6673, 8200},
+{ 20248, 4410, -1366, -585, 1229, -2449},
+{ 7467, -7148, 13667, -8246, 22392, -17320},
+{ -1932, 3875, -9064, -3812, 958, 265},
+{ -4399, 2959, -15911, 19598, 4954, -1105},
+{ 18009, -9923, -18137, -3862, 11178, 5821},
+{ -14596, -1227, 9660, 21619, 11228, -11721},
+{ -721, -1700, 109, -2142, 61, -6772},
+{ -24619, -22520, 5608, -1957, -1761, -1012},
+{ -23728, -4451, -2688, -14679, -4266, 9919},
+{ 8495, -894, 20438, -13820, -17267, 139},
+};
+static const int16_t vq_hebap6[256][6] = {
+{ 10154, 7365, 16861, 18681, -22893, -3636},
+{ -2619, -3788, -5529, -5192, -9009, -20298},
+{ -5583, -22800, 21297, 7012, 745, 720},
+{ 428, -1459, 109, -3082, 361, -8403},
+{ 8161, 22401, 241, 1755, -874, -2824},
+{ 1140, 12643, 2306, 22263, -25146, -17557},
+{ -2609, 3379, 10337, -19730, -15468, -23944},
+{ -4040, -12796, -25772, 13096, 3905, 1315},
+{ 4624, -23799, 13608, 25317, -1175, 2173},
+{ -97, 13747, -5122, 23255, 4214, -22145},
+{ 6878, -322, 18264, -854, -11916, -733},
+{ 17280, -12669, -9693, 23563, -16240, -1309},
+{ 5802, -4968, 19526, -21194, -24622, -183},
+{ 5851, -16137, 15229, -9496, -1538, 377},
+{ 14096, 25057, 13419, 8290, 23320, 16818},
+{ -7261, 118, -15867, 19097, 9781, -277},
+{ -4288, 21589, -13288, -16259, 16633, -4862},
+{ 4909, -19217, 23411, 14705, -722, 125},
+{ 19462, -4732, -1928, -11527, 20770, 5425},
+{ -27562, -2881, -4331, 384, -2103, 1367},
+{ -266, -9175, 5441, 26333, -1924, 4221},
+{ -2970, -20170, -21816, 5450, -7426, 5344},
+{ -221, -6696, 603, -9140, 1308, -27506},
+{ 9621, -8380, -1967, 9403, -1651, 22817},
+{ 7566, -5250, -4165, 1385, -990, 560},
+{ -1262, 24738, -19057, 10741, 7585, -7098},
+{ 451, 20130, -9949, -6015, -2188, -1458},
+{ 22249, 9380, 9096, 10959, -2365, -3724},
+{ 18668, -650, -1234, 11092, 7678, 5969},
+{ 19207, -1485, -1076, -731, -684, 43},
+{ -4973, 13430, 20139, 60, 476, -935},
+{ -20029, 8710, 2499, 1016, -1158, 335},
+{ -26413, 18598, -2201, -669, 3409, 793},
+{ -4726, 8875, -24607, -9646, 3643, -283},
+{ 13303, -21404, -3691, -1184, -1970, 1612},
+{ 173, 60, 919, 1229, 6942, -665},
+{ 16377, 16991, 5341, -14015, -2304, -20390},
+{ 25334, -10609, 11947, -7653, -6363, 14058},
+{ 23929, -13259, -7226, -937, 234, -187},
+{ 6311, -1877, 12506, -1879, 18751, -23341},
+{ 621, 6445, 3354, -24274, 8406, 5315},
+{ -3297, -5034, -4704, -5080, -25730, 5347},
+{ -1275, -13295, -965, -23318, 1214, 26259},
+{ -6252, 10035, -20105, 15301, -16073, 5136},
+{ 9562, -3911, -19510, 4745, 22270, -4171},
+{ 7978, -19600, 14024, -5745, -20855, 8939},
+{ 7, -4039, 991, -6065, 52, -19423},
+{ 3485, 2969, 7732, 7786, 25312, 6206},
+{ -959, -12812, -1840, -22743, 7324, 10830},
+{ -4686, 1678, -10172, -5205, 4294, -1271},
+{ 3889, 1302, 7450, 638, 20374, -3133},
+{ -12496, -9123, 18463, -12343, -7238, 18552},
+{ -6185, 8649, -6903, -895, 17109, 16604},
+{ -9896, 28579, 2845, 1640, 2925, -298},
+{ 14968, -25988, 14878, -24012, 1815, -6474},
+{ 26107, 5166, 21225, 15873, 21617, 14825},
+{ -21684, 16438, 20504, -14346, -7114, -4162},
+{ 28647, 90, -1572, 789, -902, -75},
+{ -1479, 2471, -4061, 3612, -2240, 10914},
+{ 8616, 17491, 17255, -17456, 17022, -16357},
+{ -20722, -18597, 25274, 17720, -3573, 1695},
+{ -997, 6129, -6303, 11250, -11359, -19739},
+{ -74, -4001, -1584, 13384, 162, -144},
+{ -529, 21068, 7923, -11396, 422, -26},
+{ 7102, -13531, -20055, 2629, -178, -429},
+{ 9201, 1368, -22238, 2623, -20499, 24889},
+{ -432, 6675, -266, 8723, 80, 28024},
+{ 19493, -3108, -9261, 1910, -21777, 5345},
+{ 14079, -11489, 12604, 6079, 19877, 1315},
+{ 10947, 9837, -18612, 15742, 4792, 605},
+{ -1777, 3758, -4087, 21696, 6024, -576},
+{ 3567, -3578, 16379, 2680, -1752, 716},
+{ -5049, -1399, -4550, -652, -17721, -3366},
+{ -3635, -4372, -6522, -22152, 7382, 1458},
+{ 12242, 19190, 5646, -7815, -20289, 21344},
+{ -7508, 19952, 23542, -9753, 5669, -1990},
+{ -2275, 15438, 10907, -17879, 6497, 13582},
+{ -15894, -15646, -4716, 6019, 24250, -6179},
+{ -2049, -6856, -1208, 918, 17735, -69},
+{ -3721, 9099, -16065, -23621, 5981, -2344},
+{ 7862, -8918, 24033, 25508, -11033, -741},
+{ -12588, 19468, 14649, 15451, -21226, 1171},
+{ 2102, 1147, 2789, 4096, 2179, 8750},
+{ -18214, -17758, -10366, -5203, -1066, -3541},
+{ -2819, -19958, -11921, 6032, 8315, 10374},
+{ -9078, -2100, 19431, -17, 732, -689},
+{ -14512, -19224, -7095, 18727, 1870, 22906},
+{ 3912, 659, 25597, -4006, 9619, 877},
+{ 2616, 22695, -5770, 17920, 3812, 20220},
+{ 2561, 26847, -5245, -10908, 2256, -517},
+{ -4974, 198, -21983, -3608, 22174, -18924},
+{ 21308, -1211, 19144, 16691, -1588, 11390},
+{ -1790, 3959, -3488, 7003, -7107, 20877},
+{ -6108, -17955, -18722, 24763, 16508, 3211},
+{ 20462, -24987, -20361, 4484, -5111, -478},
+{ -6378, -1998, -10229, -561, -22039, -22339},
+{ 3047, -18850, 7586, 14743, -19862, 6351},
+{ -5047, 1405, -9672, 1055, -21881, 11170},
+{ 3481, -9699, 6526, -16655, 22813, 21907},
+{ -18570, 17501, 14664, 1291, 5026, 19676},
+{ 16134, -19810, -16956, -17939, -16933, 5800},
+{ -8224, 4908, 8935, 2272, -1140, -23217},
+{ 1572, 2753, -1598, 2143, -3346, -21926},
+{ -9832, -1060, -27818, 1214, 7289, 150},
+{ 98, 1538, 535, 17429, -23198, -901},
+{ 21340, -20146, 3297, -1744, -8207, -21462},
+{ -4166, -4633, -17902, 5478, 1285, 136},
+{ 18713, 21003, 24818, 11421, 1282, -4618},
+{ -3535, 7636, -265, 2141, -829, -2035},
+{ -3184, 19713, 2775, -2, 1090, 104},
+{ -6771, -20185, 2938, -2125, -36, 1268},
+{ 9560, 9430, 9586, 22100, 13827, 6296},
+{ -535, -20018, 4276, -1868, -448, -17183},
+{ -24352, 14244, -13647, -21040, 2271, 11555},
+{ -2646, 15437, -4589, 18638, -4299, -622},
+{ -20064, 4169, 18115, -1404, 13722, -1825},
+{ -16359, 9080, 744, 22021, 125, 10794},
+{ 9644, -14607, -18479, -14714, 11174, -20754},
+{ -326, -23762, 6144, 7909, 602, 1540},
+{ -6650, 6634, -12683, 21396, 20785, -6839},
+{ 4252, -21043, 5628, 18687, 23860, 8328},
+{ 17986, 5704, -5245, -18093, -555, 3219},
+{ 6091, 14232, -5117, -17456, -19452, -11649},
+{ -21586, 11302, 15434, 25590, 6777, -26683},
+{ 21355, -8244, 5877, -3540, 6079, -2567},
+{ 2603, -2455, 5421, -12286, -19100, 5574},
+{ -1721, -26393, -23664, 22904, -349, 3787},
+{ 2189, -1203, 5340, 3249, -22617, 104},
+{ -1664, -11020, -2857, -20723, -24049, 19900},
+{ 22873, -7345, -18481, -14616, -8400, -12965},
+{ 3777, 3958, 8239, 20494, -6991, -1201},
+{ -160, -1613, -793, -8681, 573, 776},
+{ 4297, -3786, 20373, 6082, -5321, -18400},
+{ 18745, 2463, 12546, -7749, -7734, -2183},
+{ 11074, -4720, 22119, 1825, -24351, 4080},
+{ 1503, -19178, -1569, 13, -313, 375},
+{ 318, -575, 2544, 178, 102, 40},
+{ -15996, -26897, 5008, 3320, 686, 1159},
+{ 25755, 26886, 574, -5930, -3916, 1407},
+{ -9148, -7665, -2875, -8384, -18663, 26400},
+{ -7445, -18040, -18396, 8802, -2252, -21886},
+{ 7851, 11773, 27485, -12847, -1410, 19590},
+{ 2240, 5947, 11247, 15980, -6499, 24280},
+{ 21673, -18515, 9771, 6550, -2730, 334},
+{ -4149, 1576, -11010, 89, -24429, -5710},
+{ 7720, 1478, 21412, -25025, -8385, 9},
+{ -2448, 10218, -12756, -16079, 1161, -21284},
+{ -8757, -14429, -22918, -14812, 2629, 13844},
+{ -7252, 2843, -9639, 2882, -14625, 24497},
+{ -674, -6530, 414, -23333, -21343, 454},
+{ 2104, -6312, 10887, 18087, -1199, 175},
+{ -493, -562, -2739, 118, -1074, 93},
+{ -10011, -4075, -28071, 22180, 15077, -636},
+{ -4637, -16408, -9003, -20418, -11608, -20932},
+{ 4815, 15892, 24238, -13634, -3074, -1059},
+{ -6724, 4610, -18772, -15283, -16685, 23988},
+{ 15349, -674, -3682, 21679, 4475, -12088},
+{ 4756, 2593, 5354, 6001, 15063, 26490},
+{ -23815, -17251, 6944, 378, 694, 670},
+{ 23392, -8839, -14713, 7544, -876, 11088},
+{ 3640, 3336, 22593, -3495, -2328, -113},
+{ 284, 6914, 3097, 10171, 6638, -18621},
+{ 2472, 5976, 11054, -11936, -603, -663},
+{ 16175, 16441, 13164, -4043, 4667, 7431},
+{ 19338, 15534, -6533, 1681, -4857, 17048},
+{ 17027, 532, -19064, -1441, -5130, 1085},
+{ -12617, -17609, 2062, -25332, 19009, -16121},
+{ 10056, -21000, -13634, -2949, 15367, 19934},
+{ -648, -1605, 10046, -1592, 13296, 19808},
+{ -1054, 10744, 538, 24938, 9630, -9052},
+{ -10099, 3042, -25076, -24052, 13971, 100},
+{ 6547, 6907, 7031, 10348, 23775, -17886},
+{ -22793, -1984, -1393, -3330, 9267, 14317},
+{ -14346, -3967, 3042, 16254, -17303, 9646},
+{ -21393, 23628, 16773, 716, 2663, 114},
+{ -19016, -3038, 1574, -245, 1463, -793},
+{ 22410, 23441, -14637, -530, 17310, 13617},
+{ -11582, 7935, -13954, 23465, -24628, 26550},
+{ -1045, 3679, -2218, 10572, 20999, -3702},
+{ -15513, 197, 16718, -24603, 4945, 5},
+{ 10781, 4335, 26790, -9059, -16152, -2840},
+{ 16075, -24100, -3933, -6833, 12645, -7029},
+{ 2096, -25572, -8370, 6814, 11, 1178},
+{ -11848, -583, -8889, -20543, -10471, -380},
+{ -2487, 24777, -21639, -19341, 1660, -732},
+{ 2313, 13679, 4085, 24549, 24691, -21179},
+{ -2366, -504, -4130, -10570, 23668, 1961},
+{ 20379, 17809, -9506, 3733, -18954, -6292},
+{ -3856, 16802, -929, -20310, -17739, 6797},
+{ 12431, 6078, -11272, -14450, 6913, 23476},
+{ 7636, -1655, 23017, 10719, -8292, 838},
+{ -8559, -1235, -18096, 3897, 16093, 1490},
+{ -3586, 8276, 15165, -3791, -21149, 1741},
+{ -4497, 21739, 2366, -278, -4792, 15549},
+{ -23122, -13708, 7668, 16232, 24120, 15025},
+{ -20043, 12821, -20160, 16691, -11655, -16081},
+{ -12601, 20239, 3496, -2549, -6745, -11850},
+{ 4441, 7812, 20783, 17080, 11523, -9643},
+{ 24766, 8494, -23298, -3262, 11101, -7120},
+{ -10107, -7623, -22152, -18303, 26645, 9550},
+{ -25549, 477, 7874, -1538, 1123, -168},
+{ 470, 9834, -347, 23945, -10381, -9467},
+{ -4096, -9702, -6856, -21544, 20845, 7174},
+{ 5370, 9748, -23765, -1190, 512, -1538},
+{ -1006, -10046, -12649, 19234, -1790, -890},
+{ 15108, 23620, -15646, -2522, -1203, -1325},
+{ -7406, -2605, 1095, -247, -473, 177},
+{ 8089, 4, 12424, -22284, 10405, -7728},
+{ 22196, 10775, -5043, 690, 534, -212},
+{ -3153, -1418, -16835, 18426, 15821, 22956},
+{ 5681, -2229, 3196, -3414, -21817, -14807},
+{ 19, 787, 1032, 170, -8295, -645},
+{ -882, -2319, -27105, 432, -4392, 1499},
+{ -1354, -11819, -76, -20380, -10293, 11328},
+{ 211, -4753, -4675, -6933, -13538, 14479},
+{ 6043, 5260, -459, -462, 143, -65},
+{ -2572, 7256, -3317, 9212, -23184, -9990},
+{ -24882, -9532, 18874, 6101, 2429, -14482},
+{ 8314, 2277, 14192, 3512, 25881, 22000},
+{ 208, 20218, -281, -24778, -63, -1183},
+{ 1095, -6034, 2706, -21935, -2655, 563},
+{ 23, -5930, 243, -8989, 5345, 20558},
+{ -15466, 12699, 4160, 11087, 20621, -10416},
+{ 20995, -85, -8468, 194, 1003, -9515},
+{ -19637, -3335, -14081, 3574, -23381, -667},
+{ -2076, 3489, -3192, -19367, 539, -1530},
+{ 7352, -15213, 22596, 19369, 1043, 16627},
+{ -1872, -413, 1235, -5276, -3550, 21903},
+{ 7931, -2008, 16968, -6799, 29393, -2475},
+{ -13589, 8389, -23636, -22091, -14178, -14297},
+{ -11575, -20090, 16056, -1848, 15721, 4500},
+{ 3849, -16581, 20161, -21155, 7778, 11864},
+{ -6547, -1273, -18837, -11218, 11636, 1044},
+{ 2528, -6691, -17917, -11362, -4894, -1008},
+{ 1241, 4260, 2319, 6111, 3485, 20209},
+{ 3014, -3048, 5316, -4539, 20831, 8702},
+{ -1790, -14683, 278, 13956, -10065, -10547},
+{ -22732, -7957, -1154, 13821, -1484, -1247},
+{ -7317, -615, 13094, 18927, 9897, 1452},
+{ 2552, -2338, 3424, -4630, 11124, -19584},
+{ -11125, -20553, -10855, -10783, -20767, 6833},
+{ 984, -15095, 5775, 25125, 5377, -19799},
+{ 517, 13272, -7458, -1711, 20612, -6013},
+{ -21417, 13251, -20795, 13449, 17281, 13104},
+{ -15811, -16248, 23093, -4037, -8195, 871},
+{ 582, 12571, -21129, -14766, -9187, 5685},
+{ 4318, -1776, 11425, -17763, -9921, 577},
+{ 6013, 16830, 17655, -25766, -4400, -3550},
+{ -13744, -16541, 3636, -3330, -21091, -15886},
+{ 6565, -11147, 8649, -13114, 23345, -13565},
+{ -2542, -9046, -7558, 29240, 3701, -383},
+{ -10612, 24995, 1893, -8210, 20920, -16210},
+{ 5276, 16726, 10659, 19940, -4799, -19324},
+{ -532, -9300, 27856, 4965, -241, 536},
+{ -765, -20706, -3412, 18870, 2765, 1420},
+{ -3059, 2708, -19022, -331, 3537, 116},
+};
+static const int16_t vq_hebap7[512][6] = {
+{ -21173, 21893, 10390, 13646, 10718, -9177},
+{ -22519, -8193, 18328, -6629, 25518, -10848},
+{ 6800, -13758, -13278, 22418, 14667, -20938},
+{ 2347, 10516, 1125, -3455, 5569, 27136},
+{ -6617, 11851, -24524, 22937, 20362, -6019},
+{ -21768, 10681, -19615, -15021, -8478, -2081},
+{ -2745, 8684, -4895, 27739, 7554, -11961},
+{ -1020, 2460, -954, 4754, -627, -16368},
+{ -19702, 23097, 75, -13684, -2644, 2108},
+{ 4049, -2872, 5851, -4459, 22150, 12560},
+{ -21304, -17129, -730, 7419, -11658, -10523},
+{ 11332, 1792, 26666, 23518, -19561, -491},
+{ -17827, -16777, -13606, -14389, -22029, -2464},
+{ 1091, -5967, -7975, -16977, -20432, -21931},
+{ 18388, -1103, 1933, 13342, -17463, 18114},
+{ 22646, 17345, -9966, 17919, 18274, 698},
+{ 1484, 20297, -5754, -26515, 4941, -22263},
+{ -2603, 4587, -5842, 18464, 8767, -2568},
+{ -2797, -1602, 21713, 3099, -25683, 3224},
+{ -19027, 4693, -5007, 6060, 1972, -15095},
+{ -2189, 9516, -530, 20669, -4662, -8301},
+{ -22325, -8887, 2529, -11352, 5476, 998},
+{ 22100, -5052, 1651, -2657, 4615, 2319},
+{ 20855, -3078, -3330, 4105, 13470, 3069},
+{ 85, 17289, 10264, -14752, 214, 90},
+{ -26365, -18849, -19352, 19244, -10218, 9909},
+{ -9739, 20497, -6579, -6983, 2891, -738},
+{ 20575, -15860, -22913, 6870, 76, 327},
+{ 8744, -12877, -22945, -2372, -19424, -9771},
+{ -12886, 16183, 21084, 3821, 749, -13792},
+{ -15995, 18399, 2391, -17661, 19484, -6018},
+{ 1423, 11734, 4051, 19290, 6857, -19681},
+{ -5200, 9766, 18246, 2463, 18764, -4852},
+{ -597, 19498, 1323, -9096, -308, -1104},
+{ -3099, -25731, -15665, 25332, 4634, 2635},
+{ 19623, -2384, -7913, 11796, -9333, -14084},
+{ 2642, 26453, -21091, -10354, -1693, -1711},
+{ 22031, 21625, 11580, -22915, -4141, 129},
+{ -6122, 3542, 915, -261, -17, -383},
+{ 1696, 6704, -1425, 20838, 857, -4416},
+{ 1423, -15280, -8550, -9667, 5210, 5687},
+{ -4520, -613, -11683, 5618, 4230, 619},
+{ 937, -4963, -14102, -17104, -6906, -5952},
+{ -15068, -481, -7237, -14894, 18876, 21673},
+{ -25658, 2910, 1143, -327, -458, -995},
+{ -9656, -819, -24900, 2804, 20225, 1083},
+{ -1111, -3682, -1788, -19492, 966, 821},
+{ 7293, -21759, 10790, -7059, -23293, -1723},
+{ -282, -11093, 170, -20950, -28926, 12615},
+{ 17938, 3713, -1563, 885, 5, 564},
+{ 6116, 22696, 2242, -6951, 9975, -6132},
+{ 4338, 26808, -3705, 1976, -1079, -2570},
+{ -661, -7901, -2668, -15194, 17722, 4375},
+{ -4174, -11053, 717, -22506, 1562, 12252},
+{ -6405, 18334, 6103, 6983, 5956, 18195},
+{ 9851, 5370, 23604, -6861, -6569, -62},
+{ 21964, 13359, -683, 3785, 2168, 209},
+{ -3569, -1127, -19724, -1544, 1308, -803},
+{ -3083, 16049, -13791, -3077, 4294, 23713},
+{ -9999, 9943, -15872, 12934, -23631, 21699},
+{ 9722, 22837, 12192, 15091, 5533, 4837},
+{ 2243, 2099, 1243, 4089, 4748, 12956},
+{ 4007, -2468, 3353, -3092, 8843, 17024},
+{ 4330, 6127, 5549, 9249, 11226, 28592},
+{ -9586, -8825, 236, 1009, 455, -964},
+{ 6829, 19290, -1018, 200, 1821, 578},
+{ 5196, 957, 10372, 3330, -12800, -127},
+{ -3022, -8193, -14557, 22061, 5920, 1053},
+{ 10982, 25942, -24546, -23278, -11905, -6789},
+{ 22667, -11010, 5736, 2567, 23705, -10253},
+{ -3343, -4233, -5458, 20667, -10843, -3605},
+{ -4131, -3612, 4575, -829, -350, -847},
+{ -3303, 3451, -7398, -11604, 3023, 455},
+{ 3200, -9547, 3202, -22893, 11184, -26466},
+{ -14093, -4117, 15382, 14295, -10915, -20377},
+{ 3807, -11016, 22052, 14370, -15328, -7733},
+{ -6291, -17719, -1560, 12048, -19805, -443},
+{ -6147, -4234, -160, 8363, 22638, 11911},
+{ 19197, 1175, 7422, -9875, -4136, 4704},
+{ -72, -7652, -112, -11955, -3230, 27175},
+{ 3274, 5963, 7501, -17019, 866, -25452},
+{ 737, 1861, 1833, 2022, 2384, 4755},
+{ -5217, 7512, 3323, 2715, 3065, -1606},
+{ 4247, 565, 5629, 2497, 18019, -4920},
+{ -2833, -17920, -8062, 15738, -1018, 2136},
+{ 3050, -19483, 16930, 29835, -10222, 15153},
+{ -11346, 118, -25796, -13761, 15320, -468},
+{ -4824, 4960, -4263, 1575, -10593, 19561},
+{ -8203, -1409, -763, -1139, -607, 1408},
+{ -2203, -11415, 2021, -6388, -2600, 711},
+{ -413, -2511, -216, -3519, -28267, 1719},
+{ -14446, 17050, 13917, 13499, -25762, -16121},
+{ 19228, 7341, -12301, 682, -3791, -199},
+{ -4193, 20746, -15651, 11349, 5860, -824},
+{ -21490, -3546, -3, -1705, -3959, 9213},
+{ 15445, -1876, 2012, -19627, 16228, -4845},
+{ -2867, -3733, -7354, -175, -20119, 11174},
+{ -3571, -24587, 19700, 6654, 979, -654},
+{ 21820, -7430, -6639, -10767, -8362, 15543},
+{ 14827, 17977, -7204, -3409, 1906, -17288},
+{ 3525, -3947, -1415, -2798, 17648, 2082},
+{ -6580, -15255, -17913, 1337, 15338, 21158},
+{ 6210, 9698, 15155, -24666, -22507, -3999},
+{ -1740, -593, 1095, -7779, 25058, 5601},
+{ 21415, -432, -1658, -6898, -1438, -14454},
+{ -6943, 700, -12139, -745, -24187, 22466},
+{ 6287, 3283, 11006, 3844, 19184, 14781},
+{ -22502, 15274, 5443, -2808, -970, -3343},
+{ 3257, -3708, 4744, -8301, 22814, -10208},
+{ 24346, -20970, 19846, 987, -11958, -6277},
+{ 3906, -19701, 13060, -1609, 18641, 7466},
+{ -26409, -22549, 16305, 2014, 10975, 18032},
+{ -7039, 4655, -14818, 18739, 15789, 1296},
+{ 9310, -1681, 14667, -3326, 26535, -11853},
+{ 5728, 5917, 13400, 10020, -2236, -24704},
+{ 1741, -6727, 12695, -22009, 4080, 5450},
+{ -2621, 9393, 21143, -25938, -3162, -2529},
+{ 20672, 18894, -13939, 6990, -8260, 15811},
+{ -23818, 11183, -13639, 11868, 16045, 2630},
+{ 18361, -10220, 829, 856, -1010, 157},
+{ 14400, -4678, 5153, -13290, -27434, -11028},
+{ 21613, 11256, 17453, 7604, 13130, -484},
+{ 7, 1236, 573, 4214, 5576, -3081},
+{ 916, -9092, 1285, -8958, 1185, -28699},
+{ 21587, 23695, 19116, -2885, -14282, -8438},
+{ 23414, -6161, 12978, 3061, -9351, 2236},
+{ -3070, -7344, -20140, 5788, 582, -551},
+{ -3993, 315, -7773, 8224, -28082, -12465},
+{ 13766, -15357, 19205, -20624, 13043, -19247},
+{ 3777, -177, 8029, -1001, 17812, 5162},
+{ -7308, -4327, -18096, -620, -1350, 14932},
+{ 14756, -1221, -12819, -14922, -547, 27125},
+{ 2234, 1708, 2764, 5416, 7986, -25163},
+{ 2873, 3636, 3992, 5344, 10142, 21259},
+{ 1158, 5379, 508, -10514, 290, -1615},
+{ 1114, 24789, 16575, -25168, -298, -2832},
+{ -1107, -6144, -1918, -7791, -2971, -23276},
+{ 4016, 10793, 17317, -4342, -20982, -3383},
+{ -4494, -207, -9951, -3575, 7947, 1154},
+{ -7576, 8117, -14047, 16982, -26457, -27540},
+{ -15164, 16096, -16844, -8886, -23720, 15906},
+{ 24922, 5680, -1874, 420, 132, 117},
+{ -506, -19310, -198, 412, -311, 752},
+{ -1906, 3981, -7688, 16566, -19291, -14722},
+{ -399, -729, -3807, -4196, -12395, 7639},
+{ 3368, 2330, 9092, 23686, -10290, -1705},
+{ -3148, 2596, -7986, 14602, -4807, 16627},
+{ 8057, 1481, 49, 17205, 24869, 7474},
+{ -19304, -513, 11905, 2346, 5588, 3365},
+{ -5063, -21812, 11370, 10896, 4881, 261},
+{ 4794, 20577, 5109, -6025, -8049, -1521},
+{ 8125, -14756, 20639, -14918, 23941, -3650},
+{ 12451, 1381, 3613, 8687, -24002, 4848},
+{ 6726, 10643, 10086, 25217, -25159, -1065},
+{ 6561, 13977, 2911, 21737, 16465, -26050},
+{ -1776, 2575, -19606, -16800, 3032, 6679},
+{ 15012, -17910, -8438, -21554, -27111, 11808},
+{ 3448, -924, -15913, -1135, 5126, -20613},
+{ 7720, 2226, 17463, 5434, 28942, 17552},
+{ 1246, 15614, -11743, 24618, -17539, 3272},
+{ 3215, 17950, 2783, -722, -22672, 5979},
+{ -5678, -3184, -26087, 26034, 6583, 3302},
+{ 20310, -3555, -2715, -444, -1487, 1526},
+{ -20640, -21970, -12207, -25793, 8863, -1036},
+{ 17888, 570, -16102, 8329, -2553, 15275},
+{ -2677, 9950, -1879, 16477, -12762, -29007},
+{ -120, -2221, 219, 97, 365, 35},
+{ 1270, -718, 1480, -2689, 1930, -7527},
+{ 1896, 8750, 1906, 18235, -12692, -6174},
+{ -3733, 13713, -9882, -15960, -1376, -7146},
+{ -10600, 8496, 15967, -8792, 7532, 20439},
+{ 3041, -13457, 1032, -26952, 5787, 24984},
+{ -4590, -8220, -9322, -6112, -17243, 25745},
+{ -17808, 6970, 3752, 626, -114, 2178},
+{ 4449, -4862, 7054, -5404, 4738, -2827},
+{ 4922, -651, 18939, -9866, 848, 1886},
+{ -336, -5410, 7234, 20444, -9583, -600},
+{ 781, -19474, -12648, 6634, 1414, 450},
+{ -3399, -16770, 11107, 13200, -5498, 21663},
+{ -3265, 4859, -5961, 7530, -10837, 28086},
+{ 10350, -12901, 25699, 25640, -639, 351},
+{ 1163, 18763, -5466, -15087, -145, -1377},
+{ -14477, 27229, -31383, -32653, 21439, -2894},
+{ 15420, 18823, 22128, 19398, 22583, 13587},
+{ -10674, 10710, 5089, -4756, 909, -20760},
+{ -12948, -20660, 7410, 2722, 3427, 11585},
+{ -1105, 18374, 19731, -9650, 22442, 19634},
+{ -296, -6798, -14677, 21603, 19796, 21399},
+{ -19350, -7501, 25446, 13144, 8588, -25298},
+{ 3092, -10618, 20896, 9249, -3326, 1796},
+{ -811, 1449, 3106, 4748, 12073, -14262},
+{ -20720, 14275, -4332, -25838, -5781, -21149},
+{ -5132, 10554, -14020, -22150, 2840, -554},
+{ 25533, 17648, 14886, -21074, 2459, 25142},
+{ -9370, -1788, -12862, -5870, -25811, -11023},
+{ 6698, 819, 10313, 166, 27581, 523},
+{ 101, -19388, 3413, 9638, 64, 806},
+{ -2742, -17931, -2576, 22818, 8553, 1126},
+{ 2972, 15203, 1792, 25434, -5728, -17265},
+{ -1419, 1604, 4398, 11452, 1731, 23787},
+{ -5136, 4625, -10653, 27981, 9897, -2510},
+{ -10528, -28033, 2999, -1530, -832, -830},
+{ -11133, -12511, 22206, -7243, -23578, -21698},
+{ 16935, -21892, 1861, -9606, 9432, 19026},
+{ 10277, 9516, 26815, 2010, -4943, -9080},
+{ 5547, -2210, 14270, -15300, -19316, 1822},
+{ -4850, -783, -8959, -3076, -20056, -3197},
+{ 8232, -2794, -17752, 13308, 3229, -991},
+{ -12237, -6581, 10315, -9552, 2260, -20648},
+{ -7000, 5529, -7553, -7490, -10342, -10266},
+{ 3641, 19479, -5972, -19097, -18570, 12805},
+{ 1283, -4164, 4198, -28473, -2498, 1866},
+{ 16047, 26826, -13053, -6316, 985, -1597},
+{ -403, 13680, 6457, 25070, 27124, -20710},
+{ -18070, -1790, -24986, 5953, -954, 26600},
+{ -24224, -15383, 24788, 1953, -1136, 187},
+{ -2289, 12505, -20738, -904, 18324, 21258},
+{ 2658, -6140, 16179, 22276, -556, 2154},
+{ -6087, 13950, -25682, -27713, 4049, -4795},
+{ -21452, 26473, 19435, -9124, 895, 303},
+{ -22200, -26177, -6026, 24729, -22926, -9030},
+{ -14276, -15982, 23732, -22851, 9268, -3841},
+{ 29482, 21923, -6213, 1679, -2059, -1120},
+{ -435, 9802, -3891, 12359, -4288, -18971},
+{ 19768, -86, 2467, 1990, -1021, -5354},
+{ 20986, -8783, -5329, -23562, -4730, 2673},
+{ -5095, 5605, -4629, 19150, 26037, -12259},
+{ 972, 6858, 4551, 27949, -4025, -2272},
+{ 6075, -3260, -4989, -373, -1571, -3730},
+{ -7256, -12992, -8820, -5109, 23054, 5054},
+{ 920, 2615, 7912, -7353, -4905, 20186},
+{ -250, 5454, 3140, 6928, -18723, -2051},
+{ -10299, -4372, 19608, 4879, -661, -1885},
+{ 14816, -8603, -19815, 6135, -21210, 14108},
+{ -11945, -2223, 5018, 11892, 22741, 406},
+{ -13184, -2613, -13256, -22433, -12482, -8380},
+{ 17066, 25267, -2273, 5056, -342, 145},
+{ 8401, -17683, 19112, 10615, -19453, 17083},
+{ 20821, -5700, 12298, -25598, 10391, 7692},
+{ 4550, 15779, 17338, -19379, -4768, 1206},
+{ -7723, 10836, -27164, -11439, 6835, -1776},
+{ 2542, 3199, 4442, 17513, -3711, -914},
+{ 20960, -16774, -5814, 11087, -70, 22961},
+{ 3305, 2919, 6256, -4800, -20966, -3230},
+{ 5924, -16547, 2183, 2733, 3446, -23306},
+{ -6061, -194, -13852, -10971, 19488, 1029},
+{ 4467, -5964, -19004, 1519, -359, 855},
+{ -1581, -7607, 22070, -11580, -10032, 17102},
+{ -12412, 2553, 4324, 22500, 5751, 12170},
+{ -25127, 17996, -6384, 1180, 1182, 9622},
+{ 23462, -8471, -4392, -2669, 7638, -16835},
+{ -5511, -2887, -10757, -20883, 7246, 1053},
+{ 2703, -20602, -7554, 7516, -7740, 5868},
+{ 20670, 21901, 457, 14969, -17657, -11921},
+{ 3603, -1595, -2177, -157, -43, 605},
+{ 2513, 8954, 10527, 22559, -16100, -16041},
+{ 6002, 4951, 6795, -4862, -22400, 18849},
+{ 7590, -1693, -24688, -3404, 14169, 1214},
+{ -4398, -6663, -6870, -10083, -24596, 9253},
+{ 10468, 17751, -7748, 147, -6314, 4419},
+{ 16187, -16557, -4119, 4302, 7625, 5409},
+{ 3303, 2735, 7458, -19902, -2254, -3702},
+{ -2077, 21609, 14870, 12545, -6081, -1764},
+{ 4678, 11740, 2859, 6953, 1919, -3871},
+{ 3522, -21853, -2469, -10453, 18893, -10742},
+{ 3759, -10191, -4866, -2659, -17831, -1242},
+{ 14991, 9351, 11870, -1573, -4848, 22549},
+{ 9509, -27152, 10734, 20851, -26185, -17878},
+{ -7170, -1392, -19495, 12746, 8198, -1988},
+{ 1883, 28158, -846, -7235, 249, 233},
+{ -7200, 669, -371, -2948, 23234, -5635},
+{ 3141, 288, 3223, -1258, -98, -27607},
+{ 17373, -23235, 5110, -11199, -2574, -11487},
+{ -4928, 1518, -5456, 670, -18278, 1951},
+{ 10334, -19865, -4649, 361, -160, -923},
+{ 18732, 14264, -3155, -7485, -3328, 5959},
+{ -3614, 21077, 7276, 3536, 8121, -1528},
+{ -8422, 500, -19182, 18929, 26392, -1039},
+{ 15639, 25668, 8375, 1903, 1945, -11979},
+{ -2716, 3389, 26850, -4587, 1803, 22},
+{ 1177, -655, 1233, -2128, 7844, 1767},
+{ -761, 8209, -19290, -4593, 1923, -343},
+{ -689, -3530, -3267, -3804, -2753, 18566},
+{ -2110, 1962, -1353, 16643, 2765, -23102},
+{ -433, 4905, 302, 13016, 15933, -5905},
+{ 3203, 4126, 11181, -5496, -2529, -1160},
+{ -1091, -6469, -1415, 5682, -268, 583},
+{ -9405, -19572, 6216, 1658, 993, -75},
+{ -1695, -4504, -2289, -4088, -6556, -16577},
+{ 4760, -892, -10902, 6516, 24199, -6011},
+{ -253, 1000, 63, -81, -115, -382},
+{ -1333, 24224, -698, -4667, -2801, -19144},
+{ -876, -28866, -21873, 12677, -6344, 3235},
+{ 16847, 21145, -26172, -3183, -396, 230},
+{ 18296, -7790, -12857, -679, -1473, 5},
+{ -10488, 11429, 25805, -1122, 1401, -438},
+{ 3782, -7429, 26720, 17567, 19257, 12542},
+{ 6332, -746, 12789, 9316, -22542, -5354},
+{ 3418, -22728, 26978, 18303, 1076, 956},
+{ -27315, -2988, 920, 235, 2233, 81},
+{ 6199, 5296, 16093, 14768, -8429, -1112},
+{ -6432, 19244, 9921, -3253, 1278, -954},
+{ 24213, 2049, -22931, 2585, -2410, -4216},
+{ 9286, 14282, -19735, -3985, -2344, 1028},
+{ -20128, 17993, -9458, 23012, -16983, 8625},
+{ -6896, -20730, 3762, 17415, 22341, 19024},
+{ 842, 24181, 25062, -5839, -78, 937},
+{ -621, 19722, -24204, -1962, -14854, -56},
+{ 22766, -5119, 17365, 23868, -19480, -6558},
+{ -2158, 17490, -21435, 3340, -12819, -20295},
+{ -9621, 17325, 715, 2265, -4123, -492},
+{ 9156, 12947, 27303, -21175, -6072, -9457},
+{ -13164, -23269, -14006, -4184, 6978, 2},
+{ 938, -13381, 3520, -24297, 22902, 19589},
+{ -4911, -19774, 19764, -9310, -12650, 3819},
+{ -5462, -4249, -6987, -6260, -13943, -25150},
+{ 9341, 10369, -13862, -6704, 22556, -519},
+{ 6651, 18768, -4855, 12570, 14730, -10209},
+{ -823, 18119, 398, -1582, -116, -363},
+{ -6935, -12694, -28392, 8552, 6961, -239},
+{ -2602, -4704, -1021, 2015, 5129, 23670},
+{ -12559, -8190, -25028, 18544, 14179, 1663},
+{ 3813, 21036, -9620, -5051, -1800, -1087},
+{ -22057, 16675, 14960, 9459, 2786, 16991},
+{ -26040, -19318, -6414, 1104, 5798, -18039},
+{ -1737, 24825, 10417, -11087, 896, -5273},
+{ -1855, 11661, -2803, 24809, -21435, -19792},
+{ -23473, -16729, -5782, 5643, 2636, 4940},
+{ -1724, 4388, -26673, -13695, 10570, -25895},
+{ 15358, -19496, 26242, -18493, 1736, 8054},
+{ 5684, 20890, 4091, -19100, -14588, -10468},
+{ 17260, -16291, 14859, -17711, -19174, 12435},
+{ -27185, -12573, 6743, -562, 976, -257},
+{ 12395, -8618, -22248, -19843, 11013, 7762},
+{ 3799, 11853, -27622, -8473, 1089, -1495},
+{ 4141, -2182, -26720, -735, -774, 1469},
+{ 3125, 13762, 4606, 29257, 18771, -9958},
+{ -17465, -9445, -17562, -2530, -6435, -3726},
+{ -1742, 4351, -6841, -19773, 9627, -10654},
+{ 7251, 3525, 10835, 5601, 25198, -23348},
+{ -10300, -17830, 631, 11640, 2044, -20878},
+{ -873, -8502, -1063, -15674, -10693, 14934},
+{ -15957, 28137, 5268, 477, -1053, 1158},
+{ -1495, -8814, -5764, -24965, 25988, 7907},
+{ -1038, -114, -2308, -1319, -6480, 1472},
+{ 4895, -17897, -25850, 5301, -188, 1581},
+{ 3200, 17225, 4346, 22101, -18543, 22028},
+{ -10250, 545, -10932, 2276, -28070, 8118},
+{ 15343, 2329, 9316, 20537, 14908, 21021},
+{ 6329, 6130, -24508, 837, -8637, -5844},
+{ 7386, -501, 10503, 20131, 11435, -4755},
+{ -2745, 24174, -9274, 15273, -8389, -5835},
+{ 2992, -2864, 6048, -7473, 11687, -19996},
+{ -883, -11954, -9976, -21829, -4436, -27178},
+{ 3458, 19626, 1280, 2597, 19849, 5255},
+{ -5315, 19133, -14518, -8946, 13749, -1352},
+{ 18642, 17655, 11001, 6817, -18418, 6336},
+{ -1697, 2244, -4640, 3948, -12890, -5273},
+{ 20428, 10542, 4170, -1012, 19439, 21691},
+{ -2943, -19735, -4208, 1320, 909, -8897},
+{ 9351, -8066, -2618, -12933, 26582, 3507},
+{ 9705, -22628, 8311, 8167, -13293, 5608},
+{ 3222, 3749, -1508, 165, -52, -196},
+{ 102, -22744, -8832, 903, -11421, -14662},
+{ -120, 5998, 19765, 13401, 3628, 5197},
+{ 8528, 5827, -1066, 774, -39, -166},
+{ 9411, -9476, 9581, -13004, 24456, 24900},
+{ 17878, 2235, -21639, 20478, 4716, -7190},
+{ -2482, 9511, 1611, -21943, 14230, -1289},
+{ 9288, -2291, 23215, -3452, -10842, 11},
+{ 9496, 3041, 5130, -3890, -21219, -22589},
+{ 14262, -9838, 20195, 14019, 91, -17200},
+{ -18591, 980, 17, 821, 120, -574},
+{ 12285, -19269, 13742, 16373, -161, 6025},
+{ -3364, 1530, -4005, 2454, -10872, -23839},
+{ 105, 5085, -260, 5790, -588, 19170},
+{ 4121, 4169, 13439, 14644, 20899, 7434},
+{ -175, 13101, -3704, 23233, 3907, 10106},
+{ -6101, 23467, 5204, -1341, 1599, 13174},
+{ -3217, -3494, 15117, -8387, -11762, -4750},
+{ 1146, 4675, -19378, 14917, -5091, 249},
+{ -21506, 10136, -16473, -13305, 18382, -8601},
+{ 628, 2447, 3344, 3130, -5115, 119},
+{ 17900, -22422, -17633, 21967, -16293, -7676},
+{ 16863, 24214, 5612, -3858, -809, 3822},
+{ -2291, 10091, -2360, -25109, -1226, 312},
+{ 2957, 11256, 26745, -13266, -3455, -1128},
+{ -19762, -2708, 4604, 6355, 1638, 25501},
+{ -19593, -7753, 3159, -85, -489, -1855},
+{ 814, 12510, 19077, -4681, -2610, -1474},
+{ -23408, -19027, 8137, 19878, 7912, -282},
+{ 839, -19652, 11927, 27278, -3211, 2266},
+{ 4020, -1110, 8226, -1274, 20922, 25060},
+{ 26576, 325, -8693, -232, -2218, -699},
+{ -11293, -4200, 1805, -6673, -22940, -1339},
+{ -2005, -15886, -1047, -27687, -13235, 14370},
+{ -22073, 1949, 13175, -15656, -1846, 8055},
+{ 3039, 12025, 7132, -24632, 413, -2347},
+{ -24048, -206, 12459, -6654, -417, -10091},
+{ 18179, -23688, -20515, -16396, 7230, 763},
+{ 5659, -5085, 13878, -23729, -11077, -19587},
+{ 11340, 501, 25040, 7616, -19658, 1605},
+{ -26650, 8878, 10544, 417, 1299, 261},
+{ 14460, 11369, -3263, 9990, 8194, 18111},
+{ 1355, -20838, -9196, -16060, -8559, -730},
+{ -1918, -20937, -18293, -2461, -2651, 4316},
+{ -2810, 24521, -10996, -25721, 308, -1234},
+{ -9075, -17280, -1833, -29342, -24213, -16631},
+{ -2843, 10165, -5339, -2888, 21858, -21340},
+{ -15832, 14849, -23780, 5184, 10113, -20639},
+{ -19535, -11361, 8413, 1486, -23658, -5759},
+{ -7512, 1027, -20794, 13732, 19892, -21934},
+{ -12132, -7022, -19175, -8840, 22125, -16490},
+{ 1937, 5210, -6318, -23788, 13141, 11082},
+{ -205, 6036, -380, 8658, -233, 28020},
+{ -5523, 7477, 7635, 23595, 9763, -2590},
+{ 21658, -28313, -3086, -300, -1032, 1744},
+{ -22352, 16646, 208, 6665, -17400, -3028},
+{ 18482, 9336, -2737, -19372, 407, -4389},
+{ -4913, -17370, 18819, -17654, 13416, 15232},
+{ 7749, 6368, 23135, -18174, 7584, -4248},
+{ -1489, -6523, 586, -10157, 14964, 25568},
+{ 3844, -6156, 4897, -13045, -22526, 5647},
+{ -8491, -2105, -24774, 905, -9326, 1456},
+{ -3040, -1476, 1166, -4428, 11236, 9204},
+{ 3397, -1451, 13598, -15841, 24540, 5819},
+{ 8483, -2993, 21547, -16916, 7741, 24018},
+{ -14932, -23758, -5332, -6664, -4497, 13267},
+{ 19379, 12916, -2142, -737, 21100, -22101},
+{ 3393, -4629, 5735, -18913, -6969, 2687},
+{ 1148, -16147, -21433, -28095, -630, -14449},
+{ 7300, 672, 18530, -17452, -10149, 351},
+{ 11356, -10974, 17212, 4624, 145, 17791},
+{ -711, -3479, -2238, 15887, 2027, 0},
+{ -28048, 1794, -593, -2758, -21852, 11535},
+{ -19683, 4937, 22004, 21523, -3148, 1790},
+{ 813, 8231, 2633, 11981, -3043, 22201},
+{ 8952, -24760, -690, 14873, -2366, -5372},
+{ 8406, -5439, -274, -642, -145, 778},
+{ -6605, 7258, 20780, -23507, -18625, 22782},
+{ -22896, -25488, 10020, -1614, 1508, -1393},
+{ 7607, 407, -24678, -16385, -1804, -4699},
+{ -10592, -19139, 10462, -3747, 8721, -6919},
+{ 13010, 5292, -6230, -4884, -20904, -1797},
+{ 16891, -13770, -465, 19343, -10741, -12959},
+{ 25193, -14799, -5681, -521, -321, -1211},
+{ 6917, -3093, 20183, -26903, -12026, 1295},
+{ 305, 1992, 19457, -985, 25, -521},
+{ 6707, -3698, 8365, -8687, 21921, -27166},
+{ 4668, 5997, 7117, 11696, 24401, -10794},
+{ 744, -9416, 19893, 1963, 7922, -9824},
+{ 3430, 21282, -1736, 10844, 8821, 27015},
+{ -8813, 1521, -24038, 1651, 7838, -1208},
+{ 3911, -11221, 3273, -12541, 7168, 18402},
+{ 21642, 9117, -11536, -5256, 7077, 2382},
+{ 100, 3817, -6713, 1244, 1518, -321},
+{ 7946, -18670, 10667, -4866, 727, 776},
+{ -15883, -8150, -2087, 22739, 1567, -3482},
+{ 4380, -2735, 8469, -7025, -11424, 1317},
+{ 26970, 4393, 7665, 17561, -714, 650},
+{ -16191, -835, 8365, 1795, -14314, 16297},
+{ 4504, -10048, 7662, -26690, -17428, 2580},
+{ 48, -3984, 564, -5871, 2658, -18658},
+{ 12579, -26016, -15642, 2672, -1347, -887},
+{ -4950, 4208, -6811, 2569, -20621, -8658},
+{ -1836, -14818, -5571, -23322, -14800, 25867},
+{ 5434, -28139, -2357, -2883, -570, 2431},
+{ 13096, -2771, 24994, -12496, -24723, -1025},
+{ -5676, -4339, 1908, 18628, -21323, 17366},
+{ 27660, -27897, -15409, 1436, -7112, -2241},
+{ 8019, 3847, 24568, -469, 9674, 10683},
+{ -903, -10149, 1801, -21260, 4795, -8751},
+{ 1122, -9582, 2625, 22791, 956, 882},
+{ 7876, 19075, -9900, -24266, 7496, 9277},
+{ 980, -26764, -5386, 5396, 1086, 1648},
+{ 28838, -1270, -447, 5, -429, -20},
+{ -15283, 6132, 22812, 1252, -9963, 511},
+{ 851, 7925, -457, -12210, 4261, 7579},
+{ -4530, 8452, -1246, 14501, -24951, -5760},
+{ -17814, -10727, 9887, -23929, -13432, 1878},
+{ -15049, 10165, 16491, -14603, -11712, -21156},
+{ -3317, 840, -5683, 22413, 1994, 586},
+{ 23158, -5788, -15043, -10372, -9271, -13523},
+{ -773, -9509, -3993, -24264, 8463, 5804},
+{ -8545, -703, -12440, -3985, -25122, -28147},
+{ -16659, 16001, 2746, 1611, 5097, -1043},
+{ 41, -7181, 19903, 31555, -32237, 13927},
+{ -5658, 845, -12774, 5705, 16695, -86},
+{ 5282, 14875, 27026, 21124, 15776, -10477},
+{ 14712, 19648, -11487, -13361, -20196, -15229},
+{ 8597, -9138, -626, 10891, -6015, 6346},
+{ -1488, -1272, -1479, -1303, -3704, -5485},
+{ -3370, 17871, -6604, 24930, 25886, -3127},
+{ 8416, 27783, -1385, 5350, -4260, 19993},
+{ 5688, 362, 17246, 3809, -3246, 1088},
+{ -105, -29607, 2747, 15223, -167, 3722},
+{ 3502, -3195, 8602, 7772, -1566, -915},
+{ -491, 3257, -2423, 5522, 20606, -100},
+{ -13948, -11368, -15375, -21866, -8520, 12221},
+{ -616, 2424, -2023, 4398, -3805, 8108},
+{ -7204, 21043, 21211, -9395, -19391, 896},
+{ -5737, -15160, -21298, 17066, -1006, -366},
+{ 6261, 3240, -11937, -16213, -15820, 6581},
+{ -3155, 24796, 2733, -1257, -875, -1597},
+{ -20469, 11094, 24071, -8987, 14136, 2220},
+{ -14106, 11959, -22495, 4135, -1055, -5420},
+{ 801, -2655, 60, -5324, -790, 5937},
+{ -7372, -1764, -22433, -26060, 21707, 4178},
+{ -5715, -6648, -14908, 1325, -24044, 1493},
+{ -6024, -12488, 23930, 2950, 1601, 1173},
+{ 19067, 17630, 17929, -10654, 10928, -4958},
+{ 3231, -3284, 27336, 4174, -1683, 497},
+};
+
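+/* Mantissa VQ tables indexed by hebap (1..7); hebap 0 has no VQ table, hence the NULL entry. */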
+const int16_t (* const ff_eac3_mantissa_vq[8])[6] = {
+ NULL,
+ vq_hebap1,
+ vq_hebap2,
+ vq_hebap3,
+ vq_hebap4,
+ vq_hebap5,
+ vq_hebap6,
+ vq_hebap7,
+};
+
+/**
+ * Table E2.14 Frame Exponent Strategy Combinations
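+ * Each row gives the exponent strategy used in each of the six audio blocks of a frame.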
+ */
+const uint8_t ff_eac3_frm_expstr[32][6] = {
+{ EXP_D15, EXP_REUSE, EXP_REUSE, EXP_REUSE, EXP_REUSE, EXP_REUSE},
+{ EXP_D15, EXP_REUSE, EXP_REUSE, EXP_REUSE, EXP_REUSE, EXP_D45},
+{ EXP_D15, EXP_REUSE, EXP_REUSE, EXP_REUSE, EXP_D25, EXP_REUSE},
+{ EXP_D15, EXP_REUSE, EXP_REUSE, EXP_REUSE, EXP_D45, EXP_D45},
+{ EXP_D25, EXP_REUSE, EXP_REUSE, EXP_D25, EXP_REUSE, EXP_REUSE},
+{ EXP_D25, EXP_REUSE, EXP_REUSE, EXP_D25, EXP_REUSE, EXP_D45},
+{ EXP_D25, EXP_REUSE, EXP_REUSE, EXP_D45, EXP_D25, EXP_REUSE},
+{ EXP_D25, EXP_REUSE, EXP_REUSE, EXP_D45, EXP_D45, EXP_D45},
+{ EXP_D25, EXP_REUSE, EXP_D15, EXP_REUSE, EXP_REUSE, EXP_REUSE},
+{ EXP_D25, EXP_REUSE, EXP_D25, EXP_REUSE, EXP_REUSE, EXP_D45},
+{ EXP_D25, EXP_REUSE, EXP_D25, EXP_REUSE, EXP_D25, EXP_REUSE},
+{ EXP_D25, EXP_REUSE, EXP_D25, EXP_REUSE, EXP_D45, EXP_D45},
+{ EXP_D25, EXP_REUSE, EXP_D45, EXP_D25, EXP_REUSE, EXP_REUSE},
+{ EXP_D25, EXP_REUSE, EXP_D45, EXP_D25, EXP_REUSE, EXP_D45},
+{ EXP_D25, EXP_REUSE, EXP_D45, EXP_D45, EXP_D25, EXP_REUSE},
+{ EXP_D25, EXP_REUSE, EXP_D45, EXP_D45, EXP_D45, EXP_D45},
+{ EXP_D45, EXP_D15, EXP_REUSE, EXP_REUSE, EXP_REUSE, EXP_REUSE},
+{ EXP_D45, EXP_D15, EXP_REUSE, EXP_REUSE, EXP_REUSE, EXP_D45},
+{ EXP_D45, EXP_D25, EXP_REUSE, EXP_REUSE, EXP_D25, EXP_REUSE},
+{ EXP_D45, EXP_D25, EXP_REUSE, EXP_REUSE, EXP_D45, EXP_D45},
+{ EXP_D45, EXP_D25, EXP_REUSE, EXP_D25, EXP_REUSE, EXP_REUSE},
+{ EXP_D45, EXP_D25, EXP_REUSE, EXP_D25, EXP_REUSE, EXP_D45},
+{ EXP_D45, EXP_D25, EXP_REUSE, EXP_D45, EXP_D25, EXP_REUSE},
+{ EXP_D45, EXP_D25, EXP_REUSE, EXP_D45, EXP_D45, EXP_D45},
+{ EXP_D45, EXP_D45, EXP_D15, EXP_REUSE, EXP_REUSE, EXP_REUSE},
+{ EXP_D45, EXP_D45, EXP_D25, EXP_REUSE, EXP_REUSE, EXP_D45},
+{ EXP_D45, EXP_D45, EXP_D25, EXP_REUSE, EXP_D25, EXP_REUSE},
+{ EXP_D45, EXP_D45, EXP_D25, EXP_REUSE, EXP_D45, EXP_D45},
+{ EXP_D45, EXP_D45, EXP_D45, EXP_D25, EXP_REUSE, EXP_REUSE},
+{ EXP_D45, EXP_D45, EXP_D45, EXP_D25, EXP_REUSE, EXP_D45},
+{ EXP_D45, EXP_D45, EXP_D45, EXP_D45, EXP_D25, EXP_REUSE},
+{ EXP_D45, EXP_D45, EXP_D45, EXP_D45, EXP_D45, EXP_D45},
+};
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/eac3dec_data.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/eac3dec_data.h
new file mode 100644
index 000000000..76dd15456
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/eac3dec_data.h
@@ -0,0 +1,35 @@
+/*
+ * E-AC-3 decoder tables
+ * Copyright (c) 2007 Bartlomiej Wolowiec <bartek.wolowiec@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_EAC3DEC_DATA_H
+#define AVCODEC_EAC3DEC_DATA_H
+
+#include <stdint.h>
+
+extern const uint8_t ff_eac3_bits_vs_hebap[20];
+extern const int16_t ff_eac3_gaq_remap_1[12];
+extern const int16_t ff_eac3_gaq_remap_2_4_a[9][2];
+extern const int16_t ff_eac3_gaq_remap_2_4_b[9][2];
+
+extern const int16_t (* const ff_eac3_mantissa_vq[8])[6];
+extern const uint8_t ff_eac3_frm_expstr[32][6];
+
+#endif /* AVCODEC_EAC3DEC_DATA_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/error_resilience.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/error_resilience.c
new file mode 100644
index 000000000..80c7a5c4d
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/error_resilience.c
@@ -0,0 +1,1043 @@
+/*
+ * Error resilience / concealment
+ *
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/error_resilience.c
+ * Error resilience / concealment.
+ */
+
+#include <limits.h>
+
+#include "avcodec.h"
+#include "dsputil.h"
+#include "mpegvideo.h"
+
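+/**
+ * Sets the destination pointers for the macroblock at (mb_x, mb_y) and
+ * reconstructs it from the state already prepared by the caller
+ * (MB type, motion vectors, block coefficients).
+ */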
+static void decode_mb(MpegEncContext *s){
+ s->dest[0] = s->current_picture.data[0] + (s->mb_y * 16* s->linesize ) + s->mb_x * 16;
+ s->dest[1] = s->current_picture.data[1] + (s->mb_y * (16>>s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16>>s->chroma_x_shift);
+ s->dest[2] = s->current_picture.data[2] + (s->mb_y * (16>>s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16>>s->chroma_x_shift);
+
+ MPV_decode_mb(s, s->block);
+}
+
+/**
+ * Replaces the current MB with a flat DC-only version.
+ */
+static void put_dc(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int mb_x, int mb_y)
+{
+ int dc, dcu, dcv, y, i;
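+    /* dc_val[] stores the DC scaled by 8; clamp to [0, 2040] so dc/8 stays in the 8-bit pixel range. */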
+ for(i=0; i<4; i++){
+ dc= s->dc_val[0][mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*s->b8_stride];
+ if(dc<0) dc=0;
+ else if(dc>2040) dc=2040;
+ for(y=0; y<8; y++){
+ int x;
+ for(x=0; x<8; x++){
+ dest_y[x + (i&1)*8 + (y + (i>>1)*8)*s->linesize]= dc/8;
+ }
+ }
+ }
+ dcu = s->dc_val[1][mb_x + mb_y*s->mb_stride];
+ dcv = s->dc_val[2][mb_x + mb_y*s->mb_stride];
+ if (dcu<0 ) dcu=0;
+ else if(dcu>2040) dcu=2040;
+ if (dcv<0 ) dcv=0;
+ else if(dcv>2040) dcv=2040;
+ for(y=0; y<8; y++){
+ int x;
+ for(x=0; x<8; x++){
+ dest_cb[x + y*(s->uvlinesize)]= dcu/8;
+ dest_cr[x + y*(s->uvlinesize)]= dcv/8;
+ }
+ }
+}
+
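+/**
+ * Applies a separable (-1, 8, -1)/6 filter to the DC plane, first along rows
+ * and then along columns (the 10923 multiply approximates a division by 6).
+ */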
+static void filter181(int16_t *data, int width, int height, int stride){
+ int x,y;
+
+ /* horizontal filter */
+ for(y=1; y<height-1; y++){
+ int prev_dc= data[0 + y*stride];
+
+ for(x=1; x<width-1; x++){
+ int dc;
+
+ dc= - prev_dc
+ + data[x + y*stride]*8
+ - data[x + 1 + y*stride];
+ dc= (dc*10923 + 32768)>>16;
+ prev_dc= data[x + y*stride];
+ data[x + y*stride]= dc;
+ }
+ }
+
+ /* vertical filter */
+ for(x=1; x<width-1; x++){
+ int prev_dc= data[x];
+
+ for(y=1; y<height-1; y++){
+ int dc;
+
+ dc= - prev_dc
+ + data[x + y *stride]*8
+ - data[x + (y+1)*stride];
+ dc= (dc*10923 + 32768)>>16;
+ prev_dc= data[x + y*stride];
+ data[x + y*stride]= dc;
+ }
+ }
+}
+
+/**
+ * Guesses the DC of blocks which do not have an undamaged DC.
+ * @param w width in 8 pixel blocks
+ * @param h height in 8 pixel blocks
+ */
+static void guess_dc(MpegEncContext *s, int16_t *dc, int w, int h, int stride, int is_luma){
+ int b_x, b_y;
+
+ for(b_y=0; b_y<h; b_y++){
+ for(b_x=0; b_x<w; b_x++){
+ int color[4]={1024,1024,1024,1024};
+ int distance[4]={9999,9999,9999,9999};
+ int mb_index, error, j;
+ int64_t guess, weight_sum;
+
+ mb_index= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
+
+ error= s->error_status_table[mb_index];
+
+ if(IS_INTER(s->current_picture.mb_type[mb_index])) continue; //inter
+ if(!(error&DC_ERROR)) continue; //dc-ok
+
+ /* right block */
+ for(j=b_x+1; j<w; j++){
+ int mb_index_j= (j>>is_luma) + (b_y>>is_luma)*s->mb_stride;
+ int error_j= s->error_status_table[mb_index_j];
+ int intra_j= IS_INTRA(s->current_picture.mb_type[mb_index_j]);
+ if(intra_j==0 || !(error_j&DC_ERROR)){
+ color[0]= dc[j + b_y*stride];
+ distance[0]= j-b_x;
+ break;
+ }
+ }
+
+ /* left block */
+ for(j=b_x-1; j>=0; j--){
+ int mb_index_j= (j>>is_luma) + (b_y>>is_luma)*s->mb_stride;
+ int error_j= s->error_status_table[mb_index_j];
+ int intra_j= IS_INTRA(s->current_picture.mb_type[mb_index_j]);
+ if(intra_j==0 || !(error_j&DC_ERROR)){
+ color[1]= dc[j + b_y*stride];
+ distance[1]= b_x-j;
+ break;
+ }
+ }
+
+ /* bottom block */
+ for(j=b_y+1; j<h; j++){
+ int mb_index_j= (b_x>>is_luma) + (j>>is_luma)*s->mb_stride;
+ int error_j= s->error_status_table[mb_index_j];
+ int intra_j= IS_INTRA(s->current_picture.mb_type[mb_index_j]);
+ if(intra_j==0 || !(error_j&DC_ERROR)){
+ color[2]= dc[b_x + j*stride];
+ distance[2]= j-b_y;
+ break;
+ }
+ }
+
+ /* top block */
+ for(j=b_y-1; j>=0; j--){
+ int mb_index_j= (b_x>>is_luma) + (j>>is_luma)*s->mb_stride;
+ int error_j= s->error_status_table[mb_index_j];
+ int intra_j= IS_INTRA(s->current_picture.mb_type[mb_index_j]);
+ if(intra_j==0 || !(error_j&DC_ERROR)){
+ color[3]= dc[b_x + j*stride];
+ distance[3]= b_y-j;
+ break;
+ }
+ }
+
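+            /* weighted average of the four candidate DCs, each weighted by the inverse of its distance */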
+ weight_sum=0;
+ guess=0;
+ for(j=0; j<4; j++){
+ int64_t weight= 256*256*256*16/distance[j];
+ guess+= weight*(int64_t)color[j];
+ weight_sum+= weight;
+ }
+ guess= (guess + weight_sum/2) / weight_sum;
+
+ dc[b_x + b_y*stride]= guess;
+ }
+ }
+}
+
+/**
+ * simple horizontal deblocking filter used for error resilience
+ * @param w width in 8 pixel blocks
+ * @param h height in 8 pixel blocks
+ */
+static void h_block_filter(MpegEncContext *s, uint8_t *dst, int w, int h, int stride, int is_luma){
+ int b_x, b_y;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+
+ for(b_y=0; b_y<h; b_y++){
+ for(b_x=0; b_x<w-1; b_x++){
+ int y;
+ int left_status = s->error_status_table[( b_x >>is_luma) + (b_y>>is_luma)*s->mb_stride];
+ int right_status= s->error_status_table[((b_x+1)>>is_luma) + (b_y>>is_luma)*s->mb_stride];
+ int left_intra= IS_INTRA(s->current_picture.mb_type [( b_x >>is_luma) + (b_y>>is_luma)*s->mb_stride]);
+ int right_intra= IS_INTRA(s->current_picture.mb_type [((b_x+1)>>is_luma) + (b_y>>is_luma)*s->mb_stride]);
+ int left_damage = left_status&(DC_ERROR|AC_ERROR|MV_ERROR);
+ int right_damage= right_status&(DC_ERROR|AC_ERROR|MV_ERROR);
+ int offset= b_x*8 + b_y*stride*8;
+ int16_t *left_mv= s->current_picture.motion_val[0][s->b8_stride*(b_y<<(1-is_luma)) + ( b_x <<(1-is_luma))];
+ int16_t *right_mv= s->current_picture.motion_val[0][s->b8_stride*(b_y<<(1-is_luma)) + ((b_x+1)<<(1-is_luma))];
+
+ if(!(left_damage||right_damage)) continue; // both undamaged
+
+ if( (!left_intra) && (!right_intra)
+ && FFABS(left_mv[0]-right_mv[0]) + FFABS(left_mv[1]+right_mv[1]) < 2) continue;
+
+ for(y=0; y<8; y++){
+ int a,b,c,d;
+
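+                /* b is the step across the block edge, a and c the gradients just inside each block;
+                   d is the part of the edge step not explained by the local gradients */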
+ a= dst[offset + 7 + y*stride] - dst[offset + 6 + y*stride];
+ b= dst[offset + 8 + y*stride] - dst[offset + 7 + y*stride];
+ c= dst[offset + 9 + y*stride] - dst[offset + 8 + y*stride];
+
+ d= FFABS(b) - ((FFABS(a) + FFABS(c) + 1)>>1);
+ d= FFMAX(d, 0);
+ if(b<0) d= -d;
+
+ if(d==0) continue;
+
+ if(!(left_damage && right_damage))
+ d= d*16/9;
+
+ if(left_damage){
+ dst[offset + 7 + y*stride] = cm[dst[offset + 7 + y*stride] + ((d*7)>>4)];
+ dst[offset + 6 + y*stride] = cm[dst[offset + 6 + y*stride] + ((d*5)>>4)];
+ dst[offset + 5 + y*stride] = cm[dst[offset + 5 + y*stride] + ((d*3)>>4)];
+ dst[offset + 4 + y*stride] = cm[dst[offset + 4 + y*stride] + ((d*1)>>4)];
+ }
+ if(right_damage){
+ dst[offset + 8 + y*stride] = cm[dst[offset + 8 + y*stride] - ((d*7)>>4)];
+ dst[offset + 9 + y*stride] = cm[dst[offset + 9 + y*stride] - ((d*5)>>4)];
+ dst[offset + 10+ y*stride] = cm[dst[offset +10 + y*stride] - ((d*3)>>4)];
+ dst[offset + 11+ y*stride] = cm[dst[offset +11 + y*stride] - ((d*1)>>4)];
+ }
+ }
+ }
+ }
+}
+
+/**
+ * simple vertical deblocking filter used for error resilience
+ * @param w width in 8 pixel blocks
+ * @param h height in 8 pixel blocks
+ */
+static void v_block_filter(MpegEncContext *s, uint8_t *dst, int w, int h, int stride, int is_luma){
+ int b_x, b_y;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+
+ for(b_y=0; b_y<h-1; b_y++){
+ for(b_x=0; b_x<w; b_x++){
+ int x;
+ int top_status = s->error_status_table[(b_x>>is_luma) + ( b_y >>is_luma)*s->mb_stride];
+ int bottom_status= s->error_status_table[(b_x>>is_luma) + ((b_y+1)>>is_luma)*s->mb_stride];
+ int top_intra= IS_INTRA(s->current_picture.mb_type [(b_x>>is_luma) + ( b_y >>is_luma)*s->mb_stride]);
+ int bottom_intra= IS_INTRA(s->current_picture.mb_type [(b_x>>is_luma) + ((b_y+1)>>is_luma)*s->mb_stride]);
+ int top_damage = top_status&(DC_ERROR|AC_ERROR|MV_ERROR);
+ int bottom_damage= bottom_status&(DC_ERROR|AC_ERROR|MV_ERROR);
+ int offset= b_x*8 + b_y*stride*8;
+ int16_t *top_mv= s->current_picture.motion_val[0][s->b8_stride*( b_y <<(1-is_luma)) + (b_x<<(1-is_luma))];
+ int16_t *bottom_mv= s->current_picture.motion_val[0][s->b8_stride*((b_y+1)<<(1-is_luma)) + (b_x<<(1-is_luma))];
+
+ if(!(top_damage||bottom_damage)) continue; // both undamaged
+
+ if( (!top_intra) && (!bottom_intra)
+ && FFABS(top_mv[0]-bottom_mv[0]) + FFABS(top_mv[1]+bottom_mv[1]) < 2) continue;
+
+ for(x=0; x<8; x++){
+ int a,b,c,d;
+
+ a= dst[offset + x + 7*stride] - dst[offset + x + 6*stride];
+ b= dst[offset + x + 8*stride] - dst[offset + x + 7*stride];
+ c= dst[offset + x + 9*stride] - dst[offset + x + 8*stride];
+
+ d= FFABS(b) - ((FFABS(a) + FFABS(c)+1)>>1);
+ d= FFMAX(d, 0);
+ if(b<0) d= -d;
+
+ if(d==0) continue;
+
+ if(!(top_damage && bottom_damage))
+ d= d*16/9;
+
+ if(top_damage){
+ dst[offset + x + 7*stride] = cm[dst[offset + x + 7*stride] + ((d*7)>>4)];
+ dst[offset + x + 6*stride] = cm[dst[offset + x + 6*stride] + ((d*5)>>4)];
+ dst[offset + x + 5*stride] = cm[dst[offset + x + 5*stride] + ((d*3)>>4)];
+ dst[offset + x + 4*stride] = cm[dst[offset + x + 4*stride] + ((d*1)>>4)];
+ }
+ if(bottom_damage){
+ dst[offset + x + 8*stride] = cm[dst[offset + x + 8*stride] - ((d*7)>>4)];
+ dst[offset + x + 9*stride] = cm[dst[offset + x + 9*stride] - ((d*5)>>4)];
+ dst[offset + x + 10*stride] = cm[dst[offset + x + 10*stride] - ((d*3)>>4)];
+ dst[offset + x + 11*stride] = cm[dst[offset + x + 11*stride] - ((d*1)>>4)];
+ }
+ }
+ }
+ }
+}
+
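+/**
+ * Guesses motion vectors for damaged inter MBs. Candidate vectors taken from
+ * already-fixed neighbours (plus their mean, a component-wise median, the zero
+ * vector and the previous vector) are each decoded, and the one giving the
+ * smallest boundary mismatch against the fixed neighbours is kept.
+ */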
+static void guess_mv(MpegEncContext *s){
+ #if __STDC_VERSION__ >= 199901L
+ uint8_t fixed[s->mb_stride * s->mb_height];
+ #else
+ uint8_t *fixed=_alloca(sizeof(uint8_t)*(s->mb_stride * s->mb_height));
+ #endif
+#define MV_FROZEN 3
+#define MV_CHANGED 2
+#define MV_UNCHANGED 1
+ const int mb_stride = s->mb_stride;
+ const int mb_width = s->mb_width;
+ const int mb_height= s->mb_height;
+ int i, depth, num_avail;
+ int mb_x, mb_y;
+
+ num_avail=0;
+ for(i=0; i<s->mb_num; i++){
+ const int mb_xy= s->mb_index2xy[ i ];
+ int f=0;
+ int error= s->error_status_table[mb_xy];
+
+ if(IS_INTRA(s->current_picture.mb_type[mb_xy])) f=MV_FROZEN; //intra //FIXME check
+ if(!(error&MV_ERROR)) f=MV_FROZEN; //inter with undamaged MV
+
+ fixed[mb_xy]= f;
+ if(f==MV_FROZEN)
+ num_avail++;
+ }
+
+ if((!(s->avctx->error_concealment&FF_EC_GUESS_MVS)) || num_avail <= mb_width/2){
+ for(mb_y=0; mb_y<s->mb_height; mb_y++){
+ for(mb_x=0; mb_x<s->mb_width; mb_x++){
+ const int mb_xy= mb_x + mb_y*s->mb_stride;
+
+ if(IS_INTRA(s->current_picture.mb_type[mb_xy])) continue;
+ if(!(s->error_status_table[mb_xy]&MV_ERROR)) continue;
+
+ s->mv_dir = s->last_picture.data[0] ? MV_DIR_FORWARD : MV_DIR_BACKWARD;
+ s->mb_intra=0;
+ s->mv_type = MV_TYPE_16X16;
+ s->mb_skipped=0;
+
+ s->dsp.clear_blocks(s->block[0]);
+
+ s->mb_x= mb_x;
+ s->mb_y= mb_y;
+ s->mv[0][0][0]= 0;
+ s->mv[0][0][1]= 0;
+ decode_mb(s);
+ }
+ }
+ return;
+ }
+
+ for(depth=0;; depth++){
+ int changed, pass, none_left;
+
+ none_left=1;
+ changed=1;
+ for(pass=0; (changed || pass<2) && pass<10; pass++){
+ int mb_x, mb_y;
+int score_sum=0;
+
+ changed=0;
+ for(mb_y=0; mb_y<s->mb_height; mb_y++){
+ for(mb_x=0; mb_x<s->mb_width; mb_x++){
+ const int mb_xy= mb_x + mb_y*s->mb_stride;
+ int mv_predictor[8][2]={{0}};
+ int pred_count=0;
+ int j;
+ int best_score=256*256*256*64;
+ int best_pred=0;
+ const int mot_stride= s->b8_stride;
+ const int mot_index= mb_x*2 + mb_y*2*mot_stride;
+ int prev_x= s->current_picture.motion_val[0][mot_index][0];
+ int prev_y= s->current_picture.motion_val[0][mot_index][1];
+
+ if((mb_x^mb_y^pass)&1) continue;
+
+ if(fixed[mb_xy]==MV_FROZEN) continue;
+ assert(!IS_INTRA(s->current_picture.mb_type[mb_xy]));
+ assert(s->last_picture_ptr && s->last_picture_ptr->data[0]);
+
+ j=0;
+ if(mb_x>0 && fixed[mb_xy-1 ]==MV_FROZEN) j=1;
+ if(mb_x+1<mb_width && fixed[mb_xy+1 ]==MV_FROZEN) j=1;
+ if(mb_y>0 && fixed[mb_xy-mb_stride]==MV_FROZEN) j=1;
+ if(mb_y+1<mb_height && fixed[mb_xy+mb_stride]==MV_FROZEN) j=1;
+ if(j==0) continue;
+
+ j=0;
+ if(mb_x>0 && fixed[mb_xy-1 ]==MV_CHANGED) j=1;
+ if(mb_x+1<mb_width && fixed[mb_xy+1 ]==MV_CHANGED) j=1;
+ if(mb_y>0 && fixed[mb_xy-mb_stride]==MV_CHANGED) j=1;
+ if(mb_y+1<mb_height && fixed[mb_xy+mb_stride]==MV_CHANGED) j=1;
+ if(j==0 && pass>1) continue;
+
+ none_left=0;
+
+ if(mb_x>0 && fixed[mb_xy-1]){
+ mv_predictor[pred_count][0]= s->current_picture.motion_val[0][mot_index - 2][0];
+ mv_predictor[pred_count][1]= s->current_picture.motion_val[0][mot_index - 2][1];
+ pred_count++;
+ }
+ if(mb_x+1<mb_width && fixed[mb_xy+1]){
+ mv_predictor[pred_count][0]= s->current_picture.motion_val[0][mot_index + 2][0];
+ mv_predictor[pred_count][1]= s->current_picture.motion_val[0][mot_index + 2][1];
+ pred_count++;
+ }
+ if(mb_y>0 && fixed[mb_xy-mb_stride]){
+ mv_predictor[pred_count][0]= s->current_picture.motion_val[0][mot_index - mot_stride*2][0];
+ mv_predictor[pred_count][1]= s->current_picture.motion_val[0][mot_index - mot_stride*2][1];
+ pred_count++;
+ }
+ if(mb_y+1<mb_height && fixed[mb_xy+mb_stride]){
+ mv_predictor[pred_count][0]= s->current_picture.motion_val[0][mot_index + mot_stride*2][0];
+ mv_predictor[pred_count][1]= s->current_picture.motion_val[0][mot_index + mot_stride*2][1];
+ pred_count++;
+ }
+ if(pred_count==0) continue;
+
+ if(pred_count>1){
+ int sum_x=0, sum_y=0;
+ int max_x, max_y, min_x, min_y;
+
+ for(j=0; j<pred_count; j++){
+ sum_x+= mv_predictor[j][0];
+ sum_y+= mv_predictor[j][1];
+ }
+
+ /* mean */
+ mv_predictor[pred_count][0] = sum_x/j;
+ mv_predictor[pred_count][1] = sum_y/j;
+
+ /* median */
+ if(pred_count>=3){
+ min_y= min_x= 99999;
+ max_y= max_x=-99999;
+ }else{
+ min_x=min_y=max_x=max_y=0;
+ }
+ for(j=0; j<pred_count; j++){
+ max_x= FFMAX(max_x, mv_predictor[j][0]);
+ max_y= FFMAX(max_y, mv_predictor[j][1]);
+ min_x= FFMIN(min_x, mv_predictor[j][0]);
+ min_y= FFMIN(min_y, mv_predictor[j][1]);
+ }
+ mv_predictor[pred_count+1][0] = sum_x - max_x - min_x;
+ mv_predictor[pred_count+1][1] = sum_y - max_y - min_y;
+
+ if(pred_count==4){
+ mv_predictor[pred_count+1][0] /= 2;
+ mv_predictor[pred_count+1][1] /= 2;
+ }
+ pred_count+=2;
+ }
+
+ /* zero MV */
+ pred_count++;
+
+ /* last MV */
+ mv_predictor[pred_count][0]= s->current_picture.motion_val[0][mot_index][0];
+ mv_predictor[pred_count][1]= s->current_picture.motion_val[0][mot_index][1];
+ pred_count++;
+
+ s->mv_dir = MV_DIR_FORWARD;
+ s->mb_intra=0;
+ s->mv_type = MV_TYPE_16X16;
+ s->mb_skipped=0;
+
+ s->dsp.clear_blocks(s->block[0]);
+
+ s->mb_x= mb_x;
+ s->mb_y= mb_y;
+
+ for(j=0; j<pred_count; j++){
+ int score=0;
+ uint8_t *src= s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
+
+ s->current_picture.motion_val[0][mot_index][0]= s->mv[0][0][0]= mv_predictor[j][0];
+ s->current_picture.motion_val[0][mot_index][1]= s->mv[0][0][1]= mv_predictor[j][1];
+
+ decode_mb(s);
+
+ if(mb_x>0 && fixed[mb_xy-1]){
+ int k;
+ for(k=0; k<16; k++)
+ score += FFABS(src[k*s->linesize-1 ]-src[k*s->linesize ]);
+ }
+ if(mb_x+1<mb_width && fixed[mb_xy+1]){
+ int k;
+ for(k=0; k<16; k++)
+ score += FFABS(src[k*s->linesize+15]-src[k*s->linesize+16]);
+ }
+ if(mb_y>0 && fixed[mb_xy-mb_stride]){
+ int k;
+ for(k=0; k<16; k++)
+ score += FFABS(src[k-s->linesize ]-src[k ]);
+ }
+ if(mb_y+1<mb_height && fixed[mb_xy+mb_stride]){
+ int k;
+ for(k=0; k<16; k++)
+ score += FFABS(src[k+s->linesize*15]-src[k+s->linesize*16]);
+ }
+
+ if(score <= best_score){ // <= will favor the last MV
+ best_score= score;
+ best_pred= j;
+ }
+ }
+score_sum+= best_score;
+//FIXME no need to set s->current_picture.motion_val[0][mot_index][0] explicitly
+ s->current_picture.motion_val[0][mot_index][0]= s->mv[0][0][0]= mv_predictor[best_pred][0];
+ s->current_picture.motion_val[0][mot_index][1]= s->mv[0][0][1]= mv_predictor[best_pred][1];
+
+ decode_mb(s);
+
+
+ if(s->mv[0][0][0] != prev_x || s->mv[0][0][1] != prev_y){
+ fixed[mb_xy]=MV_CHANGED;
+ changed++;
+ }else
+ fixed[mb_xy]=MV_UNCHANGED;
+ }
+ }
+
+// printf(".%d/%d", changed, score_sum); fflush(stdout);
+ }
+
+ if(none_left)
+ return;
+
+ for(i=0; i<s->mb_num; i++){
+ int mb_xy= s->mb_index2xy[i];
+ if(fixed[mb_xy])
+ fixed[mb_xy]=MV_FROZEN;
+ }
+// printf(":"); fflush(stdout);
+ }
+}
+
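+/**
+ * Decides whether damaged MBs are better concealed spatially (intra) or
+ * temporally (inter), by sampling the undamaged MBs of the frame and, for
+ * I-frames, comparing them against the previous picture.
+ */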
+static int is_intra_more_likely(MpegEncContext *s){
+ int is_intra_likely, i, j, undamaged_count, skip_amount, mb_x, mb_y;
+
+ if(!s->last_picture_ptr || !s->last_picture_ptr->data[0]) return 1; //no previous frame available -> use spatial prediction
+
+ undamaged_count=0;
+ for(i=0; i<s->mb_num; i++){
+ const int mb_xy= s->mb_index2xy[i];
+ const int error= s->error_status_table[mb_xy];
+ if(!((error&DC_ERROR) && (error&MV_ERROR)))
+ undamaged_count++;
+ }
+
+ if(undamaged_count < 5) return 0; //almost all MBs damaged -> use temporal prediction
+
+    skip_amount= FFMAX(undamaged_count/50, 1); //check only up to 50 MBs
+ is_intra_likely=0;
+
+ j=0;
+ for(mb_y= 0; mb_y<s->mb_height-1; mb_y++){
+ for(mb_x= 0; mb_x<s->mb_width; mb_x++){
+ int error;
+ const int mb_xy= mb_x + mb_y*s->mb_stride;
+
+ error= s->error_status_table[mb_xy];
+ if((error&DC_ERROR) && (error&MV_ERROR))
+ continue; //skip damaged
+
+ j++;
+ if((j%skip_amount) != 0) continue; //skip a few to speed things up
+
+ if(s->pict_type==FF_I_TYPE){
+ uint8_t *mb_ptr = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
+ uint8_t *last_mb_ptr= s->last_picture.data [0] + mb_x*16 + mb_y*16*s->linesize;
+
+ is_intra_likely += s->dsp.sad[0](NULL, last_mb_ptr, mb_ptr , s->linesize, 16);
+ is_intra_likely -= s->dsp.sad[0](NULL, last_mb_ptr, last_mb_ptr+s->linesize*16, s->linesize, 16);
+ }else{
+ if(IS_INTRA(s->current_picture.mb_type[mb_xy]))
+ is_intra_likely++;
+ else
+ is_intra_likely--;
+ }
+ }
+ }
+//printf("is_intra_likely: %d type:%d\n", is_intra_likely, s->pict_type);
+ return is_intra_likely > 0;
+}
+
+void ff_er_frame_start(MpegEncContext *s){
+ if(!s->error_recognition) return;
+
+ memset(s->error_status_table, MV_ERROR|AC_ERROR|DC_ERROR|VP_START|AC_END|DC_END|MV_END, s->mb_stride*s->mb_height*sizeof(uint8_t));
+ s->error_count= 3*s->mb_num;
+}
+
+/**
+ * adds a slice.
+ * @param endx x component of the last macroblock, can be -1 for the last of the previous line
+ * @param status the status at the end (MV_END, AC_ERROR, ...); it is assumed that no earlier end or
+ *        error of the same type occurred
+ */
+void ff_er_add_slice(MpegEncContext *s, int startx, int starty, int endx, int endy, int status){
+ const int start_i= av_clip(startx + starty * s->mb_width , 0, s->mb_num-1);
+ const int end_i = av_clip(endx + endy * s->mb_width , 0, s->mb_num);
+ const int start_xy= s->mb_index2xy[start_i];
+ const int end_xy = s->mb_index2xy[end_i];
+ int mask= -1;
+
+ if(start_i > end_i || start_xy > end_xy){
+ av_log(s->avctx, AV_LOG_ERROR, "internal error, slice end before start\n");
+ return;
+ }
+
+ if(!s->error_recognition) return;
+
+ mask &= ~VP_START;
+ if(status & (AC_ERROR|AC_END)){
+ mask &= ~(AC_ERROR|AC_END);
+ s->error_count -= end_i - start_i + 1;
+ }
+ if(status & (DC_ERROR|DC_END)){
+ mask &= ~(DC_ERROR|DC_END);
+ s->error_count -= end_i - start_i + 1;
+ }
+ if(status & (MV_ERROR|MV_END)){
+ mask &= ~(MV_ERROR|MV_END);
+ s->error_count -= end_i - start_i + 1;
+ }
+
+ if(status & (AC_ERROR|DC_ERROR|MV_ERROR)) s->error_count= INT_MAX;
+
+ if(mask == ~0x7F){
+ memset(&s->error_status_table[start_xy], 0, (end_xy - start_xy) * sizeof(uint8_t));
+ }else{
+ int i;
+ for(i=start_xy; i<end_xy; i++){
+ s->error_status_table[ i ] &= mask;
+ }
+ }
+
+ if(end_i == s->mb_num)
+ s->error_count= INT_MAX;
+ else{
+ s->error_status_table[end_xy] &= mask;
+ s->error_status_table[end_xy] |= status;
+ }
+
+ s->error_status_table[start_xy] |= VP_START;
+
+ if(start_xy > 0 && s->avctx->thread_count <= 1 && s->avctx->skip_top*s->mb_width < start_i){
+ int prev_status= s->error_status_table[ s->mb_index2xy[start_i - 1] ];
+
+ prev_status &= ~ VP_START;
+ if(prev_status != (MV_END|DC_END|AC_END)) s->error_count= INT_MAX;
+ }
+}
+
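+/**
+ * Conceals the errors marked in error_status_table: propagates the error
+ * flags, chooses intra or inter concealment, guesses missing motion vectors
+ * and DC values, and optionally deblocks the concealed block boundaries.
+ */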
+void ff_er_frame_end(MpegEncContext *s){
+ int i, mb_x, mb_y, error, error_type, dc_error, mv_error, ac_error;
+ int distance;
+ int threshold_part[4]= {100,100,100};
+ int threshold= 50;
+ int is_intra_likely;
+ int size = s->b8_stride * 2 * s->mb_height;
+ Picture *pic= s->current_picture_ptr;
+
+ if(!s->error_recognition || s->error_count==0 || s->avctx->lowres ||
+ s->error_count==3*s->mb_width*(s->avctx->skip_top + s->avctx->skip_bottom)) return;
+
+ if(s->current_picture.motion_val[0] == NULL){
+        av_log(s->avctx, AV_LOG_ERROR, "Warning: MVs not available\n");
+
+ for(i=0; i<2; i++){
+ pic->ref_index[i]= av_mallocz(size * sizeof(uint8_t));
+ pic->motion_val_base[i]= av_mallocz((size+4) * 2 * sizeof(uint16_t));
+ pic->motion_val[i]= pic->motion_val_base[i]+4;
+ }
+ pic->motion_subsample_log2= 3;
+ s->current_picture= *s->current_picture_ptr;
+ }
+
+ for(i=0; i<2; i++){
+ if(pic->ref_index[i])
+ memset(pic->ref_index[i], 0, size * sizeof(uint8_t));
+ }
+
+ if(s->avctx->debug&FF_DEBUG_ER){
+ for(mb_y=0; mb_y<s->mb_height; mb_y++){
+ for(mb_x=0; mb_x<s->mb_width; mb_x++){
+ int status= s->error_status_table[mb_x + mb_y*s->mb_stride];
+
+ av_log(s->avctx, AV_LOG_DEBUG, "%2X ", status);
+ }
+ av_log(s->avctx, AV_LOG_DEBUG, "\n");
+ }
+ }
+
+#if 1
+ /* handle overlapping slices */
+ for(error_type=1; error_type<=3; error_type++){
+ int end_ok=0;
+
+ for(i=s->mb_num-1; i>=0; i--){
+ const int mb_xy= s->mb_index2xy[i];
+ int error= s->error_status_table[mb_xy];
+
+ if(error&(1<<error_type))
+ end_ok=1;
+ if(error&(8<<error_type))
+ end_ok=1;
+
+ if(!end_ok)
+ s->error_status_table[mb_xy]|= 1<<error_type;
+
+ if(error&VP_START)
+ end_ok=0;
+ }
+ }
+#endif
+#if 1
+ /* handle slices with partitions of different length */
+ if(s->partitioned_frame){
+ int end_ok=0;
+
+ for(i=s->mb_num-1; i>=0; i--){
+ const int mb_xy= s->mb_index2xy[i];
+ int error= s->error_status_table[mb_xy];
+
+ if(error&AC_END)
+ end_ok=0;
+ if((error&MV_END) || (error&DC_END) || (error&AC_ERROR))
+ end_ok=1;
+
+ if(!end_ok)
+ s->error_status_table[mb_xy]|= AC_ERROR;
+
+ if(error&VP_START)
+ end_ok=0;
+ }
+ }
+#endif
+ /* handle missing slices */
+ if(s->error_recognition>=4){
+ int end_ok=1;
+
+ for(i=s->mb_num-2; i>=s->mb_width+100; i--){ //FIXME +100 hack
+ const int mb_xy= s->mb_index2xy[i];
+ int error1= s->error_status_table[mb_xy ];
+ int error2= s->error_status_table[s->mb_index2xy[i+1]];
+
+ if(error1&VP_START)
+ end_ok=1;
+
+ if( error2==(VP_START|DC_ERROR|AC_ERROR|MV_ERROR|AC_END|DC_END|MV_END)
+ && error1!=(VP_START|DC_ERROR|AC_ERROR|MV_ERROR|AC_END|DC_END|MV_END)
+ && ((error1&AC_END) || (error1&DC_END) || (error1&MV_END))){ //end & uninit
+ end_ok=0;
+ }
+
+ if(!end_ok)
+ s->error_status_table[mb_xy]|= DC_ERROR|AC_ERROR|MV_ERROR;
+ }
+ }
+
+#if 1
+ /* backward mark errors */
+ distance=9999999;
+ for(error_type=1; error_type<=3; error_type++){
+ for(i=s->mb_num-1; i>=0; i--){
+ const int mb_xy= s->mb_index2xy[i];
+ int error= s->error_status_table[mb_xy];
+
+ if(!s->mbskip_table[mb_xy]) //FIXME partition specific
+ distance++;
+ if(error&(1<<error_type))
+ distance= 0;
+
+ if(s->partitioned_frame){
+ if(distance < threshold_part[error_type-1])
+ s->error_status_table[mb_xy]|= 1<<error_type;
+ }else{
+ if(distance < threshold)
+ s->error_status_table[mb_xy]|= 1<<error_type;
+ }
+
+ if(error&VP_START)
+ distance= 9999999;
+ }
+ }
+#endif
+
+ /* forward mark errors */
+ error=0;
+ for(i=0; i<s->mb_num; i++){
+ const int mb_xy= s->mb_index2xy[i];
+ int old_error= s->error_status_table[mb_xy];
+
+ if(old_error&VP_START)
+ error= old_error& (DC_ERROR|AC_ERROR|MV_ERROR);
+ else{
+ error|= old_error& (DC_ERROR|AC_ERROR|MV_ERROR);
+ s->error_status_table[mb_xy]|= error;
+ }
+ }
+#if 1
+ /* handle not partitioned case */
+ if(!s->partitioned_frame){
+ for(i=0; i<s->mb_num; i++){
+ const int mb_xy= s->mb_index2xy[i];
+ error= s->error_status_table[mb_xy];
+ if(error&(AC_ERROR|DC_ERROR|MV_ERROR))
+ error|= AC_ERROR|DC_ERROR|MV_ERROR;
+ s->error_status_table[mb_xy]= error;
+ }
+ }
+#endif
+
+ dc_error= ac_error= mv_error=0;
+ for(i=0; i<s->mb_num; i++){
+ const int mb_xy= s->mb_index2xy[i];
+ error= s->error_status_table[mb_xy];
+ if(error&DC_ERROR) dc_error ++;
+ if(error&AC_ERROR) ac_error ++;
+ if(error&MV_ERROR) mv_error ++;
+ }
+ av_log(s->avctx, AV_LOG_INFO, "concealing %d DC, %d AC, %d MV errors\n", dc_error, ac_error, mv_error);
+
+ is_intra_likely= is_intra_more_likely(s);
+
+ /* set unknown mb-type to most likely */
+ for(i=0; i<s->mb_num; i++){
+ const int mb_xy= s->mb_index2xy[i];
+ error= s->error_status_table[mb_xy];
+ if(!((error&DC_ERROR) && (error&MV_ERROR)))
+ continue;
+
+ if(is_intra_likely)
+ s->current_picture.mb_type[mb_xy]= MB_TYPE_INTRA4x4;
+ else
+ s->current_picture.mb_type[mb_xy]= MB_TYPE_16x16 | MB_TYPE_L0;
+ }
+
+ // change inter to intra blocks if no reference frames are available
+ if (!s->last_picture.data[0] && !s->next_picture.data[0])
+ for(i=0; i<s->mb_num; i++){
+ const int mb_xy= s->mb_index2xy[i];
+ if(!IS_INTRA(s->current_picture.mb_type[mb_xy]))
+ s->current_picture.mb_type[mb_xy]= MB_TYPE_INTRA4x4;
+ }
+
+ /* handle inter blocks with damaged AC */
+ for(mb_y=0; mb_y<s->mb_height; mb_y++){
+ for(mb_x=0; mb_x<s->mb_width; mb_x++){
+ const int mb_xy= mb_x + mb_y * s->mb_stride;
+ const int mb_type= s->current_picture.mb_type[mb_xy];
+ int dir = !s->last_picture.data[0];
+ error= s->error_status_table[mb_xy];
+
+ if(IS_INTRA(mb_type)) continue; //intra
+ if(error&MV_ERROR) continue; //inter with damaged MV
+ if(!(error&AC_ERROR)) continue; //undamaged inter
+
+ s->mv_dir = dir ? MV_DIR_BACKWARD : MV_DIR_FORWARD;
+ s->mb_intra=0;
+ s->mb_skipped=0;
+ if(IS_8X8(mb_type)){
+ int mb_index= mb_x*2 + mb_y*2*s->b8_stride;
+ int j;
+ s->mv_type = MV_TYPE_8X8;
+ for(j=0; j<4; j++){
+ s->mv[0][j][0] = s->current_picture.motion_val[dir][ mb_index + (j&1) + (j>>1)*s->b8_stride ][0];
+ s->mv[0][j][1] = s->current_picture.motion_val[dir][ mb_index + (j&1) + (j>>1)*s->b8_stride ][1];
+ }
+ }else{
+ s->mv_type = MV_TYPE_16X16;
+ s->mv[0][0][0] = s->current_picture.motion_val[dir][ mb_x*2 + mb_y*2*s->b8_stride ][0];
+ s->mv[0][0][1] = s->current_picture.motion_val[dir][ mb_x*2 + mb_y*2*s->b8_stride ][1];
+ }
+
+ s->dsp.clear_blocks(s->block[0]);
+
+ s->mb_x= mb_x;
+ s->mb_y= mb_y;
+ decode_mb(s);
+ }
+ }
+
+ /* guess MVs */
+ if(s->pict_type==FF_B_TYPE){
+ for(mb_y=0; mb_y<s->mb_height; mb_y++){
+ for(mb_x=0; mb_x<s->mb_width; mb_x++){
+ int xy= mb_x*2 + mb_y*2*s->b8_stride;
+ const int mb_xy= mb_x + mb_y * s->mb_stride;
+ const int mb_type= s->current_picture.mb_type[mb_xy];
+ error= s->error_status_table[mb_xy];
+
+ if(IS_INTRA(mb_type)) continue;
+ if(!(error&MV_ERROR)) continue; //inter with undamaged MV
+ if(!(error&AC_ERROR)) continue; //undamaged inter
+
+ s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD;
+ if(!s->last_picture.data[0]) s->mv_dir &= ~MV_DIR_FORWARD;
+ if(!s->next_picture.data[0]) s->mv_dir &= ~MV_DIR_BACKWARD;
+ s->mb_intra=0;
+ s->mv_type = MV_TYPE_16X16;
+ s->mb_skipped=0;
+
+ if(s->pp_time){
+ int time_pp= s->pp_time;
+ int time_pb= s->pb_time;
+
+ s->mv[0][0][0] = s->next_picture.motion_val[0][xy][0]*time_pb/time_pp;
+ s->mv[0][0][1] = s->next_picture.motion_val[0][xy][1]*time_pb/time_pp;
+ s->mv[1][0][0] = s->next_picture.motion_val[0][xy][0]*(time_pb - time_pp)/time_pp;
+ s->mv[1][0][1] = s->next_picture.motion_val[0][xy][1]*(time_pb - time_pp)/time_pp;
+ }else{
+ s->mv[0][0][0]= 0;
+ s->mv[0][0][1]= 0;
+ s->mv[1][0][0]= 0;
+ s->mv[1][0][1]= 0;
+ }
+
+ s->dsp.clear_blocks(s->block[0]);
+ s->mb_x= mb_x;
+ s->mb_y= mb_y;
+ decode_mb(s);
+ }
+ }
+ }else
+ guess_mv(s);
+
+ /* fill DC for inter blocks */
+ for(mb_y=0; mb_y<s->mb_height; mb_y++){
+ for(mb_x=0; mb_x<s->mb_width; mb_x++){
+ int dc, dcu, dcv, y, n;
+ int16_t *dc_ptr;
+ uint8_t *dest_y, *dest_cb, *dest_cr;
+ const int mb_xy= mb_x + mb_y * s->mb_stride;
+ const int mb_type= s->current_picture.mb_type[mb_xy];
+
+ error= s->error_status_table[mb_xy];
+
+ if(IS_INTRA(mb_type) && s->partitioned_frame) continue;
+// if(error&MV_ERROR) continue; //inter data damaged FIXME is this good?
+
+ dest_y = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
+ dest_cb= s->current_picture.data[1] + mb_x*8 + mb_y*8 *s->uvlinesize;
+ dest_cr= s->current_picture.data[2] + mb_x*8 + mb_y*8 *s->uvlinesize;
+
+ dc_ptr= &s->dc_val[0][mb_x*2 + mb_y*2*s->b8_stride];
+ for(n=0; n<4; n++){
+ dc=0;
+ for(y=0; y<8; y++){
+ int x;
+ for(x=0; x<8; x++){
+ dc+= dest_y[x + (n&1)*8 + (y + (n>>1)*8)*s->linesize];
+ }
+ }
+ dc_ptr[(n&1) + (n>>1)*s->b8_stride]= (dc+4)>>3;
+ }
+
+ dcu=dcv=0;
+ for(y=0; y<8; y++){
+ int x;
+ for(x=0; x<8; x++){
+ dcu+=dest_cb[x + y*(s->uvlinesize)];
+ dcv+=dest_cr[x + y*(s->uvlinesize)];
+ }
+ }
+ s->dc_val[1][mb_x + mb_y*s->mb_stride]= (dcu+4)>>3;
+ s->dc_val[2][mb_x + mb_y*s->mb_stride]= (dcv+4)>>3;
+ }
+ }
+#if 1
+ /* guess DC for damaged blocks */
+ guess_dc(s, s->dc_val[0], s->mb_width*2, s->mb_height*2, s->b8_stride, 1);
+ guess_dc(s, s->dc_val[1], s->mb_width , s->mb_height , s->mb_stride, 0);
+ guess_dc(s, s->dc_val[2], s->mb_width , s->mb_height , s->mb_stride, 0);
+#endif
+ /* filter luma DC */
+ filter181(s->dc_val[0], s->mb_width*2, s->mb_height*2, s->b8_stride);
+
+#if 1
+ /* render DC only intra */
+ for(mb_y=0; mb_y<s->mb_height; mb_y++){
+ for(mb_x=0; mb_x<s->mb_width; mb_x++){
+ uint8_t *dest_y, *dest_cb, *dest_cr;
+ const int mb_xy= mb_x + mb_y * s->mb_stride;
+ const int mb_type= s->current_picture.mb_type[mb_xy];
+
+ error= s->error_status_table[mb_xy];
+
+ if(IS_INTER(mb_type)) continue;
+ if(!(error&AC_ERROR)) continue; //undamaged
+
+ dest_y = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
+ dest_cb= s->current_picture.data[1] + mb_x*8 + mb_y*8 *s->uvlinesize;
+ dest_cr= s->current_picture.data[2] + mb_x*8 + mb_y*8 *s->uvlinesize;
+
+ put_dc(s, dest_y, dest_cb, dest_cr, mb_x, mb_y);
+ }
+ }
+#endif
+
+ if(s->avctx->error_concealment&FF_EC_DEBLOCK){
+ /* filter horizontal block boundaries */
+ h_block_filter(s, s->current_picture.data[0], s->mb_width*2, s->mb_height*2, s->linesize , 1);
+ h_block_filter(s, s->current_picture.data[1], s->mb_width , s->mb_height , s->uvlinesize, 0);
+ h_block_filter(s, s->current_picture.data[2], s->mb_width , s->mb_height , s->uvlinesize, 0);
+
+ /* filter vertical block boundaries */
+ v_block_filter(s, s->current_picture.data[0], s->mb_width*2, s->mb_height*2, s->linesize , 1);
+ v_block_filter(s, s->current_picture.data[1], s->mb_width , s->mb_height , s->uvlinesize, 0);
+ v_block_filter(s, s->current_picture.data[2], s->mb_width , s->mb_height , s->uvlinesize, 0);
+ }
+
+ec_clean:
+ /* clean a few tables */
+ for(i=0; i<s->mb_num; i++){
+ const int mb_xy= s->mb_index2xy[i];
+ int error= s->error_status_table[mb_xy];
+
+ if(s->pict_type!=FF_B_TYPE && (error&(DC_ERROR|MV_ERROR|AC_ERROR))){
+ s->mbskip_table[mb_xy]=0;
+ }
+ s->mbintra_table[mb_xy]=1;
+ }
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/eval.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/eval.h
new file mode 100644
index 000000000..5481b1407
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/eval.h
@@ -0,0 +1,96 @@
+/*
+ * simple arithmetic expression evaluator
+ *
+ * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/eval.h
+ * eval header.
+ */
+
+#ifndef AVCODEC_EVAL_H
+#define AVCODEC_EVAL_H
+
+/**
+ * Parses and evaluates an expression.
+ * Note, this is significantly slower than ff_parse_eval()
+ * @param s expression as a zero terminated string, for example "1+2^3+5*5+sin(2/3)"
+ * @param func1 NULL terminated array of function pointers for functions which take 1 argument
+ * @param func2 NULL terminated array of function pointers for functions which take 2 arguments
+ * @param const_name NULL terminated array of zero terminated strings of constant identifiers, for example {"PI", "E", 0}
+ * @param func1_name NULL terminated array of zero terminated strings of func1 identifiers
+ * @param func2_name NULL terminated array of zero terminated strings of func2 identifiers
+ * @param error pointer to a char* which is set to an error message if something goes wrong
+ * @param const_value a zero terminated array of values for the identifiers from const_name
+ * @param opaque a pointer which will be passed to all functions from func1 and func2
+ * @return the value of the expression
+ */
+double ff_eval2(const char *s, const double *const_value, const char * const *const_name,
+ double (**func1)(void *, double), const char **func1_name,
+ double (**func2)(void *, double, double), const char **func2_name,
+ void *opaque, const char **error);
+
+typedef struct ff_expr_s AVEvalExpr;
+
+/**
+ * Parses an expression.
+ * @param s expression as a zero terminated string, for example "1+2^3+5*5+sin(2/3)"
+ * @param func1 NULL terminated array of function pointers for functions which take 1 argument
+ * @param func2 NULL terminated array of function pointers for functions which take 2 arguments
+ * @param const_name NULL terminated array of zero terminated strings of constant identifiers, for example {"PI", "E", 0}
+ * @param func1_name NULL terminated array of zero terminated strings of func1 identifiers
+ * @param func2_name NULL terminated array of zero terminated strings of func2 identifiers
+ * @param error pointer to a char* which is set to an error message if something goes wrong
+ * @return AVEvalExpr which must be freed with ff_eval_free by the user when it is not needed anymore,
+ * or NULL if anything went wrong
+ */
+AVEvalExpr * ff_parse(const char *s, const char * const *const_name,
+ double (**func1)(void *, double), const char **func1_name,
+ double (**func2)(void *, double, double), const char **func2_name,
+ const char **error);
+/**
+ * Evaluates a previously parsed expression.
+ * @param const_value a zero terminated array of values for the identifiers from ff_parse const_name
+ * @param opaque a pointer which will be passed to all functions from func1 and func2
+ * @return the value of the expression
+ */
+double ff_parse_eval(AVEvalExpr * e, const double *const_value, void *opaque);
+void ff_eval_free(AVEvalExpr * e);
+
+/**
+ * Parses the string in numstr and returns its value as a double. If
+ * the string is empty, contains only whitespace, or does not contain
+ * an initial substring that has the expected syntax for a
+ * floating-point number, no conversion is performed. In this case,
+ * zero is returned and the pointer stored in tail is the original
+ * value of numstr.
+ *
+ * @param numstr a string representing a number, may contain one of
+ * the International System number postfixes, for example 'K', 'M',
+ * 'G'. If 'i' is appended after the postfix, powers of 2 are used
+ * instead of powers of 10. The 'B' postfix multiplies the value by
+ * 8, and can be appended after another postfix or used alone. This
+ * allows using, for example, 'KB', 'MiB', 'G' and 'B' as postfixes.
+ * @param tail if non-NULL, the pointer to the character after the
+ * last parsed character is stored here
+ */
+double av_strtod(const char *numstr, char **tail);
+
+#endif /* AVCODEC_EVAL_H */
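A short usage sketch for the eval API declared above: parse once with ff_parse(), evaluate repeatedly with ff_parse_eval(), then release with ff_eval_free(). The expression, the constant names and the values below are made up for illustration and are not part of the header.

#include <stdio.h>
#include "eval.h"

/* Illustrative only: evaluate "PI*x*x" for a few values of x. */
static void eval_example(void)
{
    static const char * const const_names[] = { "PI", "x", NULL };
    double const_values[]                   = { 3.14159265358979, 0.0, 0.0 };
    const char *error = NULL;
    AVEvalExpr *e;
    int i;

    e = ff_parse("PI*x*x", const_names, NULL, NULL, NULL, NULL, &error);
    if (!e) {
        printf("parse failed: %s\n", error ? error : "unknown error");
        return;
    }
    for (i = 1; i <= 3; i++) {
        const_values[1] = i;   /* slot 1 corresponds to const_names[1] == "x" */
        printf("PI*%d*%d = %f\n", i, i, ff_parse_eval(e, const_values, NULL));
    }
    ff_eval_free(e);
}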
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/faandct.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/faandct.c
new file mode 100644
index 000000000..08818a24c
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/faandct.c
@@ -0,0 +1,232 @@
+/*
+ * Floating point AAN DCT
+ * this implementation is based upon the IJG integer AAN DCT (see jfdctfst.c)
+ *
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2003 Roman Shaposhnik
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * @file faandct.c
+ * @brief
+ * Floating point AAN DCT
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#include "dsputil.h"
+#include "faandct.h"
+
+#define FLOAT float
+#ifdef FAAN_POSTSCALE
+# define SCALE(x) postscale[x]
+#else
+# define SCALE(x) 1
+#endif
+
+//numbers generated by simple c code (not as accurate as they could be)
+/*
+for(i=0; i<8; i++){
+ printf("#define B%d %1.20llf\n", i, (long double)1.0/(cosl(i*acosl(-1.0)/(long double)16.0)*sqrtl(2)));
+}
+*/
+#define B0 1.00000000000000000000
+#define B1 0.72095982200694791383 // (cos(pi*1/16)sqrt(2))^-1
+#define B2 0.76536686473017954350 // (cos(pi*2/16)sqrt(2))^-1
+#define B3 0.85043009476725644878 // (cos(pi*3/16)sqrt(2))^-1
+#define B4 1.00000000000000000000 // (cos(pi*4/16)sqrt(2))^-1
+#define B5 1.27275858057283393842 // (cos(pi*5/16)sqrt(2))^-1
+#define B6 1.84775906502257351242 // (cos(pi*6/16)sqrt(2))^-1
+#define B7 3.62450978541155137218 // (cos(pi*7/16)sqrt(2))^-1
+
+
+#define A1 0.70710678118654752438 // cos(pi*4/16)
+#define A2 0.54119610014619698435 // cos(pi*6/16)sqrt(2)
+#define A5 0.38268343236508977170 // cos(pi*6/16)
+#define A4 1.30656296487637652774 // cos(pi*2/16)sqrt(2)
+
+static const FLOAT postscale[64]={
+B0*B0, B0*B1, B0*B2, B0*B3, B0*B4, B0*B5, B0*B6, B0*B7,
+B1*B0, B1*B1, B1*B2, B1*B3, B1*B4, B1*B5, B1*B6, B1*B7,
+B2*B0, B2*B1, B2*B2, B2*B3, B2*B4, B2*B5, B2*B6, B2*B7,
+B3*B0, B3*B1, B3*B2, B3*B3, B3*B4, B3*B5, B3*B6, B3*B7,
+B4*B0, B4*B1, B4*B2, B4*B3, B4*B4, B4*B5, B4*B6, B4*B7,
+B5*B0, B5*B1, B5*B2, B5*B3, B5*B4, B5*B5, B5*B6, B5*B7,
+B6*B0, B6*B1, B6*B2, B6*B3, B6*B4, B6*B5, B6*B6, B6*B7,
+B7*B0, B7*B1, B7*B2, B7*B3, B7*B4, B7*B5, B7*B6, B7*B7,
+};
+
+static av_always_inline void row_fdct(FLOAT temp[64], DCTELEM * data)
+{
+ FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ FLOAT tmp10, tmp11, tmp12, tmp13;
+ FLOAT z2, z4, z11, z13;
+ FLOAT av_unused z5;
+ int i;
+
+ for (i=0; i<8*8; i+=8) {
+ tmp0= data[0 + i] + data[7 + i];
+ tmp7= data[0 + i] - data[7 + i];
+ tmp1= data[1 + i] + data[6 + i];
+ tmp6= data[1 + i] - data[6 + i];
+ tmp2= data[2 + i] + data[5 + i];
+ tmp5= data[2 + i] - data[5 + i];
+ tmp3= data[3 + i] + data[4 + i];
+ tmp4= data[3 + i] - data[4 + i];
+
+ tmp10= tmp0 + tmp3;
+ tmp13= tmp0 - tmp3;
+ tmp11= tmp1 + tmp2;
+ tmp12= tmp1 - tmp2;
+
+ temp[0 + i]= tmp10 + tmp11;
+ temp[4 + i]= tmp10 - tmp11;
+
+ tmp12 += tmp13;
+ tmp12 *= A1;
+ temp[2 + i]= tmp13 + tmp12;
+ temp[6 + i]= tmp13 - tmp12;
+
+ tmp4 += tmp5;
+ tmp5 += tmp6;
+ tmp6 += tmp7;
+
+#if 0
+ z5= (tmp4 - tmp6) * A5;
+ z2= tmp4*A2 + z5;
+ z4= tmp6*A4 + z5;
+#else
+ z2= tmp4*(A2+A5) - tmp6*A5;
+ z4= tmp6*(A4-A5) + tmp4*A5;
+#endif
+ tmp5*=A1;
+
+ z11= tmp7 + tmp5;
+ z13= tmp7 - tmp5;
+
+ temp[5 + i]= z13 + z2;
+ temp[3 + i]= z13 - z2;
+ temp[1 + i]= z11 + z4;
+ temp[7 + i]= z11 - z4;
+ }
+}
+
+void ff_faandct(DCTELEM * data)
+{
+ FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ FLOAT tmp10, tmp11, tmp12, tmp13;
+ FLOAT z2, z4, z11, z13;
+ FLOAT av_unused z5;
+ FLOAT temp[64];
+ int i;
+
+ emms_c();
+
+ row_fdct(temp, data);
+
+ for (i=0; i<8; i++) {
+ tmp0= temp[8*0 + i] + temp[8*7 + i];
+ tmp7= temp[8*0 + i] - temp[8*7 + i];
+ tmp1= temp[8*1 + i] + temp[8*6 + i];
+ tmp6= temp[8*1 + i] - temp[8*6 + i];
+ tmp2= temp[8*2 + i] + temp[8*5 + i];
+ tmp5= temp[8*2 + i] - temp[8*5 + i];
+ tmp3= temp[8*3 + i] + temp[8*4 + i];
+ tmp4= temp[8*3 + i] - temp[8*4 + i];
+
+ tmp10= tmp0 + tmp3;
+ tmp13= tmp0 - tmp3;
+ tmp11= tmp1 + tmp2;
+ tmp12= tmp1 - tmp2;
+
+ data[8*0 + i]= lrintf(SCALE(8*0 + i) * (tmp10 + tmp11));
+ data[8*4 + i]= lrintf(SCALE(8*4 + i) * (tmp10 - tmp11));
+
+ tmp12 += tmp13;
+ tmp12 *= A1;
+ data[8*2 + i]= lrintf(SCALE(8*2 + i) * (tmp13 + tmp12));
+ data[8*6 + i]= lrintf(SCALE(8*6 + i) * (tmp13 - tmp12));
+
+ tmp4 += tmp5;
+ tmp5 += tmp6;
+ tmp6 += tmp7;
+
+#if 0
+ z5= (tmp4 - tmp6) * A5;
+ z2= tmp4*A2 + z5;
+ z4= tmp6*A4 + z5;
+#else
+ z2= tmp4*(A2+A5) - tmp6*A5;
+ z4= tmp6*(A4-A5) + tmp4*A5;
+#endif
+ tmp5*=A1;
+
+ z11= tmp7 + tmp5;
+ z13= tmp7 - tmp5;
+
+ data[8*5 + i]= lrintf(SCALE(8*5 + i) * (z13 + z2));
+ data[8*3 + i]= lrintf(SCALE(8*3 + i) * (z13 - z2));
+ data[8*1 + i]= lrintf(SCALE(8*1 + i) * (z11 + z4));
+ data[8*7 + i]= lrintf(SCALE(8*7 + i) * (z11 - z4));
+ }
+}
+
+void ff_faandct248(DCTELEM * data)
+{
+ FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ FLOAT tmp10, tmp11, tmp12, tmp13;
+ FLOAT temp[64];
+ int i;
+
+ emms_c();
+
+ row_fdct(temp, data);
+
+ for (i=0; i<8; i++) {
+ tmp0 = temp[8*0 + i] + temp[8*1 + i];
+ tmp1 = temp[8*2 + i] + temp[8*3 + i];
+ tmp2 = temp[8*4 + i] + temp[8*5 + i];
+ tmp3 = temp[8*6 + i] + temp[8*7 + i];
+ tmp4 = temp[8*0 + i] - temp[8*1 + i];
+ tmp5 = temp[8*2 + i] - temp[8*3 + i];
+ tmp6 = temp[8*4 + i] - temp[8*5 + i];
+ tmp7 = temp[8*6 + i] - temp[8*7 + i];
+
+ tmp10 = tmp0 + tmp3;
+ tmp11 = tmp1 + tmp2;
+ tmp12 = tmp1 - tmp2;
+ tmp13 = tmp0 - tmp3;
+
+ data[8*0 + i] = lrintf(SCALE(8*0 + i) * (tmp10 + tmp11));
+ data[8*4 + i] = lrintf(SCALE(8*4 + i) * (tmp10 - tmp11));
+
+ tmp12 += tmp13;
+ tmp12 *= A1;
+ data[8*2 + i] = lrintf(SCALE(8*2 + i) * (tmp13 + tmp12));
+ data[8*6 + i] = lrintf(SCALE(8*6 + i) * (tmp13 - tmp12));
+
+ tmp10 = tmp4 + tmp7;
+ tmp11 = tmp5 + tmp6;
+ tmp12 = tmp5 - tmp6;
+ tmp13 = tmp4 - tmp7;
+
+ data[8*1 + i] = lrintf(SCALE(8*0 + i) * (tmp10 + tmp11));
+ data[8*5 + i] = lrintf(SCALE(8*4 + i) * (tmp10 - tmp11));
+
+ tmp12 += tmp13;
+ tmp12 *= A1;
+ data[8*3 + i] = lrintf(SCALE(8*2 + i) * (tmp13 + tmp12));
+ data[8*7 + i] = lrintf(SCALE(8*6 + i) * (tmp13 - tmp12));
+ }
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/faandct.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/faandct.h
new file mode 100644
index 000000000..ce9000923
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/faandct.h
@@ -0,0 +1,39 @@
+/*
+ * Floating point AAN DCT
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file faandct.h
+ * @brief
+ * Floating point AAN DCT
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#ifndef AVCODEC_FAANDCT_H
+#define AVCODEC_FAANDCT_H
+
+#include "dsputil.h"
+
+#define FAAN_POSTSCALE
+
+void ff_faandct(DCTELEM * data);
+void ff_faandct248(DCTELEM * data);
+
+#endif /* AVCODEC_FAANDCT_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/faanidct.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/faanidct.c
new file mode 100644
index 000000000..dc3d8fbb8
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/faanidct.c
@@ -0,0 +1,168 @@
+/*
+ * Floating point AAN IDCT
+ * Copyright (c) 2008 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "faanidct.h"
+
+/* To allow switching to double. */
+#define FLOAT float
+
+#define B0 1.0000000000000000000000
+#define B1 1.3870398453221474618216 // cos(pi*1/16)sqrt(2)
+#define B2 1.3065629648763765278566 // cos(pi*2/16)sqrt(2)
+#define B3 1.1758756024193587169745 // cos(pi*3/16)sqrt(2)
+#define B4 1.0000000000000000000000 // cos(pi*4/16)sqrt(2)
+#define B5 0.7856949583871021812779 // cos(pi*5/16)sqrt(2)
+#define B6 0.5411961001461969843997 // cos(pi*6/16)sqrt(2)
+#define B7 0.2758993792829430123360 // cos(pi*7/16)sqrt(2)
+
+#define A4 0.70710678118654752438 // cos(pi*4/16)
+#define A2 0.92387953251128675613 // cos(pi*2/16)
+
+static const FLOAT prescale[64]={
+B0*B0/8, B0*B1/8, B0*B2/8, B0*B3/8, B0*B4/8, B0*B5/8, B0*B6/8, B0*B7/8,
+B1*B0/8, B1*B1/8, B1*B2/8, B1*B3/8, B1*B4/8, B1*B5/8, B1*B6/8, B1*B7/8,
+B2*B0/8, B2*B1/8, B2*B2/8, B2*B3/8, B2*B4/8, B2*B5/8, B2*B6/8, B2*B7/8,
+B3*B0/8, B3*B1/8, B3*B2/8, B3*B3/8, B3*B4/8, B3*B5/8, B3*B6/8, B3*B7/8,
+B4*B0/8, B4*B1/8, B4*B2/8, B4*B3/8, B4*B4/8, B4*B5/8, B4*B6/8, B4*B7/8,
+B5*B0/8, B5*B1/8, B5*B2/8, B5*B3/8, B5*B4/8, B5*B5/8, B5*B6/8, B5*B7/8,
+B6*B0/8, B6*B1/8, B6*B2/8, B6*B3/8, B6*B4/8, B6*B5/8, B6*B6/8, B6*B7/8,
+B7*B0/8, B7*B1/8, B7*B2/8, B7*B3/8, B7*B4/8, B7*B5/8, B7*B6/8, B7*B7/8,
+};
+
+static inline void p8idct(DCTELEM data[64], FLOAT temp[64], uint8_t *dest, int stride, int x, int y, int type){
+ int i;
+ FLOAT av_unused tmp0;
+ FLOAT s04, d04, s17, d17, s26, d26, s53, d53;
+ FLOAT os07, os16, os25, os34;
+ FLOAT od07, od16, od25, od34;
+
+ for(i=0; i<y*8; i+=y){
+ s17= temp[1*x + i] + temp[7*x + i];
+ d17= temp[1*x + i] - temp[7*x + i];
+ s53= temp[5*x + i] + temp[3*x + i];
+ d53= temp[5*x + i] - temp[3*x + i];
+
+ od07= s17 + s53;
+ od25= (s17 - s53)*(2*A4);
+
+#if 0 //these 2 are equivalent
+ tmp0= (d17 + d53)*(2*A2);
+ od34= d17*( 2*B6) - tmp0;
+ od16= d53*(-2*B2) + tmp0;
+#else
+ od34= d17*(2*(B6-A2)) - d53*(2*A2);
+ od16= d53*(2*(A2-B2)) + d17*(2*A2);
+#endif
+
+ od16 -= od07;
+ od25 -= od16;
+ od34 += od25;
+
+ s26 = temp[2*x + i] + temp[6*x + i];
+ d26 = temp[2*x + i] - temp[6*x + i];
+ d26*= 2*A4;
+ d26-= s26;
+
+ s04= temp[0*x + i] + temp[4*x + i];
+ d04= temp[0*x + i] - temp[4*x + i];
+
+ os07= s04 + s26;
+ os34= s04 - s26;
+ os16= d04 + d26;
+ os25= d04 - d26;
+
+ if(type==0){
+ temp[0*x + i]= os07 + od07;
+ temp[7*x + i]= os07 - od07;
+ temp[1*x + i]= os16 + od16;
+ temp[6*x + i]= os16 - od16;
+ temp[2*x + i]= os25 + od25;
+ temp[5*x + i]= os25 - od25;
+ temp[3*x + i]= os34 - od34;
+ temp[4*x + i]= os34 + od34;
+ }else if(type==1){
+ data[0*x + i]= lrintf(os07 + od07);
+ data[7*x + i]= lrintf(os07 - od07);
+ data[1*x + i]= lrintf(os16 + od16);
+ data[6*x + i]= lrintf(os16 - od16);
+ data[2*x + i]= lrintf(os25 + od25);
+ data[5*x + i]= lrintf(os25 - od25);
+ data[3*x + i]= lrintf(os34 - od34);
+ data[4*x + i]= lrintf(os34 + od34);
+ }else if(type==2){
+ dest[0*stride + i]= av_clip_uint8(((int)dest[0*stride + i]) + lrintf(os07 + od07));
+ dest[7*stride + i]= av_clip_uint8(((int)dest[7*stride + i]) + lrintf(os07 - od07));
+ dest[1*stride + i]= av_clip_uint8(((int)dest[1*stride + i]) + lrintf(os16 + od16));
+ dest[6*stride + i]= av_clip_uint8(((int)dest[6*stride + i]) + lrintf(os16 - od16));
+ dest[2*stride + i]= av_clip_uint8(((int)dest[2*stride + i]) + lrintf(os25 + od25));
+ dest[5*stride + i]= av_clip_uint8(((int)dest[5*stride + i]) + lrintf(os25 - od25));
+ dest[3*stride + i]= av_clip_uint8(((int)dest[3*stride + i]) + lrintf(os34 - od34));
+ dest[4*stride + i]= av_clip_uint8(((int)dest[4*stride + i]) + lrintf(os34 + od34));
+ }else{
+ dest[0*stride + i]= av_clip_uint8(lrintf(os07 + od07));
+ dest[7*stride + i]= av_clip_uint8(lrintf(os07 - od07));
+ dest[1*stride + i]= av_clip_uint8(lrintf(os16 + od16));
+ dest[6*stride + i]= av_clip_uint8(lrintf(os16 - od16));
+ dest[2*stride + i]= av_clip_uint8(lrintf(os25 + od25));
+ dest[5*stride + i]= av_clip_uint8(lrintf(os25 - od25));
+ dest[3*stride + i]= av_clip_uint8(lrintf(os34 - od34));
+ dest[4*stride + i]= av_clip_uint8(lrintf(os34 + od34));
+ }
+ }
+}
+
+void ff_faanidct(DCTELEM block[64]){
+ FLOAT temp[64];
+ int i;
+
+ emms_c();
+
+ for(i=0; i<64; i++)
+ temp[i] = block[i] * prescale[i];
+
+ p8idct(block, temp, NULL, 0, 1, 8, 0);
+ p8idct(block, temp, NULL, 0, 8, 1, 1);
+}
+
+void ff_faanidct_add(uint8_t *dest, int line_size, DCTELEM block[64]){
+ FLOAT temp[64];
+ int i;
+
+ emms_c();
+
+ for(i=0; i<64; i++)
+ temp[i] = block[i] * prescale[i];
+
+ p8idct(block, temp, NULL, 0, 1, 8, 0);
+ p8idct(NULL , temp, dest, line_size, 8, 1, 2);
+}
+
+void ff_faanidct_put(uint8_t *dest, int line_size, DCTELEM block[64]){
+ FLOAT temp[64];
+ int i;
+
+ emms_c();
+
+ for(i=0; i<64; i++)
+ temp[i] = block[i] * prescale[i];
+
+ p8idct(block, temp, NULL, 0, 1, 8, 0);
+ p8idct(NULL , temp, dest, line_size, 8, 1, 3);
+}
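For orientation, a minimal sketch of how the put/add entry points above are typically dispatched: the "put" variant overwrites the destination 8x8 area (intra-style), the "add" variant adds the reconstructed residual to what is already there (inter-style), and ff_faanidct() transforms the block in place. The intra/inter mapping and the helper name are assumptions for illustration; DCTELEM and uint8_t come via faanidct.h.

#include "faanidct.h"

/* Illustrative only: block[] holds one 8x8 set of dequantized coefficients,
 * dest points into a picture plane, line_size is the plane stride in bytes. */
static void idct_example(uint8_t *dest, int line_size, DCTELEM block[64], int is_intra)
{
    if (is_intra)
        ff_faanidct_put(dest, line_size, block);   /* overwrite the 8x8 area        */
    else
        ff_faanidct_add(dest, line_size, block);   /* add the residual to the area  */
}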
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/faanidct.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/faanidct.h
new file mode 100644
index 000000000..4cf118928
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/faanidct.h
@@ -0,0 +1,32 @@
+/*
+ * Floating point AAN IDCT
+ * Copyright (c) 2008 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_FAANIDCT_H
+#define AVCODEC_FAANIDCT_H
+
+#include <stdint.h>
+#include "dsputil.h"
+
+void ff_faanidct(DCTELEM block[64]);
+void ff_faanidct_add(uint8_t *dest, int line_size, DCTELEM block[64]);
+void ff_faanidct_put(uint8_t *dest, int line_size, DCTELEM block[64]);
+
+#endif /* AVCODEC_FAANIDCT_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/fft.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/fft.c
new file mode 100644
index 000000000..150a59aeb
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/fft.c
@@ -0,0 +1,359 @@
+/*
+ * FFT/IFFT transforms
+ * Copyright (c) 2008 Loren Merritt
+ * Copyright (c) 2002 Fabrice Bellard
+ * Partly based on libdjbfft by D. J. Bernstein
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/fft.c
+ * FFT/IFFT transforms.
+ */
+
+#include "dsputil.h"
+
+/* cos(2*pi*x/n) for 0<=x<=n/4, followed by its reverse */
+#if !CONFIG_HARDCODED_TABLES
+COSTABLE(16);
+COSTABLE(32);
+COSTABLE(64);
+COSTABLE(128);
+COSTABLE(256);
+COSTABLE(512);
+COSTABLE(1024);
+COSTABLE(2048);
+COSTABLE(4096);
+COSTABLE(8192);
+COSTABLE(16384);
+COSTABLE(32768);
+COSTABLE(65536);
+#endif
+COSTABLE_CONST FFTSample * const ff_cos_tabs[] = {
+ NULL, NULL, NULL, NULL,
+ ff_cos_16, ff_cos_32, ff_cos_64, ff_cos_128, ff_cos_256, ff_cos_512, ff_cos_1024,
+ ff_cos_2048, ff_cos_4096, ff_cos_8192, ff_cos_16384, ff_cos_32768, ff_cos_65536,
+};
+
+static int split_radix_permutation(int i, int n, int inverse)
+{
+ int m;
+ if(n <= 2) return i&1;
+ m = n >> 1;
+ if(!(i&m)) return split_radix_permutation(i, m, inverse)*2;
+ m >>= 1;
+ if(inverse == !(i&m)) return split_radix_permutation(i, m, inverse)*4 + 1;
+ else return split_radix_permutation(i, m, inverse)*4 - 1;
+}
+
+av_cold void ff_init_ff_cos_tabs(int index)
+{
+#if !CONFIG_HARDCODED_TABLES
+ int i;
+ int m = 1<<index;
+ double freq = 2*M_PI/m;
+ FFTSample *tab = ff_cos_tabs[index];
+ for(i=0; i<=m/4; i++)
+ tab[i] = cos(i*freq);
+ for(i=1; i<m/4; i++)
+ tab[m/2-i] = tab[i];
+#endif
+}
+
+av_cold int ff_fft_init(FFTContext *s, int nbits, int inverse)
+{
+ int i, j, m, n;
+ float alpha, c1, s1, s2;
+ int av_unused has_vectors;
+
+ if (nbits < 2 || nbits > 16)
+ goto fail;
+ s->nbits = nbits;
+ n = 1 << nbits;
+
+ s->tmp_buf = NULL;
+ s->exptab = av_malloc((n / 2) * sizeof(FFTComplex));
+ if (!s->exptab)
+ goto fail;
+ s->revtab = av_malloc(n * sizeof(uint16_t));
+ if (!s->revtab)
+ goto fail;
+ s->inverse = inverse;
+
+ s2 = inverse ? 1.0 : -1.0;
+
+ s->fft_permute = ff_fft_permute_c;
+ s->fft_calc = ff_fft_calc_c;
+ s->imdct_calc = ff_imdct_calc_c;
+ s->imdct_half = ff_imdct_half_c;
+ s->mdct_calc = ff_mdct_calc_c;
+ s->exptab1 = NULL;
+ s->split_radix = 1;
+
+ if (s->split_radix) {
+ for(j=4; j<=nbits; j++) {
+ ff_init_ff_cos_tabs(j);
+ }
+ for(i=0; i<n; i++)
+ s->revtab[-split_radix_permutation(i, n, s->inverse) & (n-1)] = i;
+ s->tmp_buf = av_malloc(n * sizeof(FFTComplex));
+ } else {
+ int np, nblocks, np2, l;
+ FFTComplex *q;
+
+ for(i=0; i<(n/2); i++) {
+ alpha = 2 * M_PI * (float)i / (float)n;
+ c1 = cos(alpha);
+ s1 = sin(alpha) * s2;
+ s->exptab[i].re = c1;
+ s->exptab[i].im = s1;
+ }
+
+ np = 1 << nbits;
+ nblocks = np >> 3;
+ np2 = np >> 1;
+ s->exptab1 = av_malloc(np * 2 * sizeof(FFTComplex));
+ if (!s->exptab1)
+ goto fail;
+ q = s->exptab1;
+ do {
+ for(l = 0; l < np2; l += 2 * nblocks) {
+ *q++ = s->exptab[l];
+ *q++ = s->exptab[l + nblocks];
+
+ q->re = -s->exptab[l].im;
+ q->im = s->exptab[l].re;
+ q++;
+ q->re = -s->exptab[l + nblocks].im;
+ q->im = s->exptab[l + nblocks].re;
+ q++;
+ }
+ nblocks = nblocks >> 1;
+ } while (nblocks != 0);
+ av_freep(&s->exptab);
+
+ /* compute bit reverse table */
+ for(i=0;i<n;i++) {
+ m=0;
+ for(j=0;j<nbits;j++) {
+ m |= ((i >> j) & 1) << (nbits-j-1);
+ }
+ s->revtab[i]=m;
+ }
+ }
+
+ return 0;
+ fail:
+ av_freep(&s->revtab);
+ av_freep(&s->exptab);
+ av_freep(&s->exptab1);
+ av_freep(&s->tmp_buf);
+ return -1;
+}
+
+void ff_fft_permute_c(FFTContext *s, FFTComplex *z)
+{
+ int j, k, np;
+ FFTComplex tmp;
+ const uint16_t *revtab = s->revtab;
+ np = 1 << s->nbits;
+
+ if (s->tmp_buf) {
+ /* TODO: handle split-radix permute in a more optimal way, probably in-place */
+ for(j=0;j<np;j++) s->tmp_buf[revtab[j]] = z[j];
+ memcpy(z, s->tmp_buf, np * sizeof(FFTComplex));
+ return;
+ }
+
+ /* reverse */
+ for(j=0;j<np;j++) {
+ k = revtab[j];
+ if (k < j) {
+ tmp = z[k];
+ z[k] = z[j];
+ z[j] = tmp;
+ }
+ }
+}
+
+av_cold void ff_fft_end(FFTContext *s)
+{
+ av_freep(&s->revtab);
+ av_freep(&s->exptab);
+ av_freep(&s->exptab1);
+ av_freep(&s->tmp_buf);
+}
+
+#define sqrthalf (float)M_SQRT1_2
+
+#define BF(x,y,a,b) {\
+ x = a - b;\
+ y = a + b;\
+}
+
+#define BUTTERFLIES(a0,a1,a2,a3) {\
+ BF(t3, t5, t5, t1);\
+ BF(a2.re, a0.re, a0.re, t5);\
+ BF(a3.im, a1.im, a1.im, t3);\
+ BF(t4, t6, t2, t6);\
+ BF(a3.re, a1.re, a1.re, t4);\
+ BF(a2.im, a0.im, a0.im, t6);\
+}
+
+// force loading all the inputs before storing any.
+// this is slightly slower for small data, but avoids store->load aliasing
+// for addresses separated by large powers of 2.
+#define BUTTERFLIES_BIG(a0,a1,a2,a3) {\
+ FFTSample r0=a0.re, i0=a0.im, r1=a1.re, i1=a1.im;\
+ BF(t3, t5, t5, t1);\
+ BF(a2.re, a0.re, r0, t5);\
+ BF(a3.im, a1.im, i1, t3);\
+ BF(t4, t6, t2, t6);\
+ BF(a3.re, a1.re, r1, t4);\
+ BF(a2.im, a0.im, i0, t6);\
+}
+
+#define TRANSFORM(a0,a1,a2,a3,wre,wim) {\
+ t1 = a2.re * wre + a2.im * wim;\
+ t2 = a2.im * wre - a2.re * wim;\
+ t5 = a3.re * wre - a3.im * wim;\
+ t6 = a3.im * wre + a3.re * wim;\
+ BUTTERFLIES(a0,a1,a2,a3)\
+}
+
+#define TRANSFORM_ZERO(a0,a1,a2,a3) {\
+ t1 = a2.re;\
+ t2 = a2.im;\
+ t5 = a3.re;\
+ t6 = a3.im;\
+ BUTTERFLIES(a0,a1,a2,a3)\
+}
+
+/* z[0...8n-1], w[1...2n-1] */
+#define PASS(name)\
+static void name(FFTComplex *z, const FFTSample *wre, unsigned int n)\
+{\
+ FFTSample t1, t2, t3, t4, t5, t6;\
+ int o1 = 2*n;\
+ int o2 = 4*n;\
+ int o3 = 6*n;\
+ const FFTSample *wim = wre+o1;\
+ n--;\
+\
+ TRANSFORM_ZERO(z[0],z[o1],z[o2],z[o3]);\
+ TRANSFORM(z[1],z[o1+1],z[o2+1],z[o3+1],wre[1],wim[-1]);\
+ do {\
+ z += 2;\
+ wre += 2;\
+ wim -= 2;\
+ TRANSFORM(z[0],z[o1],z[o2],z[o3],wre[0],wim[0]);\
+ TRANSFORM(z[1],z[o1+1],z[o2+1],z[o3+1],wre[1],wim[-1]);\
+ } while(--n);\
+}
+
+PASS(pass)
+#undef BUTTERFLIES
+#define BUTTERFLIES BUTTERFLIES_BIG
+PASS(pass_big)
+
+#define DECL_FFT(n,n2,n4)\
+static void fft##n(FFTComplex *z)\
+{\
+ fft##n2(z);\
+ fft##n4(z+n4*2);\
+ fft##n4(z+n4*3);\
+ pass(z,ff_cos_##n,n4/2);\
+}
+
+static void fft4(FFTComplex *z)
+{
+ FFTSample t1, t2, t3, t4, t5, t6, t7, t8;
+
+ BF(t3, t1, z[0].re, z[1].re);
+ BF(t8, t6, z[3].re, z[2].re);
+ BF(z[2].re, z[0].re, t1, t6);
+ BF(t4, t2, z[0].im, z[1].im);
+ BF(t7, t5, z[2].im, z[3].im);
+ BF(z[3].im, z[1].im, t4, t8);
+ BF(z[3].re, z[1].re, t3, t7);
+ BF(z[2].im, z[0].im, t2, t5);
+}
+
+static void fft8(FFTComplex *z)
+{
+ FFTSample t1, t2, t3, t4, t5, t6, t7, t8;
+
+ fft4(z);
+
+ BF(t1, z[5].re, z[4].re, -z[5].re);
+ BF(t2, z[5].im, z[4].im, -z[5].im);
+ BF(t3, z[7].re, z[6].re, -z[7].re);
+ BF(t4, z[7].im, z[6].im, -z[7].im);
+ BF(t8, t1, t3, t1);
+ BF(t7, t2, t2, t4);
+ BF(z[4].re, z[0].re, z[0].re, t1);
+ BF(z[4].im, z[0].im, z[0].im, t2);
+ BF(z[6].re, z[2].re, z[2].re, t7);
+ BF(z[6].im, z[2].im, z[2].im, t8);
+
+ TRANSFORM(z[1],z[3],z[5],z[7],sqrthalf,sqrthalf);
+}
+
+#if !CONFIG_SMALL
+static void fft16(FFTComplex *z)
+{
+ FFTSample t1, t2, t3, t4, t5, t6;
+
+ fft8(z);
+ fft4(z+8);
+ fft4(z+12);
+
+ TRANSFORM_ZERO(z[0],z[4],z[8],z[12]);
+ TRANSFORM(z[2],z[6],z[10],z[14],sqrthalf,sqrthalf);
+ TRANSFORM(z[1],z[5],z[9],z[13],ff_cos_16[1],ff_cos_16[3]);
+ TRANSFORM(z[3],z[7],z[11],z[15],ff_cos_16[3],ff_cos_16[1]);
+}
+#else
+DECL_FFT(16,8,4)
+#endif
+DECL_FFT(32,16,8)
+DECL_FFT(64,32,16)
+DECL_FFT(128,64,32)
+DECL_FFT(256,128,64)
+DECL_FFT(512,256,128)
+#if !CONFIG_SMALL
+#define pass pass_big
+#endif
+DECL_FFT(1024,512,256)
+DECL_FFT(2048,1024,512)
+DECL_FFT(4096,2048,1024)
+DECL_FFT(8192,4096,2048)
+DECL_FFT(16384,8192,4096)
+DECL_FFT(32768,16384,8192)
+DECL_FFT(65536,32768,16384)
+
+static void (* const fft_dispatch[])(FFTComplex*) = {
+ fft4, fft8, fft16, fft32, fft64, fft128, fft256, fft512, fft1024,
+ fft2048, fft4096, fft8192, fft16384, fft32768, fft65536,
+};
+
+void ff_fft_calc_c(FFTContext *s, FFTComplex *z)
+{
+ fft_dispatch[s->nbits-2](z);
+}
+
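A minimal sketch of how this FFT is driven, assuming FFTContext, FFTComplex and FFTSample are visible through dsputil.h as they are for fft.c above; the input signal is made up. Note that the input must be reordered with fft_permute() before fft_calc().

#include "dsputil.h"

/* Illustrative only: run a 32-point forward complex FFT in place. */
static void fft_example(void)
{
    FFTContext ctx;
    FFTComplex z[32];
    int i;

    if (ff_fft_init(&ctx, 5, 0) < 0)   /* nbits = 5 -> 1 << 5 = 32 points, forward */
        return;

    for (i = 0; i < 32; i++) {         /* some made-up input signal */
        z[i].re = i;
        z[i].im = 0;
    }

    ctx.fft_permute(&ctx, z);          /* required input reordering                 */
    ctx.fft_calc(&ctx, z);             /* in-place transform; z[] now holds the DFT */

    ff_fft_end(&ctx);                  /* free the tables ff_fft_init() allocated   */
}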
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/flv.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/flv.h
new file mode 100644
index 000000000..e12b3b026
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/flv.h
@@ -0,0 +1,30 @@
+/*
+ * FLV specific private header.
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_FLV_H
+#define AVCODEC_FLV_H
+
+void ff_flv_encode_picture_header(MpegEncContext * s, int picture_number);
+void ff_flv2_encode_ac_esc(PutBitContext *pb, int slevel, int level, int run, int last);
+
+int ff_flv_decode_picture_header(MpegEncContext *s);
+void ff_flv2_decode_ac_esc(GetBitContext *gb, int *level, int *run, int *last);
+
+#endif
+
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/flvdec.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/flvdec.c
new file mode 100644
index 000000000..104e48d91
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/flvdec.c
@@ -0,0 +1,135 @@
+/*
+ * FLV decoding.
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "mpegvideo.h"
+#include "h263.h"
+#include "flv.h"
+
+void ff_flv2_decode_ac_esc(GetBitContext *gb, int *level, int *run, int *last){
+ int is11 = get_bits1(gb);
+ *last = get_bits1(gb);
+ *run = get_bits(gb, 6);
+ if(is11){
+ *level = get_sbits(gb, 11);
+ } else {
+ *level = get_sbits(gb, 7);
+ }
+}
+
+int ff_flv_decode_picture_header(MpegEncContext *s)
+{
+ int format, width, height;
+
+ /* picture header */
+ if (get_bits_long(&s->gb, 17) != 1) {
+ av_log(s->avctx, AV_LOG_ERROR, "Bad picture start code\n");
+ return -1;
+ }
+ format = get_bits(&s->gb, 5);
+ if (format != 0 && format != 1) {
+ av_log(s->avctx, AV_LOG_ERROR, "Bad picture format\n");
+ return -1;
+ }
+ s->h263_flv = format+1;
+ s->picture_number = get_bits(&s->gb, 8); /* picture timestamp */
+ format = get_bits(&s->gb, 3);
+ switch (format) {
+ case 0:
+ width = get_bits(&s->gb, 8);
+ height = get_bits(&s->gb, 8);
+ break;
+ case 1:
+ width = get_bits(&s->gb, 16);
+ height = get_bits(&s->gb, 16);
+ break;
+ case 2:
+ width = 352;
+ height = 288;
+ break;
+ case 3:
+ width = 176;
+ height = 144;
+ break;
+ case 4:
+ width = 128;
+ height = 96;
+ break;
+ case 5:
+ width = 320;
+ height = 240;
+ break;
+ case 6:
+ width = 160;
+ height = 120;
+ break;
+ default:
+ width = height = 0;
+ break;
+ }
+ if(avcodec_check_dimensions(s->avctx, width, height))
+ return -1;
+ s->width = width;
+ s->height = height;
+
+ s->pict_type = FF_I_TYPE + get_bits(&s->gb, 2);
+ s->dropable= s->pict_type > FF_P_TYPE;
+ if (s->dropable)
+ s->pict_type = FF_P_TYPE;
+
+ skip_bits1(&s->gb); /* deblocking flag */
+ s->chroma_qscale= s->qscale = get_bits(&s->gb, 5);
+
+ s->h263_plus = 0;
+
+ s->unrestricted_mv = 1;
+ s->h263_long_vectors = 0;
+
+ /* PEI */
+ while (get_bits1(&s->gb) != 0) {
+ skip_bits(&s->gb, 8);
+ }
+ s->f_code = 1;
+
+ if(s->avctx->debug & FF_DEBUG_PICT_INFO){
+ av_log(s->avctx, AV_LOG_DEBUG, "%c esc_type:%d, qp:%d num:%d\n",
+ s->dropable ? 'D' : av_get_pict_type_char(s->pict_type), s->h263_flv-1, s->qscale, s->picture_number);
+ }
+
+ s->y_dc_scale_table=
+ s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
+
+ return 0;
+}
+
+AVCodec flv_decoder = {
+ "flv",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_FLV1,
+ sizeof(MpegEncContext),
+ ff_h263_decode_init,
+ NULL,
+ ff_h263_decode_end,
+ ff_h263_decode_frame,
+ /*.capabilities = */CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
+ /*.next = */NULL,
+ /*.flush = */NULL,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name = */NULL_IF_CONFIG_SMALL("Flash Video (FLV) / Sorenson Spark / Sorenson H.263"),
+};
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/get_bits.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/get_bits.h
new file mode 100644
index 000000000..c284ab50e
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/get_bits.h
@@ -0,0 +1,712 @@
+/*
+ * copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/get_bits.h
+ * bitstream reader API header.
+ */
+
+#ifndef AVCODEC_GET_BITS_H
+#define AVCODEC_GET_BITS_H
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <assert.h>
+#include "../libavutil/bswap.h"
+#include "libavutil/common.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/log.h"
+#include "mathops.h"
+
+#if defined(ALT_BITSTREAM_READER_LE) && !defined(ALT_BITSTREAM_READER)
+# define ALT_BITSTREAM_READER
+#endif
+
+#if !defined(LIBMPEG2_BITSTREAM_READER) && !defined(A32_BITSTREAM_READER) && !defined(ALT_BITSTREAM_READER)
+# define ALT_BITSTREAM_READER
+//#define LIBMPEG2_BITSTREAM_READER
+//#define A32_BITSTREAM_READER
+#endif
+
+#if ARCH_X86
+// avoid +32 for shift optimization (gcc should do that ...)
+static inline int32_t NEG_SSR32( int32_t a, int8_t s){
+ __asm__ ("sarl %1, %0\n\t"
+ : "+r" (a)
+ : "ic" ((uint8_t)(-s))
+ );
+ return a;
+}
+static inline uint32_t NEG_USR32(uint32_t a, int8_t s){
+ __asm__ ("shrl %1, %0\n\t"
+ : "+r" (a)
+ : "ic" ((uint8_t)(-s))
+ );
+ return a;
+}
+#else
+# define NEG_SSR32(a,s) ((( int32_t)(a))>>(32-(s)))
+# define NEG_USR32(a,s) (((uint32_t)(a))>>(32-(s)))
+#endif
+
+/* bit input */
+/* buffer, buffer_end and size_in_bits must be present and used by every reader */
+typedef struct GetBitContext {
+ const uint8_t *buffer, *buffer_end;
+#ifdef ALT_BITSTREAM_READER
+ int index;
+#elif defined LIBMPEG2_BITSTREAM_READER
+ uint8_t *buffer_ptr;
+ uint32_t cache;
+ int bit_count;
+#elif defined A32_BITSTREAM_READER
+ uint32_t *buffer_ptr;
+ uint32_t cache0;
+ uint32_t cache1;
+ int bit_count;
+#endif
+ int size_in_bits;
+} GetBitContext;
+
+#define VLC_TYPE int16_t
+
+typedef struct VLC {
+ int bits;
+ VLC_TYPE (*table)[2]; ///< code, bits
+ int table_size, table_allocated;
+} VLC;
+
+typedef struct RL_VLC_ELEM {
+ int16_t level;
+ int8_t len;
+ uint8_t run;
+} RL_VLC_ELEM;
+
+/* Bitstream reader API docs:
+name
+ arbitrary name which is used as prefix for the internal variables
+
+gb
+ getbitcontext
+
+OPEN_READER(name, gb)
+ loads gb into local variables
+
+CLOSE_READER(name, gb)
+ stores local vars in gb
+
+UPDATE_CACHE(name, gb)
+ refills the internal cache from the bitstream
+ after this call at least MIN_CACHE_BITS will be available,
+
+GET_CACHE(name, gb)
+ will output the contents of the internal cache; the next bit is the MSB of 32 or 64 bits (FIXME 64bit)
+
+SHOW_UBITS(name, gb, num)
+ will return the next num bits
+
+SHOW_SBITS(name, gb, num)
+ will return the next num bits and do sign extension
+
+SKIP_BITS(name, gb, num)
+ will skip over the next num bits
+ note, this is equivalent to SKIP_CACHE; SKIP_COUNTER
+
+SKIP_CACHE(name, gb, num)
+ will remove the next num bits from the cache (note SKIP_COUNTER MUST be called before UPDATE_CACHE / CLOSE_READER)
+
+SKIP_COUNTER(name, gb, num)
+ will increment the internal bit counter (see SKIP_CACHE & SKIP_BITS)
+
+LAST_SKIP_CACHE(name, gb, num)
+ will remove the next num bits from the cache if it is needed for UPDATE_CACHE, otherwise it will do nothing
+
+LAST_SKIP_BITS(name, gb, num)
+ is equivalent to LAST_SKIP_CACHE; SKIP_COUNTER
+
+for examples see get_bits, show_bits, skip_bits, get_vlc
+*/
+
+#ifdef ALT_BITSTREAM_READER
+# define MIN_CACHE_BITS 25
+
+# define OPEN_READER(name, gb)\
+ unsigned int name##_index= (gb)->index;\
+ int name##_cache= 0;\
+
+# define CLOSE_READER(name, gb)\
+ (gb)->index= name##_index;\
+
+# ifdef ALT_BITSTREAM_READER_LE
+# define UPDATE_CACHE(name, gb)\
+ name##_cache= AV_RL32( ((const uint8_t *)(gb)->buffer)+(name##_index>>3) ) >> (name##_index&0x07);\
+
+# define SKIP_CACHE(name, gb, num)\
+ name##_cache >>= (num);
+# else
+# define UPDATE_CACHE(name, gb)\
+ name##_cache= AV_RB32( ((const uint8_t *)(gb)->buffer)+(name##_index>>3) ) << (name##_index&0x07);\
+
+# define SKIP_CACHE(name, gb, num)\
+ name##_cache <<= (num);
+# endif
+
+// FIXME name?
+# define SKIP_COUNTER(name, gb, num)\
+ name##_index += (num);\
+
+# define SKIP_BITS(name, gb, num)\
+ {\
+ SKIP_CACHE(name, gb, num)\
+ SKIP_COUNTER(name, gb, num)\
+ }\
+
+# define LAST_SKIP_BITS(name, gb, num) SKIP_COUNTER(name, gb, num)
+# define LAST_SKIP_CACHE(name, gb, num) ;
+
+# ifdef ALT_BITSTREAM_READER_LE
+# define SHOW_UBITS(name, gb, num)\
+ ((name##_cache) & (NEG_USR32(0xffffffff,num)))
+
+# define SHOW_SBITS(name, gb, num)\
+ NEG_SSR32((name##_cache)<<(32-(num)), num)
+# else
+# define SHOW_UBITS(name, gb, num)\
+ NEG_USR32(name##_cache, num)
+
+# define SHOW_SBITS(name, gb, num)\
+ NEG_SSR32(name##_cache, num)
+# endif
+
+# define GET_CACHE(name, gb)\
+ ((uint32_t)name##_cache)
+
+static inline int get_bits_count(const GetBitContext *s){
+ return s->index;
+}
+
+static inline void skip_bits_long(GetBitContext *s, int n){
+ s->index += n;
+}
+
+#elif defined LIBMPEG2_BITSTREAM_READER
+//libmpeg2 like reader
+
+# define MIN_CACHE_BITS 17
+
+# define OPEN_READER(name, gb)\
+ int name##_bit_count=(gb)->bit_count;\
+ int name##_cache= (gb)->cache;\
+ uint8_t * name##_buffer_ptr=(gb)->buffer_ptr;\
+
+# define CLOSE_READER(name, gb)\
+ (gb)->bit_count= name##_bit_count;\
+ (gb)->cache= name##_cache;\
+ (gb)->buffer_ptr= name##_buffer_ptr;\
+
+# define UPDATE_CACHE(name, gb)\
+ if(name##_bit_count >= 0){\
+ name##_cache+= AV_RB16(name##_buffer_ptr) << name##_bit_count; \
+ name##_buffer_ptr+=2;\
+ name##_bit_count-= 16;\
+ }\
+
+# define SKIP_CACHE(name, gb, num)\
+ name##_cache <<= (num);\
+
+# define SKIP_COUNTER(name, gb, num)\
+ name##_bit_count += (num);\
+
+# define SKIP_BITS(name, gb, num)\
+ {\
+ SKIP_CACHE(name, gb, num)\
+ SKIP_COUNTER(name, gb, num)\
+ }\
+
+# define LAST_SKIP_BITS(name, gb, num) SKIP_BITS(name, gb, num)
+# define LAST_SKIP_CACHE(name, gb, num) SKIP_CACHE(name, gb, num)
+
+# define SHOW_UBITS(name, gb, num)\
+ NEG_USR32(name##_cache, num)
+
+# define SHOW_SBITS(name, gb, num)\
+ NEG_SSR32(name##_cache, num)
+
+# define GET_CACHE(name, gb)\
+ ((uint32_t)name##_cache)
+
+static inline int get_bits_count(const GetBitContext *s){
+ return (s->buffer_ptr - s->buffer)*8 - 16 + s->bit_count;
+}
+
+static inline void skip_bits_long(GetBitContext *s, int n){
+ OPEN_READER(re, s)
+ re_bit_count += n;
+ re_buffer_ptr += 2*(re_bit_count>>4);
+ re_bit_count &= 15;
+ re_cache = ((re_buffer_ptr[-2]<<8) + re_buffer_ptr[-1]) << (16+re_bit_count);
+ UPDATE_CACHE(re, s)
+ CLOSE_READER(re, s)
+}
+
+#elif defined A32_BITSTREAM_READER
+
+# define MIN_CACHE_BITS 32
+
+# define OPEN_READER(name, gb)\
+ int name##_bit_count=(gb)->bit_count;\
+ uint32_t name##_cache0= (gb)->cache0;\
+ uint32_t name##_cache1= (gb)->cache1;\
+ uint32_t * name##_buffer_ptr=(gb)->buffer_ptr;\
+
+# define CLOSE_READER(name, gb)\
+ (gb)->bit_count= name##_bit_count;\
+ (gb)->cache0= name##_cache0;\
+ (gb)->cache1= name##_cache1;\
+ (gb)->buffer_ptr= name##_buffer_ptr;\
+
+# define UPDATE_CACHE(name, gb)\
+ if(name##_bit_count > 0){\
+ const uint32_t next= be2me_32( *name##_buffer_ptr );\
+ name##_cache0 |= NEG_USR32(next,name##_bit_count);\
+ name##_cache1 |= next<<name##_bit_count;\
+ name##_buffer_ptr++;\
+ name##_bit_count-= 32;\
+ }\
+
+#if ARCH_X86
+# define SKIP_CACHE(name, gb, num)\
+ __asm__(\
+ "shldl %2, %1, %0 \n\t"\
+ "shll %2, %1 \n\t"\
+ : "+r" (name##_cache0), "+r" (name##_cache1)\
+ : "Ic" ((uint8_t)(num))\
+ );
+#else
+# define SKIP_CACHE(name, gb, num)\
+ name##_cache0 <<= (num);\
+ name##_cache0 |= NEG_USR32(name##_cache1,num);\
+ name##_cache1 <<= (num);
+#endif
+
+# define SKIP_COUNTER(name, gb, num)\
+ name##_bit_count += (num);\
+
+# define SKIP_BITS(name, gb, num)\
+ {\
+ SKIP_CACHE(name, gb, num)\
+ SKIP_COUNTER(name, gb, num)\
+ }\
+
+# define LAST_SKIP_BITS(name, gb, num) SKIP_BITS(name, gb, num)
+# define LAST_SKIP_CACHE(name, gb, num) SKIP_CACHE(name, gb, num)
+
+# define SHOW_UBITS(name, gb, num)\
+ NEG_USR32(name##_cache0, num)
+
+# define SHOW_SBITS(name, gb, num)\
+ NEG_SSR32(name##_cache0, num)
+
+# define GET_CACHE(name, gb)\
+ (name##_cache0)
+
+static inline int get_bits_count(const GetBitContext *s){
+ return ((uint8_t*)s->buffer_ptr - s->buffer)*8 - 32 + s->bit_count;
+}
+
+static inline void skip_bits_long(GetBitContext *s, int n){
+ OPEN_READER(re, s)
+ re_bit_count += n;
+ re_buffer_ptr += re_bit_count>>5;
+ re_bit_count &= 31;
+ re_cache0 = be2me_32( re_buffer_ptr[-1] ) << re_bit_count;
+ re_cache1 = 0;
+ UPDATE_CACHE(re, s)
+ CLOSE_READER(re, s)
+}
+
+#endif
+
+/**
+ * read mpeg1 dc style vlc (sign bit + mantissa with no MSB).
+ * if the MSB is not set it is negative
+ * @param n length in bits
+ * @author BERO
+ */
+static inline int get_xbits(GetBitContext *s, int n){
+ register int sign;
+ register int32_t cache;
+ OPEN_READER(re, s)
+ UPDATE_CACHE(re, s)
+ cache = GET_CACHE(re,s);
+ sign=(~cache)>>31;
+ LAST_SKIP_BITS(re, s, n)
+ CLOSE_READER(re, s)
+ return (NEG_USR32(sign ^ cache, n) ^ sign) - sign;
+}
+
+static inline int get_sbits(GetBitContext *s, int n){
+ register int tmp;
+ OPEN_READER(re, s)
+ UPDATE_CACHE(re, s)
+ tmp= SHOW_SBITS(re, s, n);
+ LAST_SKIP_BITS(re, s, n)
+ CLOSE_READER(re, s)
+ return tmp;
+}
+
+/**
+ * reads 1-17 bits.
+ * Note, the alt bitstream reader can read up to 25 bits, but the libmpeg2 reader can't
+ */
+static inline unsigned int get_bits(GetBitContext *s, int n){
+ register int tmp;
+ OPEN_READER(re, s)
+ UPDATE_CACHE(re, s)
+ tmp= SHOW_UBITS(re, s, n);
+ LAST_SKIP_BITS(re, s, n)
+ CLOSE_READER(re, s)
+ return tmp;
+}
+
+/**
+ * shows 1-17 bits.
+ * Note, the alt bitstream reader can read up to 25 bits, but the libmpeg2 reader can't
+ */
+static inline unsigned int show_bits(GetBitContext *s, int n){
+ register int tmp;
+ OPEN_READER(re, s)
+ UPDATE_CACHE(re, s)
+ tmp= SHOW_UBITS(re, s, n);
+// CLOSE_READER(re, s)
+ return tmp;
+}
+
+static inline void skip_bits(GetBitContext *s, int n){
+ //Note gcc seems to optimize this to s->index+=n for the ALT_READER :))
+ OPEN_READER(re, s)
+ UPDATE_CACHE(re, s)
+ LAST_SKIP_BITS(re, s, n)
+ CLOSE_READER(re, s)
+}
+
+static inline unsigned int get_bits1(GetBitContext *s){
+#ifdef ALT_BITSTREAM_READER
+ unsigned int index= s->index;
+ uint8_t result= s->buffer[ index>>3 ];
+#ifdef ALT_BITSTREAM_READER_LE
+ result>>= (index&0x07);
+ result&= 1;
+#else
+ result<<= (index&0x07);
+ result>>= 8 - 1;
+#endif
+ index++;
+ s->index= index;
+
+ return result;
+#else
+ return get_bits(s, 1);
+#endif
+}
+
+static inline unsigned int show_bits1(GetBitContext *s){
+ return show_bits(s, 1);
+}
+
+static inline void skip_bits1(GetBitContext *s){
+ skip_bits(s, 1);
+}
+
+/**
+ * reads 0-32 bits.
+ */
+static inline unsigned int get_bits_long(GetBitContext *s, int n){
+ if(n<=17) return get_bits(s, n);
+ else{
+#ifdef ALT_BITSTREAM_READER_LE
+ int ret= get_bits(s, 16);
+ return ret | (get_bits(s, n-16) << 16);
+#else
+ int ret= get_bits(s, 16) << (n-16);
+ return ret | get_bits(s, n-16);
+#endif
+ }
+}
+
+/**
+ * reads 0-32 bits as a signed integer.
+ */
+static inline int get_sbits_long(GetBitContext *s, int n) {
+ return sign_extend(get_bits_long(s, n), n);
+}
+
+/**
+ * shows 0-32 bits.
+ */
+static inline unsigned int show_bits_long(GetBitContext *s, int n){
+ if(n<=17) return show_bits(s, n);
+ else{
+ GetBitContext gb= *s;
+ return get_bits_long(&gb, n);
+ }
+}
+
+static inline int check_marker(GetBitContext *s, const char *msg)
+{
+ int bit= get_bits1(s);
+ if(!bit)
+ av_log(NULL, AV_LOG_INFO, "Marker bit missing %s\n", msg);
+
+ return bit;
+}
+
+/**
+ * init GetBitContext.
+ * @param buffer bitstream buffer, must be FF_INPUT_BUFFER_PADDING_SIZE bytes larger than the actual read bits
+ * because some optimized bitstream readers read 32 or 64 bits at once and could read over the end
+ * @param bit_size the size of the buffer in bits
+ *
+ * While GetBitContext stores the buffer size, for performance reasons you are
+ * responsible for checking for the buffer end yourself (take advantage of the padding)!
+ */
+static inline void init_get_bits(GetBitContext *s,
+ const uint8_t *buffer, int bit_size)
+{
+ int buffer_size= (bit_size+7)>>3;
+ if(buffer_size < 0 || bit_size < 0) {
+ buffer_size = bit_size = 0;
+ buffer = NULL;
+ }
+
+ s->buffer= buffer;
+ s->size_in_bits= bit_size;
+ s->buffer_end= buffer + buffer_size;
+#ifdef ALT_BITSTREAM_READER
+ s->index=0;
+#elif defined LIBMPEG2_BITSTREAM_READER
+ s->buffer_ptr = (uint8_t*)((intptr_t)buffer&(~1));
+ s->bit_count = 16 + 8*((intptr_t)buffer&1);
+ skip_bits_long(s, 0);
+#elif defined A32_BITSTREAM_READER
+ s->buffer_ptr = (uint32_t*)((intptr_t)buffer&(~3));
+ s->bit_count = 32 + 8*((intptr_t)buffer&3);
+ skip_bits_long(s, 0);
+#endif
+}
+
+static inline void align_get_bits(GetBitContext *s)
+{
+ int n= (-get_bits_count(s)) & 7;
+ if(n) skip_bits(s, n);
+}
+
+#define init_vlc(vlc, nb_bits, nb_codes,\
+ bits, bits_wrap, bits_size,\
+ codes, codes_wrap, codes_size,\
+ flags)\
+ init_vlc_sparse(vlc, nb_bits, nb_codes,\
+ bits, bits_wrap, bits_size,\
+ codes, codes_wrap, codes_size,\
+ NULL, 0, 0, flags)
+
+int init_vlc_sparse(VLC *vlc, int nb_bits, int nb_codes,
+ const void *bits, int bits_wrap, int bits_size,
+ const void *codes, int codes_wrap, int codes_size,
+ const void *symbols, int symbols_wrap, int symbols_size,
+ int flags);
+#define INIT_VLC_LE 2
+#define INIT_VLC_USE_NEW_STATIC 4
+void free_vlc(VLC *vlc);
+
+#define INIT_VLC_STATIC(vlc, bits, a,b,c,d,e,f,g, static_size)\
+{\
+ static VLC_TYPE table[static_size][2];\
+ (vlc)->table= table;\
+ (vlc)->table_allocated= static_size;\
+ init_vlc(vlc, bits, a,b,c,d,e,f,g, INIT_VLC_USE_NEW_STATIC);\
+}
+
+
+/**
+ *
+ * if the vlc code is invalid and max_depth=1 then no bits will be removed
+ * if the vlc code is invalid and max_depth>1 then the number of bits removed
+ * is undefined
+ */
+#define GET_VLC(code, name, gb, table, bits, max_depth)\
+{\
+ int n, nb_bits;\
+ unsigned int index;\
+\
+ index= SHOW_UBITS(name, gb, bits);\
+ code = table[index][0];\
+ n = table[index][1];\
+\
+ if(max_depth > 1 && n < 0){\
+ LAST_SKIP_BITS(name, gb, bits)\
+ UPDATE_CACHE(name, gb)\
+\
+ nb_bits = -n;\
+\
+ index= SHOW_UBITS(name, gb, nb_bits) + code;\
+ code = table[index][0];\
+ n = table[index][1];\
+ if(max_depth > 2 && n < 0){\
+ LAST_SKIP_BITS(name, gb, nb_bits)\
+ UPDATE_CACHE(name, gb)\
+\
+ nb_bits = -n;\
+\
+ index= SHOW_UBITS(name, gb, nb_bits) + code;\
+ code = table[index][0];\
+ n = table[index][1];\
+ }\
+ }\
+ SKIP_BITS(name, gb, n)\
+}
+
+#define GET_RL_VLC(level, run, name, gb, table, bits, max_depth, need_update)\
+{\
+ int n, nb_bits;\
+ unsigned int index;\
+\
+ index= SHOW_UBITS(name, gb, bits);\
+ level = table[index].level;\
+ n = table[index].len;\
+\
+ if(max_depth > 1 && n < 0){\
+ SKIP_BITS(name, gb, bits)\
+ if(need_update){\
+ UPDATE_CACHE(name, gb)\
+ }\
+\
+ nb_bits = -n;\
+\
+ index= SHOW_UBITS(name, gb, nb_bits) + level;\
+ level = table[index].level;\
+ n = table[index].len;\
+ }\
+ run= table[index].run;\
+ SKIP_BITS(name, gb, n)\
+}
+
+
+/**
+ * parses a vlc code, faster than get_vlc()
+ * @param bits is the number of bits which will be read at once, must be
+ * identical to nb_bits in init_vlc()
+ * @param max_depth is the number of times 'bits' bits must be read to completely
+ * read the longest vlc code
+ * = (max_vlc_length + bits - 1) / bits
+ */
+static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE (*table)[2],
+ int bits, int max_depth)
+{
+ int code;
+
+ OPEN_READER(re, s)
+ UPDATE_CACHE(re, s)
+
+ GET_VLC(code, re, s, table, bits, max_depth)
+
+ CLOSE_READER(re, s)
+ return code;
+}
+
+//#define TRACE
+
+#ifdef TRACE
+static inline void print_bin(int bits, int n){
+ int i;
+
+ for(i=n-1; i>=0; i--){
+ av_log(NULL, AV_LOG_DEBUG, "%d", (bits>>i)&1);
+ }
+ for(i=n; i<24; i++)
+ av_log(NULL, AV_LOG_DEBUG, " ");
+}
+
+static inline int get_bits_trace(GetBitContext *s, int n, char *file, const char *func, int line){
+ int r= get_bits(s, n);
+
+ print_bin(r, n);
+ av_log(NULL, AV_LOG_DEBUG, "%5d %2d %3d bit @%5d in %s %s:%d\n", r, n, r, get_bits_count(s)-n, file, func, line);
+ return r;
+}
+static inline int get_vlc_trace(GetBitContext *s, VLC_TYPE (*table)[2], int bits, int max_depth, char *file, const char *func, int line){
+ int show= show_bits(s, 24);
+ int pos= get_bits_count(s);
+ int r= get_vlc2(s, table, bits, max_depth);
+ int len= get_bits_count(s) - pos;
+ int bits2= show>>(24-len);
+
+ print_bin(bits2, len);
+
+ av_log(NULL, AV_LOG_DEBUG, "%5d %2d %3d vlc @%5d in %s %s:%d\n", bits2, len, r, pos, file, func, line);
+ return r;
+}
+static inline int get_xbits_trace(GetBitContext *s, int n, char *file, const char *func, int line){
+ int show= show_bits(s, n);
+ int r= get_xbits(s, n);
+
+ print_bin(show, n);
+ av_log(NULL, AV_LOG_DEBUG, "%5d %2d %3d xbt @%5d in %s %s:%d\n", show, n, r, get_bits_count(s)-n, file, func, line);
+ return r;
+}
+
+#define get_bits(s, n) get_bits_trace(s, n, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+#define get_bits1(s) get_bits_trace(s, 1, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+#define get_xbits(s, n) get_xbits_trace(s, n, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+#define get_vlc(s, vlc) get_vlc_trace(s, (vlc)->table, (vlc)->bits, 3, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+#define get_vlc2(s, tab, bits, max) get_vlc_trace(s, tab, bits, max, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+
+#define tprintf(p, ...) av_log(p, AV_LOG_DEBUG, __VA_ARGS__)
+
+#else //TRACE
+ #ifdef __GNUC__
+ #define tprintf(p, ...) {}
+ #else
+ #define tprintf(p) {}
+ #endif
+#endif
+
+static inline int decode012(GetBitContext *gb){
+ int n;
+ n = get_bits1(gb);
+ if (n == 0)
+ return 0;
+ else
+ return get_bits1(gb) + 1;
+}
+
+static inline int decode210(GetBitContext *gb){
+ if (get_bits1(gb))
+ return 0;
+ else
+ return 2 - get_bits1(gb);
+}
+
+static inline int get_bits_left(GetBitContext *gb)
+{
+ return gb->size_in_bits - get_bits_count(gb);
+}
+
+#endif /* AVCODEC_GET_BITS_H */
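A short sketch of the plain reader API above (init_get_bits(), get_bits1(), get_bits(), skip_bits(), get_bits_count(), get_bits_left()). The field widths and the helper name are arbitrary; as the init_get_bits() documentation notes, a real caller must pad the buffer by FF_INPUT_BUFFER_PADDING_SIZE bytes.

#include <stdio.h>
#include "get_bits.h"

/* Illustrative only: pull a few fields out of a small padded buffer. */
static void get_bits_example(const uint8_t *buf, int size_in_bytes)
{
    GetBitContext gb;
    int marker, value;

    init_get_bits(&gb, buf, 8 * size_in_bytes);

    marker = get_bits1(&gb);           /* read 1 bit                            */
    value  = get_bits(&gb, 5);         /* read the next 5 bits, unsigned        */
    skip_bits(&gb, 2);                 /* discard 2 bits                        */

    printf("marker=%d value=%d, %d bits consumed, %d left\n",
           marker, value, get_bits_count(&gb), get_bits_left(&gb));
}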
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/golomb.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/golomb.c
new file mode 100644
index 000000000..a33b94491
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/golomb.c
@@ -0,0 +1,173 @@
+/*
+ * exp golomb vlc stuff
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file golomb.c
+ * @brief
+ * exp golomb vlc stuff
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#include "libavutil/common.h"
+
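+/* The tables below are keyed by the leading bits of the bitstream (the top 9
+ * bits for the plain exp-Golomb tables, the top 8 bits for the interleaved
+ * ones) and give, per prefix, the code length and the decoded value; this is
+ * what lets get_ue_golomb() and friends in golomb.h decode short codes with a
+ * single table lookup. As a reminder, the unsigned exp-Golomb code for a
+ * value v is (v+1) written in binary, preceded by as many zero bits as (v+1)
+ * has digits after its leading 1, e.g. 0 -> "1", 1 -> "010", 2 -> "011",
+ * 3 -> "00100". */
+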
+const uint8_t ff_golomb_vlc_len[512]={
+14,13,12,12,11,11,11,11,10,10,10,10,10,10,10,10,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
+7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
+5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1
+};
+
+const uint8_t ff_ue_golomb_vlc_code[512]={
+31,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,
+ 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 9,10,10,10,10,11,11,11,11,12,12,12,12,13,13,13,13,14,14,14,14,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+const int8_t ff_se_golomb_vlc_code[512]={
+ 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 8, -8, 9, -9, 10,-10, 11,-11, 12,-12, 13,-13, 14,-14, 15,-15,
+ 4, 4, 4, 4, -4, -4, -4, -4, 5, 5, 5, 5, -5, -5, -5, -5, 6, 6, 6, 6, -6, -6, -6, -6, 7, 7, 7, 7, -7, -7, -7, -7,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+
+const uint8_t ff_ue_golomb_len[256]={
+ 1, 3, 3, 5, 5, 5, 5, 7, 7, 7, 7, 7, 7, 7, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,11,
+11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,13,
+13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,
+13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,15,
+15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,
+15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,
+15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,
+15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,17,
+};
+
+const uint8_t ff_interleaved_golomb_vlc_len[256]={
+9,9,7,7,9,9,7,7,5,5,5,5,5,5,5,5,
+9,9,7,7,9,9,7,7,5,5,5,5,5,5,5,5,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+9,9,7,7,9,9,7,7,5,5,5,5,5,5,5,5,
+9,9,7,7,9,9,7,7,5,5,5,5,5,5,5,5,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+};
+
+const uint8_t ff_interleaved_ue_golomb_vlc_code[256]={
+ 15,16,7, 7, 17,18,8, 8, 3, 3, 3, 3, 3, 3, 3, 3,
+ 19,20,9, 9, 21,22,10,10,4, 4, 4, 4, 4, 4, 4, 4,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 23,24,11,11,25,26,12,12,5, 5, 5, 5, 5, 5, 5, 5,
+ 27,28,13,13,29,30,14,14,6, 6, 6, 6, 6, 6, 6, 6,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+const int8_t ff_interleaved_se_golomb_vlc_code[256]={
+ 8, -8, 4, 4, 9, -9, -4, -4, 2, 2, 2, 2, 2, 2, 2, 2,
+ 10,-10, 5, 5, 11,-11, -5, -5, -2, -2, -2, -2, -2, -2, -2, -2,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 12,-12, 6, 6, 13,-13, -6, -6, 3, 3, 3, 3, 3, 3, 3, 3,
+ 14,-14, 7, 7, 15,-15, -7, -7, -3, -3, -3, -3, -3, -3, -3, -3,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+const uint8_t ff_interleaved_dirac_golomb_vlc_code[256]={
+0, 1, 0, 0, 2, 3, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
+4, 5, 2, 2, 6, 7, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+8, 9, 4, 4, 10,11,5, 5, 2, 2, 2, 2, 2, 2, 2, 2,
+12,13,6, 6, 14,15,7, 7, 3, 3, 3, 3, 3, 3, 3, 3,
+1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,};
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/golomb.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/golomb.h
new file mode 100644
index 000000000..3021a5600
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/golomb.h
@@ -0,0 +1,541 @@
+/*
+ * exp golomb vlc stuff
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2004 Alex Beregszaszi
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/golomb.h
+ * @brief
+ * exp golomb vlc stuff
+ * @author Michael Niedermayer <michaelni@gmx.at> and Alex Beregszaszi
+ */
+
+#ifndef AVCODEC_GOLOMB_H
+#define AVCODEC_GOLOMB_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include "get_bits.h"
+#include "put_bits.h"
+
+#define FFMIN(a,b) ((a) > (b) ? (b) : (a))
+
+#define INVALID_VLC 0x80000000
+
+extern const uint8_t ff_golomb_vlc_len[512];
+extern const uint8_t ff_ue_golomb_vlc_code[512];
+extern const int8_t ff_se_golomb_vlc_code[512];
+extern const uint8_t ff_ue_golomb_len[256];
+
+extern const uint8_t ff_interleaved_golomb_vlc_len[256];
+extern const uint8_t ff_interleaved_ue_golomb_vlc_code[256];
+extern const int8_t ff_interleaved_se_golomb_vlc_code[256];
+extern const uint8_t ff_interleaved_dirac_golomb_vlc_code[256];
+
+
+ /**
+ * read unsigned exp golomb code.
+ */
+static inline int get_ue_golomb(GetBitContext *gb){
+ unsigned int buf;
+ int log;
+
+ OPEN_READER(re, gb);
+ /* ffdshow custom code */
+ #if defined(__INTEL_COMPILER) || defined(DEBUG)
+ #ifdef ALT_BITSTREAM_READER_LE
+ re_cache= AV_RL32( ((const uint8_t *)(gb)->buffer)+(re_index>>3) ) >> (re_index&0x07);
+ #else
+ re_cache= AV_RB32( ((const uint8_t *)(gb)->buffer)+(re_index>>3) ) >> (re_index&0x07);
+ #endif
+ #else
+ // ICL9.1-Release and MSVC8-DEBUG build can't process this macro properly.
+ UPDATE_CACHE(re, gb);
+ #endif
+ buf=GET_CACHE(re, gb);
+
+ if(buf >= (1<<27)){
+ buf >>= 32 - 9;
+ LAST_SKIP_BITS(re, gb, ff_golomb_vlc_len[buf]);
+ CLOSE_READER(re, gb);
+
+ return ff_ue_golomb_vlc_code[buf];
+ }else{
+ log= 2*av_log2(buf) - 31;
+ buf>>= log;
+ buf--;
+ LAST_SKIP_BITS(re, gb, 32 - log);
+ CLOSE_READER(re, gb);
+
+ return buf;
+ }
+}
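+
+/* Worked example for get_ue_golomb(): the bitstream "00111..." decodes as
+ * prefix "00", stop bit "1", info bits "11", i.e. (1<<2) - 1 + 3 = 6.
+ * Codes of at most 9 bits (values 0..30) are handled by a single
+ * ff_ue_golomb_vlc_code[] lookup; longer codes take the av_log2() branch. */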
+
+ /**
+ * read unsigned exp golomb code, constrained to a max of 31.
+ * the return value is undefined if the stored value exceeds 31.
+ */
+static inline int get_ue_golomb_31(GetBitContext *gb){
+ unsigned int buf;
+
+ OPEN_READER(re, gb);
+ /* ffdshow custom code */
+ #if defined(__INTEL_COMPILER) || defined(DEBUG)
+ #ifdef ALT_BITSTREAM_READER_LE
+ re_cache= AV_RL32( ((const uint8_t *)(gb)->buffer)+(re_index>>3) ) >> (re_index&0x07);
+ #else
+ re_cache= AV_RB32( ((const uint8_t *)(gb)->buffer)+(re_index>>3) ) >> (re_index&0x07);
+ #endif
+ #else
+ // ICL9.1-Release and MSVC8-DEBUG build can't process this macro properly.
+ UPDATE_CACHE(re, gb);
+ #endif
+ buf=GET_CACHE(re, gb);
+
+ buf >>= 32 - 9;
+ LAST_SKIP_BITS(re, gb, ff_golomb_vlc_len[buf]);
+ CLOSE_READER(re, gb);
+
+ return ff_ue_golomb_vlc_code[buf];
+}
+
+static inline int svq3_get_ue_golomb(GetBitContext *gb){
+ uint32_t buf;
+
+ OPEN_READER(re, gb);
+ UPDATE_CACHE(re, gb);
+ buf=GET_CACHE(re, gb);
+
+ if(buf&0xAA800000){
+ buf >>= 32 - 8;
+ LAST_SKIP_BITS(re, gb, ff_interleaved_golomb_vlc_len[buf]);
+ CLOSE_READER(re, gb);
+
+ return ff_interleaved_ue_golomb_vlc_code[buf];
+ }else{
+ int ret = 1;
+
+ while (1) {
+ buf >>= 32 - 8;
+ LAST_SKIP_BITS(re, gb, FFMIN(ff_interleaved_golomb_vlc_len[buf], 8));
+
+ if (ff_interleaved_golomb_vlc_len[buf] != 9){
+ ret <<= (ff_interleaved_golomb_vlc_len[buf] - 1) >> 1;
+ ret |= ff_interleaved_dirac_golomb_vlc_code[buf];
+ break;
+ }
+ ret = (ret << 4) | ff_interleaved_dirac_golomb_vlc_code[buf];
+ UPDATE_CACHE(re, gb);
+ buf = GET_CACHE(re, gb);
+ }
+
+ CLOSE_READER(re, gb);
+ return ret - 1;
+ }
+}
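+
+/* The interleaved ("svq3"/Dirac) variant alternates continuation flags with
+ * data bits instead of sending all prefix zeros first; a '1' flag terminates
+ * the code. Per the interleaved tables:
+ *   "1" -> 0,  "001" -> 1,  "011" -> 2,  "00001" -> 3, ... */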
+
+/**
+ * read unsigned truncated exp golomb code.
+ */
+static inline int get_te0_golomb(GetBitContext *gb, int range){
+ assert(range >= 1);
+
+ if(range==1) return 0;
+ else if(range==2) return get_bits1(gb)^1;
+ else return get_ue_golomb(gb);
+}
+
+/**
+ * read unsigned truncated exp golomb code.
+ */
+static inline int get_te_golomb(GetBitContext *gb, int range){
+ assert(range >= 1);
+
+ if(range==2) return get_bits1(gb)^1;
+ else return get_ue_golomb(gb);
+}
+
+
+/**
+ * read signed exp golomb code.
+ */
+static inline int get_se_golomb(GetBitContext *gb){
+ unsigned int buf;
+ int log;
+
+ OPEN_READER(re, gb);
+ UPDATE_CACHE(re, gb);
+ buf=GET_CACHE(re, gb);
+
+ if(buf >= (1<<27)){
+ buf >>= 32 - 9;
+ LAST_SKIP_BITS(re, gb, ff_golomb_vlc_len[buf]);
+ CLOSE_READER(re, gb);
+
+ return ff_se_golomb_vlc_code[buf];
+ }else{
+ log= 2*av_log2(buf) - 31;
+ buf>>= log;
+
+ LAST_SKIP_BITS(re, gb, 32 - log);
+ CLOSE_READER(re, gb);
+
+ if(buf&1) buf= -(buf>>1);
+ else buf= (buf>>1);
+
+ return buf;
+ }
+}
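+
+/* get_se_golomb() maps the unsigned code number k to a signed value as in
+ * H.264: k = 0,1,2,3,4,... -> 0, +1, -1, +2, -2, ...
+ * e.g. the bitstream "011" (k = 2) decodes to -1. */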
+
+static inline int svq3_get_se_golomb(GetBitContext *gb){
+ unsigned int buf;
+ int log;
+
+ OPEN_READER(re, gb);
+ UPDATE_CACHE(re, gb);
+ buf=GET_CACHE(re, gb);
+
+ if(buf&0xAA800000){
+ buf >>= 32 - 8;
+ LAST_SKIP_BITS(re, gb, ff_interleaved_golomb_vlc_len[buf]);
+ CLOSE_READER(re, gb);
+
+ return ff_interleaved_se_golomb_vlc_code[buf];
+ }else{
+ LAST_SKIP_BITS(re, gb, 8);
+ UPDATE_CACHE(re, gb);
+ buf |= 1 | (GET_CACHE(re, gb) >> 8);
+
+ if((buf & 0xAAAAAAAA) == 0)
+ return INVALID_VLC;
+
+ for(log=31; (buf & 0x80000000) == 0; log--){
+ buf = (buf << 2) - ((buf << log) >> (log - 1)) + (buf >> 30);
+ }
+
+ LAST_SKIP_BITS(re, gb, 63 - 2*log - 8);
+ CLOSE_READER(re, gb);
+
+ return (signed) (((((buf << log) >> log) - 1) ^ -(buf & 0x1)) + 1) >> 1;
+ }
+}
+
+/**
+ * read unsigned golomb rice code (ffv1).
+ */
+static inline int get_ur_golomb(GetBitContext *gb, int k, int limit, int esc_len){
+ unsigned int buf;
+ int log;
+
+ OPEN_READER(re, gb);
+ UPDATE_CACHE(re, gb);
+ buf=GET_CACHE(re, gb);
+
+ log= av_log2(buf);
+
+ if(log > 31-limit){
+ buf >>= log - k;
+ buf += (30-log)<<k;
+ LAST_SKIP_BITS(re, gb, 32 + k - log);
+ CLOSE_READER(re, gb);
+
+ return buf;
+ }else{
+ LAST_SKIP_BITS(re, gb, limit);
+ UPDATE_CACHE(re, gb);
+
+ buf = SHOW_UBITS(re, gb, esc_len);
+
+ LAST_SKIP_BITS(re, gb, esc_len);
+ CLOSE_READER(re, gb);
+
+ return buf + limit - 1;
+ }
+}
+
+/**
+ * read unsigned golomb rice code (jpegls).
+ */
+static inline int get_ur_golomb_jpegls(GetBitContext *gb, int k, int limit, int esc_len){
+ unsigned int buf;
+ int log;
+
+ OPEN_READER(re, gb);
+ UPDATE_CACHE(re, gb);
+ buf=GET_CACHE(re, gb);
+
+ log= av_log2(buf);
+
+ if(log - k >= 32-MIN_CACHE_BITS+(MIN_CACHE_BITS==32) && 32-log < limit){
+ buf >>= log - k;
+ buf += (30-log)<<k;
+ LAST_SKIP_BITS(re, gb, 32 + k - log);
+ CLOSE_READER(re, gb);
+
+ return buf;
+ }else{
+ int i;
+ for(i=0; SHOW_UBITS(re, gb, 1) == 0; i++){
+ LAST_SKIP_BITS(re, gb, 1);
+ UPDATE_CACHE(re, gb);
+ }
+ SKIP_BITS(re, gb, 1);
+
+ if(i < limit - 1){
+ if(k){
+ buf = SHOW_UBITS(re, gb, k);
+ LAST_SKIP_BITS(re, gb, k);
+ }else{
+ buf=0;
+ }
+
+ CLOSE_READER(re, gb);
+ return buf + (i<<k);
+ }else if(i == limit - 1){
+ buf = SHOW_UBITS(re, gb, esc_len);
+ LAST_SKIP_BITS(re, gb, esc_len);
+ CLOSE_READER(re, gb);
+
+ return buf + 1;
+ }else
+ return -1;
+ }
+}
+
+/**
+ * read signed golomb rice code (ffv1).
+ */
+static inline int get_sr_golomb(GetBitContext *gb, int k, int limit, int esc_len){
+ int v= get_ur_golomb(gb, k, limit, esc_len);
+
+ v++;
+ if (v&1) return v>>1;
+ else return -(v>>1);
+
+// return (v>>1) ^ -(v&1);
+}
+
+/**
+ * read signed golomb rice code (flac).
+ */
+static inline int get_sr_golomb_flac(GetBitContext *gb, int k, int limit, int esc_len){
+ int v= get_ur_golomb_jpegls(gb, k, limit, esc_len);
+ return (v>>1) ^ -(v&1);
+}
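+
+/* Both signed Rice readers use the usual zigzag mapping from the unsigned
+ * code u to a signed value: u = 0,1,2,3,4,... -> 0, -1, +1, -2, +2, ...
+ * (get_sr_golomb() spells it out with a branch, get_sr_golomb_flac() uses
+ * the branch-free (u>>1) ^ -(u&1) form). */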
+
+/**
+ * read unsigned golomb rice code (shorten).
+ */
+static inline unsigned int get_ur_golomb_shorten(GetBitContext *gb, int k){
+ return get_ur_golomb_jpegls(gb, k, INT_MAX, 0);
+}
+
+/**
+ * read signed golomb rice code (shorten).
+ */
+static inline int get_sr_golomb_shorten(GetBitContext* gb, int k)
+{
+ int uvar = get_ur_golomb_jpegls(gb, k + 1, INT_MAX, 0);
+ if (uvar & 1)
+ return ~(uvar >> 1);
+ else
+ return uvar >> 1;
+}
+
+
+
+#ifdef TRACE
+
+static inline int get_ue(GetBitContext *s, char *file, const char *func, int line){
+ int show= show_bits(s, 24);
+ int pos= get_bits_count(s);
+ int i= get_ue_golomb(s);
+ int len= get_bits_count(s) - pos;
+ int bits= show>>(24-len);
+
+ print_bin(bits, len);
+
+ av_log(NULL, AV_LOG_DEBUG, "%5d %2d %3d ue @%5d in %s %s:%d\n", bits, len, i, pos, file, func, line);
+
+ return i;
+}
+
+static inline int get_se(GetBitContext *s, char *file, const char *func, int line){
+ int show= show_bits(s, 24);
+ int pos= get_bits_count(s);
+ int i= get_se_golomb(s);
+ int len= get_bits_count(s) - pos;
+ int bits= show>>(24-len);
+
+ print_bin(bits, len);
+
+ av_log(NULL, AV_LOG_DEBUG, "%5d %2d %3d se @%5d in %s %s:%d\n", bits, len, i, pos, file, func, line);
+
+ return i;
+}
+
+static inline int get_te(GetBitContext *s, int r, char *file, const char *func, int line){
+ int show= show_bits(s, 24);
+ int pos= get_bits_count(s);
+ int i= get_te0_golomb(s, r);
+ int len= get_bits_count(s) - pos;
+ int bits= show>>(24-len);
+
+ print_bin(bits, len);
+
+ av_log(NULL, AV_LOG_DEBUG, "%5d %2d %3d te @%5d in %s %s:%d\n", bits, len, i, pos, file, func, line);
+
+ return i;
+}
+
+#define get_ue_golomb(a) get_ue(a, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+#define get_se_golomb(a) get_se(a, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+#define get_te_golomb(a, r) get_te(a, r, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+#define get_te0_golomb(a, r) get_te(a, r, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+
+#endif
+
+/**
+ * write unsigned exp golomb code.
+ */
+static inline void set_ue_golomb(PutBitContext *pb, int i){
+ int e;
+
+ assert(i>=0);
+
+#if 0
+ if(i == 0){
+ put_bits(pb, 1, 1);
+ return;
+ }
+#endif
+ if(i<256)
+ put_bits(pb, ff_ue_golomb_len[i], i+1);
+ else{
+ e= av_log2(i+1);
+
+ put_bits(pb, 2*e+1, i+1);
+ }
+}
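+
+/* set_ue_golomb() is the inverse of get_ue_golomb(): it writes i+1 in
+ * 2*av_log2(i+1)+1 bits (taken from ff_ue_golomb_len[] for i < 256),
+ * e.g. i = 3 writes the 5-bit pattern "00100". */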
+
+/**
+ * write truncated unsigned exp golomb code.
+ */
+static inline void set_te_golomb(PutBitContext *pb, int i, int range){
+ assert(range >= 1);
+ assert(i<=range);
+
+ if(range==2) put_bits(pb, 1, i^1);
+ else set_ue_golomb(pb, i);
+}
+
+/**
+ * write signed exp golomb code. 16 bits at most.
+ */
+static inline void set_se_golomb(PutBitContext *pb, int i){
+// if (i>32767 || i<-32767)
+// av_log(NULL,AV_LOG_ERROR,"value out of range %d\n", i);
+#if 0
+ if(i<=0) i= -2*i;
+ else i= 2*i-1;
+#elif 1
+ i= 2*i-1;
+ if(i<0) i^= -1; //FIXME check if gcc does the right thing
+#else
+ i= 2*i-1;
+ i^= (i>>31);
+#endif
+ set_ue_golomb(pb, i);
+}
+
+/**
+ * write unsigned golomb rice code (ffv1).
+ */
+static inline void set_ur_golomb(PutBitContext *pb, int i, int k, int limit, int esc_len){
+ int e;
+
+ assert(i>=0);
+
+ e= i>>k;
+ if(e<limit){
+ put_bits(pb, e + k + 1, (1<<k) + (i&((1<<k)-1)));
+ }else{
+ put_bits(pb, limit + esc_len, i - limit + 1);
+ }
+}
+
+/**
+ * write unsigned golomb rice code (jpegls).
+ */
+static inline void set_ur_golomb_jpegls(PutBitContext *pb, int i, int k, int limit, int esc_len){
+ int e;
+
+ assert(i>=0);
+
+ e= (i>>k) + 1;
+ if(e<limit){
+ while(e > 31) {
+ put_bits(pb, 31, 0);
+ e -= 31;
+ }
+ put_bits(pb, e, 1);
+ if(k)
+ put_sbits(pb, k, i);
+ }else{
+ while(limit > 31) {
+ put_bits(pb, 31, 0);
+ limit -= 31;
+ }
+ put_bits(pb, limit , 1);
+ put_bits(pb, esc_len, i - 1);
+ }
+}
+
+/**
+ * write signed golomb rice code (ffv1).
+ */
+static inline void set_sr_golomb(PutBitContext *pb, int i, int k, int limit, int esc_len){
+ int v;
+
+ v = -2*i-1;
+ v ^= (v>>31);
+
+ set_ur_golomb(pb, v, k, limit, esc_len);
+}
+
+/**
+ * write signed golomb rice code (flac).
+ */
+static inline void set_sr_golomb_flac(PutBitContext *pb, int i, int k, int limit, int esc_len){
+ int v;
+
+ v = -2*i-1;
+ v ^= (v>>31);
+
+ set_ur_golomb_jpegls(pb, v, k, limit, esc_len);
+}
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* AVCODEC_GOLOMB_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h261.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h261.c
new file mode 100644
index 000000000..00ebf8f49
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h261.c
@@ -0,0 +1,54 @@
+/*
+ * H261 common code
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2004 Maarten Daniels
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file h261.c
+ * H.261 common code.
+ */
+
+#include "dsputil.h"
+#include "avcodec.h"
+#include "h261.h"
+
+#define IS_FIL(a) ((a)&MB_TYPE_H261_FIL)
+
+uint8_t ff_h261_rl_table_store[2][2*MAX_RUN + MAX_LEVEL + 3];
+
+void ff_h261_loop_filter(MpegEncContext *s){
+ H261Context * h= (H261Context*)s;
+ const int linesize = s->linesize;
+ const int uvlinesize= s->uvlinesize;
+ uint8_t *dest_y = s->dest[0];
+ uint8_t *dest_cb= s->dest[1];
+ uint8_t *dest_cr= s->dest[2];
+
+ if(!(IS_FIL (h->mtype)))
+ return;
+
+ s->dsp.h261_loop_filter(dest_y , linesize);
+ s->dsp.h261_loop_filter(dest_y + 8, linesize);
+ s->dsp.h261_loop_filter(dest_y + 8 * linesize , linesize);
+ s->dsp.h261_loop_filter(dest_y + 8 * linesize + 8, linesize);
+ s->dsp.h261_loop_filter(dest_cb, uvlinesize);
+ s->dsp.h261_loop_filter(dest_cr, uvlinesize);
+}
+
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h261.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h261.h
new file mode 100644
index 000000000..b5eb539e7
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h261.h
@@ -0,0 +1,51 @@
+/*
+ * H261 decoder
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2004 Maarten Daniels
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file h261.h
+ * H.261 codec.
+ */
+
+#ifndef AVCODEC_H261_H
+#define AVCODEC_H261_H
+
+#include "mpegvideo.h"
+
+/**
+ * H261Context
+ */
+typedef struct H261Context{
+ MpegEncContext s;
+
+ int current_mba;
+ int previous_mba;
+ int mba_diff;
+ int mtype;
+ int current_mv_x;
+ int current_mv_y;
+ int gob_number;
+ int gob_start_code_skipped; // 1 if the GOB start code has already been read before the GOB header is read
+}H261Context;
+
+#define MB_TYPE_H261_FIL 0x800000
+
+#endif /* AVCODEC_H261_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h261_parser.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h261_parser.c
new file mode 100644
index 000000000..8353d7a9f
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h261_parser.c
@@ -0,0 +1,90 @@
+/*
+ * H261 parser
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2004 Maarten Daniels
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file h261_parser.c
+ * H.261 parser.
+ */
+
+#include "parser.h"
+
+
+static int h261_find_frame_end(ParseContext *pc, AVCodecContext* avctx, const uint8_t *buf, int buf_size){
+ int vop_found, i, j;
+ uint32_t state;
+
+ vop_found= pc->frame_start_found;
+ state= pc->state;
+
+ for(i=0; i<buf_size && !vop_found; i++){
+ state= (state<<8) | buf[i];
+ for(j=0; j<8; j++){
+ if(((state>>j)&0xFFFFF0) == 0x000100){
+ vop_found=1;
+ break;
+ }
+ }
+ }
+ if(vop_found){
+ for(; i<buf_size; i++){
+ state= (state<<8) | buf[i];
+ for(j=0; j<8; j++){
+ if(((state>>j)&0xFFFFF0) == 0x000100){
+ pc->frame_start_found=0;
+ pc->state= (state>>(3*8))+0xFF00;
+ return i-2;
+ }
+ }
+ }
+ }
+
+ pc->frame_start_found= vop_found;
+ pc->state= state;
+ return END_NOT_FOUND;
+}
+
+static int h261_parse(AVCodecParserContext *s,
+ AVCodecContext *avctx,
+ const uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size)
+{
+ ParseContext *pc = s->priv_data;
+ int next;
+
+ next= h261_find_frame_end(pc,avctx, buf, buf_size);
+ if (ff_combine_frame(pc, next, &buf, &buf_size) < 0) {
+ *poutbuf = NULL;
+ *poutbuf_size = 0;
+ return buf_size;
+ }
+ *poutbuf = buf;
+ *poutbuf_size = buf_size;
+ return next;
+}
+
+AVCodecParser h261_parser = {
+ { CODEC_ID_H261 },
+ sizeof(ParseContext),
+ NULL,
+ h261_parse,
+ ff_parse_close,
+};
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h261data.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h261data.h
new file mode 100644
index 000000000..54d74b82d
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h261data.h
@@ -0,0 +1,164 @@
+/*
+ * copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ * copyright (c) 2004 Maarten Daniels
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file h261data.h
+ * H.261 tables.
+ */
+
+#ifndef AVCODEC_H261DATA_H
+#define AVCODEC_H261DATA_H
+
+#include <stdint.h>
+#include "h261.h"
+
+// H.261 VLC table for macroblock addressing
+static const uint8_t h261_mba_code[35] = {
+ 1, 3, 2, 3,
+ 2, 3, 2, 7,
+ 6, 11, 10, 9,
+ 8, 7, 6, 23,
+ 22, 21, 20, 19,
+ 18, 35, 34, 33,
+ 32, 31, 30, 29,
+ 28, 27, 26, 25,
+ 24,
+ 15, //(MBA stuffing)
+ 1 //(start code)
+};
+
+static const uint8_t h261_mba_bits[35] = {
+ 1, 3, 3, 4,
+ 4, 5, 5, 7,
+ 7, 8, 8, 8,
+ 8, 8, 8, 10,
+ 10, 10, 10, 10,
+ 10, 11, 11, 11,
+ 11, 11, 11, 11,
+ 11, 11, 11, 11,
+ 11,
+ 11, //(MBA stuffing)
+ 16 //(start code)
+};
+
+//H.261 VLC table for macroblock type
+static const uint8_t h261_mtype_code[10] = {
+ 1, 1, 1, 1,
+ 1, 1, 1, 1,
+ 1, 1
+};
+
+static const uint8_t h261_mtype_bits[10] = {
+ 4, 7, 1, 5,
+ 9, 8, 10, 3,
+ 2, 6
+};
+
+static const int h261_mtype_map[10]= {
+ MB_TYPE_INTRA4x4,
+ MB_TYPE_INTRA4x4 | MB_TYPE_QUANT,
+ MB_TYPE_CBP,
+ MB_TYPE_QUANT | MB_TYPE_CBP,
+ MB_TYPE_16x16,
+ MB_TYPE_CBP | MB_TYPE_16x16,
+ MB_TYPE_QUANT | MB_TYPE_CBP | MB_TYPE_16x16,
+ MB_TYPE_16x16 | MB_TYPE_H261_FIL,
+ MB_TYPE_CBP | MB_TYPE_16x16 | MB_TYPE_H261_FIL,
+ MB_TYPE_QUANT | MB_TYPE_CBP | MB_TYPE_16x16 | MB_TYPE_H261_FIL
+};
+
+//H.261 VLC table for motion vectors
+static const uint8_t h261_mv_tab[17][2] = {
+ {1,1}, {1,2}, {1,3}, {1,4}, {3,6}, {5,7}, {4,7}, {3,7},
+ {11,9}, {10,9}, {9,9}, {17,10}, {16,10}, {15,10}, {14,10}, {13,10}, {12,10}
+};
+
+static const int mvmap[17] =
+{
+ 0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16
+};
+
+//H.261 VLC table for coded block pattern
+static const uint8_t h261_cbp_tab[63][2] =
+{
+ {11,5}, {9,5}, {13,6}, {13,4}, {23,7}, {19,7}, {31,8}, {12,4},
+ {22,7}, {18,7}, {30,8}, {19,5}, {27,8}, {23,8}, {19,8}, {11,4},
+ {21,7}, {17,7}, {29,8}, {17,5}, {25,8}, {21,8}, {17,8}, {15,6},
+ {15,8}, {13,8}, {3,9}, {15,5}, {11,8}, {7,8}, {7,9}, {10,4},
+ {20,7}, {16,7}, {28,8}, {14,6}, {14,8}, {12,8}, {2,9}, {16,5},
+ {24,8}, {20,8}, {16,8}, {14,5}, {10,8}, {6,8}, {6,9}, {18,5},
+ {26,8}, {22,8}, {18,8}, {13,5}, {9,8}, {5,8}, {5,9}, {12,5},
+ {8,8}, {4,8}, {4,9}, {7,3}, {10,5}, {8,5}, {12,6}
+};
+
+//H.261 VLC table for transform coefficients
+static const uint16_t h261_tcoeff_vlc[65][2] = {
+{ 0x2, 2 }, { 0x3, 2 },{ 0x4, 4 },{ 0x5, 5 },
+{ 0x6, 7 },{ 0x26, 8 },{ 0x21, 8 },{ 0xa, 10 },
+{ 0x1d, 12 },{ 0x18, 12 },{ 0x13, 12 },{ 0x10 , 12 },
+{ 0x1a, 13},{ 0x19, 13 }, { 0x18, 13 }, { 0x17, 13 },
+{ 0x3, 3 }, { 0x6, 6 }, { 0x25 , 8 }, { 0xc, 10 },
+{ 0x1b, 12 }, { 0x16, 13 }, { 0x15, 13 }, { 0x5, 4},
+{ 0x4, 7}, { 0xb, 10 }, { 0x14, 12 }, { 0x14, 13 },
+{ 0x7, 5 }, { 0x24, 8 }, { 0x1c, 12 }, { 0x13, 13 },
+{ 0x6, 5 }, { 0xf, 10 }, { 0x12, 12}, { 0x7, 6},
+{ 0x9 , 10 }, { 0x12, 13 }, { 0x5, 6 }, { 0x1e, 12 },
+{ 0x4, 6 }, { 0x15, 12 }, { 0x7, 7 }, { 0x11, 12},
+{ 0x5, 7 }, { 0x11, 13 }, { 0x27, 8 }, { 0x10, 13 },
+{ 0x23, 8 }, { 0x22, 8 }, { 0x20, 8 }, { 0xe , 10 },
+{ 0xd, 10 }, { 0x8, 10 },{ 0x1f, 12 }, { 0x1a, 12 },
+{ 0x19, 12 }, { 0x17, 12 }, { 0x16, 12}, { 0x1f, 13},
+{ 0x1e, 13 }, { 0x1d, 13 }, { 0x1c, 13}, { 0x1b, 13},
+{ 0x1, 6 } //escape
+};
+
+static const int8_t h261_tcoeff_level[64] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 1, 2, 3, 4, 5, 6, 7, 1,
+ 2, 3, 4, 5, 1, 2, 3, 4,
+ 1, 2, 3, 1, 2, 3, 1, 2,
+ 1, 2, 1, 2, 1, 2, 1, 2,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1
+};
+
+static const int8_t h261_tcoeff_run[64] = {
+ 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1,
+ 1, 1, 1, 1, 1, 1, 2, 2,
+ 2, 2, 2, 3, 3, 3, 3, 4,
+ 4, 4, 5, 5, 5, 6, 6, 7,
+ 7, 8, 8, 9, 9, 10, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26
+};
+
+static RLTable h261_rl_tcoeff = {
+ 64,
+ 64,
+ h261_tcoeff_vlc,
+ h261_tcoeff_run,
+ h261_tcoeff_level,
+};
+
+#endif /* AVCODEC_H261DATA_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h261dec.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h261dec.c
new file mode 100644
index 000000000..3287f5e42
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h261dec.c
@@ -0,0 +1,656 @@
+/*
+ * H261 decoder
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2004 Maarten Daniels
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/h261dec.c
+ * H.261 decoder.
+ */
+
+#include "dsputil.h"
+#include "avcodec.h"
+#include "mpegvideo.h"
+#include "h261.h"
+#include "h261data.h"
+
+#define H261_MBA_VLC_BITS 9
+#define H261_MTYPE_VLC_BITS 6
+#define H261_MV_VLC_BITS 7
+#define H261_CBP_VLC_BITS 9
+#define TCOEFF_VLC_BITS 9
+#define MBA_STUFFING 33
+#define MBA_STARTCODE 34
+
+extern uint8_t ff_h261_rl_table_store[2][2*MAX_RUN + MAX_LEVEL + 3];
+
+static VLC h261_mba_vlc;
+static VLC h261_mtype_vlc;
+static VLC h261_mv_vlc;
+static VLC h261_cbp_vlc;
+
+static int h261_decode_block(H261Context * h, DCTELEM * block, int n, int coded);
+
+static av_cold void h261_decode_init_vlc(H261Context *h){
+ static int done = 0;
+
+ if(!done){
+ done = 1;
+ INIT_VLC_STATIC(&h261_mba_vlc, H261_MBA_VLC_BITS, 35,
+ h261_mba_bits, 1, 1,
+ h261_mba_code, 1, 1, 662);
+ INIT_VLC_STATIC(&h261_mtype_vlc, H261_MTYPE_VLC_BITS, 10,
+ h261_mtype_bits, 1, 1,
+ h261_mtype_code, 1, 1, 80);
+ INIT_VLC_STATIC(&h261_mv_vlc, H261_MV_VLC_BITS, 17,
+ &h261_mv_tab[0][1], 2, 1,
+ &h261_mv_tab[0][0], 2, 1, 144);
+ INIT_VLC_STATIC(&h261_cbp_vlc, H261_CBP_VLC_BITS, 63,
+ &h261_cbp_tab[0][1], 2, 1,
+ &h261_cbp_tab[0][0], 2, 1, 512);
+ init_rl(&h261_rl_tcoeff, ff_h261_rl_table_store);
+ INIT_VLC_RL(h261_rl_tcoeff, 552);
+ }
+}
+
+static av_cold int h261_decode_init(AVCodecContext *avctx){
+ H261Context *h= avctx->priv_data;
+ MpegEncContext * const s = &h->s;
+
+ // set defaults
+ MPV_decode_defaults(s);
+ s->avctx = avctx;
+
+ s->width = s->avctx->coded_width;
+ s->height = s->avctx->coded_height;
+ s->codec_id = s->avctx->codec->id;
+
+ s->out_format = FMT_H261;
+ s->low_delay= 1;
+ avctx->pix_fmt= PIX_FMT_YUV420P;
+
+ s->codec_id= avctx->codec->id;
+
+ h261_decode_init_vlc(h);
+
+ h->gob_start_code_skipped = 0;
+
+ return 0;
+}
+
+/**
+ * decodes the group of blocks header or slice header.
+ * @return <0 if an error occurred
+ */
+static int h261_decode_gob_header(H261Context *h){
+ unsigned int val;
+ MpegEncContext * const s = &h->s;
+
+ if ( !h->gob_start_code_skipped ){
+ /* Check for GOB Start Code */
+ val = show_bits(&s->gb, 15);
+ if(val)
+ return -1;
+
+ /* We have a GBSC */
+ skip_bits(&s->gb, 16);
+ }
+
+ h->gob_start_code_skipped = 0;
+
+ h->gob_number = get_bits(&s->gb, 4); /* GN */
+ s->qscale = get_bits(&s->gb, 5); /* GQUANT */
+
+ /* Check if gob_number is valid */
+ if (s->mb_height==18){ //cif
+ if ((h->gob_number<=0) || (h->gob_number>12))
+ return -1;
+ }
+ else{ //qcif
+ if ((h->gob_number!=1) && (h->gob_number!=3) && (h->gob_number!=5))
+ return -1;
+ }
+
+ /* GEI */
+ while (get_bits1(&s->gb) != 0) {
+ skip_bits(&s->gb, 8);
+ }
+
+ if(s->qscale==0) {
+ av_log(s->avctx, AV_LOG_ERROR, "qscale has forbidden 0 value\n");
+ if (s->avctx->error_recognition >= FF_ER_COMPLIANT)
+ return -1;
+ }
+
+ // For the first transmitted macroblock in a GOB, MBA is the absolute address. For
+ // subsequent macroblocks, MBA is the difference between the absolute addresses of
+ // the macroblock and the last transmitted macroblock.
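+ // Example: if the first coded macroblock of a GOB is macroblock 3, MBA = 3 is
+ // sent; if the next coded macroblock is macroblock 5, MBA = 5 - 3 = 2 is sent.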
+ h->current_mba = 0;
+ h->mba_diff = 0;
+
+ return 0;
+}
+
+/**
+ * decodes the group of blocks / video packet header.
+ * @return <0 if no resync found
+ */
+static int ff_h261_resync(H261Context *h){
+ MpegEncContext * const s = &h->s;
+ int left, ret;
+
+ if ( h->gob_start_code_skipped ){
+ ret= h261_decode_gob_header(h);
+ if(ret>=0)
+ return 0;
+ }
+ else{
+ if(show_bits(&s->gb, 15)==0){
+ ret= h261_decode_gob_header(h);
+ if(ret>=0)
+ return 0;
+ }
+ //OK, it is not where it is supposed to be ...
+ s->gb= s->last_resync_gb;
+ align_get_bits(&s->gb);
+ left= get_bits_left(&s->gb);
+
+ for(;left>15+1+4+5; left-=8){
+ if(show_bits(&s->gb, 15)==0){
+ GetBitContext bak= s->gb;
+
+ ret= h261_decode_gob_header(h);
+ if(ret>=0)
+ return 0;
+
+ s->gb= bak;
+ }
+ skip_bits(&s->gb, 8);
+ }
+ }
+
+ return -1;
+}
+
+/**
+ * decodes skipped macroblocks
+ * @return 0
+ */
+static int h261_decode_mb_skipped(H261Context *h, int mba1, int mba2 )
+{
+ MpegEncContext * const s = &h->s;
+ int i;
+
+ s->mb_intra = 0;
+
+ for(i=mba1; i<mba2; i++){
+ int j, xy;
+
+ s->mb_x= ((h->gob_number-1) % 2) * 11 + i % 11;
+ s->mb_y= ((h->gob_number-1) / 2) * 3 + i / 11;
+ xy = s->mb_x + s->mb_y * s->mb_stride;
+ ff_init_block_index(s);
+ ff_update_block_index(s);
+
+ for(j=0;j<6;j++)
+ s->block_last_index[j] = -1;
+
+ s->mv_dir = MV_DIR_FORWARD;
+ s->mv_type = MV_TYPE_16X16;
+ s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
+ s->mv[0][0][0] = 0;
+ s->mv[0][0][1] = 0;
+ s->mb_skipped = 1;
+ h->mtype &= ~MB_TYPE_H261_FIL;
+
+ MPV_decode_mb(s, s->block);
+ }
+
+ return 0;
+}
+
+static int decode_mv_component(GetBitContext *gb, int v){
+ int mv_diff = get_vlc2(gb, h261_mv_vlc.table, H261_MV_VLC_BITS, 2);
+
+ /* check if mv_diff is valid */
+ if ( mv_diff < 0 )
+ return v;
+
+ mv_diff = mvmap[mv_diff];
+
+ if(mv_diff && !get_bits1(gb))
+ mv_diff= -mv_diff;
+
+ v += mv_diff;
+ if (v <=-16) v+= 32;
+ else if(v >= 16) v-= 32;
+
+ return v;
+}
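+
+// The transmitted difference is only meaningful modulo 32, so the result is
+// wrapped back into the valid range; e.g. starting from v = 14, a decoded
+// difference of +5 gives 19, which wraps to 19 - 32 = -13.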
+
+static int h261_decode_mb(H261Context *h){
+ MpegEncContext * const s = &h->s;
+ int i, cbp, xy;
+
+ cbp = 63;
+ // Read mba
+ do{
+ h->mba_diff = get_vlc2(&s->gb, h261_mba_vlc.table, H261_MBA_VLC_BITS, 2);
+
+ /* Check for slice end */
+ /* NOTE: a GOB can be empty (no MB data) or consist only of MBA stuffing */
+ if (h->mba_diff == MBA_STARTCODE){ // start code
+ h->gob_start_code_skipped = 1;
+ return SLICE_END;
+ }
+ }
+ while( h->mba_diff == MBA_STUFFING ); // stuffing
+
+ if ( h->mba_diff < 0 ){
+ if ( get_bits_count(&s->gb) + 7 >= s->gb.size_in_bits )
+ return SLICE_END;
+
+ av_log(s->avctx, AV_LOG_ERROR, "illegal mba at %d %d\n", s->mb_x, s->mb_y);
+ return SLICE_ERROR;
+ }
+
+ h->mba_diff += 1;
+ h->current_mba += h->mba_diff;
+
+ if ( h->current_mba > MBA_STUFFING )
+ return SLICE_ERROR;
+
+ s->mb_x= ((h->gob_number-1) % 2) * 11 + ((h->current_mba-1) % 11);
+ s->mb_y= ((h->gob_number-1) / 2) * 3 + ((h->current_mba-1) / 11);
+ xy = s->mb_x + s->mb_y * s->mb_stride;
+ ff_init_block_index(s);
+ ff_update_block_index(s);
+
+ // Read mtype
+ h->mtype = get_vlc2(&s->gb, h261_mtype_vlc.table, H261_MTYPE_VLC_BITS, 2);
+ h->mtype = h261_mtype_map[h->mtype];
+
+ // Read mquant
+ if ( IS_QUANT ( h->mtype ) ){
+ ff_set_qscale(s, get_bits(&s->gb, 5));
+ }
+
+ s->mb_intra = IS_INTRA4x4(h->mtype);
+
+ // Read mv
+ if ( IS_16X16 ( h->mtype ) ){
+ // Motion vector data is included for all MC macroblocks. MVD is obtained from the macroblock vector by subtracting the
+ // vector of the preceding macroblock. For this calculation the vector of the preceding macroblock is regarded as zero in the
+ // following three situations:
+ // 1) evaluating MVD for macroblocks 1, 12 and 23;
+ // 2) evaluating MVD for macroblocks in which MBA does not represent a difference of 1;
+ // 3) MTYPE of the previous macroblock was not MC.
+ if ( ( h->current_mba == 1 ) || ( h->current_mba == 12 ) || ( h->current_mba == 23 ) ||
+ ( h->mba_diff != 1))
+ {
+ h->current_mv_x = 0;
+ h->current_mv_y = 0;
+ }
+
+ h->current_mv_x= decode_mv_component(&s->gb, h->current_mv_x);
+ h->current_mv_y= decode_mv_component(&s->gb, h->current_mv_y);
+ }else{
+ h->current_mv_x = 0;
+ h->current_mv_y = 0;
+ }
+
+ // Read cbp
+ if ( HAS_CBP( h->mtype ) ){
+ cbp = get_vlc2(&s->gb, h261_cbp_vlc.table, H261_CBP_VLC_BITS, 2) + 1;
+ }
+
+ if(s->mb_intra){
+ s->current_picture.mb_type[xy]= MB_TYPE_INTRA;
+ goto intra;
+ }
+
+ //set motion vectors
+ s->mv_dir = MV_DIR_FORWARD;
+ s->mv_type = MV_TYPE_16X16;
+ s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
+ s->mv[0][0][0] = h->current_mv_x * 2;//gets divided by 2 in motion compensation
+ s->mv[0][0][1] = h->current_mv_y * 2;
+
+intra:
+ /* decode each block */
+ if(s->mb_intra || HAS_CBP(h->mtype)){
+ s->dsp.clear_blocks(s->block[0]);
+ for (i = 0; i < 6; i++) {
+ if (h261_decode_block(h, s->block[i], i, cbp&32) < 0){
+ return SLICE_ERROR;
+ }
+ cbp+=cbp;
+ }
+ }else{
+ for (i = 0; i < 6; i++)
+ s->block_last_index[i]= -1;
+ }
+
+ MPV_decode_mb(s, s->block);
+
+ return SLICE_OK;
+}
+
+/**
+ * decodes a macroblock
+ * @return <0 if an error occurred
+ */
+static int h261_decode_block(H261Context * h, DCTELEM * block,
+ int n, int coded)
+{
+ MpegEncContext * const s = &h->s;
+ int code, level, i, j, run;
+ RLTable *rl = &h261_rl_tcoeff;
+ const uint8_t *scan_table;
+
+ // For the variable length encoding there are two code tables, one being used for
+ // the first transmitted LEVEL in INTER, INTER+MC and INTER+MC+FIL blocks, the second
+ // for all other LEVELs except the first one in INTRA blocks, which is fixed-length
+ // coded with 8 bits.
+ // NOTE: the two code tables only differ in one VLC so we handle that manually.
+ scan_table = s->intra_scantable.permutated;
+ if (s->mb_intra){
+ /* DC coef */
+ level = get_bits(&s->gb, 8);
+ // 0 (00000000b) and -128 (10000000b) are FORBIDDEN
+ if((level&0x7F) == 0){
+ av_log(s->avctx, AV_LOG_ERROR, "illegal dc %d at %d %d\n", level, s->mb_x, s->mb_y);
+ return -1;
+ }
+ // The code 1000 0000 is not used, the reconstruction level of 1024 being coded as 1111 1111.
+ if (level == 255)
+ level = 128;
+ block[0] = level;
+ i = 1;
+ }else if(coded){
+ // Run | Level | Code
+ // EOB |       | not possible for the first level when cbp is available (that's why the table is different)
+ // 0   | 1     | 1s
+ // *   | *     | 0*
+ int check = show_bits(&s->gb, 2);
+ i = 0;
+ if ( check & 0x2 ){
+ skip_bits(&s->gb, 2);
+ block[0] = ( check & 0x1 ) ? -1 : 1;
+ i = 1;
+ }
+ }else{
+ i = 0;
+ }
+ if(!coded){
+ s->block_last_index[n] = i - 1;
+ return 0;
+ }
+ for(;;){
+ code = get_vlc2(&s->gb, rl->vlc.table, TCOEFF_VLC_BITS, 2);
+ if (code < 0){
+ av_log(s->avctx, AV_LOG_ERROR, "illegal ac vlc code at %dx%d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+ if (code == rl->n) {
+ /* escape */
+ // The remaining combinations of (run, level) are encoded with a 20-bit word consisting of 6 bits escape, 6 bits run and 8 bits level.
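+ // Example: escape ("000001") followed by run = 3 ("000011") and
+ // level = -2 ("11111110") encodes the pair (run 3, level -2).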
+ run = get_bits(&s->gb, 6);
+ level = get_sbits(&s->gb, 8);
+ }else if(code == 0){
+ break;
+ }else{
+ run = rl->table_run[code];
+ level = rl->table_level[code];
+ if (get_bits1(&s->gb))
+ level = -level;
+ }
+ i += run;
+ if (i >= 64){
+ av_log(s->avctx, AV_LOG_ERROR, "run overflow at %dx%d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+ j = scan_table[i];
+ block[j] = level;
+ i++;
+ }
+ s->block_last_index[n] = i-1;
+ return 0;
+}
+
+/**
+ * decodes the H261 picture header.
+ * @return <0 if no startcode found
+ */
+static int h261_decode_picture_header(H261Context *h){
+ MpegEncContext * const s = &h->s;
+ int format, i;
+ uint32_t startcode= 0;
+
+ for(i= get_bits_left(&s->gb); i>24; i-=1){
+ startcode = ((startcode << 1) | get_bits(&s->gb, 1)) & 0x000FFFFF;
+
+ if(startcode == 0x10)
+ break;
+ }
+
+ if (startcode != 0x10){
+ av_log(s->avctx, AV_LOG_ERROR, "Bad picture start code\n");
+ return -1;
+ }
+
+ /* temporal reference */
+ i= get_bits(&s->gb, 5); /* picture timestamp */
+ if(i < (s->picture_number&31))
+ i += 32;
+ s->picture_number = (s->picture_number&~31) + i;
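+ // e.g. if the previous picture_number was 40 (TR 8) and the new 5-bit TR is 3,
+ // then i becomes 35 and picture_number becomes 32 + 35 = 67, i.e. 27 pictures later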
+
+ s->avctx->time_base.num = 1001; s->avctx->time_base.den = 30000;
+ s->current_picture.pts= s->picture_number;
+
+
+ /* PTYPE starts here */
+ skip_bits1(&s->gb); /* split screen off */
+ skip_bits1(&s->gb); /* camera off */
+ skip_bits1(&s->gb); /* freeze picture release off */
+
+ format = get_bits1(&s->gb);
+
+ //only 2 formats possible
+ if (format == 0){//QCIF
+ s->width = 176;
+ s->height = 144;
+ s->mb_width = 11;
+ s->mb_height = 9;
+ }else{//CIF
+ s->width = 352;
+ s->height = 288;
+ s->mb_width = 22;
+ s->mb_height = 18;
+ }
+
+ s->mb_num = s->mb_width * s->mb_height;
+
+ skip_bits1(&s->gb); /* still image mode off */
+ skip_bits1(&s->gb); /* Reserved */
+
+ /* PEI */
+ while (get_bits1(&s->gb) != 0){
+ skip_bits(&s->gb, 8);
+ }
+
+ // H.261 has no I-frames, but if we passed FF_I_TYPE for the first frame, the codec would crash
+ // whenever that frame does not contain all I-blocks (e.g. when a packet is lost)
+ s->pict_type = FF_P_TYPE;
+
+ h->gob_number = 0;
+ return 0;
+}
+
+static int h261_decode_gob(H261Context *h){
+ MpegEncContext * const s = &h->s;
+
+ ff_set_qscale(s, s->qscale);
+
+ /* decode mb's */
+ while(h->current_mba <= MBA_STUFFING)
+ {
+ int ret;
+ /* DCT & quantize */
+ ret= h261_decode_mb(h);
+ if(ret<0){
+ if(ret==SLICE_END){
+ h261_decode_mb_skipped(h, h->current_mba, 33);
+ return 0;
+ }
+ av_log(s->avctx, AV_LOG_ERROR, "Error at MB: %d\n", s->mb_x + s->mb_y*s->mb_stride);
+ return -1;
+ }
+
+ h261_decode_mb_skipped(h, h->current_mba-h->mba_diff, h->current_mba-1);
+ }
+
+ return -1;
+}
+
+/**
+ * returns the number of bytes consumed for building the current frame
+ */
+static int get_consumed_bytes(MpegEncContext *s, int buf_size){
+ int pos= get_bits_count(&s->gb)>>3;
+ if(pos==0) pos=1; //avoid infinite loops (I doubt this is needed, but ...)
+ if(pos+10>buf_size) pos=buf_size; // oops ;)
+
+ return pos;
+}
+
+static int h261_decode_frame(AVCodecContext *avctx,
+ void *data, int *data_size,
+ const uint8_t *buf, int buf_size)
+{
+ H261Context *h= avctx->priv_data;
+ MpegEncContext *s = &h->s;
+ int ret;
+ AVFrame *pict = data;
+
+ dprintf(avctx, "*****frame %d size=%d\n", avctx->frame_number, buf_size);
+ dprintf(avctx, "bytes=%x %x %x %x\n", buf[0], buf[1], buf[2], buf[3]);
+ s->flags= avctx->flags;
+ s->flags2= avctx->flags2;
+
+ h->gob_start_code_skipped=0;
+
+retry:
+
+ init_get_bits(&s->gb, buf, buf_size*8);
+
+ if(!s->context_initialized){
+ if (MPV_common_init(s) < 0) //we need the idct permutation for reading a custom matrix
+ return -1;
+ }
+
+ //we need to set current_picture_ptr before reading the header, otherwise we cannot store anything in there
+ if(s->current_picture_ptr==NULL || s->current_picture_ptr->data[0]){
+ int i= ff_find_unused_picture(s, 0);
+ s->current_picture_ptr= &s->picture[i];
+ }
+
+ ret = h261_decode_picture_header(h);
+
+ /* skip if the header was damaged */
+ if (ret < 0){
+ av_log(s->avctx, AV_LOG_ERROR, "header damaged\n");
+ return -1;
+ }
+
+ if (s->width != avctx->coded_width || s->height != avctx->coded_height){
+ ParseContext pc= s->parse_context; //FIXME move this demuxing hack to libavformat
+ s->parse_context.buffer=0;
+ MPV_common_end(s);
+ s->parse_context= pc;
+ }
+ if (!s->context_initialized) {
+ avcodec_set_dimensions(avctx, s->width, s->height);
+
+ goto retry;
+ }
+
+ // for hurry_up==5
+ s->current_picture.pict_type= s->pict_type;
+ s->current_picture.key_frame= s->pict_type == FF_I_TYPE;
+
+ /* skip everything if we are in a hurry>=5 */
+ if(avctx->hurry_up>=5) return get_consumed_bytes(s, buf_size);
+ if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==FF_B_TYPE)
+ ||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=FF_I_TYPE)
+ || avctx->skip_frame >= AVDISCARD_ALL)
+ return get_consumed_bytes(s, buf_size);
+
+ if(MPV_frame_start(s, avctx) < 0)
+ return -1;
+
+ ff_er_frame_start(s);
+
+ /* decode each macroblock */
+ s->mb_x=0;
+ s->mb_y=0;
+
+ while(h->gob_number < (s->mb_height==18 ? 12 : 5)){
+ if(ff_h261_resync(h)<0)
+ break;
+ h261_decode_gob(h);
+ }
+ MPV_frame_end(s);
+
+assert(s->current_picture.pict_type == s->current_picture_ptr->pict_type);
+assert(s->current_picture.pict_type == s->pict_type);
+ *pict= *(AVFrame*)s->current_picture_ptr;
+ ff_print_debug_info(s, pict);
+
+ *data_size = sizeof(AVFrame);
+
+ return get_consumed_bytes(s, buf_size);
+}
+
+static av_cold int h261_decode_end(AVCodecContext *avctx)
+{
+ H261Context *h= avctx->priv_data;
+ MpegEncContext *s = &h->s;
+
+ MPV_common_end(s);
+ return 0;
+}
+
+AVCodec h261_decoder = {
+ "h261",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_H261,
+ sizeof(H261Context),
+ /*.init = */h261_decode_init,
+ /*.encode = */NULL,
+ /*.close = */h261_decode_end,
+ /*.decode = */h261_decode_frame,
+ /*.capabilities = */CODEC_CAP_DR1,
+ /*.next = */NULL,
+ /*.flush = */NULL,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name = */NULL_IF_CONFIG_SMALL("H.261"),
+};
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h263.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h263.c
new file mode 100644
index 000000000..a07c654e1
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h263.c
@@ -0,0 +1,385 @@
+/*
+ * H263/MPEG4 backend for ffmpeg encoder and decoder
+ * Copyright (c) 2000,2001 Fabrice Bellard
+ * H263+ support.
+ * Copyright (c) 2001 Juan J. Sierralta P
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/h263.c
+ * h263/mpeg4 codec.
+ */
+
+//#define DEBUG
+#include <limits.h>
+
+#include "dsputil.h"
+#include "avcodec.h"
+#include "mpegvideo.h"
+#include "h263.h"
+#include "h263data.h"
+#include "mathops.h"
+#include "unary.h"
+#include "flv.h"
+#include "mpeg4video.h"
+
+//#undef NDEBUG
+//#include <assert.h>
+
+uint8_t ff_h263_static_rl_table_store[2][2][2*MAX_RUN + MAX_LEVEL + 3];
+
+
+void ff_h263_update_motion_val(MpegEncContext * s){
+ const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
+ //FIXME a lot of that is only needed for !low_delay
+ const int wrap = s->b8_stride;
+ const int xy = s->block_index[0];
+
+ s->current_picture.mbskip_table[mb_xy]= s->mb_skipped;
+
+ if(s->mv_type != MV_TYPE_8X8){
+ int motion_x, motion_y;
+ if (s->mb_intra) {
+ motion_x = 0;
+ motion_y = 0;
+ } else if (s->mv_type == MV_TYPE_16X16) {
+ motion_x = s->mv[0][0][0];
+ motion_y = s->mv[0][0][1];
+ } else /*if (s->mv_type == MV_TYPE_FIELD)*/ {
+ int i;
+ motion_x = s->mv[0][0][0] + s->mv[0][1][0];
+ motion_y = s->mv[0][0][1] + s->mv[0][1][1];
+ motion_x = (motion_x>>1) | (motion_x&1);
+ for(i=0; i<2; i++){
+ s->p_field_mv_table[i][0][mb_xy][0]= s->mv[0][i][0];
+ s->p_field_mv_table[i][0][mb_xy][1]= s->mv[0][i][1];
+ }
+ s->current_picture.ref_index[0][xy ]=
+ s->current_picture.ref_index[0][xy + 1]= s->field_select[0][0];
+ s->current_picture.ref_index[0][xy + wrap ]=
+ s->current_picture.ref_index[0][xy + wrap + 1]= s->field_select[0][1];
+ }
+
+ /* no update if 8X8 because it has been done during parsing */
+ s->current_picture.motion_val[0][xy][0] = motion_x;
+ s->current_picture.motion_val[0][xy][1] = motion_y;
+ s->current_picture.motion_val[0][xy + 1][0] = motion_x;
+ s->current_picture.motion_val[0][xy + 1][1] = motion_y;
+ s->current_picture.motion_val[0][xy + wrap][0] = motion_x;
+ s->current_picture.motion_val[0][xy + wrap][1] = motion_y;
+ s->current_picture.motion_val[0][xy + 1 + wrap][0] = motion_x;
+ s->current_picture.motion_val[0][xy + 1 + wrap][1] = motion_y;
+ }
+
+ if(s->encoding){ //FIXME encoding MUST be cleaned up
+ if (s->mv_type == MV_TYPE_8X8)
+ s->current_picture.mb_type[mb_xy]= MB_TYPE_L0 | MB_TYPE_8x8;
+ else if(s->mb_intra)
+ s->current_picture.mb_type[mb_xy]= MB_TYPE_INTRA;
+ else
+ s->current_picture.mb_type[mb_xy]= MB_TYPE_L0 | MB_TYPE_16x16;
+ }
+}
+
+int h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr)
+{
+ int x, y, wrap, a, c, pred_dc;
+ int16_t *dc_val;
+
+ /* find prediction */
+ if (n < 4) {
+ x = 2 * s->mb_x + (n & 1);
+ y = 2 * s->mb_y + ((n & 2) >> 1);
+ wrap = s->b8_stride;
+ dc_val = s->dc_val[0];
+ } else {
+ x = s->mb_x;
+ y = s->mb_y;
+ wrap = s->mb_stride;
+ dc_val = s->dc_val[n - 4 + 1];
+ }
+ /* B C
+ * A X
+ */
+ a = dc_val[(x - 1) + (y) * wrap];
+ c = dc_val[(x) + (y - 1) * wrap];
+
+ /* No prediction outside GOB boundary */
+ if(s->first_slice_line && n!=3){
+ if(n!=2) c= 1024;
+ if(n!=1 && s->mb_x == s->resync_mb_x) a= 1024;
+ }
+ /* just DC prediction */
+ if (a != 1024 && c != 1024)
+ pred_dc = (a + c) >> 1;
+ else if (a != 1024)
+ pred_dc = a;
+ else
+ pred_dc = c;
+
+ /* we assume pred is positive */
+ *dc_val_ptr = &dc_val[x + y * wrap];
+ return pred_dc;
+}
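+
+/* The value 1024 marks an unavailable predictor: with both neighbours
+ * available the prediction is their average, with one available it is that
+ * neighbour, e.g. a = 500, c = 1024 (unavailable) -> pred_dc = 500. */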
+
+void ff_h263_loop_filter(MpegEncContext * s){
+ int qp_c;
+ const int linesize = s->linesize;
+ const int uvlinesize= s->uvlinesize;
+ const int xy = s->mb_y * s->mb_stride + s->mb_x;
+ uint8_t *dest_y = s->dest[0];
+ uint8_t *dest_cb= s->dest[1];
+ uint8_t *dest_cr= s->dest[2];
+
+// if(s->pict_type==FF_B_TYPE && !s->readable) return;
+
+ /*
+ Diag Top
+ Left Center
+ */
+ if(!IS_SKIP(s->current_picture.mb_type[xy])){
+ qp_c= s->qscale;
+ s->dsp.h263_v_loop_filter(dest_y+8*linesize , linesize, qp_c);
+ s->dsp.h263_v_loop_filter(dest_y+8*linesize+8, linesize, qp_c);
+ }else
+ qp_c= 0;
+
+ if(s->mb_y){
+ int qp_dt, qp_tt, qp_tc;
+
+ if(IS_SKIP(s->current_picture.mb_type[xy-s->mb_stride]))
+ qp_tt=0;
+ else
+ qp_tt= s->current_picture.qscale_table[xy-s->mb_stride];
+
+ if(qp_c)
+ qp_tc= qp_c;
+ else
+ qp_tc= qp_tt;
+
+ if(qp_tc){
+ const int chroma_qp= s->chroma_qscale_table[qp_tc];
+ s->dsp.h263_v_loop_filter(dest_y , linesize, qp_tc);
+ s->dsp.h263_v_loop_filter(dest_y+8, linesize, qp_tc);
+
+ s->dsp.h263_v_loop_filter(dest_cb , uvlinesize, chroma_qp);
+ s->dsp.h263_v_loop_filter(dest_cr , uvlinesize, chroma_qp);
+ }
+
+ if(qp_tt)
+ s->dsp.h263_h_loop_filter(dest_y-8*linesize+8 , linesize, qp_tt);
+
+ if(s->mb_x){
+ if(qp_tt || IS_SKIP(s->current_picture.mb_type[xy-1-s->mb_stride]))
+ qp_dt= qp_tt;
+ else
+ qp_dt= s->current_picture.qscale_table[xy-1-s->mb_stride];
+
+ if(qp_dt){
+ const int chroma_qp= s->chroma_qscale_table[qp_dt];
+ s->dsp.h263_h_loop_filter(dest_y -8*linesize , linesize, qp_dt);
+ s->dsp.h263_h_loop_filter(dest_cb-8*uvlinesize, uvlinesize, chroma_qp);
+ s->dsp.h263_h_loop_filter(dest_cr-8*uvlinesize, uvlinesize, chroma_qp);
+ }
+ }
+ }
+
+ if(qp_c){
+ s->dsp.h263_h_loop_filter(dest_y +8, linesize, qp_c);
+ if(s->mb_y + 1 == s->mb_height)
+ s->dsp.h263_h_loop_filter(dest_y+8*linesize+8, linesize, qp_c);
+ }
+
+ if(s->mb_x){
+ int qp_lc;
+ if(qp_c || IS_SKIP(s->current_picture.mb_type[xy-1]))
+ qp_lc= qp_c;
+ else
+ qp_lc= s->current_picture.qscale_table[xy-1];
+
+ if(qp_lc){
+ s->dsp.h263_h_loop_filter(dest_y, linesize, qp_lc);
+ if(s->mb_y + 1 == s->mb_height){
+ const int chroma_qp= s->chroma_qscale_table[qp_lc];
+ s->dsp.h263_h_loop_filter(dest_y +8* linesize, linesize, qp_lc);
+ s->dsp.h263_h_loop_filter(dest_cb , uvlinesize, chroma_qp);
+ s->dsp.h263_h_loop_filter(dest_cr , uvlinesize, chroma_qp);
+ }
+ }
+ }
+}
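+/* Note (illustrative, not part of the original source): each block edge above
+ * is filtered only when at least one of the two adjacent macroblocks is coded
+ * (non-skipped), and the filter strength comes from that macroblock's qscale
+ * (qp_c for the current MB, qp_tt/qp_dt/qp_lc for the top, diagonal and left
+ * neighbours); chroma edges use the remapped chroma_qscale_table value. */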
+
+void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n)
+{
+ int x, y, wrap, a, c, pred_dc, scale, i;
+ int16_t *dc_val, *ac_val, *ac_val1;
+
+ /* find prediction */
+ if (n < 4) {
+ x = 2 * s->mb_x + (n & 1);
+ y = 2 * s->mb_y + (n>> 1);
+ wrap = s->b8_stride;
+ dc_val = s->dc_val[0];
+ ac_val = s->ac_val[0][0];
+ scale = s->y_dc_scale;
+ } else {
+ x = s->mb_x;
+ y = s->mb_y;
+ wrap = s->mb_stride;
+ dc_val = s->dc_val[n - 4 + 1];
+ ac_val = s->ac_val[n - 4 + 1][0];
+ scale = s->c_dc_scale;
+ }
+
+ ac_val += ((y) * wrap + (x)) * 16;
+ ac_val1 = ac_val;
+
+ /* B C
+ * A X
+ */
+ a = dc_val[(x - 1) + (y) * wrap];
+ c = dc_val[(x) + (y - 1) * wrap];
+
+ /* No prediction outside GOB boundary */
+ if(s->first_slice_line && n!=3){
+ if(n!=2) c= 1024;
+ if(n!=1 && s->mb_x == s->resync_mb_x) a= 1024;
+ }
+
+ if (s->ac_pred) {
+ pred_dc = 1024;
+ if (s->h263_aic_dir) {
+ /* left prediction */
+ if (a != 1024) {
+ ac_val -= 16;
+ for(i=1;i<8;i++) {
+ block[s->dsp.idct_permutation[i<<3]] += ac_val[i];
+ }
+ pred_dc = a;
+ }
+ } else {
+ /* top prediction */
+ if (c != 1024) {
+ ac_val -= 16 * wrap;
+ for(i=1;i<8;i++) {
+ block[s->dsp.idct_permutation[i ]] += ac_val[i + 8];
+ }
+ pred_dc = c;
+ }
+ }
+ } else {
+ /* just DC prediction */
+ if (a != 1024 && c != 1024)
+ pred_dc = (a + c) >> 1;
+ else if (a != 1024)
+ pred_dc = a;
+ else
+ pred_dc = c;
+ }
+
+ /* we assume pred is positive */
+ block[0]=block[0]*scale + pred_dc;
+
+ if (block[0] < 0)
+ block[0] = 0;
+ else
+ block[0] |= 1;
+
+ /* Update AC/DC tables */
+ dc_val[(x) + (y) * wrap] = block[0];
+
+ /* left copy */
+ for(i=1;i<8;i++)
+ ac_val1[i ] = block[s->dsp.idct_permutation[i<<3]];
+ /* top copy */
+ for(i=1;i<8;i++)
+ ac_val1[8 + i] = block[s->dsp.idct_permutation[i ]];
+}
+
+int16_t *h263_pred_motion(MpegEncContext * s, int block, int dir,
+ int *px, int *py)
+{
+ int wrap;
+ int16_t *A, *B, *C, (*mot_val)[2];
+ static const int off[4]= {2, 1, 1, -1};
+
+ wrap = s->b8_stride;
+ mot_val = s->current_picture.motion_val[dir] + s->block_index[block];
+
+ A = mot_val[ - 1];
+ /* special case for first (slice) line */
+ if (s->first_slice_line && block<3) {
+ // we can't just change some MVs to simulate that as we need them for the B frames (and ME)
+        // and if we ever support non-rectangular objects then we need to do a few ifs here anyway :(
+ if(block==0){ //most common case
+ if(s->mb_x == s->resync_mb_x){ //rare
+ *px= *py = 0;
+ }else if(s->mb_x + 1 == s->resync_mb_x && s->h263_pred){ //rare
+ C = mot_val[off[block] - wrap];
+ if(s->mb_x==0){
+ *px = C[0];
+ *py = C[1];
+ }else{
+ *px = mid_pred(A[0], 0, C[0]);
+ *py = mid_pred(A[1], 0, C[1]);
+ }
+ }else{
+ *px = A[0];
+ *py = A[1];
+ }
+ }else if(block==1){
+ if(s->mb_x + 1 == s->resync_mb_x && s->h263_pred){ //rare
+ C = mot_val[off[block] - wrap];
+ *px = mid_pred(A[0], 0, C[0]);
+ *py = mid_pred(A[1], 0, C[1]);
+ }else{
+ *px = A[0];
+ *py = A[1];
+ }
+ }else{ /* block==2*/
+ B = mot_val[ - wrap];
+ C = mot_val[off[block] - wrap];
+ if(s->mb_x == s->resync_mb_x) //rare
+ A[0]=A[1]=0;
+
+ *px = mid_pred(A[0], B[0], C[0]);
+ *py = mid_pred(A[1], B[1], C[1]);
+ }
+ } else {
+ B = mot_val[ - wrap];
+ C = mot_val[off[block] - wrap];
+ *px = mid_pred(A[0], B[0], C[0]);
+ *py = mid_pred(A[1], B[1], C[1]);
+ }
+ return *mot_val;
+}
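+/* Note (illustrative, not part of the original source): in the regular case
+ * the predictor is the component-wise median of the left (A), above (B) and
+ * diagonal (C) neighbours, e.g. mid_pred(3, -1, 2) == 2; the special cases
+ * above only apply on the first line of a slice, where some of those
+ * neighbours do not exist yet. */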
+
+
+/**
+ * Get the GOB height based on picture height.
+ */
+int ff_h263_get_gob_height(MpegEncContext *s){
+ if (s->height <= 400)
+ return 1;
+ else if (s->height <= 800)
+ return 2;
+ else
+ return 4;
+}
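+/* Note (illustrative, not part of the original source): this matches the GOB
+ * layout in the H.263 spec: one macroblock row per GOB for pictures of up to
+ * 400 lines (sub-QCIF/QCIF/CIF), two rows up to 800 lines (4CIF) and four
+ * rows above that (16CIF). */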
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h263.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h263.h
new file mode 100644
index 000000000..9adc17843
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h263.h
@@ -0,0 +1,246 @@
+/*
+ * H263 internal header
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef AVCODEC_H263_H
+#define AVCODEC_H263_H
+
+// The defines below set the number of bits that are read at once for
+// reading VLC values. Changing these may improve speed and data-cache
+// behaviour; be aware, though, that decreasing them may require increasing
+// the number of stages passed to get_vlc*.
+#define INTRA_MCBPC_VLC_BITS 6
+#define INTER_MCBPC_VLC_BITS 7
+#define CBPY_VLC_BITS 6
+#define TEX_VLC_BITS 9
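+// Illustrative note (not part of the original source): the longest intra MCBPC
+// code is 9 bits (see ff_h263_intra_MCBPC_bits), so with a 6-bit first stage
+// the reader has to fall back to a second get_vlc* stage for the rare long
+// codes; shrinking the values above pushes more codes into those extra stages.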
+
+extern const AVRational ff_h263_pixel_aspect[16];
+extern const uint8_t ff_h263_cbpy_tab[16][2];
+
+extern const uint8_t cbpc_b_tab[4][2];
+
+extern const uint8_t mvtab[33][2];
+
+extern const uint8_t ff_h263_intra_MCBPC_code[9];
+extern const uint8_t ff_h263_intra_MCBPC_bits[9];
+
+extern const uint8_t ff_h263_inter_MCBPC_code[28];
+extern const uint8_t ff_h263_inter_MCBPC_bits[28];
+extern const uint8_t h263_mbtype_b_tab[15][2];
+
+extern VLC ff_h263_intra_MCBPC_vlc;
+extern VLC ff_h263_inter_MCBPC_vlc;
+extern VLC ff_h263_cbpy_vlc;
+
+extern RLTable ff_h263_rl_inter;
+
+extern RLTable rl_intra_aic;
+
+extern const uint16_t h263_format[8][2];
+extern const uint8_t modified_quant_tab[2][32];
+extern uint16_t ff_mba_max[6];
+extern uint8_t ff_mba_length[7];
+
+extern uint8_t ff_h263_static_rl_table_store[2][2][2*MAX_RUN + MAX_LEVEL + 3];
+
+
+int h263_decode_motion(MpegEncContext * s, int pred, int f_code);
+av_const int ff_h263_aspect_to_info(AVRational aspect);
+int ff_h263_decode_init(AVCodecContext *avctx);
+int ff_h263_decode_frame(AVCodecContext *avctx,
+ void *data, int *data_size,
+ const uint8_t *buf, int buf_size);
+int ff_h263_decode_end(AVCodecContext *avctx);
+void h263_encode_mb(MpegEncContext *s,
+ DCTELEM block[6][64],
+ int motion_x, int motion_y);
+void h263_encode_picture_header(MpegEncContext *s, int picture_number);
+void h263_encode_gob_header(MpegEncContext * s, int mb_line);
+int16_t *h263_pred_motion(MpegEncContext * s, int block, int dir,
+ int *px, int *py);
+void h263_encode_init(MpegEncContext *s);
+void h263_decode_init_vlc(MpegEncContext *s);
+int h263_decode_picture_header(MpegEncContext *s);
+int ff_h263_decode_gob_header(MpegEncContext *s);
+void ff_h263_update_motion_val(MpegEncContext * s);
+void ff_h263_loop_filter(MpegEncContext * s);
+void ff_set_qscale(MpegEncContext * s, int qscale);
+int ff_h263_decode_mba(MpegEncContext *s);
+void ff_h263_encode_mba(MpegEncContext *s);
+void ff_init_qscale_tab(MpegEncContext *s);
+int h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr);
+void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n);
+
+
+/**
+ * Prints picture info if FF_DEBUG_PICT_INFO is set.
+ */
+void ff_h263_show_pict_info(MpegEncContext *s);
+
+int ff_intel_h263_decode_picture_header(MpegEncContext *s);
+int ff_h263_decode_mb(MpegEncContext *s,
+ DCTELEM block[6][64]);
+
+/**
+ * Returns the value of the 3-bit "source format" syntax element,
+ * which represents some standard picture dimensions or indicates that
+ * width & height are explicitly stored later.
+ */
+int av_const h263_get_picture_format(int width, int height);
+
+void ff_clean_h263_qscales(MpegEncContext *s);
+int ff_h263_resync(MpegEncContext *s);
+const uint8_t *ff_h263_find_resync_marker(const uint8_t *p, const uint8_t *end);
+int ff_h263_get_gob_height(MpegEncContext *s);
+void ff_h263_encode_motion(MpegEncContext * s, int val, int f_code);
+
+
+static inline int h263_get_motion_length(MpegEncContext * s, int val, int f_code){
+ int l, bit_size, code;
+
+ if (val == 0) {
+ return mvtab[0][1];
+ } else {
+ bit_size = f_code - 1;
+ /* modulo encoding */
+ l= INT_BIT - 6 - bit_size;
+ val = (val<<l)>>l;
+ val--;
+ code = (val >> bit_size) + 1;
+
+ return mvtab[code][1] + 1 + bit_size;
+ }
+}
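+/* Worked example (illustrative, not part of the original source): with
+ * f_code == 1 (bit_size 0) a motion delta of +3 survives the modulo wrap,
+ * is decremented to 2 and mapped to code 3, so the cost is
+ * mvtab[3][1] + 1 sign bit = 4 + 1 = 5 bits; larger f_code values add
+ * bit_size extra residual bits per component. */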
+
+static inline void ff_h263_encode_motion_vector(MpegEncContext * s, int x, int y, int f_code){
+ if(s->flags2 & CODEC_FLAG2_NO_OUTPUT){
+ skip_put_bits(&s->pb,
+ h263_get_motion_length(s, x, f_code)
+ +h263_get_motion_length(s, y, f_code));
+ }else{
+ ff_h263_encode_motion(s, x, f_code);
+ ff_h263_encode_motion(s, y, f_code);
+ }
+}
+
+static inline int get_p_cbp(MpegEncContext * s,
+ DCTELEM block[6][64],
+ int motion_x, int motion_y){
+ int cbp, i;
+
+ if(s->flags & CODEC_FLAG_CBP_RD){
+ int best_cbpy_score= INT_MAX;
+ int best_cbpc_score= INT_MAX;
+ int cbpc = (-1), cbpy= (-1);
+ const int offset= (s->mv_type==MV_TYPE_16X16 ? 0 : 16) + (s->dquant ? 8 : 0);
+ const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
+
+ for(i=0; i<4; i++){
+ int score= ff_h263_inter_MCBPC_bits[i + offset] * lambda;
+ if(i&1) score += s->coded_score[5];
+ if(i&2) score += s->coded_score[4];
+
+ if(score < best_cbpc_score){
+ best_cbpc_score= score;
+ cbpc= i;
+ }
+ }
+
+ for(i=0; i<16; i++){
+ int score= ff_h263_cbpy_tab[i ^ 0xF][1] * lambda;
+ if(i&1) score += s->coded_score[3];
+ if(i&2) score += s->coded_score[2];
+ if(i&4) score += s->coded_score[1];
+ if(i&8) score += s->coded_score[0];
+
+ if(score < best_cbpy_score){
+ best_cbpy_score= score;
+ cbpy= i;
+ }
+ }
+ cbp= cbpc + 4*cbpy;
+ if ((motion_x | motion_y | s->dquant) == 0 && s->mv_type==MV_TYPE_16X16){
+ if(best_cbpy_score + best_cbpc_score + 2*lambda >= 0)
+ cbp= 0;
+ }
+
+ for (i = 0; i < 6; i++) {
+ if (s->block_last_index[i] >= 0 && ((cbp >> (5 - i))&1)==0 ){
+ s->block_last_index[i]= -1;
+ s->dsp.clear_block(s->block[i]);
+ }
+ }
+ }else{
+ cbp= 0;
+ for (i = 0; i < 6; i++) {
+ if (s->block_last_index[i] >= 0)
+ cbp |= 1 << (5 - i);
+ }
+ }
+ return cbp;
+}
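+/* Note (illustrative, not part of the original source): the returned cbp packs
+ * one coded-block flag per 8x8 block, block i in bit (5 - i): luma blocks 0-3
+ * in bits 5-2 (chosen via CBPY) and chroma Cb/Cr in bits 1-0 (via MCBPC).
+ * Under CODEC_FLAG_CBP_RD each candidate pattern is scored as VLC bits times
+ * lambda plus the per-block coded_score terms, and blocks whose bit ends up
+ * cleared are flushed so they are really skipped. */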
+
+static inline int get_b_cbp(MpegEncContext * s, DCTELEM block[6][64],
+ int motion_x, int motion_y, int mb_type){
+ int cbp=0, i;
+
+ if(s->flags & CODEC_FLAG_CBP_RD){
+ int score=0;
+ const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
+
+ for(i=0; i<6; i++){
+ if(s->coded_score[i] < 0){
+ score += s->coded_score[i];
+ cbp |= 1 << (5 - i);
+ }
+ }
+
+ if(cbp){
+ int zero_score= -6;
+ if ((motion_x | motion_y | s->dquant | mb_type) == 0){
+ zero_score-= 4; //2*MV + mb_type + cbp bit
+ }
+
+ zero_score*= lambda;
+ if(zero_score <= score){
+ cbp=0;
+ }
+ }
+
+ for (i = 0; i < 6; i++) {
+ if (s->block_last_index[i] >= 0 && ((cbp >> (5 - i))&1)==0 ){
+ s->block_last_index[i]= -1;
+ s->dsp.clear_block(s->block[i]);
+ }
+ }
+ }else{
+ for (i = 0; i < 6; i++) {
+ if (s->block_last_index[i] >= 0)
+ cbp |= 1 << (5 - i);
+ }
+ }
+ return cbp;
+}
+
+static inline void memsetw(short *tab, int val, int n)
+{
+ int i;
+ for(i=0;i<n;i++)
+ tab[i] = val;
+}
+#endif
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h263_parser.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h263_parser.c
new file mode 100644
index 000000000..44f632e92
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h263_parser.c
@@ -0,0 +1,62 @@
+/*
+ * H.263 parser
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file h263_parser.c
+ * H.263 parser
+ */
+
+#include "parser.h"
+
+int ff_h263_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size){
+ int vop_found, i;
+ uint32_t state;
+
+ vop_found= pc->frame_start_found;
+ state= pc->state;
+
+ i=0;
+ if(!vop_found){
+ for(i=0; i<buf_size; i++){
+ state= (state<<8) | buf[i];
+ if(state>>(32-22) == 0x20){
+ i++;
+ vop_found=1;
+ break;
+ }
+ }
+ }
+
+ if(vop_found){
+ for(; i<buf_size; i++){
+ state= (state<<8) | buf[i];
+ if(state>>(32-22) == 0x20){
+ pc->frame_start_found=0;
+ pc->state=-1;
+ return i-3;
+ }
+ }
+ }
+ pc->frame_start_found= vop_found;
+ pc->state= state;
+
+ return END_NOT_FOUND;
+}
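+/* Note (illustrative, not part of the original source): `state` is a sliding
+ * 32-bit window over the byte stream, and state>>(32-22) == 0x20 checks that
+ * its 22 most significant bits equal 0000 0000 0000 0000 1000 00, the H.263
+ * picture start code; when a second start code is seen the current frame ends
+ * there, and i-3 points back to the byte where that start code begins. */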
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h263_parser.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h263_parser.h
new file mode 100644
index 000000000..565a222bc
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h263_parser.h
@@ -0,0 +1,29 @@
+/*
+ * H.263 parser
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_H263_PARSER_H
+#define AVCODEC_H263_PARSER_H
+
+#include "parser.h"
+
+int ff_h263_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size);
+
+#endif /* AVCODEC_H263_PARSER_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h263data.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h263data.h
new file mode 100644
index 000000000..0c0d307f2
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h263data.h
@@ -0,0 +1,299 @@
+/*
+ * copyright (c) 2000,2001 Fabrice Bellard
+ * H263+ support
+ * copyright (c) 2001 Juan J. Sierralta P
+ * copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/h263data.h
+ * H.263 tables.
+ */
+
+#ifndef AVCODEC_H263DATA_H
+#define AVCODEC_H263DATA_H
+
+#include <stdint.h>
+#include "mpegvideo.h"
+
+/* intra MCBPC, mb_type = (intra), then (intraq) */
+const uint8_t ff_h263_intra_MCBPC_code[9] = { 1, 1, 2, 3, 1, 1, 2, 3, 1 };
+const uint8_t ff_h263_intra_MCBPC_bits[9] = { 1, 3, 3, 3, 4, 6, 6, 6, 9 };
+
+/* inter MCBPC, mb_type = (inter), (intra), (interq), (intraq), (inter4v) */
+/* Changed the tables for interq and inter4v+q, following the standard ** Juanjo ** */
+const uint8_t ff_h263_inter_MCBPC_code[28] = {
+ 1, 3, 2, 5,
+ 3, 4, 3, 3,
+ 3, 7, 6, 5,
+ 4, 4, 3, 2,
+ 2, 5, 4, 5,
+ 1, 0, 0, 0, /* Stuffing */
+ 2, 12, 14, 15,
+};
+const uint8_t ff_h263_inter_MCBPC_bits[28] = {
+ 1, 4, 4, 6, /* inter */
+ 5, 8, 8, 7, /* intra */
+ 3, 7, 7, 9, /* interQ */
+ 6, 9, 9, 9, /* intraQ */
+ 3, 7, 7, 8, /* inter4 */
+ 9, 0, 0, 0, /* Stuffing */
+ 11, 13, 13, 13,/* inter4Q*/
+};
+
+const uint8_t h263_mbtype_b_tab[15][2] = {
+ {1, 1},
+ {3, 3},
+ {1, 5},
+ {4, 4},
+ {5, 4},
+ {6, 6},
+ {2, 4},
+ {3, 4},
+ {7, 6},
+ {4, 6},
+ {5, 6},
+ {1, 6},
+ {1,10},
+ {1, 7},
+ {1, 8},
+};
+
+const uint8_t cbpc_b_tab[4][2] = {
+{0, 1},
+{2, 2},
+{7, 3},
+{6, 3},
+};
+
+const uint8_t ff_h263_cbpy_tab[16][2] =
+{
+ {3,4}, {5,5}, {4,5}, {9,4}, {3,5}, {7,4}, {2,6}, {11,4},
+ {2,5}, {3,6}, {5,4}, {10,4}, {4,4}, {8,4}, {6,4}, {3,2}
+};
+
+const uint8_t mvtab[33][2] =
+{
+ {1,1}, {1,2}, {1,3}, {1,4}, {3,6}, {5,7}, {4,7}, {3,7},
+ {11,9}, {10,9}, {9,9}, {17,10}, {16,10}, {15,10}, {14,10}, {13,10},
+ {12,10}, {11,10}, {10,10}, {9,10}, {8,10}, {7,10}, {6,10}, {5,10},
+ {4,10}, {7,11}, {6,11}, {5,11}, {4,11}, {3,11}, {2,11}, {3,12},
+ {2,12}
+};
+
+/* third non intra table */
+const uint16_t inter_vlc[103][2] = {
+{ 0x2, 2 },{ 0xf, 4 },{ 0x15, 6 },{ 0x17, 7 },
+{ 0x1f, 8 },{ 0x25, 9 },{ 0x24, 9 },{ 0x21, 10 },
+{ 0x20, 10 },{ 0x7, 11 },{ 0x6, 11 },{ 0x20, 11 },
+{ 0x6, 3 },{ 0x14, 6 },{ 0x1e, 8 },{ 0xf, 10 },
+{ 0x21, 11 },{ 0x50, 12 },{ 0xe, 4 },{ 0x1d, 8 },
+{ 0xe, 10 },{ 0x51, 12 },{ 0xd, 5 },{ 0x23, 9 },
+{ 0xd, 10 },{ 0xc, 5 },{ 0x22, 9 },{ 0x52, 12 },
+{ 0xb, 5 },{ 0xc, 10 },{ 0x53, 12 },{ 0x13, 6 },
+{ 0xb, 10 },{ 0x54, 12 },{ 0x12, 6 },{ 0xa, 10 },
+{ 0x11, 6 },{ 0x9, 10 },{ 0x10, 6 },{ 0x8, 10 },
+{ 0x16, 7 },{ 0x55, 12 },{ 0x15, 7 },{ 0x14, 7 },
+{ 0x1c, 8 },{ 0x1b, 8 },{ 0x21, 9 },{ 0x20, 9 },
+{ 0x1f, 9 },{ 0x1e, 9 },{ 0x1d, 9 },{ 0x1c, 9 },
+{ 0x1b, 9 },{ 0x1a, 9 },{ 0x22, 11 },{ 0x23, 11 },
+{ 0x56, 12 },{ 0x57, 12 },{ 0x7, 4 },{ 0x19, 9 },
+{ 0x5, 11 },{ 0xf, 6 },{ 0x4, 11 },{ 0xe, 6 },
+{ 0xd, 6 },{ 0xc, 6 },{ 0x13, 7 },{ 0x12, 7 },
+{ 0x11, 7 },{ 0x10, 7 },{ 0x1a, 8 },{ 0x19, 8 },
+{ 0x18, 8 },{ 0x17, 8 },{ 0x16, 8 },{ 0x15, 8 },
+{ 0x14, 8 },{ 0x13, 8 },{ 0x18, 9 },{ 0x17, 9 },
+{ 0x16, 9 },{ 0x15, 9 },{ 0x14, 9 },{ 0x13, 9 },
+{ 0x12, 9 },{ 0x11, 9 },{ 0x7, 10 },{ 0x6, 10 },
+{ 0x5, 10 },{ 0x4, 10 },{ 0x24, 11 },{ 0x25, 11 },
+{ 0x26, 11 },{ 0x27, 11 },{ 0x58, 12 },{ 0x59, 12 },
+{ 0x5a, 12 },{ 0x5b, 12 },{ 0x5c, 12 },{ 0x5d, 12 },
+{ 0x5e, 12 },{ 0x5f, 12 },{ 0x3, 7 },
+};
+
+const int8_t inter_level[102] = {
+ 1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 1, 2, 3, 4,
+ 5, 6, 1, 2, 3, 4, 1, 2,
+ 3, 1, 2, 3, 1, 2, 3, 1,
+ 2, 3, 1, 2, 1, 2, 1, 2,
+ 1, 2, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 2, 3, 1, 2, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1,
+};
+
+const int8_t inter_run[102] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 3, 3,
+ 3, 4, 4, 4, 5, 5, 5, 6,
+ 6, 6, 7, 7, 8, 8, 9, 9,
+ 10, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 0, 0, 0, 1, 1, 2,
+ 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 23, 24, 25, 26,
+ 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40,
+};
+
+RLTable ff_h263_rl_inter = {
+ 102,
+ 58,
+ inter_vlc,
+ inter_run,
+ inter_level,
+};
+
+static const uint16_t intra_vlc_aic[103][2] = {
+{ 0x2, 2 }, { 0x6, 3 }, { 0xe, 4 }, { 0xc, 5 },
+{ 0xd, 5 }, { 0x10, 6 }, { 0x11, 6 }, { 0x12, 6 },
+{ 0x16, 7 }, { 0x1b, 8 }, { 0x20, 9 }, { 0x21, 9 },
+{ 0x1a, 9 }, { 0x1b, 9 }, { 0x1c, 9 }, { 0x1d, 9 },
+{ 0x1e, 9 }, { 0x1f, 9 }, { 0x23, 11 }, { 0x22, 11 },
+{ 0x57, 12 }, { 0x56, 12 }, { 0x55, 12 }, { 0x54, 12 },
+{ 0x53, 12 }, { 0xf, 4 }, { 0x14, 6 }, { 0x14, 7 },
+{ 0x1e, 8 }, { 0xf, 10 }, { 0x21, 11 }, { 0x50, 12 },
+{ 0xb, 5 }, { 0x15, 7 }, { 0xe, 10 }, { 0x9, 10 },
+{ 0x15, 6 }, { 0x1d, 8 }, { 0xd, 10 }, { 0x51, 12 },
+{ 0x13, 6 }, { 0x23, 9 }, { 0x7, 11 }, { 0x17, 7 },
+{ 0x22, 9 }, { 0x52, 12 }, { 0x1c, 8 }, { 0xc, 10 },
+{ 0x1f, 8 }, { 0xb, 10 }, { 0x25, 9 }, { 0xa, 10 },
+{ 0x24, 9 }, { 0x6, 11 }, { 0x21, 10 }, { 0x20, 10 },
+{ 0x8, 10 }, { 0x20, 11 }, { 0x7, 4 }, { 0xc, 6 },
+{ 0x10, 7 }, { 0x13, 8 }, { 0x11, 9 }, { 0x12, 9 },
+{ 0x4, 10 }, { 0x27, 11 }, { 0x26, 11 }, { 0x5f, 12 },
+{ 0xf, 6 }, { 0x13, 9 }, { 0x5, 10 }, { 0x25, 11 },
+{ 0xe, 6 }, { 0x14, 9 }, { 0x24, 11 }, { 0xd, 6 },
+{ 0x6, 10 }, { 0x5e, 12 }, { 0x11, 7 }, { 0x7, 10 },
+{ 0x13, 7 }, { 0x5d, 12 }, { 0x12, 7 }, { 0x5c, 12 },
+{ 0x14, 8 }, { 0x5b, 12 }, { 0x15, 8 }, { 0x1a, 8 },
+{ 0x19, 8 }, { 0x18, 8 }, { 0x17, 8 }, { 0x16, 8 },
+{ 0x19, 9 }, { 0x15, 9 }, { 0x16, 9 }, { 0x18, 9 },
+{ 0x17, 9 }, { 0x4, 11 }, { 0x5, 11 }, { 0x58, 12 },
+{ 0x59, 12 }, { 0x5a, 12 }, { 0x3, 7 },
+};
+
+static const int8_t intra_run_aic[102] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 3, 3, 3, 3,
+ 4, 4, 4, 5, 5, 5, 6, 6,
+ 7, 7, 8, 8, 9, 9, 10, 11,
+12, 13, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 1, 1, 1, 1,
+ 2, 2, 2, 3, 3, 3, 4, 4,
+ 5, 5, 6, 6, 7, 7, 8, 9,
+10, 11, 12, 13, 14, 15, 16, 17,
+18, 19, 20, 21, 22, 23,
+};
+
+static const int8_t intra_level_aic[102] = {
+ 1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16,
+17, 18, 19, 20, 21, 22, 23, 24,
+25, 1, 2, 3, 4, 5, 6, 7,
+ 1, 2, 3, 4, 1, 2, 3, 4,
+ 1, 2, 3, 1, 2, 3, 1, 2,
+ 1, 2, 1, 2, 1, 2, 1, 1,
+ 1, 1, 1, 2, 3, 4, 5, 6,
+ 7, 8, 9, 10, 1, 2, 3, 4,
+ 1, 2, 3, 1, 2, 3, 1, 2,
+ 1, 2, 1, 2, 1, 2, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1,
+};
+
+RLTable rl_intra_aic = {
+ 102,
+ 58,
+ intra_vlc_aic,
+ intra_run_aic,
+ intra_level_aic,
+};
+
+const uint16_t h263_format[8][2] = {
+ { 0, 0 },
+ { 128, 96 },
+ { 176, 144 },
+ { 352, 288 },
+ { 704, 576 },
+ { 1408, 1152 },
+};
+
+const uint8_t ff_aic_dc_scale_table[32]={
+// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ 0, 2, 4, 6, 8,10,12,14,16,18,20,22,24,26,28,30,32,34,36,38,40,42,44,46,48,50,52,54,56,58,60,62
+};
+
+const uint8_t modified_quant_tab[2][32]={
+// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+{
+ 0, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9,10,11,12,13,14,15,16,17,18,18,19,20,21,22,23,24,25,26,27,28
+},{
+ 0, 2, 3, 4, 5, 6, 7, 8, 9,10,11,13,14,15,16,17,18,19,20,21,22,24,25,26,27,28,29,30,31,31,31,26
+}
+};
+
+const uint8_t ff_h263_chroma_qscale_table[32]={
+// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ 0, 1, 2, 3, 4, 5, 6, 6, 7, 8, 9, 9,10,10,11,11,12,12,12,13,13,13,14,14,14,14,14,15,15,15,15,15
+};
+
+uint16_t ff_mba_max[6]={
+ 47, 98, 395,1583,6335,9215
+};
+
+uint8_t ff_mba_length[7]={
+ 6, 7, 9, 11, 13, 14, 14
+};
+
+const uint8_t ff_h263_loop_filter_strength[32]={
+// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9,10,10,10,11,11,11,12,12,12
+};
+
+const AVRational ff_h263_pixel_aspect[16]={
+ {0, 1},
+ {1, 1},
+ {12, 11},
+ {10, 11},
+ {16, 11},
+ {40, 33},
+ {0, 1},
+ {0, 1},
+ {0, 1},
+ {0, 1},
+ {0, 1},
+ {0, 1},
+ {0, 1},
+ {0, 1},
+ {0, 1},
+ {0, 1},
+};
+
+#endif /* AVCODEC_H263DATA_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h263dec.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h263dec.c
new file mode 100644
index 000000000..4313e8b4b
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h263dec.c
@@ -0,0 +1,717 @@
+/*
+ * H.263 decoder
+ * Copyright (c) 2001 Fabrice Bellard
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/h263dec.c
+ * H.263 decoder.
+ */
+
+#include "internal.h"
+#include "avcodec.h"
+#include "dsputil.h"
+#include "mpegvideo.h"
+#include "h263.h"
+#include "h263_parser.h"
+#include "mpeg4video_parser.h"
+#include "msmpeg4.h"
+#include "flv.h"
+#include "mpeg4video.h"
+
+//#define DEBUG
+//#define PRINT_FRAME_TIME
+
+av_cold int ff_h263_decode_init(AVCodecContext *avctx)
+{
+ MpegEncContext *s = avctx->priv_data;
+
+ s->avctx = avctx;
+ s->out_format = FMT_H263;
+
+ s->width = avctx->coded_width;
+ s->height = avctx->coded_height;
+ s->workaround_bugs= avctx->workaround_bugs;
+
+ // set defaults
+ MPV_decode_defaults(s);
+ s->quant_precision=5;
+ s->decode_mb= ff_h263_decode_mb;
+ s->low_delay= 1;
+ avctx->pix_fmt= PIX_FMT_YUV420P; /* ffdshow custom code */
+ s->unrestricted_mv= 1;
+
+ /* select sub codec */
+ switch(avctx->codec->id) {
+ case CODEC_ID_H263:
+ s->unrestricted_mv= 0;
+ avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
+ break;
+ case CODEC_ID_MPEG4:
+ break;
+ case CODEC_ID_MSMPEG4V1:
+ s->h263_msmpeg4 = 1;
+ s->h263_pred = 1;
+ s->msmpeg4_version=1;
+ break;
+ case CODEC_ID_MSMPEG4V2:
+ s->h263_msmpeg4 = 1;
+ s->h263_pred = 1;
+ s->msmpeg4_version=2;
+ break;
+ case CODEC_ID_MSMPEG4V3:
+ s->h263_msmpeg4 = 1;
+ s->h263_pred = 1;
+ s->msmpeg4_version=3;
+ break;
+ case CODEC_ID_WMV1:
+ s->h263_msmpeg4 = 1;
+ s->h263_pred = 1;
+ s->msmpeg4_version=4;
+ break;
+ case CODEC_ID_WMV2:
+ s->h263_msmpeg4 = 1;
+ s->h263_pred = 1;
+ s->msmpeg4_version=5;
+ break;
+ case CODEC_ID_VC1:
+ case CODEC_ID_WMV3:
+ s->h263_msmpeg4 = 1;
+ s->h263_pred = 1;
+ s->msmpeg4_version=6;
+ avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
+ break;
+ case CODEC_ID_H263I:
+ break;
+ case CODEC_ID_FLV1:
+ s->h263_flv = 1;
+ break;
+ default:
+ return -1;
+ }
+ s->codec_id= avctx->codec->id;
+
+ /* for h263, we allocate the images after having read the header */
+ if (avctx->codec->id != CODEC_ID_H263 && avctx->codec->id != CODEC_ID_MPEG4)
+ if (MPV_common_init(s) < 0)
+ return -1;
+
+ h263_decode_init_vlc(s);
+
+ return 0;
+}
+
+av_cold int ff_h263_decode_end(AVCodecContext *avctx)
+{
+ MpegEncContext *s = avctx->priv_data;
+
+ MPV_common_end(s);
+ return 0;
+}
+
+/**
+ * returns the number of bytes consumed for building the current frame
+ */
+static int get_consumed_bytes(MpegEncContext *s, int buf_size){
+ int pos= (get_bits_count(&s->gb)+7)>>3;
+
+ if(s->divx_packed){
+ //we would have to scan through the whole buf to handle the weird reordering ...
+ return buf_size;
+ }else if(s->flags&CODEC_FLAG_TRUNCATED){
+ pos -= s->parse_context.last_index;
+ if(pos<0) pos=0; // padding is not really read so this might be -1
+ return pos;
+ }else{
+        if(pos==0) pos=1; //avoid infinite loops (I doubt that is needed but ...)
+ if(pos+10>buf_size) pos=buf_size; // oops ;)
+
+ return pos;
+ }
+}
+
+static int decode_slice(MpegEncContext *s){
+ const int part_mask= s->partitioned_frame ? (AC_END|AC_ERROR) : 0x7F;
+ const int mb_size= 16>>s->avctx->lowres;
+ s->last_resync_gb= s->gb;
+ s->first_slice_line= 1;
+
+ s->resync_mb_x= s->mb_x;
+ s->resync_mb_y= s->mb_y;
+
+ ff_set_qscale(s, s->qscale);
+
+ if(s->partitioned_frame){
+ const int qscale= s->qscale;
+
+ if(CONFIG_MPEG4_DECODER && s->codec_id==CODEC_ID_MPEG4){
+ if(ff_mpeg4_decode_partitions(s) < 0)
+ return -1;
+ }
+
+ /* restore variables which were modified */
+ s->first_slice_line=1;
+ s->mb_x= s->resync_mb_x;
+ s->mb_y= s->resync_mb_y;
+ ff_set_qscale(s, qscale);
+ }
+
+ for(; s->mb_y < s->mb_height; s->mb_y++) {
+ /* per-row end of slice checks */
+ if(s->msmpeg4_version){
+ if(s->resync_mb_y + s->slice_height == s->mb_y){
+ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, AC_END|DC_END|MV_END);
+
+ return 0;
+ }
+ }
+
+ if(s->msmpeg4_version==1){
+ s->last_dc[0]=
+ s->last_dc[1]=
+ s->last_dc[2]= 128;
+ }
+
+ ff_init_block_index(s);
+ for(; s->mb_x < s->mb_width; s->mb_x++) {
+ int ret;
+
+ ff_update_block_index(s);
+
+ if(s->resync_mb_x == s->mb_x && s->resync_mb_y+1 == s->mb_y){
+ s->first_slice_line=0;
+ }
+
+ /* DCT & quantize */
+
+ s->mv_dir = MV_DIR_FORWARD;
+ s->mv_type = MV_TYPE_16X16;
+// s->mb_skipped = 0;
+//printf("%d %d %06X\n", ret, get_bits_count(&s->gb), show_bits(&s->gb, 24));
+ ret= s->decode_mb(s, s->block);
+
+ if (s->pict_type!=FF_B_TYPE)
+ ff_h263_update_motion_val(s);
+
+ if(ret<0){
+ const int xy= s->mb_x + s->mb_y*s->mb_stride;
+ if(ret==SLICE_END){
+ MPV_decode_mb(s, s->block);
+ if(s->loop_filter)
+ ff_h263_loop_filter(s);
+
+//printf("%d %d %d %06X\n", s->mb_x, s->mb_y, s->gb.size*8 - get_bits_count(&s->gb), show_bits(&s->gb, 24));
+ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END)&part_mask);
+
+ s->padding_bug_score--;
+
+ if(++s->mb_x >= s->mb_width){
+ s->mb_x=0;
+ ff_draw_horiz_band(s, s->mb_y*mb_size, mb_size);
+ s->mb_y++;
+ }
+ return 0;
+ }else if(ret==SLICE_NOEND){
+ av_log(s->avctx, AV_LOG_ERROR, "Slice mismatch at MB: %d\n", xy);
+ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x+1, s->mb_y, (AC_END|DC_END|MV_END)&part_mask);
+ return -1;
+ }
+ av_log(s->avctx, AV_LOG_ERROR, "Error at MB: %d\n", xy);
+ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_ERROR|DC_ERROR|MV_ERROR)&part_mask);
+
+ return -1;
+ }
+
+ MPV_decode_mb(s, s->block);
+ if(s->loop_filter)
+ ff_h263_loop_filter(s);
+ }
+
+ ff_draw_horiz_band(s, s->mb_y*mb_size, mb_size);
+
+ s->mb_x= 0;
+ }
+
+ assert(s->mb_x==0 && s->mb_y==s->mb_height);
+
+ /* try to detect the padding bug */
+ if( s->codec_id==CODEC_ID_MPEG4
+ && (s->workaround_bugs&FF_BUG_AUTODETECT)
+ && get_bits_left(&s->gb) >=0
+ && get_bits_left(&s->gb) < 48
+// && !s->resync_marker
+ && !s->data_partitioning){
+
+ const int bits_count= get_bits_count(&s->gb);
+ const int bits_left = s->gb.size_in_bits - bits_count;
+
+ if(bits_left==0){
+ s->padding_bug_score+=16;
+ } else if(bits_left != 1){
+ int v= show_bits(&s->gb, 8);
+ v|= 0x7F >> (7-(bits_count&7));
+
+ if(v==0x7F && bits_left<=8)
+ s->padding_bug_score--;
+ else if(v==0x7F && ((get_bits_count(&s->gb)+8)&8) && bits_left<=16)
+ s->padding_bug_score+= 4;
+ else
+ s->padding_bug_score++;
+ }
+ }
+
+ if(s->workaround_bugs&FF_BUG_AUTODETECT){
+ if(s->padding_bug_score > -2 && !s->data_partitioning /*&& (s->divx_version || !s->resync_marker)*/)
+ s->workaround_bugs |= FF_BUG_NO_PADDING;
+ else
+ s->workaround_bugs &= ~FF_BUG_NO_PADDING;
+ }
+
+ // handle formats which don't have unique end markers
+ if(s->msmpeg4_version || (s->workaround_bugs&FF_BUG_NO_PADDING)){ //FIXME perhaps solve this more cleanly
+ int left= get_bits_left(&s->gb);
+ int max_extra=7;
+
+ /* no markers in M$ crap */
+ if(s->msmpeg4_version && s->pict_type==FF_I_TYPE)
+ max_extra+= 17;
+
+ /* buggy padding but the frame should still end approximately at the bitstream end */
+ if((s->workaround_bugs&FF_BUG_NO_PADDING) && s->error_recognition>=3)
+ max_extra+= 48;
+ else if((s->workaround_bugs&FF_BUG_NO_PADDING))
+ max_extra+= 256*256*256*64;
+
+ if(left>max_extra){
+ av_log(s->avctx, AV_LOG_ERROR, "discarding %d junk bits at end, next would be %X\n", left, show_bits(&s->gb, 24));
+ }
+ else if(left<0){
+ av_log(s->avctx, AV_LOG_ERROR, "overreading %d bits\n", -left);
+ }else
+ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, AC_END|DC_END|MV_END);
+
+ return 0;
+ }
+
+ av_log(s->avctx, AV_LOG_ERROR, "slice end not reached but screenspace end (%d left %06X, score= %d)\n",
+ get_bits_left(&s->gb),
+ show_bits(&s->gb, 24), s->padding_bug_score);
+
+ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END)&part_mask);
+
+ return -1;
+}
+
+int ff_h263_decode_frame(AVCodecContext *avctx,
+ void *data, int *data_size,
+ const uint8_t *buf, int buf_size)
+{
+ MpegEncContext *s = avctx->priv_data;
+ int ret;
+ AVFrame *pict = data;
+
+#ifdef PRINT_FRAME_TIME
+uint64_t time= rdtsc();
+#endif
+ s->flags= avctx->flags;
+ s->flags2= avctx->flags2;
+
+ /* no supplementary picture */
+ if (buf_size == 0) {
+ /* special case for last picture */
+ if (s->low_delay==0 && s->next_picture_ptr) {
+ *pict= *(AVFrame*)s->next_picture_ptr;
+ s->next_picture_ptr= NULL;
+
+ *data_size = sizeof(AVFrame);
+ }
+
+ return 0;
+ }
+
+ if(s->flags&CODEC_FLAG_TRUNCATED){
+ int next;
+
+ if(CONFIG_MPEG4_DECODER && s->codec_id==CODEC_ID_MPEG4){
+ next= ff_mpeg4_find_frame_end(&s->parse_context, buf, buf_size);
+ }else if(CONFIG_H263_DECODER && s->codec_id==CODEC_ID_H263){
+ next= ff_h263_find_frame_end(&s->parse_context, buf, buf_size);
+ }else{
+ av_log(s->avctx, AV_LOG_ERROR, "this codec does not support truncated bitstreams\n");
+ return -1;
+ }
+
+ if( ff_combine_frame(&s->parse_context, next, (const uint8_t **)&buf, &buf_size) < 0 )
+ return buf_size;
+ }
+
+
+retry:
+
+ if(s->bitstream_buffer_size && (s->divx_packed || buf_size<20)){ //divx 5.01+/xvid frame reorder
+ init_get_bits(&s->gb, s->bitstream_buffer, s->bitstream_buffer_size*8);
+ }else
+ init_get_bits(&s->gb, buf, buf_size*8);
+ s->bitstream_buffer_size=0;
+
+ if (!s->context_initialized) {
+        if (MPV_common_init(s) < 0) //we need the idct permutation for reading a custom matrix
+ return -1;
+ }
+
+ /* We need to set current_picture_ptr before reading the header,
+     * otherwise we cannot store anything in there */
+ if(s->current_picture_ptr==NULL || s->current_picture_ptr->data[0]){
+ int i= ff_find_unused_picture(s, 0);
+ s->current_picture_ptr= &s->picture[i];
+ }
+
+ /* let's go :-) */
+ if (CONFIG_WMV2_DECODER && s->msmpeg4_version==5) {
+ ret= ff_wmv2_decode_picture_header(s);
+ } else if (CONFIG_MSMPEG4_DECODER && s->msmpeg4_version) {
+ ret = msmpeg4_decode_picture_header(s);
+ } else if (CONFIG_MPEG4_DECODER && s->h263_pred) {
+ if(s->avctx->extradata_size && s->picture_number==0){
+ GetBitContext gb;
+
+ init_get_bits(&gb, s->avctx->extradata, s->avctx->extradata_size*8);
+ ret = ff_mpeg4_decode_picture_header(s, &gb);
+ }
+ ret = ff_mpeg4_decode_picture_header(s, &s->gb);
+ } else if (CONFIG_H263I_DECODER && s->codec_id == CODEC_ID_H263I) {
+ ret = ff_intel_h263_decode_picture_header(s);
+ } else if (CONFIG_FLV_DECODER && s->h263_flv) {
+ ret = ff_flv_decode_picture_header(s);
+ } else {
+ ret = h263_decode_picture_header(s);
+ }
+
+ if(ret==FRAME_SKIPPED) return get_consumed_bytes(s, buf_size);
+
+    /* skip if the header was trashed */
+ if (ret < 0){
+ av_log(s->avctx, AV_LOG_ERROR, "header damaged\n");
+ return -1;
+ }
+
+ avctx->has_b_frames= !s->low_delay;
+
+ if(s->xvid_build==0 && s->divx_version==0 && s->lavc_build==0){
+ if(s->stream_codec_tag == AV_RL32("XVID") ||
+ s->codec_tag == AV_RL32("XVID") || s->codec_tag == AV_RL32("XVIX") ||
+ s->codec_tag == AV_RL32("RMP4"))
+ s->xvid_build= -1;
+#if 0
+ if(s->codec_tag == AV_RL32("DIVX") && s->vo_type==0 && s->vol_control_parameters==1
+ && s->padding_bug_score > 0 && s->low_delay) // XVID with modified fourcc
+ s->xvid_build= -1;
+#endif
+ }
+
+ if(s->xvid_build==0 && s->divx_version==0 && s->lavc_build==0){
+ if(s->codec_tag == AV_RL32("DIVX") && s->vo_type==0 && s->vol_control_parameters==0)
+ s->divx_version= 400; //divx 4
+ }
+
+ if(s->xvid_build && s->divx_version){
+ s->divx_version=
+ s->divx_build= 0;
+ }
+
+ if(s->workaround_bugs&FF_BUG_AUTODETECT){
+ if(s->codec_tag == AV_RL32("XVIX"))
+ s->workaround_bugs|= FF_BUG_XVID_ILACE;
+
+ if(s->codec_tag == AV_RL32("UMP4")){
+ s->workaround_bugs|= FF_BUG_UMP4;
+ }
+
+ if(s->divx_version>=500 && s->divx_build<1814){
+ s->workaround_bugs|= FF_BUG_QPEL_CHROMA;
+ }
+
+ if(s->divx_version>502 && s->divx_build<1814){
+ s->workaround_bugs|= FF_BUG_QPEL_CHROMA2;
+ }
+
+ if(s->xvid_build && s->xvid_build<=3)
+ s->padding_bug_score= 256*256*256*64;
+
+ if(s->xvid_build && s->xvid_build<=1)
+ s->workaround_bugs|= FF_BUG_QPEL_CHROMA;
+
+ if(s->xvid_build && s->xvid_build<=12)
+ s->workaround_bugs|= FF_BUG_EDGE;
+
+ if(s->xvid_build && s->xvid_build<=32)
+ s->workaround_bugs|= FF_BUG_DC_CLIP;
+
+#define SET_QPEL_FUNC(postfix1, postfix2) \
+ s->dsp.put_ ## postfix1 = ff_put_ ## postfix2;\
+ s->dsp.put_no_rnd_ ## postfix1 = ff_put_no_rnd_ ## postfix2;\
+ s->dsp.avg_ ## postfix1 = ff_avg_ ## postfix2;
+
+ if(s->lavc_build && s->lavc_build<4653)
+ s->workaround_bugs|= FF_BUG_STD_QPEL;
+
+ if(s->lavc_build && s->lavc_build<4655)
+ s->workaround_bugs|= FF_BUG_DIRECT_BLOCKSIZE;
+
+ if(s->lavc_build && s->lavc_build<4670){
+ s->workaround_bugs|= FF_BUG_EDGE;
+ }
+
+ if(s->lavc_build && s->lavc_build<=4712)
+ s->workaround_bugs|= FF_BUG_DC_CLIP;
+
+ if(s->divx_version)
+ s->workaround_bugs|= FF_BUG_DIRECT_BLOCKSIZE;
+//printf("padding_bug_score: %d\n", s->padding_bug_score);
+ if(s->divx_version==501 && s->divx_build==20020416)
+ s->padding_bug_score= 256*256*256*64;
+
+ if(s->divx_version && s->divx_version<500){
+ s->workaround_bugs|= FF_BUG_EDGE;
+ }
+
+ if(s->divx_version)
+ s->workaround_bugs|= FF_BUG_HPEL_CHROMA;
+#if 0
+ if(s->divx_version==500)
+ s->padding_bug_score= 256*256*256*64;
+
+ /* very ugly XVID padding bug detection FIXME/XXX solve this differently
+ * Let us hope this at least works.
+ */
+ if( s->resync_marker==0 && s->data_partitioning==0 && s->divx_version==0
+ && s->codec_id==CODEC_ID_MPEG4 && s->vo_type==0)
+ s->workaround_bugs|= FF_BUG_NO_PADDING;
+
+ if(s->lavc_build && s->lavc_build<4609) //FIXME not sure about the version num but a 4609 file seems ok
+ s->workaround_bugs|= FF_BUG_NO_PADDING;
+#endif
+ }
+
+ if(s->workaround_bugs& FF_BUG_STD_QPEL){
+ SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_old_c)
+ SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_old_c)
+ SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_old_c)
+ SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_old_c)
+ SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_old_c)
+ SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_old_c)
+
+ SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_old_c)
+ SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_old_c)
+ SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], qpel8_mc12_old_c)
+ SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_old_c)
+ SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_old_c)
+ SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_old_c)
+ }
+
+ if(avctx->debug & FF_DEBUG_BUGS)
+ av_log(s->avctx, AV_LOG_DEBUG, "bugs: %X lavc_build:%d xvid_build:%d divx_version:%d divx_build:%d %s\n",
+ s->workaround_bugs, s->lavc_build, s->xvid_build, s->divx_version, s->divx_build,
+ s->divx_packed ? "p" : "");
+
+#if 0 // dump bits per frame / qp / complexity
+{
+ static FILE *f=NULL;
+ if(!f) f=fopen("rate_qp_cplx.txt", "w");
+ fprintf(f, "%d %d %f\n", buf_size, s->qscale, buf_size*(double)s->qscale);
+}
+#endif
+
+#if HAVE_MMX
+ if(s->codec_id == CODEC_ID_MPEG4 && s->xvid_build && avctx->idct_algo == FF_IDCT_AUTO && (mm_flags & FF_MM_MMX)){
+ avctx->idct_algo= FF_IDCT_XVIDMMX;
+ avctx->coded_width= 0; // force reinit
+// dsputil_init(&s->dsp, avctx);
+ s->picture_number=0;
+ }
+#endif
+
+    /* After the H263 & MPEG-4 header decode we have the height, width, */
+    /* and other parameters, so we can now init the picture.            */
+    /* FIXME: given the way the H263 decoder is evolving, it should     */
+    /* have an H263EncContext                                           */
+
+ if ( s->width != avctx->coded_width
+ || s->height != avctx->coded_height) {
+ /* H.263 could change picture size any time */
+        ParseContext pc= s->parse_context; //FIXME move this demuxing hack to avformat
+ s->parse_context.buffer=0;
+ MPV_common_end(s);
+ s->parse_context= pc;
+ }
+ if (!s->context_initialized) {
+ avcodec_set_dimensions(avctx, s->width, s->height);
+
+ goto retry;
+ }
+
+ if((s->codec_id==CODEC_ID_H263 || s->codec_id==CODEC_ID_H263P || s->codec_id == CODEC_ID_H263I))
+ s->gob_index = ff_h263_get_gob_height(s);
+
+ // for hurry_up==5
+ s->current_picture.pict_type= s->pict_type;
+ s->current_picture.key_frame= s->pict_type == FF_I_TYPE;
+
+ /* skip B-frames if we don't have reference frames */
+ if(s->last_picture_ptr==NULL && (s->pict_type==FF_B_TYPE || s->dropable)) return get_consumed_bytes(s, buf_size);
+    /* skip B-frames if we are in a hurry */
+ if(avctx->hurry_up && s->pict_type==FF_B_TYPE) return get_consumed_bytes(s, buf_size);
+ if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==FF_B_TYPE)
+ || (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=FF_I_TYPE)
+ || avctx->skip_frame >= AVDISCARD_ALL)
+ return get_consumed_bytes(s, buf_size);
+ /* skip everything if we are in a hurry>=5 */
+ if(avctx->hurry_up>=5) return get_consumed_bytes(s, buf_size);
+
+ if(s->next_p_frame_damaged){
+ if(s->pict_type==FF_B_TYPE)
+ return get_consumed_bytes(s, buf_size);
+ else
+ s->next_p_frame_damaged=0;
+ }
+
+ if((s->avctx->flags2 & CODEC_FLAG2_FAST) && s->pict_type==FF_B_TYPE){
+ s->me.qpel_put= s->dsp.put_2tap_qpel_pixels_tab;
+ s->me.qpel_avg= s->dsp.avg_2tap_qpel_pixels_tab;
+ }else if((!s->no_rounding) || s->pict_type==FF_B_TYPE){
+ s->me.qpel_put= s->dsp.put_qpel_pixels_tab;
+ s->me.qpel_avg= s->dsp.avg_qpel_pixels_tab;
+ }else{
+ s->me.qpel_put= s->dsp.put_no_rnd_qpel_pixels_tab;
+ s->me.qpel_avg= s->dsp.avg_qpel_pixels_tab;
+ }
+
+ if(MPV_frame_start(s, avctx) < 0)
+ return -1;
+
+ ff_er_frame_start(s);
+
+ //the second part of the wmv2 header contains the MB skip bits which are stored in current_picture->mb_type
+ //which is not available before MPV_frame_start()
+ if (CONFIG_WMV2_DECODER && s->msmpeg4_version==5){
+ ret = ff_wmv2_decode_secondary_picture_header(s);
+ if(ret<0) return ret;
+ if(ret==1) goto intrax8_decoded;
+ }
+
+ /* decode each macroblock */
+ s->mb_x=0;
+ s->mb_y=0;
+
+ decode_slice(s);
+ while(s->mb_y<s->mb_height){
+ if(s->msmpeg4_version){
+ if(s->slice_height==0 || s->mb_x!=0 || (s->mb_y%s->slice_height)!=0 || get_bits_count(&s->gb) > s->gb.size_in_bits)
+ break;
+ }else{
+ if(ff_h263_resync(s)<0)
+ break;
+ }
+
+ if(s->msmpeg4_version<4 && s->h263_pred)
+ ff_mpeg4_clean_buffers(s);
+
+ decode_slice(s);
+ }
+
+ if (s->h263_msmpeg4 && s->msmpeg4_version<4 && s->pict_type==FF_I_TYPE)
+ if(!CONFIG_MSMPEG4_DECODER || msmpeg4_decode_ext_header(s, buf_size) < 0){
+ s->error_status_table[s->mb_num-1]= AC_ERROR|DC_ERROR|MV_ERROR;
+ }
+
+ assert(s->bitstream_buffer_size==0);
+    /* divx 5.01+ bitstream reorder stuff */
+ if(s->codec_id==CODEC_ID_MPEG4 && s->divx_packed){
+ int current_pos= get_bits_count(&s->gb)>>3;
+ int startcode_found=0;
+
+ if(buf_size - current_pos > 5){
+ int i;
+ for(i=current_pos; i<buf_size-3; i++){
+ if(buf[i]==0 && buf[i+1]==0 && buf[i+2]==1 && buf[i+3]==0xB6){
+ startcode_found=1;
+ break;
+ }
+ }
+ }
+ if(s->gb.buffer == s->bitstream_buffer && buf_size>7 && s->xvid_build){ //xvid style
+ startcode_found=1;
+ current_pos=0;
+ }
+
+ if(startcode_found){
+ av_fast_malloc(
+ &s->bitstream_buffer,
+ &s->allocated_bitstream_buffer_size,
+ buf_size - current_pos + FF_INPUT_BUFFER_PADDING_SIZE);
+ if (!s->bitstream_buffer)
+ return AVERROR(ENOMEM);
+ memcpy(s->bitstream_buffer, buf + current_pos, buf_size - current_pos);
+ s->bitstream_buffer_size= buf_size - current_pos;
+ }
+ }
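+    /* Note (illustrative, not part of the original source): this handles DivX
+     * "packed bitstream" files, where a frame and the following B-frame share
+     * one packet. If another VOP start code (00 00 01 B6) follows the current
+     * frame, the remainder is stashed in s->bitstream_buffer and is picked up
+     * on the next call by the divx_packed branch right after `retry`. */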
+
+intrax8_decoded:
+ ff_er_frame_end(s);
+
+frame_end:
+ MPV_frame_end(s);
+
+assert(s->current_picture.pict_type == s->current_picture_ptr->pict_type);
+assert(s->current_picture.pict_type == s->pict_type);
+ if (s->pict_type == FF_B_TYPE || s->low_delay) {
+ *pict= *(AVFrame*)s->current_picture_ptr;
+ } else if (s->last_picture_ptr != NULL) {
+ *pict= *(AVFrame*)s->last_picture_ptr;
+ }
+
+ if(s->last_picture_ptr || s->low_delay){
+ *data_size = sizeof(AVFrame);
+ ff_print_debug_info(s, pict);
+ }
+
+#ifdef PRINT_FRAME_TIME
+av_log(avctx, AV_LOG_DEBUG, "%"PRId64"\n", rdtsc()-time);
+#endif
+
+ return get_consumed_bytes(s, buf_size);
+}
+
+AVCodec h263_decoder = {
+ "h263",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_H263,
+ sizeof(MpegEncContext),
+ ff_h263_decode_init,
+ NULL,
+ ff_h263_decode_end,
+ ff_h263_decode_frame,
+ CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY,
+ /*.next = */NULL,
+ /*.flush = */ff_mpeg_flush,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name = */NULL_IF_CONFIG_SMALL("H.263 / H.263-1996, H.263+ / H.263-1998 / H.263 version 2"),
+};
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264.c
new file mode 100644
index 000000000..4f6e7b33f
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264.c
@@ -0,0 +1,3280 @@
+/*
+ * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/h264.c
+ * H.264 / AVC / MPEG4 part10 codec.
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#include "internal.h"
+#include "dsputil.h"
+#include "avcodec.h"
+#include "mpegvideo.h"
+#include "h264.h"
+#include "h264data.h"
+#include "h264_mvpred.h"
+#include "h264_parser.h"
+#include "golomb.h"
+#include "mathops.h"
+#include "rectangle.h"
+
+#include "cabac.h"
+
+//#undef NDEBUG
+#include <assert.h>
+
+static const uint8_t rem6[52]={
+0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3,
+};
+
+static const uint8_t div6[52]={
+0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8,
+};
+
+void ff_h264_write_back_intra_pred_mode(H264Context *h){
+ const int mb_xy= h->mb_xy;
+
+ h->intra4x4_pred_mode[mb_xy][0]= h->intra4x4_pred_mode_cache[7+8*1];
+ h->intra4x4_pred_mode[mb_xy][1]= h->intra4x4_pred_mode_cache[7+8*2];
+ h->intra4x4_pred_mode[mb_xy][2]= h->intra4x4_pred_mode_cache[7+8*3];
+ h->intra4x4_pred_mode[mb_xy][3]= h->intra4x4_pred_mode_cache[7+8*4];
+ h->intra4x4_pred_mode[mb_xy][4]= h->intra4x4_pred_mode_cache[4+8*4];
+ h->intra4x4_pred_mode[mb_xy][5]= h->intra4x4_pred_mode_cache[5+8*4];
+ h->intra4x4_pred_mode[mb_xy][6]= h->intra4x4_pred_mode_cache[6+8*4];
+}
+
+/**
+ * Checks if the top & left blocks are available (if needed) and changes the DC mode so it only uses the available blocks.
+ */
+int ff_h264_check_intra4x4_pred_mode(H264Context *h){
+ MpegEncContext * const s = &h->s;
+ static const int8_t top [12]= {-1, 0,LEFT_DC_PRED,-1,-1,-1,-1,-1, 0};
+ static const int8_t left[12]= { 0,-1, TOP_DC_PRED, 0,-1,-1,-1, 0,-1,DC_128_PRED};
+ int i;
+
+ if(!(h->top_samples_available&0x8000)){
+ for(i=0; i<4; i++){
+ int status= top[ h->intra4x4_pred_mode_cache[scan8[0] + i] ];
+ if(status<0){
+ av_log(h->s.avctx, AV_LOG_ERROR, "top block unavailable for requested intra4x4 mode %d at %d %d\n", status, s->mb_x, s->mb_y);
+ return -1;
+ } else if(status){
+ h->intra4x4_pred_mode_cache[scan8[0] + i]= status;
+ }
+ }
+ }
+
+ if((h->left_samples_available&0x8888)!=0x8888){
+ static const int mask[4]={0x8000,0x2000,0x80,0x20};
+ for(i=0; i<4; i++){
+ if(!(h->left_samples_available&mask[i])){
+ int status= left[ h->intra4x4_pred_mode_cache[scan8[0] + 8*i] ];
+ if(status<0){
+ av_log(h->s.avctx, AV_LOG_ERROR, "left block unavailable for requested intra4x4 mode %d at %d %d\n", status, s->mb_x, s->mb_y);
+ return -1;
+ } else if(status){
+ h->intra4x4_pred_mode_cache[scan8[0] + 8*i]= status;
+ }
+ }
+ }
+ }
+
+ return 0;
+} //FIXME cleanup like ff_h264_check_intra_pred_mode
+
+/**
+ * Checks if the top & left blocks are available (if needed) and changes the DC mode so it only uses the available blocks.
+ */
+int ff_h264_check_intra_pred_mode(H264Context *h, int mode){
+ MpegEncContext * const s = &h->s;
+ static const int8_t top [7]= {LEFT_DC_PRED8x8, 1,-1,-1};
+ static const int8_t left[7]= { TOP_DC_PRED8x8,-1, 2,-1,DC_128_PRED8x8};
+
+ if(mode > 6U) {
+ av_log(h->s.avctx, AV_LOG_ERROR, "out of range intra chroma pred mode at %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+
+ if(!(h->top_samples_available&0x8000)){
+ mode= top[ mode ];
+ if(mode<0){
+ av_log(h->s.avctx, AV_LOG_ERROR, "top block unavailable for requested intra mode at %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+ }
+
+ if((h->left_samples_available&0x8080) != 0x8080){
+ mode= left[ mode ];
+ if(h->left_samples_available&0x8080){ //mad cow disease mode, aka MBAFF + constrained_intra_pred
+ mode= ALZHEIMER_DC_L0T_PRED8x8 + (!(h->left_samples_available&0x8000)) + 2*(mode == DC_128_PRED8x8);
+ }
+ if(mode<0){
+ av_log(h->s.avctx, AV_LOG_ERROR, "left block unavailable for requested intra mode at %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+ }
+
+ return mode;
+}
+
+const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src, int *dst_length, int *consumed, int length){
+ int i, si, di;
+ uint8_t *dst;
+ int bufidx;
+
+// src[0]&0x80; //forbidden bit
+ h->nal_ref_idc= src[0]>>5;
+ h->nal_unit_type= src[0]&0x1F;
+
+ src++; length--;
+#if 0
+ for(i=0; i<length; i++)
+ printf("%2X ", src[i]);
+#endif
+
+#if HAVE_FAST_UNALIGNED
+# if HAVE_FAST_64BIT
+# define RS 7
+ for(i=0; i+1<length; i+=9){
+ if(!((~*(const uint64_t*)(src+i) & (*(const uint64_t*)(src+i) - 0x0100010001000101ULL)) & 0x8000800080008080ULL))
+# else
+# define RS 3
+ for(i=0; i+1<length; i+=5){
+ if(!((~*(const uint32_t*)(src+i) & (*(const uint32_t*)(src+i) - 0x01000101U)) & 0x80008080U))
+# endif
+ continue;
+ if(i>0 && !src[i]) i--;
+ while(src[i]) i++;
+#else
+# define RS 0
+ for(i=0; i+1<length; i+=2){
+ if(src[i]) continue;
+ if(i>0 && src[i-1]==0) i--;
+#endif
+ if(i+2<length && src[i+1]==0 && src[i+2]<=3){
+ if(src[i+2]!=3){
+ /* startcode, so we must be past the end */
+ length=i;
+ }
+ break;
+ }
+ i-= RS;
+ }
+
+ if(i>=length-1){ //no escaped 0
+ *dst_length= length;
+ *consumed= length+1; //+1 for the header
+ return src;
+ }
+
+ bufidx = h->nal_unit_type == NAL_DPC ? 1 : 0; // use second escape buffer for inter data
+ av_fast_malloc(&h->rbsp_buffer[bufidx], &h->rbsp_buffer_size[bufidx], length+FF_INPUT_BUFFER_PADDING_SIZE);
+ dst= h->rbsp_buffer[bufidx];
+
+ if (dst == NULL){
+ return NULL;
+ }
+
+//printf("decoding esc\n");
+ memcpy(dst, src, i);
+ si=di=i;
+ while(si+2<length){
+ //remove escapes (very rare 1:2^22)
+ if(src[si+2]>3){
+ dst[di++]= src[si++];
+ dst[di++]= src[si++];
+ }else if(src[si]==0 && src[si+1]==0){
+ if(src[si+2]==3){ //escape
+ dst[di++]= 0;
+ dst[di++]= 0;
+ si+=3;
+ continue;
+ }else //next start code
+ goto nsc;
+ }
+
+ dst[di++]= src[si++];
+ }
+ while(si<length)
+ dst[di++]= src[si++];
+nsc:
+
+ memset(dst+di, 0, FF_INPUT_BUFFER_PADDING_SIZE);
+
+ *dst_length= di;
+ *consumed= si + 1;//+1 for the header
+//FIXME store exact number of bits in the getbitcontext (it is needed for decoding)
+ return dst;
+}
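+/* Note (illustrative, not part of the original source): the copy loop above
+ * strips H.264 emulation-prevention bytes: every 00 00 03 sequence in the NAL
+ * payload is emitted as 00 00 with the 03 dropped, so e.g. the escaped bytes
+ * 00 00 03 01 come out as 00 00 01 in the RBSP buffer. */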
+
+int ff_h264_decode_rbsp_trailing(H264Context *h, const uint8_t *src){
+ int v= *src;
+ int r;
+
+ tprintf(h->s.avctx, "rbsp trailing %X\n", v);
+
+ for(r=1; r<9; r++){
+ if(v&1) return r;
+ v>>=1;
+ }
+ return 0;
+}
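+/* Note (illustrative, not part of the original source): the RBSP trailing
+ * pattern is a single '1' bit followed by zero padding, so the position of
+ * the lowest set bit in the last byte equals the number of trailing bits;
+ * e.g. a last byte of 0x80 yields 8, a last byte of 0x01 yields 1. */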
+
+/**
+ * IDCT transforms the 16 dc values and dequantizes them.
+ * @param qp quantization parameter
+ */
+static void h264_luma_dc_dequant_idct_c(DCTELEM *block, int qp, int qmul){
+#define stride 16
+ int i;
+ int temp[16]; //FIXME check if this is a good idea
+ static const int x_offset[4]={0, 1*stride, 4* stride, 5*stride};
+ static const int y_offset[4]={0, 2*stride, 8* stride, 10*stride};
+
+//memset(block, 64, 2*256);
+//return;
+ for(i=0; i<4; i++){
+ const int offset= y_offset[i];
+ const int z0= block[offset+stride*0] + block[offset+stride*4];
+ const int z1= block[offset+stride*0] - block[offset+stride*4];
+ const int z2= block[offset+stride*1] - block[offset+stride*5];
+ const int z3= block[offset+stride*1] + block[offset+stride*5];
+
+ temp[4*i+0]= z0+z3;
+ temp[4*i+1]= z1+z2;
+ temp[4*i+2]= z1-z2;
+ temp[4*i+3]= z0-z3;
+ }
+
+ for(i=0; i<4; i++){
+ const int offset= x_offset[i];
+ const int z0= temp[4*0+i] + temp[4*2+i];
+ const int z1= temp[4*0+i] - temp[4*2+i];
+ const int z2= temp[4*1+i] - temp[4*3+i];
+ const int z3= temp[4*1+i] + temp[4*3+i];
+
+ block[stride*0 +offset]= ((((z0 + z3)*qmul + 128 ) >> 8)); //FIXME think about merging this into decode_residual
+ block[stride*2 +offset]= ((((z1 + z2)*qmul + 128 ) >> 8));
+ block[stride*8 +offset]= ((((z1 - z2)*qmul + 128 ) >> 8));
+ block[stride*10+offset]= ((((z0 - z3)*qmul + 128 ) >> 8));
+ }
+}
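+/* Note (illustrative, not part of the original source): the two passes above
+ * apply a 4x4 Hadamard-style inverse transform to the 16 luma DC coefficients,
+ * which sit at the scattered x_offset/y_offset positions of the 16x16 block
+ * array; each result is then dequantized as (value*qmul + 128) >> 8. */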
+
+#undef xStride
+#undef stride
+
+static void chroma_dc_dequant_idct_c(DCTELEM *block, int qp, int qmul){
+ const int stride= 16*2;
+ const int xStride= 16;
+ int a,b,c,d,e;
+
+ a= block[stride*0 + xStride*0];
+ b= block[stride*0 + xStride*1];
+ c= block[stride*1 + xStride*0];
+ d= block[stride*1 + xStride*1];
+
+ e= a-b;
+ a= a+b;
+ b= c-d;
+ c= c+d;
+
+ block[stride*0 + xStride*0]= ((a+c)*qmul) >> 7;
+ block[stride*0 + xStride*1]= ((e+b)*qmul) >> 7;
+ block[stride*1 + xStride*0]= ((a-c)*qmul) >> 7;
+ block[stride*1 + xStride*1]= ((e-b)*qmul) >> 7;
+}
+
+static inline void mc_dir_part(H264Context *h, Picture *pic, int n, int square, int chroma_height, int delta, int list,
+ uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
+ int src_x_offset, int src_y_offset,
+ qpel_mc_func *qpix_op, h264_chroma_mc_func chroma_op){
+ MpegEncContext * const s = &h->s;
+ const int mx= h->mv_cache[list][ scan8[n] ][0] + src_x_offset*8;
+ int my= h->mv_cache[list][ scan8[n] ][1] + src_y_offset*8;
+ const int luma_xy= (mx&3) + ((my&3)<<2);
+ uint8_t * src_y = pic->data[0] + (mx>>2) + (my>>2)*h->mb_linesize;
+ uint8_t * src_cb, * src_cr;
+ int extra_width= h->emu_edge_width;
+ int extra_height= h->emu_edge_height;
+ int emu=0;
+ const int full_mx= mx>>2;
+ const int full_my= my>>2;
+ const int pic_width = 16*s->mb_width;
+ const int pic_height = 16*s->mb_height >> MB_FIELD;
+
+ if(mx&7) extra_width -= 3;
+ if(my&7) extra_height -= 3;
+
+ if( full_mx < 0-extra_width
+ || full_my < 0-extra_height
+ || full_mx + 16/*FIXME*/ > pic_width + extra_width
+ || full_my + 16/*FIXME*/ > pic_height + extra_height){
+ ff_emulated_edge_mc(s->edge_emu_buffer, src_y - 2 - 2*h->mb_linesize, h->mb_linesize, 16+5, 16+5/*FIXME*/, full_mx-2, full_my-2, pic_width, pic_height);
+ src_y= s->edge_emu_buffer + 2 + 2*h->mb_linesize;
+ emu=1;
+ }
+
+ qpix_op[luma_xy](dest_y, src_y, h->mb_linesize); //FIXME try variable height perhaps?
+ if(!square){
+ qpix_op[luma_xy](dest_y + delta, src_y + delta, h->mb_linesize);
+ }
+
+ if(CONFIG_GRAY && s->flags&CODEC_FLAG_GRAY) return;
+
+ if(MB_FIELD){
+ // chroma offset when predicting from a field of opposite parity
+ my += 2 * ((s->mb_y & 1) - (pic->reference - 1));
+ emu |= (my>>3) < 0 || (my>>3) + 8 >= (pic_height>>1);
+ }
+ src_cb= pic->data[1] + (mx>>3) + (my>>3)*h->mb_uvlinesize;
+ src_cr= pic->data[2] + (mx>>3) + (my>>3)*h->mb_uvlinesize;
+
+ if(emu){
+ ff_emulated_edge_mc(s->edge_emu_buffer, src_cb, h->mb_uvlinesize, 9, 9/*FIXME*/, (mx>>3), (my>>3), pic_width>>1, pic_height>>1);
+ src_cb= s->edge_emu_buffer;
+ }
+ chroma_op(dest_cb, src_cb, h->mb_uvlinesize, chroma_height, mx&7, my&7);
+
+ if(emu){
+ ff_emulated_edge_mc(s->edge_emu_buffer, src_cr, h->mb_uvlinesize, 9, 9/*FIXME*/, (mx>>3), (my>>3), pic_width>>1, pic_height>>1);
+ src_cr= s->edge_emu_buffer;
+ }
+ chroma_op(dest_cr, src_cr, h->mb_uvlinesize, chroma_height, mx&7, my&7);
+}
+
+static inline void mc_part_std(H264Context *h, int n, int square, int chroma_height, int delta,
+ uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
+ int x_offset, int y_offset,
+ qpel_mc_func *qpix_put, h264_chroma_mc_func chroma_put,
+ qpel_mc_func *qpix_avg, h264_chroma_mc_func chroma_avg,
+ int list0, int list1){
+ MpegEncContext * const s = &h->s;
+ qpel_mc_func *qpix_op= qpix_put;
+ h264_chroma_mc_func chroma_op= chroma_put;
+
+ dest_y += 2*x_offset + 2*y_offset*h-> mb_linesize;
+ dest_cb += x_offset + y_offset*h->mb_uvlinesize;
+ dest_cr += x_offset + y_offset*h->mb_uvlinesize;
+ x_offset += 8*s->mb_x;
+ y_offset += 8*(s->mb_y >> MB_FIELD);
+
+ if(list0){
+ Picture *ref= &h->ref_list[0][ h->ref_cache[0][ scan8[n] ] ];
+ mc_dir_part(h, ref, n, square, chroma_height, delta, 0,
+ dest_y, dest_cb, dest_cr, x_offset, y_offset,
+ qpix_op, chroma_op);
+
+ qpix_op= qpix_avg;
+ chroma_op= chroma_avg;
+ }
+
+ if(list1){
+ Picture *ref= &h->ref_list[1][ h->ref_cache[1][ scan8[n] ] ];
+ mc_dir_part(h, ref, n, square, chroma_height, delta, 1,
+ dest_y, dest_cb, dest_cr, x_offset, y_offset,
+ qpix_op, chroma_op);
+ }
+}
+
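+// Weighted motion compensation for a partition: bidirectional prediction is
+// combined with explicit or implicit weights via the biweight functions;
+// unidirectional prediction is scaled and offset with the weight functions.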
+static inline void mc_part_weighted(H264Context *h, int n, int square, int chroma_height, int delta,
+ uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
+ int x_offset, int y_offset,
+ qpel_mc_func *qpix_put, h264_chroma_mc_func chroma_put,
+ h264_weight_func luma_weight_op, h264_weight_func chroma_weight_op,
+ h264_biweight_func luma_weight_avg, h264_biweight_func chroma_weight_avg,
+ int list0, int list1){
+ MpegEncContext * const s = &h->s;
+
+ dest_y += 2*x_offset + 2*y_offset*h-> mb_linesize;
+ dest_cb += x_offset + y_offset*h->mb_uvlinesize;
+ dest_cr += x_offset + y_offset*h->mb_uvlinesize;
+ x_offset += 8*s->mb_x;
+ y_offset += 8*(s->mb_y >> MB_FIELD);
+
+ if(list0 && list1){
+ /* don't optimize for luma-only case, since B-frames usually
+ * use implicit weights => chroma too. */
+ uint8_t *tmp_cb = s->obmc_scratchpad;
+ uint8_t *tmp_cr = s->obmc_scratchpad + 8;
+ uint8_t *tmp_y = s->obmc_scratchpad + 8*h->mb_uvlinesize;
+ int refn0 = h->ref_cache[0][ scan8[n] ];
+ int refn1 = h->ref_cache[1][ scan8[n] ];
+
+ mc_dir_part(h, &h->ref_list[0][refn0], n, square, chroma_height, delta, 0,
+ dest_y, dest_cb, dest_cr,
+ x_offset, y_offset, qpix_put, chroma_put);
+ mc_dir_part(h, &h->ref_list[1][refn1], n, square, chroma_height, delta, 1,
+ tmp_y, tmp_cb, tmp_cr,
+ x_offset, y_offset, qpix_put, chroma_put);
+
+ if(h->use_weight == 2){
+ int weight0 = h->implicit_weight[refn0][refn1];
+ int weight1 = 64 - weight0;
+ luma_weight_avg( dest_y, tmp_y, h-> mb_linesize, 5, weight0, weight1, 0);
+ chroma_weight_avg(dest_cb, tmp_cb, h->mb_uvlinesize, 5, weight0, weight1, 0);
+ chroma_weight_avg(dest_cr, tmp_cr, h->mb_uvlinesize, 5, weight0, weight1, 0);
+ }else{
+ luma_weight_avg(dest_y, tmp_y, h->mb_linesize, h->luma_log2_weight_denom,
+ h->luma_weight[0][refn0], h->luma_weight[1][refn1],
+ h->luma_offset[0][refn0] + h->luma_offset[1][refn1]);
+ chroma_weight_avg(dest_cb, tmp_cb, h->mb_uvlinesize, h->chroma_log2_weight_denom,
+ h->chroma_weight[0][refn0][0], h->chroma_weight[1][refn1][0],
+ h->chroma_offset[0][refn0][0] + h->chroma_offset[1][refn1][0]);
+ chroma_weight_avg(dest_cr, tmp_cr, h->mb_uvlinesize, h->chroma_log2_weight_denom,
+ h->chroma_weight[0][refn0][1], h->chroma_weight[1][refn1][1],
+ h->chroma_offset[0][refn0][1] + h->chroma_offset[1][refn1][1]);
+ }
+ }else{
+ int list = list1 ? 1 : 0;
+ int refn = h->ref_cache[list][ scan8[n] ];
+ Picture *ref= &h->ref_list[list][refn];
+ mc_dir_part(h, ref, n, square, chroma_height, delta, list,
+ dest_y, dest_cb, dest_cr, x_offset, y_offset,
+ qpix_put, chroma_put);
+
+ luma_weight_op(dest_y, h->mb_linesize, h->luma_log2_weight_denom,
+ h->luma_weight[list][refn], h->luma_offset[list][refn]);
+ if(h->use_weight_chroma){
+ chroma_weight_op(dest_cb, h->mb_uvlinesize, h->chroma_log2_weight_denom,
+ h->chroma_weight[list][refn][0], h->chroma_offset[list][refn][0]);
+ chroma_weight_op(dest_cr, h->mb_uvlinesize, h->chroma_log2_weight_denom,
+ h->chroma_weight[list][refn][1], h->chroma_offset[list][refn][1]);
+ }
+ }
+}
+
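+// Dispatch motion compensation of a partition to the weighted or the
+// standard path, depending on the slice's weighted-prediction mode.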
+static inline void mc_part(H264Context *h, int n, int square, int chroma_height, int delta,
+ uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
+ int x_offset, int y_offset,
+ qpel_mc_func *qpix_put, h264_chroma_mc_func chroma_put,
+ qpel_mc_func *qpix_avg, h264_chroma_mc_func chroma_avg,
+ h264_weight_func *weight_op, h264_biweight_func *weight_avg,
+ int list0, int list1){
+ if((h->use_weight==2 && list0 && list1
+ && (h->implicit_weight[ h->ref_cache[0][scan8[n]] ][ h->ref_cache[1][scan8[n]] ] != 32))
+ || h->use_weight==1)
+ mc_part_weighted(h, n, square, chroma_height, delta, dest_y, dest_cb, dest_cr,
+ x_offset, y_offset, qpix_put, chroma_put,
+ weight_op[0], weight_op[3], weight_avg[0], weight_avg[3], list0, list1);
+ else
+ mc_part_std(h, n, square, chroma_height, delta, dest_y, dest_cb, dest_cr,
+ x_offset, y_offset, qpix_put, chroma_put, qpix_avg, chroma_avg, list0, list1);
+}
+
+static inline void prefetch_motion(H264Context *h, int list){
+ /* fetch pixels for the estimated mv 4 macroblocks ahead,
+ * optimized for 64-byte cache lines */
+ MpegEncContext * const s = &h->s;
+ const int refn = h->ref_cache[list][scan8[0]];
+ if(refn >= 0){
+ const int mx= (h->mv_cache[list][scan8[0]][0]>>2) + 16*s->mb_x + 8;
+ const int my= (h->mv_cache[list][scan8[0]][1]>>2) + 16*s->mb_y;
+ uint8_t **src= h->ref_list[list][refn].data;
+ int off= mx + (my + (s->mb_x&3)*4)*h->mb_linesize + 64;
+ s->dsp.prefetch(src[0]+off, s->linesize, 4);
+ off= (mx>>1) + ((my>>1) + (s->mb_x&7))*s->uvlinesize + 64;
+ s->dsp.prefetch(src[1]+off, src[2]-src[1], 2);
+ }
+}
+
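+// Inter prediction for a whole macroblock: split it into partitions
+// (16x16, 16x8, 8x16, 8x8 and sub-partitions) and call mc_part() for each.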
+static void hl_motion(H264Context *h, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
+ qpel_mc_func (*qpix_put)[16], h264_chroma_mc_func (*chroma_put),
+ qpel_mc_func (*qpix_avg)[16], h264_chroma_mc_func (*chroma_avg),
+ h264_weight_func *weight_op, h264_biweight_func *weight_avg){
+ MpegEncContext * const s = &h->s;
+ const int mb_xy= h->mb_xy;
+ const int mb_type= s->current_picture.mb_type[mb_xy];
+
+ assert(IS_INTER(mb_type));
+
+ prefetch_motion(h, 0);
+
+ if(IS_16X16(mb_type)){
+ mc_part(h, 0, 1, 8, 0, dest_y, dest_cb, dest_cr, 0, 0,
+ qpix_put[0], chroma_put[0], qpix_avg[0], chroma_avg[0],
+ weight_op, weight_avg,
+ IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1));
+ }else if(IS_16X8(mb_type)){
+ mc_part(h, 0, 0, 4, 8, dest_y, dest_cb, dest_cr, 0, 0,
+ qpix_put[1], chroma_put[0], qpix_avg[1], chroma_avg[0],
+ &weight_op[1], &weight_avg[1],
+ IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1));
+ mc_part(h, 8, 0, 4, 8, dest_y, dest_cb, dest_cr, 0, 4,
+ qpix_put[1], chroma_put[0], qpix_avg[1], chroma_avg[0],
+ &weight_op[1], &weight_avg[1],
+ IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1));
+ }else if(IS_8X16(mb_type)){
+ mc_part(h, 0, 0, 8, 8*h->mb_linesize, dest_y, dest_cb, dest_cr, 0, 0,
+ qpix_put[1], chroma_put[1], qpix_avg[1], chroma_avg[1],
+ &weight_op[2], &weight_avg[2],
+ IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1));
+ mc_part(h, 4, 0, 8, 8*h->mb_linesize, dest_y, dest_cb, dest_cr, 4, 0,
+ qpix_put[1], chroma_put[1], qpix_avg[1], chroma_avg[1],
+ &weight_op[2], &weight_avg[2],
+ IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1));
+ }else{
+ int i;
+
+ assert(IS_8X8(mb_type));
+
+ for(i=0; i<4; i++){
+ const int sub_mb_type= h->sub_mb_type[i];
+ const int n= 4*i;
+ int x_offset= (i&1)<<2;
+ int y_offset= (i&2)<<1;
+
+ if(IS_SUB_8X8(sub_mb_type)){
+ mc_part(h, n, 1, 4, 0, dest_y, dest_cb, dest_cr, x_offset, y_offset,
+ qpix_put[1], chroma_put[1], qpix_avg[1], chroma_avg[1],
+ &weight_op[3], &weight_avg[3],
+ IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
+ }else if(IS_SUB_8X4(sub_mb_type)){
+ mc_part(h, n , 0, 2, 4, dest_y, dest_cb, dest_cr, x_offset, y_offset,
+ qpix_put[2], chroma_put[1], qpix_avg[2], chroma_avg[1],
+ &weight_op[4], &weight_avg[4],
+ IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
+ mc_part(h, n+2, 0, 2, 4, dest_y, dest_cb, dest_cr, x_offset, y_offset+2,
+ qpix_put[2], chroma_put[1], qpix_avg[2], chroma_avg[1],
+ &weight_op[4], &weight_avg[4],
+ IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
+ }else if(IS_SUB_4X8(sub_mb_type)){
+ mc_part(h, n , 0, 4, 4*h->mb_linesize, dest_y, dest_cb, dest_cr, x_offset, y_offset,
+ qpix_put[2], chroma_put[2], qpix_avg[2], chroma_avg[2],
+ &weight_op[5], &weight_avg[5],
+ IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
+ mc_part(h, n+1, 0, 4, 4*h->mb_linesize, dest_y, dest_cb, dest_cr, x_offset+2, y_offset,
+ qpix_put[2], chroma_put[2], qpix_avg[2], chroma_avg[2],
+ &weight_op[5], &weight_avg[5],
+ IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
+ }else{
+ int j;
+ assert(IS_SUB_4X4(sub_mb_type));
+ for(j=0; j<4; j++){
+ int sub_x_offset= x_offset + 2*(j&1);
+ int sub_y_offset= y_offset + (j&2);
+ mc_part(h, n+j, 1, 2, 0, dest_y, dest_cb, dest_cr, sub_x_offset, sub_y_offset,
+ qpix_put[2], chroma_put[2], qpix_avg[2], chroma_avg[2],
+ &weight_op[6], &weight_avg[6],
+ IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
+ }
+ }
+ }
+ }
+
+ prefetch_motion(h, 1);
+}
+
+
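+// Free all per-context tables and the per-thread scratch/RBSP buffers.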
+static void free_tables(H264Context *h){
+ int i;
+ H264Context *hx;
+ av_freep(&h->intra4x4_pred_mode);
+ av_freep(&h->chroma_pred_mode_table);
+ av_freep(&h->cbp_table);
+ av_freep(&h->mvd_table[0]);
+ av_freep(&h->mvd_table[1]);
+ av_freep(&h->direct_table);
+ av_freep(&h->non_zero_count);
+ av_freep(&h->slice_table_base);
+ h->slice_table= NULL;
+ av_freep(&h->list_counts);
+
+ av_freep(&h->mb2b_xy);
+ av_freep(&h->mb2b8_xy);
+
+ for(i = 0; i < MAX_THREADS; i++) {
+ hx = h->thread_context[i];
+ if(!hx) continue;
+ av_freep(&hx->top_borders[1]);
+ av_freep(&hx->top_borders[0]);
+ av_freep(&hx->s.obmc_scratchpad);
+ av_freep(&hx->rbsp_buffer[1]);
+ av_freep(&hx->rbsp_buffer[0]);
+ hx->rbsp_buffer_size[0] = 0;
+ hx->rbsp_buffer_size[1] = 0;
+ if (i) av_freep(&h->thread_context[i]);
+ }
+}
+
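+// Build the 8x8 dequantization tables for all 52 QP values from the PPS
+// scaling matrices; when both matrices are identical the second table simply
+// aliases the first. The layout is transposed for non-C IDCT implementations.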
+static void init_dequant8_coeff_table(H264Context *h){
+ int i,q,x;
+ const int transpose = (h->s.dsp.h264_idct8_add != ff_h264_idct8_add_c); //FIXME ugly
+ h->dequant8_coeff[0] = h->dequant8_buffer[0];
+ h->dequant8_coeff[1] = h->dequant8_buffer[1];
+
+ for(i=0; i<2; i++ ){
+ if(i && !memcmp(h->pps.scaling_matrix8[0], h->pps.scaling_matrix8[1], 64*sizeof(uint8_t))){
+ h->dequant8_coeff[1] = h->dequant8_buffer[0];
+ break;
+ }
+
+ for(q=0; q<52; q++){
+ int shift = div6[q];
+ int idx = rem6[q];
+ for(x=0; x<64; x++)
+ h->dequant8_coeff[i][q][transpose ? (x>>3)|((x&7)<<3) : x] =
+ ((uint32_t)dequant8_coeff_init[idx][ dequant8_coeff_init_scan[((x>>1)&12) | (x&3)] ] *
+ h->pps.scaling_matrix8[i][x]) << shift;
+ }
+ }
+}
+
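+// Build the 4x4 dequantization tables in the same way, one per scaling
+// matrix (6 of them), sharing buffers between matrices that compare equal.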
+static void init_dequant4_coeff_table(H264Context *h){
+ int i,j,q,x;
+ const int transpose = (h->s.dsp.h264_idct_add != ff_h264_idct_add_c); //FIXME ugly
+ for(i=0; i<6; i++ ){
+ h->dequant4_coeff[i] = h->dequant4_buffer[i];
+ for(j=0; j<i; j++){
+ if(!memcmp(h->pps.scaling_matrix4[j], h->pps.scaling_matrix4[i], 16*sizeof(uint8_t))){
+ h->dequant4_coeff[i] = h->dequant4_buffer[j];
+ break;
+ }
+ }
+ if(j<i)
+ continue;
+
+ for(q=0; q<52; q++){
+ int shift = div6[q] + 2;
+ int idx = rem6[q];
+ for(x=0; x<16; x++)
+ h->dequant4_coeff[i][q][transpose ? (x>>2)|((x<<2)&0xF) : x] =
+ ((uint32_t)dequant4_coeff_init[idx][(x&1) + ((x>>2)&1)] *
+ h->pps.scaling_matrix4[i][x]) << shift;
+ }
+ }
+}
+
+static void init_dequant_tables(H264Context *h){
+ int i,x;
+ init_dequant4_coeff_table(h);
+ if(h->pps.transform_8x8_mode)
+ init_dequant8_coeff_table(h);
+ if(h->sps.transform_bypass){
+ for(i=0; i<6; i++)
+ for(x=0; x<16; x++)
+ h->dequant4_coeff[i][0][x] = 1<<6;
+ if(h->pps.transform_8x8_mode)
+ for(i=0; i<2; i++)
+ for(x=0; x<64; x++)
+ h->dequant8_coeff[i][0][x] = 1<<6;
+ }
+}
+
+
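+// Allocate the per-picture decoding tables (prediction modes, non-zero
+// counts, slice table, mvd/direct tables, ...) and the mb-to-block index
+// maps; on failure everything is released again via free_tables().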
+int ff_h264_alloc_tables(H264Context *h){
+ MpegEncContext * const s = &h->s;
+ const int big_mb_num= s->mb_stride * (s->mb_height+1);
+ int x,y;
+
+ FF_ALLOCZ_OR_GOTO(h->s.avctx, h->intra4x4_pred_mode, big_mb_num * 8 * sizeof(uint8_t), fail)
+
+ FF_ALLOCZ_OR_GOTO(h->s.avctx, h->non_zero_count , big_mb_num * 32 * sizeof(uint8_t), fail)
+ FF_ALLOCZ_OR_GOTO(h->s.avctx, h->slice_table_base , (big_mb_num+s->mb_stride) * sizeof(*h->slice_table_base), fail)
+ FF_ALLOCZ_OR_GOTO(h->s.avctx, h->cbp_table, big_mb_num * sizeof(uint16_t), fail)
+
+ FF_ALLOCZ_OR_GOTO(h->s.avctx, h->chroma_pred_mode_table, big_mb_num * sizeof(uint8_t), fail)
+ FF_ALLOCZ_OR_GOTO(h->s.avctx, h->mvd_table[0], 32*big_mb_num * sizeof(uint16_t), fail);
+ FF_ALLOCZ_OR_GOTO(h->s.avctx, h->mvd_table[1], 32*big_mb_num * sizeof(uint16_t), fail);
+ FF_ALLOCZ_OR_GOTO(h->s.avctx, h->direct_table, 32*big_mb_num * sizeof(uint8_t) , fail);
+ FF_ALLOCZ_OR_GOTO(h->s.avctx, h->list_counts, big_mb_num * sizeof(uint8_t), fail)
+
+ memset(h->slice_table_base, -1, (big_mb_num+s->mb_stride) * sizeof(*h->slice_table_base));
+ h->slice_table= h->slice_table_base + s->mb_stride*2 + 1;
+
+ FF_ALLOCZ_OR_GOTO(h->s.avctx, h->mb2b_xy , big_mb_num * sizeof(uint32_t), fail);
+ FF_ALLOCZ_OR_GOTO(h->s.avctx, h->mb2b8_xy , big_mb_num * sizeof(uint32_t), fail);
+ for(y=0; y<s->mb_height; y++){
+ for(x=0; x<s->mb_width; x++){
+ const int mb_xy= x + y*s->mb_stride;
+ const int b_xy = 4*x + 4*y*h->b_stride;
+ const int b8_xy= 2*x + 2*y*h->b8_stride;
+
+ h->mb2b_xy [mb_xy]= b_xy;
+ h->mb2b8_xy[mb_xy]= b8_xy;
+ }
+ }
+
+ s->obmc_scratchpad = NULL;
+
+ if(!h->dequant4_coeff[0])
+ init_dequant_tables(h);
+
+ return 0;
+fail:
+ free_tables(h);
+ return -1;
+}
+
+/**
+ * Share the tables allocated by ff_h264_alloc_tables() with another context
+ * thread; mimics alloc_tables(), but copies pointers instead of allocating.
+ */
+static void clone_tables(H264Context *dst, H264Context *src){
+ dst->intra4x4_pred_mode = src->intra4x4_pred_mode;
+ dst->non_zero_count = src->non_zero_count;
+ dst->slice_table = src->slice_table;
+ dst->cbp_table = src->cbp_table;
+ dst->mb2b_xy = src->mb2b_xy;
+ dst->mb2b8_xy = src->mb2b8_xy;
+ dst->chroma_pred_mode_table = src->chroma_pred_mode_table;
+ dst->mvd_table[0] = src->mvd_table[0];
+ dst->mvd_table[1] = src->mvd_table[1];
+ dst->direct_table = src->direct_table;
+ dst->list_counts = src->list_counts;
+
+ dst->s.obmc_scratchpad = NULL;
+ ff_h264_pred_init(&dst->hpc, src->s.codec_id);
+}
+
+/**
+ * Initialize the context and allocate buffers that are not shared
+ * among multiple threads.
+ */
+static int context_init(H264Context *h){
+ FF_ALLOCZ_OR_GOTO(h->s.avctx, h->top_borders[0], h->s.mb_width * (16+8+8) * sizeof(uint8_t), fail)
+ FF_ALLOCZ_OR_GOTO(h->s.avctx, h->top_borders[1], h->s.mb_width * (16+8+8) * sizeof(uint8_t), fail)
+
+ return 0;
+fail:
+ return -1; // free_tables will clean up for us
+}
+
+static av_cold void common_init(H264Context *h){
+ MpegEncContext * const s = &h->s;
+
+ s->width = s->avctx->width;
+ s->height = s->avctx->height;
+ s->codec_id= s->avctx->codec->id;
+
+ ff_h264_pred_init(&h->hpc, s->codec_id);
+
+ h->dequant_coeff_pps= -1;
+ s->unrestricted_mv=1;
+ s->decode=1; //FIXME
+
+ dsputil_init(&s->dsp, s->avctx); // needed so that idct permutation is known early
+
+ memset(h->pps.scaling_matrix4, 16, 6*16*sizeof(uint8_t));
+ memset(h->pps.scaling_matrix8, 16, 2*64*sizeof(uint8_t));
+}
+
+// ffdshow custom code - adapted for DirectShow
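+// Returns 1 when the stream is in AVC ("avcC", length-prefixed NAL) format
+// rather than Annex-B: either the first extradata byte is 1 or the codec tag
+// is 'avc1'/'AVC1'.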
+av_cold int avcodec_h264_decode_init_is_avc(AVCodecContext *avctx){
+ if(avctx->extradata_size > 0 && avctx->extradata &&
+ (*(char *)avctx->extradata == 1 || (avctx->codec_tag == 0x31637661 || avctx->codec_tag == 0x31435641))){
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+av_cold int ff_h264_decode_init(AVCodecContext *avctx){
+ H264Context *h= avctx->priv_data;
+ MpegEncContext * const s = &h->s;
+
+ MPV_decode_defaults(s);
+
+ s->avctx = avctx;
+ common_init(h);
+
+ s->out_format = FMT_H264;
+ s->workaround_bugs= avctx->workaround_bugs;
+
+ // set defaults
+// s->decode_mb= ff_h263_decode_mb;
+ s->quarter_sample = 1;
+ if(!avctx->has_b_frames)
+ s->low_delay= 1;
+
+ /* ffdshow custom code (begin) */
+ if(avctx->codec_id == CODEC_ID_SVQ3)
+ avctx->pix_fmt= PIX_FMT_YUVJ420P;
+ else
+ avctx->pix_fmt= PIX_FMT_YUV420P;
+ /* ffdshow custom code (end) */
+ avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
+
+ ff_h264_decode_init_vlc();
+
+ /* ffdshow custom code (begin) */
+ h->is_avc = avcodec_h264_decode_init_is_avc(avctx);
+ h->got_avcC = 0;
+ /* ffdshow custom code (end) */
+
+ h->thread_context[0] = h;
+ h->outputed_poc = INT_MIN;
+ h->prev_poc_msb= 1<<16;
+ ff_h264_reset_sei(h);
+ return 0;
+}
+
+int ff_h264_frame_start(H264Context *h){
+ MpegEncContext * const s = &h->s;
+ int i;
+
+ if(MPV_frame_start(s, s->avctx) < 0)
+ return -1;
+ ff_er_frame_start(s);
+ /*
+ * MPV_frame_start uses pict_type to derive key_frame.
+ * This is incorrect for H.264; IDR markings must be used.
+ * Zero here; IDR markings per slice in frame or fields are ORed in later.
+ * See decode_nal_units().
+ */
+ s->current_picture_ptr->key_frame= 0;
+ s->current_picture_ptr->mmco_reset= 0;
+
+ assert(s->linesize && s->uvlinesize);
+
+ for(i=0; i<16; i++){
+ h->block_offset[i]= 4*((scan8[i] - scan8[0])&7) + 4*s->linesize*((scan8[i] - scan8[0])>>3);
+ h->block_offset[24+i]= 4*((scan8[i] - scan8[0])&7) + 8*s->linesize*((scan8[i] - scan8[0])>>3);
+ }
+ for(i=0; i<4; i++){
+ h->block_offset[16+i]=
+ h->block_offset[20+i]= 4*((scan8[i] - scan8[0])&7) + 4*s->uvlinesize*((scan8[i] - scan8[0])>>3);
+ h->block_offset[24+16+i]=
+ h->block_offset[24+20+i]= 4*((scan8[i] - scan8[0])&7) + 8*s->uvlinesize*((scan8[i] - scan8[0])>>3);
+ }
+
+ /* can't be in alloc_tables because linesize isn't known there.
+ * FIXME: redo bipred weight to not require extra buffer? */
+ for(i = 0; i < s->avctx->thread_count; i++)
+ if(!h->thread_context[i]->s.obmc_scratchpad)
+ h->thread_context[i]->s.obmc_scratchpad = av_malloc(16*2*s->linesize + 8*2*s->uvlinesize);
+
+ /* some macroblocks will be accessed before they're available */
+ if(FRAME_MBAFF || s->avctx->thread_count > 1)
+ memset(h->slice_table, -1, (s->mb_height*s->mb_stride-1) * sizeof(*h->slice_table));
+
+// s->decode= (s->flags&CODEC_FLAG_PSNR) || !s->encoding || s->current_picture.reference /*|| h->contains_intra*/ || 1;
+
+ // We mark the current picture as non-reference after allocating it, so
+ // that if we break out due to an error it can be released automatically
+ // in the next MPV_frame_start().
+ // SVQ3, like most other codecs, keeps only last/next/current pictures, so
+ // they get released even when marked as reference; besides, SVQ3 and others
+ // do not mark frames as reference later "naturally".
+ if(s->codec_id != CODEC_ID_SVQ3)
+ s->current_picture_ptr->reference= 0;
+
+ s->current_picture_ptr->field_poc[0]=
+ s->current_picture_ptr->field_poc[1]= INT_MAX;
+ assert(s->current_picture_ptr->long_ref==0);
+
+ return 0;
+}
+
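+// Save the last (still unfiltered) luma and chroma lines of this macroblock
+// into h->top_borders so the macroblock below can use them as its top
+// neighbour before the deblocking filter modifies the picture.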
+static inline void backup_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int simple){
+ MpegEncContext * const s = &h->s;
+ uint8_t *top_border;
+ int top_idx = 1;
+
+ src_y -= linesize;
+ src_cb -= uvlinesize;
+ src_cr -= uvlinesize;
+
+ if(!simple && FRAME_MBAFF){
+ if(s->mb_y&1){
+ if(!MB_MBAFF){
+ top_border = h->top_borders[0][s->mb_x];
+ AV_COPY128(top_border, src_y + 15*linesize);
+ if(simple || !CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
+ AV_COPY64(top_border+16, src_cb+7*uvlinesize);
+ AV_COPY64(top_border+24, src_cr+7*uvlinesize);
+ }
+ }
+ }else if(MB_MBAFF){
+ top_idx = 0;
+ }else
+ return;
+ }
+
+ top_border = h->top_borders[top_idx][s->mb_x];
+ // There are two lines saved: the line above the top macroblock of a pair,
+ // and the line above the bottom macroblock.
+ AV_COPY128(top_border, src_y + 16*linesize);
+
+ if(simple || !CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
+ AV_COPY64(top_border+16, src_cb+8*uvlinesize);
+ AV_COPY64(top_border+24, src_cr+8*uvlinesize);
+ }
+}
+
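+// Swap (or copy) the saved unfiltered border samples into the picture around
+// intra prediction, so that prediction uses unfiltered neighbours while the
+// deblocking filter later operates on the reconstructed picture.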
+static inline void xchg_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int xchg, int simple){
+ MpegEncContext * const s = &h->s;
+ int temp8, i;
+ uint64_t temp64;
+ int deblock_left;
+ int deblock_top;
+ int mb_xy;
+ int top_idx = 1;
+ uint8_t *top_border_m1;
+ uint8_t *top_border;
+
+ if(!simple && FRAME_MBAFF){
+ if(s->mb_y&1){
+ if(!MB_MBAFF)
+ return;
+ }else{
+ top_idx = MB_MBAFF ? 0 : 1;
+ }
+ }
+
+ if(h->deblocking_filter == 2) {
+ mb_xy = h->mb_xy;
+ deblock_left = h->slice_table[mb_xy] == h->slice_table[mb_xy - 1];
+ deblock_top = h->slice_table[mb_xy] == h->slice_table[h->top_mb_xy];
+ } else {
+ deblock_left = (s->mb_x > 0);
+ deblock_top = (s->mb_y > !!MB_FIELD);
+ }
+
+ src_y -= linesize + 1;
+ src_cb -= uvlinesize + 1;
+ src_cr -= uvlinesize + 1;
+
+ top_border_m1 = h->top_borders[top_idx][s->mb_x-1];
+ top_border = h->top_borders[top_idx][s->mb_x];
+
+#define XCHG(a,b,xchg)\
+if (xchg) AV_SWAP64(b,a);\
+else AV_COPY64(b,a);
+
+ if(deblock_top){
+ if(deblock_left){
+ XCHG(top_border_m1+8, src_y -7, 1);
+ }
+ XCHG(top_border+0, src_y +1, xchg);
+ XCHG(top_border+8, src_y +9, 1);
+ if(s->mb_x+1 < s->mb_width){
+ XCHG(h->top_borders[top_idx][s->mb_x+1], src_y +17, 1);
+ }
+ }
+
+ if(simple || !CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
+ if(deblock_top){
+ if(deblock_left){
+ XCHG(top_border_m1+16, src_cb -7, 1);
+ XCHG(top_border_m1+24, src_cr -7, 1);
+ }
+ XCHG(top_border+16, src_cb+1, 1);
+ XCHG(top_border+24, src_cr+1, 1);
+ }
+ }
+}
+
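+// Reconstruct one macroblock: intra prediction or inter motion compensation
+// followed by the inverse transform of the residual. 'simple' selects a fast
+// path that skips MBAFF, IPCM and CODEC_FLAG_GRAY handling.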
+static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple){
+ MpegEncContext * const s = &h->s;
+ const int mb_x= s->mb_x;
+ const int mb_y= s->mb_y;
+ const int mb_xy= h->mb_xy;
+ const int mb_type= s->current_picture.mb_type[mb_xy];
+ uint8_t *dest_y, *dest_cb, *dest_cr;
+ int linesize, uvlinesize /*dct_offset*/;
+ int i;
+ int *block_offset = &h->block_offset[0];
+ const int transform_bypass = !simple && (s->qscale == 0 && h->sps.transform_bypass);
+ /* is_h264 should always be true if SVQ3 is disabled. */
+ const int is_h264 = !CONFIG_SVQ3_DECODER || simple || s->codec_id == CODEC_ID_H264;
+ void (*idct_add)(uint8_t *dst, DCTELEM *block, int stride);
+ void (*idct_dc_add)(uint8_t *dst, DCTELEM *block, int stride);
+
+ dest_y = s->current_picture.data[0] + (mb_x + mb_y * s->linesize ) * 16;
+ dest_cb = s->current_picture.data[1] + (mb_x + mb_y * s->uvlinesize) * 8;
+ dest_cr = s->current_picture.data[2] + (mb_x + mb_y * s->uvlinesize) * 8;
+
+ s->dsp.prefetch(dest_y + (s->mb_x&3)*4*s->linesize + 64, s->linesize, 4);
+ s->dsp.prefetch(dest_cb + (s->mb_x&7)*s->uvlinesize + 64, dest_cr - dest_cb, 2);
+
+ h->list_counts[mb_xy]= h->list_count;
+
+ if (!simple && MB_FIELD) {
+ linesize = h->mb_linesize = s->linesize * 2;
+ uvlinesize = h->mb_uvlinesize = s->uvlinesize * 2;
+ block_offset = &h->block_offset[24];
+ if(mb_y&1){ //FIXME move out of this function?
+ dest_y -= s->linesize*15;
+ dest_cb-= s->uvlinesize*7;
+ dest_cr-= s->uvlinesize*7;
+ }
+ if(FRAME_MBAFF) {
+ int list;
+ for(list=0; list<h->list_count; list++){
+ if(!USES_LIST(mb_type, list))
+ continue;
+ if(IS_16X16(mb_type)){
+ int8_t *ref = &h->ref_cache[list][scan8[0]];
+ fill_rectangle(ref, 4, 4, 8, (16+*ref)^(s->mb_y&1), 1);
+ }else{
+ for(i=0; i<16; i+=4){
+ int ref = h->ref_cache[list][scan8[i]];
+ if(ref >= 0)
+ fill_rectangle(&h->ref_cache[list][scan8[i]], 2, 2, 8, (16+ref)^(s->mb_y&1), 1);
+ }
+ }
+ }
+ }
+ } else {
+ linesize = h->mb_linesize = s->linesize;
+ uvlinesize = h->mb_uvlinesize = s->uvlinesize;
+// dct_offset = s->linesize * 16;
+ }
+
+ if (!simple && IS_INTRA_PCM(mb_type)) {
+ for (i=0; i<16; i++) {
+ memcpy(dest_y + i* linesize, h->mb + i*8, 16);
+ }
+ for (i=0; i<8; i++) {
+ memcpy(dest_cb+ i*uvlinesize, h->mb + 128 + i*4, 8);
+ memcpy(dest_cr+ i*uvlinesize, h->mb + 160 + i*4, 8);
+ }
+ } else {
+ if(IS_INTRA(mb_type)){
+ if(h->deblocking_filter)
+ xchg_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, 1, simple);
+
+ if(simple || !CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
+ h->hpc.pred8x8[ h->chroma_pred_mode ](dest_cb, uvlinesize);
+ h->hpc.pred8x8[ h->chroma_pred_mode ](dest_cr, uvlinesize);
+ }
+
+ if(IS_INTRA4x4(mb_type)){
+ if(simple || !s->encoding){
+ if(IS_8x8DCT(mb_type)){
+ if(transform_bypass){
+ idct_dc_add =
+ idct_add = s->dsp.add_pixels8;
+ }else{
+ idct_dc_add = s->dsp.h264_idct8_dc_add;
+ idct_add = s->dsp.h264_idct8_add;
+ }
+ for(i=0; i<16; i+=4){
+ uint8_t * const ptr= dest_y + block_offset[i];
+ const int dir= h->intra4x4_pred_mode_cache[ scan8[i] ];
+ if(transform_bypass && h->sps.profile_idc==244 && dir<=1){
+ h->hpc.pred8x8l_add[dir](ptr, h->mb + i*16, linesize);
+ }else{
+ const int nnz = h->non_zero_count_cache[ scan8[i] ];
+ h->hpc.pred8x8l[ dir ](ptr, (h->topleft_samples_available<<i)&0x8000,
+ (h->topright_samples_available<<i)&0x4000, linesize);
+ if(nnz){
+ if(nnz == 1 && h->mb[i*16])
+ idct_dc_add(ptr, h->mb + i*16, linesize);
+ else
+ idct_add (ptr, h->mb + i*16, linesize);
+ }
+ }
+ }
+ }else{
+ if(transform_bypass){
+ idct_dc_add =
+ idct_add = s->dsp.add_pixels4;
+ }else{
+ idct_dc_add = s->dsp.h264_idct_dc_add;
+ idct_add = s->dsp.h264_idct_add;
+ }
+ for(i=0; i<16; i++){
+ uint8_t * const ptr= dest_y + block_offset[i];
+ const int dir= h->intra4x4_pred_mode_cache[ scan8[i] ];
+
+ if(transform_bypass && h->sps.profile_idc==244 && dir<=1){
+ h->hpc.pred4x4_add[dir](ptr, h->mb + i*16, linesize);
+ }else{
+ uint8_t *topright;
+ int nnz, tr;
+ if(dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED){
+ const int topright_avail= (h->topright_samples_available<<i)&0x8000;
+ assert(mb_y || linesize <= block_offset[i]);
+ if(!topright_avail){
+ tr= ptr[3 - linesize]*0x01010101;
+ topright= (uint8_t*) &tr;
+ }else
+ topright= ptr + 4 - linesize;
+ }else
+ topright= NULL;
+
+ h->hpc.pred4x4[ dir ](ptr, topright, linesize);
+ nnz = h->non_zero_count_cache[ scan8[i] ];
+ if(nnz){
+ if(is_h264){
+ if(nnz == 1 && h->mb[i*16])
+ idct_dc_add(ptr, h->mb + i*16, linesize);
+ else
+ idct_add (ptr, h->mb + i*16, linesize);
+ }else
+ ff_svq3_add_idct_c(ptr, h->mb + i*16, linesize, s->qscale, 0);
+ }
+ }
+ }
+ }
+ }
+ }else{
+ h->hpc.pred16x16[ h->intra16x16_pred_mode ](dest_y , linesize);
+ if(is_h264){
+ if(!transform_bypass)
+ h264_luma_dc_dequant_idct_c(h->mb, s->qscale, h->dequant4_coeff[0][s->qscale][0]);
+ }else
+ ff_svq3_luma_dc_dequant_idct_c(h->mb, s->qscale);
+ }
+ if(h->deblocking_filter)
+ xchg_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, 0, simple);
+ }else if(is_h264){
+ hl_motion(h, dest_y, dest_cb, dest_cr,
+ s->me.qpel_put, s->dsp.put_h264_chroma_pixels_tab,
+ s->me.qpel_avg, s->dsp.avg_h264_chroma_pixels_tab,
+ s->dsp.weight_h264_pixels_tab, s->dsp.biweight_h264_pixels_tab);
+ }
+
+
+ if(!IS_INTRA4x4(mb_type)){
+ if(is_h264){
+ if(IS_INTRA16x16(mb_type)){
+ if(transform_bypass){
+ if(h->sps.profile_idc==244 && (h->intra16x16_pred_mode==VERT_PRED8x8 || h->intra16x16_pred_mode==HOR_PRED8x8)){
+ h->hpc.pred16x16_add[h->intra16x16_pred_mode](dest_y, block_offset, h->mb, linesize);
+ }else{
+ for(i=0; i<16; i++){
+ if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16])
+ s->dsp.add_pixels4(dest_y + block_offset[i], h->mb + i*16, linesize);
+ }
+ }
+ }else{
+ s->dsp.h264_idct_add16intra(dest_y, block_offset, h->mb, linesize, h->non_zero_count_cache);
+ }
+ }else if(h->cbp&15){
+ if(transform_bypass){
+ const int di = IS_8x8DCT(mb_type) ? 4 : 1;
+ idct_add= IS_8x8DCT(mb_type) ? s->dsp.add_pixels8 : s->dsp.add_pixels4;
+ for(i=0; i<16; i+=di){
+ if(h->non_zero_count_cache[ scan8[i] ]){
+ idct_add(dest_y + block_offset[i], h->mb + i*16, linesize);
+ }
+ }
+ }else{
+ if(IS_8x8DCT(mb_type)){
+ s->dsp.h264_idct8_add4(dest_y, block_offset, h->mb, linesize, h->non_zero_count_cache);
+ }else{
+ s->dsp.h264_idct_add16(dest_y, block_offset, h->mb, linesize, h->non_zero_count_cache);
+ }
+ }
+ }
+ }else{
+ for(i=0; i<16; i++){
+ if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]){ //FIXME benchmark weird rule, & below
+ uint8_t * const ptr= dest_y + block_offset[i];
+ ff_svq3_add_idct_c(ptr, h->mb + i*16, linesize, s->qscale, IS_INTRA(mb_type) ? 1 : 0);
+ }
+ }
+ }
+ }
+
+ if((simple || !CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)) && (h->cbp&0x30)){
+ uint8_t *dest[2] = {dest_cb, dest_cr};
+ if(transform_bypass){
+ if(IS_INTRA(mb_type) && h->sps.profile_idc==244 && (h->chroma_pred_mode==VERT_PRED8x8 || h->chroma_pred_mode==HOR_PRED8x8)){
+ h->hpc.pred8x8_add[h->chroma_pred_mode](dest[0], block_offset + 16, h->mb + 16*16, uvlinesize);
+ h->hpc.pred8x8_add[h->chroma_pred_mode](dest[1], block_offset + 20, h->mb + 20*16, uvlinesize);
+ }else{
+ idct_add = s->dsp.add_pixels4;
+ for(i=16; i<16+8; i++){
+ if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16])
+ idct_add (dest[(i&4)>>2] + block_offset[i], h->mb + i*16, uvlinesize);
+ }
+ }
+ }else{
+ chroma_dc_dequant_idct_c(h->mb + 16*16, h->chroma_qp[0], h->dequant4_coeff[IS_INTRA(mb_type) ? 1:4][h->chroma_qp[0]][0]);
+ chroma_dc_dequant_idct_c(h->mb + 16*16+4*16, h->chroma_qp[1], h->dequant4_coeff[IS_INTRA(mb_type) ? 2:5][h->chroma_qp[1]][0]);
+ if(is_h264){
+ idct_add = s->dsp.h264_idct_add;
+ idct_dc_add = s->dsp.h264_idct_dc_add;
+ for(i=16; i<16+8; i++){
+ if(h->non_zero_count_cache[ scan8[i] ])
+ idct_add (dest[(i&4)>>2] + block_offset[i], h->mb + i*16, uvlinesize);
+ else if(h->mb[i*16])
+ idct_dc_add(dest[(i&4)>>2] + block_offset[i], h->mb + i*16, uvlinesize);
+ }
+ }else{
+ for(i=16; i<16+8; i++){
+ if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]){
+ uint8_t * const ptr= dest[(i&4)>>2] + block_offset[i];
+ ff_svq3_add_idct_c(ptr, h->mb + i*16, uvlinesize, ff_h264_chroma_qp[s->qscale + 12] - 12, 2);
+ }
+ }
+ }
+ }
+ }
+ }
+#if ENABLE_SLICE_MT_PATCH
+ if((s->avctx->thread_count < 2) && (h->cbp || IS_INTRA(mb_type)))
+ s->dsp.clear_blocks(h->mb);
+#else
+ if(h->cbp || IS_INTRA(mb_type))
+ s->dsp.clear_blocks(h->mb);
+#endif
+
+ if(h->deblocking_filter && 0) {
+ backup_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, simple);
+ fill_filter_caches(h, mb_type); //FIXME don't fill stuff which isn't used by filter_mb
+ h->chroma_qp[0] = get_chroma_qp(h, 0, s->current_picture.qscale_table[mb_xy]);
+ h->chroma_qp[1] = get_chroma_qp(h, 1, s->current_picture.qscale_table[mb_xy]);
+ if (!simple && FRAME_MBAFF) {
+ ff_h264_filter_mb (h, mb_x, mb_y, dest_y, dest_cb, dest_cr, linesize, uvlinesize);
+ } else {
+ ff_h264_filter_mb_fast(h, mb_x, mb_y, dest_y, dest_cb, dest_cr, linesize, uvlinesize);
+ }
+ }
+}
+
+/**
+ * Process a macroblock; this variant avoids checks for expensive, uncommon cases.
+ */
+static void hl_decode_mb_simple(H264Context *h){
+ hl_decode_mb_internal(h, 1);
+}
+
+/**
+ * Process a macroblock; this variant handles edge cases such as interlacing.
+ */
+static void av_noinline hl_decode_mb_complex(H264Context *h){
+ hl_decode_mb_internal(h, 0);
+}
+
+void ff_h264_hl_decode_mb(H264Context *h){
+ MpegEncContext * const s = &h->s;
+ const int mb_xy= h->mb_xy;
+ const int mb_type= s->current_picture.mb_type[mb_xy];
+ int is_complex = CONFIG_SMALL || h->is_complex || IS_INTRA_PCM(mb_type) || s->qscale == 0;
+
+ if (is_complex)
+ hl_decode_mb_complex(h);
+ else hl_decode_mb_simple(h);
+}
+
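+// Parse the explicit weighted-prediction table from the slice header:
+// per-reference luma/chroma weights and offsets for each list; h->use_weight
+// is set whenever any non-default value is present.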
+static int pred_weight_table(H264Context *h){
+ MpegEncContext * const s = &h->s;
+ int list, i;
+ int luma_def, chroma_def;
+
+ h->use_weight= 0;
+ h->use_weight_chroma= 0;
+ h->luma_log2_weight_denom= get_ue_golomb(&s->gb);
+ h->chroma_log2_weight_denom= get_ue_golomb(&s->gb);
+ luma_def = 1<<h->luma_log2_weight_denom;
+ chroma_def = 1<<h->chroma_log2_weight_denom;
+
+ for(list=0; list<2; list++){
+ h->luma_weight_flag[list] = 0;
+ h->chroma_weight_flag[list] = 0;
+ for(i=0; i<h->ref_count[list]; i++){
+ int luma_weight_flag, chroma_weight_flag;
+
+ luma_weight_flag= get_bits1(&s->gb);
+ if(luma_weight_flag){
+ h->luma_weight[list][i]= get_se_golomb(&s->gb);
+ h->luma_offset[list][i]= get_se_golomb(&s->gb);
+ if( h->luma_weight[list][i] != luma_def
+ || h->luma_offset[list][i] != 0) {
+ h->use_weight= 1;
+ h->luma_weight_flag[list]= 1;
+ }
+ }else{
+ h->luma_weight[list][i]= luma_def;
+ h->luma_offset[list][i]= 0;
+ }
+
+ if(CHROMA){
+ chroma_weight_flag= get_bits1(&s->gb);
+ if(chroma_weight_flag){
+ int j;
+ for(j=0; j<2; j++){
+ h->chroma_weight[list][i][j]= get_se_golomb(&s->gb);
+ h->chroma_offset[list][i][j]= get_se_golomb(&s->gb);
+ if( h->chroma_weight[list][i][j] != chroma_def
+ || h->chroma_offset[list][i][j] != 0) {
+ h->use_weight_chroma= 1;
+ h->chroma_weight_flag[list]= 1;
+ }
+ }
+ }else{
+ int j;
+ for(j=0; j<2; j++){
+ h->chroma_weight[list][i][j]= chroma_def;
+ h->chroma_offset[list][i][j]= 0;
+ }
+ }
+ }
+ }
+ if(h->slice_type_nos != FF_B_TYPE) break;
+ }
+ h->use_weight= h->use_weight || h->use_weight_chroma;
+ return 0;
+}
+
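+// Derive the implicit bi-prediction weights for B slices from the POC
+// distances between the current picture and the two references; an equal
+// 32/32 split is used when the distance scale factor is out of range.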
+static void implicit_weight_table(H264Context *h){
+ MpegEncContext * const s = &h->s;
+ int ref0, ref1, i;
+ int cur_poc = s->current_picture_ptr->poc;
+
+ for (i = 0; i < 2; i++) {
+ h->luma_weight_flag[i] = 0;
+ h->chroma_weight_flag[i] = 0;
+ }
+
+ if( h->ref_count[0] == 1 && h->ref_count[1] == 1
+ && h->ref_list[0][0].poc + h->ref_list[1][0].poc == 2*cur_poc){
+ h->use_weight= 0;
+ h->use_weight_chroma= 0;
+ return;
+ }
+
+ h->use_weight= 2;
+ h->use_weight_chroma= 2;
+ h->luma_log2_weight_denom= 5;
+ h->chroma_log2_weight_denom= 5;
+
+ for(ref0=0; ref0 < h->ref_count[0]; ref0++){
+ int poc0 = h->ref_list[0][ref0].poc;
+ for(ref1=0; ref1 < h->ref_count[1]; ref1++){
+ int poc1 = h->ref_list[1][ref1].poc;
+ int td = av_clip(poc1 - poc0, -128, 127);
+ if(td){
+ int tb = av_clip(cur_poc - poc0, -128, 127);
+ int tx = (16384 + (FFABS(td) >> 1)) / td;
+ int dist_scale_factor = av_clip((tb*tx + 32) >> 6, -1024, 1023) >> 2;
+ if(dist_scale_factor < -64 || dist_scale_factor > 128)
+ h->implicit_weight[ref0][ref1] = 32;
+ else
+ h->implicit_weight[ref0][ref1] = 64 - dist_scale_factor;
+ }else
+ h->implicit_weight[ref0][ref1] = 32;
+ }
+ }
+}
+
+/**
+ * Handle an IDR (instantaneous decoder refresh): drop all reference pictures
+ * and reset the frame_num/POC prediction state.
+ */
+static void idr(H264Context *h){
+ ff_h264_remove_all_refs(h);
+ h->prev_frame_num= 0;
+ h->prev_frame_num_offset= 0;
+ h->prev_poc_msb=
+ h->prev_poc_lsb= 0;
+}
+
+/* forget old pics after a seek */
+static void flush_dpb(AVCodecContext *avctx){
+ H264Context *h= avctx->priv_data;
+ int i;
+ for(i=0; i<MAX_DELAYED_PIC_COUNT; i++) {
+ if(h->delayed_pic[i])
+ h->delayed_pic[i]->reference= 0;
+ h->delayed_pic[i]= NULL;
+ }
+ h->outputed_poc= INT_MIN;
+ h->prev_interlaced_frame = 1;
+ idr(h);
+ if(h->s.current_picture_ptr)
+ h->s.current_picture_ptr->reference= 0;
+ h->s.first_field= 0;
+ ff_h264_reset_sei(h);
+ h->has_to_drop_first_non_ref = avctx->h264_has_to_drop_first_non_ref; /* ffdshow custom code */
+ ff_mpeg_flush(avctx);
+}
+
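+// Compute the picture order count (POC) of the current picture for the
+// three poc_type modes signalled in the SPS and store the per-field values.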
+static int init_poc(H264Context *h){
+ MpegEncContext * const s = &h->s;
+ const int max_frame_num= 1<<h->sps.log2_max_frame_num;
+ int field_poc[2];
+ Picture *cur = s->current_picture_ptr;
+
+ h->frame_num_offset= h->prev_frame_num_offset;
+ if(h->frame_num < h->prev_frame_num)
+ h->frame_num_offset += max_frame_num;
+
+ if(h->sps.poc_type==0){
+ const int max_poc_lsb= 1<<h->sps.log2_max_poc_lsb;
+
+ if (h->poc_lsb < h->prev_poc_lsb && h->prev_poc_lsb - h->poc_lsb >= max_poc_lsb/2)
+ h->poc_msb = h->prev_poc_msb + max_poc_lsb;
+ else if(h->poc_lsb > h->prev_poc_lsb && h->prev_poc_lsb - h->poc_lsb < -max_poc_lsb/2)
+ h->poc_msb = h->prev_poc_msb - max_poc_lsb;
+ else
+ h->poc_msb = h->prev_poc_msb;
+//printf("poc: %d %d\n", h->poc_msb, h->poc_lsb);
+ field_poc[0] =
+ field_poc[1] = h->poc_msb + h->poc_lsb;
+ if(s->picture_structure == PICT_FRAME)
+ field_poc[1] += h->delta_poc_bottom;
+ }else if(h->sps.poc_type==1){
+ int abs_frame_num, expected_delta_per_poc_cycle, expectedpoc;
+ int i;
+
+ if(h->sps.poc_cycle_length != 0)
+ abs_frame_num = h->frame_num_offset + h->frame_num;
+ else
+ abs_frame_num = 0;
+
+ if(h->nal_ref_idc==0 && abs_frame_num > 0)
+ abs_frame_num--;
+
+ expected_delta_per_poc_cycle = 0;
+ for(i=0; i < h->sps.poc_cycle_length; i++)
+ expected_delta_per_poc_cycle += h->sps.offset_for_ref_frame[ i ]; //FIXME integrate during sps parse
+
+ if(abs_frame_num > 0){
+ int poc_cycle_cnt = (abs_frame_num - 1) / h->sps.poc_cycle_length;
+ int frame_num_in_poc_cycle = (abs_frame_num - 1) % h->sps.poc_cycle_length;
+
+ expectedpoc = poc_cycle_cnt * expected_delta_per_poc_cycle;
+ for(i = 0; i <= frame_num_in_poc_cycle; i++)
+ expectedpoc = expectedpoc + h->sps.offset_for_ref_frame[ i ];
+ } else
+ expectedpoc = 0;
+
+ if(h->nal_ref_idc == 0)
+ expectedpoc = expectedpoc + h->sps.offset_for_non_ref_pic;
+
+ field_poc[0] = expectedpoc + h->delta_poc[0];
+ field_poc[1] = field_poc[0] + h->sps.offset_for_top_to_bottom_field;
+
+ if(s->picture_structure == PICT_FRAME)
+ field_poc[1] += h->delta_poc[1];
+ }else{
+ int poc= 2*(h->frame_num_offset + h->frame_num);
+
+ if(!h->nal_ref_idc)
+ poc--;
+
+ field_poc[0]= poc;
+ field_poc[1]= poc;
+ }
+
+ if(s->picture_structure != PICT_BOTTOM_FIELD)
+ s->current_picture_ptr->field_poc[0]= field_poc[0];
+ if(s->picture_structure != PICT_TOP_FIELD)
+ s->current_picture_ptr->field_poc[1]= field_poc[1];
+ cur->poc= FFMIN(cur->field_poc[0], cur->field_poc[1]);
+
+ return 0;
+}
+
+
+/**
+ * Initialize the zigzag/field scan tables, permuting them when the IDCT
+ * implementation in use expects a transposed layout, and set up the
+ * unpermuted _q0 variants used for transform-bypass (lossless) blocks.
+ */
+static void init_scan_tables(H264Context *h){
+ MpegEncContext * const s = &h->s;
+ int i;
+ if(s->dsp.h264_idct_add == ff_h264_idct_add_c){ //FIXME little ugly
+ memcpy(h->zigzag_scan, zigzag_scan, 16*sizeof(uint8_t));
+ memcpy(h-> field_scan, field_scan, 16*sizeof(uint8_t));
+ }else{
+ for(i=0; i<16; i++){
+#define T(x) (x>>2) | ((x<<2) & 0xF)
+ h->zigzag_scan[i] = T(zigzag_scan[i]);
+ h-> field_scan[i] = T( field_scan[i]);
+#undef T
+ }
+ }
+ if(s->dsp.h264_idct8_add == ff_h264_idct8_add_c){
+ memcpy(h->zigzag_scan8x8, ff_zigzag_direct, 64*sizeof(uint8_t));
+ memcpy(h->zigzag_scan8x8_cavlc, zigzag_scan8x8_cavlc, 64*sizeof(uint8_t));
+ memcpy(h->field_scan8x8, field_scan8x8, 64*sizeof(uint8_t));
+ memcpy(h->field_scan8x8_cavlc, field_scan8x8_cavlc, 64*sizeof(uint8_t));
+ }else{
+ for(i=0; i<64; i++){
+#define T(x) (x>>3) | ((x&7)<<3)
+ h->zigzag_scan8x8[i] = T(ff_zigzag_direct[i]);
+ h->zigzag_scan8x8_cavlc[i] = T(zigzag_scan8x8_cavlc[i]);
+ h->field_scan8x8[i] = T(field_scan8x8[i]);
+ h->field_scan8x8_cavlc[i] = T(field_scan8x8_cavlc[i]);
+#undef T
+ }
+ }
+ if(h->sps.transform_bypass){ //FIXME same ugly
+ h->zigzag_scan_q0 = zigzag_scan;
+ h->zigzag_scan8x8_q0 = ff_zigzag_direct;
+ h->zigzag_scan8x8_cavlc_q0 = zigzag_scan8x8_cavlc;
+ h->field_scan_q0 = field_scan;
+ h->field_scan8x8_q0 = field_scan8x8;
+ h->field_scan8x8_cavlc_q0 = field_scan8x8_cavlc;
+ }else{
+ h->zigzag_scan_q0 = h->zigzag_scan;
+ h->zigzag_scan8x8_q0 = h->zigzag_scan8x8;
+ h->zigzag_scan8x8_cavlc_q0 = h->zigzag_scan8x8_cavlc;
+ h->field_scan_q0 = h->field_scan;
+ h->field_scan8x8_q0 = h->field_scan8x8;
+ h->field_scan8x8_cavlc_q0 = h->field_scan8x8_cavlc;
+ }
+}
+
+static void field_end(H264Context *h){
+ MpegEncContext * const s = &h->s;
+ AVCodecContext * const avctx= s->avctx;
+ s->mb_y= 0;
+
+ s->current_picture_ptr->qscale_type= FF_QSCALE_TYPE_H264;
+ s->current_picture_ptr->pict_type= s->pict_type;
+
+ if(!s->dropable) {
+ ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index);
+ h->prev_poc_msb= h->poc_msb;
+ h->prev_poc_lsb= h->poc_lsb;
+ }
+ h->prev_frame_num_offset= h->frame_num_offset;
+ h->prev_frame_num= h->frame_num;
+
+ /*
+ * FIXME: The error handling code does not seem to support interlaced
+ * content when slices span multiple rows.
+ * The ff_er_add_slice calls don't work right for bottom
+ * fields; they cause massive erroneous error concealing.
+ * Error marking covers both fields (top and bottom), which causes a
+ * mismatched s->error_count and a bad error table. Further, the error
+ * count goes to INT_MAX when called for the bottom field, because mb_y
+ * is past the end by one (the caller's fault) and resync_mb_y != 0
+ * causes problems for the first MB line, too.
+ */
+ if (!FIELD_PICTURE)
+ ff_er_frame_end(s);
+
+ MPV_frame_end(s);
+
+ h->current_slice=0;
+}
+
+/**
+ * Replicates H264 "master" context to thread contexts.
+ */
+static void clone_slice(H264Context *dst, H264Context *src, int full) /* ffdshow custom code */
+{
+ memcpy(dst->block_offset, src->block_offset, sizeof(dst->block_offset));
+ dst->s.current_picture_ptr = src->s.current_picture_ptr;
+ dst->s.current_picture = src->s.current_picture;
+ dst->s.linesize = src->s.linesize;
+ dst->s.uvlinesize = src->s.uvlinesize;
+ dst->s.first_field = src->s.first_field;
+
+ dst->prev_poc_msb = src->prev_poc_msb;
+ dst->prev_poc_lsb = src->prev_poc_lsb;
+ dst->prev_frame_num_offset = src->prev_frame_num_offset;
+ dst->prev_frame_num = src->prev_frame_num;
+ dst->short_ref_count = src->short_ref_count;
+
+ memcpy(dst->short_ref, src->short_ref, sizeof(dst->short_ref));
+ memcpy(dst->long_ref, src->long_ref, sizeof(dst->long_ref));
+ memcpy(dst->default_ref_list, src->default_ref_list, sizeof(dst->default_ref_list));
+ memcpy(dst->ref_list, src->ref_list, sizeof(dst->ref_list));
+
+ memcpy(dst->dequant4_coeff, src->dequant4_coeff, sizeof(src->dequant4_coeff));
+ memcpy(dst->dequant8_coeff, src->dequant8_coeff, sizeof(src->dequant8_coeff));
+
+ /* ffdshow custom code (begin) */
+ if(!full)
+ return;
+
+ dst->slice_type = src->slice_type;
+
+ dst->mb_linesize = src->mb_linesize;
+
+ dst->sps = src->sps;
+ dst->pps = src->pps;
+
+ dst->mb_mbaff = src->mb_mbaff;
+ dst->mb_aff_frame = src->mb_aff_frame;
+
+ dst->s.me.qpel_put = src->s.me.qpel_put;
+ dst->s.me.qpel_avg = src->s.me.qpel_avg;
+
+ /* weighted motion pred */
+
+ dst->use_weight = src->use_weight;
+ if(dst->use_weight != 0) {
+ dst->use_weight_chroma = src->use_weight_chroma;
+ dst->luma_log2_weight_denom = src->luma_log2_weight_denom;
+ dst->chroma_log2_weight_denom = src->chroma_log2_weight_denom;
+
+ memcpy(dst->luma_weight, src->luma_weight, sizeof(src->luma_weight));
+ memcpy(dst->luma_offset, src->luma_offset, sizeof(src->luma_offset));
+ memcpy(dst->chroma_weight, src->chroma_weight, sizeof(src->chroma_weight));
+ memcpy(dst->chroma_offset, src->chroma_offset, sizeof(src->chroma_offset));
+
+ memcpy(dst->implicit_weight, src->implicit_weight, sizeof(src->implicit_weight));
+ }
+
+ /* deblocking */
+ dst->deblocking_filter = src->deblocking_filter;
+ dst->slice_alpha_c0_offset = src->slice_alpha_c0_offset;
+ dst->slice_beta_offset = src->slice_beta_offset;
+ dst->last_qscale_diff = src->last_qscale_diff;
+ memcpy(dst->ref2frm, src->ref2frm, sizeof(src->ref2frm));
+
+ dst->slice_num = src->slice_num;
+ dst->slice_type_nos = src->slice_type_nos;
+
+ // FIXME not sure if below are necessary
+ dst->direct_spatial_mv_pred= src->direct_spatial_mv_pred;
+ dst->s.qscale = src->s.qscale;
+ dst->b_stride = src->b_stride;
+ dst->b8_stride = src->b8_stride;
+ dst->list_count = src->list_count;
+ dst->emu_edge_height = src->emu_edge_height;
+ dst->emu_edge_width = src->emu_edge_width;
+ dst->s.picture_structure = src->s.picture_structure;
+ dst->s.codec_id = src->s.codec_id;
+ dst->s.flags = src->s.flags;
+ dst->s.flags2 = src->s.flags2;
+ /* ffdshow custom code (end) */
+}
+
+/**
+ * Decode a slice header.
+ * This will also call MPV_common_init() and frame_start() as needed.
+ *
+ * @param h  the H264 context
+ * @param h0 the H264 "master" context (differs from 'h' when doing slice-based parallel decoding)
+ *
+ * @return 0 if OK, <0 if an error occurred, 1 if decoding must not be multithreaded
+ */
+static int decode_slice_header(H264Context *h, H264Context *h0){
+ MpegEncContext * const s = &h->s;
+ MpegEncContext * const s0 = &h0->s;
+ unsigned int first_mb_in_slice;
+ unsigned int pps_id;
+ int num_ref_idx_active_override_flag;
+ unsigned int slice_type, tmp, i, j;
+ int default_ref_list_done = 0;
+ int last_pic_structure;
+
+ s->dropable= h->nal_ref_idc == 0;
+
+ if((s->avctx->flags2 & CODEC_FLAG2_FAST) && !h->nal_ref_idc){
+ s->me.qpel_put= s->dsp.put_2tap_qpel_pixels_tab;
+ s->me.qpel_avg= s->dsp.avg_2tap_qpel_pixels_tab;
+ }else{
+ s->me.qpel_put= s->dsp.put_h264_qpel_pixels_tab;
+ s->me.qpel_avg= s->dsp.avg_h264_qpel_pixels_tab;
+ }
+
+ first_mb_in_slice= get_ue_golomb(&s->gb);
+
+ if(first_mb_in_slice == 0){ //FIXME better field boundary detection
+ if(h0->current_slice && FIELD_PICTURE){
+ field_end(h);
+ }
+
+ h0->current_slice = 0;
+ if (!s0->first_field)
+ s->current_picture_ptr= NULL;
+ }
+
+ slice_type= get_ue_golomb_31(&s->gb);
+ if(slice_type > 9){
+ av_log(h->s.avctx, AV_LOG_ERROR, "slice type too large (%d) at %d %d\n", h->slice_type, s->mb_x, s->mb_y);
+ return -1;
+ }
+ if(slice_type > 4){
+ slice_type -= 5;
+ h->slice_type_fixed=1;
+ }else
+ h->slice_type_fixed=0;
+
+ slice_type= golomb_to_pict_type[ slice_type ];
+ if (slice_type == FF_I_TYPE
+ || (h0->current_slice != 0 && slice_type == h0->last_slice_type) ) {
+ default_ref_list_done = 1;
+ }
+ h->slice_type= slice_type;
+ h->slice_type_nos= slice_type & 3;
+
+ s->pict_type= h->slice_type; // to make a few old functions happy, it's wrong though
+ if (s->pict_type == FF_B_TYPE && s0->last_picture_ptr == NULL) {
+ av_log(h->s.avctx, AV_LOG_ERROR,
+ "B picture before any references, skipping\n");
+ return -1;
+ }
+
+ pps_id= get_ue_golomb(&s->gb);
+ if(pps_id>=MAX_PPS_COUNT){
+ av_log(h->s.avctx, AV_LOG_ERROR, "pps_id out of range\n");
+ return -1;
+ }
+ if(!h0->pps_buffers[pps_id]) {
+ av_log(h->s.avctx, AV_LOG_ERROR, "non-existing PPS %u referenced\n", pps_id);
+ return -1;
+ }
+ h->pps= *h0->pps_buffers[pps_id];
+
+ if(!h0->sps_buffers[h->pps.sps_id]) {
+ av_log(h->s.avctx, AV_LOG_ERROR, "non-existing SPS %u referenced\n", h->pps.sps_id);
+ return -1;
+ }
+ h->sps = *h0->sps_buffers[h->pps.sps_id];
+
+ if(h == h0 && h->dequant_coeff_pps != pps_id){
+ h->dequant_coeff_pps = pps_id;
+ init_dequant_tables(h);
+ }
+
+ s->mb_width= h->sps.mb_width;
+ s->mb_height= h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag);
+
+ h->b_stride= s->mb_width*4;
+ h->b8_stride= s->mb_width*2;
+
+ s->width = 16*s->mb_width - 2*FFMIN(h->sps.crop_right, 7);
+ if(h->sps.frame_mbs_only_flag)
+ s->height= 16*s->mb_height - 2*FFMIN(h->sps.crop_bottom, 7);
+ else
+ s->height= 16*s->mb_height - 4*FFMIN(h->sps.crop_bottom, 3);
+
+ if (s->context_initialized
+ && ( s->width != s->avctx->width || s->height != s->avctx->height)) {
+ if(h != h0)
+ return -1; // width / height changed during parallelized decoding
+ free_tables(h);
+ flush_dpb(s->avctx);
+ MPV_common_end(s);
+ }
+ if (!s->context_initialized) {
+ if(h != h0)
+ return -1; // we can't (re-)initialize the context during parallel decoding
+
+ avcodec_set_dimensions(s->avctx, s->width, s->height);
+ s->avctx->sample_aspect_ratio= h->sps.sar;
+ if(!s->avctx->sample_aspect_ratio.den)
+ s->avctx->sample_aspect_ratio.den = 1;
+
+ if(h->sps.video_signal_type_present_flag){
+ s->avctx->color_range = h->sps.full_range ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
+ if(h->sps.colour_description_present_flag){
+ s->avctx->color_primaries = h->sps.color_primaries;
+ s->avctx->color_trc = h->sps.color_trc;
+ s->avctx->colorspace = h->sps.colorspace;
+ }
+ }
+
+ if(h->sps.timing_info_present_flag){
+ #if __STDC_VERSION__ >= 199901L
+ s->avctx->time_base= (AVRational){h->sps.num_units_in_tick * 2, h->sps.time_scale};
+ #else
+ s->avctx->time_base.num = h->sps.num_units_in_tick * 2;
+ s->avctx->time_base.den = h->sps.time_scale;
+ #endif
+ if(h->x264_build > 0 && h->x264_build < 44)
+ s->avctx->time_base.den *= 2;
+ av_reduce(&s->avctx->time_base.num, &s->avctx->time_base.den,
+ s->avctx->time_base.num, s->avctx->time_base.den, 1<<30);
+ }
+
+ if (MPV_common_init(s) < 0)
+ return -1;
+ s->first_field = 0;
+ h->prev_interlaced_frame = 1;
+
+ init_scan_tables(h);
+ ff_h264_alloc_tables(h);
+
+ for(i = 1; i < s->avctx->thread_count; i++) {
+ H264Context *c;
+ c = h->thread_context[i] = av_malloc(sizeof(H264Context));
+ memcpy(c, h->s.thread_context[i], sizeof(MpegEncContext));
+ memset(&c->s + 1, 0, sizeof(H264Context) - sizeof(MpegEncContext));
+ c->sps = h->sps;
+ c->pps = h->pps;
+ init_scan_tables(c);
+ clone_tables(c, h);
+ }
+
+ for(i = 0; i < s->avctx->thread_count; i++)
+ if(context_init(h->thread_context[i]) < 0)
+ return -1;
+ }
+
+ h->frame_num= get_bits(&s->gb, h->sps.log2_max_frame_num);
+
+ h->mb_mbaff = 0;
+ h->mb_aff_frame = 0;
+ last_pic_structure = s0->picture_structure;
+ if(h->sps.frame_mbs_only_flag){
+ s->picture_structure= PICT_FRAME;
+ }else{
+ if(get_bits1(&s->gb)) { //field_pic_flag
+ s->picture_structure= PICT_TOP_FIELD + get_bits1(&s->gb); //bottom_field_flag
+ } else {
+ s->picture_structure= PICT_FRAME;
+ h->mb_aff_frame = h->sps.mb_aff;
+ }
+ }
+ h->mb_field_decoding_flag= s->picture_structure != PICT_FRAME;
+
+ /* ffdshow custom code begin */
+ //
+ // Workaround Haali's media splitter (http://forum.doom9.org/showthread.php?p=1226434#post1226434)
+ //
+ // Disallow unpaired field referencing just after seeking.
+ // This rule is applied only if h->has_to_drop_first_non_ref == 3.
+ // This is not strictly correct, because an unpaired field is allowed to be
+ // a reference field; that is why this workaround is optional and kept to a
+ // minimum.
+ if (h->has_to_drop_first_non_ref == 1 && s->dropable){
+ if (FIELD_PICTURE){
+ h->has_to_drop_first_non_ref = 2;
+ }else{
+ h->has_to_drop_first_non_ref = 0;
+ }
+ } else if (h->has_to_drop_first_non_ref == 2){
+ if (FIELD_PICTURE && !s->dropable)
+ h->has_to_drop_first_non_ref = 3;
+ else if (!FIELD_PICTURE)
+ h->has_to_drop_first_non_ref = 0;
+ } else if (h->has_to_drop_first_non_ref == 3){
+ h->has_to_drop_first_non_ref = 0;
+ }
+ /* ffdshow custom code end */
+
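+ // Handle gaps in frame_num: generate placeholder frames for each missing
+ // frame_num so that the reference picture state stays consistent.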
+ if(h0->current_slice == 0){
+ while(h->frame_num != h->prev_frame_num &&
+ h->frame_num != (h->prev_frame_num+1)%(1<<h->sps.log2_max_frame_num)){
+ av_log(NULL, AV_LOG_DEBUG, "Frame num gap %d %d\n", h->frame_num, h->prev_frame_num);
+ if (ff_h264_frame_start(h) < 0)
+ return -1;
+ h->prev_frame_num++;
+ h->prev_frame_num %= 1<<h->sps.log2_max_frame_num;
+ s->current_picture_ptr->frame_num= h->prev_frame_num;
+ ff_h264_execute_ref_pic_marking(h, NULL, 0);
+ }
+
+ /* See if we have a decoded first field looking for a pair... */
+ if (s0->first_field) {
+ assert(s0->current_picture_ptr);
+ assert(s0->current_picture_ptr->data[0]);
+ assert(s0->current_picture_ptr->reference != DELAYED_PIC_REF);
+
+ /* figure out if we have a complementary field pair */
+ if (!FIELD_PICTURE || s->picture_structure == last_pic_structure) {
+ /*
+ * Previous field is unmatched. Don't display it, but let it
+ * remain for reference if marked as such.
+ */
+ s0->current_picture_ptr = NULL;
+ s0->first_field = FIELD_PICTURE;
+
+ } else {
+ if ((h->nal_ref_idc &&
+ s0->current_picture_ptr->reference &&
+ s0->current_picture_ptr->frame_num != h->frame_num) ||
+ h->has_to_drop_first_non_ref == 3) { /* ffdshow custom code */
+ /*
+ * This and the previous field were both reference fields but had
+ * different frame_nums. Consider this field the first of a new
+ * pair; throw away the previous field except for reference
+ * purposes.
+ */
+ s0->first_field = 1;
+ s0->current_picture_ptr = NULL;
+
+ } else {
+ /* Second field in complementary pair */
+ s0->first_field = 0;
+ }
+ }
+
+ } else {
+ /* Frame or first field in a potentially complementary pair */
+ assert(!s0->current_picture_ptr);
+ s0->first_field = FIELD_PICTURE;
+ }
+
+ if((!FIELD_PICTURE || s0->first_field) && ff_h264_frame_start(h) < 0) {
+ s0->first_field = 0;
+ return -1;
+ }
+ }
+ if(h != h0)
+ clone_slice(h, h0, 0); /* ffdshow custom code */
+
+ s->current_picture_ptr->frame_num= h->frame_num; //FIXME frame_num cleanup
+
+ assert(s->mb_num == s->mb_width * s->mb_height);
+ if(first_mb_in_slice << FIELD_OR_MBAFF_PICTURE >= s->mb_num ||
+ first_mb_in_slice >= s->mb_num){
+ av_log(h->s.avctx, AV_LOG_ERROR, "first_mb_in_slice overflow\n");
+ return -1;
+ }
+ s->resync_mb_x = s->mb_x = first_mb_in_slice % s->mb_width;
+ s->resync_mb_y = s->mb_y = (first_mb_in_slice / s->mb_width) << FIELD_OR_MBAFF_PICTURE;
+ if (s->picture_structure == PICT_BOTTOM_FIELD)
+ s->resync_mb_y = s->mb_y = s->mb_y + 1;
+ assert(s->mb_y < s->mb_height);
+
+ if(s->picture_structure==PICT_FRAME){
+ h->curr_pic_num= h->frame_num;
+ h->max_pic_num= 1<< h->sps.log2_max_frame_num;
+ }else{
+ h->curr_pic_num= 2*h->frame_num + 1;
+ h->max_pic_num= 1<<(h->sps.log2_max_frame_num + 1);
+ }
+
+ if(h->nal_unit_type == NAL_IDR_SLICE){
+ get_ue_golomb(&s->gb); /* idr_pic_id */
+ }
+
+ if(h->sps.poc_type==0){
+ h->poc_lsb= get_bits(&s->gb, h->sps.log2_max_poc_lsb);
+
+ if(h->pps.pic_order_present==1 && s->picture_structure==PICT_FRAME){
+ h->delta_poc_bottom= get_se_golomb(&s->gb);
+ }
+ }
+
+ if(h->sps.poc_type==1 && !h->sps.delta_pic_order_always_zero_flag){
+ h->delta_poc[0]= get_se_golomb(&s->gb);
+
+ if(h->pps.pic_order_present==1 && s->picture_structure==PICT_FRAME)
+ h->delta_poc[1]= get_se_golomb(&s->gb);
+ }
+
+ init_poc(h);
+
+ if(h->pps.redundant_pic_cnt_present){
+ h->redundant_pic_count= get_ue_golomb(&s->gb);
+ }
+
+ //set defaults, might be overridden a few lines later
+ h->ref_count[0]= h->pps.ref_count[0];
+ h->ref_count[1]= h->pps.ref_count[1];
+
+ if(h->slice_type_nos != FF_I_TYPE){
+ if(h->slice_type_nos == FF_B_TYPE){
+ h->direct_spatial_mv_pred= get_bits1(&s->gb);
+ }
+ num_ref_idx_active_override_flag= get_bits1(&s->gb);
+
+ if(num_ref_idx_active_override_flag){
+ h->ref_count[0]= get_ue_golomb(&s->gb) + 1;
+ if(h->slice_type_nos==FF_B_TYPE)
+ h->ref_count[1]= get_ue_golomb(&s->gb) + 1;
+
+ if(h->ref_count[0]-1 > 32-1 || h->ref_count[1]-1 > 32-1){
+ av_log(h->s.avctx, AV_LOG_ERROR, "reference overflow\n");
+ h->ref_count[0]= h->ref_count[1]= 1;
+ return -1;
+ }
+ }
+ if(h->slice_type_nos == FF_B_TYPE)
+ h->list_count= 2;
+ else
+ h->list_count= 1;
+ }else
+ h->list_count= 0;
+
+ if(!default_ref_list_done){
+ ff_h264_fill_default_ref_list(h);
+ }
+
+ if(h->slice_type_nos!=FF_I_TYPE && ff_h264_decode_ref_pic_list_reordering(h) < 0)
+ return -1;
+
+ if(h->slice_type_nos!=FF_I_TYPE){
+ s->last_picture_ptr= &h->ref_list[0][0];
+ ff_copy_picture(&s->last_picture, s->last_picture_ptr);
+ }
+ if(h->slice_type_nos==FF_B_TYPE){
+ s->next_picture_ptr= &h->ref_list[1][0];
+ ff_copy_picture(&s->next_picture, s->next_picture_ptr);
+ }
+
+ if( (h->pps.weighted_pred && h->slice_type_nos == FF_P_TYPE )
+ || (h->pps.weighted_bipred_idc==1 && h->slice_type_nos== FF_B_TYPE ) )
+ pred_weight_table(h);
+ else if(h->pps.weighted_bipred_idc==2 && h->slice_type_nos== FF_B_TYPE)
+ implicit_weight_table(h);
+ else {
+ h->use_weight = 0;
+ for (i = 0; i < 2; i++) {
+ h->luma_weight_flag[i] = 0;
+ h->chroma_weight_flag[i] = 0;
+ }
+ }
+
+ if(h->nal_ref_idc)
+ ff_h264_decode_ref_pic_marking(h0, &s->gb);
+
+ if(FRAME_MBAFF)
+ ff_h264_fill_mbaff_ref_list(h);
+
+ if(h->slice_type_nos==FF_B_TYPE && !h->direct_spatial_mv_pred)
+ ff_h264_direct_dist_scale_factor(h);
+ ff_h264_direct_ref_list_init(h);
+
+ if( h->slice_type_nos != FF_I_TYPE && h->pps.cabac ){
+ tmp = get_ue_golomb_31(&s->gb);
+ if(tmp > 2){
+ av_log(s->avctx, AV_LOG_ERROR, "cabac_init_idc overflow\n");
+ return -1;
+ }
+ h->cabac_init_idc= tmp;
+ }
+
+ h->last_qscale_diff = 0;
+ // ==> Start patch MPC
+ h->slice_qp_delta = get_se_golomb(&s->gb);
+ tmp = h->pps.init_qp + h->slice_qp_delta;
+ // <== End patch MPC
+ if(tmp>51){
+ av_log(s->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp);
+ return -1;
+ }
+ s->qscale= tmp;
+ h->chroma_qp[0] = get_chroma_qp(h, 0, s->qscale);
+ h->chroma_qp[1] = get_chroma_qp(h, 1, s->qscale);
+ //FIXME qscale / qp ... stuff
+ if(h->slice_type == FF_SP_TYPE){
+ get_bits1(&s->gb); /* sp_for_switch_flag */
+ }
+ if(h->slice_type==FF_SP_TYPE || h->slice_type == FF_SI_TYPE){
+ // ==> Start patch MPC
+ h->slice_qs_delta = get_se_golomb(&s->gb); /* slice_qs_delta */
+ // <== End patch MPC
+ }
+
+ h->deblocking_filter = 1;
+ h->slice_alpha_c0_offset = 52;
+ h->slice_beta_offset = 52;
+ if( h->pps.deblocking_filter_parameters_present ) {
+ tmp= get_ue_golomb_31(&s->gb);
+ if(tmp > 2){
+ av_log(s->avctx, AV_LOG_ERROR, "deblocking_filter_idc %u out of range\n", tmp);
+ return -1;
+ }
+ h->deblocking_filter= tmp;
+ if(h->deblocking_filter < 2)
+ h->deblocking_filter^= 1; // 1<->0
+
+ if( h->deblocking_filter ) {
+ h->slice_alpha_c0_offset += get_se_golomb(&s->gb) << 1;
+ h->slice_beta_offset += get_se_golomb(&s->gb) << 1;
+ if( h->slice_alpha_c0_offset > 104U
+ || h->slice_beta_offset > 104U){
+ av_log(s->avctx, AV_LOG_ERROR, "deblocking filter parameters %d %d out of range\n", h->slice_alpha_c0_offset, h->slice_beta_offset);
+ return -1;
+ }
+ }
+ }
+
+ if( s->avctx->skip_loop_filter >= AVDISCARD_ALL
+ ||(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY && h->slice_type_nos != FF_I_TYPE)
+ ||(s->avctx->skip_loop_filter >= AVDISCARD_BIDIR && h->slice_type_nos == FF_B_TYPE)
+ ||(s->avctx->skip_loop_filter >= AVDISCARD_NONREF && h->nal_ref_idc == 0))
+ h->deblocking_filter= 0;
+
+ if(h->deblocking_filter == 1 && h0->max_contexts > 1) {
+ if(s->avctx->flags2 & CODEC_FLAG2_FAST) {
+ /* Cheat slightly for speed:
+ Do not bother to deblock across slices. */
+ h->deblocking_filter = 2;
+ } else {
+ h0->max_contexts = 1;
+ if(!h0->single_decode_warning) {
+ av_log(s->avctx, AV_LOG_INFO, "Cannot parallelize deblocking type 1, decoding such frames in sequential order\n");
+ h0->single_decode_warning = 1;
+ }
+ if(h != h0)
+ return 1; // deblocking switched inside frame
+ }
+ }
+ h->qp_thresh= 15 + 52 - FFMIN(h->slice_alpha_c0_offset, h->slice_beta_offset) - FFMAX3(0, h->pps.chroma_qp_index_offset[0], h->pps.chroma_qp_index_offset[1]);
+
+#if 0 //FMO
+ if( h->pps.num_slice_groups > 1 && h->pps.mb_slice_group_map_type >= 3 && h->pps.mb_slice_group_map_type <= 5)
+ slice_group_change_cycle= get_bits(&s->gb, ?);
+#endif
+
+ h0->last_slice_type = slice_type;
+ h->slice_num = ++h0->current_slice;
+ if(h->slice_num >= MAX_SLICES){
+ av_log(s->avctx, AV_LOG_ERROR, "Too many slices, increase MAX_SLICES and recompile\n");
+ }
+
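+    /* Build the per-slice ref2frm tables used by the loop filter: each entry of
+     * ref_list[j] is mapped to a compact frame id (its position among the
+     * short-term refs, then the long-term refs; 60 if unused) combined with its
+     * reference field flags. The leading -1 entries cover unavailable neighbours. */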
+ for(j=0; j<2; j++){
+ int id_list[16];
+ int *ref2frm= h->ref2frm[h->slice_num&(MAX_SLICES-1)][j];
+ for(i=0; i<16; i++){
+ id_list[i]= 60;
+ if(h->ref_list[j][i].data[0]){
+ int k;
+ uint8_t *base= h->ref_list[j][i].base[0];
+ for(k=0; k<h->short_ref_count; k++)
+ if(h->short_ref[k]->base[0] == base){
+ id_list[i]= k;
+ break;
+ }
+ for(k=0; k<h->long_ref_count; k++)
+ if(h->long_ref[k] && h->long_ref[k]->base[0] == base){
+ id_list[i]= h->short_ref_count + k;
+ break;
+ }
+ }
+ }
+
+ ref2frm[0]=
+ ref2frm[1]= -1;
+ for(i=0; i<16; i++)
+ ref2frm[i+2]= 4*id_list[i]
+ +(h->ref_list[j][i].reference&3);
+ ref2frm[18+0]=
+ ref2frm[18+1]= -1;
+ for(i=16; i<48; i++)
+ ref2frm[i+4]= 4*id_list[(i-16)>>1]
+ +(h->ref_list[j][i].reference&3);
+ }
+
+ h->emu_edge_width= (s->flags&CODEC_FLAG_EMU_EDGE) ? 0 : 16;
+ h->emu_edge_height= (FRAME_MBAFF || FIELD_PICTURE) ? 0 : h->emu_edge_width;
+
+ s->avctx->refs= h->sps.ref_frame_count;
+
+ if(s->avctx->debug&FF_DEBUG_PICT_INFO){
+ av_log(h->s.avctx, AV_LOG_DEBUG, "slice:%d %s mb:%d %c%s%s pps:%u frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n",
+ h->slice_num,
+ (s->picture_structure==PICT_FRAME ? "F" : s->picture_structure==PICT_TOP_FIELD ? "T" : "B"),
+ first_mb_in_slice,
+ av_get_pict_type_char(h->slice_type), h->slice_type_fixed ? " fix" : "", h->nal_unit_type == NAL_IDR_SLICE ? " IDR" : "",
+ pps_id, h->frame_num,
+ s->current_picture_ptr->field_poc[0], s->current_picture_ptr->field_poc[1],
+ h->ref_count[0], h->ref_count[1],
+ s->qscale,
+ h->deblocking_filter, h->slice_alpha_c0_offset/2-26, h->slice_beta_offset/2-26,
+ h->use_weight,
+ h->use_weight==1 && h->use_weight_chroma ? "c" : "",
+ h->slice_type == FF_B_TYPE ? (h->direct_spatial_mv_pred ? "SPAT" : "TEMP") : ""
+ );
+ }
+
+ return 0;
+}
+
+int ff_h264_get_slice_type(const H264Context *h)
+{
+ switch (h->slice_type) {
+ case FF_P_TYPE: return 0;
+ case FF_B_TYPE: return 1;
+ case FF_I_TYPE: return 2;
+ case FF_SP_TYPE: return 3;
+ case FF_SI_TYPE: return 4;
+ default: return -1;
+ }
+}
+
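+/**
+ * Deblock the macroblock row that has just been reconstructed
+ * (a macroblock-pair row when MBAFF is in use).
+ */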
+static void loop_filter(H264Context *h){
+ MpegEncContext * const s = &h->s;
+ uint8_t *dest_y, *dest_cb, *dest_cr;
+ int linesize, uvlinesize, mb_x, mb_y;
+ const int end_mb_y= s->mb_y + FRAME_MBAFF;
+ const int old_slice_type= h->slice_type;
+
+ if(h->deblocking_filter) {
+ for(mb_x= 0; mb_x<s->mb_width; mb_x++){
+ for(mb_y=end_mb_y - FRAME_MBAFF; mb_y<= end_mb_y; mb_y++){
+ int list, mb_xy, mb_type;
+ mb_xy = h->mb_xy = mb_x + mb_y*s->mb_stride;
+ h->slice_num= h->slice_table[mb_xy];
+ mb_type= s->current_picture.mb_type[mb_xy];
+ h->list_count= h->list_counts[mb_xy];
+
+ if(FRAME_MBAFF)
+ h->mb_mbaff = h->mb_field_decoding_flag = !!IS_INTERLACED(mb_type);
+
+ s->mb_x= mb_x;
+ s->mb_y= mb_y;
+ dest_y = s->current_picture.data[0] + (mb_x + mb_y * s->linesize ) * 16;
+ dest_cb = s->current_picture.data[1] + (mb_x + mb_y * s->uvlinesize) * 8;
+ dest_cr = s->current_picture.data[2] + (mb_x + mb_y * s->uvlinesize) * 8;
+ //FIXME simplify above
+
+ if (MB_FIELD) {
+ linesize = h->mb_linesize = s->linesize * 2;
+ uvlinesize = h->mb_uvlinesize = s->uvlinesize * 2;
+ if(mb_y&1){ //FIXME move out of this function?
+ dest_y -= s->linesize*15;
+ dest_cb-= s->uvlinesize*7;
+ dest_cr-= s->uvlinesize*7;
+ }
+ } else {
+ linesize = h->mb_linesize = s->linesize;
+ uvlinesize = h->mb_uvlinesize = s->uvlinesize;
+ }
+ backup_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, 0);
+ if(fill_filter_caches(h, mb_type))
+ continue;
+ h->chroma_qp[0] = get_chroma_qp(h, 0, s->current_picture.qscale_table[mb_xy]);
+ h->chroma_qp[1] = get_chroma_qp(h, 1, s->current_picture.qscale_table[mb_xy]);
+
+ if (FRAME_MBAFF) {
+ ff_h264_filter_mb (h, mb_x, mb_y, dest_y, dest_cb, dest_cr, linesize, uvlinesize);
+ } else {
+ ff_h264_filter_mb_fast(h, mb_x, mb_y, dest_y, dest_cb, dest_cr, linesize, uvlinesize);
+ }
+ }
+ }
+ }
+ h->slice_type= old_slice_type;
+ s->mb_x= 0;
+ s->mb_y= end_mb_y - FRAME_MBAFF;
+}
+
+static int decode_slice(struct AVCodecContext *avctx, void *arg){
+ H264Context *h = *(void**)arg;
+ MpegEncContext * const s = &h->s;
+ const int part_mask= s->partitioned_frame ? (AC_END|AC_ERROR) : 0x7F;
+
+ s->mb_skip_run= -1;
+
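+    /* Fall back to the general (slower) reconstruction path for MBAFF, field
+     * pictures, non-H.264 codecs (SVQ3) or grayscale-only decoding. */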
+ h->is_complex = FRAME_MBAFF || s->picture_structure != PICT_FRAME || s->codec_id != CODEC_ID_H264 ||
+ (CONFIG_GRAY && (s->flags&CODEC_FLAG_GRAY));
+
+ if( h->pps.cabac ) {
+ /* realign */
+ align_get_bits( &s->gb );
+
+ /* init cabac */
+ ff_init_cabac_states( &h->cabac);
+ ff_init_cabac_decoder( &h->cabac,
+ s->gb.buffer + get_bits_count(&s->gb)/8,
+ (get_bits_left(&s->gb) + 7)/8);
+
+ ff_h264_init_cabac_states(h);
+
+ for(;;){
+//START_TIMER
+ int ret = ff_h264_decode_mb_cabac(h);
+ int eos;
+//STOP_TIMER("decode_mb_cabac")
+
+ if(ret>=0) ff_h264_hl_decode_mb(h);
+
+ if( ret >= 0 && FRAME_MBAFF ) { //FIXME optimal? or let mb_decode decode 16x32 ?
+ s->mb_y++;
+
+ ret = ff_h264_decode_mb_cabac(h);
+
+ if(ret>=0) ff_h264_hl_decode_mb(h);
+ s->mb_y--;
+ }
+ eos = get_cabac_terminate( &h->cabac );
+
+ if( ret < 0 || h->cabac.bytestream > h->cabac.bytestream_end + 2) {
+ av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding MB %d %d, bytestream (%Id)\n", s->mb_x, s->mb_y, h->cabac.bytestream_end - h->cabac.bytestream);
+ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_ERROR|DC_ERROR|MV_ERROR)&part_mask);
+ return -1;
+ }
+
+ if( ++s->mb_x >= s->mb_width ) {
+ s->mb_x = 0;
+ loop_filter(h);
+ ff_draw_horiz_band(s, 16*s->mb_y, 16);
+ ++s->mb_y;
+ if(FIELD_OR_MBAFF_PICTURE) {
+ ++s->mb_y;
+ }
+ }
+
+ if( eos || s->mb_y >= s->mb_height ) {
+ tprintf(s->avctx, "slice end %d %d\n", get_bits_count(&s->gb), s->gb.size_in_bits);
+ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, (AC_END|DC_END|MV_END)&part_mask);
+ return 0;
+ }
+ }
+
+ } else {
+ for(;;){
+ int ret = ff_h264_decode_mb_cavlc(h);
+
+ if(ret>=0) ff_h264_hl_decode_mb(h);
+
+ if(ret>=0 && FRAME_MBAFF){ //FIXME optimal? or let mb_decode decode 16x32 ?
+ s->mb_y++;
+ ret = ff_h264_decode_mb_cavlc(h);
+
+ if(ret>=0) ff_h264_hl_decode_mb(h);
+ s->mb_y--;
+ }
+
+ if(ret<0){
+ av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
+ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_ERROR|DC_ERROR|MV_ERROR)&part_mask);
+
+ return -1;
+ }
+
+ if(++s->mb_x >= s->mb_width){
+ s->mb_x=0;
+ loop_filter(h);
+ ff_draw_horiz_band(s, 16*s->mb_y, 16);
+ ++s->mb_y;
+ if(FIELD_OR_MBAFF_PICTURE) {
+ ++s->mb_y;
+ }
+ if(s->mb_y >= s->mb_height){
+ tprintf(s->avctx, "slice end %d %d\n", get_bits_count(&s->gb), s->gb.size_in_bits);
+
+ if(get_bits_count(&s->gb) == s->gb.size_in_bits ) {
+ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, (AC_END|DC_END|MV_END)&part_mask);
+
+ return 0;
+ }else{
+ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END)&part_mask);
+
+ return -1;
+ }
+ }
+ }
+
+ if(get_bits_count(&s->gb) >= s->gb.size_in_bits && s->mb_skip_run<=0){
+ tprintf(s->avctx, "slice end %d %d\n", get_bits_count(&s->gb), s->gb.size_in_bits);
+ if(get_bits_count(&s->gb) == s->gb.size_in_bits ){
+ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, (AC_END|DC_END|MV_END)&part_mask);
+
+ return 0;
+ }else{
+ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_ERROR|DC_ERROR|MV_ERROR)&part_mask);
+
+ return -1;
+ }
+ }
+ }
+ }
+
+#if 0
+ for(;s->mb_y < s->mb_height; s->mb_y++){
+ for(;s->mb_x < s->mb_width; s->mb_x++){
+ int ret= decode_mb(h);
+
+ ff_h264_hl_decode_mb(h);
+
+ if(ret<0){
+ av_log(s->avctx, AV_LOG_ERROR, "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
+ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_ERROR|DC_ERROR|MV_ERROR)&part_mask);
+
+ return -1;
+ }
+
+ if(++s->mb_x >= s->mb_width){
+ s->mb_x=0;
+ if(++s->mb_y >= s->mb_height){
+ if(get_bits_count(s->gb) == s->gb.size_in_bits){
+ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, (AC_END|DC_END|MV_END)&part_mask);
+
+ return 0;
+ }else{
+ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END)&part_mask);
+
+ return -1;
+ }
+ }
+ }
+
+ if(get_bits_count(s->?gb) >= s->gb?.size_in_bits){
+ if(get_bits_count(s->gb) == s->gb.size_in_bits){
+ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, (AC_END|DC_END|MV_END)&part_mask);
+
+ return 0;
+ }else{
+ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_ERROR|DC_ERROR|MV_ERROR)&part_mask);
+
+ return -1;
+ }
+ }
+ }
+ s->mb_x=0;
+ ff_draw_horiz_band(s, 16*s->mb_y, 16);
+ }
+#endif
+ return -1; //not reached
+}
+
+/* ffdshow custom code */
+#if ENABLE_SLICE_MT_PATCH
+static void copy_context_to_mb(H264mb *dst, H264Context *src)
+{
+ dst->mb_x = src->s.mb_x;
+ dst->mb_y = src->s.mb_y;
+ dst->qscale = src->s.qscale;
+ dst->chroma_qp[0] = src->chroma_qp[0];
+ dst->chroma_qp[1] = src->chroma_qp[1];
+ dst->chroma_pred_mode = src->chroma_pred_mode;
+ dst->intra16x16_pred_mode = src->intra16x16_pred_mode;
+ dst->topleft_samples_available = src->topleft_samples_available;
+ dst->topright_samples_available = src->topright_samples_available;
+
+ memcpy(dst->mb, src->mb, sizeof(src->mb));
+ memcpy(dst->intra4x4_pred_mode_cache, src->intra4x4_pred_mode_cache, sizeof(src->intra4x4_pred_mode_cache));
+ memcpy(dst->non_zero_count_cache, src->non_zero_count_cache, sizeof(src->non_zero_count_cache));
+
+ if(src->slice_type != FF_I_TYPE && src->slice_type != FF_SI_TYPE) {
+ memcpy(dst->sub_mb_type, src->sub_mb_type, sizeof(src->sub_mb_type));
+ memcpy(dst->mv_cache, src->mv_cache, sizeof(src->mv_cache));
+ memcpy(dst->mvd_cache, src->mvd_cache, sizeof(src->mvd_cache));
+ memcpy(dst->direct_cache, src->direct_cache, sizeof(src->direct_cache));
+ memcpy(dst->ref_cache, src->ref_cache, sizeof(src->ref_cache));
+ }
+
+ dst->top_mb_xy = src->top_mb_xy;
+ dst->left_mb_xy[0] = src->left_mb_xy[0];
+ dst->left_mb_xy[1] = src->left_mb_xy[1];
+ dst->cbp = src->cbp;
+
+ dst->mb_xy = src->mb_xy;
+
+ /* FIXME not necessary? */
+ dst->top_samples_available = src->top_samples_available;
+ dst->left_samples_available = src->left_samples_available;
+}
+
+static void copy_mb_to_context(H264Context *dst, H264mb *src)
+{
+ dst->s.mb_x = src->mb_x;
+ dst->s.mb_y = src->mb_y;
+ dst->s.qscale = src->qscale;
+ dst->chroma_qp[0] = src->chroma_qp[0];
+ dst->chroma_qp[1] = src->chroma_qp[1];
+ dst->chroma_pred_mode = src->chroma_pred_mode;
+ dst->intra16x16_pred_mode = src->intra16x16_pred_mode;
+ dst->topleft_samples_available = src->topleft_samples_available;
+ dst->topright_samples_available = src->topright_samples_available;
+
+ memcpy(dst->mb, src->mb, sizeof(src->mb));
+ memcpy(dst->intra4x4_pred_mode_cache, src->intra4x4_pred_mode_cache, sizeof(src->intra4x4_pred_mode_cache));
+ memcpy(dst->non_zero_count_cache, src->non_zero_count_cache, sizeof(src->non_zero_count_cache));
+
+ if(dst->slice_type != FF_I_TYPE && dst->slice_type != FF_SI_TYPE) {
+ memcpy(dst->sub_mb_type, src->sub_mb_type, sizeof(src->sub_mb_type));
+ memcpy(dst->mv_cache, src->mv_cache, sizeof(src->mv_cache));
+ memcpy(dst->mvd_cache, src->mvd_cache, sizeof(src->mvd_cache));
+ memcpy(dst->direct_cache, src->direct_cache, sizeof(src->direct_cache));
+ memcpy(dst->ref_cache, src->ref_cache, sizeof(src->ref_cache));
+ }
+
+ /* Needed for deblocking */
+
+ dst->top_mb_xy = src->top_mb_xy;
+ dst->left_mb_xy[0] = src->left_mb_xy[0];
+ dst->left_mb_xy[1] = src->left_mb_xy[1];
+ dst->cbp = src->cbp;
+
+ dst->mb_xy = src->mb_xy;
+
+ /* FIXME not necessary? */
+ dst->top_samples_available = src->top_samples_available;
+ dst->left_samples_available = src->left_samples_available;
+}
+
+#define MAXBLOCKS 128
+
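+/**
+ * Worker for the experimental slice multithreading patch.
+ * The first context (h == h0) entropy-decodes up to MAXBLOCKS macroblocks into
+ * blocks[phaze] and returns the number decoded once the slice ends (0 if the
+ * buffer was filled and more macroblocks remain, -1 on error); every other
+ * context reconstructs the macroblocks stored during the previous pass.
+ */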
+static int decode_mb_parallelized(struct AVCodecContext *avctx, void *arg)
+{
+ H264Context *h = *(void**)arg;
+ H264Context *h0 = avctx->priv_data;
+ MpegEncContext * const s = &h->s;
+ const int part_mask= s->partitioned_frame ? (AC_END|AC_ERROR) : 0x7F;
+ int i, ret;
+
+ if(h0 == h) {
+ /* first thread does entropy decode */
+
+ for(i = 0; i < MAXBLOCKS; i++) {
+ ret = ff_h264_decode_mb_cabac(h);
+ if(ret < 0 || h->cabac.bytestream > h->cabac.bytestream_end + 2) {
+ av_log(h->s.avctx, AV_LOG_ERROR,
+ "error while decoding MB %d %d, bytestream (%td)\n",
+ s->mb_x, s->mb_y, h->cabac.bytestream_end - h->cabac.bytestream);
+ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y,
+ (AC_ERROR|DC_ERROR|MV_ERROR)&part_mask);
+ return -1;
+ }
+
+ copy_context_to_mb(h->blocks[h->phaze] + i, h);
+ if(++s->mb_x >= s->mb_width) {
+ s->mb_x = 0;
+ ++s->mb_y;
+ }
+
+ if(get_cabac_terminate(&h->cabac) || s->mb_y >= s->mb_height)
+ return i + 1;
+ }
+ return 0;
+
+ } else {
+ /* second thread does hl decode */
+
+ for(i = 0; i < h0->todecode; i++) {
+ copy_mb_to_context(h, h0->blocks[!h0->phaze] + i);
+ ff_h264_hl_decode_mb(h);
+ }
+ return 0;
+ }
+}
+
+static int decode_slice2(struct AVCodecContext *avctx, void *arg){
+ H264Context *h = *(void**)arg;
+ MpegEncContext * const s = &h->s;
+ const int part_mask= s->partitioned_frame ? (AC_END|AC_ERROR) : 0x7F;
+ H264Context *h2 = h->thread_context[1];
+ int i, rv[2];
+
+ h->is_complex = FRAME_MBAFF || s->picture_structure != PICT_FRAME || s->codec_id != CODEC_ID_H264 ||
+ (CONFIG_GRAY && (s->flags&CODEC_FLAG_GRAY)) || (CONFIG_H264_ENCODER && s->encoding);
+
+ clone_slice(h2, h, 1);
+
+ if(!h->blocks[0]) {
+ h->blocks[0] = av_malloc(sizeof(H264mb) * MAXBLOCKS);
+ h->blocks[1] = av_malloc(sizeof(H264mb) * MAXBLOCKS);
+ }
+
+ s->mb_skip_run= -1;
+
+ /* realign */
+ align_get_bits( &s->gb );
+
+ /* init cabac */
+ ff_init_cabac_states( &h->cabac);
+ ff_init_cabac_decoder( &h->cabac,
+ s->gb.buffer + get_bits_count(&s->gb)/8,
+ ( s->gb.size_in_bits - get_bits_count(&s->gb) + 7)/8);
+ ff_h264_init_cabac_states(h);
+
+ h->todecode = 0;
+
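+    /* Ping-pong between the two macroblock buffers: one context entropy-decodes
+     * the next batch into blocks[phaze] while the other reconstructs the batch
+     * stored in blocks[!phaze] during the previous iteration. */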
+ while(1) {
+ avctx->execute(avctx, (void *)decode_mb_parallelized,
+ (void **)h->thread_context, rv, 2, sizeof(void*));
+
+ h->phaze = !h->phaze;
+
+ if(rv[0] == -1)
+ return -1;
+ else if(rv[0] == 0)
+ h->todecode = MAXBLOCKS;
+ else
+ break;
+ }
+
+ for(i = 0; i < rv[0]; i++) {
+ copy_mb_to_context(h2, h->blocks[!h->phaze] + i);
+ ff_h264_hl_decode_mb(h2);
+ }
+
+ tprintf(s->avctx, "slice end %d %d\n", get_bits_count(&s->gb), s->gb.size_in_bits);
+ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, (AC_END|DC_END|MV_END)&part_mask);
+ return 0;
+}
+#endif /* ENABLE_SLICE_MT_PATCH */
+
+/**
+ * Call decode_slice() for each context.
+ *
+ * @param h h264 master context
+ * @param context_count number of contexts to execute
+ */
+static void execute_decode_slices(H264Context *h, int context_count){
+ MpegEncContext * const s = &h->s;
+ AVCodecContext * const avctx= s->avctx;
+ H264Context *hx;
+ int i;
+
+ if(context_count == 1) {
+ /* ffdshow custom code, interlacing is not supported in multithreading mode */
+ #if ENABLE_SLICE_MT_PATCH
+ if(avctx->thread_count > 1 && h->pps.cabac && !(FIELD_OR_MBAFF_PICTURE)) {
+ decode_slice2(avctx, &h);
+ } else {
+ decode_slice(avctx, &h);
+ }
+ #else
+ decode_slice(avctx, &h);
+ #endif
+ } else {
+ for(i = 1; i < context_count; i++) {
+ hx = h->thread_context[i];
+ hx->s.error_recognition = avctx->error_recognition;
+ hx->s.error_count = 0;
+ }
+
+ avctx->execute(avctx, (void *)decode_slice,
+ h->thread_context, NULL, context_count, sizeof(void*));
+
+ /* pull back stuff from slices to master context */
+ hx = h->thread_context[context_count - 1];
+ s->mb_x = hx->s.mb_x;
+ s->mb_y = hx->s.mb_y;
+ s->dropable = hx->s.dropable;
+ s->picture_structure = hx->s.picture_structure;
+ for(i = 1; i < context_count; i++)
+ h->s.error_count += h->thread_context[i]->s.error_count;
+ }
+}
+
+
+static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
+ MpegEncContext * const s = &h->s;
+ AVCodecContext * const avctx= s->avctx;
+ int buf_index=0;
+ H264Context *hx; ///< thread context
+ int context_count = 0;
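+    /* For AVC/MP4 style (length-prefixed) streams, next_avc marks the end of the
+     * current NAL unit; for Annex B streams it stays at buf_size so that only
+     * the start code search below is used. */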
+ int next_avc= h->is_avc ? 0 : buf_size;
+
+ h->max_contexts = avctx->thread_count;
+#if 0
+ int i;
+ for(i=0; i<50; i++){
+ av_log(NULL, AV_LOG_ERROR,"%02X ", buf[i]);
+ }
+#endif
+ if(!(s->flags2 & CODEC_FLAG2_CHUNKS)){
+ h->current_slice = 0;
+ if (!s->first_field)
+ s->current_picture_ptr= NULL;
+ ff_h264_reset_sei(h);
+ }
+
+ for(;;){
+ int consumed;
+ int dst_length;
+ int bit_length;
+ const uint8_t *ptr;
+ int i, nalsize = 0;
+ int err;
+
+ if(buf_index >= next_avc) {
+ if(buf_index >= buf_size) break;
+ nalsize = 0;
+ for(i = 0; i < h->nal_length_size; i++)
+ nalsize = (nalsize << 8) | buf[buf_index++];
+ if(nalsize <= 1 || nalsize > buf_size - buf_index){
+ if(nalsize == 1){
+ buf_index++;
+ continue;
+ }else{
+ av_log(h->s.avctx, AV_LOG_ERROR, "AVC: nal size %d\n", nalsize);
+ break;
+ }
+ }
+ next_avc= buf_index + nalsize;
+ } else {
+ // start code prefix search
+ for(; buf_index + 3 < next_avc; buf_index++){
+ // This should always succeed in the first iteration.
+ if(buf[buf_index] == 0 && buf[buf_index+1] == 0 && buf[buf_index+2] == 1)
+ break;
+ }
+
+ if(buf_index+3 >= buf_size) break;
+
+ buf_index+=3;
+ if(buf_index >= next_avc) continue;
+ }
+
+ hx = h->thread_context[context_count];
+
+ ptr= ff_h264_decode_nal(hx, buf + buf_index, &dst_length, &consumed, next_avc - buf_index);
+ if (ptr==NULL || dst_length < 0){
+ return -1;
+ }
+        while(dst_length > 0 && ptr[dst_length - 1] == 0)
+            dst_length--;
+ bit_length= !dst_length ? 0 : (8*dst_length - ff_h264_decode_rbsp_trailing(h, ptr + dst_length - 1));
+
+ if(s->avctx->debug&FF_DEBUG_STARTCODE){
+ av_log(h->s.avctx, AV_LOG_DEBUG, "NAL %d at %d/%d length %d\n", hx->nal_unit_type, buf_index, buf_size, dst_length);
+ }
+
+ if (h->is_avc && (nalsize != consumed) && nalsize){
+ av_log(h->s.avctx, AV_LOG_DEBUG, "AVC: Consumed only %d bytes instead of %d\n", consumed, nalsize);
+ }
+
+ buf_index += consumed;
+
+ /* ffdshow custom code */
+ if( (s->hurry_up == 1 && hx->nal_ref_idc == 0) //FIXME do not discard SEI id
+ ||(avctx->skip_frame >= AVDISCARD_NONREF && hx->nal_ref_idc == 0))
+ continue;
+
+ again:
+ err = 0;
+ switch(hx->nal_unit_type){
+ case NAL_IDR_SLICE:
+ if (h->nal_unit_type != NAL_IDR_SLICE) {
+ av_log(h->s.avctx, AV_LOG_ERROR, "Invalid mix of idr and non-idr slices");
+ return -1;
+ }
+            idr(h); //FIXME ensure we don't lose some frames if there is reordering
+ case NAL_SLICE:
+ init_get_bits(&hx->s.gb, ptr, bit_length);
+ hx->intra_gb_ptr=
+ hx->inter_gb_ptr= &hx->s.gb;
+ hx->s.data_partitioning = 0;
+
+ if((err = decode_slice_header(hx, h)))
+ break;
+
+ s->current_picture_ptr->key_frame |=
+ (hx->nal_unit_type == NAL_IDR_SLICE) ||
+ (h->sei_recovery_frame_cnt >= 0);
+ if(hx->redundant_pic_count==0 && hx->s.hurry_up < 5
+ && (avctx->skip_frame < AVDISCARD_NONREF || hx->nal_ref_idc)
+ && (avctx->skip_frame < AVDISCARD_BIDIR || hx->slice_type_nos!=FF_B_TYPE)
+ && (avctx->skip_frame < AVDISCARD_NONKEY || hx->slice_type_nos==FF_I_TYPE)
+ && avctx->skip_frame < AVDISCARD_ALL){
+ context_count++;
+ }
+ break;
+ case NAL_DPA:
+ init_get_bits(&hx->s.gb, ptr, bit_length);
+ hx->intra_gb_ptr=
+ hx->inter_gb_ptr= NULL;
+
+ if ((err = decode_slice_header(hx, h)) < 0)
+ break;
+
+ hx->s.data_partitioning = 1;
+
+ break;
+ case NAL_DPB:
+ init_get_bits(&hx->intra_gb, ptr, bit_length);
+ hx->intra_gb_ptr= &hx->intra_gb;
+ break;
+ case NAL_DPC:
+ init_get_bits(&hx->inter_gb, ptr, bit_length);
+ hx->inter_gb_ptr= &hx->inter_gb;
+
+ if(hx->redundant_pic_count==0 && hx->intra_gb_ptr && hx->s.data_partitioning
+ && s->context_initialized
+ && s->hurry_up < 5
+ && (avctx->skip_frame < AVDISCARD_NONREF || hx->nal_ref_idc)
+ && (avctx->skip_frame < AVDISCARD_BIDIR || hx->slice_type_nos!=FF_B_TYPE)
+ && (avctx->skip_frame < AVDISCARD_NONKEY || hx->slice_type_nos==FF_I_TYPE)
+ && avctx->skip_frame < AVDISCARD_ALL)
+ context_count++;
+ break;
+ case NAL_SEI:
+ init_get_bits(&s->gb, ptr, bit_length);
+ ff_h264_decode_sei(h);
+ break;
+ case NAL_SPS:
+ init_get_bits(&s->gb, ptr, bit_length);
+ ff_h264_decode_seq_parameter_set(h);
+
+ if(s->flags& CODEC_FLAG_LOW_DELAY)
+ s->low_delay=1;
+
+ if(avctx->has_b_frames < 2)
+ avctx->has_b_frames= !s->low_delay;
+ break;
+ case NAL_PPS:
+ init_get_bits(&s->gb, ptr, bit_length);
+
+ ff_h264_decode_picture_parameter_set(h, bit_length);
+
+ break;
+ case NAL_AUD:
+ case NAL_END_SEQUENCE:
+ case NAL_END_STREAM:
+ case NAL_FILLER_DATA:
+ case NAL_SPS_EXT:
+ case NAL_AUXILIARY_SLICE:
+ break;
+ default:
+ av_log(avctx, AV_LOG_DEBUG, "Unknown NAL code: %d (%d bits)\n", hx->nal_unit_type, bit_length);
+ }
+
+ if(context_count == h->max_contexts) {
+ execute_decode_slices(h, context_count);
+ context_count = 0;
+ }
+
+ if (err < 0)
+ av_log(h->s.avctx, AV_LOG_ERROR, "decode_slice_header error\n");
+ else if(err == 1) {
+ /* Slice could not be decoded in parallel mode, copy down
+ * NAL unit stuff to context 0 and restart. Note that
+ * rbsp_buffer is not transferred, but since we no longer
+ * run in parallel mode this should not be an issue. */
+ h->nal_unit_type = hx->nal_unit_type;
+ h->nal_ref_idc = hx->nal_ref_idc;
+ hx = h;
+ goto again;
+ }
+ }
+ if(context_count)
+ execute_decode_slices(h, context_count);
+ return buf_index;
+}
+
+/**
+ * returns the number of bytes consumed for building the current frame
+ */
+static int get_consumed_bytes(MpegEncContext *s, int pos, int buf_size){
+    if(pos==0) pos=1; //avoid infinite loops (I doubt that is needed, but ...)
+ if(pos+10>buf_size) pos=buf_size; // oops ;)
+
+ return pos;
+}
+
+static int decode_frame(AVCodecContext *avctx,
+ void *data, int *data_size,
+ const uint8_t *buf, int buf_size)
+{
+ H264Context *h = avctx->priv_data;
+ MpegEncContext *s = &h->s;
+ AVFrame *pict = data;
+ int buf_index;
+
+ s->flags= avctx->flags;
+ s->flags2= avctx->flags2;
+
+ /* end of stream, output what is still in the buffers */
+ if (buf_size == 0) {
+ Picture *out;
+ int i, out_idx;
+
+//FIXME factorize this with the output code below
+ out = h->delayed_pic[0];
+ out_idx = 0;
+ for(i=1; h->delayed_pic[i] && !h->delayed_pic[i]->key_frame && !h->delayed_pic[i]->mmco_reset; i++)
+ if(h->delayed_pic[i]->poc < out->poc){
+ out = h->delayed_pic[i];
+ out_idx = i;
+ }
+
+ for(i=out_idx; h->delayed_pic[i]; i++)
+ h->delayed_pic[i] = h->delayed_pic[i+1];
+
+ if(out){
+ *data_size = sizeof(AVFrame);
+ *pict= *(AVFrame*)out;
+ }
+
+ return 0;
+ }
+
+ /* ffdshow custom code (begin) */
+ if(h->is_avc && !h->got_avcC) {
+ int i, cnt, nalsize;
+ unsigned char *p = avctx->extradata, *pend=p+avctx->extradata_size;
+
+ h->nal_length_size = 2;
+ cnt = 1;
+
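+        // Decode sps from avcC (this ffdshow path assumes a single SPS and 2-byte length fields)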
+ for (i = 0; i < cnt; i++) {
+ nalsize = AV_RB16(p) + 2;
+ if(decode_nal_units(h, p, nalsize) != nalsize) {
+ av_log(avctx, AV_LOG_ERROR, "Decoding sps %d from avcC failed\n", i);
+ return -1;
+ }
+ p += nalsize;
+ }
+ // Decode pps from avcC
+ for (i = 0; p<pend-2; i++) {
+ nalsize = AV_RB16(p) + 2;
+ if(decode_nal_units(h, p, nalsize) != nalsize) {
+ av_log(avctx, AV_LOG_ERROR, "Decoding pps %d from avcC failed\n", i);
+ return -1;
+ }
+ p += nalsize;
+ }
+        // Now store the right NAL length size, which will be used to parse all other NALs
+        h->nal_length_size = avctx->nal_length_size ? avctx->nal_length_size : 4; // ((*(((char*)(avctx->extradata))+4))&0x03)+1;
+ // Do not reparse avcC
+ h->got_avcC = 1;
+ }
+
+ if(!h->got_avcC && !h->is_avc && s->avctx->extradata_size){
+ if(decode_nal_units(h, s->avctx->extradata, s->avctx->extradata_size) < 0)
+ return -1;
+ h->got_avcC = 1;
+ s->picture_number++;
+ }
+ /* ffdshow custom code (end) */
+
+ buf_index=decode_nal_units(h, buf, buf_size);
+ if(buf_index < 0)
+ return -1;
+
+ if(!(s->flags2 & CODEC_FLAG2_CHUNKS) && !s->current_picture_ptr){
+ if (avctx->skip_frame >= AVDISCARD_NONREF || s->hurry_up) return 0;
+ av_log(avctx, AV_LOG_ERROR, "no frame!\n");
+ return -1;
+ }
+
+ if(!(s->flags2 & CODEC_FLAG2_CHUNKS) || (s->mb_y >= s->mb_height && s->mb_height)){
+ Picture *out = s->current_picture_ptr;
+ Picture *cur = s->current_picture_ptr;
+ int i, pics, out_of_order, out_idx;
+
+ field_end(h);
+
+ if (cur->field_poc[0]==INT_MAX || cur->field_poc[1]==INT_MAX) {
+ /* Wait for second field. */
+ *data_size = 0;
+
+ } else {
+ cur->interlaced_frame = 0;
+ cur->repeat_pict = 0;
+
+ /* Signal interlacing information externally. */
+ /* Prioritize picture timing SEI information over used decoding process if it exists. */
+
+ if(h->sps.pic_struct_present_flag){
+ switch (h->sei_pic_struct)
+ {
+ case SEI_PIC_STRUCT_FRAME:
+ break;
+ case SEI_PIC_STRUCT_TOP_FIELD:
+ case SEI_PIC_STRUCT_BOTTOM_FIELD:
+ cur->interlaced_frame = 1;
+ break;
+ case SEI_PIC_STRUCT_TOP_BOTTOM:
+ case SEI_PIC_STRUCT_BOTTOM_TOP:
+ if (FIELD_OR_MBAFF_PICTURE)
+ cur->interlaced_frame = 1;
+ else
+ // try to flag soft telecine progressive
+ cur->interlaced_frame = h->prev_interlaced_frame;
+ break;
+ case SEI_PIC_STRUCT_TOP_BOTTOM_TOP:
+ case SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM:
+ // Signal the possibility of telecined film externally (pic_struct 5,6)
+ // From these hints, let the applications decide if they apply deinterlacing.
+ cur->repeat_pict = 1;
+ break;
+ case SEI_PIC_STRUCT_FRAME_DOUBLING:
+ // Force progressive here, as doubling interlaced frame is a bad idea.
+ cur->repeat_pict = 2;
+ break;
+ case SEI_PIC_STRUCT_FRAME_TRIPLING:
+ cur->repeat_pict = 4;
+ break;
+ }
+
+ if ((h->sei_ct_type & 3) && h->sei_pic_struct <= SEI_PIC_STRUCT_BOTTOM_TOP)
+ cur->interlaced_frame = (h->sei_ct_type & (1<<1)) != 0;
+ }else{
+ /* Derive interlacing flag from used decoding process. */
+ cur->interlaced_frame = FIELD_OR_MBAFF_PICTURE;
+ }
+ h->prev_interlaced_frame = cur->interlaced_frame;
+
+ if (cur->field_poc[0] != cur->field_poc[1]){
+ /* Derive top_field_first from field pocs. */
+ cur->top_field_first = cur->field_poc[0] < cur->field_poc[1];
+ }else{
+ if(cur->interlaced_frame || h->sps.pic_struct_present_flag){
+                /* Use picture timing SEI information. Even if it is information from a past frame, it is better than nothing. */
+ if(h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM
+ || h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM_TOP)
+ cur->top_field_first = 1;
+ else
+ cur->top_field_first = 0;
+ }else{
+ /* Most likely progressive */
+ cur->top_field_first = 0;
+ }
+ }
+
+ /* ffdshow custom code */
+ cur->video_full_range_flag = h->sps.full_range;
+ cur->YCbCr_RGB_matrix_coefficients = h->sps.colorspace;
+
+ //FIXME do something with unavailable reference frames
+
+ /* Sort B-frames into display order */
+
+ if(h->sps.bitstream_restriction_flag
+ && s->avctx->has_b_frames < h->sps.num_reorder_frames){
+ s->avctx->has_b_frames = h->sps.num_reorder_frames;
+ s->low_delay = 0;
+ }
+
+ if( s->avctx->strict_std_compliance >= FF_COMPLIANCE_STRICT
+ && !h->sps.bitstream_restriction_flag){
+ s->avctx->has_b_frames= MAX_DELAYED_PIC_COUNT;
+ s->low_delay= 0;
+ }
+
+ pics = 0;
+ while(h->delayed_pic[pics]) pics++;
+
+ assert(pics <= MAX_DELAYED_PIC_COUNT);
+
+ h->delayed_pic[pics++] = cur;
+ if(cur->reference == 0)
+ cur->reference = DELAYED_PIC_REF;
+
+ out = h->delayed_pic[0];
+ out_idx = 0;
+ for(i=1; h->delayed_pic[i] && !h->delayed_pic[i]->key_frame && !h->delayed_pic[i]->mmco_reset; i++)
+ if(h->delayed_pic[i]->poc < out->poc){
+ out = h->delayed_pic[i];
+ out_idx = i;
+ }
+ if(s->avctx->has_b_frames == 0 && (h->delayed_pic[0]->key_frame || h->delayed_pic[0]->mmco_reset))
+ h->outputed_poc= INT_MIN;
+ out_of_order = out->poc < h->outputed_poc;
+
+ if(h->sps.bitstream_restriction_flag && s->avctx->has_b_frames >= h->sps.num_reorder_frames)
+ { }
+ else if((out_of_order && pics-1 == s->avctx->has_b_frames && s->avctx->has_b_frames < MAX_DELAYED_PIC_COUNT)
+ || (s->low_delay &&
+ ((h->outputed_poc != INT_MIN && out->poc > h->outputed_poc + 2)
+ || cur->pict_type == FF_B_TYPE)))
+ {
+ s->low_delay = 0;
+ s->avctx->has_b_frames++;
+ }
+
+ if(out_of_order || pics > s->avctx->has_b_frames){
+ out->reference &= ~DELAYED_PIC_REF;
+ for(i=out_idx; h->delayed_pic[i]; i++)
+ h->delayed_pic[i] = h->delayed_pic[i+1];
+ }
+ if(!out_of_order && pics > s->avctx->has_b_frames){
+ *data_size = sizeof(AVFrame);
+
+ if(out_idx==0 && h->delayed_pic[0] && (h->delayed_pic[0]->key_frame || h->delayed_pic[0]->mmco_reset)) {
+ h->outputed_poc = INT_MIN;
+ } else
+ h->outputed_poc = out->poc;
+ *pict= *(AVFrame*)out;
+ }else{
+ av_log(avctx, AV_LOG_DEBUG, "no picture\n");
+ }
+ }
+ }
+
+ assert(pict->data[0] || !*data_size);
+ ff_print_debug_info(s, pict);
+
+ /* ffdshow custom code (begin) */
+ pict->h264_poc_decoded = h->poc_lsb + h->poc_msb;
+ pict->h264_poc_outputed = h->outputed_poc;
+    pict->h264_frame_num_decoded = h->frame_num;
+ pict->h264_max_frame_num = 1 << h->sps.log2_max_frame_num;
+ /* ffdshow custom code (end) */
+
+ return get_consumed_bytes(s, buf_index, buf_size);
+}
+
+/**
+ * ffdshow custom stuff (based on decode_nal_units)
+ *
+ * @param[out] recovery_frame_cnt Valid only if a recovery point SEI (GDR) was found.
+ * @return 0: no recovery point, 1: I-frame, 2: Recovery Point SEI (GDR), 3: IDR, -1: error
+ */
+int avcodec_h264_search_recovery_point(AVCodecContext *avctx,
+ const uint8_t *buf, int buf_size, int *recovery_frame_cnt)
+{
+ H264Context *h = avctx->priv_data;
+ MpegEncContext *s = &h->s;
+ int buf_index = 0;
+    int found = 0; // 0: no recovery point, 1: I-frame, 2: Recovery Point SEI (GDR); an IDR returns 3 directly
+ H264Context *hx;
+ int Islice_detected = 0;
+
+ h->nal_length_size = avctx->nal_length_size ? avctx->nal_length_size : 4;
+
+ for(;;){
+ int consumed;
+ int dst_length;
+ int bit_length;
+ const uint8_t *ptr;
+ int i, nalsize = 0;
+ int err;
+
+ if(h->is_avc) {
+ if(buf_index >= buf_size) break;
+ nalsize = 0;
+ for(i = 0; i < h->nal_length_size; i++)
+ nalsize = (nalsize << 8) | buf[buf_index++];
+ if(nalsize <= 1 || (nalsize+buf_index > buf_size)){
+ if(nalsize == 1){
+ buf_index++;
+ continue;
+ }else{
+ av_log(h->s.avctx, AV_LOG_ERROR, "AVC: nal size %d\n", nalsize);
+ break;
+ }
+ }
+ } else {
+ // start code prefix search
+ for(; buf_index + 3 < buf_size; buf_index++){
+ // This should always succeed in the first iteration.
+ if(buf[buf_index] == 0 && buf[buf_index+1] == 0 && buf[buf_index+2] == 1)
+ break;
+ }
+
+ if(buf_index+3 >= buf_size) break;
+
+ buf_index+=3;
+ }
+
+ hx = h->thread_context[0];
+
+ // h->nal_ref_idc = buf[buf_index] >> 5;
+ // h->nal_unit_type = src[buf_index] & 0x1F;
+
+ ptr= ff_h264_decode_nal(hx, buf + buf_index, &dst_length, &consumed, h->is_avc ? nalsize : buf_size - buf_index);
+ if (ptr==NULL || dst_length < 0){
+ return -1;
+ }
+        while(dst_length > 0 && ptr[dst_length - 1] == 0)
+            dst_length--;
+ bit_length= !dst_length ? 0 : (8*dst_length - ff_h264_decode_rbsp_trailing(h, ptr + dst_length - 1));
+
+ if (h->is_avc && (nalsize != consumed)){
+ av_log(h->s.avctx, AV_LOG_ERROR, "AVC: Consumed only %d bytes instead of %d\n", consumed, nalsize);
+ consumed= nalsize;
+ }
+
+ buf_index += consumed;
+
+
+ switch(hx->nal_unit_type){
+ case NAL_IDR_SLICE:
+ return 3;
+ case NAL_SEI:
+ if (ptr[0] == 6/* Recovery Point SEI */){
+ init_get_bits(&s->gb, ptr, bit_length);
+ ff_h264_decode_sei(h);
+ if (found < 2)
+ found = 2;
+ break;
+ }
+ break;
+ case NAL_SPS:
+ init_get_bits(&s->gb, ptr, bit_length);
+ ff_h264_decode_seq_parameter_set(h);
+
+ if(s->flags& CODEC_FLAG_LOW_DELAY)
+ s->low_delay=1;
+
+ if(avctx->has_b_frames < 2)
+ avctx->has_b_frames= !s->low_delay;
+ break;
+ case NAL_PPS:
+ init_get_bits(&s->gb, ptr, bit_length);
+
+ ff_h264_decode_picture_parameter_set(h, bit_length);
+
+ break;
+ case NAL_AUD:
+ {
+ int primary_pic_type;
+ init_get_bits(&s->gb, ptr, bit_length);
+ primary_pic_type = get_bits(&s->gb, 3);
+ if (found == 0 && (primary_pic_type == 0 || primary_pic_type == 3)) // I-frame (all I/SI slices)
+ found = 1;
+ break;
+ }
+ case NAL_SLICE:
+ case NAL_DPA:
+ // decode part of slice header and find I frame
+ if (found == 0){
+ unsigned int slice_type;
+
+ init_get_bits(&s->gb, ptr, bit_length);
+ get_ue_golomb(&s->gb); // first_mb_in_slice
+ slice_type= get_ue_golomb(&s->gb);
+ if (slice_type == 2 || slice_type == 4 || slice_type == 7 || slice_type == 9) // I/SI slice
+ Islice_detected = 1;
+ else
+ return 0;
+ break;
+ }
+ case NAL_DPB:
+ case NAL_DPC:
+ case NAL_END_SEQUENCE:
+ case NAL_END_STREAM:
+ case NAL_FILLER_DATA:
+ case NAL_SPS_EXT:
+ case NAL_AUXILIARY_SLICE:
+ break;
+ default:
+ av_log(avctx, AV_LOG_DEBUG, "Unknown NAL code: %d (%d bits)\n", h->nal_unit_type, bit_length);
+ }
+
+ }
+ if (found == 0 && Islice_detected)
+ found = 1;
+ *recovery_frame_cnt = h->sei_recovery_frame_cnt;
+ return found;
+}
+
+
+av_cold void ff_h264_free_context(H264Context *h)
+{
+ int i;
+
+ free_tables(h); //FIXME cleanup init stuff perhaps
+
+ for(i = 0; i < MAX_SPS_COUNT; i++)
+ av_freep(h->sps_buffers + i);
+
+ for(i = 0; i < MAX_PPS_COUNT; i++)
+ av_freep(h->pps_buffers + i);
+}
+
+av_cold int ff_h264_decode_end(AVCodecContext *avctx)
+{
+ H264Context *h = avctx->priv_data;
+ MpegEncContext *s = &h->s;
+
+ ff_h264_free_context(h);
+
+ MPV_common_end(s);
+
+// memset(h, 0, sizeof(H264Context));
+
+ return 0;
+}
+
+
+AVCodec h264_decoder = {
+ "h264",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_H264,
+ sizeof(H264Context),
+ /*.init = */ff_h264_decode_init,
+ /*.encode = */NULL,
+ /*.close = */ff_h264_decode_end,
+ /*.decode = */decode_frame,
+ /*.capabilities = *//*CODEC_CAP_DRAW_HORIZ_BAND |*/ CODEC_CAP_DR1 | CODEC_CAP_DELAY,
+ /*.next = */NULL,
+ /*.flush = */flush_dpb,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name = */NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
+};
+
+#include "h264_dxva.c"
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264.h
new file mode 100644
index 000000000..aaa704fd5
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264.h
@@ -0,0 +1,1540 @@
+/*
+ * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/h264.h
+ * H.264 / AVC / MPEG4 part10 codec.
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#ifndef AVCODEC_H264_H
+#define AVCODEC_H264_H
+
+/* currently broken! */
+#define ENABLE_SLICE_MT_PATCH 0
+
+#include "dsputil.h"
+#include "cabac.h"
+#include "mpegvideo.h"
+#include "h264pred.h"
+#include "rectangle.h"
+
+#define interlaced_dct interlaced_dct_is_a_bad_name
+#define mb_intra mb_intra_is_not_initialized_see_mb_type
+
+#define LUMA_DC_BLOCK_INDEX 25
+#define CHROMA_DC_BLOCK_INDEX 26
+
+#define CHROMA_DC_COEFF_TOKEN_VLC_BITS 8
+#define COEFF_TOKEN_VLC_BITS 8
+#define TOTAL_ZEROS_VLC_BITS 9
+#define CHROMA_DC_TOTAL_ZEROS_VLC_BITS 3
+#define RUN_VLC_BITS 3
+#define RUN7_VLC_BITS 6
+
+#define MAX_SPS_COUNT 32
+#define MAX_PPS_COUNT 256
+
+#define MAX_MMCO_COUNT 66
+
+#define MAX_DELAYED_PIC_COUNT 16
+
+/* Compiling in interlaced support reduces the speed
+ * of progressive decoding by about 2%. */
+#define ALLOW_INTERLACE
+
+#define ALLOW_NOCHROMA
+
+/**
+ * The maximum number of slices supported by the decoder.
+ * must be a power of 2
+ */
+#define MAX_SLICES 16
+
+#ifdef ALLOW_INTERLACE
+#define MB_MBAFF h->mb_mbaff
+#define MB_FIELD h->mb_field_decoding_flag
+#define FRAME_MBAFF h->mb_aff_frame
+#define FIELD_PICTURE (s->picture_structure != PICT_FRAME)
+#else
+#define MB_MBAFF 0
+#define MB_FIELD 0
+#define FRAME_MBAFF 0
+#define FIELD_PICTURE 0
+#undef IS_INTERLACED
+#define IS_INTERLACED(mb_type) 0
+#endif
+#define FIELD_OR_MBAFF_PICTURE (FRAME_MBAFF || FIELD_PICTURE)
+
+#ifdef ALLOW_NOCHROMA
+#define CHROMA h->sps.chroma_format_idc
+#else
+#define CHROMA 1
+#endif
+
+#ifndef CABAC
+#define CABAC h->pps.cabac
+#endif
+
+#define EXTENDED_SAR 255
+
+#define MB_TYPE_REF0 MB_TYPE_ACPRED //dirty but it fits in 16 bit
+#define MB_TYPE_8x8DCT 0x01000000
+#define IS_REF0(a) ((a) & MB_TYPE_REF0)
+#define IS_8x8DCT(a) ((a) & MB_TYPE_8x8DCT)
+
+/**
+ * Value of Picture.reference when Picture is not a reference picture, but
+ * is held for delayed output.
+ */
+#define DELAYED_PIC_REF 4
+
+
+/* NAL unit types */
+enum {
+ NAL_SLICE=1,
+ NAL_DPA,
+ NAL_DPB,
+ NAL_DPC,
+ NAL_IDR_SLICE,
+ NAL_SEI,
+ NAL_SPS,
+ NAL_PPS,
+ NAL_AUD,
+ NAL_END_SEQUENCE,
+ NAL_END_STREAM,
+ NAL_FILLER_DATA,
+ NAL_SPS_EXT,
+ NAL_AUXILIARY_SLICE=19
+};
+
+/**
+ * SEI message types
+ */
+typedef enum {
+ SEI_BUFFERING_PERIOD = 0, ///< buffering period (H.264, D.1.1)
+ SEI_TYPE_PIC_TIMING = 1, ///< picture timing
+ SEI_TYPE_USER_DATA_UNREGISTERED = 5, ///< unregistered user data
+ SEI_TYPE_RECOVERY_POINT = 6 ///< recovery point (frame # to decoder sync)
+} SEI_Type;
+
+/**
+ * pic_struct in picture timing SEI message
+ */
+typedef enum {
+ SEI_PIC_STRUCT_FRAME = 0, ///< 0: %frame
+ SEI_PIC_STRUCT_TOP_FIELD = 1, ///< 1: top field
+ SEI_PIC_STRUCT_BOTTOM_FIELD = 2, ///< 2: bottom field
+ SEI_PIC_STRUCT_TOP_BOTTOM = 3, ///< 3: top field, bottom field, in that order
+ SEI_PIC_STRUCT_BOTTOM_TOP = 4, ///< 4: bottom field, top field, in that order
+ SEI_PIC_STRUCT_TOP_BOTTOM_TOP = 5, ///< 5: top field, bottom field, top field repeated, in that order
+ SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM = 6, ///< 6: bottom field, top field, bottom field repeated, in that order
+ SEI_PIC_STRUCT_FRAME_DOUBLING = 7, ///< 7: %frame doubling
+ SEI_PIC_STRUCT_FRAME_TRIPLING = 8 ///< 8: %frame tripling
+} SEI_PicStructType;
+
+/**
+ * Sequence parameter set
+ */
+typedef struct SPS{
+
+ int profile_idc;
+ int level_idc;
+ int chroma_format_idc;
+ int transform_bypass; ///< qpprime_y_zero_transform_bypass_flag
+ int log2_max_frame_num; ///< log2_max_frame_num_minus4 + 4
+ int poc_type; ///< pic_order_cnt_type
+ int log2_max_poc_lsb; ///< log2_max_pic_order_cnt_lsb_minus4
+ int delta_pic_order_always_zero_flag;
+ int offset_for_non_ref_pic;
+ int offset_for_top_to_bottom_field;
+ int poc_cycle_length; ///< num_ref_frames_in_pic_order_cnt_cycle
+ int ref_frame_count; ///< num_ref_frames
+ int gaps_in_frame_num_allowed_flag;
+ int mb_width; ///< pic_width_in_mbs_minus1 + 1
+ int mb_height; ///< pic_height_in_map_units_minus1 + 1
+ int frame_mbs_only_flag;
+ int mb_aff; ///<mb_adaptive_frame_field_flag
+ int direct_8x8_inference_flag;
+ int crop; ///< frame_cropping_flag
+ unsigned int crop_left; ///< frame_cropping_rect_left_offset
+ unsigned int crop_right; ///< frame_cropping_rect_right_offset
+ unsigned int crop_top; ///< frame_cropping_rect_top_offset
+ unsigned int crop_bottom; ///< frame_cropping_rect_bottom_offset
+ int vui_parameters_present_flag;
+ AVRational sar;
+ int video_signal_type_present_flag;
+ int full_range;
+ int colour_description_present_flag;
+ enum AVColorPrimaries color_primaries;
+ enum AVColorTransferCharacteristic color_trc;
+ enum AVColorSpace colorspace;
+ int timing_info_present_flag;
+ uint32_t num_units_in_tick;
+ uint32_t time_scale;
+ int fixed_frame_rate_flag;
+    short offset_for_ref_frame[256]; //FIXME dyn alloc?
+ int bitstream_restriction_flag;
+ int num_reorder_frames;
+ int scaling_matrix_present;
+ uint8_t scaling_matrix4[6][16];
+ uint8_t scaling_matrix8[2][64];
+ int nal_hrd_parameters_present_flag;
+ int vcl_hrd_parameters_present_flag;
+ int pic_struct_present_flag;
+ int time_offset_length;
+ int cpb_cnt; ///< See H.264 E.1.2
+ int initial_cpb_removal_delay_length; ///< initial_cpb_removal_delay_length_minus1 +1
+ int cpb_removal_delay_length; ///< cpb_removal_delay_length_minus1 + 1
+ int dpb_output_delay_length; ///< dpb_output_delay_length_minus1 + 1
+ int bit_depth_luma; ///< bit_depth_luma_minus8 + 8
+ int bit_depth_chroma; ///< bit_depth_chroma_minus8 + 8
+ int residual_color_transform_flag; ///< residual_colour_transform_flag
+}SPS;
+
+/**
+ * Picture parameter set
+ */
+typedef struct PPS{
+ unsigned int sps_id;
+ int cabac; ///< entropy_coding_mode_flag
+ int pic_order_present; ///< pic_order_present_flag
+ int slice_group_count; ///< num_slice_groups_minus1 + 1
+ int mb_slice_group_map_type;
+ unsigned int ref_count[2]; ///< num_ref_idx_l0/1_active_minus1 + 1
+ int weighted_pred; ///< weighted_pred_flag
+ int weighted_bipred_idc;
+ int init_qp; ///< pic_init_qp_minus26 + 26
+ int init_qs; ///< pic_init_qs_minus26 + 26
+ int chroma_qp_index_offset[2];
+ int deblocking_filter_parameters_present; ///< deblocking_filter_parameters_present_flag
+ int constrained_intra_pred; ///< constrained_intra_pred_flag
+ int redundant_pic_cnt_present; ///< redundant_pic_cnt_present_flag
+ int transform_8x8_mode; ///< transform_8x8_mode_flag
+ uint8_t scaling_matrix4[6][16];
+ uint8_t scaling_matrix8[2][64];
+ uint8_t chroma_qp_table[2][64]; ///< pre-scaled (with chroma_qp_index_offset) version of qp_table
+ int chroma_qp_diff;
+ // ==> Start patch MPC
+ int slice_group_change_direction_flag;
+ int slice_group_change_rate_minus1;
+ // <== End patch MPC
+}PPS;
+
+/**
+ * Memory management control operation opcode.
+ */
+typedef enum MMCOOpcode{
+ MMCO_END=0,
+ MMCO_SHORT2UNUSED,
+ MMCO_LONG2UNUSED,
+ MMCO_SHORT2LONG,
+ MMCO_SET_MAX_LONG,
+ MMCO_RESET,
+ MMCO_LONG,
+} MMCOOpcode;
+
+/**
+ * Memory management control operation.
+ */
+typedef struct MMCO{
+ MMCOOpcode opcode;
+ int short_pic_num; ///< pic_num without wrapping (pic_num & max_pic_num)
+ int long_arg; ///< index, pic_num, or num long refs depending on opcode
+} MMCO;
+
+/**
+ * Per-macroblock state saved by the experimental slice multithreading patch,
+ * so entropy decoding and macroblock reconstruction can run on separate threads.
+ */
+
+typedef struct H264mb {
+ int mb_x, mb_y;
+ int qscale;
+ int chroma_qp[2]; //QPc
+ int chroma_pred_mode;
+ int intra16x16_pred_mode;
+ unsigned int topleft_samples_available;
+ unsigned int topright_samples_available;
+ int8_t intra4x4_pred_mode_cache[5*8];
+
+ uint8_t non_zero_count_cache[6*8];
+
+ int16_t mv_cache[2][5*8][2];
+ int8_t ref_cache[2][5*8];
+
+ int cbp;
+ int top_mb_xy;
+ int left_mb_xy[2];
+
+ int mb_xy;
+
+ unsigned int sub_mb_type[4];
+
+ DCTELEM mb[16*24];
+ unsigned int top_samples_available,left_samples_available;
+ int16_t mvd_cache[2][5*8][2];
+ uint8_t direct_cache[5*8];
+} H264mb;
+
+
+/**
+ * H264Context
+ */
+typedef struct H264Context{
+ MpegEncContext s;
+ int nal_ref_idc;
+ int nal_unit_type;
+ uint8_t *rbsp_buffer[2];
+ unsigned int rbsp_buffer_size[2];
+
+ /**
+ * Used to parse AVC variant of h264
+ */
+ int is_avc; ///< this flag is != 0 if codec is avc1
+ int got_avcC; ///< flag used to parse avcC data only once
+ int nal_length_size; ///< Number of bytes used for nal length (1, 2 or 4)
+
+ int chroma_qp[2]; //QPc
+
+ int qp_thresh; ///< QP threshold to skip loopfilter
+
+ int prev_mb_skipped;
+ int next_mb_skipped;
+
+ //prediction stuff
+ int chroma_pred_mode;
+ int intra16x16_pred_mode;
+
+ int top_mb_xy;
+ int left_mb_xy[2];
+
+ int top_type;
+ int left_type[2];
+
+ int8_t intra4x4_pred_mode_cache[5*8];
+ int8_t (*intra4x4_pred_mode)[8];
+ H264PredContext hpc;
+ unsigned int topleft_samples_available;
+ unsigned int top_samples_available;
+ unsigned int topright_samples_available;
+ unsigned int left_samples_available;
+ uint8_t (*top_borders[2])[16+2*8];
+ uint8_t left_border[2*(17+2*9)];
+
+ /**
+ * non zero coeff count cache.
+ * is 64 if not available.
+ */
+ DECLARE_ALIGNED_8(uint8_t, non_zero_count_cache)[6*8];
+
+ /*
+ .UU.YYYY
+ .UU.YYYY
+ .vv.YYYY
+ .VV.YYYY
+ */
+ uint8_t (*non_zero_count)[32];
+
+ /**
+ * Motion vector cache.
+ */
+ DECLARE_ALIGNED_16(int16_t, mv_cache)[2][5*8][2];
+ DECLARE_ALIGNED_8(int8_t, ref_cache)[2][5*8];
+#define LIST_NOT_USED -1 //FIXME rename?
+#define PART_NOT_AVAILABLE -2
+
+ /**
+ * is 1 if the specific list MV&references are set to 0,0,-2.
+ */
+ int mv_cache_clean[2];
+
+ /**
+ * number of neighbors (top and/or left) that used 8x8 dct
+ */
+ int neighbor_transform_size;
+
+ /**
+ * block_offset[ 0..23] for frame macroblocks
+ * block_offset[24..47] for field macroblocks
+ */
+ int block_offset[2*(16+8)];
+
+ uint32_t *mb2b_xy; //FIXME are these 4 a good idea?
+ uint32_t *mb2b8_xy;
+ int b_stride; //FIXME use s->b4_stride
+ int b8_stride;
+
+ int mb_linesize; ///< may be equal to s->linesize or s->linesize*2, for mbaff
+ int mb_uvlinesize;
+
+ int emu_edge_width;
+ int emu_edge_height;
+
+ int halfpel_flag;
+ int thirdpel_flag;
+
+ int unknown_svq3_flag;
+ int next_slice_index;
+
+ SPS *sps_buffers[MAX_SPS_COUNT];
+ SPS sps; ///< current sps
+
+ PPS *pps_buffers[MAX_PPS_COUNT];
+ /**
+ * current pps
+ */
+ PPS pps; //FIXME move to Picture perhaps? (->no) do we need that?
+
+ uint32_t dequant4_buffer[6][52][16];
+ uint32_t dequant8_buffer[2][52][64];
+ uint32_t (*dequant4_coeff[6])[16];
+ uint32_t (*dequant8_coeff[2])[64];
+ int dequant_coeff_pps; ///< reinit tables when pps changes
+
+ int slice_num;
+ uint16_t *slice_table_base;
+ uint16_t *slice_table; ///< slice_table_base + 2*mb_stride + 1
+ int slice_type;
+ int slice_type_nos; ///< S free slice type (SI/SP are remapped to I/P)
+ int slice_type_fixed;
+
+ //interlacing specific flags
+ int mb_aff_frame;
+ int mb_field_decoding_flag;
+ int mb_mbaff; ///< mb_aff_frame && mb_field_decoding_flag
+
+ DECLARE_ALIGNED_8(uint16_t, sub_mb_type)[4];
+
+ //POC stuff
+ int poc_lsb;
+ int poc_msb;
+ int delta_poc_bottom;
+ int delta_poc[2];
+ int frame_num;
+ int prev_poc_msb; ///< poc_msb of the last reference pic for POC type 0
+ int prev_poc_lsb; ///< poc_lsb of the last reference pic for POC type 0
+ int frame_num_offset; ///< for POC type 2
+ int prev_frame_num_offset; ///< for POC type 2
+ int prev_frame_num; ///< frame_num of the last pic for POC type 1/2
+
+ /**
+ * frame_num for frames or 2*frame_num+1 for field pics.
+ */
+ int curr_pic_num;
+
+ /**
+ * max_frame_num or 2*max_frame_num for field pics.
+ */
+ int max_pic_num;
+
+ //Weighted pred stuff
+ int use_weight;
+ int use_weight_chroma;
+ int luma_log2_weight_denom;
+ int chroma_log2_weight_denom;
+ int luma_weight[2][48];
+ int luma_offset[2][48];
+ int chroma_weight[2][48][2];
+ int chroma_offset[2][48][2];
+ int implicit_weight[48][48];
+
+ //deblock
+ int deblocking_filter; ///< disable_deblocking_filter_idc with 1<->0
+ int slice_alpha_c0_offset;
+ int slice_beta_offset;
+
+ int redundant_pic_count;
+
+ int direct_spatial_mv_pred;
+ int dist_scale_factor[16];
+ int dist_scale_factor_field[2][32];
+ int map_col_to_list0[2][16+32];
+ int map_col_to_list0_field[2][2][16+32];
+
+ /**
+ * num_ref_idx_l0/1_active_minus1 + 1
+ */
+ unsigned int list_count;
+ uint8_t *list_counts; ///< Array of list_count per MB specifying the slice type
+ unsigned int ref_count[2]; ///< counts frames or fields, depending on current mb mode
+ Picture *short_ref[32];
+ Picture *long_ref[32];
+ Picture default_ref_list[2][32]; ///< base reference list for all slices of a coded picture
+ Picture ref_list[2][48]; /**< 0..15: frame refs, 16..47: mbaff field refs.
+ Reordered version of default_ref_list
+ according to picture reordering in slice header */
+ int ref2frm[MAX_SLICES][2][64]; ///< reference to frame number lists, used in the loop filter, the first 2 are for -2,-1
+ Picture *delayed_pic[MAX_DELAYED_PIC_COUNT+2]; //FIXME size?
+ int outputed_poc;
+
+ /**
+ * memory management control operations buffer.
+ */
+ MMCO mmco[MAX_MMCO_COUNT];
+ int mmco_index;
+
+ int long_ref_count; ///< number of actual long term references
+ int short_ref_count; ///< number of actual short term references
+
+ //data partitioning
+ GetBitContext intra_gb;
+ GetBitContext inter_gb;
+ GetBitContext *intra_gb_ptr;
+ GetBitContext *inter_gb_ptr;
+
+ DECLARE_ALIGNED_16(DCTELEM, mb)[16*24];
+ DCTELEM mb_padding[256]; ///< as mb is addressed by scantable[i] and scantable is uint8_t we can either check that i is not too large or ensure that there is some unused stuff after mb
+
+ /**
+ * Cabac
+ */
+ CABACContext cabac;
+ uint8_t cabac_state[460];
+ int cabac_init_idc;
+
+ /* 0x100 -> non null luma_dc, 0x80/0x40 -> non null chroma_dc (cb/cr), 0x?0 -> chroma_cbp(0,1,2), 0x0? luma_cbp */
+ uint16_t *cbp_table;
+ int cbp;
+ int top_cbp;
+ int left_cbp;
+ /* chroma_pred_mode for i4x4 or i16x16, else 0 */
+ uint8_t *chroma_pred_mode_table;
+ int last_qscale_diff;
+ int16_t (*mvd_table[2])[2];
+ DECLARE_ALIGNED_16(int16_t, mvd_cache)[2][5*8][2];
+ uint8_t *direct_table;
+ uint8_t direct_cache[5*8];
+
+ uint8_t zigzag_scan[16];
+ uint8_t zigzag_scan8x8[64];
+ uint8_t zigzag_scan8x8_cavlc[64];
+ uint8_t field_scan[16];
+ uint8_t field_scan8x8[64];
+ uint8_t field_scan8x8_cavlc[64];
+ const uint8_t *zigzag_scan_q0;
+ const uint8_t *zigzag_scan8x8_q0;
+ const uint8_t *zigzag_scan8x8_cavlc_q0;
+ const uint8_t *field_scan_q0;
+ const uint8_t *field_scan8x8_q0;
+ const uint8_t *field_scan8x8_cavlc_q0;
+
+ int x264_build;
+
+ /**
+ * @defgroup multithreading Members for slice based multithreading
+ * @{
+ */
+ struct H264Context *thread_context[MAX_THREADS];
+
+ /**
+     * current slice number, used to initialize slice_num of each thread/context
+ */
+ int current_slice;
+
+ /**
+ * Max number of threads / contexts.
+ * This is equal to AVCodecContext.thread_count unless
+ * multithreaded decoding is impossible, in which case it is
+ * reduced to 1.
+ */
+ int max_contexts;
+
+ /**
+ * 1 if the single thread fallback warning has already been
+ * displayed, 0 otherwise.
+ */
+ int single_decode_warning;
+
+ int last_slice_type;
+ /** @} */
+
+ int mb_xy;
+
+ uint32_t svq3_watermark_key;
+
+ /**
+ * pic_struct in picture timing SEI message
+ */
+ SEI_PicStructType sei_pic_struct;
+
+ /**
+     * Complements sei_pic_struct.
+ * SEI_PIC_STRUCT_TOP_BOTTOM and SEI_PIC_STRUCT_BOTTOM_TOP indicate interlaced frames.
+ * However, soft telecined frames may have these values.
+ * This is used in an attempt to flag soft telecine progressive.
+ */
+ int prev_interlaced_frame;
+
+ /**
+ * Bit set of clock types for fields/frames in picture timing SEI message.
+ * For each found ct_type, appropriate bit is set (e.g., bit 1 for
+ * interlaced).
+ */
+ int sei_ct_type;
+
+ /**
+ * dpb_output_delay in picture timing SEI message, see H.264 C.2.2
+ */
+ int sei_dpb_output_delay;
+
+ /**
+ * cpb_removal_delay in picture timing SEI message, see H.264 C.1.2
+ */
+ int sei_cpb_removal_delay;
+
+ /**
+ * recovery_frame_cnt from SEI message
+ *
+ * Set to -1 if no recovery point SEI message found or to number of frames
+ * before playback synchronizes. Frames having recovery point are key
+ * frames.
+ */
+ int sei_recovery_frame_cnt;
+
+ int is_complex;
+
+ int luma_weight_flag[2]; ///< 7.4.3.2 luma_weight_lX_flag
+ int chroma_weight_flag[2]; ///< 7.4.3.2 chroma_weight_lX_flag
+
+ // Timestamp stuff
+ int sei_buffering_period_present; ///< Buffering period SEI flag
+ int initial_cpb_removal_delay[32]; ///< Initial timestamps for CPBs
+
+#if ENABLE_SLICE_MT_PATCH
+ /* experimental */
+ int phaze;
+ int todecode;
+ H264mb *blocks[2];
+#endif
+
+ /* ffdshow custom stuff */
+ int has_to_drop_first_non_ref; // Workaround Haali's media splitter (http://forum.doom9.org/showthread.php?p=1226434#post1226434)
+
+ // ==> Start patch MPC
+ int sp_for_switch_flag;
+ int slice_qs_delta;
+ int slice_qp_delta;
+ unsigned int first_mb_in_slice;
+ int bit_offset_to_slice_data;
+ int raw_slice_type;
+ int64_t outputed_rtstart;
+ void* dxva_slice_long;
+ int ref_pic_flag;
+ // <== End patch MPC
+}H264Context;
+
+
+extern const uint8_t ff_h264_chroma_qp[52];
+
+void ff_svq3_luma_dc_dequant_idct_c(DCTELEM *block, int qp);
+
+void ff_svq3_add_idct_c(uint8_t *dst, DCTELEM *block, int stride, int qp, int dc);
+
+/**
+ * Decode SEI
+ */
+int ff_h264_decode_sei(H264Context *h);
+
+/**
+ * Decode SPS
+ */
+int ff_h264_decode_seq_parameter_set(H264Context *h);
+
+/**
+ * Decode PPS
+ */
+int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length);
+
+/**
+ * Decodes a network abstraction layer unit.
+ * @param consumed is the number of bytes used as input
+ * @param length is the length of the array
+ * @param dst_length is the number of decoded bytes FIXME here or a decode rbsp tailing?
+ * @returns decoded bytes, might be src+1 if no escapes
+ */
+const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src, int *dst_length, int *consumed, int length);
+
+/**
+ * identifies the exact end of the bitstream
+ * @return the length of the trailing, or 0 if damaged
+ */
+int ff_h264_decode_rbsp_trailing(H264Context *h, const uint8_t *src);
+
+/**
+ * frees any data that may have been allocated in the H264 context like SPS, PPS etc.
+ */
+av_cold void ff_h264_free_context(H264Context *h);
+
+/**
+ * reconstructs bitstream slice_type.
+ */
+int ff_h264_get_slice_type(const H264Context *h);
+
+/**
+ * allocates tables.
+ * needs width/height
+ */
+int ff_h264_alloc_tables(H264Context *h);
+
+/**
+ * fills the default_ref_list.
+ */
+int ff_h264_fill_default_ref_list(H264Context *h);
+
+int ff_h264_decode_ref_pic_list_reordering(H264Context *h);
+void ff_h264_fill_mbaff_ref_list(H264Context *h);
+void ff_h264_remove_all_refs(H264Context *h);
+
+/**
+ * Executes the reference picture marking (memory management control operations).
+ */
+int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count);
+
+int ff_h264_decode_ref_pic_marking(H264Context *h, GetBitContext *gb);
+
+
+/**
+ * checks if the top & left blocks are available if needed & changes the dc mode so it only uses the available blocks.
+ */
+int ff_h264_check_intra4x4_pred_mode(H264Context *h);
+
+/**
+ * checks if the top & left blocks are available if needed & changes the dc mode so it only uses the available blocks.
+ */
+int ff_h264_check_intra_pred_mode(H264Context *h, int mode);
+
+void ff_h264_write_back_intra_pred_mode(H264Context *h);
+void ff_h264_hl_decode_mb(H264Context *h);
+int ff_h264_frame_start(H264Context *h);
+av_cold int ff_h264_decode_init(AVCodecContext *avctx);
+av_cold int ff_h264_decode_end(AVCodecContext *avctx);
+av_cold void ff_h264_decode_init_vlc(void);
+
+/**
+ * decodes a macroblock
+ * @returns 0 if OK, AC_ERROR / DC_ERROR / MV_ERROR if an error is noticed
+ */
+int ff_h264_decode_mb_cavlc(H264Context *h);
+
+/**
+ * decodes a CABAC coded macroblock
+ * @returns 0 if OK, AC_ERROR / DC_ERROR / MV_ERROR if an error is noticed
+ */
+int ff_h264_decode_mb_cabac(H264Context *h);
+
+void ff_h264_init_cabac_states(H264Context *h);
+
+void ff_h264_direct_dist_scale_factor(H264Context * const h);
+void ff_h264_direct_ref_list_init(H264Context * const h);
+void ff_h264_pred_direct_motion(H264Context * const h, int *mb_type);
+
+void ff_h264_filter_mb_fast( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize);
+void ff_h264_filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize);
+
+/**
+ * Reset SEI values at the beginning of the frame.
+ *
+ * @param h H.264 context.
+ */
+void ff_h264_reset_sei(H264Context *h);
+
+
+/*
+o-o o-o
+ / / /
+o-o o-o
+ ,---'
+o-o o-o
+ / / /
+o-o o-o
+*/
+// This table must be here because scan8[constant] must be known at compile time
+static const uint8_t scan8[16 + 2*4]={
+ 4+1*8, 5+1*8, 4+2*8, 5+2*8,
+ 6+1*8, 7+1*8, 6+2*8, 7+2*8,
+ 4+3*8, 5+3*8, 4+4*8, 5+4*8,
+ 6+3*8, 7+3*8, 6+4*8, 7+4*8,
+ 1+1*8, 2+1*8,
+ 1+2*8, 2+2*8,
+ 1+4*8, 2+4*8,
+ 1+5*8, 2+5*8,
+};
+
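+/**
+ * Pack two 16-bit values (typically the x/y components of a motion vector)
+ * into one 32-bit word in host byte order, so the pair can be read or written
+ * with a single access.
+ */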
+static av_always_inline uint32_t pack16to32(int a, int b){
+#if HAVE_BIGENDIAN
+ return (b&0xFFFF) + (a<<16);
+#else
+ return (a&0xFFFF) + (b<<16);
+#endif
+}
+
+/**
+ * gets the chroma qp.
+ */
+static inline int get_chroma_qp(H264Context *h, int t, int qscale){
+ return h->pps.chroma_qp_table[t][qscale];
+}
+
+static inline void pred_pskip_motion(H264Context * const h, int * const mx, int * const my);
+
+static void fill_decode_caches(H264Context *h, int mb_type){
+ MpegEncContext * const s = &h->s;
+ const int mb_xy= h->mb_xy;
+ int topleft_xy, top_xy, topright_xy, left_xy[2];
+ int topleft_type, top_type, topright_type, left_type[2];
+ const uint8_t * left_block;
+ int topleft_partition= -1;
+ int i;
+ static const uint8_t left_block_options[4][16]={
+ {0,1,2,3,7,10,8,11,7+0*8, 7+1*8, 7+2*8, 7+3*8, 2+0*8, 2+3*8, 2+1*8, 2+2*8},
+ {2,2,3,3,8,11,8,11,7+2*8, 7+2*8, 7+3*8, 7+3*8, 2+1*8, 2+2*8, 2+1*8, 2+2*8},
+ {0,0,1,1,7,10,7,10,7+0*8, 7+0*8, 7+1*8, 7+1*8, 2+0*8, 2+3*8, 2+0*8, 2+3*8},
+ {0,2,0,2,7,10,7,10,7+0*8, 7+2*8, 7+0*8, 7+2*8, 2+0*8, 2+3*8, 2+0*8, 2+3*8}
+ };
+
+ top_xy = mb_xy - (s->mb_stride << MB_FIELD);
+
+ /* Wow, what a mess, why didn't they simplify the interlacing & intra
+ * stuff, I can't imagine that these complex rules are worth it. */
+
+ topleft_xy = top_xy - 1;
+ topright_xy= top_xy + 1;
+ left_xy[1] = left_xy[0] = mb_xy-1;
+ left_block = left_block_options[0];
+ if(FRAME_MBAFF){
+ const int left_mb_field_flag = IS_INTERLACED(s->current_picture.mb_type[mb_xy-1]);
+ const int curr_mb_field_flag = IS_INTERLACED(mb_type);
+ if(s->mb_y&1){
+ if (left_mb_field_flag != curr_mb_field_flag) {
+ left_xy[1] = left_xy[0] = mb_xy - s->mb_stride - 1;
+ if (curr_mb_field_flag) {
+ left_xy[1] += s->mb_stride;
+ left_block = left_block_options[3];
+ } else {
+ topleft_xy += s->mb_stride;
+ // take top left mv from the middle of the mb, as opposed to all other modes which use the bottom right partition
+ topleft_partition = 0;
+ left_block = left_block_options[1];
+ }
+ }
+ }else{
+ if(curr_mb_field_flag){
+ topleft_xy += s->mb_stride & (((s->current_picture.mb_type[top_xy - 1]>>7)&1)-1);
+ topright_xy += s->mb_stride & (((s->current_picture.mb_type[top_xy + 1]>>7)&1)-1);
+ top_xy += s->mb_stride & (((s->current_picture.mb_type[top_xy ]>>7)&1)-1);
+ }
+ if (left_mb_field_flag != curr_mb_field_flag) {
+ left_xy[1] = left_xy[0] = mb_xy - 1;
+ if (curr_mb_field_flag) {
+ left_xy[1] += s->mb_stride;
+ left_block = left_block_options[3];
+ } else {
+ left_block = left_block_options[2];
+ }
+ }
+ }
+ }
+
+ h->top_mb_xy = top_xy;
+ h->left_mb_xy[0] = left_xy[0];
+ h->left_mb_xy[1] = left_xy[1];
+ topleft_type = h->slice_table[topleft_xy ] == h->slice_num ? s->current_picture.mb_type[topleft_xy] : 0;
+ top_type = h->slice_table[top_xy ] == h->slice_num ? s->current_picture.mb_type[top_xy] : 0;
+ topright_type= h->slice_table[topright_xy] == h->slice_num ? s->current_picture.mb_type[topright_xy]: 0;
+ left_type[0] = h->slice_table[left_xy[0] ] == h->slice_num ? s->current_picture.mb_type[left_xy[0]] : 0;
+ left_type[1] = h->slice_table[left_xy[1] ] == h->slice_num ? s->current_picture.mb_type[left_xy[1]] : 0;
+
+ if(IS_INTRA(mb_type)){
+ int type_mask= h->pps.constrained_intra_pred ? IS_INTRA(-1) : -1;
+ h->topleft_samples_available=
+ h->top_samples_available=
+ h->left_samples_available= 0xFFFF;
+ h->topright_samples_available= 0xEEEA;
+
+ if(!(top_type & type_mask)){
+ h->topleft_samples_available= 0xB3FF;
+ h->top_samples_available= 0x33FF;
+ h->topright_samples_available= 0x26EA;
+ }
+ if(IS_INTERLACED(mb_type) != IS_INTERLACED(left_type[0])){
+ if(IS_INTERLACED(mb_type)){
+ if(!(left_type[0] & type_mask)){
+ h->topleft_samples_available&= 0xDFFF;
+ h->left_samples_available&= 0x5FFF;
+ }
+ if(!(left_type[1] & type_mask)){
+ h->topleft_samples_available&= 0xFF5F;
+ h->left_samples_available&= 0xFF5F;
+ }
+ }else{
+ int left_typei = h->slice_table[left_xy[0] + s->mb_stride ] == h->slice_num
+ ? s->current_picture.mb_type[left_xy[0] + s->mb_stride] : 0;
+ assert(left_xy[0] == left_xy[1]);
+ if(!((left_typei & type_mask) && (left_type[0] & type_mask))){
+ h->topleft_samples_available&= 0xDF5F;
+ h->left_samples_available&= 0x5F5F;
+ }
+ }
+ }else{
+ if(!(left_type[0] & type_mask)){
+ h->topleft_samples_available&= 0xDF5F;
+ h->left_samples_available&= 0x5F5F;
+ }
+ }
+
+ if(!(topleft_type & type_mask))
+ h->topleft_samples_available&= 0x7FFF;
+
+ if(!(topright_type & type_mask))
+ h->topright_samples_available&= 0xFBFF;
+
+ if(IS_INTRA4x4(mb_type)){
+ if(IS_INTRA4x4(top_type)){
+ h->intra4x4_pred_mode_cache[4+8*0]= h->intra4x4_pred_mode[top_xy][4];
+ h->intra4x4_pred_mode_cache[5+8*0]= h->intra4x4_pred_mode[top_xy][5];
+ h->intra4x4_pred_mode_cache[6+8*0]= h->intra4x4_pred_mode[top_xy][6];
+ h->intra4x4_pred_mode_cache[7+8*0]= h->intra4x4_pred_mode[top_xy][3];
+ }else{
+ int pred;
+ if(!(top_type & type_mask))
+ pred= -1;
+ else{
+ pred= 2;
+ }
+ h->intra4x4_pred_mode_cache[4+8*0]=
+ h->intra4x4_pred_mode_cache[5+8*0]=
+ h->intra4x4_pred_mode_cache[6+8*0]=
+ h->intra4x4_pred_mode_cache[7+8*0]= pred;
+ }
+ for(i=0; i<2; i++){
+ if(IS_INTRA4x4(left_type[i])){
+ h->intra4x4_pred_mode_cache[3+8*1 + 2*8*i]= h->intra4x4_pred_mode[left_xy[i]][left_block[0+2*i]];
+ h->intra4x4_pred_mode_cache[3+8*2 + 2*8*i]= h->intra4x4_pred_mode[left_xy[i]][left_block[1+2*i]];
+ }else{
+ int pred;
+ if(!(left_type[i] & type_mask))
+ pred= -1;
+ else{
+ pred= 2;
+ }
+ h->intra4x4_pred_mode_cache[3+8*1 + 2*8*i]=
+ h->intra4x4_pred_mode_cache[3+8*2 + 2*8*i]= pred;
+ }
+ }
+ }
+ }
+
+
+/*
+0 . T T. T T T T
+1 L . .L . . . .
+2 L . .L . . . .
+3 . T TL . . . .
+4 L . .L . . . .
+5 L . .. . . . .
+*/
+//FIXME constraint_intra_pred & partitioning & nnz (let us hope this is just a typo in the spec)
+ if(top_type){
+ *(uint32_t*)&h->non_zero_count_cache[4+8*0]= *(uint32_t*)&h->non_zero_count[top_xy][4+3*8];
+ h->non_zero_count_cache[1+8*0]= h->non_zero_count[top_xy][1+1*8];
+ h->non_zero_count_cache[2+8*0]= h->non_zero_count[top_xy][2+1*8];
+
+ h->non_zero_count_cache[1+8*3]= h->non_zero_count[top_xy][1+2*8];
+ h->non_zero_count_cache[2+8*3]= h->non_zero_count[top_xy][2+2*8];
+ }else {
+ h->non_zero_count_cache[1+8*0]=
+ h->non_zero_count_cache[2+8*0]=
+
+ h->non_zero_count_cache[1+8*3]=
+ h->non_zero_count_cache[2+8*3]=
+ *(uint32_t*)&h->non_zero_count_cache[4+8*0]= CABAC && !IS_INTRA(mb_type) ? 0 : 0x40404040;
+ }
+
+ for (i=0; i<2; i++) {
+ if(left_type[i]){
+ h->non_zero_count_cache[3+8*1 + 2*8*i]= h->non_zero_count[left_xy[i]][left_block[8+0+2*i]];
+ h->non_zero_count_cache[3+8*2 + 2*8*i]= h->non_zero_count[left_xy[i]][left_block[8+1+2*i]];
+ h->non_zero_count_cache[0+8*1 + 8*i]= h->non_zero_count[left_xy[i]][left_block[8+4+2*i]];
+ h->non_zero_count_cache[0+8*4 + 8*i]= h->non_zero_count[left_xy[i]][left_block[8+5+2*i]];
+ }else{
+ h->non_zero_count_cache[3+8*1 + 2*8*i]=
+ h->non_zero_count_cache[3+8*2 + 2*8*i]=
+ h->non_zero_count_cache[0+8*1 + 8*i]=
+ h->non_zero_count_cache[0+8*4 + 8*i]= CABAC && !IS_INTRA(mb_type) ? 0 : 64;
+ }
+ }
+
+ if( CABAC ) {
+ // top_cbp
+ if(top_type) {
+ h->top_cbp = h->cbp_table[top_xy];
+ } else if(IS_INTRA(mb_type)) {
+ h->top_cbp = 0x1C0;
+ } else {
+ h->top_cbp = 0;
+ }
+ // left_cbp
+ if (left_type[0]) {
+ h->left_cbp = h->cbp_table[left_xy[0]] & 0x1f0;
+ } else if(IS_INTRA(mb_type)) {
+ h->left_cbp = 0x1C0;
+ } else {
+ h->left_cbp = 0;
+ }
+ if (left_type[0]) {
+ h->left_cbp |= ((h->cbp_table[left_xy[0]]>>((left_block[0]&(~1))+1))&0x1) << 1;
+ }
+ if (left_type[1]) {
+ h->left_cbp |= ((h->cbp_table[left_xy[1]]>>((left_block[2]&(~1))+1))&0x1) << 3;
+ }
+ }
+
+#if 1
+ if(IS_INTER(mb_type) || IS_DIRECT(mb_type)){
+ int list;
+ for(list=0; list<h->list_count; list++){
+ if(!USES_LIST(mb_type, list) && !IS_DIRECT(mb_type)){
+ /*if(!h->mv_cache_clean[list]){
+ memset(h->mv_cache [list], 0, 8*5*2*sizeof(int16_t)); //FIXME clean only input? clean at all?
+ memset(h->ref_cache[list], PART_NOT_AVAILABLE, 8*5*sizeof(int8_t));
+ h->mv_cache_clean[list]= 1;
+ }*/
+ continue;
+ }
+ h->mv_cache_clean[list]= 0;
+
+ if(USES_LIST(top_type, list)){
+ const int b_xy= h->mb2b_xy[top_xy] + 3*h->b_stride;
+ const int b8_xy= h->mb2b8_xy[top_xy] + h->b8_stride;
+ AV_COPY128(h->mv_cache[list][scan8[0] + 0 - 1*8], s->current_picture.motion_val[list][b_xy + 0]);
+ h->ref_cache[list][scan8[0] + 0 - 1*8]=
+ h->ref_cache[list][scan8[0] + 1 - 1*8]= s->current_picture.ref_index[list][b8_xy + 0];
+ h->ref_cache[list][scan8[0] + 2 - 1*8]=
+ h->ref_cache[list][scan8[0] + 3 - 1*8]= s->current_picture.ref_index[list][b8_xy + 1];
+ }else{
+ AV_ZERO128(h->mv_cache[list][scan8[0] + 0 - 1*8]);
+ *(uint32_t*)&h->ref_cache[list][scan8[0] + 0 - 1*8]= ((top_type ? LIST_NOT_USED : PART_NOT_AVAILABLE)&0xFF)*0x01010101;
+ }
+
+ for(i=0; i<2; i++){
+ int cache_idx = scan8[0] - 1 + i*2*8;
+ if(USES_LIST(left_type[i], list)){
+ const int b_xy= h->mb2b_xy[left_xy[i]] + 3;
+ const int b8_xy= h->mb2b8_xy[left_xy[i]] + 1;
+ *(uint32_t*)h->mv_cache[list][cache_idx ]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[0+i*2]];
+ *(uint32_t*)h->mv_cache[list][cache_idx+8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[1+i*2]];
+ h->ref_cache[list][cache_idx ]= s->current_picture.ref_index[list][b8_xy + h->b8_stride*(left_block[0+i*2]>>1)];
+ h->ref_cache[list][cache_idx+8]= s->current_picture.ref_index[list][b8_xy + h->b8_stride*(left_block[1+i*2]>>1)];
+ }else{
+ *(uint32_t*)h->mv_cache [list][cache_idx ]=
+ *(uint32_t*)h->mv_cache [list][cache_idx+8]= 0;
+ h->ref_cache[list][cache_idx ]=
+ h->ref_cache[list][cache_idx+8]= (left_type[i]) ? LIST_NOT_USED : PART_NOT_AVAILABLE;
+ }
+ }
+
+ if((IS_DIRECT(mb_type) && !h->direct_spatial_mv_pred) && !FRAME_MBAFF)
+ continue;
+
+ if(USES_LIST(topleft_type, list)){
+ const int b_xy = h->mb2b_xy[topleft_xy] + 3 + h->b_stride + (topleft_partition & 2*h->b_stride);
+ const int b8_xy= h->mb2b8_xy[topleft_xy] + 1 + (topleft_partition & h->b8_stride);
+ *(uint32_t*)h->mv_cache[list][scan8[0] - 1 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy];
+ h->ref_cache[list][scan8[0] - 1 - 1*8]= s->current_picture.ref_index[list][b8_xy];
+ }else{
+ *(uint32_t*)h->mv_cache[list][scan8[0] - 1 - 1*8]= 0;
+ h->ref_cache[list][scan8[0] - 1 - 1*8]= topleft_type ? LIST_NOT_USED : PART_NOT_AVAILABLE;
+ }
+
+ if(USES_LIST(topright_type, list)){
+ const int b_xy= h->mb2b_xy[topright_xy] + 3*h->b_stride;
+ const int b8_xy= h->mb2b8_xy[topright_xy] + h->b8_stride;
+ *(uint32_t*)h->mv_cache[list][scan8[0] + 4 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy];
+ h->ref_cache[list][scan8[0] + 4 - 1*8]= s->current_picture.ref_index[list][b8_xy];
+ }else{
+ *(uint32_t*)h->mv_cache [list][scan8[0] + 4 - 1*8]= 0;
+ h->ref_cache[list][scan8[0] + 4 - 1*8]= topright_type ? LIST_NOT_USED : PART_NOT_AVAILABLE;
+ }
+
+ if((IS_SKIP(mb_type) || IS_DIRECT(mb_type)) && !FRAME_MBAFF)
+ continue;
+
+ h->ref_cache[list][scan8[5 ]+1] =
+ h->ref_cache[list][scan8[7 ]+1] =
+ h->ref_cache[list][scan8[13]+1] = //FIXME remove past 3 (init somewhere else)
+ h->ref_cache[list][scan8[4 ]] =
+ h->ref_cache[list][scan8[12]] = PART_NOT_AVAILABLE;
+ *(uint32_t*)h->mv_cache [list][scan8[5 ]+1]=
+ *(uint32_t*)h->mv_cache [list][scan8[7 ]+1]=
+ *(uint32_t*)h->mv_cache [list][scan8[13]+1]= //FIXME remove past 3 (init somewhere else)
+ *(uint32_t*)h->mv_cache [list][scan8[4 ]]=
+ *(uint32_t*)h->mv_cache [list][scan8[12]]= 0;
+
+ if( CABAC ) {
+            /* XXX yuck, load mvd */
+ if(USES_LIST(top_type, list)){
+ const int b_xy= h->mb2b_xy[top_xy] + 3*h->b_stride;
+ AV_COPY128(h->mvd_cache[list][scan8[0] + 0 - 1*8], h->mvd_table[list][b_xy + 0]);
+ }else{
+ AV_ZERO128(h->mvd_cache[list][scan8[0] + 0 - 1*8]);
+ }
+ if(USES_LIST(left_type[0], list)){
+ const int b_xy= h->mb2b_xy[left_xy[0]] + 3;
+ *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 0*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[0]];
+ *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[1]];
+ }else{
+ *(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 0*8]=
+ *(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 1*8]= 0;
+ }
+ if(USES_LIST(left_type[1], list)){
+ const int b_xy= h->mb2b_xy[left_xy[1]] + 3;
+ *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 2*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[2]];
+ *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 3*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[3]];
+ }else{
+ *(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 2*8]=
+ *(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 3*8]= 0;
+ }
+ *(uint32_t*)h->mvd_cache [list][scan8[5 ]+1]=
+ *(uint32_t*)h->mvd_cache [list][scan8[7 ]+1]=
+ *(uint32_t*)h->mvd_cache [list][scan8[13]+1]= //FIXME remove past 3 (init somewhere else)
+ *(uint32_t*)h->mvd_cache [list][scan8[4 ]]=
+ *(uint32_t*)h->mvd_cache [list][scan8[12]]= 0;
+
+ if(h->slice_type_nos == FF_B_TYPE){
+ fill_rectangle(&h->direct_cache[scan8[0]], 4, 4, 8, 0, 1);
+
+ if(IS_DIRECT(top_type)){
+ *(uint32_t*)&h->direct_cache[scan8[0] - 1*8]= 0x01010101;
+ }else if(IS_8X8(top_type)){
+ int b8_xy = h->mb2b8_xy[top_xy] + h->b8_stride;
+ h->direct_cache[scan8[0] + 0 - 1*8]= h->direct_table[b8_xy];
+ h->direct_cache[scan8[0] + 2 - 1*8]= h->direct_table[b8_xy + 1];
+ }else{
+ *(uint32_t*)&h->direct_cache[scan8[0] - 1*8]= 0;
+ }
+
+ if(IS_DIRECT(left_type[0]))
+ h->direct_cache[scan8[0] - 1 + 0*8]= 1;
+ else if(IS_8X8(left_type[0]))
+ h->direct_cache[scan8[0] - 1 + 0*8]= h->direct_table[h->mb2b8_xy[left_xy[0]] + 1 + h->b8_stride*(left_block[0]>>1)];
+ else
+ h->direct_cache[scan8[0] - 1 + 0*8]= 0;
+
+ if(IS_DIRECT(left_type[1]))
+ h->direct_cache[scan8[0] - 1 + 2*8]= 1;
+ else if(IS_8X8(left_type[1]))
+ h->direct_cache[scan8[0] - 1 + 2*8]= h->direct_table[h->mb2b8_xy[left_xy[1]] + 1 + h->b8_stride*(left_block[2]>>1)];
+ else
+ h->direct_cache[scan8[0] - 1 + 2*8]= 0;
+ }
+ }
+
+ if(FRAME_MBAFF){
+#define MAP_MVS\
+ MAP_F2F(scan8[0] - 1 - 1*8, topleft_type)\
+ MAP_F2F(scan8[0] + 0 - 1*8, top_type)\
+ MAP_F2F(scan8[0] + 1 - 1*8, top_type)\
+ MAP_F2F(scan8[0] + 2 - 1*8, top_type)\
+ MAP_F2F(scan8[0] + 3 - 1*8, top_type)\
+ MAP_F2F(scan8[0] + 4 - 1*8, topright_type)\
+ MAP_F2F(scan8[0] - 1 + 0*8, left_type[0])\
+ MAP_F2F(scan8[0] - 1 + 1*8, left_type[0])\
+ MAP_F2F(scan8[0] - 1 + 2*8, left_type[1])\
+ MAP_F2F(scan8[0] - 1 + 3*8, left_type[1])
+ if(MB_FIELD){
+#define MAP_F2F(idx, mb_type)\
+ if(!IS_INTERLACED(mb_type) && h->ref_cache[list][idx] >= 0){\
+ h->ref_cache[list][idx] <<= 1;\
+ h->mv_cache[list][idx][1] /= 2;\
+ h->mvd_cache[list][idx][1] /= 2;\
+ }
+ MAP_MVS
+#undef MAP_F2F
+ }else{
+#define MAP_F2F(idx, mb_type)\
+ if(IS_INTERLACED(mb_type) && h->ref_cache[list][idx] >= 0){\
+ h->ref_cache[list][idx] >>= 1;\
+ h->mv_cache[list][idx][1] <<= 1;\
+ h->mvd_cache[list][idx][1] <<= 1;\
+ }
+ MAP_MVS
+#undef MAP_F2F
+ }
+ }
+ }
+ }
+#endif
+
+ h->neighbor_transform_size= !!IS_8x8DCT(top_type) + !!IS_8x8DCT(left_type[0]);
+}
+
+/**
+ * Fills the caches needed by the loop filter.
+ * @returns non-zero if the loop filter can be skipped
+ */
+static int fill_filter_caches(H264Context *h, int mb_type){
+ MpegEncContext * const s = &h->s;
+ const int mb_xy= h->mb_xy;
+ int top_xy, left_xy[2];
+ int top_type, left_type[2];
+ int i;
+
+ top_xy = mb_xy - (s->mb_stride << MB_FIELD);
+
+ //FIXME deblocking could skip the intra and nnz parts.
+
+ /* Wow, what a mess, why didn't they simplify the interlacing & intra
+ * stuff, I can't imagine that these complex rules are worth it. */
+
+ left_xy[1] = left_xy[0] = mb_xy-1;
+ if(FRAME_MBAFF){
+ const int left_mb_field_flag = IS_INTERLACED(s->current_picture.mb_type[mb_xy-1]);
+ const int curr_mb_field_flag = IS_INTERLACED(mb_type);
+ if(s->mb_y&1){
+ if (left_mb_field_flag != curr_mb_field_flag) {
+ left_xy[0] -= s->mb_stride;
+ }
+ }else{
+ if(curr_mb_field_flag){
+ top_xy += s->mb_stride & (((s->current_picture.mb_type[top_xy ]>>7)&1)-1);
+ }
+ if (left_mb_field_flag != curr_mb_field_flag) {
+ left_xy[1] += s->mb_stride;
+ }
+ }
+ }
+
+ h->top_mb_xy = top_xy;
+ h->left_mb_xy[0] = left_xy[0];
+ h->left_mb_xy[1] = left_xy[1];
+ {
+ //for sufficiently low qp, filtering wouldn't do anything
+ //this is a conservative estimate: could also check beta_offset and more accurate chroma_qp
+ int qp_thresh = h->qp_thresh; //FIXME strictly we should store qp_thresh for each mb of a slice
+ int qp = s->current_picture.qscale_table[mb_xy];
+ if(qp <= qp_thresh
+ && (left_xy[0]<0 || ((qp + s->current_picture.qscale_table[left_xy[0]] + 1)>>1) <= qp_thresh)
+ && (top_xy < 0 || ((qp + s->current_picture.qscale_table[top_xy ] + 1)>>1) <= qp_thresh)){
+ if(!FRAME_MBAFF)
+ return 1;
+ if( (left_xy[0]< 0 || ((qp + s->current_picture.qscale_table[left_xy[1] ] + 1)>>1) <= qp_thresh)
+ && (top_xy < s->mb_stride || ((qp + s->current_picture.qscale_table[top_xy -s->mb_stride] + 1)>>1) <= qp_thresh))
+ return 1;
+ }
+ }
+
+ if(h->deblocking_filter == 2){
+ h->top_type = top_type = h->slice_table[top_xy ] == h->slice_num ? s->current_picture.mb_type[top_xy] : 0;
+ h->left_type[0]= left_type[0] = h->slice_table[left_xy[0] ] == h->slice_num ? s->current_picture.mb_type[left_xy[0]] : 0;
+ h->left_type[1]= left_type[1] = h->slice_table[left_xy[1] ] == h->slice_num ? s->current_picture.mb_type[left_xy[1]] : 0;
+ }else{
+ h->top_type = top_type = h->slice_table[top_xy ] < 0xFFFF ? s->current_picture.mb_type[top_xy] : 0;
+ h->left_type[0]= left_type[0] = h->slice_table[left_xy[0] ] < 0xFFFF ? s->current_picture.mb_type[left_xy[0]] : 0;
+ h->left_type[1]= left_type[1] = h->slice_table[left_xy[1] ] < 0xFFFF ? s->current_picture.mb_type[left_xy[1]] : 0;
+ }
+ if(IS_INTRA(mb_type))
+ return 0;
+
+ AV_COPY64(&h->non_zero_count_cache[0+8*1], &h->non_zero_count[mb_xy][ 0]);
+ AV_COPY64(&h->non_zero_count_cache[0+8*2], &h->non_zero_count[mb_xy][ 8]);
+ *((uint32_t*)&h->non_zero_count_cache[0+8*5])= *((uint32_t*)&h->non_zero_count[mb_xy][16]);
+ *((uint32_t*)&h->non_zero_count_cache[4+8*3])= *((uint32_t*)&h->non_zero_count[mb_xy][20]);
+ AV_COPY64(&h->non_zero_count_cache[0+8*4], &h->non_zero_count[mb_xy][24]);
+
+ h->cbp= h->cbp_table[mb_xy];
+
+ {
+ int list;
+ for(list=0; list<h->list_count; list++){
+ int8_t *ref;
+ int y, b_stride;
+ int16_t (*mv_dst)[2];
+ int16_t (*mv_src)[2];
+
+ if(!USES_LIST(mb_type, list)){
+ fill_rectangle( h->mv_cache[list][scan8[0]], 4, 4, 8, pack16to32(0,0), 4);
+ *(uint32_t*)&h->ref_cache[list][scan8[ 0]] =
+ *(uint32_t*)&h->ref_cache[list][scan8[ 2]] =
+ *(uint32_t*)&h->ref_cache[list][scan8[ 8]] =
+ *(uint32_t*)&h->ref_cache[list][scan8[10]] = ((LIST_NOT_USED)&0xFF)*0x01010101;
+ continue;
+ }
+
+ ref = &s->current_picture.ref_index[list][h->mb2b8_xy[mb_xy]];
+ {
+ int (*ref2frm)[64] = h->ref2frm[ h->slice_num&(MAX_SLICES-1) ][0] + (MB_MBAFF ? 20 : 2);
+ *(uint32_t*)&h->ref_cache[list][scan8[ 0]] =
+ *(uint32_t*)&h->ref_cache[list][scan8[ 2]] = (pack16to32(ref2frm[list][ref[0]],ref2frm[list][ref[1]])&0x00FF00FF)*0x0101;
+ ref += h->b8_stride;
+ *(uint32_t*)&h->ref_cache[list][scan8[ 8]] =
+ *(uint32_t*)&h->ref_cache[list][scan8[10]] = (pack16to32(ref2frm[list][ref[0]],ref2frm[list][ref[1]])&0x00FF00FF)*0x0101;
+ }
+
+ b_stride = h->b_stride;
+ mv_dst = &h->mv_cache[list][scan8[0]];
+ mv_src = &s->current_picture.motion_val[list][4*s->mb_x + 4*s->mb_y*b_stride];
+ for(y=0; y<4; y++){
+ AV_COPY128(mv_dst + 8*y, mv_src + y*b_stride);
+ }
+
+ }
+ }
+
+
+/*
+0 . T T. T T T T
+1 L . .L . . . .
+2 L . .L . . . .
+3 . T TL . . . .
+4 L . .L . . . .
+5 L . .. . . . .
+*/
+//FIXME constraint_intra_pred & partitioning & nnz (let us hope this is just a typo in the spec)
+ if(top_type){
+ *(uint32_t*)&h->non_zero_count_cache[4+8*0]= *(uint32_t*)&h->non_zero_count[top_xy][4+3*8];
+ }
+
+ if(left_type[0]){
+ h->non_zero_count_cache[3+8*1]= h->non_zero_count[left_xy[0]][7+0*8];
+ h->non_zero_count_cache[3+8*2]= h->non_zero_count[left_xy[0]][7+1*8];
+ h->non_zero_count_cache[3+8*3]= h->non_zero_count[left_xy[0]][7+2*8];
+ h->non_zero_count_cache[3+8*4]= h->non_zero_count[left_xy[0]][7+3*8];
+ }
+
+ // CAVLC 8x8dct requires NNZ values for residual decoding that differ from what the loop filter needs
+ if(!CABAC && h->pps.transform_8x8_mode){
+ if(IS_8x8DCT(top_type)){
+ h->non_zero_count_cache[4+8*0]=
+ h->non_zero_count_cache[5+8*0]= h->cbp_table[top_xy] & 4;
+ h->non_zero_count_cache[6+8*0]=
+ h->non_zero_count_cache[7+8*0]= h->cbp_table[top_xy] & 8;
+ }
+ if(IS_8x8DCT(left_type[0])){
+ h->non_zero_count_cache[3+8*1]=
+ h->non_zero_count_cache[3+8*2]= h->cbp_table[left_xy[0]]&2; //FIXME check MBAFF
+ }
+ if(IS_8x8DCT(left_type[1])){
+ h->non_zero_count_cache[3+8*3]=
+ h->non_zero_count_cache[3+8*4]= h->cbp_table[left_xy[1]]&8; //FIXME check MBAFF
+ }
+
+ if(IS_8x8DCT(mb_type)){
+ h->non_zero_count_cache[scan8[0 ]]= h->non_zero_count_cache[scan8[1 ]]=
+ h->non_zero_count_cache[scan8[2 ]]= h->non_zero_count_cache[scan8[3 ]]= h->cbp & 1;
+
+ h->non_zero_count_cache[scan8[0+ 4]]= h->non_zero_count_cache[scan8[1+ 4]]=
+ h->non_zero_count_cache[scan8[2+ 4]]= h->non_zero_count_cache[scan8[3+ 4]]= h->cbp & 2;
+
+ h->non_zero_count_cache[scan8[0+ 8]]= h->non_zero_count_cache[scan8[1+ 8]]=
+ h->non_zero_count_cache[scan8[2+ 8]]= h->non_zero_count_cache[scan8[3+ 8]]= h->cbp & 4;
+
+ h->non_zero_count_cache[scan8[0+12]]= h->non_zero_count_cache[scan8[1+12]]=
+ h->non_zero_count_cache[scan8[2+12]]= h->non_zero_count_cache[scan8[3+12]]= h->cbp & 8;
+ }
+ }
+
+ if(IS_INTER(mb_type) || IS_DIRECT(mb_type)){
+ int list;
+ for(list=0; list<h->list_count; list++){
+ if(USES_LIST(top_type, list)){
+ const int b_xy= h->mb2b_xy[top_xy] + 3*h->b_stride;
+ const int b8_xy= h->mb2b8_xy[top_xy] + h->b8_stride;
+ int (*ref2frm)[64] = h->ref2frm[ h->slice_table[top_xy]&(MAX_SLICES-1) ][0] + (MB_MBAFF ? 20 : 2);
+ AV_COPY128(h->mv_cache[list][scan8[0] + 0 - 1*8], s->current_picture.motion_val[list][b_xy + 0]);
+ h->ref_cache[list][scan8[0] + 0 - 1*8]=
+ h->ref_cache[list][scan8[0] + 1 - 1*8]= ref2frm[list][s->current_picture.ref_index[list][b8_xy + 0]];
+ h->ref_cache[list][scan8[0] + 2 - 1*8]=
+ h->ref_cache[list][scan8[0] + 3 - 1*8]= ref2frm[list][s->current_picture.ref_index[list][b8_xy + 1]];
+ }else{
+ AV_ZERO128(h->mv_cache[list][scan8[0] + 0 - 1*8]);
+ *(uint32_t*)&h->ref_cache[list][scan8[0] + 0 - 1*8]= ((LIST_NOT_USED)&0xFF)*0x01010101;
+ }
+
+ if(!IS_INTERLACED(mb_type^left_type[0])){
+ if(USES_LIST(left_type[0], list)){
+ const int b_xy= h->mb2b_xy[left_xy[0]] + 3;
+ const int b8_xy= h->mb2b8_xy[left_xy[0]] + 1;
+ int (*ref2frm)[64] = h->ref2frm[ h->slice_table[left_xy[0]]&(MAX_SLICES-1) ][0] + (MB_MBAFF ? 20 : 2);
+ *(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 0 ]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*0];
+ *(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 8 ]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*1];
+ *(uint32_t*)h->mv_cache[list][scan8[0] - 1 +16 ]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*2];
+ *(uint32_t*)h->mv_cache[list][scan8[0] - 1 +24 ]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*3];
+ h->ref_cache[list][scan8[0] - 1 + 0 ]=
+ h->ref_cache[list][scan8[0] - 1 + 8 ]= ref2frm[list][s->current_picture.ref_index[list][b8_xy + h->b8_stride*0]];
+ h->ref_cache[list][scan8[0] - 1 +16 ]=
+ h->ref_cache[list][scan8[0] - 1 +24 ]= ref2frm[list][s->current_picture.ref_index[list][b8_xy + h->b8_stride*1]];
+ }else{
+ *(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 0 ]=
+ *(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 8 ]=
+ *(uint32_t*)h->mv_cache [list][scan8[0] - 1 +16 ]=
+ *(uint32_t*)h->mv_cache [list][scan8[0] - 1 +24 ]= 0;
+ h->ref_cache[list][scan8[0] - 1 + 0 ]=
+ h->ref_cache[list][scan8[0] - 1 + 8 ]=
+ h->ref_cache[list][scan8[0] - 1 + 16 ]=
+ h->ref_cache[list][scan8[0] - 1 + 24 ]= LIST_NOT_USED;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Gets the predicted (most probable) intra4x4 prediction mode: the minimum of the left and top neighbours' modes, or DC_PRED if either is unavailable.
+ */
+static inline int pred_intra_mode(H264Context *h, int n){
+ const int index8= scan8[n];
+ const int left= h->intra4x4_pred_mode_cache[index8 - 1];
+ const int top = h->intra4x4_pred_mode_cache[index8 - 8];
+ const int min= FFMIN(left, top);
+
+    tprintf(h->s.avctx, "mode:%d %d min:%d\n", left, top, min);
+
+ if(min<0) return DC_PRED;
+ else return min;
+}
+
+static inline void write_back_non_zero_count(H264Context *h){
+ const int mb_xy= h->mb_xy;
+
+ AV_COPY64(&h->non_zero_count[mb_xy][ 0], &h->non_zero_count_cache[0+8*1]);
+ AV_COPY64(&h->non_zero_count[mb_xy][ 8], &h->non_zero_count_cache[0+8*2]);
+ *((uint32_t*)&h->non_zero_count[mb_xy][16]) = *((uint32_t*)&h->non_zero_count_cache[0+8*5]);
+ *((uint32_t*)&h->non_zero_count[mb_xy][20]) = *((uint32_t*)&h->non_zero_count_cache[4+8*3]);
+ AV_COPY64(&h->non_zero_count[mb_xy][24], &h->non_zero_count_cache[0+8*4]);
+}
+
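+/* Copies the motion vectors, reference indices (and, for CABAC, mvd and direct
+ * flags) from the per-macroblock caches back into the frame-wide tables. */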
+static inline void write_back_motion(H264Context *h, int mb_type){
+ MpegEncContext * const s = &h->s;
+ const int b_xy = 4*s->mb_x + 4*s->mb_y*h->b_stride;
+ const int b8_xy= 2*s->mb_x + 2*s->mb_y*h->b8_stride;
+ int list;
+
+ if(!USES_LIST(mb_type, 0))
+ fill_rectangle(&s->current_picture.ref_index[0][b8_xy], 2, 2, h->b8_stride, (uint8_t)LIST_NOT_USED, 1);
+
+ for(list=0; list<h->list_count; list++){
+ int y, b_stride;
+ int16_t (*mv_dst)[2];
+ int16_t (*mv_src)[2];
+
+ if(!USES_LIST(mb_type, list))
+ continue;
+
+ b_stride = h->b_stride;
+ mv_dst = &s->current_picture.motion_val[list][b_xy];
+ mv_src = &h->mv_cache[list][scan8[0]];
+ for(y=0; y<4; y++){
+ AV_COPY128(mv_dst + y*b_stride, mv_src + 8*y);
+ }
+ if( CABAC ) {
+ int16_t (*mvd_dst)[2] = &h->mvd_table[list][b_xy];
+ int16_t (*mvd_src)[2] = &h->mvd_cache[list][scan8[0]];
+ if(IS_SKIP(mb_type))
+ fill_rectangle(mvd_dst, 4, 4, h->b_stride, 0, 4);
+ else
+ for(y=0; y<4; y++){
+ AV_COPY128(mvd_dst + y*b_stride, mvd_src + 8*y);
+ }
+ }
+
+ {
+ int8_t *ref_index = &s->current_picture.ref_index[list][b8_xy];
+ ref_index[0+0*h->b8_stride]= h->ref_cache[list][scan8[0]];
+ ref_index[1+0*h->b8_stride]= h->ref_cache[list][scan8[4]];
+ ref_index[0+1*h->b8_stride]= h->ref_cache[list][scan8[8]];
+ ref_index[1+1*h->b8_stride]= h->ref_cache[list][scan8[12]];
+ }
+ }
+
+ if(h->slice_type_nos == FF_B_TYPE && CABAC){
+ if(IS_8X8(mb_type)){
+ uint8_t *direct_table = &h->direct_table[b8_xy];
+ direct_table[1+0*h->b8_stride] = IS_DIRECT(h->sub_mb_type[1]) ? 1 : 0;
+ direct_table[0+1*h->b8_stride] = IS_DIRECT(h->sub_mb_type[2]) ? 1 : 0;
+ direct_table[1+1*h->b8_stride] = IS_DIRECT(h->sub_mb_type[3]) ? 1 : 0;
+ }
+ }
+}
+
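+/* The 8x8 transform is only allowed when no 8x8 partition is split further;
+ * without direct_8x8_inference_flag, direct sub-macroblocks also disallow it. */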
+static inline int get_dct8x8_allowed(H264Context *h){
+ if(h->sps.direct_8x8_inference_flag)
+ return !(*(uint64_t*)h->sub_mb_type & ((MB_TYPE_16x8|MB_TYPE_8x16|MB_TYPE_8x8 )*0x0001000100010001ULL));
+ else
+ return !(*(uint64_t*)h->sub_mb_type & ((MB_TYPE_16x8|MB_TYPE_8x16|MB_TYPE_8x8|MB_TYPE_DIRECT2)*0x0001000100010001ULL));
+}
+
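+/* Predicts the field decoding flag of the current macroblock pair from the
+ * left neighbour if it belongs to the same slice, otherwise from the pair
+ * above, otherwise frame decoding is assumed. */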
+static void predict_field_decoding_flag(H264Context *h){
+ MpegEncContext * const s = &h->s;
+ const int mb_xy= h->mb_xy;
+ int mb_type = (h->slice_table[mb_xy-1] == h->slice_num)
+ ? s->current_picture.mb_type[mb_xy-1]
+ : (h->slice_table[mb_xy-s->mb_stride] == h->slice_num)
+ ? s->current_picture.mb_type[mb_xy-s->mb_stride]
+ : 0;
+ h->mb_mbaff = h->mb_field_decoding_flag = IS_INTERLACED(mb_type) ? 1 : 0;
+}
+
+/**
+ * decodes a P_SKIP or B_SKIP macroblock
+ */
+static void decode_mb_skip(H264Context *h){
+ MpegEncContext * const s = &h->s;
+ const int mb_xy= h->mb_xy;
+ int mb_type=0;
+
+ memset(h->non_zero_count[mb_xy], 0, 32);
+ memset(h->non_zero_count_cache + 8, 0, 8*5); //FIXME ugly, remove pfui
+
+ if(MB_FIELD)
+ mb_type|= MB_TYPE_INTERLACED;
+
+ if( h->slice_type_nos == FF_B_TYPE )
+ {
+ // just for fill_caches. pred_direct_motion will set the real mb_type
+ mb_type|= MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2|MB_TYPE_SKIP;
+
+ fill_decode_caches(h, mb_type); //FIXME check what is needed and what not ...
+ ff_h264_pred_direct_motion(h, &mb_type);
+ mb_type|= MB_TYPE_SKIP;
+ }
+ else
+ {
+ int mx, my;
+ mb_type|= MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P1L0|MB_TYPE_SKIP;
+
+ fill_decode_caches(h, mb_type); //FIXME check what is needed and what not ...
+ pred_pskip_motion(h, &mx, &my);
+ fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, 0, 1);
+ fill_rectangle( h->mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mx,my), 4);
+ }
+
+ write_back_motion(h, mb_type);
+ s->current_picture.mb_type[mb_xy]= mb_type;
+ s->current_picture.qscale_table[mb_xy]= s->qscale;
+ h->slice_table[ mb_xy ]= h->slice_num;
+ h->prev_mb_skipped= 1;
+}
+
+#include "h264_mvpred.h" //For pred_pskip_motion()
+
+#endif /* AVCODEC_H264_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_cabac.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_cabac.c
new file mode 100644
index 000000000..7b2c8e87c
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_cabac.c
@@ -0,0 +1,1788 @@
+/*
+ * H.26L/H.264/AVC/JVT/14496-10/... cabac decoding
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/h264_cabac.c
+ * H.264 / AVC / MPEG4 part10 cabac decoding.
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#define CABAC 1
+
+#include "internal.h"
+#include "dsputil.h"
+#include "avcodec.h"
+#include "h264.h"
+#include "h264data.h"
+#include "h264_mvpred.h"
+#include "golomb.h"
+
+#include "cabac.h"
+#if ARCH_X86
+#include "x86/h264_i386.h"
+#endif
+
+//#undef NDEBUG
+#include <assert.h>
+
+/* CABAC context initialization tables (pre-states) */
+
+static const int8_t cabac_context_init_I[460][2] =
+{
+ /* 0 - 10 */
+ { 20, -15 }, { 2, 54 }, { 3, 74 }, { 20, -15 },
+ { 2, 54 }, { 3, 74 }, { -28,127 }, { -23, 104 },
+ { -6, 53 }, { -1, 54 }, { 7, 51 },
+
+    /* 11 - 23 unused for I */
+ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 },
+ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 },
+ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 },
+ { 0, 0 },
+
+ /* 24- 39 */
+ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 },
+ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 },
+ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 },
+ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 },
+
+ /* 40 - 53 */
+ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 },
+ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 },
+ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 },
+ { 0, 0 }, { 0, 0 },
+
+ /* 54 - 59 */
+ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 },
+ { 0, 0 }, { 0, 0 },
+
+ /* 60 - 69 */
+ { 0, 41 }, { 0, 63 }, { 0, 63 }, { 0, 63 },
+ { -9, 83 }, { 4, 86 }, { 0, 97 }, { -7, 72 },
+ { 13, 41 }, { 3, 62 },
+
+ /* 70 -> 87 */
+ { 0, 11 }, { 1, 55 }, { 0, 69 }, { -17, 127 },
+ { -13, 102 },{ 0, 82 }, { -7, 74 }, { -21, 107 },
+ { -27, 127 },{ -31, 127 },{ -24, 127 }, { -18, 95 },
+ { -27, 127 },{ -21, 114 },{ -30, 127 }, { -17, 123 },
+ { -12, 115 },{ -16, 122 },
+
+ /* 88 -> 104 */
+ { -11, 115 },{ -12, 63 }, { -2, 68 }, { -15, 84 },
+ { -13, 104 },{ -3, 70 }, { -8, 93 }, { -10, 90 },
+ { -30, 127 },{ -1, 74 }, { -6, 97 }, { -7, 91 },
+ { -20, 127 },{ -4, 56 }, { -5, 82 }, { -7, 76 },
+ { -22, 125 },
+
+ /* 105 -> 135 */
+ { -7, 93 }, { -11, 87 }, { -3, 77 }, { -5, 71 },
+ { -4, 63 }, { -4, 68 }, { -12, 84 }, { -7, 62 },
+ { -7, 65 }, { 8, 61 }, { 5, 56 }, { -2, 66 },
+ { 1, 64 }, { 0, 61 }, { -2, 78 }, { 1, 50 },
+ { 7, 52 }, { 10, 35 }, { 0, 44 }, { 11, 38 },
+ { 1, 45 }, { 0, 46 }, { 5, 44 }, { 31, 17 },
+ { 1, 51 }, { 7, 50 }, { 28, 19 }, { 16, 33 },
+ { 14, 62 }, { -13, 108 },{ -15, 100 },
+
+ /* 136 -> 165 */
+ { -13, 101 },{ -13, 91 }, { -12, 94 }, { -10, 88 },
+ { -16, 84 }, { -10, 86 }, { -7, 83 }, { -13, 87 },
+ { -19, 94 }, { 1, 70 }, { 0, 72 }, { -5, 74 },
+ { 18, 59 }, { -8, 102 }, { -15, 100 }, { 0, 95 },
+ { -4, 75 }, { 2, 72 }, { -11, 75 }, { -3, 71 },
+ { 15, 46 }, { -13, 69 }, { 0, 62 }, { 0, 65 },
+ { 21, 37 }, { -15, 72 }, { 9, 57 }, { 16, 54 },
+ { 0, 62 }, { 12, 72 },
+
+ /* 166 -> 196 */
+ { 24, 0 }, { 15, 9 }, { 8, 25 }, { 13, 18 },
+ { 15, 9 }, { 13, 19 }, { 10, 37 }, { 12, 18 },
+ { 6, 29 }, { 20, 33 }, { 15, 30 }, { 4, 45 },
+ { 1, 58 }, { 0, 62 }, { 7, 61 }, { 12, 38 },
+ { 11, 45 }, { 15, 39 }, { 11, 42 }, { 13, 44 },
+ { 16, 45 }, { 12, 41 }, { 10, 49 }, { 30, 34 },
+ { 18, 42 }, { 10, 55 }, { 17, 51 }, { 17, 46 },
+ { 0, 89 }, { 26, -19 }, { 22, -17 },
+
+ /* 197 -> 226 */
+ { 26, -17 }, { 30, -25 }, { 28, -20 }, { 33, -23 },
+ { 37, -27 }, { 33, -23 }, { 40, -28 }, { 38, -17 },
+ { 33, -11 }, { 40, -15 }, { 41, -6 }, { 38, 1 },
+ { 41, 17 }, { 30, -6 }, { 27, 3 }, { 26, 22 },
+ { 37, -16 }, { 35, -4 }, { 38, -8 }, { 38, -3 },
+ { 37, 3 }, { 38, 5 }, { 42, 0 }, { 35, 16 },
+ { 39, 22 }, { 14, 48 }, { 27, 37 }, { 21, 60 },
+ { 12, 68 }, { 2, 97 },
+
+ /* 227 -> 251 */
+ { -3, 71 }, { -6, 42 }, { -5, 50 }, { -3, 54 },
+ { -2, 62 }, { 0, 58 }, { 1, 63 }, { -2, 72 },
+ { -1, 74 }, { -9, 91 }, { -5, 67 }, { -5, 27 },
+ { -3, 39 }, { -2, 44 }, { 0, 46 }, { -16, 64 },
+ { -8, 68 }, { -10, 78 }, { -6, 77 }, { -10, 86 },
+ { -12, 92 }, { -15, 55 }, { -10, 60 }, { -6, 62 },
+ { -4, 65 },
+
+ /* 252 -> 275 */
+ { -12, 73 }, { -8, 76 }, { -7, 80 }, { -9, 88 },
+ { -17, 110 },{ -11, 97 }, { -20, 84 }, { -11, 79 },
+ { -6, 73 }, { -4, 74 }, { -13, 86 }, { -13, 96 },
+ { -11, 97 }, { -19, 117 },{ -8, 78 }, { -5, 33 },
+ { -4, 48 }, { -2, 53 }, { -3, 62 }, { -13, 71 },
+ { -10, 79 }, { -12, 86 }, { -13, 90 }, { -14, 97 },
+
+ /* 276 a bit special (not used, bypass is used instead) */
+ { 0, 0 },
+
+ /* 277 -> 307 */
+ { -6, 93 }, { -6, 84 }, { -8, 79 }, { 0, 66 },
+ { -1, 71 }, { 0, 62 }, { -2, 60 }, { -2, 59 },
+ { -5, 75 }, { -3, 62 }, { -4, 58 }, { -9, 66 },
+ { -1, 79 }, { 0, 71 }, { 3, 68 }, { 10, 44 },
+ { -7, 62 }, { 15, 36 }, { 14, 40 }, { 16, 27 },
+ { 12, 29 }, { 1, 44 }, { 20, 36 }, { 18, 32 },
+ { 5, 42 }, { 1, 48 }, { 10, 62 }, { 17, 46 },
+ { 9, 64 }, { -12, 104 },{ -11, 97 },
+
+ /* 308 -> 337 */
+ { -16, 96 }, { -7, 88 }, { -8, 85 }, { -7, 85 },
+ { -9, 85 }, { -13, 88 }, { 4, 66 }, { -3, 77 },
+ { -3, 76 }, { -6, 76 }, { 10, 58 }, { -1, 76 },
+ { -1, 83 }, { -7, 99 }, { -14, 95 }, { 2, 95 },
+ { 0, 76 }, { -5, 74 }, { 0, 70 }, { -11, 75 },
+ { 1, 68 }, { 0, 65 }, { -14, 73 }, { 3, 62 },
+ { 4, 62 }, { -1, 68 }, { -13, 75 }, { 11, 55 },
+ { 5, 64 }, { 12, 70 },
+
+ /* 338 -> 368 */
+ { 15, 6 }, { 6, 19 }, { 7, 16 }, { 12, 14 },
+ { 18, 13 }, { 13, 11 }, { 13, 15 }, { 15, 16 },
+ { 12, 23 }, { 13, 23 }, { 15, 20 }, { 14, 26 },
+ { 14, 44 }, { 17, 40 }, { 17, 47 }, { 24, 17 },
+ { 21, 21 }, { 25, 22 }, { 31, 27 }, { 22, 29 },
+ { 19, 35 }, { 14, 50 }, { 10, 57 }, { 7, 63 },
+ { -2, 77 }, { -4, 82 }, { -3, 94 }, { 9, 69 },
+ { -12, 109 },{ 36, -35 }, { 36, -34 },
+
+ /* 369 -> 398 */
+ { 32, -26 }, { 37, -30 }, { 44, -32 }, { 34, -18 },
+ { 34, -15 }, { 40, -15 }, { 33, -7 }, { 35, -5 },
+ { 33, 0 }, { 38, 2 }, { 33, 13 }, { 23, 35 },
+ { 13, 58 }, { 29, -3 }, { 26, 0 }, { 22, 30 },
+ { 31, -7 }, { 35, -15 }, { 34, -3 }, { 34, 3 },
+ { 36, -1 }, { 34, 5 }, { 32, 11 }, { 35, 5 },
+ { 34, 12 }, { 39, 11 }, { 30, 29 }, { 34, 26 },
+ { 29, 39 }, { 19, 66 },
+
+ /* 399 -> 435 */
+ { 31, 21 }, { 31, 31 }, { 25, 50 },
+ { -17, 120 }, { -20, 112 }, { -18, 114 }, { -11, 85 },
+ { -15, 92 }, { -14, 89 }, { -26, 71 }, { -15, 81 },
+ { -14, 80 }, { 0, 68 }, { -14, 70 }, { -24, 56 },
+ { -23, 68 }, { -24, 50 }, { -11, 74 }, { 23, -13 },
+ { 26, -13 }, { 40, -15 }, { 49, -14 }, { 44, 3 },
+ { 45, 6 }, { 44, 34 }, { 33, 54 }, { 19, 82 },
+ { -3, 75 }, { -1, 23 }, { 1, 34 }, { 1, 43 },
+ { 0, 54 }, { -2, 55 }, { 0, 61 }, { 1, 64 },
+ { 0, 68 }, { -9, 92 },
+
+ /* 436 -> 459 */
+ { -14, 106 }, { -13, 97 }, { -15, 90 }, { -12, 90 },
+ { -18, 88 }, { -10, 73 }, { -9, 79 }, { -14, 86 },
+ { -10, 73 }, { -10, 70 }, { -10, 69 }, { -5, 66 },
+ { -9, 64 }, { -5, 58 }, { 2, 59 }, { 21, -10 },
+ { 24, -11 }, { 28, -8 }, { 28, -1 }, { 29, 3 },
+ { 29, 9 }, { 35, 20 }, { 29, 36 }, { 14, 67 }
+};
+
+static const int8_t cabac_context_init_PB[3][460][2] =
+{
+ /* i_cabac_init_idc == 0 */
+ {
+ /* 0 - 10 */
+ { 20, -15 }, { 2, 54 }, { 3, 74 }, { 20, -15 },
+ { 2, 54 }, { 3, 74 }, { -28, 127 }, { -23, 104 },
+ { -6, 53 }, { -1, 54 }, { 7, 51 },
+
+ /* 11 - 23 */
+ { 23, 33 }, { 23, 2 }, { 21, 0 }, { 1, 9 },
+ { 0, 49 }, { -37, 118 }, { 5, 57 }, { -13, 78 },
+ { -11, 65 }, { 1, 62 }, { 12, 49 }, { -4, 73 },
+ { 17, 50 },
+
+ /* 24 - 39 */
+ { 18, 64 }, { 9, 43 }, { 29, 0 }, { 26, 67 },
+ { 16, 90 }, { 9, 104 }, { -46, 127 }, { -20, 104 },
+ { 1, 67 }, { -13, 78 }, { -11, 65 }, { 1, 62 },
+ { -6, 86 }, { -17, 95 }, { -6, 61 }, { 9, 45 },
+
+ /* 40 - 53 */
+ { -3, 69 }, { -6, 81 }, { -11, 96 }, { 6, 55 },
+ { 7, 67 }, { -5, 86 }, { 2, 88 }, { 0, 58 },
+ { -3, 76 }, { -10, 94 }, { 5, 54 }, { 4, 69 },
+ { -3, 81 }, { 0, 88 },
+
+ /* 54 - 59 */
+ { -7, 67 }, { -5, 74 }, { -4, 74 }, { -5, 80 },
+ { -7, 72 }, { 1, 58 },
+
+ /* 60 - 69 */
+ { 0, 41 }, { 0, 63 }, { 0, 63 }, { 0, 63 },
+ { -9, 83 }, { 4, 86 }, { 0, 97 }, { -7, 72 },
+ { 13, 41 }, { 3, 62 },
+
+ /* 70 - 87 */
+ { 0, 45 }, { -4, 78 }, { -3, 96 }, { -27, 126 },
+ { -28, 98 }, { -25, 101 }, { -23, 67 }, { -28, 82 },
+ { -20, 94 }, { -16, 83 }, { -22, 110 }, { -21, 91 },
+ { -18, 102 }, { -13, 93 }, { -29, 127 }, { -7, 92 },
+ { -5, 89 }, { -7, 96 }, { -13, 108 }, { -3, 46 },
+ { -1, 65 }, { -1, 57 }, { -9, 93 }, { -3, 74 },
+ { -9, 92 }, { -8, 87 }, { -23, 126 }, { 5, 54 },
+ { 6, 60 }, { 6, 59 }, { 6, 69 }, { -1, 48 },
+ { 0, 68 }, { -4, 69 }, { -8, 88 },
+
+ /* 105 -> 165 */
+ { -2, 85 }, { -6, 78 }, { -1, 75 }, { -7, 77 },
+ { 2, 54 }, { 5, 50 }, { -3, 68 }, { 1, 50 },
+ { 6, 42 }, { -4, 81 }, { 1, 63 }, { -4, 70 },
+ { 0, 67 }, { 2, 57 }, { -2, 76 }, { 11, 35 },
+ { 4, 64 }, { 1, 61 }, { 11, 35 }, { 18, 25 },
+ { 12, 24 }, { 13, 29 }, { 13, 36 }, { -10, 93 },
+ { -7, 73 }, { -2, 73 }, { 13, 46 }, { 9, 49 },
+ { -7, 100 }, { 9, 53 }, { 2, 53 }, { 5, 53 },
+ { -2, 61 }, { 0, 56 }, { 0, 56 }, { -13, 63 },
+ { -5, 60 }, { -1, 62 }, { 4, 57 }, { -6, 69 },
+ { 4, 57 }, { 14, 39 }, { 4, 51 }, { 13, 68 },
+ { 3, 64 }, { 1, 61 }, { 9, 63 }, { 7, 50 },
+ { 16, 39 }, { 5, 44 }, { 4, 52 }, { 11, 48 },
+ { -5, 60 }, { -1, 59 }, { 0, 59 }, { 22, 33 },
+ { 5, 44 }, { 14, 43 }, { -1, 78 }, { 0, 60 },
+ { 9, 69 },
+
+ /* 166 - 226 */
+ { 11, 28 }, { 2, 40 }, { 3, 44 }, { 0, 49 },
+ { 0, 46 }, { 2, 44 }, { 2, 51 }, { 0, 47 },
+ { 4, 39 }, { 2, 62 }, { 6, 46 }, { 0, 54 },
+ { 3, 54 }, { 2, 58 }, { 4, 63 }, { 6, 51 },
+ { 6, 57 }, { 7, 53 }, { 6, 52 }, { 6, 55 },
+ { 11, 45 }, { 14, 36 }, { 8, 53 }, { -1, 82 },
+ { 7, 55 }, { -3, 78 }, { 15, 46 }, { 22, 31 },
+ { -1, 84 }, { 25, 7 }, { 30, -7 }, { 28, 3 },
+ { 28, 4 }, { 32, 0 }, { 34, -1 }, { 30, 6 },
+ { 30, 6 }, { 32, 9 }, { 31, 19 }, { 26, 27 },
+ { 26, 30 }, { 37, 20 }, { 28, 34 }, { 17, 70 },
+ { 1, 67 }, { 5, 59 }, { 9, 67 }, { 16, 30 },
+ { 18, 32 }, { 18, 35 }, { 22, 29 }, { 24, 31 },
+ { 23, 38 }, { 18, 43 }, { 20, 41 }, { 11, 63 },
+ { 9, 59 }, { 9, 64 }, { -1, 94 }, { -2, 89 },
+ { -9, 108 },
+
+ /* 227 - 275 */
+ { -6, 76 }, { -2, 44 }, { 0, 45 }, { 0, 52 },
+ { -3, 64 }, { -2, 59 }, { -4, 70 }, { -4, 75 },
+ { -8, 82 }, { -17, 102 }, { -9, 77 }, { 3, 24 },
+ { 0, 42 }, { 0, 48 }, { 0, 55 }, { -6, 59 },
+ { -7, 71 }, { -12, 83 }, { -11, 87 }, { -30, 119 },
+ { 1, 58 }, { -3, 29 }, { -1, 36 }, { 1, 38 },
+ { 2, 43 }, { -6, 55 }, { 0, 58 }, { 0, 64 },
+ { -3, 74 }, { -10, 90 }, { 0, 70 }, { -4, 29 },
+ { 5, 31 }, { 7, 42 }, { 1, 59 }, { -2, 58 },
+ { -3, 72 }, { -3, 81 }, { -11, 97 }, { 0, 58 },
+ { 8, 5 }, { 10, 14 }, { 14, 18 }, { 13, 27 },
+ { 2, 40 }, { 0, 58 }, { -3, 70 }, { -6, 79 },
+ { -8, 85 },
+
+ /* 276 a bit special (not used, bypass is used instead) */
+ { 0, 0 },
+
+ /* 277 - 337 */
+ { -13, 106 }, { -16, 106 }, { -10, 87 }, { -21, 114 },
+ { -18, 110 }, { -14, 98 }, { -22, 110 }, { -21, 106 },
+ { -18, 103 }, { -21, 107 }, { -23, 108 }, { -26, 112 },
+ { -10, 96 }, { -12, 95 }, { -5, 91 }, { -9, 93 },
+ { -22, 94 }, { -5, 86 }, { 9, 67 }, { -4, 80 },
+ { -10, 85 }, { -1, 70 }, { 7, 60 }, { 9, 58 },
+ { 5, 61 }, { 12, 50 }, { 15, 50 }, { 18, 49 },
+ { 17, 54 }, { 10, 41 }, { 7, 46 }, { -1, 51 },
+ { 7, 49 }, { 8, 52 }, { 9, 41 }, { 6, 47 },
+ { 2, 55 }, { 13, 41 }, { 10, 44 }, { 6, 50 },
+ { 5, 53 }, { 13, 49 }, { 4, 63 }, { 6, 64 },
+ { -2, 69 }, { -2, 59 }, { 6, 70 }, { 10, 44 },
+ { 9, 31 }, { 12, 43 }, { 3, 53 }, { 14, 34 },
+ { 10, 38 }, { -3, 52 }, { 13, 40 }, { 17, 32 },
+ { 7, 44 }, { 7, 38 }, { 13, 50 }, { 10, 57 },
+ { 26, 43 },
+
+ /* 338 - 398 */
+ { 14, 11 }, { 11, 14 }, { 9, 11 }, { 18, 11 },
+ { 21, 9 }, { 23, -2 }, { 32, -15 }, { 32, -15 },
+ { 34, -21 }, { 39, -23 }, { 42, -33 }, { 41, -31 },
+ { 46, -28 }, { 38, -12 }, { 21, 29 }, { 45, -24 },
+ { 53, -45 }, { 48, -26 }, { 65, -43 }, { 43, -19 },
+ { 39, -10 }, { 30, 9 }, { 18, 26 }, { 20, 27 },
+ { 0, 57 }, { -14, 82 }, { -5, 75 }, { -19, 97 },
+ { -35, 125 }, { 27, 0 }, { 28, 0 }, { 31, -4 },
+ { 27, 6 }, { 34, 8 }, { 30, 10 }, { 24, 22 },
+ { 33, 19 }, { 22, 32 }, { 26, 31 }, { 21, 41 },
+ { 26, 44 }, { 23, 47 }, { 16, 65 }, { 14, 71 },
+ { 8, 60 }, { 6, 63 }, { 17, 65 }, { 21, 24 },
+ { 23, 20 }, { 26, 23 }, { 27, 32 }, { 28, 23 },
+ { 28, 24 }, { 23, 40 }, { 24, 32 }, { 28, 29 },
+ { 23, 42 }, { 19, 57 }, { 22, 53 }, { 22, 61 },
+ { 11, 86 },
+
+ /* 399 - 435 */
+ { 12, 40 }, { 11, 51 }, { 14, 59 },
+ { -4, 79 }, { -7, 71 }, { -5, 69 }, { -9, 70 },
+ { -8, 66 }, { -10, 68 }, { -19, 73 }, { -12, 69 },
+ { -16, 70 }, { -15, 67 }, { -20, 62 }, { -19, 70 },
+ { -16, 66 }, { -22, 65 }, { -20, 63 }, { 9, -2 },
+ { 26, -9 }, { 33, -9 }, { 39, -7 }, { 41, -2 },
+ { 45, 3 }, { 49, 9 }, { 45, 27 }, { 36, 59 },
+ { -6, 66 }, { -7, 35 }, { -7, 42 }, { -8, 45 },
+ { -5, 48 }, { -12, 56 }, { -6, 60 }, { -5, 62 },
+ { -8, 66 }, { -8, 76 },
+
+ /* 436 - 459 */
+ { -5, 85 }, { -6, 81 }, { -10, 77 }, { -7, 81 },
+ { -17, 80 }, { -18, 73 }, { -4, 74 }, { -10, 83 },
+ { -9, 71 }, { -9, 67 }, { -1, 61 }, { -8, 66 },
+ { -14, 66 }, { 0, 59 }, { 2, 59 }, { 21, -13 },
+ { 33, -14 }, { 39, -7 }, { 46, -2 }, { 51, 2 },
+ { 60, 6 }, { 61, 17 }, { 55, 34 }, { 42, 62 },
+ },
+
+ /* i_cabac_init_idc == 1 */
+ {
+ /* 0 - 10 */
+ { 20, -15 }, { 2, 54 }, { 3, 74 }, { 20, -15 },
+ { 2, 54 }, { 3, 74 }, { -28, 127 }, { -23, 104 },
+ { -6, 53 }, { -1, 54 }, { 7, 51 },
+
+ /* 11 - 23 */
+ { 22, 25 }, { 34, 0 }, { 16, 0 }, { -2, 9 },
+ { 4, 41 }, { -29, 118 }, { 2, 65 }, { -6, 71 },
+ { -13, 79 }, { 5, 52 }, { 9, 50 }, { -3, 70 },
+ { 10, 54 },
+
+ /* 24 - 39 */
+ { 26, 34 }, { 19, 22 }, { 40, 0 }, { 57, 2 },
+ { 41, 36 }, { 26, 69 }, { -45, 127 }, { -15, 101 },
+ { -4, 76 }, { -6, 71 }, { -13, 79 }, { 5, 52 },
+ { 6, 69 }, { -13, 90 }, { 0, 52 }, { 8, 43 },
+
+ /* 40 - 53 */
+ { -2, 69 },{ -5, 82 },{ -10, 96 },{ 2, 59 },
+ { 2, 75 },{ -3, 87 },{ -3, 100 },{ 1, 56 },
+ { -3, 74 },{ -6, 85 },{ 0, 59 },{ -3, 81 },
+ { -7, 86 },{ -5, 95 },
+
+ /* 54 - 59 */
+ { -1, 66 },{ -1, 77 },{ 1, 70 },{ -2, 86 },
+ { -5, 72 },{ 0, 61 },
+
+ /* 60 - 69 */
+ { 0, 41 }, { 0, 63 }, { 0, 63 }, { 0, 63 },
+ { -9, 83 }, { 4, 86 }, { 0, 97 }, { -7, 72 },
+ { 13, 41 }, { 3, 62 },
+
+ /* 70 - 104 */
+ { 13, 15 }, { 7, 51 }, { 2, 80 }, { -39, 127 },
+ { -18, 91 }, { -17, 96 }, { -26, 81 }, { -35, 98 },
+ { -24, 102 }, { -23, 97 }, { -27, 119 }, { -24, 99 },
+ { -21, 110 }, { -18, 102 }, { -36, 127 }, { 0, 80 },
+ { -5, 89 }, { -7, 94 }, { -4, 92 }, { 0, 39 },
+ { 0, 65 }, { -15, 84 }, { -35, 127 }, { -2, 73 },
+ { -12, 104 }, { -9, 91 }, { -31, 127 }, { 3, 55 },
+ { 7, 56 }, { 7, 55 }, { 8, 61 }, { -3, 53 },
+ { 0, 68 }, { -7, 74 }, { -9, 88 },
+
+ /* 105 -> 165 */
+ { -13, 103 }, { -13, 91 }, { -9, 89 }, { -14, 92 },
+ { -8, 76 }, { -12, 87 }, { -23, 110 }, { -24, 105 },
+ { -10, 78 }, { -20, 112 }, { -17, 99 }, { -78, 127 },
+ { -70, 127 }, { -50, 127 }, { -46, 127 }, { -4, 66 },
+ { -5, 78 }, { -4, 71 }, { -8, 72 }, { 2, 59 },
+ { -1, 55 }, { -7, 70 }, { -6, 75 }, { -8, 89 },
+ { -34, 119 }, { -3, 75 }, { 32, 20 }, { 30, 22 },
+ { -44, 127 }, { 0, 54 }, { -5, 61 }, { 0, 58 },
+ { -1, 60 }, { -3, 61 }, { -8, 67 }, { -25, 84 },
+ { -14, 74 }, { -5, 65 }, { 5, 52 }, { 2, 57 },
+ { 0, 61 }, { -9, 69 }, { -11, 70 }, { 18, 55 },
+ { -4, 71 }, { 0, 58 }, { 7, 61 }, { 9, 41 },
+ { 18, 25 }, { 9, 32 }, { 5, 43 }, { 9, 47 },
+ { 0, 44 }, { 0, 51 }, { 2, 46 }, { 19, 38 },
+ { -4, 66 }, { 15, 38 }, { 12, 42 }, { 9, 34 },
+ { 0, 89 },
+
+ /* 166 - 226 */
+ { 4, 45 }, { 10, 28 }, { 10, 31 }, { 33, -11 },
+ { 52, -43 }, { 18, 15 }, { 28, 0 }, { 35, -22 },
+ { 38, -25 }, { 34, 0 }, { 39, -18 }, { 32, -12 },
+ { 102, -94 }, { 0, 0 }, { 56, -15 }, { 33, -4 },
+ { 29, 10 }, { 37, -5 }, { 51, -29 }, { 39, -9 },
+ { 52, -34 }, { 69, -58 }, { 67, -63 }, { 44, -5 },
+ { 32, 7 }, { 55, -29 }, { 32, 1 }, { 0, 0 },
+ { 27, 36 }, { 33, -25 }, { 34, -30 }, { 36, -28 },
+ { 38, -28 }, { 38, -27 }, { 34, -18 }, { 35, -16 },
+ { 34, -14 }, { 32, -8 }, { 37, -6 }, { 35, 0 },
+ { 30, 10 }, { 28, 18 }, { 26, 25 }, { 29, 41 },
+ { 0, 75 }, { 2, 72 }, { 8, 77 }, { 14, 35 },
+ { 18, 31 }, { 17, 35 }, { 21, 30 }, { 17, 45 },
+ { 20, 42 }, { 18, 45 }, { 27, 26 }, { 16, 54 },
+ { 7, 66 }, { 16, 56 }, { 11, 73 }, { 10, 67 },
+ { -10, 116 },
+
+ /* 227 - 275 */
+ { -23, 112 }, { -15, 71 }, { -7, 61 }, { 0, 53 },
+ { -5, 66 }, { -11, 77 }, { -9, 80 }, { -9, 84 },
+ { -10, 87 }, { -34, 127 }, { -21, 101 }, { -3, 39 },
+ { -5, 53 }, { -7, 61 }, { -11, 75 }, { -15, 77 },
+ { -17, 91 }, { -25, 107 }, { -25, 111 }, { -28, 122 },
+ { -11, 76 }, { -10, 44 }, { -10, 52 }, { -10, 57 },
+ { -9, 58 }, { -16, 72 }, { -7, 69 }, { -4, 69 },
+ { -5, 74 }, { -9, 86 }, { 2, 66 }, { -9, 34 },
+ { 1, 32 }, { 11, 31 }, { 5, 52 }, { -2, 55 },
+ { -2, 67 }, { 0, 73 }, { -8, 89 }, { 3, 52 },
+ { 7, 4 }, { 10, 8 }, { 17, 8 }, { 16, 19 },
+ { 3, 37 }, { -1, 61 }, { -5, 73 }, { -1, 70 },
+ { -4, 78 },
+
+ /* 276 a bit special (not used, bypass is used instead) */
+ { 0, 0 },
+
+ /* 277 - 337 */
+ { -21, 126 }, { -23, 124 }, { -20, 110 }, { -26, 126 },
+ { -25, 124 }, { -17, 105 }, { -27, 121 }, { -27, 117 },
+ { -17, 102 }, { -26, 117 }, { -27, 116 }, { -33, 122 },
+ { -10, 95 }, { -14, 100 }, { -8, 95 }, { -17, 111 },
+ { -28, 114 }, { -6, 89 }, { -2, 80 }, { -4, 82 },
+ { -9, 85 }, { -8, 81 }, { -1, 72 }, { 5, 64 },
+ { 1, 67 }, { 9, 56 }, { 0, 69 }, { 1, 69 },
+ { 7, 69 }, { -7, 69 }, { -6, 67 }, { -16, 77 },
+ { -2, 64 }, { 2, 61 }, { -6, 67 }, { -3, 64 },
+ { 2, 57 }, { -3, 65 }, { -3, 66 }, { 0, 62 },
+ { 9, 51 }, { -1, 66 }, { -2, 71 }, { -2, 75 },
+ { -1, 70 }, { -9, 72 }, { 14, 60 }, { 16, 37 },
+ { 0, 47 }, { 18, 35 }, { 11, 37 }, { 12, 41 },
+ { 10, 41 }, { 2, 48 }, { 12, 41 }, { 13, 41 },
+ { 0, 59 }, { 3, 50 }, { 19, 40 }, { 3, 66 },
+ { 18, 50 },
+
+ /* 338 - 398 */
+ { 19, -6 }, { 18, -6 }, { 14, 0 }, { 26, -12 },
+ { 31, -16 }, { 33, -25 }, { 33, -22 }, { 37, -28 },
+ { 39, -30 }, { 42, -30 }, { 47, -42 }, { 45, -36 },
+ { 49, -34 }, { 41, -17 }, { 32, 9 }, { 69, -71 },
+ { 63, -63 }, { 66, -64 }, { 77, -74 }, { 54, -39 },
+ { 52, -35 }, { 41, -10 }, { 36, 0 }, { 40, -1 },
+ { 30, 14 }, { 28, 26 }, { 23, 37 }, { 12, 55 },
+ { 11, 65 }, { 37, -33 }, { 39, -36 }, { 40, -37 },
+ { 38, -30 }, { 46, -33 }, { 42, -30 }, { 40, -24 },
+ { 49, -29 }, { 38, -12 }, { 40, -10 }, { 38, -3 },
+ { 46, -5 }, { 31, 20 }, { 29, 30 }, { 25, 44 },
+ { 12, 48 }, { 11, 49 }, { 26, 45 }, { 22, 22 },
+ { 23, 22 }, { 27, 21 }, { 33, 20 }, { 26, 28 },
+ { 30, 24 }, { 27, 34 }, { 18, 42 }, { 25, 39 },
+ { 18, 50 }, { 12, 70 }, { 21, 54 }, { 14, 71 },
+ { 11, 83 },
+
+ /* 399 - 435 */
+ { 25, 32 }, { 21, 49 }, { 21, 54 },
+ { -5, 85 }, { -6, 81 }, { -10, 77 }, { -7, 81 },
+ { -17, 80 }, { -18, 73 }, { -4, 74 }, { -10, 83 },
+ { -9, 71 }, { -9, 67 }, { -1, 61 }, { -8, 66 },
+ { -14, 66 }, { 0, 59 }, { 2, 59 }, { 17, -10 },
+ { 32, -13 }, { 42, -9 }, { 49, -5 }, { 53, 0 },
+ { 64, 3 }, { 68, 10 }, { 66, 27 }, { 47, 57 },
+ { -5, 71 }, { 0, 24 }, { -1, 36 }, { -2, 42 },
+ { -2, 52 }, { -9, 57 }, { -6, 63 }, { -4, 65 },
+ { -4, 67 }, { -7, 82 },
+
+ /* 436 - 459 */
+ { -3, 81 }, { -3, 76 }, { -7, 72 }, { -6, 78 },
+ { -12, 72 }, { -14, 68 }, { -3, 70 }, { -6, 76 },
+ { -5, 66 }, { -5, 62 }, { 0, 57 }, { -4, 61 },
+ { -9, 60 }, { 1, 54 }, { 2, 58 }, { 17, -10 },
+ { 32, -13 }, { 42, -9 }, { 49, -5 }, { 53, 0 },
+ { 64, 3 }, { 68, 10 }, { 66, 27 }, { 47, 57 },
+ },
+
+ /* i_cabac_init_idc == 2 */
+ {
+ /* 0 - 10 */
+ { 20, -15 }, { 2, 54 }, { 3, 74 }, { 20, -15 },
+ { 2, 54 }, { 3, 74 }, { -28, 127 }, { -23, 104 },
+ { -6, 53 }, { -1, 54 }, { 7, 51 },
+
+ /* 11 - 23 */
+ { 29, 16 }, { 25, 0 }, { 14, 0 }, { -10, 51 },
+ { -3, 62 }, { -27, 99 }, { 26, 16 }, { -4, 85 },
+ { -24, 102 }, { 5, 57 }, { 6, 57 }, { -17, 73 },
+ { 14, 57 },
+
+ /* 24 - 39 */
+ { 20, 40 }, { 20, 10 }, { 29, 0 }, { 54, 0 },
+ { 37, 42 }, { 12, 97 }, { -32, 127 }, { -22, 117 },
+ { -2, 74 }, { -4, 85 }, { -24, 102 }, { 5, 57 },
+ { -6, 93 }, { -14, 88 }, { -6, 44 }, { 4, 55 },
+
+ /* 40 - 53 */
+ { -11, 89 },{ -15, 103 },{ -21, 116 },{ 19, 57 },
+ { 20, 58 },{ 4, 84 },{ 6, 96 },{ 1, 63 },
+ { -5, 85 },{ -13, 106 },{ 5, 63 },{ 6, 75 },
+ { -3, 90 },{ -1, 101 },
+
+ /* 54 - 59 */
+ { 3, 55 },{ -4, 79 },{ -2, 75 },{ -12, 97 },
+ { -7, 50 },{ 1, 60 },
+
+ /* 60 - 69 */
+ { 0, 41 }, { 0, 63 }, { 0, 63 }, { 0, 63 },
+ { -9, 83 }, { 4, 86 }, { 0, 97 }, { -7, 72 },
+ { 13, 41 }, { 3, 62 },
+
+ /* 70 - 104 */
+ { 7, 34 }, { -9, 88 }, { -20, 127 }, { -36, 127 },
+ { -17, 91 }, { -14, 95 }, { -25, 84 }, { -25, 86 },
+ { -12, 89 }, { -17, 91 }, { -31, 127 }, { -14, 76 },
+ { -18, 103 }, { -13, 90 }, { -37, 127 }, { 11, 80 },
+ { 5, 76 }, { 2, 84 }, { 5, 78 }, { -6, 55 },
+ { 4, 61 }, { -14, 83 }, { -37, 127 }, { -5, 79 },
+ { -11, 104 }, { -11, 91 }, { -30, 127 }, { 0, 65 },
+ { -2, 79 }, { 0, 72 }, { -4, 92 }, { -6, 56 },
+ { 3, 68 }, { -8, 71 }, { -13, 98 },
+
+ /* 105 -> 165 */
+ { -4, 86 }, { -12, 88 }, { -5, 82 }, { -3, 72 },
+ { -4, 67 }, { -8, 72 }, { -16, 89 }, { -9, 69 },
+ { -1, 59 }, { 5, 66 }, { 4, 57 }, { -4, 71 },
+ { -2, 71 }, { 2, 58 }, { -1, 74 }, { -4, 44 },
+ { -1, 69 }, { 0, 62 }, { -7, 51 }, { -4, 47 },
+ { -6, 42 }, { -3, 41 }, { -6, 53 }, { 8, 76 },
+ { -9, 78 }, { -11, 83 }, { 9, 52 }, { 0, 67 },
+ { -5, 90 }, { 1, 67 }, { -15, 72 }, { -5, 75 },
+ { -8, 80 }, { -21, 83 }, { -21, 64 }, { -13, 31 },
+ { -25, 64 }, { -29, 94 }, { 9, 75 }, { 17, 63 },
+ { -8, 74 }, { -5, 35 }, { -2, 27 }, { 13, 91 },
+ { 3, 65 }, { -7, 69 }, { 8, 77 }, { -10, 66 },
+ { 3, 62 }, { -3, 68 }, { -20, 81 }, { 0, 30 },
+ { 1, 7 }, { -3, 23 }, { -21, 74 }, { 16, 66 },
+ { -23, 124 }, { 17, 37 }, { 44, -18 }, { 50, -34 },
+ { -22, 127 },
+
+ /* 166 - 226 */
+ { 4, 39 }, { 0, 42 }, { 7, 34 }, { 11, 29 },
+ { 8, 31 }, { 6, 37 }, { 7, 42 }, { 3, 40 },
+ { 8, 33 }, { 13, 43 }, { 13, 36 }, { 4, 47 },
+ { 3, 55 }, { 2, 58 }, { 6, 60 }, { 8, 44 },
+ { 11, 44 }, { 14, 42 }, { 7, 48 }, { 4, 56 },
+ { 4, 52 }, { 13, 37 }, { 9, 49 }, { 19, 58 },
+ { 10, 48 }, { 12, 45 }, { 0, 69 }, { 20, 33 },
+ { 8, 63 }, { 35, -18 }, { 33, -25 }, { 28, -3 },
+ { 24, 10 }, { 27, 0 }, { 34, -14 }, { 52, -44 },
+ { 39, -24 }, { 19, 17 }, { 31, 25 }, { 36, 29 },
+ { 24, 33 }, { 34, 15 }, { 30, 20 }, { 22, 73 },
+ { 20, 34 }, { 19, 31 }, { 27, 44 }, { 19, 16 },
+ { 15, 36 }, { 15, 36 }, { 21, 28 }, { 25, 21 },
+ { 30, 20 }, { 31, 12 }, { 27, 16 }, { 24, 42 },
+ { 0, 93 }, { 14, 56 }, { 15, 57 }, { 26, 38 },
+ { -24, 127 },
+
+ /* 227 - 275 */
+ { -24, 115 }, { -22, 82 }, { -9, 62 }, { 0, 53 },
+ { 0, 59 }, { -14, 85 }, { -13, 89 }, { -13, 94 },
+ { -11, 92 }, { -29, 127 }, { -21, 100 }, { -14, 57 },
+ { -12, 67 }, { -11, 71 }, { -10, 77 }, { -21, 85 },
+ { -16, 88 }, { -23, 104 }, { -15, 98 }, { -37, 127 },
+ { -10, 82 }, { -8, 48 }, { -8, 61 }, { -8, 66 },
+ { -7, 70 }, { -14, 75 }, { -10, 79 }, { -9, 83 },
+ { -12, 92 }, { -18, 108 }, { -4, 79 }, { -22, 69 },
+ { -16, 75 }, { -2, 58 }, { 1, 58 }, { -13, 78 },
+ { -9, 83 }, { -4, 81 }, { -13, 99 }, { -13, 81 },
+ { -6, 38 }, { -13, 62 }, { -6, 58 }, { -2, 59 },
+ { -16, 73 }, { -10, 76 }, { -13, 86 }, { -9, 83 },
+ { -10, 87 },
+
+ /* 276 a bit special (not used, bypass is used instead) */
+ { 0, 0 },
+
+ /* 277 - 337 */
+ { -22, 127 }, { -25, 127 }, { -25, 120 }, { -27, 127 },
+ { -19, 114 }, { -23, 117 }, { -25, 118 }, { -26, 117 },
+ { -24, 113 }, { -28, 118 }, { -31, 120 }, { -37, 124 },
+ { -10, 94 }, { -15, 102 }, { -10, 99 }, { -13, 106 },
+ { -50, 127 }, { -5, 92 }, { 17, 57 }, { -5, 86 },
+ { -13, 94 }, { -12, 91 }, { -2, 77 }, { 0, 71 },
+ { -1, 73 }, { 4, 64 }, { -7, 81 }, { 5, 64 },
+ { 15, 57 }, { 1, 67 }, { 0, 68 }, { -10, 67 },
+ { 1, 68 }, { 0, 77 }, { 2, 64 }, { 0, 68 },
+ { -5, 78 }, { 7, 55 }, { 5, 59 }, { 2, 65 },
+ { 14, 54 }, { 15, 44 }, { 5, 60 }, { 2, 70 },
+ { -2, 76 }, { -18, 86 }, { 12, 70 }, { 5, 64 },
+ { -12, 70 }, { 11, 55 }, { 5, 56 }, { 0, 69 },
+ { 2, 65 }, { -6, 74 }, { 5, 54 }, { 7, 54 },
+ { -6, 76 }, { -11, 82 }, { -2, 77 }, { -2, 77 },
+ { 25, 42 },
+
+ /* 338 - 398 */
+ { 17, -13 }, { 16, -9 }, { 17, -12 }, { 27, -21 },
+ { 37, -30 }, { 41, -40 }, { 42, -41 }, { 48, -47 },
+ { 39, -32 }, { 46, -40 }, { 52, -51 }, { 46, -41 },
+ { 52, -39 }, { 43, -19 }, { 32, 11 }, { 61, -55 },
+ { 56, -46 }, { 62, -50 }, { 81, -67 }, { 45, -20 },
+ { 35, -2 }, { 28, 15 }, { 34, 1 }, { 39, 1 },
+ { 30, 17 }, { 20, 38 }, { 18, 45 }, { 15, 54 },
+ { 0, 79 }, { 36, -16 }, { 37, -14 }, { 37, -17 },
+ { 32, 1 }, { 34, 15 }, { 29, 15 }, { 24, 25 },
+ { 34, 22 }, { 31, 16 }, { 35, 18 }, { 31, 28 },
+ { 33, 41 }, { 36, 28 }, { 27, 47 }, { 21, 62 },
+ { 18, 31 }, { 19, 26 }, { 36, 24 }, { 24, 23 },
+ { 27, 16 }, { 24, 30 }, { 31, 29 }, { 22, 41 },
+ { 22, 42 }, { 16, 60 }, { 15, 52 }, { 14, 60 },
+ { 3, 78 }, { -16, 123 }, { 21, 53 }, { 22, 56 },
+ { 25, 61 },
+
+ /* 399 - 435 */
+ { 21, 33 }, { 19, 50 }, { 17, 61 },
+ { -3, 78 }, { -8, 74 }, { -9, 72 }, { -10, 72 },
+ { -18, 75 }, { -12, 71 }, { -11, 63 }, { -5, 70 },
+ { -17, 75 }, { -14, 72 }, { -16, 67 }, { -8, 53 },
+ { -14, 59 }, { -9, 52 }, { -11, 68 }, { 9, -2 },
+ { 30, -10 }, { 31, -4 }, { 33, -1 }, { 33, 7 },
+ { 31, 12 }, { 37, 23 }, { 31, 38 }, { 20, 64 },
+ { -9, 71 }, { -7, 37 }, { -8, 44 }, { -11, 49 },
+ { -10, 56 }, { -12, 59 }, { -8, 63 }, { -9, 67 },
+ { -6, 68 }, { -10, 79 },
+
+ /* 436 - 459 */
+ { -3, 78 }, { -8, 74 }, { -9, 72 }, { -10, 72 },
+ { -18, 75 }, { -12, 71 }, { -11, 63 }, { -5, 70 },
+ { -17, 75 }, { -14, 72 }, { -16, 67 }, { -8, 53 },
+ { -14, 59 }, { -9, 52 }, { -11, 68 }, { 9, -2 },
+ { 30, -10 }, { 31, -4 }, { 33, -1 }, { 33, 7 },
+ { 31, 12 }, { 37, 23 }, { 31, 38 }, { 20, 64 },
+ }
+};
+
+void ff_h264_init_cabac_states(H264Context *h) {
+ MpegEncContext * const s = &h->s;
+ int i;
+
+ /* calculate pre-state */
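+    /* per the spec: preCtxState = clip3(1, 126, ((m * sliceQP) >> 4) + n);
+     * the packed state is 2*pStateIdx + valMPS, with valMPS = 0 for pre <= 63 */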
+ for( i= 0; i < 460; i++ ) {
+ int pre;
+ if( h->slice_type_nos == FF_I_TYPE )
+ pre = av_clip( ((cabac_context_init_I[i][0] * s->qscale) >>4 ) + cabac_context_init_I[i][1], 1, 126 );
+ else
+ pre = av_clip( ((cabac_context_init_PB[h->cabac_init_idc][i][0] * s->qscale) >>4 ) + cabac_context_init_PB[h->cabac_init_idc][i][1], 1, 126 );
+
+ if( pre <= 63 )
+ h->cabac_state[i] = 2 * ( 63 - pre ) + 0;
+ else
+ h->cabac_state[i] = 2 * ( pre - 64 ) + 1;
+ }
+}
+
+static int decode_cabac_field_decoding_flag(H264Context *h) {
+ MpegEncContext * const s = &h->s;
+ const long mba_xy = h->mb_xy - 1L;
+ const long mbb_xy = h->mb_xy - 2L*s->mb_stride;
+
+ unsigned long ctx = 0;
+
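+    /* ctx counts the neighbouring (left, above) macroblock pairs in the same
+     * slice that are field coded (MB_TYPE_INTERLACED is bit 7 of mb_type) */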
+ ctx += (s->current_picture.mb_type[mba_xy]>>7)&(h->slice_table[mba_xy] == h->slice_num);
+ ctx += (s->current_picture.mb_type[mbb_xy]>>7)&(h->slice_table[mbb_xy] == h->slice_num);
+
+ return get_cabac_noinline( &h->cabac, &(h->cabac_state+70)[ctx] );
+}
+
+static int decode_cabac_intra_mb_type(H264Context *h, int ctx_base, int intra_slice) {
+ uint8_t *state= &h->cabac_state[ctx_base];
+ int mb_type;
+
+ if(intra_slice){
+ MpegEncContext * const s = &h->s;
+ const int mba_xy = h->left_mb_xy[0];
+ const int mbb_xy = h->top_mb_xy;
+ int ctx=0;
+ if( h->slice_table[mba_xy] == h->slice_num && !IS_INTRA4x4( s->current_picture.mb_type[mba_xy] ) )
+ ctx++;
+ if( h->slice_table[mbb_xy] == h->slice_num && !IS_INTRA4x4( s->current_picture.mb_type[mbb_xy] ) )
+ ctx++;
+ if( get_cabac_noinline( &h->cabac, &state[ctx] ) == 0 )
+ return 0; /* I4x4 */
+ state += 2;
+ }else{
+ if( get_cabac_noinline( &h->cabac, state ) == 0 )
+ return 0; /* I4x4 */
+ }
+
+ if( get_cabac_terminate( &h->cabac ) )
+ return 25; /* PCM */
+
+ mb_type = 1; /* I16x16 */
+ mb_type += 12 * get_cabac_noinline( &h->cabac, &state[1] ); /* cbp_luma != 0 */
+ if( get_cabac_noinline( &h->cabac, &state[2] ) ) /* cbp_chroma */
+ mb_type += 4 + 4 * get_cabac_noinline( &h->cabac, &state[2+intra_slice] );
+ mb_type += 2 * get_cabac_noinline( &h->cabac, &state[3+intra_slice] );
+ mb_type += 1 * get_cabac_noinline( &h->cabac, &state[3+2*intra_slice] );
+ return mb_type;
+}
+
+static int decode_cabac_mb_type_b( H264Context *h ) {
+ MpegEncContext * const s = &h->s;
+
+ const int mba_xy = h->left_mb_xy[0];
+ const int mbb_xy = h->top_mb_xy;
+ int ctx = 0;
+ int bits;
+ assert(h->slice_type_nos == FF_B_TYPE);
+
+ if( h->slice_table[mba_xy] == h->slice_num && !IS_DIRECT( s->current_picture.mb_type[mba_xy] ) )
+ ctx++;
+ if( h->slice_table[mbb_xy] == h->slice_num && !IS_DIRECT( s->current_picture.mb_type[mbb_xy] ) )
+ ctx++;
+
+ if( !get_cabac_noinline( &h->cabac, &h->cabac_state[27+ctx] ) )
+ return 0; /* B_Direct_16x16 */
+
+ if( !get_cabac_noinline( &h->cabac, &h->cabac_state[27+3] ) ) {
+ return 1 + get_cabac_noinline( &h->cabac, &h->cabac_state[27+5] ); /* B_L[01]_16x16 */
+ }
+
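+    /* The remaining B macroblock types are selected by a 4-bin suffix; 13 escapes
+     * to an intra type, 15 to B_8x8, and values 8..12 take one extra bin below. */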
+ bits = get_cabac_noinline( &h->cabac, &h->cabac_state[27+4] ) << 3;
+ bits|= get_cabac_noinline( &h->cabac, &h->cabac_state[27+5] ) << 2;
+ bits|= get_cabac_noinline( &h->cabac, &h->cabac_state[27+5] ) << 1;
+ bits|= get_cabac_noinline( &h->cabac, &h->cabac_state[27+5] );
+ if( bits < 8 )
+ return bits + 3; /* B_Bi_16x16 through B_L1_L0_16x8 */
+ else if( bits == 13 ) {
+ return decode_cabac_intra_mb_type(h, 32, 0) + 23;
+ } else if( bits == 14 )
+ return 11; /* B_L1_L0_8x16 */
+ else if( bits == 15 )
+ return 22; /* B_8x8 */
+
+ bits= ( bits<<1 ) | get_cabac_noinline( &h->cabac, &h->cabac_state[27+5] );
+ return bits - 4; /* B_L0_Bi_* through B_Bi_Bi_* */
+}
+
+static int decode_cabac_mb_skip( H264Context *h, int mb_x, int mb_y ) {
+ MpegEncContext * const s = &h->s;
+ int mba_xy, mbb_xy;
+ int ctx = 0;
+
+ if(FRAME_MBAFF){ //FIXME merge with the stuff in fill_caches?
+ int mb_xy = mb_x + (mb_y&~1)*s->mb_stride;
+ mba_xy = mb_xy - 1;
+ if( (mb_y&1)
+ && h->slice_table[mba_xy] == h->slice_num
+ && MB_FIELD == !!IS_INTERLACED( s->current_picture.mb_type[mba_xy] ) )
+ mba_xy += s->mb_stride;
+ if( MB_FIELD ){
+ mbb_xy = mb_xy - s->mb_stride;
+ if( !(mb_y&1)
+ && h->slice_table[mbb_xy] == h->slice_num
+ && IS_INTERLACED( s->current_picture.mb_type[mbb_xy] ) )
+ mbb_xy -= s->mb_stride;
+ }else
+ mbb_xy = mb_x + (mb_y-1)*s->mb_stride;
+ }else{
+ int mb_xy = h->mb_xy;
+ mba_xy = mb_xy - 1;
+ mbb_xy = mb_xy - (s->mb_stride << FIELD_PICTURE);
+ }
+
+ if( h->slice_table[mba_xy] == h->slice_num && !IS_SKIP( s->current_picture.mb_type[mba_xy] ))
+ ctx++;
+ if( h->slice_table[mbb_xy] == h->slice_num && !IS_SKIP( s->current_picture.mb_type[mbb_xy] ))
+ ctx++;
+
+ if( h->slice_type_nos == FF_B_TYPE )
+ ctx += 13;
+ return get_cabac_noinline( &h->cabac, &h->cabac_state[11+ctx] );
+}
+
+static int decode_cabac_mb_intra4x4_pred_mode( H264Context *h, int pred_mode ) {
+ int mode = 0;
+
+ if( get_cabac( &h->cabac, &h->cabac_state[68] ) )
+ return pred_mode;
+
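+    /* Three fixed-context bins give rem_intra4x4_pred_mode; values at or above
+     * the predicted mode are shifted up by one so the prediction itself is skipped. */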
+ mode += 1 * get_cabac( &h->cabac, &h->cabac_state[69] );
+ mode += 2 * get_cabac( &h->cabac, &h->cabac_state[69] );
+ mode += 4 * get_cabac( &h->cabac, &h->cabac_state[69] );
+
+ if( mode >= pred_mode )
+ return mode + 1;
+ else
+ return mode;
+}
+
+static int decode_cabac_mb_chroma_pre_mode( H264Context *h) {
+ const int mba_xy = h->left_mb_xy[0];
+ const int mbb_xy = h->top_mb_xy;
+
+ int ctx = 0;
+
+ /* No need to test for IS_INTRA4x4 and IS_INTRA16x16, as we set chroma_pred_mode_table to 0 */
+ if( h->slice_table[mba_xy] == h->slice_num && h->chroma_pred_mode_table[mba_xy] != 0 )
+ ctx++;
+
+ if( h->slice_table[mbb_xy] == h->slice_num && h->chroma_pred_mode_table[mbb_xy] != 0 )
+ ctx++;
+
+ if( get_cabac_noinline( &h->cabac, &h->cabac_state[64+ctx] ) == 0 )
+ return 0;
+
+ if( get_cabac_noinline( &h->cabac, &h->cabac_state[64+3] ) == 0 )
+ return 1;
+ if( get_cabac_noinline( &h->cabac, &h->cabac_state[64+3] ) == 0 )
+ return 2;
+ else
+ return 3;
+}
+
+static int decode_cabac_mb_cbp_luma( H264Context *h) {
+ int cbp_b, cbp_a, ctx, cbp = 0;
+
+ cbp_a = h->slice_table[h->left_mb_xy[0]] == h->slice_num ? h->left_cbp : -1;
+ cbp_b = h->slice_table[h->top_mb_xy] == h->slice_num ? h->top_cbp : -1;
+
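+    /* One bin per 8x8 luma block; each context adds 1 if the left neighbouring
+     * 8x8 block is uncoded and 2 if the top one is uncoded. */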
+ ctx = !(cbp_a & 0x02) + 2 * !(cbp_b & 0x04);
+ cbp |= get_cabac_noinline(&h->cabac, &h->cabac_state[73 + ctx]);
+ ctx = !(cbp & 0x01) + 2 * !(cbp_b & 0x08);
+ cbp |= get_cabac_noinline(&h->cabac, &h->cabac_state[73 + ctx]) << 1;
+ ctx = !(cbp_a & 0x08) + 2 * !(cbp & 0x01);
+ cbp |= get_cabac_noinline(&h->cabac, &h->cabac_state[73 + ctx]) << 2;
+ ctx = !(cbp & 0x04) + 2 * !(cbp & 0x02);
+ cbp |= get_cabac_noinline(&h->cabac, &h->cabac_state[73 + ctx]) << 3;
+ return cbp;
+}
+static int decode_cabac_mb_cbp_chroma( H264Context *h) {
+ int ctx;
+ int cbp_a, cbp_b;
+
+ cbp_a = (h->left_cbp>>4)&0x03;
+ cbp_b = (h-> top_cbp>>4)&0x03;
+
+ ctx = 0;
+ if( cbp_a > 0 ) ctx++;
+ if( cbp_b > 0 ) ctx += 2;
+ if( get_cabac_noinline( &h->cabac, &h->cabac_state[77 + ctx] ) == 0 )
+ return 0;
+
+ ctx = 4;
+ if( cbp_a == 2 ) ctx++;
+ if( cbp_b == 2 ) ctx += 2;
+ return 1 + get_cabac_noinline( &h->cabac, &h->cabac_state[77 + ctx] );
+}
+static int decode_cabac_mb_dqp( H264Context *h) {
+ int ctx= h->last_qscale_diff != 0;
+ int val = 0;
+
+ while( get_cabac_noinline( &h->cabac, &h->cabac_state[60 + ctx] ) ) {
+ ctx= 2+(ctx>>1);
+ val++;
+ if(val > 102) //prevent infinite loop
+ return INT_MIN;
+ }
+
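+    /* Map the unary-coded magnitude to the signed mb_qp_delta: 1 -> +1, 2 -> -1, 3 -> +2, 4 -> -2, ... */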
+ if( val&0x01 )
+ return (val + 1)>>1 ;
+ else
+ return -((val + 1)>>1);
+}
+static int decode_cabac_p_mb_sub_type( H264Context *h ) {
+ if( get_cabac( &h->cabac, &h->cabac_state[21] ) )
+ return 0; /* 8x8 */
+ if( !get_cabac( &h->cabac, &h->cabac_state[22] ) )
+ return 1; /* 8x4 */
+ if( get_cabac( &h->cabac, &h->cabac_state[23] ) )
+ return 2; /* 4x8 */
+ return 3; /* 4x4 */
+}
+static int decode_cabac_b_mb_sub_type( H264Context *h ) {
+ int type;
+ if( !get_cabac( &h->cabac, &h->cabac_state[36] ) )
+ return 0; /* B_Direct_8x8 */
+ if( !get_cabac( &h->cabac, &h->cabac_state[37] ) )
+ return 1 + get_cabac( &h->cabac, &h->cabac_state[39] ); /* B_L0_8x8, B_L1_8x8 */
+ type = 3;
+ if( get_cabac( &h->cabac, &h->cabac_state[38] ) ) {
+ if( get_cabac( &h->cabac, &h->cabac_state[39] ) )
+ return 11 + get_cabac( &h->cabac, &h->cabac_state[39] ); /* B_L1_4x4, B_Bi_4x4 */
+ type += 4;
+ }
+ type += 2*get_cabac( &h->cabac, &h->cabac_state[39] );
+ type += get_cabac( &h->cabac, &h->cabac_state[39] );
+ return type;
+}
+
+static inline int decode_cabac_mb_transform_size( H264Context *h ) {
+ return get_cabac_noinline( &h->cabac, &h->cabac_state[399 + h->neighbor_transform_size] );
+}
+
+static int decode_cabac_mb_ref( H264Context *h, int list, int n ) {
+ int refa = h->ref_cache[list][scan8[n] - 1];
+ int refb = h->ref_cache[list][scan8[n] - 8];
+ int ref = 0;
+ int ctx = 0;
+
+ if( h->slice_type_nos == FF_B_TYPE) {
+ if( refa > 0 && !h->direct_cache[scan8[n] - 1] )
+ ctx++;
+ if( refb > 0 && !h->direct_cache[scan8[n] - 8] )
+ ctx += 2;
+ } else {
+ if( refa > 0 )
+ ctx++;
+ if( refb > 0 )
+ ctx += 2;
+ }
+
+ while( get_cabac( &h->cabac, &h->cabac_state[54+ctx] ) ) {
+ ref++;
+ ctx = (ctx>>2)+4;
+ if(ref >= 32 /*h->ref_list[list]*/){
+ return -1;
+ }
+ }
+ return ref;
+}
+
+static int decode_cabac_mb_mvd( H264Context *h, int list, int n, int l ) {
+ int amvd = abs( h->mvd_cache[list][scan8[n] - 1][l] ) +
+ abs( h->mvd_cache[list][scan8[n] - 8][l] );
+ int ctxbase = (l == 0) ? 40 : 47;
+ int mvd;
+ int ctx = (amvd>2) + (amvd>32);
+
+ if(!get_cabac(&h->cabac, &h->cabac_state[ctxbase+ctx]))
+ return 0;
+
+ mvd= 1;
+ ctx= 3;
+ while( mvd < 9 && get_cabac( &h->cabac, &h->cabac_state[ctxbase+ctx] ) ) {
+ mvd++;
+ if( ctx < 6 )
+ ctx++;
+ }
+
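+    /* Values of 9 or more escape to an exp-Golomb-style bypass suffix (k starts
+     * at 3): each leading 1 adds 1<<k and increments k, then k literal bits follow. */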
+ if( mvd >= 9 ) {
+ int k = 3;
+ while( get_cabac_bypass( &h->cabac ) ) {
+ mvd += 1 << k;
+ k++;
+ if(k>24){
+ av_log(h->s.avctx, AV_LOG_ERROR, "overflow in decode_cabac_mb_mvd\n");
+ return INT_MIN;
+ }
+ }
+ while( k-- ) {
+ if( get_cabac_bypass( &h->cabac ) )
+ mvd += 1 << k;
+ }
+ }
+ return get_cabac_bypass_sign( &h->cabac, -mvd );
+}
+
+static av_always_inline int get_cabac_cbf_ctx( H264Context *h, int cat, int idx, int is_dc ) {
+ int nza, nzb;
+ int ctx = 0;
+
+ if( is_dc ) {
+ if( cat == 0 ) {
+ nza = h->left_cbp&0x100;
+ nzb = h-> top_cbp&0x100;
+ } else {
+ nza = (h->left_cbp>>(6+idx))&0x01;
+ nzb = (h-> top_cbp>>(6+idx))&0x01;
+ }
+ } else {
+ assert(cat == 1 || cat == 2 || cat == 4);
+ nza = h->non_zero_count_cache[scan8[idx] - 1];
+ nzb = h->non_zero_count_cache[scan8[idx] - 8];
+ }
+
+ if( nza > 0 )
+ ctx++;
+
+ if( nzb > 0 )
+ ctx += 2;
+
+ return ctx + 4 * cat;
+}
+
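+/* Context offset for the last_significant_coeff_flag of 8x8 blocks, indexed by
+ * scan position; values 0..8 select one of 9 contexts. */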
+DECLARE_ASM_CONST(1, uint8_t, last_coeff_flag_offset_8x8)[63] = {
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
+ 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8
+};
+
+static av_always_inline void decode_cabac_residual_internal( H264Context *h, DCTELEM *block, int cat, int n, const uint8_t *scantable, const uint32_t *qmul, int max_coeff, int is_dc ) {
+ static const int significant_coeff_flag_offset[2][6] = {
+ { 105+0, 105+15, 105+29, 105+44, 105+47, 402 },
+ { 277+0, 277+15, 277+29, 277+44, 277+47, 436 }
+ };
+ static const int last_coeff_flag_offset[2][6] = {
+ { 166+0, 166+15, 166+29, 166+44, 166+47, 417 },
+ { 338+0, 338+15, 338+29, 338+44, 338+47, 451 }
+ };
+ static const int coeff_abs_level_m1_offset[6] = {
+ 227+0, 227+10, 227+20, 227+30, 227+39, 426
+ };
+ static const uint8_t significant_coeff_flag_offset_8x8[2][63] = {
+ { 0, 1, 2, 3, 4, 5, 5, 4, 4, 3, 3, 4, 4, 4, 5, 5,
+ 4, 4, 4, 4, 3, 3, 6, 7, 7, 7, 8, 9,10, 9, 8, 7,
+ 7, 6,11,12,13,11, 6, 7, 8, 9,14,10, 9, 8, 6,11,
+ 12,13,11, 6, 9,14,10, 9,11,12,13,11,14,10,12 },
+ { 0, 1, 1, 2, 2, 3, 3, 4, 5, 6, 7, 7, 7, 8, 4, 5,
+ 6, 9,10,10, 8,11,12,11, 9, 9,10,10, 8,11,12,11,
+ 9, 9,10,10, 8,11,12,11, 9, 9,10,10, 8,13,13, 9,
+ 9,10,10, 8,13,13, 9, 9,10,10,14,14,14,14,14 }
+ };
+ /* node ctx: 0..3: abslevel1 (with abslevelgt1 == 0).
+ * 4..7: abslevelgt1 + 3 (and abslevel1 doesn't matter).
+ * map node ctx => cabac ctx for level=1 */
+ static const uint8_t coeff_abs_level1_ctx[8] = { 1, 2, 3, 4, 0, 0, 0, 0 };
+ /* map node ctx => cabac ctx for level>1 */
+ static const uint8_t coeff_abs_levelgt1_ctx[8] = { 5, 5, 5, 5, 6, 7, 8, 9 };
+ static const uint8_t coeff_abs_level_transition[2][8] = {
+ /* update node ctx after decoding a level=1 */
+ { 1, 2, 3, 3, 4, 5, 6, 7 },
+ /* update node ctx after decoding a level>1 */
+ { 4, 4, 4, 4, 5, 6, 7, 7 }
+ };
+
+ int index[64];
+
+ int av_unused last;
+ int coeff_count = 0;
+ int node_ctx = 0;
+
+ uint8_t *significant_coeff_ctx_base;
+ uint8_t *last_coeff_ctx_base;
+ uint8_t *abs_level_m1_ctx_base;
+
+#if !ARCH_X86
+#define CABAC_ON_STACK
+#endif
+#ifdef CABAC_ON_STACK
+#define CC &cc
+ CABACContext cc;
+ cc.range = h->cabac.range;
+ cc.low = h->cabac.low;
+ cc.bytestream= h->cabac.bytestream;
+#else
+#define CC &h->cabac
+#endif
+
+
+ /* cat: 0-> DC 16x16 n = 0
+ * 1-> AC 16x16 n = luma4x4idx
+ * 2-> Luma4x4 n = luma4x4idx
+ * 3-> DC Chroma n = iCbCr
+ * 4-> AC Chroma n = 16 + 4 * iCbCr + chroma4x4idx
+ * 5-> Luma8x8 n = 4 * luma8x8idx
+ */
+
+ /* read coded block flag */
+ if( is_dc || cat != 5 ) {
+ if( get_cabac( CC, &h->cabac_state[85 + get_cabac_cbf_ctx( h, cat, n, is_dc ) ] ) == 0 ) {
+ if( !is_dc )
+ h->non_zero_count_cache[scan8[n]] = 0;
+
+#ifdef CABAC_ON_STACK
+ h->cabac.range = cc.range ;
+ h->cabac.low = cc.low ;
+ h->cabac.bytestream= cc.bytestream;
+#endif
+ return;
+ }
+ }
+
+ significant_coeff_ctx_base = h->cabac_state
+ + significant_coeff_flag_offset[MB_FIELD][cat];
+ last_coeff_ctx_base = h->cabac_state
+ + last_coeff_flag_offset[MB_FIELD][cat];
+ abs_level_m1_ctx_base = h->cabac_state
+ + coeff_abs_level_m1_offset[cat];
+
+ if( !is_dc && cat == 5 ) {
+#define DECODE_SIGNIFICANCE( coefs, sig_off, last_off ) \
+ for(last= 0; last < coefs; last++) { \
+ uint8_t *sig_ctx = significant_coeff_ctx_base + sig_off; \
+ if( get_cabac( CC, sig_ctx )) { \
+ uint8_t *last_ctx = last_coeff_ctx_base + last_off; \
+ index[coeff_count++] = last; \
+ if( get_cabac( CC, last_ctx ) ) { \
+ last= max_coeff; \
+ break; \
+ } \
+ } \
+ }\
+ if( last == max_coeff -1 ) {\
+ index[coeff_count++] = last;\
+ }
+ const uint8_t *sig_off = significant_coeff_flag_offset_8x8[MB_FIELD];
+#if ARCH_X86 && HAVE_7REGS && HAVE_EBX_AVAILABLE && !defined(BROKEN_RELOCATIONS)
+ coeff_count= decode_significance_8x8_x86(CC, significant_coeff_ctx_base, index, sig_off);
+ } else {
+ coeff_count= decode_significance_x86(CC, max_coeff, significant_coeff_ctx_base, index);
+#else
+ DECODE_SIGNIFICANCE( 63, sig_off[last], last_coeff_flag_offset_8x8[last] );
+ } else {
+ DECODE_SIGNIFICANCE( max_coeff - 1, last, last );
+#endif
+ }
+ assert(coeff_count > 0);
+
+ if( is_dc ) {
+ if( cat == 0 )
+ h->cbp_table[h->mb_xy] |= 0x100;
+ else
+ h->cbp_table[h->mb_xy] |= 0x40 << n;
+ } else {
+ if( cat == 5 )
+ fill_rectangle(&h->non_zero_count_cache[scan8[n]], 2, 2, 8, coeff_count, 1);
+ else {
+ assert( cat == 1 || cat == 2 || cat == 4 );
+ h->non_zero_count_cache[scan8[n]] = coeff_count;
+ }
+ }
+
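+    /* Decode the coefficient levels in reverse scan order, starting from the
+     * last significant coefficient found above. */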
+ do {
+ uint8_t *ctx = coeff_abs_level1_ctx[node_ctx] + abs_level_m1_ctx_base;
+
+ int j= scantable[index[--coeff_count]];
+
+ if( get_cabac( CC, ctx ) == 0 ) {
+ node_ctx = coeff_abs_level_transition[0][node_ctx];
+ if( is_dc ) {
+ block[j] = get_cabac_bypass_sign( CC, -1);
+ }else{
+ block[j] = (get_cabac_bypass_sign( CC, -qmul[j]) + 32) >> 6;
+ }
+ } else {
+ int coeff_abs = 2;
+ ctx = coeff_abs_levelgt1_ctx[node_ctx] + abs_level_m1_ctx_base;
+ node_ctx = coeff_abs_level_transition[1][node_ctx];
+
+ while( coeff_abs < 15 && get_cabac( CC, ctx ) ) {
+ coeff_abs++;
+ }
+
+ if( coeff_abs >= 15 ) {
+ int j = 0;
+ while( get_cabac_bypass( CC ) ) {
+ j++;
+ }
+
+ coeff_abs=1;
+ while( j-- ) {
+ coeff_abs += coeff_abs + get_cabac_bypass( CC );
+ }
+ coeff_abs+= 14;
+ }
+
+ if( is_dc ) {
+ block[j] = get_cabac_bypass_sign( CC, -coeff_abs );
+ }else{
+ block[j] = (get_cabac_bypass_sign( CC, -coeff_abs ) * qmul[j] + 32) >> 6;
+ }
+ }
+ } while( coeff_count );
+#ifdef CABAC_ON_STACK
+ h->cabac.range = cc.range ;
+ h->cabac.low = cc.low ;
+ h->cabac.bytestream= cc.bytestream;
+#endif
+
+}
+
+#if !CONFIG_SMALL
+static void decode_cabac_residual_dc( H264Context *h, DCTELEM *block, int cat, int n, const uint8_t *scantable, const uint32_t *qmul, int max_coeff ) {
+ decode_cabac_residual_internal(h, block, cat, n, scantable, qmul, max_coeff, 1);
+}
+
+static void decode_cabac_residual_nondc( H264Context *h, DCTELEM *block, int cat, int n, const uint8_t *scantable, const uint32_t *qmul, int max_coeff ) {
+ decode_cabac_residual_internal(h, block, cat, n, scantable, qmul, max_coeff, 0);
+}
+#endif
+
+static void decode_cabac_residual( H264Context *h, DCTELEM *block, int cat, int n, const uint8_t *scantable, const uint32_t *qmul, int max_coeff ) {
+#if CONFIG_SMALL
+ decode_cabac_residual_internal(h, block, cat, n, scantable, qmul, max_coeff, cat == 0 || cat == 3);
+#else
+ if( cat == 0 || cat == 3 ) decode_cabac_residual_dc(h, block, cat, n, scantable, qmul, max_coeff);
+ else decode_cabac_residual_nondc(h, block, cat, n, scantable, qmul, max_coeff);
+#endif
+}
+
+static inline void compute_mb_neighbors(H264Context *h)
+{
+ MpegEncContext * const s = &h->s;
+ const int mb_xy = h->mb_xy;
+ h->top_mb_xy = mb_xy - s->mb_stride;
+ h->left_mb_xy[0] = mb_xy - 1;
+ if(FRAME_MBAFF){
+ const int pair_xy = s->mb_x + (s->mb_y & ~1)*s->mb_stride;
+ const int top_pair_xy = pair_xy - s->mb_stride;
+ const int top_mb_field_flag = IS_INTERLACED(s->current_picture.mb_type[top_pair_xy]);
+ const int left_mb_field_flag = IS_INTERLACED(s->current_picture.mb_type[pair_xy-1]);
+ const int curr_mb_field_flag = MB_FIELD;
+ const int bottom = (s->mb_y & 1);
+
+ if (curr_mb_field_flag && (bottom || top_mb_field_flag)){
+ h->top_mb_xy -= s->mb_stride;
+ }
+ if (!left_mb_field_flag == curr_mb_field_flag) {
+ h->left_mb_xy[0] = pair_xy - 1;
+ }
+ } else if (FIELD_PICTURE) {
+ h->top_mb_xy -= s->mb_stride;
+ }
+ return;
+}
+
+/**
+ * decodes a macroblock
+ * @return 0 if OK, AC_ERROR / DC_ERROR / MV_ERROR if an error is detected
+ */
+int ff_h264_decode_mb_cabac(H264Context *h) {
+ MpegEncContext * const s = &h->s;
+ int mb_xy;
+ int mb_type, partition_count, cbp = 0;
+ int dct8x8_allowed= h->pps.transform_8x8_mode;
+
+ mb_xy = h->mb_xy = s->mb_x + s->mb_y*s->mb_stride;
+
+ /* ffdshow custom code */
+ #if ENABLE_SLICE_MT_PATCH
+ if(s->avctx->thread_count > 1) {
+ s->dsp.clear_blocks(h->mb);
+ }
+ #endif
+
+ tprintf(s->avctx, "pic:%d mb:%d/%d\n", h->frame_num, s->mb_x, s->mb_y);
+ if( h->slice_type_nos != FF_I_TYPE ) {
+ int skip;
+ /* a skipped mb needs the aff flag from the following mb */
+ if( FRAME_MBAFF && s->mb_x==0 && (s->mb_y&1)==0 )
+ predict_field_decoding_flag(h);
+ if( FRAME_MBAFF && (s->mb_y&1)==1 && h->prev_mb_skipped )
+ skip = h->next_mb_skipped;
+ else
+ skip = decode_cabac_mb_skip( h, s->mb_x, s->mb_y );
+ /* read skip flags */
+ if( skip ) {
+ if( FRAME_MBAFF && (s->mb_y&1)==0 ){
+ s->current_picture.mb_type[mb_xy] = MB_TYPE_SKIP;
+ h->next_mb_skipped = decode_cabac_mb_skip( h, s->mb_x, s->mb_y+1 );
+ if(!h->next_mb_skipped)
+ h->mb_mbaff = h->mb_field_decoding_flag = decode_cabac_field_decoding_flag(h);
+ }
+
+ decode_mb_skip(h);
+
+ h->cbp_table[mb_xy] = 0;
+ h->chroma_pred_mode_table[mb_xy] = 0;
+ h->last_qscale_diff = 0;
+
+ return 0;
+
+ }
+ }
+ if(FRAME_MBAFF){
+ if( (s->mb_y&1) == 0 )
+ h->mb_mbaff =
+ h->mb_field_decoding_flag = decode_cabac_field_decoding_flag(h);
+ }
+
+ h->prev_mb_skipped = 0;
+
+ compute_mb_neighbors(h);
+
+ if( h->slice_type_nos == FF_B_TYPE ) {
+ mb_type = decode_cabac_mb_type_b( h );
+ if( mb_type < 23 ){
+ partition_count= b_mb_type_info[mb_type].partition_count;
+ mb_type= b_mb_type_info[mb_type].type;
+ }else{
+ mb_type -= 23;
+ goto decode_intra_mb;
+ }
+ } else if( h->slice_type_nos == FF_P_TYPE ) {
+ if( get_cabac_noinline( &h->cabac, &h->cabac_state[14] ) == 0 ) {
+ /* P-type */
+ if( get_cabac_noinline( &h->cabac, &h->cabac_state[15] ) == 0 ) {
+ /* P_L0_D16x16, P_8x8 */
+ mb_type= 3 * get_cabac_noinline( &h->cabac, &h->cabac_state[16] );
+ } else {
+ /* P_L0_D8x16, P_L0_D16x8 */
+ mb_type= 2 - get_cabac_noinline( &h->cabac, &h->cabac_state[17] );
+ }
+ partition_count= p_mb_type_info[mb_type].partition_count;
+ mb_type= p_mb_type_info[mb_type].type;
+ } else {
+ mb_type= decode_cabac_intra_mb_type(h, 17, 0);
+ goto decode_intra_mb;
+ }
+ } else {
+ mb_type= decode_cabac_intra_mb_type(h, 3, 1);
+ if(h->slice_type == FF_SI_TYPE && mb_type)
+ mb_type--;
+ assert(h->slice_type_nos == FF_I_TYPE);
+decode_intra_mb:
+ partition_count = 0;
+ cbp= i_mb_type_info[mb_type].cbp;
+ h->intra16x16_pred_mode= i_mb_type_info[mb_type].pred_mode;
+ mb_type= i_mb_type_info[mb_type].type;
+ }
+ if(MB_FIELD)
+ mb_type |= MB_TYPE_INTERLACED;
+
+ h->slice_table[ mb_xy ]= h->slice_num;
+
+ if(IS_INTRA_PCM(mb_type)) {
+ const uint8_t *ptr;
+
+        // We assume these blocks are very rare, so we do not optimize them.
+        // FIXME: the following two lines get the bitstream position in the CABAC
+        // decoder; this should be done by a function in cabac.h (or cabac.c).
+ ptr= h->cabac.bytestream;
+ if(h->cabac.low&0x1) ptr--;
+ if(CABAC_BITS==16){
+ if(h->cabac.low&0x1FF) ptr--;
+ }
+
+ // The pixels are stored in the same order as levels in h->mb array.
+ memcpy(h->mb, ptr, 256); ptr+=256;
+ if(CHROMA){
+ memcpy(h->mb+128, ptr, 128); ptr+=128;
+ }
+
+ ff_init_cabac_decoder(&h->cabac, ptr, h->cabac.bytestream_end - ptr);
+
+ // All blocks are present
+ h->cbp_table[mb_xy] = 0x1ef;
+ h->chroma_pred_mode_table[mb_xy] = 0;
+ // In deblocking, the quantizer is 0
+ s->current_picture.qscale_table[mb_xy]= 0;
+ // All coeffs are present
+ memset(h->non_zero_count[mb_xy], 16, 32);
+ s->current_picture.mb_type[mb_xy]= mb_type;
+ h->last_qscale_diff = 0;
+ return 0;
+ }
+
+ if(MB_MBAFF){
+ h->ref_count[0] <<= 1;
+ h->ref_count[1] <<= 1;
+ }
+
+ fill_decode_caches(h, mb_type);
+
+ if( IS_INTRA( mb_type ) ) {
+ int i, pred_mode;
+ if( IS_INTRA4x4( mb_type ) ) {
+ if( dct8x8_allowed && decode_cabac_mb_transform_size( h ) ) {
+ mb_type |= MB_TYPE_8x8DCT;
+ for( i = 0; i < 16; i+=4 ) {
+ int pred = pred_intra_mode( h, i );
+ int mode = decode_cabac_mb_intra4x4_pred_mode( h, pred );
+ fill_rectangle( &h->intra4x4_pred_mode_cache[ scan8[i] ], 2, 2, 8, mode, 1 );
+ }
+ } else {
+ for( i = 0; i < 16; i++ ) {
+ int pred = pred_intra_mode( h, i );
+ h->intra4x4_pred_mode_cache[ scan8[i] ] = decode_cabac_mb_intra4x4_pred_mode( h, pred );
+
+ //av_log( s->avctx, AV_LOG_ERROR, "i4x4 pred=%d mode=%d\n", pred, h->intra4x4_pred_mode_cache[ scan8[i] ] );
+ }
+ }
+ ff_h264_write_back_intra_pred_mode(h);
+ if( ff_h264_check_intra4x4_pred_mode(h) < 0 ) return -1;
+ } else {
+ h->intra16x16_pred_mode= ff_h264_check_intra_pred_mode( h, h->intra16x16_pred_mode );
+ if( h->intra16x16_pred_mode < 0 ) return -1;
+ }
+ if(CHROMA){
+ h->chroma_pred_mode_table[mb_xy] =
+ pred_mode = decode_cabac_mb_chroma_pre_mode( h );
+
+ pred_mode= ff_h264_check_intra_pred_mode( h, pred_mode );
+ if( pred_mode < 0 ) return -1;
+ h->chroma_pred_mode= pred_mode;
+ }
+ } else if( partition_count == 4 ) {
+ int i, j, sub_partition_count[4], list, ref[2][4];
+
+ if( h->slice_type_nos == FF_B_TYPE ) {
+ for( i = 0; i < 4; i++ ) {
+ h->sub_mb_type[i] = decode_cabac_b_mb_sub_type( h );
+ sub_partition_count[i]= b_sub_mb_type_info[ h->sub_mb_type[i] ].partition_count;
+ h->sub_mb_type[i]= b_sub_mb_type_info[ h->sub_mb_type[i] ].type;
+ }
+ if( IS_DIRECT(h->sub_mb_type[0] | h->sub_mb_type[1] |
+ h->sub_mb_type[2] | h->sub_mb_type[3]) ) {
+ ff_h264_pred_direct_motion(h, &mb_type);
+ h->ref_cache[0][scan8[4]] =
+ h->ref_cache[1][scan8[4]] =
+ h->ref_cache[0][scan8[12]] =
+ h->ref_cache[1][scan8[12]] = PART_NOT_AVAILABLE;
+ if( h->ref_count[0] > 1 || h->ref_count[1] > 1 ) {
+ for( i = 0; i < 4; i++ )
+ if( IS_DIRECT(h->sub_mb_type[i]) )
+ fill_rectangle( &h->direct_cache[scan8[4*i]], 2, 2, 8, 1, 1 );
+ }
+ }
+ } else {
+ for( i = 0; i < 4; i++ ) {
+ h->sub_mb_type[i] = decode_cabac_p_mb_sub_type( h );
+ sub_partition_count[i]= p_sub_mb_type_info[ h->sub_mb_type[i] ].partition_count;
+ h->sub_mb_type[i]= p_sub_mb_type_info[ h->sub_mb_type[i] ].type;
+ }
+ }
+
+ for( list = 0; list < h->list_count; list++ ) {
+ for( i = 0; i < 4; i++ ) {
+ if(IS_DIRECT(h->sub_mb_type[i])) continue;
+ if(IS_DIR(h->sub_mb_type[i], 0, list)){
+ if( h->ref_count[list] > 1 ){
+ ref[list][i] = decode_cabac_mb_ref( h, list, 4*i );
+ if(ref[list][i] >= (unsigned)h->ref_count[list]){
+ av_log(s->avctx, AV_LOG_ERROR, "Reference %d >= %d\n", ref[list][i], h->ref_count[list]);
+ return -1;
+ }
+ }else
+ ref[list][i] = 0;
+ } else {
+ ref[list][i] = -1;
+ }
+ h->ref_cache[list][ scan8[4*i]+1 ]=
+ h->ref_cache[list][ scan8[4*i]+8 ]=h->ref_cache[list][ scan8[4*i]+9 ]= ref[list][i];
+ }
+ }
+
+ if(dct8x8_allowed)
+ dct8x8_allowed = get_dct8x8_allowed(h);
+
+ for(list=0; list<h->list_count; list++){
+ for(i=0; i<4; i++){
+ h->ref_cache[list][ scan8[4*i] ]=h->ref_cache[list][ scan8[4*i]+1 ];
+ if(IS_DIRECT(h->sub_mb_type[i])){
+ fill_rectangle(h->mvd_cache[list][scan8[4*i]], 2, 2, 8, 0, 4);
+ continue;
+ }
+
+ if(IS_DIR(h->sub_mb_type[i], 0, list) && !IS_DIRECT(h->sub_mb_type[i])){
+ const int sub_mb_type= h->sub_mb_type[i];
+ const int block_width= (sub_mb_type & (MB_TYPE_16x16|MB_TYPE_16x8)) ? 2 : 1;
+ for(j=0; j<sub_partition_count[i]; j++){
+ int mpx, mpy;
+ int mx, my;
+ const int index= 4*i + block_width*j;
+ int16_t (* mv_cache)[2]= &h->mv_cache[list][ scan8[index] ];
+ int16_t (* mvd_cache)[2]= &h->mvd_cache[list][ scan8[index] ];
+ pred_motion(h, index, block_width, list, h->ref_cache[list][ scan8[index] ], &mpx, &mpy);
+
+ mx = mpx + decode_cabac_mb_mvd( h, list, index, 0 );
+ my = mpy + decode_cabac_mb_mvd( h, list, index, 1 );
+ tprintf(s->avctx, "final mv:%d %d\n", mx, my);
+
+ if(IS_SUB_8X8(sub_mb_type)){
+ mv_cache[ 1 ][0]=
+ mv_cache[ 8 ][0]= mv_cache[ 9 ][0]= mx;
+ mv_cache[ 1 ][1]=
+ mv_cache[ 8 ][1]= mv_cache[ 9 ][1]= my;
+
+ mvd_cache[ 1 ][0]=
+ mvd_cache[ 8 ][0]= mvd_cache[ 9 ][0]= mx - mpx;
+ mvd_cache[ 1 ][1]=
+ mvd_cache[ 8 ][1]= mvd_cache[ 9 ][1]= my - mpy;
+ }else if(IS_SUB_8X4(sub_mb_type)){
+ mv_cache[ 1 ][0]= mx;
+ mv_cache[ 1 ][1]= my;
+
+ mvd_cache[ 1 ][0]= mx - mpx;
+ mvd_cache[ 1 ][1]= my - mpy;
+ }else if(IS_SUB_4X8(sub_mb_type)){
+ mv_cache[ 8 ][0]= mx;
+ mv_cache[ 8 ][1]= my;
+
+ mvd_cache[ 8 ][0]= mx - mpx;
+ mvd_cache[ 8 ][1]= my - mpy;
+ }
+ mv_cache[ 0 ][0]= mx;
+ mv_cache[ 0 ][1]= my;
+
+ mvd_cache[ 0 ][0]= mx - mpx;
+ mvd_cache[ 0 ][1]= my - mpy;
+ }
+ }else{
+ uint32_t *p= (uint32_t *)&h->mv_cache[list][ scan8[4*i] ][0];
+ uint32_t *pd= (uint32_t *)&h->mvd_cache[list][ scan8[4*i] ][0];
+ p[0] = p[1] = p[8] = p[9] = 0;
+ pd[0]= pd[1]= pd[8]= pd[9]= 0;
+ }
+ }
+ }
+ } else if( IS_DIRECT(mb_type) ) {
+ ff_h264_pred_direct_motion(h, &mb_type);
+ fill_rectangle(h->mvd_cache[0][scan8[0]], 4, 4, 8, 0, 4);
+ fill_rectangle(h->mvd_cache[1][scan8[0]], 4, 4, 8, 0, 4);
+ dct8x8_allowed &= h->sps.direct_8x8_inference_flag;
+ } else {
+ int list, mx, my, i, mpx, mpy;
+ if(IS_16X16(mb_type)){
+ for(list=0; list<h->list_count; list++){
+ if(IS_DIR(mb_type, 0, list)){
+ int ref;
+ if(h->ref_count[list] > 1){
+ ref= decode_cabac_mb_ref(h, list, 0);
+ if(ref >= (unsigned)h->ref_count[list]){
+ av_log(s->avctx, AV_LOG_ERROR, "Reference %d >= %d\n", ref, h->ref_count[list]);
+ return -1;
+ }
+ }else
+ ref=0;
+ fill_rectangle(&h->ref_cache[list][ scan8[0] ], 4, 4, 8, ref, 1);
+ }else
+ fill_rectangle(&h->ref_cache[list][ scan8[0] ], 4, 4, 8, (uint8_t)LIST_NOT_USED, 1); //FIXME factorize and the other fill_rect below too
+ }
+ for(list=0; list<h->list_count; list++){
+ if(IS_DIR(mb_type, 0, list)){
+ pred_motion(h, 0, 4, list, h->ref_cache[list][ scan8[0] ], &mpx, &mpy);
+
+ mx = mpx + decode_cabac_mb_mvd( h, list, 0, 0 );
+ my = mpy + decode_cabac_mb_mvd( h, list, 0, 1 );
+ tprintf(s->avctx, "final mv:%d %d\n", mx, my);
+
+ fill_rectangle(h->mvd_cache[list][ scan8[0] ], 4, 4, 8, pack16to32(mx-mpx,my-mpy), 4);
+ fill_rectangle(h->mv_cache[list][ scan8[0] ], 4, 4, 8, pack16to32(mx,my), 4);
+ }else
+ fill_rectangle(h->mv_cache[list][ scan8[0] ], 4, 4, 8, 0, 4);
+ }
+ }
+ else if(IS_16X8(mb_type)){
+ for(list=0; list<h->list_count; list++){
+ for(i=0; i<2; i++){
+ if(IS_DIR(mb_type, i, list)){
+ int ref;
+ if(h->ref_count[list] > 1){
+ ref= decode_cabac_mb_ref( h, list, 8*i );
+ if(ref >= (unsigned)h->ref_count[list]){
+ av_log(s->avctx, AV_LOG_ERROR, "Reference %d >= %d\n", ref, h->ref_count[list]);
+ return -1;
+ }
+ }else
+ ref=0;
+ fill_rectangle(&h->ref_cache[list][ scan8[0] + 16*i ], 4, 2, 8, ref, 1);
+ }else
+ fill_rectangle(&h->ref_cache[list][ scan8[0] + 16*i ], 4, 2, 8, (LIST_NOT_USED&0xFF), 1);
+ }
+ }
+ for(list=0; list<h->list_count; list++){
+ for(i=0; i<2; i++){
+ if(IS_DIR(mb_type, i, list)){
+ pred_16x8_motion(h, 8*i, list, h->ref_cache[list][scan8[0] + 16*i], &mpx, &mpy);
+ mx = mpx + decode_cabac_mb_mvd( h, list, 8*i, 0 );
+ my = mpy + decode_cabac_mb_mvd( h, list, 8*i, 1 );
+ tprintf(s->avctx, "final mv:%d %d\n", mx, my);
+
+ fill_rectangle(h->mvd_cache[list][ scan8[0] + 16*i ], 4, 2, 8, pack16to32(mx-mpx,my-mpy), 4);
+ fill_rectangle(h->mv_cache[list][ scan8[0] + 16*i ], 4, 2, 8, pack16to32(mx,my), 4);
+ }else{
+ fill_rectangle(h->mvd_cache[list][ scan8[0] + 16*i ], 4, 2, 8, 0, 4);
+ fill_rectangle(h-> mv_cache[list][ scan8[0] + 16*i ], 4, 2, 8, 0, 4);
+ }
+ }
+ }
+ }else{
+ assert(IS_8X16(mb_type));
+ for(list=0; list<h->list_count; list++){
+ for(i=0; i<2; i++){
+ if(IS_DIR(mb_type, i, list)){ //FIXME optimize
+ int ref;
+ if(h->ref_count[list] > 1){
+ ref= decode_cabac_mb_ref( h, list, 4*i );
+ if(ref >= (unsigned)h->ref_count[list]){
+ av_log(s->avctx, AV_LOG_ERROR, "Reference %d >= %d\n", ref, h->ref_count[list]);
+ return -1;
+ }
+ }else
+ ref=0;
+ fill_rectangle(&h->ref_cache[list][ scan8[0] + 2*i ], 2, 4, 8, ref, 1);
+ }else
+ fill_rectangle(&h->ref_cache[list][ scan8[0] + 2*i ], 2, 4, 8, (LIST_NOT_USED&0xFF), 1);
+ }
+ }
+ for(list=0; list<h->list_count; list++){
+ for(i=0; i<2; i++){
+ if(IS_DIR(mb_type, i, list)){
+ pred_8x16_motion(h, i*4, list, h->ref_cache[list][ scan8[0] + 2*i ], &mpx, &mpy);
+ mx = mpx + decode_cabac_mb_mvd( h, list, 4*i, 0 );
+ my = mpy + decode_cabac_mb_mvd( h, list, 4*i, 1 );
+
+ tprintf(s->avctx, "final mv:%d %d\n", mx, my);
+ fill_rectangle(h->mvd_cache[list][ scan8[0] + 2*i ], 2, 4, 8, pack16to32(mx-mpx,my-mpy), 4);
+ fill_rectangle(h->mv_cache[list][ scan8[0] + 2*i ], 2, 4, 8, pack16to32(mx,my), 4);
+ }else{
+ fill_rectangle(h->mvd_cache[list][ scan8[0] + 2*i ], 2, 4, 8, 0, 4);
+ fill_rectangle(h-> mv_cache[list][ scan8[0] + 2*i ], 2, 4, 8, 0, 4);
+ }
+ }
+ }
+ }
+ }
+
+ if( IS_INTER( mb_type ) ) {
+ h->chroma_pred_mode_table[mb_xy] = 0;
+ write_back_motion( h, mb_type );
+ }
+
+ if( !IS_INTRA16x16( mb_type ) ) {
+ cbp = decode_cabac_mb_cbp_luma( h );
+ if(CHROMA)
+ cbp |= decode_cabac_mb_cbp_chroma( h ) << 4;
+ }
+
+ h->cbp_table[mb_xy] = h->cbp = cbp;
+
+ if( dct8x8_allowed && (cbp&15) && !IS_INTRA( mb_type ) ) {
+ if( decode_cabac_mb_transform_size( h ) )
+ mb_type |= MB_TYPE_8x8DCT;
+ }
+ s->current_picture.mb_type[mb_xy]= mb_type;
+
+ if( cbp || IS_INTRA16x16( mb_type ) ) {
+ const uint8_t *scan, *scan8x8, *dc_scan;
+ const uint32_t *qmul;
+ int dqp;
+
+ if(IS_INTERLACED(mb_type)){
+ scan8x8= s->qscale ? h->field_scan8x8 : h->field_scan8x8_q0;
+ scan= s->qscale ? h->field_scan : h->field_scan_q0;
+ dc_scan= luma_dc_field_scan;
+ }else{
+ scan8x8= s->qscale ? h->zigzag_scan8x8 : h->zigzag_scan8x8_q0;
+ scan= s->qscale ? h->zigzag_scan : h->zigzag_scan_q0;
+ dc_scan= luma_dc_zigzag_scan;
+ }
+
+ h->last_qscale_diff = dqp = decode_cabac_mb_dqp( h );
+ if( dqp == INT_MIN ){
+ av_log(h->s.avctx, AV_LOG_ERROR, "cabac decode of qscale diff failed at %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+ s->qscale += dqp;
+ if(((unsigned)s->qscale) > 51){
+ if(s->qscale<0) s->qscale+= 52;
+ else s->qscale-= 52;
+ }
+ h->chroma_qp[0] = get_chroma_qp(h, 0, s->qscale);
+ h->chroma_qp[1] = get_chroma_qp(h, 1, s->qscale);
+
+ if( IS_INTRA16x16( mb_type ) ) {
+ int i;
+ //av_log( s->avctx, AV_LOG_ERROR, "INTRA16x16 DC\n" );
+ decode_cabac_residual( h, h->mb, 0, 0, dc_scan, NULL, 16);
+
+ if( cbp&15 ) {
+ qmul = h->dequant4_coeff[0][s->qscale];
+ for( i = 0; i < 16; i++ ) {
+ //av_log( s->avctx, AV_LOG_ERROR, "INTRA16x16 AC:%d\n", i );
+ decode_cabac_residual(h, h->mb + 16*i, 1, i, scan + 1, qmul, 15);
+ }
+ } else {
+ fill_rectangle(&h->non_zero_count_cache[scan8[0]], 4, 4, 8, 0, 1);
+ }
+ } else {
+ int i8x8, i4x4;
+ for( i8x8 = 0; i8x8 < 4; i8x8++ ) {
+ if( cbp & (1<<i8x8) ) {
+ if( IS_8x8DCT(mb_type) ) {
+ decode_cabac_residual(h, h->mb + 64*i8x8, 5, 4*i8x8,
+ scan8x8, h->dequant8_coeff[IS_INTRA( mb_type ) ? 0:1][s->qscale], 64);
+ } else {
+ qmul = h->dequant4_coeff[IS_INTRA( mb_type ) ? 0:3][s->qscale];
+ for( i4x4 = 0; i4x4 < 4; i4x4++ ) {
+ const int index = 4*i8x8 + i4x4;
+ //av_log( s->avctx, AV_LOG_ERROR, "Luma4x4: %d\n", index );
+//START_TIMER
+ decode_cabac_residual(h, h->mb + 16*index, 2, index, scan, qmul, 16);
+//STOP_TIMER("decode_residual")
+ }
+ }
+ } else {
+ uint8_t * const nnz= &h->non_zero_count_cache[ scan8[4*i8x8] ];
+ nnz[0] = nnz[1] = nnz[8] = nnz[9] = 0;
+ }
+ }
+ }
+
+ if( cbp&0x30 ){
+ int c;
+ for( c = 0; c < 2; c++ ) {
+ //av_log( s->avctx, AV_LOG_ERROR, "INTRA C%d-DC\n",c );
+ decode_cabac_residual(h, h->mb + 256 + 16*4*c, 3, c, chroma_dc_scan, NULL, 4);
+ }
+ }
+
+ if( cbp&0x20 ) {
+ int c, i;
+ for( c = 0; c < 2; c++ ) {
+ qmul = h->dequant4_coeff[c+1+(IS_INTRA( mb_type ) ? 0:3)][h->chroma_qp[c]];
+ for( i = 0; i < 4; i++ ) {
+ const int index = 16 + 4 * c + i;
+ //av_log( s->avctx, AV_LOG_ERROR, "INTRA C%d-AC %d\n",c, index - 16 );
+ decode_cabac_residual(h, h->mb + 16*index, 4, index, scan + 1, qmul, 15);
+ }
+ }
+ } else {
+ uint8_t * const nnz= &h->non_zero_count_cache[0];
+ nnz[ scan8[16]+0 ] = nnz[ scan8[16]+1 ] =nnz[ scan8[16]+8 ] =nnz[ scan8[16]+9 ] =
+ nnz[ scan8[20]+0 ] = nnz[ scan8[20]+1 ] =nnz[ scan8[20]+8 ] =nnz[ scan8[20]+9 ] = 0;
+ }
+ } else {
+ uint8_t * const nnz= &h->non_zero_count_cache[0];
+ fill_rectangle(&nnz[scan8[0]], 4, 4, 8, 0, 1);
+ nnz[ scan8[16]+0 ] = nnz[ scan8[16]+1 ] =nnz[ scan8[16]+8 ] =nnz[ scan8[16]+9 ] =
+ nnz[ scan8[20]+0 ] = nnz[ scan8[20]+1 ] =nnz[ scan8[20]+8 ] =nnz[ scan8[20]+9 ] = 0;
+ h->last_qscale_diff = 0;
+ }
+
+ s->current_picture.qscale_table[mb_xy]= s->qscale;
+ write_back_non_zero_count(h);
+
+ if(MB_MBAFF){
+ h->ref_count[0] >>= 1;
+ h->ref_count[1] >>= 1;
+ }
+
+ return 0;
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_cavlc.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_cavlc.c
new file mode 100644
index 000000000..d2c4fbc05
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_cavlc.c
@@ -0,0 +1,1037 @@
+/*
+ * H.26L/H.264/AVC/JVT/14496-10/... cavlc bitstream decoding
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/h264_cavlc.c
+ * H.264 / AVC / MPEG4 part10 cavlc bitstream decoding.
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#define CABAC 0
+
+#include "internal.h"
+#include "avcodec.h"
+#include "mpegvideo.h"
+#include "h264.h"
+#include "h264data.h" // FIXME FIXME FIXME
+#include "h264_mvpred.h"
+#include "golomb.h"
+
+//#undef NDEBUG
+#include <assert.h>
+
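+/* coded_block_pattern mappings for streams without chroma: the Exp-Golomb
+ * index is translated to the 4-bit luma CBP. */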
+static const uint8_t golomb_to_inter_cbp_gray[16]={
+ 0, 1, 2, 4, 8, 3, 5,10,12,15, 7,11,13,14, 6, 9,
+};
+
+static const uint8_t golomb_to_intra4x4_cbp_gray[16]={
+15, 0, 7,11,13,14, 3, 5,10,12, 1, 2, 4, 8, 6, 9,
+};
+
+static const uint8_t chroma_dc_coeff_token_len[4*5]={
+ 2, 0, 0, 0,
+ 6, 1, 0, 0,
+ 6, 6, 3, 0,
+ 6, 7, 7, 6,
+ 6, 8, 8, 7,
+};
+
+static const uint8_t chroma_dc_coeff_token_bits[4*5]={
+ 1, 0, 0, 0,
+ 7, 1, 0, 0,
+ 4, 6, 1, 0,
+ 3, 3, 2, 5,
+ 2, 3, 2, 0,
+};
+
+static const uint8_t coeff_token_len[4][4*17]={
+{
+ 1, 0, 0, 0,
+ 6, 2, 0, 0, 8, 6, 3, 0, 9, 8, 7, 5, 10, 9, 8, 6,
+ 11,10, 9, 7, 13,11,10, 8, 13,13,11, 9, 13,13,13,10,
+ 14,14,13,11, 14,14,14,13, 15,15,14,14, 15,15,15,14,
+ 16,15,15,15, 16,16,16,15, 16,16,16,16, 16,16,16,16,
+},
+{
+ 2, 0, 0, 0,
+ 6, 2, 0, 0, 6, 5, 3, 0, 7, 6, 6, 4, 8, 6, 6, 4,
+ 8, 7, 7, 5, 9, 8, 8, 6, 11, 9, 9, 6, 11,11,11, 7,
+ 12,11,11, 9, 12,12,12,11, 12,12,12,11, 13,13,13,12,
+ 13,13,13,13, 13,14,13,13, 14,14,14,13, 14,14,14,14,
+},
+{
+ 4, 0, 0, 0,
+ 6, 4, 0, 0, 6, 5, 4, 0, 6, 5, 5, 4, 7, 5, 5, 4,
+ 7, 5, 5, 4, 7, 6, 6, 4, 7, 6, 6, 4, 8, 7, 7, 5,
+ 8, 8, 7, 6, 9, 8, 8, 7, 9, 9, 8, 8, 9, 9, 9, 8,
+ 10, 9, 9, 9, 10,10,10,10, 10,10,10,10, 10,10,10,10,
+},
+{
+ 6, 0, 0, 0,
+ 6, 6, 0, 0, 6, 6, 6, 0, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+}
+};
+
+static const uint8_t coeff_token_bits[4][4*17]={
+{
+ 1, 0, 0, 0,
+ 5, 1, 0, 0, 7, 4, 1, 0, 7, 6, 5, 3, 7, 6, 5, 3,
+ 7, 6, 5, 4, 15, 6, 5, 4, 11,14, 5, 4, 8,10,13, 4,
+ 15,14, 9, 4, 11,10,13,12, 15,14, 9,12, 11,10,13, 8,
+ 15, 1, 9,12, 11,14,13, 8, 7,10, 9,12, 4, 6, 5, 8,
+},
+{
+ 3, 0, 0, 0,
+ 11, 2, 0, 0, 7, 7, 3, 0, 7,10, 9, 5, 7, 6, 5, 4,
+ 4, 6, 5, 6, 7, 6, 5, 8, 15, 6, 5, 4, 11,14,13, 4,
+ 15,10, 9, 4, 11,14,13,12, 8,10, 9, 8, 15,14,13,12,
+ 11,10, 9,12, 7,11, 6, 8, 9, 8,10, 1, 7, 6, 5, 4,
+},
+{
+ 15, 0, 0, 0,
+ 15,14, 0, 0, 11,15,13, 0, 8,12,14,12, 15,10,11,11,
+ 11, 8, 9,10, 9,14,13, 9, 8,10, 9, 8, 15,14,13,13,
+ 11,14,10,12, 15,10,13,12, 11,14, 9,12, 8,10,13, 8,
+ 13, 7, 9,12, 9,12,11,10, 5, 8, 7, 6, 1, 4, 3, 2,
+},
+{
+ 3, 0, 0, 0,
+ 0, 1, 0, 0, 4, 5, 6, 0, 8, 9,10,11, 12,13,14,15,
+ 16,17,18,19, 20,21,22,23, 24,25,26,27, 28,29,30,31,
+ 32,33,34,35, 36,37,38,39, 40,41,42,43, 44,45,46,47,
+ 48,49,50,51, 52,53,54,55, 56,57,58,59, 60,61,62,63,
+}
+};
+
+static const uint8_t total_zeros_len[16][16]= {
+ {1,3,3,4,4,5,5,6,6,7,7,8,8,9,9,9},
+ {3,3,3,3,3,4,4,4,4,5,5,6,6,6,6},
+ {4,3,3,3,4,4,3,3,4,5,5,6,5,6},
+ {5,3,4,4,3,3,3,4,3,4,5,5,5},
+ {4,4,4,3,3,3,3,3,4,5,4,5},
+ {6,5,3,3,3,3,3,3,4,3,6},
+ {6,5,3,3,3,2,3,4,3,6},
+ {6,4,5,3,2,2,3,3,6},
+ {6,6,4,2,2,3,2,5},
+ {5,5,3,2,2,2,4},
+ {4,4,3,3,1,3},
+ {4,4,2,1,3},
+ {3,3,1,2},
+ {2,2,1},
+ {1,1},
+};
+
+static const uint8_t total_zeros_bits[16][16]= {
+ {1,3,2,3,2,3,2,3,2,3,2,3,2,3,2,1},
+ {7,6,5,4,3,5,4,3,2,3,2,3,2,1,0},
+ {5,7,6,5,4,3,4,3,2,3,2,1,1,0},
+ {3,7,5,4,6,5,4,3,3,2,2,1,0},
+ {5,4,3,7,6,5,4,3,2,1,1,0},
+ {1,1,7,6,5,4,3,2,1,1,0},
+ {1,1,5,4,3,3,2,1,1,0},
+ {1,1,1,3,3,2,2,1,0},
+ {1,0,1,3,2,1,1,1},
+ {1,0,1,3,2,1,1},
+ {0,1,1,2,1,3},
+ {0,1,1,1,1},
+ {0,1,1,1},
+ {0,1,1},
+ {0,1},
+};
+
+static const uint8_t chroma_dc_total_zeros_len[3][4]= {
+ { 1, 2, 3, 3,},
+ { 1, 2, 2, 0,},
+ { 1, 1, 0, 0,},
+};
+
+static const uint8_t chroma_dc_total_zeros_bits[3][4]= {
+ { 1, 1, 1, 0,},
+ { 1, 1, 0, 0,},
+ { 1, 0, 0, 0,},
+};
+
+static const uint8_t run_len[7][16]={
+ {1,1},
+ {1,2,2},
+ {2,2,2,2},
+ {2,2,2,3,3},
+ {2,2,3,3,3,3},
+ {2,3,3,3,3,3,3},
+ {3,3,3,3,3,3,3,4,5,6,7,8,9,10,11},
+};
+
+static const uint8_t run_bits[7][16]={
+ {1,0},
+ {1,1,0},
+ {3,2,1,0},
+ {3,2,1,1,0},
+ {3,2,3,2,1,0},
+ {3,0,1,3,2,5,4},
+ {7,6,5,4,3,2,1,1,1,1,1,1,1,1,1},
+};
+
+static VLC coeff_token_vlc[4];
+static VLC_TYPE coeff_token_vlc_tables[520+332+280+256][2];
+static const int coeff_token_vlc_tables_size[4]={520,332,280,256};
+
+static VLC chroma_dc_coeff_token_vlc;
+static VLC_TYPE chroma_dc_coeff_token_vlc_table[256][2];
+static const int chroma_dc_coeff_token_vlc_table_size = 256;
+
+static VLC total_zeros_vlc[15];
+static VLC_TYPE total_zeros_vlc_tables[15][512][2];
+static const int total_zeros_vlc_tables_size = 512;
+
+static VLC chroma_dc_total_zeros_vlc[3];
+static VLC_TYPE chroma_dc_total_zeros_vlc_tables[3][8][2];
+static const int chroma_dc_total_zeros_vlc_tables_size = 8;
+
+static VLC run_vlc[6];
+static VLC_TYPE run_vlc_tables[6][8][2];
+static const int run_vlc_tables_size = 8;
+
+static VLC run7_vlc;
+static VLC_TYPE run7_vlc_table[96][2];
+static const int run7_vlc_table_size = 96;
+
+#define LEVEL_TAB_BITS 8
+static int8_t cavlc_level_tab[7][1<<LEVEL_TAB_BITS][2];
+
+
+/**
+ * gets the predicted number of non-zero coefficients.
+ * @param n block index
+ */
+static inline int pred_non_zero_count(H264Context *h, int n){
+ const int index8= scan8[n];
+ const int left= h->non_zero_count_cache[index8 - 1];
+ const int top = h->non_zero_count_cache[index8 - 8];
+ int i= left + top;
+
+ if(i<64) i= (i+1)>>1;
+
+ tprintf(h->s.avctx, "pred_nnz L%X T%X n%d s%d P%X\n", left, top, n, scan8[n], i&31);
+
+ return i&31;
+}
+
+static av_cold void init_cavlc_level_tab(void){
+ int suffix_length, mask;
+ unsigned int i;
+
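+    /* For every LEVEL_TAB_BITS-bit bitstream window, precompute the decoded
+     * level and the number of bits consumed; windows that cannot be fully
+     * resolved store an escape value >= 100 so the slow path is taken instead. */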
+ for(suffix_length=0; suffix_length<7; suffix_length++){
+ for(i=0; i<(1<<LEVEL_TAB_BITS); i++){
+ int prefix= LEVEL_TAB_BITS - av_log2(2*i);
+ int level_code= (prefix<<suffix_length) + (i>>(LEVEL_TAB_BITS-prefix-1-suffix_length)) - (1<<suffix_length);
+
+ mask= -(level_code&1);
+ level_code= (((2+level_code)>>1) ^ mask) - mask;
+ if(prefix + 1 + suffix_length <= LEVEL_TAB_BITS){
+ cavlc_level_tab[suffix_length][i][0]= level_code;
+ cavlc_level_tab[suffix_length][i][1]= prefix + 1 + suffix_length;
+ }else if(prefix + 1 <= LEVEL_TAB_BITS){
+ cavlc_level_tab[suffix_length][i][0]= prefix+100;
+ cavlc_level_tab[suffix_length][i][1]= prefix + 1;
+ }else{
+ cavlc_level_tab[suffix_length][i][0]= LEVEL_TAB_BITS+100;
+ cavlc_level_tab[suffix_length][i][1]= LEVEL_TAB_BITS;
+ }
+ }
+ }
+}
+
+av_cold void ff_h264_decode_init_vlc(void){
+ static int done = 0;
+
+ if (!done) {
+ int i;
+ int offset;
+ done = 1;
+
+ chroma_dc_coeff_token_vlc.table = chroma_dc_coeff_token_vlc_table;
+ chroma_dc_coeff_token_vlc.table_allocated = chroma_dc_coeff_token_vlc_table_size;
+ init_vlc(&chroma_dc_coeff_token_vlc, CHROMA_DC_COEFF_TOKEN_VLC_BITS, 4*5,
+ &chroma_dc_coeff_token_len [0], 1, 1,
+ &chroma_dc_coeff_token_bits[0], 1, 1,
+ INIT_VLC_USE_NEW_STATIC);
+
+ offset = 0;
+ for(i=0; i<4; i++){
+ coeff_token_vlc[i].table = coeff_token_vlc_tables+offset;
+ coeff_token_vlc[i].table_allocated = coeff_token_vlc_tables_size[i];
+ init_vlc(&coeff_token_vlc[i], COEFF_TOKEN_VLC_BITS, 4*17,
+ &coeff_token_len [i][0], 1, 1,
+ &coeff_token_bits[i][0], 1, 1,
+ INIT_VLC_USE_NEW_STATIC);
+ offset += coeff_token_vlc_tables_size[i];
+ }
+ /*
+ * This is a one time safety check to make sure that
+ * the packed static coeff_token_vlc table sizes
+ * were initialized correctly.
+ */
+ assert(offset == FF_ARRAY_ELEMS(coeff_token_vlc_tables));
+
+ for(i=0; i<3; i++){
+ chroma_dc_total_zeros_vlc[i].table = chroma_dc_total_zeros_vlc_tables[i];
+ chroma_dc_total_zeros_vlc[i].table_allocated = chroma_dc_total_zeros_vlc_tables_size;
+ init_vlc(&chroma_dc_total_zeros_vlc[i],
+ CHROMA_DC_TOTAL_ZEROS_VLC_BITS, 4,
+ &chroma_dc_total_zeros_len [i][0], 1, 1,
+ &chroma_dc_total_zeros_bits[i][0], 1, 1,
+ INIT_VLC_USE_NEW_STATIC);
+ }
+ for(i=0; i<15; i++){
+ total_zeros_vlc[i].table = total_zeros_vlc_tables[i];
+ total_zeros_vlc[i].table_allocated = total_zeros_vlc_tables_size;
+ init_vlc(&total_zeros_vlc[i],
+ TOTAL_ZEROS_VLC_BITS, 16,
+ &total_zeros_len [i][0], 1, 1,
+ &total_zeros_bits[i][0], 1, 1,
+ INIT_VLC_USE_NEW_STATIC);
+ }
+
+ for(i=0; i<6; i++){
+ run_vlc[i].table = run_vlc_tables[i];
+ run_vlc[i].table_allocated = run_vlc_tables_size;
+ init_vlc(&run_vlc[i],
+ RUN_VLC_BITS, 7,
+ &run_len [i][0], 1, 1,
+ &run_bits[i][0], 1, 1,
+ INIT_VLC_USE_NEW_STATIC);
+ }
+        run7_vlc.table = run7_vlc_table;
+ run7_vlc.table_allocated = run7_vlc_table_size;
+ init_vlc(&run7_vlc, RUN7_VLC_BITS, 16,
+ &run_len [6][0], 1, 1,
+ &run_bits[6][0], 1, 1,
+ INIT_VLC_USE_NEW_STATIC);
+
+ init_cavlc_level_tab();
+ }
+}
+
+/**
+ * Reads the CAVLC level_prefix: the number of leading zero bits before the
+ * next set bit (the terminating one bit is consumed as well).
+ */
+static inline int get_level_prefix(GetBitContext *gb){
+ unsigned int buf;
+ int log;
+
+ OPEN_READER(re, gb);
+ UPDATE_CACHE(re, gb);
+ buf=GET_CACHE(re, gb);
+
+ log= 32 - av_log2(buf);
+#ifdef TRACE
+ print_bin(buf>>(32-log), log);
+ av_log(NULL, AV_LOG_DEBUG, "%5d %2d %3d lpr @%5d in %s get_level_prefix\n", buf>>(32-log), log, log-1, get_bits_count(gb), __FILE__);
+#endif
+
+ LAST_SKIP_BITS(re, gb, log);
+ CLOSE_READER(re, gb);
+
+ return log-1;
+}
+
+/**
+ * decodes a residual block.
+ * @param n block index
+ * @param scantable scantable
+ * @param max_coeff number of coefficients in the block
+ * @return <0 if an error occurred
+ */
+static int decode_residual(H264Context *h, GetBitContext *gb, DCTELEM *block, int n, const uint8_t *scantable, const uint32_t *qmul, int max_coeff){
+ MpegEncContext * const s = &h->s;
+ static const int coeff_token_table_index[17]= {0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3};
+ int level[16];
+ int zeros_left, coeff_num, coeff_token, total_coeff, i, j, trailing_ones, run_before;
+
+    //FIXME put trailing_ones into the context
+
+ if(n == CHROMA_DC_BLOCK_INDEX){
+ coeff_token= get_vlc2(gb, chroma_dc_coeff_token_vlc.table, CHROMA_DC_COEFF_TOKEN_VLC_BITS, 1);
+ total_coeff= coeff_token>>2;
+ }else{
+ if(n == LUMA_DC_BLOCK_INDEX){
+ total_coeff= pred_non_zero_count(h, 0);
+ coeff_token= get_vlc2(gb, coeff_token_vlc[ coeff_token_table_index[total_coeff] ].table, COEFF_TOKEN_VLC_BITS, 2);
+ total_coeff= coeff_token>>2;
+ }else{
+ total_coeff= pred_non_zero_count(h, n);
+ coeff_token= get_vlc2(gb, coeff_token_vlc[ coeff_token_table_index[total_coeff] ].table, COEFF_TOKEN_VLC_BITS, 2);
+ total_coeff= coeff_token>>2;
+ h->non_zero_count_cache[ scan8[n] ]= total_coeff;
+ }
+ }
+
+ //FIXME set last_non_zero?
+
+ if(total_coeff==0)
+ return 0;
+ if(total_coeff > (unsigned)max_coeff) {
+ av_log(h->s.avctx, AV_LOG_ERROR, "corrupted macroblock %d %d (total_coeff=%d)\n", s->mb_x, s->mb_y, total_coeff);
+ return -1;
+ }
+
+ trailing_ones= coeff_token&3;
+ tprintf(h->s.avctx, "trailing:%d, total:%d\n", trailing_ones, total_coeff);
+ assert(total_coeff<=16);
+
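+    /* Peek 3 bits for the trailing +/-1 signs but only consume trailing_ones of
+     * them: a set bit gives -1, a clear bit gives +1. */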
+ i = show_bits(gb, 3);
+ skip_bits(gb, trailing_ones);
+ level[0] = 1-((i&4)>>1);
+ level[1] = 1-((i&2) );
+ level[2] = 1-((i&1)<<1);
+
+ if(trailing_ones<total_coeff) {
+ int mask, prefix;
+        int suffix_length = (total_coeff > 10) & (trailing_ones < 3);
+ int bitsi= show_bits(gb, LEVEL_TAB_BITS);
+ int level_code= cavlc_level_tab[suffix_length][bitsi][0];
+
+ skip_bits(gb, cavlc_level_tab[suffix_length][bitsi][1]);
+ if(level_code >= 100){
+ prefix= level_code - 100;
+ if(prefix == LEVEL_TAB_BITS)
+ prefix += get_level_prefix(gb);
+
+ //first coefficient has suffix_length equal to 0 or 1
+ if(prefix<14){ //FIXME try to build a large unified VLC table for all this
+ if(suffix_length)
+ level_code= (prefix<<1) + get_bits1(gb); //part
+ else
+ level_code= prefix; //part
+ }else if(prefix==14){
+ if(suffix_length)
+ level_code= (prefix<<1) + get_bits1(gb); //part
+ else
+ level_code= prefix + get_bits(gb, 4); //part
+ }else{
+ level_code= 30 + get_bits(gb, prefix-3); //part
+ if(prefix>=16)
+ level_code += (1<<(prefix-3))-4096;
+ }
+
+ if(trailing_ones < 3) level_code += 2;
+
+ suffix_length = 2;
+ mask= -(level_code&1);
+ level[trailing_ones]= (((2+level_code)>>1) ^ mask) - mask;
+ }else{
+ level_code += ((level_code>>31)|1) & -(trailing_ones < 3);
+
+ suffix_length = 1 + (level_code + 3U > 6U);
+ level[trailing_ones]= level_code;
+ }
+
+ //remaining coefficients have suffix_length > 0
+ for(i=trailing_ones+1;i<total_coeff;i++) {
+ static const unsigned int suffix_limit[7] = {0,3,6,12,24,48,INT_MAX };
+ int bitsi= show_bits(gb, LEVEL_TAB_BITS);
+ level_code= cavlc_level_tab[suffix_length][bitsi][0];
+
+ skip_bits(gb, cavlc_level_tab[suffix_length][bitsi][1]);
+ if(level_code >= 100){
+ prefix= level_code - 100;
+ if(prefix == LEVEL_TAB_BITS){
+ prefix += get_level_prefix(gb);
+ }
+ if(prefix<15){
+ level_code = (prefix<<suffix_length) + get_bits(gb, suffix_length);
+ }else{
+ level_code = (15<<suffix_length) + get_bits(gb, prefix-3);
+ if(prefix>=16)
+ level_code += (1<<(prefix-3))-4096;
+ }
+ mask= -(level_code&1);
+ level_code= (((2+level_code)>>1) ^ mask) - mask;
+ }
+ level[i]= level_code;
+ suffix_length+= suffix_limit[suffix_length] + level_code > 2U*suffix_limit[suffix_length];
+ }
+ }
+
+ if(total_coeff == max_coeff)
+ zeros_left=0;
+ else{
+ if(n == CHROMA_DC_BLOCK_INDEX)
+ zeros_left= get_vlc2(gb, (chroma_dc_total_zeros_vlc-1)[ total_coeff ].table, CHROMA_DC_TOTAL_ZEROS_VLC_BITS, 1);
+ else
+ zeros_left= get_vlc2(gb, (total_zeros_vlc-1)[ total_coeff ].table, TOTAL_ZEROS_VLC_BITS, 1);
+ }
+
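+    /* total_zeros and run_before place the coefficients from the highest scan
+     * position downwards: coeff_num decreases by 1 + run_before per coefficient. */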
+ coeff_num = zeros_left + total_coeff - 1;
+ j = scantable[coeff_num];
+ if(n > 24){
+ block[j] = level[0];
+ for(i=1;i<total_coeff;i++) {
+ if(zeros_left <= 0)
+ run_before = 0;
+ else if(zeros_left < 7){
+ run_before= get_vlc2(gb, (run_vlc-1)[zeros_left].table, RUN_VLC_BITS, 1);
+ }else{
+ run_before= get_vlc2(gb, run7_vlc.table, RUN7_VLC_BITS, 2);
+ }
+ zeros_left -= run_before;
+ coeff_num -= 1 + run_before;
+ j= scantable[ coeff_num ];
+
+ block[j]= level[i];
+ }
+ }else{
+ block[j] = (level[0] * qmul[j] + 32)>>6;
+ for(i=1;i<total_coeff;i++) {
+ if(zeros_left <= 0)
+ run_before = 0;
+ else if(zeros_left < 7){
+ run_before= get_vlc2(gb, (run_vlc-1)[zeros_left].table, RUN_VLC_BITS, 1);
+ }else{
+ run_before= get_vlc2(gb, run7_vlc.table, RUN7_VLC_BITS, 2);
+ }
+ zeros_left -= run_before;
+ coeff_num -= 1 + run_before;
+ j= scantable[ coeff_num ];
+
+ block[j]= (level[i] * qmul[j] + 32)>>6;
+ }
+ }
+
+ if(zeros_left<0){
+ av_log(h->s.avctx, AV_LOG_ERROR, "negative number of zero coeffs at %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+
+ return 0;
+}
+
+int ff_h264_decode_mb_cavlc(H264Context *h){
+ MpegEncContext * const s = &h->s;
+ int mb_xy;
+ int partition_count;
+ unsigned int mb_type, cbp;
+ int dct8x8_allowed= h->pps.transform_8x8_mode;
+
+ mb_xy = h->mb_xy = s->mb_x + s->mb_y*s->mb_stride;
+
+ /* ffdshow custom code */
+ #if ENABLE_SLICE_MT_PATCH
+ if(s->avctx->thread_count > 1) {
+ s->dsp.clear_blocks(h->mb);
+ }
+ #endif
+
+ tprintf(s->avctx, "pic:%d mb:%d/%d\n", h->frame_num, s->mb_x, s->mb_y);
+ cbp = 0; /* avoid warning. FIXME: find a solution without slowing
+ down the code */
+ if(h->slice_type_nos != FF_I_TYPE){
+ if(s->mb_skip_run==-1)
+ s->mb_skip_run= get_ue_golomb(&s->gb);
+
+ if (s->mb_skip_run--) {
+ if(FRAME_MBAFF && (s->mb_y&1) == 0){
+ if(s->mb_skip_run==0)
+ h->mb_mbaff = h->mb_field_decoding_flag = get_bits1(&s->gb);
+ else
+ predict_field_decoding_flag(h);
+ }
+ decode_mb_skip(h);
+ return 0;
+ }
+ }
+ if(FRAME_MBAFF){
+ if( (s->mb_y&1) == 0 )
+ h->mb_mbaff = h->mb_field_decoding_flag = get_bits1(&s->gb);
+ }
+
+ h->prev_mb_skipped= 0;
+
+ mb_type= get_ue_golomb(&s->gb);
+ if(h->slice_type_nos == FF_B_TYPE){
+ if(mb_type < 23){
+ partition_count= b_mb_type_info[mb_type].partition_count;
+ mb_type= b_mb_type_info[mb_type].type;
+ }else{
+ mb_type -= 23;
+ goto decode_intra_mb;
+ }
+ }else if(h->slice_type_nos == FF_P_TYPE){
+ if(mb_type < 5){
+ partition_count= p_mb_type_info[mb_type].partition_count;
+ mb_type= p_mb_type_info[mb_type].type;
+ }else{
+ mb_type -= 5;
+ goto decode_intra_mb;
+ }
+ }else{
+ assert(h->slice_type_nos == FF_I_TYPE);
+ if(h->slice_type == FF_SI_TYPE && mb_type)
+ mb_type--;
+decode_intra_mb:
+ if(mb_type > 25){
+ av_log(h->s.avctx, AV_LOG_ERROR, "mb_type %d in %c slice too large at %d %d\n", mb_type, av_get_pict_type_char(h->slice_type), s->mb_x, s->mb_y);
+ return -1;
+ }
+ partition_count=0;
+ cbp= i_mb_type_info[mb_type].cbp;
+ h->intra16x16_pred_mode= i_mb_type_info[mb_type].pred_mode;
+ mb_type= i_mb_type_info[mb_type].type;
+ }
+
+ if(MB_FIELD)
+ mb_type |= MB_TYPE_INTERLACED;
+
+ h->slice_table[ mb_xy ]= h->slice_num;
+
+ if(IS_INTRA_PCM(mb_type)){
+ unsigned int x;
+
+        // We assume these blocks are very rare, so we do not optimize them.
+ align_get_bits(&s->gb);
+
+ // The pixels are stored in the same order as levels in h->mb array.
+ for(x=0; x < (CHROMA ? 384 : 256); x++){
+ ((uint8_t*)h->mb)[x]= get_bits(&s->gb, 8);
+ }
+
+ // In deblocking, the quantizer is 0
+ s->current_picture.qscale_table[mb_xy]= 0;
+ // All coeffs are present
+ memset(h->non_zero_count[mb_xy], 16, 32);
+
+ s->current_picture.mb_type[mb_xy]= mb_type;
+ return 0;
+ }
+
+ if(MB_MBAFF){
+ h->ref_count[0] <<= 1;
+ h->ref_count[1] <<= 1;
+ }
+
+ fill_decode_caches(h, mb_type);
+
+ //mb_pred
+ if(IS_INTRA(mb_type)){
+ int pred_mode;
+// init_top_left_availability(h);
+ if(IS_INTRA4x4(mb_type)){
+ int i;
+ int di = 1;
+ if(dct8x8_allowed && get_bits1(&s->gb)){
+ mb_type |= MB_TYPE_8x8DCT;
+ di = 4;
+ }
+
+// fill_intra4x4_pred_table(h);
+ for(i=0; i<16; i+=di){
+ int mode= pred_intra_mode(h, i);
+
+ if(!get_bits1(&s->gb)){
+ const int rem_mode= get_bits(&s->gb, 3);
+ mode = rem_mode + (rem_mode >= mode);
+ }
+
+ if(di==4)
+ fill_rectangle( &h->intra4x4_pred_mode_cache[ scan8[i] ], 2, 2, 8, mode, 1 );
+ else
+ h->intra4x4_pred_mode_cache[ scan8[i] ] = mode;
+ }
+ ff_h264_write_back_intra_pred_mode(h);
+ if( ff_h264_check_intra4x4_pred_mode(h) < 0)
+ return -1;
+ }else{
+ h->intra16x16_pred_mode= ff_h264_check_intra_pred_mode(h, h->intra16x16_pred_mode);
+ if(h->intra16x16_pred_mode < 0)
+ return -1;
+ }
+ if(CHROMA){
+ pred_mode= ff_h264_check_intra_pred_mode(h, get_ue_golomb_31(&s->gb));
+ if(pred_mode < 0)
+ return -1;
+ h->chroma_pred_mode= pred_mode;
+ }
+ }else if(partition_count==4){
+ int i, j, sub_partition_count[4], list, ref[2][4];
+
+ if(h->slice_type_nos == FF_B_TYPE){
+ for(i=0; i<4; i++){
+ h->sub_mb_type[i]= get_ue_golomb_31(&s->gb);
+ if(h->sub_mb_type[i] >=13){
+ av_log(h->s.avctx, AV_LOG_ERROR, "B sub_mb_type %u out of range at %d %d\n", h->sub_mb_type[i], s->mb_x, s->mb_y);
+ return -1;
+ }
+ sub_partition_count[i]= b_sub_mb_type_info[ h->sub_mb_type[i] ].partition_count;
+ h->sub_mb_type[i]= b_sub_mb_type_info[ h->sub_mb_type[i] ].type;
+ }
+ if( IS_DIRECT(h->sub_mb_type[0]|h->sub_mb_type[1]|h->sub_mb_type[2]|h->sub_mb_type[3])) {
+ ff_h264_pred_direct_motion(h, &mb_type);
+ h->ref_cache[0][scan8[4]] =
+ h->ref_cache[1][scan8[4]] =
+ h->ref_cache[0][scan8[12]] =
+ h->ref_cache[1][scan8[12]] = PART_NOT_AVAILABLE;
+ }
+ }else{
+ assert(h->slice_type_nos == FF_P_TYPE); //FIXME SP correct ?
+ for(i=0; i<4; i++){
+ h->sub_mb_type[i]= get_ue_golomb_31(&s->gb);
+ if(h->sub_mb_type[i] >=4){
+ av_log(h->s.avctx, AV_LOG_ERROR, "P sub_mb_type %u out of range at %d %d\n", h->sub_mb_type[i], s->mb_x, s->mb_y);
+ return -1;
+ }
+ sub_partition_count[i]= p_sub_mb_type_info[ h->sub_mb_type[i] ].partition_count;
+ h->sub_mb_type[i]= p_sub_mb_type_info[ h->sub_mb_type[i] ].type;
+ }
+ }
+
+ for(list=0; list<h->list_count; list++){
+ int ref_count= IS_REF0(mb_type) ? 1 : h->ref_count[list];
+ for(i=0; i<4; i++){
+ if(IS_DIRECT(h->sub_mb_type[i])) continue;
+ if(IS_DIR(h->sub_mb_type[i], 0, list)){
+ unsigned int tmp;
+ if(ref_count == 1){
+ tmp= 0;
+ }else if(ref_count == 2){
+ tmp= get_bits1(&s->gb)^1;
+ }else{
+ tmp= get_ue_golomb_31(&s->gb);
+ if(tmp>=ref_count){
+ av_log(h->s.avctx, AV_LOG_ERROR, "ref %u overflow\n", tmp);
+ return -1;
+ }
+ }
+ ref[list][i]= tmp;
+ }else{
+ //FIXME
+ ref[list][i] = -1;
+ }
+ }
+ }
+
+ if(dct8x8_allowed)
+ dct8x8_allowed = get_dct8x8_allowed(h);
+
+ for(list=0; list<h->list_count; list++){
+ for(i=0; i<4; i++){
+ if(IS_DIRECT(h->sub_mb_type[i])) {
+ h->ref_cache[list][ scan8[4*i] ] = h->ref_cache[list][ scan8[4*i]+1 ];
+ continue;
+ }
+ h->ref_cache[list][ scan8[4*i] ]=h->ref_cache[list][ scan8[4*i]+1 ]=
+ h->ref_cache[list][ scan8[4*i]+8 ]=h->ref_cache[list][ scan8[4*i]+9 ]= ref[list][i];
+
+ if(IS_DIR(h->sub_mb_type[i], 0, list)){
+ const int sub_mb_type= h->sub_mb_type[i];
+ const int block_width= (sub_mb_type & (MB_TYPE_16x16|MB_TYPE_16x8)) ? 2 : 1;
+ for(j=0; j<sub_partition_count[i]; j++){
+ int mx, my;
+ const int index= 4*i + block_width*j;
+ int16_t (* mv_cache)[2]= &h->mv_cache[list][ scan8[index] ];
+ pred_motion(h, index, block_width, list, h->ref_cache[list][ scan8[index] ], &mx, &my);
+ mx += get_se_golomb(&s->gb);
+ my += get_se_golomb(&s->gb);
+ tprintf(s->avctx, "final mv:%d %d\n", mx, my);
+
+ if(IS_SUB_8X8(sub_mb_type)){
+ mv_cache[ 1 ][0]=
+ mv_cache[ 8 ][0]= mv_cache[ 9 ][0]= mx;
+ mv_cache[ 1 ][1]=
+ mv_cache[ 8 ][1]= mv_cache[ 9 ][1]= my;
+ }else if(IS_SUB_8X4(sub_mb_type)){
+ mv_cache[ 1 ][0]= mx;
+ mv_cache[ 1 ][1]= my;
+ }else if(IS_SUB_4X8(sub_mb_type)){
+ mv_cache[ 8 ][0]= mx;
+ mv_cache[ 8 ][1]= my;
+ }
+ mv_cache[ 0 ][0]= mx;
+ mv_cache[ 0 ][1]= my;
+ }
+ }else{
+ uint32_t *p= (uint32_t *)&h->mv_cache[list][ scan8[4*i] ][0];
+ p[0] = p[1]=
+ p[8] = p[9]= 0;
+ }
+ }
+ }
+ }else if(IS_DIRECT(mb_type)){
+ ff_h264_pred_direct_motion(h, &mb_type);
+ dct8x8_allowed &= h->sps.direct_8x8_inference_flag;
+ }else{
+ int list, mx, my, i;
+ //FIXME we should set ref_idx_l? to 0 if we use that later ...
+ if(IS_16X16(mb_type)){
+ for(list=0; list<h->list_count; list++){
+ unsigned int val;
+ if(IS_DIR(mb_type, 0, list)){
+ if(h->ref_count[list]==1){
+ val= 0;
+ }else if(h->ref_count[list]==2){
+ val= get_bits1(&s->gb)^1;
+ }else{
+ val= get_ue_golomb_31(&s->gb);
+ if(val >= h->ref_count[list]){
+ av_log(h->s.avctx, AV_LOG_ERROR, "ref %u overflow\n", val);
+ return -1;
+ }
+ }
+ }else
+ val= LIST_NOT_USED&0xFF;
+ fill_rectangle(&h->ref_cache[list][ scan8[0] ], 4, 4, 8, val, 1);
+ }
+ for(list=0; list<h->list_count; list++){
+ unsigned int val;
+ if(IS_DIR(mb_type, 0, list)){
+ pred_motion(h, 0, 4, list, h->ref_cache[list][ scan8[0] ], &mx, &my);
+ mx += get_se_golomb(&s->gb);
+ my += get_se_golomb(&s->gb);
+ tprintf(s->avctx, "final mv:%d %d\n", mx, my);
+
+ val= pack16to32(mx,my);
+ }else
+ val=0;
+ fill_rectangle(h->mv_cache[list][ scan8[0] ], 4, 4, 8, val, 4);
+ }
+ }
+ else if(IS_16X8(mb_type)){
+ for(list=0; list<h->list_count; list++){
+ for(i=0; i<2; i++){
+ unsigned int val;
+ if(IS_DIR(mb_type, i, list)){
+ if(h->ref_count[list] == 1){
+ val= 0;
+ }else if(h->ref_count[list] == 2){
+ val= get_bits1(&s->gb)^1;
+ }else{
+ val= get_ue_golomb_31(&s->gb);
+ if(val >= h->ref_count[list]){
+ av_log(h->s.avctx, AV_LOG_ERROR, "ref %u overflow\n", val);
+ return -1;
+ }
+ }
+ }else
+ val= LIST_NOT_USED&0xFF;
+ fill_rectangle(&h->ref_cache[list][ scan8[0] + 16*i ], 4, 2, 8, val, 1);
+ }
+ }
+ for(list=0; list<h->list_count; list++){
+ for(i=0; i<2; i++){
+ unsigned int val;
+ if(IS_DIR(mb_type, i, list)){
+ pred_16x8_motion(h, 8*i, list, h->ref_cache[list][scan8[0] + 16*i], &mx, &my);
+ mx += get_se_golomb(&s->gb);
+ my += get_se_golomb(&s->gb);
+ tprintf(s->avctx, "final mv:%d %d\n", mx, my);
+
+ val= pack16to32(mx,my);
+ }else
+ val=0;
+ fill_rectangle(h->mv_cache[list][ scan8[0] + 16*i ], 4, 2, 8, val, 4);
+ }
+ }
+ }else{
+ assert(IS_8X16(mb_type));
+ for(list=0; list<h->list_count; list++){
+ for(i=0; i<2; i++){
+ unsigned int val;
+ if(IS_DIR(mb_type, i, list)){ //FIXME optimize
+ if(h->ref_count[list]==1){
+ val= 0;
+ }else if(h->ref_count[list]==2){
+ val= get_bits1(&s->gb)^1;
+ }else{
+ val= get_ue_golomb_31(&s->gb);
+ if(val >= h->ref_count[list]){
+ av_log(h->s.avctx, AV_LOG_ERROR, "ref %u overflow\n", val);
+ return -1;
+ }
+ }
+ }else
+ val= LIST_NOT_USED&0xFF;
+ fill_rectangle(&h->ref_cache[list][ scan8[0] + 2*i ], 2, 4, 8, val, 1);
+ }
+ }
+ for(list=0; list<h->list_count; list++){
+ for(i=0; i<2; i++){
+ unsigned int val;
+ if(IS_DIR(mb_type, i, list)){
+ pred_8x16_motion(h, i*4, list, h->ref_cache[list][ scan8[0] + 2*i ], &mx, &my);
+ mx += get_se_golomb(&s->gb);
+ my += get_se_golomb(&s->gb);
+ tprintf(s->avctx, "final mv:%d %d\n", mx, my);
+
+ val= pack16to32(mx,my);
+ }else
+ val=0;
+ fill_rectangle(h->mv_cache[list][ scan8[0] + 2*i ], 2, 4, 8, val, 4);
+ }
+ }
+ }
+ }
+
+ if(IS_INTER(mb_type))
+ write_back_motion(h, mb_type);
+
+ if(!IS_INTRA16x16(mb_type)){
+ cbp= get_ue_golomb(&s->gb);
+ if(cbp > 47){
+ av_log(h->s.avctx, AV_LOG_ERROR, "cbp too large (%u) at %d %d\n", cbp, s->mb_x, s->mb_y);
+ return -1;
+ }
+
+ if(CHROMA){
+ if(IS_INTRA4x4(mb_type)) cbp= golomb_to_intra4x4_cbp[cbp];
+ else cbp= golomb_to_inter_cbp [cbp];
+ }else{
+ if(IS_INTRA4x4(mb_type)) cbp= golomb_to_intra4x4_cbp_gray[cbp];
+ else cbp= golomb_to_inter_cbp_gray[cbp];
+ }
+ }
+
+ if(dct8x8_allowed && (cbp&15) && !IS_INTRA(mb_type)){
+ mb_type |= MB_TYPE_8x8DCT*get_bits1(&s->gb);
+ }
+ h->cbp=
+ h->cbp_table[mb_xy]= cbp;
+ s->current_picture.mb_type[mb_xy]= mb_type;
+
+ if(cbp || IS_INTRA16x16(mb_type)){
+ int i8x8, i4x4, chroma_idx;
+ int dquant;
+ GetBitContext *gb= IS_INTRA(mb_type) ? h->intra_gb_ptr : h->inter_gb_ptr;
+ const uint8_t *scan, *scan8x8, *dc_scan;
+
+ if(IS_INTERLACED(mb_type)){
+ scan8x8= s->qscale ? h->field_scan8x8_cavlc : h->field_scan8x8_cavlc_q0;
+ scan= s->qscale ? h->field_scan : h->field_scan_q0;
+ dc_scan= luma_dc_field_scan;
+ }else{
+ scan8x8= s->qscale ? h->zigzag_scan8x8_cavlc : h->zigzag_scan8x8_cavlc_q0;
+ scan= s->qscale ? h->zigzag_scan : h->zigzag_scan_q0;
+ dc_scan= luma_dc_zigzag_scan;
+ }
+
+ dquant= get_se_golomb(&s->gb);
+
+ s->qscale += dquant;
+
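+ // mb_qp_delta can take the QP outside 0..51; the standard defines the
+ // update modulo 52, so wrap once before treating the value as an error.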
+ if(((unsigned)s->qscale) > 51){
+ if(s->qscale<0) s->qscale+= 52;
+ else s->qscale-= 52;
+ if(((unsigned)s->qscale) > 51){
+ av_log(h->s.avctx, AV_LOG_ERROR, "dquant out of range (%d) at %d %d\n", dquant, s->mb_x, s->mb_y);
+ return -1;
+ }
+ }
+
+ h->chroma_qp[0]= get_chroma_qp(h, 0, s->qscale);
+ h->chroma_qp[1]= get_chroma_qp(h, 1, s->qscale);
+ if(IS_INTRA16x16(mb_type)){
+ if( decode_residual(h, h->intra_gb_ptr, h->mb, LUMA_DC_BLOCK_INDEX, dc_scan, h->dequant4_coeff[0][s->qscale], 16) < 0){
+ return -1; //FIXME continue if partitioned and other return -1 too
+ }
+
+ assert((cbp&15) == 0 || (cbp&15) == 15);
+
+ if(cbp&15){
+ for(i8x8=0; i8x8<4; i8x8++){
+ for(i4x4=0; i4x4<4; i4x4++){
+ const int index= i4x4 + 4*i8x8;
+ if( decode_residual(h, h->intra_gb_ptr, h->mb + 16*index, index, scan + 1, h->dequant4_coeff[0][s->qscale], 15) < 0 ){
+ return -1;
+ }
+ }
+ }
+ }else{
+ fill_rectangle(&h->non_zero_count_cache[scan8[0]], 4, 4, 8, 0, 1);
+ }
+ }else{
+ for(i8x8=0; i8x8<4; i8x8++){
+ if(cbp & (1<<i8x8)){
+ if(IS_8x8DCT(mb_type)){
+ DCTELEM *buf = &h->mb[64*i8x8];
+ uint8_t *nnz;
+ for(i4x4=0; i4x4<4; i4x4++){
+ if( decode_residual(h, gb, buf, i4x4+4*i8x8, scan8x8+16*i4x4,
+ h->dequant8_coeff[IS_INTRA( mb_type ) ? 0:1][s->qscale], 16) <0 )
+ return -1;
+ }
+ nnz= &h->non_zero_count_cache[ scan8[4*i8x8] ];
+ nnz[0] += nnz[1] + nnz[8] + nnz[9];
+ }else{
+ for(i4x4=0; i4x4<4; i4x4++){
+ const int index= i4x4 + 4*i8x8;
+
+ if( decode_residual(h, gb, h->mb + 16*index, index, scan, h->dequant4_coeff[IS_INTRA( mb_type ) ? 0:3][s->qscale], 16) <0 ){
+ return -1;
+ }
+ }
+ }
+ }else{
+ uint8_t * const nnz= &h->non_zero_count_cache[ scan8[4*i8x8] ];
+ nnz[0] = nnz[1] = nnz[8] = nnz[9] = 0;
+ }
+ }
+ }
+
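+ // Bits 4-5 of the cbp carry the chroma pattern: 0 = no chroma coefficients,
+ // 1 = DC only, 2 = DC and AC.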
+ if(cbp&0x30){
+ for(chroma_idx=0; chroma_idx<2; chroma_idx++)
+ if( decode_residual(h, gb, h->mb + 256 + 16*4*chroma_idx, CHROMA_DC_BLOCK_INDEX, chroma_dc_scan, NULL, 4) < 0){
+ return -1;
+ }
+ }
+
+ if(cbp&0x20){
+ for(chroma_idx=0; chroma_idx<2; chroma_idx++){
+ const uint32_t *qmul = h->dequant4_coeff[chroma_idx+1+(IS_INTRA( mb_type ) ? 0:3)][h->chroma_qp[chroma_idx]];
+ for(i4x4=0; i4x4<4; i4x4++){
+ const int index= 16 + 4*chroma_idx + i4x4;
+ if( decode_residual(h, gb, h->mb + 16*index, index, scan + 1, qmul, 15) < 0){
+ return -1;
+ }
+ }
+ }
+ }else{
+ uint8_t * const nnz= &h->non_zero_count_cache[0];
+ nnz[ scan8[16]+0 ] = nnz[ scan8[16]+1 ] =nnz[ scan8[16]+8 ] =nnz[ scan8[16]+9 ] =
+ nnz[ scan8[20]+0 ] = nnz[ scan8[20]+1 ] =nnz[ scan8[20]+8 ] =nnz[ scan8[20]+9 ] = 0;
+ }
+ }else{
+ uint8_t * const nnz= &h->non_zero_count_cache[0];
+ fill_rectangle(&nnz[scan8[0]], 4, 4, 8, 0, 1);
+ nnz[ scan8[16]+0 ] = nnz[ scan8[16]+1 ] =nnz[ scan8[16]+8 ] =nnz[ scan8[16]+9 ] =
+ nnz[ scan8[20]+0 ] = nnz[ scan8[20]+1 ] =nnz[ scan8[20]+8 ] =nnz[ scan8[20]+9 ] = 0;
+ }
+ s->current_picture.qscale_table[mb_xy]= s->qscale;
+ write_back_non_zero_count(h);
+
+ if(MB_MBAFF){
+ h->ref_count[0] >>= 1;
+ h->ref_count[1] >>= 1;
+ }
+
+ return 0;
+}
+
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_direct.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_direct.c
new file mode 100644
index 000000000..43b0d0d8a
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_direct.c
@@ -0,0 +1,465 @@
+/*
+ * H.26L/H.264/AVC/JVT/14496-10/... direct mb/block decoding
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/h264_direct.c
+ * H.264 / AVC / MPEG4 part10 direct mb/block decoding.
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#include "internal.h"
+#include "dsputil.h"
+#include "avcodec.h"
+#include "mpegvideo.h"
+#include "h264.h"
+#include "h264_mvpred.h"
+#include "rectangle.h"
+
+//#undef NDEBUG
+#include <assert.h>
+
+
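+/* Distance scale factor for temporal direct prediction: roughly 256*tb/td,
+ * computed with the fixed-point reciprocal and clipping from the H.264 spec;
+ * long-term references and td==0 always yield the neutral factor 256. */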
+static int get_scale_factor(H264Context * const h, int poc, int poc1, int i){
+ int poc0 = h->ref_list[0][i].poc;
+ int td = av_clip(poc1 - poc0, -128, 127);
+ if(td == 0 || h->ref_list[0][i].long_ref){
+ return 256;
+ }else{
+ int tb = av_clip(poc - poc0, -128, 127);
+ int tx = (16384 + (FFABS(td) >> 1)) / td;
+ return av_clip((tb*tx + 32) >> 6, -1024, 1023);
+ }
+}
+
+void ff_h264_direct_dist_scale_factor(H264Context * const h){
+ MpegEncContext * const s = &h->s;
+ const int poc = h->s.current_picture_ptr->field_poc[ s->picture_structure == PICT_BOTTOM_FIELD ];
+ const int poc1 = h->ref_list[1][0].poc;
+ int i, field;
+ for(field=0; field<2; field++){
+ const int poc = h->s.current_picture_ptr->field_poc[field];
+ const int poc1 = h->ref_list[1][0].field_poc[field];
+ for(i=0; i < 2*h->ref_count[0]; i++)
+ h->dist_scale_factor_field[field][i^field] = get_scale_factor(h, poc, poc1, i+16);
+ }
+
+ for(i=0; i<h->ref_count[0]; i++){
+ h->dist_scale_factor[i] = get_scale_factor(h, poc, poc1, i);
+ }
+}
+
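+/* Build the map from reference indices stored in the co-located (list1[0])
+ * picture to the current slice's list0 indices; the entries at offset 16 are
+ * the per-field variants needed when decoding MBAFF content. */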
+static void fill_colmap(H264Context *h, int map[2][16+32], int list, int field, int colfield, int mbafi){
+ MpegEncContext * const s = &h->s;
+ Picture * const ref1 = &h->ref_list[1][0];
+ int j, old_ref, rfield;
+ int start= mbafi ? 16 : 0;
+ int end = mbafi ? 16+2*h->ref_count[list] : h->ref_count[list];
+ int interl= mbafi || s->picture_structure != PICT_FRAME;
+
+ /* bogus; fills in for missing frames */
+ memset(map[list], 0, sizeof(map[list]));
+
+ for(rfield=0; rfield<2; rfield++){
+ for(old_ref=0; old_ref<ref1->ref_count[colfield][list]; old_ref++){
+ int poc = ref1->ref_poc[colfield][list][old_ref];
+
+ if (!interl)
+ poc |= 3;
+ else if( interl && (poc&3) == 3) //FIXME store all MBAFF references so this isn't needed
+ poc= (poc&~3) + rfield + 1;
+
+ for(j=start; j<end; j++){
+ if(4*h->ref_list[list][j].frame_num + (h->ref_list[list][j].reference&3) == poc){
+ int cur_ref= mbafi ? (j-16)^field : j;
+ map[list][2*old_ref + (rfield^field) + 16] = cur_ref;
+ if(rfield == field)
+ map[list][old_ref] = cur_ref;
+ break;
+ }
+ }
+ }
+ }
+}
+
+void ff_h264_direct_ref_list_init(H264Context * const h){
+ MpegEncContext * const s = &h->s;
+ Picture * const ref1 = &h->ref_list[1][0];
+ Picture * const cur = s->current_picture_ptr;
+ int list, j, field;
+ int sidx= (s->picture_structure&1)^1;
+ int ref1sidx= (ref1->reference&1)^1;
+
+ for(list=0; list<2; list++){
+ cur->ref_count[sidx][list] = h->ref_count[list];
+ for(j=0; j<h->ref_count[list]; j++)
+ cur->ref_poc[sidx][list][j] = 4*h->ref_list[list][j].frame_num + (h->ref_list[list][j].reference&3);
+ }
+
+ if(s->picture_structure == PICT_FRAME){
+ memcpy(cur->ref_count[1], cur->ref_count[0], sizeof(cur->ref_count[0]));
+ memcpy(cur->ref_poc [1], cur->ref_poc [0], sizeof(cur->ref_poc [0]));
+ }
+
+ cur->mbaff= FRAME_MBAFF;
+
+ if(cur->pict_type != FF_B_TYPE || h->direct_spatial_mv_pred)
+ return;
+
+ for(list=0; list<2; list++){
+ fill_colmap(h, h->map_col_to_list0, list, sidx, ref1sidx, 0);
+ for(field=0; field<2; field++)
+ fill_colmap(h, h->map_col_to_list0_field[field], list, field, field, 1);
+ }
+}
+
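+/* Derive motion vectors and reference indices for B_Direct macroblocks and
+ * sub-blocks, using spatial or temporal direct prediction depending on
+ * h->direct_spatial_mv_pred signalled in the slice header. */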
+void ff_h264_pred_direct_motion(H264Context * const h, int *mb_type){
+ MpegEncContext * const s = &h->s;
+ int b8_stride = h->b8_stride;
+ int b4_stride = h->b_stride;
+ int mb_xy = h->mb_xy;
+ int mb_type_col[2];
+ const int16_t (*l1mv0)[2], (*l1mv1)[2];
+ const int8_t *l1ref0, *l1ref1;
+ const int is_b8x8 = IS_8X8(*mb_type);
+ unsigned int sub_mb_type;
+ int i8, i4;
+
+ assert(h->ref_list[1][0].reference&3);
+
+#define MB_TYPE_16x16_OR_INTRA (MB_TYPE_16x16|MB_TYPE_INTRA4x4|MB_TYPE_INTRA16x16|MB_TYPE_INTRA_PCM)
+
+ if(IS_INTERLACED(h->ref_list[1][0].mb_type[mb_xy])){ // AFL/AFR/FR/FL -> AFL/FL
+ if(!IS_INTERLACED(*mb_type)){ // AFR/FR -> AFL/FL
+ int cur_poc = s->current_picture_ptr->poc;
+ int *col_poc = h->ref_list[1]->field_poc;
+ int col_parity = FFABS(col_poc[0] - cur_poc) >= FFABS(col_poc[1] - cur_poc);
+ mb_xy= s->mb_x + ((s->mb_y&~1) + col_parity)*s->mb_stride;
+ b8_stride = 0;
+ }else if(!(s->picture_structure & h->ref_list[1][0].reference) && !h->ref_list[1][0].mbaff){// FL -> FL & differ parity
+ int fieldoff= 2*(h->ref_list[1][0].reference)-3;
+ mb_xy += s->mb_stride*fieldoff;
+ }
+ goto single_col;
+ }else{ // AFL/AFR/FR/FL -> AFR/FR
+ if(IS_INTERLACED(*mb_type)){ // AFL /FL -> AFR/FR
+ mb_xy= s->mb_x + (s->mb_y&~1)*s->mb_stride;
+ mb_type_col[0] = h->ref_list[1][0].mb_type[mb_xy];
+ mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy + s->mb_stride];
+ b8_stride *= 3;
+ b4_stride *= 6;
+ //FIXME IS_8X8(mb_type_col[0]) && !h->sps.direct_8x8_inference_flag
+ if( (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)
+ && (mb_type_col[1] & MB_TYPE_16x16_OR_INTRA)
+ && !is_b8x8){
+ sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
+ *mb_type |= MB_TYPE_16x8 |MB_TYPE_L0L1|MB_TYPE_DIRECT2; /* B_16x8 */
+ }else{
+ sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
+ *mb_type |= MB_TYPE_8x8|MB_TYPE_L0L1;
+ while(dst_length > 0 && ptr[dst_length - 1] == 0) // test the length first to avoid reading before the buffer
+ }else{ // AFR/FR -> AFR/FR
+single_col:
+ mb_type_col[0] =
+ mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy];
+ if(IS_8X8(mb_type_col[0]) && !h->sps.direct_8x8_inference_flag){
+ /* FIXME save sub mb types from previous frames (or derive from MVs)
+ * so we know exactly what block size to use */
+ sub_mb_type = MB_TYPE_8x8|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_4x4 */
+ *mb_type |= MB_TYPE_8x8|MB_TYPE_L0L1;
+ }else if(!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)){
+ sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
+ *mb_type |= MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_16x16 */
+ }else{
+ sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
+ *mb_type |= MB_TYPE_8x8|MB_TYPE_L0L1;
+ }
+ }
+ }
+
+ l1mv0 = &h->ref_list[1][0].motion_val[0][h->mb2b_xy [mb_xy]];
+ l1mv1 = &h->ref_list[1][0].motion_val[1][h->mb2b_xy [mb_xy]];
+ l1ref0 = &h->ref_list[1][0].ref_index [0][h->mb2b8_xy[mb_xy]];
+ l1ref1 = &h->ref_list[1][0].ref_index [1][h->mb2b8_xy[mb_xy]];
+ if(!b8_stride){
+ if(s->mb_y&1){
+ l1ref0 += h->b8_stride;
+ l1ref1 += h->b8_stride;
+ l1mv0 += 2*b4_stride;
+ l1mv1 += 2*b4_stride;
+ }
+ }
+
+ if(h->direct_spatial_mv_pred){
+ int ref[2];
+ int mv[2][2];
+ int list;
+
+ /* FIXME interlacing + spatial direct uses wrong colocated block positions */
+
+ /* ref = min(neighbors) */
+ for(list=0; list<2; list++){
+ int refa = h->ref_cache[list][scan8[0] - 1];
+ int refb = h->ref_cache[list][scan8[0] - 8];
+ int refc = h->ref_cache[list][scan8[0] - 8 + 4];
+ if(refc == PART_NOT_AVAILABLE)
+ refc = h->ref_cache[list][scan8[0] - 8 - 1];
+ ref[list] = FFMIN3((unsigned)refa, (unsigned)refb, (unsigned)refc);
+ if(ref[list] < 0)
+ ref[list] = -1;
+ }
+
+ if(ref[0] < 0 && ref[1] < 0){
+ ref[0] = ref[1] = 0;
+ mv[0][0] = mv[0][1] =
+ mv[1][0] = mv[1][1] = 0;
+ }else{
+ for(list=0; list<2; list++){
+ if(ref[list] >= 0)
+ pred_motion(h, 0, 4, list, ref[list], &mv[list][0], &mv[list][1]);
+ else
+ mv[list][0] = mv[list][1] = 0;
+ }
+ }
+
+ if(ref[1] < 0){
+ if(!is_b8x8)
+ *mb_type &= ~MB_TYPE_L1;
+ sub_mb_type &= ~MB_TYPE_L1;
+ }else if(ref[0] < 0){
+ if(!is_b8x8)
+ *mb_type &= ~MB_TYPE_L0;
+ sub_mb_type &= ~MB_TYPE_L0;
+ }
+
+ if(IS_INTERLACED(*mb_type) != IS_INTERLACED(mb_type_col[0])){
+ for(i8=0; i8<4; i8++){
+ int x8 = i8&1;
+ int y8 = i8>>1;
+ int xy8 = x8+y8*b8_stride;
+ int xy4 = 3*x8+y8*b4_stride;
+ int a=0, b=0;
+
+ if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
+ continue;
+ h->sub_mb_type[i8] = sub_mb_type;
+
+ fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, (uint8_t)ref[0], 1);
+ fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, (uint8_t)ref[1], 1);
+ if(!IS_INTRA(mb_type_col[y8])
+ && ( (l1ref0[xy8] == 0 && FFABS(l1mv0[xy4][0]) <= 1 && FFABS(l1mv0[xy4][1]) <= 1)
+ || (l1ref0[xy8] < 0 && l1ref1[xy8] == 0 && FFABS(l1mv1[xy4][0]) <= 1 && FFABS(l1mv1[xy4][1]) <= 1))){
+ if(ref[0] > 0)
+ a= pack16to32(mv[0][0],mv[0][1]);
+ if(ref[1] > 0)
+ b= pack16to32(mv[1][0],mv[1][1]);
+ }else{
+ a= pack16to32(mv[0][0],mv[0][1]);
+ b= pack16to32(mv[1][0],mv[1][1]);
+ }
+ fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, a, 4);
+ fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, b, 4);
+ }
+ }else if(IS_16X16(*mb_type)){
+ int a=0, b=0;
+
+ fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, (uint8_t)ref[0], 1);
+ fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, (uint8_t)ref[1], 1);
+ if(!IS_INTRA(mb_type_col[0])
+ && ( (l1ref0[0] == 0 && FFABS(l1mv0[0][0]) <= 1 && FFABS(l1mv0[0][1]) <= 1)
+ || (l1ref0[0] < 0 && l1ref1[0] == 0 && FFABS(l1mv1[0][0]) <= 1 && FFABS(l1mv1[0][1]) <= 1
+ && (h->x264_build>33 || !h->x264_build)))){
+ if(ref[0] > 0)
+ a= pack16to32(mv[0][0],mv[0][1]);
+ if(ref[1] > 0)
+ b= pack16to32(mv[1][0],mv[1][1]);
+ }else{
+ a= pack16to32(mv[0][0],mv[0][1]);
+ b= pack16to32(mv[1][0],mv[1][1]);
+ }
+ fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, a, 4);
+ fill_rectangle(&h->mv_cache[1][scan8[0]], 4, 4, 8, b, 4);
+ }else{
+ for(i8=0; i8<4; i8++){
+ const int x8 = i8&1;
+ const int y8 = i8>>1;
+
+ if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
+ continue;
+ h->sub_mb_type[i8] = sub_mb_type;
+
+ fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, pack16to32(mv[0][0],mv[0][1]), 4);
+ fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, pack16to32(mv[1][0],mv[1][1]), 4);
+ fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, (uint8_t)ref[0], 1);
+ fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, (uint8_t)ref[1], 1);
+
+ /* col_zero_flag */
+ if(!IS_INTRA(mb_type_col[0]) && ( l1ref0[x8 + y8*b8_stride] == 0
+ || (l1ref0[x8 + y8*b8_stride] < 0 && l1ref1[x8 + y8*b8_stride] == 0
+ && (h->x264_build>33 || !h->x264_build)))){
+ const int16_t (*l1mv)[2]= l1ref0[x8 + y8*b8_stride] == 0 ? l1mv0 : l1mv1;
+ if(IS_SUB_8X8(sub_mb_type)){
+ const int16_t *mv_col = l1mv[x8*3 + y8*3*b4_stride];
+ if(FFABS(mv_col[0]) <= 1 && FFABS(mv_col[1]) <= 1){
+ if(ref[0] == 0)
+ fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, 0, 4);
+ if(ref[1] == 0)
+ fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, 0, 4);
+ }
+ }else
+ for(i4=0; i4<4; i4++){
+ const int16_t *mv_col = l1mv[x8*2 + (i4&1) + (y8*2 + (i4>>1))*b4_stride];
+ if(FFABS(mv_col[0]) <= 1 && FFABS(mv_col[1]) <= 1){
+ if(ref[0] == 0)
+ *(uint32_t*)h->mv_cache[0][scan8[i8*4+i4]] = 0;
+ if(ref[1] == 0)
+ *(uint32_t*)h->mv_cache[1][scan8[i8*4+i4]] = 0;
+ }
+ }
+ }
+ }
+ }
+ }else{ /* direct temporal mv pred */
+ const int *map_col_to_list0[2] = {h->map_col_to_list0[0], h->map_col_to_list0[1]};
+ const int *dist_scale_factor = h->dist_scale_factor;
+ int ref_offset= 0;
+
+ if(FRAME_MBAFF && IS_INTERLACED(*mb_type)){
+ map_col_to_list0[0] = h->map_col_to_list0_field[s->mb_y&1][0];
+ map_col_to_list0[1] = h->map_col_to_list0_field[s->mb_y&1][1];
+ dist_scale_factor =h->dist_scale_factor_field[s->mb_y&1];
+ }
+ if(h->ref_list[1][0].mbaff && IS_INTERLACED(mb_type_col[0]))
+ ref_offset += 16;
+
+ if(IS_INTERLACED(*mb_type) != IS_INTERLACED(mb_type_col[0])){
+ /* FIXME assumes direct_8x8_inference == 1 */
+ int y_shift = 2*!IS_INTERLACED(*mb_type);
+
+ for(i8=0; i8<4; i8++){
+ const int x8 = i8&1;
+ const int y8 = i8>>1;
+ int ref0, scale;
+ const int16_t (*l1mv)[2]= l1mv0;
+
+ if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
+ continue;
+ h->sub_mb_type[i8] = sub_mb_type;
+
+ fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, 0, 1);
+ if(IS_INTRA(mb_type_col[y8])){
+ fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, 0, 1);
+ fill_rectangle(&h-> mv_cache[0][scan8[i8*4]], 2, 2, 8, 0, 4);
+ fill_rectangle(&h-> mv_cache[1][scan8[i8*4]], 2, 2, 8, 0, 4);
+ continue;
+ }
+
+ ref0 = l1ref0[x8 + y8*b8_stride];
+ if(ref0 >= 0)
+ ref0 = map_col_to_list0[0][ref0 + ref_offset];
+ else{
+ ref0 = map_col_to_list0[1][l1ref1[x8 + y8*b8_stride] + ref_offset];
+ l1mv= l1mv1;
+ }
+ scale = dist_scale_factor[ref0];
+ fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, ref0, 1);
+
+ {
+ const int16_t *mv_col = l1mv[x8*3 + y8*b4_stride];
+ int my_col = (mv_col[1]<<y_shift)/2;
+ int mx = (scale * mv_col[0] + 128) >> 8;
+ int my = (scale * my_col + 128) >> 8;
+ fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, pack16to32(mx,my), 4);
+ fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, pack16to32(mx-mv_col[0],my-my_col), 4);
+ }
+ }
+ return;
+ }
+
+ /* one-to-one mv scaling */
+
+ if(IS_16X16(*mb_type)){
+ int ref, mv0, mv1;
+
+ fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, 0, 1);
+ if(IS_INTRA(mb_type_col[0])){
+ ref=mv0=mv1=0;
+ }else{
+ const int ref0 = l1ref0[0] >= 0 ? map_col_to_list0[0][l1ref0[0] + ref_offset]
+ : map_col_to_list0[1][l1ref1[0] + ref_offset];
+ const int scale = dist_scale_factor[ref0];
+ const int16_t *mv_col = l1ref0[0] >= 0 ? l1mv0[0] : l1mv1[0];
+ int mv_l0[2];
+ mv_l0[0] = (scale * mv_col[0] + 128) >> 8;
+ mv_l0[1] = (scale * mv_col[1] + 128) >> 8;
+ ref= ref0;
+ mv0= pack16to32(mv_l0[0],mv_l0[1]);
+ mv1= pack16to32(mv_l0[0]-mv_col[0],mv_l0[1]-mv_col[1]);
+ }
+ fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
+ fill_rectangle(&h-> mv_cache[0][scan8[0]], 4, 4, 8, mv0, 4);
+ fill_rectangle(&h-> mv_cache[1][scan8[0]], 4, 4, 8, mv1, 4);
+ }else{
+ for(i8=0; i8<4; i8++){
+ const int x8 = i8&1;
+ const int y8 = i8>>1;
+ int ref0, scale;
+ const int16_t (*l1mv)[2]= l1mv0;
+
+ if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
+ continue;
+ h->sub_mb_type[i8] = sub_mb_type;
+ fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, 0, 1);
+ if(IS_INTRA(mb_type_col[0])){
+ fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, 0, 1);
+ fill_rectangle(&h-> mv_cache[0][scan8[i8*4]], 2, 2, 8, 0, 4);
+ fill_rectangle(&h-> mv_cache[1][scan8[i8*4]], 2, 2, 8, 0, 4);
+ continue;
+ }
+
+ ref0 = l1ref0[x8 + y8*b8_stride] + ref_offset;
+ if(ref0 >= 0)
+ ref0 = map_col_to_list0[0][ref0];
+ else{
+ ref0 = map_col_to_list0[1][l1ref1[x8 + y8*b8_stride] + ref_offset];
+ l1mv= l1mv1;
+ }
+ scale = dist_scale_factor[ref0];
+
+ fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, ref0, 1);
+ if(IS_SUB_8X8(sub_mb_type)){
+ const int16_t *mv_col = l1mv[x8*3 + y8*3*b4_stride];
+ int mx = (scale * mv_col[0] + 128) >> 8;
+ int my = (scale * mv_col[1] + 128) >> 8;
+ fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, pack16to32(mx,my), 4);
+ fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, pack16to32(mx-mv_col[0],my-mv_col[1]), 4);
+ }else
+ for(i4=0; i4<4; i4++){
+ const int16_t *mv_col = l1mv[x8*2 + (i4&1) + (y8*2 + (i4>>1))*b4_stride];
+ int16_t *mv_l0 = h->mv_cache[0][scan8[i8*4+i4]];
+ mv_l0[0] = (scale * mv_col[0] + 128) >> 8;
+ mv_l0[1] = (scale * mv_col[1] + 128) >> 8;
+ *(uint32_t*)h->mv_cache[1][scan8[i8*4+i4]] =
+ pack16to32(mv_l0[0]-mv_col[0],mv_l0[1]-mv_col[1]);
+ }
+ }
+ }
+ }
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_dxva.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_dxva.c
new file mode 100644
index 000000000..9b73e4a0e
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_dxva.c
@@ -0,0 +1,1102 @@
+/*
+ * This file is part of Media Player Classic HomeCinema.
+ *
+ * MPC-HC is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * MPC-HC is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "h264.h"
+#include "h264data.h"
+#include "h264_parser.h"
+#include "golomb.h"
+
+#include <windows.h>
+#include <dxva.h>
+
+
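+/* MPC DXVA patch: copy the parameters of the slice that was just parsed into
+ * the DXVA_Slice_H264_Long entry passed to the hardware decoder. The
+ * reference picture lists are initialised to "unused" entries (0xFF) here;
+ * they are presumably filled in later by the DXVA filter code. */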
+static void fill_dxva_slice_long(H264Context *h){
+ MpegEncContext* const s = &h->s;
+ AVCodecContext* const avctx= s->avctx;
+ DXVA_Slice_H264_Long* pSlice = &((DXVA_Slice_H264_Long*) h->dxva_slice_long)[h->current_slice-1];
+ int field_pic_flag;
+ unsigned int i,j,k;
+
+ field_pic_flag = (h->s.picture_structure != PICT_FRAME);
+
+ pSlice->slice_id = h->current_slice-1;
+ pSlice->first_mb_in_slice = h->first_mb_in_slice;
+ pSlice->NumMbsForSlice = 0; // h->s.mb_num; // TODO : to be checked !
+ pSlice->BitOffsetToSliceData = h->bit_offset_to_slice_data;
+ pSlice->slice_type = h->raw_slice_type;
+ pSlice->luma_log2_weight_denom = h->luma_log2_weight_denom;
+ pSlice->chroma_log2_weight_denom = h->chroma_log2_weight_denom;
+ pSlice->num_ref_idx_l0_active_minus1 = h->ref_count[0]-1; // num_ref_idx_l0_active_minus1;
+ pSlice->num_ref_idx_l1_active_minus1 = h->ref_count[1]-1; // num_ref_idx_l1_active_minus1;
+ pSlice->slice_alpha_c0_offset_div2 = h->slice_alpha_c0_offset / 2;
+ pSlice->slice_beta_offset_div2 = h->slice_beta_offset / 2;
+ pSlice->Reserved8Bits = 0;
+
+ // Fill prediction weights
+ memset (pSlice->Weights, 0, sizeof(pSlice->Weights));
+ for(j=0; j<2; j++){
+ for(i=0; i<h->ref_count[j]; i++){
+ // L0&L1 Y,Cb,Cr Weight,Offset
+ // Weights [2] [32] [3] [2]
+ pSlice->Weights[j][i][0][0] = h->luma_weight[j][i];
+ pSlice->Weights[j][i][0][1] = h->luma_offset[j][i];
+
+ for(k=0; k<2; k++){
+ pSlice->Weights[j][i][k+1][0] = h->chroma_weight[j][i][k];
+ pSlice->Weights[j][i][k+1][1] = h->chroma_offset[j][i][k];
+ }
+ }
+ }
+
+ pSlice->slice_qs_delta = h->slice_qs_delta;
+ pSlice->slice_qp_delta = h->slice_qp_delta;
+ pSlice->redundant_pic_cnt = h->redundant_pic_count;
+ pSlice->direct_spatial_mv_pred_flag = h->direct_spatial_mv_pred;
+ pSlice->cabac_init_idc = h->cabac_init_idc;
+ pSlice->disable_deblocking_filter_idc = h->deblocking_filter;
+
+ for(i=0; i<32; i++){
+ pSlice->RefPicList[0][i].AssociatedFlag = 1;
+ pSlice->RefPicList[0][i].bPicEntry = 255;
+ pSlice->RefPicList[0][i].Index7Bits = 127;
+ pSlice->RefPicList[1][i].AssociatedFlag = 1;
+ pSlice->RefPicList[1][i].bPicEntry = 255;
+ pSlice->RefPicList[1][i].Index7Bits = 127;
+ }
+}
+
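+/* MPC DXVA counterpart of field_end(): finalises the current field/frame
+ * (reference picture marking, POC bookkeeping, MPV_frame_end) without the
+ * slice execution that the regular decode path would trigger. */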
+static void field_end_noexecute(H264Context *h){
+ MpegEncContext * const s = &h->s;
+ AVCodecContext * const avctx= s->avctx;
+ s->mb_y= 0;
+
+ s->current_picture_ptr->qscale_type= FF_QSCALE_TYPE_H264;
+ s->current_picture_ptr->pict_type= s->pict_type;
+
+ if(!s->dropable) {
+ ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index);
+ h->prev_poc_msb= h->poc_msb;
+ h->prev_poc_lsb= h->poc_lsb;
+ }
+ h->prev_frame_num_offset= h->frame_num_offset;
+ h->prev_frame_num= h->frame_num;
+
+ MPV_frame_end(s);
+
+ h->current_slice=0;
+}
+
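+/* MPC DXVA variant of decode_slice_header(): parses the slice header and sets
+ * up the per-slice state like the software decoder does, but ends by filling
+ * the DXVA slice structure instead of preparing a software slice decode. */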
+int decode_slice_header_noexecute (H264Context *h){
+ // ==> Start patch MPC DXVA
+ H264Context *h0 = h;
+ // <== End patch MPC DXVA
+ MpegEncContext * const s = &h->s;
+ MpegEncContext * const s0 = &h0->s;
+ unsigned int pps_id;
+ int num_ref_idx_active_override_flag;
+ unsigned int slice_type, tmp, i, j;
+ int default_ref_list_done = 0;
+ int last_pic_structure;
+
+ s->dropable= h->nal_ref_idc == 0;
+
+ if((s->avctx->flags2 & CODEC_FLAG2_FAST) && !h->nal_ref_idc){
+ s->me.qpel_put= s->dsp.put_2tap_qpel_pixels_tab;
+ s->me.qpel_avg= s->dsp.avg_2tap_qpel_pixels_tab;
+ }else{
+ s->me.qpel_put= s->dsp.put_h264_qpel_pixels_tab;
+ s->me.qpel_avg= s->dsp.avg_h264_qpel_pixels_tab;
+ }
+
+ // ==> Start patch MPC DXVA
+ h->first_mb_in_slice= get_ue_golomb(&s->gb);
+ // <== End patch MPC DXVA
+
+ if(h->first_mb_in_slice == 0){ //FIXME better field boundary detection
+ if(h0->current_slice && FIELD_PICTURE){
+ // ==> Start patch MPC DXVA
+ field_end_noexecute(h);
+ // <== End patch MPC DXVA
+ }
+
+ h0->current_slice = 0;
+ if (!s0->first_field)
+ s->current_picture_ptr= NULL;
+ }
+
+ slice_type= get_ue_golomb_31(&s->gb);
+ if(slice_type > 9){
+ av_log(h->s.avctx, AV_LOG_ERROR, "slice type too large (%u) at %d %d\n", slice_type, s->mb_x, s->mb_y);
+ return -1;
+ }
+ if(slice_type > 4){
+ slice_type -= 5;
+ h->slice_type_fixed=1;
+ }else
+ h->slice_type_fixed=0;
+
+ // ==> Start patch MPC DXVA
+ h->raw_slice_type = slice_type;
+ // <== End patch MPC DXVA
+ slice_type= golomb_to_pict_type[ slice_type ];
+ if (slice_type == FF_I_TYPE
+ || (h0->current_slice != 0 && slice_type == h0->last_slice_type) ) {
+ default_ref_list_done = 1;
+ }
+ h->slice_type= slice_type;
+ h->slice_type_nos= slice_type & 3;
+
+ s->pict_type= h->slice_type; // to make a few old functions happy, it's wrong though
+ if (s->pict_type == FF_B_TYPE && s0->last_picture_ptr == NULL) {
+ av_log(h->s.avctx, AV_LOG_ERROR,
+ "B picture before any references, skipping\n");
+ return -1;
+ }
+
+ pps_id= get_ue_golomb(&s->gb);
+ if(pps_id>=MAX_PPS_COUNT){
+ av_log(h->s.avctx, AV_LOG_ERROR, "pps_id out of range\n");
+ return -1;
+ }
+ if(!h0->pps_buffers[pps_id]) {
+ av_log(h->s.avctx, AV_LOG_ERROR, "non-existing PPS %u referenced\n", pps_id);
+ return -1;
+ }
+ h->pps= *h0->pps_buffers[pps_id];
+
+ if(!h0->sps_buffers[h->pps.sps_id]) {
+ av_log(h->s.avctx, AV_LOG_ERROR, "non-existing SPS %u referenced\n", h->pps.sps_id);
+ return -1;
+ }
+ h->sps = *h0->sps_buffers[h->pps.sps_id];
+
+ if(h == h0 && h->dequant_coeff_pps != pps_id){
+ h->dequant_coeff_pps = pps_id;
+ init_dequant_tables(h);
+ }
+
+ s->mb_width= h->sps.mb_width;
+ s->mb_height= h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag);
+
+ h->b_stride= s->mb_width*4;
+ h->b8_stride= s->mb_width*2;
+
+ s->width = 16*s->mb_width - 2*FFMIN(h->sps.crop_right, 7);
+ if(h->sps.frame_mbs_only_flag)
+ s->height= 16*s->mb_height - 2*FFMIN(h->sps.crop_bottom, 7);
+ else
+ s->height= 16*s->mb_height - 4*FFMIN(h->sps.crop_bottom, 3);
+
+ if (s->context_initialized
+ && ( s->width != s->avctx->width || s->height != s->avctx->height)) {
+ if(h != h0)
+ return -1; // width / height changed during parallelized decoding
+ free_tables(h);
+ flush_dpb(s->avctx);
+ MPV_common_end(s);
+ }
+ if (!s->context_initialized) {
+ if(h != h0)
+ return -1; // we can't (re-)initialize context during parallel decoding
+
+ avcodec_set_dimensions(s->avctx, s->width, s->height);
+ s->avctx->sample_aspect_ratio= h->sps.sar;
+ if(!s->avctx->sample_aspect_ratio.den)
+ s->avctx->sample_aspect_ratio.den = 1;
+
+ if(h->sps.video_signal_type_present_flag){
+ s->avctx->color_range = h->sps.full_range ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
+ if(h->sps.colour_description_present_flag){
+ s->avctx->color_primaries = h->sps.color_primaries;
+ s->avctx->color_trc = h->sps.color_trc;
+ s->avctx->colorspace = h->sps.colorspace;
+ }
+ }
+
+ if(h->sps.timing_info_present_flag){
+ #if __STDC_VERSION__ >= 199901L
+ s->avctx->time_base= (AVRational){h->sps.num_units_in_tick * 2, h->sps.time_scale};
+ #else
+ s->avctx->time_base.num = h->sps.num_units_in_tick * 2;
+ s->avctx->time_base.den = h->sps.time_scale;
+ #endif
+ if(h->x264_build > 0 && h->x264_build < 44)
+ s->avctx->time_base.den *= 2;
+ av_reduce(&s->avctx->time_base.num, &s->avctx->time_base.den,
+ s->avctx->time_base.num, s->avctx->time_base.den, 1<<30);
+ }
+
+ if (MPV_common_init(s) < 0)
+ return -1;
+ s->first_field = 0;
+ h->prev_interlaced_frame = 1;
+
+ init_scan_tables(h);
+ ff_h264_alloc_tables(h);
+
+ for(i = 1; i < s->avctx->thread_count; i++) {
+ H264Context *c;
+ c = h->thread_context[i] = av_malloc(sizeof(H264Context));
+ memcpy(c, h->s.thread_context[i], sizeof(MpegEncContext));
+ memset(&c->s + 1, 0, sizeof(H264Context) - sizeof(MpegEncContext));
+ c->sps = h->sps;
+ c->pps = h->pps;
+ init_scan_tables(c);
+ clone_tables(c, h);
+ }
+
+ for(i = 0; i < s->avctx->thread_count; i++)
+ if(context_init(h->thread_context[i]) < 0)
+ return -1;
+ }
+
+ h->frame_num= get_bits(&s->gb, h->sps.log2_max_frame_num);
+
+ h->mb_mbaff = 0;
+ h->mb_aff_frame = 0;
+ last_pic_structure = s0->picture_structure;
+ if(h->sps.frame_mbs_only_flag){
+ s->picture_structure= PICT_FRAME;
+ }else{
+ if(get_bits1(&s->gb)) { //field_pic_flag
+ s->picture_structure= PICT_TOP_FIELD + get_bits1(&s->gb); //bottom_field_flag
+ } else {
+ s->picture_structure= PICT_FRAME;
+ h->mb_aff_frame = h->sps.mb_aff;
+ }
+ }
+ h->mb_field_decoding_flag= s->picture_structure != PICT_FRAME;
+
+ /* ffdshow custom code begin */
+ //
+ // Workaround Haali's media splitter (http://forum.doom9.org/showthread.php?p=1226434#post1226434)
+ //
+ // Disallow referencing an unpaired field right after seeking.
+ // The restriction is only enforced once h->has_to_drop_first_non_ref == 3.
+ // Strictly speaking this is wrong, because an unpaired field is allowed to
+ // be a reference field; that is why the workaround is optional and its
+ // scope is kept as small as possible.
+ if (h->has_to_drop_first_non_ref == 1 && s->dropable){
+ if (FIELD_PICTURE){
+ h->has_to_drop_first_non_ref = 2;
+ }else{
+ h->has_to_drop_first_non_ref = 0;
+ }
+ } else if (h->has_to_drop_first_non_ref == 2){
+ if (FIELD_PICTURE && !s->dropable)
+ h->has_to_drop_first_non_ref = 3;
+ else if (!FIELD_PICTURE)
+ h->has_to_drop_first_non_ref = 0;
+ } else if (h->has_to_drop_first_non_ref == 3){
+ h->has_to_drop_first_non_ref = 0;
+ }
+ /* ffdshow custom code end */
+
+ if(h0->current_slice == 0){
+ while(h->frame_num != h->prev_frame_num &&
+ h->frame_num != (h->prev_frame_num+1)%(1<<h->sps.log2_max_frame_num)){
+ av_log(NULL, AV_LOG_DEBUG, "Frame num gap %d %d\n", h->frame_num, h->prev_frame_num);
+ if (ff_h264_frame_start(h) < 0)
+ return -1;
+ h->prev_frame_num++;
+ h->prev_frame_num %= 1<<h->sps.log2_max_frame_num;
+ s->current_picture_ptr->frame_num= h->prev_frame_num;
+ ff_h264_execute_ref_pic_marking(h, NULL, 0);
+ }
+
+ /* See if we have a decoded first field looking for a pair... */
+ if (s0->first_field) {
+ assert(s0->current_picture_ptr);
+ assert(s0->current_picture_ptr->data[0]);
+ assert(s0->current_picture_ptr->reference != DELAYED_PIC_REF);
+
+ /* figure out if we have a complementary field pair */
+ if (!FIELD_PICTURE || s->picture_structure == last_pic_structure) {
+ /*
+ * Previous field is unmatched. Don't display it, but let it
+ * remain for reference if marked as such.
+ */
+ s0->current_picture_ptr = NULL;
+ s0->first_field = FIELD_PICTURE;
+
+ } else {
+ if ((h->nal_ref_idc &&
+ s0->current_picture_ptr->reference &&
+ s0->current_picture_ptr->frame_num != h->frame_num) ||
+ h->has_to_drop_first_non_ref == 3) { /* ffdshow custom code */
+ /*
+ * This and previous field were reference, but had
+ * different frame_nums. Consider this field first in
+ * pair. Throw away previous field except for reference
+ * purposes.
+ */
+ s0->first_field = 1;
+ s0->current_picture_ptr = NULL;
+
+ } else {
+ /* Second field in complementary pair */
+ s0->first_field = 0;
+ }
+ }
+
+ } else {
+ /* Frame or first field in a potentially complementary pair */
+ assert(!s0->current_picture_ptr);
+ s0->first_field = FIELD_PICTURE;
+ }
+
+ if((!FIELD_PICTURE || s0->first_field) && ff_h264_frame_start(h) < 0) {
+ s0->first_field = 0;
+ return -1;
+ }
+ }
+ if(h != h0)
+ clone_slice(h, h0, 0); /* ffdshow custom code */
+
+ s->current_picture_ptr->frame_num= h->frame_num; //FIXME frame_num cleanup
+
+ assert(s->mb_num == s->mb_width * s->mb_height);
+ if(h->first_mb_in_slice << FIELD_OR_MBAFF_PICTURE >= s->mb_num ||
+ h->first_mb_in_slice >= s->mb_num){
+ av_log(h->s.avctx, AV_LOG_ERROR, "first_mb_in_slice overflow\n");
+ return -1;
+ }
+ s->resync_mb_x = s->mb_x = h->first_mb_in_slice % s->mb_width;
+ s->resync_mb_y = s->mb_y = (h->first_mb_in_slice / s->mb_width) << FIELD_OR_MBAFF_PICTURE;
+ if (s->picture_structure == PICT_BOTTOM_FIELD)
+ s->resync_mb_y = s->mb_y = s->mb_y + 1;
+ assert(s->mb_y < s->mb_height);
+
+ if(s->picture_structure==PICT_FRAME){
+ h->curr_pic_num= h->frame_num;
+ h->max_pic_num= 1<< h->sps.log2_max_frame_num;
+ }else{
+ h->curr_pic_num= 2*h->frame_num + 1;
+ h->max_pic_num= 1<<(h->sps.log2_max_frame_num + 1);
+ }
+
+ if(h->nal_unit_type == NAL_IDR_SLICE){
+ get_ue_golomb(&s->gb); /* idr_pic_id */
+ }
+
+ if(h->sps.poc_type==0){
+ h->poc_lsb= get_bits(&s->gb, h->sps.log2_max_poc_lsb);
+
+ if(h->pps.pic_order_present==1 && s->picture_structure==PICT_FRAME){
+ h->delta_poc_bottom= get_se_golomb(&s->gb);
+ }
+ }
+
+ if(h->sps.poc_type==1 && !h->sps.delta_pic_order_always_zero_flag){
+ h->delta_poc[0]= get_se_golomb(&s->gb);
+
+ if(h->pps.pic_order_present==1 && s->picture_structure==PICT_FRAME)
+ h->delta_poc[1]= get_se_golomb(&s->gb);
+ }
+
+ init_poc(h);
+
+ if(h->pps.redundant_pic_cnt_present){
+ h->redundant_pic_count= get_ue_golomb(&s->gb);
+ }
+
+ //set defaults, might be overridden a few lines later
+ h->ref_count[0]= h->pps.ref_count[0];
+ h->ref_count[1]= h->pps.ref_count[1];
+
+ if(h->slice_type_nos != FF_I_TYPE){
+ if(h->slice_type_nos == FF_B_TYPE){
+ h->direct_spatial_mv_pred= get_bits1(&s->gb);
+ }
+ num_ref_idx_active_override_flag= get_bits1(&s->gb);
+
+ if(num_ref_idx_active_override_flag){
+ h->ref_count[0]= get_ue_golomb(&s->gb) + 1;
+ if(h->slice_type_nos==FF_B_TYPE)
+ h->ref_count[1]= get_ue_golomb(&s->gb) + 1;
+
+ if(h->ref_count[0]-1 > 32-1 || h->ref_count[1]-1 > 32-1){
+ av_log(h->s.avctx, AV_LOG_ERROR, "reference overflow\n");
+ h->ref_count[0]= h->ref_count[1]= 1;
+ return -1;
+ }
+ }
+ if(h->slice_type_nos == FF_B_TYPE)
+ h->list_count= 2;
+ else
+ h->list_count= 1;
+ }else
+ h->list_count= 0;
+
+ if(!default_ref_list_done){
+ ff_h264_fill_default_ref_list(h);
+ }
+
+ if(h->slice_type_nos!=FF_I_TYPE && ff_h264_decode_ref_pic_list_reordering(h) < 0)
+ return -1;
+
+ if(h->slice_type_nos!=FF_I_TYPE){
+ s->last_picture_ptr= &h->ref_list[0][0];
+ ff_copy_picture(&s->last_picture, s->last_picture_ptr);
+ }
+ if(h->slice_type_nos==FF_B_TYPE){
+ s->next_picture_ptr= &h->ref_list[1][0];
+ ff_copy_picture(&s->next_picture, s->next_picture_ptr);
+ }
+
+ if( (h->pps.weighted_pred && h->slice_type_nos == FF_P_TYPE )
+ || (h->pps.weighted_bipred_idc==1 && h->slice_type_nos== FF_B_TYPE ) )
+ pred_weight_table(h);
+ else if(h->pps.weighted_bipred_idc==2 && h->slice_type_nos== FF_B_TYPE)
+ implicit_weight_table(h);
+ else {
+ h->use_weight = 0;
+ for (i = 0; i < 2; i++) {
+ h->luma_weight_flag[i] = 0;
+ h->chroma_weight_flag[i] = 0;
+ }
+ }
+
+ if(h->nal_ref_idc)
+ ff_h264_decode_ref_pic_marking(h0, &s->gb);
+
+ if(FRAME_MBAFF)
+ ff_h264_fill_mbaff_ref_list(h);
+
+ if(h->slice_type_nos==FF_B_TYPE && !h->direct_spatial_mv_pred)
+ ff_h264_direct_dist_scale_factor(h);
+ ff_h264_direct_ref_list_init(h);
+
+ if( h->slice_type_nos != FF_I_TYPE && h->pps.cabac ){
+ tmp = get_ue_golomb_31(&s->gb);
+ if(tmp > 2){
+ av_log(s->avctx, AV_LOG_ERROR, "cabac_init_idc overflow\n");
+ return -1;
+ }
+ h->cabac_init_idc= tmp;
+ }
+
+ h->last_qscale_diff = 0;
+ // ==> Start patch MPC
+ h->slice_qp_delta = get_se_golomb(&s->gb);
+ tmp = h->pps.init_qp + h->slice_qp_delta;
+ // <== End patch MPC
+ if(tmp>51){
+ av_log(s->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp);
+ return -1;
+ }
+ s->qscale= tmp;
+ h->chroma_qp[0] = get_chroma_qp(h, 0, s->qscale);
+ h->chroma_qp[1] = get_chroma_qp(h, 1, s->qscale);
+ //FIXME qscale / qp ... stuff
+ if(h->slice_type == FF_SP_TYPE){
+ get_bits1(&s->gb); /* sp_for_switch_flag */
+ }
+ if(h->slice_type==FF_SP_TYPE || h->slice_type == FF_SI_TYPE){
+ // ==> Start patch MPC
+ h->slice_qs_delta = get_se_golomb(&s->gb); /* slice_qs_delta */
+ // <== End patch MPC
+ }
+
+ h->deblocking_filter = 1;
+ h->slice_alpha_c0_offset = 52;
+ h->slice_beta_offset = 52;
+ if( h->pps.deblocking_filter_parameters_present ) {
+ tmp= get_ue_golomb_31(&s->gb);
+ if(tmp > 2){
+ av_log(s->avctx, AV_LOG_ERROR, "deblocking_filter_idc %u out of range\n", tmp);
+ return -1;
+ }
+ h->deblocking_filter= tmp;
+ if(h->deblocking_filter < 2)
+ h->deblocking_filter^= 1; // 1<->0
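+ // In the bitstream 0 means "filter enabled" and 1 means "filter disabled";
+ // internally the meaning is inverted, so swap the two values here
+ // (2, "enabled but not across slice boundaries", is kept as is).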
+
+ if( h->deblocking_filter ) {
+ h->slice_alpha_c0_offset += get_se_golomb(&s->gb) << 1;
+ h->slice_beta_offset += get_se_golomb(&s->gb) << 1;
+ if( h->slice_alpha_c0_offset > 104U
+ || h->slice_beta_offset > 104U){
+ av_log(s->avctx, AV_LOG_ERROR, "deblocking filter parameters %d %d out of range\n", h->slice_alpha_c0_offset, h->slice_beta_offset);
+ return -1;
+ }
+ }
+ }
+
+ if( s->avctx->skip_loop_filter >= AVDISCARD_ALL
+ ||(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY && h->slice_type_nos != FF_I_TYPE)
+ ||(s->avctx->skip_loop_filter >= AVDISCARD_BIDIR && h->slice_type_nos == FF_B_TYPE)
+ ||(s->avctx->skip_loop_filter >= AVDISCARD_NONREF && h->nal_ref_idc == 0))
+ h->deblocking_filter= 0;
+
+ if(h->deblocking_filter == 1 && h0->max_contexts > 1) {
+ if(s->avctx->flags2 & CODEC_FLAG2_FAST) {
+ /* Cheat slightly for speed:
+ Do not bother to deblock across slices. */
+ h->deblocking_filter = 2;
+ } else {
+ h0->max_contexts = 1;
+ if(!h0->single_decode_warning) {
+ av_log(s->avctx, AV_LOG_INFO, "Cannot parallelize deblocking type 1, decoding such frames in sequential order\n");
+ h0->single_decode_warning = 1;
+ }
+ if(h != h0)
+ return 1; // deblocking switched inside frame
+ }
+ }
+ h->qp_thresh= 15 + 52 - FFMIN(h->slice_alpha_c0_offset, h->slice_beta_offset) - FFMAX3(0, h->pps.chroma_qp_index_offset[0], h->pps.chroma_qp_index_offset[1]);
+
+#if 0 //FMO
+ if( h->pps.num_slice_groups > 1 && h->pps.mb_slice_group_map_type >= 3 && h->pps.mb_slice_group_map_type <= 5)
+ slice_group_change_cycle= get_bits(&s->gb, ?);
+#endif
+
+ // ==> Start patch MPC
+ // If entropy_coding_mode (CABAC) is used, align to the next byte boundary
+ if (h->pps.cabac) align_get_bits( &s->gb );
+
+ h->bit_offset_to_slice_data = s->gb.index;
+ // <== End patch MPC
+
+ h0->last_slice_type = slice_type;
+ h->slice_num = ++h0->current_slice;
+ if(h->slice_num >= MAX_SLICES){
+ av_log(s->avctx, AV_LOG_ERROR, "Too many slices, increase MAX_SLICES and recompile\n");
+ }
+
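+ // Build ref2frm for this slice: map every reference index to a small unique
+ // picture id (4 * DPB slot + field parity bits) so the loop filter can
+ // cheaply check whether two neighbouring blocks reference the same picture.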
+ for(j=0; j<2; j++){
+ int id_list[16];
+ int *ref2frm= h->ref2frm[h->slice_num&(MAX_SLICES-1)][j];
+ for(i=0; i<16; i++){
+ id_list[i]= 60;
+ if(h->ref_list[j][i].data[0]){
+ int k;
+ uint8_t *base= h->ref_list[j][i].base[0];
+ for(k=0; k<h->short_ref_count; k++)
+ if(h->short_ref[k]->base[0] == base){
+ id_list[i]= k;
+ break;
+ }
+ for(k=0; k<h->long_ref_count; k++)
+ if(h->long_ref[k] && h->long_ref[k]->base[0] == base){
+ id_list[i]= h->short_ref_count + k;
+ break;
+ }
+ }
+ }
+
+ ref2frm[0]=
+ ref2frm[1]= -1;
+ for(i=0; i<16; i++)
+ ref2frm[i+2]= 4*id_list[i]
+ +(h->ref_list[j][i].reference&3);
+ ref2frm[18+0]=
+ ref2frm[18+1]= -1;
+ for(i=16; i<48; i++)
+ ref2frm[i+4]= 4*id_list[(i-16)>>1]
+ +(h->ref_list[j][i].reference&3);
+ }
+
+ h->emu_edge_width= (s->flags&CODEC_FLAG_EMU_EDGE) ? 0 : 16;
+ h->emu_edge_height= (FRAME_MBAFF || FIELD_PICTURE) ? 0 : h->emu_edge_width;
+
+ s->avctx->refs= h->sps.ref_frame_count;
+
+ fill_dxva_slice_long(h);
+
+ if(s->avctx->debug&FF_DEBUG_PICT_INFO){
+ av_log(h->s.avctx, AV_LOG_DEBUG, "slice:%d %s mb:%d %c%s%s pps:%u frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n",
+ h->slice_num,
+ (s->picture_structure==PICT_FRAME ? "F" : s->picture_structure==PICT_TOP_FIELD ? "T" : "B"),
+ h->first_mb_in_slice,
+ av_get_pict_type_char(h->slice_type), h->slice_type_fixed ? " fix" : "", h->nal_unit_type == NAL_IDR_SLICE ? " IDR" : "",
+ pps_id, h->frame_num,
+ s->current_picture_ptr->field_poc[0], s->current_picture_ptr->field_poc[1],
+ h->ref_count[0], h->ref_count[1],
+ s->qscale,
+ h->deblocking_filter, h->slice_alpha_c0_offset/2, h->slice_beta_offset/2,
+ h->use_weight,
+ h->use_weight==1 && h->use_weight_chroma ? "c" : "",
+ h->slice_type == FF_B_TYPE ? (h->direct_spatial_mv_pred ? "SPAT" : "TEMP") : ""
+ );
+ }
+
+ return 0;
+}
+
+
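+/* MPC DXVA variant of decode_nal_units(): walks the buffer NAL by NAL (Annex-B
+ * start codes or AVC length-prefixed units), parsing slice headers, SPS, PPS
+ * and SEI, but never calls execute_decode_slices() because the actual slice
+ * decoding is done by the DXVA hardware. */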
+static int decode_nal_units_noexecute(H264Context *h, uint8_t *buf, int buf_size){
+ MpegEncContext * const s = &h->s;
+ AVCodecContext * const avctx= s->avctx;
+ int buf_index=0;
+ H264Context *hx; ///< thread context
+ int context_count = 0;
+ int next_avc= h->is_avc ? 0 : buf_size;
+
+ h->max_contexts = avctx->thread_count;
+#if 0
+ int i;
+ for(i=0; i<50; i++){
+ av_log(NULL, AV_LOG_ERROR,"%02X ", buf[i]);
+ }
+#endif
+ if(!(s->flags2 & CODEC_FLAG2_CHUNKS)){
+ h->current_slice = 0;
+ if (!s->first_field)
+ s->current_picture_ptr= NULL;
+ ff_h264_reset_sei(h);
+ }
+
+ for(;;){
+ int consumed;
+ int dst_length;
+ int bit_length;
+ const uint8_t *ptr;
+ int i, nalsize = 0;
+ int err;
+
+ if(buf_index >= next_avc) {
+ if(buf_index >= buf_size) break;
+ nalsize = 0;
+ for(i = 0; i < h->nal_length_size; i++)
+ nalsize = (nalsize << 8) | buf[buf_index++];
+ if(nalsize <= 1 || nalsize > buf_size - buf_index){
+ if(nalsize == 1){
+ buf_index++;
+ continue;
+ }else{
+ av_log(h->s.avctx, AV_LOG_ERROR, "AVC: nal size %d\n", nalsize);
+ break;
+ }
+ }
+ next_avc= buf_index + nalsize;
+ } else {
+ // start code prefix search
+ for(; buf_index + 3 < next_avc; buf_index++){
+ // This should always succeed in the first iteration.
+ if(buf[buf_index] == 0 && buf[buf_index+1] == 0 && buf[buf_index+2] == 1)
+ break;
+ }
+
+ if(buf_index+3 >= buf_size) break;
+
+ buf_index+=3;
+ if(buf_index >= next_avc) continue;
+ }
+
+ hx = h->thread_context[context_count];
+
+ ptr= ff_h264_decode_nal(hx, buf + buf_index, &dst_length, &consumed, next_avc - buf_index);
+ if (ptr==NULL || dst_length < 0){
+ return -1;
+ }
+ while(ptr[dst_length - 1] == 0 && dst_length > 0)
+ dst_length--;
+ bit_length= !dst_length ? 0 : (8*dst_length - ff_h264_decode_rbsp_trailing(h, ptr + dst_length - 1));
+
+ if(s->avctx->debug&FF_DEBUG_STARTCODE){
+ av_log(h->s.avctx, AV_LOG_DEBUG, "NAL %d at %d/%d length %d\n", hx->nal_unit_type, buf_index, buf_size, dst_length);
+ }
+
+ if (h->is_avc && (nalsize != consumed) && nalsize){
+ av_log(h->s.avctx, AV_LOG_DEBUG, "AVC: Consumed only %d bytes instead of %d\n", consumed, nalsize);
+ }
+
+ buf_index += consumed;
+
+ /* ffdshow custom code */
+ if( (s->hurry_up == 1 && hx->nal_ref_idc == 0) //FIXME do not discard SEI id
+ ||(avctx->skip_frame >= AVDISCARD_NONREF && hx->nal_ref_idc == 0))
+ continue;
+
+ again:
+ err = 0;
+ switch(hx->nal_unit_type){
+ case NAL_IDR_SLICE:
+ if (h->nal_unit_type != NAL_IDR_SLICE) {
+ av_log(h->s.avctx, AV_LOG_ERROR, "Invalid mix of IDR and non-IDR slices\n");
+ return -1;
+ }
+ idr(h); //FIXME ensure we don't lose some frames if there is reordering
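+ /* fall through: an IDR slice is then parsed like a regular slice */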
+ case NAL_SLICE:
+ init_get_bits(&hx->s.gb, ptr, bit_length);
+ hx->intra_gb_ptr=
+ hx->inter_gb_ptr= &hx->s.gb;
+ hx->s.data_partitioning = 0;
+
+ // ==> Start patch MPC DXVA
+ hx->ref_pic_flag = (h->nal_ref_idc != 0);
+ if((err = decode_slice_header_noexecute(hx)))
+ break;
+ // <== End patch MPC DXVA
+
+ s->current_picture_ptr->key_frame |=
+ (hx->nal_unit_type == NAL_IDR_SLICE) ||
+ (h->sei_recovery_frame_cnt >= 0);
+ if(hx->redundant_pic_count==0 && hx->s.hurry_up < 5
+ && (avctx->skip_frame < AVDISCARD_NONREF || hx->nal_ref_idc)
+ && (avctx->skip_frame < AVDISCARD_BIDIR || hx->slice_type_nos!=FF_B_TYPE)
+ && (avctx->skip_frame < AVDISCARD_NONKEY || hx->slice_type_nos==FF_I_TYPE)
+ && avctx->skip_frame < AVDISCARD_ALL){
+ context_count++;
+ }
+ break;
+ case NAL_DPA:
+ init_get_bits(&hx->s.gb, ptr, bit_length);
+ hx->intra_gb_ptr=
+ hx->inter_gb_ptr= NULL;
+ // ==> Start patch MPC DXVA
+ if ((err = decode_slice_header_noexecute(hx)) < 0)
+ break;
+ // <== End patch MPC DXVA
+ hx->s.data_partitioning = 1;
+
+ break;
+ case NAL_DPB:
+ init_get_bits(&hx->intra_gb, ptr, bit_length);
+ hx->intra_gb_ptr= &hx->intra_gb;
+ break;
+ case NAL_DPC:
+ init_get_bits(&hx->inter_gb, ptr, bit_length);
+ hx->inter_gb_ptr= &hx->inter_gb;
+
+ if(hx->redundant_pic_count==0 && hx->intra_gb_ptr && hx->s.data_partitioning
+ && s->context_initialized
+ && s->hurry_up < 5
+ && (avctx->skip_frame < AVDISCARD_NONREF || hx->nal_ref_idc)
+ && (avctx->skip_frame < AVDISCARD_BIDIR || hx->slice_type_nos!=FF_B_TYPE)
+ && (avctx->skip_frame < AVDISCARD_NONKEY || hx->slice_type_nos==FF_I_TYPE)
+ && avctx->skip_frame < AVDISCARD_ALL)
+ context_count++;
+ break;
+ case NAL_SEI:
+ init_get_bits(&s->gb, ptr, bit_length);
+ ff_h264_decode_sei(h);
+ break;
+ case NAL_SPS:
+ init_get_bits(&s->gb, ptr, bit_length);
+ ff_h264_decode_seq_parameter_set(h);
+
+ if(s->flags& CODEC_FLAG_LOW_DELAY)
+ s->low_delay=1;
+
+ if(avctx->has_b_frames < 2)
+ avctx->has_b_frames= !s->low_delay;
+ break;
+ case NAL_PPS:
+ init_get_bits(&s->gb, ptr, bit_length);
+
+ ff_h264_decode_picture_parameter_set(h, bit_length);
+
+ break;
+ case NAL_AUD:
+ case NAL_END_SEQUENCE:
+ case NAL_END_STREAM:
+ case NAL_FILLER_DATA:
+ case NAL_SPS_EXT:
+ case NAL_AUXILIARY_SLICE:
+ break;
+ default:
+ av_log(avctx, AV_LOG_DEBUG, "Unknown NAL code: %d (%d bits)\n", hx->nal_unit_type, bit_length);
+ }
+
+ if(context_count == h->max_contexts) {
+ // ==> Start patch MPC DXVA
+ // execute_decode_slices(h, context_count);
+ // <== End patch MPC DXVA
+ context_count = 0;
+ }
+
+ if (err < 0)
+ av_log(h->s.avctx, AV_LOG_ERROR, "decode_slice_header error\n");
+ else if(err == 1) {
+ /* Slice could not be decoded in parallel mode, copy down
+ * NAL unit stuff to context 0 and restart. Note that
+ * rbsp_buffer is not transferred, but since we no longer
+ * run in parallel mode this should not be an issue. */
+ h->nal_unit_type = hx->nal_unit_type;
+ h->nal_ref_idc = hx->nal_ref_idc;
+ hx = h;
+ goto again;
+ }
+ }
+ // ==> Start patch MPC DXVA
+ //if(context_count)
+ // execute_decode_slices(h, context_count);
+ // <== End patch MPC DXVA
+ return buf_index;
+}
+
+
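+/* MPC DXVA entry point: parses the NAL units of one input packet and, when a
+ * picture becomes ready in display order, reports its POC and the timestamp
+ * carried in reordered_opaque through nOutPOC / rtStartTime instead of
+ * returning decoded pixels. */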
+int av_h264_decode_frame(AVCodecContext *avctx,
+ int* nOutPOC, int64_t* rtStartTime,
+ const uint8_t *buf, int buf_size)
+{
+ H264Context *h = avctx->priv_data;
+ MpegEncContext *s = &h->s;
+ int buf_index;
+
+ // ==> Start patch MPC DXVA
+ if (nOutPOC) *nOutPOC = -1;
+ // <== End patch MPC DXVA
+
+ s->flags= avctx->flags;
+ s->flags2= avctx->flags2;
+
+ /* end of stream, output what is still in the buffers */
+ if (buf_size == 0) {
+ Picture *out;
+ int i, out_idx;
+
+//FIXME factorize this with the output code below
+ out = h->delayed_pic[0];
+ out_idx = 0;
+ for(i=1; h->delayed_pic[i] && !h->delayed_pic[i]->key_frame && !h->delayed_pic[i]->mmco_reset; i++)
+ if(h->delayed_pic[i]->poc < out->poc){
+ out = h->delayed_pic[i];
+ out_idx = i;
+ }
+
+ for(i=out_idx; h->delayed_pic[i]; i++)
+ h->delayed_pic[i] = h->delayed_pic[i+1];
+
+ if(out){
+ // ==> Start patch MPC DXVA
+ if (nOutPOC) *nOutPOC = out->poc;
+ if (rtStartTime) *rtStartTime = out->reordered_opaque;
+ // <== End patch MPC DXVA
+ }
+
+ return 0;
+ }
+
+ /* ffdshow custom code (begin) */
+ if(h->is_avc && !h->got_avcC) {
+ int i, cnt, nalsize;
+ unsigned char *p = avctx->extradata, *pend=p+avctx->extradata_size;
+
+ h->nal_length_size = 2;
+ cnt = 1;
+
+ for (i = 0; i < cnt; i++) {
+ nalsize = AV_RB16(p) + 2;
+ if(decode_nal_units(h, p, nalsize) != nalsize) {
+ av_log(avctx, AV_LOG_ERROR, "Decoding sps %d from avcC failed\n", i);
+ return -1;
+ }
+ p += nalsize;
+ }
+ // Decode pps from avcC
+ for (i = 0; p<pend-2; i++) {
+ nalsize = AV_RB16(p) + 2;
+ if(decode_nal_units(h, p, nalsize) != nalsize) {
+ av_log(avctx, AV_LOG_ERROR, "Decoding pps %d from avcC failed\n", i);
+ return -1;
+ }
+ p += nalsize;
+ }
+ // Now store the right NAL length size, which will be used to parse all other NALs
+ h->nal_length_size = avctx->nal_length_size ? avctx->nal_length_size : 4; //((*(((char*)(avctx->extradata))+4))&0x03)+1;
+ // Do not reparse avcC
+ h->got_avcC = 1;
+ }
+
+ if(!h->got_avcC && !h->is_avc && s->avctx->extradata_size){
+ if(decode_nal_units(h, s->avctx->extradata, s->avctx->extradata_size) < 0)
+ return -1;
+ h->got_avcC = 1;
+ s->picture_number++;
+ }
+ /* ffdshow custom code (end) */
+
+ // ==> Start patch MPC DXVA
+ buf_index=decode_nal_units_noexecute(h, buf, buf_size);
+ // <== End patch MPC DXVA
+ if(buf_index < 0)
+ return -1;
+
+ if(!(s->flags2 & CODEC_FLAG2_CHUNKS) && !s->current_picture_ptr){
+ if (avctx->skip_frame >= AVDISCARD_NONREF || s->hurry_up) return 0;
+ av_log(avctx, AV_LOG_ERROR, "no frame!\n");
+ return -1;
+ }
+
+ if(!(s->flags2 & CODEC_FLAG2_CHUNKS) || (s->mb_y >= s->mb_height && s->mb_height)){
+ Picture *out = s->current_picture_ptr;
+ Picture *cur = s->current_picture_ptr;
+ int i, pics, out_of_order, out_idx;
+
+ // ==> Start patch MPC DXVA
+ field_end_noexecute(h);
+ // <== End patch MPC DXVA
+
+ if (cur->field_poc[0]==INT_MAX || cur->field_poc[1]==INT_MAX) {
+ /* Wait for second field. */
+ // ==> Start patch MPC DXVA
+ //*data_size = 0;
+ // <== End patch MPC DXVA
+
+ } else {
+ cur->interlaced_frame = 0;
+ cur->repeat_pict = 0;
+
+ /* Signal interlacing information externally. */
+ /* Prioritize picture timing SEI information over the decoding process, when it is present. */
+
+ if(h->sps.pic_struct_present_flag){
+ switch (h->sei_pic_struct)
+ {
+ case SEI_PIC_STRUCT_FRAME:
+ break;
+ case SEI_PIC_STRUCT_TOP_FIELD:
+ case SEI_PIC_STRUCT_BOTTOM_FIELD:
+ cur->interlaced_frame = 1;
+ break;
+ case SEI_PIC_STRUCT_TOP_BOTTOM:
+ case SEI_PIC_STRUCT_BOTTOM_TOP:
+ if (FIELD_OR_MBAFF_PICTURE)
+ cur->interlaced_frame = 1;
+ else
+ // try to flag soft-telecined progressive content
+ cur->interlaced_frame = h->prev_interlaced_frame;
+ break;
+ case SEI_PIC_STRUCT_TOP_BOTTOM_TOP:
+ case SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM:
+ // Signal the possibility of telecined film externally (pic_struct 5,6)
+ // From these hints, let the application decide whether to apply deinterlacing.
+ cur->repeat_pict = 1;
+ break;
+ case SEI_PIC_STRUCT_FRAME_DOUBLING:
+ // Force progressive here, as doubling an interlaced frame is a bad idea.
+ cur->repeat_pict = 2;
+ break;
+ case SEI_PIC_STRUCT_FRAME_TRIPLING:
+ cur->repeat_pict = 4;
+ break;
+ }
+
+ if ((h->sei_ct_type & 3) && h->sei_pic_struct <= SEI_PIC_STRUCT_BOTTOM_TOP)
+ cur->interlaced_frame = (h->sei_ct_type & (1<<1)) != 0;
+ }else{
+ /* Derive interlacing flag from used decoding process. */
+ cur->interlaced_frame = FIELD_OR_MBAFF_PICTURE;
+ }
+ h->prev_interlaced_frame = cur->interlaced_frame;
+
+ if (cur->field_poc[0] != cur->field_poc[1]){
+ /* Derive top_field_first from field pocs. */
+ cur->top_field_first = cur->field_poc[0] < cur->field_poc[1];
+ }else{
+ if(cur->interlaced_frame || h->sps.pic_struct_present_flag){
+ /* Use picture timing SEI information. Even if it refers to a past frame, it is better than nothing. */
+ if(h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM
+ || h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM_TOP)
+ cur->top_field_first = 1;
+ else
+ cur->top_field_first = 0;
+ }else{
+ /* Most likely progressive */
+ cur->top_field_first = 0;
+ }
+ }
+
+ /* ffdshow custom code */
+ cur->video_full_range_flag = h->sps.full_range;
+ cur->YCbCr_RGB_matrix_coefficients = h->sps.colorspace;
+
+ //FIXME do something with unavailable reference frames
+
+ /* Sort B-frames into display order */
+
+ if(h->sps.bitstream_restriction_flag
+ && s->avctx->has_b_frames < h->sps.num_reorder_frames){
+ s->avctx->has_b_frames = h->sps.num_reorder_frames;
+ s->low_delay = 0;
+ }
+
+ if( s->avctx->strict_std_compliance >= FF_COMPLIANCE_STRICT
+ && !h->sps.bitstream_restriction_flag){
+ s->avctx->has_b_frames= MAX_DELAYED_PIC_COUNT;
+ s->low_delay= 0;
+ }
+
+ pics = 0;
+ while(h->delayed_pic[pics]) pics++;
+
+ assert(pics <= MAX_DELAYED_PIC_COUNT);
+
+ h->delayed_pic[pics++] = cur;
+ if(cur->reference == 0)
+ cur->reference = DELAYED_PIC_REF;
+
+ out = h->delayed_pic[0];
+ out_idx = 0;
+ for(i=1; h->delayed_pic[i] && !h->delayed_pic[i]->key_frame && !h->delayed_pic[i]->mmco_reset; i++)
+ if(h->delayed_pic[i]->poc < out->poc){
+ out = h->delayed_pic[i];
+ out_idx = i;
+ }
+ if(s->avctx->has_b_frames == 0 && (h->delayed_pic[0]->key_frame || h->delayed_pic[0]->mmco_reset))
+ h->outputed_poc= INT_MIN;
+ out_of_order = out->poc < h->outputed_poc;
+
+ if(h->sps.bitstream_restriction_flag && s->avctx->has_b_frames >= h->sps.num_reorder_frames)
+ { }
+ else if((out_of_order && pics-1 == s->avctx->has_b_frames && s->avctx->has_b_frames < MAX_DELAYED_PIC_COUNT)
+ || (s->low_delay &&
+ ((h->outputed_poc != INT_MIN && out->poc > h->outputed_poc + 2)
+ || cur->pict_type == FF_B_TYPE)))
+ {
+ s->low_delay = 0;
+ s->avctx->has_b_frames++;
+ }
+
+ if(out_of_order || pics > s->avctx->has_b_frames){
+ out->reference &= ~DELAYED_PIC_REF;
+ for(i=out_idx; h->delayed_pic[i]; i++)
+ h->delayed_pic[i] = h->delayed_pic[i+1];
+ }
+ // ==> Start patch MPC DXVA
+ if(!out_of_order && pics > s->avctx->has_b_frames){
+
+
+ if(out_idx==0 && h->delayed_pic[0] && (h->delayed_pic[0]->key_frame || h->delayed_pic[0]->mmco_reset)) {
+ h->outputed_poc = INT_MIN;
+ } else
+ h->outputed_poc = out->poc;
+ if (nOutPOC) *nOutPOC = out->poc;
+ if (rtStartTime) *rtStartTime = out->reordered_opaque;
+ }else{
+ av_log(avctx, AV_LOG_DEBUG, "no picture\n");
+ }
+ // <== End patch MPC DXVA
+ }
+ }
+
+ // ==> Start patch MPC DXVA
+ //assert(pict->data[0] || !*data_size);
+ //ff_print_debug_info(s, pict);
+ // <== End patch MPC DXVA
+
+ /* ffdshow custom code (begin) */
+ //pict->h264_poc_decoded = h->poc_lsb + h->poc_msb;
+ //pict->h264_poc_outputed = h->outputed_poc;
+ //pict->h264_frame_num_decoded = h-> frame_num;
+ //pict->h264_max_frame_num = 1 << h->sps.log2_max_frame_num;
+ /* ffdshow custom code (end) */
+
+ return get_consumed_bytes(s, buf_index, buf_size);
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_loopfilter.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_loopfilter.c
new file mode 100644
index 000000000..1a2ceb816
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_loopfilter.c
@@ -0,0 +1,751 @@
+/*
+ * H.26L/H.264/AVC/JVT/14496-10/... loop filter
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/h264_loopfilter.c
+ * H.264 / AVC / MPEG4 part10 loop filter.
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#include "internal.h"
+#include "dsputil.h"
+#include "avcodec.h"
+#include "mpegvideo.h"
+#include "h264.h"
+#include "mathops.h"
+#include "rectangle.h"
+
+//#undef NDEBUG
+#include <assert.h>
+
+/* Deblocking filter (p153) */
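+/* The tables below are padded to 52*3 entries so they can be indexed directly
+ * with qp + slice offset (the offsets are stored biased by +52 when the slice
+ * header is parsed), avoiding an extra clip on every lookup. */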
+static const uint8_t alpha_table[52*3] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 4, 4, 5, 6,
+ 7, 8, 9, 10, 12, 13, 15, 17, 20, 22,
+ 25, 28, 32, 36, 40, 45, 50, 56, 63, 71,
+ 80, 90,101,113,127,144,162,182,203,226,
+ 255,255,
+ 255,255,255,255,255,255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255,255,255,255,255,255,
+};
+static const uint8_t beta_table[52*3] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 2, 2, 2, 3,
+ 3, 3, 3, 4, 4, 4, 6, 6, 7, 7,
+ 8, 8, 9, 9, 10, 10, 11, 11, 12, 12,
+ 13, 13, 14, 14, 15, 15, 16, 16, 17, 17,
+ 18, 18,
+ 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
+ 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
+ 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
+ 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
+};
+static const uint8_t tc0_table[52*3][4] = {
+ {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
+ {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
+ {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
+ {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
+ {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
+ {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
+ {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
+ {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
+ {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
+ {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
+ {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
+ {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 1 },
+ {-1, 0, 0, 1 }, {-1, 0, 0, 1 }, {-1, 0, 0, 1 }, {-1, 0, 1, 1 }, {-1, 0, 1, 1 }, {-1, 1, 1, 1 },
+ {-1, 1, 1, 1 }, {-1, 1, 1, 1 }, {-1, 1, 1, 1 }, {-1, 1, 1, 2 }, {-1, 1, 1, 2 }, {-1, 1, 1, 2 },
+ {-1, 1, 1, 2 }, {-1, 1, 2, 3 }, {-1, 1, 2, 3 }, {-1, 2, 2, 3 }, {-1, 2, 2, 4 }, {-1, 2, 3, 4 },
+ {-1, 2, 3, 4 }, {-1, 3, 3, 5 }, {-1, 3, 4, 6 }, {-1, 3, 4, 6 }, {-1, 4, 5, 7 }, {-1, 4, 5, 8 },
+ {-1, 4, 6, 9 }, {-1, 5, 7,10 }, {-1, 6, 8,11 }, {-1, 6, 8,13 }, {-1, 7,10,14 }, {-1, 8,11,16 },
+ {-1, 9,12,18 }, {-1,10,13,20 }, {-1,11,15,23 }, {-1,13,17,25 },
+ {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
+ {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
+ {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
+ {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
+ {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
+ {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
+ {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
+ {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
+ {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
+};
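+/* Editorial sketch (not part of the original commit): the three tables above
+ * carry 52 zero entries below and 52 saturated entries above the real 0..51
+ * QP range.  The per-slice alpha/beta offsets appear to be stored with a +52
+ * bias in this decoder (cf. the qp_thresh computation further down), so
+ * qp + offset can index the middle band directly without an av_clip() per
+ * lookup.  Hypothetical standalone lookup with the bias written explicitly: */
+#if 0
+static int lookup_alpha(int qp, int slice_alpha_c0_offset_div2)
+{
+    int index_a = 52 + qp + 2 * slice_alpha_c0_offset_div2;
+    return alpha_table[index_a];   /* 0 disables filtering for this edge */
+}
+#endif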
+
+static void av_always_inline filter_mb_edgev( uint8_t *pix, int stride, int16_t bS[4], unsigned int qp, H264Context *h) {
+ const unsigned int index_a = qp + h->slice_alpha_c0_offset;
+ const int alpha = alpha_table[index_a];
+ const int beta = beta_table[qp + h->slice_beta_offset];
+ if (alpha ==0 || beta == 0) return;
+
+ if( bS[0] < 4 ) {
+ int8_t tc[4];
+ tc[0] = tc0_table[index_a][bS[0]];
+ tc[1] = tc0_table[index_a][bS[1]];
+ tc[2] = tc0_table[index_a][bS[2]];
+ tc[3] = tc0_table[index_a][bS[3]];
+ h->s.dsp.h264_h_loop_filter_luma(pix, stride, alpha, beta, tc);
+ } else {
+ h->s.dsp.h264_h_loop_filter_luma_intra(pix, stride, alpha, beta);
+ }
+}
+static void av_always_inline filter_mb_edgecv( uint8_t *pix, int stride, int16_t bS[4], unsigned int qp, H264Context *h ) {
+ const unsigned int index_a = qp + h->slice_alpha_c0_offset;
+ const int alpha = alpha_table[index_a];
+ const int beta = beta_table[qp + h->slice_beta_offset];
+ if (alpha ==0 || beta == 0) return;
+
+ if( bS[0] < 4 ) {
+ int8_t tc[4];
+ tc[0] = tc0_table[index_a][bS[0]]+1;
+ tc[1] = tc0_table[index_a][bS[1]]+1;
+ tc[2] = tc0_table[index_a][bS[2]]+1;
+ tc[3] = tc0_table[index_a][bS[3]]+1;
+ h->s.dsp.h264_h_loop_filter_chroma(pix, stride, alpha, beta, tc);
+ } else {
+ h->s.dsp.h264_h_loop_filter_chroma_intra(pix, stride, alpha, beta);
+ }
+}
+
+static void filter_mb_mbaff_edgev( H264Context *h, uint8_t *pix, int stride, int16_t bS[4], int bsi, int qp ) {
+ int i;
+ int index_a = qp + h->slice_alpha_c0_offset;
+ int alpha = alpha_table[index_a];
+ int beta = beta_table[qp + h->slice_beta_offset];
+ for( i = 0; i < 8; i++, pix += stride) {
+ const int bS_index = (i >> 1) * bsi;
+
+ if( bS[bS_index] == 0 ) {
+ continue;
+ }
+
+ if( bS[bS_index] < 4 ) {
+ const int tc0 = tc0_table[index_a][bS[bS_index]];
+ const int p0 = pix[-1];
+ const int p1 = pix[-2];
+ const int p2 = pix[-3];
+ const int q0 = pix[0];
+ const int q1 = pix[1];
+ const int q2 = pix[2];
+
+ if( FFABS( p0 - q0 ) < alpha &&
+ FFABS( p1 - p0 ) < beta &&
+ FFABS( q1 - q0 ) < beta ) {
+ int tc = tc0;
+ int i_delta;
+
+ if( FFABS( p2 - p0 ) < beta ) {
+ if(tc0)
+ pix[-2] = p1 + av_clip( ( p2 + ( ( p0 + q0 + 1 ) >> 1 ) - ( p1 << 1 ) ) >> 1, -tc0, tc0 );
+ tc++;
+ }
+ if( FFABS( q2 - q0 ) < beta ) {
+ if(tc0)
+ pix[1] = q1 + av_clip( ( q2 + ( ( p0 + q0 + 1 ) >> 1 ) - ( q1 << 1 ) ) >> 1, -tc0, tc0 );
+ tc++;
+ }
+
+ i_delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
+ pix[-1] = av_clip_uint8( p0 + i_delta ); /* p0' */
+ pix[0] = av_clip_uint8( q0 - i_delta ); /* q0' */
+ tprintf(h->s.avctx, "filter_mb_mbaff_edgev i:%d, qp:%d, indexA:%d, alpha:%d, beta:%d, tc:%d\n# bS:%d -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x]\n", i, qp[qp_index], index_a, alpha, beta, tc, bS[bS_index], pix[-3], p1, p0, q0, q1, pix[2], p1, pix[-1], pix[0], q1);
+ }
+ }else{
+ const int p0 = pix[-1];
+ const int p1 = pix[-2];
+ const int p2 = pix[-3];
+
+ const int q0 = pix[0];
+ const int q1 = pix[1];
+ const int q2 = pix[2];
+
+ if( FFABS( p0 - q0 ) < alpha &&
+ FFABS( p1 - p0 ) < beta &&
+ FFABS( q1 - q0 ) < beta ) {
+
+ if(FFABS( p0 - q0 ) < (( alpha >> 2 ) + 2 )){
+ if( FFABS( p2 - p0 ) < beta)
+ {
+ const int p3 = pix[-4];
+ /* p0', p1', p2' */
+ pix[-1] = ( p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4 ) >> 3;
+ pix[-2] = ( p2 + p1 + p0 + q0 + 2 ) >> 2;
+ pix[-3] = ( 2*p3 + 3*p2 + p1 + p0 + q0 + 4 ) >> 3;
+ } else {
+ /* p0' */
+ pix[-1] = ( 2*p1 + p0 + q1 + 2 ) >> 2;
+ }
+ if( FFABS( q2 - q0 ) < beta)
+ {
+ const int q3 = pix[3];
+ /* q0', q1', q2' */
+ pix[0] = ( p1 + 2*p0 + 2*q0 + 2*q1 + q2 + 4 ) >> 3;
+ pix[1] = ( p0 + q0 + q1 + q2 + 2 ) >> 2;
+ pix[2] = ( 2*q3 + 3*q2 + q1 + q0 + p0 + 4 ) >> 3;
+ } else {
+ /* q0' */
+ pix[0] = ( 2*q1 + q0 + p1 + 2 ) >> 2;
+ }
+ }else{
+ /* p0', q0' */
+ pix[-1] = ( 2*p1 + p0 + q1 + 2 ) >> 2;
+ pix[ 0] = ( 2*q1 + q0 + p1 + 2 ) >> 2;
+ }
+ tprintf(h->s.avctx, "filter_mb_mbaff_edgev i:%d, qp:%d, indexA:%d, alpha:%d, beta:%d\n# bS:4 -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x, %02x, %02x]\n", i, qp[qp_index], index_a, alpha, beta, p2, p1, p0, q0, q1, q2, pix[-3], pix[-2], pix[-1], pix[0], pix[1], pix[2]);
+ }
+ }
+ }
+}
+static void filter_mb_mbaff_edgecv( H264Context *h, uint8_t *pix, int stride, int16_t bS[4], int bsi, int qp ) {
+ int i;
+ int index_a = qp + h->slice_alpha_c0_offset;
+ int alpha = alpha_table[index_a];
+ int beta = beta_table[qp + h->slice_beta_offset];
+ for( i = 0; i < 4; i++, pix += stride) {
+ const int bS_index = i*bsi;
+
+ if( bS[bS_index] == 0 ) {
+ continue;
+ }
+
+ if( bS[bS_index] < 4 ) {
+ const int tc = tc0_table[index_a][bS[bS_index]] + 1;
+ const int p0 = pix[-1];
+ const int p1 = pix[-2];
+ const int q0 = pix[0];
+ const int q1 = pix[1];
+
+ if( FFABS( p0 - q0 ) < alpha &&
+ FFABS( p1 - p0 ) < beta &&
+ FFABS( q1 - q0 ) < beta ) {
+ const int i_delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
+
+ pix[-1] = av_clip_uint8( p0 + i_delta ); /* p0' */
+ pix[0] = av_clip_uint8( q0 - i_delta ); /* q0' */
+ tprintf(h->s.avctx, "filter_mb_mbaff_edgecv i:%d, qp:%d, indexA:%d, alpha:%d, beta:%d, tc:%d\n# bS:%d -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x]\n", i, qp[qp_index], index_a, alpha, beta, tc, bS[bS_index], pix[-3], p1, p0, q0, q1, pix[2], p1, pix[-1], pix[0], q1);
+ }
+ }else{
+ const int p0 = pix[-1];
+ const int p1 = pix[-2];
+ const int q0 = pix[0];
+ const int q1 = pix[1];
+
+ if( FFABS( p0 - q0 ) < alpha &&
+ FFABS( p1 - p0 ) < beta &&
+ FFABS( q1 - q0 ) < beta ) {
+
+ pix[-1] = ( 2*p1 + p0 + q1 + 2 ) >> 2; /* p0' */
+ pix[0] = ( 2*q1 + q0 + p1 + 2 ) >> 2; /* q0' */
+ tprintf(h->s.avctx, "filter_mb_mbaff_edgecv i:%d\n# bS:4 -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x, %02x, %02x]\n", i, pix[-3], p1, p0, q0, q1, pix[2], pix[-3], pix[-2], pix[-1], pix[0], pix[1], pix[2]);
+ }
+ }
+ }
+}
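+/* Editorial sketch (not part of the original commit): the bS < 4 branches
+ * above apply the standard "weak" H.264 deblocking update.  The same delta
+ * computation, isolated with a plain clip helper and hypothetical sample
+ * values (illustration only, mirrors the arithmetic used above): */
+#if 0
+static int clip3(int v, int lo, int hi) { return v < lo ? lo : (v > hi ? hi : v); }
+static void weak_filter_pair(uint8_t *p0, uint8_t *q0, int p1, int q1, int tc)
+{
+    int delta = clip3((((*q0 - *p0) << 2) + (p1 - q1) + 4) >> 3, -tc, tc);
+    int np0 = *p0 + delta, nq0 = *q0 - delta;
+    *p0 = (uint8_t)clip3(np0, 0, 255);   /* what av_clip_uint8() does */
+    *q0 = (uint8_t)clip3(nq0, 0, 255);
+    /* e.g. p1=70, p0=60, q0=90, q1=100, tc=2: raw delta 11, clipped to 2,
+     * giving p0'=62 and q0'=88 */
+}
+#endif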
+
+static void av_always_inline filter_mb_edgeh( uint8_t *pix, int stride, int16_t bS[4], unsigned int qp, H264Context *h ) {
+ const unsigned int index_a = qp + h->slice_alpha_c0_offset;
+ const int alpha = alpha_table[index_a];
+ const int beta = beta_table[qp + h->slice_beta_offset];
+ if (alpha ==0 || beta == 0) return;
+
+ if( bS[0] < 4 ) {
+ int8_t tc[4];
+ tc[0] = tc0_table[index_a][bS[0]];
+ tc[1] = tc0_table[index_a][bS[1]];
+ tc[2] = tc0_table[index_a][bS[2]];
+ tc[3] = tc0_table[index_a][bS[3]];
+ h->s.dsp.h264_v_loop_filter_luma(pix, stride, alpha, beta, tc);
+ } else {
+ h->s.dsp.h264_v_loop_filter_luma_intra(pix, stride, alpha, beta);
+ }
+}
+
+static void av_always_inline filter_mb_edgech( uint8_t *pix, int stride, int16_t bS[4], unsigned int qp, H264Context *h ) {
+ const unsigned int index_a = qp + h->slice_alpha_c0_offset;
+ const int alpha = alpha_table[index_a];
+ const int beta = beta_table[qp + h->slice_beta_offset];
+ if (alpha ==0 || beta == 0) return;
+
+ if( bS[0] < 4 ) {
+ int8_t tc[4];
+ tc[0] = tc0_table[index_a][bS[0]]+1;
+ tc[1] = tc0_table[index_a][bS[1]]+1;
+ tc[2] = tc0_table[index_a][bS[2]]+1;
+ tc[3] = tc0_table[index_a][bS[3]]+1;
+ h->s.dsp.h264_v_loop_filter_chroma(pix, stride, alpha, beta, tc);
+ } else {
+ h->s.dsp.h264_v_loop_filter_chroma_intra(pix, stride, alpha, beta);
+ }
+}
+
+void ff_h264_filter_mb_fast( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize) {
+ MpegEncContext * const s = &h->s;
+ int mb_xy;
+ int mb_type, left_type;
+ int qp, qp0, qp1, qpc, qpc0, qpc1, qp_thresh;
+
+ mb_xy = h->mb_xy;
+
+ if(!h->top_type || !s->dsp.h264_loop_filter_strength || h->pps.chroma_qp_diff) {
+ ff_h264_filter_mb(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize);
+ return;
+ }
+ assert(!FRAME_MBAFF);
+ left_type= h->left_type[0];
+
+ mb_type = s->current_picture.mb_type[mb_xy];
+ qp = s->current_picture.qscale_table[mb_xy];
+ qp0 = s->current_picture.qscale_table[mb_xy-1];
+ qp1 = s->current_picture.qscale_table[h->top_mb_xy];
+ qpc = get_chroma_qp( h, 0, qp );
+ qpc0 = get_chroma_qp( h, 0, qp0 );
+ qpc1 = get_chroma_qp( h, 0, qp1 );
+ qp0 = (qp + qp0 + 1) >> 1;
+ qp1 = (qp + qp1 + 1) >> 1;
+ qpc0 = (qpc + qpc0 + 1) >> 1;
+ qpc1 = (qpc + qpc1 + 1) >> 1;
+ qp_thresh = 15+52 - h->slice_alpha_c0_offset;
+ if(qp <= qp_thresh && qp0 <= qp_thresh && qp1 <= qp_thresh &&
+ qpc <= qp_thresh && qpc0 <= qp_thresh && qpc1 <= qp_thresh)
+ return;
+
+ if( IS_INTRA(mb_type) ) {
+ int16_t bS4[4] = {4,4,4,4};
+ int16_t bS3[4] = {3,3,3,3};
+ int16_t *bSH = FIELD_PICTURE ? bS3 : bS4;
+ if( IS_8x8DCT(mb_type) ) {
+ if(left_type)
+ filter_mb_edgev( &img_y[4*0], linesize, bS4, qp0, h);
+ filter_mb_edgev( &img_y[4*2], linesize, bS3, qp, h);
+ filter_mb_edgeh( &img_y[4*0*linesize], linesize, bSH, qp1, h);
+ filter_mb_edgeh( &img_y[4*2*linesize], linesize, bS3, qp, h);
+ } else {
+ if(left_type)
+ filter_mb_edgev( &img_y[4*0], linesize, bS4, qp0, h);
+ filter_mb_edgev( &img_y[4*1], linesize, bS3, qp, h);
+ filter_mb_edgev( &img_y[4*2], linesize, bS3, qp, h);
+ filter_mb_edgev( &img_y[4*3], linesize, bS3, qp, h);
+ filter_mb_edgeh( &img_y[4*0*linesize], linesize, bSH, qp1, h);
+ filter_mb_edgeh( &img_y[4*1*linesize], linesize, bS3, qp, h);
+ filter_mb_edgeh( &img_y[4*2*linesize], linesize, bS3, qp, h);
+ filter_mb_edgeh( &img_y[4*3*linesize], linesize, bS3, qp, h);
+ }
+ if(left_type){
+ filter_mb_edgecv( &img_cb[2*0], uvlinesize, bS4, qpc0, h);
+ filter_mb_edgecv( &img_cr[2*0], uvlinesize, bS4, qpc0, h);
+ }
+ filter_mb_edgecv( &img_cb[2*2], uvlinesize, bS3, qpc, h);
+ filter_mb_edgecv( &img_cr[2*2], uvlinesize, bS3, qpc, h);
+ filter_mb_edgech( &img_cb[2*0*uvlinesize], uvlinesize, bSH, qpc1, h);
+ filter_mb_edgech( &img_cb[2*2*uvlinesize], uvlinesize, bS3, qpc, h);
+ filter_mb_edgech( &img_cr[2*0*uvlinesize], uvlinesize, bSH, qpc1, h);
+ filter_mb_edgech( &img_cr[2*2*uvlinesize], uvlinesize, bS3, qpc, h);
+ return;
+ } else {
+ DECLARE_ALIGNED_8(int16_t, bS)[2][4][4];
+ uint64_t (*bSv)[4] = (uint64_t(*)[4])bS;
+ int edges;
+ if( IS_8x8DCT(mb_type) && (h->cbp&7) == 7 ) {
+ edges = 4;
+ bSv[0][0] = bSv[0][2] = bSv[1][0] = bSv[1][2] = 0x0002000200020002ULL;
+ } else {
+ int mask_edge1 = (mb_type & (MB_TYPE_16x16 | MB_TYPE_8x16)) ? 3 :
+ (mb_type & MB_TYPE_16x8) ? 1 : 0;
+ int mask_edge0 = (mb_type & (MB_TYPE_16x16 | MB_TYPE_8x16))
+ && (h->left_type[0] & (MB_TYPE_16x16 | MB_TYPE_8x16))
+ ? 3 : 0;
+ int step = IS_8x8DCT(mb_type) ? 2 : 1;
+ edges = (mb_type & MB_TYPE_16x16) && !(h->cbp & 15) ? 1 : 4;
+ s->dsp.h264_loop_filter_strength( bS, h->non_zero_count_cache, h->ref_cache, h->mv_cache,
+ h->list_count==2, edges, step, mask_edge0, mask_edge1, FIELD_PICTURE);
+ }
+ if( IS_INTRA(h->left_type[0]) )
+ bSv[0][0] = 0x0004000400040004ULL;
+ if( IS_INTRA(h->top_type) )
+ bSv[1][0] = FIELD_PICTURE ? 0x0003000300030003ULL : 0x0004000400040004ULL;
+
+#define FILTER(hv,dir,edge)\
+ if(bSv[dir][edge]) {\
+ filter_mb_edge##hv( &img_y[4*edge*(dir?linesize:1)], linesize, bS[dir][edge], edge ? qp : qp##dir, h );\
+ if(!(edge&1)) {\
+ filter_mb_edgec##hv( &img_cb[2*edge*(dir?uvlinesize:1)], uvlinesize, bS[dir][edge], edge ? qpc : qpc##dir, h );\
+ filter_mb_edgec##hv( &img_cr[2*edge*(dir?uvlinesize:1)], uvlinesize, bS[dir][edge], edge ? qpc : qpc##dir, h );\
+ }\
+ }
+ if( edges == 1 ) {
+ if(left_type)
+ FILTER(v,0,0);
+ FILTER(h,1,0);
+ } else if( IS_8x8DCT(mb_type) ) {
+ if(left_type)
+ FILTER(v,0,0);
+ FILTER(v,0,2);
+ FILTER(h,1,0);
+ FILTER(h,1,2);
+ } else {
+ if(left_type)
+ FILTER(v,0,0);
+ FILTER(v,0,1);
+ FILTER(v,0,2);
+ FILTER(v,0,3);
+ FILTER(h,1,0);
+ FILTER(h,1,1);
+ FILTER(h,1,2);
+ FILTER(h,1,3);
+ }
+#undef FILTER
+ }
+}
+
+static int check_mv(H264Context *h, long b_idx, long bn_idx, int mvy_limit){
+ int v;
+
+ v = h->ref_cache[0][b_idx] != h->ref_cache[0][bn_idx] |
+ h->mv_cache[0][b_idx][0] - h->mv_cache[0][bn_idx][0] + 3 >= 7U |
+ FFABS( h->mv_cache[0][b_idx][1] - h->mv_cache[0][bn_idx][1] ) >= mvy_limit;
+
+ if(h->list_count==2){
+ if(!v)
+ v = h->ref_cache[1][b_idx] != h->ref_cache[1][bn_idx] |
+ h->mv_cache[1][b_idx][0] - h->mv_cache[1][bn_idx][0] + 3 >= 7U |
+ FFABS( h->mv_cache[1][b_idx][1] - h->mv_cache[1][bn_idx][1] ) >= mvy_limit;
+
+ if(v){
+ if(h->ref_cache[0][b_idx] != h->ref_cache[1][bn_idx] |
+ h->ref_cache[1][b_idx] != h->ref_cache[0][bn_idx])
+ return 1;
+ return
+ h->mv_cache[0][b_idx][0] - h->mv_cache[1][bn_idx][0] + 3 >= 7U |
+ FFABS( h->mv_cache[0][b_idx][1] - h->mv_cache[1][bn_idx][1] ) >= mvy_limit |
+ h->mv_cache[1][b_idx][0] - h->mv_cache[0][bn_idx][0] + 3 >= 7U |
+ FFABS( h->mv_cache[1][b_idx][1] - h->mv_cache[0][bn_idx][1] ) >= mvy_limit;
+ }
+ }
+
+ return v;
+}
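+/* Editorial sketch (not part of the original commit): the unsigned test used
+ * above, dx + 3 >= 7U, is a branch-free way of checking |dx| >= 4 (one full
+ * pel, since motion vectors are in quarter-pel units): a negative dx wraps
+ * to a huge unsigned value.  Standalone check for one MV pair (hypothetical
+ * helper, illustration only): */
+#if 0
+static int mv_pair_differs(int dx, int dy, int mvy_limit)
+{
+    return ((unsigned)(dx + 3) >= 7U) | (FFABS(dy) >= mvy_limit);
+}
+#endif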
+
+static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize, int mb_xy, int mb_type, int mvy_limit, int first_vertical_edge_done, int dir) {
+ MpegEncContext * const s = &h->s;
+ int edge;
+ const int mbm_xy = dir == 0 ? mb_xy -1 : h->top_mb_xy;
+ const int mbm_type = dir == 0 ? h->left_type[0] : h->top_type;
+
+ // how often to recheck mv-based bS when iterating between edges
+ static const uint8_t mask_edge_tab[2][8]={{0,3,3,3,1,1,1,1},
+ {0,3,1,1,3,3,3,3}};
+ const int mask_edge = mask_edge_tab[dir][(mb_type>>3)&7];
+ const int edges = mask_edge== 3 && !(h->cbp&15) ? 1 : 4;
+
+ // how often to recheck mv-based bS when iterating along each edge
+ const int mask_par0 = mb_type & (MB_TYPE_16x16 | (MB_TYPE_8x16 >> dir));
+
+ if(mbm_type && !first_vertical_edge_done){
+
+ if (FRAME_MBAFF && (dir == 1) && ((mb_y&1) == 0)
+ && IS_INTERLACED(mbm_type&~mb_type)
+ ) {
+ // This is a special case in the standard where the filtering must
+ // be done twice (once for each field) even if we are in a
+ // frame macroblock.
+ //
+ unsigned int tmp_linesize = 2 * linesize;
+ unsigned int tmp_uvlinesize = 2 * uvlinesize;
+ int mbn_xy = mb_xy - 2 * s->mb_stride;
+ int j;
+
+ for(j=0; j<2; j++, mbn_xy += s->mb_stride){
+ DECLARE_ALIGNED_8(int16_t, bS)[4];
+ int qp;
+ if( IS_INTRA(mb_type|s->current_picture.mb_type[mbn_xy]) ) {
+ *(uint64_t*)bS= 0x0003000300030003ULL;
+ } else {
+ const uint8_t *mbn_nnz = h->non_zero_count[mbn_xy] + 4+3*8; //FIXME 8x8dct?
+ int i;
+ for( i = 0; i < 4; i++ ) {
+ bS[i] = 1 + !!(h->non_zero_count_cache[scan8[0]+i] | mbn_nnz[i]);
+ }
+ }
+ // Do not use s->qscale as the luma quantizer because it does not have
+ // the same value in IPCM macroblocks.
+ qp = ( s->current_picture.qscale_table[mb_xy] + s->current_picture.qscale_table[mbn_xy] + 1 ) >> 1;
+ tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, tmp_linesize, tmp_uvlinesize);
+ { int i; for (i = 0; i < 4; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); }
+ filter_mb_edgeh( &img_y[j*linesize], tmp_linesize, bS, qp, h );
+ filter_mb_edgech( &img_cb[j*uvlinesize], tmp_uvlinesize, bS,
+ ( h->chroma_qp[0] + get_chroma_qp( h, 0, s->current_picture.qscale_table[mbn_xy] ) + 1 ) >> 1, h);
+ filter_mb_edgech( &img_cr[j*uvlinesize], tmp_uvlinesize, bS,
+ ( h->chroma_qp[1] + get_chroma_qp( h, 1, s->current_picture.qscale_table[mbn_xy] ) + 1 ) >> 1, h);
+ }
+ }else{
+ DECLARE_ALIGNED_8(int16_t, bS)[4];
+ int qp;
+
+ if( IS_INTRA(mb_type|mbm_type)) {
+ *(uint64_t*)bS= 0x0003000300030003ULL;
+ if ( (!IS_INTERLACED(mb_type|mbm_type))
+ || ((FRAME_MBAFF || (s->picture_structure != PICT_FRAME)) && (dir == 0))
+ )
+ *(uint64_t*)bS= 0x0004000400040004ULL;
+ } else {
+ int i, l;
+ int mv_done;
+
+ if( FRAME_MBAFF && IS_INTERLACED(mb_type ^ mbm_type)) { //FIXME not possible for left
+ *(uint64_t*)bS= 0x0001000100010001ULL;
+ mv_done = 1;
+ }
+ else if( mask_par0 && ((mbm_type & (MB_TYPE_16x16 | (MB_TYPE_8x16 >> dir)))) ) {
+ int b_idx= 8 + 4;
+ int bn_idx= b_idx - (dir ? 8:1);
+
+ bS[0] = bS[1] = bS[2] = bS[3] = check_mv(h, 8 + 4, bn_idx, mvy_limit);
+ mv_done = 1;
+ }
+ else
+ mv_done = 0;
+
+ for( i = 0; i < 4; i++ ) {
+ int x = dir == 0 ? 0 : i;
+ int y = dir == 0 ? i : 0;
+ int b_idx= 8 + 4 + x + 8*y;
+ int bn_idx= b_idx - (dir ? 8:1);
+
+ if( h->non_zero_count_cache[b_idx] |
+ h->non_zero_count_cache[bn_idx] ) {
+ bS[i] = 2;
+ }
+ else if(!mv_done)
+ {
+ bS[i] = check_mv(h, b_idx, bn_idx, mvy_limit);
+ }
+ }
+ }
+
+ /* Filter edge */
+ // Do not use s->qscale as the luma quantizer because it does not have
+ // the same value in IPCM macroblocks.
+ if(bS[0]+bS[1]+bS[2]+bS[3]){
+ qp = ( s->current_picture.qscale_table[mb_xy] + s->current_picture.qscale_table[mbm_xy] + 1 ) >> 1;
+ //tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d, QPc:%d, QPcn:%d\n", mb_x, mb_y, dir, edge, qp, h->chroma_qp[0], s->current_picture.qscale_table[mbn_xy]);
+ tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize);
+ //{ int i; for (i = 0; i < 4; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); }
+ if( dir == 0 ) {
+ filter_mb_edgev( &img_y[0], linesize, bS, qp, h );
+ {
+ int qp= ( h->chroma_qp[0] + get_chroma_qp( h, 0, s->current_picture.qscale_table[mbm_xy] ) + 1 ) >> 1;
+ filter_mb_edgecv( &img_cb[0], uvlinesize, bS, qp, h);
+ if(h->pps.chroma_qp_diff)
+ qp= ( h->chroma_qp[1] + get_chroma_qp( h, 1, s->current_picture.qscale_table[mbm_xy] ) + 1 ) >> 1;
+ filter_mb_edgecv( &img_cr[0], uvlinesize, bS, qp, h);
+ }
+ } else {
+ filter_mb_edgeh( &img_y[0], linesize, bS, qp, h );
+ {
+ int qp= ( h->chroma_qp[0] + get_chroma_qp( h, 0, s->current_picture.qscale_table[mbm_xy] ) + 1 ) >> 1;
+ filter_mb_edgech( &img_cb[0], uvlinesize, bS, qp, h);
+ if(h->pps.chroma_qp_diff)
+ qp= ( h->chroma_qp[1] + get_chroma_qp( h, 1, s->current_picture.qscale_table[mbm_xy] ) + 1 ) >> 1;
+ filter_mb_edgech( &img_cr[0], uvlinesize, bS, qp, h);
+ }
+ }
+ }
+ }
+ }
+
+ /* Calculate bS */
+ for( edge = 1; edge < edges; edge++ ) {
+ DECLARE_ALIGNED_8(int16_t, bS)[4];
+ int qp;
+
+ if( IS_8x8DCT(mb_type & (edge<<24)) ) // (edge&1) && IS_8x8DCT(mb_type)
+ continue;
+
+ if( IS_INTRA(mb_type)) {
+ *(uint64_t*)bS= 0x0003000300030003ULL;
+ } else {
+ int i, l;
+ int mv_done;
+
+ if( edge & mask_edge ) {
+ *(uint64_t*)bS= 0;
+ mv_done = 1;
+ }
+ else if( mask_par0 ) {
+ int b_idx= 8 + 4 + edge * (dir ? 8:1);
+ int bn_idx= b_idx - (dir ? 8:1);
+
+ bS[0] = bS[1] = bS[2] = bS[3] = check_mv(h, b_idx, bn_idx, mvy_limit);
+ mv_done = 1;
+ }
+ else
+ mv_done = 0;
+
+ for( i = 0; i < 4; i++ ) {
+ int x = dir == 0 ? edge : i;
+ int y = dir == 0 ? i : edge;
+ int b_idx= 8 + 4 + x + 8*y;
+ int bn_idx= b_idx - (dir ? 8:1);
+
+ if( h->non_zero_count_cache[b_idx] |
+ h->non_zero_count_cache[bn_idx] ) {
+ bS[i] = 2;
+ }
+ else if(!mv_done)
+ {
+ bS[i] = check_mv(h, b_idx, bn_idx, mvy_limit);
+ }
+ }
+
+ if(bS[0]+bS[1]+bS[2]+bS[3] == 0)
+ continue;
+ }
+
+ /* Filter edge */
+ // Do not use s->qscale as the luma quantizer because it does not have
+ // the same value in IPCM macroblocks.
+ qp = s->current_picture.qscale_table[mb_xy];
+ //tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d, QPc:%d, QPcn:%d\n", mb_x, mb_y, dir, edge, qp, h->chroma_qp[0], s->current_picture.qscale_table[mbn_xy]);
+ tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize);
+ //{ int i; for (i = 0; i < 4; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); }
+ if( dir == 0 ) {
+ filter_mb_edgev( &img_y[4*edge], linesize, bS, qp, h );
+ if( (edge&1) == 0 ) {
+ filter_mb_edgecv( &img_cb[2*edge], uvlinesize, bS, h->chroma_qp[0], h);
+ filter_mb_edgecv( &img_cr[2*edge], uvlinesize, bS, h->chroma_qp[1], h);
+ }
+ } else {
+ filter_mb_edgeh( &img_y[4*edge*linesize], linesize, bS, qp, h );
+ if( (edge&1) == 0 ) {
+ filter_mb_edgech( &img_cb[2*edge*uvlinesize], uvlinesize, bS, h->chroma_qp[0], h);
+ filter_mb_edgech( &img_cr[2*edge*uvlinesize], uvlinesize, bS, h->chroma_qp[1], h);
+ }
+ }
+ }
+}
+
+void ff_h264_filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize) {
+ MpegEncContext * const s = &h->s;
+ const int mb_xy= mb_x + mb_y*s->mb_stride;
+ const int mb_type = s->current_picture.mb_type[mb_xy];
+ const int mvy_limit = IS_INTERLACED(mb_type) ? 2 : 4;
+ int first_vertical_edge_done = 0;
+ av_unused int dir;
+ int list;
+
+ if (FRAME_MBAFF
+ // and current and left pair do not have the same interlaced type
+ && IS_INTERLACED(mb_type^h->left_type[0])
+ // and the left mb is available to us
+ && h->left_type[0]) {
+ /* The first vertical edge is different in MBAFF frames:
+ * there are 8 different bS to compute and 2 different QPs
+ */
+ DECLARE_ALIGNED_8(int16_t, bS)[8];
+ int qp[2];
+ int bqp[2];
+ int rqp[2];
+ int mb_qp, mbn0_qp, mbn1_qp;
+ int i;
+ first_vertical_edge_done = 1;
+
+ if( IS_INTRA(mb_type) )
+ *(uint64_t*)&bS[0]=
+ *(uint64_t*)&bS[4]= 0x0004000400040004ULL;
+ else {
+ static const uint8_t offset[2][2][8]={
+ {
+ {7+8*0, 7+8*0, 7+8*0, 7+8*0, 7+8*1, 7+8*1, 7+8*1, 7+8*1},
+ {7+8*2, 7+8*2, 7+8*2, 7+8*2, 7+8*3, 7+8*3, 7+8*3, 7+8*3},
+ },{
+ {7+8*0, 7+8*1, 7+8*2, 7+8*3, 7+8*0, 7+8*1, 7+8*2, 7+8*3},
+ {7+8*0, 7+8*1, 7+8*2, 7+8*3, 7+8*0, 7+8*1, 7+8*2, 7+8*3},
+ }
+ };
+ const uint8_t *off= offset[MB_FIELD][mb_y&1];
+ for( i = 0; i < 8; i++ ) {
+ int j= MB_FIELD ? i>>2 : i&1;
+ int mbn_xy = h->left_mb_xy[j];
+ int mbn_type= h->left_type[j];
+
+ if( IS_INTRA( mbn_type ) )
+ bS[i] = 4;
+ else{
+ bS[i] = 1 + !!(h->non_zero_count_cache[12+8*(i>>1)] |
+ ((!h->pps.cabac && IS_8x8DCT(mbn_type)) ?
+ (h->cbp_table[mbn_xy] & ((MB_FIELD ? (i&2) : (mb_y&1)) ? 8 : 2))
+ :
+ h->non_zero_count[mbn_xy][ off[i] ]));
+ }
+ }
+ }
+
+ mb_qp = s->current_picture.qscale_table[mb_xy];
+ mbn0_qp = s->current_picture.qscale_table[h->left_mb_xy[0]];
+ mbn1_qp = s->current_picture.qscale_table[h->left_mb_xy[1]];
+ qp[0] = ( mb_qp + mbn0_qp + 1 ) >> 1;
+ bqp[0] = ( get_chroma_qp( h, 0, mb_qp ) +
+ get_chroma_qp( h, 0, mbn0_qp ) + 1 ) >> 1;
+ rqp[0] = ( get_chroma_qp( h, 1, mb_qp ) +
+ get_chroma_qp( h, 1, mbn0_qp ) + 1 ) >> 1;
+ qp[1] = ( mb_qp + mbn1_qp + 1 ) >> 1;
+ bqp[1] = ( get_chroma_qp( h, 0, mb_qp ) +
+ get_chroma_qp( h, 0, mbn1_qp ) + 1 ) >> 1;
+ rqp[1] = ( get_chroma_qp( h, 1, mb_qp ) +
+ get_chroma_qp( h, 1, mbn1_qp ) + 1 ) >> 1;
+
+ /* Filter edge */
+ tprintf(s->avctx, "filter mb:%d/%d MBAFF, QPy:%d/%d, QPb:%d/%d QPr:%d/%d ls:%d uvls:%d", mb_x, mb_y, qp[0], qp[1], bqp[0], bqp[1], rqp[0], rqp[1], linesize, uvlinesize);
+ { int i; for (i = 0; i < 8; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); }
+ if(MB_FIELD){
+ filter_mb_mbaff_edgev ( h, img_y , linesize, bS , 1, qp [0] );
+ filter_mb_mbaff_edgev ( h, img_y + 8* linesize, linesize, bS+4, 1, qp [1] );
+ filter_mb_mbaff_edgecv( h, img_cb, uvlinesize, bS , 1, bqp[0] );
+ filter_mb_mbaff_edgecv( h, img_cb + 4*uvlinesize, uvlinesize, bS+4, 1, bqp[1] );
+ filter_mb_mbaff_edgecv( h, img_cr, uvlinesize, bS , 1, rqp[0] );
+ filter_mb_mbaff_edgecv( h, img_cr + 4*uvlinesize, uvlinesize, bS+4, 1, rqp[1] );
+ }else{
+ filter_mb_mbaff_edgev ( h, img_y , 2* linesize, bS , 2, qp [0] );
+ filter_mb_mbaff_edgev ( h, img_y + linesize, 2* linesize, bS+1, 2, qp [1] );
+ filter_mb_mbaff_edgecv( h, img_cb, 2*uvlinesize, bS , 2, bqp[0] );
+ filter_mb_mbaff_edgecv( h, img_cb + uvlinesize, 2*uvlinesize, bS+1, 2, bqp[1] );
+ filter_mb_mbaff_edgecv( h, img_cr, 2*uvlinesize, bS , 2, rqp[0] );
+ filter_mb_mbaff_edgecv( h, img_cr + uvlinesize, 2*uvlinesize, bS+1, 2, rqp[1] );
+ }
+ }
+
+#if CONFIG_SMALL
+ for( dir = 0; dir < 2; dir++ )
+ filter_mb_dir(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, mb_xy, mb_type, mvy_limit, dir ? 0 : first_vertical_edge_done, dir);
+#else
+ filter_mb_dir(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, mb_xy, mb_type, mvy_limit, first_vertical_edge_done, 0);
+ filter_mb_dir(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, mb_xy, mb_type, mvy_limit, 0, 1);
+#endif
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_mvpred.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_mvpred.h
new file mode 100644
index 000000000..c4e99c244
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_mvpred.h
@@ -0,0 +1,235 @@
+/*
+ * H.26L/H.264/AVC/JVT/14496-10/... motion vector prediction
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/h264_mvpred.h
+ * H.264 / AVC / MPEG4 part10 motion vector prediction.
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#ifndef AVCODEC_H264_MVPRED_H
+#define AVCODEC_H264_MVPRED_H
+
+#include "internal.h"
+#include "avcodec.h"
+#include "h264.h"
+
+//#undef NDEBUG
+#include <assert.h>
+
+static inline int fetch_diagonal_mv(H264Context *h, const int16_t **C, int i, int list, int part_width){
+ const int topright_ref= h->ref_cache[list][ i - 8 + part_width ];
+ MpegEncContext *s = &h->s;
+
+ /* there is no consistent mapping of mvs to neighboring locations that will
+ * make mbaff happy, so we can't move all this logic to fill_caches */
+ if(FRAME_MBAFF){
+
+#define SET_DIAG_MV(MV_OP, REF_OP, X4, Y4)\
+ const int x4 = X4, y4 = Y4;\
+ const int mb_type = mb_types[(x4>>2)+(y4>>2)*s->mb_stride];\
+ if(!USES_LIST(mb_type,list))\
+ return LIST_NOT_USED;\
+ mv = s->current_picture_ptr->motion_val[list][x4 + y4*h->b_stride];\
+ h->mv_cache[list][scan8[0]-2][0] = mv[0];\
+ h->mv_cache[list][scan8[0]-2][1] = mv[1] MV_OP;\
+ return s->current_picture_ptr->ref_index[list][(x4>>1) + (y4>>1)*h->b8_stride] REF_OP;
+
+ if(topright_ref == PART_NOT_AVAILABLE
+ && i >= scan8[0]+8 && (i&7)==4
+ && h->ref_cache[list][scan8[0]-1] != PART_NOT_AVAILABLE){
+ const uint32_t *mb_types = s->current_picture_ptr->mb_type;
+ const int16_t *mv;
+ *(uint32_t*)h->mv_cache[list][scan8[0]-2] = 0;
+ *C = h->mv_cache[list][scan8[0]-2];
+
+ if(!MB_FIELD
+ && IS_INTERLACED(mb_types[h->left_mb_xy[0]])){
+ SET_DIAG_MV(*2, >>1, s->mb_x*4-1, (s->mb_y|1)*4+(s->mb_y&1)*2+(i>>4)-1);
+ }
+ if(MB_FIELD
+ && !IS_INTERLACED(mb_types[h->left_mb_xy[0]])){
+ // left shift will turn LIST_NOT_USED into PART_NOT_AVAILABLE, but that's OK.
+ SET_DIAG_MV(/2, <<1, s->mb_x*4-1, (s->mb_y&~1)*4 - 1 + ((i-scan8[0])>>3)*2);
+ }
+ }
+#undef SET_DIAG_MV
+ }
+
+ if(topright_ref != PART_NOT_AVAILABLE){
+ *C= h->mv_cache[list][ i - 8 + part_width ];
+ return topright_ref;
+ }else{
+ tprintf(s->avctx, "topright MV not available\n");
+
+ *C= h->mv_cache[list][ i - 8 - 1 ];
+ return h->ref_cache[list][ i - 8 - 1 ];
+ }
+}
+
+/**
+ * gets the predicted MV.
+ * @param n the block index
+ * @param part_width the width of the partition (4, 8, 16) -> (1, 2, 4)
+ * @param mx the x component of the predicted motion vector
+ * @param my the y component of the predicted motion vector
+ */
+static inline void pred_motion(H264Context * const h, int n, int part_width, int list, int ref, int * const mx, int * const my){
+ const int index8= scan8[n];
+ const int top_ref= h->ref_cache[list][ index8 - 8 ];
+ const int left_ref= h->ref_cache[list][ index8 - 1 ];
+ const int16_t * const A= h->mv_cache[list][ index8 - 1 ];
+ const int16_t * const B= h->mv_cache[list][ index8 - 8 ];
+ const int16_t * C;
+ int diagonal_ref, match_count;
+
+ assert(part_width==1 || part_width==2 || part_width==4);
+
+/* mv_cache
+ B . . A T T T T
+ U . . L . . , .
+ U . . L . . . .
+ U . . L . . , .
+ . . . L . . . .
+*/
+
+ diagonal_ref= fetch_diagonal_mv(h, &C, index8, list, part_width);
+ match_count= (diagonal_ref==ref) + (top_ref==ref) + (left_ref==ref);
+ tprintf(h->s.avctx, "pred_motion match_count=%d\n", match_count);
+ if(match_count > 1){ //most common
+ *mx= mid_pred(A[0], B[0], C[0]);
+ *my= mid_pred(A[1], B[1], C[1]);
+ }else if(match_count==1){
+ if(left_ref==ref){
+ *mx= A[0];
+ *my= A[1];
+ }else if(top_ref==ref){
+ *mx= B[0];
+ *my= B[1];
+ }else{
+ *mx= C[0];
+ *my= C[1];
+ }
+ }else{
+ if(top_ref == PART_NOT_AVAILABLE && diagonal_ref == PART_NOT_AVAILABLE && left_ref != PART_NOT_AVAILABLE){
+ *mx= A[0];
+ *my= A[1];
+ }else{
+ *mx= mid_pred(A[0], B[0], C[0]);
+ *my= mid_pred(A[1], B[1], C[1]);
+ }
+ }
+
+ tprintf(h->s.avctx, "pred_motion (%2d %2d %2d) (%2d %2d %2d) (%2d %2d %2d) -> (%2d %2d %2d) at %2d %2d %d list %d\n", top_ref, B[0], B[1], diagonal_ref, C[0], C[1], left_ref, A[0], A[1], ref, *mx, *my, h->s.mb_x, h->s.mb_y, n, list);
+}
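+/* Editorial sketch (not part of the original commit): in the common case
+ * above the predictor is the component-wise median of the A (left), B (top)
+ * and C (diagonal) motion vectors, which is what mid_pred() computes.
+ * Equivalent standalone median of three (illustration only): */
+#if 0
+static int median3(int a, int b, int c)
+{
+    if (a > b) { int t = a; a = b; b = t; }   /* ensure a <= b */
+    return c < a ? a : (c > b ? b : c);       /* clamp c into [a, b] */
+}
+/* e.g. with A[0]=-2, B[0]=5, C[0]=1 the predicted *mx is median3(-2,5,1) == 1 */
+#endif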
+
+/**
+ * gets the directionally predicted 16x8 MV.
+ * @param n the block index
+ * @param mx the x component of the predicted motion vector
+ * @param my the y component of the predicted motion vector
+ */
+static inline void pred_16x8_motion(H264Context * const h, int n, int list, int ref, int * const mx, int * const my){
+ if(n==0){
+ const int top_ref= h->ref_cache[list][ scan8[0] - 8 ];
+ const int16_t * const B= h->mv_cache[list][ scan8[0] - 8 ];
+
+ tprintf(h->s.avctx, "pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n", top_ref, B[0], B[1], h->s.mb_x, h->s.mb_y, n, list);
+
+ if(top_ref == ref){
+ *mx= B[0];
+ *my= B[1];
+ return;
+ }
+ }else{
+ const int left_ref= h->ref_cache[list][ scan8[8] - 1 ];
+ const int16_t * const A= h->mv_cache[list][ scan8[8] - 1 ];
+
+ tprintf(h->s.avctx, "pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n", left_ref, A[0], A[1], h->s.mb_x, h->s.mb_y, n, list);
+
+ if(left_ref == ref){
+ *mx= A[0];
+ *my= A[1];
+ return;
+ }
+ }
+
+ //RARE
+ pred_motion(h, n, 4, list, ref, mx, my);
+}
+
+/**
+ * gets the directionally predicted 8x16 MV.
+ * @param n the block index
+ * @param mx the x component of the predicted motion vector
+ * @param my the y component of the predicted motion vector
+ */
+static inline void pred_8x16_motion(H264Context * const h, int n, int list, int ref, int * const mx, int * const my){
+ if(n==0){
+ const int left_ref= h->ref_cache[list][ scan8[0] - 1 ];
+ const int16_t * const A= h->mv_cache[list][ scan8[0] - 1 ];
+
+ tprintf(h->s.avctx, "pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n", left_ref, A[0], A[1], h->s.mb_x, h->s.mb_y, n, list);
+
+ if(left_ref == ref){
+ *mx= A[0];
+ *my= A[1];
+ return;
+ }
+ }else{
+ const int16_t * C;
+ int diagonal_ref;
+
+ diagonal_ref= fetch_diagonal_mv(h, &C, scan8[4], list, 2);
+
+ tprintf(h->s.avctx, "pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n", diagonal_ref, C[0], C[1], h->s.mb_x, h->s.mb_y, n, list);
+
+ if(diagonal_ref == ref){
+ *mx= C[0];
+ *my= C[1];
+ return;
+ }
+ }
+
+ //RARE
+ pred_motion(h, n, 2, list, ref, mx, my);
+}
+
+static inline void pred_pskip_motion(H264Context * const h, int * const mx, int * const my){
+ const int top_ref = h->ref_cache[0][ scan8[0] - 8 ];
+ const int left_ref= h->ref_cache[0][ scan8[0] - 1 ];
+
+ tprintf(h->s.avctx, "pred_pskip: (%d) (%d) at %2d %2d\n", top_ref, left_ref, h->s.mb_x, h->s.mb_y);
+
+ if(top_ref == PART_NOT_AVAILABLE || left_ref == PART_NOT_AVAILABLE
+ || !( top_ref | *(uint32_t*)h->mv_cache[0][ scan8[0] - 8 ])
+ || !(left_ref | *(uint32_t*)h->mv_cache[0][ scan8[0] - 1 ])){
+
+ *mx = *my = 0;
+ return;
+ }
+
+ pred_motion(h, 0, 4, 0, 0, mx, my);
+
+ return;
+}
+
+#endif /* AVCODEC_H264_MVPRED_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_parser.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_parser.c
new file mode 100644
index 000000000..08cb8f9fb
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_parser.c
@@ -0,0 +1,234 @@
+/*
+ * H.26L/H.264/AVC/JVT/14496-10/... parser
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/h264_parser.c
+ * H.264 / AVC / MPEG4 part10 parser.
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#include "parser.h"
+#include "h264_parser.h"
+#include "h264data.h"
+#include "golomb.h"
+
+#include <assert.h>
+
+
+int ff_h264_find_frame_end(H264Context *h, const uint8_t *buf, int buf_size)
+{
+ int i;
+ uint32_t state;
+ ParseContext *pc = &(h->s.parse_context);
+//printf("first %02X%02X%02X%02X\n", buf[0], buf[1],buf[2],buf[3]);
+// mb_addr= pc->mb_addr - 1;
+ state= pc->state;
+ if(state>13)
+ state= 7;
+
+ for(i=0; i<buf_size; i++){
+ if(state==7){
+#if HAVE_FAST_UNALIGNED
+ /* we check i<buf_size instead of i+3/7 because it's simpler
+ * and there should be FF_INPUT_BUFFER_PADDING_SIZE bytes at the end
+ */
+# if HAVE_FAST_64BIT
+ while(i<buf_size && !((~*(const uint64_t*)(buf+i) & (*(const uint64_t*)(buf+i) - 0x0101010101010101ULL)) & 0x8080808080808080ULL))
+ i+=8;
+# else
+ while(i<buf_size && !((~*(const uint32_t*)(buf+i) & (*(const uint32_t*)(buf+i) - 0x01010101U)) & 0x80808080U))
+ i+=4;
+# endif
+#endif
+ for(; i<buf_size; i++){
+ if(!buf[i]){
+ state=2;
+ break;
+ }
+ }
+ }else if(state<=2){
+ if(buf[i]==1) state^= 5; //2->7, 1->4, 0->5
+ else if(buf[i]) state = 7;
+ else state>>=1; //2->1, 1->0, 0->0
+ }else if(state<=5){
+ int v= buf[i] & 0x1F;
+ if(v==6 || v==7 || v==8 || v==9){
+ if(pc->frame_start_found){
+ i++;
+ goto found;
+ }
+ }else if(v==1 || v==2 || v==5){
+ if(pc->frame_start_found){
+ state+=8;
+ continue;
+ }else
+ pc->frame_start_found = 1;
+ }
+ state= 7;
+ }else{
+ if(buf[i] & 0x80)
+ goto found;
+ state= 7;
+ }
+ }
+ pc->state= state;
+ return END_NOT_FOUND;
+
+found:
+ pc->state=7;
+ pc->frame_start_found= 0;
+ return i-(state&5);
+}
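+/* Editorial sketch (not part of the original commit): the HAVE_FAST_UNALIGNED
+ * loops above skip 4 or 8 bytes at a time by testing whether a whole word
+ * contains a zero byte, using the classic (~v & (v - 0x01..01)) & 0x80..80
+ * trick; the expression is non-zero iff the word has at least one zero byte.
+ * 32-bit version in isolation (illustration only): */
+#if 0
+static int word_has_zero_byte32(uint32_t v)
+{
+    return ((~v & (v - 0x01010101U)) & 0x80808080U) != 0;
+}
+/* e.g. word_has_zero_byte32(0x12003456) == 1, word_has_zero_byte32(0x12345678) == 0 */
+#endif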
+
+/*!
+ * Parse the NAL units of a found picture and decode some basic information.
+ *
+ * @param s parser context.
+ * @param avctx codec context.
+ * @param buf buffer with field/frame data.
+ * @param buf_size size of the buffer.
+ */
+static inline int parse_nal_units(AVCodecParserContext *s,
+ AVCodecContext *avctx,
+ const uint8_t *buf, int buf_size)
+{
+ H264Context *h = s->priv_data;
+ const uint8_t *buf_end = buf + buf_size;
+ unsigned int pps_id;
+ unsigned int slice_type;
+ int state = -1;
+ const uint8_t *ptr;
+
+ /* set some sane default values */
+ s->pict_type = FF_I_TYPE;
+ s->key_frame = 0;
+
+ h->s.avctx= avctx;
+ h->sei_recovery_frame_cnt = -1;
+ h->sei_dpb_output_delay = 0;
+ h->sei_cpb_removal_delay = -1;
+ h->sei_buffering_period_present = 0;
+
+ for(;;) {
+ int src_length, dst_length, consumed;
+ buf = ff_find_start_code(buf, buf_end, &state);
+ if(buf >= buf_end)
+ break;
+ --buf;
+ src_length = buf_end - buf;
+ switch (state & 0x1f) {
+ case NAL_SLICE:
+ case NAL_IDR_SLICE:
+ // Do not walk the whole buffer just to decode slice header
+ if (src_length > 20)
+ src_length = 20;
+ break;
+ }
+ ptr= ff_h264_decode_nal(h, buf, &dst_length, &consumed, src_length);
+ if (ptr==NULL || dst_length < 0)
+ break;
+
+ init_get_bits(&h->s.gb, ptr, 8*dst_length);
+ switch(h->nal_unit_type) {
+ case NAL_SPS:
+ ff_h264_decode_seq_parameter_set(h);
+ break;
+ case NAL_PPS:
+ ff_h264_decode_picture_parameter_set(h, h->s.gb.size_in_bits);
+ break;
+ case NAL_SEI:
+ ff_h264_decode_sei(h);
+ break;
+ case NAL_IDR_SLICE:
+ s->key_frame = 1;
+ /* fall through */
+ case NAL_SLICE:
+ get_ue_golomb(&h->s.gb); // skip first_mb_in_slice
+ slice_type = get_ue_golomb_31(&h->s.gb);
+ s->pict_type = golomb_to_pict_type[slice_type % 5];
+ if (h->sei_recovery_frame_cnt >= 0) {
+ /* key frame, since recovery_frame_cnt is set */
+ s->key_frame = 1;
+ }
+ pps_id= get_ue_golomb(&h->s.gb);
+ if(pps_id>=MAX_PPS_COUNT) {
+ av_log(h->s.avctx, AV_LOG_ERROR, "pps_id out of range\n");
+ return -1;
+ }
+ if(!h->pps_buffers[pps_id]) {
+ av_log(h->s.avctx, AV_LOG_ERROR, "non-existing PPS referenced\n");
+ return -1;
+ }
+ h->pps= *h->pps_buffers[pps_id];
+ if(!h->sps_buffers[h->pps.sps_id]) {
+ av_log(h->s.avctx, AV_LOG_ERROR, "non-existing SPS referenced\n");
+ return -1;
+ }
+ h->sps = *h->sps_buffers[h->pps.sps_id];
+ h->frame_num = get_bits(&h->s.gb, h->sps.log2_max_frame_num);
+
+ if(h->sps.frame_mbs_only_flag){
+ h->s.picture_structure= PICT_FRAME;
+ }else{
+ if(get_bits1(&h->s.gb)) { //field_pic_flag
+ h->s.picture_structure= PICT_TOP_FIELD + get_bits1(&h->s.gb); //bottom_field_flag
+ } else {
+ h->s.picture_structure= PICT_FRAME;
+ }
+ }
+
+ if(h->sps.pic_struct_present_flag) {
+ switch (h->sei_pic_struct) {
+ case SEI_PIC_STRUCT_TOP_FIELD:
+ case SEI_PIC_STRUCT_BOTTOM_FIELD:
+ s->repeat_pict = -1;
+ break;
+ case SEI_PIC_STRUCT_FRAME:
+ case SEI_PIC_STRUCT_TOP_BOTTOM:
+ case SEI_PIC_STRUCT_BOTTOM_TOP:
+ s->repeat_pict = 0;
+ break;
+ case SEI_PIC_STRUCT_TOP_BOTTOM_TOP:
+ case SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM:
+ s->repeat_pict = 1;
+ break;
+ case SEI_PIC_STRUCT_FRAME_DOUBLING:
+ s->repeat_pict = 2;
+ break;
+ case SEI_PIC_STRUCT_FRAME_TRIPLING:
+ s->repeat_pict = 4;
+ break;
+ default:
+ s->repeat_pict = h->s.picture_structure == PICT_FRAME ? 0 : -1;
+ break;
+ }
+ } else {
+ s->repeat_pict = h->s.picture_structure == PICT_FRAME ? 0 : -1;
+ }
+
+ return 0; /* no need to evaluate the rest */
+ }
+ buf += consumed;
+ }
+ /* didn't find a picture! */
+ av_log(h->s.avctx, AV_LOG_ERROR, "missing picture in access unit\n");
+ return -1;
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_parser.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_parser.h
new file mode 100644
index 000000000..27ae0c1ab
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_parser.h
@@ -0,0 +1,39 @@
+/*
+ * H.26L/H.264/AVC/JVT/14496-10/... parser
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file h264_parser.h
+ * H.264 / AVC / MPEG4 part10 parser.
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#ifndef AVCODEC_H264_PARSER_H
+#define AVCODEC_H264_PARSER_H
+
+#include "h264.h"
+
+/**
+ * finds the end of the current frame in the bitstream.
+ * @return the position of the first byte of the next frame, or -1
+ */
+int ff_h264_find_frame_end(H264Context *h, const uint8_t *buf, int buf_size);
+
+#endif /* AVCODEC_H264_PARSER_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_ps.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_ps.c
new file mode 100644
index 000000000..7fe8f0f08
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_ps.c
@@ -0,0 +1,544 @@
+/*
+ * H.26L/H.264/AVC/JVT/14496-10/... parameter set decoding
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/h264_ps.c
+ * H.264 / AVC / MPEG4 part10 parameter set decoding.
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#include "internal.h"
+#include "dsputil.h"
+#include "avcodec.h"
+#include "h264.h"
+#include "h264data.h" //FIXME FIXME FIXME (just for zigzag_scan)
+#include "golomb.h"
+
+
+//#undef NDEBUG
+#include <assert.h>
+
+static const AVRational pixel_aspect[17]={
+ {0, 1},
+ {1, 1},
+ {12, 11},
+ {10, 11},
+ {16, 11},
+ {40, 33},
+ {24, 11},
+ {20, 11},
+ {32, 11},
+ {80, 33},
+ {18, 11},
+ {15, 11},
+ {64, 33},
+ {160,99},
+ {4, 3},
+ {3, 2},
+ {2, 1},
+};
+
+const uint8_t ff_h264_chroma_qp[52]={
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,
+ 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,
+ 28,29,29,30,31,32,32,33,34,34,35,35,36,36,37,37,
+ 37,38,38,38,39,39,39,39
+};
+
+static const uint8_t default_scaling4[2][16]={
+{ 6,13,20,28,
+ 13,20,28,32,
+ 20,28,32,37,
+ 28,32,37,42
+},{
+ 10,14,20,24,
+ 14,20,24,27,
+ 20,24,27,30,
+ 24,27,30,34
+}};
+
+static const uint8_t default_scaling8[2][64]={
+{ 6,10,13,16,18,23,25,27,
+ 10,11,16,18,23,25,27,29,
+ 13,16,18,23,25,27,29,31,
+ 16,18,23,25,27,29,31,33,
+ 18,23,25,27,29,31,33,36,
+ 23,25,27,29,31,33,36,38,
+ 25,27,29,31,33,36,38,40,
+ 27,29,31,33,36,38,40,42
+},{
+ 9,13,15,17,19,21,22,24,
+ 13,13,17,19,21,22,24,25,
+ 15,17,19,21,22,24,25,27,
+ 17,19,21,22,24,25,27,28,
+ 19,21,22,24,25,27,28,30,
+ 21,22,24,25,27,28,30,32,
+ 22,24,25,27,28,30,32,33,
+ 24,25,27,28,30,32,33,35
+}};
+
+static inline int decode_hrd_parameters(H264Context *h, SPS *sps){
+ MpegEncContext * const s = &h->s;
+ int cpb_count, i;
+ cpb_count = get_ue_golomb_31(&s->gb) + 1;
+
+ if(cpb_count > 32U){
+ av_log(h->s.avctx, AV_LOG_ERROR, "cpb_count %d invalid\n", cpb_count);
+ return -1;
+ }
+
+ get_bits(&s->gb, 4); /* bit_rate_scale */
+ get_bits(&s->gb, 4); /* cpb_size_scale */
+ for(i=0; i<cpb_count; i++){
+ get_ue_golomb(&s->gb); /* bit_rate_value_minus1 */
+ get_ue_golomb(&s->gb); /* cpb_size_value_minus1 */
+ get_bits1(&s->gb); /* cbr_flag */
+ }
+ sps->initial_cpb_removal_delay_length = get_bits(&s->gb, 5) + 1;
+ sps->cpb_removal_delay_length = get_bits(&s->gb, 5) + 1;
+ sps->dpb_output_delay_length = get_bits(&s->gb, 5) + 1;
+ sps->time_offset_length = get_bits(&s->gb, 5);
+ sps->cpb_cnt = cpb_count;
+ return 0;
+}
+
+static inline int decode_vui_parameters(H264Context *h, SPS *sps){
+ MpegEncContext * const s = &h->s;
+ int aspect_ratio_info_present_flag;
+ unsigned int aspect_ratio_idc;
+
+ aspect_ratio_info_present_flag= get_bits1(&s->gb);
+
+ if( aspect_ratio_info_present_flag ) {
+ aspect_ratio_idc= get_bits(&s->gb, 8);
+ if( aspect_ratio_idc == EXTENDED_SAR ) {
+ sps->sar.num= get_bits(&s->gb, 16);
+ sps->sar.den= get_bits(&s->gb, 16);
+ }else if(aspect_ratio_idc < FF_ARRAY_ELEMS(pixel_aspect)){
+ sps->sar= pixel_aspect[aspect_ratio_idc];
+ }else{
+ av_log(h->s.avctx, AV_LOG_ERROR, "illegal aspect ratio\n");
+ return -1;
+ }
+ }else{
+ sps->sar.num=
+ sps->sar.den= 0;
+ }
+// s->avctx->aspect_ratio= sar_width*s->width / (float)(s->height*sar_height);
+
+ if(get_bits1(&s->gb)){ /* overscan_info_present_flag */
+ get_bits1(&s->gb); /* overscan_appropriate_flag */
+ }
+
+ sps->video_signal_type_present_flag = get_bits1(&s->gb);
+ if(sps->video_signal_type_present_flag){
+ get_bits(&s->gb, 3); /* video_format */
+ sps->full_range = get_bits1(&s->gb); /* video_full_range_flag */
+
+ sps->colour_description_present_flag = get_bits1(&s->gb);
+ if(sps->colour_description_present_flag){
+ sps->color_primaries = get_bits(&s->gb, 8); /* colour_primaries */
+ sps->color_trc = get_bits(&s->gb, 8); /* transfer_characteristics */
+ sps->colorspace = get_bits(&s->gb, 8); /* matrix_coefficients */
+ if (sps->color_primaries >= AVCOL_PRI_NB)
+ sps->color_primaries = AVCOL_PRI_UNSPECIFIED;
+ if (sps->color_trc >= AVCOL_TRC_NB)
+ sps->color_trc = AVCOL_TRC_UNSPECIFIED;
+ if (sps->colorspace >= AVCOL_SPC_NB)
+ sps->colorspace = AVCOL_SPC_UNSPECIFIED;
+ }
+ }
+
+ if(get_bits1(&s->gb)){ /* chroma_location_info_present_flag */
+ s->avctx->chroma_sample_location = get_ue_golomb(&s->gb)+1; /* chroma_sample_location_type_top_field */
+ get_ue_golomb(&s->gb); /* chroma_sample_location_type_bottom_field */
+ }
+
+ sps->timing_info_present_flag = get_bits1(&s->gb);
+ if(sps->timing_info_present_flag){
+ sps->num_units_in_tick = get_bits_long(&s->gb, 32);
+ sps->time_scale = get_bits_long(&s->gb, 32);
+ if(sps->num_units_in_tick-1 > 0x7FFFFFFEU || sps->time_scale-1 > 0x7FFFFFFEU){
+ av_log(h->s.avctx, AV_LOG_ERROR, "time_scale/num_units_in_tick invalid or unsupported (%d/%d)\n", sps->time_scale, sps->num_units_in_tick);
+ return -1;
+ }
+ sps->fixed_frame_rate_flag = get_bits1(&s->gb);
+ }
+
+ sps->nal_hrd_parameters_present_flag = get_bits1(&s->gb);
+ if(sps->nal_hrd_parameters_present_flag)
+ if(decode_hrd_parameters(h, sps) < 0)
+ return -1;
+ sps->vcl_hrd_parameters_present_flag = get_bits1(&s->gb);
+ if(sps->vcl_hrd_parameters_present_flag)
+ if(decode_hrd_parameters(h, sps) < 0)
+ return -1;
+ if(sps->nal_hrd_parameters_present_flag || sps->vcl_hrd_parameters_present_flag)
+ get_bits1(&s->gb); /* low_delay_hrd_flag */
+ sps->pic_struct_present_flag = get_bits1(&s->gb);
+
+ sps->bitstream_restriction_flag = get_bits1(&s->gb);
+ if(sps->bitstream_restriction_flag){
+ get_bits1(&s->gb); /* motion_vectors_over_pic_boundaries_flag */
+ get_ue_golomb(&s->gb); /* max_bytes_per_pic_denom */
+ get_ue_golomb(&s->gb); /* max_bits_per_mb_denom */
+ get_ue_golomb(&s->gb); /* log2_max_mv_length_horizontal */
+ get_ue_golomb(&s->gb); /* log2_max_mv_length_vertical */
+ sps->num_reorder_frames= get_ue_golomb(&s->gb);
+ get_ue_golomb(&s->gb); /*max_dec_frame_buffering*/
+
+ if(sps->num_reorder_frames > 16U /*max_dec_frame_buffering || max_dec_frame_buffering > 16*/){
+ av_log(h->s.avctx, AV_LOG_ERROR, "illegal num_reorder_frames %d\n", sps->num_reorder_frames);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static void decode_scaling_list(H264Context *h, uint8_t *factors, int size,
+ const uint8_t *jvt_list, const uint8_t *fallback_list){
+ MpegEncContext * const s = &h->s;
+ int i, last = 8, next = 8;
+ const uint8_t *scan = size == 16 ? zigzag_scan : ff_zigzag_direct;
+ if(!get_bits1(&s->gb)) /* matrix not written, we use the predicted one */
+ memcpy(factors, fallback_list, size*sizeof(uint8_t));
+ else
+ for(i=0;i<size;i++){
+ if(next)
+ next = (last + get_se_golomb(&s->gb)) & 0xff;
+ if(!i && !next){ /* matrix not written, we use the preset one */
+ memcpy(factors, jvt_list, size*sizeof(uint8_t));
+ break;
+ }
+ last = factors[scan[i]] = next ? next : last;
+ }
+}
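+/* Editorial sketch (not part of the original commit): decode_scaling_list()
+ * above reads signed deltas on a running value, visiting entries in zig-zag
+ * order; once the running value reaches 0 the remaining entries simply repeat
+ * the last value (the !i && !next early-out that copies the JVT preset is
+ * omitted here).  Same update rule over pre-decoded deltas (hypothetical
+ * input, no bitstream reader, illustration only): */
+#if 0
+static void apply_scaling_deltas(uint8_t *factors, const uint8_t *scan,
+                                 const int8_t *deltas, int size)
+{
+    int i, last = 8, next = 8;
+    for (i = 0; i < size; i++) {
+        if (next)
+            next = (last + deltas[i]) & 0xff;
+        last = factors[scan[i]] = next ? next : last;
+    }
+}
+#endif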
+
+static void decode_scaling_matrices(H264Context *h, SPS *sps, PPS *pps, int is_sps,
+ uint8_t (*scaling_matrix4)[16], uint8_t (*scaling_matrix8)[64]){
+ MpegEncContext * const s = &h->s;
+ int fallback_sps = !is_sps && sps->scaling_matrix_present;
+ const uint8_t *fallback[4] = {
+ fallback_sps ? sps->scaling_matrix4[0] : default_scaling4[0],
+ fallback_sps ? sps->scaling_matrix4[3] : default_scaling4[1],
+ fallback_sps ? sps->scaling_matrix8[0] : default_scaling8[0],
+ fallback_sps ? sps->scaling_matrix8[1] : default_scaling8[1]
+ };
+ if(get_bits1(&s->gb)){
+ sps->scaling_matrix_present |= is_sps;
+ decode_scaling_list(h,scaling_matrix4[0],16,default_scaling4[0],fallback[0]); // Intra, Y
+ decode_scaling_list(h,scaling_matrix4[1],16,default_scaling4[0],scaling_matrix4[0]); // Intra, Cr
+ decode_scaling_list(h,scaling_matrix4[2],16,default_scaling4[0],scaling_matrix4[1]); // Intra, Cb
+ decode_scaling_list(h,scaling_matrix4[3],16,default_scaling4[1],fallback[1]); // Inter, Y
+ decode_scaling_list(h,scaling_matrix4[4],16,default_scaling4[1],scaling_matrix4[3]); // Inter, Cr
+ decode_scaling_list(h,scaling_matrix4[5],16,default_scaling4[1],scaling_matrix4[4]); // Inter, Cb
+ if(is_sps || pps->transform_8x8_mode){
+ decode_scaling_list(h,scaling_matrix8[0],64,default_scaling8[0],fallback[2]); // Intra, Y
+ decode_scaling_list(h,scaling_matrix8[1],64,default_scaling8[1],fallback[3]); // Inter, Y
+ }
+ }
+}
+
+int ff_h264_decode_seq_parameter_set(H264Context *h){
+ MpegEncContext * const s = &h->s;
+ int profile_idc, level_idc;
+ unsigned int sps_id;
+ int i;
+ SPS *sps;
+
+ profile_idc= get_bits(&s->gb, 8);
+ get_bits1(&s->gb); //constraint_set0_flag
+ get_bits1(&s->gb); //constraint_set1_flag
+ get_bits1(&s->gb); //constraint_set2_flag
+ get_bits1(&s->gb); //constraint_set3_flag
+ get_bits(&s->gb, 4); // reserved
+ level_idc= get_bits(&s->gb, 8);
+ sps_id= get_ue_golomb_31(&s->gb);
+
+ if(sps_id >= MAX_SPS_COUNT) {
+ av_log(h->s.avctx, AV_LOG_ERROR, "sps_id (%d) out of range\n", sps_id);
+ return -1;
+ }
+ sps= av_mallocz(sizeof(SPS));
+ if(sps == NULL)
+ return -1;
+
+ sps->profile_idc= profile_idc;
+ sps->level_idc= level_idc;
+ sps->full_range = VIDEO_FULL_RANGE_INVALID; /* ffdshow custom code */
+
+ memset(sps->scaling_matrix4, 16, sizeof(sps->scaling_matrix4));
+ memset(sps->scaling_matrix8, 16, sizeof(sps->scaling_matrix8));
+ sps->scaling_matrix_present = 0;
+ sps->colorspace = 2; /* ffdshow custom code */
+
+ // ==> Start patch MPC Fidelity Range Extensions stuff
+ sps->chroma_format_idc = 1;
+ sps->bit_depth_luma = 8; // bit_depth_luma_minus8 = 0
+ sps->bit_depth_chroma = 8; // bit_depth_chroma_minus8 = 0
+ sps->residual_color_transform_flag = 0;
+ // <== End patch MPC
+
+ if(sps->profile_idc >= 100){ //high profile
+ sps->chroma_format_idc= get_ue_golomb_31(&s->gb);
+ if(sps->chroma_format_idc == 3)
+ sps->residual_color_transform_flag = get_bits1(&s->gb);
+ sps->bit_depth_luma = get_ue_golomb(&s->gb) + 8;
+ sps->bit_depth_chroma = get_ue_golomb(&s->gb) + 8;
+ sps->transform_bypass = get_bits1(&s->gb);
+ decode_scaling_matrices(h, sps, NULL, 1, sps->scaling_matrix4, sps->scaling_matrix8);
+ }else{
+ sps->chroma_format_idc= 1;
+ sps->bit_depth_luma = 8;
+ sps->bit_depth_chroma = 8;
+ }
+
+ sps->log2_max_frame_num= get_ue_golomb(&s->gb) + 4;
+ sps->poc_type= get_ue_golomb_31(&s->gb);
+
+ if(sps->poc_type == 0){ //FIXME #define
+ sps->log2_max_poc_lsb= get_ue_golomb(&s->gb) + 4;
+ } else if(sps->poc_type == 1){//FIXME #define
+ sps->delta_pic_order_always_zero_flag= get_bits1(&s->gb);
+ sps->offset_for_non_ref_pic= get_se_golomb(&s->gb);
+ sps->offset_for_top_to_bottom_field= get_se_golomb(&s->gb);
+ sps->poc_cycle_length = get_ue_golomb(&s->gb);
+
+ if((unsigned)sps->poc_cycle_length >= FF_ARRAY_ELEMS(sps->offset_for_ref_frame)){
+ av_log(h->s.avctx, AV_LOG_ERROR, "poc_cycle_length overflow %u\n", sps->poc_cycle_length);
+ goto fail;
+ }
+
+ for(i=0; i<sps->poc_cycle_length; i++)
+ sps->offset_for_ref_frame[i]= get_se_golomb(&s->gb);
+ }else if(sps->poc_type != 2){
+ av_log(h->s.avctx, AV_LOG_ERROR, "illegal POC type %d\n", sps->poc_type);
+ goto fail;
+ }
+
+ sps->ref_frame_count= get_ue_golomb_31(&s->gb);
+ if(sps->ref_frame_count > MAX_PICTURE_COUNT-2 || sps->ref_frame_count >= 32U){
+ av_log(h->s.avctx, AV_LOG_ERROR, "too many reference frames\n");
+ goto fail;
+ }
+ sps->gaps_in_frame_num_allowed_flag= get_bits1(&s->gb);
+ sps->mb_width = get_ue_golomb(&s->gb) + 1;
+ sps->mb_height= get_ue_golomb(&s->gb) + 1;
+ if((unsigned)sps->mb_width >= INT_MAX/16 || (unsigned)sps->mb_height >= INT_MAX/16 ||
+ avcodec_check_dimensions(NULL, 16*sps->mb_width, 16*sps->mb_height)){
+ av_log(h->s.avctx, AV_LOG_ERROR, "mb_width/height overflow\n");
+ goto fail;
+ }
+
+ sps->frame_mbs_only_flag= get_bits1(&s->gb);
+ if(!sps->frame_mbs_only_flag)
+ sps->mb_aff= get_bits1(&s->gb);
+ else
+ sps->mb_aff= 0;
+
+ sps->direct_8x8_inference_flag= get_bits1(&s->gb);
+
+#ifndef ALLOW_INTERLACE
+ if(sps->mb_aff)
+ av_log(h->s.avctx, AV_LOG_ERROR, "MBAFF support not included; enable it at compile-time.\n");
+#endif
+ sps->crop= get_bits1(&s->gb);
+ if(sps->crop){
+ sps->crop_left = get_ue_golomb(&s->gb);
+ sps->crop_right = get_ue_golomb(&s->gb);
+ sps->crop_top = get_ue_golomb(&s->gb);
+ sps->crop_bottom= get_ue_golomb(&s->gb);
+ if(sps->crop_left || sps->crop_top){
+ av_log(h->s.avctx, AV_LOG_ERROR, "insane cropping not completely supported, this could look slightly wrong ...\n");
+ }
+ if(sps->crop_right >= 8 || sps->crop_bottom >= (8>> !sps->frame_mbs_only_flag)){
+ av_log(h->s.avctx, AV_LOG_ERROR, "brainfart cropping not supported, this could look slightly wrong ...\n");
+ }
+ }else{
+ sps->crop_left =
+ sps->crop_right =
+ sps->crop_top =
+ sps->crop_bottom= 0;
+ }
+
+ sps->vui_parameters_present_flag= get_bits1(&s->gb);
+ if( sps->vui_parameters_present_flag )
+ if (decode_vui_parameters(h, sps) < 0)
+ goto fail;
+
+ if(s->avctx->debug&FF_DEBUG_PICT_INFO){
+ av_log(h->s.avctx, AV_LOG_DEBUG, "sps:%u profile:%d/%d poc:%d ref:%d %dx%d %s %s crop:%d/%d/%d/%d %s %s %d/%d\n",
+ sps_id, sps->profile_idc, sps->level_idc,
+ sps->poc_type,
+ sps->ref_frame_count,
+ sps->mb_width, sps->mb_height,
+ sps->frame_mbs_only_flag ? "FRM" : (sps->mb_aff ? "MB-AFF" : "PIC-AFF"),
+ sps->direct_8x8_inference_flag ? "8B8" : "",
+ sps->crop_left, sps->crop_right,
+ sps->crop_top, sps->crop_bottom,
+ sps->vui_parameters_present_flag ? "VUI" : "",
+ #if __STDC_VERSION__ >= 199901L
+ ((const char*[]){"Gray","420","422","444"})[sps->chroma_format_idc],
+ #else
+ "",
+ #endif
+ sps->timing_info_present_flag ? sps->num_units_in_tick : 0,
+ sps->timing_info_present_flag ? sps->time_scale : 0
+ );
+ }
+
+ av_free(h->sps_buffers[sps_id]);
+ h->sps_buffers[sps_id]= sps;
+ h->sps = *sps;
+ return 0;
+fail:
+ av_free(sps);
+ return -1;
+}
+
+static void
+build_qp_table(PPS *pps, int t, int index)
+{
+ int i;
+ for(i = 0; i < 52; i++)
+ pps->chroma_qp_table[t][i] = ff_h264_chroma_qp[av_clip(i + index, 0, 51)];
+}
+
+int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length){
+ MpegEncContext * const s = &h->s;
+ unsigned int pps_id= get_ue_golomb(&s->gb);
+ PPS *pps;
+
+ if(pps_id >= MAX_PPS_COUNT) {
+ av_log(h->s.avctx, AV_LOG_ERROR, "pps_id (%d) out of range\n", pps_id);
+ return -1;
+ }
+
+ pps= av_mallocz(sizeof(PPS));
+ if(pps == NULL)
+ return -1;
+ pps->sps_id= get_ue_golomb_31(&s->gb);
+ if((unsigned)pps->sps_id>=MAX_SPS_COUNT || h->sps_buffers[pps->sps_id] == NULL){
+ av_log(h->s.avctx, AV_LOG_ERROR, "sps_id out of range\n");
+ goto fail;
+ }
+
+ pps->cabac= get_bits1(&s->gb);
+ pps->pic_order_present= get_bits1(&s->gb);
+ pps->slice_group_count= get_ue_golomb(&s->gb) + 1;
+ if(pps->slice_group_count > 1 ){
+ pps->mb_slice_group_map_type= get_ue_golomb(&s->gb);
+ av_log(h->s.avctx, AV_LOG_ERROR, "FMO not supported\n");
+ switch(pps->mb_slice_group_map_type){
+ case 0:
+#if 0
+| for( i = 0; i <= num_slice_groups_minus1; i++ ) | | |
+| run_length[ i ] |1 |ue(v) |
+#endif
+ break;
+ case 2:
+#if 0
+| for( i = 0; i < num_slice_groups_minus1; i++ ) | | |
+|{ | | |
+| top_left_mb[ i ] |1 |ue(v) |
+| bottom_right_mb[ i ] |1 |ue(v) |
+| } | | |
+#endif
+ break;
+ case 3:
+ case 4:
+ case 5:
+#if 0
+| slice_group_change_direction_flag |1 |u(1) |
+| slice_group_change_rate_minus1 |1 |ue(v) |
+#endif
+ // ==> Start patch MPC
+ pps->slice_group_change_direction_flag=get_bits1(&s->gb); // |1 |u(1) |
+ pps->slice_group_change_rate_minus1=get_ue_golomb(&s->gb); // |1 |ue(v) |
+ // <== End patch MPC
+ break;
+ case 6:
+#if 0
+| slice_group_id_cnt_minus1 |1 |ue(v) |
+| for( i = 0; i <= slice_group_id_cnt_minus1; i++ | | |
+|) | | |
+| slice_group_id[ i ] |1 |u(v) |
+#endif
+ break;
+ }
+ }
+ pps->ref_count[0]= get_ue_golomb(&s->gb) + 1;
+ pps->ref_count[1]= get_ue_golomb(&s->gb) + 1;
+ if(pps->ref_count[0]-1 > 32-1 || pps->ref_count[1]-1 > 32-1){
+ av_log(h->s.avctx, AV_LOG_ERROR, "reference overflow (pps)\n");
+ goto fail;
+ }
+
+ pps->weighted_pred= get_bits1(&s->gb);
+ pps->weighted_bipred_idc= get_bits(&s->gb, 2);
+ pps->init_qp= get_se_golomb(&s->gb) + 26;
+ pps->init_qs= get_se_golomb(&s->gb) + 26;
+ pps->chroma_qp_index_offset[0]= get_se_golomb(&s->gb);
+ pps->deblocking_filter_parameters_present= get_bits1(&s->gb);
+ pps->constrained_intra_pred= get_bits1(&s->gb);
+ pps->redundant_pic_cnt_present = get_bits1(&s->gb);
+
+ pps->transform_8x8_mode= 0;
+ h->dequant_coeff_pps= -1; //contents of sps/pps can change even if id doesn't, so reinit
+ memcpy(pps->scaling_matrix4, h->sps_buffers[pps->sps_id]->scaling_matrix4, sizeof(pps->scaling_matrix4));
+ memcpy(pps->scaling_matrix8, h->sps_buffers[pps->sps_id]->scaling_matrix8, sizeof(pps->scaling_matrix8));
+
+ if(get_bits_count(&s->gb) < bit_length){
+ pps->transform_8x8_mode= get_bits1(&s->gb);
+ decode_scaling_matrices(h, h->sps_buffers[pps->sps_id], pps, 0, pps->scaling_matrix4, pps->scaling_matrix8);
+ pps->chroma_qp_index_offset[1]= get_se_golomb(&s->gb); //second_chroma_qp_index_offset
+ } else {
+ pps->chroma_qp_index_offset[1]= pps->chroma_qp_index_offset[0];
+ }
+
+ build_qp_table(pps, 0, pps->chroma_qp_index_offset[0]);
+ build_qp_table(pps, 1, pps->chroma_qp_index_offset[1]);
+ if(pps->chroma_qp_index_offset[0] != pps->chroma_qp_index_offset[1])
+ pps->chroma_qp_diff= 1;
+
+ if(s->avctx->debug&FF_DEBUG_PICT_INFO){
+ av_log(h->s.avctx, AV_LOG_DEBUG, "pps:%u sps:%u %s slice_groups:%d ref:%d/%d %s qp:%d/%d/%d/%d %s %s %s %s\n",
+ pps_id, pps->sps_id,
+ pps->cabac ? "CABAC" : "CAVLC",
+ pps->slice_group_count,
+ pps->ref_count[0], pps->ref_count[1],
+ pps->weighted_pred ? "weighted" : "",
+ pps->init_qp, pps->init_qs, pps->chroma_qp_index_offset[0], pps->chroma_qp_index_offset[1],
+ pps->deblocking_filter_parameters_present ? "LPAR" : "",
+ pps->constrained_intra_pred ? "CONSTR" : "",
+ pps->redundant_pic_cnt_present ? "REDU" : "",
+ pps->transform_8x8_mode ? "8x8DCT" : ""
+ );
+ }
+
+ av_free(h->pps_buffers[pps_id]);
+ h->pps_buffers[pps_id]= pps;
+ return 0;
+fail:
+ av_free(pps);
+ return -1;
+}
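+
+/* Editor's note: illustrative sketch, not part of the original patch.  The
+ * SPS/PPS parsers above read almost every syntax element with
+ * get_ue_golomb()/get_se_golomb().  The helpers below (eg_*, hypothetical
+ * names) show the underlying Exp-Golomb coding on a plain byte buffer;
+ * FFmpeg's real readers are table-driven for speed but decode the same
+ * values. */
+typedef struct { const unsigned char *buf; unsigned pos; } EgReader; /* pos counted in bits */
+
+static unsigned eg_read_bit(EgReader *r){
+    unsigned bit = (r->buf[r->pos >> 3] >> (7 - (r->pos & 7))) & 1;
+    r->pos++;
+    return bit;
+}
+
+/* ue(v): n leading zero bits, a marker '1', then n more bits;
+ * value = 2^n - 1 + (those n bits).  E.g. the codeword "00110" decodes to 5. */
+static unsigned eg_read_ue(EgReader *r){
+    int n = 0, i;
+    unsigned rest = 0;
+    while (!eg_read_bit(r))
+        n++;
+    for (i = 0; i < n; i++)
+        rest = (rest << 1) | eg_read_bit(r);
+    return (1u << n) - 1 + rest;
+}
+
+/* se(v): ue(v) remapped to the signed sequence 0, 1, -1, 2, -2, ... */
+static int eg_read_se(EgReader *r){
+    unsigned u = eg_read_ue(r);
+    return (u & 1) ? (int)((u + 1) >> 1) : -(int)(u >> 1);
+}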
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_refs.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_refs.c
new file mode 100644
index 000000000..183f01526
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_refs.c
@@ -0,0 +1,700 @@
+/*
+ * H.26L/H.264/AVC/JVT/14496-10/... reference picture handling
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/h264_refs.c
+ * H.264 / AVC / MPEG4 part10 reference picture handling.
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#include "internal.h"
+#include "dsputil.h"
+#include "avcodec.h"
+#include "h264.h"
+#include "golomb.h"
+
+//#undef NDEBUG
+#include <assert.h>
+
+
+static void pic_as_field(Picture *pic, const int parity){
+ int i;
+ for (i = 0; i < 4; ++i) {
+ if (parity == PICT_BOTTOM_FIELD)
+ pic->data[i] += pic->linesize[i];
+ pic->reference = parity;
+ pic->linesize[i] *= 2;
+ }
+ pic->poc= pic->field_poc[parity == PICT_BOTTOM_FIELD];
+}
+
+static int split_field_copy(Picture *dest, Picture *src,
+ int parity, int id_add){
+ int match = !!(src->reference & parity);
+
+ if (match) {
+ *dest = *src;
+ if(parity != PICT_FRAME){
+ pic_as_field(dest, parity);
+ dest->pic_id *= 2;
+ dest->pic_id += id_add;
+ }
+ }
+
+ return match;
+}
+
+static int build_def_list(Picture *def, Picture **in, int len, int is_long, int sel){
+ int i[2]={0};
+ int index=0;
+
+ while(i[0]<len || i[1]<len){
+ while(i[0]<len && !(in[ i[0] ] && (in[ i[0] ]->reference & sel)))
+ i[0]++;
+ while(i[1]<len && !(in[ i[1] ] && (in[ i[1] ]->reference & (sel^3))))
+ i[1]++;
+ if(i[0] < len){
+ in[ i[0] ]->pic_id= is_long ? i[0] : in[ i[0] ]->frame_num;
+ split_field_copy(&def[index++], in[ i[0]++ ], sel , 1);
+ }
+ if(i[1] < len){
+ in[ i[1] ]->pic_id= is_long ? i[1] : in[ i[1] ]->frame_num;
+ split_field_copy(&def[index++], in[ i[1]++ ], sel^3, 0);
+ }
+ }
+
+ return index;
+}
+
+static int add_sorted(Picture **sorted, Picture **src, int len, int limit, int dir){
+ int i, best_poc;
+ int out_i= 0;
+
+ for(;;){
+ best_poc= dir ? INT_MIN : INT_MAX;
+
+ for(i=0; i<len; i++){
+ const int poc= src[i]->poc;
+ if(((poc > limit) ^ dir) && ((poc < best_poc) ^ dir)){
+ best_poc= poc;
+ sorted[out_i]= src[i];
+ }
+ }
+ if(best_poc == (dir ? INT_MIN : INT_MAX))
+ break;
+ limit= sorted[out_i++]->poc - dir;
+ }
+ return out_i;
+}
+
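+/* Editor's note: illustrative sketch, not FFmpeg code.  add_sorted() above
+ * pulls short-term references on one side of the current POC in order of
+ * increasing distance; the default-list builder below calls it twice per
+ * list (preceding pictures first for List0, following pictures first for
+ * List1).  With plain ints the List0 ordering looks like this; build_list0()
+ * is a made-up helper name. */
+static int build_list0(const int *poc, int n, int cur_poc, int *out){
+    int used[16] = {0};   /* assumes n <= 16 entries, as for H.264 references */
+    int count = 0, pass;
+    for (pass = 0; pass < 2; pass++){           /* pass 0: below cur, pass 1: above */
+        for (;;){
+            int best = -1, i;
+            for (i = 0; i < n; i++){
+                if (used[i]) continue;
+                if (pass == 0 ? poc[i] >= cur_poc : poc[i] < cur_poc) continue;
+                if (best < 0 ||
+                    (pass == 0 ? poc[i] > poc[best] : poc[i] < poc[best]))
+                    best = i;
+            }
+            if (best < 0) break;                /* this side is exhausted */
+            used[best] = 1;
+            out[count++] = poc[best];
+        }
+    }
+    return count;
+}
+/* Example: cur_poc=6 and poc={0,2,4,8,10} gives List0 order 4,2,0,8,10;
+ * List1 starts with 8,10 and then falls back to 4,2,0. */
+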
+int ff_h264_fill_default_ref_list(H264Context *h){
+ MpegEncContext * const s = &h->s;
+ int i, len;
+
+ if(h->slice_type_nos==FF_B_TYPE){
+ Picture *sorted[32];
+ int cur_poc, list;
+ int lens[2];
+
+ if(FIELD_PICTURE)
+ cur_poc= s->current_picture_ptr->field_poc[ s->picture_structure == PICT_BOTTOM_FIELD ];
+ else
+ cur_poc= s->current_picture_ptr->poc;
+
+ for(list= 0; list<2; list++){
+ len= add_sorted(sorted , h->short_ref, h->short_ref_count, cur_poc, 1^list);
+ len+=add_sorted(sorted+len, h->short_ref, h->short_ref_count, cur_poc, 0^list);
+ assert(len<=32);
+ len= build_def_list(h->default_ref_list[list] , sorted , len, 0, s->picture_structure);
+ len+=build_def_list(h->default_ref_list[list]+len, h->long_ref, 16 , 1, s->picture_structure);
+ assert(len<=32);
+
+ if(len < h->ref_count[list])
+ memset(&h->default_ref_list[list][len], 0, sizeof(Picture)*(h->ref_count[list] - len));
+ lens[list]= len;
+ }
+
+ if(lens[0] == lens[1] && lens[1] > 1){
+            for(i=0; i<lens[0] && h->default_ref_list[0][i].data[0] == h->default_ref_list[1][i].data[0]; i++);
+ if(i == lens[0])
+ FFSWAP(Picture, h->default_ref_list[1][0], h->default_ref_list[1][1]);
+ }
+ }else{
+ len = build_def_list(h->default_ref_list[0] , h->short_ref, h->short_ref_count, 0, s->picture_structure);
+ len+= build_def_list(h->default_ref_list[0]+len, h-> long_ref, 16 , 1, s->picture_structure);
+ assert(len <= 32);
+ if(len < h->ref_count[0])
+ memset(&h->default_ref_list[0][len], 0, sizeof(Picture)*(h->ref_count[0] - len));
+ }
+#ifdef TRACE
+ for (i=0; i<h->ref_count[0]; i++) {
+ tprintf(h->s.avctx, "List0: %s fn:%d 0x%p\n", (h->default_ref_list[0][i].long_ref ? "LT" : "ST"), h->default_ref_list[0][i].pic_id, h->default_ref_list[0][i].data[0]);
+ }
+ if(h->slice_type_nos==FF_B_TYPE){
+ for (i=0; i<h->ref_count[1]; i++) {
+ tprintf(h->s.avctx, "List1: %s fn:%d 0x%p\n", (h->default_ref_list[1][i].long_ref ? "LT" : "ST"), h->default_ref_list[1][i].pic_id, h->default_ref_list[1][i].data[0]);
+ }
+ }
+#endif
+ return 0;
+}
+
+static void print_short_term(H264Context *h);
+static void print_long_term(H264Context *h);
+
+/**
+ * Extract structure information about the picture described by pic_num in
+ * the current decoding context (frame or field). Note that pic_num is
+ * picture number without wrapping (so, 0<=pic_num<max_pic_num).
+ * @param pic_num picture number for which to extract structure information
+ * @param structure one of PICT_XXX describing structure of picture
+ * with pic_num
+ * @return frame number (short term) or long term index of picture
+ * described by pic_num
+ */
+static int pic_num_extract(H264Context *h, int pic_num, int *structure){
+ MpegEncContext * const s = &h->s;
+
+ *structure = s->picture_structure;
+ if(FIELD_PICTURE){
+ if (!(pic_num & 1))
+ /* opposite field */
+ *structure ^= PICT_FRAME;
+ pic_num >>= 1;
+ }
+
+ return pic_num;
+}
+
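+/* Editor's note (illustrative example): in field decoding a reference field
+ * is addressed as pic_num = 2*frame_num + 1 when it has the same parity as
+ * the current field, and 2*frame_num for the opposite parity (ignoring
+ * frame_num wrapping).  Decoding a top field, the top field of the frame
+ * with frame_num 2 is pic_num 5 and its bottom field is pic_num 4;
+ * pic_num_extract() above recovers frame_num 2 plus the corresponding
+ * PICT_* structure from either value. */
+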
+int ff_h264_decode_ref_pic_list_reordering(H264Context *h){
+ MpegEncContext * const s = &h->s;
+ int list, index, pic_structure;
+
+ print_short_term(h);
+ print_long_term(h);
+
+ for(list=0; list<h->list_count; list++){
+ memcpy(h->ref_list[list], h->default_ref_list[list], sizeof(Picture)*h->ref_count[list]);
+
+ if(get_bits1(&s->gb)){
+ int pred= h->curr_pic_num;
+
+ for(index=0; ; index++){
+ unsigned int reordering_of_pic_nums_idc= get_ue_golomb_31(&s->gb);
+ unsigned int pic_id;
+ int i;
+ Picture *ref = NULL;
+
+ if(reordering_of_pic_nums_idc==3)
+ break;
+
+ if(index >= h->ref_count[list]){
+ av_log(h->s.avctx, AV_LOG_ERROR, "reference count overflow\n");
+ return -1;
+ }
+
+ if(reordering_of_pic_nums_idc<3){
+ if(reordering_of_pic_nums_idc<2){
+ const unsigned int abs_diff_pic_num= get_ue_golomb(&s->gb) + 1;
+ int frame_num;
+
+ if(abs_diff_pic_num > h->max_pic_num){
+ av_log(h->s.avctx, AV_LOG_ERROR, "abs_diff_pic_num overflow\n");
+ return -1;
+ }
+
+ if(reordering_of_pic_nums_idc == 0) pred-= abs_diff_pic_num;
+ else pred+= abs_diff_pic_num;
+ pred &= h->max_pic_num - 1;
+
+ frame_num = pic_num_extract(h, pred, &pic_structure);
+
+ for(i= h->short_ref_count-1; i>=0; i--){
+ ref = h->short_ref[i];
+ assert(ref->reference);
+ assert(!ref->long_ref);
+ if(
+ ref->frame_num == frame_num &&
+ (ref->reference & pic_structure)
+ )
+ break;
+ }
+ if(i>=0)
+ ref->pic_id= pred;
+ }else{
+ int long_idx;
+ pic_id= get_ue_golomb(&s->gb); //long_term_pic_idx
+
+ long_idx= pic_num_extract(h, pic_id, &pic_structure);
+
+ if(long_idx>31){
+ av_log(h->s.avctx, AV_LOG_ERROR, "long_term_pic_idx overflow\n");
+ return -1;
+ }
+ ref = h->long_ref[long_idx];
+ assert(!(ref && !ref->reference));
+ if(ref && (ref->reference & pic_structure)){
+ ref->pic_id= pic_id;
+ assert(ref->long_ref);
+ i=0;
+ }else{
+ i=-1;
+ }
+ }
+
+ if (i < 0) {
+ av_log(h->s.avctx, AV_LOG_ERROR, "reference picture missing during reorder\n");
+ memset(&h->ref_list[list][index], 0, sizeof(Picture)); //FIXME
+ } else {
+ for(i=index; i+1<h->ref_count[list]; i++){
+ if(ref->long_ref == h->ref_list[list][i].long_ref && ref->pic_id == h->ref_list[list][i].pic_id)
+ break;
+ }
+ for(; i > index; i--){
+ h->ref_list[list][i]= h->ref_list[list][i-1];
+ }
+ h->ref_list[list][index]= *ref;
+ if (FIELD_PICTURE){
+ pic_as_field(&h->ref_list[list][index], pic_structure);
+ }
+ }
+ }else{
+ av_log(h->s.avctx, AV_LOG_ERROR, "illegal reordering_of_pic_nums_idc\n");
+ return -1;
+ }
+ }
+ }
+ }
+ for(list=0; list<h->list_count; list++){
+ for(index= 0; index < h->ref_count[list]; index++){
+ if(!h->ref_list[list][index].data[0]){
+ av_log(h->s.avctx, AV_LOG_ERROR, "Missing reference picture\n");
+ if(h->default_ref_list[list][0].data[0])
+ h->ref_list[list][index]= h->default_ref_list[list][0];
+ else
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void ff_h264_fill_mbaff_ref_list(H264Context *h){
+ int list, i, j;
+ for(list=0; list<2; list++){ //FIXME try list_count
+ for(i=0; i<h->ref_count[list]; i++){
+ Picture *frame = &h->ref_list[list][i];
+ Picture *field = &h->ref_list[list][16+2*i];
+ field[0] = *frame;
+ for(j=0; j<3; j++)
+ field[0].linesize[j] <<= 1;
+ field[0].reference = PICT_TOP_FIELD;
+ field[0].poc= field[0].field_poc[0];
+ field[1] = field[0];
+ for(j=0; j<3; j++)
+ field[1].data[j] += frame->linesize[j];
+ field[1].reference = PICT_BOTTOM_FIELD;
+ field[1].poc= field[1].field_poc[1];
+
+ h->luma_weight[list][16+2*i] = h->luma_weight[list][16+2*i+1] = h->luma_weight[list][i];
+ h->luma_offset[list][16+2*i] = h->luma_offset[list][16+2*i+1] = h->luma_offset[list][i];
+ for(j=0; j<2; j++){
+ h->chroma_weight[list][16+2*i][j] = h->chroma_weight[list][16+2*i+1][j] = h->chroma_weight[list][i][j];
+ h->chroma_offset[list][16+2*i][j] = h->chroma_offset[list][16+2*i+1][j] = h->chroma_offset[list][i][j];
+ }
+ }
+ }
+ for(j=0; j<h->ref_count[1]; j++){
+ for(i=0; i<h->ref_count[0]; i++)
+ h->implicit_weight[j][16+2*i] = h->implicit_weight[j][16+2*i+1] = h->implicit_weight[j][i];
+ memcpy(h->implicit_weight[16+2*j], h->implicit_weight[j], sizeof(*h->implicit_weight));
+ memcpy(h->implicit_weight[16+2*j+1], h->implicit_weight[j], sizeof(*h->implicit_weight));
+ }
+}
+
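+/* Editor's note: in this layout the first 16 entries of ref_list[] hold the
+ * frame references; ff_h264_fill_mbaff_ref_list() above mirrors reference i
+ * into two field entries at 16+2*i (top field) and 16+2*i+1 (bottom field),
+ * with doubled linesize and per-field POC and weights, so that the field
+ * macroblocks of an MBAFF pair can index either parity directly. */
+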
+/**
+ * Mark a picture as no longer needed for reference. The refmask
+ * argument allows unreferencing of individual fields or the whole frame.
+ * If the picture becomes entirely unreferenced, but is being held for
+ * display purposes, it is marked as such.
+ * @param refmask bitmask of the fields that should remain referenced; it is
+ *                bitwise ANDed with the reference marking of pic, so pass 0
+ *                to drop all references
+ * @return non-zero if pic becomes entirely unreferenced (except possibly
+ * for display purposes) zero if one of the fields remains in
+ * reference
+ */
+static inline int unreference_pic(H264Context *h, Picture *pic, int refmask){
+ int i;
+ if (pic->reference &= refmask) {
+ return 0;
+ } else {
+ for(i = 0; h->delayed_pic[i]; i++)
+ if(pic == h->delayed_pic[i]){
+ pic->reference=DELAYED_PIC_REF;
+ break;
+ }
+ return 1;
+ }
+}
+
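+/* Editor's note (illustrative example): with the usual PICT_TOP_FIELD=1,
+ * PICT_BOTTOM_FIELD=2, PICT_FRAME=3 masks, a frame referenced as PICT_FRAME
+ * loses its top field via unreference_pic(h, pic, PICT_BOTTOM_FIELD) --
+ * reference becomes 2 and the call returns 0 -- and only a later call whose
+ * refmask clears the remaining bit returns 1, allowing the picture to be
+ * dropped from the short/long term lists. */
+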
+/**
+ * Find a Picture in the short term reference list by frame number.
+ * @param frame_num frame number to search for
+ * @param idx set to the index into h->short_ref at which the returned
+ *            picture is found; undefined if no picture is found.
+ * @return pointer to the found picture, or NULL if no pic with the provided
+ * frame number is found
+ */
+static Picture * find_short(H264Context *h, int frame_num, int *idx){
+ MpegEncContext * const s = &h->s;
+ int i;
+
+ for(i=0; i<h->short_ref_count; i++){
+ Picture *pic= h->short_ref[i];
+ if(s->avctx->debug&FF_DEBUG_MMCO)
+ av_log(h->s.avctx, AV_LOG_DEBUG, "%d %d %p\n", i, pic->frame_num, pic);
+ if(pic->frame_num == frame_num) {
+ *idx = i;
+ return pic;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * Remove a picture from the short term reference list by its index in
+ * that list. This does no checking on the provided index; it is assumed
+ * to be valid. Other list entries are shifted down.
+ * @param i index into h->short_ref of picture to remove.
+ */
+static void remove_short_at_index(H264Context *h, int i){
+ assert(i >= 0 && i < h->short_ref_count);
+ h->short_ref[i]= NULL;
+ if (--h->short_ref_count)
+ memmove(&h->short_ref[i], &h->short_ref[i+1], (h->short_ref_count - i)*sizeof(Picture*));
+}
+
+/**
+ * Unreference the picture with the given frame number and, once no field
+ * references remain, remove it from the short term list.
+ * @return the removed picture or NULL if an error occurs
+ */
+static Picture * remove_short(H264Context *h, int frame_num, int ref_mask){
+ MpegEncContext * const s = &h->s;
+ Picture *pic;
+ int i;
+
+ if(s->avctx->debug&FF_DEBUG_MMCO)
+ av_log(h->s.avctx, AV_LOG_DEBUG, "remove short %d count %d\n", frame_num, h->short_ref_count);
+
+ pic = find_short(h, frame_num, &i);
+ if (pic){
+ if(unreference_pic(h, pic, ref_mask))
+ remove_short_at_index(h, i);
+ }
+
+ return pic;
+}
+
+/**
+ * Remove a picture from the long term reference list by its index in
+ * that list.
+ * @return the removed picture or NULL if an error occurs
+ */
+static Picture * remove_long(H264Context *h, int i, int ref_mask){
+ Picture *pic;
+
+ pic= h->long_ref[i];
+ if (pic){
+ if(unreference_pic(h, pic, ref_mask)){
+ assert(h->long_ref[i]->long_ref == 1);
+ h->long_ref[i]->long_ref= 0;
+ h->long_ref[i]= NULL;
+ h->long_ref_count--;
+ }
+ }
+
+ return pic;
+}
+
+void ff_h264_remove_all_refs(H264Context *h){
+ int i;
+
+ for(i=0; i<16; i++){
+ remove_long(h, i, 0);
+ }
+ assert(h->long_ref_count==0);
+
+ for(i=0; i<h->short_ref_count; i++){
+ unreference_pic(h, h->short_ref[i], 0);
+ h->short_ref[i]= NULL;
+ }
+ h->short_ref_count=0;
+}
+
+/**
+ * print short term list
+ */
+static void print_short_term(H264Context *h) {
+ uint32_t i;
+ if(h->s.avctx->debug&FF_DEBUG_MMCO) {
+ av_log(h->s.avctx, AV_LOG_DEBUG, "short term list:\n");
+ for(i=0; i<h->short_ref_count; i++){
+ Picture *pic= h->short_ref[i];
+ av_log(h->s.avctx, AV_LOG_DEBUG, "%d fn:%d poc:%d %p\n", i, pic->frame_num, pic->poc, pic->data[0]);
+ }
+ }
+}
+
+/**
+ * print long term list
+ */
+static void print_long_term(H264Context *h) {
+ uint32_t i;
+ if(h->s.avctx->debug&FF_DEBUG_MMCO) {
+ av_log(h->s.avctx, AV_LOG_DEBUG, "long term list:\n");
+ for(i = 0; i < 16; i++){
+ Picture *pic= h->long_ref[i];
+ if (pic) {
+ av_log(h->s.avctx, AV_LOG_DEBUG, "%d fn:%d poc:%d %p\n", i, pic->frame_num, pic->poc, pic->data[0]);
+ }
+ }
+ }
+}
+
+int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
+ MpegEncContext * const s = &h->s;
+ int i, av_uninit(j);
+ int current_ref_assigned=0;
+ Picture *av_uninit(pic);
+
+ if((s->avctx->debug&FF_DEBUG_MMCO) && mmco_count==0)
+ av_log(h->s.avctx, AV_LOG_DEBUG, "no mmco here\n");
+
+ for(i=0; i<mmco_count; i++){
+ int av_uninit(structure), av_uninit(frame_num);
+ if(s->avctx->debug&FF_DEBUG_MMCO)
+ av_log(h->s.avctx, AV_LOG_DEBUG, "mmco:%d %d %d\n", h->mmco[i].opcode, h->mmco[i].short_pic_num, h->mmco[i].long_arg);
+
+ if( mmco[i].opcode == MMCO_SHORT2UNUSED
+ || mmco[i].opcode == MMCO_SHORT2LONG){
+ frame_num = pic_num_extract(h, mmco[i].short_pic_num, &structure);
+ pic = find_short(h, frame_num, &j);
+ if(!pic){
+ if(mmco[i].opcode != MMCO_SHORT2LONG || !h->long_ref[mmco[i].long_arg]
+ || h->long_ref[mmco[i].long_arg]->frame_num != frame_num)
+ av_log(h->s.avctx, AV_LOG_ERROR, "mmco: unref short failure\n");
+ continue;
+ }
+ }
+
+ switch(mmco[i].opcode){
+ case MMCO_SHORT2UNUSED:
+ if(s->avctx->debug&FF_DEBUG_MMCO)
+ av_log(h->s.avctx, AV_LOG_DEBUG, "mmco: unref short %d count %d\n", h->mmco[i].short_pic_num, h->short_ref_count);
+ remove_short(h, frame_num, structure ^ PICT_FRAME);
+ break;
+ case MMCO_SHORT2LONG:
+ if (h->long_ref[mmco[i].long_arg] != pic)
+ remove_long(h, mmco[i].long_arg, 0);
+
+ remove_short_at_index(h, j);
+ h->long_ref[ mmco[i].long_arg ]= pic;
+ if (h->long_ref[ mmco[i].long_arg ]){
+ h->long_ref[ mmco[i].long_arg ]->long_ref=1;
+ h->long_ref_count++;
+ }
+ break;
+ case MMCO_LONG2UNUSED:
+ j = pic_num_extract(h, mmco[i].long_arg, &structure);
+ pic = h->long_ref[j];
+ if (pic) {
+ remove_long(h, j, structure ^ PICT_FRAME);
+ } else if(s->avctx->debug&FF_DEBUG_MMCO)
+ av_log(h->s.avctx, AV_LOG_DEBUG, "mmco: unref long failure\n");
+ break;
+ case MMCO_LONG:
+            // Comment below left from previous code as it is an interesting note.
+ /* First field in pair is in short term list or
+ * at a different long term index.
+ * This is not allowed; see 7.4.3.3, notes 2 and 3.
+ * Report the problem and keep the pair where it is,
+ * and mark this field valid.
+ */
+
+ if (h->long_ref[mmco[i].long_arg] != s->current_picture_ptr) {
+ remove_long(h, mmco[i].long_arg, 0);
+
+ h->long_ref[ mmco[i].long_arg ]= s->current_picture_ptr;
+ h->long_ref[ mmco[i].long_arg ]->long_ref=1;
+ h->long_ref_count++;
+ }
+
+ s->current_picture_ptr->reference |= s->picture_structure;
+ current_ref_assigned=1;
+ break;
+ case MMCO_SET_MAX_LONG:
+ assert(mmco[i].long_arg <= 16);
+            // just remove the long-term references whose index is greater than the new max
+ for(j = mmco[i].long_arg; j<16; j++){
+ remove_long(h, j, 0);
+ }
+ break;
+ case MMCO_RESET:
+ while(h->short_ref_count){
+ remove_short(h, h->short_ref[0]->frame_num, 0);
+ }
+ for(j = 0; j < 16; j++) {
+ remove_long(h, j, 0);
+ }
+ s->current_picture_ptr->poc=
+ s->current_picture_ptr->field_poc[0]=
+ s->current_picture_ptr->field_poc[1]=
+ h->poc_lsb=
+ h->poc_msb=
+ h->frame_num=
+ s->current_picture_ptr->frame_num= 0;
+ s->current_picture_ptr->mmco_reset=1;
+ break;
+ default: assert(0);
+ }
+ }
+
+ if (!current_ref_assigned) {
+ /* Second field of complementary field pair; the first field of
+ * which is already referenced. If short referenced, it
+ * should be first entry in short_ref. If not, it must exist
+ * in long_ref; trying to put it on the short list here is an
+ * error in the encoded bit stream (ref: 7.4.3.3, NOTE 2 and 3).
+ */
+ if (h->short_ref_count && h->short_ref[0] == s->current_picture_ptr) {
+ /* Just mark the second field valid */
+ s->current_picture_ptr->reference = PICT_FRAME;
+ } else if (s->current_picture_ptr->long_ref) {
+ av_log(h->s.avctx, AV_LOG_ERROR, "illegal short term reference "
+ "assignment for second field "
+ "in complementary field pair "
+ "(first field is long term)\n");
+ } else {
+ pic= remove_short(h, s->current_picture_ptr->frame_num, 0);
+ if(pic){
+ av_log(h->s.avctx, AV_LOG_ERROR, "illegal short term buffer state detected\n");
+ }
+
+ if(h->short_ref_count)
+ memmove(&h->short_ref[1], &h->short_ref[0], h->short_ref_count*sizeof(Picture*));
+
+ h->short_ref[0]= s->current_picture_ptr;
+ h->short_ref_count++;
+ s->current_picture_ptr->reference |= s->picture_structure;
+ }
+ }
+
+ if (h->long_ref_count + h->short_ref_count > h->sps.ref_frame_count){
+
+ /* We have too many reference frames, probably due to corrupted
+ * stream. Need to discard one frame. Prevents overrun of the
+ * short_ref and long_ref buffers.
+ */
+ av_log(h->s.avctx, AV_LOG_ERROR,
+ "number of reference frames exceeds max (probably "
+ "corrupt input), discarding one\n");
+
+ if (h->long_ref_count && !h->short_ref_count) {
+ for (i = 0; i < 16; ++i)
+ if (h->long_ref[i])
+ break;
+
+ assert(i < 16);
+ remove_long(h, i, 0);
+ } else {
+ pic = h->short_ref[h->short_ref_count - 1];
+ remove_short(h, pic->frame_num, 0);
+ }
+ }
+
+ print_short_term(h);
+ print_long_term(h);
+ return 0;
+}
+
+int ff_h264_decode_ref_pic_marking(H264Context *h, GetBitContext *gb){
+ MpegEncContext * const s = &h->s;
+ int i;
+
+ h->mmco_index= 0;
+ if(h->nal_unit_type == NAL_IDR_SLICE){ //FIXME fields
+ s->broken_link= get_bits1(gb) -1;
+ if(get_bits1(gb)){
+ h->mmco[0].opcode= MMCO_LONG;
+ h->mmco[0].long_arg= 0;
+ h->mmco_index= 1;
+ }
+ }else{
+ if(get_bits1(gb)){ // adaptive_ref_pic_marking_mode_flag
+ for(i= 0; i<MAX_MMCO_COUNT; i++) {
+ MMCOOpcode opcode= get_ue_golomb_31(gb);
+
+ h->mmco[i].opcode= opcode;
+ if(opcode==MMCO_SHORT2UNUSED || opcode==MMCO_SHORT2LONG){
+ h->mmco[i].short_pic_num= (h->curr_pic_num - get_ue_golomb(gb) - 1) & (h->max_pic_num - 1);
+/* if(h->mmco[i].short_pic_num >= h->short_ref_count || h->short_ref[ h->mmco[i].short_pic_num ] == NULL){
+ av_log(s->avctx, AV_LOG_ERROR, "illegal short ref in memory management control operation %d\n", mmco);
+ return -1;
+ }*/
+ }
+ if(opcode==MMCO_SHORT2LONG || opcode==MMCO_LONG2UNUSED || opcode==MMCO_LONG || opcode==MMCO_SET_MAX_LONG){
+ unsigned int long_arg= get_ue_golomb_31(gb);
+ if(long_arg >= 32 || (long_arg >= 16 && !(opcode == MMCO_LONG2UNUSED && FIELD_PICTURE))){
+ av_log(h->s.avctx, AV_LOG_ERROR, "illegal long ref in memory management control operation %d\n", opcode);
+ return -1;
+ }
+ h->mmco[i].long_arg= long_arg;
+ }
+
+ if(opcode > (unsigned)MMCO_LONG){
+ av_log(h->s.avctx, AV_LOG_ERROR, "illegal memory management control operation %d\n", opcode);
+ return -1;
+ }
+ if(opcode == MMCO_END)
+ break;
+ }
+ h->mmco_index= i;
+ }else{
+ assert(h->long_ref_count + h->short_ref_count <= h->sps.ref_frame_count);
+
+ if(h->short_ref_count && h->long_ref_count + h->short_ref_count == h->sps.ref_frame_count &&
+ !(FIELD_PICTURE && !s->first_field && s->current_picture_ptr->reference)) {
+ h->mmco[0].opcode= MMCO_SHORT2UNUSED;
+ h->mmco[0].short_pic_num= h->short_ref[ h->short_ref_count - 1 ]->frame_num;
+ h->mmco_index= 1;
+ if (FIELD_PICTURE) {
+ h->mmco[0].short_pic_num *= 2;
+ h->mmco[1].opcode= MMCO_SHORT2UNUSED;
+ h->mmco[1].short_pic_num= h->mmco[0].short_pic_num + 1;
+ h->mmco_index= 2;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_sei.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_sei.c
new file mode 100644
index 000000000..f98448130
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264_sei.c
@@ -0,0 +1,206 @@
+/*
+ * H.26L/H.264/AVC/JVT/14496-10/... sei decoding
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/h264_sei.c
+ * H.264 / AVC / MPEG4 part10 sei decoding.
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#include "internal.h"
+#include "avcodec.h"
+#include "h264.h"
+#include "golomb.h"
+
+//#undef NDEBUG
+#include <assert.h>
+
+static const uint8_t sei_num_clock_ts_table[9]={
+ 1, 1, 1, 2, 2, 3, 3, 2, 3
+};
+
+void ff_h264_reset_sei(H264Context *h) {
+ h->sei_recovery_frame_cnt = -1;
+ h->sei_dpb_output_delay = 0;
+ h->sei_cpb_removal_delay = -1;
+ h->sei_buffering_period_present = 0;
+}
+
+static int decode_picture_timing(H264Context *h){
+ MpegEncContext * const s = &h->s;
+ if(h->sps.nal_hrd_parameters_present_flag || h->sps.vcl_hrd_parameters_present_flag){
+ h->sei_cpb_removal_delay = get_bits(&s->gb, h->sps.cpb_removal_delay_length);
+ h->sei_dpb_output_delay = get_bits(&s->gb, h->sps.dpb_output_delay_length);
+ }
+ if(h->sps.pic_struct_present_flag){
+ unsigned int i, num_clock_ts;
+ h->sei_pic_struct = get_bits(&s->gb, 4);
+ h->sei_ct_type = 0;
+
+ if (h->sei_pic_struct > SEI_PIC_STRUCT_FRAME_TRIPLING)
+ return -1;
+
+ num_clock_ts = sei_num_clock_ts_table[h->sei_pic_struct];
+
+ for (i = 0 ; i < num_clock_ts ; i++){
+ if(get_bits(&s->gb, 1)){ /* clock_timestamp_flag */
+ unsigned int full_timestamp_flag;
+ h->sei_ct_type |= 1<<get_bits(&s->gb, 2);
+ skip_bits(&s->gb, 1); /* nuit_field_based_flag */
+ skip_bits(&s->gb, 5); /* counting_type */
+ full_timestamp_flag = get_bits(&s->gb, 1);
+ skip_bits(&s->gb, 1); /* discontinuity_flag */
+ skip_bits(&s->gb, 1); /* cnt_dropped_flag */
+ skip_bits(&s->gb, 8); /* n_frames */
+ if(full_timestamp_flag){
+ skip_bits(&s->gb, 6); /* seconds_value 0..59 */
+ skip_bits(&s->gb, 6); /* minutes_value 0..59 */
+ skip_bits(&s->gb, 5); /* hours_value 0..23 */
+ }else{
+ if(get_bits(&s->gb, 1)){ /* seconds_flag */
+ skip_bits(&s->gb, 6); /* seconds_value range 0..59 */
+ if(get_bits(&s->gb, 1)){ /* minutes_flag */
+ skip_bits(&s->gb, 6); /* minutes_value 0..59 */
+ if(get_bits(&s->gb, 1)) /* hours_flag */
+ skip_bits(&s->gb, 5); /* hours_value 0..23 */
+ }
+ }
+ }
+ if(h->sps.time_offset_length > 0)
+ skip_bits(&s->gb, h->sps.time_offset_length); /* time_offset */
+ }
+ }
+
+ if(s->avctx->debug & FF_DEBUG_PICT_INFO)
+ av_log(s->avctx, AV_LOG_DEBUG, "ct_type:%X pic_struct:%d\n", h->sei_ct_type, h->sei_pic_struct);
+ }
+ return 0;
+}
+
+static int decode_unregistered_user_data(H264Context *h, int size){
+ MpegEncContext * const s = &h->s;
+ uint8_t user_data[16+256];
+ int e, build, i;
+
+ if(size<16)
+ return -1;
+
+ for(i=0; i<sizeof(user_data)-1 && i<size; i++){
+ user_data[i]= get_bits(&s->gb, 8);
+ }
+
+ user_data[i]= 0;
+ e= sscanf(user_data+16, "x264 - core %d"/*%s - H.264/MPEG-4 AVC codec - Copyleft 2005 - http://www.videolan.org/x264.html*/, &build);
+ if(e==1 && build>=0)
+ h->x264_build= build;
+
+ if(s->avctx->debug & FF_DEBUG_BUGS)
+ av_log(s->avctx, AV_LOG_DEBUG, "user data:\"%s\"\n", user_data+16);
+
+ for(; i<size; i++)
+ skip_bits(&s->gb, 8);
+
+ return 0;
+}
+
+static int decode_recovery_point(H264Context *h){
+ MpegEncContext * const s = &h->s;
+
+ h->sei_recovery_frame_cnt = get_ue_golomb(&s->gb);
+ skip_bits(&s->gb, 4); /* 1b exact_match_flag, 1b broken_link_flag, 2b changing_slice_group_idc */
+
+ return 0;
+}
+
+static int decode_buffering_period(H264Context *h){
+ MpegEncContext * const s = &h->s;
+ unsigned int sps_id;
+ int sched_sel_idx;
+ SPS *sps;
+
+ sps_id = get_ue_golomb_31(&s->gb);
+ if(sps_id > 31 || !h->sps_buffers[sps_id]) {
+ av_log(h->s.avctx, AV_LOG_ERROR, "non-existing SPS %d referenced in buffering period\n", sps_id);
+ return -1;
+ }
+ sps = h->sps_buffers[sps_id];
+
+    // NOTE: the NAL HRD and VCL HRD cases below are duplicated almost verbatim in the standard; see H.264, D.1.1
+ if (sps->nal_hrd_parameters_present_flag) {
+ for (sched_sel_idx = 0; sched_sel_idx < sps->cpb_cnt; sched_sel_idx++) {
+ h->initial_cpb_removal_delay[sched_sel_idx] = get_bits(&s->gb, sps->initial_cpb_removal_delay_length);
+ skip_bits(&s->gb, sps->initial_cpb_removal_delay_length); // initial_cpb_removal_delay_offset
+ }
+ }
+ if (sps->vcl_hrd_parameters_present_flag) {
+ for (sched_sel_idx = 0; sched_sel_idx < sps->cpb_cnt; sched_sel_idx++) {
+ h->initial_cpb_removal_delay[sched_sel_idx] = get_bits(&s->gb, sps->initial_cpb_removal_delay_length);
+ skip_bits(&s->gb, sps->initial_cpb_removal_delay_length); // initial_cpb_removal_delay_offset
+ }
+ }
+
+ h->sei_buffering_period_present = 1;
+ return 0;
+}
+
+int ff_h264_decode_sei(H264Context *h){
+ MpegEncContext * const s = &h->s;
+
+ while(get_bits_count(&s->gb) + 16 < s->gb.size_in_bits){
+ int size, type;
+
+ type=0;
+ do{
+ type+= show_bits(&s->gb, 8);
+ }while(get_bits(&s->gb, 8) == 255);
+
+ size=0;
+ do{
+ size+= show_bits(&s->gb, 8);
+ }while(get_bits(&s->gb, 8) == 255);
+
+ switch(type){
+ case SEI_TYPE_PIC_TIMING: // Picture timing SEI
+ if(decode_picture_timing(h) < 0)
+ return -1;
+ break;
+ case SEI_TYPE_USER_DATA_UNREGISTERED:
+ if(decode_unregistered_user_data(h, size) < 0)
+ return -1;
+ break;
+ case SEI_TYPE_RECOVERY_POINT:
+ if(decode_recovery_point(h) < 0)
+ return -1;
+ break;
+ case SEI_BUFFERING_PERIOD:
+ if(decode_buffering_period(h) < 0)
+ return -1;
+ break;
+ default:
+ skip_bits(&s->gb, 8*size);
+ }
+
+ //FIXME check bits here
+ align_get_bits(&s->gb);
+ }
+
+ return 0;
+}
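+
+/* Editor's note: illustrative sketch, not part of the original patch.  The
+ * payloadType/payloadSize loops in ff_h264_decode_sei() implement the SEI
+ * byte coding: any number of 0xFF bytes, each worth 255, followed by one
+ * terminating byte, all summed.  On a plain byte buffer (sei_read_value is
+ * a made-up helper name): */
+static unsigned sei_read_value(const unsigned char *buf, unsigned *pos){
+    unsigned value = 0;
+    while (buf[*pos] == 0xFF){   /* each 0xFF continuation byte adds 255 */
+        value += 255;
+        (*pos)++;
+    }
+    value += buf[(*pos)++];      /* terminating byte (< 0xFF) */
+    return value;
+}
+/* Example: the bytes FF FF 2A decode to 255 + 255 + 42 = 552. */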
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264data.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264data.h
new file mode 100644
index 000000000..c08478eb0
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264data.h
@@ -0,0 +1,262 @@
+/*
+ * H26L/H264/AVC/JVT/14496-10/... encoder/decoder
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/h264data.h
+ * @brief
+ * H264 / AVC / MPEG4 part10 codec data table
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#ifndef AVCODEC_H264DATA_H
+#define AVCODEC_H264DATA_H
+
+#include <stdint.h>
+#include "libavutil/rational.h"
+#include "mpegvideo.h"
+#include "h264.h"
+
+
+static const uint8_t golomb_to_pict_type[5]=
+{FF_P_TYPE, FF_B_TYPE, FF_I_TYPE, FF_SP_TYPE, FF_SI_TYPE};
+
+static const uint8_t golomb_to_intra4x4_cbp[48]={
+ 47, 31, 15, 0, 23, 27, 29, 30, 7, 11, 13, 14, 39, 43, 45, 46,
+ 16, 3, 5, 10, 12, 19, 21, 26, 28, 35, 37, 42, 44, 1, 2, 4,
+ 8, 17, 18, 20, 24, 6, 9, 22, 25, 32, 33, 34, 36, 40, 38, 41
+};
+
+static const uint8_t golomb_to_inter_cbp[48]={
+ 0, 16, 1, 2, 4, 8, 32, 3, 5, 10, 12, 15, 47, 7, 11, 13,
+ 14, 6, 9, 31, 35, 37, 42, 44, 33, 34, 36, 40, 39, 43, 45, 46,
+ 17, 18, 20, 24, 19, 21, 26, 28, 23, 27, 29, 30, 22, 25, 38, 41
+};
+
+static const uint8_t zigzag_scan[16]={
+ 0+0*4, 1+0*4, 0+1*4, 0+2*4,
+ 1+1*4, 2+0*4, 3+0*4, 2+1*4,
+ 1+2*4, 0+3*4, 1+3*4, 2+2*4,
+ 3+1*4, 3+2*4, 2+3*4, 3+3*4,
+};
+
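+/* Editor's note: each entry above is a raster offset (x + 4*y) into the 4x4
+ * block, so reading zigzag_scan[] in order visits the block positions
+ *   (0,0)(1,0)(0,1)(0,2)(1,1)(2,0)(3,0)(2,1)
+ *   (1,2)(0,3)(1,3)(2,2)(3,1)(3,2)(2,3)(3,3)
+ * i.e. the zigzag order used for frame-coded 4x4 blocks; field_scan[] below
+ * is its interlace-friendly counterpart. */
+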
+static const uint8_t field_scan[16]={
+ 0+0*4, 0+1*4, 1+0*4, 0+2*4,
+ 0+3*4, 1+1*4, 1+2*4, 1+3*4,
+ 2+0*4, 2+1*4, 2+2*4, 2+3*4,
+ 3+0*4, 3+1*4, 3+2*4, 3+3*4,
+};
+
+static const uint8_t luma_dc_zigzag_scan[16]={
+ 0*16 + 0*64, 1*16 + 0*64, 2*16 + 0*64, 0*16 + 2*64,
+ 3*16 + 0*64, 0*16 + 1*64, 1*16 + 1*64, 2*16 + 1*64,
+ 1*16 + 2*64, 2*16 + 2*64, 3*16 + 2*64, 0*16 + 3*64,
+ 3*16 + 1*64, 1*16 + 3*64, 2*16 + 3*64, 3*16 + 3*64,
+};
+
+static const uint8_t luma_dc_field_scan[16]={
+ 0*16 + 0*64, 2*16 + 0*64, 1*16 + 0*64, 0*16 + 2*64,
+ 2*16 + 2*64, 3*16 + 0*64, 1*16 + 2*64, 3*16 + 2*64,
+ 0*16 + 1*64, 2*16 + 1*64, 0*16 + 3*64, 2*16 + 3*64,
+ 1*16 + 1*64, 3*16 + 1*64, 1*16 + 3*64, 3*16 + 3*64,
+};
+
+static const uint8_t chroma_dc_scan[4]={
+ (0+0*2)*16, (1+0*2)*16,
+ (0+1*2)*16, (1+1*2)*16, //FIXME
+};
+
+// zigzag_scan8x8_cavlc[i] = zigzag_scan8x8[(i/4) + 16*(i%4)]
+static const uint8_t zigzag_scan8x8_cavlc[64]={
+ 0+0*8, 1+1*8, 1+2*8, 2+2*8,
+ 4+1*8, 0+5*8, 3+3*8, 7+0*8,
+ 3+4*8, 1+7*8, 5+3*8, 6+3*8,
+ 2+7*8, 6+4*8, 5+6*8, 7+5*8,
+ 1+0*8, 2+0*8, 0+3*8, 3+1*8,
+ 3+2*8, 0+6*8, 4+2*8, 6+1*8,
+ 2+5*8, 2+6*8, 6+2*8, 5+4*8,
+ 3+7*8, 7+3*8, 4+7*8, 7+6*8,
+ 0+1*8, 3+0*8, 0+4*8, 4+0*8,
+ 2+3*8, 1+5*8, 5+1*8, 5+2*8,
+ 1+6*8, 3+5*8, 7+1*8, 4+5*8,
+ 4+6*8, 7+4*8, 5+7*8, 6+7*8,
+ 0+2*8, 2+1*8, 1+3*8, 5+0*8,
+ 1+4*8, 2+4*8, 6+0*8, 4+3*8,
+ 0+7*8, 4+4*8, 7+2*8, 3+6*8,
+ 5+5*8, 6+5*8, 6+6*8, 7+7*8,
+};
+
+static const uint8_t field_scan8x8[64]={
+ 0+0*8, 0+1*8, 0+2*8, 1+0*8,
+ 1+1*8, 0+3*8, 0+4*8, 1+2*8,
+ 2+0*8, 1+3*8, 0+5*8, 0+6*8,
+ 0+7*8, 1+4*8, 2+1*8, 3+0*8,
+ 2+2*8, 1+5*8, 1+6*8, 1+7*8,
+ 2+3*8, 3+1*8, 4+0*8, 3+2*8,
+ 2+4*8, 2+5*8, 2+6*8, 2+7*8,
+ 3+3*8, 4+1*8, 5+0*8, 4+2*8,
+ 3+4*8, 3+5*8, 3+6*8, 3+7*8,
+ 4+3*8, 5+1*8, 6+0*8, 5+2*8,
+ 4+4*8, 4+5*8, 4+6*8, 4+7*8,
+ 5+3*8, 6+1*8, 6+2*8, 5+4*8,
+ 5+5*8, 5+6*8, 5+7*8, 6+3*8,
+ 7+0*8, 7+1*8, 6+4*8, 6+5*8,
+ 6+6*8, 6+7*8, 7+2*8, 7+3*8,
+ 7+4*8, 7+5*8, 7+6*8, 7+7*8,
+};
+
+static const uint8_t field_scan8x8_cavlc[64]={
+ 0+0*8, 1+1*8, 2+0*8, 0+7*8,
+ 2+2*8, 2+3*8, 2+4*8, 3+3*8,
+ 3+4*8, 4+3*8, 4+4*8, 5+3*8,
+ 5+5*8, 7+0*8, 6+6*8, 7+4*8,
+ 0+1*8, 0+3*8, 1+3*8, 1+4*8,
+ 1+5*8, 3+1*8, 2+5*8, 4+1*8,
+ 3+5*8, 5+1*8, 4+5*8, 6+1*8,
+ 5+6*8, 7+1*8, 6+7*8, 7+5*8,
+ 0+2*8, 0+4*8, 0+5*8, 2+1*8,
+ 1+6*8, 4+0*8, 2+6*8, 5+0*8,
+ 3+6*8, 6+0*8, 4+6*8, 6+2*8,
+ 5+7*8, 6+4*8, 7+2*8, 7+6*8,
+ 1+0*8, 1+2*8, 0+6*8, 3+0*8,
+ 1+7*8, 3+2*8, 2+7*8, 4+2*8,
+ 3+7*8, 5+2*8, 4+7*8, 5+4*8,
+ 6+3*8, 6+5*8, 7+3*8, 7+7*8,
+};
+
+typedef struct IMbInfo{
+ uint16_t type;
+ uint8_t pred_mode;
+ uint8_t cbp;
+} IMbInfo;
+
+static const IMbInfo i_mb_type_info[26]={
+{MB_TYPE_INTRA4x4 , -1, -1},
+{MB_TYPE_INTRA16x16, 2, 0},
+{MB_TYPE_INTRA16x16, 1, 0},
+{MB_TYPE_INTRA16x16, 0, 0},
+{MB_TYPE_INTRA16x16, 3, 0},
+{MB_TYPE_INTRA16x16, 2, 16},
+{MB_TYPE_INTRA16x16, 1, 16},
+{MB_TYPE_INTRA16x16, 0, 16},
+{MB_TYPE_INTRA16x16, 3, 16},
+{MB_TYPE_INTRA16x16, 2, 32},
+{MB_TYPE_INTRA16x16, 1, 32},
+{MB_TYPE_INTRA16x16, 0, 32},
+{MB_TYPE_INTRA16x16, 3, 32},
+{MB_TYPE_INTRA16x16, 2, 15+0},
+{MB_TYPE_INTRA16x16, 1, 15+0},
+{MB_TYPE_INTRA16x16, 0, 15+0},
+{MB_TYPE_INTRA16x16, 3, 15+0},
+{MB_TYPE_INTRA16x16, 2, 15+16},
+{MB_TYPE_INTRA16x16, 1, 15+16},
+{MB_TYPE_INTRA16x16, 0, 15+16},
+{MB_TYPE_INTRA16x16, 3, 15+16},
+{MB_TYPE_INTRA16x16, 2, 15+32},
+{MB_TYPE_INTRA16x16, 1, 15+32},
+{MB_TYPE_INTRA16x16, 0, 15+32},
+{MB_TYPE_INTRA16x16, 3, 15+32},
+{MB_TYPE_INTRA_PCM , -1, -1},
+};
+
+typedef struct PMbInfo{
+ uint16_t type;
+ uint8_t partition_count;
+} PMbInfo;
+
+static const PMbInfo p_mb_type_info[5]={
+{MB_TYPE_16x16|MB_TYPE_P0L0 , 1},
+{MB_TYPE_16x8 |MB_TYPE_P0L0|MB_TYPE_P1L0, 2},
+{MB_TYPE_8x16 |MB_TYPE_P0L0|MB_TYPE_P1L0, 2},
+{MB_TYPE_8x8 |MB_TYPE_P0L0|MB_TYPE_P1L0, 4},
+{MB_TYPE_8x8 |MB_TYPE_P0L0|MB_TYPE_P1L0|MB_TYPE_REF0, 4},
+};
+
+static const PMbInfo p_sub_mb_type_info[4]={
+{MB_TYPE_16x16|MB_TYPE_P0L0 , 1},
+{MB_TYPE_16x8 |MB_TYPE_P0L0 , 2},
+{MB_TYPE_8x16 |MB_TYPE_P0L0 , 2},
+{MB_TYPE_8x8 |MB_TYPE_P0L0 , 4},
+};
+
+static const PMbInfo b_mb_type_info[23]={
+{MB_TYPE_DIRECT2 , 1, },
+{MB_TYPE_16x16|MB_TYPE_P0L0 , 1, },
+{MB_TYPE_16x16 |MB_TYPE_P0L1 , 1, },
+{MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1 , 1, },
+{MB_TYPE_16x8 |MB_TYPE_P0L0 |MB_TYPE_P1L0 , 2, },
+{MB_TYPE_8x16 |MB_TYPE_P0L0 |MB_TYPE_P1L0 , 2, },
+{MB_TYPE_16x8 |MB_TYPE_P0L1 |MB_TYPE_P1L1, 2, },
+{MB_TYPE_8x16 |MB_TYPE_P0L1 |MB_TYPE_P1L1, 2, },
+{MB_TYPE_16x8 |MB_TYPE_P0L0 |MB_TYPE_P1L1, 2, },
+{MB_TYPE_8x16 |MB_TYPE_P0L0 |MB_TYPE_P1L1, 2, },
+{MB_TYPE_16x8 |MB_TYPE_P0L1|MB_TYPE_P1L0 , 2, },
+{MB_TYPE_8x16 |MB_TYPE_P0L1|MB_TYPE_P1L0 , 2, },
+{MB_TYPE_16x8 |MB_TYPE_P0L0 |MB_TYPE_P1L0|MB_TYPE_P1L1, 2, },
+{MB_TYPE_8x16 |MB_TYPE_P0L0 |MB_TYPE_P1L0|MB_TYPE_P1L1, 2, },
+{MB_TYPE_16x8 |MB_TYPE_P0L1|MB_TYPE_P1L0|MB_TYPE_P1L1, 2, },
+{MB_TYPE_8x16 |MB_TYPE_P0L1|MB_TYPE_P1L0|MB_TYPE_P1L1, 2, },
+{MB_TYPE_16x8 |MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_P1L0 , 2, },
+{MB_TYPE_8x16 |MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_P1L0 , 2, },
+{MB_TYPE_16x8 |MB_TYPE_P0L0|MB_TYPE_P0L1 |MB_TYPE_P1L1, 2, },
+{MB_TYPE_8x16 |MB_TYPE_P0L0|MB_TYPE_P0L1 |MB_TYPE_P1L1, 2, },
+{MB_TYPE_16x8 |MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_P1L0|MB_TYPE_P1L1, 2, },
+{MB_TYPE_8x16 |MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_P1L0|MB_TYPE_P1L1, 2, },
+{MB_TYPE_8x8 |MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_P1L0|MB_TYPE_P1L1, 4, },
+};
+
+static const PMbInfo b_sub_mb_type_info[13]={
+{MB_TYPE_DIRECT2 , 1, },
+{MB_TYPE_16x16|MB_TYPE_P0L0 , 1, },
+{MB_TYPE_16x16 |MB_TYPE_P0L1 , 1, },
+{MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1 , 1, },
+{MB_TYPE_16x8 |MB_TYPE_P0L0 |MB_TYPE_P1L0 , 2, },
+{MB_TYPE_8x16 |MB_TYPE_P0L0 |MB_TYPE_P1L0 , 2, },
+{MB_TYPE_16x8 |MB_TYPE_P0L1 |MB_TYPE_P1L1, 2, },
+{MB_TYPE_8x16 |MB_TYPE_P0L1 |MB_TYPE_P1L1, 2, },
+{MB_TYPE_16x8 |MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_P1L0|MB_TYPE_P1L1, 2, },
+{MB_TYPE_8x16 |MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_P1L0|MB_TYPE_P1L1, 2, },
+{MB_TYPE_8x8 |MB_TYPE_P0L0 |MB_TYPE_P1L0 , 4, },
+{MB_TYPE_8x8 |MB_TYPE_P0L1 |MB_TYPE_P1L1, 4, },
+{MB_TYPE_8x8 |MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_P1L0|MB_TYPE_P1L1, 4, },
+};
+
+static const uint8_t dequant4_coeff_init[6][3]={
+ {10,13,16},
+ {11,14,18},
+ {13,16,20},
+ {14,18,23},
+ {16,20,25},
+ {18,23,29},
+};
+
+static const uint8_t dequant8_coeff_init_scan[16] = {
+ 0,3,4,3, 3,1,5,1, 4,5,2,5, 3,1,5,1
+};
+static const uint8_t dequant8_coeff_init[6][6]={
+ {20,18,32,19,25,24},
+ {22,19,35,21,28,26},
+ {26,23,42,24,33,31},
+ {28,25,45,26,35,33},
+ {32,28,51,30,40,38},
+ {36,32,58,34,46,43},
+};
+
+#endif /* AVCODEC_H264DATA_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264idct.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264idct.c
new file mode 100644
index 000000000..7de56b241
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264idct.c
@@ -0,0 +1,218 @@
+/*
+ * H.264 IDCT
+ * Copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file h264idct.c
+ * H.264 IDCT.
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#include "dsputil.h"
+
+static av_always_inline void idct_internal(uint8_t *dst, DCTELEM *block, int stride, int block_stride, int shift, int add){
+ int i;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+
+ block[0] += 1<<(shift-1);
+
+ for(i=0; i<4; i++){
+ const int z0= block[0 + block_stride*i] + block[2 + block_stride*i];
+ const int z1= block[0 + block_stride*i] - block[2 + block_stride*i];
+ const int z2= (block[1 + block_stride*i]>>1) - block[3 + block_stride*i];
+ const int z3= block[1 + block_stride*i] + (block[3 + block_stride*i]>>1);
+
+ block[0 + block_stride*i]= z0 + z3;
+ block[1 + block_stride*i]= z1 + z2;
+ block[2 + block_stride*i]= z1 - z2;
+ block[3 + block_stride*i]= z0 - z3;
+ }
+
+ for(i=0; i<4; i++){
+ const int z0= block[i + block_stride*0] + block[i + block_stride*2];
+ const int z1= block[i + block_stride*0] - block[i + block_stride*2];
+ const int z2= (block[i + block_stride*1]>>1) - block[i + block_stride*3];
+ const int z3= block[i + block_stride*1] + (block[i + block_stride*3]>>1);
+
+ dst[i + 0*stride]= cm[ add*dst[i + 0*stride] + ((z0 + z3) >> shift) ];
+ dst[i + 1*stride]= cm[ add*dst[i + 1*stride] + ((z1 + z2) >> shift) ];
+ dst[i + 2*stride]= cm[ add*dst[i + 2*stride] + ((z1 - z2) >> shift) ];
+ dst[i + 3*stride]= cm[ add*dst[i + 3*stride] + ((z0 - z3) >> shift) ];
+ }
+}
+
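+/* Editor's note: idct_internal() above is the H.264 4x4 integer inverse
+ * transform done as two 1-D passes (rows, then columns).  The ">>1" terms
+ * are the transform's 1/2 weights, the rounding bias 1<<(shift-1) is folded
+ * into block[0] up front, and with add=1 the shifted result is added to the
+ * existing prediction in dst and clipped via the ff_cropTbl lookup. */
+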
+void ff_h264_idct_add_c(uint8_t *dst, DCTELEM *block, int stride){
+ idct_internal(dst, block, stride, 4, 6, 1);
+}
+
+void ff_h264_lowres_idct_add_c(uint8_t *dst, int stride, DCTELEM *block){
+ idct_internal(dst, block, stride, 8, 3, 1);
+}
+
+void ff_h264_lowres_idct_put_c(uint8_t *dst, int stride, DCTELEM *block){
+ idct_internal(dst, block, stride, 8, 3, 0);
+}
+
+void ff_h264_idct8_add_c(uint8_t *dst, DCTELEM *block, int stride){
+ int i;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+
+ block[0] += 32;
+
+ for( i = 0; i < 8; i++ )
+ {
+ const int a0 = block[0+i*8] + block[4+i*8];
+ const int a2 = block[0+i*8] - block[4+i*8];
+ const int a4 = (block[2+i*8]>>1) - block[6+i*8];
+ const int a6 = (block[6+i*8]>>1) + block[2+i*8];
+
+ const int b0 = a0 + a6;
+ const int b2 = a2 + a4;
+ const int b4 = a2 - a4;
+ const int b6 = a0 - a6;
+
+ const int a1 = -block[3+i*8] + block[5+i*8] - block[7+i*8] - (block[7+i*8]>>1);
+ const int a3 = block[1+i*8] + block[7+i*8] - block[3+i*8] - (block[3+i*8]>>1);
+ const int a5 = -block[1+i*8] + block[7+i*8] + block[5+i*8] + (block[5+i*8]>>1);
+ const int a7 = block[3+i*8] + block[5+i*8] + block[1+i*8] + (block[1+i*8]>>1);
+
+ const int b1 = (a7>>2) + a1;
+ const int b3 = a3 + (a5>>2);
+ const int b5 = (a3>>2) - a5;
+ const int b7 = a7 - (a1>>2);
+
+ block[0+i*8] = b0 + b7;
+ block[7+i*8] = b0 - b7;
+ block[1+i*8] = b2 + b5;
+ block[6+i*8] = b2 - b5;
+ block[2+i*8] = b4 + b3;
+ block[5+i*8] = b4 - b3;
+ block[3+i*8] = b6 + b1;
+ block[4+i*8] = b6 - b1;
+ }
+ for( i = 0; i < 8; i++ )
+ {
+ const int a0 = block[i+0*8] + block[i+4*8];
+ const int a2 = block[i+0*8] - block[i+4*8];
+ const int a4 = (block[i+2*8]>>1) - block[i+6*8];
+ const int a6 = (block[i+6*8]>>1) + block[i+2*8];
+
+ const int b0 = a0 + a6;
+ const int b2 = a2 + a4;
+ const int b4 = a2 - a4;
+ const int b6 = a0 - a6;
+
+ const int a1 = -block[i+3*8] + block[i+5*8] - block[i+7*8] - (block[i+7*8]>>1);
+ const int a3 = block[i+1*8] + block[i+7*8] - block[i+3*8] - (block[i+3*8]>>1);
+ const int a5 = -block[i+1*8] + block[i+7*8] + block[i+5*8] + (block[i+5*8]>>1);
+ const int a7 = block[i+3*8] + block[i+5*8] + block[i+1*8] + (block[i+1*8]>>1);
+
+ const int b1 = (a7>>2) + a1;
+ const int b3 = a3 + (a5>>2);
+ const int b5 = (a3>>2) - a5;
+ const int b7 = a7 - (a1>>2);
+
+ dst[i + 0*stride] = cm[ dst[i + 0*stride] + ((b0 + b7) >> 6) ];
+ dst[i + 1*stride] = cm[ dst[i + 1*stride] + ((b2 + b5) >> 6) ];
+ dst[i + 2*stride] = cm[ dst[i + 2*stride] + ((b4 + b3) >> 6) ];
+ dst[i + 3*stride] = cm[ dst[i + 3*stride] + ((b6 + b1) >> 6) ];
+ dst[i + 4*stride] = cm[ dst[i + 4*stride] + ((b6 - b1) >> 6) ];
+ dst[i + 5*stride] = cm[ dst[i + 5*stride] + ((b4 - b3) >> 6) ];
+ dst[i + 6*stride] = cm[ dst[i + 6*stride] + ((b2 - b5) >> 6) ];
+ dst[i + 7*stride] = cm[ dst[i + 7*stride] + ((b0 - b7) >> 6) ];
+ }
+}
+
+// assumes all AC coefs are 0
+void ff_h264_idct_dc_add_c(uint8_t *dst, DCTELEM *block, int stride){
+ int i, j;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+ int dc = (block[0] + 32) >> 6;
+ for( j = 0; j < 4; j++ )
+ {
+ for( i = 0; i < 4; i++ )
+ dst[i] = cm[ dst[i] + dc ];
+ dst += stride;
+ }
+}
+
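+/* Editor's note (sanity check): with every AC coefficient zero, the generic
+ * idct_internal() path degenerates to the shortcut above -- the row pass
+ * spreads block[0]+32 across row 0, and the column pass then adds
+ * (block[0]+32)>>6 to all 16 output samples, which is exactly the dc value
+ * computed by ff_h264_idct_dc_add_c(). */
+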
+void ff_h264_idct8_dc_add_c(uint8_t *dst, DCTELEM *block, int stride){
+ int i, j;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+ int dc = (block[0] + 32) >> 6;
+ for( j = 0; j < 8; j++ )
+ {
+ for( i = 0; i < 8; i++ )
+ dst[i] = cm[ dst[i] + dc ];
+ dst += stride;
+ }
+}
+
+//FIXME this table is a duplicate from h264data.h, and will be removed once the tables from h264 have been split
+static const uint8_t scan8[16 + 2*4]={
+ 4+1*8, 5+1*8, 4+2*8, 5+2*8,
+ 6+1*8, 7+1*8, 6+2*8, 7+2*8,
+ 4+3*8, 5+3*8, 4+4*8, 5+4*8,
+ 6+3*8, 7+3*8, 6+4*8, 7+4*8,
+ 1+1*8, 2+1*8,
+ 1+2*8, 2+2*8,
+ 1+4*8, 2+4*8,
+ 1+5*8, 2+5*8,
+};
+
+void ff_h264_idct_add16_c(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
+ int i;
+ for(i=0; i<16; i++){
+ int nnz = nnzc[ scan8[i] ];
+ if(nnz){
+ if(nnz==1 && block[i*16]) ff_h264_idct_dc_add_c(dst + block_offset[i], block + i*16, stride);
+ else idct_internal (dst + block_offset[i], block + i*16, stride, 4, 6, 1);
+ }
+ }
+}
+
+void ff_h264_idct_add16intra_c(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
+ int i;
+ for(i=0; i<16; i++){
+ if(nnzc[ scan8[i] ]) idct_internal (dst + block_offset[i], block + i*16, stride, 4, 6, 1);
+ else if(block[i*16]) ff_h264_idct_dc_add_c(dst + block_offset[i], block + i*16, stride);
+ }
+}
+
+void ff_h264_idct8_add4_c(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
+ int i;
+ for(i=0; i<16; i+=4){
+ int nnz = nnzc[ scan8[i] ];
+ if(nnz){
+ if(nnz==1 && block[i*16]) ff_h264_idct8_dc_add_c(dst + block_offset[i], block + i*16, stride);
+ else ff_h264_idct8_add_c (dst + block_offset[i], block + i*16, stride);
+ }
+ }
+}
+
+void ff_h264_idct_add8_c(uint8_t **dest, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
+ int i;
+ for(i=16; i<16+8; i++){
+ if(nnzc[ scan8[i] ])
+ ff_h264_idct_add_c (dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
+ else if(block[i*16])
+ ff_h264_idct_dc_add_c(dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
+ }
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264pred.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264pred.c
new file mode 100644
index 000000000..3b6fc8211
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264pred.c
@@ -0,0 +1,1175 @@
+/*
+ * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/h264pred.c
+ * H.264 / AVC / MPEG4 part10 prediction functions.
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#include "avcodec.h"
+#include "mpegvideo.h"
+#include "h264pred.h"
+
+static void pred4x4_vertical_c(uint8_t *src, uint8_t *topright, int stride){
+ const uint32_t a= ((uint32_t*)(src-stride))[0];
+ ((uint32_t*)(src+0*stride))[0]= a;
+ ((uint32_t*)(src+1*stride))[0]= a;
+ ((uint32_t*)(src+2*stride))[0]= a;
+ ((uint32_t*)(src+3*stride))[0]= a;
+}
+
+static void pred4x4_horizontal_c(uint8_t *src, uint8_t *topright, int stride){
+ ((uint32_t*)(src+0*stride))[0]= src[-1+0*stride]*0x01010101;
+ ((uint32_t*)(src+1*stride))[0]= src[-1+1*stride]*0x01010101;
+ ((uint32_t*)(src+2*stride))[0]= src[-1+2*stride]*0x01010101;
+ ((uint32_t*)(src+3*stride))[0]= src[-1+3*stride]*0x01010101;
+}
+
+static void pred4x4_dc_c(uint8_t *src, uint8_t *topright, int stride){
+ const int dc= ( src[-stride] + src[1-stride] + src[2-stride] + src[3-stride]
+ + src[-1+0*stride] + src[-1+1*stride] + src[-1+2*stride] + src[-1+3*stride] + 4) >>3;
+
+ ((uint32_t*)(src+0*stride))[0]=
+ ((uint32_t*)(src+1*stride))[0]=
+ ((uint32_t*)(src+2*stride))[0]=
+ ((uint32_t*)(src+3*stride))[0]= dc* 0x01010101;
+}
+
+static void pred4x4_left_dc_c(uint8_t *src, uint8_t *topright, int stride){
+ const int dc= ( src[-1+0*stride] + src[-1+1*stride] + src[-1+2*stride] + src[-1+3*stride] + 2) >>2;
+
+ ((uint32_t*)(src+0*stride))[0]=
+ ((uint32_t*)(src+1*stride))[0]=
+ ((uint32_t*)(src+2*stride))[0]=
+ ((uint32_t*)(src+3*stride))[0]= dc* 0x01010101;
+}
+
+static void pred4x4_top_dc_c(uint8_t *src, uint8_t *topright, int stride){
+ const int dc= ( src[-stride] + src[1-stride] + src[2-stride] + src[3-stride] + 2) >>2;
+
+ ((uint32_t*)(src+0*stride))[0]=
+ ((uint32_t*)(src+1*stride))[0]=
+ ((uint32_t*)(src+2*stride))[0]=
+ ((uint32_t*)(src+3*stride))[0]= dc* 0x01010101;
+}
+
+static void pred4x4_128_dc_c(uint8_t *src, uint8_t *topright, int stride){
+ ((uint32_t*)(src+0*stride))[0]=
+ ((uint32_t*)(src+1*stride))[0]=
+ ((uint32_t*)(src+2*stride))[0]=
+ ((uint32_t*)(src+3*stride))[0]= 128U*0x01010101U;
+}
+
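+/* Editor's note: illustrative sketch (splat4 is a made-up helper).  The DC
+ * predictors above fill a whole 4-byte row with one value in a single
+ * store: multiplying an 8-bit value by 0x01010101 replicates it into all
+ * four byte lanes of a 32-bit word. */
+static unsigned splat4(unsigned v){
+    return (v & 0xFF) * 0x01010101u;   /* splat4(0x80) == 0x80808080 */
+}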
+
+#define LOAD_TOP_RIGHT_EDGE\
+ const int av_unused t4= topright[0];\
+ const int av_unused t5= topright[1];\
+ const int av_unused t6= topright[2];\
+ const int av_unused t7= topright[3];\
+
+#define LOAD_DOWN_LEFT_EDGE\
+ const int av_unused l4= src[-1+4*stride];\
+ const int av_unused l5= src[-1+5*stride];\
+ const int av_unused l6= src[-1+6*stride];\
+ const int av_unused l7= src[-1+7*stride];\
+
+#define LOAD_LEFT_EDGE\
+ const int av_unused l0= src[-1+0*stride];\
+ const int av_unused l1= src[-1+1*stride];\
+ const int av_unused l2= src[-1+2*stride];\
+ const int av_unused l3= src[-1+3*stride];\
+
+#define LOAD_TOP_EDGE\
+ const int av_unused t0= src[ 0-1*stride];\
+ const int av_unused t1= src[ 1-1*stride];\
+ const int av_unused t2= src[ 2-1*stride];\
+ const int av_unused t3= src[ 3-1*stride];\
+
+static void pred4x4_down_right_c(uint8_t *src, uint8_t *topright, int stride){
+ const int lt= src[-1-1*stride];
+ LOAD_TOP_EDGE
+ LOAD_LEFT_EDGE
+
+ src[0+3*stride]=(l3 + 2*l2 + l1 + 2)>>2;
+ src[0+2*stride]=
+ src[1+3*stride]=(l2 + 2*l1 + l0 + 2)>>2;
+ src[0+1*stride]=
+ src[1+2*stride]=
+ src[2+3*stride]=(l1 + 2*l0 + lt + 2)>>2;
+ src[0+0*stride]=
+ src[1+1*stride]=
+ src[2+2*stride]=
+ src[3+3*stride]=(l0 + 2*lt + t0 + 2)>>2;
+ src[1+0*stride]=
+ src[2+1*stride]=
+ src[3+2*stride]=(lt + 2*t0 + t1 + 2)>>2;
+ src[2+0*stride]=
+ src[3+1*stride]=(t0 + 2*t1 + t2 + 2)>>2;
+ src[3+0*stride]=(t1 + 2*t2 + t3 + 2)>>2;
+}
+
+static void pred4x4_down_left_c(uint8_t *src, uint8_t *topright, int stride){
+ LOAD_TOP_EDGE
+ LOAD_TOP_RIGHT_EDGE
+// LOAD_LEFT_EDGE
+
+ src[0+0*stride]=(t0 + t2 + 2*t1 + 2)>>2;
+ src[1+0*stride]=
+ src[0+1*stride]=(t1 + t3 + 2*t2 + 2)>>2;
+ src[2+0*stride]=
+ src[1+1*stride]=
+ src[0+2*stride]=(t2 + t4 + 2*t3 + 2)>>2;
+ src[3+0*stride]=
+ src[2+1*stride]=
+ src[1+2*stride]=
+ src[0+3*stride]=(t3 + t5 + 2*t4 + 2)>>2;
+ src[3+1*stride]=
+ src[2+2*stride]=
+ src[1+3*stride]=(t4 + t6 + 2*t5 + 2)>>2;
+ src[3+2*stride]=
+ src[2+3*stride]=(t5 + t7 + 2*t6 + 2)>>2;
+ src[3+3*stride]=(t6 + 3*t7 + 2)>>2;
+}
+
+static void pred4x4_down_left_svq3_c(uint8_t *src, uint8_t *topright, int stride){
+ LOAD_TOP_EDGE
+ LOAD_LEFT_EDGE
+ const av_unused int unu0= t0;
+ const av_unused int unu1= l0;
+
+ src[0+0*stride]=(l1 + t1)>>1;
+ src[1+0*stride]=
+ src[0+1*stride]=(l2 + t2)>>1;
+ src[2+0*stride]=
+ src[1+1*stride]=
+ src[0+2*stride]=
+ src[3+0*stride]=
+ src[2+1*stride]=
+ src[1+2*stride]=
+ src[0+3*stride]=
+ src[3+1*stride]=
+ src[2+2*stride]=
+ src[1+3*stride]=
+ src[3+2*stride]=
+ src[2+3*stride]=
+ src[3+3*stride]=(l3 + t3)>>1;
+}
+
+static void pred4x4_down_left_rv40_c(uint8_t *src, uint8_t *topright, int stride){
+ LOAD_TOP_EDGE
+ LOAD_TOP_RIGHT_EDGE
+ LOAD_LEFT_EDGE
+ LOAD_DOWN_LEFT_EDGE
+
+ src[0+0*stride]=(t0 + t2 + 2*t1 + 2 + l0 + l2 + 2*l1 + 2)>>3;
+ src[1+0*stride]=
+ src[0+1*stride]=(t1 + t3 + 2*t2 + 2 + l1 + l3 + 2*l2 + 2)>>3;
+ src[2+0*stride]=
+ src[1+1*stride]=
+ src[0+2*stride]=(t2 + t4 + 2*t3 + 2 + l2 + l4 + 2*l3 + 2)>>3;
+ src[3+0*stride]=
+ src[2+1*stride]=
+ src[1+2*stride]=
+ src[0+3*stride]=(t3 + t5 + 2*t4 + 2 + l3 + l5 + 2*l4 + 2)>>3;
+ src[3+1*stride]=
+ src[2+2*stride]=
+ src[1+3*stride]=(t4 + t6 + 2*t5 + 2 + l4 + l6 + 2*l5 + 2)>>3;
+ src[3+2*stride]=
+ src[2+3*stride]=(t5 + t7 + 2*t6 + 2 + l5 + l7 + 2*l6 + 2)>>3;
+ src[3+3*stride]=(t6 + t7 + 1 + l6 + l7 + 1)>>2;
+}
+
+static void pred4x4_down_left_rv40_nodown_c(uint8_t *src, uint8_t *topright, int stride){
+ LOAD_TOP_EDGE
+ LOAD_TOP_RIGHT_EDGE
+ LOAD_LEFT_EDGE
+
+ src[0+0*stride]=(t0 + t2 + 2*t1 + 2 + l0 + l2 + 2*l1 + 2)>>3;
+ src[1+0*stride]=
+ src[0+1*stride]=(t1 + t3 + 2*t2 + 2 + l1 + l3 + 2*l2 + 2)>>3;
+ src[2+0*stride]=
+ src[1+1*stride]=
+ src[0+2*stride]=(t2 + t4 + 2*t3 + 2 + l2 + 3*l3 + 2)>>3;
+ src[3+0*stride]=
+ src[2+1*stride]=
+ src[1+2*stride]=
+ src[0+3*stride]=(t3 + t5 + 2*t4 + 2 + l3*4 + 2)>>3;
+ src[3+1*stride]=
+ src[2+2*stride]=
+ src[1+3*stride]=(t4 + t6 + 2*t5 + 2 + l3*4 + 2)>>3;
+ src[3+2*stride]=
+ src[2+3*stride]=(t5 + t7 + 2*t6 + 2 + l3*4 + 2)>>3;
+ src[3+3*stride]=(t6 + t7 + 1 + 2*l3 + 1)>>2;
+}
+
+static void pred4x4_vertical_right_c(uint8_t *src, uint8_t *topright, int stride){
+ const int lt= src[-1-1*stride];
+ LOAD_TOP_EDGE
+ LOAD_LEFT_EDGE
+
+ src[0+0*stride]=
+ src[1+2*stride]=(lt + t0 + 1)>>1;
+ src[1+0*stride]=
+ src[2+2*stride]=(t0 + t1 + 1)>>1;
+ src[2+0*stride]=
+ src[3+2*stride]=(t1 + t2 + 1)>>1;
+ src[3+0*stride]=(t2 + t3 + 1)>>1;
+ src[0+1*stride]=
+ src[1+3*stride]=(l0 + 2*lt + t0 + 2)>>2;
+ src[1+1*stride]=
+ src[2+3*stride]=(lt + 2*t0 + t1 + 2)>>2;
+ src[2+1*stride]=
+ src[3+3*stride]=(t0 + 2*t1 + t2 + 2)>>2;
+ src[3+1*stride]=(t1 + 2*t2 + t3 + 2)>>2;
+ src[0+2*stride]=(lt + 2*l0 + l1 + 2)>>2;
+ src[0+3*stride]=(l0 + 2*l1 + l2 + 2)>>2;
+}
+
+static void pred4x4_vertical_left_c(uint8_t *src, uint8_t *topright, int stride){
+ LOAD_TOP_EDGE
+ LOAD_TOP_RIGHT_EDGE
+
+ src[0+0*stride]=(t0 + t1 + 1)>>1;
+ src[1+0*stride]=
+ src[0+2*stride]=(t1 + t2 + 1)>>1;
+ src[2+0*stride]=
+ src[1+2*stride]=(t2 + t3 + 1)>>1;
+ src[3+0*stride]=
+ src[2+2*stride]=(t3 + t4+ 1)>>1;
+ src[3+2*stride]=(t4 + t5+ 1)>>1;
+ src[0+1*stride]=(t0 + 2*t1 + t2 + 2)>>2;
+ src[1+1*stride]=
+ src[0+3*stride]=(t1 + 2*t2 + t3 + 2)>>2;
+ src[2+1*stride]=
+ src[1+3*stride]=(t2 + 2*t3 + t4 + 2)>>2;
+ src[3+1*stride]=
+ src[2+3*stride]=(t3 + 2*t4 + t5 + 2)>>2;
+ src[3+3*stride]=(t4 + 2*t5 + t6 + 2)>>2;
+}
+
+static void pred4x4_vertical_left_rv40(uint8_t *src, uint8_t *topright, int stride,
+ const int l0, const int l1, const int l2, const int l3, const int l4){
+ LOAD_TOP_EDGE
+ LOAD_TOP_RIGHT_EDGE
+
+ src[0+0*stride]=(2*t0 + 2*t1 + l1 + 2*l2 + l3 + 4)>>3;
+ src[1+0*stride]=
+ src[0+2*stride]=(t1 + t2 + 1)>>1;
+ src[2+0*stride]=
+ src[1+2*stride]=(t2 + t3 + 1)>>1;
+ src[3+0*stride]=
+ src[2+2*stride]=(t3 + t4+ 1)>>1;
+ src[3+2*stride]=(t4 + t5+ 1)>>1;
+ src[0+1*stride]=(t0 + 2*t1 + t2 + l2 + 2*l3 + l4 + 4)>>3;
+ src[1+1*stride]=
+ src[0+3*stride]=(t1 + 2*t2 + t3 + 2)>>2;
+ src[2+1*stride]=
+ src[1+3*stride]=(t2 + 2*t3 + t4 + 2)>>2;
+ src[3+1*stride]=
+ src[2+3*stride]=(t3 + 2*t4 + t5 + 2)>>2;
+ src[3+3*stride]=(t4 + 2*t5 + t6 + 2)>>2;
+}
+
+static void pred4x4_vertical_left_rv40_c(uint8_t *src, uint8_t *topright, int stride){
+ LOAD_LEFT_EDGE
+ LOAD_DOWN_LEFT_EDGE
+
+ pred4x4_vertical_left_rv40(src, topright, stride, l0, l1, l2, l3, l4);
+}
+
+static void pred4x4_vertical_left_rv40_nodown_c(uint8_t *src, uint8_t *topright, int stride){
+ LOAD_LEFT_EDGE
+
+ pred4x4_vertical_left_rv40(src, topright, stride, l0, l1, l2, l3, l3);
+}
+
+static void pred4x4_horizontal_up_c(uint8_t *src, uint8_t *topright, int stride){
+ LOAD_LEFT_EDGE
+
+ src[0+0*stride]=(l0 + l1 + 1)>>1;
+ src[1+0*stride]=(l0 + 2*l1 + l2 + 2)>>2;
+ src[2+0*stride]=
+ src[0+1*stride]=(l1 + l2 + 1)>>1;
+ src[3+0*stride]=
+ src[1+1*stride]=(l1 + 2*l2 + l3 + 2)>>2;
+ src[2+1*stride]=
+ src[0+2*stride]=(l2 + l3 + 1)>>1;
+ src[3+1*stride]=
+ src[1+2*stride]=(l2 + 2*l3 + l3 + 2)>>2;
+ src[3+2*stride]=
+ src[1+3*stride]=
+ src[0+3*stride]=
+ src[2+2*stride]=
+ src[2+3*stride]=
+ src[3+3*stride]=l3;
+}
+
+static void pred4x4_horizontal_up_rv40_c(uint8_t *src, uint8_t *topright, int stride){
+ LOAD_LEFT_EDGE
+ LOAD_DOWN_LEFT_EDGE
+ LOAD_TOP_EDGE
+ LOAD_TOP_RIGHT_EDGE
+
+ src[0+0*stride]=(t1 + 2*t2 + t3 + 2*l0 + 2*l1 + 4)>>3;
+ src[1+0*stride]=(t2 + 2*t3 + t4 + l0 + 2*l1 + l2 + 4)>>3;
+ src[2+0*stride]=
+ src[0+1*stride]=(t3 + 2*t4 + t5 + 2*l1 + 2*l2 + 4)>>3;
+ src[3+0*stride]=
+ src[1+1*stride]=(t4 + 2*t5 + t6 + l1 + 2*l2 + l3 + 4)>>3;
+ src[2+1*stride]=
+ src[0+2*stride]=(t5 + 2*t6 + t7 + 2*l2 + 2*l3 + 4)>>3;
+ src[3+1*stride]=
+ src[1+2*stride]=(t6 + 3*t7 + l2 + 3*l3 + 4)>>3;
+ src[3+2*stride]=
+ src[1+3*stride]=(l3 + 2*l4 + l5 + 2)>>2;
+ src[0+3*stride]=
+ src[2+2*stride]=(t6 + t7 + l3 + l4 + 2)>>2;
+ src[2+3*stride]=(l4 + l5 + 1)>>1;
+ src[3+3*stride]=(l4 + 2*l5 + l6 + 2)>>2;
+}
+
+static void pred4x4_horizontal_up_rv40_nodown_c(uint8_t *src, uint8_t *topright, int stride){
+ LOAD_LEFT_EDGE
+ LOAD_TOP_EDGE
+ LOAD_TOP_RIGHT_EDGE
+
+ src[0+0*stride]=(t1 + 2*t2 + t3 + 2*l0 + 2*l1 + 4)>>3;
+ src[1+0*stride]=(t2 + 2*t3 + t4 + l0 + 2*l1 + l2 + 4)>>3;
+ src[2+0*stride]=
+ src[0+1*stride]=(t3 + 2*t4 + t5 + 2*l1 + 2*l2 + 4)>>3;
+ src[3+0*stride]=
+ src[1+1*stride]=(t4 + 2*t5 + t6 + l1 + 2*l2 + l3 + 4)>>3;
+ src[2+1*stride]=
+ src[0+2*stride]=(t5 + 2*t6 + t7 + 2*l2 + 2*l3 + 4)>>3;
+ src[3+1*stride]=
+ src[1+2*stride]=(t6 + 3*t7 + l2 + 3*l3 + 4)>>3;
+ src[3+2*stride]=
+ src[1+3*stride]=l3;
+ src[0+3*stride]=
+ src[2+2*stride]=(t6 + t7 + 2*l3 + 2)>>2;
+ src[2+3*stride]=
+ src[3+3*stride]=l3;
+}
+
+static void pred4x4_horizontal_down_c(uint8_t *src, uint8_t *topright, int stride){
+ const int lt= src[-1-1*stride];
+ LOAD_TOP_EDGE
+ LOAD_LEFT_EDGE
+
+ src[0+0*stride]=
+ src[2+1*stride]=(lt + l0 + 1)>>1;
+ src[1+0*stride]=
+ src[3+1*stride]=(l0 + 2*lt + t0 + 2)>>2;
+ src[2+0*stride]=(lt + 2*t0 + t1 + 2)>>2;
+ src[3+0*stride]=(t0 + 2*t1 + t2 + 2)>>2;
+ src[0+1*stride]=
+ src[2+2*stride]=(l0 + l1 + 1)>>1;
+ src[1+1*stride]=
+ src[3+2*stride]=(lt + 2*l0 + l1 + 2)>>2;
+ src[0+2*stride]=
+ src[2+3*stride]=(l1 + l2+ 1)>>1;
+ src[1+2*stride]=
+ src[3+3*stride]=(l0 + 2*l1 + l2 + 2)>>2;
+ src[0+3*stride]=(l2 + l3 + 1)>>1;
+ src[1+3*stride]=(l1 + 2*l2 + l3 + 2)>>2;
+}
+
+static void pred16x16_vertical_c(uint8_t *src, int stride){
+ int i;
+ const uint32_t a= ((uint32_t*)(src-stride))[0];
+ const uint32_t b= ((uint32_t*)(src-stride))[1];
+ const uint32_t c= ((uint32_t*)(src-stride))[2];
+ const uint32_t d= ((uint32_t*)(src-stride))[3];
+
+ for(i=0; i<16; i++){
+ ((uint32_t*)(src+i*stride))[0]= a;
+ ((uint32_t*)(src+i*stride))[1]= b;
+ ((uint32_t*)(src+i*stride))[2]= c;
+ ((uint32_t*)(src+i*stride))[3]= d;
+ }
+}
+
+static void pred16x16_horizontal_c(uint8_t *src, int stride){
+ int i;
+
+ for(i=0; i<16; i++){
+ ((uint32_t*)(src+i*stride))[0]=
+ ((uint32_t*)(src+i*stride))[1]=
+ ((uint32_t*)(src+i*stride))[2]=
+ ((uint32_t*)(src+i*stride))[3]= src[-1+i*stride]*0x01010101;
+ }
+}
+
+static void pred16x16_dc_c(uint8_t *src, int stride){
+ int i, dc=0;
+
+ for(i=0;i<16; i++){
+ dc+= src[-1+i*stride];
+ }
+
+ for(i=0;i<16; i++){
+ dc+= src[i-stride];
+ }
+
+ dc= 0x01010101*((dc + 16)>>5);
+
+ for(i=0; i<16; i++){
+ ((uint32_t*)(src+i*stride))[0]=
+ ((uint32_t*)(src+i*stride))[1]=
+ ((uint32_t*)(src+i*stride))[2]=
+ ((uint32_t*)(src+i*stride))[3]= dc;
+ }
+}
+
+static void pred16x16_left_dc_c(uint8_t *src, int stride){
+ int i, dc=0;
+
+ for(i=0;i<16; i++){
+ dc+= src[-1+i*stride];
+ }
+
+ dc= 0x01010101*((dc + 8)>>4);
+
+ for(i=0; i<16; i++){
+ ((uint32_t*)(src+i*stride))[0]=
+ ((uint32_t*)(src+i*stride))[1]=
+ ((uint32_t*)(src+i*stride))[2]=
+ ((uint32_t*)(src+i*stride))[3]= dc;
+ }
+}
+
+static void pred16x16_top_dc_c(uint8_t *src, int stride){
+ int i, dc=0;
+
+ for(i=0;i<16; i++){
+ dc+= src[i-stride];
+ }
+ dc= 0x01010101*((dc + 8)>>4);
+
+ for(i=0; i<16; i++){
+ ((uint32_t*)(src+i*stride))[0]=
+ ((uint32_t*)(src+i*stride))[1]=
+ ((uint32_t*)(src+i*stride))[2]=
+ ((uint32_t*)(src+i*stride))[3]= dc;
+ }
+}
+
+static void pred16x16_128_dc_c(uint8_t *src, int stride){
+ int i;
+
+ for(i=0; i<16; i++){
+ ((uint32_t*)(src+i*stride))[0]=
+ ((uint32_t*)(src+i*stride))[1]=
+ ((uint32_t*)(src+i*stride))[2]=
+ ((uint32_t*)(src+i*stride))[3]= 0x01010101U*128U;
+ }
+}
+
+static inline void pred16x16_plane_compat_c(uint8_t *src, int stride, const int svq3, const int rv40){
+ int i, j, k;
+ int a;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+ const uint8_t * const src0 = src+7-stride;
+ const uint8_t *src1 = src+8*stride-1;
+ const uint8_t *src2 = src1-2*stride; // == src+6*stride-1;
+ int H = src0[1] - src0[-1];
+ int V = src1[0] - src2[ 0];
+ for(k=2; k<=8; ++k) {
+ src1 += stride; src2 -= stride;
+ H += k*(src0[k] - src0[-k]);
+ V += k*(src1[0] - src2[ 0]);
+ }
+ if(svq3){
+ H = ( 5*(H/4) ) / 16;
+ V = ( 5*(V/4) ) / 16;
+
+ /* required for 100% accuracy */
+ i = H; H = V; V = i;
+ }else if(rv40){
+ H = ( H + (H>>2) ) >> 4;
+ V = ( V + (V>>2) ) >> 4;
+ }else{
+ H = ( 5*H+32 ) >> 6;
+ V = ( 5*V+32 ) >> 6;
+ }
+
+ a = 16*(src1[0] + src2[16] + 1) - 7*(V+H);
+ for(j=16; j>0; --j) {
+ int b = a;
+ a += V;
+ for(i=-16; i<0; i+=4) {
+ src[16+i] = cm[ (b ) >> 5 ];
+ src[17+i] = cm[ (b+ H) >> 5 ];
+ src[18+i] = cm[ (b+2*H) >> 5 ];
+ src[19+i] = cm[ (b+3*H) >> 5 ];
+ b += 4*H;
+ }
+ src += stride;
+ }
+}
+
+static void pred16x16_plane_c(uint8_t *src, int stride){
+ pred16x16_plane_compat_c(src, stride, 0, 0);
+}
+
+static void pred16x16_plane_svq3_c(uint8_t *src, int stride){
+ pred16x16_plane_compat_c(src, stride, 1, 0);
+}
+
+static void pred16x16_plane_rv40_c(uint8_t *src, int stride){
+ pred16x16_plane_compat_c(src, stride, 0, 1);
+}
+
+static void pred8x8_vertical_c(uint8_t *src, int stride){
+ int i;
+ const uint32_t a= ((uint32_t*)(src-stride))[0];
+ const uint32_t b= ((uint32_t*)(src-stride))[1];
+
+ for(i=0; i<8; i++){
+ ((uint32_t*)(src+i*stride))[0]= a;
+ ((uint32_t*)(src+i*stride))[1]= b;
+ }
+}
+
+static void pred8x8_horizontal_c(uint8_t *src, int stride){
+ int i;
+
+ for(i=0; i<8; i++){
+ ((uint32_t*)(src+i*stride))[0]=
+ ((uint32_t*)(src+i*stride))[1]= src[-1+i*stride]*0x01010101;
+ }
+}
+
+static void pred8x8_128_dc_c(uint8_t *src, int stride){
+ int i;
+
+ for(i=0; i<8; i++){
+ ((uint32_t*)(src+i*stride))[0]=
+ ((uint32_t*)(src+i*stride))[1]= 0x01010101U*128U;
+ }
+}
+
+static void pred8x8_left_dc_c(uint8_t *src, int stride){
+ int i;
+ int dc0, dc2;
+
+ dc0=dc2=0;
+ for(i=0;i<4; i++){
+ dc0+= src[-1+i*stride];
+ dc2+= src[-1+(i+4)*stride];
+ }
+ dc0= 0x01010101*((dc0 + 2)>>2);
+ dc2= 0x01010101*((dc2 + 2)>>2);
+
+ for(i=0; i<4; i++){
+ ((uint32_t*)(src+i*stride))[0]=
+ ((uint32_t*)(src+i*stride))[1]= dc0;
+ }
+ for(i=4; i<8; i++){
+ ((uint32_t*)(src+i*stride))[0]=
+ ((uint32_t*)(src+i*stride))[1]= dc2;
+ }
+}
+
+static void pred8x8_left_dc_rv40_c(uint8_t *src, int stride){
+ int i;
+ int dc0;
+
+ dc0=0;
+ for(i=0;i<8; i++)
+ dc0+= src[-1+i*stride];
+ dc0= 0x01010101*((dc0 + 4)>>3);
+
+ for(i=0; i<8; i++){
+ ((uint32_t*)(src+i*stride))[0]=
+ ((uint32_t*)(src+i*stride))[1]= dc0;
+ }
+}
+
+static void pred8x8_top_dc_c(uint8_t *src, int stride){
+ int i;
+ int dc0, dc1;
+
+ dc0=dc1=0;
+ for(i=0;i<4; i++){
+ dc0+= src[i-stride];
+ dc1+= src[4+i-stride];
+ }
+ dc0= 0x01010101*((dc0 + 2)>>2);
+ dc1= 0x01010101*((dc1 + 2)>>2);
+
+ for(i=0; i<4; i++){
+ ((uint32_t*)(src+i*stride))[0]= dc0;
+ ((uint32_t*)(src+i*stride))[1]= dc1;
+ }
+ for(i=4; i<8; i++){
+ ((uint32_t*)(src+i*stride))[0]= dc0;
+ ((uint32_t*)(src+i*stride))[1]= dc1;
+ }
+}
+
+static void pred8x8_top_dc_rv40_c(uint8_t *src, int stride){
+ int i;
+ int dc0;
+
+ dc0=0;
+ for(i=0;i<8; i++)
+ dc0+= src[i-stride];
+ dc0= 0x01010101*((dc0 + 4)>>3);
+
+ for(i=0; i<8; i++){
+ ((uint32_t*)(src+i*stride))[0]=
+ ((uint32_t*)(src+i*stride))[1]= dc0;
+ }
+}
+
+
+static void pred8x8_dc_c(uint8_t *src, int stride){
+ int i;
+ int dc0, dc1, dc2, dc3;
+
+ dc0=dc1=dc2=0;
+ for(i=0;i<4; i++){
+ dc0+= src[-1+i*stride] + src[i-stride];
+ dc1+= src[4+i-stride];
+ dc2+= src[-1+(i+4)*stride];
+ }
+ dc3= 0x01010101*((dc1 + dc2 + 4)>>3);
+ dc0= 0x01010101*((dc0 + 4)>>3);
+ dc1= 0x01010101*((dc1 + 2)>>2);
+ dc2= 0x01010101*((dc2 + 2)>>2);
+
+ for(i=0; i<4; i++){
+ ((uint32_t*)(src+i*stride))[0]= dc0;
+ ((uint32_t*)(src+i*stride))[1]= dc1;
+ }
+ for(i=4; i<8; i++){
+ ((uint32_t*)(src+i*stride))[0]= dc2;
+ ((uint32_t*)(src+i*stride))[1]= dc3;
+ }
+}
+
+//the following 4 functions should not be optimized!
+static void pred8x8_mad_cow_dc_l0t(uint8_t *src, int stride){
+ pred8x8_top_dc_c(src, stride);
+ pred4x4_dc_c(src, NULL, stride);
+}
+
+static void pred8x8_mad_cow_dc_0lt(uint8_t *src, int stride){
+ pred8x8_dc_c(src, stride);
+ pred4x4_top_dc_c(src, NULL, stride);
+}
+
+static void pred8x8_mad_cow_dc_l00(uint8_t *src, int stride){
+ pred8x8_left_dc_c(src, stride);
+ pred4x4_128_dc_c(src + 4*stride , NULL, stride);
+ pred4x4_128_dc_c(src + 4*stride + 4, NULL, stride);
+}
+
+static void pred8x8_mad_cow_dc_0l0(uint8_t *src, int stride){
+ pred8x8_left_dc_c(src, stride);
+ pred4x4_128_dc_c(src , NULL, stride);
+ pred4x4_128_dc_c(src + 4, NULL, stride);
+}
+
+static void pred8x8_dc_rv40_c(uint8_t *src, int stride){
+ int i;
+ int dc0=0;
+
+ for(i=0;i<4; i++){
+ dc0+= src[-1+i*stride] + src[i-stride];
+ dc0+= src[4+i-stride];
+ dc0+= src[-1+(i+4)*stride];
+ }
+ dc0= 0x01010101*((dc0 + 8)>>4);
+
+ for(i=0; i<4; i++){
+ ((uint32_t*)(src+i*stride))[0]= dc0;
+ ((uint32_t*)(src+i*stride))[1]= dc0;
+ }
+ for(i=4; i<8; i++){
+ ((uint32_t*)(src+i*stride))[0]= dc0;
+ ((uint32_t*)(src+i*stride))[1]= dc0;
+ }
+}
+
+static void pred8x8_plane_c(uint8_t *src, int stride){
+ int j, k;
+ int a;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+ const uint8_t * const src0 = src+3-stride;
+ const uint8_t *src1 = src+4*stride-1;
+ const uint8_t *src2 = src1-2*stride; // == src+2*stride-1;
+ int H = src0[1] - src0[-1];
+ int V = src1[0] - src2[ 0];
+ for(k=2; k<=4; ++k) {
+ src1 += stride; src2 -= stride;
+ H += k*(src0[k] - src0[-k]);
+ V += k*(src1[0] - src2[ 0]);
+ }
+ H = ( 17*H+16 ) >> 5;
+ V = ( 17*V+16 ) >> 5;
+
+ a = 16*(src1[0] + src2[8]+1) - 3*(V+H);
+ for(j=8; j>0; --j) {
+ int b = a;
+ a += V;
+ src[0] = cm[ (b ) >> 5 ];
+ src[1] = cm[ (b+ H) >> 5 ];
+ src[2] = cm[ (b+2*H) >> 5 ];
+ src[3] = cm[ (b+3*H) >> 5 ];
+ src[4] = cm[ (b+4*H) >> 5 ];
+ src[5] = cm[ (b+5*H) >> 5 ];
+ src[6] = cm[ (b+6*H) >> 5 ];
+ src[7] = cm[ (b+7*H) >> 5 ];
+ src += stride;
+ }
+}
+
+#define SRC(x,y) src[(x)+(y)*stride]
+#define PL(y) \
+ const int l##y = (SRC(-1,y-1) + 2*SRC(-1,y) + SRC(-1,y+1) + 2) >> 2;
+#define PREDICT_8x8_LOAD_LEFT \
+ const int l0 = ((has_topleft ? SRC(-1,-1) : SRC(-1,0)) \
+ + 2*SRC(-1,0) + SRC(-1,1) + 2) >> 2; \
+ PL(1) PL(2) PL(3) PL(4) PL(5) PL(6) \
+ const int l7 av_unused = (SRC(-1,6) + 3*SRC(-1,7) + 2) >> 2
+
+#define PT(x) \
+ const int t##x = (SRC(x-1,-1) + 2*SRC(x,-1) + SRC(x+1,-1) + 2) >> 2;
+#define PREDICT_8x8_LOAD_TOP \
+ const int t0 = ((has_topleft ? SRC(-1,-1) : SRC(0,-1)) \
+ + 2*SRC(0,-1) + SRC(1,-1) + 2) >> 2; \
+ PT(1) PT(2) PT(3) PT(4) PT(5) PT(6) \
+ const int t7 av_unused = ((has_topright ? SRC(8,-1) : SRC(7,-1)) \
+ + 2*SRC(7,-1) + SRC(6,-1) + 2) >> 2
+
+#define PTR(x) \
+ t##x = (SRC(x-1,-1) + 2*SRC(x,-1) + SRC(x+1,-1) + 2) >> 2;
+#define PREDICT_8x8_LOAD_TOPRIGHT \
+ int t8, t9, t10, t11, t12, t13, t14, t15; \
+ if(has_topright) { \
+ PTR(8) PTR(9) PTR(10) PTR(11) PTR(12) PTR(13) PTR(14) \
+ t15 = (SRC(14,-1) + 3*SRC(15,-1) + 2) >> 2; \
+ } else t8=t9=t10=t11=t12=t13=t14=t15= SRC(7,-1);
+
+#define PREDICT_8x8_LOAD_TOPLEFT \
+ const int lt = (SRC(-1,0) + 2*SRC(-1,-1) + SRC(0,-1) + 2) >> 2
+
+#define PREDICT_8x8_DC(v) \
+ int y; \
+ for( y = 0; y < 8; y++ ) { \
+ ((uint32_t*)src)[0] = \
+ ((uint32_t*)src)[1] = v; \
+ src += stride; \
+ }
+
+static void pred8x8l_128_dc_c(uint8_t *src, int has_topleft, int has_topright, int stride)
+{
+ PREDICT_8x8_DC(0x80808080);
+}
+static void pred8x8l_left_dc_c(uint8_t *src, int has_topleft, int has_topright, int stride)
+{
+ PREDICT_8x8_LOAD_LEFT;
+ const uint32_t dc = ((l0+l1+l2+l3+l4+l5+l6+l7+4) >> 3) * 0x01010101;
+ PREDICT_8x8_DC(dc);
+}
+static void pred8x8l_top_dc_c(uint8_t *src, int has_topleft, int has_topright, int stride)
+{
+ PREDICT_8x8_LOAD_TOP;
+ const uint32_t dc = ((t0+t1+t2+t3+t4+t5+t6+t7+4) >> 3) * 0x01010101;
+ PREDICT_8x8_DC(dc);
+}
+static void pred8x8l_dc_c(uint8_t *src, int has_topleft, int has_topright, int stride)
+{
+ PREDICT_8x8_LOAD_LEFT;
+ PREDICT_8x8_LOAD_TOP;
+ const uint32_t dc = ((l0+l1+l2+l3+l4+l5+l6+l7
+ +t0+t1+t2+t3+t4+t5+t6+t7+8) >> 4) * 0x01010101;
+ PREDICT_8x8_DC(dc);
+}
+static void pred8x8l_horizontal_c(uint8_t *src, int has_topleft, int has_topright, int stride)
+{
+ PREDICT_8x8_LOAD_LEFT;
+#define ROW(y) ((uint32_t*)(src+y*stride))[0] =\
+ ((uint32_t*)(src+y*stride))[1] = 0x01010101 * l##y
+ ROW(0); ROW(1); ROW(2); ROW(3); ROW(4); ROW(5); ROW(6); ROW(7);
+#undef ROW
+}
+static void pred8x8l_vertical_c(uint8_t *src, int has_topleft, int has_topright, int stride)
+{
+ int y;
+ PREDICT_8x8_LOAD_TOP;
+ src[0] = t0;
+ src[1] = t1;
+ src[2] = t2;
+ src[3] = t3;
+ src[4] = t4;
+ src[5] = t5;
+ src[6] = t6;
+ src[7] = t7;
+ for( y = 1; y < 8; y++ )
+ *(uint64_t*)(src+y*stride) = *(uint64_t*)src;
+}
+static void pred8x8l_down_left_c(uint8_t *src, int has_topleft, int has_topright, int stride)
+{
+ PREDICT_8x8_LOAD_TOP;
+ PREDICT_8x8_LOAD_TOPRIGHT;
+ SRC(0,0)= (t0 + 2*t1 + t2 + 2) >> 2;
+ SRC(0,1)=SRC(1,0)= (t1 + 2*t2 + t3 + 2) >> 2;
+ SRC(0,2)=SRC(1,1)=SRC(2,0)= (t2 + 2*t3 + t4 + 2) >> 2;
+ SRC(0,3)=SRC(1,2)=SRC(2,1)=SRC(3,0)= (t3 + 2*t4 + t5 + 2) >> 2;
+ SRC(0,4)=SRC(1,3)=SRC(2,2)=SRC(3,1)=SRC(4,0)= (t4 + 2*t5 + t6 + 2) >> 2;
+ SRC(0,5)=SRC(1,4)=SRC(2,3)=SRC(3,2)=SRC(4,1)=SRC(5,0)= (t5 + 2*t6 + t7 + 2) >> 2;
+ SRC(0,6)=SRC(1,5)=SRC(2,4)=SRC(3,3)=SRC(4,2)=SRC(5,1)=SRC(6,0)= (t6 + 2*t7 + t8 + 2) >> 2;
+ SRC(0,7)=SRC(1,6)=SRC(2,5)=SRC(3,4)=SRC(4,3)=SRC(5,2)=SRC(6,1)=SRC(7,0)= (t7 + 2*t8 + t9 + 2) >> 2;
+ SRC(1,7)=SRC(2,6)=SRC(3,5)=SRC(4,4)=SRC(5,3)=SRC(6,2)=SRC(7,1)= (t8 + 2*t9 + t10 + 2) >> 2;
+ SRC(2,7)=SRC(3,6)=SRC(4,5)=SRC(5,4)=SRC(6,3)=SRC(7,2)= (t9 + 2*t10 + t11 + 2) >> 2;
+ SRC(3,7)=SRC(4,6)=SRC(5,5)=SRC(6,4)=SRC(7,3)= (t10 + 2*t11 + t12 + 2) >> 2;
+ SRC(4,7)=SRC(5,6)=SRC(6,5)=SRC(7,4)= (t11 + 2*t12 + t13 + 2) >> 2;
+ SRC(5,7)=SRC(6,6)=SRC(7,5)= (t12 + 2*t13 + t14 + 2) >> 2;
+ SRC(6,7)=SRC(7,6)= (t13 + 2*t14 + t15 + 2) >> 2;
+ SRC(7,7)= (t14 + 3*t15 + 2) >> 2;
+}
+static void pred8x8l_down_right_c(uint8_t *src, int has_topleft, int has_topright, int stride)
+{
+ PREDICT_8x8_LOAD_TOP;
+ PREDICT_8x8_LOAD_LEFT;
+ PREDICT_8x8_LOAD_TOPLEFT;
+ SRC(0,7)= (l7 + 2*l6 + l5 + 2) >> 2;
+ SRC(0,6)=SRC(1,7)= (l6 + 2*l5 + l4 + 2) >> 2;
+ SRC(0,5)=SRC(1,6)=SRC(2,7)= (l5 + 2*l4 + l3 + 2) >> 2;
+ SRC(0,4)=SRC(1,5)=SRC(2,6)=SRC(3,7)= (l4 + 2*l3 + l2 + 2) >> 2;
+ SRC(0,3)=SRC(1,4)=SRC(2,5)=SRC(3,6)=SRC(4,7)= (l3 + 2*l2 + l1 + 2) >> 2;
+ SRC(0,2)=SRC(1,3)=SRC(2,4)=SRC(3,5)=SRC(4,6)=SRC(5,7)= (l2 + 2*l1 + l0 + 2) >> 2;
+ SRC(0,1)=SRC(1,2)=SRC(2,3)=SRC(3,4)=SRC(4,5)=SRC(5,6)=SRC(6,7)= (l1 + 2*l0 + lt + 2) >> 2;
+ SRC(0,0)=SRC(1,1)=SRC(2,2)=SRC(3,3)=SRC(4,4)=SRC(5,5)=SRC(6,6)=SRC(7,7)= (l0 + 2*lt + t0 + 2) >> 2;
+ SRC(1,0)=SRC(2,1)=SRC(3,2)=SRC(4,3)=SRC(5,4)=SRC(6,5)=SRC(7,6)= (lt + 2*t0 + t1 + 2) >> 2;
+ SRC(2,0)=SRC(3,1)=SRC(4,2)=SRC(5,3)=SRC(6,4)=SRC(7,5)= (t0 + 2*t1 + t2 + 2) >> 2;
+ SRC(3,0)=SRC(4,1)=SRC(5,2)=SRC(6,3)=SRC(7,4)= (t1 + 2*t2 + t3 + 2) >> 2;
+ SRC(4,0)=SRC(5,1)=SRC(6,2)=SRC(7,3)= (t2 + 2*t3 + t4 + 2) >> 2;
+ SRC(5,0)=SRC(6,1)=SRC(7,2)= (t3 + 2*t4 + t5 + 2) >> 2;
+ SRC(6,0)=SRC(7,1)= (t4 + 2*t5 + t6 + 2) >> 2;
+ SRC(7,0)= (t5 + 2*t6 + t7 + 2) >> 2;
+
+}
+static void pred8x8l_vertical_right_c(uint8_t *src, int has_topleft, int has_topright, int stride)
+{
+ PREDICT_8x8_LOAD_TOP;
+ PREDICT_8x8_LOAD_LEFT;
+ PREDICT_8x8_LOAD_TOPLEFT;
+ SRC(0,6)= (l5 + 2*l4 + l3 + 2) >> 2;
+ SRC(0,7)= (l6 + 2*l5 + l4 + 2) >> 2;
+ SRC(0,4)=SRC(1,6)= (l3 + 2*l2 + l1 + 2) >> 2;
+ SRC(0,5)=SRC(1,7)= (l4 + 2*l3 + l2 + 2) >> 2;
+ SRC(0,2)=SRC(1,4)=SRC(2,6)= (l1 + 2*l0 + lt + 2) >> 2;
+ SRC(0,3)=SRC(1,5)=SRC(2,7)= (l2 + 2*l1 + l0 + 2) >> 2;
+ SRC(0,1)=SRC(1,3)=SRC(2,5)=SRC(3,7)= (l0 + 2*lt + t0 + 2) >> 2;
+ SRC(0,0)=SRC(1,2)=SRC(2,4)=SRC(3,6)= (lt + t0 + 1) >> 1;
+ SRC(1,1)=SRC(2,3)=SRC(3,5)=SRC(4,7)= (lt + 2*t0 + t1 + 2) >> 2;
+ SRC(1,0)=SRC(2,2)=SRC(3,4)=SRC(4,6)= (t0 + t1 + 1) >> 1;
+ SRC(2,1)=SRC(3,3)=SRC(4,5)=SRC(5,7)= (t0 + 2*t1 + t2 + 2) >> 2;
+ SRC(2,0)=SRC(3,2)=SRC(4,4)=SRC(5,6)= (t1 + t2 + 1) >> 1;
+ SRC(3,1)=SRC(4,3)=SRC(5,5)=SRC(6,7)= (t1 + 2*t2 + t3 + 2) >> 2;
+ SRC(3,0)=SRC(4,2)=SRC(5,4)=SRC(6,6)= (t2 + t3 + 1) >> 1;
+ SRC(4,1)=SRC(5,3)=SRC(6,5)=SRC(7,7)= (t2 + 2*t3 + t4 + 2) >> 2;
+ SRC(4,0)=SRC(5,2)=SRC(6,4)=SRC(7,6)= (t3 + t4 + 1) >> 1;
+ SRC(5,1)=SRC(6,3)=SRC(7,5)= (t3 + 2*t4 + t5 + 2) >> 2;
+ SRC(5,0)=SRC(6,2)=SRC(7,4)= (t4 + t5 + 1) >> 1;
+ SRC(6,1)=SRC(7,3)= (t4 + 2*t5 + t6 + 2) >> 2;
+ SRC(6,0)=SRC(7,2)= (t5 + t6 + 1) >> 1;
+ SRC(7,1)= (t5 + 2*t6 + t7 + 2) >> 2;
+ SRC(7,0)= (t6 + t7 + 1) >> 1;
+}
+static void pred8x8l_horizontal_down_c(uint8_t *src, int has_topleft, int has_topright, int stride)
+{
+ PREDICT_8x8_LOAD_TOP;
+ PREDICT_8x8_LOAD_LEFT;
+ PREDICT_8x8_LOAD_TOPLEFT;
+ SRC(0,7)= (l6 + l7 + 1) >> 1;
+ SRC(1,7)= (l5 + 2*l6 + l7 + 2) >> 2;
+ SRC(0,6)=SRC(2,7)= (l5 + l6 + 1) >> 1;
+ SRC(1,6)=SRC(3,7)= (l4 + 2*l5 + l6 + 2) >> 2;
+ SRC(0,5)=SRC(2,6)=SRC(4,7)= (l4 + l5 + 1) >> 1;
+ SRC(1,5)=SRC(3,6)=SRC(5,7)= (l3 + 2*l4 + l5 + 2) >> 2;
+ SRC(0,4)=SRC(2,5)=SRC(4,6)=SRC(6,7)= (l3 + l4 + 1) >> 1;
+ SRC(1,4)=SRC(3,5)=SRC(5,6)=SRC(7,7)= (l2 + 2*l3 + l4 + 2) >> 2;
+ SRC(0,3)=SRC(2,4)=SRC(4,5)=SRC(6,6)= (l2 + l3 + 1) >> 1;
+ SRC(1,3)=SRC(3,4)=SRC(5,5)=SRC(7,6)= (l1 + 2*l2 + l3 + 2) >> 2;
+ SRC(0,2)=SRC(2,3)=SRC(4,4)=SRC(6,5)= (l1 + l2 + 1) >> 1;
+ SRC(1,2)=SRC(3,3)=SRC(5,4)=SRC(7,5)= (l0 + 2*l1 + l2 + 2) >> 2;
+ SRC(0,1)=SRC(2,2)=SRC(4,3)=SRC(6,4)= (l0 + l1 + 1) >> 1;
+ SRC(1,1)=SRC(3,2)=SRC(5,3)=SRC(7,4)= (lt + 2*l0 + l1 + 2) >> 2;
+ SRC(0,0)=SRC(2,1)=SRC(4,2)=SRC(6,3)= (lt + l0 + 1) >> 1;
+ SRC(1,0)=SRC(3,1)=SRC(5,2)=SRC(7,3)= (l0 + 2*lt + t0 + 2) >> 2;
+ SRC(2,0)=SRC(4,1)=SRC(6,2)= (t1 + 2*t0 + lt + 2) >> 2;
+ SRC(3,0)=SRC(5,1)=SRC(7,2)= (t2 + 2*t1 + t0 + 2) >> 2;
+ SRC(4,0)=SRC(6,1)= (t3 + 2*t2 + t1 + 2) >> 2;
+ SRC(5,0)=SRC(7,1)= (t4 + 2*t3 + t2 + 2) >> 2;
+ SRC(6,0)= (t5 + 2*t4 + t3 + 2) >> 2;
+ SRC(7,0)= (t6 + 2*t5 + t4 + 2) >> 2;
+}
+static void pred8x8l_vertical_left_c(uint8_t *src, int has_topleft, int has_topright, int stride)
+{
+ PREDICT_8x8_LOAD_TOP;
+ PREDICT_8x8_LOAD_TOPRIGHT;
+ SRC(0,0)= (t0 + t1 + 1) >> 1;
+ SRC(0,1)= (t0 + 2*t1 + t2 + 2) >> 2;
+ SRC(0,2)=SRC(1,0)= (t1 + t2 + 1) >> 1;
+ SRC(0,3)=SRC(1,1)= (t1 + 2*t2 + t3 + 2) >> 2;
+ SRC(0,4)=SRC(1,2)=SRC(2,0)= (t2 + t3 + 1) >> 1;
+ SRC(0,5)=SRC(1,3)=SRC(2,1)= (t2 + 2*t3 + t4 + 2) >> 2;
+ SRC(0,6)=SRC(1,4)=SRC(2,2)=SRC(3,0)= (t3 + t4 + 1) >> 1;
+ SRC(0,7)=SRC(1,5)=SRC(2,3)=SRC(3,1)= (t3 + 2*t4 + t5 + 2) >> 2;
+ SRC(1,6)=SRC(2,4)=SRC(3,2)=SRC(4,0)= (t4 + t5 + 1) >> 1;
+ SRC(1,7)=SRC(2,5)=SRC(3,3)=SRC(4,1)= (t4 + 2*t5 + t6 + 2) >> 2;
+ SRC(2,6)=SRC(3,4)=SRC(4,2)=SRC(5,0)= (t5 + t6 + 1) >> 1;
+ SRC(2,7)=SRC(3,5)=SRC(4,3)=SRC(5,1)= (t5 + 2*t6 + t7 + 2) >> 2;
+ SRC(3,6)=SRC(4,4)=SRC(5,2)=SRC(6,0)= (t6 + t7 + 1) >> 1;
+ SRC(3,7)=SRC(4,5)=SRC(5,3)=SRC(6,1)= (t6 + 2*t7 + t8 + 2) >> 2;
+ SRC(4,6)=SRC(5,4)=SRC(6,2)=SRC(7,0)= (t7 + t8 + 1) >> 1;
+ SRC(4,7)=SRC(5,5)=SRC(6,3)=SRC(7,1)= (t7 + 2*t8 + t9 + 2) >> 2;
+ SRC(5,6)=SRC(6,4)=SRC(7,2)= (t8 + t9 + 1) >> 1;
+ SRC(5,7)=SRC(6,5)=SRC(7,3)= (t8 + 2*t9 + t10 + 2) >> 2;
+ SRC(6,6)=SRC(7,4)= (t9 + t10 + 1) >> 1;
+ SRC(6,7)=SRC(7,5)= (t9 + 2*t10 + t11 + 2) >> 2;
+ SRC(7,6)= (t10 + t11 + 1) >> 1;
+ SRC(7,7)= (t10 + 2*t11 + t12 + 2) >> 2;
+}
+static void pred8x8l_horizontal_up_c(uint8_t *src, int has_topleft, int has_topright, int stride)
+{
+ PREDICT_8x8_LOAD_LEFT;
+ SRC(0,0)= (l0 + l1 + 1) >> 1;
+ SRC(1,0)= (l0 + 2*l1 + l2 + 2) >> 2;
+ SRC(0,1)=SRC(2,0)= (l1 + l2 + 1) >> 1;
+ SRC(1,1)=SRC(3,0)= (l1 + 2*l2 + l3 + 2) >> 2;
+ SRC(0,2)=SRC(2,1)=SRC(4,0)= (l2 + l3 + 1) >> 1;
+ SRC(1,2)=SRC(3,1)=SRC(5,0)= (l2 + 2*l3 + l4 + 2) >> 2;
+ SRC(0,3)=SRC(2,2)=SRC(4,1)=SRC(6,0)= (l3 + l4 + 1) >> 1;
+ SRC(1,3)=SRC(3,2)=SRC(5,1)=SRC(7,0)= (l3 + 2*l4 + l5 + 2) >> 2;
+ SRC(0,4)=SRC(2,3)=SRC(4,2)=SRC(6,1)= (l4 + l5 + 1) >> 1;
+ SRC(1,4)=SRC(3,3)=SRC(5,2)=SRC(7,1)= (l4 + 2*l5 + l6 + 2) >> 2;
+ SRC(0,5)=SRC(2,4)=SRC(4,3)=SRC(6,2)= (l5 + l6 + 1) >> 1;
+ SRC(1,5)=SRC(3,4)=SRC(5,3)=SRC(7,2)= (l5 + 2*l6 + l7 + 2) >> 2;
+ SRC(0,6)=SRC(2,5)=SRC(4,4)=SRC(6,3)= (l6 + l7 + 1) >> 1;
+ SRC(1,6)=SRC(3,5)=SRC(5,4)=SRC(7,3)= (l6 + 3*l7 + 2) >> 2;
+ SRC(0,7)=SRC(1,7)=SRC(2,6)=SRC(2,7)=SRC(3,6)=
+ SRC(3,7)=SRC(4,5)=SRC(4,6)=SRC(4,7)=SRC(5,5)=
+ SRC(5,6)=SRC(5,7)=SRC(6,4)=SRC(6,5)=SRC(6,6)=
+ SRC(6,7)=SRC(7,4)=SRC(7,5)=SRC(7,6)=SRC(7,7)= l7;
+}
+#undef PREDICT_8x8_LOAD_LEFT
+#undef PREDICT_8x8_LOAD_TOP
+#undef PREDICT_8x8_LOAD_TOPLEFT
+#undef PREDICT_8x8_LOAD_TOPRIGHT
+#undef PREDICT_8x8_DC
+#undef PTR
+#undef PT
+#undef PL
+#undef SRC
+
+static void pred4x4_vertical_add_c(uint8_t *pix, const DCTELEM *block, int stride){
+ int i;
+ pix -= stride;
+ for(i=0; i<4; i++){
+ uint8_t v = pix[0];
+ pix[1*stride]= v += block[0];
+ pix[2*stride]= v += block[4];
+ pix[3*stride]= v += block[8];
+ pix[4*stride]= v + block[12];
+ pix++;
+ block++;
+ }
+}
+
+static void pred4x4_horizontal_add_c(uint8_t *pix, const DCTELEM *block, int stride){
+ int i;
+ for(i=0; i<4; i++){
+ uint8_t v = pix[-1];
+ pix[0]= v += block[0];
+ pix[1]= v += block[1];
+ pix[2]= v += block[2];
+ pix[3]= v + block[3];
+ pix+= stride;
+ block+= 4;
+ }
+}
+
+static void pred8x8l_vertical_add_c(uint8_t *pix, const DCTELEM *block, int stride){
+ int i;
+ pix -= stride;
+ for(i=0; i<8; i++){
+ uint8_t v = pix[0];
+ pix[1*stride]= v += block[0];
+ pix[2*stride]= v += block[8];
+ pix[3*stride]= v += block[16];
+ pix[4*stride]= v += block[24];
+ pix[5*stride]= v += block[32];
+ pix[6*stride]= v += block[40];
+ pix[7*stride]= v += block[48];
+ pix[8*stride]= v + block[56];
+ pix++;
+ block++;
+ }
+}
+
+static void pred8x8l_horizontal_add_c(uint8_t *pix, const DCTELEM *block, int stride){
+ int i;
+ for(i=0; i<8; i++){
+ uint8_t v = pix[-1];
+ pix[0]= v += block[0];
+ pix[1]= v += block[1];
+ pix[2]= v += block[2];
+ pix[3]= v += block[3];
+ pix[4]= v += block[4];
+ pix[5]= v += block[5];
+ pix[6]= v += block[6];
+ pix[7]= v + block[7];
+ pix+= stride;
+ block+= 8;
+ }
+}
+
+static void pred16x16_vertical_add_c(uint8_t *pix, const int *block_offset, const DCTELEM *block, int stride){
+ int i;
+ for(i=0; i<16; i++)
+ pred4x4_vertical_add_c(pix + block_offset[i], block + i*16, stride);
+}
+
+static void pred16x16_horizontal_add_c(uint8_t *pix, const int *block_offset, const DCTELEM *block, int stride){
+ int i;
+ for(i=0; i<16; i++)
+ pred4x4_horizontal_add_c(pix + block_offset[i], block + i*16, stride);
+}
+
+static void pred8x8_vertical_add_c(uint8_t *pix, const int *block_offset, const DCTELEM *block, int stride){
+ int i;
+ for(i=0; i<4; i++)
+ pred4x4_vertical_add_c(pix + block_offset[i], block + i*16, stride);
+}
+
+static void pred8x8_horizontal_add_c(uint8_t *pix, const int *block_offset, const DCTELEM *block, int stride){
+ int i;
+ for(i=0; i<4; i++)
+ pred4x4_horizontal_add_c(pix + block_offset[i], block + i*16, stride);
+}
+
+
+/**
+ * Sets the intra prediction function pointers.
+ */
+void ff_h264_pred_init(H264PredContext *h, int codec_id){
+// MpegEncContext * const s = &h->s;
+
+ if(codec_id != CODEC_ID_RV40){
+ h->pred4x4[VERT_PRED ]= pred4x4_vertical_c;
+ h->pred4x4[HOR_PRED ]= pred4x4_horizontal_c;
+ h->pred4x4[DC_PRED ]= pred4x4_dc_c;
+ if(codec_id == CODEC_ID_SVQ3)
+ h->pred4x4[DIAG_DOWN_LEFT_PRED ]= pred4x4_down_left_svq3_c;
+ else
+ h->pred4x4[DIAG_DOWN_LEFT_PRED ]= pred4x4_down_left_c;
+ h->pred4x4[DIAG_DOWN_RIGHT_PRED]= pred4x4_down_right_c;
+ h->pred4x4[VERT_RIGHT_PRED ]= pred4x4_vertical_right_c;
+ h->pred4x4[HOR_DOWN_PRED ]= pred4x4_horizontal_down_c;
+ h->pred4x4[VERT_LEFT_PRED ]= pred4x4_vertical_left_c;
+ h->pred4x4[HOR_UP_PRED ]= pred4x4_horizontal_up_c;
+ h->pred4x4[LEFT_DC_PRED ]= pred4x4_left_dc_c;
+ h->pred4x4[TOP_DC_PRED ]= pred4x4_top_dc_c;
+ h->pred4x4[DC_128_PRED ]= pred4x4_128_dc_c;
+ }else{
+ h->pred4x4[VERT_PRED ]= pred4x4_vertical_c;
+ h->pred4x4[HOR_PRED ]= pred4x4_horizontal_c;
+ h->pred4x4[DC_PRED ]= pred4x4_dc_c;
+ h->pred4x4[DIAG_DOWN_LEFT_PRED ]= pred4x4_down_left_rv40_c;
+ h->pred4x4[DIAG_DOWN_RIGHT_PRED]= pred4x4_down_right_c;
+ h->pred4x4[VERT_RIGHT_PRED ]= pred4x4_vertical_right_c;
+ h->pred4x4[HOR_DOWN_PRED ]= pred4x4_horizontal_down_c;
+ h->pred4x4[VERT_LEFT_PRED ]= pred4x4_vertical_left_rv40_c;
+ h->pred4x4[HOR_UP_PRED ]= pred4x4_horizontal_up_rv40_c;
+ h->pred4x4[LEFT_DC_PRED ]= pred4x4_left_dc_c;
+ h->pred4x4[TOP_DC_PRED ]= pred4x4_top_dc_c;
+ h->pred4x4[DC_128_PRED ]= pred4x4_128_dc_c;
+ h->pred4x4[DIAG_DOWN_LEFT_PRED_RV40_NODOWN]= pred4x4_down_left_rv40_nodown_c;
+ h->pred4x4[HOR_UP_PRED_RV40_NODOWN]= pred4x4_horizontal_up_rv40_nodown_c;
+ h->pred4x4[VERT_LEFT_PRED_RV40_NODOWN]= pred4x4_vertical_left_rv40_nodown_c;
+ }
+
+ h->pred8x8l[VERT_PRED ]= pred8x8l_vertical_c;
+ h->pred8x8l[HOR_PRED ]= pred8x8l_horizontal_c;
+ h->pred8x8l[DC_PRED ]= pred8x8l_dc_c;
+ h->pred8x8l[DIAG_DOWN_LEFT_PRED ]= pred8x8l_down_left_c;
+ h->pred8x8l[DIAG_DOWN_RIGHT_PRED]= pred8x8l_down_right_c;
+ h->pred8x8l[VERT_RIGHT_PRED ]= pred8x8l_vertical_right_c;
+ h->pred8x8l[HOR_DOWN_PRED ]= pred8x8l_horizontal_down_c;
+ h->pred8x8l[VERT_LEFT_PRED ]= pred8x8l_vertical_left_c;
+ h->pred8x8l[HOR_UP_PRED ]= pred8x8l_horizontal_up_c;
+ h->pred8x8l[LEFT_DC_PRED ]= pred8x8l_left_dc_c;
+ h->pred8x8l[TOP_DC_PRED ]= pred8x8l_top_dc_c;
+ h->pred8x8l[DC_128_PRED ]= pred8x8l_128_dc_c;
+
+ h->pred8x8[VERT_PRED8x8 ]= pred8x8_vertical_c;
+ h->pred8x8[HOR_PRED8x8 ]= pred8x8_horizontal_c;
+ h->pred8x8[PLANE_PRED8x8 ]= pred8x8_plane_c;
+ if(codec_id != CODEC_ID_RV40){
+ h->pred8x8[DC_PRED8x8 ]= pred8x8_dc_c;
+ h->pred8x8[LEFT_DC_PRED8x8]= pred8x8_left_dc_c;
+ h->pred8x8[TOP_DC_PRED8x8 ]= pred8x8_top_dc_c;
+ h->pred8x8[ALZHEIMER_DC_L0T_PRED8x8 ]= pred8x8_mad_cow_dc_l0t;
+ h->pred8x8[ALZHEIMER_DC_0LT_PRED8x8 ]= pred8x8_mad_cow_dc_0lt;
+ h->pred8x8[ALZHEIMER_DC_L00_PRED8x8 ]= pred8x8_mad_cow_dc_l00;
+ h->pred8x8[ALZHEIMER_DC_0L0_PRED8x8 ]= pred8x8_mad_cow_dc_0l0;
+ }else{
+ h->pred8x8[DC_PRED8x8 ]= pred8x8_dc_rv40_c;
+ h->pred8x8[LEFT_DC_PRED8x8]= pred8x8_left_dc_rv40_c;
+ h->pred8x8[TOP_DC_PRED8x8 ]= pred8x8_top_dc_rv40_c;
+ }
+ h->pred8x8[DC_128_PRED8x8 ]= pred8x8_128_dc_c;
+
+ h->pred16x16[DC_PRED8x8 ]= pred16x16_dc_c;
+ h->pred16x16[VERT_PRED8x8 ]= pred16x16_vertical_c;
+ h->pred16x16[HOR_PRED8x8 ]= pred16x16_horizontal_c;
+ h->pred16x16[PLANE_PRED8x8 ]= pred16x16_plane_c;
+ switch(codec_id){
+ case CODEC_ID_SVQ3:
+ h->pred16x16[PLANE_PRED8x8 ]= pred16x16_plane_svq3_c;
+ break;
+ case CODEC_ID_RV40:
+ h->pred16x16[PLANE_PRED8x8 ]= pred16x16_plane_rv40_c;
+ break;
+ default:
+ h->pred16x16[PLANE_PRED8x8 ]= pred16x16_plane_c;
+ }
+ h->pred16x16[LEFT_DC_PRED8x8]= pred16x16_left_dc_c;
+ h->pred16x16[TOP_DC_PRED8x8 ]= pred16x16_top_dc_c;
+ h->pred16x16[DC_128_PRED8x8 ]= pred16x16_128_dc_c;
+
+ //special lossless h/v prediction for h264
+ h->pred4x4_add [VERT_PRED ]= pred4x4_vertical_add_c;
+ h->pred4x4_add [ HOR_PRED ]= pred4x4_horizontal_add_c;
+ h->pred8x8l_add [VERT_PRED ]= pred8x8l_vertical_add_c;
+ h->pred8x8l_add [ HOR_PRED ]= pred8x8l_horizontal_add_c;
+ h->pred8x8_add [VERT_PRED8x8]= pred8x8_vertical_add_c;
+ h->pred8x8_add [ HOR_PRED8x8]= pred8x8_horizontal_add_c;
+ h->pred16x16_add[VERT_PRED8x8]= pred16x16_vertical_add_c;
+ h->pred16x16_add[ HOR_PRED8x8]= pred16x16_horizontal_add_c;
+}
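
ff_h264_pred_init above only fills the per-mode function-pointer tables; a decoder then indexes them with the mode constants declared in h264pred.h and hands each function a pointer into the picture plane plus the plane stride. A minimal sketch of that calling pattern for one 4x4 luma block follows; the block position and the use of CODEC_ID_H264 here are illustrative assumptions, not something taken from this patch.

    #include "avcodec.h"
    #include "h264pred.h"

    /* Illustrative sketch: run vertical 4x4 prediction on one block whose
     * top neighbours have already been reconstructed in the luma plane. */
    static void predict_one_block(uint8_t *luma, int stride)
    {
        H264PredContext pred;
        uint8_t *src = luma + 4*stride + 4;         /* hypothetical block position */

        ff_h264_pred_init(&pred, CODEC_ID_H264);    /* fills the pred4x4/pred8x8l/pred8x8/pred16x16 tables */

        /* src points at the top-left pixel of the 4x4 block; the prediction
         * functions read neighbours at src[-stride] (top) and src[-1] (left). */
        pred.pred4x4[VERT_PRED](src, NULL, stride); /* topright is not read by the vertical mode */
    }
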
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264pred.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264pred.h
new file mode 100644
index 000000000..b0c2432c6
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h264pred.h
@@ -0,0 +1,88 @@
+/*
+ * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file h264pred.h
+ * H.264 / AVC / MPEG4 prediction functions.
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#ifndef AVCODEC_H264PRED_H
+#define AVCODEC_H264PRED_H
+
+#include "libavutil/common.h"
+#include "dsputil.h"
+
+/**
+ * Prediction types
+ */
+//@{
+#define VERT_PRED 0
+#define HOR_PRED 1
+#define DC_PRED 2
+#define DIAG_DOWN_LEFT_PRED 3
+#define DIAG_DOWN_RIGHT_PRED 4
+#define VERT_RIGHT_PRED 5
+#define HOR_DOWN_PRED 6
+#define VERT_LEFT_PRED 7
+#define HOR_UP_PRED 8
+
+#define LEFT_DC_PRED 9
+#define TOP_DC_PRED 10
+#define DC_128_PRED 11
+
+#define DIAG_DOWN_LEFT_PRED_RV40_NODOWN 12
+#define HOR_UP_PRED_RV40_NODOWN 13
+#define VERT_LEFT_PRED_RV40_NODOWN 14
+
+#define DC_PRED8x8 0
+#define HOR_PRED8x8 1
+#define VERT_PRED8x8 2
+#define PLANE_PRED8x8 3
+
+#define LEFT_DC_PRED8x8 4
+#define TOP_DC_PRED8x8 5
+#define DC_128_PRED8x8 6
+
+#define ALZHEIMER_DC_L0T_PRED8x8 7
+#define ALZHEIMER_DC_0LT_PRED8x8 8
+#define ALZHEIMER_DC_L00_PRED8x8 9
+#define ALZHEIMER_DC_0L0_PRED8x8 10
+//@}
+
+/**
+ * Context for storing H.264 prediction functions
+ */
+typedef struct H264PredContext{
+ void (*pred4x4 [9+3+3])(uint8_t *src, uint8_t *topright, int stride);//FIXME move to dsp?
+ void (*pred8x8l [9+3])(uint8_t *src, int topleft, int topright, int stride);
+ void (*pred8x8 [4+3+4])(uint8_t *src, int stride);
+ void (*pred16x16[4+3])(uint8_t *src, int stride);
+
+ void (*pred4x4_add [2])(uint8_t *pix/*align 4*/, const DCTELEM *block/*align 16*/, int stride);
+ void (*pred8x8l_add [2])(uint8_t *pix/*align 8*/, const DCTELEM *block/*align 16*/, int stride);
+ void (*pred8x8_add [3])(uint8_t *pix/*align 8*/, const int *block_offset, const DCTELEM *block/*align 16*/, int stride);
+ void (*pred16x16_add[3])(uint8_t *pix/*align 16*/, const int *block_offset, const DCTELEM *block/*align 16*/, int stride);
+}H264PredContext;
+
+void ff_h264_pred_init(H264PredContext *h, int codec_id);
+
+#endif /* AVCODEC_H264PRED_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/huffman.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/huffman.c
new file mode 100644
index 000000000..6b8ed4f75
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/huffman.c
@@ -0,0 +1,109 @@
+/**
+ * @file libavcodec/huffman.c
+ * Huffman tree builder and VLC generator
+ * Copyright (c) 2006 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avcodec.h"
+#include "get_bits.h"
+#include "huffman.h"
+
+/* symbol for Huffman tree node */
+#define HNODE -1
+
+
+static void get_tree_codes(uint32_t *bits, int16_t *lens, uint8_t *xlat, Node *nodes, int node, uint32_t pfx, int pl, int *pos, int no_zero_count)
+{
+ int s;
+
+ s = nodes[node].sym;
+ if(s != HNODE || (no_zero_count && !nodes[node].count)){
+ bits[*pos] = pfx;
+ lens[*pos] = pl;
+ xlat[*pos] = s;
+ (*pos)++;
+ }else{
+ pfx <<= 1;
+ pl++;
+ get_tree_codes(bits, lens, xlat, nodes, nodes[node].n0, pfx, pl, pos,
+ no_zero_count);
+ pfx |= 1;
+ get_tree_codes(bits, lens, xlat, nodes, nodes[node].n0+1, pfx, pl, pos,
+ no_zero_count);
+ }
+}
+
+static int build_huff_tree(VLC *vlc, Node *nodes, int head, int flags)
+{
+ int no_zero_count = !(flags & FF_HUFFMAN_FLAG_ZERO_COUNT);
+ uint32_t bits[256];
+ int16_t lens[256];
+ uint8_t xlat[256];
+ int pos = 0;
+
+ get_tree_codes(bits, lens, xlat, nodes, head, 0, 0, &pos, no_zero_count);
+ return init_vlc_sparse(vlc, 9, pos, lens, 2, 2, bits, 4, 4, xlat, 1, 1, 0);
+}
+
+
+/**
+ * The nodes array must hold 2*nb_codes entries;
+ * the count field of the first nb_codes nodes must be set by the caller.
+ */
+int ff_huff_build_tree(AVCodecContext *avctx, VLC *vlc, int nb_codes,
+ Node *nodes, HuffCmp cmp, int flags)
+{
+ int i, j;
+ int cur_node;
+ int64_t sum = 0;
+
+ for(i = 0; i < nb_codes; i++){
+ nodes[i].sym = i;
+ nodes[i].n0 = -2;
+ sum += nodes[i].count;
+ }
+
+ if(sum >> 31) {
+ av_log(avctx, AV_LOG_ERROR, "Too high symbol frequencies. Tree construction is not possible\n");
+ return -1;
+ }
+ qsort(nodes, nb_codes, sizeof(Node), cmp);
+ cur_node = nb_codes;
+ nodes[nb_codes*2-1].count = 0;
+ for(i = 0; i < nb_codes*2-1; i += 2){
+ nodes[cur_node].sym = HNODE;
+ nodes[cur_node].count = nodes[i].count + nodes[i+1].count;
+ nodes[cur_node].n0 = i;
+ for(j = cur_node; j > 0; j--){
+ if(nodes[j].count > nodes[j-1].count ||
+ (nodes[j].count == nodes[j-1].count &&
+ (!(flags & FF_HUFFMAN_FLAG_HNODE_FIRST) ||
+ nodes[j].n0==j-1 || nodes[j].n0==j-2 ||
+ (nodes[j].sym!=HNODE && nodes[j-1].sym!=HNODE))))
+ break;
+ FFSWAP(Node, nodes[j], nodes[j-1]);
+ }
+ cur_node++;
+ }
+ if(build_huff_tree(vlc, nodes, nb_codes*2-2, flags) < 0){
+ av_log(avctx, AV_LOG_ERROR, "Error building tree\n");
+ return -1;
+ }
+ return 0;
+}
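
As the doc comment on ff_huff_build_tree notes, the caller supplies a nodes array of 2*nb_codes entries with only the count fields of the first nb_codes nodes preset, plus a qsort comparison callback that orders the leaves by ascending count. A minimal sketch of such a caller is below; the frequency table and the function names are assumptions made for illustration only.

    #include "avcodec.h"
    #include "huffman.h"

    /* Sort ascending by count so the least frequent symbols are merged first,
     * which is the order the pairing loop in ff_huff_build_tree relies on. */
    static int cmp_node(const void *va, const void *vb)
    {
        const Node *a = va, *b = vb;
        return (a->count > b->count) - (a->count < b->count);
    }

    /* Illustrative sketch: build a VLC from 256 hypothetical byte frequencies. */
    static int build_example_vlc(AVCodecContext *avctx, VLC *vlc,
                                 const uint32_t freq[256])
    {
        Node nodes[2 * 256];          /* the array must hold 2*nb_codes entries */
        int i;

        for (i = 0; i < 256; i++)
            nodes[i].count = freq[i]; /* sym and n0 are filled in by ff_huff_build_tree */

        return ff_huff_build_tree(avctx, vlc, 256, nodes, cmp_node, 0);
    }
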
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/huffman.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/huffman.h
new file mode 100644
index 000000000..d56c7274b
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/huffman.h
@@ -0,0 +1,42 @@
+/**
+ * @file libavcodec/huffman.h
+ * Huffman tree builder and VLC generator
+ * Copyright (C) 2007 Aurelien Jacobs <aurel@gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_HUFFMAN_H
+#define AVCODEC_HUFFMAN_H
+
+#include "avcodec.h"
+#include "get_bits.h"
+
+typedef struct {
+ int16_t sym;
+ int16_t n0;
+ uint32_t count;
+} Node;
+
+#define FF_HUFFMAN_FLAG_HNODE_FIRST 0x01
+#define FF_HUFFMAN_FLAG_ZERO_COUNT 0x02
+
+typedef int (*HuffCmp)(const void *va, const void *vb);
+int ff_huff_build_tree(AVCodecContext *avctx, VLC *vlc, int nb_codes,
+ Node *nodes, HuffCmp cmp, int flags);
+
+#endif /* AVCODEC_HUFFMAN_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/imgconvert.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/imgconvert.c
new file mode 100644
index 000000000..6584fae99
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/imgconvert.c
@@ -0,0 +1,1112 @@
+/*
+ * Misc image conversion routines
+ * Copyright (c) 2001, 2002, 2003 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/imgconvert.c
+ * misc image conversion routines
+ */
+
+/* TODO:
+ * - write an 'ffimg' program to test all the image-related stuff
+ * - move all of the API to a slice-based system
+ * - integrate deinterlacing, postprocessing and scaling into the conversion process
+ */
+
+#include "avcodec.h"
+#include "dsputil.h"
+
+#if HAVE_MMX
+#include "x86/mmx.h"
+#include "x86/dsputil_mmx.h"
+#endif
+
+#define xglue(x, y) x ## y
+#define glue(x, y) xglue(x, y)
+
+#define FF_COLOR_RGB 0 /**< RGB color space */
+#define FF_COLOR_GRAY 1 /**< gray color space */
+#define FF_COLOR_YUV 2 /**< YUV color space. 16 <= Y <= 235, 16 <= U, V <= 240 */
+#define FF_COLOR_YUV_JPEG 3 /**< YUV color space. 0 <= Y <= 255, 0 <= U, V <= 255 */
+
+#define FF_PIXEL_PLANAR 0 /**< each channel has one component in AVPicture */
+#define FF_PIXEL_PACKED 1 /**< only one component containing all the channels */
+#define FF_PIXEL_PALETTE 2 /**< one component containing indexes for a palette */
+
+typedef struct PixFmtInfo {
+ const char *name;
+ uint8_t nb_channels; /**< number of channels (including alpha) */
+ uint8_t color_type; /**< color type (see FF_COLOR_xxx constants) */
+ uint8_t pixel_type; /**< pixel storage type (see FF_PIXEL_xxx constants) */
+ uint8_t is_alpha : 1; /**< true if alpha can be specified */
+ uint8_t is_hwaccel : 1; /**< true if this is an HW accelerated format */
+ uint8_t x_chroma_shift; /**< X chroma subsampling factor is 2 ^ shift */
+ uint8_t y_chroma_shift; /**< Y chroma subsampling factor is 2 ^ shift */
+ uint8_t depth; /**< bit depth of the color components */
+} PixFmtInfo;
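
The x_chroma_shift and y_chroma_shift fields above encode the chroma subsampling factor as a power of two (factor = 2 ^ shift), so yuv420p (shifts 1,1) halves both chroma dimensions while yuv410p (shifts 2,2) quarters them. The small helper below is only an illustration of how those fields translate into chroma plane sizes; its name and its round-up behaviour are assumptions, not part of this file.

    /* Illustrative sketch: derive chroma plane dimensions from the shifts,
     * rounding up so odd luma sizes still cover the whole picture. */
    static void chroma_plane_size(const PixFmtInfo *info, int width, int height,
                                  int *chroma_w, int *chroma_h)
    {
        *chroma_w = (width  + (1 << info->x_chroma_shift) - 1) >> info->x_chroma_shift;
        *chroma_h = (height + (1 << info->y_chroma_shift) - 1) >> info->y_chroma_shift;
    }

For a 1920x1080 yuv420p picture this yields 960x540 chroma planes.
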
+
+/* this table gives more information about formats */
+#if __STDC_VERSION__ >= 199901L
+static const PixFmtInfo pix_fmt_info[PIX_FMT_NB] = {
+ /* YUV formats */
+ [PIX_FMT_YUV420P] = {
+ .name = "yuv420p",
+ .nb_channels = 3,
+ .color_type = FF_COLOR_YUV,
+ .pixel_type = FF_PIXEL_PLANAR,
+ .depth = 8,
+ .x_chroma_shift = 1, .y_chroma_shift = 1,
+ },
+ [PIX_FMT_YUV422P] = {
+ .name = "yuv422p",
+ .nb_channels = 3,
+ .color_type = FF_COLOR_YUV,
+ .pixel_type = FF_PIXEL_PLANAR,
+ .depth = 8,
+ .x_chroma_shift = 1, .y_chroma_shift = 0,
+ },
+ [PIX_FMT_YUV444P] = {
+ .name = "yuv444p",
+ .nb_channels = 3,
+ .color_type = FF_COLOR_YUV,
+ .pixel_type = FF_PIXEL_PLANAR,
+ .depth = 8,
+ .x_chroma_shift = 0, .y_chroma_shift = 0,
+ },
+ [PIX_FMT_YUYV422] = {
+ .name = "yuyv422",
+ .nb_channels = 1,
+ .color_type = FF_COLOR_YUV,
+ .pixel_type = FF_PIXEL_PACKED,
+ .depth = 8,
+ .x_chroma_shift = 1, .y_chroma_shift = 0,
+ },
+ [PIX_FMT_UYVY422] = {
+ .name = "uyvy422",
+ .nb_channels = 1,
+ .color_type = FF_COLOR_YUV,
+ .pixel_type = FF_PIXEL_PACKED,
+ .depth = 8,
+ .x_chroma_shift = 1, .y_chroma_shift = 0,
+ },
+ [PIX_FMT_YUV410P] = {
+ .name = "yuv410p",
+ .nb_channels = 3,
+ .color_type = FF_COLOR_YUV,
+ .pixel_type = FF_PIXEL_PLANAR,
+ .depth = 8,
+ .x_chroma_shift = 2, .y_chroma_shift = 2,
+ },
+ [PIX_FMT_YUV411P] = {
+ .name = "yuv411p",
+ .nb_channels = 3,
+ .color_type = FF_COLOR_YUV,
+ .pixel_type = FF_PIXEL_PLANAR,
+ .depth = 8,
+ .x_chroma_shift = 2, .y_chroma_shift = 0,
+ },
+ [PIX_FMT_YUV440P] = {
+ .name = "yuv440p",
+ .nb_channels = 3,
+ .color_type = FF_COLOR_YUV,
+ .pixel_type = FF_PIXEL_PLANAR,
+ .depth = 8,
+ .x_chroma_shift = 0, .y_chroma_shift = 1,
+ },
+
+ /* YUV formats with alpha plane */
+ [PIX_FMT_YUVA420P] = {
+ .name = "yuva420p",
+ .nb_channels = 4,
+ .color_type = FF_COLOR_YUV,
+ .pixel_type = FF_PIXEL_PLANAR,
+ .depth = 8,
+ .x_chroma_shift = 1, .y_chroma_shift = 1,
+ },
+
+ /* JPEG YUV */
+ [PIX_FMT_YUVJ420P] = {
+ .name = "yuvj420p",
+ .nb_channels = 3,
+ .color_type = FF_COLOR_YUV_JPEG,
+ .pixel_type = FF_PIXEL_PLANAR,
+ .depth = 8,
+ .x_chroma_shift = 1, .y_chroma_shift = 1,
+ },
+ [PIX_FMT_YUVJ422P] = {
+ .name = "yuvj422p",
+ .nb_channels = 3,
+ .color_type = FF_COLOR_YUV_JPEG,
+ .pixel_type = FF_PIXEL_PLANAR,
+ .depth = 8,
+ .x_chroma_shift = 1, .y_chroma_shift = 0,
+ },
+ [PIX_FMT_YUVJ444P] = {
+ .name = "yuvj444p",
+ .nb_channels = 3,
+ .color_type = FF_COLOR_YUV_JPEG,
+ .pixel_type = FF_PIXEL_PLANAR,
+ .depth = 8,
+ .x_chroma_shift = 0, .y_chroma_shift = 0,
+ },
+ [PIX_FMT_YUVJ440P] = {
+ .name = "yuvj440p",
+ .nb_channels = 3,
+ .color_type = FF_COLOR_YUV_JPEG,
+ .pixel_type = FF_PIXEL_PLANAR,
+ .depth = 8,
+ .x_chroma_shift = 0, .y_chroma_shift = 1,
+ },
+
+ /* RGB formats */
+ [PIX_FMT_RGB24] = {
+ .name = "rgb24",
+ .nb_channels = 3,
+ .color_type = FF_COLOR_RGB,
+ .pixel_type = FF_PIXEL_PACKED,
+ .depth = 8,
+ .x_chroma_shift = 0, .y_chroma_shift = 0,
+ },
+ [PIX_FMT_BGR24] = {
+ .name = "bgr24",
+ .nb_channels = 3,
+ .color_type = FF_COLOR_RGB,
+ .pixel_type = FF_PIXEL_PACKED,
+ .depth = 8,
+ .x_chroma_shift = 0, .y_chroma_shift = 0,
+ },
+ [PIX_FMT_RGB32] = {
+ .name = "rgb32",
+ .nb_channels = 4, .is_alpha = 1,
+ .color_type = FF_COLOR_RGB,
+ .pixel_type = FF_PIXEL_PACKED,
+ .depth = 8,
+ .x_chroma_shift = 0, .y_chroma_shift = 0,
+ },
+ [PIX_FMT_RGB48BE] = {
+ .name = "rgb48be",
+ .nb_channels = 3,
+ .color_type = FF_COLOR_RGB,
+ .pixel_type = FF_PIXEL_PACKED,
+ .depth = 16,
+ .x_chroma_shift = 0, .y_chroma_shift = 0,
+ },
+ [PIX_FMT_RGB48LE] = {
+ .name = "rgb48le",
+ .nb_channels = 3,
+ .color_type = FF_COLOR_RGB,
+ .pixel_type = FF_PIXEL_PACKED,
+ .depth = 16,
+ .x_chroma_shift = 0, .y_chroma_shift = 0,
+ },
+ [PIX_FMT_RGB565] = {
+ .name = "rgb565",
+ .nb_channels = 3,
+ .color_type = FF_COLOR_RGB,
+ .pixel_type = FF_PIXEL_PACKED,
+ .depth = 5,
+ .x_chroma_shift = 0, .y_chroma_shift = 0,
+ },
+ [PIX_FMT_RGB555] = {
+ .name = "rgb555",
+ .nb_channels = 3,
+ .color_type = FF_COLOR_RGB,
+ .pixel_type = FF_PIXEL_PACKED,
+ .depth = 5,
+ .x_chroma_shift = 0, .y_chroma_shift = 0,
+ },
+
+ /* gray / mono formats */
+ [PIX_FMT_GRAY16BE] = {
+ .name = "gray16be",
+ .nb_channels = 1,
+ .color_type = FF_COLOR_GRAY,
+ .pixel_type = FF_PIXEL_PLANAR,
+ .depth = 16,
+ },
+ [PIX_FMT_GRAY16LE] = {
+ .name = "gray16le",
+ .nb_channels = 1,
+ .color_type = FF_COLOR_GRAY,
+ .pixel_type = FF_PIXEL_PLANAR,
+ .depth = 16,
+ },
+ [PIX_FMT_GRAY8] = {
+ .name = "gray",
+ .nb_channels = 1,
+ .color_type = FF_COLOR_GRAY,
+ .pixel_type = FF_PIXEL_PLANAR,
+ .depth = 8,
+ },
+ [PIX_FMT_MONOWHITE] = {
+ .name = "monow",
+ .nb_channels = 1,
+ .color_type = FF_COLOR_GRAY,
+ .pixel_type = FF_PIXEL_PLANAR,
+ .depth = 1,
+ },
+ [PIX_FMT_MONOBLACK] = {
+ .name = "monob",
+ .nb_channels = 1,
+ .color_type = FF_COLOR_GRAY,
+ .pixel_type = FF_PIXEL_PLANAR,
+ .depth = 1,
+ },
+
+ /* paletted formats */
+ [PIX_FMT_PAL8] = {
+ .name = "pal8",
+ .nb_channels = 4, .is_alpha = 1,
+ .color_type = FF_COLOR_RGB,
+ .pixel_type = FF_PIXEL_PALETTE,
+ .depth = 8,
+ },
+ [PIX_FMT_UYYVYY411] = {
+ .name = "uyyvyy411",
+ .nb_channels = 1,
+ .color_type = FF_COLOR_YUV,
+ .pixel_type = FF_PIXEL_PACKED,
+ .depth = 8,
+ .x_chroma_shift = 2, .y_chroma_shift = 0,
+ },
+ [PIX_FMT_BGR32] = {
+ .name = "bgr32",
+ .nb_channels = 4, .is_alpha = 1,
+ .color_type = FF_COLOR_RGB,
+ .pixel_type = FF_PIXEL_PACKED,
+ .depth = 8,
+ .x_chroma_shift = 0, .y_chroma_shift = 0,
+ },
+ [PIX_FMT_BGR565] = {
+ .name = "bgr565",
+ .nb_channels = 3,
+ .color_type = FF_COLOR_RGB,
+ .pixel_type = FF_PIXEL_PACKED,
+ .depth = 5,
+ .x_chroma_shift = 0, .y_chroma_shift = 0,
+ },
+ [PIX_FMT_BGR555] = {
+ .name = "bgr555",
+ .nb_channels = 3,
+ .color_type = FF_COLOR_RGB,
+ .pixel_type = FF_PIXEL_PACKED,
+ .depth = 5,
+ .x_chroma_shift = 0, .y_chroma_shift = 0,
+ },
+ [PIX_FMT_RGB8] = {
+ .name = "rgb8",
+ .nb_channels = 1,
+ .color_type = FF_COLOR_RGB,
+ .pixel_type = FF_PIXEL_PACKED,
+ .depth = 8,
+ .x_chroma_shift = 0, .y_chroma_shift = 0,
+ },
+ [PIX_FMT_RGB4] = {
+ .name = "rgb4",
+ .nb_channels = 1,
+ .color_type = FF_COLOR_RGB,
+ .pixel_type = FF_PIXEL_PACKED,
+ .depth = 4,
+ .x_chroma_shift = 0, .y_chroma_shift = 0,
+ },
+ [PIX_FMT_RGB4_BYTE] = {
+ .name = "rgb4_byte",
+ .nb_channels = 1,
+ .color_type = FF_COLOR_RGB,
+ .pixel_type = FF_PIXEL_PACKED,
+ .depth = 8,
+ .x_chroma_shift = 0, .y_chroma_shift = 0,
+ },
+ [PIX_FMT_BGR8] = {
+ .name = "bgr8",
+ .nb_channels = 1,
+ .color_type = FF_COLOR_RGB,
+ .pixel_type = FF_PIXEL_PACKED,
+ .depth = 8,
+ .x_chroma_shift = 0, .y_chroma_shift = 0,
+ },
+ [PIX_FMT_BGR4] = {
+ .name = "bgr4",
+ .nb_channels = 1,
+ .color_type = FF_COLOR_RGB,
+ .pixel_type = FF_PIXEL_PACKED,
+ .depth = 4,
+ .x_chroma_shift = 0, .y_chroma_shift = 0,
+ },
+ [PIX_FMT_BGR4_BYTE] = {
+ .name = "bgr4_byte",
+ .nb_channels = 1,
+ .color_type = FF_COLOR_RGB,
+ .pixel_type = FF_PIXEL_PACKED,
+ .depth = 8,
+ .x_chroma_shift = 0, .y_chroma_shift = 0,
+ },
+ [PIX_FMT_NV12] = {
+ .name = "nv12",
+ .nb_channels = 2,
+ .color_type = FF_COLOR_YUV,
+ .pixel_type = FF_PIXEL_PLANAR,
+ .depth = 8,
+ .x_chroma_shift = 1, .y_chroma_shift = 1,
+ },
+ [PIX_FMT_NV21] = {
+ .name = "nv12",
+ .nb_channels = 2,
+ .color_type = FF_COLOR_YUV,
+ .pixel_type = FF_PIXEL_PLANAR,
+ .depth = 8,
+ .x_chroma_shift = 1, .y_chroma_shift = 1,
+ },
+
+ [PIX_FMT_BGR32_1] = {
+ .name = "bgr32_1",
+ .nb_channels = 4, .is_alpha = 1,
+ .color_type = FF_COLOR_RGB,
+ .pixel_type = FF_PIXEL_PACKED,
+ .depth = 8,
+ .x_chroma_shift = 0, .y_chroma_shift = 0,
+ },
+ [PIX_FMT_RGB32_1] = {
+ .name = "rgb32_1",
+ .nb_channels = 4, .is_alpha = 1,
+ .color_type = FF_COLOR_RGB,
+ .pixel_type = FF_PIXEL_PACKED,
+ .depth = 8,
+ .x_chroma_shift = 0, .y_chroma_shift = 0,
+ },
+};
+
+#else
+static PixFmtInfo pix_fmt_info[PIX_FMT_NB];
+
+void avpicture_init_pixfmtinfo(void)
+{
+ pix_fmt_info[PIX_FMT_YUV420P].name = "yuv420p";
+ pix_fmt_info[PIX_FMT_YUV420P].nb_channels = 3;
+ pix_fmt_info[PIX_FMT_YUV420P].color_type = FF_COLOR_YUV;
+ pix_fmt_info[PIX_FMT_YUV420P].pixel_type = FF_PIXEL_PLANAR;
+ pix_fmt_info[PIX_FMT_YUV420P].depth = 8;
+ pix_fmt_info[PIX_FMT_YUV420P].x_chroma_shift = 1;
+ pix_fmt_info[PIX_FMT_YUV420P].y_chroma_shift = 1;
+
+ pix_fmt_info[PIX_FMT_YUV422P].name = "yuv422p";
+ pix_fmt_info[PIX_FMT_YUV422P].nb_channels = 3;
+ pix_fmt_info[PIX_FMT_YUV422P].color_type = FF_COLOR_YUV;
+ pix_fmt_info[PIX_FMT_YUV422P].pixel_type = FF_PIXEL_PLANAR;
+ pix_fmt_info[PIX_FMT_YUV422P].depth = 8;
+ pix_fmt_info[PIX_FMT_YUV422P].x_chroma_shift = 1;
+ pix_fmt_info[PIX_FMT_YUV422P].y_chroma_shift = 0;
+
+ pix_fmt_info[PIX_FMT_YUV444P].name = "yuv444p";
+ pix_fmt_info[PIX_FMT_YUV444P].nb_channels = 3;
+ pix_fmt_info[PIX_FMT_YUV444P].color_type = FF_COLOR_YUV;
+ pix_fmt_info[PIX_FMT_YUV444P].pixel_type = FF_PIXEL_PLANAR;
+ pix_fmt_info[PIX_FMT_YUV444P].depth = 8;
+ pix_fmt_info[PIX_FMT_YUV444P].x_chroma_shift = 0;
+ pix_fmt_info[PIX_FMT_YUV444P].y_chroma_shift = 0;
+
+ pix_fmt_info[PIX_FMT_YUYV422].name = "yuyv422";
+ pix_fmt_info[PIX_FMT_YUYV422].nb_channels = 1;
+ pix_fmt_info[PIX_FMT_YUYV422].color_type = FF_COLOR_YUV;
+ pix_fmt_info[PIX_FMT_YUYV422].pixel_type = FF_PIXEL_PACKED;
+ pix_fmt_info[PIX_FMT_YUYV422].depth = 8;
+ pix_fmt_info[PIX_FMT_YUYV422].x_chroma_shift = 1;
+ pix_fmt_info[PIX_FMT_YUYV422].y_chroma_shift = 0;
+
+ pix_fmt_info[PIX_FMT_UYVY422].name = "uyvy422";
+ pix_fmt_info[PIX_FMT_UYVY422].nb_channels = 1;
+ pix_fmt_info[PIX_FMT_UYVY422].color_type = FF_COLOR_YUV;
+ pix_fmt_info[PIX_FMT_UYVY422].pixel_type = FF_PIXEL_PACKED;
+ pix_fmt_info[PIX_FMT_UYVY422].depth = 8;
+ pix_fmt_info[PIX_FMT_UYVY422].x_chroma_shift = 1;
+ pix_fmt_info[PIX_FMT_UYVY422].y_chroma_shift = 0;
+
+ pix_fmt_info[PIX_FMT_YUV410P].name = "yuv410p";
+ pix_fmt_info[PIX_FMT_YUV410P].nb_channels = 3;
+ pix_fmt_info[PIX_FMT_YUV410P].color_type = FF_COLOR_YUV;
+ pix_fmt_info[PIX_FMT_YUV410P].pixel_type = FF_PIXEL_PLANAR;
+ pix_fmt_info[PIX_FMT_YUV410P].depth = 8;
+ pix_fmt_info[PIX_FMT_YUV410P].x_chroma_shift = 2;
+ pix_fmt_info[PIX_FMT_YUV410P].y_chroma_shift = 2;
+
+ pix_fmt_info[PIX_FMT_YUV411P].name = "yuv411p";
+ pix_fmt_info[PIX_FMT_YUV411P].nb_channels = 3;
+ pix_fmt_info[PIX_FMT_YUV411P].color_type = FF_COLOR_YUV;
+ pix_fmt_info[PIX_FMT_YUV411P].pixel_type = FF_PIXEL_PLANAR;
+ pix_fmt_info[PIX_FMT_YUV411P].depth = 8;
+ pix_fmt_info[PIX_FMT_YUV411P].x_chroma_shift = 2;
+ pix_fmt_info[PIX_FMT_YUV411P].y_chroma_shift = 0;
+
+ pix_fmt_info[PIX_FMT_YUV440P].name = "yuv440p";
+ pix_fmt_info[PIX_FMT_YUV440P].nb_channels = 3;
+ pix_fmt_info[PIX_FMT_YUV440P].color_type = FF_COLOR_YUV;
+ pix_fmt_info[PIX_FMT_YUV440P].pixel_type = FF_PIXEL_PLANAR;
+ pix_fmt_info[PIX_FMT_YUV440P].depth = 8;
+ pix_fmt_info[PIX_FMT_YUV440P].x_chroma_shift = 0;
+ pix_fmt_info[PIX_FMT_YUV440P].y_chroma_shift = 1;
+
+ pix_fmt_info[PIX_FMT_YUVA420P].name = "yuva420p";
+ pix_fmt_info[PIX_FMT_YUVA420P].nb_channels = 4;
+ pix_fmt_info[PIX_FMT_YUVA420P].color_type = FF_COLOR_YUV;
+ pix_fmt_info[PIX_FMT_YUVA420P].pixel_type = FF_PIXEL_PLANAR;
+ pix_fmt_info[PIX_FMT_YUVA420P].depth = 8;
+ pix_fmt_info[PIX_FMT_YUVA420P].x_chroma_shift = 1;
+ pix_fmt_info[PIX_FMT_YUVA420P].y_chroma_shift = 1;
+
+ /* JPEG YUV */
+ pix_fmt_info[PIX_FMT_YUVJ420P].name = "yuvj420p";
+ pix_fmt_info[PIX_FMT_YUVJ420P].nb_channels = 3;
+ pix_fmt_info[PIX_FMT_YUVJ420P].color_type = FF_COLOR_YUV_JPEG;
+ pix_fmt_info[PIX_FMT_YUVJ420P].pixel_type = FF_PIXEL_PLANAR;
+ pix_fmt_info[PIX_FMT_YUVJ420P].depth = 8;
+ pix_fmt_info[PIX_FMT_YUVJ420P].x_chroma_shift = 1;
+ pix_fmt_info[PIX_FMT_YUVJ420P].y_chroma_shift = 1;
+
+ pix_fmt_info[PIX_FMT_YUVJ422P].name = "yuvj422p";
+ pix_fmt_info[PIX_FMT_YUVJ422P].nb_channels = 3;
+ pix_fmt_info[PIX_FMT_YUVJ422P].color_type = FF_COLOR_YUV_JPEG;
+ pix_fmt_info[PIX_FMT_YUVJ422P].pixel_type = FF_PIXEL_PLANAR;
+ pix_fmt_info[PIX_FMT_YUVJ422P].depth = 8;
+ pix_fmt_info[PIX_FMT_YUVJ422P].x_chroma_shift = 1;
+ pix_fmt_info[PIX_FMT_YUVJ422P].y_chroma_shift = 0;
+
+ pix_fmt_info[PIX_FMT_YUVJ444P].name = "yuvj444p";
+ pix_fmt_info[PIX_FMT_YUVJ444P].nb_channels = 3;
+ pix_fmt_info[PIX_FMT_YUVJ444P].color_type = FF_COLOR_YUV_JPEG;
+ pix_fmt_info[PIX_FMT_YUVJ444P].pixel_type = FF_PIXEL_PLANAR;
+ pix_fmt_info[PIX_FMT_YUVJ444P].depth = 8;
+ pix_fmt_info[PIX_FMT_YUVJ444P].x_chroma_shift = 0;
+ pix_fmt_info[PIX_FMT_YUVJ444P].y_chroma_shift = 0;
+
+ pix_fmt_info[PIX_FMT_YUVJ440P].name = "yuvj440p";
+ pix_fmt_info[PIX_FMT_YUVJ440P].nb_channels = 3;
+ pix_fmt_info[PIX_FMT_YUVJ440P].color_type = FF_COLOR_YUV_JPEG;
+ pix_fmt_info[PIX_FMT_YUVJ440P].pixel_type = FF_PIXEL_PLANAR;
+ pix_fmt_info[PIX_FMT_YUVJ440P].depth = 8;
+ pix_fmt_info[PIX_FMT_YUVJ440P].x_chroma_shift = 0;
+ pix_fmt_info[PIX_FMT_YUVJ440P].y_chroma_shift = 1;
+
+ /* RGB formats */
+ pix_fmt_info[PIX_FMT_RGB24].name = "rgb24";
+ pix_fmt_info[PIX_FMT_RGB24].nb_channels = 3;
+ pix_fmt_info[PIX_FMT_RGB24].color_type = FF_COLOR_RGB;
+ pix_fmt_info[PIX_FMT_RGB24].pixel_type = FF_PIXEL_PACKED;
+ pix_fmt_info[PIX_FMT_RGB24].depth = 8;
+ pix_fmt_info[PIX_FMT_RGB24].x_chroma_shift = 0;
+ pix_fmt_info[PIX_FMT_RGB24].y_chroma_shift = 0;
+
+ pix_fmt_info[PIX_FMT_BGR24].name = "bgr24";
+ pix_fmt_info[PIX_FMT_BGR24].nb_channels = 3;
+ pix_fmt_info[PIX_FMT_BGR24].color_type = FF_COLOR_RGB;
+ pix_fmt_info[PIX_FMT_BGR24].pixel_type = FF_PIXEL_PACKED;
+ pix_fmt_info[PIX_FMT_BGR24].depth = 8;
+ pix_fmt_info[PIX_FMT_BGR24].x_chroma_shift = 0;
+ pix_fmt_info[PIX_FMT_BGR24].y_chroma_shift = 0;
+
+ pix_fmt_info[PIX_FMT_RGB32].name = "rgb32";
+ pix_fmt_info[PIX_FMT_RGB32].nb_channels = 4;
+ pix_fmt_info[PIX_FMT_RGB32].is_alpha = 1;
+ pix_fmt_info[PIX_FMT_RGB32].color_type = FF_COLOR_RGB;
+ pix_fmt_info[PIX_FMT_RGB32].pixel_type = FF_PIXEL_PACKED;
+ pix_fmt_info[PIX_FMT_RGB32].depth = 8;
+ pix_fmt_info[PIX_FMT_RGB32].x_chroma_shift = 0;
+ pix_fmt_info[PIX_FMT_RGB32].y_chroma_shift = 0;
+
+ pix_fmt_info[PIX_FMT_RGB48BE].name = "rgb48be";
+ pix_fmt_info[PIX_FMT_RGB48BE].nb_channels = 3;
+ pix_fmt_info[PIX_FMT_RGB48BE].color_type = FF_COLOR_RGB;
+ pix_fmt_info[PIX_FMT_RGB48BE].pixel_type = FF_PIXEL_PACKED;
+ pix_fmt_info[PIX_FMT_RGB48BE].depth = 16;
+ pix_fmt_info[PIX_FMT_RGB48BE].x_chroma_shift = 0;
+ pix_fmt_info[PIX_FMT_RGB48BE].y_chroma_shift = 0;
+
+ pix_fmt_info[PIX_FMT_RGB48LE].name = "rgb48le";
+ pix_fmt_info[PIX_FMT_RGB48LE].nb_channels = 3;
+ pix_fmt_info[PIX_FMT_RGB48LE].color_type = FF_COLOR_RGB;
+ pix_fmt_info[PIX_FMT_RGB48LE].pixel_type = FF_PIXEL_PACKED;
+ pix_fmt_info[PIX_FMT_RGB48LE].depth = 16;
+ pix_fmt_info[PIX_FMT_RGB48LE].x_chroma_shift = 0;
+ pix_fmt_info[PIX_FMT_RGB48LE].y_chroma_shift = 0;
+
+ pix_fmt_info[PIX_FMT_RGB565].name = "rgb565";
+ pix_fmt_info[PIX_FMT_RGB565].nb_channels = 3;
+ pix_fmt_info[PIX_FMT_RGB565].color_type = FF_COLOR_RGB;
+ pix_fmt_info[PIX_FMT_RGB565].pixel_type = FF_PIXEL_PACKED;
+ pix_fmt_info[PIX_FMT_RGB565].depth = 5;
+ pix_fmt_info[PIX_FMT_RGB565].x_chroma_shift = 0;
+ pix_fmt_info[PIX_FMT_RGB565].y_chroma_shift = 0;
+
+ pix_fmt_info[PIX_FMT_RGB555].name = "rgb555";
+ pix_fmt_info[PIX_FMT_RGB555].nb_channels = 3;
+ pix_fmt_info[PIX_FMT_RGB555].color_type = FF_COLOR_RGB;
+ pix_fmt_info[PIX_FMT_RGB555].pixel_type = FF_PIXEL_PACKED;
+ pix_fmt_info[PIX_FMT_RGB555].depth = 5;
+ pix_fmt_info[PIX_FMT_RGB555].x_chroma_shift = 0;
+ pix_fmt_info[PIX_FMT_RGB555].y_chroma_shift = 0;
+
+ /* gray / mono formats */
+ pix_fmt_info[PIX_FMT_GRAY16BE].name = "gray16be";
+ pix_fmt_info[PIX_FMT_GRAY16BE].nb_channels = 1;
+ pix_fmt_info[PIX_FMT_GRAY16BE].color_type = FF_COLOR_GRAY;
+ pix_fmt_info[PIX_FMT_GRAY16BE].pixel_type = FF_PIXEL_PLANAR;
+ pix_fmt_info[PIX_FMT_GRAY16BE].depth = 16;
+
+ pix_fmt_info[PIX_FMT_GRAY16LE].name = "gray16le";
+ pix_fmt_info[PIX_FMT_GRAY16LE].nb_channels = 1;
+ pix_fmt_info[PIX_FMT_GRAY16LE].color_type = FF_COLOR_GRAY;
+ pix_fmt_info[PIX_FMT_GRAY16LE].pixel_type = FF_PIXEL_PLANAR;
+ pix_fmt_info[PIX_FMT_GRAY16LE].depth = 16;
+
+ pix_fmt_info[PIX_FMT_GRAY8].name = "gray";
+ pix_fmt_info[PIX_FMT_GRAY8].nb_channels = 1;
+ pix_fmt_info[PIX_FMT_GRAY8].color_type = FF_COLOR_GRAY;
+ pix_fmt_info[PIX_FMT_GRAY8].pixel_type = FF_PIXEL_PLANAR;
+ pix_fmt_info[PIX_FMT_GRAY8].depth = 8;
+
+ pix_fmt_info[PIX_FMT_MONOWHITE].name = "monow";
+ pix_fmt_info[PIX_FMT_MONOWHITE].nb_channels = 1;
+ pix_fmt_info[PIX_FMT_MONOWHITE].color_type = FF_COLOR_GRAY;
+ pix_fmt_info[PIX_FMT_MONOWHITE].pixel_type = FF_PIXEL_PLANAR;
+ pix_fmt_info[PIX_FMT_MONOWHITE].depth = 1;
+
+ pix_fmt_info[PIX_FMT_MONOBLACK].name = "monob";
+ pix_fmt_info[PIX_FMT_MONOBLACK].nb_channels = 1;
+ pix_fmt_info[PIX_FMT_MONOBLACK].color_type = FF_COLOR_GRAY;
+ pix_fmt_info[PIX_FMT_MONOBLACK].pixel_type = FF_PIXEL_PLANAR;
+ pix_fmt_info[PIX_FMT_MONOBLACK].depth = 1;
+
+ /* paletted formats */
+ pix_fmt_info[PIX_FMT_PAL8].name = "pal8";
+ pix_fmt_info[PIX_FMT_PAL8].nb_channels = 4;
+ pix_fmt_info[PIX_FMT_PAL8].is_alpha = 1;
+ pix_fmt_info[PIX_FMT_PAL8].color_type = FF_COLOR_RGB;
+ pix_fmt_info[PIX_FMT_PAL8].pixel_type = FF_PIXEL_PALETTE;
+ pix_fmt_info[PIX_FMT_PAL8].depth = 8;
+
+ pix_fmt_info[PIX_FMT_UYYVYY411].name = "uyyvyy411";
+ pix_fmt_info[PIX_FMT_UYYVYY411].nb_channels = 1;
+ pix_fmt_info[PIX_FMT_UYYVYY411].color_type = FF_COLOR_YUV;
+ pix_fmt_info[PIX_FMT_UYYVYY411].pixel_type = FF_PIXEL_PACKED;
+ pix_fmt_info[PIX_FMT_UYYVYY411].depth = 8;
+ pix_fmt_info[PIX_FMT_UYYVYY411].x_chroma_shift = 2;
+ pix_fmt_info[PIX_FMT_UYYVYY411].y_chroma_shift = 0;
+
+ pix_fmt_info[PIX_FMT_BGR32].name = "bgr32";
+ pix_fmt_info[PIX_FMT_BGR32].nb_channels = 4;
+ pix_fmt_info[PIX_FMT_BGR32].is_alpha = 1;
+ pix_fmt_info[PIX_FMT_BGR32].color_type = FF_COLOR_RGB;
+ pix_fmt_info[PIX_FMT_BGR32].pixel_type = FF_PIXEL_PACKED;
+ pix_fmt_info[PIX_FMT_BGR32].depth = 8;
+ pix_fmt_info[PIX_FMT_BGR32].x_chroma_shift = 0;
+ pix_fmt_info[PIX_FMT_BGR32].y_chroma_shift = 0;
+
+ pix_fmt_info[PIX_FMT_BGR565].name = "bgr565";
+ pix_fmt_info[PIX_FMT_BGR565].nb_channels = 3;
+ pix_fmt_info[PIX_FMT_BGR565].color_type = FF_COLOR_RGB;
+ pix_fmt_info[PIX_FMT_BGR565].pixel_type = FF_PIXEL_PACKED;
+ pix_fmt_info[PIX_FMT_BGR565].depth = 5;
+ pix_fmt_info[PIX_FMT_BGR565].x_chroma_shift = 0;
+ pix_fmt_info[PIX_FMT_BGR565].y_chroma_shift = 0;
+
+ pix_fmt_info[PIX_FMT_BGR555].name = "bgr555";
+ pix_fmt_info[PIX_FMT_BGR555].nb_channels = 3;
+ pix_fmt_info[PIX_FMT_BGR555].color_type = FF_COLOR_RGB;
+ pix_fmt_info[PIX_FMT_BGR555].pixel_type = FF_PIXEL_PACKED;
+ pix_fmt_info[PIX_FMT_BGR555].depth = 5;
+ pix_fmt_info[PIX_FMT_BGR555].x_chroma_shift = 0;
+ pix_fmt_info[PIX_FMT_BGR555].y_chroma_shift = 0;
+
+ pix_fmt_info[PIX_FMT_RGB8].name = "rgb8";
+ pix_fmt_info[PIX_FMT_RGB8].nb_channels = 1;
+ pix_fmt_info[PIX_FMT_RGB8].color_type = FF_COLOR_RGB;
+ pix_fmt_info[PIX_FMT_RGB8].pixel_type = FF_PIXEL_PACKED;
+ pix_fmt_info[PIX_FMT_RGB8].depth = 8;
+ pix_fmt_info[PIX_FMT_RGB8].x_chroma_shift = 0;
+ pix_fmt_info[PIX_FMT_RGB8].y_chroma_shift = 0;
+
+ pix_fmt_info[PIX_FMT_RGB4].name = "rgb4";
+ pix_fmt_info[PIX_FMT_RGB4].nb_channels = 1;
+ pix_fmt_info[PIX_FMT_RGB4].color_type = FF_COLOR_RGB;
+ pix_fmt_info[PIX_FMT_RGB4].pixel_type = FF_PIXEL_PACKED;
+ pix_fmt_info[PIX_FMT_RGB4].depth = 4;
+ pix_fmt_info[PIX_FMT_RGB4].x_chroma_shift = 0;
+ pix_fmt_info[PIX_FMT_RGB4].y_chroma_shift = 0;
+
+ pix_fmt_info[PIX_FMT_RGB4_BYTE].name = "rgb4_byte";
+ pix_fmt_info[PIX_FMT_RGB4_BYTE].nb_channels = 1;
+ pix_fmt_info[PIX_FMT_RGB4_BYTE].color_type = FF_COLOR_RGB;
+ pix_fmt_info[PIX_FMT_RGB4_BYTE].pixel_type = FF_PIXEL_PACKED;
+ pix_fmt_info[PIX_FMT_RGB4_BYTE].depth = 8;
+ pix_fmt_info[PIX_FMT_RGB4_BYTE].x_chroma_shift = 0;
+ pix_fmt_info[PIX_FMT_RGB4_BYTE].y_chroma_shift = 0;
+
+ pix_fmt_info[PIX_FMT_BGR8].name = "bgr8";
+ pix_fmt_info[PIX_FMT_BGR8].nb_channels = 1;
+ pix_fmt_info[PIX_FMT_BGR8].color_type = FF_COLOR_RGB;
+ pix_fmt_info[PIX_FMT_BGR8].pixel_type = FF_PIXEL_PACKED;
+ pix_fmt_info[PIX_FMT_BGR8].depth = 8;
+ pix_fmt_info[PIX_FMT_BGR8].x_chroma_shift = 0;
+ pix_fmt_info[PIX_FMT_BGR8].y_chroma_shift = 0;
+
+ pix_fmt_info[PIX_FMT_BGR4].name = "bgr4";
+ pix_fmt_info[PIX_FMT_BGR4].nb_channels = 1;
+ pix_fmt_info[PIX_FMT_BGR4].color_type = FF_COLOR_RGB;
+ pix_fmt_info[PIX_FMT_BGR4].pixel_type = FF_PIXEL_PACKED;
+ pix_fmt_info[PIX_FMT_BGR4].depth = 4;
+ pix_fmt_info[PIX_FMT_BGR4].x_chroma_shift = 0;
+ pix_fmt_info[PIX_FMT_BGR4].y_chroma_shift = 0;
+
+ pix_fmt_info[PIX_FMT_BGR4_BYTE].name = "bgr4_byte";
+ pix_fmt_info[PIX_FMT_BGR4_BYTE].nb_channels = 1;
+ pix_fmt_info[PIX_FMT_BGR4_BYTE].color_type = FF_COLOR_RGB;
+ pix_fmt_info[PIX_FMT_BGR4_BYTE].pixel_type = FF_PIXEL_PACKED;
+ pix_fmt_info[PIX_FMT_BGR4_BYTE].depth = 8;
+ pix_fmt_info[PIX_FMT_BGR4_BYTE].x_chroma_shift = 0;
+ pix_fmt_info[PIX_FMT_BGR4_BYTE].y_chroma_shift = 0;
+
+ pix_fmt_info[PIX_FMT_NV12].name = "nv12";
+ pix_fmt_info[PIX_FMT_NV12].nb_channels = 2;
+ pix_fmt_info[PIX_FMT_NV12].color_type = FF_COLOR_YUV;
+ pix_fmt_info[PIX_FMT_NV12].pixel_type = FF_PIXEL_PLANAR;
+ pix_fmt_info[PIX_FMT_NV12].depth = 8;
+ pix_fmt_info[PIX_FMT_NV12].x_chroma_shift = 1;
+ pix_fmt_info[PIX_FMT_NV12].y_chroma_shift = 1;
+
+ pix_fmt_info[PIX_FMT_NV21].name = "nv21";
+ pix_fmt_info[PIX_FMT_NV21].nb_channels = 2;
+ pix_fmt_info[PIX_FMT_NV21].color_type = FF_COLOR_YUV;
+ pix_fmt_info[PIX_FMT_NV21].pixel_type = FF_PIXEL_PLANAR;
+ pix_fmt_info[PIX_FMT_NV21].depth = 8;
+ pix_fmt_info[PIX_FMT_NV21].x_chroma_shift = 1;
+ pix_fmt_info[PIX_FMT_NV21].y_chroma_shift = 1;
+
+ pix_fmt_info[PIX_FMT_BGR32_1].name = "bgr32_1";
+ pix_fmt_info[PIX_FMT_BGR32_1].nb_channels = 4;
+ pix_fmt_info[PIX_FMT_BGR32_1].is_alpha = 1;
+ pix_fmt_info[PIX_FMT_BGR32_1].color_type = FF_COLOR_RGB;
+ pix_fmt_info[PIX_FMT_BGR32_1].pixel_type = FF_PIXEL_PACKED;
+ pix_fmt_info[PIX_FMT_BGR32_1].depth = 8;
+ pix_fmt_info[PIX_FMT_BGR32_1].x_chroma_shift = 0;
+ pix_fmt_info[PIX_FMT_BGR32_1].y_chroma_shift = 0;
+
+ pix_fmt_info[PIX_FMT_RGB32_1].name = "rgb32_1";
+ pix_fmt_info[PIX_FMT_RGB32_1].nb_channels = 4;
+ pix_fmt_info[PIX_FMT_RGB32_1].is_alpha = 1;
+ pix_fmt_info[PIX_FMT_RGB32_1].color_type = FF_COLOR_RGB;
+ pix_fmt_info[PIX_FMT_RGB32_1].pixel_type = FF_PIXEL_PACKED;
+ pix_fmt_info[PIX_FMT_RGB32_1].depth = 8;
+ pix_fmt_info[PIX_FMT_RGB32_1].x_chroma_shift = 0;
+ pix_fmt_info[PIX_FMT_RGB32_1].y_chroma_shift = 0;
+}
+#endif
+
+void avcodec_get_chroma_sub_sample(int pix_fmt, int *h_shift, int *v_shift)
+{
+ *h_shift = pix_fmt_info[pix_fmt].x_chroma_shift;
+ *v_shift = pix_fmt_info[pix_fmt].y_chroma_shift;
+}
+
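+ /* Build a "systematic" palette for the packed low-depth RGB formats: RGB8 packs
+  * the palette index as rrrgggbb (3:3:2), BGR8 as bbgggrrr (2:3:3), RGB4_BYTE /
+  * BGR4_BYTE use a 1:2:1 split, and GRAY8 maps the index to itself. Each component
+  * is expanded toward full range (3 bits * 36, 2 bits * 85, 1 bit * 255) and the
+  * entry is stored as 0x00RRGGBB. */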
+int ff_set_systematic_pal(uint32_t pal[256], enum PixelFormat pix_fmt){
+ int i;
+
+ for(i=0; i<256; i++){
+ int r,g,b;
+
+ switch(pix_fmt) {
+ case PIX_FMT_RGB8:
+ r= (i>>5 )*36;
+ g= ((i>>2)&7)*36;
+ b= (i&3 )*85;
+ break;
+ case PIX_FMT_BGR8:
+ b= (i>>6 )*85;
+ g= ((i>>3)&7)*36;
+ r= (i&7 )*36;
+ break;
+ case PIX_FMT_RGB4_BYTE:
+ r= (i>>3 )*255;
+ g= ((i>>1)&3)*85;
+ b= (i&1 )*255;
+ break;
+ case PIX_FMT_BGR4_BYTE:
+ b= (i>>3 )*255;
+ g= ((i>>1)&3)*85;
+ r= (i&1 )*255;
+ break;
+ case PIX_FMT_GRAY8:
+ r=b=g= i;
+ break;
+ default:
+ return -1;
+ }
+ pal[i] = b + (g<<8) + (r<<16);
+ }
+
+ return 0;
+}
+
+int ff_fill_linesize(AVPicture *picture, enum PixelFormat pix_fmt, int width)
+{
+ int w2;
+ const PixFmtInfo *pinfo;
+
+ memset(picture->linesize, 0, sizeof(picture->linesize));
+
+ pinfo = &pix_fmt_info[pix_fmt];
+ switch(pix_fmt) {
+ case PIX_FMT_YUV420P:
+ case PIX_FMT_YUV422P:
+ case PIX_FMT_YUV444P:
+ case PIX_FMT_YUV410P:
+ case PIX_FMT_YUV411P:
+ case PIX_FMT_YUV440P:
+ case PIX_FMT_YUVJ420P:
+ case PIX_FMT_YUVJ422P:
+ case PIX_FMT_YUVJ444P:
+ case PIX_FMT_YUVJ440P:
+ w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
+ picture->linesize[0] = width;
+ picture->linesize[1] = w2;
+ picture->linesize[2] = w2;
+ break;
+ case PIX_FMT_YUVA420P:
+ w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
+ picture->linesize[0] = width;
+ picture->linesize[1] = w2;
+ picture->linesize[2] = w2;
+ picture->linesize[3] = width;
+ break;
+ case PIX_FMT_NV12:
+ case PIX_FMT_NV21:
+ w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
+ picture->linesize[0] = width;
+ picture->linesize[1] = 2 * w2;
+ break;
+ case PIX_FMT_RGB24:
+ case PIX_FMT_BGR24:
+ picture->linesize[0] = width * 3;
+ break;
+ case PIX_FMT_RGB32:
+ case PIX_FMT_BGR32:
+ case PIX_FMT_RGB32_1:
+ case PIX_FMT_BGR32_1:
+ picture->linesize[0] = width * 4;
+ break;
+ case PIX_FMT_RGB48BE:
+ case PIX_FMT_RGB48LE:
+ picture->linesize[0] = width * 6;
+ break;
+ case PIX_FMT_GRAY16BE:
+ case PIX_FMT_GRAY16LE:
+ case PIX_FMT_BGR555:
+ case PIX_FMT_BGR565:
+ case PIX_FMT_RGB555:
+ case PIX_FMT_RGB565:
+ case PIX_FMT_YUYV422:
+ picture->linesize[0] = width * 2;
+ break;
+ case PIX_FMT_UYVY422:
+ picture->linesize[0] = width * 2;
+ break;
+ case PIX_FMT_UYYVYY411:
+ picture->linesize[0] = width + width/2;
+ break;
+ case PIX_FMT_RGB4:
+ case PIX_FMT_BGR4:
+ picture->linesize[0] = width / 2;
+ break;
+ case PIX_FMT_MONOWHITE:
+ case PIX_FMT_MONOBLACK:
+ picture->linesize[0] = (width + 7) >> 3;
+ break;
+ case PIX_FMT_PAL8:
+ case PIX_FMT_RGB8:
+ case PIX_FMT_BGR8:
+ case PIX_FMT_RGB4_BYTE:
+ case PIX_FMT_BGR4_BYTE:
+ case PIX_FMT_GRAY8:
+ picture->linesize[0] = width;
+ break;
+ default:
+ return -1;
+ }
+ return 0;
+}
+
+int ff_fill_pointer(AVPicture *picture, uint8_t *ptr, enum PixelFormat pix_fmt,
+ int height)
+{
+ int size, h2, size2;
+ const PixFmtInfo *pinfo;
+
+ pinfo = &pix_fmt_info[pix_fmt];
+ size = picture->linesize[0] * height;
+ switch(pix_fmt) {
+ case PIX_FMT_YUV420P:
+ case PIX_FMT_YUV422P:
+ case PIX_FMT_YUV444P:
+ case PIX_FMT_YUV410P:
+ case PIX_FMT_YUV411P:
+ case PIX_FMT_YUV440P:
+ case PIX_FMT_YUVJ420P:
+ case PIX_FMT_YUVJ422P:
+ case PIX_FMT_YUVJ444P:
+ case PIX_FMT_YUVJ440P:
+ h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
+ size2 = picture->linesize[1] * h2;
+ picture->data[0] = ptr;
+ picture->data[1] = picture->data[0] + size;
+ picture->data[2] = picture->data[1] + size2;
+ picture->data[3] = NULL;
+ return size + 2 * size2;
+ case PIX_FMT_YUVA420P:
+ h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
+ size2 = picture->linesize[1] * h2;
+ picture->data[0] = ptr;
+ picture->data[1] = picture->data[0] + size;
+ picture->data[2] = picture->data[1] + size2;
+ picture->data[3] = picture->data[1] + size2 + size2;
+ return 2 * size + 2 * size2;
+ case PIX_FMT_NV12:
+ case PIX_FMT_NV21:
+ h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
+ size2 = picture->linesize[1] * h2;
+ picture->data[0] = ptr;
+ picture->data[1] = picture->data[0] + size;
+ picture->data[2] = NULL;
+ picture->data[3] = NULL;
+ return size + size2;
+ case PIX_FMT_RGB24:
+ case PIX_FMT_BGR24:
+ case PIX_FMT_RGB32:
+ case PIX_FMT_BGR32:
+ case PIX_FMT_RGB32_1:
+ case PIX_FMT_BGR32_1:
+ case PIX_FMT_RGB48BE:
+ case PIX_FMT_RGB48LE:
+ case PIX_FMT_GRAY16BE:
+ case PIX_FMT_GRAY16LE:
+ case PIX_FMT_BGR555:
+ case PIX_FMT_BGR565:
+ case PIX_FMT_RGB555:
+ case PIX_FMT_RGB565:
+ case PIX_FMT_YUYV422:
+ case PIX_FMT_UYVY422:
+ case PIX_FMT_UYYVYY411:
+ case PIX_FMT_RGB4:
+ case PIX_FMT_BGR4:
+ case PIX_FMT_MONOWHITE:
+ case PIX_FMT_MONOBLACK:
+ picture->data[0] = ptr;
+ picture->data[1] = NULL;
+ picture->data[2] = NULL;
+ picture->data[3] = NULL;
+ return size;
+ case PIX_FMT_PAL8:
+ case PIX_FMT_RGB8:
+ case PIX_FMT_BGR8:
+ case PIX_FMT_RGB4_BYTE:
+ case PIX_FMT_BGR4_BYTE:
+ case PIX_FMT_GRAY8:
+ size2 = (size + 3) & ~3;
+ picture->data[0] = ptr;
+ picture->data[1] = ptr + size2; /* palette is stored here as 256 32 bit words */
+ picture->data[2] = NULL;
+ picture->data[3] = NULL;
+ return size2 + 256 * 4;
+ default:
+ picture->data[0] = NULL;
+ picture->data[1] = NULL;
+ picture->data[2] = NULL;
+ picture->data[3] = NULL;
+ return -1;
+ }
+}
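+ /* Minimal usage sketch (hypothetical caller, not part of this file): given a
+  * buffer 'buf' large enough for the frame,
+  *     AVPicture pic;
+  *     ff_fill_linesize(&pic, pix_fmt, width);                  // per-plane strides
+  *     int size = ff_fill_pointer(&pic, buf, pix_fmt, height);  // plane pointers + total size
+  * which is roughly the work the public avpicture_fill() performs. */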
+
+void ff_img_copy_plane(uint8_t *dst, int dst_wrap,
+ const uint8_t *src, int src_wrap,
+ int width, int height)
+{
+ if((!dst) || (!src))
+ return;
+ for(;height > 0; height--) {
+ memcpy(dst, src, width);
+ dst += dst_wrap;
+ src += src_wrap;
+ }
+}
+
+int ff_get_plane_bytewidth(enum PixelFormat pix_fmt, int width, int plane)
+{
+ int bits;
+ const PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
+
+ switch(pf->pixel_type) {
+ case FF_PIXEL_PACKED:
+ switch(pix_fmt) {
+ case PIX_FMT_YUYV422:
+ case PIX_FMT_UYVY422:
+ case PIX_FMT_RGB565:
+ case PIX_FMT_RGB555:
+ case PIX_FMT_BGR565:
+ case PIX_FMT_BGR555:
+ bits = 16;
+ break;
+ case PIX_FMT_UYYVYY411:
+ bits = 12;
+ break;
+ default:
+ bits = pf->depth * pf->nb_channels;
+ break;
+ }
+ return (width * bits + 7) >> 3;
+ break;
+ case FF_PIXEL_PLANAR:
+ if (plane == 1 || plane == 2)
+ width= -((-width)>>pf->x_chroma_shift);
+
+ return (width * pf->depth + 7) >> 3;
+ break;
+ case FF_PIXEL_PALETTE:
+ if (plane == 0)
+ return width;
+ break;
+ }
+
+ return -1;
+}
+
+void av_picture_copy(AVPicture *dst, const AVPicture *src,
+ enum PixelFormat pix_fmt, int width, int height)
+{
+ int i;
+ const PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
+
+ switch(pf->pixel_type) {
+ case FF_PIXEL_PACKED:
+ case FF_PIXEL_PLANAR:
+ for(i = 0; i < pf->nb_channels; i++) {
+ int h;
+ int bwidth = ff_get_plane_bytewidth(pix_fmt, width, i);
+ h = height;
+ if (i == 1 || i == 2) {
+ h= -((-height)>>pf->y_chroma_shift);
+ }
+ ff_img_copy_plane(dst->data[i], dst->linesize[i],
+ src->data[i], src->linesize[i],
+ bwidth, h);
+ }
+ break;
+ case FF_PIXEL_PALETTE:
+ ff_img_copy_plane(dst->data[0], dst->linesize[0],
+ src->data[0], src->linesize[0],
+ width, height);
+ /* copy the palette */
+ memcpy(dst->data[1], src->data[1], 4*256);
+ break;
+ }
+}
+
+/* 2x2 -> 1x1 */
+void ff_shrink22(uint8_t *dst, int dst_wrap,
+ const uint8_t *src, int src_wrap,
+ int width, int height)
+{
+ int w;
+ const uint8_t *s1, *s2;
+ uint8_t *d;
+
+ for(;height > 0; height--) {
+ s1 = src;
+ s2 = s1 + src_wrap;
+ d = dst;
+ for(w = width;w >= 4; w-=4) {
+ d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
+ d[1] = (s1[2] + s1[3] + s2[2] + s2[3] + 2) >> 2;
+ d[2] = (s1[4] + s1[5] + s2[4] + s2[5] + 2) >> 2;
+ d[3] = (s1[6] + s1[7] + s2[6] + s2[7] + 2) >> 2;
+ s1 += 8;
+ s2 += 8;
+ d += 4;
+ }
+ for(;w > 0; w--) {
+ d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
+ s1 += 2;
+ s2 += 2;
+ d++;
+ }
+ src += 2 * src_wrap;
+ dst += dst_wrap;
+ }
+}
+
+/* 4x4 -> 1x1 */
+void ff_shrink44(uint8_t *dst, int dst_wrap,
+ const uint8_t *src, int src_wrap,
+ int width, int height)
+{
+ int w;
+ const uint8_t *s1, *s2, *s3, *s4;
+ uint8_t *d;
+
+ for(;height > 0; height--) {
+ s1 = src;
+ s2 = s1 + src_wrap;
+ s3 = s2 + src_wrap;
+ s4 = s3 + src_wrap;
+ d = dst;
+ for(w = width;w > 0; w--) {
+ d[0] = (s1[0] + s1[1] + s1[2] + s1[3] +
+ s2[0] + s2[1] + s2[2] + s2[3] +
+ s3[0] + s3[1] + s3[2] + s3[3] +
+ s4[0] + s4[1] + s4[2] + s4[3] + 8) >> 4;
+ s1 += 4;
+ s2 += 4;
+ s3 += 4;
+ s4 += 4;
+ d++;
+ }
+ src += 4 * src_wrap;
+ dst += dst_wrap;
+ }
+}
+
+/* 8x8 -> 1x1 */
+void ff_shrink88(uint8_t *dst, int dst_wrap,
+ const uint8_t *src, int src_wrap,
+ int width, int height)
+{
+ int w, i;
+
+ for(;height > 0; height--) {
+ for(w = width;w > 0; w--) {
+ int tmp=0;
+ for(i=0; i<8; i++){
+ tmp += src[0] + src[1] + src[2] + src[3] + src[4] + src[5] + src[6] + src[7];
+ src += src_wrap;
+ }
+ *(dst++) = (tmp + 32)>>6;
+ src += 8 - 8*src_wrap;
+ }
+ src += 8*src_wrap - 8*width;
+ dst += dst_wrap - width;
+ }
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/imgconvert.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/imgconvert.h
new file mode 100644
index 000000000..48e2f1271
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/imgconvert.h
@@ -0,0 +1,38 @@
+/*
+ * Misc image conversion routines
+ * most functionality is exported to the public API, see avcodec.h
+ *
+ * Copyright (c) 2008 Vitor Sessak
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_IMGCONVERT_H
+#define AVCODEC_IMGCONVERT_H
+
+#include <stdint.h>
+#include "avcodec.h"
+
+int ff_fill_linesize(AVPicture *picture, enum PixelFormat pix_fmt, int width);
+
+int ff_fill_pointer(AVPicture *picture, uint8_t *ptr, enum PixelFormat pix_fmt, int height);
+
+int ff_get_plane_bytewidth(enum PixelFormat pix_fmt, int width, int plane);
+
+int ff_set_systematic_pal(uint32_t pal[256], enum PixelFormat pix_fmt);
+
+#endif /* AVCODEC_IMGCONVERT_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/intelh263dec.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/intelh263dec.c
new file mode 100644
index 000000000..30dcf7c55
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/intelh263dec.c
@@ -0,0 +1,133 @@
+/*
+ * H.263i decoder
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "mpegvideo.h"
+#include "h263.h"
+
+/* don't understand why they chose a different header! */
+int ff_intel_h263_decode_picture_header(MpegEncContext *s)
+{
+ int format;
+
+ /* picture header */
+ if (get_bits_long(&s->gb, 22) != 0x20) {
+ av_log(s->avctx, AV_LOG_ERROR, "Bad picture start code\n");
+ return -1;
+ }
+ s->picture_number = get_bits(&s->gb, 8); /* picture timestamp */
+
+ if (get_bits1(&s->gb) != 1) {
+ av_log(s->avctx, AV_LOG_ERROR, "Bad marker\n");
+ return -1; /* marker */
+ }
+ if (get_bits1(&s->gb) != 0) {
+ av_log(s->avctx, AV_LOG_ERROR, "Bad H263 id\n");
+ return -1; /* h263 id */
+ }
+ skip_bits1(&s->gb); /* split screen off */
+ skip_bits1(&s->gb); /* camera off */
+ skip_bits1(&s->gb); /* freeze picture release off */
+
+ format = get_bits(&s->gb, 3);
+ if (format != 7) {
+ av_log(s->avctx, AV_LOG_ERROR, "Intel H263 free format not supported\n");
+ return -1;
+ }
+ s->h263_plus = 0;
+
+ s->pict_type = FF_I_TYPE + get_bits1(&s->gb);
+
+ s->unrestricted_mv = get_bits1(&s->gb);
+ s->h263_long_vectors = s->unrestricted_mv;
+
+ if (get_bits1(&s->gb) != 0) {
+ av_log(s->avctx, AV_LOG_ERROR, "SAC not supported\n");
+ return -1; /* SAC: off */
+ }
+ s->obmc= get_bits1(&s->gb);
+ s->pb_frame = get_bits1(&s->gb);
+
+ if(format == 7){
+ format = get_bits(&s->gb, 3);
+ if(format == 0 || format == 7){
+ av_log(s->avctx, AV_LOG_ERROR, "Wrong Intel H263 format\n");
+ return -1;
+ }
+ if(get_bits(&s->gb, 2))
+ av_log(s->avctx, AV_LOG_ERROR, "Bad value for reserved field\n");
+ s->loop_filter = get_bits1(&s->gb);
+ if(get_bits1(&s->gb))
+ av_log(s->avctx, AV_LOG_ERROR, "Bad value for reserved field\n");
+ if(get_bits1(&s->gb))
+ s->pb_frame = 2;
+ if(get_bits(&s->gb, 5))
+ av_log(s->avctx, AV_LOG_ERROR, "Bad value for reserved field\n");
+ if(get_bits(&s->gb, 5) != 1)
+ av_log(s->avctx, AV_LOG_ERROR, "Invalid marker\n");
+ }
+ if(format == 6){
+ int ar = get_bits(&s->gb, 4);
+ skip_bits(&s->gb, 9); // display width
+ skip_bits1(&s->gb);
+ skip_bits(&s->gb, 9); // display height
+ if(ar == 15){
+ skip_bits(&s->gb, 8); // aspect ratio - width
+ skip_bits(&s->gb, 8); // aspect ratio - height
+ }
+ }
+
+ s->chroma_qscale= s->qscale = get_bits(&s->gb, 5);
+ skip_bits1(&s->gb); /* Continuous Presence Multipoint mode: off */
+
+ if(s->pb_frame){
+ skip_bits(&s->gb, 3); //temporal reference for B-frame
+ skip_bits(&s->gb, 2); //dbquant
+ }
+
+ /* PEI */
+ while (get_bits1(&s->gb) != 0) {
+ skip_bits(&s->gb, 8);
+ }
+ s->f_code = 1;
+
+ s->y_dc_scale_table=
+ s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
+
+ ff_h263_show_pict_info(s);
+
+ return 0;
+}
+
+AVCodec h263i_decoder = {
+ "h263i",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_H263I,
+ sizeof(MpegEncContext),
+ ff_h263_decode_init,
+ NULL,
+ ff_h263_decode_end,
+ ff_h263_decode_frame,
+ CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
+ /*.next = */NULL,
+ /*.flush = */NULL,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name = */NULL_IF_CONFIG_SMALL("Intel H.263"),
+};
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/internal.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/internal.h
new file mode 100644
index 000000000..774bdb95d
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/internal.h
@@ -0,0 +1,36 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/internal.h
+ * common internal api header.
+ */
+
+#ifndef AVCODEC_INTERNAL_H
+#define AVCODEC_INTERNAL_H
+
+#include <stdint.h>
+#include "avcodec.h"
+
+/**
+ * Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
+ * If there is no such matching pair then size is returned.
+ */
+int ff_match_2uint16(const uint16_t (*tab)[2], int size, int a, int b);
+
+#endif /* AVCODEC_INTERNAL_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/intrax8.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/intrax8.c
new file mode 100644
index 000000000..70e3bff75
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/intrax8.c
@@ -0,0 +1,789 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/intrax8.c
+ * @brief IntraX8 (J-Frame) subdecoder, used by WMV2 and VC-1
+ */
+
+#include "avcodec.h"
+#include "get_bits.h"
+#include "mpegvideo.h"
+#include "msmpeg4data.h"
+#include "intrax8huf.h"
+#include "intrax8.h"
+
+#define MAX_TABLE_DEPTH(table_bits, max_bits) ((max_bits+table_bits-1)/table_bits)
+
+#define DC_VLC_BITS 9
+#define AC_VLC_BITS 9
+#define OR_VLC_BITS 7
+
+#define DC_VLC_MTD MAX_TABLE_DEPTH(DC_VLC_BITS, MAX_DC_VLC_BITS)
+#define AC_VLC_MTD MAX_TABLE_DEPTH(AC_VLC_BITS, MAX_AC_VLC_BITS)
+#define OR_VLC_MTD MAX_TABLE_DEPTH(OR_VLC_BITS, MAX_OR_VLC_BITS)
+
+static VLC j_ac_vlc[2][2][8]; //[quant<13],[intra/inter],[select]
+static VLC j_dc_vlc[2][8]; //[quant], [select]
+static VLC j_orient_vlc[2][4]; //[quant], [select]
+
+static av_cold void x8_vlc_init(void){
+ int i;
+ int offset = 0;
+ int sizeidx = 0;
+ static const uint16_t sizes[8*4 + 8*2 + 2 + 4] = {
+ 576, 548, 582, 618, 546, 616, 560, 642,
+ 584, 582, 704, 664, 512, 544, 656, 640,
+ 512, 648, 582, 566, 532, 614, 596, 648,
+ 586, 552, 584, 590, 544, 578, 584, 624,
+
+ 528, 528, 526, 528, 536, 528, 526, 544,
+ 544, 512, 512, 528, 528, 544, 512, 544,
+
+ 128, 128, 128, 128, 128, 128};
+
+ static VLC_TYPE table[28150][2];
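+ /* Every VLC below is carved out of this single static table: each init_*_vlc()
+  * invocation takes the next sizes[] slice starting at 'offset', and the check at
+  * the end of x8_vlc_init() warns if the slices do not exactly fill the table. */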
+
+#define init_ac_vlc(dst,src) \
+ dst.table = &table[offset]; \
+ dst.table_allocated = sizes[sizeidx]; \
+ offset += sizes[sizeidx++]; \
+ init_vlc(&dst, \
+ AC_VLC_BITS,77, \
+ &src[1],4,2, \
+ &src[0],4,2, \
+ INIT_VLC_USE_NEW_STATIC)
+//set ac tables
+ for(i=0;i<8;i++){
+ init_ac_vlc( j_ac_vlc[0][0][i], x8_ac0_highquant_table[i][0] );
+ init_ac_vlc( j_ac_vlc[0][1][i], x8_ac1_highquant_table[i][0] );
+ init_ac_vlc( j_ac_vlc[1][0][i], x8_ac0_lowquant_table [i][0] );
+ init_ac_vlc( j_ac_vlc[1][1][i], x8_ac1_lowquant_table [i][0] );
+ }
+#undef init_ac_vlc
+
+//set dc tables
+#define init_dc_vlc(dst,src) \
+ dst.table = &table[offset]; \
+ dst.table_allocated = sizes[sizeidx]; \
+ offset += sizes[sizeidx++]; \
+ init_vlc(&dst, \
+ DC_VLC_BITS,34, \
+ &src[1],4,2, \
+ &src[0],4,2, \
+ INIT_VLC_USE_NEW_STATIC);
+ for(i=0;i<8;i++){
+ init_dc_vlc( j_dc_vlc[0][i], x8_dc_highquant_table[i][0]);
+ init_dc_vlc( j_dc_vlc[1][i], x8_dc_lowquant_table [i][0]);
+ }
+#undef init_dc_vlc
+
+//set orient tables
+#define init_or_vlc(dst,src) \
+ dst.table = &table[offset]; \
+ dst.table_allocated = sizes[sizeidx]; \
+ offset += sizes[sizeidx++]; \
+ init_vlc(&dst, \
+ OR_VLC_BITS,12, \
+ &src[1],4,2, \
+ &src[0],4,2, \
+ INIT_VLC_USE_NEW_STATIC);
+ for(i=0;i<2;i++){
+ init_or_vlc( j_orient_vlc[0][i], x8_orient_highquant_table[i][0]);
+ }
+ for(i=0;i<4;i++){
+ init_or_vlc( j_orient_vlc[1][i], x8_orient_lowquant_table [i][0])
+ }
+ if (offset != sizeof(table)/sizeof(VLC_TYPE)/2)
+ av_log(NULL, AV_LOG_ERROR, "table size %i does not match needed %i\n", (int)(sizeof(table)/sizeof(VLC_TYPE)/2), offset);
+}
+#undef init_or_vlc
+
+static void x8_reset_vlc_tables(IntraX8Context * w){
+ memset(w->j_dc_vlc,0,sizeof(w->j_dc_vlc));
+ memset(w->j_ac_vlc,0,sizeof(w->j_ac_vlc));
+ w->j_orient_vlc=NULL;
+}
+
+static inline void x8_select_ac_table(IntraX8Context * const w , int mode){
+ MpegEncContext * const s= w->s;
+ int table_index;
+
+ assert(mode<4);
+
+ if( w->j_ac_vlc[mode] ) return;
+
+ table_index = get_bits(&s->gb, 3);
+ w->j_ac_vlc[mode] = &j_ac_vlc[w->quant<13][mode>>1][table_index];//2 modes use same tables
+ assert(w->j_ac_vlc[mode]);
+}
+
+static inline int x8_get_orient_vlc(IntraX8Context * w){
+ MpegEncContext * const s= w->s;
+ int table_index;
+
+ if(!w->j_orient_vlc ){
+ table_index = get_bits(&s->gb, 1+(w->quant<13) );
+ w->j_orient_vlc = &j_orient_vlc[w->quant<13][table_index];
+ }
+ assert(w->j_orient_vlc);
+ assert(w->j_orient_vlc->table);
+
+ return get_vlc2(&s->gb, w->j_orient_vlc->table, OR_VLC_BITS, OR_VLC_MTD);
+}
+
+#define extra_bits(eb) (eb)
+#define extra_run (0xFF<<8)
+#define extra_level (0x00<<8)
+#define run_offset(r) ((r)<<16)
+#define level_offset(l) ((l)<<24)
+static const uint32_t ac_decode_table[]={
+ /*46*/ extra_bits(3) | extra_run | run_offset(16) | level_offset( 0),
+ /*47*/ extra_bits(3) | extra_run | run_offset(24) | level_offset( 0),
+ /*48*/ extra_bits(2) | extra_run | run_offset( 4) | level_offset( 1),
+ /*49*/ extra_bits(3) | extra_run | run_offset( 8) | level_offset( 1),
+
+ /*50*/ extra_bits(5) | extra_run | run_offset(32) | level_offset( 0),
+ /*51*/ extra_bits(4) | extra_run | run_offset(16) | level_offset( 1),
+
+ /*52*/ extra_bits(2) | extra_level | run_offset( 0) | level_offset( 4),
+ /*53*/ extra_bits(2) | extra_level | run_offset( 0) | level_offset( 8),
+ /*54*/ extra_bits(2) | extra_level | run_offset( 0) | level_offset(12),
+ /*55*/ extra_bits(3) | extra_level | run_offset( 0) | level_offset(16),
+ /*56*/ extra_bits(3) | extra_level | run_offset( 0) | level_offset(24),
+
+ /*57*/ extra_bits(2) | extra_level | run_offset( 1) | level_offset( 3),
+ /*58*/ extra_bits(3) | extra_level | run_offset( 1) | level_offset( 7),
+
+ /*59*/ extra_bits(2) | extra_run | run_offset(16) | level_offset( 0),
+ /*60*/ extra_bits(2) | extra_run | run_offset(20) | level_offset( 0),
+ /*61*/ extra_bits(2) | extra_run | run_offset(24) | level_offset( 0),
+ /*62*/ extra_bits(2) | extra_run | run_offset(28) | level_offset( 0),
+ /*63*/ extra_bits(4) | extra_run | run_offset(32) | level_offset( 0),
+ /*64*/ extra_bits(4) | extra_run | run_offset(48) | level_offset( 0),
+
+ /*65*/ extra_bits(2) | extra_run | run_offset( 4) | level_offset( 1),
+ /*66*/ extra_bits(3) | extra_run | run_offset( 8) | level_offset( 1),
+ /*67*/ extra_bits(4) | extra_run | run_offset(16) | level_offset( 1),
+
+ /*68*/ extra_bits(2) | extra_level | run_offset( 0) | level_offset( 4),
+ /*69*/ extra_bits(3) | extra_level | run_offset( 0) | level_offset( 8),
+ /*70*/ extra_bits(4) | extra_level | run_offset( 0) | level_offset(16),
+
+ /*71*/ extra_bits(2) | extra_level | run_offset( 1) | level_offset( 3),
+ /*72*/ extra_bits(3) | extra_level | run_offset( 1) | level_offset( 7),
+};
+//extra_bits = 3bits; extra_run/level = 1 bit; run_offset = 6bits; level_offset = 5 bits;
+#undef extra_bits
+#undef extra_run
+#undef extra_level
+#undef run_offset
+#undef level_offset
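+ /* Worked example: entry 48 above is extra_bits(2)|extra_run|run_offset(4)|level_offset(1),
+  * so x8_get_ac_rlf() reads 2 extra bits e and yields run = 4 + e (4..7) with level = 1;
+  * the extra bits extend either the run or the level, never both. */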
+
+static void x8_get_ac_rlf(IntraX8Context * const w, const int mode,
+ int * const run, int * const level, int * const final){
+ MpegEncContext * const s= w->s;
+ int i,e;
+
+// x8_select_ac_table(w,mode);
+ i = get_vlc2(&s->gb, w->j_ac_vlc[mode]->table, AC_VLC_BITS, AC_VLC_MTD);
+
+ if(i<46){ //[0-45]
+ int t,l;
+ if(i<0){
+ (*level)=(*final)=//prevent 'may be used uninitialized'
+ (*run)=64;//this would cause error exit in the ac loop
+ return;
+ }
+
+ (*final) = t = (i>22);
+ i-=23*t;
+/*
+ i== 0-15 r=0-15 l=0 ;r=i& %01111
+ i==16-19 r=0-3 l=1 ;r=i& %00011
+ i==20-21 r=0-1 l=2 ;r=i& %00001
+ i==22 r=0 l=3 ;r=i& %00000
+l=lut_l[i/2]={0,0,0,0,0,0,0,0,1,1,2,3}[i>>1];// 11 10'01 01'00 00'00 00'00 00'00 00 => 0xE50000
+t=lut_mask[l]={0x0f,0x03,0x01,0x00}[l]; as i<256 the higher bits do not matter */
+ l=(0xE50000>>(i&(0x1E)))&3;/*0x1E or (~1) or ((i>>1)<<1)*/
+ t=(0x01030F>>(l<<3));
+
+ (*run) = i&t;
+ (*level) = l;
+ }else if(i<73){//[46-72]
+ uint32_t sm;
+ uint32_t mask;
+
+ i-=46;
+ sm=ac_decode_table[i];
+
+ e=get_bits(&s->gb,sm&0xF);sm>>=8;//3bits
+ mask=sm&0xff;sm>>=8; //1bit
+
+ (*run) =(sm&0xff) + (e&( mask));//6bits
+ (*level)=(sm>>8) + (e&(~mask));//5bits
+ (*final)=i>(58-46);
+ }else if(i<75){//[73-74]
+ static const uint8_t crazy_mix_runlevel[32]={
+ 0x22,0x32,0x33,0x53,0x23,0x42,0x43,0x63,
+ 0x24,0x52,0x34,0x73,0x25,0x62,0x44,0x83,
+ 0x26,0x72,0x35,0x54,0x27,0x82,0x45,0x64,
+ 0x28,0x92,0x36,0x74,0x29,0xa2,0x46,0x84};
+
+ (*final)=!(i&1);
+ e=get_bits(&s->gb,5);//get the extra bits
+ (*run) =crazy_mix_runlevel[e]>>4;
+ (*level)=crazy_mix_runlevel[e]&0x0F;
+ }else{
+ (*level)=get_bits( &s->gb, 7-3*(i&1));
+ (*run) =get_bits( &s->gb, 6);
+ (*final)=get_bits1(&s->gb);
+ }
+ return;
+}
+
+//static const uint8_t dc_extra_sbits[] ={0, 1,1, 1,1, 2,2, 3,3, 4,4, 5,5, 6,6, 7,7 };
+static const uint8_t dc_index_offset[] ={ 0, 1,2, 3,4, 5,7, 9,13, 17,25, 33,49, 65,97, 129,193};
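+ /* For DC coefficients the VLC symbol selects a magnitude bucket: dc_index_offset[]
+  * gives the bucket's base value, the lowest extra bit carries the sign, and the
+  * remaining extra bits pick the exact magnitude inside the bucket. */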
+
+static int x8_get_dc_rlf(IntraX8Context * const w,int const mode, int * const level, int * const final){
+ MpegEncContext * const s= w->s;
+ int i,e,c;
+
+ assert(mode<3);
+ if( !w->j_dc_vlc[mode] ) {
+ int table_index;
+ table_index = get_bits(&s->gb, 3);
+ //4 modes, same table
+ w->j_dc_vlc[mode]= &j_dc_vlc[w->quant<13][table_index];
+ }
+ assert(w->j_dc_vlc);
+ assert(w->j_dc_vlc[mode]->table);
+
+ i=get_vlc2(&s->gb, w->j_dc_vlc[mode]->table, DC_VLC_BITS, DC_VLC_MTD);
+
+ /*(i>=17) {i-=17;final=1;}*/
+ c= i>16;
+ (*final)=c;
+ i-=17*c;
+
+ if(i<=0){
+ (*level)=0;
+ return -i;
+ }
+ c=(i+1)>>1;//hackish way to calculate dc_extra_sbits[]
+ c-=c>1;
+
+ e=get_bits(&s->gb,c);//get the extra bits
+ i=dc_index_offset[i]+(e>>1);
+
+ e= -(e & 1);//0,0xffffff
+ (*level)= (i ^ e) - e;// (i^0)-0 , (i^0xff)-(-1)
+ return 0;
+}
+//end of huffman
+
+static int x8_setup_spatial_predictor(IntraX8Context * const w, const int chroma){
+ MpegEncContext * const s= w->s;
+ int range;
+ int sum;
+ int quant;
+
+ s->dsp.x8_setup_spatial_compensation(s->dest[chroma], s->edge_emu_buffer,
+ s->current_picture.linesize[chroma>0],
+ &range, &sum, w->edges);
+ if(chroma){
+ w->orient=w->chroma_orient;
+ quant=w->quant_dc_chroma;
+ }else{
+ quant=w->quant;
+ }
+
+ w->flat_dc=0;
+ if(range < quant || range < 3){
+ w->orient=0;
+ if(range < 3){//yep you read right, a +-1 idct error may break decoding!
+ w->flat_dc=1;
+ sum+=9;
+ w->predicted_dc = (sum*6899)>>17;//((1<<17)+9)/(8+8+1+2)=6899
+ }
+ }
+ if(chroma)
+ return 0;
+
+ assert(w->orient < 3);
+ if(range < 2*w->quant){
+ if( (w->edges&3) == 0){
+ if(w->orient==1) w->orient=11;
+ if(w->orient==2) w->orient=10;
+ }else{
+ w->orient=0;
+ }
+ w->raw_orient=0;
+ }else{
+ static const uint8_t prediction_table[3][12]={
+ {0,8,4, 10,11, 2,6,9,1,3,5,7},
+ {4,0,8, 11,10, 3,5,2,6,9,1,7},
+ {8,0,4, 10,11, 1,7,2,6,9,3,5}
+ };
+ w->raw_orient=x8_get_orient_vlc(w);
+ if(w->raw_orient<0) return -1;
+ assert(w->raw_orient < 12 );
+ assert(w->orient<3);
+ w->orient=prediction_table[w->orient][w->raw_orient];
+ }
+ return 0;
+}
+
+static void x8_update_predictions(IntraX8Context * const w, const int orient, const int est_run ){
+ MpegEncContext * const s= w->s;
+
+ w->prediction_table[s->mb_x*2+(s->mb_y&1)] = (est_run<<2) + 1*(orient==4) + 2*(orient==8);
+/*
+ y=2n+0 ->//0 2 4
+ y=2n+1 ->//1 3 5
+*/
+}
+static void x8_get_prediction_chroma(IntraX8Context * const w){
+ MpegEncContext * const s= w->s;
+
+ w->edges = 1*( !(s->mb_x>>1) );
+ w->edges|= 2*( !(s->mb_y>>1) );
+ w->edges|= 4*( s->mb_x >= (2*s->mb_width-1) );//mb_x for chroma would always be odd
+
+ w->raw_orient=0;
+ if(w->edges&3){//lut_co[8]={inv,4,8,8, inv,4,8,8}<- =>{1,1,0,0;1,1,0,0} => 0xCC
+ w->chroma_orient=4<<((0xCC>>w->edges)&1);
+ return;
+ }
+ w->chroma_orient = (w->prediction_table[2*s->mb_x-2] & 0x03)<<2;//block[x-1][y|1-1)]
+}
+
+static void x8_get_prediction(IntraX8Context * const w){
+ MpegEncContext * const s= w->s;
+ int a,b,c,i;
+
+ w->edges = 1*( !s->mb_x );
+ w->edges|= 2*( !s->mb_y );
+ w->edges|= 4*( s->mb_x >= (2*s->mb_width-1) );
+
+ switch(w->edges&3){
+ case 0:
+ break;
+ case 1:
+ //take the one from the above block[0][y-1]
+ w->est_run = w->prediction_table[!(s->mb_y&1)]>>2;
+ w->orient = 1;
+ return;
+ case 2:
+ //take the one from the previous block[x-1][0]
+ w->est_run = w->prediction_table[2*s->mb_x-2]>>2;
+ w->orient = 2;
+ return;
+ case 3:
+ w->est_run = 16;
+ w->orient = 0;
+ return;
+ }
+ //no edge cases
+ b= w->prediction_table[2*s->mb_x + !(s->mb_y&1) ];//block[x ][y-1]
+ a= w->prediction_table[2*s->mb_x-2 + (s->mb_y&1) ];//block[x-1][y ]
+ c= w->prediction_table[2*s->mb_x-2 + !(s->mb_y&1) ];//block[x-1][y-1]
+
+ w->est_run = FFMIN(b,a);
+ /* This condition has nothing to do with w->edges, even if it looks
+ similar it would trigger if e.g. x=3;y=2;
+ I guess somebody wrote something wrong and it became standard. */
+ if( (s->mb_x & s->mb_y) != 0 ) w->est_run=FFMIN(c,w->est_run);
+ w->est_run>>=2;
+
+ a&=3;
+ b&=3;
+ c&=3;
+
+ i=( 0xFFEAF4C4>>(2*b+8*a) )&3;
+ if(i!=3) w->orient=i;
+ else w->orient=( 0xFFEAD8>>(2*c+8*(w->quant>12)) )&3;
+/*
+lut1[b][a]={
+->{0, 1, 0, pad},
+ {0, 1, X, pad},
+ {2, 2, 2, pad}}
+ pad 2 2 2; pad X 1 0; pad 0 1 0 <-
+-> 11 10 '10 10 '11 11'01 00 '11 00'01 00=>0xEAF4C4
+
+lut2[q>12][c]={
+ ->{0,2,1,pad},
+ {2,2,2,pad}}
+ pad 2 2 2; pad 1 2 0 <-
+-> 11 10'10 10 '11 01'10 00=>0xEAD8
+*/
+}
+
+
+static void x8_ac_compensation(IntraX8Context * const w, int const direction, int const dc_level){
+ MpegEncContext * const s= w->s;
+ int t;
+#define B(x,y) s->block[0][s->dsp.idct_permutation[(x)+(y)*8]]
+#define T(x) ((x) * dc_level + 0x8000) >> 16;
+ switch(direction){
+ case 0:
+ t = T(3811);//h
+ B(1,0) -= t;
+ B(0,1) -= t;
+
+ t = T(487);//e
+ B(2,0) -= t;
+ B(0,2) -= t;
+
+ t = T(506);//f
+ B(3,0) -= t;
+ B(0,3) -= t;
+
+ t = T(135);//c
+ B(4,0) -= t;
+ B(0,4) -= t;
+ B(2,1) += t;
+ B(1,2) += t;
+ B(3,1) += t;
+ B(1,3) += t;
+
+ t = T(173);//d
+ B(5,0) -= t;
+ B(0,5) -= t;
+
+ t = T(61);//b
+ B(6,0) -= t;
+ B(0,6) -= t;
+ B(5,1) += t;
+ B(1,5) += t;
+
+ t = T(42); //a
+ B(7,0) -= t;
+ B(0,7) -= t;
+ B(4,1) += t;
+ B(1,4) += t;
+ B(4,4) += t;
+
+ t = T(1084);//g
+ B(1,1) += t;
+
+ s->block_last_index[0] = FFMAX(s->block_last_index[0], 7*8);
+ break;
+ case 1:
+ B(0,1) -= T(6269);
+ B(0,3) -= T( 708);
+ B(0,5) -= T( 172);
+ B(0,7) -= T( 73);
+
+ s->block_last_index[0] = FFMAX(s->block_last_index[0], 7*8);
+ break;
+ case 2:
+ B(1,0) -= T(6269);
+ B(3,0) -= T( 708);
+ B(5,0) -= T( 172);
+ B(7,0) -= T( 73);
+
+ s->block_last_index[0] = FFMAX(s->block_last_index[0], 7);
+ break;
+ }
+#undef B
+#undef T
+}
+
+static void dsp_x8_put_solidcolor(uint8_t const pix, uint8_t * dst, int const linesize){
+ int k;
+ for(k=0;k<8;k++){
+ memset(dst,pix,8);
+ dst+=linesize;
+ }
+}
+
+static const int16_t quant_table[64] = {
+ 256, 256, 256, 256, 256, 256, 259, 262,
+ 265, 269, 272, 275, 278, 282, 285, 288,
+ 292, 295, 299, 303, 306, 310, 314, 317,
+ 321, 325, 329, 333, 337, 341, 345, 349,
+ 353, 358, 362, 366, 371, 375, 379, 384,
+ 389, 393, 398, 403, 408, 413, 417, 422,
+ 428, 433, 438, 443, 448, 454, 459, 465,
+ 470, 476, 482, 488, 493, 499, 505, 511
+};
+
+static int x8_decode_intra_mb(IntraX8Context* const w, const int chroma){
+ MpegEncContext * const s= w->s;
+
+ uint8_t * scantable;
+ int final,run,level;
+ int ac_mode,dc_mode,est_run,dc_level;
+ int pos,n;
+ int zeros_only;
+ int use_quant_matrix;
+ int sign;
+
+ assert(w->orient<12);
+ s->dsp.clear_block(s->block[0]);
+
+ if(chroma){
+ dc_mode=2;
+ }else{
+ dc_mode=!!w->est_run;//0,1
+ }
+
+ if(x8_get_dc_rlf(w, dc_mode, &dc_level, &final)) return -1;
+ n=0;
+ zeros_only=0;
+ if(!final){//decode ac
+ use_quant_matrix=w->use_quant_matrix;
+ if(chroma){
+ ac_mode = 1;
+ est_run = 64;//not used
+ }else{
+ if (w->raw_orient < 3){
+ use_quant_matrix = 0;
+ }
+ if(w->raw_orient > 4){
+ ac_mode = 0;
+ est_run = 64;
+ }else{
+ if(w->est_run > 1){
+ ac_mode = 2;
+ est_run=w->est_run;
+ }else{
+ ac_mode = 3;
+ est_run = 64;
+ }
+ }
+ }
+ x8_select_ac_table(w,ac_mode);
+ /*scantable_selector[12]={0,2,0,1,1,1,0,2,2,0,1,2};<-
+ -> 10'01' 00'10' 10'00' 01'01' 01'00' 10'00 =>0x928548 */
+ scantable = w->scantable[ (0x928548>>(2*w->orient))&3 ].permutated;
+ pos=0;
+ do {
+ n++;
+ if( n >= est_run ){
+ ac_mode=3;
+ x8_select_ac_table(w,3);
+ }
+
+ x8_get_ac_rlf(w,ac_mode,&run,&level,&final);
+
+ pos+=run+1;
+ if(pos>63){
+ //this also handles vlc error in x8_get_ac_rlf
+ return -1;
+ }
+ level= (level+1) * w->dquant;
+ level+= w->qsum;
+
+ sign = - get_bits1(&s->gb);
+ level = (level ^ sign) - sign;
+
+ if(use_quant_matrix){
+ level = (level*quant_table[pos])>>8;
+ }
+ s->block[0][ scantable[pos] ]=level;
+ }while(!final);
+
+ s->block_last_index[0]=pos;
+ }else{//DC only
+ s->block_last_index[0]=0;
+ if(w->flat_dc && ((unsigned)(dc_level+1)) < 3){//[-1;1]
+ int32_t divide_quant= !chroma ? w->divide_quant_dc_luma:
+ w->divide_quant_dc_chroma;
+ int32_t dc_quant = !chroma ? w->quant:
+ w->quant_dc_chroma;
+
+ //original intent dc_level+=predicted_dc/quant; but it got lost somewhere in the rounding
+ dc_level+= (w->predicted_dc*divide_quant + (1<<12) )>>13;
+
+ dsp_x8_put_solidcolor( av_clip_uint8((dc_level*dc_quant+4)>>3),
+ s->dest[chroma], s->current_picture.linesize[!!chroma]);
+
+ goto block_placed;
+ }
+ zeros_only = (dc_level == 0);
+ }
+ if(!chroma){
+ s->block[0][0] = dc_level*w->quant;
+ }else{
+ s->block[0][0] = dc_level*w->quant_dc_chroma;
+ }
+
+ //there is !zero_only check in the original, but dc_level check is enough
+ if( (unsigned int)(dc_level+1) >= 3 && (w->edges&3) != 3 ){
+ int direction;
+ /*ac_comp_direction[orient] = { 0, 3, 3, 1, 1, 0, 0, 0, 2, 2, 2, 1 };<-
+ -> 01'10' 10'10' 00'00' 00'01' 01'11' 11'00 =>0x6A017C */
+ direction= (0x6A017C>>(w->orient*2))&3;
+ if (direction != 3){
+ x8_ac_compensation(w, direction, s->block[0][0]);//modify block_last[]
+ }
+ }
+
+ if(w->flat_dc){
+ dsp_x8_put_solidcolor(w->predicted_dc, s->dest[chroma], s->current_picture.linesize[!!chroma]);
+ }else{
+ s->dsp.x8_spatial_compensation[w->orient]( s->edge_emu_buffer,
+ s->dest[chroma],
+ s->current_picture.linesize[!!chroma] );
+ }
+ if(!zeros_only)
+ s->dsp.idct_add ( s->dest[chroma],
+ s->current_picture.linesize[!!chroma],
+ s->block[0] );
+
+block_placed:
+
+ if(!chroma){
+ x8_update_predictions(w,w->orient,n);
+ }
+
+ if(s->loop_filter){
+ uint8_t* ptr = s->dest[chroma];
+ int linesize = s->current_picture.linesize[!!chroma];
+
+ if(!( (w->edges&2) || ( zeros_only && (w->orient|4)==4 ) )){
+ s->dsp.x8_h_loop_filter(ptr, linesize, w->quant);
+ }
+ if(!( (w->edges&1) || ( zeros_only && (w->orient|8)==8 ) )){
+ s->dsp.x8_v_loop_filter(ptr, linesize, w->quant);
+ }
+ }
+ return 0;
+}
+
+static void x8_init_block_index(MpegEncContext *s){ //FIXME maybe merge with ff_*
+//not s->linesize as this would be wrong for field pics
+//not that IntraX8 has interlacing support ;)
+ const int linesize = s->current_picture.linesize[0];
+ const int uvlinesize= s->current_picture.linesize[1];
+
+ s->dest[0] = s->current_picture.data[0];
+ s->dest[1] = s->current_picture.data[1];
+ s->dest[2] = s->current_picture.data[2];
+
+ s->dest[0] += s->mb_y * linesize << 3;
+ s->dest[1] += ( s->mb_y&(~1) ) * uvlinesize << 2;//chroma blocks are on odd rows
+ s->dest[2] += ( s->mb_y&(~1) ) * uvlinesize << 2;
+}
+
+/**
+ * Initialize IntraX8 frame decoder.
+ * Requires valid MpegEncContext with valid s->mb_width before calling.
+ * @param w pointer to IntraX8Context
+ * @param s pointer to MpegEncContext of the parent codec
+ */
+av_cold void ff_intrax8_common_init(IntraX8Context * w, MpegEncContext * const s){
+
+ w->s=s;
+ x8_vlc_init();
+ assert(s->mb_width>0);
+ w->prediction_table=av_mallocz(s->mb_width*2*2);//two rows, 2 blocks per canonical mb
+
+ ff_init_scantable(s->dsp.idct_permutation, &w->scantable[0], wmv1_scantable[0]);
+ ff_init_scantable(s->dsp.idct_permutation, &w->scantable[1], wmv1_scantable[2]);
+ ff_init_scantable(s->dsp.idct_permutation, &w->scantable[2], wmv1_scantable[3]);
+}
+
+/**
+ * Destroy IntraX8 frame structure.
+ * @param w pointer to IntraX8Context
+ */
+av_cold void ff_intrax8_common_end(IntraX8Context * w)
+{
+ av_freep(&w->prediction_table);
+}
+
+/**
+ * Decode single IntraX8 frame.
+ * The parent codec must fill s->loop_filter and s->gb (bitstream).
+ * The parent codec must call MPV_frame_start(), ff_er_frame_start() before calling this function.
+ * The parent codec must call ff_er_frame_end(), MPV_frame_end() after calling this function.
+ * This function does not use MPV_decode_mb().
+ * lowres decoding is theoretically impossible.
+ * @param w pointer to IntraX8Context
+ * @param dquant doubled quantizer; it is odd when VC-1 halfpq==1.
+ * @param quant_offset offset away from zero
+ */
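+ /* Sketch of the expected call order in a parent decoder (e.g. WMV2/VC-1), based on
+  * the notes above; the surrounding setup is the parent codec's responsibility:
+  *     ff_intrax8_common_init(&w, s);        // once s->mb_width is valid
+  *     MPV_frame_start(s, avctx);
+  *     ff_er_frame_start(s);
+  *     ff_intrax8_decode_picture(&w, dquant, quant_offset);
+  *     ff_er_frame_end(s);
+  *     MPV_frame_end(s);
+  *     ff_intrax8_common_end(&w);            // when the decoder is closed
+  */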
+//FIXME extern uint8_t wmv3_dc_scale_table[32];
+int ff_intrax8_decode_picture(IntraX8Context * const w, int dquant, int quant_offset){
+ MpegEncContext * const s= w->s;
+ int mb_xy;
+ assert(s);
+ w->use_quant_matrix = get_bits1(&s->gb);
+
+ w->dquant = dquant;
+ w->quant = dquant >> 1;
+ w->qsum = quant_offset;
+
+ w->divide_quant_dc_luma = ((1<<16) + (w->quant>>1)) / w->quant;
+ if(w->quant < 5){
+ w->quant_dc_chroma = w->quant;
+ w->divide_quant_dc_chroma = w->divide_quant_dc_luma;
+ }else{
+ w->quant_dc_chroma = w->quant+((w->quant+3)>>3);
+ w->divide_quant_dc_chroma = ((1<<16) + (w->quant_dc_chroma>>1)) / w->quant_dc_chroma;
+ }
+ x8_reset_vlc_tables(w);
+
+ s->resync_mb_x=0;
+ s->resync_mb_y=0;
+
+ for(s->mb_y=0; s->mb_y < s->mb_height*2; s->mb_y++){
+ x8_init_block_index(s);
+ mb_xy=(s->mb_y>>1)*s->mb_stride;
+
+ for(s->mb_x=0; s->mb_x < s->mb_width*2; s->mb_x++){
+ x8_get_prediction(w);
+ if(x8_setup_spatial_predictor(w,0)) goto error;
+ if(x8_decode_intra_mb(w,0)) goto error;
+
+ if( s->mb_x & s->mb_y & 1 ){
+ x8_get_prediction_chroma(w);
+
+ /*when setting up chroma, no vlc is read,
+ so no error condition can be reached*/
+ x8_setup_spatial_predictor(w,1);
+ if(x8_decode_intra_mb(w,1)) goto error;
+
+ x8_setup_spatial_predictor(w,2);
+ if(x8_decode_intra_mb(w,2)) goto error;
+
+ s->dest[1]+= 8;
+ s->dest[2]+= 8;
+
+ /*emulate MB info in the relevant tables*/
+ s->mbskip_table [mb_xy]=0;
+ s->mbintra_table[mb_xy]=1;
+ s->current_picture.qscale_table[mb_xy]=w->quant;
+ mb_xy++;
+ }
+ s->dest[0]+= 8;
+ }
+ if(s->mb_y&1){
+ ff_draw_horiz_band(s, (s->mb_y-1)*8, 16);
+ }
+ }
+
+error:
+ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y,
+ (s->mb_x>>1)-1, (s->mb_y>>1)-1,
+ (AC_END|DC_END|MV_END) );
+ return 0;
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/intrax8.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/intrax8.h
new file mode 100644
index 000000000..8ce4f8d09
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/intrax8.h
@@ -0,0 +1,57 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_INTRAX8_H
+#define AVCODEC_INTRAX8_H
+
+#include "get_bits.h"
+#include "mpegvideo.h"
+
+typedef struct{
+ VLC * j_ac_vlc[4];//they point to the static j_mb_vlc
+ VLC * j_orient_vlc;
+ VLC * j_dc_vlc[3];
+
+ int use_quant_matrix;
+//set by ff_intrax8_common_init
+ uint8_t * prediction_table;//2*(mb_w*2)
+ ScanTable scantable[3];
+//set by the caller codec
+ MpegEncContext * s;
+ int quant;
+ int dquant;
+ int qsum;
+//calculated per frame
+ int quant_dc_chroma;
+ int divide_quant_dc_luma;
+ int divide_quant_dc_chroma;
+//changed per block
+ int edges;
+ int flat_dc;
+ int predicted_dc;
+ int raw_orient;
+ int chroma_orient;
+ int orient;
+ int est_run;
+} IntraX8Context;
+
+void ff_intrax8_common_init(IntraX8Context * w, MpegEncContext * const s);
+void ff_intrax8_common_end(IntraX8Context * w);
+int ff_intrax8_decode_picture(IntraX8Context * w, int quant, int halfpq);
+
+#endif /* AVCODEC_INTRAX8_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/intrax8dsp.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/intrax8dsp.c
new file mode 100644
index 000000000..05d6f66fb
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/intrax8dsp.c
@@ -0,0 +1,432 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/intrax8dsp.c
+ * @brief IntraX8 frame subdecoder image manipulation routines
+ */
+
+#include "dsputil.h"
+
+/*
+area positions, #3 is 1 pixel only, the others are 8 pixels
+ |66666666|
+ 3|44444444|55555555|
+- -+--------+--------+
+1 2|XXXXXXXX|
+1 2|XXXXXXXX|
+1 2|XXXXXXXX|
+1 2|XXXXXXXX|
+1 2|XXXXXXXX|
+1 2|XXXXXXXX|
+1 2|XXXXXXXX|
+1 2|XXXXXXXX|
+^-start
+*/
+
+#define area1 (0)
+#define area2 (8)
+#define area3 (8+8)
+#define area4 (8+8+1)
+#define area5 (8+8+1+8)
+#define area6 (8+8+1+16)
+
+/**
+ Collect statistics and prepare the edge pixels required by the other spatial compensation functions.
+
+ * @param src pointer to the beginning of the processed block
+ * @param dst pointer to emu_edge; the edge pixels are stored in the layout the spatial compensation routines expect.
+ * @param linesize byte offset between 2 vertical pixels in the source image
+ * @param range pointer to the variable where the edge pixel range is to be stored (max-min values)
+ * @param psum pointer to the variable where the edge pixel sum is to be stored
+ * @param edges informs this routine that the block is on an image border, so some of
+                the missing edge pixels have to be interpolated; the flag has the following meaning:
+ 1 - mb_x==0 - first block in the row, interpolate area #1,#2,#3;
+ 2 - mb_y==0 - first row, interpolate area #3,#4,#5,#6;
+ note: 1|2 - mb_x==mb_y==0 - first block, use 0x80 value for all areas;
+ 4 - mb_x>= (mb_width-1) last block in the row, interpolate area #5;
+*/
+static void x8_setup_spatial_compensation(uint8_t *src, uint8_t *dst, int linesize,
+ int * range, int * psum, int edges){
+ uint8_t * ptr;
+ int sum;
+ int i;
+ int min_pix,max_pix;
+ uint8_t c;
+
+ if((edges&3)==3){
+ *psum=0x80*(8+1+8+2);
+ *range=0;
+ memset(dst,0x80,16+1+16+8);
+ //this triggers flat_dc for sure.
+ //flat_dc avoids all (other) prediction modes, but requires dc_level decoding.
+ return;
+ }
+
+ min_pix=256;
+ max_pix=-1;
+
+ sum=0;
+
+ if(!(edges&1)){//(mb_x!=0)//there is previous block on this row
+ ptr=src-1;//left column, area 2
+ for(i=7;i>=0;i--){
+ c=*(ptr-1);//area1, same mb as area2, no need to check
+ dst[area1+i]=c;
+ c=*(ptr);
+
+ sum+=c;
+ min_pix=FFMIN(min_pix,c);
+ max_pix=FFMAX(max_pix,c);
+ dst[area2+i]=c;
+
+ ptr+=linesize;
+ }
+ }
+
+ if(!(edges&2)){ //(mb_y!=0)//there is row above
+ ptr=src-linesize;//top line
+ for(i=0;i<8;i++){
+ c=*(ptr+i);
+ sum+=c;
+ min_pix=FFMIN(min_pix, c);
+ max_pix=FFMAX(max_pix, c);
+ }
+ if(edges&4){//last block on the row?
+ memset(dst+area5,c,8);//fill area 5 with the last top-line pixel
+ memcpy(dst+area4, ptr, 8);
+ }else{
+ memcpy(dst+area4, ptr, 16);//both area4 and 5
+ }
+ memcpy(dst+area6, ptr-linesize, 8);//area6 always present in the above block
+ }
+ //now calculate the stuff we need
+ if(edges&3){//mb_x==0 || mb_y==0){
+ int avg=(sum+4)>>3;
+ if(edges&1){ //(mb_x==0) {//implies mb_y!=0
+ memset(dst+area1,avg,8+8+1);//areas 1,2 and 3 are averaged
+ }else{//implies y==0 x!=0
+ memset(dst+area3,avg, 1+16+8);//areas 3, 4,5,6
+ }
+ sum+=avg*9;
+ }else{
+ uint8_t c=*(src-1-linesize);//the edge pixel, in the top line and left column
+ dst[area3]=c;
+ sum+=c;
+ //edge pixel is not part of min/max
+ }
+ (*range) = max_pix - min_pix;
+ sum += *(dst+area5) + *(dst+area5+1);
+ *psum = sum;
+}
+
+
+static const uint16_t zero_prediction_weights[64*2] = {
+ 640, 640, 669, 480, 708, 354, 748, 257, 792, 198, 760, 143, 808, 101, 772, 72,
+ 480, 669, 537, 537, 598, 416, 661, 316, 719, 250, 707, 185, 768, 134, 745, 97,
+ 354, 708, 416, 598, 488, 488, 564, 388, 634, 317, 642, 241, 716, 179, 706, 132,
+ 257, 748, 316, 661, 388, 564, 469, 469, 543, 395, 571, 311, 655, 238, 660, 180,
+ 198, 792, 250, 719, 317, 634, 395, 543, 469, 469, 507, 380, 597, 299, 616, 231,
+ 161, 855, 206, 788, 266, 710, 340, 623, 411, 548, 455, 455, 548, 366, 576, 288,
+ 122, 972, 159, 914, 211, 842, 276, 758, 341, 682, 389, 584, 483, 483, 520, 390,
+ 110, 1172, 144, 1107, 193, 1028, 254, 932, 317, 846, 366, 731, 458, 611, 499, 499
+};
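+ /* Per-pixel blending weights used by spatial_compensation_0() below: entry
+  * [y*16 + x*2] weights the smoothed top-edge sum and [y*16 + x*2 + 1] the
+  * left-edge sum, combined in fixed point with a rounded final >>16. */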
+
+static void spatial_compensation_0(uint8_t *src , uint8_t *dst, int linesize){
+ int i,j;
+ int x,y;
+ unsigned int p;//power divided by 2
+ int a;
+ uint16_t left_sum[2][8];
+ uint16_t top_sum[2][8];
+ memset(left_sum,0,2*8*sizeof(uint16_t));
+ memset( top_sum,0,2*8*sizeof(uint16_t));
+
+ for(i=0;i<8;i++){
+ a=src[area2+7-i]<<4;
+ for(j=0;j<8;j++){
+ p=abs(i-j);
+ left_sum[p&1][j]+= a>>(p>>1);
+ }
+ }
+
+ for(i=0;i<8;i++){
+ a=src[area4+i]<<4;
+ for(j=0;j<8;j++){
+ p=abs(i-j);
+ top_sum[p&1][j]+= a>>(p>>1);
+ }
+ }
+ for(;i<10;i++){
+ a=src[area4+i]<<4;
+ for(j=5;j<8;j++){
+ p=abs(i-j);
+ top_sum[p&1][j]+= a>>(p>>1);
+ }
+ }
+ for(;i<12;i++){
+ a=src[area4+i]<<4;
+ for(j=7;j<8;j++){
+ p=abs(i-j);
+ top_sum[p&1][j]+= a>>(p>>1);
+ }
+ }
+
+ for(i=0;i<8;i++){
+ top_sum [0][i]+=(top_sum [1][i]*181 + 128 )>>8;//181/256 is approximately sqrt(2)/2
+ left_sum[0][i]+=(left_sum[1][i]*181 + 128 )>>8;
+ }
+ for(y=0;y<8;y++){
+ for(x=0;x<8;x++){
+ dst[x] = (
+ (uint32_t)top_sum [0][x]*zero_prediction_weights[y*16+x*2+0] +
+ (uint32_t)left_sum[0][y]*zero_prediction_weights[y*16+x*2+1] +
+ 0x8000
+ )>>16;
+ }
+ dst+=linesize;
+ }
+}
+static void spatial_compensation_1(uint8_t *src , uint8_t *dst, int linesize){
+ int x,y;
+
+ for(y=0;y<8;y++){
+ for(x=0;x<8;x++){
+ dst[x]=src[area4 + FFMIN(2*y+x+2, 15) ];
+ }
+ dst+=linesize;
+ }
+}
+static void spatial_compensation_2(uint8_t *src , uint8_t *dst, int linesize){
+ int x,y;
+
+ for(y=0;y<8;y++){
+ for(x=0;x<8;x++){
+ dst[x]=src[area4 +1+y+x];
+ }
+ dst+=linesize;
+ }
+}
+static void spatial_compensation_3(uint8_t *src , uint8_t *dst, int linesize){
+ int x,y;
+
+ for(y=0;y<8;y++){
+ for(x=0;x<8;x++){
+ dst[x]=src[area4 +((y+1)>>1)+x];
+ }
+ dst+=linesize;
+ }
+}
+static void spatial_compensation_4(uint8_t *src , uint8_t *dst, int linesize){
+ int x,y;
+
+ for(y=0;y<8;y++){
+ for(x=0;x<8;x++){
+ dst[x]=( src[area4+x] + src[area6+x] + 1 )>>1;
+ }
+ dst+=linesize;
+ }
+}
+static void spatial_compensation_5(uint8_t *src , uint8_t *dst, int linesize){
+ int x,y;
+
+ for(y=0;y<8;y++){
+ for(x=0;x<8;x++){
+ if(2*x-y<0){
+ dst[x]=src[area2+9+2*x-y];
+ }else{
+ dst[x]=src[area4 +x-((y+1)>>1)];
+ }
+ }
+ dst+=linesize;
+ }
+}
+static void spatial_compensation_6(uint8_t *src , uint8_t *dst, int linesize){
+ int x,y;
+
+ for(y=0;y<8;y++){
+ for(x=0;x<8;x++){
+ dst[x]=src[area3+x-y];
+ }
+ dst+=linesize;
+ }
+}
+static void spatial_compensation_7(uint8_t *src , uint8_t *dst, int linesize){
+ int x,y;
+
+ for(y=0;y<8;y++){
+ for(x=0;x<8;x++){
+ if(x-2*y>0){
+ dst[x]=( src[area3-1+x-2*y] + src[area3+x-2*y] + 1)>>1;
+ }else{
+ dst[x]=src[area2+8-y +(x>>1)];
+ }
+ }
+ dst+=linesize;
+ }
+}
+static void spatial_compensation_8(uint8_t *src , uint8_t *dst, int linesize){
+ int x,y;
+
+ for(y=0;y<8;y++){
+ for(x=0;x<8;x++){
+ dst[x]=( src[area1+7-y] + src[area2+7-y] + 1 )>>1;
+ }
+ dst+=linesize;
+ }
+}
+static void spatial_compensation_9(uint8_t *src , uint8_t *dst, int linesize){
+ int x,y;
+
+ for(y=0;y<8;y++){
+ for(x=0;x<8;x++){
+ dst[x]=src[area2+6-FFMIN(x+y,6)];
+ }
+ dst+=linesize;
+ }
+}
+static void spatial_compensation_10(uint8_t *src , uint8_t *dst, int linesize){
+ int x,y;
+
+ for(y=0;y<8;y++){
+ for(x=0;x<8;x++){
+ dst[x]=(src[area2+7-y]*(8-x)+src[area4+x]*x+4)>>3;
+ }
+ dst+=linesize;
+ }
+}
+static void spatial_compensation_11(uint8_t *src , uint8_t *dst, int linesize){
+ int x,y;
+
+ for(y=0;y<8;y++){
+ for(x=0;x<8;x++){
+ dst[x]=(src[area2+7-y]*y+src[area4+x]*(8-y)+4)>>3;
+ }
+ dst+=linesize;
+ }
+}
+
+static void x8_loop_filter(uint8_t * ptr, const int a_stride, const int b_stride, int quant){
+ int i,t;
+ int p0,p1,p2,p3,p4,p5,p6,p7,p8,p9;
+ int ql=(quant+10)>>3;
+
+ for(i=0; i<8; i++,ptr+=b_stride){
+ p0=ptr[-5*a_stride];
+ p1=ptr[-4*a_stride];
+ p2=ptr[-3*a_stride];
+ p3=ptr[-2*a_stride];
+ p4=ptr[-1*a_stride];
+ p5=ptr[ 0 ];
+ p6=ptr[ 1*a_stride];
+ p7=ptr[ 2*a_stride];
+ p8=ptr[ 3*a_stride];
+ p9=ptr[ 4*a_stride];
+
+ t=
+ (FFABS(p1-p2) <= ql) +
+ (FFABS(p2-p3) <= ql) +
+ (FFABS(p3-p4) <= ql) +
+ (FFABS(p4-p5) <= ql);
+ if(t>0){//You need at least 1 to be able to reach a total score of 6.
+ t+=
+ (FFABS(p5-p6) <= ql) +
+ (FFABS(p6-p7) <= ql) +
+ (FFABS(p7-p8) <= ql) +
+ (FFABS(p8-p9) <= ql) +
+ (FFABS(p0-p1) <= ql);
+ if(t>=6){
+ int min,max;
+
+ min=max=p1;
+ min=FFMIN(min,p3); max=FFMAX(max,p3);
+ min=FFMIN(min,p5); max=FFMAX(max,p5);
+ min=FFMIN(min,p8); max=FFMAX(max,p8);
+ if(max-min<2*quant){//early stop
+ min=FFMIN(min,p2); max=FFMAX(max,p2);
+ min=FFMIN(min,p4); max=FFMAX(max,p4);
+ min=FFMIN(min,p6); max=FFMAX(max,p6);
+ min=FFMIN(min,p7); max=FFMAX(max,p7);
+ if(max-min<2*quant){
+ ptr[-2*a_stride]=(4*p2 + 3*p3 + 1*p7 + 4)>>3;
+ ptr[-1*a_stride]=(3*p2 + 3*p4 + 2*p7 + 4)>>3;
+ ptr[ 0 ]=(2*p2 + 3*p5 + 3*p7 + 4)>>3;
+ ptr[ 1*a_stride]=(1*p2 + 3*p6 + 4*p7 + 4)>>3;
+ continue;
+ };
+ }
+ }
+ }
+ {
+ int x,x0,x1,x2;
+ int m;
+
+ x0 = (2*p3 - 5*p4 + 5*p5 - 2*p6 + 4)>>3;
+ if(FFABS(x0) < quant){
+ x1=(2*p1 - 5*p2 + 5*p3 - 2*p4 + 4)>>3;
+ x2=(2*p5 - 5*p6 + 5*p7 - 2*p8 + 4)>>3;
+
+ x=FFABS(x0) - FFMIN( FFABS(x1), FFABS(x2) );
+ m=p4-p5;
+
+ if( x > 0 && (m^x0) <0){
+ int32_t sign;
+
+ sign=m>>31;
+ m=(m^sign)-sign;//abs(m)
+ m>>=1;
+
+ x=(5*x)>>3;
+
+ if(x>m) x=m;
+
+ x=(x^sign)-sign;
+
+ ptr[-1*a_stride] -= x;
+ ptr[ 0] += x;
+ }
+ }
+ }
+ }
+}
+
+static void x8_h_loop_filter(uint8_t *src, int stride, int qscale){
+ x8_loop_filter(src, stride, 1, qscale);
+}
+
+static void x8_v_loop_filter(uint8_t *src, int stride, int qscale){
+ x8_loop_filter(src, 1, stride, qscale);
+}
+
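+/* Install the directional spatial-compensation (intra prediction) routines
+ * and the horizontal/vertical block-edge loop filters into the DSPContext. */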
+av_cold void ff_intrax8dsp_init(DSPContext* dsp, AVCodecContext *avctx) {
+ dsp->x8_h_loop_filter=x8_h_loop_filter;
+ dsp->x8_v_loop_filter=x8_v_loop_filter;
+ dsp->x8_setup_spatial_compensation=x8_setup_spatial_compensation;
+ dsp->x8_spatial_compensation[0]=spatial_compensation_0;
+ dsp->x8_spatial_compensation[1]=spatial_compensation_1;
+ dsp->x8_spatial_compensation[2]=spatial_compensation_2;
+ dsp->x8_spatial_compensation[3]=spatial_compensation_3;
+ dsp->x8_spatial_compensation[4]=spatial_compensation_4;
+ dsp->x8_spatial_compensation[5]=spatial_compensation_5;
+ dsp->x8_spatial_compensation[6]=spatial_compensation_6;
+ dsp->x8_spatial_compensation[7]=spatial_compensation_7;
+ dsp->x8_spatial_compensation[8]=spatial_compensation_8;
+ dsp->x8_spatial_compensation[9]=spatial_compensation_9;
+ dsp->x8_spatial_compensation[10]=spatial_compensation_10;
+ dsp->x8_spatial_compensation[11]=spatial_compensation_11;
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/intrax8huf.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/intrax8huf.h
new file mode 100644
index 000000000..375906bab
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/intrax8huf.h
@@ -0,0 +1,918 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_INTRAX8HUF_H
+#define AVCODEC_INTRAX8HUF_H
+
+#include <inttypes.h>
+
+
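+/* Every entry below is a {code, length-in-bits} pair. The tables provide the
+ * orientation, DC and AC Huffman codes for the X8 intra-frame decoder, with
+ * separate sub-tables for the low- and high-quantizer cases. The
+ * MAX_*_VLC_BITS defines give the longest code length occurring in the
+ * corresponding tables. */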
+static const uint16_t x8_orient_lowquant_table[4][12][2]={
+ {//0
+ {0x0000, 1}, {0x0004, 3}, {0x0005, 3}, {0x000C, 4},
+ {0x000D, 4}, {0x0038, 6}, {0x001D, 5}, {0x0039, 6},
+ {0x003C, 6}, {0x003D, 6}, {0x003E, 6}, {0x003F, 6},
+ },{//1
+ {0x0000, 5}, {0x0001, 5}, {0x0002, 5}, {0x0001, 2},
+ {0x0002, 2}, {0x0002, 4}, {0x0003, 5}, {0x0006, 3},
+ {0x0003, 4}, {0x000E, 4}, {0x001E, 5}, {0x001F, 5},
+ },{//2
+ {0x0000, 2}, {0x0001, 2}, {0x0004, 3}, {0x0005, 3},
+ {0x0006, 3}, {0x0038, 6}, {0x0039, 6}, {0x001D, 5},
+ {0x003C, 6}, {0x003D, 6}, {0x003E, 6}, {0x003F, 6},
+ },{//3
+ {0x0000, 3}, {0x0002, 4}, {0x0003, 4}, {0x0001, 2},
+ {0x0002, 2}, {0x0018, 5}, {0x0019, 5}, {0x000D, 4},
+ {0x001C, 5}, {0x001D, 5}, {0x001E, 5}, {0x001F, 5},
+ }
+};
+
+static const uint16_t x8_orient_highquant_table[2][12][2]={
+ {//0
+ {0x0000, 2}, {0x0001, 2}, {0x0004, 3}, {0x0005, 3},
+ {0x0006, 3}, {0x0038, 6}, {0x001D, 5}, {0x0039, 6},
+ {0x003C, 6}, {0x003D, 6}, {0x003E, 6}, {0x003F, 6},
+ },{//1
+ {0x0000, 1}, {0x0002, 2}, {0x0006, 3}, {0x001C, 5},
+ {0x001D, 5}, {0x0078, 7}, {0x003D, 6}, {0x0079, 7},
+ {0x007C, 7}, {0x007D, 7}, {0x007E, 7}, {0x007F, 7},
+ }
+};
+#define MAX_OR_VLC_BITS 7
+
+
+static const uint16_t x8_dc_lowquant_table[8][34][2]={
+ {//0
+ {0x0000, 5}, {0x0001, 4}, {0x0001, 5}, {0x0004, 5},
+ {0x0005, 5}, {0x0006, 5}, {0x000E, 6}, {0x000F, 6},
+ {0x0040, 8}, {0x0041, 8}, {0x0840, 13}, {0x0841, 13},
+ {0x0842, 13}, {0x0843, 13}, {0x0844, 13}, {0x0845, 13},
+ {0x0846, 13}, {0x0002, 2}, {0x0003, 2}, {0x0003, 3},
+ {0x0005, 4}, {0x0009, 5}, {0x0011, 6}, {0x0043, 8},
+ {0x0085, 9}, {0x0847, 13}, {0x0848, 13}, {0x0849, 13},
+ {0x084A, 13}, {0x084B, 13}, {0x084C, 13}, {0x084D, 13},
+ {0x084E, 13}, {0x084F, 13},
+ },{//1
+ {0x0000, 4}, {0x0001, 3}, {0x0002, 3}, {0x0001, 4},
+ {0x0006, 4}, {0x0004, 3}, {0x0007, 4}, {0x0005, 3},
+ {0x000C, 4}, {0x000D, 4}, {0x001C, 5}, {0x003A, 6},
+ {0x01D8, 9}, {0x01D9, 9}, {0x1DA0, 13}, {0x1DA1, 13},
+ {0x1DA2, 13}, {0x003C, 6}, {0x003D, 6}, {0x003E, 6},
+ {0x0077, 7}, {0x01DB, 9}, {0x007E, 7}, {0x00FE, 8},
+ {0x01FE, 9}, {0x1DA3, 13}, {0x1DA4, 13}, {0x1DA5, 13},
+ {0x0ED3, 12}, {0x0ED4, 12}, {0x01FF, 9}, {0x0ED5, 12},
+ {0x0ED6, 12}, {0x0ED7, 12},
+ },{//2
+ {0x0000, 4}, {0x0001, 3}, {0x0002, 3}, {0x0001, 4},
+ {0x0006, 4}, {0x0007, 4}, {0x0008, 4}, {0x0009, 4},
+ {0x0028, 6}, {0x0029, 6}, {0x0054, 7}, {0x0055, 7},
+ {0x0AC0, 12}, {0x0AC1, 12}, {0x0AC2, 12}, {0x0AC3, 12},
+ {0x0AC4, 12}, {0x000B, 4}, {0x0006, 3}, {0x000E, 4},
+ {0x001E, 5}, {0x003E, 6}, {0x003F, 6}, {0x0057, 7},
+ {0x00AD, 8}, {0x0AC5, 12}, {0x0AC6, 12}, {0x0AC7, 12},
+ {0x0AC8, 12}, {0x0AC9, 12}, {0x0ACA, 12}, {0x0ACB, 12},
+ {0x0566, 11}, {0x0567, 11},
+ },{//3
+ {0x0000, 4}, {0x0001, 2}, {0x0001, 3}, {0x0004, 3},
+ {0x0005, 3}, {0x0006, 3}, {0x0001, 4}, {0x000E, 4},
+ {0x003C, 6}, {0x003D, 6}, {0x007C, 7}, {0x00FA, 8},
+ {0x3EC0, 14}, {0x3EC1, 14}, {0x3EC2, 14}, {0x3EC3, 14},
+ {0x1F62, 13}, {0x01F7, 9}, {0x007E, 7}, {0x00FE, 8},
+ {0x00FF, 8}, {0x1F63, 13}, {0x1F64, 13}, {0x1F65, 13},
+ {0x1F66, 13}, {0x1F67, 13}, {0x1F68, 13}, {0x1F69, 13},
+ {0x1F6A, 13}, {0x1F6B, 13}, {0x1F6C, 13}, {0x1F6D, 13},
+ {0x1F6E, 13}, {0x1F6F, 13},
+ },{//4
+ {0x0000, 7}, {0x0001, 7}, {0x0002, 7}, {0x0003, 7},
+ {0x0004, 7}, {0x0005, 7}, {0x0006, 7}, {0x0007, 7},
+ {0x0008, 7}, {0x0009, 7}, {0x000A, 7}, {0x000B, 7},
+ {0x000C, 7}, {0x000D, 7}, {0x000E, 7}, {0x000F, 7},
+ {0x0010, 7}, {0x0001, 1}, {0x0001, 2}, {0x0011, 7},
+ {0x0012, 7}, {0x0013, 7}, {0x0014, 7}, {0x0015, 7},
+ {0x0016, 7}, {0x0017, 7}, {0x0018, 7}, {0x0019, 7},
+ {0x001A, 7}, {0x001B, 7}, {0x001C, 7}, {0x001D, 7},
+ {0x001E, 7}, {0x001F, 7},
+ },{//5
+ {0x0000, 5}, {0x0001, 4}, {0x0001, 5}, {0x0008, 6},
+ {0x0009, 6}, {0x000A, 6}, {0x0016, 7}, {0x000C, 6},
+ {0x0017, 7}, {0x000D, 6}, {0x0038, 8}, {0x001D, 7},
+ {0x0039, 8}, {0x0780, 13}, {0x0781, 13}, {0x0782, 13},
+ {0x0783, 13}, {0x0002, 3}, {0x0001, 1}, {0x0003, 3},
+ {0x001F, 7}, {0x003D, 8}, {0x0079, 9}, {0x0784, 13},
+ {0x0785, 13}, {0x0786, 13}, {0x0787, 13}, {0x0788, 13},
+ {0x0789, 13}, {0x078A, 13}, {0x078B, 13}, {0x078C, 13},
+ {0x078D, 13}, {0x03C7, 12},
+ },{//6
+ {0x0000, 4}, {0x0001, 2}, {0x0001, 3}, {0x0004, 3},
+ {0x0001, 4}, {0x000A, 4}, {0x0016, 5}, {0x002E, 6},
+ {0x005E, 7}, {0x005F, 7}, {0x00C0, 8}, {0x3040, 14},
+ {0x3041, 14}, {0x0305, 10}, {0x0183, 9}, {0x3042, 14},
+ {0x3043, 14}, {0x000D, 4}, {0x0007, 3}, {0x0019, 5},
+ {0x0031, 6}, {0x00C2, 8}, {0x00C3, 8}, {0x3044, 14},
+ {0x3045, 14}, {0x3046, 14}, {0x3047, 14}, {0x3048, 14},
+ {0x3049, 14}, {0x304A, 14}, {0x304B, 14}, {0x304C, 14},
+ {0x304D, 14}, {0x1827, 13},
+ },{//7
+ {0x0000, 6}, {0x0001, 6}, {0x0002, 6}, {0x0006, 7},
+ {0x0007, 7}, {0x0004, 6}, {0x0005, 6}, {0x0006, 6},
+ {0x000E, 7}, {0x001E, 8}, {0x001F, 8}, {0x0040, 9},
+ {0x0082, 10}, {0x0830, 14}, {0x0831, 14}, {0x0832, 14},
+ {0x0833, 14}, {0x0001, 1}, {0x0001, 2}, {0x0003, 4},
+ {0x0005, 5}, {0x0009, 6}, {0x0011, 7}, {0x0021, 8},
+ {0x0834, 14}, {0x0835, 14}, {0x0836, 14}, {0x0837, 14},
+ {0x0838, 14}, {0x0839, 14}, {0x083A, 14}, {0x083B, 14},
+ {0x041E, 13}, {0x041F, 13},
+ }
+};
+
+static const uint16_t x8_dc_highquant_table[8][34][2]={
+ {//0
+ {0x0000, 5}, {0x0001, 4}, {0x0002, 4}, {0x0001, 5},
+ {0x0006, 5}, {0x0004, 4}, {0x0007, 5}, {0x000A, 5},
+ {0x002C, 7}, {0x002D, 7}, {0x05C0, 12}, {0x05C1, 12},
+ {0x05C2, 12}, {0x05C3, 12}, {0x05C4, 12}, {0x05C5, 12},
+ {0x05C6, 12}, {0x0003, 3}, {0x0002, 2}, {0x0006, 3},
+ {0x000E, 4}, {0x001E, 5}, {0x001F, 5}, {0x002F, 7},
+ {0x005D, 8}, {0x05C7, 12}, {0x05C8, 12}, {0x05C9, 12},
+ {0x05CA, 12}, {0x05CB, 12}, {0x05CC, 12}, {0x05CD, 12},
+ {0x05CE, 12}, {0x05CF, 12},
+ },{//1
+ {0x0000, 3}, {0x0001, 3}, {0x0002, 3}, {0x0006, 4},
+ {0x0007, 4}, {0x0004, 3}, {0x000A, 4}, {0x000B, 4},
+ {0x0030, 6}, {0x0062, 7}, {0x0063, 7}, {0x0640, 11},
+ {0x0641, 11}, {0x0642, 11}, {0x0643, 11}, {0x0644, 11},
+ {0x0645, 11}, {0x0033, 6}, {0x000D, 4}, {0x001C, 5},
+ {0x001D, 5}, {0x003C, 6}, {0x001F, 5}, {0x0065, 7},
+ {0x007A, 7}, {0x0646, 11}, {0x007B, 7}, {0x0647, 11},
+ {0x0648, 11}, {0x0649, 11}, {0x064A, 11}, {0x064B, 11},
+ {0x0326, 10}, {0x0327, 10},
+ },{//2
+ {0x0000, 7}, {0x0001, 7}, {0x0001, 6}, {0x0004, 7},
+ {0x0003, 6}, {0x0005, 7}, {0x0010, 8}, {0x0011, 8},
+ {0x0240, 13}, {0x0241, 13}, {0x0242, 13}, {0x0243, 13},
+ {0x0244, 13}, {0x0245, 13}, {0x0246, 13}, {0x0247, 13},
+ {0x0124, 12}, {0x0001, 1}, {0x0001, 2}, {0x0001, 3},
+ {0x0003, 5}, {0x0005, 6}, {0x0013, 8}, {0x0125, 12},
+ {0x0126, 12}, {0x0127, 12}, {0x0128, 12}, {0x0129, 12},
+ {0x012A, 12}, {0x012B, 12}, {0x012C, 12}, {0x012D, 12},
+ {0x012E, 12}, {0x012F, 12},
+ },{//3
+ {0x0000, 4}, {0x0001, 3}, {0x0002, 3}, {0x0001, 4},
+ {0x0006, 4}, {0x0004, 3}, {0x0005, 3}, {0x0006, 3},
+ {0x000E, 5}, {0x000F, 5}, {0x0070, 7}, {0x0710, 11},
+ {0x0711, 11}, {0x0712, 11}, {0x0713, 11}, {0x0714, 11},
+ {0x0715, 11}, {0x001D, 5}, {0x0072, 7}, {0x003C, 6},
+ {0x003D, 6}, {0x0073, 7}, {0x007C, 7}, {0x007D, 7},
+ {0x007E, 7}, {0x0716, 11}, {0x0717, 11}, {0x0718, 11},
+ {0x007F, 7}, {0x0719, 11}, {0x071A, 11}, {0x071B, 11},
+ {0x038E, 10}, {0x038F, 10},
+ },{//4
+ {0x0000, 8}, {0x0001, 7}, {0x0002, 7}, {0x0003, 7},
+ {0x0002, 9}, {0x0008, 8}, {0x0003, 9}, {0x0240, 14},
+ {0x0241, 14}, {0x0242, 14}, {0x0243, 14}, {0x0244, 14},
+ {0x0245, 14}, {0x0246, 14}, {0x0247, 14}, {0x0124, 13},
+ {0x0125, 13}, {0x0001, 2}, {0x0001, 1}, {0x0001, 3},
+ {0x0001, 4}, {0x0003, 6}, {0x0005, 7}, {0x0013, 9},
+ {0x0126, 13}, {0x0127, 13}, {0x0128, 13}, {0x0129, 13},
+ {0x012A, 13}, {0x012B, 13}, {0x012C, 13}, {0x012D, 13},
+ {0x012E, 13}, {0x012F, 13},
+ },{//5
+ {0x0000, 7}, {0x0001, 7}, {0x0001, 6}, {0x0002, 6},
+ {0x0003, 6}, {0x0004, 6}, {0x0005, 6}, {0x0006, 6},
+ {0x0007, 6}, {0x0008, 6}, {0x0009, 6}, {0x000A, 6},
+ {0x000B, 6}, {0x000C, 6}, {0x000D, 6}, {0x000E, 6},
+ {0x000F, 6}, {0x0010, 6}, {0x0011, 6}, {0x0012, 6},
+ {0x0013, 6}, {0x0014, 6}, {0x0015, 6}, {0x0016, 6},
+ {0x0017, 6}, {0x0018, 6}, {0x0019, 6}, {0x0001, 1},
+ {0x001A, 6}, {0x001B, 6}, {0x001C, 6}, {0x001D, 6},
+ {0x001E, 6}, {0x001F, 6},
+ },{//6
+ {0x0000, 5}, {0x0001, 4}, {0x0001, 5}, {0x0004, 5},
+ {0x000A, 6}, {0x0006, 5}, {0x000B, 6}, {0x000E, 6},
+ {0x003C, 8}, {0x003D, 8}, {0x07C0, 13}, {0x07C1, 13},
+ {0x07C2, 13}, {0x07C3, 13}, {0x07C4, 13}, {0x07C5, 13},
+ {0x07C6, 13}, {0x0001, 2}, {0x0002, 2}, {0x0006, 3},
+ {0x000E, 4}, {0x001E, 5}, {0x001F, 5}, {0x003F, 8},
+ {0x007D, 9}, {0x07C7, 13}, {0x07C8, 13}, {0x07C9, 13},
+ {0x07CA, 13}, {0x07CB, 13}, {0x07CC, 13}, {0x07CD, 13},
+ {0x07CE, 13}, {0x07CF, 13},
+ },{//7
+ {0x0000, 7}, {0x0001, 7}, {0x0002, 7}, {0x0003, 7},
+ {0x0004, 7}, {0x0005, 7}, {0x0006, 7}, {0x0007, 7},
+ {0x0008, 7}, {0x0009, 7}, {0x000A, 7}, {0x000B, 7},
+ {0x000C, 7}, {0x000D, 7}, {0x000E, 7}, {0x000F, 7},
+ {0x0010, 7}, {0x0001, 1}, {0x0001, 2}, {0x0011, 7},
+ {0x0012, 7}, {0x0013, 7}, {0x0014, 7}, {0x0015, 7},
+ {0x0016, 7}, {0x0017, 7}, {0x0018, 7}, {0x0019, 7},
+ {0x001A, 7}, {0x001B, 7}, {0x001C, 7}, {0x001D, 7},
+ {0x001E, 7}, {0x001F, 7},
+ }
+};
+#define MAX_DC_VLC_BITS 14
+
+
+static const uint16_t x8_ac0_lowquant_table[8][77][2]={
+ {//0
+ {0x0000, 2}, {0x0002, 3}, {0x0006, 4}, {0x000E, 5},
+ {0x001E, 6}, {0x003E, 7}, {0x003F, 7}, {0x0040, 7},
+ {0x0104, 9}, {0x0083, 8}, {0x0084, 8}, {0x0085, 8},
+ {0x020A, 10}, {0x020B, 10}, {0x0218, 10}, {0x0219, 10},
+ {0x0009, 4}, {0x0044, 7}, {0x010D, 9}, {0x021C, 10},
+ {0x0023, 6}, {0x0045, 7}, {0x0050, 7}, {0x000B, 4},
+ {0x000C, 4}, {0x0015, 5}, {0x001A, 5}, {0x001B, 5},
+ {0x0029, 6}, {0x0038, 6}, {0x0039, 6}, {0x003A, 6},
+ {0x0051, 7}, {0x0076, 7}, {0x0077, 7}, {0x0078, 7},
+ {0x0079, 7}, {0x007A, 7}, {0x007B, 7}, {0x00F8, 8},
+ {0x010F, 9}, {0x021D, 10}, {0x3E40, 14}, {0x3E41, 14},
+ {0x3E42, 14}, {0x3E43, 14}, {0x03E5, 10}, {0x3E44, 14},
+ {0x01F3, 9}, {0x3E45, 14}, {0x3E46, 14}, {0x3E47, 14},
+ {0x00FA, 8}, {0x3E48, 14}, {0x3E49, 14}, {0x3E4A, 14},
+ {0x3E4B, 14}, {0x03EC, 10}, {0x3E4C, 14}, {0x007E, 7},
+ {0x00FE, 8}, {0x00FF, 8}, {0x01F7, 9}, {0x3E4D, 14},
+ {0x3E4E, 14}, {0x3E4F, 14}, {0x3ED0, 14}, {0x3ED1, 14},
+ {0x3ED2, 14}, {0x3ED3, 14}, {0x3ED4, 14}, {0x3ED5, 14},
+ {0x1F6B, 13}, {0x1F6C, 13}, {0x1F6D, 13}, {0x1F6E, 13},
+ {0x1F6F, 13},
+ },{//1
+ {0x0000, 3}, {0x0004, 5}, {0x0014, 7}, {0x000B, 6},
+ {0x000C, 6}, {0x002A, 8}, {0x002B, 8}, {0x0034, 8},
+ {0x0D40, 14}, {0x0D41, 14}, {0x001B, 7}, {0x0D42, 14},
+ {0x0D43, 14}, {0x0D44, 14}, {0x0D45, 14}, {0x0D46, 14},
+ {0x000E, 6}, {0x003C, 8}, {0x0D47, 14}, {0x003D, 8},
+ {0x0D48, 14}, {0x0D49, 14}, {0x0D4A, 14}, {0x0001, 2},
+ {0x0004, 3}, {0x0014, 5}, {0x000B, 4}, {0x000C, 4},
+ {0x000D, 4}, {0x002A, 6}, {0x001F, 7}, {0x0056, 7},
+ {0x0057, 7}, {0x0070, 7}, {0x00E2, 8}, {0x0072, 7},
+ {0x003A, 6}, {0x003B, 6}, {0x003C, 6}, {0x003D, 6},
+ {0x00E3, 8}, {0x0D4B, 14}, {0x00E6, 8}, {0x00E7, 8},
+ {0x00F8, 8}, {0x0D4C, 14}, {0x0D4D, 14}, {0x0D4E, 14},
+ {0x00F9, 8}, {0x0D4F, 14}, {0x0D50, 14}, {0x0D51, 14},
+ {0x06A9, 13}, {0x06AA, 13}, {0x06AB, 13}, {0x06AC, 13},
+ {0x06AD, 13}, {0x06AE, 13}, {0x06AF, 13}, {0x003F, 6},
+ {0x06B0, 13}, {0x06B1, 13}, {0x06B2, 13}, {0x06B3, 13},
+ {0x06B4, 13}, {0x007D, 7}, {0x06B5, 13}, {0x06B6, 13},
+ {0x06B7, 13}, {0x06B8, 13}, {0x06B9, 13}, {0x06BA, 13},
+ {0x06BB, 13}, {0x06BC, 13}, {0x06BD, 13}, {0x06BE, 13},
+ {0x06BF, 13},
+ },{//2
+ {0x0000, 2}, {0x0002, 3}, {0x0003, 3}, {0x0008, 4},
+ {0x0012, 5}, {0x0013, 5}, {0x0028, 6}, {0x0029, 6},
+ {0x0054, 7}, {0x0055, 7}, {0x0056, 7}, {0x00AE, 8},
+ {0x00AF, 8}, {0x00B0, 8}, {0x0162, 9}, {0x02C6, 10},
+ {0x000C, 4}, {0x002D, 6}, {0x00B2, 8}, {0x0166, 9},
+ {0x002E, 6}, {0x0167, 9}, {0x00BC, 8}, {0x001A, 5},
+ {0x0036, 6}, {0x0037, 6}, {0x0038, 6}, {0x005F, 7},
+ {0x0072, 7}, {0x0073, 7}, {0x0074, 7}, {0x0075, 7},
+ {0x0076, 7}, {0x0077, 7}, {0x0078, 7}, {0x0079, 7},
+ {0x007A, 7}, {0x007B, 7}, {0x00BD, 8}, {0xB1C0, 16},
+ {0xB1C1, 16}, {0x58E1, 15}, {0x0B1D, 12}, {0x58E2, 15},
+ {0x58E3, 15}, {0x58E4, 15}, {0x00F8, 8}, {0x03E4, 10},
+ {0x01F3, 9}, {0x0B1E, 12}, {0x58E5, 15}, {0x58E6, 15},
+ {0x00FA, 8}, {0x58E7, 15}, {0x58F8, 15}, {0x58F9, 15},
+ {0x58FA, 15}, {0x01F6, 9}, {0x58FB, 15}, {0x007E, 7},
+ {0x00FE, 8}, {0x00FF, 8}, {0x07CA, 11}, {0x0F96, 12},
+ {0x58FC, 15}, {0x58FD, 15}, {0x58FE, 15}, {0x58FF, 15},
+ {0x7CB8, 15}, {0x7CB9, 15}, {0x7CBA, 15}, {0x7CBB, 15},
+ {0x7CBC, 15}, {0x01F7, 9}, {0x7CBD, 15}, {0x7CBE, 15},
+ {0x7CBF, 15},
+ },{//3
+ {0x0000, 2}, {0x0002, 3}, {0x0006, 4}, {0x000E, 5},
+ {0x000F, 5}, {0x0020, 6}, {0x0021, 6}, {0x0044, 7},
+ {0x0045, 7}, {0x008C, 8}, {0x008D, 8}, {0x011C, 9},
+ {0x011D, 9}, {0x011E, 9}, {0x023E, 10}, {0x023F, 10},
+ {0x0005, 3}, {0x0012, 5}, {0x004C, 7}, {0x004D, 7},
+ {0x000C, 4}, {0x004E, 7}, {0x001A, 5}, {0x0036, 6},
+ {0x004F, 7}, {0x006E, 7}, {0x006F, 7}, {0x00E0, 8},
+ {0x00E1, 8}, {0x00E2, 8}, {0x00E3, 8}, {0x00E4, 8},
+ {0x00E5, 8}, {0x01CC, 9}, {0x00E7, 8}, {0x00E8, 8},
+ {0x00E9, 8}, {0x01CD, 9}, {0x0750, 11}, {0x03A9, 10},
+ {0x0751, 11}, {0x7540, 15}, {0x03AB, 10}, {0x7541, 15},
+ {0x7542, 15}, {0x7543, 15}, {0x01D6, 9}, {0x0755, 11},
+ {0x0076, 7}, {0x0EA9, 12}, {0x7544, 15}, {0x7545, 15},
+ {0x001E, 5}, {0x0077, 7}, {0x00F8, 8}, {0x03AE, 10},
+ {0x075E, 11}, {0x007D, 7}, {0x03E4, 10}, {0x00FC, 8},
+ {0x00FD, 8}, {0x03E5, 10}, {0x03E6, 10}, {0x0EBE, 12},
+ {0x7546, 15}, {0x07CE, 11}, {0x7547, 15}, {0x75F8, 15},
+ {0x75F9, 15}, {0x75FA, 15}, {0x75FB, 15}, {0x75FC, 15},
+ {0x75FD, 15}, {0x007F, 7}, {0x3AFF, 14}, {0x0F9E, 12},
+ {0x0F9F, 12},
+ },{//4
+ {0x0000, 3}, {0x0002, 4}, {0x0003, 4}, {0x0008, 5},
+ {0x0012, 6}, {0x0013, 6}, {0x0014, 6}, {0x002A, 7},
+ {0x0016, 6}, {0x002B, 7}, {0x005C, 8}, {0x005D, 8},
+ {0x005E, 8}, {0x00BE, 9}, {0x00BF, 9}, {0x0060, 8},
+ {0x0007, 4}, {0x000D, 5}, {0x0019, 6}, {0x0020, 6},
+ {0x0009, 4}, {0x0021, 6}, {0x0011, 5}, {0x0014, 5},
+ {0x002A, 6}, {0x002B, 6}, {0x002C, 6}, {0x002D, 6},
+ {0x002E, 6}, {0x002F, 6}, {0x0030, 6}, {0x0031, 7},
+ {0x0062, 7}, {0x0063, 7}, {0x0064, 7}, {0x0065, 7},
+ {0x0066, 7}, {0x0061, 8}, {0x0670, 11}, {0x0068, 7},
+ {0x0069, 7}, {0x00CF, 8}, {0x019D, 9}, {0x01A8, 9},
+ {0x01A9, 9}, {0x0339, 10}, {0x01AA, 9}, {0x0356, 10},
+ {0x0036, 6}, {0x00D6, 8}, {0x6710, 15}, {0x6711, 15},
+ {0x000E, 4}, {0x006E, 7}, {0x01AE, 9}, {0x6712, 15},
+ {0x6713, 15}, {0x003C, 6}, {0x0357, 10}, {0x006F, 7},
+ {0x00F4, 8}, {0x00F5, 8}, {0x035E, 10}, {0x01EC, 9},
+ {0x6714, 15}, {0x01ED, 9}, {0x035F, 10}, {0x03DC, 10},
+ {0x03DD, 10}, {0x6715, 15}, {0x338B, 14}, {0x338C, 14},
+ {0x338D, 14}, {0x001F, 5}, {0x01EF, 9}, {0x338E, 14},
+ {0x338F, 14},
+ },{//5
+ {0x0000, 2}, {0x0004, 4}, {0x000A, 5}, {0x000B, 5},
+ {0x0018, 6}, {0x0019, 6}, {0x0034, 7}, {0x006A, 8},
+ {0x006B, 8}, {0x006C, 8}, {0x00DA, 9}, {0x036C, 11},
+ {0x006E, 8}, {0x01B7, 10}, {0x036D, 11}, {0x3780, 15},
+ {0x0004, 3}, {0x000E, 5}, {0x001E, 6}, {0x003E, 7},
+ {0x000A, 4}, {0x002C, 6}, {0x0017, 5}, {0x002D, 6},
+ {0x003F, 7}, {0x00C0, 8}, {0x0061, 7}, {0x00C1, 8},
+ {0x0062, 7}, {0x00C6, 8}, {0x0064, 7}, {0x00C7, 8},
+ {0x00CA, 8}, {0x00DF, 9}, {0x0196, 9}, {0x0197, 9},
+ {0x0198, 9}, {0x0199, 9}, {0x0379, 11}, {0x019A, 9},
+ {0x01BD, 10}, {0x066C, 11}, {0x3781, 15}, {0x0337, 10},
+ {0x066D, 11}, {0x0670, 11}, {0x0339, 10}, {0x0671, 11},
+ {0x0034, 6}, {0x00CF, 8}, {0x3782, 15}, {0x3783, 15},
+ {0x000E, 4}, {0x001B, 5}, {0x006A, 7}, {0x006B, 7},
+ {0x019D, 9}, {0x003C, 6}, {0x00F4, 8}, {0x00F5, 8},
+ {0x03D8, 10}, {0x07B2, 11}, {0x3784, 15}, {0x03DA, 10},
+ {0x3785, 15}, {0x03DB, 10}, {0x03DC, 10}, {0x3786, 15},
+ {0x3787, 15}, {0x1BC4, 14}, {0x1BC5, 14}, {0x1BC6, 14},
+ {0x1BC7, 14}, {0x001F, 5}, {0x03DD, 10}, {0x07B3, 11},
+ {0x01EF, 9},
+ },{//6
+ {0x0000, 2}, {0x0004, 4}, {0x000A, 5}, {0x0016, 6},
+ {0x0017, 6}, {0x0060, 8}, {0x00C2, 9}, {0x0186, 10},
+ {0x0187, 10}, {0x00C4, 9}, {0x3140, 15}, {0x3141, 15},
+ {0x018B, 10}, {0x3142, 15}, {0x018C, 10}, {0x3143, 15},
+ {0x0007, 4}, {0x000D, 5}, {0x0064, 8}, {0x0065, 8},
+ {0x0010, 5}, {0x00C7, 9}, {0x0066, 8}, {0x0005, 3},
+ {0x0006, 3}, {0x0009, 4}, {0x0011, 5}, {0x0038, 6},
+ {0x0039, 6}, {0x0074, 7}, {0x0075, 7}, {0x0076, 7},
+ {0x0067, 8}, {0x00EE, 8}, {0x01DE, 9}, {0x00F0, 8},
+ {0x018D, 10}, {0x3144, 15}, {0x01DF, 9}, {0x003D, 6},
+ {0x003E, 6}, {0x01E2, 9}, {0x03C6, 10}, {0x00F2, 8},
+ {0x00F3, 8}, {0x03C7, 10}, {0x3145, 15}, {0x3146, 15},
+ {0x01F8, 9}, {0x3147, 15}, {0x3148, 15}, {0x3149, 15},
+ {0x00FD, 8}, {0x314A, 15}, {0x314B, 15}, {0x314C, 15},
+ {0x314D, 15}, {0x01F9, 9}, {0x314E, 15}, {0x01FC, 9},
+ {0x314F, 15}, {0x3150, 15}, {0x3151, 15}, {0x3152, 15},
+ {0x3153, 15}, {0x03FA, 10}, {0x03FB, 10}, {0x3154, 15},
+ {0x3155, 15}, {0x3156, 15}, {0x3157, 15}, {0x3158, 15},
+ {0x3159, 15}, {0x00FF, 8}, {0x18AD, 14}, {0x18AE, 14},
+ {0x18AF, 14},
+ },{//7
+ {0x0000, 4}, {0x0080, 11}, {0x0081, 11}, {0x0082, 11},
+ {0x0083, 11}, {0x0084, 11}, {0x0085, 11}, {0x0086, 11},
+ {0x0087, 11}, {0x0088, 11}, {0x0089, 11}, {0x008A, 11},
+ {0x008B, 11}, {0x008C, 11}, {0x008D, 11}, {0x008E, 11},
+ {0x008F, 11}, {0x0048, 10}, {0x0049, 10}, {0x004A, 10},
+ {0x004B, 10}, {0x004C, 10}, {0x004D, 10}, {0x0001, 1},
+ {0x0001, 2}, {0x004E, 10}, {0x0002, 4}, {0x0003, 4},
+ {0x004F, 10}, {0x0050, 10}, {0x0051, 10}, {0x0052, 10},
+ {0x0053, 10}, {0x0054, 10}, {0x0055, 10}, {0x0056, 10},
+ {0x0057, 10}, {0x0058, 10}, {0x0059, 10}, {0x005A, 10},
+ {0x005B, 10}, {0x005C, 10}, {0x005D, 10}, {0x005E, 10},
+ {0x005F, 10}, {0x0060, 10}, {0x0061, 10}, {0x0062, 10},
+ {0x0063, 10}, {0x0064, 10}, {0x0065, 10}, {0x0066, 10},
+ {0x0067, 10}, {0x0068, 10}, {0x0069, 10}, {0x006A, 10},
+ {0x006B, 10}, {0x006C, 10}, {0x006D, 10}, {0x006E, 10},
+ {0x006F, 10}, {0x0070, 10}, {0x0071, 10}, {0x0072, 10},
+ {0x0073, 10}, {0x0074, 10}, {0x0075, 10}, {0x0076, 10},
+ {0x0077, 10}, {0x0078, 10}, {0x0079, 10}, {0x007A, 10},
+ {0x007B, 10}, {0x007C, 10}, {0x007D, 10}, {0x007E, 10},
+ {0x007F, 10},
+ }
+};
+
+static const uint16_t x8_ac0_highquant_table[8][77][2]={
+ {//0
+ {0x0000, 3}, {0x0002, 4}, {0x000C, 6}, {0x000D, 6},
+ {0x001C, 7}, {0x000F, 6}, {0x1D00, 15}, {0x003B, 8},
+ {0x1D01, 15}, {0x0075, 9}, {0x1D02, 15}, {0x0080, 9},
+ {0x1D03, 15}, {0x1D04, 15}, {0x1D05, 15}, {0x0E83, 14},
+ {0x0009, 5}, {0x0011, 6}, {0x0081, 9}, {0x0082, 9},
+ {0x0021, 7}, {0x0028, 7}, {0x0083, 9}, {0x0002, 2},
+ {0x0003, 3}, {0x000C, 4}, {0x000D, 4}, {0x000B, 5},
+ {0x0015, 6}, {0x0052, 8}, {0x0070, 7}, {0x0039, 6},
+ {0x0071, 7}, {0x0053, 8}, {0x0E84, 14}, {0x0074, 7},
+ {0x0075, 7}, {0x0076, 7}, {0x01DC, 9}, {0x001E, 5},
+ {0x003E, 6}, {0x01DD, 9}, {0x00EF, 8}, {0x01F8, 9},
+ {0x01F9, 9}, {0x0E85, 14}, {0x0E86, 14}, {0x0E87, 14},
+ {0x00FD, 8}, {0x0E88, 14}, {0x0E89, 14}, {0x0E8A, 14},
+ {0x0E8B, 14}, {0x0E8C, 14}, {0x0E8D, 14}, {0x0E8E, 14},
+ {0x0E8F, 14}, {0x0E90, 14}, {0x0E91, 14}, {0x01FC, 9},
+ {0x0E92, 14}, {0x0E93, 14}, {0x0E94, 14}, {0x0E95, 14},
+ {0x0E96, 14}, {0x0E97, 14}, {0x01FD, 9}, {0x0E98, 14},
+ {0x01FE, 9}, {0x0E99, 14}, {0x0E9A, 14}, {0x0E9B, 14},
+ {0x0E9C, 14}, {0x01FF, 9}, {0x0E9D, 14}, {0x0E9E, 14},
+ {0x0E9F, 14},
+ },{//1
+ {0x0000, 3}, {0x0002, 4}, {0x0003, 4}, {0x0008, 5},
+ {0x0012, 6}, {0x0013, 6}, {0x0014, 6}, {0x0015, 6},
+ {0x002C, 7}, {0x005A, 8}, {0x005B, 8}, {0x005C, 8},
+ {0x005D, 8}, {0x1780, 14}, {0x0179, 10}, {0x017A, 10},
+ {0x0006, 4}, {0x000E, 5}, {0x001E, 6}, {0x003E, 7},
+ {0x0010, 5}, {0x0022, 6}, {0x0012, 5}, {0x000A, 4},
+ {0x0013, 5}, {0x0016, 5}, {0x0023, 6}, {0x002E, 6},
+ {0x002F, 6}, {0x0030, 6}, {0x0031, 6}, {0x003F, 7},
+ {0x005F, 8}, {0x00C8, 8}, {0x0065, 7}, {0x0066, 7},
+ {0x0067, 7}, {0x0068, 7}, {0x00C9, 8}, {0x0069, 7},
+ {0x006A, 7}, {0x00D6, 8}, {0x00D7, 8}, {0x00D8, 8},
+ {0x1781, 14}, {0x017B, 10}, {0x01B2, 9}, {0x1782, 14},
+ {0x001C, 5}, {0x01B3, 9}, {0x1783, 14}, {0x1784, 14},
+ {0x001D, 5}, {0x00DA, 8}, {0x1785, 14}, {0x1786, 14},
+ {0x1787, 14}, {0x0037, 6}, {0x00DB, 8}, {0x0078, 7},
+ {0x00F2, 8}, {0x01E6, 9}, {0x00F4, 8}, {0x1788, 14},
+ {0x1789, 14}, {0x00F5, 8}, {0x01E7, 9}, {0x178A, 14},
+ {0x178B, 14}, {0x178C, 14}, {0x178D, 14}, {0x01EC, 9},
+ {0x178E, 14}, {0x001F, 5}, {0x00F7, 8}, {0x01ED, 9},
+ {0x178F, 14},
+ },{//2
+ {0x0000, 4}, {0x0002, 5}, {0x0180, 12}, {0x0181, 12},
+ {0x0182, 12}, {0x0183, 12}, {0x0184, 12}, {0x0185, 12},
+ {0x0186, 12}, {0x0187, 12}, {0x0188, 12}, {0x0189, 12},
+ {0x00C5, 11}, {0x00C6, 11}, {0x00C7, 11}, {0x00C8, 11},
+ {0x00C9, 11}, {0x00CA, 11}, {0x00CB, 11}, {0x00CC, 11},
+ {0x00CD, 11}, {0x00CE, 11}, {0x00CF, 11}, {0x0001, 1},
+ {0x0001, 2}, {0x0004, 5}, {0x0005, 5}, {0x0006, 5},
+ {0x00D0, 11}, {0x00D1, 11}, {0x00D2, 11}, {0x00D3, 11},
+ {0x00D4, 11}, {0x00D5, 11}, {0x00D6, 11}, {0x00D7, 11},
+ {0x00D8, 11}, {0x00D9, 11}, {0x00DA, 11}, {0x0007, 5},
+ {0x00DB, 11}, {0x00DC, 11}, {0x00DD, 11}, {0x00DE, 11},
+ {0x00DF, 11}, {0x00E0, 11}, {0x00E1, 11}, {0x00E2, 11},
+ {0x00E3, 11}, {0x00E4, 11}, {0x00E5, 11}, {0x00E6, 11},
+ {0x00E7, 11}, {0x00E8, 11}, {0x00E9, 11}, {0x00EA, 11},
+ {0x00EB, 11}, {0x00EC, 11}, {0x00ED, 11}, {0x00EE, 11},
+ {0x00EF, 11}, {0x00F0, 11}, {0x00F1, 11}, {0x00F2, 11},
+ {0x00F3, 11}, {0x00F4, 11}, {0x00F5, 11}, {0x00F6, 11},
+ {0x00F7, 11}, {0x00F8, 11}, {0x00F9, 11}, {0x00FA, 11},
+ {0x00FB, 11}, {0x00FC, 11}, {0x00FD, 11}, {0x00FE, 11},
+ {0x00FF, 11},
+ },{//3
+ {0x0000, 8}, {0x0001, 8}, {0x0002, 8}, {0x0003, 8},
+ {0x0004, 8}, {0x0005, 8}, {0x0006, 8}, {0x0007, 8},
+ {0x0008, 8}, {0x0009, 8}, {0x000A, 8}, {0x000B, 8},
+ {0x000C, 8}, {0x000D, 8}, {0x000E, 8}, {0x000F, 8},
+ {0x0010, 8}, {0x0011, 8}, {0x0012, 8}, {0x0013, 8},
+ {0x0014, 8}, {0x0015, 8}, {0x0016, 8}, {0x0001, 1},
+ {0x0017, 8}, {0x000C, 7}, {0x000D, 7}, {0x000E, 7},
+ {0x000F, 7}, {0x0010, 7}, {0x0011, 7}, {0x0012, 7},
+ {0x0013, 7}, {0x0014, 7}, {0x0015, 7}, {0x0016, 7},
+ {0x0017, 7}, {0x0018, 7}, {0x0019, 7}, {0x001A, 7},
+ {0x001B, 7}, {0x001C, 7}, {0x001D, 7}, {0x001E, 7},
+ {0x001F, 7}, {0x0020, 7}, {0x0021, 7}, {0x0022, 7},
+ {0x0023, 7}, {0x0024, 7}, {0x0025, 7}, {0x0026, 7},
+ {0x0027, 7}, {0x0028, 7}, {0x0029, 7}, {0x002A, 7},
+ {0x002B, 7}, {0x002C, 7}, {0x002D, 7}, {0x002E, 7},
+ {0x002F, 7}, {0x0030, 7}, {0x0031, 7}, {0x0032, 7},
+ {0x0033, 7}, {0x0034, 7}, {0x0035, 7}, {0x0036, 7},
+ {0x0037, 7}, {0x0038, 7}, {0x0039, 7}, {0x003A, 7},
+ {0x003B, 7}, {0x003C, 7}, {0x003D, 7}, {0x003E, 7},
+ {0x003F, 7},
+ },{//4
+ {0x0000, 9}, {0x0001, 9}, {0x0002, 9}, {0x0003, 9},
+ {0x0004, 9}, {0x0005, 9}, {0x0006, 9}, {0x0007, 9},
+ {0x0008, 9}, {0x0009, 9}, {0x000A, 9}, {0x000B, 9},
+ {0x000C, 9}, {0x000D, 9}, {0x000E, 9}, {0x000F, 9},
+ {0x0010, 9}, {0x0011, 9}, {0x0012, 9}, {0x0013, 9},
+ {0x0014, 9}, {0x0015, 9}, {0x000B, 8}, {0x0001, 2},
+ {0x0001, 1}, {0x000C, 8}, {0x000D, 8}, {0x000E, 8},
+ {0x000F, 8}, {0x0010, 8}, {0x0011, 8}, {0x0012, 8},
+ {0x0013, 8}, {0x0014, 8}, {0x0015, 8}, {0x0016, 8},
+ {0x0017, 8}, {0x0018, 8}, {0x0019, 8}, {0x001A, 8},
+ {0x001B, 8}, {0x001C, 8}, {0x001D, 8}, {0x001E, 8},
+ {0x001F, 8}, {0x0020, 8}, {0x0021, 8}, {0x0022, 8},
+ {0x0023, 8}, {0x0024, 8}, {0x0025, 8}, {0x0026, 8},
+ {0x0027, 8}, {0x0028, 8}, {0x0029, 8}, {0x002A, 8},
+ {0x002B, 8}, {0x002C, 8}, {0x002D, 8}, {0x002E, 8},
+ {0x002F, 8}, {0x0030, 8}, {0x0031, 8}, {0x0032, 8},
+ {0x0033, 8}, {0x0034, 8}, {0x0035, 8}, {0x0036, 8},
+ {0x0037, 8}, {0x0038, 8}, {0x0039, 8}, {0x003A, 8},
+ {0x003B, 8}, {0x003C, 8}, {0x003D, 8}, {0x003E, 8},
+ {0x003F, 8},
+ },{//5
+ {0x0000, 10}, {0x0001, 10}, {0x0002, 10}, {0x0003, 10},
+ {0x0004, 10}, {0x0005, 10}, {0x0006, 10}, {0x0007, 10},
+ {0x0008, 10}, {0x0009, 10}, {0x000A, 10}, {0x000B, 10},
+ {0x000C, 10}, {0x000D, 10}, {0x000E, 10}, {0x000F, 10},
+ {0x0010, 10}, {0x0011, 10}, {0x0012, 10}, {0x0013, 10},
+ {0x000A, 9}, {0x000B, 9}, {0x000C, 9}, {0x0001, 1},
+ {0x0001, 3}, {0x000D, 9}, {0x000E, 9}, {0x0001, 2},
+ {0x000F, 9}, {0x0010, 9}, {0x0011, 9}, {0x0012, 9},
+ {0x0013, 9}, {0x0014, 9}, {0x0015, 9}, {0x0016, 9},
+ {0x0017, 9}, {0x0018, 9}, {0x0019, 9}, {0x001A, 9},
+ {0x001B, 9}, {0x001C, 9}, {0x001D, 9}, {0x001E, 9},
+ {0x001F, 9}, {0x0020, 9}, {0x0021, 9}, {0x0022, 9},
+ {0x0023, 9}, {0x0024, 9}, {0x0025, 9}, {0x0026, 9},
+ {0x0027, 9}, {0x0028, 9}, {0x0029, 9}, {0x002A, 9},
+ {0x002B, 9}, {0x002C, 9}, {0x002D, 9}, {0x002E, 9},
+ {0x002F, 9}, {0x0030, 9}, {0x0031, 9}, {0x0032, 9},
+ {0x0033, 9}, {0x0034, 9}, {0x0035, 9}, {0x0036, 9},
+ {0x0037, 9}, {0x0038, 9}, {0x0039, 9}, {0x003A, 9},
+ {0x003B, 9}, {0x003C, 9}, {0x003D, 9}, {0x003E, 9},
+ {0x003F, 9},
+ },{//6
+ {0x0000, 2}, {0x0004, 4}, {0x000A, 5}, {0x000B, 5},
+ {0x0018, 6}, {0x0019, 6}, {0x0034, 7}, {0x006A, 8},
+ {0x006B, 8}, {0x006C, 8}, {0x00DA, 9}, {0x00DB, 9},
+ {0x01B8, 10}, {0x00DD, 9}, {0x01B9, 10}, {0x3780, 15},
+ {0x0004, 3}, {0x000E, 5}, {0x001E, 6}, {0x001F, 6},
+ {0x000A, 4}, {0x0058, 7}, {0x0017, 5}, {0x0018, 5},
+ {0x0059, 7}, {0x005A, 7}, {0x005B, 7}, {0x00C8, 8},
+ {0x0065, 7}, {0x0066, 7}, {0x00C9, 8}, {0x00CE, 8},
+ {0x00CF, 8}, {0x00D0, 8}, {0x00D1, 8}, {0x00D2, 8},
+ {0x00D3, 8}, {0x00DF, 9}, {0x00D4, 8}, {0x00D5, 8},
+ {0x00D6, 8}, {0x01AE, 9}, {0x3781, 15}, {0x01BD, 10},
+ {0x035E, 10}, {0x035F, 10}, {0x3782, 15}, {0x0360, 10},
+ {0x0037, 6}, {0x01B1, 9}, {0x3783, 15}, {0x3784, 15},
+ {0x000E, 4}, {0x003C, 6}, {0x0361, 10}, {0x3785, 15},
+ {0x1BC3, 14}, {0x003D, 6}, {0x00D9, 8}, {0x1BC4, 14},
+ {0x0368, 10}, {0x1BC5, 14}, {0x1BC6, 14}, {0x1BC7, 14},
+ {0x1BC8, 14}, {0x00DB, 8}, {0x0369, 10}, {0x036A, 10},
+ {0x1BC9, 14}, {0x1BCA, 14}, {0x1BCB, 14}, {0x1BCC, 14},
+ {0x1BCD, 14}, {0x001F, 5}, {0x036B, 10}, {0x1BCE, 14},
+ {0x1BCF, 14},
+ },{//7
+ {0x0000, 3}, {0x0002, 4}, {0x0006, 5}, {0x0007, 5},
+ {0x0010, 6}, {0x0044, 8}, {0x0023, 7}, {0x0012, 6},
+ {0x0026, 7}, {0x08A0, 13}, {0x004E, 8}, {0x004F, 8},
+ {0x08A1, 13}, {0x08A2, 13}, {0x08A3, 13}, {0x0050, 8},
+ {0x0006, 4}, {0x000B, 5}, {0x0029, 7}, {0x0015, 6},
+ {0x001C, 6}, {0x003A, 7}, {0x001E, 6}, {0x0004, 3},
+ {0x0014, 5}, {0x0015, 5}, {0x000B, 4}, {0x001F, 6},
+ {0x0030, 6}, {0x0031, 6}, {0x0019, 5}, {0x0051, 8},
+ {0x0034, 6}, {0x0035, 6}, {0x0036, 6}, {0x0037, 6},
+ {0x0076, 8}, {0x0077, 8}, {0x0070, 7}, {0x001D, 5},
+ {0x0071, 7}, {0x0072, 7}, {0x08A4, 13}, {0x0073, 7},
+ {0x00F0, 8}, {0x08A5, 13}, {0x08A6, 13}, {0x08A7, 13},
+ {0x0079, 7}, {0x007A, 7}, {0x08A8, 13}, {0x08A9, 13},
+ {0x00F1, 8}, {0x08AA, 13}, {0x08AB, 13}, {0x08AC, 13},
+ {0x08AD, 13}, {0x00F6, 8}, {0x08AE, 13}, {0x007C, 7},
+ {0x00F7, 8}, {0x08AF, 13}, {0x08B0, 13}, {0x08B1, 13},
+ {0x08B2, 13}, {0x00FA, 8}, {0x08B3, 13}, {0x08B4, 13},
+ {0x08B5, 13}, {0x08B6, 13}, {0x08B7, 13}, {0x00FB, 8},
+ {0x045C, 12}, {0x003F, 6}, {0x045D, 12}, {0x045E, 12},
+ {0x045F, 12},
+ }
+};
+
+static const uint16_t x8_ac1_lowquant_table[8][77][2]={
+ {//0
+ {0x0000, 3}, {0x0002, 4}, {0x0003, 4}, {0x0008, 5},
+ {0x0012, 6}, {0x0026, 7}, {0x0014, 6}, {0x004E, 8},
+ {0x004F, 8}, {0x00A8, 9}, {0x0152, 10}, {0x00AA, 9},
+ {0x00AB, 9}, {0x00AC, 9}, {0x2A60, 15}, {0x02A7, 11},
+ {0x0006, 4}, {0x000B, 5}, {0x001C, 6}, {0x003A, 7},
+ {0x000F, 5}, {0x003B, 7}, {0x0010, 5}, {0x0005, 3},
+ {0x0009, 4}, {0x0011, 5}, {0x0018, 5}, {0x0019, 5},
+ {0x001A, 5}, {0x0036, 6}, {0x0037, 6}, {0x0070, 7},
+ {0x0057, 8}, {0x00E2, 8}, {0x00E3, 8}, {0x00E4, 8},
+ {0x00E5, 8}, {0x00AD, 9}, {0x0398, 10}, {0x003A, 6},
+ {0x0076, 7}, {0x00E7, 8}, {0x00EE, 8}, {0x00EF, 8},
+ {0x0732, 11}, {0x039A, 10}, {0x0733, 11}, {0x2A61, 15},
+ {0x0078, 7}, {0x1531, 14}, {0x1532, 14}, {0x1533, 14},
+ {0x003D, 6}, {0x039B, 10}, {0x1534, 14}, {0x1535, 14},
+ {0x1536, 14}, {0x0079, 7}, {0x1537, 14}, {0x00F8, 8},
+ {0x01F2, 9}, {0x07CC, 11}, {0x03E7, 10}, {0x07CD, 11},
+ {0x3E80, 14}, {0x00FB, 8}, {0x03E9, 10}, {0x3E81, 14},
+ {0x3E82, 14}, {0x3E83, 14}, {0x3E84, 14}, {0x3E85, 14},
+ {0x3E86, 14}, {0x003F, 6}, {0x01F5, 9}, {0x07D1, 11},
+ {0x3E87, 14},
+ },{//1
+ {0x0000, 2}, {0x0002, 3}, {0x0006, 4}, {0x000E, 5},
+ {0x001E, 6}, {0x001F, 6}, {0x0040, 7}, {0x0082, 8},
+ {0x0083, 8}, {0x0084, 8}, {0x010A, 9}, {0x010B, 9},
+ {0x0430, 11}, {0x0431, 11}, {0x0432, 11}, {0x0433, 11},
+ {0x0005, 3}, {0x0011, 5}, {0x0024, 6}, {0x004A, 7},
+ {0x000C, 4}, {0x0026, 6}, {0x000D, 4}, {0x0087, 8},
+ {0x010D, 9}, {0x0258, 10}, {0x012D, 9}, {0x0259, 10},
+ {0x025C, 10}, {0x0974, 12}, {0x025E, 10}, {0x025F, 10},
+ {0x0270, 10}, {0x0271, 10}, {0x04BB, 11}, {0x0975, 12},
+ {0x0272, 10}, {0x09CC, 12}, {0x09CD, 12}, {0x4E70, 15},
+ {0x4E71, 15}, {0x4E72, 15}, {0x4E73, 15}, {0x273A, 14},
+ {0x273B, 14}, {0x273C, 14}, {0x04E8, 11}, {0x04E9, 11},
+ {0x009E, 8}, {0x0275, 10}, {0x09D8, 12}, {0x273D, 14},
+ {0x000E, 4}, {0x003C, 6}, {0x007A, 7}, {0x009F, 8},
+ {0x0277, 10}, {0x003E, 6}, {0x00F6, 8}, {0x04ED, 11},
+ {0x03DC, 10}, {0x273E, 14}, {0x07BA, 11}, {0x09D9, 12},
+ {0x273F, 14}, {0x3DD8, 14}, {0x3DD9, 14}, {0x3DDA, 14},
+ {0x3DDB, 14}, {0x3DDC, 14}, {0x3DDD, 14}, {0x3DDE, 14},
+ {0x3DDF, 14}, {0x003F, 6}, {0x07BC, 11}, {0x07BD, 11},
+ {0x03DF, 10},
+ },{//2
+ {0x0000, 3}, {0x0002, 4}, {0x0006, 5}, {0x000E, 6},
+ {0x001E, 7}, {0x003E, 8}, {0x003F, 8}, {0x0040, 8},
+ {0x0104, 10}, {0x0083, 9}, {0x0105, 10}, {0x0108, 10},
+ {0x4240, 16}, {0x010A, 10}, {0x010B, 10}, {0x4241, 16},
+ {0x0003, 3}, {0x0009, 5}, {0x0011, 6}, {0x0043, 8},
+ {0x0004, 3}, {0x000A, 5}, {0x000A, 4}, {0x002C, 7},
+ {0x00B4, 9}, {0x00B5, 9}, {0x00B6, 9}, {0x00B7, 9},
+ {0x00B8, 9}, {0x0172, 10}, {0x0173, 10}, {0x0174, 10},
+ {0x0175, 10}, {0x0176, 10}, {0x0177, 10}, {0x00BC, 9},
+ {0x017A, 10}, {0x0213, 11}, {0x4242, 16}, {0x017B, 10},
+ {0x02F8, 11}, {0x017D, 10}, {0x02F9, 11}, {0x017E, 10},
+ {0x4243, 16}, {0x02FE, 11}, {0x2122, 15}, {0x2123, 15},
+ {0x0058, 7}, {0x0164, 9}, {0x2124, 15}, {0x2125, 15},
+ {0x0006, 3}, {0x000E, 4}, {0x002D, 6}, {0x002E, 6},
+ {0x00B3, 8}, {0x001E, 5}, {0x005E, 7}, {0x2126, 15},
+ {0x2127, 15}, {0x2128, 15}, {0x2129, 15}, {0x02FF, 11},
+ {0x212A, 15}, {0x0594, 11}, {0x0595, 11}, {0x0596, 11},
+ {0x212B, 15}, {0x212C, 15}, {0x212D, 15}, {0x212E, 15},
+ {0x212F, 15}, {0x001F, 5}, {0x0597, 11}, {0x00BE, 8},
+ {0x00BF, 8},
+ },{//3
+ {0x0000, 2}, {0x0002, 3}, {0x0006, 4}, {0x0007, 4},
+ {0x0010, 5}, {0x0011, 5}, {0x0024, 6}, {0x0025, 6},
+ {0x0026, 6}, {0x0027, 6}, {0x0050, 7}, {0x0051, 7},
+ {0x00A4, 8}, {0x00A5, 8}, {0x00A6, 8}, {0x014E, 9},
+ {0x000B, 4}, {0x002A, 6}, {0x0056, 7}, {0x014F, 9},
+ {0x0030, 6}, {0x00AE, 8}, {0x0062, 7}, {0x0032, 6},
+ {0x0033, 6}, {0x0034, 6}, {0x0035, 6}, {0x0036, 6},
+ {0x0063, 7}, {0x006E, 7}, {0x006F, 7}, {0x0070, 7},
+ {0x0071, 7}, {0x0072, 7}, {0x0073, 7}, {0x0074, 7},
+ {0x00AF, 8}, {0x00EA, 8}, {0x01D6, 9}, {0x075C, 11},
+ {0x03AF, 10}, {0x75D0, 15}, {0x75D1, 15}, {0x75D2, 15},
+ {0x75D3, 15}, {0x75D4, 15}, {0x0076, 7}, {0x00EE, 8},
+ {0x00EF, 8}, {0x0EBB, 12}, {0x01E0, 9}, {0x75D5, 15},
+ {0x0079, 7}, {0x01E1, 9}, {0x75D6, 15}, {0x75D7, 15},
+ {0x7880, 15}, {0x00F4, 8}, {0x0789, 11}, {0x003E, 6},
+ {0x007B, 7}, {0x00F5, 8}, {0x00FC, 8}, {0x007F, 7},
+ {0x01E3, 9}, {0x078A, 11}, {0x078B, 11}, {0x7881, 15},
+ {0x7882, 15}, {0x7883, 15}, {0x3C42, 14}, {0x3C43, 14},
+ {0x3C44, 14}, {0x00FD, 8}, {0x3C45, 14}, {0x3C46, 14},
+ {0x3C47, 14},
+ },{//4
+ {0x0000, 2}, {0x0004, 4}, {0x000A, 5}, {0x0016, 6},
+ {0x0017, 6}, {0x0030, 7}, {0x0031, 7}, {0x0064, 8},
+ {0x0065, 8}, {0x0066, 8}, {0x00CE, 9}, {0x00CF, 9},
+ {0x01A0, 10}, {0x01A1, 10}, {0x1A20, 14}, {0x0689, 12},
+ {0x0004, 3}, {0x000E, 5}, {0x001B, 6}, {0x0035, 7},
+ {0x000A, 4}, {0x001E, 6}, {0x0016, 5}, {0x0017, 5},
+ {0x001F, 6}, {0x0030, 6}, {0x0031, 6}, {0x0064, 7},
+ {0x0065, 7}, {0x0069, 8}, {0x0066, 7}, {0x00CE, 8},
+ {0x00CF, 8}, {0x00D0, 8}, {0x00D1, 8}, {0x00D2, 8},
+ {0x01A6, 9}, {0x01A3, 10}, {0x034E, 10}, {0x006A, 7},
+ {0x00D6, 8}, {0x01AE, 9}, {0x01AF, 9}, {0x034F, 10},
+ {0x0345, 11}, {0x01B0, 9}, {0x01B1, 9}, {0x0364, 10},
+ {0x006D, 7}, {0x00DC, 8}, {0x0D94, 12}, {0x0D95, 12},
+ {0x000E, 4}, {0x003C, 6}, {0x00DD, 8}, {0x00DE, 8},
+ {0x01B3, 9}, {0x003D, 6}, {0x00DF, 8}, {0x01F0, 9},
+ {0x03E2, 10}, {0x03E3, 10}, {0x06CB, 11}, {0x03E4, 10},
+ {0x07CA, 11}, {0x01F3, 9}, {0x01F4, 9}, {0x07CB, 11},
+ {0x07D4, 11}, {0x1A21, 14}, {0x1A22, 14}, {0x07D5, 11},
+ {0x1A23, 14}, {0x003F, 6}, {0x01F6, 9}, {0x01F7, 9},
+ {0x03EB, 10},
+ },{//5
+ {0x0000, 2}, {0x0002, 3}, {0x0006, 4}, {0x000E, 5},
+ {0x000F, 5}, {0x0020, 6}, {0x0021, 6}, {0x0044, 7},
+ {0x0045, 7}, {0x0046, 7}, {0x008E, 8}, {0x008F, 8},
+ {0x0090, 8}, {0x0122, 9}, {0x0246, 10}, {0x0124, 9},
+ {0x0005, 3}, {0x0013, 5}, {0x004A, 7}, {0x0093, 8},
+ {0x0018, 5}, {0x004B, 7}, {0x0032, 6}, {0x001A, 5},
+ {0x0033, 6}, {0x006C, 7}, {0x006D, 7}, {0x006E, 7},
+ {0x00DE, 8}, {0x00DF, 8}, {0x0070, 7}, {0x00E2, 8},
+ {0x00E3, 8}, {0x00E4, 8}, {0x00E5, 8}, {0x00E6, 8},
+ {0x00E7, 8}, {0x0125, 9}, {0x01D0, 9}, {0x048E, 11},
+ {0x091E, 12}, {0x091F, 12}, {0x7440, 15}, {0x1D11, 13},
+ {0x7441, 15}, {0x7442, 15}, {0x00E9, 8}, {0x01D4, 9},
+ {0x00EB, 8}, {0x03A3, 10}, {0x01D5, 9}, {0x1D12, 13},
+ {0x001E, 5}, {0x0076, 7}, {0x01DC, 9}, {0x01DD, 9},
+ {0x7443, 15}, {0x007C, 7}, {0x0745, 11}, {0x00EF, 8},
+ {0x00FA, 8}, {0x00FB, 8}, {0x01F8, 9}, {0x00FD, 8},
+ {0x07E4, 11}, {0x0FCA, 12}, {0x1D13, 13}, {0x7E58, 15},
+ {0x7E59, 15}, {0x7E5A, 15}, {0x7E5B, 15}, {0x7E5C, 15},
+ {0x7E5D, 15}, {0x007F, 7}, {0x3F2F, 14}, {0x07E6, 11},
+ {0x07E7, 11},
+ },{//6
+ {0x0000, 3}, {0x0002, 4}, {0x0003, 4}, {0x0008, 5},
+ {0x0009, 5}, {0x0014, 6}, {0x0015, 6}, {0x002C, 7},
+ {0x005A, 8}, {0x005B, 8}, {0x005C, 8}, {0x00BA, 9},
+ {0x00BB, 9}, {0x00BC, 9}, {0x02F4, 11}, {0x05EA, 12},
+ {0x0003, 3}, {0x0010, 5}, {0x0022, 6}, {0x0046, 7},
+ {0x0009, 4}, {0x0028, 6}, {0x0015, 5}, {0x000B, 4},
+ {0x0018, 5}, {0x0029, 6}, {0x0032, 6}, {0x0047, 7},
+ {0x0066, 7}, {0x0067, 7}, {0x0068, 7}, {0x0069, 7},
+ {0x006A, 7}, {0x005F, 8}, {0x00D6, 8}, {0x00D7, 8},
+ {0x01B0, 9}, {0x00D9, 8}, {0x017B, 10}, {0x006D, 7},
+ {0x00DC, 8}, {0x01B1, 9}, {0x06E8, 11}, {0x01BB, 9},
+ {0x0375, 10}, {0x05EB, 12}, {0x01BC, 9}, {0x6E90, 15},
+ {0x0038, 6}, {0x0072, 7}, {0x6E91, 15}, {0x6E92, 15},
+ {0x001D, 5}, {0x0073, 7}, {0x01BD, 9}, {0x06F8, 11},
+ {0x6E93, 15}, {0x003C, 6}, {0x01BF, 9}, {0x00F4, 8},
+ {0x01EA, 9}, {0x037D, 10}, {0x03D6, 10}, {0x06F9, 11},
+ {0x6E94, 15}, {0x00F6, 8}, {0x01EE, 9}, {0x6E95, 15},
+ {0x6E96, 15}, {0x6E97, 15}, {0x374C, 14}, {0x374D, 14},
+ {0x374E, 14}, {0x001F, 5}, {0x03D7, 10}, {0x01EF, 9},
+ {0x374F, 14},
+ },{//7
+ {0x0000, 2}, {0x0004, 4}, {0x000A, 5}, {0x0016, 6},
+ {0x002E, 7}, {0x002F, 7}, {0x0060, 8}, {0x0061, 8},
+ {0x00C4, 9}, {0x00C5, 9}, {0x00C6, 9}, {0x018E, 10},
+ {0x31E0, 15}, {0x31E1, 15}, {0x31E2, 15}, {0x31E3, 15},
+ {0x0004, 3}, {0x000D, 5}, {0x0019, 6}, {0x0038, 7},
+ {0x000A, 4}, {0x001D, 6}, {0x000B, 4}, {0x0072, 8},
+ {0x0073, 8}, {0x00F0, 9}, {0x01E2, 10}, {0x00F2, 9},
+ {0x01E3, 10}, {0x00F3, 9}, {0x01E8, 10}, {0x01E9, 10},
+ {0x31E4, 15}, {0x01EA, 10}, {0x031F, 11}, {0x03D6, 11},
+ {0x31E5, 15}, {0x01EC, 10}, {0x31E6, 15}, {0x00F7, 9},
+ {0x03D7, 11}, {0x31E7, 15}, {0x31E8, 15}, {0x03DA, 11},
+ {0x03DB, 11}, {0x31E9, 15}, {0x03E0, 11}, {0x31EA, 15},
+ {0x003F, 7}, {0x01F1, 10}, {0x31EB, 15}, {0x31EC, 15},
+ {0x0006, 3}, {0x001C, 5}, {0x0074, 7}, {0x0075, 7},
+ {0x00F9, 9}, {0x001E, 5}, {0x0076, 7}, {0x00FA, 9},
+ {0x03E1, 11}, {0x31ED, 15}, {0x18F7, 14}, {0x1F60, 14},
+ {0x1F61, 14}, {0x01DC, 9}, {0x01DD, 9}, {0x1F62, 14},
+ {0x1F63, 14}, {0x1F64, 14}, {0x1F65, 14}, {0x1F66, 14},
+ {0x1F67, 14}, {0x001F, 5}, {0x03ED, 11}, {0x00EF, 8},
+ {0x01F7, 10},
+ }
+};
+
+static const uint16_t x8_ac1_highquant_table[8][77][2]={
+ {//0
+ {0x0000, 3}, {0x0002, 4}, {0x0006, 5}, {0x0007, 5},
+ {0x0008, 5}, {0x0009, 5}, {0x0014, 6}, {0x002A, 7},
+ {0x0016, 6}, {0x002B, 7}, {0x005C, 8}, {0x002F, 7},
+ {0x0030, 7}, {0x005D, 8}, {0x0062, 8}, {0x00C6, 9},
+ {0x0007, 4}, {0x0019, 6}, {0x001A, 6}, {0x0036, 7},
+ {0x0010, 5}, {0x006E, 8}, {0x0022, 6}, {0x0009, 4},
+ {0x000A, 4}, {0x0016, 5}, {0x0023, 6}, {0x002E, 6},
+ {0x002F, 6}, {0x0030, 6}, {0x0062, 7}, {0x0063, 7},
+ {0x0064, 7}, {0x0065, 7}, {0x0066, 7}, {0x0067, 7},
+ {0x0068, 7}, {0x0069, 7}, {0x006A, 7}, {0x006B, 7},
+ {0x006C, 7}, {0x00C7, 9}, {0x00DE, 9}, {0x00DF, 9},
+ {0x06D0, 11}, {0x01B5, 9}, {0x0037, 6}, {0x00DB, 8},
+ {0x001C, 5}, {0x0074, 7}, {0x01D4, 9}, {0x01D5, 9},
+ {0x0076, 7}, {0x0369, 10}, {0x3688, 14}, {0x3689, 14},
+ {0x368A, 14}, {0x0077, 7}, {0x03AC, 10}, {0x0078, 7},
+ {0x00F2, 8}, {0x01D7, 9}, {0x00F3, 8}, {0x007A, 7},
+ {0x368B, 14}, {0x007B, 7}, {0x007C, 7}, {0x03AD, 10},
+ {0x03E8, 10}, {0x368C, 14}, {0x368D, 14}, {0x03E9, 10},
+ {0x368E, 14}, {0x003F, 6}, {0x01F5, 9}, {0x00FB, 8},
+ {0x368F, 14},
+ },{//1
+ {0x0000, 2}, {0x0004, 4}, {0x000A, 5}, {0x000B, 5},
+ {0x0018, 6}, {0x0032, 7}, {0x0033, 7}, {0x0034, 7},
+ {0x006A, 8}, {0x00D6, 9}, {0x00D7, 9}, {0x00D8, 9},
+ {0x00D9, 9}, {0x3680, 15}, {0x01B5, 10}, {0x0369, 11},
+ {0x0004, 3}, {0x000E, 5}, {0x001E, 6}, {0x0037, 7},
+ {0x000A, 4}, {0x0016, 5}, {0x000C, 4}, {0x001F, 6},
+ {0x005C, 7}, {0x005D, 7}, {0x00BC, 8}, {0x00BD, 8},
+ {0x005F, 7}, {0x00D0, 8}, {0x00DB, 9}, {0x00D1, 8},
+ {0x01A4, 9}, {0x01A5, 9}, {0x01A6, 9}, {0x01A7, 9},
+ {0x0350, 10}, {0x06A2, 11}, {0x06A3, 11}, {0x01A9, 9},
+ {0x01AA, 9}, {0x06AC, 11}, {0x3681, 15}, {0x0357, 10},
+ {0x3682, 15}, {0x3683, 15}, {0x3684, 15}, {0x3685, 15},
+ {0x0036, 6}, {0x00D6, 8}, {0x3686, 15}, {0x3687, 15},
+ {0x000E, 4}, {0x006E, 7}, {0x00D7, 8}, {0x06AD, 11},
+ {0x3688, 15}, {0x001E, 5}, {0x00DE, 8}, {0x06F8, 11},
+ {0x037D, 10}, {0x3689, 15}, {0x368A, 15}, {0x368B, 15},
+ {0x368C, 15}, {0x01BF, 9}, {0x368D, 15}, {0x1B47, 14},
+ {0x37C8, 14}, {0x37C9, 14}, {0x37CA, 14}, {0x37CB, 14},
+ {0x37CC, 14}, {0x001F, 5}, {0x37CD, 14}, {0x37CE, 14},
+ {0x37CF, 14},
+ },{//2
+ {0x0000, 3}, {0x0002, 4}, {0x0003, 4}, {0x0008, 5},
+ {0x0012, 6}, {0x0026, 7}, {0x0014, 6}, {0x0027, 7},
+ {0x00A8, 9}, {0x00A9, 9}, {0x0055, 8}, {0x2B00, 15},
+ {0x00AD, 9}, {0x2B01, 15}, {0x2B02, 15}, {0x2B03, 15},
+ {0x0003, 3}, {0x000B, 5}, {0x0040, 7}, {0x0041, 7},
+ {0x0009, 4}, {0x0021, 6}, {0x0011, 5}, {0x000A, 4},
+ {0x000B, 4}, {0x0018, 5}, {0x0032, 6}, {0x0033, 6},
+ {0x0034, 6}, {0x0035, 6}, {0x006C, 7}, {0x0057, 8},
+ {0x006D, 7}, {0x00DC, 8}, {0x0159, 10}, {0x00DD, 8},
+ {0x01BC, 9}, {0x037A, 10}, {0x037B, 10}, {0x0038, 6},
+ {0x0072, 7}, {0x01BE, 9}, {0x01BF, 9}, {0x00E6, 8},
+ {0x039C, 10}, {0x01CF, 9}, {0x2B04, 15}, {0x2B05, 15},
+ {0x0074, 7}, {0x01D4, 9}, {0x2B06, 15}, {0x2B07, 15},
+ {0x001E, 5}, {0x00EB, 8}, {0x1584, 14}, {0x1585, 14},
+ {0x1586, 14}, {0x003B, 6}, {0x01D5, 9}, {0x01F0, 9},
+ {0x039D, 10}, {0x03E2, 10}, {0x1587, 14}, {0x1588, 14},
+ {0x1589, 14}, {0x00F9, 8}, {0x158A, 14}, {0x158B, 14},
+ {0x03E3, 10}, {0x158C, 14}, {0x158D, 14}, {0x01F4, 9},
+ {0x158E, 14}, {0x003F, 6}, {0x00FB, 8}, {0x01F5, 9},
+ {0x158F, 14},
+ },{//3
+ {0x0000, 3}, {0x0002, 4}, {0x0006, 5}, {0x0007, 5},
+ {0x0010, 6}, {0x0011, 6}, {0x0024, 7}, {0x0025, 7},
+ {0x0013, 6}, {0x0014, 6}, {0x002A, 7}, {0x002B, 7},
+ {0x00B0, 9}, {0x00B1, 9}, {0x002D, 7}, {0x0059, 8},
+ {0x000C, 5}, {0x0017, 6}, {0x00D0, 9}, {0x0035, 7},
+ {0x001B, 6}, {0x0038, 7}, {0x0039, 7}, {0x0004, 3},
+ {0x0005, 3}, {0x000F, 5}, {0x0018, 5}, {0x001D, 6},
+ {0x0032, 6}, {0x0033, 6}, {0x0068, 7}, {0x0069, 7},
+ {0x0069, 8}, {0x00D4, 8}, {0x00D5, 8}, {0x00D6, 8},
+ {0x006C, 7}, {0x0037, 6}, {0x006D, 7}, {0x0070, 7},
+ {0x0039, 6}, {0x00D7, 8}, {0x00D1, 9}, {0x3880, 14},
+ {0x3881, 14}, {0x3882, 14}, {0x0074, 7}, {0x01C5, 9},
+ {0x0075, 7}, {0x00E3, 8}, {0x3883, 14}, {0x3884, 14},
+ {0x00EC, 8}, {0x3885, 14}, {0x1C43, 13}, {0x1C44, 13},
+ {0x1C45, 13}, {0x00ED, 8}, {0x1C46, 13}, {0x003C, 6},
+ {0x0077, 7}, {0x01E8, 9}, {0x003E, 6}, {0x007B, 7},
+ {0x1C47, 13}, {0x007E, 7}, {0x007F, 7}, {0x1C48, 13},
+ {0x1C49, 13}, {0x1C4A, 13}, {0x1C4B, 13}, {0x1C4C, 13},
+ {0x1C4D, 13}, {0x00F5, 8}, {0x1C4E, 13}, {0x01E9, 9},
+ {0x1C4F, 13},
+ },{//4
+ {0x0000, 2}, {0x0004, 4}, {0x000A, 5}, {0x000B, 5},
+ {0x0018, 6}, {0x0019, 6}, {0x0034, 7}, {0x0035, 7},
+ {0x0036, 7}, {0x006E, 8}, {0x00DE, 9}, {0x00DF, 9},
+ {0x01C0, 10}, {0x01C1, 10}, {0x01C2, 10}, {0x3860, 15},
+ {0x0004, 3}, {0x000F, 5}, {0x001D, 6}, {0x0039, 7},
+ {0x000A, 4}, {0x002C, 6}, {0x002D, 6}, {0x000C, 4},
+ {0x0017, 5}, {0x0034, 6}, {0x0035, 6}, {0x0036, 6},
+ {0x006E, 7}, {0x006F, 7}, {0x0070, 7}, {0x0071, 7},
+ {0x0071, 8}, {0x00E4, 8}, {0x00E5, 8}, {0x00E6, 8},
+ {0x00E7, 8}, {0x00E8, 8}, {0x03A4, 10}, {0x0075, 7},
+ {0x00EC, 8}, {0x01D3, 9}, {0x01DA, 9}, {0x03A5, 10},
+ {0x03B6, 10}, {0x070D, 12}, {0x03B7, 10}, {0x070E, 12},
+ {0x003C, 6}, {0x00EE, 8}, {0x3861, 15}, {0x3862, 15},
+ {0x003D, 6}, {0x01DE, 9}, {0x3863, 15}, {0x3864, 15},
+ {0x3865, 15}, {0x007C, 7}, {0x070F, 12}, {0x03BE, 10},
+ {0x03BF, 10}, {0x3866, 15}, {0x0FA0, 12}, {0x07D1, 11},
+ {0x3867, 15}, {0x00FB, 8}, {0x01F5, 9}, {0x7D08, 15},
+ {0x0FA4, 12}, {0x7D09, 15}, {0x7D0A, 15}, {0x7D0B, 15},
+ {0x3E86, 14}, {0x003F, 6}, {0x0FA5, 12}, {0x07D3, 11},
+ {0x3E87, 14},
+ },{//5
+ {0x0000, 3}, {0x0002, 4}, {0x0003, 4}, {0x0008, 5},
+ {0x0009, 5}, {0x0014, 6}, {0x002A, 7}, {0x0056, 8},
+ {0x02B8, 11}, {0x00AF, 9}, {0x02B9, 11}, {0x015D, 10},
+ {0x02C0, 11}, {0x2C10, 15}, {0x2C11, 15}, {0x2C12, 15},
+ {0x0006, 4}, {0x000E, 5}, {0x0017, 6}, {0x002D, 7},
+ {0x000F, 5}, {0x0040, 7}, {0x0021, 6}, {0x0005, 3},
+ {0x0009, 4}, {0x0011, 5}, {0x0018, 5}, {0x0019, 5},
+ {0x001A, 5}, {0x0036, 6}, {0x0037, 6}, {0x0041, 7},
+ {0x0059, 8}, {0x00E0, 8}, {0x00E1, 8}, {0x0071, 7},
+ {0x00E4, 8}, {0x00B1, 9}, {0x02C2, 11}, {0x001D, 5},
+ {0x0073, 7}, {0x00E5, 8}, {0x00F0, 8}, {0x0079, 7},
+ {0x03C4, 10}, {0x01E3, 9}, {0x01E8, 9}, {0x2C13, 15},
+ {0x007B, 7}, {0x2C14, 15}, {0x2C15, 15}, {0x2C16, 15},
+ {0x007C, 7}, {0x02C3, 11}, {0x2C17, 15}, {0x160C, 14},
+ {0x160D, 14}, {0x007D, 7}, {0x160E, 14}, {0x01E9, 9},
+ {0x03C5, 10}, {0x03D4, 10}, {0x01EB, 9}, {0x160F, 14},
+ {0x3D50, 14}, {0x00FC, 8}, {0x07AB, 11}, {0x3D51, 14},
+ {0x3D52, 14}, {0x3D53, 14}, {0x3D54, 14}, {0x01FA, 9},
+ {0x3D55, 14}, {0x007F, 7}, {0x01FB, 9}, {0x3D56, 14},
+ {0x3D57, 14},
+ },{//6
+ {0x0000, 3}, {0x0002, 4}, {0x0003, 4}, {0x0008, 5},
+ {0x0009, 5}, {0x000A, 5}, {0x000B, 5}, {0x0018, 6},
+ {0x0032, 7}, {0x000D, 5}, {0x0033, 7}, {0x0E00, 13},
+ {0x0039, 7}, {0x0E01, 13}, {0x003A, 7}, {0x0E02, 13},
+ {0x0008, 4}, {0x001E, 6}, {0x003B, 7}, {0x003E, 7},
+ {0x0012, 5}, {0x003F, 7}, {0x0013, 5}, {0x0028, 6},
+ {0x0029, 6}, {0x0054, 7}, {0x002B, 6}, {0x0055, 7},
+ {0x0058, 7}, {0x0E03, 13}, {0x0059, 7}, {0x005A, 7},
+ {0x0E04, 13}, {0x0E05, 13}, {0x0703, 12}, {0x005B, 7},
+ {0x005C, 7}, {0x0704, 12}, {0x0705, 12}, {0x005D, 7},
+ {0x0706, 12}, {0x0707, 12}, {0x0708, 12}, {0x0709, 12},
+ {0x070A, 12}, {0x070B, 12}, {0x0018, 5}, {0x002F, 6},
+ {0x000D, 4}, {0x0019, 5}, {0x070C, 12}, {0x0070, 7},
+ {0x001D, 5}, {0x070D, 12}, {0x070E, 12}, {0x070F, 12},
+ {0x0710, 12}, {0x0039, 6}, {0x0711, 12}, {0x003C, 6},
+ {0x0712, 12}, {0x0713, 12}, {0x0714, 12}, {0x0715, 12},
+ {0x0716, 12}, {0x003D, 6}, {0x0717, 12}, {0x0718, 12},
+ {0x0719, 12}, {0x071A, 12}, {0x071B, 12}, {0x071C, 12},
+ {0x071D, 12}, {0x001F, 5}, {0x071E, 12}, {0x0071, 7},
+ {0x071F, 12},
+ },{//7
+ {0x0000, 3}, {0x0002, 4}, {0x0006, 5}, {0x000E, 6},
+ {0x000F, 6}, {0x0040, 8}, {0x0041, 8}, {0x0042, 8},
+ {0x0218, 11}, {0x2190, 15}, {0x2191, 15}, {0x2192, 15},
+ {0x2193, 15}, {0x2194, 15}, {0x2195, 15}, {0x2196, 15},
+ {0x0005, 4}, {0x0011, 6}, {0x0024, 7}, {0x0087, 9},
+ {0x000C, 5}, {0x004A, 8}, {0x004B, 8}, {0x0002, 2},
+ {0x0006, 3}, {0x000D, 5}, {0x000E, 5}, {0x000F, 5},
+ {0x0013, 6}, {0x0038, 6}, {0x00E4, 8}, {0x00E5, 8},
+ {0x01CC, 9}, {0x00E7, 8}, {0x0074, 7}, {0x00EA, 8},
+ {0x01CD, 9}, {0x021A, 11}, {0x2197, 15}, {0x001E, 5},
+ {0x0076, 7}, {0x00EB, 8}, {0x01DC, 9}, {0x00EF, 8},
+ {0x01DD, 9}, {0x01F0, 9}, {0x2198, 15}, {0x2199, 15},
+ {0x00F9, 8}, {0x03E2, 10}, {0x219A, 15}, {0x219B, 15},
+ {0x00FA, 8}, {0x219C, 15}, {0x219D, 15}, {0x219E, 15},
+ {0x219F, 15}, {0x01F6, 9}, {0x21B0, 15}, {0x00FC, 8},
+ {0x01F7, 9}, {0x21B1, 15}, {0x21B2, 15}, {0x21B3, 15},
+ {0x21B4, 15}, {0x01FA, 9}, {0x21B5, 15}, {0x21B6, 15},
+ {0x21B7, 15}, {0x21B8, 15}, {0x21B9, 15}, {0x03E3, 10},
+ {0x10DD, 14}, {0x007F, 7}, {0x01FB, 9}, {0x10DE, 14},
+ {0x10DF, 14},
+ }
+};
+#define MAX_AC_VLC_BITS 16
+
+#endif /* AVCODEC_INTRAX8HUF_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ituh263dec.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ituh263dec.c
new file mode 100644
index 000000000..fc997f080
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ituh263dec.c
@@ -0,0 +1,1131 @@
+/*
+ * ITU H263 bitstream decoder
+ * Copyright (c) 2000,2001 Fabrice Bellard
+ * H263+ support.
+ * Copyright (c) 2001 Juan J. Sierralta P
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/ituh263dec.c
+ * h263 decoder.
+ */
+
+//#define DEBUG
+#include <limits.h>
+
+#include "dsputil.h"
+#include "avcodec.h"
+#include "mpegvideo.h"
+#include "h263.h"
+#include "mathops.h"
+#include "unary.h"
+#include "flv.h"
+#include "mpeg4video.h"
+
+//#undef NDEBUG
+//#include <assert.h>
+
+// The defines below set the number of bits that are read at once for
+// reading VLC values. Changing these may improve speed and data cache needs;
+// be aware, though, that decreasing them may require the number of stages
+// passed to get_vlc* to be increased.
+#define MV_VLC_BITS 9
+#define H263_MBTYPE_B_VLC_BITS 6
+#define CBPC_B_VLC_BITS 3
+
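+/* Maps the H.263 B-frame macroblock type VLC index to the corresponding
+ * combination of MB_TYPE_* flags. */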
+static const int h263_mb_type_b_map[15]= {
+ MB_TYPE_DIRECT2 | MB_TYPE_L0L1,
+ MB_TYPE_DIRECT2 | MB_TYPE_L0L1 | MB_TYPE_CBP,
+ MB_TYPE_DIRECT2 | MB_TYPE_L0L1 | MB_TYPE_CBP | MB_TYPE_QUANT,
+ MB_TYPE_L0 | MB_TYPE_16x16,
+ MB_TYPE_L0 | MB_TYPE_CBP | MB_TYPE_16x16,
+ MB_TYPE_L0 | MB_TYPE_CBP | MB_TYPE_QUANT | MB_TYPE_16x16,
+ MB_TYPE_L1 | MB_TYPE_16x16,
+ MB_TYPE_L1 | MB_TYPE_CBP | MB_TYPE_16x16,
+ MB_TYPE_L1 | MB_TYPE_CBP | MB_TYPE_QUANT | MB_TYPE_16x16,
+ MB_TYPE_L0L1 | MB_TYPE_16x16,
+ MB_TYPE_L0L1 | MB_TYPE_CBP | MB_TYPE_16x16,
+ MB_TYPE_L0L1 | MB_TYPE_CBP | MB_TYPE_QUANT | MB_TYPE_16x16,
+ 0, //stuffing
+ MB_TYPE_INTRA4x4 | MB_TYPE_CBP,
+ MB_TYPE_INTRA4x4 | MB_TYPE_CBP | MB_TYPE_QUANT,
+};
+
+void ff_h263_show_pict_info(MpegEncContext *s){
+ if(s->avctx->debug&FF_DEBUG_PICT_INFO){
+ av_log(s->avctx, AV_LOG_DEBUG, "qp:%d %c size:%d rnd:%d%s%s%s%s%s%s%s%s%s %d/%d\n",
+ s->qscale, av_get_pict_type_char(s->pict_type),
+ s->gb.size_in_bits, 1-s->no_rounding,
+ s->obmc ? " AP" : "",
+ s->umvplus ? " UMV" : "",
+ s->h263_long_vectors ? " LONG" : "",
+ s->h263_plus ? " +" : "",
+ s->h263_aic ? " AIC" : "",
+ s->alt_inter_vlc ? " AIV" : "",
+ s->modified_quant ? " MQ" : "",
+ s->loop_filter ? " LOOP" : "",
+ s->h263_slice_structured ? " SS" : "",
+ s->avctx->time_base.den, s->avctx->time_base.num
+ );
+ }
+}
+
+/***********************************************/
+/* decoding */
+
+VLC ff_h263_intra_MCBPC_vlc;
+VLC ff_h263_inter_MCBPC_vlc;
+VLC ff_h263_cbpy_vlc;
+static VLC mv_vlc;
+static VLC h263_mbtype_b_vlc;
+static VLC cbpc_b_vlc;
+
+/* init vlcs */
+
+/* XXX: find a better solution to handle static init */
+void h263_decode_init_vlc(MpegEncContext *s)
+{
+ static int done = 0;
+
+ if (!done) {
+ done = 1;
+
+ INIT_VLC_STATIC(&ff_h263_intra_MCBPC_vlc, INTRA_MCBPC_VLC_BITS, 9,
+ ff_h263_intra_MCBPC_bits, 1, 1,
+ ff_h263_intra_MCBPC_code, 1, 1, 72);
+ INIT_VLC_STATIC(&ff_h263_inter_MCBPC_vlc, INTER_MCBPC_VLC_BITS, 28,
+ ff_h263_inter_MCBPC_bits, 1, 1,
+ ff_h263_inter_MCBPC_code, 1, 1, 198);
+ INIT_VLC_STATIC(&ff_h263_cbpy_vlc, CBPY_VLC_BITS, 16,
+ &ff_h263_cbpy_tab[0][1], 2, 1,
+ &ff_h263_cbpy_tab[0][0], 2, 1, 64);
+ INIT_VLC_STATIC(&mv_vlc, MV_VLC_BITS, 33,
+ &mvtab[0][1], 2, 1,
+ &mvtab[0][0], 2, 1, 538);
+ init_rl(&ff_h263_rl_inter, ff_h263_static_rl_table_store[0]);
+ init_rl(&rl_intra_aic, ff_h263_static_rl_table_store[1]);
+ INIT_VLC_RL(ff_h263_rl_inter, 554);
+ INIT_VLC_RL(rl_intra_aic, 554);
+ INIT_VLC_STATIC(&h263_mbtype_b_vlc, H263_MBTYPE_B_VLC_BITS, 15,
+ &h263_mbtype_b_tab[0][1], 2, 1,
+ &h263_mbtype_b_tab[0][0], 2, 1, 80);
+ INIT_VLC_STATIC(&cbpc_b_vlc, CBPC_B_VLC_BITS, 4,
+ &cbpc_b_tab[0][1], 2, 1,
+ &cbpc_b_tab[0][0], 2, 1, 8);
+ }
+}
+
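+/* Decode the macroblock address (MBA) field: its width is chosen from the
+ * total number of macroblocks, and the result is converted into the current
+ * mb_x/mb_y position. */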
+int ff_h263_decode_mba(MpegEncContext *s)
+{
+ int i, mb_pos;
+
+ for(i=0; i<6; i++){
+ if(s->mb_num-1 <= ff_mba_max[i]) break;
+ }
+ mb_pos= get_bits(&s->gb, ff_mba_length[i]);
+ s->mb_x= mb_pos % s->mb_width;
+ s->mb_y= mb_pos / s->mb_width;
+
+ return mb_pos;
+}
+
+/**
+ * decodes the group of blocks header or slice header.
+ * @return <0 if an error occurred
+ */
+static int h263_decode_gob_header(MpegEncContext *s)
+{
+ unsigned int val, gfid, gob_number;
+ int left;
+
+ /* Check for GOB Start Code */
+ val = show_bits(&s->gb, 16);
+ if(val)
+ return -1;
+
+ /* We have a GBSC probably with GSTUFF */
+ skip_bits(&s->gb, 16); /* Drop the zeros */
+ left= get_bits_left(&s->gb);
+ //MN: we must check the bits left or we might end up in an infinite loop (or segfault)
+ for(;left>13; left--){
+ if(get_bits1(&s->gb)) break; /* Seek the '1' bit */
+ }
+ if(left<=13)
+ return -1;
+
+ if(s->h263_slice_structured){
+ if(get_bits1(&s->gb)==0)
+ return -1;
+
+ ff_h263_decode_mba(s);
+
+ if(s->mb_num > 1583)
+ if(get_bits1(&s->gb)==0)
+ return -1;
+
+ s->qscale = get_bits(&s->gb, 5); /* SQUANT */
+ if(get_bits1(&s->gb)==0)
+ return -1;
+ gfid = get_bits(&s->gb, 2); /* GFID */
+ }else{
+ gob_number = get_bits(&s->gb, 5); /* GN */
+ s->mb_x= 0;
+ s->mb_y= s->gob_index* gob_number;
+ gfid = get_bits(&s->gb, 2); /* GFID */
+ s->qscale = get_bits(&s->gb, 5); /* GQUANT */
+ }
+
+ if(s->mb_y >= s->mb_height)
+ return -1;
+
+ if(s->qscale==0)
+ return -1;
+
+ return 0;
+}
+
+/**
+ * finds the next resync_marker
+ * @param p pointer to buffer to scan
+ * @param end pointer to the end of the buffer
+ * @return pointer to the next resync_marker, or end if none was found
+ */
+const uint8_t *ff_h263_find_resync_marker(const uint8_t *restrict p, const uint8_t * restrict end)
+{
+ assert(p < end);
+
+ end-=2;
+ p++;
+ for(;p<end; p+=2){
+ if(!*p){
+ if (!p[-1] && p[1]) return p - 1;
+ else if(!p[ 1] && p[2]) return p;
+ }
+ }
+ return end+2;
+}
+
+/**
+ * decodes the group of blocks / video packet header.
+ * @return bit position of the resync_marker, or <0 if none was found
+ */
+int ff_h263_resync(MpegEncContext *s){
+ int left, pos, ret;
+
+ if(s->codec_id==CODEC_ID_MPEG4){
+ skip_bits1(&s->gb);
+ align_get_bits(&s->gb);
+ }
+
+ if(show_bits(&s->gb, 16)==0){
+ pos= get_bits_count(&s->gb);
+ if(CONFIG_MPEG4_DECODER && s->codec_id==CODEC_ID_MPEG4)
+ ret= mpeg4_decode_video_packet_header(s);
+ else
+ ret= h263_decode_gob_header(s);
+ if(ret>=0)
+ return pos;
+ }
+ //OK, it's not where it is supposed to be ...
+ s->gb= s->last_resync_gb;
+ align_get_bits(&s->gb);
+ left= get_bits_left(&s->gb);
+
+ for(;left>16+1+5+5; left-=8){
+ if(show_bits(&s->gb, 16)==0){
+ GetBitContext bak= s->gb;
+
+ pos= get_bits_count(&s->gb);
+ if(CONFIG_MPEG4_DECODER && s->codec_id==CODEC_ID_MPEG4)
+ ret= mpeg4_decode_video_packet_header(s);
+ else
+ ret= h263_decode_gob_header(s);
+ if(ret>=0)
+ return pos;
+
+ s->gb= bak;
+ }
+ skip_bits(&s->gb, 8);
+ }
+
+ return -1;
+}
+
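+/* Decode one motion-vector component: a VLC-coded magnitude extended by
+ * f_code-1 extra bits plus a sign, added to the prediction and then wrapped
+ * into the representable range (modulo decoding), or range-adjusted in the
+ * H.263 long-vector mode. */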
+int h263_decode_motion(MpegEncContext * s, int pred, int f_code)
+{
+ int code, val, sign, shift, l;
+ code = get_vlc2(&s->gb, mv_vlc.table, MV_VLC_BITS, 2);
+
+ if (code == 0)
+ return pred;
+ if (code < 0)
+ return 0xffff;
+
+ sign = get_bits1(&s->gb);
+ shift = f_code - 1;
+ val = code;
+ if (shift) {
+ val = (val - 1) << shift;
+ val |= get_bits(&s->gb, shift);
+ val++;
+ }
+ if (sign)
+ val = -val;
+ val += pred;
+
+ /* modulo decoding */
+ if (!s->h263_long_vectors) {
+ l = INT_BIT - 5 - f_code;
+ val = (val<<l)>>l;
+ } else {
+ /* horrible h263 long vector mode */
+ if (pred < -31 && val < -63)
+ val += 64;
+ if (pred > 32 && val > 63)
+ val -= 64;
+
+ }
+ return val;
+}
+
+
+/* Decodes RVLC of H.263+ UMV */
+static int h263p_decode_umotion(MpegEncContext * s, int pred)
+{
+ int code = 0, sign;
+
+ if (get_bits1(&s->gb)) /* Motion difference = 0 */
+ return pred;
+
+ code = 2 + get_bits1(&s->gb);
+
+ while (get_bits1(&s->gb))
+ {
+ code <<= 1;
+ code += get_bits1(&s->gb);
+ }
+ sign = code & 1;
+ code >>= 1;
+
+ code = (sign) ? (pred - code) : (pred + code);
+ dprintf(s->avctx,"H.263+ UMV Motion = %d\n", code);
+ return code;
+
+}
+
+/**
+ * read the next MVs for OBMC. Yes, this is an ugly hack; feel free to send a patch :)
+ */
+static void preview_obmc(MpegEncContext *s){
+ GetBitContext gb= s->gb;
+
+ int cbpc, i, pred_x, pred_y, mx, my;
+ int16_t *mot_val;
+ const int xy= s->mb_x + 1 + s->mb_y * s->mb_stride;
+ const int stride= s->b8_stride*2;
+
+ for(i=0; i<4; i++)
+ s->block_index[i]+= 2;
+ for(i=4; i<6; i++)
+ s->block_index[i]+= 1;
+ s->mb_x++;
+
+ assert(s->pict_type == FF_P_TYPE);
+
+ do{
+ if (get_bits1(&s->gb)) {
+ /* skip mb */
+ mot_val = s->current_picture.motion_val[0][ s->block_index[0] ];
+ mot_val[0 ]= mot_val[2 ]=
+ mot_val[0+stride]= mot_val[2+stride]= 0;
+ mot_val[1 ]= mot_val[3 ]=
+ mot_val[1+stride]= mot_val[3+stride]= 0;
+
+ s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
+ goto end;
+ }
+ cbpc = get_vlc2(&s->gb, ff_h263_inter_MCBPC_vlc.table, INTER_MCBPC_VLC_BITS, 2);
+ }while(cbpc == 20);
+
+ if(cbpc & 4){
+ s->current_picture.mb_type[xy]= MB_TYPE_INTRA;
+ }else{
+ get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);
+ if (cbpc & 8) {
+ if(s->modified_quant){
+ if(get_bits1(&s->gb)) skip_bits(&s->gb, 1);
+ else skip_bits(&s->gb, 5);
+ }else
+ skip_bits(&s->gb, 2);
+ }
+
+ if ((cbpc & 16) == 0) {
+ s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
+ /* 16x16 motion prediction */
+ mot_val= h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
+ if (s->umvplus)
+ mx = h263p_decode_umotion(s, pred_x);
+ else
+ mx = h263_decode_motion(s, pred_x, 1);
+
+ if (s->umvplus)
+ my = h263p_decode_umotion(s, pred_y);
+ else
+ my = h263_decode_motion(s, pred_y, 1);
+
+ mot_val[0 ]= mot_val[2 ]=
+ mot_val[0+stride]= mot_val[2+stride]= mx;
+ mot_val[1 ]= mot_val[3 ]=
+ mot_val[1+stride]= mot_val[3+stride]= my;
+ } else {
+ s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0;
+ for(i=0;i<4;i++) {
+ mot_val = h263_pred_motion(s, i, 0, &pred_x, &pred_y);
+ if (s->umvplus)
+ mx = h263p_decode_umotion(s, pred_x);
+ else
+ mx = h263_decode_motion(s, pred_x, 1);
+
+ if (s->umvplus)
+ my = h263p_decode_umotion(s, pred_y);
+ else
+ my = h263_decode_motion(s, pred_y, 1);
+ if (s->umvplus && (mx - pred_x) == 1 && (my - pred_y) == 1)
+ skip_bits1(&s->gb); /* Bit stuffing to prevent PSC */
+ mot_val[0] = mx;
+ mot_val[1] = my;
+ }
+ }
+ }
+end:
+
+ for(i=0; i<4; i++)
+ s->block_index[i]-= 2;
+ for(i=4; i<6; i++)
+ s->block_index[i]-= 1;
+ s->mb_x--;
+
+ s->gb= gb;
+}
+
+static void h263_decode_dquant(MpegEncContext *s){
+ static const int8_t quant_tab[4] = { -1, -2, 1, 2 };
+
+ if(s->modified_quant){
+ if(get_bits1(&s->gb))
+ s->qscale= modified_quant_tab[get_bits1(&s->gb)][ s->qscale ];
+ else
+ s->qscale= get_bits(&s->gb, 5);
+ }else
+ s->qscale += quant_tab[get_bits(&s->gb, 2)];
+ ff_set_qscale(s, s->qscale);
+}
+
+static int h263_decode_block(MpegEncContext * s, DCTELEM * block,
+ int n, int coded)
+{
+ int code, level, i, j, last, run;
+ RLTable *rl = &ff_h263_rl_inter;
+ const uint8_t *scan_table;
+ GetBitContext gb= s->gb;
+
+ scan_table = s->intra_scantable.permutated;
+ if (s->h263_aic && s->mb_intra) {
+ rl = &rl_intra_aic;
+ i = 0;
+ if (s->ac_pred) {
+ if (s->h263_aic_dir)
+ scan_table = s->intra_v_scantable.permutated; /* left */
+ else
+ scan_table = s->intra_h_scantable.permutated; /* top */
+ }
+ } else if (s->mb_intra) {
+ /* DC coef */
+ if(s->codec_id == CODEC_ID_RV10){
+#if CONFIG_RV10_DECODER
+ if (s->rv10_version == 3 && s->pict_type == FF_I_TYPE) {
+ int component, diff;
+ component = (n <= 3 ? 0 : n - 4 + 1);
+ level = s->last_dc[component];
+ if (s->rv10_first_dc_coded[component]) {
+ diff = rv_decode_dc(s, n);
+ if (diff == 0xffff)
+ return -1;
+ level += diff;
+ level = level & 0xff; /* handle wrap round */
+ s->last_dc[component] = level;
+ } else {
+ s->rv10_first_dc_coded[component] = 1;
+ }
+ } else {
+ level = get_bits(&s->gb, 8);
+ if (level == 255)
+ level = 128;
+ }
+#endif
+ }else{
+ level = get_bits(&s->gb, 8);
+ if((level&0x7F) == 0){
+ av_log(s->avctx, AV_LOG_ERROR, "illegal dc %d at %d %d\n", level, s->mb_x, s->mb_y);
+ if(s->error_recognition >= FF_ER_COMPLIANT)
+ return -1;
+ }
+ if (level == 255)
+ level = 128;
+ }
+ block[0] = level;
+ i = 1;
+ } else {
+ i = 0;
+ }
+ if (!coded) {
+ if (s->mb_intra && s->h263_aic)
+ goto not_coded;
+ s->block_last_index[n] = i - 1;
+ return 0;
+ }
+retry:
+ for(;;) {
+ code = get_vlc2(&s->gb, rl->vlc.table, TEX_VLC_BITS, 2);
+ if (code < 0){
+ av_log(s->avctx, AV_LOG_ERROR, "illegal ac vlc code at %dx%d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+ if (code == rl->n) {
+ /* escape */
+ if (CONFIG_FLV_DECODER && s->h263_flv > 1) {
+ ff_flv2_decode_ac_esc(&s->gb, &level, &run, &last);
+ } else {
+ last = get_bits1(&s->gb);
+ run = get_bits(&s->gb, 6);
+ level = (int8_t)get_bits(&s->gb, 8);
+ if(level == -128){
+ if (s->codec_id == CODEC_ID_RV10) {
+ /* XXX: should patch encoder too */
+ level = get_sbits(&s->gb, 12);
+ }else{
+ level = get_bits(&s->gb, 5);
+ level |= get_sbits(&s->gb, 6)<<5;
+ }
+ }
+ }
+ } else {
+ run = rl->table_run[code];
+ level = rl->table_level[code];
+ last = code >= rl->last;
+ if (get_bits1(&s->gb))
+ level = -level;
+ }
+ i += run;
+ if (i >= 64){
+ if(s->alt_inter_vlc && rl == &ff_h263_rl_inter && !s->mb_intra){
+ //Looks like a hack but no, it's the way it is supposed to work ...
+ rl = &rl_intra_aic;
+ i = 0;
+ s->gb= gb;
+ s->dsp.clear_block(block);
+ goto retry;
+ }
+ av_log(s->avctx, AV_LOG_ERROR, "run overflow at %dx%d i:%d\n", s->mb_x, s->mb_y, s->mb_intra);
+ return -1;
+ }
+ j = scan_table[i];
+ block[j] = level;
+ if (last)
+ break;
+ i++;
+ }
+not_coded:
+ if (s->mb_intra && s->h263_aic) {
+ h263_pred_acdc(s, block, n);
+ i = 63;
+ }
+ s->block_last_index[n] = i;
+ return 0;
+}
+
+static int h263_skip_b_part(MpegEncContext *s, int cbp)
+{
+ DECLARE_ALIGNED(16, DCTELEM, dblock)[64];
+ int i, mbi;
+
+    /* we have to set s->mb_intra to zero to decode the B-part of a PB-frame correctly,
+     * but the real value must be restored afterwards, since it is used later (in the OBMC condition)
+     */
+ mbi = s->mb_intra;
+ s->mb_intra = 0;
+ for (i = 0; i < 6; i++) {
+ if (h263_decode_block(s, dblock, i, cbp&32) < 0)
+ return -1;
+ cbp+=cbp;
+ }
+ s->mb_intra = mbi;
+ return 0;
+}
+
+static int h263_get_modb(GetBitContext *gb, int pb_frame, int *cbpb)
+{
+ int c, mv = 1;
+
+ if (pb_frame < 3) { // h.263 Annex G and i263 PB-frame
+ c = get_bits1(gb);
+ if (pb_frame == 2 && c)
+ mv = !get_bits1(gb);
+ } else { // h.263 Annex M improved PB-frame
+ mv = get_unary(gb, 0, 4) + 1;
+ c = mv & 1;
+ mv = !!(mv & 2);
+ }
+ if(c)
+ *cbpb = get_bits(gb, 6);
+ return mv;
+}
+
+int ff_h263_decode_mb(MpegEncContext *s,
+ DCTELEM block[6][64])
+{
+ int cbpc, cbpy, i, cbp, pred_x, pred_y, mx, my, dquant;
+ int16_t *mot_val;
+ const int xy= s->mb_x + s->mb_y * s->mb_stride;
+ int cbpb = 0, pb_mv_count = 0;
+
+ assert(!s->h263_pred);
+
+ if (s->pict_type == FF_P_TYPE) {
+ do{
+ if (get_bits1(&s->gb)) {
+ /* skip mb */
+ s->mb_intra = 0;
+ for(i=0;i<6;i++)
+ s->block_last_index[i] = -1;
+ s->mv_dir = MV_DIR_FORWARD;
+ s->mv_type = MV_TYPE_16X16;
+ s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
+ s->mv[0][0][0] = 0;
+ s->mv[0][0][1] = 0;
+ s->mb_skipped = !(s->obmc | s->loop_filter);
+ goto end;
+ }
+ cbpc = get_vlc2(&s->gb, ff_h263_inter_MCBPC_vlc.table, INTER_MCBPC_VLC_BITS, 2);
+ if (cbpc < 0){
+ av_log(s->avctx, AV_LOG_ERROR, "cbpc damaged at %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+ }while(cbpc == 20);
+
+ s->dsp.clear_blocks(s->block[0]);
+
+ dquant = cbpc & 8;
+ s->mb_intra = ((cbpc & 4) != 0);
+ if (s->mb_intra) goto intra;
+
+ if(s->pb_frame && get_bits1(&s->gb))
+ pb_mv_count = h263_get_modb(&s->gb, s->pb_frame, &cbpb);
+ cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);
+
+ if(s->alt_inter_vlc==0 || (cbpc & 3)!=3)
+ cbpy ^= 0xF;
+
+ cbp = (cbpc & 3) | (cbpy << 2);
+ if (dquant) {
+ h263_decode_dquant(s);
+ }
+
+ s->mv_dir = MV_DIR_FORWARD;
+ if ((cbpc & 16) == 0) {
+ s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
+ /* 16x16 motion prediction */
+ s->mv_type = MV_TYPE_16X16;
+ h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
+ if (s->umvplus)
+ mx = h263p_decode_umotion(s, pred_x);
+ else
+ mx = h263_decode_motion(s, pred_x, 1);
+
+ if (mx >= 0xffff)
+ return -1;
+
+ if (s->umvplus)
+ my = h263p_decode_umotion(s, pred_y);
+ else
+ my = h263_decode_motion(s, pred_y, 1);
+
+ if (my >= 0xffff)
+ return -1;
+ s->mv[0][0][0] = mx;
+ s->mv[0][0][1] = my;
+
+ if (s->umvplus && (mx - pred_x) == 1 && (my - pred_y) == 1)
+ skip_bits1(&s->gb); /* Bit stuffing to prevent PSC */
+ } else {
+ s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0;
+ s->mv_type = MV_TYPE_8X8;
+ for(i=0;i<4;i++) {
+ mot_val = h263_pred_motion(s, i, 0, &pred_x, &pred_y);
+ if (s->umvplus)
+ mx = h263p_decode_umotion(s, pred_x);
+ else
+ mx = h263_decode_motion(s, pred_x, 1);
+ if (mx >= 0xffff)
+ return -1;
+
+ if (s->umvplus)
+ my = h263p_decode_umotion(s, pred_y);
+ else
+ my = h263_decode_motion(s, pred_y, 1);
+ if (my >= 0xffff)
+ return -1;
+ s->mv[0][i][0] = mx;
+ s->mv[0][i][1] = my;
+ if (s->umvplus && (mx - pred_x) == 1 && (my - pred_y) == 1)
+ skip_bits1(&s->gb); /* Bit stuffing to prevent PSC */
+ mot_val[0] = mx;
+ mot_val[1] = my;
+ }
+ }
+ } else if(s->pict_type==FF_B_TYPE) {
+ int mb_type;
+ const int stride= s->b8_stride;
+ int16_t *mot_val0 = s->current_picture.motion_val[0][ 2*(s->mb_x + s->mb_y*stride) ];
+ int16_t *mot_val1 = s->current_picture.motion_val[1][ 2*(s->mb_x + s->mb_y*stride) ];
+// const int mv_xy= s->mb_x + 1 + s->mb_y * s->mb_stride;
+
+ //FIXME ugly
+ mot_val0[0 ]= mot_val0[2 ]= mot_val0[0+2*stride]= mot_val0[2+2*stride]=
+ mot_val0[1 ]= mot_val0[3 ]= mot_val0[1+2*stride]= mot_val0[3+2*stride]=
+ mot_val1[0 ]= mot_val1[2 ]= mot_val1[0+2*stride]= mot_val1[2+2*stride]=
+ mot_val1[1 ]= mot_val1[3 ]= mot_val1[1+2*stride]= mot_val1[3+2*stride]= 0;
+
+ do{
+ mb_type= get_vlc2(&s->gb, h263_mbtype_b_vlc.table, H263_MBTYPE_B_VLC_BITS, 2);
+ if (mb_type < 0){
+ av_log(s->avctx, AV_LOG_ERROR, "b mb_type damaged at %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+
+ mb_type= h263_mb_type_b_map[ mb_type ];
+ }while(!mb_type);
+
+ s->mb_intra = IS_INTRA(mb_type);
+ if(HAS_CBP(mb_type)){
+ s->dsp.clear_blocks(s->block[0]);
+ cbpc = get_vlc2(&s->gb, cbpc_b_vlc.table, CBPC_B_VLC_BITS, 1);
+ if(s->mb_intra){
+ dquant = IS_QUANT(mb_type);
+ goto intra;
+ }
+
+ cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);
+
+ if (cbpy < 0){
+ av_log(s->avctx, AV_LOG_ERROR, "b cbpy damaged at %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+
+ if(s->alt_inter_vlc==0 || (cbpc & 3)!=3)
+ cbpy ^= 0xF;
+
+ cbp = (cbpc & 3) | (cbpy << 2);
+ }else
+ cbp=0;
+
+ assert(!s->mb_intra);
+
+ if(IS_QUANT(mb_type)){
+ h263_decode_dquant(s);
+ }
+
+ if(IS_DIRECT(mb_type)){
+ s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
+ mb_type |= ff_mpeg4_set_direct_mv(s, 0, 0);
+ }else{
+ s->mv_dir = 0;
+ s->mv_type= MV_TYPE_16X16;
+//FIXME UMV
+
+ if(USES_LIST(mb_type, 0)){
+ int16_t *mot_val= h263_pred_motion(s, 0, 0, &mx, &my);
+ s->mv_dir = MV_DIR_FORWARD;
+
+ mx = h263_decode_motion(s, mx, 1);
+ my = h263_decode_motion(s, my, 1);
+
+ s->mv[0][0][0] = mx;
+ s->mv[0][0][1] = my;
+ mot_val[0 ]= mot_val[2 ]= mot_val[0+2*stride]= mot_val[2+2*stride]= mx;
+ mot_val[1 ]= mot_val[3 ]= mot_val[1+2*stride]= mot_val[3+2*stride]= my;
+ }
+
+ if(USES_LIST(mb_type, 1)){
+ int16_t *mot_val= h263_pred_motion(s, 0, 1, &mx, &my);
+ s->mv_dir |= MV_DIR_BACKWARD;
+
+ mx = h263_decode_motion(s, mx, 1);
+ my = h263_decode_motion(s, my, 1);
+
+ s->mv[1][0][0] = mx;
+ s->mv[1][0][1] = my;
+ mot_val[0 ]= mot_val[2 ]= mot_val[0+2*stride]= mot_val[2+2*stride]= mx;
+ mot_val[1 ]= mot_val[3 ]= mot_val[1+2*stride]= mot_val[3+2*stride]= my;
+ }
+ }
+
+ s->current_picture.mb_type[xy]= mb_type;
+ } else { /* I-Frame */
+ do{
+ cbpc = get_vlc2(&s->gb, ff_h263_intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 2);
+ if (cbpc < 0){
+ av_log(s->avctx, AV_LOG_ERROR, "I cbpc damaged at %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+ }while(cbpc == 8);
+
+ s->dsp.clear_blocks(s->block[0]);
+
+ dquant = cbpc & 4;
+ s->mb_intra = 1;
+intra:
+ s->current_picture.mb_type[xy]= MB_TYPE_INTRA;
+ if (s->h263_aic) {
+ s->ac_pred = get_bits1(&s->gb);
+ if(s->ac_pred){
+ s->current_picture.mb_type[xy]= MB_TYPE_INTRA | MB_TYPE_ACPRED;
+
+ s->h263_aic_dir = get_bits1(&s->gb);
+ }
+ }else
+ s->ac_pred = 0;
+
+ if(s->pb_frame && get_bits1(&s->gb))
+ pb_mv_count = h263_get_modb(&s->gb, s->pb_frame, &cbpb);
+ cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);
+ if(cbpy<0){
+ av_log(s->avctx, AV_LOG_ERROR, "I cbpy damaged at %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+ cbp = (cbpc & 3) | (cbpy << 2);
+ if (dquant) {
+ h263_decode_dquant(s);
+ }
+
+ pb_mv_count += !!s->pb_frame;
+ }
+
+ while(pb_mv_count--){
+ h263_decode_motion(s, 0, 1);
+ h263_decode_motion(s, 0, 1);
+ }
+
+ /* decode each block */
+ for (i = 0; i < 6; i++) {
+ if (h263_decode_block(s, block[i], i, cbp&32) < 0)
+ return -1;
+ cbp+=cbp;
+ }
+
+ if(s->pb_frame && h263_skip_b_part(s, cbpb) < 0)
+ return -1;
+ if(s->obmc && !s->mb_intra){
+ if(s->pict_type == FF_P_TYPE && s->mb_x+1<s->mb_width && s->mb_num_left != 1)
+ preview_obmc(s);
+ }
+end:
+
+ /* per-MB end of slice check */
+ {
+ int v= show_bits(&s->gb, 16);
+
+ if(get_bits_count(&s->gb) + 16 > s->gb.size_in_bits){
+ v>>= get_bits_count(&s->gb) + 16 - s->gb.size_in_bits;
+ }
+
+ if(v==0)
+ return SLICE_END;
+ }
+
+ return SLICE_OK;
+}
+
+/* most of this is hardcoded; it should be extended to handle all H.263 streams */
+int h263_decode_picture_header(MpegEncContext *s)
+{
+ int format, width, height, i;
+ uint32_t startcode;
+
+ align_get_bits(&s->gb);
+
+ startcode= get_bits(&s->gb, 22-8);
+
+ for(i= get_bits_left(&s->gb); i>24; i-=8) {
+ startcode = ((startcode << 8) | get_bits(&s->gb, 8)) & 0x003FFFFF;
+
+ if(startcode == 0x20)
+ break;
+ }
+
+ if (startcode != 0x20) {
+ av_log(s->avctx, AV_LOG_ERROR, "Bad picture start code\n");
+ return -1;
+ }
+ /* temporal reference */
+ i = get_bits(&s->gb, 8); /* picture timestamp */
+ if( (s->picture_number&~0xFF)+i < s->picture_number)
+ i+= 256;
+ s->current_picture_ptr->pts=
+ s->picture_number= (s->picture_number&~0xFF) + i;
+
+ /* PTYPE starts here */
+ if (get_bits1(&s->gb) != 1) {
+ /* marker */
+ av_log(s->avctx, AV_LOG_ERROR, "Bad marker\n");
+ return -1;
+ }
+ if (get_bits1(&s->gb) != 0) {
+ av_log(s->avctx, AV_LOG_ERROR, "Bad H263 id\n");
+ return -1; /* h263 id */
+ }
+ skip_bits1(&s->gb); /* split screen off */
+ skip_bits1(&s->gb); /* camera off */
+ skip_bits1(&s->gb); /* freeze picture release off */
+
+ format = get_bits(&s->gb, 3);
+    /*
+        0  forbidden
+        1  sub-QCIF
+        2  QCIF
+        7  extended PTYPE (PLUSPTYPE)
+    */
+
+ if (format != 7 && format != 6) {
+ s->h263_plus = 0;
+ /* H.263v1 */
+ width = h263_format[format][0];
+ height = h263_format[format][1];
+ if (!width)
+ return -1;
+
+ s->pict_type = FF_I_TYPE + get_bits1(&s->gb);
+
+ s->h263_long_vectors = get_bits1(&s->gb);
+
+ if (get_bits1(&s->gb) != 0) {
+ av_log(s->avctx, AV_LOG_ERROR, "H263 SAC not supported\n");
+ return -1; /* SAC: off */
+ }
+ s->obmc= get_bits1(&s->gb); /* Advanced prediction mode */
+ s->unrestricted_mv = s->h263_long_vectors || s->obmc;
+
+ s->pb_frame = get_bits1(&s->gb);
+ s->chroma_qscale= s->qscale = get_bits(&s->gb, 5);
+ skip_bits1(&s->gb); /* Continuous Presence Multipoint mode: off */
+
+ s->width = width;
+ s->height = height;
+ s->avctx->sample_aspect_ratio.num=12;s->avctx->sample_aspect_ratio.den=11;// = (AVRational){12,11};
+ s->avctx->time_base.num=1001;s->avctx->time_base.den=30000;// (AVRational){1001, 30000};
+ } else {
+ int ufep;
+
+ /* H.263v2 */
+ s->h263_plus = 1;
+ ufep = get_bits(&s->gb, 3); /* Update Full Extended PTYPE */
+
+ /* ufep other than 0 and 1 are reserved */
+ if (ufep == 1) {
+ /* OPPTYPE */
+ format = get_bits(&s->gb, 3);
+ dprintf(s->avctx, "ufep=1, format: %d\n", format);
+ s->custom_pcf= get_bits1(&s->gb);
+ s->umvplus = get_bits1(&s->gb); /* Unrestricted Motion Vector */
+ if (get_bits1(&s->gb) != 0) {
+ av_log(s->avctx, AV_LOG_ERROR, "Syntax-based Arithmetic Coding (SAC) not supported\n");
+ }
+ s->obmc= get_bits1(&s->gb); /* Advanced prediction mode */
+ s->h263_aic = get_bits1(&s->gb); /* Advanced Intra Coding (AIC) */
+ s->loop_filter= get_bits1(&s->gb);
+ s->unrestricted_mv = s->umvplus || s->obmc || s->loop_filter;
+
+ s->h263_slice_structured= get_bits1(&s->gb);
+ if (get_bits1(&s->gb) != 0) {
+ av_log(s->avctx, AV_LOG_ERROR, "Reference Picture Selection not supported\n");
+ }
+ if (get_bits1(&s->gb) != 0) {
+ av_log(s->avctx, AV_LOG_ERROR, "Independent Segment Decoding not supported\n");
+ }
+ s->alt_inter_vlc= get_bits1(&s->gb);
+ s->modified_quant= get_bits1(&s->gb);
+ if(s->modified_quant)
+ s->chroma_qscale_table= ff_h263_chroma_qscale_table;
+
+ skip_bits(&s->gb, 1); /* Prevent start code emulation */
+
+ skip_bits(&s->gb, 3); /* Reserved */
+ } else if (ufep != 0) {
+ av_log(s->avctx, AV_LOG_ERROR, "Bad UFEP type (%d)\n", ufep);
+ return -1;
+ }
+
+ /* MPPTYPE */
+ s->pict_type = get_bits(&s->gb, 3);
+ switch(s->pict_type){
+ case 0: s->pict_type= FF_I_TYPE;break;
+ case 1: s->pict_type= FF_P_TYPE;break;
+ case 2: s->pict_type= FF_P_TYPE;s->pb_frame = 3;break;
+ case 3: s->pict_type= FF_B_TYPE;break;
+ case 7: s->pict_type= FF_I_TYPE;break; //ZYGO
+ default:
+ return -1;
+ }
+ skip_bits(&s->gb, 2);
+ s->no_rounding = get_bits1(&s->gb);
+ skip_bits(&s->gb, 4);
+
+ /* Get the picture dimensions */
+ if (ufep) {
+ if (format == 6) {
+ /* Custom Picture Format (CPFMT) */
+ s->aspect_ratio_info = get_bits(&s->gb, 4);
+ dprintf(s->avctx, "aspect: %d\n", s->aspect_ratio_info);
+ /* aspect ratios:
+ 0 - forbidden
+ 1 - 1:1
+ 2 - 12:11 (CIF 4:3)
+ 3 - 10:11 (525-type 4:3)
+ 4 - 16:11 (CIF 16:9)
+ 5 - 40:33 (525-type 16:9)
+ 6-14 - reserved
+ */
+ width = (get_bits(&s->gb, 9) + 1) * 4;
+ skip_bits1(&s->gb);
+ height = get_bits(&s->gb, 9) * 4;
+ dprintf(s->avctx, "\nH.263+ Custom picture: %dx%d\n",width,height);
+ if (s->aspect_ratio_info == FF_ASPECT_EXTENDED) {
+ /* aspected dimensions */
+ s->avctx->sample_aspect_ratio.num= get_bits(&s->gb, 8);
+ s->avctx->sample_aspect_ratio.den= get_bits(&s->gb, 8);
+ }else{
+ s->avctx->sample_aspect_ratio= ff_h263_pixel_aspect[s->aspect_ratio_info];
+ }
+ } else {
+ width = h263_format[format][0];
+ height = h263_format[format][1];
+ s->avctx->sample_aspect_ratio.num=12;s->avctx->sample_aspect_ratio.den=11; //(AVRational){12,11};
+ }
+ if ((width == 0) || (height == 0))
+ return -1;
+ s->width = width;
+ s->height = height;
+
+ if(s->custom_pcf){
+ int gcd;
+ s->avctx->time_base.den= 1800000;
+ s->avctx->time_base.num= 1000 + get_bits1(&s->gb);
+ s->avctx->time_base.num*= get_bits(&s->gb, 7);
+ if(s->avctx->time_base.num == 0){
+ av_log(s, AV_LOG_ERROR, "zero framerate\n");
+ return -1;
+ }
+ gcd= av_gcd(s->avctx->time_base.den, s->avctx->time_base.num);
+ s->avctx->time_base.den /= gcd;
+ s->avctx->time_base.num /= gcd;
+ }else{
+ s->avctx->time_base.num=1001;s->avctx->time_base.den=30000;// (AVRational){1001, 30000};
+ }
+ }
+
+ if(s->custom_pcf){
+ skip_bits(&s->gb, 2); //extended Temporal reference
+ }
+
+ if (ufep) {
+ if (s->umvplus) {
+ if(get_bits1(&s->gb)==0) /* Unlimited Unrestricted Motion Vectors Indicator (UUI) */
+ skip_bits1(&s->gb);
+ }
+ if(s->h263_slice_structured){
+ if (get_bits1(&s->gb) != 0) {
+ av_log(s->avctx, AV_LOG_ERROR, "rectangular slices not supported\n");
+ }
+ if (get_bits1(&s->gb) != 0) {
+ av_log(s->avctx, AV_LOG_ERROR, "unordered slices not supported\n");
+ }
+ }
+ }
+
+ s->qscale = get_bits(&s->gb, 5);
+ }
+
+ s->mb_width = (s->width + 15) / 16;
+ s->mb_height = (s->height + 15) / 16;
+ s->mb_num = s->mb_width * s->mb_height;
+
+ if (s->pb_frame) {
+ skip_bits(&s->gb, 3); /* Temporal reference for B-pictures */
+ if (s->custom_pcf)
+ skip_bits(&s->gb, 2); //extended Temporal reference
+ skip_bits(&s->gb, 2); /* Quantization information for B-pictures */
+ }
+
+ /* PEI */
+ while (get_bits1(&s->gb) != 0) {
+ skip_bits(&s->gb, 8);
+ }
+
+ if(s->h263_slice_structured){
+ if (get_bits1(&s->gb) != 1) {
+ av_log(s->avctx, AV_LOG_ERROR, "SEPB1 marker missing\n");
+ return -1;
+ }
+
+ ff_h263_decode_mba(s);
+
+ if (get_bits1(&s->gb) != 1) {
+ av_log(s->avctx, AV_LOG_ERROR, "SEPB2 marker missing\n");
+ return -1;
+ }
+ }
+ s->f_code = 1;
+
+ if(s->h263_aic){
+ s->y_dc_scale_table=
+ s->c_dc_scale_table= ff_aic_dc_scale_table;
+ }else{
+ s->y_dc_scale_table=
+ s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
+ }
+
+ ff_h263_show_pict_info(s);
+ if (s->pict_type == FF_I_TYPE && s->codec_tag == AV_RL32("ZYGO")){
+ int i,j;
+ for(i=0; i<85; i++) av_log(s->avctx, AV_LOG_DEBUG, "%d", get_bits1(&s->gb));
+ av_log(s->avctx, AV_LOG_DEBUG, "\n");
+ for(i=0; i<13; i++){
+ for(j=0; j<3; j++){
+ int v= get_bits(&s->gb, 8);
+ v |= get_sbits(&s->gb, 8)<<8;
+ av_log(s->avctx, AV_LOG_DEBUG, " %5d", v);
+ }
+ av_log(s->avctx, AV_LOG_DEBUG, "\n");
+ }
+ for(i=0; i<50; i++) av_log(s->avctx, AV_LOG_DEBUG, "%d", get_bits1(&s->gb));
+ }
+
+ return 0;
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/jpegls.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/jpegls.c
new file mode 100644
index 000000000..732f45d80
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/jpegls.c
@@ -0,0 +1,89 @@
+/*
+ * JPEG-LS common code
+ * Copyright (c) 2003 Michael Niedermayer
+ * Copyright (c) 2006 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file jpegls.c
+ * JPEG-LS common code.
+ */
+
+#include "jpegls.h"
+
+void ff_jpegls_init_state(JLSState *state){
+ int i;
+
+ state->twonear = state->near * 2 + 1;
+ state->range = ((state->maxval + state->twonear - 1) / state->twonear) + 1;
+
+ // QBPP = ceil(log2(RANGE))
+ for(state->qbpp = 0; (1 << state->qbpp) < state->range; state->qbpp++);
+
+ if(state->bpp < 8)
+ state->limit = 16 + 2 * state->bpp - state->qbpp;
+ else
+ state->limit = (4 * state->bpp) - state->qbpp;
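+    // e.g. 8-bit lossless (bpp = 8, near = 0, maxval = 255): twonear = 1, range = 256, qbpp = 8, limit = 24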
+
+ for(i = 0; i < 367; i++) {
+ state->A[i] = FFMAX((state->range + 32) >> 6, 2);
+ state->N[i] = 1;
+ }
+
+}
+
+/**
+ * Custom value clipping function used in T1, T2, T3 calculation
+ */
+static inline int iso_clip(int v, int vmin, int vmax){
+ if(v > vmax || v < vmin) return vmin;
+ else return v;
+}
+
+void ff_jpegls_reset_coding_parameters(JLSState *s, int reset_all){
+ const int basic_t1= 3;
+ const int basic_t2= 7;
+ const int basic_t3= 21;
+ int factor;
+
+ if(s->maxval==0 || reset_all) s->maxval= (1 << s->bpp) - 1;
+
+ if(s->maxval >=128){
+ factor= (FFMIN(s->maxval, 4095) + 128)>>8;
+
+ if(s->T1==0 || reset_all)
+ s->T1= iso_clip(factor*(basic_t1-2) + 2 + 3*s->near, s->near+1, s->maxval);
+ if(s->T2==0 || reset_all)
+ s->T2= iso_clip(factor*(basic_t2-3) + 3 + 5*s->near, s->T1, s->maxval);
+ if(s->T3==0 || reset_all)
+ s->T3= iso_clip(factor*(basic_t3-4) + 4 + 7*s->near, s->T2, s->maxval);
+ }else{
+ factor= 256 / (s->maxval + 1);
+
+ if(s->T1==0 || reset_all)
+ s->T1= iso_clip(FFMAX(2, basic_t1/factor + 3*s->near), s->near+1, s->maxval);
+ if(s->T2==0 || reset_all)
+ s->T2= iso_clip(FFMAX(3, basic_t2/factor + 5*s->near), s->T1, s->maxval);
+ if(s->T3==0 || reset_all)
+ s->T3= iso_clip(FFMAX(4, basic_t3/factor + 7*s->near), s->T2, s->maxval);
+ }
+
+ if(s->reset==0 || reset_all) s->reset= 64;
+// av_log(NULL, AV_LOG_DEBUG, "[JPEG-LS RESET] T=%i,%i,%i\n", s->T1, s->T2, s->T3);
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/jpegls.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/jpegls.h
new file mode 100644
index 000000000..61d64cd1c
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/jpegls.h
@@ -0,0 +1,111 @@
+/*
+ * JPEG-LS common code
+ * Copyright (c) 2003 Michael Niedermayer
+ * Copyright (c) 2006 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file jpegls.h
+ * JPEG-LS common code.
+ */
+
+#ifndef AVCODEC_JPEGLS_H
+#define AVCODEC_JPEGLS_H
+
+#include "avcodec.h"
+
+typedef struct JpeglsContext{
+ AVCodecContext *avctx;
+ AVFrame picture;
+}JpeglsContext;
+
+typedef struct JLSState{
+ int T1, T2, T3;
+ int A[367], B[367], C[365], N[367];
+ int limit, reset, bpp, qbpp, maxval, range;
+ int near, twonear;
+ int run_index[3];
+}JLSState;
+
+extern const uint8_t ff_log2_run[32];
+
+/**
+ * Calculate initial JPEG-LS parameters
+ */
+void ff_jpegls_init_state(JLSState *state);
+
+/**
+ * Calculate quantized gradient value, used for context determination
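+ * e.g. with the 8-bit lossless defaults (T1 = 3, T2 = 7, T3 = 21, NEAR = 0), v = 5 maps to 2 and v = -5 to -2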
+ */
+static inline int ff_jpegls_quantize(JLSState *s, int v){ //FIXME optimize
+ if(v==0) return 0;
+ if(v < 0){
+ if(v <= -s->T3) return -4;
+ if(v <= -s->T2) return -3;
+ if(v <= -s->T1) return -2;
+ if(v < -s->near) return -1;
+ return 0;
+ }else{
+ if(v <= s->near) return 0;
+ if(v < s->T1) return 1;
+ if(v < s->T2) return 2;
+ if(v < s->T3) return 3;
+ return 4;
+ }
+}
+
+/**
+ * Calculate JPEG-LS codec values
+ */
+void ff_jpegls_reset_coding_parameters(JLSState *s, int reset_all);
+
+
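+/* halve the A, B and N statistics once N[Q] reaches the reset threshold (64 by default), so the context model keeps adapting */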
+static inline void ff_jpegls_downscale_state(JLSState *state, int Q){
+ if(state->N[Q] == state->reset){
+ state->A[Q] >>=1;
+ state->B[Q] >>=1;
+ state->N[Q] >>=1;
+ }
+ state->N[Q]++;
+}
+
+static inline int ff_jpegls_update_state_regular(JLSState *state, int Q, int err){
+ state->A[Q] += FFABS(err);
+ err *= state->twonear;
+ state->B[Q] += err;
+
+ ff_jpegls_downscale_state(state, Q);
+
+ if(state->B[Q] <= -state->N[Q]) {
+ state->B[Q]= FFMAX(state->B[Q] + state->N[Q], 1-state->N[Q]);
+ if(state->C[Q] > -128)
+ state->C[Q]--;
+ }else if(state->B[Q] > 0){
+ state->B[Q]= FFMIN(state->B[Q] - state->N[Q], 0);
+ if(state->C[Q] < 127)
+ state->C[Q]++;
+ }
+
+ return err;
+}
+
+#define R(a, i ) (bits == 8 ? ((uint8_t*)(a))[i] : ((uint16_t*)(a))[i] )
+#define W(a, i, v) (bits == 8 ? (((uint8_t*)(a))[i]=v) : (((uint16_t*)(a))[i]=v))
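+/* R() and W() read/write sample i of line a as 8 or 16 bits, depending on the caller's local 'bits' variable */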
+
+#endif /* AVCODEC_JPEGLS_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/jpeglsdec.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/jpeglsdec.c
new file mode 100644
index 000000000..d363f3702
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/jpeglsdec.c
@@ -0,0 +1,381 @@
+/*
+ * JPEG-LS decoder
+ * Copyright (c) 2003 Michael Niedermayer
+ * Copyright (c) 2006 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/jpeglsdec.c
+ * JPEG-LS decoder.
+ */
+
+#include "avcodec.h"
+#include "get_bits.h"
+#include "golomb.h"
+#include "mathops.h"
+#include "mjpeg.h"
+#include "mjpegdec.h"
+#include "jpegls.h"
+#include "jpeglsdec.h"
+
+
+/*
+ * Uncomment this to significantly speed up decoding of broken JPEG-LS
+ * (or to test a broken JPEG-LS decoder), at the cost of slightly slower ordinary decoding.
+ *
+ * No Golomb code longer than 32 bits is possible, so check for and avoid a
+ * run of 32 zero bits; FFmpeg's Golomb decoder is painfully slow on such errors.
+ */
+//#define JLS_BROKEN
+
+
+/**
+ * Decode LSE block with initialization parameters
+ */
+int ff_jpegls_decode_lse(MJpegDecodeContext *s)
+{
+ int len, id;
+
+ /* XXX: verify len field validity */
+ len = get_bits(&s->gb, 16);
+ id = get_bits(&s->gb, 8);
+
+ switch(id){
+ case 1:
+ s->maxval= get_bits(&s->gb, 16);
+ s->t1= get_bits(&s->gb, 16);
+ s->t2= get_bits(&s->gb, 16);
+ s->t3= get_bits(&s->gb, 16);
+ s->reset= get_bits(&s->gb, 16);
+
+// ff_jpegls_reset_coding_parameters(s, 0);
+ //FIXME quant table?
+ break;
+ case 2:
+ case 3:
+ av_log(s->avctx, AV_LOG_ERROR, "palette not supported\n");
+ return -1;
+ case 4:
+ av_log(s->avctx, AV_LOG_ERROR, "oversize image not supported\n");
+ return -1;
+ default:
+ av_log(s->avctx, AV_LOG_ERROR, "invalid id %d\n", id);
+ return -1;
+ }
+// av_log(s->avctx, AV_LOG_DEBUG, "ID=%i, T=%i,%i,%i\n", id, s->t1, s->t2, s->t3);
+
+ return 0;
+}
+
+/**
+ * Get context-dependent Golomb code, decode it and update context
+ */
+static inline int ls_get_code_regular(GetBitContext *gb, JLSState *state, int Q){
+ int k, ret;
+
+ for(k = 0; (state->N[Q] << k) < state->A[Q]; k++);
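+    /* k is now the smallest integer with N[Q] << k >= A[Q], i.e. roughly log2 of the mean absolute prediction error */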
+
+#ifdef JLS_BROKEN
+ if(!show_bits_long(gb, 32))return -1;
+#endif
+ ret = get_ur_golomb_jpegls(gb, k, state->limit, state->qbpp);
+
+ /* decode mapped error */
+ if(ret & 1)
+ ret = -((ret + 1) >> 1);
+ else
+ ret >>= 1;
+
+ /* for NEAR=0, k=0 and 2*B[Q] <= - N[Q] mapping is reversed */
+ if(!state->near && !k && (2 * state->B[Q] <= -state->N[Q]))
+ ret = -(ret + 1);
+
+ ret= ff_jpegls_update_state_regular(state, Q, ret);
+
+ return ret;
+}
+
+/**
+ * Get Golomb code, decode it and update state for run termination
+ */
+static inline int ls_get_code_runterm(GetBitContext *gb, JLSState *state, int RItype, int limit_add){
+ int k, ret, temp, map;
+ int Q = 365 + RItype;
+
+ temp= state->A[Q];
+ if(RItype)
+ temp += state->N[Q] >> 1;
+
+ for(k = 0; (state->N[Q] << k) < temp; k++);
+
+#ifdef JLS_BROKEN
+ if(!show_bits_long(gb, 32))return -1;
+#endif
+ ret = get_ur_golomb_jpegls(gb, k, state->limit - limit_add - 1, state->qbpp);
+
+ /* decode mapped error */
+ map = 0;
+ if(!k && (RItype || ret) && (2 * state->B[Q] < state->N[Q]))
+ map = 1;
+ ret += RItype + map;
+
+ if(ret & 1){
+ ret = map - ((ret + 1) >> 1);
+ state->B[Q]++;
+ } else {
+ ret = ret >> 1;
+ }
+
+ /* update state */
+ state->A[Q] += FFABS(ret) - RItype;
+ ret *= state->twonear;
+ ff_jpegls_downscale_state(state, Q);
+
+ return ret;
+}
+
+/**
+ * Decode one line of image
+ */
+static inline void ls_decode_line(JLSState *state, MJpegDecodeContext *s, void *last, void *dst, int last2, int w, int stride, int comp, int bits){
+ int i, x = 0;
+ int Ra, Rb, Rc, Rd;
+ int D0, D1, D2;
+
+ while(x < w) {
+ int err, pred;
+
+ /* compute gradients */
+ Ra = x ? R(dst, x - stride) : R(last, x);
+ Rb = R(last, x);
+ Rc = x ? R(last, x - stride) : last2;
+ Rd = (x >= w - stride) ? R(last, x) : R(last, x + stride);
+ D0 = Rd - Rb;
+ D1 = Rb - Rc;
+ D2 = Rc - Ra;
+ /* run mode */
+ if((FFABS(D0) <= state->near) && (FFABS(D1) <= state->near) && (FFABS(D2) <= state->near)) {
+ int r;
+ int RItype;
+
+ /* decode full runs while available */
+ while(get_bits1(&s->gb)) {
+ int r;
+ r = 1 << ff_log2_run[state->run_index[comp]];
+ if(x + r * stride > w) {
+ r = (w - x) / stride;
+ }
+ for(i = 0; i < r; i++) {
+ W(dst, x, Ra);
+ x += stride;
+ }
+ /* if EOL reached, we stop decoding */
+ if(r != (1 << ff_log2_run[state->run_index[comp]]))
+ return;
+ if(state->run_index[comp] < 31)
+ state->run_index[comp]++;
+ if(x + stride > w)
+ return;
+ }
+ /* decode aborted run */
+ r = ff_log2_run[state->run_index[comp]];
+ if(r)
+ r = get_bits_long(&s->gb, r);
+ for(i = 0; i < r; i++) {
+ W(dst, x, Ra);
+ x += stride;
+ }
+
+ /* decode run termination value */
+ Rb = R(last, x);
+ RItype = (FFABS(Ra - Rb) <= state->near) ? 1 : 0;
+ err = ls_get_code_runterm(&s->gb, state, RItype, ff_log2_run[state->run_index[comp]]);
+ if(state->run_index[comp])
+ state->run_index[comp]--;
+
+ if(state->near && RItype){
+ pred = Ra + err;
+ } else {
+ if(Rb < Ra)
+ pred = Rb - err;
+ else
+ pred = Rb + err;
+ }
+ } else { /* regular mode */
+ int context, sign;
+
+ context = ff_jpegls_quantize(state, D0) * 81 + ff_jpegls_quantize(state, D1) * 9 + ff_jpegls_quantize(state, D2);
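+            /* |context| indexes the 365 regular-mode contexts (cf. C[365] in JLSState); its sign is folded out just below */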
+ pred = mid_pred(Ra, Ra + Rb - Rc, Rb);
+
+ if(context < 0){
+ context = -context;
+ sign = 1;
+ }else{
+ sign = 0;
+ }
+
+ if(sign){
+ pred = av_clip(pred - state->C[context], 0, state->maxval);
+ err = -ls_get_code_regular(&s->gb, state, context);
+ } else {
+ pred = av_clip(pred + state->C[context], 0, state->maxval);
+ err = ls_get_code_regular(&s->gb, state, context);
+ }
+
+ /* we have to do something more for near-lossless coding */
+ pred += err;
+ }
+ if(state->near){
+ if(pred < -state->near)
+ pred += state->range * state->twonear;
+ else if(pred > state->maxval + state->near)
+ pred -= state->range * state->twonear;
+ pred = av_clip(pred, 0, state->maxval);
+ }
+
+ pred &= state->maxval;
+ W(dst, x, pred);
+ x += stride;
+ }
+}
+
+int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transform, int ilv){
+ int i, t = 0;
+ uint8_t *zero, *last, *cur;
+ JLSState *state;
+ int off = 0, stride = 1, width, shift;
+
+ zero = av_mallocz(s->picture.linesize[0]);
+ last = zero;
+ cur = s->picture.data[0];
+
+ state = av_mallocz(sizeof(JLSState));
+ /* initialize JPEG-LS state from JPEG parameters */
+ state->near = near;
+ state->bpp = (s->bits < 2) ? 2 : s->bits;
+ state->maxval = s->maxval;
+ state->T1 = s->t1;
+ state->T2 = s->t2;
+ state->T3 = s->t3;
+ state->reset = s->reset;
+ ff_jpegls_reset_coding_parameters(state, 0);
+ ff_jpegls_init_state(state);
+
+ if(s->bits <= 8)
+ shift = point_transform + (8 - s->bits);
+ else
+ shift = point_transform + (16 - s->bits);
+
+// av_log(s->avctx, AV_LOG_DEBUG, "JPEG-LS params: %ix%i NEAR=%i MV=%i T(%i,%i,%i) RESET=%i, LIMIT=%i, qbpp=%i, RANGE=%i\n",s->width,s->height,state->near,state->maxval,state->T1,state->T2,state->T3,state->reset,state->limit,state->qbpp, state->range);
+// av_log(s->avctx, AV_LOG_DEBUG, "JPEG params: ILV=%i Pt=%i BPP=%i, scan = %i\n", ilv, point_transform, s->bits, s->cur_scan);
+ if(ilv == 0) { /* separate planes */
+ off = s->cur_scan - 1;
+ stride = (s->nb_components > 1) ? 3 : 1;
+ width = s->width * stride;
+ cur += off;
+ for(i = 0; i < s->height; i++) {
+ if(s->bits <= 8){
+ ls_decode_line(state, s, last, cur, t, width, stride, off, 8);
+ t = last[0];
+ }else{
+ ls_decode_line(state, s, last, cur, t, width, stride, off, 16);
+ t = *((uint16_t*)last);
+ }
+ last = cur;
+ cur += s->picture.linesize[0];
+
+ if (s->restart_interval && !--s->restart_count) {
+ align_get_bits(&s->gb);
+ skip_bits(&s->gb, 16); /* skip RSTn */
+ }
+ }
+ } else if(ilv == 1) { /* line interleaving */
+ int j;
+ int Rc[3] = {0, 0, 0};
+ memset(cur, 0, s->picture.linesize[0]);
+ width = s->width * 3;
+ for(i = 0; i < s->height; i++) {
+ for(j = 0; j < 3; j++) {
+ ls_decode_line(state, s, last + j, cur + j, Rc[j], width, 3, j, 8);
+ Rc[j] = last[j];
+
+ if (s->restart_interval && !--s->restart_count) {
+ align_get_bits(&s->gb);
+ skip_bits(&s->gb, 16); /* skip RSTn */
+ }
+ }
+ last = cur;
+ cur += s->picture.linesize[0];
+ }
+ } else if(ilv == 2) { /* sample interleaving */
+ av_log(s->avctx, AV_LOG_ERROR, "Sample interleaved images are not supported.\n");
+ av_free(state);
+ av_free(zero);
+ return -1;
+ }
+
+ if(shift){ /* we need to do point transform or normalize samples */
+ int x, w;
+
+ w = s->width * s->nb_components;
+
+ if(s->bits <= 8){
+ uint8_t *src = s->picture.data[0];
+
+ for(i = 0; i < s->height; i++){
+ for(x = off; x < w; x+= stride){
+ src[x] <<= shift;
+ }
+ src += s->picture.linesize[0];
+ }
+ }else{
+ uint16_t *src = (uint16_t*) s->picture.data[0];
+
+ for(i = 0; i < s->height; i++){
+ for(x = 0; x < w; x++){
+ src[x] <<= shift;
+ }
+ src += s->picture.linesize[0]/2;
+ }
+ }
+ }
+ av_free(state);
+ av_free(zero);
+
+ return 0;
+}
+
+
+AVCodec jpegls_decoder = {
+ "jpegls",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_JPEGLS,
+ sizeof(MJpegDecodeContext),
+ /*.init=*/ff_mjpeg_decode_init,
+ /*.encode=*/NULL,
+ /*.close=*/ff_mjpeg_decode_end,
+ /*.decode=*/ff_mjpeg_decode_frame,
+ /*.capabilities=*/CODEC_CAP_DR1,
+ /*.next = */NULL,
+ /*.flush = */NULL,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name = */NULL_IF_CONFIG_SMALL("JPEG-LS"),
+};
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/jpeglsdec.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/jpeglsdec.h
new file mode 100644
index 000000000..13eec758d
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/jpeglsdec.h
@@ -0,0 +1,41 @@
+/*
+ * JPEG-LS decoder
+ * Copyright (c) 2003 Michael Niedermayer
+ * Copyright (c) 2006 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file jpeglsdec.h
+ * JPEG-LS decoder.
+ */
+
+#ifndef AVCODEC_JPEGLSDEC_H
+#define AVCODEC_JPEGLSDEC_H
+
+#include "mjpeg.h"
+#include "mjpegdec.h"
+
+/**
+ * Decode LSE block with initialization parameters
+ */
+int ff_jpegls_decode_lse(MJpegDecodeContext *s);
+
+int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transform, int ilv);
+
+#endif /* AVCODEC_JPEGLSDEC_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/jrevdct.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/jrevdct.c
new file mode 100644
index 000000000..46a692bd8
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/jrevdct.c
@@ -0,0 +1,1157 @@
+/*
+ * jrevdct.c
+ *
+ * This file is part of the Independent JPEG Group's software.
+ *
+ * The authors make NO WARRANTY or representation, either express or implied,
+ * with respect to this software, its quality, accuracy, merchantability, or
+ * fitness for a particular purpose. This software is provided "AS IS", and
+ * you, its user, assume the entire risk as to its quality and accuracy.
+ *
+ * This software is copyright (C) 1991, 1992, Thomas G. Lane.
+ * All Rights Reserved except as specified below.
+ *
+ * Permission is hereby granted to use, copy, modify, and distribute this
+ * software (or portions thereof) for any purpose, without fee, subject to
+ * these conditions:
+ * (1) If any part of the source code for this software is distributed, then
+ * this README file must be included, with this copyright and no-warranty
+ * notice unaltered; and any additions, deletions, or changes to the original
+ * files must be clearly indicated in accompanying documentation.
+ * (2) If only executable code is distributed, then the accompanying
+ * documentation must state that "this software is based in part on the work
+ * of the Independent JPEG Group".
+ * (3) Permission for use of this software is granted only if the user accepts
+ * full responsibility for any undesirable consequences; the authors accept
+ * NO LIABILITY for damages of any kind.
+ *
+ * These conditions apply to any software derived from or based on the IJG
+ * code, not just to the unmodified library. If you use our work, you ought
+ * to acknowledge us.
+ *
+ * Permission is NOT granted for the use of any IJG author's name or company
+ * name in advertising or publicity relating to this software or products
+ * derived from it. This software may be referred to only as "the Independent
+ * JPEG Group's software".
+ *
+ * We specifically permit and encourage the use of this software as the basis
+ * of commercial products, provided that all warranty or liability claims are
+ * assumed by the product vendor.
+ *
+ * This file contains the basic inverse-DCT transformation subroutine.
+ *
+ * This implementation is based on an algorithm described in
+ * C. Loeffler, A. Ligtenberg and G. Moschytz, "Practical Fast 1-D DCT
+ * Algorithms with 11 Multiplications", Proc. Int'l. Conf. on Acoustics,
+ * Speech, and Signal Processing 1989 (ICASSP '89), pp. 988-991.
+ * The primary algorithm described there uses 11 multiplies and 29 adds.
+ * We use their alternate method with 12 multiplies and 32 adds.
+ * The advantage of this method is that no data path contains more than one
+ * multiplication; this allows a very simple and accurate implementation in
+ * scaled fixed-point arithmetic, with a minimal number of shifts.
+ *
+ * I've made lots of modifications to attempt to take advantage of the
+ * sparse nature of the DCT matrices we're getting. Although the logic
+ * is cumbersome, it's straightforward and the resulting code is much
+ * faster.
+ *
+ * A better way to do this would be to pass in the DCT block as a sparse
+ * matrix, perhaps with the difference cases encoded.
+ */
+
+/**
+ * @file libavcodec/jrevdct.c
+ * Independent JPEG Group's LLM idct.
+ */
+
+#include "libavutil/common.h"
+#include "dsputil.h"
+
+#define EIGHT_BIT_SAMPLES
+
+#define DCTSIZE 8
+#define DCTSIZE2 64
+
+#define GLOBAL
+
+#define RIGHT_SHIFT(x, n) ((x) >> (n))
+
+typedef DCTELEM DCTBLOCK[DCTSIZE2];
+
+#define CONST_BITS 13
+
+/*
+ * This routine is specialized to the case DCTSIZE = 8.
+ */
+
+#if DCTSIZE != 8
+ Sorry, this code only copes with 8x8 DCTs. /* deliberate syntax err */
+#endif
+
+
+/*
+ * A 2-D IDCT can be done by 1-D IDCT on each row followed by 1-D IDCT
+ * on each column. Direct algorithms are also available, but they are
+ * much more complex and seem not to be any faster when reduced to code.
+ *
+ * The poop on this scaling stuff is as follows:
+ *
+ * Each 1-D IDCT step produces outputs which are a factor of sqrt(N)
+ * larger than the true IDCT outputs. The final outputs are therefore
+ * a factor of N larger than desired; since N=8 this can be cured by
+ * a simple right shift at the end of the algorithm. The advantage of
+ * this arrangement is that we save two multiplications per 1-D IDCT,
+ * because the y0 and y4 inputs need not be divided by sqrt(N).
+ *
+ * We have to do addition and subtraction of the integer inputs, which
+ * is no problem, and multiplication by fractional constants, which is
+ * a problem to do in integer arithmetic. We multiply all the constants
+ * by CONST_SCALE and convert them to integer constants (thus retaining
+ * CONST_BITS bits of precision in the constants). After doing a
+ * multiplication we have to divide the product by CONST_SCALE, with proper
+ * rounding, to produce the correct output. This division can be done
+ * cheaply as a right shift of CONST_BITS bits. We postpone shifting
+ * as long as possible so that partial sums can be added together with
+ * full fractional precision.
+ *
+ * The outputs of the first pass are scaled up by PASS1_BITS bits so that
+ * they are represented to better-than-integral precision. These outputs
+ * require BITS_IN_JSAMPLE + PASS1_BITS + 3 bits; this fits in a 16-bit word
+ * with the recommended scaling. (To scale up 12-bit sample data further, an
+ * intermediate int32 array would be needed.)
+ *
+ * To avoid overflow of the 32-bit intermediate results in pass 2, we must
+ * have BITS_IN_JSAMPLE + CONST_BITS + PASS1_BITS <= 26. Error analysis
+ * shows that the values given below are the most effective.
+ */
+
+#ifdef EIGHT_BIT_SAMPLES
+#define PASS1_BITS 2
+#else
+#define PASS1_BITS 1 /* lose a little precision to avoid overflow */
+#endif
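+/* with 8-bit samples: BITS_IN_JSAMPLE(8) + CONST_BITS(13) + PASS1_BITS(2) = 23 <= 26, as required above */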
+
+#define ONE ((int32_t) 1)
+
+#define CONST_SCALE (ONE << CONST_BITS)
+
+/* Convert a positive real constant to an integer scaled by CONST_SCALE.
+ * IMPORTANT: if your compiler doesn't do this arithmetic at compile time,
+ * you will pay a significant penalty in run time. In that case, figure
+ * the correct integer constant values and insert them by hand.
+ */
+
+/* Actually FIX is no longer used, we precomputed them all */
+#define FIX(x) ((int32_t) ((x) * CONST_SCALE + 0.5))
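+/* e.g. FIX(0.541196100) evaluates to 4433 with CONST_BITS == 13, matching FIX_0_541196100 below */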
+
+/* Descale and correctly round an int32_t value that's scaled by N bits.
+ * We assume RIGHT_SHIFT rounds towards minus infinity, so adding
+ * the fudge factor is correct for either sign of X.
+ */
+
+#define DESCALE(x,n) RIGHT_SHIFT((x) + (ONE << ((n)-1)), n)
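+/* e.g. DESCALE(x, 11) == (x + 1024) >> 11, i.e. division by 2048 with rounding */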
+
+/* Multiply an int32_t variable by an int32_t constant to yield an int32_t result.
+ * For 8-bit samples with the recommended scaling, all the variable
+ * and constant values involved are no more than 16 bits wide, so a
+ * 16x16->32 bit multiply can be used instead of a full 32x32 multiply;
+ * this provides a useful speedup on many machines.
+ * There is no way to specify a 16x16->32 multiply in portable C, but
+ * some C compilers will do the right thing if you provide the correct
+ * combination of casts.
+ * NB: for 12-bit samples, a full 32-bit multiplication will be needed.
+ */
+
+#ifdef EIGHT_BIT_SAMPLES
+#ifdef SHORTxSHORT_32 /* may work if 'int' is 32 bits */
+#define MULTIPLY(var,const) (((int16_t) (var)) * ((int16_t) (const)))
+#endif
+#ifdef SHORTxLCONST_32 /* known to work with Microsoft C 6.0 */
+#define MULTIPLY(var,const) (((int16_t) (var)) * ((int32_t) (const)))
+#endif
+#endif
+
+#ifndef MULTIPLY /* default definition */
+#define MULTIPLY(var,const) ((var) * (const))
+#endif
+
+
+/*
+ * Unlike our decoder, where we approximate the FIXes, we need to use exact
+ * ones here, or successive P-frames will drift too much with reference-frame coding.
+ */
+#define FIX_0_211164243 1730
+#define FIX_0_275899380 2260
+#define FIX_0_298631336 2446
+#define FIX_0_390180644 3196
+#define FIX_0_509795579 4176
+#define FIX_0_541196100 4433
+#define FIX_0_601344887 4926
+#define FIX_0_765366865 6270
+#define FIX_0_785694958 6436
+#define FIX_0_899976223 7373
+#define FIX_1_061594337 8697
+#define FIX_1_111140466 9102
+#define FIX_1_175875602 9633
+#define FIX_1_306562965 10703
+#define FIX_1_387039845 11363
+#define FIX_1_451774981 11893
+#define FIX_1_501321110 12299
+#define FIX_1_662939225 13623
+#define FIX_1_847759065 15137
+#define FIX_1_961570560 16069
+#define FIX_2_053119869 16819
+#define FIX_2_172734803 17799
+#define FIX_2_562915447 20995
+#define FIX_3_072711026 25172
+
+/*
+ * Perform the inverse DCT on one block of coefficients.
+ */
+
+void j_rev_dct(DCTBLOCK data)
+{
+ int32_t tmp0, tmp1, tmp2, tmp3;
+ int32_t tmp10, tmp11, tmp12, tmp13;
+ int32_t z1, z2, z3, z4, z5;
+ int32_t d0, d1, d2, d3, d4, d5, d6, d7;
+ register DCTELEM *dataptr;
+ int rowctr;
+
+ /* Pass 1: process rows. */
+ /* Note results are scaled up by sqrt(8) compared to a true IDCT; */
+ /* furthermore, we scale the results by 2**PASS1_BITS. */
+
+ dataptr = data;
+
+ for (rowctr = DCTSIZE-1; rowctr >= 0; rowctr--) {
+ /* Due to quantization, we will usually find that many of the input
+ * coefficients are zero, especially the AC terms. We can exploit this
+ * by short-circuiting the IDCT calculation for any row in which all
+ * the AC terms are zero. In that case each output is equal to the
+ * DC coefficient (with scale factor as needed).
+ * With typical images and quantization tables, half or more of the
+ * row DCT calculations can be simplified this way.
+ */
+
+ register int *idataptr = (int*)dataptr;
+
+ /* WARNING: we do the same permutation as MMX idct to simplify the
+ video core */
+ d0 = dataptr[0];
+ d2 = dataptr[1];
+ d4 = dataptr[2];
+ d6 = dataptr[3];
+ d1 = dataptr[4];
+ d3 = dataptr[5];
+ d5 = dataptr[6];
+ d7 = dataptr[7];
+
+ if ((d1 | d2 | d3 | d4 | d5 | d6 | d7) == 0) {
+ /* AC terms all zero */
+ if (d0) {
+ /* Compute a 32 bit value to assign. */
+ DCTELEM dcval = (DCTELEM) (d0 << PASS1_BITS);
+ register int v = (dcval & 0xffff) | ((dcval << 16) & 0xffff0000);
+
+ idataptr[0] = v;
+ idataptr[1] = v;
+ idataptr[2] = v;
+ idataptr[3] = v;
+ }
+
+ dataptr += DCTSIZE; /* advance pointer to next row */
+ continue;
+ }
+
+ /* Even part: reverse the even part of the forward DCT. */
+ /* The rotator is sqrt(2)*c(-6). */
+{
+ if (d6) {
+ if (d2) {
+ /* d0 != 0, d2 != 0, d4 != 0, d6 != 0 */
+ z1 = MULTIPLY(d2 + d6, FIX_0_541196100);
+ tmp2 = z1 + MULTIPLY(-d6, FIX_1_847759065);
+ tmp3 = z1 + MULTIPLY(d2, FIX_0_765366865);
+
+ tmp0 = (d0 + d4) << CONST_BITS;
+ tmp1 = (d0 - d4) << CONST_BITS;
+
+ tmp10 = tmp0 + tmp3;
+ tmp13 = tmp0 - tmp3;
+ tmp11 = tmp1 + tmp2;
+ tmp12 = tmp1 - tmp2;
+ } else {
+ /* d0 != 0, d2 == 0, d4 != 0, d6 != 0 */
+ tmp2 = MULTIPLY(-d6, FIX_1_306562965);
+ tmp3 = MULTIPLY(d6, FIX_0_541196100);
+
+ tmp0 = (d0 + d4) << CONST_BITS;
+ tmp1 = (d0 - d4) << CONST_BITS;
+
+ tmp10 = tmp0 + tmp3;
+ tmp13 = tmp0 - tmp3;
+ tmp11 = tmp1 + tmp2;
+ tmp12 = tmp1 - tmp2;
+ }
+ } else {
+ if (d2) {
+ /* d0 != 0, d2 != 0, d4 != 0, d6 == 0 */
+ tmp2 = MULTIPLY(d2, FIX_0_541196100);
+ tmp3 = MULTIPLY(d2, FIX_1_306562965);
+
+ tmp0 = (d0 + d4) << CONST_BITS;
+ tmp1 = (d0 - d4) << CONST_BITS;
+
+ tmp10 = tmp0 + tmp3;
+ tmp13 = tmp0 - tmp3;
+ tmp11 = tmp1 + tmp2;
+ tmp12 = tmp1 - tmp2;
+ } else {
+ /* d0 != 0, d2 == 0, d4 != 0, d6 == 0 */
+ tmp10 = tmp13 = (d0 + d4) << CONST_BITS;
+ tmp11 = tmp12 = (d0 - d4) << CONST_BITS;
+ }
+ }
+
+ /* Odd part per figure 8; the matrix is unitary and hence its
+ * transpose is its inverse. i0..i3 are y7,y5,y3,y1 respectively.
+ */
+
+ if (d7) {
+ if (d5) {
+ if (d3) {
+ if (d1) {
+ /* d1 != 0, d3 != 0, d5 != 0, d7 != 0 */
+ z1 = d7 + d1;
+ z2 = d5 + d3;
+ z3 = d7 + d3;
+ z4 = d5 + d1;
+ z5 = MULTIPLY(z3 + z4, FIX_1_175875602);
+
+ tmp0 = MULTIPLY(d7, FIX_0_298631336);
+ tmp1 = MULTIPLY(d5, FIX_2_053119869);
+ tmp2 = MULTIPLY(d3, FIX_3_072711026);
+ tmp3 = MULTIPLY(d1, FIX_1_501321110);
+ z1 = MULTIPLY(-z1, FIX_0_899976223);
+ z2 = MULTIPLY(-z2, FIX_2_562915447);
+ z3 = MULTIPLY(-z3, FIX_1_961570560);
+ z4 = MULTIPLY(-z4, FIX_0_390180644);
+
+ z3 += z5;
+ z4 += z5;
+
+ tmp0 += z1 + z3;
+ tmp1 += z2 + z4;
+ tmp2 += z2 + z3;
+ tmp3 += z1 + z4;
+ } else {
+ /* d1 == 0, d3 != 0, d5 != 0, d7 != 0 */
+ z2 = d5 + d3;
+ z3 = d7 + d3;
+ z5 = MULTIPLY(z3 + d5, FIX_1_175875602);
+
+ tmp0 = MULTIPLY(d7, FIX_0_298631336);
+ tmp1 = MULTIPLY(d5, FIX_2_053119869);
+ tmp2 = MULTIPLY(d3, FIX_3_072711026);
+ z1 = MULTIPLY(-d7, FIX_0_899976223);
+ z2 = MULTIPLY(-z2, FIX_2_562915447);
+ z3 = MULTIPLY(-z3, FIX_1_961570560);
+ z4 = MULTIPLY(-d5, FIX_0_390180644);
+
+ z3 += z5;
+ z4 += z5;
+
+ tmp0 += z1 + z3;
+ tmp1 += z2 + z4;
+ tmp2 += z2 + z3;
+ tmp3 = z1 + z4;
+ }
+ } else {
+ if (d1) {
+ /* d1 != 0, d3 == 0, d5 != 0, d7 != 0 */
+ z1 = d7 + d1;
+ z4 = d5 + d1;
+ z5 = MULTIPLY(d7 + z4, FIX_1_175875602);
+
+ tmp0 = MULTIPLY(d7, FIX_0_298631336);
+ tmp1 = MULTIPLY(d5, FIX_2_053119869);
+ tmp3 = MULTIPLY(d1, FIX_1_501321110);
+ z1 = MULTIPLY(-z1, FIX_0_899976223);
+ z2 = MULTIPLY(-d5, FIX_2_562915447);
+ z3 = MULTIPLY(-d7, FIX_1_961570560);
+ z4 = MULTIPLY(-z4, FIX_0_390180644);
+
+ z3 += z5;
+ z4 += z5;
+
+ tmp0 += z1 + z3;
+ tmp1 += z2 + z4;
+ tmp2 = z2 + z3;
+ tmp3 += z1 + z4;
+ } else {
+ /* d1 == 0, d3 == 0, d5 != 0, d7 != 0 */
+ tmp0 = MULTIPLY(-d7, FIX_0_601344887);
+ z1 = MULTIPLY(-d7, FIX_0_899976223);
+ z3 = MULTIPLY(-d7, FIX_1_961570560);
+ tmp1 = MULTIPLY(-d5, FIX_0_509795579);
+ z2 = MULTIPLY(-d5, FIX_2_562915447);
+ z4 = MULTIPLY(-d5, FIX_0_390180644);
+ z5 = MULTIPLY(d5 + d7, FIX_1_175875602);
+
+ z3 += z5;
+ z4 += z5;
+
+ tmp0 += z3;
+ tmp1 += z4;
+ tmp2 = z2 + z3;
+ tmp3 = z1 + z4;
+ }
+ }
+ } else {
+ if (d3) {
+ if (d1) {
+ /* d1 != 0, d3 != 0, d5 == 0, d7 != 0 */
+ z1 = d7 + d1;
+ z3 = d7 + d3;
+ z5 = MULTIPLY(z3 + d1, FIX_1_175875602);
+
+ tmp0 = MULTIPLY(d7, FIX_0_298631336);
+ tmp2 = MULTIPLY(d3, FIX_3_072711026);
+ tmp3 = MULTIPLY(d1, FIX_1_501321110);
+ z1 = MULTIPLY(-z1, FIX_0_899976223);
+ z2 = MULTIPLY(-d3, FIX_2_562915447);
+ z3 = MULTIPLY(-z3, FIX_1_961570560);
+ z4 = MULTIPLY(-d1, FIX_0_390180644);
+
+ z3 += z5;
+ z4 += z5;
+
+ tmp0 += z1 + z3;
+ tmp1 = z2 + z4;
+ tmp2 += z2 + z3;
+ tmp3 += z1 + z4;
+ } else {
+ /* d1 == 0, d3 != 0, d5 == 0, d7 != 0 */
+ z3 = d7 + d3;
+
+ tmp0 = MULTIPLY(-d7, FIX_0_601344887);
+ z1 = MULTIPLY(-d7, FIX_0_899976223);
+ tmp2 = MULTIPLY(d3, FIX_0_509795579);
+ z2 = MULTIPLY(-d3, FIX_2_562915447);
+ z5 = MULTIPLY(z3, FIX_1_175875602);
+ z3 = MULTIPLY(-z3, FIX_0_785694958);
+
+ tmp0 += z3;
+ tmp1 = z2 + z5;
+ tmp2 += z3;
+ tmp3 = z1 + z5;
+ }
+ } else {
+ if (d1) {
+ /* d1 != 0, d3 == 0, d5 == 0, d7 != 0 */
+ z1 = d7 + d1;
+ z5 = MULTIPLY(z1, FIX_1_175875602);
+
+ z1 = MULTIPLY(z1, FIX_0_275899380);
+ z3 = MULTIPLY(-d7, FIX_1_961570560);
+ tmp0 = MULTIPLY(-d7, FIX_1_662939225);
+ z4 = MULTIPLY(-d1, FIX_0_390180644);
+ tmp3 = MULTIPLY(d1, FIX_1_111140466);
+
+ tmp0 += z1;
+ tmp1 = z4 + z5;
+ tmp2 = z3 + z5;
+ tmp3 += z1;
+ } else {
+ /* d1 == 0, d3 == 0, d5 == 0, d7 != 0 */
+ tmp0 = MULTIPLY(-d7, FIX_1_387039845);
+ tmp1 = MULTIPLY(d7, FIX_1_175875602);
+ tmp2 = MULTIPLY(-d7, FIX_0_785694958);
+ tmp3 = MULTIPLY(d7, FIX_0_275899380);
+ }
+ }
+ }
+ } else {
+ if (d5) {
+ if (d3) {
+ if (d1) {
+ /* d1 != 0, d3 != 0, d5 != 0, d7 == 0 */
+ z2 = d5 + d3;
+ z4 = d5 + d1;
+ z5 = MULTIPLY(d3 + z4, FIX_1_175875602);
+
+ tmp1 = MULTIPLY(d5, FIX_2_053119869);
+ tmp2 = MULTIPLY(d3, FIX_3_072711026);
+ tmp3 = MULTIPLY(d1, FIX_1_501321110);
+ z1 = MULTIPLY(-d1, FIX_0_899976223);
+ z2 = MULTIPLY(-z2, FIX_2_562915447);
+ z3 = MULTIPLY(-d3, FIX_1_961570560);
+ z4 = MULTIPLY(-z4, FIX_0_390180644);
+
+ z3 += z5;
+ z4 += z5;
+
+ tmp0 = z1 + z3;
+ tmp1 += z2 + z4;
+ tmp2 += z2 + z3;
+ tmp3 += z1 + z4;
+ } else {
+ /* d1 == 0, d3 != 0, d5 != 0, d7 == 0 */
+ z2 = d5 + d3;
+
+ z5 = MULTIPLY(z2, FIX_1_175875602);
+ tmp1 = MULTIPLY(d5, FIX_1_662939225);
+ z4 = MULTIPLY(-d5, FIX_0_390180644);
+ z2 = MULTIPLY(-z2, FIX_1_387039845);
+ tmp2 = MULTIPLY(d3, FIX_1_111140466);
+ z3 = MULTIPLY(-d3, FIX_1_961570560);
+
+ tmp0 = z3 + z5;
+ tmp1 += z2;
+ tmp2 += z2;
+ tmp3 = z4 + z5;
+ }
+ } else {
+ if (d1) {
+ /* d1 != 0, d3 == 0, d5 != 0, d7 == 0 */
+ z4 = d5 + d1;
+
+ z5 = MULTIPLY(z4, FIX_1_175875602);
+ z1 = MULTIPLY(-d1, FIX_0_899976223);
+ tmp3 = MULTIPLY(d1, FIX_0_601344887);
+ tmp1 = MULTIPLY(-d5, FIX_0_509795579);
+ z2 = MULTIPLY(-d5, FIX_2_562915447);
+ z4 = MULTIPLY(z4, FIX_0_785694958);
+
+ tmp0 = z1 + z5;
+ tmp1 += z4;
+ tmp2 = z2 + z5;
+ tmp3 += z4;
+ } else {
+ /* d1 == 0, d3 == 0, d5 != 0, d7 == 0 */
+ tmp0 = MULTIPLY(d5, FIX_1_175875602);
+ tmp1 = MULTIPLY(d5, FIX_0_275899380);
+ tmp2 = MULTIPLY(-d5, FIX_1_387039845);
+ tmp3 = MULTIPLY(d5, FIX_0_785694958);
+ }
+ }
+ } else {
+ if (d3) {
+ if (d1) {
+ /* d1 != 0, d3 != 0, d5 == 0, d7 == 0 */
+ z5 = d1 + d3;
+ tmp3 = MULTIPLY(d1, FIX_0_211164243);
+ tmp2 = MULTIPLY(-d3, FIX_1_451774981);
+ z1 = MULTIPLY(d1, FIX_1_061594337);
+ z2 = MULTIPLY(-d3, FIX_2_172734803);
+ z4 = MULTIPLY(z5, FIX_0_785694958);
+ z5 = MULTIPLY(z5, FIX_1_175875602);
+
+ tmp0 = z1 - z4;
+ tmp1 = z2 + z4;
+ tmp2 += z5;
+ tmp3 += z5;
+ } else {
+ /* d1 == 0, d3 != 0, d5 == 0, d7 == 0 */
+ tmp0 = MULTIPLY(-d3, FIX_0_785694958);
+ tmp1 = MULTIPLY(-d3, FIX_1_387039845);
+ tmp2 = MULTIPLY(-d3, FIX_0_275899380);
+ tmp3 = MULTIPLY(d3, FIX_1_175875602);
+ }
+ } else {
+ if (d1) {
+ /* d1 != 0, d3 == 0, d5 == 0, d7 == 0 */
+ tmp0 = MULTIPLY(d1, FIX_0_275899380);
+ tmp1 = MULTIPLY(d1, FIX_0_785694958);
+ tmp2 = MULTIPLY(d1, FIX_1_175875602);
+ tmp3 = MULTIPLY(d1, FIX_1_387039845);
+ } else {
+ /* d1 == 0, d3 == 0, d5 == 0, d7 == 0 */
+ tmp0 = tmp1 = tmp2 = tmp3 = 0;
+ }
+ }
+ }
+ }
+}
+ /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
+
+ dataptr[0] = (DCTELEM) DESCALE(tmp10 + tmp3, CONST_BITS-PASS1_BITS);
+ dataptr[7] = (DCTELEM) DESCALE(tmp10 - tmp3, CONST_BITS-PASS1_BITS);
+ dataptr[1] = (DCTELEM) DESCALE(tmp11 + tmp2, CONST_BITS-PASS1_BITS);
+ dataptr[6] = (DCTELEM) DESCALE(tmp11 - tmp2, CONST_BITS-PASS1_BITS);
+ dataptr[2] = (DCTELEM) DESCALE(tmp12 + tmp1, CONST_BITS-PASS1_BITS);
+ dataptr[5] = (DCTELEM) DESCALE(tmp12 - tmp1, CONST_BITS-PASS1_BITS);
+ dataptr[3] = (DCTELEM) DESCALE(tmp13 + tmp0, CONST_BITS-PASS1_BITS);
+ dataptr[4] = (DCTELEM) DESCALE(tmp13 - tmp0, CONST_BITS-PASS1_BITS);
+
+ dataptr += DCTSIZE; /* advance pointer to next row */
+ }
+
+ /* Pass 2: process columns. */
+ /* Note that we must descale the results by a factor of 8 == 2**3, */
+ /* and also undo the PASS1_BITS scaling. */
+
+ dataptr = data;
+ for (rowctr = DCTSIZE-1; rowctr >= 0; rowctr--) {
+ /* Columns of zeroes can be exploited in the same way as we did with rows.
+ * However, the row calculation has created many nonzero AC terms, so the
+ * simplification applies less often (typically 5% to 10% of the time).
+ * On machines with very fast multiplication, it's possible that the
+ * test takes more time than it's worth. In that case this section
+ * may be commented out.
+ */
+
+ d0 = dataptr[DCTSIZE*0];
+ d1 = dataptr[DCTSIZE*1];
+ d2 = dataptr[DCTSIZE*2];
+ d3 = dataptr[DCTSIZE*3];
+ d4 = dataptr[DCTSIZE*4];
+ d5 = dataptr[DCTSIZE*5];
+ d6 = dataptr[DCTSIZE*6];
+ d7 = dataptr[DCTSIZE*7];
+
+ /* Even part: reverse the even part of the forward DCT. */
+ /* The rotator is sqrt(2)*c(-6). */
+ if (d6) {
+ if (d2) {
+ /* d0 != 0, d2 != 0, d4 != 0, d6 != 0 */
+ z1 = MULTIPLY(d2 + d6, FIX_0_541196100);
+ tmp2 = z1 + MULTIPLY(-d6, FIX_1_847759065);
+ tmp3 = z1 + MULTIPLY(d2, FIX_0_765366865);
+
+ tmp0 = (d0 + d4) << CONST_BITS;
+ tmp1 = (d0 - d4) << CONST_BITS;
+
+ tmp10 = tmp0 + tmp3;
+ tmp13 = tmp0 - tmp3;
+ tmp11 = tmp1 + tmp2;
+ tmp12 = tmp1 - tmp2;
+ } else {
+ /* d0 != 0, d2 == 0, d4 != 0, d6 != 0 */
+ tmp2 = MULTIPLY(-d6, FIX_1_306562965);
+ tmp3 = MULTIPLY(d6, FIX_0_541196100);
+
+ tmp0 = (d0 + d4) << CONST_BITS;
+ tmp1 = (d0 - d4) << CONST_BITS;
+
+ tmp10 = tmp0 + tmp3;
+ tmp13 = tmp0 - tmp3;
+ tmp11 = tmp1 + tmp2;
+ tmp12 = tmp1 - tmp2;
+ }
+ } else {
+ if (d2) {
+ /* d0 != 0, d2 != 0, d4 != 0, d6 == 0 */
+ tmp2 = MULTIPLY(d2, FIX_0_541196100);
+ tmp3 = MULTIPLY(d2, FIX_1_306562965);
+
+ tmp0 = (d0 + d4) << CONST_BITS;
+ tmp1 = (d0 - d4) << CONST_BITS;
+
+ tmp10 = tmp0 + tmp3;
+ tmp13 = tmp0 - tmp3;
+ tmp11 = tmp1 + tmp2;
+ tmp12 = tmp1 - tmp2;
+ } else {
+ /* d0 != 0, d2 == 0, d4 != 0, d6 == 0 */
+ tmp10 = tmp13 = (d0 + d4) << CONST_BITS;
+ tmp11 = tmp12 = (d0 - d4) << CONST_BITS;
+ }
+ }
+
+ /* Odd part per figure 8; the matrix is unitary and hence its
+ * transpose is its inverse. i0..i3 are y7,y5,y3,y1 respectively.
+ */
+ if (d7) {
+ if (d5) {
+ if (d3) {
+ if (d1) {
+ /* d1 != 0, d3 != 0, d5 != 0, d7 != 0 */
+ z1 = d7 + d1;
+ z2 = d5 + d3;
+ z3 = d7 + d3;
+ z4 = d5 + d1;
+ z5 = MULTIPLY(z3 + z4, FIX_1_175875602);
+
+ tmp0 = MULTIPLY(d7, FIX_0_298631336);
+ tmp1 = MULTIPLY(d5, FIX_2_053119869);
+ tmp2 = MULTIPLY(d3, FIX_3_072711026);
+ tmp3 = MULTIPLY(d1, FIX_1_501321110);
+ z1 = MULTIPLY(-z1, FIX_0_899976223);
+ z2 = MULTIPLY(-z2, FIX_2_562915447);
+ z3 = MULTIPLY(-z3, FIX_1_961570560);
+ z4 = MULTIPLY(-z4, FIX_0_390180644);
+
+ z3 += z5;
+ z4 += z5;
+
+ tmp0 += z1 + z3;
+ tmp1 += z2 + z4;
+ tmp2 += z2 + z3;
+ tmp3 += z1 + z4;
+ } else {
+ /* d1 == 0, d3 != 0, d5 != 0, d7 != 0 */
+ z2 = d5 + d3;
+ z3 = d7 + d3;
+ z5 = MULTIPLY(z3 + d5, FIX_1_175875602);
+
+ tmp0 = MULTIPLY(d7, FIX_0_298631336);
+ tmp1 = MULTIPLY(d5, FIX_2_053119869);
+ tmp2 = MULTIPLY(d3, FIX_3_072711026);
+ z1 = MULTIPLY(-d7, FIX_0_899976223);
+ z2 = MULTIPLY(-z2, FIX_2_562915447);
+ z3 = MULTIPLY(-z3, FIX_1_961570560);
+ z4 = MULTIPLY(-d5, FIX_0_390180644);
+
+ z3 += z5;
+ z4 += z5;
+
+ tmp0 += z1 + z3;
+ tmp1 += z2 + z4;
+ tmp2 += z2 + z3;
+ tmp3 = z1 + z4;
+ }
+ } else {
+ if (d1) {
+ /* d1 != 0, d3 == 0, d5 != 0, d7 != 0 */
+ z1 = d7 + d1;
+ z3 = d7;
+ z4 = d5 + d1;
+ z5 = MULTIPLY(z3 + z4, FIX_1_175875602);
+
+ tmp0 = MULTIPLY(d7, FIX_0_298631336);
+ tmp1 = MULTIPLY(d5, FIX_2_053119869);
+ tmp3 = MULTIPLY(d1, FIX_1_501321110);
+ z1 = MULTIPLY(-z1, FIX_0_899976223);
+ z2 = MULTIPLY(-d5, FIX_2_562915447);
+ z3 = MULTIPLY(-d7, FIX_1_961570560);
+ z4 = MULTIPLY(-z4, FIX_0_390180644);
+
+ z3 += z5;
+ z4 += z5;
+
+ tmp0 += z1 + z3;
+ tmp1 += z2 + z4;
+ tmp2 = z2 + z3;
+ tmp3 += z1 + z4;
+ } else {
+ /* d1 == 0, d3 == 0, d5 != 0, d7 != 0 */
+ tmp0 = MULTIPLY(-d7, FIX_0_601344887);
+ z1 = MULTIPLY(-d7, FIX_0_899976223);
+ z3 = MULTIPLY(-d7, FIX_1_961570560);
+ tmp1 = MULTIPLY(-d5, FIX_0_509795579);
+ z2 = MULTIPLY(-d5, FIX_2_562915447);
+ z4 = MULTIPLY(-d5, FIX_0_390180644);
+ z5 = MULTIPLY(d5 + d7, FIX_1_175875602);
+
+ z3 += z5;
+ z4 += z5;
+
+ tmp0 += z3;
+ tmp1 += z4;
+ tmp2 = z2 + z3;
+ tmp3 = z1 + z4;
+ }
+ }
+ } else {
+ if (d3) {
+ if (d1) {
+ /* d1 != 0, d3 != 0, d5 == 0, d7 != 0 */
+ z1 = d7 + d1;
+ z3 = d7 + d3;
+ z5 = MULTIPLY(z3 + d1, FIX_1_175875602);
+
+ tmp0 = MULTIPLY(d7, FIX_0_298631336);
+ tmp2 = MULTIPLY(d3, FIX_3_072711026);
+ tmp3 = MULTIPLY(d1, FIX_1_501321110);
+ z1 = MULTIPLY(-z1, FIX_0_899976223);
+ z2 = MULTIPLY(-d3, FIX_2_562915447);
+ z3 = MULTIPLY(-z3, FIX_1_961570560);
+ z4 = MULTIPLY(-d1, FIX_0_390180644);
+
+ z3 += z5;
+ z4 += z5;
+
+ tmp0 += z1 + z3;
+ tmp1 = z2 + z4;
+ tmp2 += z2 + z3;
+ tmp3 += z1 + z4;
+ } else {
+ /* d1 == 0, d3 != 0, d5 == 0, d7 != 0 */
+ z3 = d7 + d3;
+
+ tmp0 = MULTIPLY(-d7, FIX_0_601344887);
+ z1 = MULTIPLY(-d7, FIX_0_899976223);
+ tmp2 = MULTIPLY(d3, FIX_0_509795579);
+ z2 = MULTIPLY(-d3, FIX_2_562915447);
+ z5 = MULTIPLY(z3, FIX_1_175875602);
+ z3 = MULTIPLY(-z3, FIX_0_785694958);
+
+ tmp0 += z3;
+ tmp1 = z2 + z5;
+ tmp2 += z3;
+ tmp3 = z1 + z5;
+ }
+ } else {
+ if (d1) {
+ /* d1 != 0, d3 == 0, d5 == 0, d7 != 0 */
+ z1 = d7 + d1;
+ z5 = MULTIPLY(z1, FIX_1_175875602);
+
+ z1 = MULTIPLY(z1, FIX_0_275899380);
+ z3 = MULTIPLY(-d7, FIX_1_961570560);
+ tmp0 = MULTIPLY(-d7, FIX_1_662939225);
+ z4 = MULTIPLY(-d1, FIX_0_390180644);
+ tmp3 = MULTIPLY(d1, FIX_1_111140466);
+
+ tmp0 += z1;
+ tmp1 = z4 + z5;
+ tmp2 = z3 + z5;
+ tmp3 += z1;
+ } else {
+ /* d1 == 0, d3 == 0, d5 == 0, d7 != 0 */
+ tmp0 = MULTIPLY(-d7, FIX_1_387039845);
+ tmp1 = MULTIPLY(d7, FIX_1_175875602);
+ tmp2 = MULTIPLY(-d7, FIX_0_785694958);
+ tmp3 = MULTIPLY(d7, FIX_0_275899380);
+ }
+ }
+ }
+ } else {
+ if (d5) {
+ if (d3) {
+ if (d1) {
+ /* d1 != 0, d3 != 0, d5 != 0, d7 == 0 */
+ z2 = d5 + d3;
+ z4 = d5 + d1;
+ z5 = MULTIPLY(d3 + z4, FIX_1_175875602);
+
+ tmp1 = MULTIPLY(d5, FIX_2_053119869);
+ tmp2 = MULTIPLY(d3, FIX_3_072711026);
+ tmp3 = MULTIPLY(d1, FIX_1_501321110);
+ z1 = MULTIPLY(-d1, FIX_0_899976223);
+ z2 = MULTIPLY(-z2, FIX_2_562915447);
+ z3 = MULTIPLY(-d3, FIX_1_961570560);
+ z4 = MULTIPLY(-z4, FIX_0_390180644);
+
+ z3 += z5;
+ z4 += z5;
+
+ tmp0 = z1 + z3;
+ tmp1 += z2 + z4;
+ tmp2 += z2 + z3;
+ tmp3 += z1 + z4;
+ } else {
+ /* d1 == 0, d3 != 0, d5 != 0, d7 == 0 */
+ z2 = d5 + d3;
+
+ z5 = MULTIPLY(z2, FIX_1_175875602);
+ tmp1 = MULTIPLY(d5, FIX_1_662939225);
+ z4 = MULTIPLY(-d5, FIX_0_390180644);
+ z2 = MULTIPLY(-z2, FIX_1_387039845);
+ tmp2 = MULTIPLY(d3, FIX_1_111140466);
+ z3 = MULTIPLY(-d3, FIX_1_961570560);
+
+ tmp0 = z3 + z5;
+ tmp1 += z2;
+ tmp2 += z2;
+ tmp3 = z4 + z5;
+ }
+ } else {
+ if (d1) {
+ /* d1 != 0, d3 == 0, d5 != 0, d7 == 0 */
+ z4 = d5 + d1;
+
+ z5 = MULTIPLY(z4, FIX_1_175875602);
+ z1 = MULTIPLY(-d1, FIX_0_899976223);
+ tmp3 = MULTIPLY(d1, FIX_0_601344887);
+ tmp1 = MULTIPLY(-d5, FIX_0_509795579);
+ z2 = MULTIPLY(-d5, FIX_2_562915447);
+ z4 = MULTIPLY(z4, FIX_0_785694958);
+
+ tmp0 = z1 + z5;
+ tmp1 += z4;
+ tmp2 = z2 + z5;
+ tmp3 += z4;
+ } else {
+ /* d1 == 0, d3 == 0, d5 != 0, d7 == 0 */
+ tmp0 = MULTIPLY(d5, FIX_1_175875602);
+ tmp1 = MULTIPLY(d5, FIX_0_275899380);
+ tmp2 = MULTIPLY(-d5, FIX_1_387039845);
+ tmp3 = MULTIPLY(d5, FIX_0_785694958);
+ }
+ }
+ } else {
+ if (d3) {
+ if (d1) {
+ /* d1 != 0, d3 != 0, d5 == 0, d7 == 0 */
+ z5 = d1 + d3;
+ tmp3 = MULTIPLY(d1, FIX_0_211164243);
+ tmp2 = MULTIPLY(-d3, FIX_1_451774981);
+ z1 = MULTIPLY(d1, FIX_1_061594337);
+ z2 = MULTIPLY(-d3, FIX_2_172734803);
+ z4 = MULTIPLY(z5, FIX_0_785694958);
+ z5 = MULTIPLY(z5, FIX_1_175875602);
+
+ tmp0 = z1 - z4;
+ tmp1 = z2 + z4;
+ tmp2 += z5;
+ tmp3 += z5;
+ } else {
+ /* d1 == 0, d3 != 0, d5 == 0, d7 == 0 */
+ tmp0 = MULTIPLY(-d3, FIX_0_785694958);
+ tmp1 = MULTIPLY(-d3, FIX_1_387039845);
+ tmp2 = MULTIPLY(-d3, FIX_0_275899380);
+ tmp3 = MULTIPLY(d3, FIX_1_175875602);
+ }
+ } else {
+ if (d1) {
+ /* d1 != 0, d3 == 0, d5 == 0, d7 == 0 */
+ tmp0 = MULTIPLY(d1, FIX_0_275899380);
+ tmp1 = MULTIPLY(d1, FIX_0_785694958);
+ tmp2 = MULTIPLY(d1, FIX_1_175875602);
+ tmp3 = MULTIPLY(d1, FIX_1_387039845);
+ } else {
+ /* d1 == 0, d3 == 0, d5 == 0, d7 == 0 */
+ tmp0 = tmp1 = tmp2 = tmp3 = 0;
+ }
+ }
+ }
+ }
+
+ /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
+
+ dataptr[DCTSIZE*0] = (DCTELEM) DESCALE(tmp10 + tmp3,
+ CONST_BITS+PASS1_BITS+3);
+ dataptr[DCTSIZE*7] = (DCTELEM) DESCALE(tmp10 - tmp3,
+ CONST_BITS+PASS1_BITS+3);
+ dataptr[DCTSIZE*1] = (DCTELEM) DESCALE(tmp11 + tmp2,
+ CONST_BITS+PASS1_BITS+3);
+ dataptr[DCTSIZE*6] = (DCTELEM) DESCALE(tmp11 - tmp2,
+ CONST_BITS+PASS1_BITS+3);
+ dataptr[DCTSIZE*2] = (DCTELEM) DESCALE(tmp12 + tmp1,
+ CONST_BITS+PASS1_BITS+3);
+ dataptr[DCTSIZE*5] = (DCTELEM) DESCALE(tmp12 - tmp1,
+ CONST_BITS+PASS1_BITS+3);
+ dataptr[DCTSIZE*3] = (DCTELEM) DESCALE(tmp13 + tmp0,
+ CONST_BITS+PASS1_BITS+3);
+ dataptr[DCTSIZE*4] = (DCTELEM) DESCALE(tmp13 - tmp0,
+ CONST_BITS+PASS1_BITS+3);
+
+ dataptr++; /* advance pointer to next column */
+ }
+}
+
+#undef DCTSIZE
+#define DCTSIZE 4
+#define DCTSTRIDE 8
+
+void j_rev_dct4(DCTBLOCK data)
+{
+ int32_t tmp0, tmp1, tmp2, tmp3;
+ int32_t tmp10, tmp11, tmp12, tmp13;
+ int32_t z1;
+ int32_t d0, d2, d4, d6;
+ register DCTELEM *dataptr;
+ int rowctr;
+
+ /* Pass 1: process rows. */
+ /* Note results are scaled up by sqrt(8) compared to a true IDCT; */
+ /* furthermore, we scale the results by 2**PASS1_BITS. */
+
+ data[0] += 4;
+
+ dataptr = data;
+
+ for (rowctr = DCTSIZE-1; rowctr >= 0; rowctr--) {
+ /* Due to quantization, we will usually find that many of the input
+ * coefficients are zero, especially the AC terms. We can exploit this
+ * by short-circuiting the IDCT calculation for any row in which all
+ * the AC terms are zero. In that case each output is equal to the
+ * DC coefficient (with scale factor as needed).
+ * With typical images and quantization tables, half or more of the
+ * row DCT calculations can be simplified this way.
+ */
+
+ register int *idataptr = (int*)dataptr;
+
+ d0 = dataptr[0];
+ d2 = dataptr[1];
+ d4 = dataptr[2];
+ d6 = dataptr[3];
+
+ if ((d2 | d4 | d6) == 0) {
+ /* AC terms all zero */
+ if (d0) {
+ /* Compute a 32 bit value to assign. */
+ DCTELEM dcval = (DCTELEM) (d0 << PASS1_BITS);
+ register int v = (dcval & 0xffff) | ((dcval << 16) & 0xffff0000);
+
+ idataptr[0] = v;
+ idataptr[1] = v;
+ }
+
+ dataptr += DCTSTRIDE; /* advance pointer to next row */
+ continue;
+ }
+
+ /* Even part: reverse the even part of the forward DCT. */
+ /* The rotator is sqrt(2)*c(-6). */
+ if (d6) {
+ if (d2) {
+ /* d0 != 0, d2 != 0, d4 != 0, d6 != 0 */
+ z1 = MULTIPLY(d2 + d6, FIX_0_541196100);
+ tmp2 = z1 + MULTIPLY(-d6, FIX_1_847759065);
+ tmp3 = z1 + MULTIPLY(d2, FIX_0_765366865);
+
+ tmp0 = (d0 + d4) << CONST_BITS;
+ tmp1 = (d0 - d4) << CONST_BITS;
+
+ tmp10 = tmp0 + tmp3;
+ tmp13 = tmp0 - tmp3;
+ tmp11 = tmp1 + tmp2;
+ tmp12 = tmp1 - tmp2;
+ } else {
+ /* d0 != 0, d2 == 0, d4 != 0, d6 != 0 */
+ tmp2 = MULTIPLY(-d6, FIX_1_306562965);
+ tmp3 = MULTIPLY(d6, FIX_0_541196100);
+
+ tmp0 = (d0 + d4) << CONST_BITS;
+ tmp1 = (d0 - d4) << CONST_BITS;
+
+ tmp10 = tmp0 + tmp3;
+ tmp13 = tmp0 - tmp3;
+ tmp11 = tmp1 + tmp2;
+ tmp12 = tmp1 - tmp2;
+ }
+ } else {
+ if (d2) {
+ /* d0 != 0, d2 != 0, d4 != 0, d6 == 0 */
+ tmp2 = MULTIPLY(d2, FIX_0_541196100);
+ tmp3 = MULTIPLY(d2, FIX_1_306562965);
+
+ tmp0 = (d0 + d4) << CONST_BITS;
+ tmp1 = (d0 - d4) << CONST_BITS;
+
+ tmp10 = tmp0 + tmp3;
+ tmp13 = tmp0 - tmp3;
+ tmp11 = tmp1 + tmp2;
+ tmp12 = tmp1 - tmp2;
+ } else {
+ /* d0 != 0, d2 == 0, d4 != 0, d6 == 0 */
+ tmp10 = tmp13 = (d0 + d4) << CONST_BITS;
+ tmp11 = tmp12 = (d0 - d4) << CONST_BITS;
+ }
+ }
+
+ /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
+
+ dataptr[0] = (DCTELEM) DESCALE(tmp10, CONST_BITS-PASS1_BITS);
+ dataptr[1] = (DCTELEM) DESCALE(tmp11, CONST_BITS-PASS1_BITS);
+ dataptr[2] = (DCTELEM) DESCALE(tmp12, CONST_BITS-PASS1_BITS);
+ dataptr[3] = (DCTELEM) DESCALE(tmp13, CONST_BITS-PASS1_BITS);
+
+ dataptr += DCTSTRIDE; /* advance pointer to next row */
+ }
+
+ /* Pass 2: process columns. */
+ /* Note that we must descale the results by a factor of 8 == 2**3, */
+ /* and also undo the PASS1_BITS scaling. */
+
+ dataptr = data;
+ for (rowctr = DCTSIZE-1; rowctr >= 0; rowctr--) {
+ /* Columns of zeroes can be exploited in the same way as we did with rows.
+ * However, the row calculation has created many nonzero AC terms, so the
+ * simplification applies less often (typically 5% to 10% of the time).
+ * On machines with very fast multiplication, it's possible that the
+ * test takes more time than it's worth. In that case this section
+ * may be commented out.
+ */
+
+ d0 = dataptr[DCTSTRIDE*0];
+ d2 = dataptr[DCTSTRIDE*1];
+ d4 = dataptr[DCTSTRIDE*2];
+ d6 = dataptr[DCTSTRIDE*3];
+
+ /* Even part: reverse the even part of the forward DCT. */
+ /* The rotator is sqrt(2)*c(-6). */
+ if (d6) {
+ if (d2) {
+ /* d0 != 0, d2 != 0, d4 != 0, d6 != 0 */
+ z1 = MULTIPLY(d2 + d6, FIX_0_541196100);
+ tmp2 = z1 + MULTIPLY(-d6, FIX_1_847759065);
+ tmp3 = z1 + MULTIPLY(d2, FIX_0_765366865);
+
+ tmp0 = (d0 + d4) << CONST_BITS;
+ tmp1 = (d0 - d4) << CONST_BITS;
+
+ tmp10 = tmp0 + tmp3;
+ tmp13 = tmp0 - tmp3;
+ tmp11 = tmp1 + tmp2;
+ tmp12 = tmp1 - tmp2;
+ } else {
+ /* d0 != 0, d2 == 0, d4 != 0, d6 != 0 */
+ tmp2 = MULTIPLY(-d6, FIX_1_306562965);
+ tmp3 = MULTIPLY(d6, FIX_0_541196100);
+
+ tmp0 = (d0 + d4) << CONST_BITS;
+ tmp1 = (d0 - d4) << CONST_BITS;
+
+ tmp10 = tmp0 + tmp3;
+ tmp13 = tmp0 - tmp3;
+ tmp11 = tmp1 + tmp2;
+ tmp12 = tmp1 - tmp2;
+ }
+ } else {
+ if (d2) {
+ /* d0 != 0, d2 != 0, d4 != 0, d6 == 0 */
+ tmp2 = MULTIPLY(d2, FIX_0_541196100);
+ tmp3 = MULTIPLY(d2, FIX_1_306562965);
+
+ tmp0 = (d0 + d4) << CONST_BITS;
+ tmp1 = (d0 - d4) << CONST_BITS;
+
+ tmp10 = tmp0 + tmp3;
+ tmp13 = tmp0 - tmp3;
+ tmp11 = tmp1 + tmp2;
+ tmp12 = tmp1 - tmp2;
+ } else {
+ /* d0 != 0, d2 == 0, d4 != 0, d6 == 0 */
+ tmp10 = tmp13 = (d0 + d4) << CONST_BITS;
+ tmp11 = tmp12 = (d0 - d4) << CONST_BITS;
+ }
+ }
+
+ /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
+
+ dataptr[DCTSTRIDE*0] = tmp10 >> (CONST_BITS+PASS1_BITS+3);
+ dataptr[DCTSTRIDE*1] = tmp11 >> (CONST_BITS+PASS1_BITS+3);
+ dataptr[DCTSTRIDE*2] = tmp12 >> (CONST_BITS+PASS1_BITS+3);
+ dataptr[DCTSTRIDE*3] = tmp13 >> (CONST_BITS+PASS1_BITS+3);
+
+ dataptr++; /* advance pointer to next column */
+ }
+}
+
+void j_rev_dct2(DCTBLOCK data){
+ int d00, d01, d10, d11;
+
+ data[0] += 4;
+ d00 = data[0+0*DCTSTRIDE] + data[1+0*DCTSTRIDE];
+ d01 = data[0+0*DCTSTRIDE] - data[1+0*DCTSTRIDE];
+ d10 = data[0+1*DCTSTRIDE] + data[1+1*DCTSTRIDE];
+ d11 = data[0+1*DCTSTRIDE] - data[1+1*DCTSTRIDE];
+
+ data[0+0*DCTSTRIDE]= (d00 + d10)>>3;
+ data[1+0*DCTSTRIDE]= (d01 + d11)>>3;
+ data[0+1*DCTSTRIDE]= (d00 - d10)>>3;
+ data[1+1*DCTSTRIDE]= (d01 - d11)>>3;
+}
+
+void j_rev_dct1(DCTBLOCK data){
+ data[0] = (data[0] + 4)>>3;
+}
+
+#undef FIX
+#undef CONST_BITS
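The IDCT code above leans on a compact fixed-point convention whose macro definitions (CONST_BITS, PASS1_BITS, FIX_*, MULTIPLY, DESCALE) appear earlier in jrevdct.c and are not visible in this hunk. Below is a minimal standalone sketch of that convention, assuming the usual libjpeg values (CONST_BITS = 13, constants pre-scaled to 13 fractional bits, DESCALE as a rounded right shift); it is illustrative only, not the file's actual definitions.

/* Fixed-point sketch: multiply a coefficient by 0.541196100 in Q13 and
 * descale back with rounding, mirroring the MULTIPLY/DESCALE usage above. */
#include <stdio.h>
#include <stdint.h>

#define CONST_BITS 13                                     /* assumed, as in libjpeg */
#define FIX_0_541196100 ((int32_t)4433)                   /* round(0.541196100 * 2^13) */
#define MULTIPLY(var, c) ((var) * (c))                    /* 16x16 -> 32 bit multiply */
#define DESCALE(x, n) (((x) + (1 << ((n) - 1))) >> (n))   /* rounded right shift */

int main(void)
{
    int32_t d = 100;                                /* a dequantized coefficient */
    int32_t scaled = MULTIPLY(d, FIX_0_541196100);  /* d * 0.5412 in Q13 */
    printf("%d * 0.541196 ~= %d\n", (int)d, (int)DESCALE(scaled, CONST_BITS));
    return 0;
}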
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/libamr.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/libamr.c
new file mode 100644
index 000000000..695fc4ca7
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/libamr.c
@@ -0,0 +1,186 @@
+/*
+ * AMR Audio decoder stub
+ * Copyright (c) 2003 the ffmpeg project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+ /** @file
+ * Adaptive Multi-Rate (AMR) Audio decoder stub.
+ *
+ * This code implements both an AMR-NarrowBand (AMR-NB) and an AMR-WideBand
+ * (AMR-WB) audio encoder/decoder through external reference code from
+ * http://www.3gpp.org/. The license of the 3GPP code is unclear, so it has
+ * to be downloaded separately.
+ *
+ * \section AMR-NB
+ *
+ * The float version (default) can be downloaded from:
+ * http://www.3gpp.org/ftp/Specs/archive/26_series/26.104/26104-610.zip
+ *
+ * \subsection Specification
+ * The specification for AMR-NB can be found in TS 26.071
+ * (http://www.3gpp.org/ftp/Specs/html-info/26071.htm) and some other
+ * info at http://www.3gpp.org/ftp/Specs/html-info/26-series.htm.
+ *
+ * \section AMR-WB
+ *
+ * The reference code can be downloaded from:
+ * http://www.3gpp.org/ftp/Specs/archive/26_series/26.204/26204-600.zip
+ *
+ * \subsection Specification
+ * The specification for AMR-WB can be found in TS 26.171
+ * (http://www.3gpp.org/ftp/Specs/html-info/26171.htm) and some other
+ * info at http://www.3gpp.org/ftp/Specs/html-info/26-series.htm.
+ *
+ */
+
+#include "avcodec.h"
+
+static void amr_decode_fix_avctx(AVCodecContext *avctx)
+{
+ //const int is_amr_wb = 1 + (avctx->codec_id == CODEC_ID_AMR_WB);
+ const int is_amr_wb = 1;
+
+ if (!avctx->sample_rate)
+ avctx->sample_rate = 8000 * is_amr_wb;
+
+ if (!avctx->channels)
+ avctx->channels = 1;
+
+ avctx->frame_size = 160 * is_amr_wb;
+ avctx->sample_fmt = SAMPLE_FMT_S16;
+}
+
+#if CONFIG_LIBAMR_NB
+
+#include "amr_float/interf_dec.h"
+#include "amr_float/interf_enc.h"
+
+static const char nb_bitrate_unsupported[] =
+ "bitrate not supported: use one of 4.75k, 5.15k, 5.9k, 6.7k, 7.4k, 7.95k, 10.2k or 12.2k\n";
+
+typedef struct AMR_bitrates {
+ int rate;
+ enum Mode mode;
+} AMR_bitrates;
+
+/* Match desired bitrate */
+static int getBitrateMode(int bitrate)
+{
+ /* map the given bitrate to the corresponding AMR mode */
+ AMR_bitrates rates[] = { { 4750, MR475},
+ { 5150, MR515},
+ { 5900, MR59},
+ { 6700, MR67},
+ { 7400, MR74},
+ { 7950, MR795},
+ {10200, MR102},
+ {12200, MR122}, };
+ int i;
+
+ for (i = 0; i < 8; i++)
+ if (rates[i].rate == bitrate)
+ return rates[i].mode;
+ /* no matching bitrate, return an error */
+ return -1;
+}
+
+typedef struct AMRContext {
+ int frameCount;
+ void *decState;
+ int *enstate;
+ int enc_bitrate;
+} AMRContext;
+
+static av_cold int amr_nb_decode_init(AVCodecContext *avctx)
+{
+ AMRContext *s = avctx->priv_data;
+
+ s->frameCount = 0;
+ s->decState = Decoder_Interface_init();
+ if (!s->decState) {
+ av_log(avctx, AV_LOG_ERROR, "Decoder_Interface_init error\r\n");
+ return -1;
+ }
+
+ amr_decode_fix_avctx(avctx);
+
+ if (avctx->channels > 1) {
+ av_log(avctx, AV_LOG_ERROR, "amr_nb: multichannel decoding not supported\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static av_cold int amr_nb_decode_close(AVCodecContext *avctx)
+{
+ AMRContext *s = avctx->priv_data;
+
+ Decoder_Interface_exit(s->decState);
+ return 0;
+}
+
+static int amr_nb_decode_frame(AVCodecContext * avctx, void *data,
+ int *data_size, const uint8_t * buf, int buf_size)
+{
+ AMRContext *s = avctx->priv_data;
+ const uint8_t *amrData = buf;
+ static const uint8_t block_size[16] = { 12, 13, 15, 17, 19, 20, 26, 31, 5, 0, 0, 0, 0, 0, 0, 0 };
+ enum Mode dec_mode;
+ int packet_size;
+
+ /* av_log(NULL, AV_LOG_DEBUG, "amr_decode_frame buf=%p buf_size=%d frameCount=%d!!\n",
+ buf, buf_size, s->frameCount); */
+
+ dec_mode = (buf[0] >> 3) & 0x000F;
+ packet_size = block_size[dec_mode] + 1;
+
+ if (packet_size > buf_size) {
+ av_log(avctx, AV_LOG_ERROR, "amr frame too short (%u, should be %u)\n",
+ buf_size, packet_size);
+ return -1;
+ }
+
+ s->frameCount++;
+ /* av_log(NULL, AV_LOG_DEBUG, "packet_size=%d amrData= 0x%X %X %X %X\n",
+ packet_size, amrData[0], amrData[1], amrData[2], amrData[3]); */
+ /* call decoder */
+ Decoder_Interface_Decode(s->decState, amrData, data, 0);
+ *data_size = 160 * 2;
+
+ return packet_size;
+}
+
+AVCodec libamr_nb_decoder = {
+ "libamr_nb",
+ CODEC_TYPE_AUDIO,
+ CODEC_ID_AMR_NB,
+ sizeof(AMRContext),
+ amr_nb_decode_init,
+ NULL,
+ amr_nb_decode_close,
+ amr_nb_decode_frame,
+ /*.capabilities = */0,
+ /*.next = */NULL,
+ /*.flush = */NULL,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name = */NULL_IF_CONFIG_SMALL("libamr-nb Adaptive Multi-Rate (AMR) Narrow-Band"),
+};
+#endif
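The core of amr_nb_decode_frame() above is the packet sizing: the AMR mode is read from the table-of-contents (TOC) byte and mapped through block_size[], plus one byte for the TOC itself. A minimal standalone sketch of just that arithmetic; the 0x3C example TOC byte is an illustration, not taken from the source.

/* AMR-NB packet sizing as done in amr_nb_decode_frame(): mode from the
 * TOC byte (bits 3..6), payload size from block_size[], +1 for the TOC. */
#include <stdio.h>
#include <stdint.h>

static const uint8_t block_size[16] = { 12, 13, 15, 17, 19, 20, 26, 31,
                                         5,  0,  0,  0,  0,  0,  0,  0 };

int main(void)
{
    uint8_t toc = 0x3C;                      /* example: mode 7 = MR122 (12.2 kbit/s) */
    int dec_mode = (toc >> 3) & 0x0F;
    int packet_size = block_size[dec_mode] + 1;
    printf("mode %d -> packet size %d bytes\n", dec_mode, packet_size);
    return 0;
}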
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mathops.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mathops.h
new file mode 100644
index 000000000..acccd645b
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mathops.h
@@ -0,0 +1,129 @@
+/*
+ * simple math operations
+ * Copyright (c) 2001, 2002 Fabrice Bellard
+ * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef AVCODEC_MATHOPS_H
+#define AVCODEC_MATHOPS_H
+
+#include "libavutil/common.h"
+
+#if ARCH_X86
+# include "x86/mathops.h"
+#endif
+
+/* generic implementation */
+
+#ifndef MULL
+# define MULL(a,b,s) (((int64_t)(a) * (int64_t)(b)) >> (s))
+#endif
+
+#ifndef MULH
+//gcc 3.4 creates an incredibly bloated mess out of this
+//# define MULH(a,b) (((int64_t)(a) * (int64_t)(b))>>32)
+
+static av_always_inline int MULH(int a, int b){
+ return ((int64_t)(a) * (int64_t)(b))>>32;
+}
+#endif
+
+#ifndef UMULH
+static av_always_inline unsigned UMULH(unsigned a, unsigned b){
+ return ((uint64_t)(a) * (uint64_t)(b))>>32;
+}
+#endif
+
+#ifndef MUL64
+# define MUL64(a,b) ((int64_t)(a) * (int64_t)(b))
+#endif
+
+#ifndef MAC64
+# define MAC64(d, a, b) ((d) += MUL64(a, b))
+#endif
+
+#ifndef MLS64
+# define MLS64(d, a, b) ((d) -= MUL64(a, b))
+#endif
+
+/* signed 16x16 -> 32 multiply add accumulate */
+#ifndef MAC16
+# define MAC16(rt, ra, rb) rt += (ra) * (rb)
+#endif
+
+/* signed 16x16 -> 32 multiply */
+#ifndef MUL16
+# define MUL16(ra, rb) ((ra) * (rb))
+#endif
+
+#ifndef MLS16
+# define MLS16(rt, ra, rb) ((rt) -= (ra) * (rb))
+#endif
+
+/* median of 3 */
+#ifndef mid_pred
+#define mid_pred mid_pred
+static inline av_const int mid_pred(int a, int b, int c)
+{
+#if 0
+ int t= (a-b)&((a-b)>>31);
+ a-=t;
+ b+=t;
+ b-= (b-c)&((b-c)>>31);
+ b+= (a-b)&((a-b)>>31);
+
+ return b;
+#else
+ if(a>b){
+ if(c>b){
+ if(c>a) b=a;
+ else b=c;
+ }
+ }else{
+ if(b>c){
+ if(c>a) b=c;
+ else b=a;
+ }
+ }
+ return b;
+#endif
+}
+#endif
+
+#ifndef INT_BIT
+# define INT_BIT (CHAR_BIT * sizeof(int))
+#endif
+
+#ifndef sign_extend
+static inline av_const int sign_extend(int val, unsigned bits)
+{
+ return (val << (INT_BIT - bits)) >> (INT_BIT - bits);
+}
+#endif
+
+#ifndef COPY3_IF_LT
+#define COPY3_IF_LT(x, y, a, b, c, d)\
+if ((y) < (x)) {\
+ (x) = (y);\
+ (a) = (b);\
+ (c) = (d);\
+}
+#endif
+
+#endif /* AVCODEC_MATHOPS_H */
+
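Two of the generic helpers above, mid_pred() and sign_extend(), are small enough to exercise in isolation. The sketch below restates them outside the header (same logic, without the av_const/av_always_inline attributes) so they can be compiled and tested directly.

/* Standalone restatement of mid_pred() (median of three, used e.g. for
 * motion vector prediction) and sign_extend() (treat the low 'bits' bits
 * of a value as a signed number). */
#include <stdio.h>
#include <limits.h>

#define INT_BIT (CHAR_BIT * sizeof(int))

static int mid_pred(int a, int b, int c)
{
    if (a > b) {
        if (c > b) b = (c > a) ? a : c;
    } else {
        if (b > c) b = (c > a) ? c : a;
    }
    return b;
}

static int sign_extend(int val, unsigned bits)
{
    return (val << (INT_BIT - bits)) >> (INT_BIT - bits);
}

int main(void)
{
    printf("mid_pred(3, 10, 7)   = %d\n", mid_pred(3, 10, 7));   /* 7  */
    printf("sign_extend(0xFF, 8) = %d\n", sign_extend(0xFF, 8)); /* -1 */
    return 0;
}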
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mdct.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mdct.c
new file mode 100644
index 000000000..3bf953134
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mdct.c
@@ -0,0 +1,235 @@
+/*
+ * MDCT/IMDCT transforms
+ * Copyright (c) 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "dsputil.h"
+
+/**
+ * @file libavcodec/mdct.c
+ * MDCT/IMDCT transforms.
+ */
+
+#if _MSC_VER<1400
+#define sinf sin
+#endif
+
+// Generate a Kaiser-Bessel Derived Window.
+#define BESSEL_I0_ITER 50 // default: 50 iterations of Bessel I0 approximation
+av_cold void ff_kbd_window_init(float *window, float alpha, int n)
+{
+ int i, j;
+ double sum = 0.0, bessel, tmp;
+#if __STDC_VERSION__ >= 199901L
+ double local_window[n];
+#else
+ double *local_window = _alloca(n * sizeof(double));
+#endif
+ double alpha2 = (alpha * M_PI / n) * (alpha * M_PI / n);
+
+ for (i = 0; i < n; i++) {
+ tmp = i * (n - i) * alpha2;
+ bessel = 1.0;
+ for (j = BESSEL_I0_ITER; j > 0; j--)
+ bessel = bessel * tmp / (j * j) + 1;
+ sum += bessel;
+ local_window[i] = sum;
+ }
+
+ sum++;
+ for (i = 0; i < n; i++)
+ window[i] = sqrt(local_window[i] / sum);
+}
+
+//#include "mdct_tablegen.h"
+
+/**
+ * Initialize MDCT or IMDCT computation.
+ */
+av_cold int ff_mdct_init(FFTContext *s, int nbits, int inverse, double scale)
+{
+ int n, n4, i;
+ double alpha, theta;
+ int tstep;
+
+ memset(s, 0, sizeof(*s));
+ n = 1 << nbits;
+ s->mdct_bits = nbits;
+ s->mdct_size = n;
+ n4 = n >> 2;
+ s->permutation = FF_MDCT_PERM_NONE;
+
+ if (ff_fft_init(s, s->mdct_bits - 2, inverse) < 0)
+ goto fail;
+
+ s->tcos = av_malloc(n/2 * sizeof(FFTSample));
+ if (!s->tcos)
+ goto fail;
+
+ switch (s->permutation) {
+ case FF_MDCT_PERM_NONE:
+ s->tsin = s->tcos + n4;
+ tstep = 1;
+ break;
+ case FF_MDCT_PERM_INTERLEAVE:
+ s->tsin = s->tcos + 1;
+ tstep = 2;
+ break;
+ default:
+ goto fail;
+ }
+
+ theta = 1.0 / 8.0 + (scale < 0 ? n4 : 0);
+ scale = sqrt(fabs(scale));
+ for(i=0;i<n4;i++) {
+ alpha = 2 * M_PI * (i + theta) / n;
+ s->tcos[i*tstep] = -cos(alpha) * scale;
+ s->tsin[i*tstep] = -sin(alpha) * scale;
+ }
+ return 0;
+ fail:
+ ff_mdct_end(s);
+ return -1;
+}
+
+/* complex multiplication: p = a * b */
+#define CMUL(pre, pim, are, aim, bre, bim) \
+{\
+ FFTSample _are = (are);\
+ FFTSample _aim = (aim);\
+ FFTSample _bre = (bre);\
+ FFTSample _bim = (bim);\
+ (pre) = _are * _bre - _aim * _bim;\
+ (pim) = _are * _bim + _aim * _bre;\
+}
+
+/**
+ * Compute the middle half of the inverse MDCT of size N = 2^nbits,
+ * thus excluding the parts that can be derived by symmetry
+ * @param output N/2 samples
+ * @param input N/2 samples
+ */
+void ff_imdct_half_c(FFTContext *s, FFTSample *output, const FFTSample *input)
+{
+ int k, n8, n4, n2, n, j;
+ const uint16_t *revtab = s->revtab;
+ const FFTSample *tcos = s->tcos;
+ const FFTSample *tsin = s->tsin;
+ const FFTSample *in1, *in2;
+ FFTComplex *z = (FFTComplex *)output;
+
+ n = 1 << s->mdct_bits;
+ n2 = n >> 1;
+ n4 = n >> 2;
+ n8 = n >> 3;
+
+ /* pre rotation */
+ in1 = input;
+ in2 = input + n2 - 1;
+ for(k = 0; k < n4; k++) {
+ j=revtab[k];
+ CMUL(z[j].re, z[j].im, *in2, *in1, tcos[k], tsin[k]);
+ in1 += 2;
+ in2 -= 2;
+ }
+ ff_fft_calc(s, z);
+
+ /* post rotation + reordering */
+ for(k = 0; k < n8; k++) {
+ FFTSample r0, i0, r1, i1;
+ CMUL(r0, i1, z[n8-k-1].im, z[n8-k-1].re, tsin[n8-k-1], tcos[n8-k-1]);
+ CMUL(r1, i0, z[n8+k ].im, z[n8+k ].re, tsin[n8+k ], tcos[n8+k ]);
+ z[n8-k-1].re = r0;
+ z[n8-k-1].im = i0;
+ z[n8+k ].re = r1;
+ z[n8+k ].im = i1;
+ }
+}
+
+/**
+ * Compute inverse MDCT of size N = 2^nbits
+ * @param output N samples
+ * @param input N/2 samples
+ */
+void ff_imdct_calc_c(FFTContext *s, FFTSample *output, const FFTSample *input)
+{
+ int k;
+ int n = 1 << s->mdct_bits;
+ int n2 = n >> 1;
+ int n4 = n >> 2;
+
+ ff_imdct_half_c(s, output+n4, input);
+
+ for(k = 0; k < n4; k++) {
+ output[k] = -output[n2-k-1];
+ output[n-k-1] = output[n2+k];
+ }
+}
+
+/**
+ * Compute MDCT of size N = 2^nbits
+ * @param input N samples
+ * @param out N/2 samples
+ */
+void ff_mdct_calc_c(FFTContext *s, FFTSample *out, const FFTSample *input)
+{
+ int i, j, n, n8, n4, n2, n3;
+ FFTSample re, im;
+ const uint16_t *revtab = s->revtab;
+ const FFTSample *tcos = s->tcos;
+ const FFTSample *tsin = s->tsin;
+ FFTComplex *x = (FFTComplex *)out;
+
+ n = 1 << s->mdct_bits;
+ n2 = n >> 1;
+ n4 = n >> 2;
+ n8 = n >> 3;
+ n3 = 3 * n4;
+
+ /* pre rotation */
+ for(i=0;i<n8;i++) {
+ re = -input[2*i+3*n4] - input[n3-1-2*i];
+ im = -input[n4+2*i] + input[n4-1-2*i];
+ j = revtab[i];
+ CMUL(x[j].re, x[j].im, re, im, -tcos[i], tsin[i]);
+
+ re = input[2*i] - input[n2-1-2*i];
+ im = -(input[n2+2*i] + input[n-1-2*i]);
+ j = revtab[n8 + i];
+ CMUL(x[j].re, x[j].im, re, im, -tcos[n8 + i], tsin[n8 + i]);
+ }
+
+ ff_fft_calc(s, x);
+
+ /* post rotation */
+ for(i=0;i<n8;i++) {
+ FFTSample r0, i0, r1, i1;
+ CMUL(i1, r0, x[n8-i-1].re, x[n8-i-1].im, -tsin[n8-i-1], -tcos[n8-i-1]);
+ CMUL(i0, r1, x[n8+i ].re, x[n8+i ].im, -tsin[n8+i ], -tcos[n8+i ]);
+ x[n8-i-1].re = r0;
+ x[n8-i-1].im = i0;
+ x[n8+i ].re = r1;
+ x[n8+i ].im = i1;
+ }
+}
+
+av_cold void ff_mdct_end(FFTContext *s)
+{
+ av_freep(&s->tcos);
+ ff_fft_end(s);
+}
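ff_imdct_calc_c() above reconstructs the full N-sample IMDCT output from the N/2 samples produced by ff_imdct_half_c() purely through sign-flipped and mirrored copies. The toy sketch below shows only that index arithmetic on a made-up 8-element buffer whose middle half is pretended to be already computed.

/* IMDCT output symmetry: the first quarter is a negated mirror of the
 * second, the last quarter a mirror of the third, exactly as in the
 * copy loop of ff_imdct_calc_c(). */
#include <stdio.h>

int main(void)
{
    float output[8] = { 0, 0, 1, 2, 3, 4, 0, 0 };  /* middle half "already computed" */
    int n = 8, n2 = n >> 1, n4 = n >> 2, k;

    for (k = 0; k < n4; k++) {
        output[k]         = -output[n2 - k - 1];
        output[n - k - 1] =  output[n2 + k];
    }
    for (k = 0; k < n; k++)
        printf("%g ", output[k]);
    printf("\n");                                  /* -2 -1 1 2 3 4 4 3 */
    return 0;
}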
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mdct_tablegen.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mdct_tablegen.c
new file mode 100644
index 000000000..f04502f18
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mdct_tablegen.c
@@ -0,0 +1,61 @@
+/*
+ * Generate a header file for hardcoded MDCT tables
+ *
+ * Copyright (c) 2009 Reimar Döffinger <Reimar.Doeffinger@gmx.de>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdlib.h>
+#define CONFIG_HARDCODED_TABLES 0
+#define av_cold
+#define SINETABLE_CONST
+#define SINETABLE(size) \
+ float ff_sine_##size[size]
+#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0]))
+#ifndef M_PI
+#define M_PI 3.14159265358979323846
+#endif
+#include "mdct_tablegen.h"
+#include "tableprint.h"
+
+void tableinit(void)
+{
+ int i;
+ for (i = 5; i <= 12; i++)
+ ff_init_ff_sine_windows(i);
+}
+
+#define SINE_TABLE_DEF(size) \
+ { \
+ "SINETABLE("#size")", \
+ write_float_array, \
+ ff_sine_##size, \
+ size \
+ },
+
+const struct tabledef tables[] = {
+ SINE_TABLE_DEF( 32)
+ SINE_TABLE_DEF( 64)
+ SINE_TABLE_DEF( 128)
+ SINE_TABLE_DEF( 256)
+ SINE_TABLE_DEF( 512)
+ SINE_TABLE_DEF(1024)
+ SINE_TABLE_DEF(2048)
+ SINE_TABLE_DEF(4096)
+ { NULL }
+};
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mdct_tablegen.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mdct_tablegen.h
new file mode 100644
index 000000000..af652e745
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mdct_tablegen.h
@@ -0,0 +1,59 @@
+/*
+ * Header file for hardcoded MDCT tables
+ *
+ * Copyright (c) 2009 Reimar Döffinger <Reimar.Doeffinger@gmx.de>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <assert.h>
+// do not use libavutil/mathematics.h since this is compiled both
+// for the host and the target and config.h is only valid for the target
+#include <math.h>
+
+#if !CONFIG_HARDCODED_TABLES
+SINETABLE( 32);
+SINETABLE( 64);
+SINETABLE( 128);
+SINETABLE( 256);
+SINETABLE( 512);
+SINETABLE(1024);
+SINETABLE(2048);
+SINETABLE(4096);
+#else
+#include "libavcodec/mdct_tables.h"
+#endif
+
+SINETABLE_CONST float * const ff_sine_windows[] = {
+ NULL, NULL, NULL, NULL, NULL, // unused
+ ff_sine_32 , ff_sine_64 ,
+ ff_sine_128, ff_sine_256, ff_sine_512, ff_sine_1024, ff_sine_2048, ff_sine_4096
+};
+
+// Generate a sine window.
+av_cold void ff_sine_window_init(float *window, int n) {
+ int i;
+ for(i = 0; i < n; i++)
+ window[i] = sinf((i + 0.5) * (M_PI / (2.0 * n)));
+}
+
+av_cold void ff_init_ff_sine_windows(int index) {
+ assert(index >= 0 && index < FF_ARRAY_ELEMS(ff_sine_windows));
+#if !CONFIG_HARDCODED_TABLES
+ ff_sine_window_init(ff_sine_windows[index], 1 << index);
+#endif
+}
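ff_sine_windows[] above is indexed by log2 of the window length (index 5 is the 32-tap window, index 12 the 4096-tap one), and each table is filled with sin((i + 0.5) * pi / (2N)). A minimal standalone sketch of that formula, using a local helper rather than the header's tables:

/* Sine window: same formula as ff_sine_window_init(), written out for a
 * local 32-tap buffer. Compile with -lm. */
#include <stdio.h>
#include <math.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

static void sine_window_init(float *window, int n)
{
    int i;
    for (i = 0; i < n; i++)
        window[i] = sinf((i + 0.5) * (M_PI / (2.0 * n)));
}

int main(void)
{
    float w[32];
    sine_window_init(w, 32);
    printf("w[0]=%f w[31]=%f\n", w[0], w[31]);  /* rises from ~0.025 towards ~1 */
    return 0;
}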
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mjpeg.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mjpeg.c
new file mode 100644
index 000000000..69b8a507d
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mjpeg.c
@@ -0,0 +1,145 @@
+/*
+ * MJPEG encoder and decoder
+ * Copyright (c) 2000, 2001 Fabrice Bellard
+ * Copyright (c) 2003 Alex Beregszaszi
+ * Copyright (c) 2003-2004 Michael Niedermayer
+ *
+ * Support for external huffman table, various fixes (AVID workaround),
+ * aspecting, new decode_frame mechanism and apple mjpeg-b support
+ * by Alex Beregszaszi
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file mjpeg.c
+ * MJPEG encoder and decoder.
+ */
+
+#include "mjpeg.h"
+
+
+#if 0
+/* These are the sample quantization tables given in JPEG spec section K.1.
+ * The spec says that the values given produce "good" quality, and
+ * when divided by 2, "very good" quality.
+ */
+const unsigned char std_luminance_quant_tbl[64] = {
+ 16, 11, 10, 16, 24, 40, 51, 61,
+ 12, 12, 14, 19, 26, 58, 60, 55,
+ 14, 13, 16, 24, 40, 57, 69, 56,
+ 14, 17, 22, 29, 51, 87, 80, 62,
+ 18, 22, 37, 56, 68, 109, 103, 77,
+ 24, 35, 55, 64, 81, 104, 113, 92,
+ 49, 64, 78, 87, 103, 121, 120, 101,
+ 72, 92, 95, 98, 112, 100, 103, 99
+};
+const unsigned char std_chrominance_quant_tbl[64] = {
+ 17, 18, 24, 47, 99, 99, 99, 99,
+ 18, 21, 26, 66, 99, 99, 99, 99,
+ 24, 26, 56, 99, 99, 99, 99, 99,
+ 47, 66, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99
+};
+#endif
+
+/* Set up the standard Huffman tables (cf. JPEG standard section K.3) */
+/* IMPORTANT: these are only valid for 8-bit data precision! */
+const uint8_t ff_mjpeg_bits_dc_luminance[17] =
+{ /* 0-base */ 0, 0, 1, 5, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0 };
+const uint8_t ff_mjpeg_val_dc[12] =
+{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 };
+
+const uint8_t ff_mjpeg_bits_dc_chrominance[17] =
+{ /* 0-base */ 0, 0, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0 };
+
+const uint8_t ff_mjpeg_bits_ac_luminance[17] =
+{ /* 0-base */ 0, 0, 2, 1, 3, 3, 2, 4, 3, 5, 5, 4, 4, 0, 0, 1, 0x7d };
+const uint8_t ff_mjpeg_val_ac_luminance[] =
+{ 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12,
+ 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07,
+ 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08,
+ 0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0,
+ 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0a, 0x16,
+ 0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28,
+ 0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39,
+ 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49,
+ 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
+ 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
+ 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79,
+ 0x7a, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
+ 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
+ 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
+ 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6,
+ 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5,
+ 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4,
+ 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2,
+ 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea,
+ 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
+ 0xf9, 0xfa
+};
+
+const uint8_t ff_mjpeg_bits_ac_chrominance[17] =
+{ /* 0-base */ 0, 0, 2, 1, 2, 4, 4, 3, 4, 7, 5, 4, 4, 0, 1, 2, 0x77 };
+
+const uint8_t ff_mjpeg_val_ac_chrominance[] =
+{ 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21,
+ 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71,
+ 0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91,
+ 0xa1, 0xb1, 0xc1, 0x09, 0x23, 0x33, 0x52, 0xf0,
+ 0x15, 0x62, 0x72, 0xd1, 0x0a, 0x16, 0x24, 0x34,
+ 0xe1, 0x25, 0xf1, 0x17, 0x18, 0x19, 0x1a, 0x26,
+ 0x27, 0x28, 0x29, 0x2a, 0x35, 0x36, 0x37, 0x38,
+ 0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
+ 0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
+ 0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
+ 0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
+ 0x79, 0x7a, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96,
+ 0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5,
+ 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4,
+ 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3,
+ 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2,
+ 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda,
+ 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9,
+ 0xea, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
+ 0xf9, 0xfa
+};
+
+/* isn't this function nicer than the one in libjpeg? */
+void ff_mjpeg_build_huffman_codes(uint8_t *huff_size, uint16_t *huff_code,
+ const uint8_t *bits_table,
+ const uint8_t *val_table)
+{
+ int i, j, k, nb, code, sym;
+
+ code = 0;
+ k = 0;
+ for(i=1;i<=16;i++) {
+ nb = bits_table[i];
+ for(j=0;j<nb;j++) {
+ sym = val_table[k++];
+ huff_size[sym] = i;
+ huff_code[sym] = code;
+ code++;
+ }
+ code <<= 1;
+ }
+}
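ff_mjpeg_build_huffman_codes() above builds canonical Huffman codes: within each code length the codes are assigned consecutively, and the running code value is shifted left by one when moving to the next length. The standalone sketch below runs the same loop over copies of the standard DC luminance tables defined earlier in this file.

/* Canonical Huffman construction, as in ff_mjpeg_build_huffman_codes(),
 * applied to the JPEG standard DC luminance tables. */
#include <stdio.h>
#include <stdint.h>

static const uint8_t bits_dc_lum[17] =
    { 0, 0, 1, 5, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0 };
static const uint8_t val_dc[12] =
    { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 };

int main(void)
{
    uint8_t  huff_size[256] = { 0 };
    uint16_t huff_code[256] = { 0 };
    int i, j, k = 0, code = 0;

    for (i = 1; i <= 16; i++) {
        for (j = 0; j < bits_dc_lum[i]; j++) {
            int sym = val_dc[k++];
            huff_size[sym] = i;
            huff_code[sym] = code++;
        }
        code <<= 1;               /* next length: append a zero bit */
    }
    for (i = 0; i < 12; i++)      /* symbol 0 -> 00, symbol 1 -> 010, ... */
        printf("symbol %2d -> %d-bit code 0x%x\n", i, huff_size[i], huff_code[i]);
    return 0;
}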
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mjpeg.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mjpeg.h
new file mode 100644
index 000000000..b59fb785b
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mjpeg.h
@@ -0,0 +1,155 @@
+/*
+ * MJPEG encoder and decoder
+ * Copyright (c) 2000, 2001 Fabrice Bellard
+ * Copyright (c) 2003 Alex Beregszaszi
+ * Copyright (c) 2003-2004 Michael Niedermayer
+ *
+ * Support for external huffman table, various fixes (AVID workaround),
+ * aspecting, new decode_frame mechanism and apple mjpeg-b support
+ * by Alex Beregszaszi
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/mjpeg.h
+ * MJPEG encoder and decoder.
+ */
+
+#ifndef AVCODEC_MJPEG_H
+#define AVCODEC_MJPEG_H
+
+#include "avcodec.h"
+#include "put_bits.h"
+
+
+/* JPEG marker codes */
+typedef enum {
+ /* start of frame */
+ SOF0 = 0xc0, /* baseline */
+ SOF1 = 0xc1, /* extended sequential, huffman */
+ SOF2 = 0xc2, /* progressive, huffman */
+ SOF3 = 0xc3, /* lossless, huffman */
+
+ SOF5 = 0xc5, /* differential sequential, huffman */
+ SOF6 = 0xc6, /* differential progressive, huffman */
+ SOF7 = 0xc7, /* differential lossless, huffman */
+ JPG = 0xc8, /* reserved for JPEG extension */
+ SOF9 = 0xc9, /* extended sequential, arithmetic */
+ SOF10 = 0xca, /* progressive, arithmetic */
+ SOF11 = 0xcb, /* lossless, arithmetic */
+
+ SOF13 = 0xcd, /* differential sequential, arithmetic */
+ SOF14 = 0xce, /* differential progressive, arithmetic */
+ SOF15 = 0xcf, /* differential lossless, arithmetic */
+
+ DHT = 0xc4, /* define huffman tables */
+
+ DAC = 0xcc, /* define arithmetic-coding conditioning */
+
+ /* restart with modulo 8 count "m" */
+ RST0 = 0xd0,
+ RST1 = 0xd1,
+ RST2 = 0xd2,
+ RST3 = 0xd3,
+ RST4 = 0xd4,
+ RST5 = 0xd5,
+ RST6 = 0xd6,
+ RST7 = 0xd7,
+
+ SOI = 0xd8, /* start of image */
+ EOI = 0xd9, /* end of image */
+ SOS = 0xda, /* start of scan */
+ DQT = 0xdb, /* define quantization tables */
+ DNL = 0xdc, /* define number of lines */
+ DRI = 0xdd, /* define restart interval */
+ DHP = 0xde, /* define hierarchical progression */
+ EXP = 0xdf, /* expand reference components */
+
+ APP0 = 0xe0,
+ APP1 = 0xe1,
+ APP2 = 0xe2,
+ APP3 = 0xe3,
+ APP4 = 0xe4,
+ APP5 = 0xe5,
+ APP6 = 0xe6,
+ APP7 = 0xe7,
+ APP8 = 0xe8,
+ APP9 = 0xe9,
+ APP10 = 0xea,
+ APP11 = 0xeb,
+ APP12 = 0xec,
+ APP13 = 0xed,
+ APP14 = 0xee,
+ APP15 = 0xef,
+
+ JPG0 = 0xf0,
+ JPG1 = 0xf1,
+ JPG2 = 0xf2,
+ JPG3 = 0xf3,
+ JPG4 = 0xf4,
+ JPG5 = 0xf5,
+ JPG6 = 0xf6,
+ SOF48 = 0xf7, ///< JPEG-LS
+ LSE = 0xf8, ///< JPEG-LS extension parameters
+ JPG9 = 0xf9,
+ JPG10 = 0xfa,
+ JPG11 = 0xfb,
+ JPG12 = 0xfc,
+ JPG13 = 0xfd,
+
+ COM = 0xfe, /* comment */
+
+ TEM = 0x01, /* temporary private use for arithmetic coding */
+
+ /* 0x02 -> 0xbf reserved */
+} JPEG_MARKER;
+
+static inline void put_marker(PutBitContext *p, int code)
+{
+ put_bits(p, 8, 0xff);
+ put_bits(p, 8, code);
+}
+
+#define PREDICT(ret, topleft, top, left, predictor)\
+ switch(predictor){\
+ case 1: ret= left; break;\
+ case 2: ret= top; break;\
+ case 3: ret= topleft; break;\
+ case 4: ret= left + top - topleft; break;\
+ case 5: ret= left + ((top - topleft)>>1); break;\
+ case 6: ret= top + ((left - topleft)>>1); break;\
+ default:\
+ case 7: ret= (left + top)>>1; break;\
+ }
+
+extern const uint8_t ff_mjpeg_bits_dc_luminance[];
+extern const uint8_t ff_mjpeg_val_dc[];
+
+extern const uint8_t ff_mjpeg_bits_dc_chrominance[];
+
+extern const uint8_t ff_mjpeg_bits_ac_luminance[];
+extern const uint8_t ff_mjpeg_val_ac_luminance[];
+
+extern const uint8_t ff_mjpeg_bits_ac_chrominance[];
+extern const uint8_t ff_mjpeg_val_ac_chrominance[];
+
+void ff_mjpeg_build_huffman_codes(uint8_t *huff_size, uint16_t *huff_code,
+ const uint8_t *bits_table,
+ const uint8_t *val_table);
+
+#endif /* AVCODEC_MJPEG_H */
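The PREDICT() macro above selects one of the seven lossless-JPEG predictors from the already reconstructed left, top and top-left neighbours. A minimal usage sketch (the sample values are made up) showing predictor 4, the planar left + top - topleft case:

/* Predictor selection as in the PREDICT() macro from mjpeg.h. */
#include <stdio.h>

#define PREDICT(ret, topleft, top, left, predictor)\
    switch(predictor){\
    case 1: ret = left; break;\
    case 2: ret = top; break;\
    case 3: ret = topleft; break;\
    case 4: ret = left + top - topleft; break;\
    case 5: ret = left + ((top - topleft) >> 1); break;\
    case 6: ret = top + ((left - topleft) >> 1); break;\
    default:\
    case 7: ret = (left + top) >> 1; break;\
    }

int main(void)
{
    int pred, topleft = 100, top = 110, left = 105;
    PREDICT(pred, topleft, top, left, 4);        /* 105 + 110 - 100 */
    printf("predicted sample = %d\n", pred);     /* 115 */
    return 0;
}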
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mjpegdec.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mjpegdec.c
new file mode 100644
index 000000000..49c04107e
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mjpegdec.c
@@ -0,0 +1,1494 @@
+/*
+ * MJPEG decoder
+ * Copyright (c) 2000, 2001 Fabrice Bellard
+ * Copyright (c) 2003 Alex Beregszaszi
+ * Copyright (c) 2003-2004 Michael Niedermayer
+ *
+ * Support for external huffman table, various fixes (AVID workaround),
+ * aspecting, new decode_frame mechanism and apple mjpeg-b support
+ * by Alex Beregszaszi
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/mjpegdec.c
+ * MJPEG decoder.
+ */
+
+//#define DEBUG
+#include <assert.h>
+
+#include "avcodec.h"
+#include "dsputil.h"
+#include "mjpeg.h"
+#include "mjpegdec.h"
+#include "jpeglsdec.h"
+
+
+static int build_vlc(VLC *vlc, const uint8_t *bits_table, const uint8_t *val_table,
+ int nb_codes, int use_static, int is_ac)
+{
+ uint8_t huff_size[256+16];
+ uint16_t huff_code[256+16];
+
+ assert(nb_codes <= 256);
+
+ memset(huff_size, 0, sizeof(huff_size));
+ ff_mjpeg_build_huffman_codes(huff_size, huff_code, bits_table, val_table);
+
+ if(is_ac){
+ memmove(huff_size+16, huff_size, sizeof(uint8_t)*nb_codes);
+ memmove(huff_code+16, huff_code, sizeof(uint16_t)*nb_codes);
+ memset(huff_size, 0, sizeof(uint8_t)*16);
+ memset(huff_code, 0, sizeof(uint16_t)*16);
+ nb_codes += 16;
+ }
+
+ return init_vlc(vlc, 9, nb_codes, huff_size, 1, 1, huff_code, 2, 2, use_static);
+}
+
+static void build_basic_mjpeg_vlc(MJpegDecodeContext * s) {
+ build_vlc(&s->vlcs[0][0], ff_mjpeg_bits_dc_luminance,
+ ff_mjpeg_val_dc, 12, 0, 0);
+ build_vlc(&s->vlcs[0][1], ff_mjpeg_bits_dc_chrominance,
+ ff_mjpeg_val_dc, 12, 0, 0);
+ build_vlc(&s->vlcs[1][0], ff_mjpeg_bits_ac_luminance,
+ ff_mjpeg_val_ac_luminance, 251, 0, 1);
+ build_vlc(&s->vlcs[1][1], ff_mjpeg_bits_ac_chrominance,
+ ff_mjpeg_val_ac_chrominance, 251, 0, 1);
+}
+
+av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
+{
+ MJpegDecodeContext *s = avctx->priv_data;
+
+ s->avctx = avctx;
+ dsputil_init(&s->dsp, avctx);
+ ff_init_scantable(s->dsp.idct_permutation, &s->scantable, ff_zigzag_direct);
+ s->buffer_size = 0;
+ s->buffer = NULL;
+ s->start_code = -1;
+ s->first_picture = 1;
+ s->org_height = avctx->coded_height;
+ avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
+
+ build_basic_mjpeg_vlc(s);
+
+ if (avctx->flags & CODEC_FLAG_EXTERN_HUFF)
+ {
+ av_log(avctx, AV_LOG_INFO, "mjpeg: using external huffman table\n");
+ init_get_bits(&s->gb, avctx->extradata, avctx->extradata_size*8);
+ if (ff_mjpeg_decode_dht(s)) {
+ av_log(avctx, AV_LOG_ERROR, "mjpeg: error using external huffman table, switching back to internal\n");
+ build_basic_mjpeg_vlc(s);
+ }
+ }
+ if (avctx->extradata_size > 9 &&
+ AV_RL32(avctx->extradata + 4) == MKTAG('f','i','e','l')) {
+ if (avctx->extradata[9] == 6) { /* quicktime icefloe 019 */
+ s->interlace_polarity = 1; /* bottom field first */
+ av_log(avctx, AV_LOG_DEBUG, "mjpeg bottom field first\n");
+ }
+ }
+ if (avctx->codec->id == CODEC_ID_AMV)
+ s->flipped = 1;
+
+ return 0;
+}
+
+
+/* quantization tables */
+int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
+{
+ int len, index, i, j;
+
+ len = get_bits(&s->gb, 16) - 2;
+
+ while (len >= 65) {
+ /* only 8-bit precision is handled */
+ if (get_bits(&s->gb, 4) != 0)
+ {
+ av_log(s->avctx, AV_LOG_ERROR, "dqt: 16bit precision\n");
+ return -1;
+ }
+ index = get_bits(&s->gb, 4);
+ if (index >= 4)
+ return -1;
+ av_log(s->avctx, AV_LOG_DEBUG, "index=%d\n", index);
+ /* read quant table */
+ for(i=0;i<64;i++) {
+ j = s->scantable.permutated[i];
+ s->quant_matrixes[index][j] = get_bits(&s->gb, 8);
+ }
+
+ //XXX FIXME finetune, and perhaps add dc too
+ s->qscale[index]= FFMAX(
+ s->quant_matrixes[index][s->scantable.permutated[1]],
+ s->quant_matrixes[index][s->scantable.permutated[8]]) >> 1;
+ av_log(s->avctx, AV_LOG_DEBUG, "qscale[%d]: %d\n", index, s->qscale[index]);
+ len -= 65;
+ }
+
+ return 0;
+}
+
+/* decode Huffman tables and build VLC decoders */
+int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
+{
+ int len, index, i, class, n, v, code_max;
+ uint8_t bits_table[17];
+ uint8_t val_table[256];
+
+ len = get_bits(&s->gb, 16) - 2;
+
+ while (len > 0) {
+ if (len < 17)
+ return -1;
+ class = get_bits(&s->gb, 4);
+ if (class >= 2)
+ return -1;
+ index = get_bits(&s->gb, 4);
+ if (index >= 4)
+ return -1;
+ n = 0;
+ for(i=1;i<=16;i++) {
+ bits_table[i] = get_bits(&s->gb, 8);
+ n += bits_table[i];
+ }
+ len -= 17;
+ if (len < n || n > 256)
+ return -1;
+
+ code_max = 0;
+ for(i=0;i<n;i++) {
+ v = get_bits(&s->gb, 8);
+ if (v > code_max)
+ code_max = v;
+ val_table[i] = v;
+ }
+ len -= n;
+
+ /* build VLC and flush previous vlc if present */
+ free_vlc(&s->vlcs[class][index]);
+ av_log(s->avctx, AV_LOG_DEBUG, "class=%d index=%d nb_codes=%d\n",
+ class, index, code_max + 1);
+ if(build_vlc(&s->vlcs[class][index], bits_table, val_table, code_max + 1, 0, class > 0) < 0){
+ return -1;
+ }
+ }
+ return 0;
+}
+
+int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
+{
+ int len, nb_components, i, width, height, pix_fmt_id;
+
+ /* XXX: verify len field validity */
+ len = get_bits(&s->gb, 16);
+ s->bits= get_bits(&s->gb, 8);
+
+ if(s->pegasus_rct) s->bits=9;
+ if(s->bits==9 && !s->pegasus_rct) s->rct=1; //FIXME ugly
+
+ if (s->bits != 8 && !s->lossless){
+ av_log(s->avctx, AV_LOG_ERROR, "only 8 bits/component accepted\n");
+ return -1;
+ }
+
+ height = get_bits(&s->gb, 16);
+ width = get_bits(&s->gb, 16);
+
+ //HACK for odd_height.mov
+ if(s->interlaced && s->width == width && s->height == height + 1)
+ height= s->height;
+
+ av_log(s->avctx, AV_LOG_DEBUG, "sof0: picture: %dx%d\n", width, height);
+ if(avcodec_check_dimensions(s->avctx, width, height))
+ return -1;
+
+ nb_components = get_bits(&s->gb, 8);
+ if (nb_components <= 0 ||
+ nb_components > MAX_COMPONENTS)
+ return -1;
+ if (s->ls && !(s->bits <= 8 || nb_components == 1)){
+ av_log(s->avctx, AV_LOG_ERROR, "only <= 8 bits/component or 16-bit gray accepted for JPEG-LS\n");
+ return -1;
+ }
+ s->nb_components = nb_components;
+ s->h_max = 1;
+ s->v_max = 1;
+ for(i=0;i<nb_components;i++) {
+ /* component id */
+ s->component_id[i] = get_bits(&s->gb, 8) - 1;
+ s->h_count[i] = get_bits(&s->gb, 4);
+ s->v_count[i] = get_bits(&s->gb, 4);
+ /* compute hmax and vmax (only used in interleaved case) */
+ if (s->h_count[i] > s->h_max)
+ s->h_max = s->h_count[i];
+ if (s->v_count[i] > s->v_max)
+ s->v_max = s->v_count[i];
+ s->quant_index[i] = get_bits(&s->gb, 8);
+ if (s->quant_index[i] >= 4)
+ return -1;
+ av_log(s->avctx, AV_LOG_DEBUG, "component %d %d:%d id: %d quant:%d\n", i, s->h_count[i],
+ s->v_count[i], s->component_id[i], s->quant_index[i]);
+ }
+
+ if(s->ls && (s->h_max > 1 || s->v_max > 1)) {
+ av_log(s->avctx, AV_LOG_ERROR, "Subsampling in JPEG-LS is not supported.\n");
+ return -1;
+ }
+
+ if(s->v_max==1 && s->h_max==1 && s->lossless==1) s->rgb=1;
+
+ /* if different size, realloc/alloc picture */
+ /* XXX: also check h_count and v_count */
+ if (width != s->width || height != s->height) {
+ av_freep(&s->qscale_table);
+
+ s->width = width;
+ s->height = height;
+ s->interlaced = 0;
+
+ /* test interlaced mode */
+ if (s->first_picture &&
+ s->org_height != 0 &&
+ s->height < ((s->org_height * 3) / 4)) {
+ s->interlaced = 1;
+ s->bottom_field = s->interlace_polarity;
+ s->picture.interlaced_frame = 1;
+ s->picture.top_field_first = !s->interlace_polarity;
+ height *= 2;
+ }
+
+ avcodec_set_dimensions(s->avctx, width, height);
+
+ s->qscale_table= av_mallocz((s->width+15)/16);
+
+ s->first_picture = 0;
+ }
+
+ if(s->interlaced && (s->bottom_field == !s->interlace_polarity))
+ return 0;
+
+ /* XXX: incomplete test! */
+ pix_fmt_id = (s->h_count[0] << 28) | (s->v_count[0] << 24) |
+ (s->h_count[1] << 20) | (s->v_count[1] << 16) |
+ (s->h_count[2] << 12) | (s->v_count[2] << 8) |
+ (s->h_count[3] << 4) | s->v_count[3];
+ av_log(s->avctx, AV_LOG_DEBUG, "pix fmt id %x\n", pix_fmt_id);
+ //NOTE we do not allocate pictures large enough for the possible padding of h/v_count being 4
+ if(!(pix_fmt_id & 0xD0D0D0D0))
+ pix_fmt_id-= (pix_fmt_id & 0xF0F0F0F0)>>1;
+ if(!(pix_fmt_id & 0x0D0D0D0D))
+ pix_fmt_id-= (pix_fmt_id & 0x0F0F0F0F)>>1;
+
+ switch(pix_fmt_id){
+ case 0x11111100:
+ if(s->rgb){
+ s->avctx->pix_fmt = PIX_FMT_BGRA;
+ }else
+ s->avctx->pix_fmt = s->cs_itu601 ? PIX_FMT_YUV444P : PIX_FMT_YUVJ444P;
+ assert(s->nb_components==3);
+ break;
+ case 0x11000000:
+ s->avctx->pix_fmt = PIX_FMT_GRAY8;
+ break;
+ case 0x12111100:
+ s->avctx->pix_fmt = s->cs_itu601 ? PIX_FMT_YUV440P : PIX_FMT_YUVJ440P;
+ break;
+ case 0x21111100:
+ s->avctx->pix_fmt = s->cs_itu601 ? PIX_FMT_YUV422P : PIX_FMT_YUVJ422P;
+ break;
+ case 0x22111100:
+ s->avctx->pix_fmt = s->cs_itu601 ? PIX_FMT_YUV420P : PIX_FMT_YUVJ420P;
+ break;
+ default:
+ av_log(s->avctx, AV_LOG_ERROR, "Unhandled pixel format 0x%x\n", pix_fmt_id);
+ return -1;
+ }
+ if(s->ls){
+ if(s->nb_components > 1)
+ s->avctx->pix_fmt = PIX_FMT_RGB24;
+ else if(s->bits <= 8)
+ s->avctx->pix_fmt = PIX_FMT_GRAY8;
+ else
+ s->avctx->pix_fmt = PIX_FMT_GRAY16;
+ }
+
+ if(s->picture.data[0])
+ s->avctx->release_buffer(s->avctx, &s->picture);
+
+ s->picture.reference= 0;
+ if(s->avctx->get_buffer(s->avctx, &s->picture) < 0){
+ av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return -1;
+ }
+ s->picture.pict_type= FF_I_TYPE;
+ s->picture.key_frame= 1;
+ s->got_picture = 1;
+
+ for(i=0; i<3; i++){
+ s->linesize[i]= s->picture.linesize[i] << s->interlaced;
+ }
+
+// printf("%d %d %d %d %d %d\n", s->width, s->height, s->linesize[0], s->linesize[1], s->interlaced, s->avctx->height);
+
+ if (len != (8+(3*nb_components)))
+ {
+ av_log(s->avctx, AV_LOG_DEBUG, "decode_sof0: error, len(%d) mismatch\n", len);
+ }
+
+ /* start from a totally blank picture, as progressive JPEG only adds detail to it */
+ if(s->progressive){
+ int bw = (width + s->h_max*8-1) / (s->h_max*8);
+ int bh = (height + s->v_max*8-1) / (s->v_max*8);
+ for(i=0; i<s->nb_components; i++) {
+ int size = bw * bh * s->h_count[i] * s->v_count[i];
+ av_freep(&s->blocks[i]);
+ av_freep(&s->last_nnz[i]);
+ s->blocks[i] = av_malloc(size * sizeof(**s->blocks));
+ s->last_nnz[i] = av_mallocz(size * sizeof(**s->last_nnz));
+ s->block_stride[i] = bw * s->h_count[i];
+ }
+ memset(s->coefs_finished, 0, sizeof(s->coefs_finished));
+ }
+ return 0;
+}
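The pix_fmt_id value computed in ff_mjpeg_decode_sof() above packs each component's horizontal and vertical sampling factors into one nibble apiece, which is why 0x22111100 selects YUV420P and 0x21111100 selects YUV422P. A small standalone sketch of the packing for a typical 4:2:0 frame header:

/* pix_fmt_id packing: h/v sampling factors of up to four components,
 * one nibble each, as in ff_mjpeg_decode_sof(). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    int h_count[4] = { 2, 1, 1, 0 };   /* typical 4:2:0 SOF sampling factors */
    int v_count[4] = { 2, 1, 1, 0 };

    uint32_t pix_fmt_id = (h_count[0] << 28) | (v_count[0] << 24) |
                          (h_count[1] << 20) | (v_count[1] << 16) |
                          (h_count[2] << 12) | (v_count[2] <<  8) |
                          (h_count[3] <<  4) |  v_count[3];

    printf("pix_fmt_id = 0x%08x\n", (unsigned)pix_fmt_id);  /* 0x22111100 */
    return 0;
}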
+
+static inline int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
+{
+ int code;
+ code = get_vlc2(&s->gb, s->vlcs[0][dc_index].table, 9, 2);
+ if (code < 0)
+ {
+ av_log(s->avctx, AV_LOG_WARNING, "mjpeg_decode_dc: bad vlc: %d:%d (%p)\n", 0, dc_index,
+ &s->vlcs[0][dc_index]);
+ return 0xffff;
+ }
+
+ if(code)
+ return get_xbits(&s->gb, code);
+ else
+ return 0;
+}
+
+/* decode block and dequantize */
+static int decode_block(MJpegDecodeContext *s, DCTELEM *block,
+ int component, int dc_index, int ac_index, int16_t *quant_matrix)
+{
+ int code, i, j, level, val;
+
+ /* DC coef */
+ val = mjpeg_decode_dc(s, dc_index);
+ if (val == 0xffff) {
+ av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
+ return -1;
+ }
+ val = val * quant_matrix[0] + s->last_dc[component];
+ s->last_dc[component] = val;
+ block[0] = val;
+ /* AC coefs */
+ i = 0;
+ {OPEN_READER(re, &s->gb)
+ for(;;) {
+ UPDATE_CACHE(re, &s->gb);
+ GET_VLC(code, re, &s->gb, s->vlcs[1][ac_index].table, 9, 2)
+
+ /* EOB */
+ if (code == 0x10)
+ break;
+ i += ((unsigned)code) >> 4;
+ if(code != 0x100){
+ code &= 0xf;
+ if(code > MIN_CACHE_BITS - 16){
+ UPDATE_CACHE(re, &s->gb)
+ }
+ {
+ int cache=GET_CACHE(re,&s->gb);
+ int sign=(~cache)>>31;
+ level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
+ }
+
+ LAST_SKIP_BITS(re, &s->gb, code)
+
+ if (i >= 63) {
+ if(i == 63){
+ j = s->scantable.permutated[63];
+ block[j] = level * quant_matrix[j];
+ break;
+ }
+ av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
+ return -1;
+ }
+ j = s->scantable.permutated[i];
+ block[j] = level * quant_matrix[j];
+ }
+ }
+ CLOSE_READER(re, &s->gb)}
+
+ return 0;
+}
+
+static int decode_dc_progressive(MJpegDecodeContext *s, DCTELEM *block, int component,
+ int dc_index, int16_t *quant_matrix, int Al)
+{
+ int val;
+ s->dsp.clear_block(block);
+ val = mjpeg_decode_dc(s, dc_index);
+ if (val == 0xffff) {
+ av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
+ return -1;
+ }
+ val = (val * quant_matrix[0] << Al) + s->last_dc[component];
+ s->last_dc[component] = val;
+ block[0] = val;
+ return 0;
+}
+
+/* decode block and dequantize - progressive JPEG version */
+static int decode_block_progressive(MJpegDecodeContext *s, DCTELEM *block, uint8_t *last_nnz,
+ int ac_index, int16_t *quant_matrix,
+ int ss, int se, int Al, int *EOBRUN)
+{
+ int code, i, j, level, val, run;
+
+ if(*EOBRUN){
+ (*EOBRUN)--;
+ return 0;
+ }
+ {OPEN_READER(re, &s->gb)
+ for(i=ss;;i++) {
+ UPDATE_CACHE(re, &s->gb);
+ GET_VLC(code, re, &s->gb, s->vlcs[1][ac_index].table, 9, 2)
+        /* Progressive JPEG uses AC coefficients starting from zero, but this decoder sets an offset of 16 by default */
+ code -= 16;
+ if(code & 0xF) {
+ i += ((unsigned) code) >> 4;
+ code &= 0xf;
+ if(code > MIN_CACHE_BITS - 16){
+ UPDATE_CACHE(re, &s->gb)
+ }
+ {
+ int cache=GET_CACHE(re,&s->gb);
+ int sign=(~cache)>>31;
+ level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
+ }
+
+ LAST_SKIP_BITS(re, &s->gb, code)
+
+ if (i >= se) {
+ if(i == se){
+ j = s->scantable.permutated[se];
+ block[j] = level * quant_matrix[j] << Al;
+ break;
+ }
+ av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
+ return -1;
+ }
+ j = s->scantable.permutated[i];
+ block[j] = level * quant_matrix[j] << Al;
+ }else{
+ run = ((unsigned) code) >> 4;
+ if(run == 0xF){// ZRL - skip 15 coefficients
+ i += 15;
+ }else{
+ val = run;
+ run = (1 << run);
+ UPDATE_CACHE(re, &s->gb);
+ run += (GET_CACHE(re, &s->gb) >> (32 - val)) & (run - 1);
+ if(val)
+ LAST_SKIP_BITS(re, &s->gb, val);
+ *EOBRUN = run - 1;
+ break;
+ }
+ }
+ }
+ CLOSE_READER(re, &s->gb)}
+ if(i > *last_nnz)
+ *last_nnz = i;
+ return 0;
+}
+
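+/* REFINE_BIT reads one correction bit for an already-nonzero coefficient and,
+ * if the bit is set, moves block[j] one quantizer step (shifted by Al) further
+ * away from zero while preserving its sign. */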
+#define REFINE_BIT(j) {\
+ UPDATE_CACHE(re, &s->gb);\
+ sign = block[j]>>15;\
+ block[j] += SHOW_UBITS(re, &s->gb, 1) * ((quant_matrix[j]^sign)-sign) << Al;\
+ LAST_SKIP_BITS(re, &s->gb, 1);\
+}
+
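+/* ZERO_RUN advances i over `run` zero-valued coefficients, feeding a REFINE_BIT
+ * to every already-nonzero coefficient it passes; beyond `last` it simply skips
+ * ahead by the remaining run and range-checks the result against se. */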
+#define ZERO_RUN \
+for(;;i++) {\
+ if(i > last) {\
+ i += run;\
+ if(i > se) {\
+ av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);\
+ return -1;\
+ }\
+ break;\
+ }\
+ j = s->scantable.permutated[i];\
+ if(block[j])\
+ REFINE_BIT(j)\
+ else if(run-- == 0)\
+ break;\
+}
+
+/* decode block and dequantize - progressive JPEG refinement pass */
+static int decode_block_refinement(MJpegDecodeContext *s, DCTELEM *block, uint8_t *last_nnz,
+ int ac_index, int16_t *quant_matrix,
+ int ss, int se, int Al, int *EOBRUN)
+{
+ int code, i=ss, j, sign, val, run;
+ int last = FFMIN(se, *last_nnz);
+
+ OPEN_READER(re, &s->gb);
+ if(*EOBRUN)
+ (*EOBRUN)--;
+ else {
+ for(;;i++) {
+ UPDATE_CACHE(re, &s->gb);
+ GET_VLC(code, re, &s->gb, s->vlcs[1][ac_index].table, 9, 2)
+            /* Progressive JPEG uses AC coefficients starting from zero, but this decoder sets an offset of 16 by default */
+ code -= 16;
+ if(code & 0xF) {
+ run = ((unsigned) code) >> 4;
+ UPDATE_CACHE(re, &s->gb);
+ val = SHOW_UBITS(re, &s->gb, 1);
+ LAST_SKIP_BITS(re, &s->gb, 1);
+ ZERO_RUN;
+ j = s->scantable.permutated[i];
+ val--;
+ block[j] = ((quant_matrix[j]^val)-val) << Al;
+ if(i == se) {
+ if(i > *last_nnz)
+ *last_nnz = i;
+ CLOSE_READER(re, &s->gb)
+ return 0;
+ }
+ }else{
+ run = ((unsigned) code) >> 4;
+ if(run == 0xF){
+ ZERO_RUN;
+ }else{
+ val = run;
+ run = (1 << run);
+ if(val) {
+ UPDATE_CACHE(re, &s->gb);
+ run += SHOW_UBITS(re, &s->gb, val);
+ LAST_SKIP_BITS(re, &s->gb, val);
+ }
+ *EOBRUN = run - 1;
+ break;
+ }
+ }
+ }
+
+ if(i > *last_nnz)
+ *last_nnz = i;
+ }
+
+ for(;i<=last;i++) {
+ j = s->scantable.permutated[i];
+ if(block[j])
+ REFINE_BIT(j)
+ }
+ CLOSE_READER(re, &s->gb);
+
+ return 0;
+}
+#undef REFINE_BIT
+#undef ZERO_RUN
+
+static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int predictor, int point_transform){
+ int i, mb_x, mb_y;
+ uint16_t (*buffer)[4];
+ int left[3], top[3], topleft[3];
+ const int linesize= s->linesize[0];
+ const int mask= (1<<s->bits)-1;
+
+ av_fast_malloc(&s->ljpeg_buffer, &s->ljpeg_buffer_size, (unsigned)s->mb_width * 4 * sizeof(s->ljpeg_buffer[0][0]));
+ buffer= s->ljpeg_buffer;
+
+ for(i=0; i<3; i++){
+ buffer[0][i]= 1 << (s->bits + point_transform - 1);
+ }
+ for(mb_y = 0; mb_y < s->mb_height; mb_y++) {
+ const int modified_predictor= mb_y ? predictor : 1;
+ uint8_t *ptr = s->picture.data[0] + (linesize * mb_y);
+
+ if (s->interlaced && s->bottom_field)
+ ptr += linesize >> 1;
+
+ for(i=0; i<3; i++){
+ top[i]= left[i]= topleft[i]= buffer[0][i];
+ }
+ for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
+ if (s->restart_interval && !s->restart_count)
+ s->restart_count = s->restart_interval;
+
+ for(i=0;i<3;i++) {
+ int pred;
+
+ topleft[i]= top[i];
+ top[i]= buffer[mb_x][i];
+
+ PREDICT(pred, topleft[i], top[i], left[i], modified_predictor);
+
+ left[i]=
+ buffer[mb_x][i]= mask & (pred + (mjpeg_decode_dc(s, s->dc_index[i]) << point_transform));
+ }
+
+ if (s->restart_interval && !--s->restart_count) {
+ align_get_bits(&s->gb);
+ skip_bits(&s->gb, 16); /* skip RSTn */
+ }
+ }
+
+ if(s->rct){
+ for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
+ ptr[4*mb_x+1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200)>>2);
+ ptr[4*mb_x+0] = buffer[mb_x][1] + ptr[4*mb_x+1];
+ ptr[4*mb_x+2] = buffer[mb_x][2] + ptr[4*mb_x+1];
+ }
+ }else if(s->pegasus_rct){
+ for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
+ ptr[4*mb_x+1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2])>>2);
+ ptr[4*mb_x+0] = buffer[mb_x][1] + ptr[4*mb_x+1];
+ ptr[4*mb_x+2] = buffer[mb_x][2] + ptr[4*mb_x+1];
+ }
+ }else{
+ for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
+ ptr[4*mb_x+0] = buffer[mb_x][2];
+ ptr[4*mb_x+1] = buffer[mb_x][1];
+ ptr[4*mb_x+2] = buffer[mb_x][0];
+ }
+ }
+ }
+ return 0;
+}
+
+static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor, int point_transform){
+ int i, mb_x, mb_y;
+ const int nb_components=3;
+
+ for(mb_y = 0; mb_y < s->mb_height; mb_y++) {
+ for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
+ if (s->restart_interval && !s->restart_count)
+ s->restart_count = s->restart_interval;
+
+ if(mb_x==0 || mb_y==0 || s->interlaced){
+ for(i=0;i<nb_components;i++) {
+ uint8_t *ptr;
+ int n, h, v, x, y, c, j, linesize;
+ n = s->nb_blocks[i];
+ c = s->comp_index[i];
+ h = s->h_scount[i];
+ v = s->v_scount[i];
+ x = 0;
+ y = 0;
+ linesize= s->linesize[c];
+
+ for(j=0; j<n; j++) {
+ int pred;
+
+ ptr = s->picture.data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
+ if(y==0 && mb_y==0){
+ if(x==0 && mb_x==0){
+ pred= 128 << point_transform;
+ }else{
+ pred= ptr[-1];
+ }
+ }else{
+ if(x==0 && mb_x==0){
+ pred= ptr[-linesize];
+ }else{
+ PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
+ }
+ }
+
+ if (s->interlaced && s->bottom_field)
+ ptr += linesize >> 1;
+ *ptr= pred + (mjpeg_decode_dc(s, s->dc_index[i]) << point_transform);
+
+ if (++x == h) {
+ x = 0;
+ y++;
+ }
+ }
+ }
+ }else{
+ for(i=0;i<nb_components;i++) {
+ uint8_t *ptr;
+ int n, h, v, x, y, c, j, linesize;
+ n = s->nb_blocks[i];
+ c = s->comp_index[i];
+ h = s->h_scount[i];
+ v = s->v_scount[i];
+ x = 0;
+ y = 0;
+ linesize= s->linesize[c];
+
+ for(j=0; j<n; j++) {
+ int pred;
+
+ ptr = s->picture.data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
+ PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
+ *ptr= pred + (mjpeg_decode_dc(s, s->dc_index[i]) << point_transform);
+ if (++x == h) {
+ x = 0;
+ y++;
+ }
+ }
+ }
+ }
+ if (s->restart_interval && !--s->restart_count) {
+ align_get_bits(&s->gb);
+ skip_bits(&s->gb, 16); /* skip RSTn */
+ }
+ }
+ }
+ return 0;
+}
+
+static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah, int Al){
+ int i, mb_x, mb_y;
+ uint8_t* data[MAX_COMPONENTS];
+ int linesize[MAX_COMPONENTS];
+
+ if(s->flipped && s->avctx->flags & CODEC_FLAG_EMU_EDGE) {
+        av_log(s->avctx, AV_LOG_ERROR, "Cannot flip image with CODEC_FLAG_EMU_EDGE set!\n");
+ s->flipped = 0;
+ }
+ for(i=0; i < nb_components; i++) {
+ int c = s->comp_index[i];
+ data[c] = s->picture.data[c];
+ linesize[c]=s->linesize[c];
+ s->coefs_finished[c] |= 1;
+ if(s->flipped) {
+ //picture should be flipped upside-down for this codec
+ data[c] += (linesize[c] * (s->v_scount[i] * (8 * s->mb_height -((s->height/s->v_max)&7)) - 1 ));
+ linesize[c] *= -1;
+ }
+ }
+
+ for(mb_y = 0; mb_y < s->mb_height; mb_y++) {
+ for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
+ if (s->restart_interval && !s->restart_count)
+ s->restart_count = s->restart_interval;
+
+ for(i=0;i<nb_components;i++) {
+ uint8_t *ptr;
+ int n, h, v, x, y, c, j;
+ n = s->nb_blocks[i];
+ c = s->comp_index[i];
+ h = s->h_scount[i];
+ v = s->v_scount[i];
+ x = 0;
+ y = 0;
+ for(j=0;j<n;j++) {
+ ptr = data[c] +
+ (((linesize[c] * (v * mb_y + y) * 8) +
+ (h * mb_x + x) * 8) >> s->avctx->lowres);
+ if(s->interlaced && s->bottom_field)
+ ptr += linesize[c] >> 1;
+ if(!s->progressive) {
+ s->dsp.clear_block(s->block);
+ if(decode_block(s, s->block, i,
+ s->dc_index[i], s->ac_index[i],
+ s->quant_matrixes[ s->quant_index[c] ]) < 0) {
+ av_log(s->avctx, AV_LOG_ERROR, "error y=%d x=%d\n", mb_y, mb_x);
+ return -1;
+ }
+ s->dsp.idct_put(ptr, linesize[c], s->block);
+ } else {
+ int block_idx = s->block_stride[c] * (v * mb_y + y) + (h * mb_x + x);
+ DCTELEM *block = s->blocks[c][block_idx];
+ if(Ah)
+ block[0] += get_bits1(&s->gb) * s->quant_matrixes[ s->quant_index[c] ][0] << Al;
+ else if(decode_dc_progressive(s, block, i, s->dc_index[i], s->quant_matrixes[ s->quant_index[c] ], Al) < 0) {
+ av_log(s->avctx, AV_LOG_ERROR, "error y=%d x=%d\n", mb_y, mb_x);
+ return -1;
+ }
+ }
+// av_log(s->avctx, AV_LOG_DEBUG, "mb: %d %d processed\n", mb_y, mb_x);
+//av_log(NULL, AV_LOG_DEBUG, "%d %d %d %d %d %d %d %d \n", mb_x, mb_y, x, y, c, s->bottom_field, (v * mb_y + y) * 8, (h * mb_x + x) * 8);
+ if (++x == h) {
+ x = 0;
+ y++;
+ }
+ }
+ }
+
+ if (s->restart_interval && !--s->restart_count) {
+ align_get_bits(&s->gb);
+ skip_bits(&s->gb, 16); /* skip RSTn */
+ for (i=0; i<nb_components; i++) /* reset dc */
+ s->last_dc[i] = 1024;
+ }
+ }
+ }
+ return 0;
+}
+
+static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss, int se, int Ah, int Al){
+ int mb_x, mb_y;
+ int EOBRUN = 0;
+ int c = s->comp_index[0];
+ uint8_t* data = s->picture.data[c];
+ int linesize = s->linesize[c];
+ int last_scan = 0;
+ int16_t *quant_matrix = s->quant_matrixes[ s->quant_index[c] ];
+
+ if(!Al) {
+ s->coefs_finished[c] |= (1LL<<(se+1))-(1LL<<ss);
+ last_scan = !~s->coefs_finished[c];
+ }
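+    /* The bitmask above marks coefficients ss..se of this component as finished;
+     * once all 64 bits are set, ~coefs_finished[c] becomes zero and this scan is
+     * treated as the last one, so the blocks are IDCT'd below. */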
+
+ if(s->interlaced && s->bottom_field)
+ data += linesize >> 1;
+
+ for(mb_y = 0; mb_y < s->mb_height; mb_y++) {
+ uint8_t *ptr = data + (mb_y*linesize*8 >> s->avctx->lowres);
+ int block_idx = mb_y * s->block_stride[c];
+ DCTELEM (*block)[64] = &s->blocks[c][block_idx];
+ uint8_t *last_nnz = &s->last_nnz[c][block_idx];
+ for(mb_x = 0; mb_x < s->mb_width; mb_x++, block++, last_nnz++) {
+ int ret;
+ if(Ah)
+ ret = decode_block_refinement(s, *block, last_nnz, s->ac_index[0],
+ quant_matrix, ss, se, Al, &EOBRUN);
+ else
+ ret = decode_block_progressive(s, *block, last_nnz, s->ac_index[0],
+ quant_matrix, ss, se, Al, &EOBRUN);
+ if(ret < 0) {
+ av_log(s->avctx, AV_LOG_ERROR, "error y=%d x=%d\n", mb_y, mb_x);
+ return -1;
+ }
+ if(last_scan) {
+ s->dsp.idct_put(ptr, linesize, *block);
+ ptr += 8 >> s->avctx->lowres;
+ }
+ }
+ }
+ return 0;
+}
+
+int ff_mjpeg_decode_sos(MJpegDecodeContext *s)
+{
+ int len, nb_components, i, h, v, predictor, point_transform;
+ int index, id;
+ const int block_size= s->lossless ? 1 : 8;
+ int ilv, prev_shift;
+
+ /* XXX: verify len field validity */
+ len = get_bits(&s->gb, 16);
+ nb_components = get_bits(&s->gb, 8);
+ if (nb_components == 0 || nb_components > MAX_COMPONENTS){
+ av_log(s->avctx, AV_LOG_ERROR, "decode_sos: nb_components (%d) unsupported\n", nb_components);
+ return -1;
+ }
+ if (len != 6+2*nb_components)
+ {
+ av_log(s->avctx, AV_LOG_ERROR, "decode_sos: invalid len (%d)\n", len);
+ return -1;
+ }
+ for(i=0;i<nb_components;i++) {
+ id = get_bits(&s->gb, 8) - 1;
+ av_log(s->avctx, AV_LOG_DEBUG, "component: %d\n", id);
+ /* find component index */
+ for(index=0;index<s->nb_components;index++)
+ if (id == s->component_id[index])
+ break;
+ if (index == s->nb_components)
+ {
+ av_log(s->avctx, AV_LOG_ERROR, "decode_sos: index(%d) out of components\n", index);
+ return -1;
+ }
+ /* Metasoft MJPEG codec has Cb and Cr swapped */
+ if (s->avctx->codec_tag == MKTAG('M', 'T', 'S', 'J')
+ && nb_components == 3 && s->nb_components == 3 && i)
+ index = 3 - i;
+
+ s->comp_index[i] = index;
+
+ s->nb_blocks[i] = s->h_count[index] * s->v_count[index];
+ s->h_scount[i] = s->h_count[index];
+ s->v_scount[i] = s->v_count[index];
+
+ s->dc_index[i] = get_bits(&s->gb, 4);
+ s->ac_index[i] = get_bits(&s->gb, 4);
+
+ if (s->dc_index[i] < 0 || s->ac_index[i] < 0 ||
+ s->dc_index[i] >= 4 || s->ac_index[i] >= 4)
+ goto out_of_range;
+ if (!s->vlcs[0][s->dc_index[i]].table || !s->vlcs[1][s->ac_index[i]].table)
+ goto out_of_range;
+ }
+
+ predictor= get_bits(&s->gb, 8); /* JPEG Ss / lossless JPEG predictor /JPEG-LS NEAR */
+ ilv= get_bits(&s->gb, 8); /* JPEG Se / JPEG-LS ILV */
+ prev_shift = get_bits(&s->gb, 4); /* Ah */
+ point_transform= get_bits(&s->gb, 4); /* Al */
+
+ for(i=0;i<nb_components;i++)
+ s->last_dc[i] = 1024;
+
+ if (nb_components > 1) {
+ /* interleaved stream */
+ s->mb_width = (s->width + s->h_max * block_size - 1) / (s->h_max * block_size);
+ s->mb_height = (s->height + s->v_max * block_size - 1) / (s->v_max * block_size);
+ } else if(!s->ls) { /* skip this for JPEG-LS */
+ h = s->h_max / s->h_scount[0];
+ v = s->v_max / s->v_scount[0];
+ s->mb_width = (s->width + h * block_size - 1) / (h * block_size);
+ s->mb_height = (s->height + v * block_size - 1) / (v * block_size);
+ s->nb_blocks[0] = 1;
+ s->h_scount[0] = 1;
+ s->v_scount[0] = 1;
+ }
+
+ if(s->avctx->debug & FF_DEBUG_PICT_INFO)
+ av_log(s->avctx, AV_LOG_DEBUG, "%s %s p:%d >>:%d ilv:%d bits:%d %s\n", s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "",
+ predictor, point_transform, ilv, s->bits,
+ s->pegasus_rct ? "PRCT" : (s->rct ? "RCT" : ""));
+
+
+    /* MJPEG-B can have padding bytes between SOS and image data; skip them */
+ for (i = s->mjpb_skiptosod; i > 0; i--)
+ skip_bits(&s->gb, 8);
+
+ if(s->lossless){
+ if(CONFIG_JPEGLS_DECODER && s->ls){
+ if(ff_jpegls_decode_picture(s, predictor, point_transform, ilv) < 0)
+ return -1;
+ }else{
+ if(s->rgb){
+ if(ljpeg_decode_rgb_scan(s, predictor, point_transform) < 0)
+ return -1;
+ }else{
+ if(ljpeg_decode_yuv_scan(s, predictor, point_transform) < 0)
+ return -1;
+ }
+ }
+ }else{
+ if(s->progressive && predictor) {
+ if(mjpeg_decode_scan_progressive_ac(s, predictor, ilv, prev_shift, point_transform) < 0)
+ return -1;
+ } else {
+ if(mjpeg_decode_scan(s, nb_components, prev_shift, point_transform) < 0)
+ return -1;
+ }
+ }
+ emms_c();
+ return 0;
+ out_of_range:
+ av_log(s->avctx, AV_LOG_ERROR, "decode_sos: ac/dc index out of range\n");
+ return -1;
+}
+
+static int mjpeg_decode_dri(MJpegDecodeContext *s)
+{
+ if (get_bits(&s->gb, 16) != 4)
+ return -1;
+ s->restart_interval = get_bits(&s->gb, 16);
+ s->restart_count = 0;
+ av_log(s->avctx, AV_LOG_DEBUG, "restart interval: %d\n", s->restart_interval);
+
+ return 0;
+}
+
+static int mjpeg_decode_app(MJpegDecodeContext *s)
+{
+ int len, id, i;
+
+ len = get_bits(&s->gb, 16);
+ if (len < 5)
+ return -1;
+ if(8*len + get_bits_count(&s->gb) > s->gb.size_in_bits)
+ return -1;
+
+ id = (get_bits(&s->gb, 16) << 16) | get_bits(&s->gb, 16);
+ id = be2me_32(id);
+ len -= 6;
+
+ if(s->avctx->debug & FF_DEBUG_STARTCODE){
+ av_log(s->avctx, AV_LOG_DEBUG, "APPx %8X\n", id);
+ }
+
+    /* buggy AVID puts EOI only at every 10th frame */
+    /* this FourCC is also used by non-AVID files; it holds some
+       information, but it is always present in files created by AVID */
+ if (id == AV_RL32("AVI1"))
+ {
+ s->buggy_avid = 1;
+
+ i = get_bits(&s->gb, 8);
+ if (i==2) s->bottom_field= 1;
+ else if(i==1) s->bottom_field= 0;
+
+ goto out;
+ }
+
+ if (id == AV_RL32("JFIF"))
+ {
+ int t_w, t_h, v1, v2;
+ skip_bits(&s->gb, 8); /* the trailing zero-byte */
+ v1= get_bits(&s->gb, 8);
+ v2= get_bits(&s->gb, 8);
+ skip_bits(&s->gb, 8);
+
+ s->avctx->sample_aspect_ratio.num= get_bits(&s->gb, 16);
+ s->avctx->sample_aspect_ratio.den= get_bits(&s->gb, 16);
+
+ if (s->avctx->debug & FF_DEBUG_PICT_INFO)
+ av_log(s->avctx, AV_LOG_INFO, "mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n",
+ v1, v2,
+ s->avctx->sample_aspect_ratio.num,
+ s->avctx->sample_aspect_ratio.den
+ );
+
+ t_w = get_bits(&s->gb, 8);
+ t_h = get_bits(&s->gb, 8);
+ if (t_w && t_h)
+ {
+ /* skip thumbnail */
+ if (len-10-(t_w*t_h*3) > 0)
+ len -= t_w*t_h*3;
+ }
+ len -= 10;
+ goto out;
+ }
+
+ if (id == AV_RL32("Adob") && (get_bits(&s->gb, 8) == 'e'))
+ {
+ if (s->avctx->debug & FF_DEBUG_PICT_INFO)
+ av_log(s->avctx, AV_LOG_INFO, "mjpeg: Adobe header found\n");
+ skip_bits(&s->gb, 16); /* version */
+ skip_bits(&s->gb, 16); /* flags0 */
+ skip_bits(&s->gb, 16); /* flags1 */
+ skip_bits(&s->gb, 8); /* transform */
+ len -= 7;
+ goto out;
+ }
+
+ if (id == AV_RL32("LJIF")){
+ if (s->avctx->debug & FF_DEBUG_PICT_INFO)
+ av_log(s->avctx, AV_LOG_INFO, "Pegasus lossless jpeg header found\n");
+ skip_bits(&s->gb, 16); /* version ? */
+        skip_bits(&s->gb, 16); /* unknown, always 0? */
+        skip_bits(&s->gb, 16); /* unknown, always 0? */
+        skip_bits(&s->gb, 16); /* unknown, always 0? */
+ switch( get_bits(&s->gb, 8)){
+ case 1:
+ s->rgb= 1;
+ s->pegasus_rct=0;
+ break;
+ case 2:
+ s->rgb= 1;
+ s->pegasus_rct=1;
+ break;
+ default:
+ av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace\n");
+ }
+ len -= 9;
+ goto out;
+ }
+
+ /* Apple MJPEG-A */
+ if ((s->start_code == APP1) && (len > (0x28 - 8)))
+ {
+ id = (get_bits(&s->gb, 16) << 16) | get_bits(&s->gb, 16);
+ id = be2me_32(id);
+ len -= 4;
+ if (id == AV_RL32("mjpg")) /* Apple MJPEG-A */
+ {
+ if (s->avctx->debug & FF_DEBUG_PICT_INFO)
+ av_log(s->avctx, AV_LOG_INFO, "mjpeg: Apple MJPEG-A header found\n");
+ }
+ }
+
+out:
+    /* slow, but needed for extreme Adobe JPEGs */
+ if (len < 0)
+ av_log(s->avctx, AV_LOG_ERROR, "mjpeg: error, decode_app parser read over the end\n");
+ while(--len > 0)
+ skip_bits(&s->gb, 8);
+
+ return 0;
+}
+
+static int mjpeg_decode_com(MJpegDecodeContext *s)
+{
+ int len = get_bits(&s->gb, 16);
+ if (len >= 2 && 8*len - 16 + get_bits_count(&s->gb) <= s->gb.size_in_bits) {
+ char *cbuf = av_malloc(len - 1);
+ if (cbuf) {
+ int i;
+ for (i = 0; i < len - 2; i++)
+ cbuf[i] = get_bits(&s->gb, 8);
+ if (i > 0 && cbuf[i-1] == '\n')
+ cbuf[i-1] = 0;
+ else
+ cbuf[i] = 0;
+
+ if(s->avctx->debug & FF_DEBUG_PICT_INFO)
+ av_log(s->avctx, AV_LOG_INFO, "mjpeg comment: '%s'\n", cbuf);
+
+            /* buggy AVID puts EOI only at every 10th frame */
+ if (!strcmp(cbuf, "AVID"))
+ {
+ s->buggy_avid = 1;
+ // if (s->first_picture)
+ // printf("mjpeg: workarounding buggy AVID\n");
+ }
+ else if(!strcmp(cbuf, "CS=ITU601")){
+ s->cs_itu601= 1;
+ }
+ else if((len > 20 && !strncmp(cbuf, "Intel(R) JPEG Library", 21)) ||
+ (len > 19 && !strncmp(cbuf, "Metasoft MJPEG Codec", 20))){
+ s->flipped = 1;
+ }
+
+ av_free(cbuf);
+ }
+ }
+
+ return 0;
+}
+
+/* return the 8 bit start code value and update the search
+ state. Return -1 if no start code found */
+static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
+{
+ const uint8_t *buf_ptr;
+ unsigned int v, v2;
+ int val;
+#ifdef DEBUG
+ int skipped=0;
+#endif
+
+ buf_ptr = *pbuf_ptr;
+ while (buf_ptr < buf_end) {
+ v = *buf_ptr++;
+ v2 = *buf_ptr;
+ if ((v == 0xff) && (v2 >= 0xc0) && (v2 <= 0xfe) && buf_ptr < buf_end) {
+ val = *buf_ptr++;
+ goto found;
+ }
+#ifdef DEBUG
+ skipped++;
+#endif
+ }
+ val = -1;
+found:
+ dprintf(NULL, "find_marker skipped %d bytes\n", skipped);
+ *pbuf_ptr = buf_ptr;
+ return val;
+}
+
+int ff_mjpeg_decode_frame(AVCodecContext *avctx,
+ void *data, int *data_size,
+ const uint8_t *buf, int buf_size)
+{
+ MJpegDecodeContext *s = avctx->priv_data;
+ const uint8_t *buf_end, *buf_ptr;
+ int start_code;
+ AVFrame *picture = data;
+
+ s->got_picture = 0; // picture from previous image can not be reused
+ buf_ptr = buf;
+ buf_end = buf + buf_size;
+ while (buf_ptr < buf_end) {
+        /* find next start marker */
+ start_code = find_marker(&buf_ptr, buf_end);
+ {
+ /* EOF */
+ if (start_code < 0) {
+ goto the_end;
+ } else {
+ av_log(avctx, AV_LOG_DEBUG, "marker=%x avail_size_in_buf=%td\n", start_code, buf_end - buf_ptr);
+
+ if ((buf_end - buf_ptr) > s->buffer_size)
+ {
+ av_free(s->buffer);
+ s->buffer_size = buf_end-buf_ptr;
+ s->buffer = av_malloc(s->buffer_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ av_log(avctx, AV_LOG_DEBUG, "buffer too small, expanding to %d bytes\n",
+ s->buffer_size);
+ }
+
+                /* unescape the buffer of SOS; JPEG-LS needs special treatment */
+ if (start_code == SOS && !s->ls)
+ {
+ const uint8_t *src = buf_ptr;
+ uint8_t *dst = s->buffer;
+
+ while (src<buf_end)
+ {
+ uint8_t x = *(src++);
+
+ *(dst++) = x;
+ //if (avctx->codec_id != CODEC_ID_THP)
+ //{
+ if (x == 0xff) {
+ while (src < buf_end && x == 0xff)
+ x = *(src++);
+
+ if (x >= 0xd0 && x <= 0xd7)
+ *(dst++) = x;
+ else if (x)
+ break;
+ }
+ //}
+ }
+ init_get_bits(&s->gb, s->buffer, (dst - s->buffer)*8);
+
+ av_log(avctx, AV_LOG_DEBUG, "escaping removed %td bytes\n",
+ (buf_end - buf_ptr) - (dst - s->buffer));
+ }
+ else if(start_code == SOS && s->ls){
+ const uint8_t *src = buf_ptr;
+ uint8_t *dst = s->buffer;
+ int bit_count = 0;
+ int t = 0, b = 0;
+ PutBitContext pb;
+
+ s->cur_scan++;
+
+ /* find marker */
+ while (src + t < buf_end){
+ uint8_t x = src[t++];
+ if (x == 0xff){
+ while((src + t < buf_end) && x == 0xff)
+ x = src[t++];
+ if (x & 0x80) {
+ t -= 2;
+ break;
+ }
+ }
+ }
+ bit_count = t * 8;
+
+ init_put_bits(&pb, dst, t);
+
+ /* unescape bitstream */
+ while(b < t){
+ uint8_t x = src[b++];
+ put_bits(&pb, 8, x);
+ if(x == 0xFF){
+ x = src[b++];
+ put_bits(&pb, 7, x);
+ bit_count--;
+ }
+ }
+ flush_put_bits(&pb);
+
+ init_get_bits(&s->gb, dst, bit_count);
+ }
+ else
+ init_get_bits(&s->gb, buf_ptr, (buf_end - buf_ptr)*8);
+
+ s->start_code = start_code;
+ if(s->avctx->debug & FF_DEBUG_STARTCODE){
+ av_log(avctx, AV_LOG_DEBUG, "startcode: %X\n", start_code);
+ }
+
+ /* process markers */
+ if (start_code >= 0xd0 && start_code <= 0xd7) {
+ av_log(avctx, AV_LOG_DEBUG, "restart marker: %d\n", start_code&0x0f);
+ /* APP fields */
+ } else if (start_code >= APP0 && start_code <= APP15) {
+ mjpeg_decode_app(s);
+ /* Comment */
+ } else if (start_code == COM){
+ mjpeg_decode_com(s);
+ }
+
+ switch(start_code) {
+ case SOI:
+ s->restart_interval = 0;
+
+ s->restart_count = 0;
+ /* nothing to do on SOI */
+ break;
+ case DQT:
+ ff_mjpeg_decode_dqt(s);
+ break;
+ case DHT:
+ if(ff_mjpeg_decode_dht(s) < 0){
+ av_log(avctx, AV_LOG_ERROR, "huffman table decode error\n");
+ return -1;
+ }
+ break;
+ case SOF0:
+ case SOF1:
+ s->lossless=0;
+ s->ls=0;
+ s->progressive=0;
+ if (ff_mjpeg_decode_sof(s) < 0)
+ return -1;
+ break;
+ case SOF2:
+ s->lossless=0;
+ s->ls=0;
+ s->progressive=1;
+ if (ff_mjpeg_decode_sof(s) < 0)
+ return -1;
+ break;
+ case SOF3:
+ s->lossless=1;
+ s->ls=0;
+ s->progressive=0;
+ if (ff_mjpeg_decode_sof(s) < 0)
+ return -1;
+ break;
+ case SOF48:
+ s->lossless=1;
+ s->ls=1;
+ s->progressive=0;
+ if (ff_mjpeg_decode_sof(s) < 0)
+ return -1;
+ break;
+ case LSE:
+ if (!CONFIG_JPEGLS_DECODER || ff_jpegls_decode_lse(s) < 0)
+ return -1;
+ break;
+ case EOI:
+ s->cur_scan = 0;
+ if ((s->buggy_avid && !s->interlaced) || s->restart_interval)
+ break;
+eoi_parser:
+ if (!s->got_picture) {
+ av_log(avctx, AV_LOG_WARNING, "Found EOI before any SOF, ignoring\n");
+ break;
+ }
+ {
+ if (s->interlaced) {
+ s->bottom_field ^= 1;
+ /* if not bottom field, do not output image yet */
+ if (s->bottom_field == !s->interlace_polarity)
+ goto not_the_end;
+ }
+ *picture = s->picture;
+ *data_size = sizeof(AVFrame);
+
+ if(!s->lossless){
+ picture->quality= FFMAX3(s->qscale[0], s->qscale[1], s->qscale[2]);
+ picture->qstride= 0;
+ picture->qscale_table= s->qscale_table;
+ memset(picture->qscale_table, picture->quality, (s->width+15)/16);
+ if(avctx->debug & FF_DEBUG_QP)
+ av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", picture->quality);
+ picture->quality*= FF_QP2LAMBDA;
+ }
+
+ goto the_end;
+ }
+ break;
+ case SOS:
+ if (!s->got_picture) {
+ av_log(avctx, AV_LOG_WARNING, "Can not process SOS before SOF, skipping\n");
+ break;
+ }
+ ff_mjpeg_decode_sos(s);
+                    /* buggy AVID puts EOI every 10th-20th frame */
+                    /* if the restart period is over, process EOI */
+ if ((s->buggy_avid && !s->interlaced) || s->restart_interval)
+ goto eoi_parser;
+ break;
+ case DRI:
+ mjpeg_decode_dri(s);
+ break;
+ case SOF5:
+ case SOF6:
+ case SOF7:
+ case SOF9:
+ case SOF10:
+ case SOF11:
+ case SOF13:
+ case SOF14:
+ case SOF15:
+ case JPG:
+ av_log(avctx, AV_LOG_ERROR, "mjpeg: unsupported coding type (%x)\n", start_code);
+ break;
+// default:
+// printf("mjpeg: unsupported marker (%x)\n", start_code);
+// break;
+ }
+
+not_the_end:
+                /* advance past the data consumed for this start code */
+ buf_ptr += (get_bits_count(&s->gb)+7)/8;
+ av_log(avctx, AV_LOG_DEBUG, "marker parser used %d bytes (%d bits)\n",
+ (get_bits_count(&s->gb)+7)/8, get_bits_count(&s->gb));
+ }
+ }
+ }
+ if (s->got_picture) {
+ av_log(avctx, AV_LOG_WARNING, "EOI missing, emulating\n");
+ goto eoi_parser;
+ }
+ av_log(avctx, AV_LOG_FATAL, "No JPEG data found in image\n");
+ return -1;
+the_end:
+ av_log(avctx, AV_LOG_DEBUG, "mjpeg decode frame unused %td bytes\n", buf_end - buf_ptr);
+// return buf_end - buf_ptr;
+ return buf_ptr - buf;
+}
+
+av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
+{
+ MJpegDecodeContext *s = avctx->priv_data;
+ int i, j;
+
+ if (s->picture.data[0])
+ avctx->release_buffer(avctx, &s->picture);
+
+ av_free(s->buffer);
+ av_free(s->qscale_table);
+ av_freep(&s->ljpeg_buffer);
+ s->ljpeg_buffer_size=0;
+
+ for(i=0;i<2;i++) {
+ for(j=0;j<4;j++)
+ free_vlc(&s->vlcs[i][j]);
+ }
+ for(i=0; i<MAX_COMPONENTS; i++) {
+ av_freep(&s->blocks[i]);
+ av_freep(&s->last_nnz[i]);
+ }
+ return 0;
+}
+
+AVCodec mjpeg_decoder = {
+ "mjpeg",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_MJPEG,
+ sizeof(MJpegDecodeContext),
+ /*.init=*/ff_mjpeg_decode_init,
+ /*.encode=*/NULL,
+ /*.close=*/ff_mjpeg_decode_end,
+ /*.decode=*/ff_mjpeg_decode_frame,
+ /*.capabilities=*/CODEC_CAP_DR1,
+ /*.next = */NULL,
+ /*.flush = */NULL,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name = */NULL_IF_CONFIG_SMALL("MJPEG (Motion JPEG)"),
+};
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mjpegdec.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mjpegdec.h
new file mode 100644
index 000000000..b786d8e77
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mjpegdec.h
@@ -0,0 +1,120 @@
+/*
+ * MJPEG decoder
+ * Copyright (c) 2000, 2001 Fabrice Bellard
+ * Copyright (c) 2003 Alex Beregszaszi
+ * Copyright (c) 2003-2004 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/mjpegdec.h
+ * MJPEG decoder.
+ */
+
+#ifndef AVCODEC_MJPEGDEC_H
+#define AVCODEC_MJPEGDEC_H
+
+#include "avcodec.h"
+#include "get_bits.h"
+#include "dsputil.h"
+
+#define MAX_COMPONENTS 4
+
+typedef struct MJpegDecodeContext {
+ AVCodecContext *avctx;
+ GetBitContext gb;
+
+ int start_code; /* current start code */
+ int buffer_size;
+ uint8_t *buffer;
+
+ int16_t quant_matrixes[4][64];
+ VLC vlcs[2][4];
+ int qscale[4]; ///< quantizer scale calculated from quant_matrixes
+
+ int org_height; /* size given at codec init */
+ int first_picture; /* true if decoding first picture */
+ int interlaced; /* true if interlaced */
+ int bottom_field; /* true if bottom field */
+ int lossless;
+ int ls;
+ int progressive;
+ int rgb;
+ int rct; /* standard rct */
+ int pegasus_rct; /* pegasus reversible colorspace transform */
+ int bits; /* bits per component */
+
+ int maxval;
+    int near;         ///< near lossless bound (is 0 for lossless)
+ int t1,t2,t3;
+    int reset;        ///< context halving interval (TODO: rename?)
+
+ int width, height;
+ int mb_width, mb_height;
+ int nb_components;
+ int block_stride[MAX_COMPONENTS];
+ int component_id[MAX_COMPONENTS];
+ int h_count[MAX_COMPONENTS]; /* horizontal and vertical count for each component */
+ int v_count[MAX_COMPONENTS];
+ int comp_index[MAX_COMPONENTS];
+ int dc_index[MAX_COMPONENTS];
+ int ac_index[MAX_COMPONENTS];
+ int nb_blocks[MAX_COMPONENTS];
+ int h_scount[MAX_COMPONENTS];
+ int v_scount[MAX_COMPONENTS];
+ int h_max, v_max; /* maximum h and v counts */
+ int quant_index[4]; /* quant table index for each component */
+ int last_dc[MAX_COMPONENTS]; /* last DEQUANTIZED dc (XXX: am I right to do that ?) */
+ AVFrame picture; /* picture structure */
+ int got_picture; ///< we found a SOF and picture is valid, too.
+ int linesize[MAX_COMPONENTS]; ///< linesize << interlaced
+ int8_t *qscale_table;
+ DECLARE_ALIGNED_16(DCTELEM, block)[64];
+ DCTELEM (*blocks[MAX_COMPONENTS])[64]; ///< intermediate sums (progressive mode)
+ uint8_t *last_nnz[MAX_COMPONENTS];
+ uint64_t coefs_finished[MAX_COMPONENTS]; ///< bitmask of which coefs have been completely decoded (progressive mode)
+ ScanTable scantable;
+ DSPContext dsp;
+
+ int restart_interval;
+ int restart_count;
+
+ int buggy_avid;
+ int cs_itu601;
+ int interlace_polarity;
+
+ int mjpb_skiptosod;
+
+ int cur_scan; /* current scan, used by JPEG-LS */
+ int flipped; /* true if picture is flipped */
+
+ uint16_t (*ljpeg_buffer)[4];
+ unsigned int ljpeg_buffer_size;
+} MJpegDecodeContext;
+
+int ff_mjpeg_decode_init(AVCodecContext *avctx);
+int ff_mjpeg_decode_end(AVCodecContext *avctx);
+int ff_mjpeg_decode_frame(AVCodecContext *avctx,
+ void *data, int *data_size,
+ const uint8_t *buf, int buf_size);
+int ff_mjpeg_decode_dqt(MJpegDecodeContext *s);
+int ff_mjpeg_decode_dht(MJpegDecodeContext *s);
+int ff_mjpeg_decode_sof(MJpegDecodeContext *s);
+int ff_mjpeg_decode_sos(MJpegDecodeContext *s);
+
+#endif /* AVCODEC_MJPEGDEC_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mjpegenc.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mjpegenc.h
new file mode 100644
index 000000000..15cefa9db
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mjpegenc.h
@@ -0,0 +1,60 @@
+/*
+ * MJPEG encoder
+ * Copyright (c) 2000, 2001 Fabrice Bellard
+ * Copyright (c) 2003 Alex Beregszaszi
+ * Copyright (c) 2003-2004 Michael Niedermayer
+ *
+ * Support for external huffman table, various fixes (AVID workaround),
+ * aspecting, new decode_frame mechanism and apple mjpeg-b support
+ * by Alex Beregszaszi
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file mjpegenc.h
+ * MJPEG encoder.
+ */
+
+#ifndef AVCODEC_MJPEGENC_H
+#define AVCODEC_MJPEGENC_H
+
+#include "dsputil.h"
+#include "mpegvideo.h"
+
+typedef struct MJpegContext {
+ uint8_t huff_size_dc_luminance[12]; //FIXME use array [3] instead of lumi / chrom, for easier addressing
+ uint16_t huff_code_dc_luminance[12];
+ uint8_t huff_size_dc_chrominance[12];
+ uint16_t huff_code_dc_chrominance[12];
+
+ uint8_t huff_size_ac_luminance[256];
+ uint16_t huff_code_ac_luminance[256];
+ uint8_t huff_size_ac_chrominance[256];
+ uint16_t huff_code_ac_chrominance[256];
+} MJpegContext;
+
+int ff_mjpeg_encode_init(MpegEncContext *s);
+void ff_mjpeg_encode_close(MpegEncContext *s);
+void ff_mjpeg_encode_picture_header(MpegEncContext *s);
+void ff_mjpeg_encode_picture_trailer(MpegEncContext *s);
+void ff_mjpeg_encode_stuffing(PutBitContext *pbc);
+void ff_mjpeg_encode_dc(MpegEncContext *s, int val,
+ uint8_t *huff_size, uint16_t *huff_code);
+void ff_mjpeg_encode_mb(MpegEncContext *s, DCTELEM block[6][64]);
+
+#endif /* AVCODEC_MJPEGENC_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mlp.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mlp.c
new file mode 100644
index 000000000..87f7c7713
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mlp.c
@@ -0,0 +1,115 @@
+/*
+ * MLP codec common code
+ * Copyright (c) 2007-2008 Ian Caulfield
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+
+#include "libavutil/crc.h"
+#include "libavutil/intreadwrite.h"
+#include "mlp.h"
+
+const uint8_t ff_mlp_huffman_tables[3][18][2] = {
+ { /* Huffman table 0, -7 - +10 */
+ {0x01, 9}, {0x01, 8}, {0x01, 7}, {0x01, 6}, {0x01, 5}, {0x01, 4}, {0x01, 3},
+ {0x04, 3}, {0x05, 3}, {0x06, 3}, {0x07, 3},
+ {0x03, 3}, {0x05, 4}, {0x09, 5}, {0x11, 6}, {0x21, 7}, {0x41, 8}, {0x81, 9},
+ }, { /* Huffman table 1, -7 - +8 */
+ {0x01, 9}, {0x01, 8}, {0x01, 7}, {0x01, 6}, {0x01, 5}, {0x01, 4}, {0x01, 3},
+ {0x02, 2}, {0x03, 2},
+ {0x03, 3}, {0x05, 4}, {0x09, 5}, {0x11, 6}, {0x21, 7}, {0x41, 8}, {0x81, 9},
+ }, { /* Huffman table 2, -7 - +7 */
+ {0x01, 9}, {0x01, 8}, {0x01, 7}, {0x01, 6}, {0x01, 5}, {0x01, 4}, {0x01, 3},
+ {0x01, 1},
+ {0x03, 3}, {0x05, 4}, {0x09, 5}, {0x11, 6}, {0x21, 7}, {0x41, 8}, {0x81, 9},
+ }
+};
+
+static int crc_init = 0;
+#if CONFIG_SMALL
+#define CRC_TABLE_SIZE 257
+#else
+#define CRC_TABLE_SIZE 1024
+#endif
+static AVCRC crc_63[CRC_TABLE_SIZE];
+static AVCRC crc_1D[CRC_TABLE_SIZE];
+static AVCRC crc_2D[CRC_TABLE_SIZE];
+
+av_cold void ff_mlp_init_crc(void)
+{
+ if (!crc_init) {
+ av_crc_init(crc_63, 0, 8, 0x63, sizeof(crc_63));
+ av_crc_init(crc_1D, 0, 8, 0x1D, sizeof(crc_1D));
+ av_crc_init(crc_2D, 0, 16, 0x002D, sizeof(crc_2D));
+ crc_init = 1;
+ }
+}
+
+uint16_t ff_mlp_checksum16(const uint8_t *buf, unsigned int buf_size)
+{
+ uint16_t crc;
+
+ crc = av_crc(crc_2D, 0, buf, buf_size - 2);
+ crc ^= AV_RL16(buf + buf_size - 2);
+ return crc;
+}
+
+uint8_t ff_mlp_checksum8(const uint8_t *buf, unsigned int buf_size)
+{
+ uint8_t checksum = av_crc(crc_63, 0x3c, buf, buf_size - 1); // crc_63[0xa2] == 0x3c
+ checksum ^= buf[buf_size-1];
+ return checksum;
+}
+
+uint8_t ff_mlp_restart_checksum(const uint8_t *buf, unsigned int bit_size)
+{
+ int i;
+ int num_bytes = (bit_size + 2) / 8;
+
+ int crc = crc_1D[buf[0] & 0x3f];
+ crc = av_crc(crc_1D, crc, buf + 1, num_bytes - 2);
+ crc ^= buf[num_bytes - 1];
+
+ for (i = 0; i < ((bit_size + 2) & 7); i++) {
+ crc <<= 1;
+ if (crc & 0x100)
+ crc ^= 0x11D;
+ crc ^= (buf[num_bytes] >> (7 - i)) & 1;
+ }
+
+ return crc;
+}
+
+uint8_t ff_mlp_calculate_parity(const uint8_t *buf, unsigned int buf_size)
+{
+ uint32_t scratch = 0;
+ const uint8_t *buf_end = buf + buf_size;
+
+ for (; ((intptr_t) buf & 3) && buf < buf_end; buf++)
+ scratch ^= *buf;
+ for (; buf < buf_end - 3; buf += 4)
+ scratch ^= *((const uint32_t*)buf);
+
+ scratch = xor_32_to_8(scratch);
+
+ for (; buf < buf_end; buf++)
+ scratch ^= *buf;
+
+ return scratch;
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mlp.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mlp.h
new file mode 100644
index 000000000..628b58d31
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mlp.h
@@ -0,0 +1,127 @@
+/*
+ * MLP codec common header file
+ * Copyright (c) 2007-2008 Ian Caulfield
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_MLP_H
+#define AVCODEC_MLP_H
+
+#include <stdint.h>
+
+#include "avcodec.h"
+
+/** Last possible matrix channel for each codec */
+#define MAX_MATRIX_CHANNEL_MLP 5
+#define MAX_MATRIX_CHANNEL_TRUEHD 7
+/** Maximum number of channels in a valid stream.
+ * MLP : 5.1 + 2 noise channels -> 8 channels
+ * TrueHD: 7.1 -> 8 channels
+ */
+#define MAX_CHANNELS 8
+
+/** Maximum number of matrices used in decoding; most streams have one matrix
+ * per output channel, but some rematrix a channel (usually 0) more than once.
+ */
+#define MAX_MATRICES_MLP 6
+#define MAX_MATRICES_TRUEHD 8
+#define MAX_MATRICES 8
+
+/** Maximum number of substreams that can be decoded.
+ * MLP's limit is 2. TrueHD supports at least up to 3.
+ */
+#define MAX_SUBSTREAMS 3
+
+/** the maximum sample rate expressed as a multiple of 48000 */
+#define MAX_RATEFACTOR 4
+/** maximum sample frequency seen in files */
+#define MAX_SAMPLERATE (MAX_RATEFACTOR * 48000)
+
+/** maximum number of audio samples within one access unit */
+#define MAX_BLOCKSIZE (40 * MAX_RATEFACTOR)
+/** next power of two greater than MAX_BLOCKSIZE */
+#define MAX_BLOCKSIZE_POW2 (64 * MAX_RATEFACTOR)
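+/* For reference, with MAX_RATEFACTOR == 4 the definitions above evaluate to
+ * MAX_SAMPLERATE == 192000 Hz, MAX_BLOCKSIZE == 160 samples per access unit
+ * and MAX_BLOCKSIZE_POW2 == 256. */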
+
+/** number of allowed filters */
+#define NUM_FILTERS 2
+
+/** The maximum number of taps in IIR and FIR filters. */
+#define MAX_FIR_ORDER 8
+#define MAX_IIR_ORDER 4
+
+/** Code that signals end of a stream. */
+#define END_OF_STREAM 0xd234d234
+
+#define FIR 0
+#define IIR 1
+
+/** filter data */
+typedef struct {
+ uint8_t order; ///< number of taps in filter
+ uint8_t shift; ///< Right shift to apply to output of filter.
+
+ int32_t state[MAX_FIR_ORDER];
+} FilterParams;
+
+/** sample data coding information */
+typedef struct {
+ FilterParams filter_params[NUM_FILTERS];
+ int32_t coeff[NUM_FILTERS][MAX_FIR_ORDER];
+
+ int16_t huff_offset; ///< Offset to apply to residual values.
+ int32_t sign_huff_offset; ///< sign/rounding-corrected version of huff_offset
+ uint8_t codebook; ///< Which VLC codebook to use to read residuals.
+ uint8_t huff_lsbs; ///< Size of residual suffix not encoded using VLC.
+} ChannelParams;
+
+/** Tables defining the Huffman codes.
+ * There are three entropy coding methods used in MLP (four if you count
+ * "none" as a method). These use the same sequences for codes starting with
+ * 00 or 01, but have different codes starting with 1.
+ */
+extern const uint8_t ff_mlp_huffman_tables[3][18][2];
+
+/** MLP uses checksums that seem to be based on the standard CRC algorithm, but
+ * are not (in implementation terms, the table lookup and XOR are reversed).
+ * We can implement this behavior using a standard av_crc on all but the
+ * last element, then XOR that with the last element.
+ */
+uint8_t ff_mlp_checksum8 (const uint8_t *buf, unsigned int buf_size);
+uint16_t ff_mlp_checksum16(const uint8_t *buf, unsigned int buf_size);
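+/* Informal sketch of that equivalence for the 16-bit case (crc_2D being the
+ * table initialized in mlp.c):
+ *     ff_mlp_checksum16(buf, size) == av_crc(crc_2D, 0, buf, size - 2)
+ *                                     ^ AV_RL16(buf + size - 2)
+ */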
+
+/** Calculate an 8-bit checksum over a restart header -- a non-multiple-of-8
+ * number of bits, starting two bits into the first byte of buf.
+ */
+uint8_t ff_mlp_restart_checksum(const uint8_t *buf, unsigned int bit_size);
+
+/** XOR together all the bytes of a buffer.
+ * Does this belong in dspcontext?
+ */
+uint8_t ff_mlp_calculate_parity(const uint8_t *buf, unsigned int buf_size);
+
+void ff_mlp_init_crc(void);
+
+/** XOR four bytes into one. */
+static inline uint8_t xor_32_to_8(uint32_t value)
+{
+ value ^= value >> 16;
+ value ^= value >> 8;
+ return value;
+}
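+/* e.g. xor_32_to_8(0x12345678) == 0x12 ^ 0x34 ^ 0x56 ^ 0x78 == 0x08 */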
+
+#endif /* AVCODEC_MLP_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mlp_parser.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mlp_parser.c
new file mode 100644
index 000000000..34c1850fe
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mlp_parser.c
@@ -0,0 +1,301 @@
+/*
+ * MLP parser
+ * Copyright (c) 2007 Ian Caulfield
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/mlp_parser.c
+ * MLP parser
+ */
+
+#include <stdint.h>
+
+#include "libavutil/crc.h"
+#include "get_bits.h"
+#include "parser.h"
+#include "mlp_parser.h"
+#include "mlp.h"
+
+static const uint8_t mlp_quants[16] = {
+ 16, 20, 24, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+static const uint8_t mlp_channels[32] = {
+ 1, 2, 3, 4, 3, 4, 5, 3, 4, 5, 4, 5, 6, 4, 5, 4,
+ 5, 6, 5, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+static const uint8_t thd_chancount[13] = {
+// LR C LFE LRs LRvh LRc LRrs Cs Ts LRsd LRw Cvh LFE2
+ 2, 1, 1, 2, 2, 2, 2, 1, 1, 2, 2, 1, 1
+};
+
+static int mlp_samplerate(int in)
+{
+ if (in == 0xF)
+ return 0;
+
+ return (in & 8 ? 44100 : 48000) << (in & 7) ;
+}
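+/* Rough examples of the mapping above: 0x0 -> 48000, 0x1 -> 96000,
+ * 0x8 -> 44100, 0x9 -> 88200 and 0xF -> 0 (unknown/invalid). */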
+
+static int truehd_channels(int chanmap)
+{
+ int channels = 0, i;
+
+ for (i = 0; i < 13; i++)
+ channels += thd_chancount[i] * ((chanmap >> i) & 1);
+
+ return channels;
+}
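+/* For example, a chanmap with only the LR, C and LFE bits set (0x7) gives
+ * 2 + 1 + 1 = 4 channels according to thd_chancount[]. */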
+
+/** Read a major sync info header - contains high level information about
+ * the stream - sample rate, channel arrangement etc. Most of this
+ * information is not actually necessary for decoding, only for playback.
+ * gb must be a freshly initialized GetBitContext with no bits read.
+ */
+
+int ff_mlp_read_major_sync(void *log, MLPHeaderInfo *mh, GetBitContext *gb)
+{
+ int ratebits;
+ uint16_t checksum;
+
+ assert(get_bits_count(gb) == 0);
+
+ if (gb->size_in_bits < 28 << 3) {
+ av_log(log, AV_LOG_ERROR, "packet too short, unable to read major sync\n");
+ return -1;
+ }
+
+ checksum = ff_mlp_checksum16(gb->buffer, 26);
+    /* FFDShow custom code: disable CRC check because it does not work properly with interleaved AC3/TrueHD streams
+ if (checksum != AV_RL16(gb->buffer+26)) {
+ av_log(log, AV_LOG_ERROR, "major sync info header checksum error\n");
+ return -1;
+ }
+ */
+
+ if (get_bits_long(gb, 24) != 0xf8726f) /* Sync words */
+ return -1;
+
+ mh->stream_type = get_bits(gb, 8);
+
+ if (mh->stream_type == 0xbb) {
+ mh->group1_bits = mlp_quants[get_bits(gb, 4)];
+ mh->group2_bits = mlp_quants[get_bits(gb, 4)];
+
+ ratebits = get_bits(gb, 4);
+ mh->group1_samplerate = mlp_samplerate(ratebits);
+ mh->group2_samplerate = mlp_samplerate(get_bits(gb, 4));
+
+ skip_bits(gb, 11);
+
+ mh->channels_mlp = get_bits(gb, 5);
+ } else if (mh->stream_type == 0xba) {
+ mh->group1_bits = 24; // TODO: Is this information actually conveyed anywhere?
+ mh->group2_bits = 0;
+
+ ratebits = get_bits(gb, 4);
+ mh->group1_samplerate = mlp_samplerate(ratebits);
+ mh->group2_samplerate = 0;
+
+ skip_bits(gb, 8);
+
+ mh->channels_thd_stream1 = get_bits(gb, 5);
+
+ skip_bits(gb, 2);
+
+ mh->channels_thd_stream2 = get_bits(gb, 13);
+ } else
+ return -1;
+
+ mh->access_unit_size = 40 << (ratebits & 7);
+ mh->access_unit_size_pow2 = 64 << (ratebits & 7);
+
+ skip_bits_long(gb, 48);
+
+ mh->is_vbr = get_bits1(gb);
+
+ mh->peak_bitrate = (get_bits(gb, 15) * mh->group1_samplerate + 8) >> 4;
+
+ mh->num_substreams = get_bits(gb, 4);
+
+ skip_bits_long(gb, 4 + 11 * 8);
+
+ return 0;
+}
+
+typedef struct MLPParseContext
+{
+ ParseContext pc;
+
+ int bytes_left;
+
+ int in_sync;
+
+ int num_substreams;
+} MLPParseContext;
+
+static av_cold int mlp_init(AVCodecParserContext *s)
+{
+ ff_mlp_init_crc();
+ return 0;
+}
+
+static int mlp_parse(AVCodecParserContext *s,
+ AVCodecContext *avctx,
+ const uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size)
+{
+ MLPParseContext *mp = s->priv_data;
+ int sync_present;
+ uint8_t parity_bits;
+ int next;
+ int i, p = 0;
+
+ *poutbuf_size = 0;
+ if (buf_size == 0)
+ return 0;
+
+ if (!mp->in_sync) {
+ // Not in sync - find a major sync header
+
+ for (i = 0; i < buf_size; i++) {
+ mp->pc.state = (mp->pc.state << 8) | buf[i];
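+            /* 0xf8726fba with its low bit masked matches both sync words:
+             * 0xf8726fbb (MLP, stream_type 0xbb) and 0xf8726fba (TrueHD, 0xba). */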
+ if ((mp->pc.state & 0xfffffffe) == 0xf8726fba &&
+ // ignore if we do not have the data for the start of header
+ mp->pc.index + i >= 7) {
+ mp->in_sync = 1;
+ mp->bytes_left = 0;
+ break;
+ }
+ }
+
+ if (!mp->in_sync) {
+ ff_combine_frame(&mp->pc, END_NOT_FOUND, &buf, &buf_size);
+ return buf_size;
+ }
+
+ ff_combine_frame(&mp->pc, i - 7, &buf, &buf_size);
+
+ return i - 7;
+ }
+
+ if (mp->bytes_left == 0) {
+        // Find the length of this packet: the low 12 bits of the first 16-bit word give its size in 16-bit units
+
+ /* Copy overread bytes from last frame into buffer. */
+ for(; mp->pc.overread>0; mp->pc.overread--) {
+ mp->pc.buffer[mp->pc.index++]= mp->pc.buffer[mp->pc.overread_index++];
+ }
+
+ if (mp->pc.index + buf_size < 2) {
+ ff_combine_frame(&mp->pc, END_NOT_FOUND, &buf, &buf_size);
+ return buf_size;
+ }
+
+ mp->bytes_left = ((mp->pc.index > 0 ? mp->pc.buffer[0] : buf[0]) << 8)
+ | (mp->pc.index > 1 ? mp->pc.buffer[1] : buf[1-mp->pc.index]);
+ mp->bytes_left = (mp->bytes_left & 0xfff) * 2;
+ mp->bytes_left -= mp->pc.index;
+ }
+
+ next = (mp->bytes_left > buf_size) ? END_NOT_FOUND : mp->bytes_left;
+
+ if (ff_combine_frame(&mp->pc, next, &buf, &buf_size) < 0) {
+ mp->bytes_left -= buf_size;
+ return buf_size;
+ }
+
+ mp->bytes_left = 0;
+
+ sync_present = (AV_RB32(buf + 4) & 0xfffffffe) == 0xf8726fba;
+
+ if (!sync_present) {
+ /* The first nibble of a frame is a parity check of the 4-byte
+ * access unit header and all the 2- or 4-byte substream headers. */
+ // Only check when this isn't a sync frame - syncs have a checksum.
+
+ parity_bits = 0;
+ for (i = -1; i < mp->num_substreams; i++) {
+ parity_bits ^= buf[p++];
+ parity_bits ^= buf[p++];
+
+ if (i < 0 || buf[p-2] & 0x80) {
+ parity_bits ^= buf[p++];
+ parity_bits ^= buf[p++];
+ }
+ }
+
+        /* FFDShow custom code: parity check disabled; incompatible with interleaved AC3/MLP streams
+ if ((((parity_bits >> 4) ^ parity_bits) & 0xF) != 0xF) {
+ av_log(avctx, AV_LOG_INFO, "mlpparse: Parity check failed.\n");
+ goto lost_sync;
+ }
+ */
+ } else {
+ GetBitContext gb;
+ MLPHeaderInfo mh;
+
+ init_get_bits(&gb, buf + 4, (buf_size - 4) << 3);
+ if (ff_mlp_read_major_sync(avctx, &mh, &gb) < 0)
+ goto lost_sync;
+
+ avctx->bits_per_raw_sample = mh.group1_bits;
+ if (avctx->bits_per_raw_sample > 16)
+ avctx->sample_fmt = SAMPLE_FMT_S32;
+ else
+ avctx->sample_fmt = SAMPLE_FMT_S16;
+ avctx->sample_rate = mh.group1_samplerate;
+ avctx->frame_size = mh.access_unit_size;
+
+ if (mh.stream_type == 0xbb) {
+ /* MLP stream */
+ avctx->channels = mlp_channels[mh.channels_mlp];
+ } else { /* mh.stream_type == 0xba */
+ /* TrueHD stream */
+ if (mh.channels_thd_stream2)
+ avctx->channels = truehd_channels(mh.channels_thd_stream2);
+ else
+ avctx->channels = truehd_channels(mh.channels_thd_stream1);
+ }
+
+ if (!mh.is_vbr) /* Stream is CBR */
+ avctx->bit_rate = mh.peak_bitrate;
+
+ mp->num_substreams = mh.num_substreams;
+ }
+
+ *poutbuf = buf;
+ *poutbuf_size = buf_size;
+
+ return next;
+
+lost_sync:
+ mp->in_sync = 0;
+ return 1;
+}
+
+AVCodecParser mlp_parser = {
+ { CODEC_ID_MLP, CODEC_ID_TRUEHD },
+ sizeof(MLPParseContext),
+ mlp_init,
+ mlp_parse,
+ NULL,
+};
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mlp_parser.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mlp_parser.h
new file mode 100644
index 000000000..5e8861b94
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mlp_parser.h
@@ -0,0 +1,59 @@
+/*
+ * MLP parser prototypes
+ * Copyright (c) 2007 Ian Caulfield
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/mlp_parser.h
+ * MLP parser prototypes
+ */
+
+#ifndef AVCODEC_MLP_PARSER_H
+#define AVCODEC_MLP_PARSER_H
+
+#include "get_bits.h"
+
+typedef struct MLPHeaderInfo
+{
+ int stream_type; ///< 0xBB for MLP, 0xBA for TrueHD
+
+ int group1_bits; ///< The bit depth of the first substream
+ int group2_bits; ///< Bit depth of the second substream (MLP only)
+
+ int group1_samplerate; ///< Sample rate of first substream
+ int group2_samplerate; ///< Sample rate of second substream (MLP only)
+
+ int channels_mlp; ///< Channel arrangement for MLP streams
+ int channels_thd_stream1; ///< Channel arrangement for substream 1 of TrueHD streams (5.1)
+ int channels_thd_stream2; ///< Channel arrangement for substream 2 of TrueHD streams (7.1)
+
+ int access_unit_size; ///< Number of samples per coded frame
+ int access_unit_size_pow2; ///< Next power of two above number of samples per frame
+
+ int is_vbr; ///< Stream is VBR instead of CBR
+ int peak_bitrate; ///< Peak bitrate for VBR, actual bitrate (==peak) for CBR
+
+ int num_substreams; ///< Number of substreams within stream
+} MLPHeaderInfo;
+
+
+int ff_mlp_read_major_sync(void *log, MLPHeaderInfo *mh, GetBitContext *gb);
+
+#endif /* AVCODEC_MLP_PARSER_H */
+
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mlpdec.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mlpdec.c
new file mode 100644
index 000000000..a5ba3d0b6
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mlpdec.c
@@ -0,0 +1,1171 @@
+/*
+ * MLP decoder
+ * Copyright (c) 2007-2008 Ian Caulfield
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/mlpdec.c
+ * MLP decoder
+ */
+
+#include <stdint.h>
+
+#include "avcodec.h"
+#include "dsputil.h"
+#include "libavutil/intreadwrite.h"
+#include "get_bits.h"
+#include "libavutil/crc.h"
+#include "parser.h"
+#include "mlp_parser.h"
+#include "mlp.h"
+
+/** number of bits used for VLC lookup - longest Huffman code is 9 */
+#define VLC_BITS 9
+
+
+static const char* sample_message =
+ "Please file a bug report following the instructions at "
+ "http://ffmpeg.org/bugreports.html and include "
+ "a sample of this file.";
+
+typedef struct SubStream {
+ //! Set if a valid restart header has been read. Otherwise the substream cannot be decoded.
+ uint8_t restart_seen;
+
+ //@{
+ /** restart header data */
+ //! The type of noise to be used in the rematrix stage.
+ uint16_t noise_type;
+
+ //! The index of the first channel coded in this substream.
+ uint8_t min_channel;
+ //! The index of the last channel coded in this substream.
+ uint8_t max_channel;
+ //! The number of channels input into the rematrix stage.
+ uint8_t max_matrix_channel;
+ //! For each channel output by the matrix, the output channel to map it to
+ uint8_t ch_assign[MAX_CHANNELS];
+
+ //! The left shift applied to random noise in 0x31ea substreams.
+ uint8_t noise_shift;
+ //! The current seed value for the pseudorandom noise generator(s).
+ uint32_t noisegen_seed;
+
+ //! Set if the substream contains extra info to check the size of VLC blocks.
+ uint8_t data_check_present;
+
+ //! Bitmask of which parameter sets are conveyed in a decoding parameter block.
+ uint8_t param_presence_flags;
+#define PARAM_BLOCKSIZE (1 << 7)
+#define PARAM_MATRIX (1 << 6)
+#define PARAM_OUTSHIFT (1 << 5)
+#define PARAM_QUANTSTEP (1 << 4)
+#define PARAM_FIR (1 << 3)
+#define PARAM_IIR (1 << 2)
+#define PARAM_HUFFOFFSET (1 << 1)
+#define PARAM_PRESENCE (1 << 0)
+ //@}
+
+ //@{
+ /** matrix data */
+
+ //! Number of matrices to be applied.
+ uint8_t num_primitive_matrices;
+
+ //! matrix output channel
+ uint8_t matrix_out_ch[MAX_MATRICES];
+
+ //! Whether the LSBs of the matrix output are encoded in the bitstream.
+ uint8_t lsb_bypass[MAX_MATRICES];
+ //! Matrix coefficients, stored as 2.14 fixed point.
+ int32_t matrix_coeff[MAX_MATRICES][MAX_CHANNELS];
+ //! Left shift to apply to noise values in 0x31eb substreams.
+ uint8_t matrix_noise_shift[MAX_MATRICES];
+ //@}
+
+ //! Left shift to apply to Huffman-decoded residuals.
+ uint8_t quant_step_size[MAX_CHANNELS];
+
+ //! number of PCM samples in current audio block
+ uint16_t blocksize;
+ //! Number of PCM samples decoded so far in this frame.
+ uint16_t blockpos;
+
+ //! Left shift to apply to decoded PCM values to get final 24-bit output.
+ int8_t output_shift[MAX_CHANNELS];
+
+ //! Running XOR of all output samples.
+ int32_t lossless_check_data;
+
+} SubStream;
+
+typedef struct MLPDecodeContext {
+ AVCodecContext *avctx;
+
+ //! Current access unit being read has a major sync.
+ int is_major_sync_unit;
+
+ //! Set if a valid major sync block has been read. Otherwise no decoding is possible.
+ uint8_t params_valid;
+
+ //! Number of substreams contained within this stream.
+ uint8_t num_substreams;
+
+ //! Index of the last substream to decode - further substreams are skipped.
+ uint8_t max_decoded_substream;
+
+ //! number of PCM samples contained in each frame
+ int access_unit_size;
+ //! next power of two above the number of samples in each frame
+ int access_unit_size_pow2;
+
+ SubStream substream[MAX_SUBSTREAMS];
+
+ ChannelParams channel_params[MAX_CHANNELS];
+
+ int matrix_changed;
+ int filter_changed[MAX_CHANNELS][NUM_FILTERS];
+
+ int8_t noise_buffer[MAX_BLOCKSIZE_POW2];
+ int8_t bypassed_lsbs[MAX_BLOCKSIZE][MAX_CHANNELS];
+ int32_t sample_buffer[MAX_BLOCKSIZE][MAX_CHANNELS];
+
+ DSPContext dsp;
+} MLPDecodeContext;
+
+static VLC huff_vlc[3];
+
+/** Initialize static data, constant between all invocations of the codec. */
+
+static av_cold void init_static(void)
+{
+ if (!huff_vlc[0].bits) {
+ INIT_VLC_STATIC(&huff_vlc[0], VLC_BITS, 18,
+ &ff_mlp_huffman_tables[0][0][1], 2, 1,
+ &ff_mlp_huffman_tables[0][0][0], 2, 1, 512);
+ INIT_VLC_STATIC(&huff_vlc[1], VLC_BITS, 16,
+ &ff_mlp_huffman_tables[1][0][1], 2, 1,
+ &ff_mlp_huffman_tables[1][0][0], 2, 1, 512);
+ INIT_VLC_STATIC(&huff_vlc[2], VLC_BITS, 15,
+ &ff_mlp_huffman_tables[2][0][1], 2, 1,
+ &ff_mlp_huffman_tables[2][0][0], 2, 1, 512);
+ }
+
+ ff_mlp_init_crc();
+}
+
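+/** Calculate the constant offset that is added to the unsigned VLC + LSB value
+ * in read_huff_channels() to turn it into a signed sample, taking into account
+ * the signalled Huffman offset, the codebook in use and the number of LSBs. */
+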
+static inline int32_t calculate_sign_huff(MLPDecodeContext *m,
+ unsigned int substr, unsigned int ch)
+{
+ ChannelParams *cp = &m->channel_params[ch];
+ SubStream *s = &m->substream[substr];
+ int lsb_bits = cp->huff_lsbs - s->quant_step_size[ch];
+ int sign_shift = lsb_bits + (cp->codebook ? 2 - cp->codebook : -1);
+ int32_t sign_huff_offset = cp->huff_offset;
+
+ if (cp->codebook > 0)
+ sign_huff_offset -= 7 << lsb_bits;
+
+ if (sign_shift >= 0)
+ sign_huff_offset -= 1 << sign_shift;
+
+ return sign_huff_offset;
+}
+
+/** Read one sample per channel, made up of either, both or neither of the
+ * entropy-coded MSBs and the plain LSBs. */
+
+static inline int read_huff_channels(MLPDecodeContext *m, GetBitContext *gbp,
+ unsigned int substr, unsigned int pos)
+{
+ SubStream *s = &m->substream[substr];
+ unsigned int mat, channel;
+
+ for (mat = 0; mat < s->num_primitive_matrices; mat++)
+ if (s->lsb_bypass[mat])
+ m->bypassed_lsbs[pos + s->blockpos][mat] = get_bits1(gbp);
+
+ for (channel = s->min_channel; channel <= s->max_channel; channel++) {
+ ChannelParams *cp = &m->channel_params[channel];
+ int codebook = cp->codebook;
+ int quant_step_size = s->quant_step_size[channel];
+ int lsb_bits = cp->huff_lsbs - quant_step_size;
+ int result = 0;
+
+ if (codebook > 0)
+ result = get_vlc2(gbp, huff_vlc[codebook-1].table,
+ VLC_BITS, (9 + VLC_BITS - 1) / VLC_BITS);
+
+ if (result < 0)
+ return -1;
+
+ if (lsb_bits > 0)
+ result = (result << lsb_bits) + get_bits(gbp, lsb_bits);
+
+ result += cp->sign_huff_offset;
+ result <<= quant_step_size;
+
+ m->sample_buffer[pos + s->blockpos][channel] = result;
+ }
+
+ return 0;
+}
+
+static av_cold int mlp_decode_init(AVCodecContext *avctx)
+{
+ MLPDecodeContext *m = avctx->priv_data;
+ int substr;
+
+ init_static();
+ m->avctx = avctx;
+ for (substr = 0; substr < MAX_SUBSTREAMS; substr++)
+ m->substream[substr].lossless_check_data = 0xffffffff;
+ dsputil_init(&m->dsp, avctx);
+
+ return 0;
+}
+
+/** Read a major sync info header - contains high-level information about
+ * the stream, such as sample rate and channel arrangement. Most of this
+ * information is not actually necessary for decoding, only for playback.
+ */
+
+static int read_major_sync(MLPDecodeContext *m, GetBitContext *gb)
+{
+ MLPHeaderInfo mh;
+ int substr;
+
+ if (ff_mlp_read_major_sync(m->avctx, &mh, gb) != 0)
+ return -1;
+
+ if (mh.group1_bits == 0) {
+ av_log(m->avctx, AV_LOG_ERROR, "invalid/unknown bits per sample\n");
+ return -1;
+ }
+ if (mh.group2_bits > mh.group1_bits) {
+ av_log(m->avctx, AV_LOG_ERROR,
+ "Channel group 2 cannot have more bits per sample than group 1.\n");
+ return -1;
+ }
+
+ if (mh.group2_samplerate && mh.group2_samplerate != mh.group1_samplerate) {
+ av_log(m->avctx, AV_LOG_ERROR,
+ "Channel groups with differing sample rates are not currently supported.\n");
+ return -1;
+ }
+
+ if (mh.group1_samplerate == 0) {
+ av_log(m->avctx, AV_LOG_ERROR, "invalid/unknown sampling rate\n");
+ return -1;
+ }
+ if (mh.group1_samplerate > MAX_SAMPLERATE) {
+ av_log(m->avctx, AV_LOG_ERROR,
+ "Sampling rate %d is greater than the supported maximum (%d).\n",
+ mh.group1_samplerate, MAX_SAMPLERATE);
+ return -1;
+ }
+ if (mh.access_unit_size > MAX_BLOCKSIZE) {
+ av_log(m->avctx, AV_LOG_ERROR,
+ "Block size %d is greater than the supported maximum (%d).\n",
+ mh.access_unit_size, MAX_BLOCKSIZE);
+ return -1;
+ }
+ if (mh.access_unit_size_pow2 > MAX_BLOCKSIZE_POW2) {
+ av_log(m->avctx, AV_LOG_ERROR,
+ "Block size pow2 %d is greater than the supported maximum (%d).\n",
+ mh.access_unit_size_pow2, MAX_BLOCKSIZE_POW2);
+ return -1;
+ }
+
+ if (mh.num_substreams == 0)
+ return -1;
+ if (m->avctx->codec_id == CODEC_ID_MLP && mh.num_substreams > 2) {
+ av_log(m->avctx, AV_LOG_ERROR, "MLP only supports up to 2 substreams.\n");
+ return -1;
+ }
+ if (mh.num_substreams > MAX_SUBSTREAMS) {
+ av_log(m->avctx, AV_LOG_ERROR,
+ "Number of substreams %d is larger than the maximum supported "
+ "by the decoder. %s\n", mh.num_substreams, sample_message);
+ return -1;
+ }
+
+ m->access_unit_size = mh.access_unit_size;
+ m->access_unit_size_pow2 = mh.access_unit_size_pow2;
+
+ m->num_substreams = mh.num_substreams;
+ m->max_decoded_substream = m->num_substreams - 1;
+
+ m->avctx->sample_rate = mh.group1_samplerate;
+ m->avctx->frame_size = mh.access_unit_size;
+
+ m->avctx->bits_per_raw_sample = mh.group1_bits;
+ if (mh.group1_bits > 16)
+ m->avctx->sample_fmt = SAMPLE_FMT_S32;
+ else
+ m->avctx->sample_fmt = SAMPLE_FMT_S16;
+
+ m->params_valid = 1;
+ for (substr = 0; substr < MAX_SUBSTREAMS; substr++)
+ m->substream[substr].restart_seen = 0;
+
+ return 0;
+}
+
+/** Read a restart header from a block in a substream. It carries parameters
+ * that are required for decoding but change only rarely; in practice it is
+ * present only in blocks that follow a major sync. */
+
+static int read_restart_header(MLPDecodeContext *m, GetBitContext *gbp,
+ const uint8_t *buf, unsigned int substr)
+{
+ SubStream *s = &m->substream[substr];
+ unsigned int ch;
+ int sync_word, tmp;
+ uint8_t checksum;
+ uint8_t lossless_check;
+ int start_count = get_bits_count(gbp);
+ const int max_matrix_channel = m->avctx->codec_id == CODEC_ID_MLP
+ ? MAX_MATRIX_CHANNEL_MLP
+ : MAX_MATRIX_CHANNEL_TRUEHD;
+
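+    /* The 14-bit restart sync word is 0x31ea or 0x31eb; the top 13 bits are
+     * checked here and the final bit is read below as noise_type. */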
+ sync_word = get_bits(gbp, 13);
+
+ if (sync_word != 0x31ea >> 1) {
+ av_log(m->avctx, AV_LOG_ERROR,
+ "restart header sync incorrect (got 0x%04x)\n", sync_word);
+ return -1;
+ }
+
+ s->noise_type = get_bits1(gbp);
+
+ if (m->avctx->codec_id == CODEC_ID_MLP && s->noise_type) {
+ av_log(m->avctx, AV_LOG_ERROR, "MLP must have 0x31ea sync word.\n");
+ return -1;
+ }
+
+ skip_bits(gbp, 16); /* Output timestamp */
+
+ s->min_channel = get_bits(gbp, 4);
+ s->max_channel = get_bits(gbp, 4);
+ s->max_matrix_channel = get_bits(gbp, 4);
+
+ if (s->max_matrix_channel > max_matrix_channel) {
+ av_log(m->avctx, AV_LOG_ERROR,
+ "Max matrix channel cannot be greater than %d.\n",
+ max_matrix_channel);
+ return -1;
+ }
+
+ if (s->max_channel != s->max_matrix_channel) {
+ av_log(m->avctx, AV_LOG_ERROR,
+ "Max channel must be equal max matrix channel.\n");
+ return -1;
+ }
+
+    /* This case arises for TrueHD streams with more than 6 channels that use
+     * MLP's noise type. It is not yet known whether this is allowed. */
+ if (s->max_channel > MAX_MATRIX_CHANNEL_MLP && !s->noise_type) {
+ av_log(m->avctx, AV_LOG_ERROR,
+ "Number of channels %d is larger than the maximum supported "
+ "by the decoder. %s\n", s->max_channel+2, sample_message);
+ return -1;
+ }
+
+ if (s->min_channel > s->max_channel) {
+ av_log(m->avctx, AV_LOG_ERROR,
+ "Substream min channel cannot be greater than max channel.\n");
+ return -1;
+ }
+
+ if (m->avctx->request_channels > 0
+ && s->max_channel + 1 >= m->avctx->request_channels
+ && substr < m->max_decoded_substream) {
+ av_log(m->avctx, AV_LOG_INFO,
+ "Extracting %d channel downmix from substream %d. "
+ "Further substreams will be skipped.\n",
+ s->max_channel + 1, substr);
+ m->max_decoded_substream = substr;
+ }
+
+ s->noise_shift = get_bits(gbp, 4);
+ s->noisegen_seed = get_bits(gbp, 23);
+
+ skip_bits(gbp, 19);
+
+ s->data_check_present = get_bits1(gbp);
+ lossless_check = get_bits(gbp, 8);
+ if (substr == m->max_decoded_substream
+ && s->lossless_check_data != 0xffffffff) {
+ tmp = xor_32_to_8(s->lossless_check_data);
+ if (tmp != lossless_check)
+ av_log(m->avctx, AV_LOG_WARNING,
+ "Lossless check failed - expected %02x, calculated %02x.\n",
+ lossless_check, tmp);
+ }
+
+ skip_bits(gbp, 16);
+
+ memset(s->ch_assign, 0, sizeof(s->ch_assign));
+
+ for (ch = 0; ch <= s->max_matrix_channel; ch++) {
+ int ch_assign = get_bits(gbp, 6);
+ if (ch_assign > s->max_matrix_channel) {
+ av_log(m->avctx, AV_LOG_ERROR,
+ "Assignment of matrix channel %d to invalid output channel %d. %s\n",
+ ch, ch_assign, sample_message);
+ return -1;
+ }
+ s->ch_assign[ch_assign] = ch;
+ }
+
+ checksum = ff_mlp_restart_checksum(buf, get_bits_count(gbp) - start_count);
+
+ if (checksum != get_bits(gbp, 8))
+ av_log(m->avctx, AV_LOG_ERROR, "restart header checksum error\n");
+
+ /* Set default decoding parameters. */
+ s->param_presence_flags = 0xff;
+ s->num_primitive_matrices = 0;
+ s->blocksize = 8;
+ s->lossless_check_data = 0;
+
+ memset(s->output_shift , 0, sizeof(s->output_shift ));
+ memset(s->quant_step_size, 0, sizeof(s->quant_step_size));
+
+ for (ch = s->min_channel; ch <= s->max_channel; ch++) {
+ ChannelParams *cp = &m->channel_params[ch];
+ cp->filter_params[FIR].order = 0;
+ cp->filter_params[IIR].order = 0;
+ cp->filter_params[FIR].shift = 0;
+ cp->filter_params[IIR].shift = 0;
+
+ /* Default audio coding is 24-bit raw PCM. */
+ cp->huff_offset = 0;
+ cp->sign_huff_offset = (-1) << 23;
+ cp->codebook = 0;
+ cp->huff_lsbs = 24;
+ }
+
+ if (substr == m->max_decoded_substream)
+ m->avctx->channels = s->max_matrix_channel + 1;
+
+ return 0;
+}
+
+/** Read parameters for one of the prediction filters. */
+
+static int read_filter_params(MLPDecodeContext *m, GetBitContext *gbp,
+ unsigned int channel, unsigned int filter)
+{
+ FilterParams *fp = &m->channel_params[channel].filter_params[filter];
+ const int max_order = filter ? MAX_IIR_ORDER : MAX_FIR_ORDER;
+ const char fchar = filter ? 'I' : 'F';
+ int i, order;
+
+ // Filter is 0 for FIR, 1 for IIR.
+ assert(filter < 2);
+
+ if (m->filter_changed[channel][filter]++ > 1) {
+ av_log(m->avctx, AV_LOG_ERROR, "Filters may change only once per access unit.\n");
+ return -1;
+ }
+
+ order = get_bits(gbp, 4);
+ if (order > max_order) {
+ av_log(m->avctx, AV_LOG_ERROR,
+ "%cIR filter order %d is greater than maximum %d.\n",
+ fchar, order, max_order);
+ return -1;
+ }
+ fp->order = order;
+
+ if (order > 0) {
+ int32_t *fcoeff = m->channel_params[channel].coeff[filter];
+ int coeff_bits, coeff_shift;
+
+ fp->shift = get_bits(gbp, 4);
+
+ coeff_bits = get_bits(gbp, 5);
+ coeff_shift = get_bits(gbp, 3);
+ if (coeff_bits < 1 || coeff_bits > 16) {
+ av_log(m->avctx, AV_LOG_ERROR,
+ "%cIR filter coeff_bits must be between 1 and 16.\n",
+ fchar);
+ return -1;
+ }
+ if (coeff_bits + coeff_shift > 16) {
+ av_log(m->avctx, AV_LOG_ERROR,
+ "Sum of coeff_bits and coeff_shift for %cIR filter must be 16 or less.\n",
+ fchar);
+ return -1;
+ }
+
+ for (i = 0; i < order; i++)
+ fcoeff[i] = get_sbits(gbp, coeff_bits) << coeff_shift;
+
+ if (get_bits1(gbp)) {
+ int state_bits, state_shift;
+
+ if (filter == FIR) {
+ av_log(m->avctx, AV_LOG_ERROR,
+ "FIR filter has state data specified.\n");
+ return -1;
+ }
+
+ state_bits = get_bits(gbp, 4);
+ state_shift = get_bits(gbp, 4);
+
+ /* TODO: Check validity of state data. */
+
+ for (i = 0; i < order; i++)
+ fp->state[i] = get_sbits(gbp, state_bits) << state_shift;
+ }
+ }
+
+ return 0;
+}
+
+/** Read parameters for primitive matrices. */
+
+static int read_matrix_params(MLPDecodeContext *m, unsigned int substr, GetBitContext *gbp)
+{
+ SubStream *s = &m->substream[substr];
+ unsigned int mat, ch;
+ const int max_primitive_matrices = m->avctx->codec_id == CODEC_ID_MLP
+ ? MAX_MATRICES_MLP
+ : MAX_MATRICES_TRUEHD;
+
+ if (m->matrix_changed++ > 1) {
+ av_log(m->avctx, AV_LOG_ERROR, "Matrices may change only once per access unit.\n");
+ return -1;
+ }
+
+ s->num_primitive_matrices = get_bits(gbp, 4);
+
+ if (s->num_primitive_matrices > max_primitive_matrices) {
+ av_log(m->avctx, AV_LOG_ERROR,
+ "Number of primitive matrices cannot be greater than %d.\n",
+ max_primitive_matrices);
+ return -1;
+ }
+
+ for (mat = 0; mat < s->num_primitive_matrices; mat++) {
+ int frac_bits, max_chan;
+ s->matrix_out_ch[mat] = get_bits(gbp, 4);
+ frac_bits = get_bits(gbp, 4);
+ s->lsb_bypass [mat] = get_bits1(gbp);
+
+ if (s->matrix_out_ch[mat] > s->max_matrix_channel) {
+ av_log(m->avctx, AV_LOG_ERROR,
+ "Invalid channel %d specified as output from matrix.\n",
+ s->matrix_out_ch[mat]);
+ return -1;
+ }
+ if (frac_bits > 14) {
+ av_log(m->avctx, AV_LOG_ERROR,
+ "Too many fractional bits specified.\n");
+ return -1;
+ }
+
+ max_chan = s->max_matrix_channel;
+ if (!s->noise_type)
+ max_chan+=2;
+
+ for (ch = 0; ch <= max_chan; ch++) {
+ int coeff_val = 0;
+ if (get_bits1(gbp))
+ coeff_val = get_sbits(gbp, frac_bits + 2);
+
+ s->matrix_coeff[mat][ch] = coeff_val << (14 - frac_bits);
+ }
+
+ if (s->noise_type)
+ s->matrix_noise_shift[mat] = get_bits(gbp, 4);
+ else
+ s->matrix_noise_shift[mat] = 0;
+ }
+
+ return 0;
+}
+
+/** Read channel parameters. */
+
+static int read_channel_params(MLPDecodeContext *m, unsigned int substr,
+ GetBitContext *gbp, unsigned int ch)
+{
+ ChannelParams *cp = &m->channel_params[ch];
+ FilterParams *fir = &cp->filter_params[FIR];
+ FilterParams *iir = &cp->filter_params[IIR];
+ SubStream *s = &m->substream[substr];
+
+ if (s->param_presence_flags & PARAM_FIR)
+ if (get_bits1(gbp))
+ if (read_filter_params(m, gbp, ch, FIR) < 0)
+ return -1;
+
+ if (s->param_presence_flags & PARAM_IIR)
+ if (get_bits1(gbp))
+ if (read_filter_params(m, gbp, ch, IIR) < 0)
+ return -1;
+
+ if (fir->order + iir->order > 8) {
+ av_log(m->avctx, AV_LOG_ERROR, "Total filter orders too high.\n");
+ return -1;
+ }
+
+ if (fir->order && iir->order &&
+ fir->shift != iir->shift) {
+ av_log(m->avctx, AV_LOG_ERROR,
+ "FIR and IIR filters must use the same precision.\n");
+ return -1;
+ }
+ /* The FIR and IIR filters must have the same precision.
+ * To simplify the filtering code, only the precision of the
+ * FIR filter is considered. If only the IIR filter is employed,
+ * the FIR filter precision is set to that of the IIR filter, so
+ * that the filtering code can use it. */
+ if (!fir->order && iir->order)
+ fir->shift = iir->shift;
+
+ if (s->param_presence_flags & PARAM_HUFFOFFSET)
+ if (get_bits1(gbp))
+ cp->huff_offset = get_sbits(gbp, 15);
+
+ cp->codebook = get_bits(gbp, 2);
+ cp->huff_lsbs = get_bits(gbp, 5);
+
+ if (cp->huff_lsbs > 24) {
+ av_log(m->avctx, AV_LOG_ERROR, "Invalid huff_lsbs.\n");
+ return -1;
+ }
+
+ cp->sign_huff_offset = calculate_sign_huff(m, substr, ch);
+
+ return 0;
+}
+
+/** Read decoding parameters that change more often than those in the restart
+ * header. */
+
+static int read_decoding_params(MLPDecodeContext *m, GetBitContext *gbp,
+ unsigned int substr)
+{
+ SubStream *s = &m->substream[substr];
+ unsigned int ch;
+
+ if (s->param_presence_flags & PARAM_PRESENCE)
+ if (get_bits1(gbp))
+ s->param_presence_flags = get_bits(gbp, 8);
+
+ if (s->param_presence_flags & PARAM_BLOCKSIZE)
+ if (get_bits1(gbp)) {
+ s->blocksize = get_bits(gbp, 9);
+ if (s->blocksize < 8 || s->blocksize > m->access_unit_size) {
+ av_log(m->avctx, AV_LOG_ERROR, "Invalid blocksize.");
+ s->blocksize = 0;
+ return -1;
+ }
+ }
+
+ if (s->param_presence_flags & PARAM_MATRIX)
+ if (get_bits1(gbp))
+ if (read_matrix_params(m, substr, gbp) < 0)
+ return -1;
+
+ if (s->param_presence_flags & PARAM_OUTSHIFT)
+ if (get_bits1(gbp))
+ for (ch = 0; ch <= s->max_matrix_channel; ch++)
+ s->output_shift[ch] = get_sbits(gbp, 4);
+
+ if (s->param_presence_flags & PARAM_QUANTSTEP)
+ if (get_bits1(gbp))
+ for (ch = 0; ch <= s->max_channel; ch++) {
+ ChannelParams *cp = &m->channel_params[ch];
+
+ s->quant_step_size[ch] = get_bits(gbp, 4);
+
+ cp->sign_huff_offset = calculate_sign_huff(m, substr, ch);
+ }
+
+ for (ch = s->min_channel; ch <= s->max_channel; ch++)
+ if (get_bits1(gbp))
+ if (read_channel_params(m, substr, gbp, ch) < 0)
+ return -1;
+
+ return 0;
+}
+
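+/** Mask that clears the lowest 'bits' bits of a sample; used to strip the
+ * quantized LSBs from filtered and rematrixed samples. */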
+#define MSB_MASK(bits) (-1u << bits)
+
+/** Generate PCM samples using the prediction filters and residual values
+ * read from the data stream, and update the filter state. */
+
+static void filter_channel(MLPDecodeContext *m, unsigned int substr,
+ unsigned int channel)
+{
+ SubStream *s = &m->substream[substr];
+ const int32_t *fircoeff = m->channel_params[channel].coeff[FIR];
+ int32_t state_buffer[NUM_FILTERS][MAX_BLOCKSIZE + MAX_FIR_ORDER];
+ int32_t *firbuf = state_buffer[FIR] + MAX_BLOCKSIZE;
+ int32_t *iirbuf = state_buffer[IIR] + MAX_BLOCKSIZE;
+ FilterParams *fir = &m->channel_params[channel].filter_params[FIR];
+ FilterParams *iir = &m->channel_params[channel].filter_params[IIR];
+ unsigned int filter_shift = fir->shift;
+ int32_t mask = MSB_MASK(s->quant_step_size[channel]);
+
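+    /* Copy the filter state to the end of a local buffer so that the DSP
+     * routine can prepend newly computed samples as it walks backwards. */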
+ memcpy(firbuf, fir->state, MAX_FIR_ORDER * sizeof(int32_t));
+ memcpy(iirbuf, iir->state, MAX_IIR_ORDER * sizeof(int32_t));
+
+ m->dsp.mlp_filter_channel(firbuf, fircoeff,
+ fir->order, iir->order,
+ filter_shift, mask, s->blocksize,
+ &m->sample_buffer[s->blockpos][channel]);
+
+ memcpy(fir->state, firbuf - s->blocksize, MAX_FIR_ORDER * sizeof(int32_t));
+ memcpy(iir->state, iirbuf - s->blocksize, MAX_IIR_ORDER * sizeof(int32_t));
+}
+
+/** Read a block of PCM residual data (or actual PCM data if no filtering is active). */
+
+static int read_block_data(MLPDecodeContext *m, GetBitContext *gbp,
+ unsigned int substr)
+{
+ SubStream *s = &m->substream[substr];
+ unsigned int i, ch, expected_stream_pos = 0;
+
+ if (s->data_check_present) {
+ expected_stream_pos = get_bits_count(gbp);
+ expected_stream_pos += get_bits(gbp, 16);
+ av_log(m->avctx, AV_LOG_WARNING, "This file contains some features "
+ "we have not tested yet. %s\n", sample_message);
+ }
+
+ if (s->blockpos + s->blocksize > m->access_unit_size) {
+ av_log(m->avctx, AV_LOG_ERROR, "too many audio samples in frame\n");
+ return -1;
+ }
+
+ memset(&m->bypassed_lsbs[s->blockpos][0], 0,
+ s->blocksize * sizeof(m->bypassed_lsbs[0]));
+
+ for (i = 0; i < s->blocksize; i++)
+ if (read_huff_channels(m, gbp, substr, i) < 0)
+ return -1;
+
+ for (ch = s->min_channel; ch <= s->max_channel; ch++)
+ filter_channel(m, substr, ch);
+
+ s->blockpos += s->blocksize;
+
+ if (s->data_check_present) {
+ if (get_bits_count(gbp) != expected_stream_pos)
+ av_log(m->avctx, AV_LOG_ERROR, "block data length mismatch\n");
+ skip_bits(gbp, 8);
+ }
+
+ return 0;
+}
+
+/** Data table used for TrueHD noise generation function. */
+
+static const int8_t noise_table[256] = {
+ 30, 51, 22, 54, 3, 7, -4, 38, 14, 55, 46, 81, 22, 58, -3, 2,
+ 52, 31, -7, 51, 15, 44, 74, 30, 85, -17, 10, 33, 18, 80, 28, 62,
+ 10, 32, 23, 69, 72, 26, 35, 17, 73, 60, 8, 56, 2, 6, -2, -5,
+ 51, 4, 11, 50, 66, 76, 21, 44, 33, 47, 1, 26, 64, 48, 57, 40,
+ 38, 16, -10, -28, 92, 22, -18, 29, -10, 5, -13, 49, 19, 24, 70, 34,
+ 61, 48, 30, 14, -6, 25, 58, 33, 42, 60, 67, 17, 54, 17, 22, 30,
+ 67, 44, -9, 50, -11, 43, 40, 32, 59, 82, 13, 49, -14, 55, 60, 36,
+ 48, 49, 31, 47, 15, 12, 4, 65, 1, 23, 29, 39, 45, -2, 84, 69,
+ 0, 72, 37, 57, 27, 41, -15, -16, 35, 31, 14, 61, 24, 0, 27, 24,
+ 16, 41, 55, 34, 53, 9, 56, 12, 25, 29, 53, 5, 20, -20, -8, 20,
+ 13, 28, -3, 78, 38, 16, 11, 62, 46, 29, 21, 24, 46, 65, 43, -23,
+ 89, 18, 74, 21, 38, -12, 19, 12, -19, 8, 15, 33, 4, 57, 9, -8,
+ 36, 35, 26, 28, 7, 83, 63, 79, 75, 11, 3, 87, 37, 47, 34, 40,
+ 39, 19, 20, 42, 27, 34, 39, 77, 13, 42, 59, 64, 45, -1, 32, 37,
+ 45, -5, 53, -6, 7, 36, 50, 23, 6, 32, 9, -21, 18, 71, 27, 52,
+ -25, 31, 35, 42, -1, 68, 63, 52, 26, 43, 66, 37, 41, 25, 40, 70,
+};
+
+/** Noise generation functions.
+ * Their exact purpose is unclear: they appear to be pseudorandom sequence
+ * generators that produce the noise data used when the channels are
+ * rematrixed. Whether they give a real benefit to compression, act as a form
+ * of dithering, or merely obfuscate the decoder is not known. */
+
+/** Generate two channels of noise, used in the matrix when
+ * restart sync word == 0x31ea. */
+
+static void generate_2_noise_channels(MLPDecodeContext *m, unsigned int substr)
+{
+ SubStream *s = &m->substream[substr];
+ unsigned int i;
+ uint32_t seed = s->noisegen_seed;
+ unsigned int maxchan = s->max_matrix_channel;
+
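+    /* The two noise channels are written just after the matrix channels. Each
+     * sample is an 8-bit slice of the seed, scaled by noise_shift; the seed is
+     * then advanced with a shift-and-XOR recurrence. */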
+ for (i = 0; i < s->blockpos; i++) {
+ uint16_t seed_shr7 = seed >> 7;
+ m->sample_buffer[i][maxchan+1] = ((int8_t)(seed >> 15)) << s->noise_shift;
+ m->sample_buffer[i][maxchan+2] = ((int8_t) seed_shr7) << s->noise_shift;
+
+ seed = (seed << 16) ^ seed_shr7 ^ (seed_shr7 << 5);
+ }
+
+ s->noisegen_seed = seed;
+}
+
+/** Generate a block of noise, used when restart sync word == 0x31eb. */
+
+static void fill_noise_buffer(MLPDecodeContext *m, unsigned int substr)
+{
+ SubStream *s = &m->substream[substr];
+ unsigned int i;
+ uint32_t seed = s->noisegen_seed;
+
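+    /* One noise byte is produced per sample of the power-of-two access unit by
+     * using the seed to index noise_table, with a similar shift-and-XOR update. */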
+ for (i = 0; i < m->access_unit_size_pow2; i++) {
+ uint8_t seed_shr15 = seed >> 15;
+ m->noise_buffer[i] = noise_table[seed_shr15];
+ seed = (seed << 8) ^ seed_shr15 ^ (seed_shr15 << 5);
+ }
+
+ s->noisegen_seed = seed;
+}
+
+
+/** Apply the channel matrices in turn to reconstruct the original audio
+ * samples. */
+
+static void rematrix_channels(MLPDecodeContext *m, unsigned int substr)
+{
+ SubStream *s = &m->substream[substr];
+ unsigned int mat, src_ch, i;
+ unsigned int maxchan;
+
+ maxchan = s->max_matrix_channel;
+ if (!s->noise_type) {
+ generate_2_noise_channels(m, substr);
+ maxchan += 2;
+ } else {
+ fill_noise_buffer(m, substr);
+ }
+
+ for (mat = 0; mat < s->num_primitive_matrices; mat++) {
+ int matrix_noise_shift = s->matrix_noise_shift[mat];
+ unsigned int dest_ch = s->matrix_out_ch[mat];
+ int32_t mask = MSB_MASK(s->quant_step_size[dest_ch]);
+ int32_t *coeffs = s->matrix_coeff[mat];
+ int index = s->num_primitive_matrices - mat;
+ int index2 = 2 * index + 1;
+
+ /* TODO: DSPContext? */
+
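+        /* Matrix coefficients are 2.14 fixed point, so the accumulated products
+         * are shifted right by 14 before being masked and having the bypassed
+         * LSB added. */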
+ for (i = 0; i < s->blockpos; i++) {
+ int32_t bypassed_lsb = m->bypassed_lsbs[i][mat];
+ int32_t *samples = m->sample_buffer[i];
+ int64_t accum = 0;
+
+ for (src_ch = 0; src_ch <= maxchan; src_ch++)
+ accum += (int64_t) samples[src_ch] * coeffs[src_ch];
+
+ if (matrix_noise_shift) {
+ index &= m->access_unit_size_pow2 - 1;
+ accum += m->noise_buffer[index] << (matrix_noise_shift + 7);
+ index += index2;
+ }
+
+ samples[dest_ch] = ((accum >> 14) & mask) + bypassed_lsb;
+ }
+ }
+}
+
+/** Write the audio data into the output buffer. */
+
+static int output_data_internal(MLPDecodeContext *m, unsigned int substr,
+ uint8_t *data, unsigned int *data_size, int is32)
+{
+ SubStream *s = &m->substream[substr];
+ unsigned int i, out_ch = 0;
+ int32_t *data_32 = (int32_t*) data;
+ int16_t *data_16 = (int16_t*) data;
+
+ if (*data_size < (s->max_channel + 1) * s->blockpos * (is32 ? 4 : 2))
+ return -1;
+
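+    /* Samples are held internally as 24-bit values: shift up by 8 bits for
+     * 32-bit output, or down by 8 bits to truncate to 16-bit output. */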
+ for (i = 0; i < s->blockpos; i++) {
+ for (out_ch = 0; out_ch <= s->max_matrix_channel; out_ch++) {
+ int mat_ch = s->ch_assign[out_ch];
+ int32_t sample = m->sample_buffer[i][mat_ch]
+ << s->output_shift[mat_ch];
+ s->lossless_check_data ^= (sample & 0xffffff) << mat_ch;
+ if (is32) *data_32++ = sample << 8;
+ else *data_16++ = sample >> 8;
+ }
+ }
+
+ *data_size = i * out_ch * (is32 ? 4 : 2);
+
+ return 0;
+}
+
+static int output_data(MLPDecodeContext *m, unsigned int substr,
+ uint8_t *data, unsigned int *data_size)
+{
+ if (m->avctx->sample_fmt == SAMPLE_FMT_S32)
+ return output_data_internal(m, substr, data, data_size, 1);
+ else
+ return output_data_internal(m, substr, data, data_size, 0);
+}
+
+
+/** Read an access unit from the stream.
+ * Returns < 0 on error, 0 if not enough data is present in the input stream,
+ * otherwise the number of bytes consumed. */
+
+static int read_access_unit(AVCodecContext *avctx, void* data, int *data_size,
+ const uint8_t *buf, int buf_size)
+{
+ MLPDecodeContext *m = avctx->priv_data;
+ GetBitContext gb;
+ unsigned int length, substr;
+ unsigned int substream_start;
+ unsigned int header_size = 4;
+ unsigned int substr_header_size = 0;
+ uint8_t substream_parity_present[MAX_SUBSTREAMS];
+ uint16_t substream_data_len[MAX_SUBSTREAMS];
+ uint8_t parity_bits;
+
+ if (buf_size < 4)
+ return 0;
+
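+    /* The low 12 bits of the first 16-bit word give the access unit length in
+     * 16-bit words; convert it to bytes. */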
+ length = (AV_RB16(buf) & 0xfff) * 2;
+
+ if (length < 4 || length > buf_size)
+ return -1;
+
+ init_get_bits(&gb, (buf + 4), (length - 4) * 8);
+
+ m->is_major_sync_unit = 0;
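+    /* Only the top 31 bits of the major sync code are compared, so that both
+     * the MLP and TrueHD variants (which differ only in the lowest bit) match. */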
+ if (show_bits_long(&gb, 31) == (0xf8726fba >> 1)) {
+ if (read_major_sync(m, &gb) < 0)
+ goto error;
+ m->is_major_sync_unit = 1;
+ header_size += 28;
+ }
+
+ if (!m->params_valid) {
+ av_log(m->avctx, AV_LOG_WARNING,
+ "Stream parameters not seen; skipping frame.\n");
+ *data_size = 0;
+ return length;
+ }
+
+ substream_start = 0;
+
+ for (substr = 0; substr < m->num_substreams; substr++) {
+ int extraword_present, checkdata_present, end, nonrestart_substr;
+
+ extraword_present = get_bits1(&gb);
+ nonrestart_substr = get_bits1(&gb);
+ checkdata_present = get_bits1(&gb);
+ skip_bits1(&gb);
+
+ end = get_bits(&gb, 12) * 2;
+
+ substr_header_size += 2;
+
+ if (extraword_present) {
+ if (m->avctx->codec_id == CODEC_ID_MLP) {
+ av_log(m->avctx, AV_LOG_ERROR, "There must be no extraword for MLP.\n");
+ goto error;
+ }
+ skip_bits(&gb, 16);
+ substr_header_size += 2;
+ }
+
+ if (!(nonrestart_substr ^ m->is_major_sync_unit)) {
+ av_log(m->avctx, AV_LOG_ERROR, "Invalid nonrestart_substr.\n");
+ goto error;
+ }
+
+ if (end + header_size + substr_header_size > length) {
+ av_log(m->avctx, AV_LOG_ERROR,
+ "Indicated length of substream %d data goes off end of "
+ "packet.\n", substr);
+ end = length - header_size - substr_header_size;
+ }
+
+ if (end < substream_start) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Indicated end offset of substream %d data "
+ "is smaller than calculated start offset.\n",
+ substr);
+ goto error;
+ }
+
+ if (substr > m->max_decoded_substream)
+ continue;
+
+ substream_parity_present[substr] = checkdata_present;
+ substream_data_len[substr] = end - substream_start;
+ substream_start = end;
+ }
+
+ parity_bits = ff_mlp_calculate_parity(buf, 4);
+ parity_bits ^= ff_mlp_calculate_parity(buf + header_size, substr_header_size);
+
+ /* ffdshow custom code
+ if ((((parity_bits >> 4) ^ parity_bits) & 0xF) != 0xF) {
+ av_log(avctx, AV_LOG_ERROR, "Parity check failed.\n");
+ goto error;
+ }
+ */
+
+ buf += header_size + substr_header_size;
+
+ for (substr = 0; substr <= m->max_decoded_substream; substr++) {
+ SubStream *s = &m->substream[substr];
+ init_get_bits(&gb, buf, substream_data_len[substr] * 8);
+
+ m->matrix_changed = 0;
+ memset(m->filter_changed, 0, sizeof(m->filter_changed));
+
+ s->blockpos = 0;
+ do {
+ if (get_bits1(&gb)) {
+ if (get_bits1(&gb)) {
+ /* A restart header should be present. */
+ if (read_restart_header(m, &gb, buf, substr) < 0)
+ goto next_substr;
+ s->restart_seen = 1;
+ }
+
+ if (!s->restart_seen)
+ goto next_substr;
+ if (read_decoding_params(m, &gb, substr) < 0)
+ goto next_substr;
+ }
+
+ if (!s->restart_seen)
+ goto next_substr;
+
+ if (read_block_data(m, &gb, substr) < 0)
+ return -1;
+
+ if (get_bits_count(&gb) >= substream_data_len[substr] * 8)
+ goto substream_length_mismatch;
+
+ } while (!get_bits1(&gb));
+
+ skip_bits(&gb, (-get_bits_count(&gb)) & 15);
+
+ if (substream_data_len[substr] * 8 - get_bits_count(&gb) >= 32) {
+ int shorten_by;
+
+ if (get_bits(&gb, 16) != 0xD234)
+ return -1;
+
+ shorten_by = get_bits(&gb, 16);
+ if (m->avctx->codec_id == CODEC_ID_TRUEHD && shorten_by & 0x2000)
+ s->blockpos -= FFMIN(shorten_by & 0x1FFF, s->blockpos);
+ else if (m->avctx->codec_id == CODEC_ID_MLP && shorten_by != 0xD234)
+ return -1;
+
+ if (substr == m->max_decoded_substream)
+ av_log(m->avctx, AV_LOG_INFO, "End of stream indicated.\n");
+ }
+
+ if (substream_parity_present[substr]) {
+ uint8_t parity, checksum;
+
+ if (substream_data_len[substr] * 8 - get_bits_count(&gb) != 16)
+ goto substream_length_mismatch;
+
+ parity = ff_mlp_calculate_parity(buf, substream_data_len[substr] - 2);
+ checksum = ff_mlp_checksum8 (buf, substream_data_len[substr] - 2);
+
+ if ((get_bits(&gb, 8) ^ parity) != 0xa9 )
+ av_log(m->avctx, AV_LOG_ERROR, "Substream %d parity check failed.\n", substr);
+ if ( get_bits(&gb, 8) != checksum)
+ av_log(m->avctx, AV_LOG_ERROR, "Substream %d checksum failed.\n" , substr);
+ }
+
+ if (substream_data_len[substr] * 8 != get_bits_count(&gb))
+ goto substream_length_mismatch;
+
+next_substr:
+ if (!s->restart_seen)
+ av_log(m->avctx, AV_LOG_ERROR,
+ "No restart header present in substream %d.\n", substr);
+
+ buf += substream_data_len[substr];
+ }
+
+ rematrix_channels(m, m->max_decoded_substream);
+
+ if (output_data(m, m->max_decoded_substream, data, data_size) < 0)
+ return -1;
+
+ return length;
+
+substream_length_mismatch:
+ av_log(m->avctx, AV_LOG_ERROR, "substream %d length mismatch\n", substr);
+ return -1;
+
+error:
+ m->params_valid = 0;
+ return -1;
+}
+
+AVCodec mlp_decoder = {
+ "mlp",
+ CODEC_TYPE_AUDIO,
+ CODEC_ID_MLP,
+ sizeof(MLPDecodeContext),
+ /*.init = */mlp_decode_init,
+ /*.encode = */NULL,
+ /*.close = */NULL,
+ /*.decode = */read_access_unit,
+ /*.capabilities = */0,
+ /*.next = */NULL,
+ /*.flush = */NULL,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name = */NULL_IF_CONFIG_SMALL("MLP (Meridian Lossless Packing)"),
+};
+
+#if CONFIG_TRUEHD_DECODER
+AVCodec truehd_decoder = {
+ "truehd",
+ CODEC_TYPE_AUDIO,
+ CODEC_ID_TRUEHD,
+ sizeof(MLPDecodeContext),
+ /*.init = */mlp_decode_init,
+ /*.encode = */NULL,
+ /*.close = */NULL,
+ /*.decode = */read_access_unit,
+ /*.capabilities = */0,
+ /*.next = */NULL,
+ /*.flush = */NULL,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name = */NULL_IF_CONFIG_SMALL("TrueHD"),
+};
+#endif /* CONFIG_TRUEHD_DECODER */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mlpdsp.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mlpdsp.c
new file mode 100644
index 000000000..129a01187
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mlpdsp.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2007-2008 Ian Caulfield
+ * 2009 Ramiro Polla
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavcodec/mlp.h"
+#include "dsputil.h"
+
+static void ff_mlp_filter_channel(int32_t *state, const int32_t *coeff,
+ int firorder, int iirorder,
+ unsigned int filter_shift, int32_t mask, int blocksize,
+ int32_t *sample_buffer)
+{
+ int32_t *firbuf = state;
+ int32_t *iirbuf = state + MAX_BLOCKSIZE + MAX_FIR_ORDER;
+ const int32_t *fircoeff = coeff;
+ const int32_t *iircoeff = coeff + MAX_FIR_ORDER;
+ int i;
+
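+    /* For each sample: accumulate the FIR taps over previous outputs and the
+     * IIR taps over previous (result - accum) values, shift down by
+     * filter_shift, add the residual and mask off the quantized LSBs. Both
+     * state buffers grow backwards as new entries are pushed onto their front. */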
+ for (i = 0; i < blocksize; i++) {
+ int32_t residual = *sample_buffer;
+ unsigned int order;
+ int64_t accum = 0;
+ int32_t result;
+
+ for (order = 0; order < firorder; order++)
+ accum += (int64_t) firbuf[order] * fircoeff[order];
+ for (order = 0; order < iirorder; order++)
+ accum += (int64_t) iirbuf[order] * iircoeff[order];
+
+ accum = accum >> filter_shift;
+ result = (accum + residual) & mask;
+
+ *--firbuf = result;
+ *--iirbuf = result - accum;
+
+ *sample_buffer = result;
+ sample_buffer += MAX_CHANNELS;
+ }
+}
+
+void ff_mlp_init_x86(DSPContext* c, AVCodecContext *avctx);
+
+void ff_mlp_init(DSPContext* c, AVCodecContext *avctx)
+{
+ c->mlp_filter_channel = ff_mlp_filter_channel;
+#if HAVE_MMX
+ if (ARCH_X86)
+ ff_mlp_init_x86(c, avctx);
+#endif
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpc_helper.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpc_helper.c
new file mode 100644
index 000000000..e11a54606
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpc_helper.c
@@ -0,0 +1,93 @@
+/*
+ * This file is part of Media Player Classic HomeCinema.
+ *
+ * MPC-HC is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * MPC-HC is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with MPC-HC; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avcodec.h"
+#include "internal.h"
+#include "ac3.h"
+#include "ac3_parser.h"
+#include "ac3dec.h"
+#include "ac3dec_data.h"
+
+#ifdef __GNUC__
+#define _aligned_malloc __mingw_aligned_malloc
+#define _aligned_realloc __mingw_aligned_realloc
+#define _aligned_free __mingw_aligned_free
+#endif
+
+int FFGetChannelMap(struct AVCodecContext * avctx)
+{
+ switch (avctx->codec_id)
+ {
+ case CODEC_ID_EAC3 :
+ case CODEC_ID_AC3 :
+ {
+ AC3DecodeContext *s = avctx->priv_data;
+
+ // Mapping index for s_scmap_ac3
+ switch (s->channel_mode)
+ {
+ case AC3_CHMODE_DUALMONO: return 0;
+ case AC3_CHMODE_MONO : return 1;
+ case AC3_CHMODE_STEREO : return 2;
+ case AC3_CHMODE_3F : return 3;
+ case AC3_CHMODE_2F1R : return 4;
+ case AC3_CHMODE_3F1R : return 5;
+ case AC3_CHMODE_2F2R : return 6;
+ case AC3_CHMODE_3F2R : return (s->lfe_on ? 8 : 7);
+ }
+ }
+ break;
+ case CODEC_ID_MLP :
+ {
+ // Mapping index for s_scmap_lpcm
+ if (avctx->channels<=8)
+ return avctx->channels-1;
+ else
+ return -1;
+ }
+ default :
+ return 2;
+ }
+ return -1;
+}
+
+
+void* FF_aligned_malloc(size_t size, size_t alignment)
+{
+ return _aligned_malloc(size,alignment);
+}
+
+void FF_aligned_free(void* mem_ptr)
+{
+ if (mem_ptr)
+ _aligned_free(mem_ptr);
+}
+
+void* FF_aligned_realloc(void *ptr,size_t size,size_t alignment)
+{
+ if (!ptr)
+ return FF_aligned_malloc(size,alignment);
+ else
+ if (size==0)
+ {
+ FF_aligned_free(ptr);
+ return NULL;
+ }
+ else
+ return _aligned_realloc(ptr,size,alignment);
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg12.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg12.c
new file mode 100644
index 000000000..9837ffcb4
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg12.c
@@ -0,0 +1,2527 @@
+/*
+ * MPEG-1/2 decoder
+ * Copyright (c) 2000,2001 Fabrice Bellard
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/mpeg12.c
+ * MPEG-1/2 decoder
+ */
+
+//#define DEBUG
+#include "internal.h"
+#include "avcodec.h"
+#include "dsputil.h"
+#include "mpegvideo.h"
+
+#include "mpeg12.h"
+#include "mpeg12data.h"
+#include "mpeg12decdata.h"
+#include "bytestream.h"
+
+// ==> Start patch MPC
+#include <windows.h>
+#include <dxva.h>
+// <== End patch MPC
+
+//#undef NDEBUG
+//#include <assert.h>
+
+
+#define MV_VLC_BITS 9
+#define MBINCR_VLC_BITS 9
+#define MB_PAT_VLC_BITS 9
+#define MB_PTYPE_VLC_BITS 6
+#define MB_BTYPE_VLC_BITS 6
+
+static inline int mpeg1_decode_block_intra(MpegEncContext *s,
+ DCTELEM *block,
+ int n);
+static inline int mpeg1_decode_block_inter(MpegEncContext *s,
+ DCTELEM *block,
+ int n);
+static inline int mpeg1_fast_decode_block_inter(MpegEncContext *s, DCTELEM *block, int n);
+static inline int mpeg2_decode_block_non_intra(MpegEncContext *s,
+ DCTELEM *block,
+ int n);
+static inline int mpeg2_decode_block_intra(MpegEncContext *s,
+ DCTELEM *block,
+ int n);
+static inline int mpeg2_fast_decode_block_non_intra(MpegEncContext *s, DCTELEM *block, int n);
+static inline int mpeg2_fast_decode_block_intra(MpegEncContext *s, DCTELEM *block, int n);
+static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred);
+static void exchange_uv(MpegEncContext *s);
+
+uint8_t ff_mpeg12_static_rl_table_store[2][2][2*MAX_RUN + MAX_LEVEL + 3];
+
+
+#define INIT_2D_VLC_RL(rl, static_size)\
+{\
+ static RL_VLC_ELEM rl_vlc_table[static_size];\
+ INIT_VLC_STATIC(&rl.vlc, TEX_VLC_BITS, rl.n + 2,\
+ &rl.table_vlc[0][1], 4, 2,\
+ &rl.table_vlc[0][0], 4, 2, static_size);\
+\
+ rl.rl_vlc[0]= rl_vlc_table;\
+ init_2d_vlc_rl(&rl);\
+}
+
+static void init_2d_vlc_rl(RLTable *rl)
+{
+ int i;
+
+ for(i=0; i<rl->vlc.table_size; i++){
+ int code= rl->vlc.table[i][0];
+ int len = rl->vlc.table[i][1];
+ int level, run;
+
+ if(len==0){ // illegal code
+ run= 65;
+ level= MAX_LEVEL;
+ }else if(len<0){ //more bits needed
+ run= 0;
+ level= code;
+ }else{
+ if(code==rl->n){ //esc
+ run= 65;
+ level= 0;
+ }else if(code==rl->n+1){ //eob
+ run= 0;
+ level= 127;
+ }else{
+ run= rl->table_run [code] + 1;
+ level= rl->table_level[code];
+ }
+ }
+ rl->rl_vlc[0][i].len= len;
+ rl->rl_vlc[0][i].level= level;
+ rl->rl_vlc[0][i].run= run;
+ }
+}
+
+void ff_mpeg12_common_init(MpegEncContext *s)
+{
+
+ s->y_dc_scale_table=
+ s->c_dc_scale_table= mpeg2_dc_scale_table[s->intra_dc_precision];
+
+}
+
+void ff_mpeg1_clean_buffers(MpegEncContext *s){
+ s->last_dc[0] = 1 << (7 + s->intra_dc_precision);
+ s->last_dc[1] = s->last_dc[0];
+ s->last_dc[2] = s->last_dc[0];
+ memset(s->last_mv, 0, sizeof(s->last_mv));
+}
+
+
+/******************************************/
+/* decoding */
+
+static VLC mv_vlc;
+static VLC mbincr_vlc;
+static VLC mb_ptype_vlc;
+static VLC mb_btype_vlc;
+static VLC mb_pat_vlc;
+
+av_cold void ff_mpeg12_init_vlcs(void)
+{
+ static int done = 0;
+
+ if (!done) {
+ done = 1;
+
+ INIT_VLC_STATIC(&dc_lum_vlc, DC_VLC_BITS, 12,
+ ff_mpeg12_vlc_dc_lum_bits, 1, 1,
+ ff_mpeg12_vlc_dc_lum_code, 2, 2, 512);
+ INIT_VLC_STATIC(&dc_chroma_vlc, DC_VLC_BITS, 12,
+ ff_mpeg12_vlc_dc_chroma_bits, 1, 1,
+ ff_mpeg12_vlc_dc_chroma_code, 2, 2, 514);
+ INIT_VLC_STATIC(&mv_vlc, MV_VLC_BITS, 17,
+ &ff_mpeg12_mbMotionVectorTable[0][1], 2, 1,
+ &ff_mpeg12_mbMotionVectorTable[0][0], 2, 1, 518);
+ INIT_VLC_STATIC(&mbincr_vlc, MBINCR_VLC_BITS, 36,
+ &ff_mpeg12_mbAddrIncrTable[0][1], 2, 1,
+ &ff_mpeg12_mbAddrIncrTable[0][0], 2, 1, 538);
+ INIT_VLC_STATIC(&mb_pat_vlc, MB_PAT_VLC_BITS, 64,
+ &ff_mpeg12_mbPatTable[0][1], 2, 1,
+ &ff_mpeg12_mbPatTable[0][0], 2, 1, 512);
+
+ INIT_VLC_STATIC(&mb_ptype_vlc, MB_PTYPE_VLC_BITS, 7,
+ &table_mb_ptype[0][1], 2, 1,
+ &table_mb_ptype[0][0], 2, 1, 64);
+ INIT_VLC_STATIC(&mb_btype_vlc, MB_BTYPE_VLC_BITS, 11,
+ &table_mb_btype[0][1], 2, 1,
+ &table_mb_btype[0][0], 2, 1, 64);
+ init_rl(&ff_rl_mpeg1, ff_mpeg12_static_rl_table_store[0]);
+ init_rl(&ff_rl_mpeg2, ff_mpeg12_static_rl_table_store[1]);
+
+ INIT_2D_VLC_RL(ff_rl_mpeg1, 680);
+ INIT_2D_VLC_RL(ff_rl_mpeg2, 674);
+ }
+}
+
+static inline int get_dmv(MpegEncContext *s)
+{
+ if(get_bits1(&s->gb))
+ return 1 - (get_bits1(&s->gb) << 1);
+ else
+ return 0;
+}
+
+static inline int get_qscale(MpegEncContext *s)
+{
+ int qscale = get_bits(&s->gb, 5);
+ if (s->q_scale_type) {
+ return non_linear_qscale[qscale];
+ } else {
+ return qscale << 1;
+ }
+}
+
+/* motion type (for MPEG-2) */
+#define MT_FIELD 1
+#define MT_FRAME 2
+#define MT_16X8 2
+#define MT_DMV 3
+
+static int mpeg_decode_mb(MpegEncContext *s,
+ DCTELEM block[12][64])
+{
+ int i, j, k, cbp, val, mb_type, motion_type;
+ const int mb_block_count = 4 + (1<< s->chroma_format);
+
+ dprintf(s->avctx, "decode_mb: x=%d y=%d\n", s->mb_x, s->mb_y);
+
+ assert(s->mb_skipped==0);
+
+ if (s->mb_skip_run-- != 0) {
+ if (s->pict_type == FF_P_TYPE) {
+ s->mb_skipped = 1;
+ s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]= MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16;
+ } else {
+ int mb_type;
+
+ if(s->mb_x)
+ mb_type= s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride - 1];
+ else
+ mb_type= s->current_picture.mb_type[ s->mb_width + (s->mb_y-1)*s->mb_stride - 1]; // FIXME not sure if this is allowed in MPEG at all
+ if(IS_INTRA(mb_type))
+ return -1;
+
+ s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]=
+ mb_type | MB_TYPE_SKIP;
+// assert(s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride - 1]&(MB_TYPE_16x16|MB_TYPE_16x8));
+
+ if((s->mv[0][0][0]|s->mv[0][0][1]|s->mv[1][0][0]|s->mv[1][0][1])==0)
+ s->mb_skipped = 1;
+ }
+
+ return 0;
+ }
+
+ switch(s->pict_type) {
+ default:
+ case FF_I_TYPE:
+ if (get_bits1(&s->gb) == 0) {
+ if (get_bits1(&s->gb) == 0){
+ av_log(s->avctx, AV_LOG_ERROR, "invalid mb type in I Frame at %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+ mb_type = MB_TYPE_QUANT | MB_TYPE_INTRA;
+ } else {
+ mb_type = MB_TYPE_INTRA;
+ }
+ break;
+ case FF_P_TYPE:
+ mb_type = get_vlc2(&s->gb, mb_ptype_vlc.table, MB_PTYPE_VLC_BITS, 1);
+ if (mb_type < 0){
+ av_log(s->avctx, AV_LOG_ERROR, "invalid mb type in P Frame at %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+ mb_type = ptype2mb_type[ mb_type ];
+ break;
+ case FF_B_TYPE:
+ mb_type = get_vlc2(&s->gb, mb_btype_vlc.table, MB_BTYPE_VLC_BITS, 1);
+ if (mb_type < 0){
+ av_log(s->avctx, AV_LOG_ERROR, "invalid mb type in B Frame at %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+ mb_type = btype2mb_type[ mb_type ];
+ break;
+ }
+ dprintf(s->avctx, "mb_type=%x\n", mb_type);
+// motion_type = 0; /* avoid warning */
+ if (IS_INTRA(mb_type)) {
+ s->dsp.clear_blocks(s->block[0]);
+
+ if(!s->chroma_y_shift){
+ s->dsp.clear_blocks(s->block[6]);
+ }
+
+ /* compute DCT type */
+ if (s->picture_structure == PICT_FRAME && //FIXME add an interlaced_dct coded var?
+ !s->frame_pred_frame_dct) {
+ s->interlaced_dct = get_bits1(&s->gb);
+ }
+
+ if (IS_QUANT(mb_type))
+ s->qscale = get_qscale(s);
+
+ if (s->concealment_motion_vectors) {
+ /* just parse them */
+ if (s->picture_structure != PICT_FRAME)
+ skip_bits1(&s->gb); /* field select */
+
+ s->mv[0][0][0]= s->last_mv[0][0][0]= s->last_mv[0][1][0] =
+ mpeg_decode_motion(s, s->mpeg_f_code[0][0], s->last_mv[0][0][0]);
+ s->mv[0][0][1]= s->last_mv[0][0][1]= s->last_mv[0][1][1] =
+ mpeg_decode_motion(s, s->mpeg_f_code[0][1], s->last_mv[0][0][1]);
+
+ skip_bits1(&s->gb); /* marker */
+ }else
+ memset(s->last_mv, 0, sizeof(s->last_mv)); /* reset mv prediction */
+ s->mb_intra = 1;
+
+ if (s->codec_id == CODEC_ID_MPEG2VIDEO) {
+ if(s->flags2 & CODEC_FLAG2_FAST){
+ for(i=0;i<6;i++) {
+ mpeg2_fast_decode_block_intra(s, *s->pblocks[i], i);
+ }
+ }else{
+ for(i=0;i<mb_block_count;i++) {
+ if (mpeg2_decode_block_intra(s, *s->pblocks[i], i) < 0)
+ return -1;
+ }
+ }
+ } else {
+ for(i=0;i<6;i++) {
+ if (mpeg1_decode_block_intra(s, *s->pblocks[i], i) < 0)
+ return -1;
+ }
+ }
+ } else {
+ if (mb_type & MB_TYPE_ZERO_MV){
+ assert(mb_type & MB_TYPE_CBP);
+
+ s->mv_dir = MV_DIR_FORWARD;
+ if(s->picture_structure == PICT_FRAME){
+ if(!s->frame_pred_frame_dct)
+ s->interlaced_dct = get_bits1(&s->gb);
+ s->mv_type = MV_TYPE_16X16;
+ }else{
+ s->mv_type = MV_TYPE_FIELD;
+ mb_type |= MB_TYPE_INTERLACED;
+ s->field_select[0][0]= s->picture_structure - 1;
+ }
+
+ if (IS_QUANT(mb_type))
+ s->qscale = get_qscale(s);
+
+ s->last_mv[0][0][0] = 0;
+ s->last_mv[0][0][1] = 0;
+ s->last_mv[0][1][0] = 0;
+ s->last_mv[0][1][1] = 0;
+ s->mv[0][0][0] = 0;
+ s->mv[0][0][1] = 0;
+ }else{
+ assert(mb_type & MB_TYPE_L0L1);
+//FIXME decide if MBs in field pictures are MB_TYPE_INTERLACED
+ /* get additional motion vector type */
+ if (s->frame_pred_frame_dct)
+ motion_type = MT_FRAME;
+ else{
+ motion_type = get_bits(&s->gb, 2);
+ if (s->picture_structure == PICT_FRAME && HAS_CBP(mb_type))
+ s->interlaced_dct = get_bits1(&s->gb);
+ }
+
+ if (IS_QUANT(mb_type))
+ s->qscale = get_qscale(s);
+
+ /* motion vectors */
+ s->mv_dir= (mb_type>>13)&3;
+ dprintf(s->avctx, "motion_type=%d\n", motion_type);
+ switch(motion_type) {
+ case MT_FRAME: /* or MT_16X8 */
+ if (s->picture_structure == PICT_FRAME) {
+ mb_type |= MB_TYPE_16x16;
+ s->mv_type = MV_TYPE_16X16;
+ for(i=0;i<2;i++) {
+ if (USES_LIST(mb_type, i)) {
+ /* MT_FRAME */
+ s->mv[i][0][0]= s->last_mv[i][0][0]= s->last_mv[i][1][0] =
+ mpeg_decode_motion(s, s->mpeg_f_code[i][0], s->last_mv[i][0][0]);
+ s->mv[i][0][1]= s->last_mv[i][0][1]= s->last_mv[i][1][1] =
+ mpeg_decode_motion(s, s->mpeg_f_code[i][1], s->last_mv[i][0][1]);
+ /* full_pel: only for MPEG-1 */
+ if (s->full_pel[i]){
+ s->mv[i][0][0] <<= 1;
+ s->mv[i][0][1] <<= 1;
+ }
+ }
+ }
+ } else {
+ mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
+ s->mv_type = MV_TYPE_16X8;
+ for(i=0;i<2;i++) {
+ if (USES_LIST(mb_type, i)) {
+ /* MT_16X8 */
+ for(j=0;j<2;j++) {
+ s->field_select[i][j] = get_bits1(&s->gb);
+ for(k=0;k<2;k++) {
+ val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
+ s->last_mv[i][j][k]);
+ s->last_mv[i][j][k] = val;
+ s->mv[i][j][k] = val;
+ }
+ }
+ }
+ }
+ }
+ break;
+ case MT_FIELD:
+ s->mv_type = MV_TYPE_FIELD;
+ if (s->picture_structure == PICT_FRAME) {
+ mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
+ for(i=0;i<2;i++) {
+ if (USES_LIST(mb_type, i)) {
+ for(j=0;j<2;j++) {
+ s->field_select[i][j] = get_bits1(&s->gb);
+ val = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
+ s->last_mv[i][j][0]);
+ s->last_mv[i][j][0] = val;
+ s->mv[i][j][0] = val;
+ dprintf(s->avctx, "fmx=%d\n", val);
+ val = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
+ s->last_mv[i][j][1] >> 1);
+ s->last_mv[i][j][1] = val << 1;
+ s->mv[i][j][1] = val;
+ dprintf(s->avctx, "fmy=%d\n", val);
+ }
+ }
+ }
+ } else {
+ mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED;
+ for(i=0;i<2;i++) {
+ if (USES_LIST(mb_type, i)) {
+ s->field_select[i][0] = get_bits1(&s->gb);
+ for(k=0;k<2;k++) {
+ val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
+ s->last_mv[i][0][k]);
+ s->last_mv[i][0][k] = val;
+ s->last_mv[i][1][k] = val;
+ s->mv[i][0][k] = val;
+ }
+ }
+ }
+ }
+ break;
+ case MT_DMV:
+ s->mv_type = MV_TYPE_DMV;
+ for(i=0;i<2;i++) {
+ if (USES_LIST(mb_type, i)) {
+ int dmx, dmy, mx, my, m;
+ const int my_shift= s->picture_structure == PICT_FRAME;
+
+ mx = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
+ s->last_mv[i][0][0]);
+ s->last_mv[i][0][0] = mx;
+ s->last_mv[i][1][0] = mx;
+ dmx = get_dmv(s);
+ my = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
+ s->last_mv[i][0][1] >> my_shift);
+ dmy = get_dmv(s);
+
+
+ s->last_mv[i][0][1] = my<<my_shift;
+ s->last_mv[i][1][1] = my<<my_shift;
+
+ s->mv[i][0][0] = mx;
+ s->mv[i][0][1] = my;
+ s->mv[i][1][0] = mx;//not used
+ s->mv[i][1][1] = my;//not used
+
+ if (s->picture_structure == PICT_FRAME) {
+ mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED;
+
+ //m = 1 + 2 * s->top_field_first;
+ m = s->top_field_first ? 1 : 3;
+
+ /* top -> top pred */
+ s->mv[i][2][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
+ s->mv[i][2][1] = ((my * m + (my > 0)) >> 1) + dmy - 1;
+ m = 4 - m;
+ s->mv[i][3][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
+ s->mv[i][3][1] = ((my * m + (my > 0)) >> 1) + dmy + 1;
+ } else {
+ mb_type |= MB_TYPE_16x16;
+
+ s->mv[i][2][0] = ((mx + (mx > 0)) >> 1) + dmx;
+ s->mv[i][2][1] = ((my + (my > 0)) >> 1) + dmy;
+ if(s->picture_structure == PICT_TOP_FIELD)
+ s->mv[i][2][1]--;
+ else
+ s->mv[i][2][1]++;
+ }
+ }
+ }
+ break;
+ default:
+ av_log(s->avctx, AV_LOG_ERROR, "00 motion_type at %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+ }
+
+ s->mb_intra = 0;
+ if (HAS_CBP(mb_type)) {
+ s->dsp.clear_blocks(s->block[0]);
+
+ cbp = get_vlc2(&s->gb, mb_pat_vlc.table, MB_PAT_VLC_BITS, 1);
+ if(mb_block_count > 6){
+ cbp<<= mb_block_count-6;
+ cbp |= get_bits(&s->gb, mb_block_count-6);
+ s->dsp.clear_blocks(s->block[6]);
+ }
+ if (cbp <= 0){
+ av_log(s->avctx, AV_LOG_ERROR, "invalid cbp at %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+
+ if (s->codec_id == CODEC_ID_MPEG2VIDEO) {
+ if(s->flags2 & CODEC_FLAG2_FAST){
+ for(i=0;i<6;i++) {
+ if(cbp & 32) {
+ mpeg2_fast_decode_block_non_intra(s, *s->pblocks[i], i);
+ } else {
+ s->block_last_index[i] = -1;
+ }
+ cbp+=cbp;
+ }
+ }else{
+ cbp<<= 12-mb_block_count;
+
+ for(i=0;i<mb_block_count;i++) {
+ if ( cbp & (1<<11) ) {
+ if (mpeg2_decode_block_non_intra(s, *s->pblocks[i], i) < 0)
+ return -1;
+ } else {
+ s->block_last_index[i] = -1;
+ }
+ cbp+=cbp;
+ }
+ }
+ } else {
+ if(s->flags2 & CODEC_FLAG2_FAST){
+ for(i=0;i<6;i++) {
+ if (cbp & 32) {
+ mpeg1_fast_decode_block_inter(s, *s->pblocks[i], i);
+ } else {
+ s->block_last_index[i] = -1;
+ }
+ cbp+=cbp;
+ }
+ }else{
+ for(i=0;i<6;i++) {
+ if (cbp & 32) {
+ if (mpeg1_decode_block_inter(s, *s->pblocks[i], i) < 0)
+ return -1;
+ } else {
+ s->block_last_index[i] = -1;
+ }
+ cbp+=cbp;
+ }
+ }
+ }
+ }else{
+ for(i=0;i<12;i++)
+ s->block_last_index[i] = -1;
+ }
+ }
+
+ s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]= mb_type;
+
+ return 0;
+}
+
+/* as H.263, but only 17 codes */
+static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred)
+{
+ int code, sign, val, l, shift;
+
+ code = get_vlc2(&s->gb, mv_vlc.table, MV_VLC_BITS, 2);
+ if (code == 0) {
+ return pred;
+ }
+ if (code < 0) {
+ return 0xffff;
+ }
+
+ sign = get_bits1(&s->gb);
+ shift = fcode - 1;
+ val = code;
+ if (shift) {
+ val = (val - 1) << shift;
+ val |= get_bits(&s->gb, shift);
+ val++;
+ }
+ if (sign)
+ val = -val;
+ val += pred;
+
+    /* modulo decoding: sign extend from the low (5 + shift) bits so the value
+     * wraps back into the legal motion vector range */
+ l= INT_BIT - 5 - shift;
+ val = (val<<l)>>l;
+ return val;
+}
+
+static inline int mpeg1_decode_block_intra(MpegEncContext *s,
+ DCTELEM *block,
+ int n)
+{
+ int level, dc, diff, i, j, run;
+ int component;
+ RLTable *rl = &ff_rl_mpeg1;
+ uint8_t * const scantable= s->intra_scantable.permutated;
+ const uint16_t *quant_matrix= s->intra_matrix;
+ const int qscale= s->qscale;
+
+ /* DC coefficient */
+ component = (n <= 3 ? 0 : n - 4 + 1);
+ diff = decode_dc(&s->gb, component);
+ if (diff >= 0xffff)
+ return -1;
+ dc = s->last_dc[component];
+ dc += diff;
+ s->last_dc[component] = dc;
+ block[0] = dc*quant_matrix[0];
+ dprintf(s->avctx, "dc=%d diff=%d\n", dc, diff);
+ i = 0;
+ {
+ OPEN_READER(re, &s->gb);
+ /* now quantify & encode AC coefficients */
+ for(;;) {
+ UPDATE_CACHE(re, &s->gb);
+ GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0);
+
+ if(level == 127){
+ break;
+ } else if(level != 0) {
+ i += run;
+ j = scantable[i];
+ level= (level*qscale*quant_matrix[j])>>4;
+ level= (level-1)|1;
+ level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
+ LAST_SKIP_BITS(re, &s->gb, 1);
+ } else {
+ /* escape */
+ run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6);
+ UPDATE_CACHE(re, &s->gb);
+ level = SHOW_SBITS(re, &s->gb, 8); SKIP_BITS(re, &s->gb, 8);
+ if (level == -128) {
+ level = SHOW_UBITS(re, &s->gb, 8) - 256; LAST_SKIP_BITS(re, &s->gb, 8);
+ } else if (level == 0) {
+ level = SHOW_UBITS(re, &s->gb, 8) ; LAST_SKIP_BITS(re, &s->gb, 8);
+ }
+ i += run;
+ j = scantable[i];
+ if(level<0){
+ level= -level;
+ level= (level*qscale*quant_matrix[j])>>4;
+ level= (level-1)|1;
+ level= -level;
+ }else{
+ level= (level*qscale*quant_matrix[j])>>4;
+ level= (level-1)|1;
+ }
+ }
+ if (i > 63){
+ av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+
+ block[j] = level;
+ }
+ CLOSE_READER(re, &s->gb);
+ }
+ s->block_last_index[n] = i;
+ return 0;
+}
+
+int ff_mpeg1_decode_block_intra(MpegEncContext *s,
+ DCTELEM *block,
+ int n)
+{
+ return mpeg1_decode_block_intra(s, block, n);
+}
+
+static inline int mpeg1_decode_block_inter(MpegEncContext *s,
+ DCTELEM *block,
+ int n)
+{
+ int level, i, j, run;
+ RLTable *rl = &ff_rl_mpeg1;
+ uint8_t * const scantable= s->intra_scantable.permutated;
+ const uint16_t *quant_matrix= s->inter_matrix;
+ const int qscale= s->qscale;
+
+ {
+ OPEN_READER(re, &s->gb);
+ i = -1;
+ // special case for first coefficient, no need to add second VLC table
+ UPDATE_CACHE(re, &s->gb);
+ if (((int32_t)GET_CACHE(re, &s->gb)) < 0) {
+ level= (3*qscale*quant_matrix[0])>>5;
+ level= (level-1)|1;
+ if(GET_CACHE(re, &s->gb)&0x40000000)
+ level= -level;
+ block[0] = level;
+ i++;
+ SKIP_BITS(re, &s->gb, 2);
+ if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF)
+ goto end;
+ }
+
+ /* now quantify & encode AC coefficients */
+ for(;;) {
+ GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0);
+
+ if(level != 0) {
+ i += run;
+ j = scantable[i];
+ level= ((level*2+1)*qscale*quant_matrix[j])>>5;
+ level= (level-1)|1;
+ level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
+ SKIP_BITS(re, &s->gb, 1);
+ } else {
+ /* escape */
+ run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6);
+ UPDATE_CACHE(re, &s->gb);
+ level = SHOW_SBITS(re, &s->gb, 8); SKIP_BITS(re, &s->gb, 8);
+ if (level == -128) {
+ level = SHOW_UBITS(re, &s->gb, 8) - 256; SKIP_BITS(re, &s->gb, 8);
+ } else if (level == 0) {
+ level = SHOW_UBITS(re, &s->gb, 8) ; SKIP_BITS(re, &s->gb, 8);
+ }
+ i += run;
+ j = scantable[i];
+ if(level<0){
+ level= -level;
+ level= ((level*2+1)*qscale*quant_matrix[j])>>5;
+ level= (level-1)|1;
+ level= -level;
+ }else{
+ level= ((level*2+1)*qscale*quant_matrix[j])>>5;
+ level= (level-1)|1;
+ }
+ }
+ if (i > 63){
+ av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+
+ block[j] = level;
+ if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF)
+ break;
+ UPDATE_CACHE(re, &s->gb);
+ }
+end:
+ LAST_SKIP_BITS(re, &s->gb, 2);
+ CLOSE_READER(re, &s->gb);
+ }
+ s->block_last_index[n] = i;
+ return 0;
+}
+
+static inline int mpeg1_fast_decode_block_inter(MpegEncContext *s, DCTELEM *block, int n)
+{
+ int level, i, j, run;
+ RLTable *rl = &ff_rl_mpeg1;
+ uint8_t * const scantable= s->intra_scantable.permutated;
+ const int qscale= s->qscale;
+
+ {
+ OPEN_READER(re, &s->gb);
+ i = -1;
+ // special case for first coefficient, no need to add second VLC table
+ UPDATE_CACHE(re, &s->gb);
+ if (((int32_t)GET_CACHE(re, &s->gb)) < 0) {
+ level= (3*qscale)>>1;
+ level= (level-1)|1;
+ if(GET_CACHE(re, &s->gb)&0x40000000)
+ level= -level;
+ block[0] = level;
+ i++;
+ SKIP_BITS(re, &s->gb, 2);
+ if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF)
+ goto end;
+ }
+
+ /* now quantify & encode AC coefficients */
+ for(;;) {
+ GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0);
+
+ if(level != 0) {
+ i += run;
+ j = scantable[i];
+ level= ((level*2+1)*qscale)>>1;
+ level= (level-1)|1;
+ level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
+ SKIP_BITS(re, &s->gb, 1);
+ } else {
+ /* escape */
+ run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6);
+ UPDATE_CACHE(re, &s->gb);
+ level = SHOW_SBITS(re, &s->gb, 8); SKIP_BITS(re, &s->gb, 8);
+ if (level == -128) {
+ level = SHOW_UBITS(re, &s->gb, 8) - 256; SKIP_BITS(re, &s->gb, 8);
+ } else if (level == 0) {
+ level = SHOW_UBITS(re, &s->gb, 8) ; SKIP_BITS(re, &s->gb, 8);
+ }
+ i += run;
+ j = scantable[i];
+ if(level<0){
+ level= -level;
+ level= ((level*2+1)*qscale)>>1;
+ level= (level-1)|1;
+ level= -level;
+ }else{
+ level= ((level*2+1)*qscale)>>1;
+ level= (level-1)|1;
+ }
+ }
+
+ block[j] = level;
+ if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF)
+ break;
+ UPDATE_CACHE(re, &s->gb);
+ }
+end:
+ LAST_SKIP_BITS(re, &s->gb, 2);
+ CLOSE_READER(re, &s->gb);
+ }
+ s->block_last_index[n] = i;
+ return 0;
+}
+
+
+static inline int mpeg2_decode_block_non_intra(MpegEncContext *s,
+ DCTELEM *block,
+ int n)
+{
+ int level, i, j, run;
+ RLTable *rl = &ff_rl_mpeg1;
+ uint8_t * const scantable= s->intra_scantable.permutated;
+ const uint16_t *quant_matrix;
+ const int qscale= s->qscale;
+ int mismatch;
+
+ mismatch = 1;
+
+ {
+ OPEN_READER(re, &s->gb);
+ i = -1;
+ if (n < 4)
+ quant_matrix = s->inter_matrix;
+ else
+ quant_matrix = s->chroma_inter_matrix;
+
+ // special case for first coefficient, no need to add second VLC table
+ UPDATE_CACHE(re, &s->gb);
+ if (((int32_t)GET_CACHE(re, &s->gb)) < 0) {
+ level= (3*qscale*quant_matrix[0])>>5;
+ if(GET_CACHE(re, &s->gb)&0x40000000)
+ level= -level;
+ block[0] = level;
+ mismatch ^= level;
+ i++;
+ SKIP_BITS(re, &s->gb, 2);
+ if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF)
+ goto end;
+ }
+
+ /* now dequantise & decode the AC coefficients */
+ for(;;) {
+ GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0);
+
+ if(level != 0) {
+ i += run;
+ j = scantable[i];
+ level= ((level*2+1)*qscale*quant_matrix[j])>>5;
+ level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
+ SKIP_BITS(re, &s->gb, 1);
+ } else {
+ /* escape */
+ run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6);
+ UPDATE_CACHE(re, &s->gb);
+ level = SHOW_SBITS(re, &s->gb, 12); SKIP_BITS(re, &s->gb, 12);
+
+ i += run;
+ j = scantable[i];
+ if(level<0){
+ level= ((-level*2+1)*qscale*quant_matrix[j])>>5;
+ level= -level;
+ }else{
+ level= ((level*2+1)*qscale*quant_matrix[j])>>5;
+ }
+ }
+ if (i > 63){
+ av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+
+ mismatch ^= level;
+ block[j] = level;
+ if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF)
+ break;
+ UPDATE_CACHE(re, &s->gb);
+ }
+end:
+ LAST_SKIP_BITS(re, &s->gb, 2);
+ CLOSE_READER(re, &s->gb);
+ }
+ block[63] ^= (mismatch & 1);
+
+ s->block_last_index[n] = i;
+ return 0;
+}
+
+static inline int mpeg2_fast_decode_block_non_intra(MpegEncContext *s,
+ DCTELEM *block,
+ int n)
+{
+ int level, i, j, run;
+ RLTable *rl = &ff_rl_mpeg1;
+ uint8_t * const scantable= s->intra_scantable.permutated;
+ const int qscale= s->qscale;
+ OPEN_READER(re, &s->gb);
+ i = -1;
+
+ // special case for first coefficient, no need to add second VLC table
+ UPDATE_CACHE(re, &s->gb);
+ if (((int32_t)GET_CACHE(re, &s->gb)) < 0) {
+ level= (3*qscale)>>1;
+ if(GET_CACHE(re, &s->gb)&0x40000000)
+ level= -level;
+ block[0] = level;
+ i++;
+ SKIP_BITS(re, &s->gb, 2);
+ if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF)
+ goto end;
+ }
+
+ /* now dequantise & decode the AC coefficients */
+ for(;;) {
+ GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0);
+
+ if(level != 0) {
+ i += run;
+ j = scantable[i];
+ level= ((level*2+1)*qscale)>>1;
+ level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
+ SKIP_BITS(re, &s->gb, 1);
+ } else {
+ /* escape */
+ run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6);
+ UPDATE_CACHE(re, &s->gb);
+ level = SHOW_SBITS(re, &s->gb, 12); SKIP_BITS(re, &s->gb, 12);
+
+ i += run;
+ j = scantable[i];
+ if(level<0){
+ level= ((-level*2+1)*qscale)>>1;
+ level= -level;
+ }else{
+ level= ((level*2+1)*qscale)>>1;
+ }
+ }
+
+ block[j] = level;
+ if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF)
+ break;
+ UPDATE_CACHE(re, &s->gb);
+ }
+end:
+ LAST_SKIP_BITS(re, &s->gb, 2);
+ CLOSE_READER(re, &s->gb);
+ s->block_last_index[n] = i;
+ return 0;
+}
+
+
+static inline int mpeg2_decode_block_intra(MpegEncContext *s,
+ DCTELEM *block,
+ int n)
+{
+ int level, dc, diff, i, j, run;
+ int component;
+ RLTable *rl;
+ uint8_t * const scantable= s->intra_scantable.permutated;
+ const uint16_t *quant_matrix;
+ const int qscale= s->qscale;
+ int mismatch;
+
+ /* DC coefficient */
+ if (n < 4){
+ quant_matrix = s->intra_matrix;
+ component = 0;
+ }else{
+ quant_matrix = s->chroma_intra_matrix;
+ component = (n&1) + 1;
+ }
+ diff = decode_dc(&s->gb, component);
+ if (diff >= 0xffff)
+ return -1;
+ dc = s->last_dc[component];
+ dc += diff;
+ s->last_dc[component] = dc;
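+ /* intra_dc_precision selects 8..11-bit DC values; the shift puts every
+ precision on the same scale */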
+ block[0] = dc << (3 - s->intra_dc_precision);
+ dprintf(s->avctx, "dc=%d\n", block[0]);
+ mismatch = block[0] ^ 1;
+ i = 0;
+ if (s->intra_vlc_format)
+ rl = &ff_rl_mpeg2;
+ else
+ rl = &ff_rl_mpeg1;
+
+ {
+ OPEN_READER(re, &s->gb);
+ /* now dequantise & decode the AC coefficients */
+ for(;;) {
+ UPDATE_CACHE(re, &s->gb);
+ GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0);
+
+ if(level == 127){
+ break;
+ } else if(level != 0) {
+ i += run;
+ j = scantable[i];
+ level= (level*qscale*quant_matrix[j])>>4;
+ level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
+ LAST_SKIP_BITS(re, &s->gb, 1);
+ } else {
+ /* escape */
+ run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6);
+ UPDATE_CACHE(re, &s->gb);
+ level = SHOW_SBITS(re, &s->gb, 12); SKIP_BITS(re, &s->gb, 12);
+ i += run;
+ j = scantable[i];
+ if(level<0){
+ level= (-level*qscale*quant_matrix[j])>>4;
+ level= -level;
+ }else{
+ level= (level*qscale*quant_matrix[j])>>4;
+ }
+ }
+ if (i > 63){
+ av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+
+ mismatch^= level;
+ block[j] = level;
+ }
+ CLOSE_READER(re, &s->gb);
+ }
+ block[63]^= mismatch&1;
+
+ s->block_last_index[n] = i;
+ return 0;
+}
+
+static inline int mpeg2_fast_decode_block_intra(MpegEncContext *s,
+ DCTELEM *block,
+ int n)
+{
+ int level, dc, diff, j, run;
+ int component;
+ RLTable *rl;
+ uint8_t * scantable= s->intra_scantable.permutated;
+ const uint16_t *quant_matrix;
+ const int qscale= s->qscale;
+
+ /* DC coefficient */
+ if (n < 4){
+ quant_matrix = s->intra_matrix;
+ component = 0;
+ }else{
+ quant_matrix = s->chroma_intra_matrix;
+ component = (n&1) + 1;
+ }
+ diff = decode_dc(&s->gb, component);
+ if (diff >= 0xffff)
+ return -1;
+ dc = s->last_dc[component];
+ dc += diff;
+ s->last_dc[component] = dc;
+ block[0] = dc << (3 - s->intra_dc_precision);
+ if (s->intra_vlc_format)
+ rl = &ff_rl_mpeg2;
+ else
+ rl = &ff_rl_mpeg1;
+
+ {
+ OPEN_READER(re, &s->gb);
+ /* now dequantise & decode the AC coefficients */
+ for(;;) {
+ UPDATE_CACHE(re, &s->gb);
+ GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0);
+
+ if(level == 127){
+ break;
+ } else if(level != 0) {
+ scantable += run;
+ j = *scantable;
+ level= (level*qscale*quant_matrix[j])>>4;
+ level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
+ LAST_SKIP_BITS(re, &s->gb, 1);
+ } else {
+ /* escape */
+ run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6);
+ UPDATE_CACHE(re, &s->gb);
+ level = SHOW_SBITS(re, &s->gb, 12); SKIP_BITS(re, &s->gb, 12);
+ scantable += run;
+ j = *scantable;
+ if(level<0){
+ level= (-level*qscale*quant_matrix[j])>>4;
+ level= -level;
+ }else{
+ level= (level*qscale*quant_matrix[j])>>4;
+ }
+ }
+
+ block[j] = level;
+ }
+ CLOSE_READER(re, &s->gb);
+ }
+
+ s->block_last_index[n] = scantable - s->intra_scantable.permutated;
+ return 0;
+}
+
+typedef struct Mpeg1Context {
+ MpegEncContext mpeg_enc_ctx;
+ int mpeg_enc_ctx_allocated; /* true if decoding context allocated */
+ int repeat_field; /* true if we must repeat the field */
+ AVPanScan pan_scan; ///< some temporary storage for the pan scan
+ int slice_count;
+ int swap_uv; ///< indicates VCR2
+ int save_aspect_info;
+ int save_width, save_height, save_progressive_seq;
+ AVRational frame_rate_ext; ///< MPEG-2 specific frame rate modifier
+ int sync; ///< Did we reach a sync point like a GOP/SEQ/KEYFrame?
+ // ==> Start patch MPC
+ DXVA_SliceInfo* pSliceInfo;
+ uint8_t* prev_slice;
+ // <== End patch MPC
+} Mpeg1Context;
+
+static av_cold int mpeg_decode_init(AVCodecContext *avctx)
+{
+ Mpeg1Context *s = avctx->priv_data;
+ MpegEncContext *s2 = &s->mpeg_enc_ctx;
+ int i;
+
+ /* we need some permutation to store matrices,
+ * until MPV_common_init() sets the real permutation. */
+ for(i=0;i<64;i++)
+ s2->dsp.idct_permutation[i]=i;
+
+ MPV_decode_defaults(s2);
+
+ s->mpeg_enc_ctx.avctx= avctx;
+ s->mpeg_enc_ctx.flags= avctx->flags;
+ s->mpeg_enc_ctx.flags2= avctx->flags2;
+ ff_mpeg12_common_init(&s->mpeg_enc_ctx);
+ ff_mpeg12_init_vlcs();
+
+ s->mpeg_enc_ctx_allocated = 0;
+ s->mpeg_enc_ctx.picture_number = 0;
+ s->repeat_field = 0;
+ s->mpeg_enc_ctx.codec_id= avctx->codec->id;
+ avctx->color_range= AVCOL_RANGE_MPEG;
+ if (avctx->codec->id == CODEC_ID_MPEG1VIDEO)
+ avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
+ else
+ avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
+ return 0;
+}
+
+static void quant_matrix_rebuild(uint16_t *matrix, const uint8_t *old_perm,
+ const uint8_t *new_perm){
+ uint16_t temp_matrix[64];
+ int i;
+
+ memcpy(temp_matrix,matrix,64*sizeof(uint16_t));
+
+ for(i=0;i<64;i++){
+ matrix[new_perm[i]] = temp_matrix[old_perm[i]];
+ }
+}
+
+static enum PixelFormat mpeg_get_pixelformat(AVCodecContext *avctx){
+ Mpeg1Context *s1 = avctx->priv_data;
+ MpegEncContext *s = &s1->mpeg_enc_ctx;
+
+ if(s->chroma_format < 2)
+ return PIX_FMT_YUV420P;
+ else if(s->chroma_format == 2)
+ return PIX_FMT_YUV422P;
+ else
+ return PIX_FMT_YUV444P;
+}
+
+/* Call this function when we know all parameters.
+ * It may be called in different places for MPEG-1 and MPEG-2. */
+static int mpeg_decode_postinit(AVCodecContext *avctx){
+ Mpeg1Context *s1 = avctx->priv_data;
+ MpegEncContext *s = &s1->mpeg_enc_ctx;
+ uint8_t old_permutation[64];
+
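+ /* (re)allocate the decoding context only if it is missing or the sequence
+ parameters (size, aspect ratio info, progressive flag) have changed */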
+ if (
+ (s1->mpeg_enc_ctx_allocated == 0)||
+ avctx->coded_width != s->width ||
+ avctx->coded_height != s->height||
+ s1->save_width != s->width ||
+ s1->save_height != s->height ||
+ s1->save_aspect_info != s->aspect_ratio_info||
+ s1->save_progressive_seq != s->progressive_sequence ||
+ 0)
+ {
+
+ if (s1->mpeg_enc_ctx_allocated) {
+ ParseContext pc= s->parse_context;
+ s->parse_context.buffer=0;
+ MPV_common_end(s);
+ s->parse_context= pc;
+ }
+
+ if( (s->width == 0 )||(s->height == 0))
+ return -2;
+
+ avcodec_set_dimensions(avctx, s->width, s->height);
+ avctx->bit_rate = s->bit_rate;
+ s1->save_aspect_info = s->aspect_ratio_info;
+ s1->save_width = s->width;
+ s1->save_height = s->height;
+ s1->save_progressive_seq = s->progressive_sequence;
+
+ /* low_delay may be forced, in this case we will have B-frames
+ * that behave like P-frames. */
+ avctx->has_b_frames = !(s->low_delay);
+
+ assert((avctx->sub_id==1) == (avctx->codec_id==CODEC_ID_MPEG1VIDEO));
+ if(avctx->codec_id==CODEC_ID_MPEG1VIDEO){
+ //MPEG-1 fps
+ avctx->time_base.den= ff_frame_rate_tab[s->frame_rate_index].num;
+ avctx->time_base.num= ff_frame_rate_tab[s->frame_rate_index].den;
+ //MPEG-1 aspect
+ avctx->sample_aspect_ratio= av_d2q(
+ 1.0/ff_mpeg1_aspect[s->aspect_ratio_info], 255);
+
+ }else{//MPEG-2
+#if 0 // ffdshow custom code (move this block to mpeg_decode_sequence_extension)
+ //MPEG-2 fps
+ av_reduce(
+ &s->avctx->time_base.den,
+ &s->avctx->time_base.num,
+ ff_frame_rate_tab[s->frame_rate_index].num * s1->frame_rate_ext.num,
+ ff_frame_rate_tab[s->frame_rate_index].den * s1->frame_rate_ext.den,
+ 1<<30);
+#endif
+ //MPEG-2 aspect
+ if(s->aspect_ratio_info > 1){
+ //we ignore the spec here as reality does not match the spec, see for example
+ // res_change_ffmpeg_aspect.ts and sequence-display-aspect.mpg
+
+ /* ffdshow custom code begin
+ * We do not want to ignore the spec.
+ * However, a considerable number of videos are encoded in the wrong way.
+ * We set the spec-compliant value in sample_aspect_ratio and the
+ * wrong, spec-violating value in sample_aspect_ratio2.
+ * Let ffdshow guess which one is more likely.
+ */
+ AVRational r1={s->width, s->height};
+ AVRational r2={s1->pan_scan.width, s1->pan_scan.height};
+ if( (s1->pan_scan.width == 0 )||(s1->pan_scan.height == 0)){
+ s->avctx->sample_aspect_ratio=
+ s->avctx->sample_aspect_ratio2=
+ av_div_q(
+ ff_mpeg2_aspect[s->aspect_ratio_info],
+ r1
+ );
+ }else{
+ s->avctx->sample_aspect_ratio=
+ av_div_q(
+ ff_mpeg2_aspect[s->aspect_ratio_info],
+ r2
+ );
+ s->avctx->sample_aspect_ratio2=
+ // Dirty workaround
+ // This is wrong, but often used. Let ffdshow choose either r2 or r1.
+ av_div_q(
+ ff_mpeg2_aspect[s->aspect_ratio_info],
+ r1
+ );
+ }
+ /* ffdshow custom code end */
+ }else{
+ s->avctx->sample_aspect_ratio=
+ ff_mpeg2_aspect[s->aspect_ratio_info];
+ }
+ }//MPEG-2
+
+ avctx->pix_fmt = mpeg_get_pixelformat(avctx);
+
+ /* Quantization matrices may need reordering
+ * if DCT permutation is changed. */
+ memcpy(old_permutation,s->dsp.idct_permutation,64*sizeof(uint8_t));
+
+ if (MPV_common_init(s) < 0)
+ return -2;
+
+ quant_matrix_rebuild(s->intra_matrix, old_permutation,s->dsp.idct_permutation);
+ quant_matrix_rebuild(s->inter_matrix, old_permutation,s->dsp.idct_permutation);
+ quant_matrix_rebuild(s->chroma_intra_matrix,old_permutation,s->dsp.idct_permutation);
+ quant_matrix_rebuild(s->chroma_inter_matrix,old_permutation,s->dsp.idct_permutation);
+
+ s1->mpeg_enc_ctx_allocated = 1;
+ }
+ return 0;
+}
+
+static int mpeg1_decode_picture(AVCodecContext *avctx,
+ const uint8_t *buf, int buf_size)
+{
+ Mpeg1Context *s1 = avctx->priv_data;
+ MpegEncContext *s = &s1->mpeg_enc_ctx;
+ int ref, f_code, vbv_delay;
+
+ init_get_bits(&s->gb, buf, buf_size*8);
+
+ ref = get_bits(&s->gb, 10); /* temporal ref */
+ s->pict_type = get_bits(&s->gb, 3);
+ if(s->pict_type == 0 || s->pict_type > 3)
+ return -1;
+
+ vbv_delay= get_bits(&s->gb, 16);
+ if (s->pict_type == FF_P_TYPE || s->pict_type == FF_B_TYPE) {
+ s->full_pel[0] = get_bits1(&s->gb);
+ f_code = get_bits(&s->gb, 3);
+ if (f_code == 0 && avctx->error_recognition >= FF_ER_COMPLIANT)
+ return -1;
+ s->mpeg_f_code[0][0] = f_code;
+ s->mpeg_f_code[0][1] = f_code;
+ }
+ if (s->pict_type == FF_B_TYPE) {
+ s->full_pel[1] = get_bits1(&s->gb);
+ f_code = get_bits(&s->gb, 3);
+ if (f_code == 0 && avctx->error_recognition >= FF_ER_COMPLIANT)
+ return -1;
+ s->mpeg_f_code[1][0] = f_code;
+ s->mpeg_f_code[1][1] = f_code;
+ }
+ s->current_picture.pict_type= s->pict_type;
+ s->current_picture.key_frame= s->pict_type == FF_I_TYPE;
+
+ if(avctx->debug & FF_DEBUG_PICT_INFO)
+ av_log(avctx, AV_LOG_DEBUG, "vbv_delay %d, ref %d type:%d\n", vbv_delay, ref, s->pict_type);
+
+ s->y_dc_scale = 8;
+ s->c_dc_scale = 8;
+ return 0;
+}
+
+static void mpeg_decode_sequence_extension(Mpeg1Context *s1)
+{
+ MpegEncContext *s= &s1->mpeg_enc_ctx;
+ int horiz_size_ext, vert_size_ext;
+ int bit_rate_ext;
+
+ skip_bits(&s->gb, 1); /* profile and level esc*/
+ s->avctx->profile= get_bits(&s->gb, 3);
+ s->avctx->level= get_bits(&s->gb, 4);
+ s->progressive_sequence = get_bits1(&s->gb); /* progressive_sequence */
+ s->chroma_format = get_bits(&s->gb, 2); /* chroma_format 1=420, 2=422, 3=444 */
+ horiz_size_ext = get_bits(&s->gb, 2);
+ vert_size_ext = get_bits(&s->gb, 2);
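+ /* the 2-bit size extensions carry bits 12..13 of the picture dimensions */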
+ s->width |= (horiz_size_ext << 12);
+ s->height |= (vert_size_ext << 12);
+ bit_rate_ext = get_bits(&s->gb, 12); /* XXX: handle it */
+ s->bit_rate += (bit_rate_ext << 18) * 400;
+ skip_bits1(&s->gb); /* marker */
+ s->avctx->rc_buffer_size += get_bits(&s->gb, 8)*1024*16<<10;
+
+ s->low_delay = get_bits1(&s->gb);
+ if(s->flags & CODEC_FLAG_LOW_DELAY) s->low_delay=1;
+
+ s1->frame_rate_ext.num = get_bits(&s->gb, 2)+1;
+ s1->frame_rate_ext.den = get_bits(&s->gb, 5)+1;
+ // ffdshow custom code begin (moved from mpeg_decode_postinit)
+ // MPEG-2 fps
+ av_reduce(
+ &s->avctx->time_base.den,
+ &s->avctx->time_base.num,
+ ff_frame_rate_tab[s->frame_rate_index].num * s1->frame_rate_ext.num,
+ ff_frame_rate_tab[s->frame_rate_index].den * s1->frame_rate_ext.den,
+ 1<<30);
+ // ffdshow custom code end
+
+ dprintf(s->avctx, "sequence extension\n");
+ s->codec_id= s->avctx->codec_id= CODEC_ID_MPEG2VIDEO;
+ s->avctx->sub_id = 2; /* indicates MPEG-2 found */
+
+ if(s->avctx->debug & FF_DEBUG_PICT_INFO)
+ av_log(s->avctx, AV_LOG_DEBUG, "profile: %d, level: %d vbv buffer: %d, bitrate:%d\n",
+ s->avctx->profile, s->avctx->level, s->avctx->rc_buffer_size, s->bit_rate);
+
+}
+
+static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1)
+{
+ MpegEncContext *s= &s1->mpeg_enc_ctx;
+ int color_description, w, h;
+
+ skip_bits(&s->gb, 3); /* video format */
+ color_description= get_bits1(&s->gb);
+ if(color_description){
+ s->avctx->color_primaries= get_bits(&s->gb, 8);
+ s->avctx->color_trc = get_bits(&s->gb, 8);
+ s->avctx->colorspace = get_bits(&s->gb, 8);
+ }
+ w= get_bits(&s->gb, 14);
+ skip_bits(&s->gb, 1); //marker
+ h= get_bits(&s->gb, 14);
+ skip_bits(&s->gb, 1); //marker
+
+ s1->pan_scan.width= 16*w;
+ s1->pan_scan.height=16*h;
+
+ if(s->avctx->debug & FF_DEBUG_PICT_INFO)
+ av_log(s->avctx, AV_LOG_DEBUG, "sde w:%d, h:%d\n", w, h);
+}
+
+static void mpeg_decode_picture_display_extension(Mpeg1Context *s1)
+{
+ MpegEncContext *s= &s1->mpeg_enc_ctx;
+ int i,nofco;
+
+ nofco = 1;
+ if(s->progressive_sequence){
+ if(s->repeat_first_field){
+ nofco++;
+ if(s->top_field_first)
+ nofco++;
+ }
+ }else{
+ if(s->picture_structure == PICT_FRAME){
+ nofco++;
+ if(s->repeat_first_field)
+ nofco++;
+ }
+ }
+ for(i=0; i<nofco; i++){
+ s1->pan_scan.position[i][0]= get_sbits(&s->gb, 16);
+ skip_bits(&s->gb, 1); //marker
+ s1->pan_scan.position[i][1]= get_sbits(&s->gb, 16);
+ skip_bits(&s->gb, 1); //marker
+ }
+
+ if(s->avctx->debug & FF_DEBUG_PICT_INFO)
+ av_log(s->avctx, AV_LOG_DEBUG, "pde (%d,%d) (%d,%d) (%d,%d)\n",
+ s1->pan_scan.position[0][0], s1->pan_scan.position[0][1],
+ s1->pan_scan.position[1][0], s1->pan_scan.position[1][1],
+ s1->pan_scan.position[2][0], s1->pan_scan.position[2][1]
+ );
+}
+
+static int load_matrix(MpegEncContext *s, uint16_t matrix0[64], uint16_t matrix1[64], uint16_t matrix2[64], int intra, int ffdshow_custom){
+ int i;
+
+ for(i=0; i<64; i++) {
+ int j = s->dsp.idct_permutation[ ff_zigzag_direct[i] ];
+ int v = get_bits(&s->gb, 8);
+ if(v==0){
+ av_log(s->avctx, AV_LOG_ERROR, "matrix damaged\n");
+ return -1;
+ }
+ if(intra && i==0 && v!=8){
+ av_log(s->avctx, AV_LOG_ERROR, "intra matrix invalid, ignoring\n");
+ v= 8; // needed by pink.mpg / issue1046
+ }
+ matrix0[j] = v;
+ if(matrix1)
+ matrix1[j] = v;
+ if(ffdshow_custom)
+ matrix2[ff_zigzag_direct[i]] = v;
+ }
+ return 0;
+}
+
+static void mpeg_decode_quant_matrix_extension(MpegEncContext *s)
+{
+ dprintf(s->avctx, "matrix extension\n");
+
+ if(get_bits1(&s->gb)) load_matrix(s, s->chroma_intra_matrix, s->intra_matrix, NULL, 1, 0);
+ if(get_bits1(&s->gb)) load_matrix(s, s->chroma_inter_matrix, s->inter_matrix, NULL, 0, 0);
+ if(get_bits1(&s->gb)) load_matrix(s, s->chroma_intra_matrix, NULL , NULL, 1, 0);
+ if(get_bits1(&s->gb)) load_matrix(s, s->chroma_inter_matrix, NULL , NULL, 0, 0);
+}
+
+static void mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
+{
+ MpegEncContext *s= &s1->mpeg_enc_ctx;
+
+ s->full_pel[0] = s->full_pel[1] = 0;
+ s->mpeg_f_code[0][0] = get_bits(&s->gb, 4);
+ s->mpeg_f_code[0][1] = get_bits(&s->gb, 4);
+ s->mpeg_f_code[1][0] = get_bits(&s->gb, 4);
+ s->mpeg_f_code[1][1] = get_bits(&s->gb, 4);
+ if(!s->pict_type && s1->mpeg_enc_ctx_allocated){
+ av_log(s->avctx, AV_LOG_ERROR, "Missing picture start code, guessing missing values\n");
+ if(s->mpeg_f_code[1][0] == 15 && s->mpeg_f_code[1][1]==15){
+ if(s->mpeg_f_code[0][0] == 15 && s->mpeg_f_code[0][1] == 15)
+ s->pict_type= FF_I_TYPE;
+ else
+ s->pict_type= FF_P_TYPE;
+ }else
+ s->pict_type= FF_B_TYPE;
+ s->current_picture.pict_type= s->pict_type;
+ s->current_picture.key_frame= s->pict_type == FF_I_TYPE;
+ }
+ s->intra_dc_precision = get_bits(&s->gb, 2);
+ s->picture_structure = get_bits(&s->gb, 2);
+ s->top_field_first = get_bits1(&s->gb);
+ s->frame_pred_frame_dct = get_bits1(&s->gb);
+ s->concealment_motion_vectors = get_bits1(&s->gb);
+ s->q_scale_type = get_bits1(&s->gb);
+ s->intra_vlc_format = get_bits1(&s->gb);
+ s->alternate_scan = get_bits1(&s->gb);
+ s->repeat_first_field = get_bits1(&s->gb);
+ s->chroma_420_type = get_bits1(&s->gb);
+ s->progressive_frame = get_bits1(&s->gb);
+
+ if(s->progressive_sequence && !s->progressive_frame){
+ s->progressive_frame= 1;
+ av_log(s->avctx, AV_LOG_ERROR, "interlaced frame in progressive sequence, ignoring\n");
+ }
+
+ if(s->picture_structure==0 || (s->progressive_frame && s->picture_structure!=PICT_FRAME)){
+ av_log(s->avctx, AV_LOG_ERROR, "picture_structure %d invalid, ignoring\n", s->picture_structure);
+ s->picture_structure= PICT_FRAME;
+ }
+
+ if(s->progressive_sequence && !s->frame_pred_frame_dct){
+ av_log(s->avctx, AV_LOG_ERROR, "invalid frame_pred_frame_dct\n");
+ s->frame_pred_frame_dct= 1;
+ }
+
+ if(s->picture_structure == PICT_FRAME){
+ s->first_field=0;
+ s->v_edge_pos= 16*s->mb_height;
+ }else{
+ s->first_field ^= 1;
+ s->v_edge_pos= 8*s->mb_height;
+ memset(s->mbskip_table, 0, s->mb_stride*s->mb_height);
+ }
+
+ if(s->alternate_scan){
+ ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
+ ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
+ }else{
+ ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
+ ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
+ }
+
+ /* composite display not parsed */
+ dprintf(s->avctx, "intra_dc_precision=%d\n", s->intra_dc_precision);
+ dprintf(s->avctx, "picture_structure=%d\n", s->picture_structure);
+ dprintf(s->avctx, "top field first=%d\n", s->top_field_first);
+ dprintf(s->avctx, "repeat first field=%d\n", s->repeat_first_field);
+ dprintf(s->avctx, "conceal=%d\n", s->concealment_motion_vectors);
+ dprintf(s->avctx, "intra_vlc_format=%d\n", s->intra_vlc_format);
+ dprintf(s->avctx, "alternate_scan=%d\n", s->alternate_scan);
+ dprintf(s->avctx, "frame_pred_frame_dct=%d\n", s->frame_pred_frame_dct);
+ dprintf(s->avctx, "progressive_frame=%d\n", s->progressive_frame);
+}
+
+static void exchange_uv(MpegEncContext *s){
+ DCTELEM (*tmp)[64];
+
+ tmp = s->pblocks[4];
+ s->pblocks[4] = s->pblocks[5];
+ s->pblocks[5] = tmp;
+}
+
+static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size){
+ AVCodecContext *avctx= s->avctx;
+ Mpeg1Context *s1 = (Mpeg1Context*)s;
+
+ /* start frame decoding */
+ if(s->first_field || s->picture_structure==PICT_FRAME){
+ if(MPV_frame_start(s, avctx) < 0)
+ return -1;
+
+ ff_er_frame_start(s);
+ s->current_picture_ptr->reordered_opaque = s->parse_context.rtStart; /* ffdshow custom code */
+
+ /* first check if we must repeat the frame */
+ s->current_picture_ptr->repeat_pict = 0;
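+ /* repeat_pict counts extra display fields: 4 (frame tripling) or
+ 2 (frame doubling) in progressive sequences, 1 for a repeated first field */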
+ if (s->repeat_first_field) {
+ if (s->progressive_sequence) {
+ if (s->top_field_first)
+ s->current_picture_ptr->repeat_pict = 4;
+ else
+ s->current_picture_ptr->repeat_pict = 2;
+ } else if (s->progressive_frame) {
+ s->current_picture_ptr->repeat_pict = 1;
+ }
+ }
+
+ *s->current_picture_ptr->pan_scan= s1->pan_scan;
+ }else{ //second field
+ int i;
+
+ if(!s->current_picture_ptr){
+ av_log(s->avctx, AV_LOG_ERROR, "first field missing\n");
+ return -1;
+ }
+
+ for(i=0; i<4; i++){
+ s->current_picture.data[i] = s->current_picture_ptr->data[i];
+ if(s->picture_structure == PICT_BOTTOM_FIELD){
+ s->current_picture.data[i] += s->current_picture_ptr->linesize[i];
+ }
+ }
+ }
+
+ return 0;
+}
+
+#define DECODE_SLICE_ERROR -1
+#define DECODE_SLICE_OK 0
+
+/**
+ * decodes a slice. MpegEncContext.mb_y must be set to the MB row from the startcode
+ * @return DECODE_SLICE_ERROR if the slice is damaged<br>
+ * DECODE_SLICE_OK if this slice is ok<br>
+ */
+static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,
+ const uint8_t **buf, int buf_size)
+{
+ MpegEncContext *s = &s1->mpeg_enc_ctx;
+ AVCodecContext *avctx= s->avctx;
+ const int field_pic= s->picture_structure != PICT_FRAME;
+ const int lowres= s->avctx->lowres;
+
+ s->resync_mb_x=
+ s->resync_mb_y= -1;
+
+ assert(mb_y < s->mb_height);
+
+ init_get_bits(&s->gb, *buf, buf_size*8);
+
+ ff_mpeg1_clean_buffers(s);
+ s->interlaced_dct = 0;
+
+ // ==> Start patch MPC
+ // DXVA needs the raw syntax elements
+ if (s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
+ s->qscale = get_bits(&s->gb, 5);
+ else
+ s->qscale = get_qscale(s);
+ // <== End patch MPC
+
+ if(s->qscale == 0){
+ av_log(s->avctx, AV_LOG_ERROR, "qscale == 0\n");
+ return -1;
+ }
+
+ /* extra slice info */
+ while (get_bits1(&s->gb) != 0) {
+ skip_bits(&s->gb, 8);
+ }
+
+ // ==> Start patch MPC
+ if (s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
+ {
+ // Fill DXVA structure and return
+ // s1->pSliceInfo[s1->slice_count].wHorizontalPosition = // TODO : horizontal ?
+ s1->pSliceInfo[s1->slice_count].wVerticalPosition = s1->slice_count;
+ s1->pSliceInfo[s1->slice_count].wMBbitOffset = s->gb.index + 32; // Current pos + Slice Start Code
+ s1->pSliceInfo[s1->slice_count].wNumberMBsInSlice = s->mb_width;
+ s1->pSliceInfo[s1->slice_count].wQuantizerScaleCode = s->qscale;// >> 1;
+ s1->pSliceInfo[s1->slice_count].dwSliceBitsInBuffer = (buf_size*8)+32;
+
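+ /* the previous slice's size is only known now, so backpatch it and derive
+ this slice's data location from it */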
+ if (s1->slice_count>0)
+ {
+ s1->pSliceInfo[s1->slice_count-1].dwSliceBitsInBuffer = (*buf - s1->prev_slice)*8;
+ s1->pSliceInfo[s1->slice_count].dwSliceDataLocation = s1->pSliceInfo[s1->slice_count-1].dwSliceDataLocation +
+ s1->pSliceInfo[s1->slice_count-1].dwSliceBitsInBuffer/8;
+ }
+
+ s1->prev_slice = (uint8_t*)*buf;
+ s1->slice_count++;
+
+ return 0;
+ }
+ // <== End patch MPC
+
+ s->mb_x=0;
+
+ if(mb_y==0 && s->codec_tag == AV_RL32("SLIF")){
+ skip_bits1(&s->gb);
+ }else{
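+ /* macroblock_address_increment: values below 33 are the increment itself,
+ 33 is the escape code adding 33, anything larger is stuffing */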
+ for(;;) {
+ int code = get_vlc2(&s->gb, mbincr_vlc.table, MBINCR_VLC_BITS, 2);
+ if (code < 0){
+ av_log(s->avctx, AV_LOG_ERROR, "first mb_incr damaged\n");
+ return -1;
+ }
+ if (code >= 33) {
+ if (code == 33) {
+ s->mb_x += 33;
+ }
+ /* otherwise, stuffing, nothing to do */
+ } else {
+ s->mb_x += code;
+ break;
+ }
+ }
+ }
+
+ if(s->mb_x >= (unsigned)s->mb_width){
+ av_log(s->avctx, AV_LOG_ERROR, "initial skip overflow\n");
+ return -1;
+ }
+
+ s->resync_mb_x= s->mb_x;
+ s->resync_mb_y= s->mb_y= mb_y;
+ s->mb_skip_run= 0;
+ ff_init_block_index(s);
+
+ if (s->mb_y==0 && s->mb_x==0 && (s->first_field || s->picture_structure==PICT_FRAME)) {
+ if(s->avctx->debug&FF_DEBUG_PICT_INFO){
+ av_log(s->avctx, AV_LOG_DEBUG, "qp:%d fc:%2d%2d%2d%2d %s %s %s %s %s dc:%d pstruct:%d fdct:%d cmv:%d qtype:%d ivlc:%d rff:%d %s\n",
+ s->qscale, s->mpeg_f_code[0][0],s->mpeg_f_code[0][1],s->mpeg_f_code[1][0],s->mpeg_f_code[1][1],
+ s->pict_type == FF_I_TYPE ? "I" : (s->pict_type == FF_P_TYPE ? "P" : (s->pict_type == FF_B_TYPE ? "B" : "S")),
+ s->progressive_sequence ? "ps" :"", s->progressive_frame ? "pf" : "", s->alternate_scan ? "alt" :"", s->top_field_first ? "top" :"",
+ s->intra_dc_precision, s->picture_structure, s->frame_pred_frame_dct, s->concealment_motion_vectors,
+ s->q_scale_type, s->intra_vlc_format, s->repeat_first_field, s->chroma_420_type ? "420" :"");
+ }
+ }
+
+ for(;;) {
+ if(mpeg_decode_mb(s, s->block) < 0)
+ return -1;
+
+ if(s->current_picture.motion_val[0] && !s->encoding){ //note motion_val is normally NULL unless we want to extract the MVs
+ const int wrap = s->b8_stride;
+ int xy = s->mb_x*2 + s->mb_y*2*wrap;
+ int motion_x, motion_y, dir, i;
+
+ for(i=0; i<2; i++){
+ for(dir=0; dir<2; dir++){
+ if (s->mb_intra || (dir==1 && s->pict_type != FF_B_TYPE)) {
+ motion_x = motion_y = 0;
+ }else if (s->mv_type == MV_TYPE_16X16 || (s->mv_type == MV_TYPE_FIELD && field_pic)){
+ motion_x = s->mv[dir][0][0];
+ motion_y = s->mv[dir][0][1];
+ } else /*if ((s->mv_type == MV_TYPE_FIELD) || (s->mv_type == MV_TYPE_16X8))*/ {
+ motion_x = s->mv[dir][i][0];
+ motion_y = s->mv[dir][i][1];
+ }
+
+ s->current_picture.motion_val[dir][xy ][0] = motion_x;
+ s->current_picture.motion_val[dir][xy ][1] = motion_y;
+ s->current_picture.motion_val[dir][xy + 1][0] = motion_x;
+ s->current_picture.motion_val[dir][xy + 1][1] = motion_y;
+ s->current_picture.ref_index [dir][xy ]=
+ s->current_picture.ref_index [dir][xy + 1]= s->field_select[dir][i];
+ assert(s->field_select[dir][i]==0 || s->field_select[dir][i]==1);
+ }
+ xy += wrap;
+ }
+ }
+
+ s->dest[0] += 16 >> lowres;
+ s->dest[1] +=(16 >> lowres) >> s->chroma_x_shift;
+ s->dest[2] +=(16 >> lowres) >> s->chroma_x_shift;
+
+ MPV_decode_mb(s, s->block);
+
+ if (++s->mb_x >= s->mb_width) {
+ const int mb_size= 16>>s->avctx->lowres;
+
+ ff_draw_horiz_band(s, mb_size*(s->mb_y>>field_pic), mb_size);
+
+ s->mb_x = 0;
+ s->mb_y += 1<<field_pic;
+
+ if(s->mb_y >= s->mb_height){
+ int left= get_bits_left(&s->gb);
+ int is_d10= s->chroma_format==2 && s->pict_type==FF_I_TYPE && avctx->profile==0 && avctx->level==5
+ && s->intra_dc_precision == 2 && s->q_scale_type == 1 && s->alternate_scan == 0
+ && s->progressive_frame == 0 /* vbv_delay == 0xBBB || 0xE10*/;
+
+ if(left < 0 || (left && show_bits(&s->gb, FFMIN(left, 23)) && !is_d10)
+ || (avctx->error_recognition >= FF_ER_AGGRESSIVE && left>8)){
+ av_log(avctx, AV_LOG_ERROR, "end mismatch left=%d %0X\n", left, show_bits(&s->gb, FFMIN(left, 23)));
+ return -1;
+ }else
+ goto eos;
+ }
+
+ ff_init_block_index(s);
+ }
+
+ /* skip mb handling */
+ if (s->mb_skip_run == -1) {
+ /* read increment again */
+ s->mb_skip_run = 0;
+ for(;;) {
+ int code = get_vlc2(&s->gb, mbincr_vlc.table, MBINCR_VLC_BITS, 2);
+ if (code < 0){
+ av_log(s->avctx, AV_LOG_ERROR, "mb incr damaged\n");
+ return -1;
+ }
+ if (code >= 33) {
+ if (code == 33) {
+ s->mb_skip_run += 33;
+ }else if(code == 35){
+ if(s->mb_skip_run != 0 || show_bits(&s->gb, 15) != 0){
+ av_log(s->avctx, AV_LOG_ERROR, "slice mismatch\n");
+ return -1;
+ }
+ goto eos; /* end of slice */
+ }
+ /* otherwise, stuffing, nothing to do */
+ } else {
+ s->mb_skip_run += code;
+ break;
+ }
+ }
+ if(s->mb_skip_run){
+ int i;
+ if(s->pict_type == FF_I_TYPE){
+ av_log(s->avctx, AV_LOG_ERROR, "skipped MB in I frame at %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+
+ /* skip mb */
+ s->mb_intra = 0;
+ for(i=0;i<12;i++)
+ s->block_last_index[i] = -1;
+ if(s->picture_structure == PICT_FRAME)
+ s->mv_type = MV_TYPE_16X16;
+ else
+ s->mv_type = MV_TYPE_FIELD;
+ if (s->pict_type == FF_P_TYPE) {
+ /* if P type, zero motion vector is implied */
+ s->mv_dir = MV_DIR_FORWARD;
+ s->mv[0][0][0] = s->mv[0][0][1] = 0;
+ s->last_mv[0][0][0] = s->last_mv[0][0][1] = 0;
+ s->last_mv[0][1][0] = s->last_mv[0][1][1] = 0;
+ s->field_select[0][0]= (s->picture_structure - 1) & 1;
+ } else {
+ /* if B type, reuse previous vectors and directions */
+ s->mv[0][0][0] = s->last_mv[0][0][0];
+ s->mv[0][0][1] = s->last_mv[0][0][1];
+ s->mv[1][0][0] = s->last_mv[1][0][0];
+ s->mv[1][0][1] = s->last_mv[1][0][1];
+ }
+ }
+ }
+ }
+eos: // end of slice
+ *buf += (get_bits_count(&s->gb)-1)/8;
+//printf("y %d %d %d %d\n", s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y);
+ return 0;
+}
+
+static int slice_decode_thread(AVCodecContext *c, void *arg){
+ MpegEncContext *s= *(void**)arg;
+ const uint8_t *buf= s->gb.buffer;
+ int mb_y= s->start_mb_y;
+ const int field_pic= s->picture_structure != PICT_FRAME;
+
+ s->error_count= (3*(s->end_mb_y - s->start_mb_y)*s->mb_width) >> field_pic;
+
+ for(;;){
+ uint32_t start_code;
+ int ret;
+
+ ret= mpeg_decode_slice((Mpeg1Context*)s, mb_y, &buf, s->gb.buffer_end - buf);
+ emms_c();
+//av_log(c, AV_LOG_DEBUG, "ret:%d resync:%d/%d mb:%d/%d ts:%d/%d ec:%d\n",
+//ret, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, s->start_mb_y, s->end_mb_y, s->error_count);
+ if(ret < 0){
+ if(s->resync_mb_x>=0 && s->resync_mb_y>=0)
+ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, AC_ERROR|DC_ERROR|MV_ERROR);
+ }else{
+ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, AC_END|DC_END|MV_END);
+ }
+
+ if(s->mb_y == s->end_mb_y)
+ return 0;
+
+ start_code= -1;
+ buf = ff_find_start_code(buf, s->gb.buffer_end, &start_code);
+ mb_y= start_code - SLICE_MIN_START_CODE;
+ if(mb_y < 0 || mb_y >= s->end_mb_y)
+ return -1;
+ }
+
+ return 0; //not reached
+}
+
+/**
+ * Handles slice ends.
+ * @return 1 if it seems to be the last slice
+ */
+static int slice_end(AVCodecContext *avctx, AVFrame *pict)
+{
+ Mpeg1Context *s1 = avctx->priv_data;
+ MpegEncContext *s = &s1->mpeg_enc_ctx;
+
+ if (!s1->mpeg_enc_ctx_allocated || !s->current_picture_ptr)
+ return 0;
+
+ /* end of slice reached */
+ if (/*s->mb_y<<field_pic == s->mb_height &&*/ !s->first_field) {
+ /* end of image */
+
+ s->current_picture_ptr->qscale_type= FF_QSCALE_TYPE_MPEG2;
+
+ // ==> Start patch MPC
+ if (!(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU))
+ ff_er_frame_end(s);
+ // <== End patch MPC
+
+ MPV_frame_end(s);
+
+ if (s->pict_type == FF_B_TYPE || s->low_delay) {
+ *pict= *(AVFrame*)s->current_picture_ptr;
+ ff_print_debug_info(s, pict);
+ } else {
+ s->picture_number++;
+ /* latency of 1 frame for I- and P-frames */
+ /* XXX: use another variable than picture_number */
+ if (s->last_picture_ptr != NULL) {
+ *pict= *(AVFrame*)s->last_picture_ptr;
+ ff_print_debug_info(s, pict);
+ }
+ }
+
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+static int mpeg1_decode_sequence(AVCodecContext *avctx,
+ const uint8_t *buf, int buf_size)
+{
+ Mpeg1Context *s1 = avctx->priv_data;
+ MpegEncContext *s = &s1->mpeg_enc_ctx;
+ int width,height;
+ int i, v, j;
+
+ init_get_bits(&s->gb, buf, buf_size*8);
+
+ width = get_bits(&s->gb, 12);
+ height = get_bits(&s->gb, 12);
+ if (width <= 0 || height <= 0)
+ return -1;
+ s->aspect_ratio_info= get_bits(&s->gb, 4);
+ if (s->aspect_ratio_info == 0) {
+ av_log(avctx, AV_LOG_ERROR, "aspect ratio has forbidden 0 value\n");
+ if (avctx->error_recognition >= FF_ER_COMPLIANT)
+ return -1;
+ }
+ s->frame_rate_index = get_bits(&s->gb, 4);
+ if (s->frame_rate_index == 0 || s->frame_rate_index > 13)
+ return -1;
+ s->bit_rate = get_bits(&s->gb, 18) * 400;
+ if (get_bits1(&s->gb) == 0) /* marker */
+ return -1;
+ s->width = width;
+ s->height = height;
+
+ s->avctx->rc_buffer_size= get_bits(&s->gb, 10) * 1024*16;
+ skip_bits(&s->gb, 1);
+
+ /* get matrix */
+ if (get_bits1(&s->gb)) {
+ load_matrix(s, s->chroma_intra_matrix, s->intra_matrix, s->avctx->intra_matrix, 1, 1);
+ } else {
+ for(i=0;i<64;i++) {
+ j = s->dsp.idct_permutation[i];
+ v = ff_mpeg1_default_intra_matrix[i];
+ s->avctx->intra_matrix[i]=v;
+ s->intra_matrix[j] = v;
+ s->chroma_intra_matrix[j] = v;
+ }
+ }
+ if (get_bits1(&s->gb)) {
+ load_matrix(s, s->chroma_inter_matrix, s->inter_matrix, s->avctx->inter_matrix, 0, 1);
+ } else {
+ for(i=0;i<64;i++) {
+ int j= s->dsp.idct_permutation[i];
+ v = ff_mpeg1_default_non_intra_matrix[i];
+ s->avctx->inter_matrix[i]=v;
+ s->inter_matrix[j] = v;
+ s->chroma_inter_matrix[j] = v;
+ }
+ }
+
+ if(show_bits(&s->gb, 23) != 0){
+ av_log(s->avctx, AV_LOG_ERROR, "sequence header damaged\n");
+ return -1;
+ }
+
+ /* we set MPEG-2 parameters so that it emulates MPEG-1 */
+ s->progressive_sequence = 1;
+ s->progressive_frame = 1;
+ s->picture_structure = PICT_FRAME;
+ s->frame_pred_frame_dct = 1;
+ s->chroma_format = 1;
+ s->codec_id= s->avctx->codec_id= CODEC_ID_MPEG1VIDEO;
+ avctx->sub_id = 1; /* indicates MPEG-1 */
+ s->out_format = FMT_MPEG1;
+ s->swap_uv = 0;//AFAIK VCR2 does not have SEQ_HEADER
+ if(s->flags & CODEC_FLAG_LOW_DELAY) s->low_delay=1;
+
+ if(s->avctx->debug & FF_DEBUG_PICT_INFO)
+ av_log(s->avctx, AV_LOG_DEBUG, "vbv buffer: %d, bitrate:%d\n",
+ s->avctx->rc_buffer_size, s->bit_rate);
+
+ return 0;
+}
+
+static int vcr2_init_sequence(AVCodecContext *avctx)
+{
+ Mpeg1Context *s1 = avctx->priv_data;
+ MpegEncContext *s = &s1->mpeg_enc_ctx;
+ int i, v;
+
+ /* start new MPEG-1 context decoding */
+ s->out_format = FMT_MPEG1;
+ if (s1->mpeg_enc_ctx_allocated) {
+ MPV_common_end(s);
+ }
+ s->width = avctx->coded_width;
+ s->height = avctx->coded_height;
+ avctx->has_b_frames= 0; //true?
+ s->low_delay= 1;
+
+ avctx->pix_fmt = mpeg_get_pixelformat(avctx);
+
+ if (MPV_common_init(s) < 0)
+ return -1;
+ exchange_uv(s); // common init resets pblocks, so we swap them here
+ s->swap_uv = 1;// in case of xvmc we need to swap uv for each MB
+ s1->mpeg_enc_ctx_allocated = 1;
+
+ for(i=0;i<64;i++) {
+ int j= s->dsp.idct_permutation[i];
+ v = ff_mpeg1_default_intra_matrix[i];
+ s->intra_matrix[j] = v;
+ s->chroma_intra_matrix[j] = v;
+
+ v = ff_mpeg1_default_non_intra_matrix[i];
+ s->inter_matrix[j] = v;
+ s->chroma_inter_matrix[j] = v;
+ }
+
+ s->progressive_sequence = 1;
+ s->progressive_frame = 1;
+ s->picture_structure = PICT_FRAME;
+ s->frame_pred_frame_dct = 1;
+ s->chroma_format = 1;
+ s->codec_id= s->avctx->codec_id= CODEC_ID_MPEG2VIDEO;
+ avctx->sub_id = 2; /* indicates MPEG-2 */
+ s1->save_width = s->width;
+ s1->save_height = s->height;
+ s1->save_progressive_seq = s->progressive_sequence;
+ return 0;
+}
+
+
+static void mpeg_decode_user_data(AVCodecContext *avctx,
+ const uint8_t *p, int buf_size)
+{
+ const uint8_t *buf = p;
+ const uint8_t *buf_end = p+buf_size;
+
+ /* we parse the DTG active format information */
+ if (buf_end - p >= 5 &&
+ p[0] == 'D' && p[1] == 'T' && p[2] == 'G' && p[3] == '1') {
+ int flags = p[4];
+ p += 5;
+ if (flags & 0x80) {
+ /* skip event id */
+ p += 2;
+ }
+ if (flags & 0x40) {
+ if (buf_end - p < 1)
+ return;
+ avctx->dtg_active_format = p[0] & 0x0f;
+ }
+ }
+ /* ffdshow custom code */
+ else if (avctx->handle_user_data) {
+ uint32_t state;
+ uint8_t *user_data_end = ff_find_start_code(buf, buf + buf_size, &state);
+ avctx->handle_user_data(avctx, buf, user_data_end - buf);
+ }
+}
+
+static void mpeg_decode_gop(AVCodecContext *avctx,
+ const uint8_t *buf, int buf_size){
+ Mpeg1Context *s1 = avctx->priv_data;
+ MpegEncContext *s = &s1->mpeg_enc_ctx;
+
+ int drop_frame_flag;
+ int time_code_hours, time_code_minutes;
+ int time_code_seconds, time_code_pictures;
+ int broken_link;
+
+ init_get_bits(&s->gb, buf, buf_size*8);
+
+ drop_frame_flag = get_bits1(&s->gb);
+
+ time_code_hours=get_bits(&s->gb,5);
+ time_code_minutes = get_bits(&s->gb,6);
+ skip_bits1(&s->gb);//marker bit
+ time_code_seconds = get_bits(&s->gb,6);
+ time_code_pictures = get_bits(&s->gb,6);
+
+ s->closed_gop = get_bits1(&s->gb);
+ /* broken_link indicates that, after editing, the
+ reference frames of the first B-frames following the GOP I-frame
+ are missing (open GOP) */
+ broken_link = get_bits1(&s->gb);
+
+ if(s->avctx->debug & FF_DEBUG_PICT_INFO)
+ av_log(s->avctx, AV_LOG_DEBUG, "GOP (%2d:%02d:%02d.[%02d]) closed_gop=%d broken_link=%d\n",
+ time_code_hours, time_code_minutes, time_code_seconds,
+ time_code_pictures, s->closed_gop, broken_link);
+}
+/**
+ * Finds the end of the current frame in the bitstream.
+ * @return the position of the first byte of the next frame, or -1
+ */
+int ff_mpeg1_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size, AVCodecParserContext *s, int64_t *rtStart, AVCodecContext *avctx) /* rtStart,avctx: ffdshow custom code */
+{
+ int i;
+ uint32_t state= pc->state;
+
+ /* EOF considered as end of frame */
+ if (buf_size == 0)
+ return 0;
+
+/*
+ 0 frame start -> 1/4
+ 1 first_SEQEXT -> 0/2
+ 2 first field start -> 3/0
+ 3 second_SEQEXT -> 2/0
+ 4 searching end
+*/
+
+ for(i=0; i<buf_size; i++){
+ assert(pc->frame_start_found>=0 && pc->frame_start_found<=4);
+ if(pc->frame_start_found&1){
+ if(state == EXT_START_CODE && (buf[i]&0xF0) != 0x80)
+ pc->frame_start_found--;
+ else if(state == EXT_START_CODE+2){
+ if((buf[i]&3) == 3) pc->frame_start_found= 0;
+ else pc->frame_start_found= (pc->frame_start_found+1)&3;
+ }
+ state++;
+ }else{
+ i= ff_find_start_code(buf+i, buf+buf_size, &state) - buf - 1;
+ if(pc->frame_start_found==0 && state >= SLICE_MIN_START_CODE && state <= SLICE_MAX_START_CODE){
+ i++;
+ pc->frame_start_found=4;
+
+ pc->rtStart=*rtStart; /* ffdshow custom code */
+ *rtStart=_I64_MIN; /* ffdshow custom code */
+ }
+ if(state == SEQ_END_CODE){
+ pc->state=-1;
+ /* ffdshow custom code (i-3 instead of i+1) */
+ /* DVDs won't send the next frame start on still images */
+ /* SEQ_END_CODE will have to stay at the beginning of the next frame */
+ if (avctx->isDVD)
+ return i-3;
+ else
+ return i+1;
+ }
+ if(pc->frame_start_found==2 && state == SEQ_START_CODE)
+ pc->frame_start_found= 0;
+ if(pc->frame_start_found<4 && state == EXT_START_CODE)
+ pc->frame_start_found++;
+ if(pc->frame_start_found == 4 && (state&0xFFFFFF00) == 0x100){
+ if(state < SLICE_MIN_START_CODE || state > SLICE_MAX_START_CODE){
+ pc->frame_start_found=0;
+ pc->state=-1;
+ return i-3;
+ }
+ }
+ if(s && state == PICTURE_START_CODE){
+ ff_fetch_timestamp(s, i-3, 1);
+ }
+ }
+ }
+ pc->state= state;
+ return END_NOT_FOUND;
+}
+
+static int decode_chunks(AVCodecContext *avctx,
+ AVFrame *picture, int *data_size,
+ const uint8_t *buf, int buf_size);
+
+/* handle buffering and image synchronisation */
+static int mpeg_decode_frame(AVCodecContext *avctx,
+ void *data, int *data_size,
+ const uint8_t *buf, int buf_size)
+{
+ Mpeg1Context *s = avctx->priv_data;
+ AVFrame *picture = data;
+ MpegEncContext *s2 = &s->mpeg_enc_ctx;
+ dprintf(avctx, "fill_buffer\n");
+
+ if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == SEQ_END_CODE)) {
+ /* special case for last picture */
+ if (s2->low_delay==0 && s2->next_picture_ptr) {
+ *picture= *(AVFrame*)s2->next_picture_ptr;
+ s2->next_picture_ptr= NULL;
+ picture->mpeg2_sequence_end_flag = 1; /* ffdshow custom code */
+
+ *data_size = sizeof(AVFrame);
+ }
+ return buf_size;
+ }
+
+ if(s2->flags&CODEC_FLAG_TRUNCATED){
+ int next = ff_mpeg1_find_frame_end(&s2->parse_context, buf, buf_size, NULL, avctx->parserRtStart, avctx); /* avctx->parserRtStart: ffdshow custom code */
+
+ if( ff_combine_frame(&s2->parse_context, next, (const uint8_t **)&buf, &buf_size) < 0 )
+ return buf_size;
+ }
+
+#if 0
+ if (s->repeat_field % 2 == 1) {
+ s->repeat_field++;
+ //fprintf(stderr,"\nRepeating last frame: %d -> %d! pict: %d %d", avctx->frame_number-1, avctx->frame_number,
+ // s2->picture_number, s->repeat_field);
+ if (avctx->flags & CODEC_FLAG_REPEAT_FIELD) {
+ *data_size = sizeof(AVPicture);
+ goto the_end;
+ }
+ }
+#endif
+
+ if(s->mpeg_enc_ctx_allocated==0 && avctx->codec_tag == AV_RL32("VCR2"))
+ vcr2_init_sequence(avctx);
+
+ s->slice_count= 0;
+
+ if(avctx->extradata && !avctx->frame_number)
+ decode_chunks(avctx, picture, data_size, avctx->extradata, avctx->extradata_size);
+
+ return decode_chunks(avctx, picture, data_size, buf, buf_size);
+}
+
+static int decode_chunks(AVCodecContext *avctx,
+ AVFrame *picture, int *data_size,
+ const uint8_t *buf, int buf_size)
+{
+ Mpeg1Context *s = avctx->priv_data;
+ MpegEncContext *s2 = &s->mpeg_enc_ctx;
+ const uint8_t *buf_ptr = buf;
+ const uint8_t *buf_end = buf + buf_size;
+ int ret, input_size;
+ int last_code= 0;
+
+ for(;;) {
+ /* find next start code */
+ uint32_t start_code = -1;
+ buf_ptr = ff_find_start_code(buf_ptr,buf_end, &start_code);
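+ /* start codes are 0x100..0x1ff; anything larger means no further start code
+ was found in the buffer, so flush pending slices and finish the picture */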
+ if (start_code > 0x1ff){
+ if(s2->pict_type != FF_B_TYPE || avctx->skip_frame <= AVDISCARD_DEFAULT){
+ if(avctx->thread_count > 1){
+ int i;
+
+ avctx->execute(avctx, slice_decode_thread, &s2->thread_context[0], NULL, s->slice_count, sizeof(void*));
+ for(i=0; i<s->slice_count; i++)
+ s2->error_count += s2->thread_context[i]->error_count;
+ }
+
+ if (slice_end(avctx, picture)) {
+ if(s2->last_picture_ptr || s2->low_delay) //FIXME merge with the stuff in mpeg_decode_slice
+ *data_size = sizeof(AVPicture);
+ }
+ }
+ s2->pict_type= 0;
+ return FFMAX(0, buf_ptr - buf - s2->parse_context.last_index);
+ }
+
+ input_size = buf_end - buf_ptr;
+
+ if(avctx->debug & FF_DEBUG_STARTCODE){
+ av_log(avctx, AV_LOG_DEBUG, "%3X at %td left %d\n", start_code, buf_ptr-buf, input_size);
+ }
+
+ /* prepare data for next start code */
+ switch(start_code) {
+ case SEQ_START_CODE:
+ if(last_code == 0){
+ mpeg1_decode_sequence(avctx, buf_ptr,
+ input_size);
+ s->sync=1;
+ }else{
+ av_log(avctx, AV_LOG_ERROR, "ignoring SEQ_START_CODE after %X\n", last_code);
+ }
+ break;
+
+ case PICTURE_START_CODE:
+ if(last_code == 0 || last_code == SLICE_MIN_START_CODE){
+ if(mpeg_decode_postinit(avctx) < 0){
+ av_log(avctx, AV_LOG_ERROR, "mpeg_decode_postinit() failure\n");
+ return -1;
+ }
+
+ /* we have a complete image: we try to decompress it */
+ if(mpeg1_decode_picture(avctx,
+ buf_ptr, input_size) < 0)
+ s2->pict_type=0;
+ s2->first_slice = 1;
+ last_code= PICTURE_START_CODE;
+ }else{
+ av_log(avctx, AV_LOG_ERROR, "ignoring pic after %X\n", last_code);
+ }
+ break;
+ case EXT_START_CODE:
+ init_get_bits(&s2->gb, buf_ptr, input_size*8);
+
+ switch(get_bits(&s2->gb, 4)) {
+ case 0x1:
+ if(last_code == 0){
+ mpeg_decode_sequence_extension(s);
+ }else{
+ av_log(avctx, AV_LOG_ERROR, "ignoring seq ext after %X\n", last_code);
+ }
+ break;
+ case 0x2:
+ mpeg_decode_sequence_display_extension(s);
+ break;
+ case 0x3:
+ mpeg_decode_quant_matrix_extension(s2);
+ break;
+ case 0x7:
+ mpeg_decode_picture_display_extension(s);
+ break;
+ case 0x8:
+ if(last_code == PICTURE_START_CODE){
+ mpeg_decode_picture_coding_extension(s);
+ }else{
+ av_log(avctx, AV_LOG_ERROR, "ignoring pic cod ext after %X\n", last_code);
+ }
+ break;
+ }
+ break;
+ case USER_START_CODE:
+ mpeg_decode_user_data(avctx,
+ buf_ptr, input_size);
+ break;
+ case GOP_START_CODE:
+ if(last_code == 0){
+ s2->first_field=0;
+ mpeg_decode_gop(avctx,
+ buf_ptr, input_size);
+ s->sync=1;
+ }else{
+ av_log(avctx, AV_LOG_ERROR, "ignoring GOP_START_CODE after %X\n", last_code);
+ }
+ break;
+ default:
+ if (start_code >= SLICE_MIN_START_CODE &&
+ start_code <= SLICE_MAX_START_CODE && last_code!=0) {
+ const int field_pic= s2->picture_structure != PICT_FRAME;
+ int mb_y= (start_code - SLICE_MIN_START_CODE) << field_pic;
+ last_code= SLICE_MIN_START_CODE;
+
+ if(s2->picture_structure == PICT_BOTTOM_FIELD)
+ mb_y++;
+
+ if (mb_y >= s2->mb_height){
+ av_log(s2->avctx, AV_LOG_ERROR, "slice below image (%d >= %d)\n", mb_y, s2->mb_height);
+ return -1;
+ }
+
+ if(s2->last_picture_ptr==NULL){
+ /* Skip B-frames if we do not have reference frames and gop is not closed */
+ if(s2->pict_type==FF_B_TYPE){
+ if(!s2->closed_gop)
+ break;
+ }
+ }
+ if(s2->pict_type==FF_I_TYPE)
+ s->sync=1;
+ if(s2->next_picture_ptr==NULL){
+ /* Skip P-frames if we do not have a reference frame or we have an invalid header. */
+ if(s2->pict_type==FF_P_TYPE && !s->sync) break;
+ }
+ /* Skip B-frames if we are in a hurry. */
+ if(avctx->hurry_up && s2->pict_type==FF_B_TYPE) break;
+ if( (avctx->skip_frame >= AVDISCARD_NONREF && s2->pict_type==FF_B_TYPE)
+ ||(avctx->skip_frame >= AVDISCARD_NONKEY && s2->pict_type!=FF_I_TYPE)
+ || avctx->skip_frame >= AVDISCARD_ALL)
+ break;
+ /* Skip everything if we are in a hurry>=5. */
+ if(avctx->hurry_up>=5) break;
+
+ if (!s->mpeg_enc_ctx_allocated) break;
+
+ if(s2->codec_id == CODEC_ID_MPEG2VIDEO){
+ if(mb_y < avctx->skip_top || mb_y >= s2->mb_height - avctx->skip_bottom)
+ break;
+ }
+
+ if(!s2->pict_type){
+ av_log(avctx, AV_LOG_ERROR, "Missing picture start code\n");
+ break;
+ }
+
+ if(s2->first_slice){
+ s2->first_slice=0;
+ if(mpeg_field_start(s2, buf, buf_size) < 0)
+ return -1;
+ }
+ if(!s2->current_picture_ptr){
+ av_log(avctx, AV_LOG_ERROR, "current_picture not initialized\n");
+ return -1;
+ }
+
+ if(avctx->thread_count > 1){
+ int threshold= (s2->mb_height*s->slice_count + avctx->thread_count/2) / avctx->thread_count;
+ if(threshold <= mb_y){
+ MpegEncContext *thread_context= s2->thread_context[s->slice_count];
+
+ thread_context->start_mb_y= mb_y;
+ thread_context->end_mb_y = s2->mb_height;
+ if(s->slice_count){
+ s2->thread_context[s->slice_count-1]->end_mb_y= mb_y;
+ ff_update_duplicate_context(thread_context, s2);
+ }
+ init_get_bits(&thread_context->gb, buf_ptr, input_size*8);
+ s->slice_count++;
+ }
+ buf_ptr += 2; //FIXME add minimum number of bytes per slice
+ }else{
+ ret = mpeg_decode_slice(s, mb_y, &buf_ptr, input_size);
+ emms_c();
+
+ if(ret < 0){
+ if(s2->resync_mb_x>=0 && s2->resync_mb_y>=0)
+ ff_er_add_slice(s2, s2->resync_mb_x, s2->resync_mb_y, s2->mb_x, s2->mb_y, AC_ERROR|DC_ERROR|MV_ERROR);
+ // ==> Start patch MPC
+ else if (!(s2->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU))
+ //}else{
+ // <== End patch MPC
+ ff_er_add_slice(s2, s2->resync_mb_x, s2->resync_mb_y, s2->mb_x-1, s2->mb_y, AC_END|DC_END|MV_END);
+ }
+ }
+ }
+ break;
+ }
+ }
+}
+
+static void flush(AVCodecContext *avctx){
+ Mpeg1Context *s = avctx->priv_data;
+
+ s->sync=0;
+
+ ff_mpeg_flush(avctx);
+}
+
+static int mpeg_decode_end(AVCodecContext *avctx)
+{
+ Mpeg1Context *s = avctx->priv_data;
+
+ if (s->mpeg_enc_ctx_allocated)
+ MPV_common_end(&s->mpeg_enc_ctx);
+ return 0;
+}
+
+AVCodec mpeg1video_decoder = {
+ "mpeg1video",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_MPEG1VIDEO,
+ sizeof(Mpeg1Context),
+ /*.init=*/mpeg_decode_init,
+ /*.encode=*/NULL,
+ /*.close=*/mpeg_decode_end,
+ /*.decode=*/mpeg_decode_frame,
+ /*.capabilities=*/CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY,
+ /*.next=*/NULL,
+ /*.flush=*/flush,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name= */NULL_IF_CONFIG_SMALL("MPEG-1 video"),
+};
+
+AVCodec mpeg2video_decoder = {
+ "mpeg2video",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_MPEG2VIDEO,
+ sizeof(Mpeg1Context),
+ /*.init=*/mpeg_decode_init,
+ /*.encode=*/NULL,
+ /*.close=*/mpeg_decode_end,
+ /*.decode=*/mpeg_decode_frame,
+ /*.capabilities=*/CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
+ /*.next=*/NULL,
+ /*.flush=*/flush,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name= */NULL_IF_CONFIG_SMALL("MPEG-2 video"),
+};
+
+//legacy decoder
+AVCodec mpegvideo_decoder = {
+ "mpegvideo",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_MPEG2VIDEO,
+ sizeof(Mpeg1Context),
+ /*.init=*/mpeg_decode_init,
+ /*.encode=*/NULL,
+ /*.close=*/mpeg_decode_end,
+ /*.decode=*/mpeg_decode_frame,
+ /*.capabilities=*/CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY,
+ /*.next=*/NULL,
+ /*.flush=*/flush,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name= */NULL_IF_CONFIG_SMALL("MPEG-1 video"),
+};
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg12.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg12.h
new file mode 100644
index 000000000..30bb675d3
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg12.h
@@ -0,0 +1,61 @@
+/*
+ * MPEG1/2 common code
+ * Copyright (c) 2007 Aurelien Jacobs <aurel@gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_MPEG12_H
+#define AVCODEC_MPEG12_H
+
+#include "mpegvideo.h"
+
+#define DC_VLC_BITS 9
+#define TEX_VLC_BITS 9
+
+static VLC dc_lum_vlc;
+static VLC dc_chroma_vlc;
+
+extern uint8_t ff_mpeg12_static_rl_table_store[2][2][2*MAX_RUN + MAX_LEVEL + 3];
+
+void ff_mpeg12_common_init(MpegEncContext *s);
+void ff_mpeg12_init_vlcs(void);
+
+static inline int decode_dc(GetBitContext *gb, int component)
+{
+ int code, diff;
+
+ if (component == 0) {
+ code = get_vlc2(gb, dc_lum_vlc.table, DC_VLC_BITS, 2);
+ } else {
+ code = get_vlc2(gb, dc_chroma_vlc.table, DC_VLC_BITS, 2);
+ }
+ if (code < 0){
+ av_log(NULL, AV_LOG_ERROR, "invalid dc code at\n");
+ return 0xffff;
+ }
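+ /* the VLC yields dct_dc_size: 0 means a zero differential, otherwise
+ get_xbits() reads that many bits as the signed DC differential */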
+ if (code == 0) {
+ diff = 0;
+ } else {
+ diff = get_xbits(gb, code);
+ }
+ return diff;
+}
+
+extern int ff_mpeg1_decode_block_intra(MpegEncContext *s, DCTELEM *block, int n);
+
+#endif /* AVCODEC_MPEG12_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg12data.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg12data.c
new file mode 100644
index 000000000..229ec3ba2
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg12data.c
@@ -0,0 +1,366 @@
+/*
+ * MPEG1/2 tables
+ * copyright (c) 2000,2001 Fabrice Bellard
+ * copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file mpeg12data.c
+ * MPEG1/2 tables.
+ */
+
+#include "mpeg12data.h"
+
+const uint16_t ff_mpeg1_default_intra_matrix[64] = {
+ 8, 16, 19, 22, 26, 27, 29, 34,
+ 16, 16, 22, 24, 27, 29, 34, 37,
+ 19, 22, 26, 27, 29, 34, 34, 38,
+ 22, 22, 26, 27, 29, 34, 37, 40,
+ 22, 26, 27, 29, 32, 35, 40, 48,
+ 26, 27, 29, 32, 35, 40, 48, 58,
+ 26, 27, 29, 34, 38, 46, 56, 69,
+ 27, 29, 35, 38, 46, 56, 69, 83
+};
+
+const uint16_t ff_mpeg1_default_non_intra_matrix[64] = {
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+};
+
+const uint16_t ff_mpeg12_vlc_dc_lum_code[12] = {
+ 0x4, 0x0, 0x1, 0x5, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe, 0x1ff,
+};
+const unsigned char ff_mpeg12_vlc_dc_lum_bits[12] = {
+ 3, 2, 2, 3, 3, 4, 5, 6, 7, 8, 9, 9,
+};
+
+const uint16_t ff_mpeg12_vlc_dc_chroma_code[12] = {
+ 0x0, 0x1, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe, 0x3fe, 0x3ff,
+};
+const unsigned char ff_mpeg12_vlc_dc_chroma_bits[12] = {
+ 2, 2, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10,
+};
+
+static const uint16_t mpeg1_vlc[113][2] = {
+ { 0x3, 2 }, { 0x4, 4 }, { 0x5, 5 }, { 0x6, 7 },
+ { 0x26, 8 }, { 0x21, 8 }, { 0xa, 10 }, { 0x1d, 12 },
+ { 0x18, 12 }, { 0x13, 12 }, { 0x10, 12 }, { 0x1a, 13 },
+ { 0x19, 13 }, { 0x18, 13 }, { 0x17, 13 }, { 0x1f, 14 },
+ { 0x1e, 14 }, { 0x1d, 14 }, { 0x1c, 14 }, { 0x1b, 14 },
+ { 0x1a, 14 }, { 0x19, 14 }, { 0x18, 14 }, { 0x17, 14 },
+ { 0x16, 14 }, { 0x15, 14 }, { 0x14, 14 }, { 0x13, 14 },
+ { 0x12, 14 }, { 0x11, 14 }, { 0x10, 14 }, { 0x18, 15 },
+ { 0x17, 15 }, { 0x16, 15 }, { 0x15, 15 }, { 0x14, 15 },
+ { 0x13, 15 }, { 0x12, 15 }, { 0x11, 15 }, { 0x10, 15 },
+ { 0x3, 3 }, { 0x6, 6 }, { 0x25, 8 }, { 0xc, 10 },
+ { 0x1b, 12 }, { 0x16, 13 }, { 0x15, 13 }, { 0x1f, 15 },
+ { 0x1e, 15 }, { 0x1d, 15 }, { 0x1c, 15 }, { 0x1b, 15 },
+ { 0x1a, 15 }, { 0x19, 15 }, { 0x13, 16 }, { 0x12, 16 },
+ { 0x11, 16 }, { 0x10, 16 }, { 0x5, 4 }, { 0x4, 7 },
+ { 0xb, 10 }, { 0x14, 12 }, { 0x14, 13 }, { 0x7, 5 },
+ { 0x24, 8 }, { 0x1c, 12 }, { 0x13, 13 }, { 0x6, 5 },
+ { 0xf, 10 }, { 0x12, 12 }, { 0x7, 6 }, { 0x9, 10 },
+ { 0x12, 13 }, { 0x5, 6 }, { 0x1e, 12 }, { 0x14, 16 },
+ { 0x4, 6 }, { 0x15, 12 }, { 0x7, 7 }, { 0x11, 12 },
+ { 0x5, 7 }, { 0x11, 13 }, { 0x27, 8 }, { 0x10, 13 },
+ { 0x23, 8 }, { 0x1a, 16 }, { 0x22, 8 }, { 0x19, 16 },
+ { 0x20, 8 }, { 0x18, 16 }, { 0xe, 10 }, { 0x17, 16 },
+ { 0xd, 10 }, { 0x16, 16 }, { 0x8, 10 }, { 0x15, 16 },
+ { 0x1f, 12 }, { 0x1a, 12 }, { 0x19, 12 }, { 0x17, 12 },
+ { 0x16, 12 }, { 0x1f, 13 }, { 0x1e, 13 }, { 0x1d, 13 },
+ { 0x1c, 13 }, { 0x1b, 13 }, { 0x1f, 16 }, { 0x1e, 16 },
+ { 0x1d, 16 }, { 0x1c, 16 }, { 0x1b, 16 },
+ { 0x1, 6 }, /* escape */
+ { 0x2, 2 }, /* EOB */
+};
+
+static const uint16_t mpeg2_vlc[113][2] = {
+ {0x02, 2}, {0x06, 3}, {0x07, 4}, {0x1c, 5},
+ {0x1d, 5}, {0x05, 6}, {0x04, 6}, {0x7b, 7},
+ {0x7c, 7}, {0x23, 8}, {0x22, 8}, {0xfa, 8},
+ {0xfb, 8}, {0xfe, 8}, {0xff, 8}, {0x1f,14},
+ {0x1e,14}, {0x1d,14}, {0x1c,14}, {0x1b,14},
+ {0x1a,14}, {0x19,14}, {0x18,14}, {0x17,14},
+ {0x16,14}, {0x15,14}, {0x14,14}, {0x13,14},
+ {0x12,14}, {0x11,14}, {0x10,14}, {0x18,15},
+ {0x17,15}, {0x16,15}, {0x15,15}, {0x14,15},
+ {0x13,15}, {0x12,15}, {0x11,15}, {0x10,15},
+ {0x02, 3}, {0x06, 5}, {0x79, 7}, {0x27, 8},
+ {0x20, 8}, {0x16,13}, {0x15,13}, {0x1f,15},
+ {0x1e,15}, {0x1d,15}, {0x1c,15}, {0x1b,15},
+ {0x1a,15}, {0x19,15}, {0x13,16}, {0x12,16},
+ {0x11,16}, {0x10,16}, {0x05, 5}, {0x07, 7},
+ {0xfc, 8}, {0x0c,10}, {0x14,13}, {0x07, 5},
+ {0x26, 8}, {0x1c,12}, {0x13,13}, {0x06, 6},
+ {0xfd, 8}, {0x12,12}, {0x07, 6}, {0x04, 9},
+ {0x12,13}, {0x06, 7}, {0x1e,12}, {0x14,16},
+ {0x04, 7}, {0x15,12}, {0x05, 7}, {0x11,12},
+ {0x78, 7}, {0x11,13}, {0x7a, 7}, {0x10,13},
+ {0x21, 8}, {0x1a,16}, {0x25, 8}, {0x19,16},
+ {0x24, 8}, {0x18,16}, {0x05, 9}, {0x17,16},
+ {0x07, 9}, {0x16,16}, {0x0d,10}, {0x15,16},
+ {0x1f,12}, {0x1a,12}, {0x19,12}, {0x17,12},
+ {0x16,12}, {0x1f,13}, {0x1e,13}, {0x1d,13},
+ {0x1c,13}, {0x1b,13}, {0x1f,16}, {0x1e,16},
+ {0x1d,16}, {0x1c,16}, {0x1b,16},
+ {0x01,6}, /* escape */
+ {0x06,4}, /* EOB */
+};
+
+static const int8_t mpeg1_level[111] = {
+ 1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30, 31, 32,
+ 33, 34, 35, 36, 37, 38, 39, 40,
+ 1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 1, 2, 3, 4, 5, 1,
+ 2, 3, 4, 1, 2, 3, 1, 2,
+ 3, 1, 2, 3, 1, 2, 1, 2,
+ 1, 2, 1, 2, 1, 2, 1, 2,
+ 1, 2, 1, 2, 1, 2, 1, 2,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1,
+};
+
+static const int8_t mpeg1_run[111] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 3,
+ 3, 3, 3, 4, 4, 4, 5, 5,
+ 5, 6, 6, 6, 7, 7, 8, 8,
+ 9, 9, 10, 10, 11, 11, 12, 12,
+ 13, 13, 14, 14, 15, 15, 16, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30, 31,
+};
+
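+// The RLTable initializers below follow the field order declared in rl.h:
+// { n, last, table_vlc, table_run, table_level }.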
+RLTable ff_rl_mpeg1 = {
+ 111,
+ 111,
+ mpeg1_vlc,
+ mpeg1_run,
+ mpeg1_level,
+};
+
+RLTable ff_rl_mpeg2 = {
+ 111,
+ 111,
+ mpeg2_vlc,
+ mpeg1_run,
+ mpeg1_level,
+};
+
+const uint8_t ff_mpeg12_mbAddrIncrTable[36][2] = {
+ {0x1, 1},
+ {0x3, 3},
+ {0x2, 3},
+ {0x3, 4},
+ {0x2, 4},
+ {0x3, 5},
+ {0x2, 5},
+ {0x7, 7},
+ {0x6, 7},
+ {0xb, 8},
+ {0xa, 8},
+ {0x9, 8},
+ {0x8, 8},
+ {0x7, 8},
+ {0x6, 8},
+ {0x17, 10},
+ {0x16, 10},
+ {0x15, 10},
+ {0x14, 10},
+ {0x13, 10},
+ {0x12, 10},
+ {0x23, 11},
+ {0x22, 11},
+ {0x21, 11},
+ {0x20, 11},
+ {0x1f, 11},
+ {0x1e, 11},
+ {0x1d, 11},
+ {0x1c, 11},
+ {0x1b, 11},
+ {0x1a, 11},
+ {0x19, 11},
+ {0x18, 11},
+ {0x8, 11}, /* escape */
+ {0xf, 11}, /* stuffing */
+ {0x0, 8}, /* end (and 15 more 0 bits should follow) */
+};
+
+const uint8_t ff_mpeg12_mbPatTable[64][2] = {
+ {0x1, 9},
+ {0xb, 5},
+ {0x9, 5},
+ {0xd, 6},
+ {0xd, 4},
+ {0x17, 7},
+ {0x13, 7},
+ {0x1f, 8},
+ {0xc, 4},
+ {0x16, 7},
+ {0x12, 7},
+ {0x1e, 8},
+ {0x13, 5},
+ {0x1b, 8},
+ {0x17, 8},
+ {0x13, 8},
+ {0xb, 4},
+ {0x15, 7},
+ {0x11, 7},
+ {0x1d, 8},
+ {0x11, 5},
+ {0x19, 8},
+ {0x15, 8},
+ {0x11, 8},
+ {0xf, 6},
+ {0xf, 8},
+ {0xd, 8},
+ {0x3, 9},
+ {0xf, 5},
+ {0xb, 8},
+ {0x7, 8},
+ {0x7, 9},
+ {0xa, 4},
+ {0x14, 7},
+ {0x10, 7},
+ {0x1c, 8},
+ {0xe, 6},
+ {0xe, 8},
+ {0xc, 8},
+ {0x2, 9},
+ {0x10, 5},
+ {0x18, 8},
+ {0x14, 8},
+ {0x10, 8},
+ {0xe, 5},
+ {0xa, 8},
+ {0x6, 8},
+ {0x6, 9},
+ {0x12, 5},
+ {0x1a, 8},
+ {0x16, 8},
+ {0x12, 8},
+ {0xd, 5},
+ {0x9, 8},
+ {0x5, 8},
+ {0x5, 9},
+ {0xc, 5},
+ {0x8, 8},
+ {0x4, 8},
+ {0x4, 9},
+ {0x7, 3},
+ {0xa, 5},
+ {0x8, 5},
+ {0xc, 6}
+};
+
+const uint8_t ff_mpeg12_mbMotionVectorTable[17][2] = {
+{ 0x1, 1 },
+{ 0x1, 2 },
+{ 0x1, 3 },
+{ 0x1, 4 },
+{ 0x3, 6 },
+{ 0x5, 7 },
+{ 0x4, 7 },
+{ 0x3, 7 },
+{ 0xb, 9 },
+{ 0xa, 9 },
+{ 0x9, 9 },
+{ 0x11, 10 },
+{ 0x10, 10 },
+{ 0xf, 10 },
+{ 0xe, 10 },
+{ 0xd, 10 },
+{ 0xc, 10 },
+};
+
+const AVRational ff_frame_rate_tab[] = {
+ { 0, 0},
+ {24000, 1001},
+ { 24, 1},
+ { 25, 1},
+ {30000, 1001},
+ { 30, 1},
+ { 50, 1},
+ {60000, 1001},
+ { 60, 1},
+ // Xing's 15fps: (9)
+ { 15, 1},
+ // libmpeg3's "Unofficial economy rates": (10-13)
+ { 5, 1},
+ { 10, 1},
+ { 12, 1},
+ { 15, 1},
+ { 0, 0},
+};
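+// The table above is indexed by the 4-bit frame_rate_code from the sequence
+// header; indices 9 and up are the non-standard extensions noted above.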
+
+const float ff_mpeg1_aspect[16]={
+ 0.0000,
+ 1.0000,
+ 0.6735,
+ 0.7031,
+
+ 0.7615,
+ 0.8055,
+ 0.8437,
+ 0.8935,
+
+ 0.9157,
+ 0.9815,
+ 1.0255,
+ 1.0695,
+
+ 1.0950,
+ 1.1575,
+ 1.2015,
+};
+
+const AVRational ff_mpeg2_aspect[16]={
+ {0,1},
+ {1,1},
+ {4,3},
+ {16,9},
+ {221,100},
+ {0,1},
+ {0,1},
+ {0,1},
+ {0,1},
+ {0,1},
+ {0,1},
+ {0,1},
+ {0,1},
+ {0,1},
+ {0,1},
+ {0,1},
+};
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg12data.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg12data.h
new file mode 100644
index 000000000..a6dbd1c49
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg12data.h
@@ -0,0 +1,56 @@
+/*
+ * MPEG1/2 tables
+ * copyright (c) 2000,2001 Fabrice Bellard
+ * copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file mpeg12data.h
+ * MPEG1/2 tables.
+ */
+
+#ifndef AVCODEC_MPEG12DATA_H
+#define AVCODEC_MPEG12DATA_H
+
+#include <stdint.h>
+#include "libavutil/rational.h"
+#include "rl.h"
+
+extern const uint16_t ff_mpeg1_default_intra_matrix[64];
+extern const uint16_t ff_mpeg1_default_non_intra_matrix[64];
+
+extern const uint16_t ff_mpeg12_vlc_dc_lum_code[12];
+extern const unsigned char ff_mpeg12_vlc_dc_lum_bits[12];
+extern const uint16_t ff_mpeg12_vlc_dc_chroma_code[12];
+extern const unsigned char ff_mpeg12_vlc_dc_chroma_bits[12];
+
+extern RLTable ff_rl_mpeg1;
+extern RLTable ff_rl_mpeg2;
+
+extern const uint8_t ff_mpeg12_mbAddrIncrTable[36][2];
+extern const uint8_t ff_mpeg12_mbPatTable[64][2];
+
+extern const uint8_t ff_mpeg12_mbMotionVectorTable[17][2];
+
+extern const AVRational ff_frame_rate_tab[];
+
+extern const float ff_mpeg1_aspect[16];
+extern const AVRational ff_mpeg2_aspect[16];
+
+#endif /* AVCODEC_MPEG12DATA_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg12decdata.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg12decdata.h
new file mode 100644
index 000000000..b3da369b1
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg12decdata.h
@@ -0,0 +1,124 @@
+/*
+ * MPEG1/2 decoder tables
+ * copyright (c) 2000,2001 Fabrice Bellard
+ * copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file mpeg12decdata.h
+ * MPEG1/2 decoder tables.
+ */
+
+#ifndef AVCODEC_MPEG12DECDATA_H
+#define AVCODEC_MPEG12DECDATA_H
+
+#include <stdint.h>
+#include "mpegvideo.h"
+
+
+#define MB_TYPE_ZERO_MV 0x20000000
+#define IS_ZERO_MV(a) ((a)&MB_TYPE_ZERO_MV)
+
+static const uint8_t table_mb_ptype[7][2] = {
+ { 3, 5 }, // 0x01 MB_INTRA
+ { 1, 2 }, // 0x02 MB_PAT
+ { 1, 3 }, // 0x08 MB_FOR
+ { 1, 1 }, // 0x0A MB_FOR|MB_PAT
+ { 1, 6 }, // 0x11 MB_QUANT|MB_INTRA
+ { 1, 5 }, // 0x12 MB_QUANT|MB_PAT
+ { 2, 5 }, // 0x1A MB_QUANT|MB_FOR|MB_PAT
+};
+
+static const uint32_t ptype2mb_type[7] = {
+ MB_TYPE_INTRA,
+ MB_TYPE_L0 | MB_TYPE_CBP | MB_TYPE_ZERO_MV | MB_TYPE_16x16,
+ MB_TYPE_L0,
+ MB_TYPE_L0 | MB_TYPE_CBP,
+ MB_TYPE_QUANT | MB_TYPE_INTRA,
+ MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP | MB_TYPE_ZERO_MV | MB_TYPE_16x16,
+ MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP,
+};
+
+static const uint8_t table_mb_btype[11][2] = {
+ { 3, 5 }, // 0x01 MB_INTRA
+ { 2, 3 }, // 0x04 MB_BACK
+ { 3, 3 }, // 0x06 MB_BACK|MB_PAT
+ { 2, 4 }, // 0x08 MB_FOR
+ { 3, 4 }, // 0x0A MB_FOR|MB_PAT
+ { 2, 2 }, // 0x0C MB_FOR|MB_BACK
+ { 3, 2 }, // 0x0E MB_FOR|MB_BACK|MB_PAT
+ { 1, 6 }, // 0x11 MB_QUANT|MB_INTRA
+ { 2, 6 }, // 0x16 MB_QUANT|MB_BACK|MB_PAT
+ { 3, 6 }, // 0x1A MB_QUANT|MB_FOR|MB_PAT
+ { 2, 5 }, // 0x1E MB_QUANT|MB_FOR|MB_BACK|MB_PAT
+};
+
+static const uint32_t btype2mb_type[11] = {
+ MB_TYPE_INTRA,
+ MB_TYPE_L1,
+ MB_TYPE_L1 | MB_TYPE_CBP,
+ MB_TYPE_L0,
+ MB_TYPE_L0 | MB_TYPE_CBP,
+ MB_TYPE_L0L1,
+ MB_TYPE_L0L1 | MB_TYPE_CBP,
+ MB_TYPE_QUANT | MB_TYPE_INTRA,
+ MB_TYPE_QUANT | MB_TYPE_L1 | MB_TYPE_CBP,
+ MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP,
+ MB_TYPE_QUANT | MB_TYPE_L0L1 | MB_TYPE_CBP,
+};
+
+static const uint8_t non_linear_qscale[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8,10,12,14,16,18,20,22,
+ 24,28,32,36,40,44,48,52,
+ 56,64,72,80,88,96,104,112,
+};
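+// Presumably used when the MPEG-2 q_scale_type flag selects the non-linear
+// quantiser scale: the 5-bit quantiser_scale_code indexes this table, while
+// the linear scale is simply quantiser_scale_code * 2.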
+
+static const uint8_t mpeg2_dc_scale_table1[128]={
+// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+};
+
+static const uint8_t mpeg2_dc_scale_table2[128]={
+// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+};
+
+static const uint8_t mpeg2_dc_scale_table3[128]={
+// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+};
+
+static const uint8_t * const mpeg2_dc_scale_table[4]={
+ ff_mpeg1_dc_scale_table,
+ mpeg2_dc_scale_table1,
+ mpeg2_dc_scale_table2,
+ mpeg2_dc_scale_table3,
+};
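+// The outer index is the 2-bit intra_dc_precision (0..3), selecting a DC step
+// size of 8, 4, 2 or 1 respectively; ff_mpeg1_dc_scale_table is the MPEG-1
+// (8-bit DC) case.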
+
+#endif /* AVCODEC_MPEG12DECDATA_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg4data.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg4data.h
new file mode 100644
index 000000000..e127d76c1
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg4data.h
@@ -0,0 +1,376 @@
+/*
+ * copyright (c) 2000,2001 Fabrice Bellard
+ * H263+ support
+ * copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/mpeg4data.h
+ * mpeg4 tables.
+ */
+
+#ifndef AVCODEC_MPEG4DATA_H
+#define AVCODEC_MPEG4DATA_H
+
+#include <stdint.h>
+#include "mpegvideo.h"
+
+/* dc encoding for mpeg4 */
+const uint8_t ff_mpeg4_DCtab_lum[13][2] =
+{
+ {3,3}, {3,2}, {2,2}, {2,3}, {1,3}, {1,4}, {1,5}, {1,6}, {1,7},
+ {1,8}, {1,9}, {1,10}, {1,11},
+};
+
+const uint8_t ff_mpeg4_DCtab_chrom[13][2] =
+{
+ {3,2}, {2,2}, {1,2}, {1,3}, {1,4}, {1,5}, {1,6}, {1,7}, {1,8},
+ {1,9}, {1,10}, {1,11}, {1,12},
+};
+
+const uint16_t ff_mpeg4_intra_vlc[103][2] = {
+{ 0x2, 2 },
+{ 0x6, 3 },{ 0xf, 4 },{ 0xd, 5 },{ 0xc, 5 },
+{ 0x15, 6 },{ 0x13, 6 },{ 0x12, 6 },{ 0x17, 7 },
+{ 0x1f, 8 },{ 0x1e, 8 },{ 0x1d, 8 },{ 0x25, 9 },
+{ 0x24, 9 },{ 0x23, 9 },{ 0x21, 9 },{ 0x21, 10 },
+{ 0x20, 10 },{ 0xf, 10 },{ 0xe, 10 },{ 0x7, 11 },
+{ 0x6, 11 },{ 0x20, 11 },{ 0x21, 11 },{ 0x50, 12 },
+{ 0x51, 12 },{ 0x52, 12 },{ 0xe, 4 },{ 0x14, 6 },
+{ 0x16, 7 },{ 0x1c, 8 },{ 0x20, 9 },{ 0x1f, 9 },
+{ 0xd, 10 },{ 0x22, 11 },{ 0x53, 12 },{ 0x55, 12 },
+{ 0xb, 5 },{ 0x15, 7 },{ 0x1e, 9 },{ 0xc, 10 },
+{ 0x56, 12 },{ 0x11, 6 },{ 0x1b, 8 },{ 0x1d, 9 },
+{ 0xb, 10 },{ 0x10, 6 },{ 0x22, 9 },{ 0xa, 10 },
+{ 0xd, 6 },{ 0x1c, 9 },{ 0x8, 10 },{ 0x12, 7 },
+{ 0x1b, 9 },{ 0x54, 12 },{ 0x14, 7 },{ 0x1a, 9 },
+{ 0x57, 12 },{ 0x19, 8 },{ 0x9, 10 },{ 0x18, 8 },
+{ 0x23, 11 },{ 0x17, 8 },{ 0x19, 9 },{ 0x18, 9 },
+{ 0x7, 10 },{ 0x58, 12 },{ 0x7, 4 },{ 0xc, 6 },
+{ 0x16, 8 },{ 0x17, 9 },{ 0x6, 10 },{ 0x5, 11 },
+{ 0x4, 11 },{ 0x59, 12 },{ 0xf, 6 },{ 0x16, 9 },
+{ 0x5, 10 },{ 0xe, 6 },{ 0x4, 10 },{ 0x11, 7 },
+{ 0x24, 11 },{ 0x10, 7 },{ 0x25, 11 },{ 0x13, 7 },
+{ 0x5a, 12 },{ 0x15, 8 },{ 0x5b, 12 },{ 0x14, 8 },
+{ 0x13, 8 },{ 0x1a, 8 },{ 0x15, 9 },{ 0x14, 9 },
+{ 0x13, 9 },{ 0x12, 9 },{ 0x11, 9 },{ 0x26, 11 },
+{ 0x27, 11 },{ 0x5c, 12 },{ 0x5d, 12 },{ 0x5e, 12 },
+{ 0x5f, 12 },{ 0x3, 7 },
+};
+
+const int8_t ff_mpeg4_intra_level[102] = {
+ 1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 1, 2, 3, 4, 5,
+ 6, 7, 8, 9, 10, 1, 2, 3,
+ 4, 5, 1, 2, 3, 4, 1, 2,
+ 3, 1, 2, 3, 1, 2, 3, 1,
+ 2, 3, 1, 2, 1, 2, 1, 1,
+ 1, 1, 1, 1, 2, 3, 4, 5,
+ 6, 7, 8, 1, 2, 3, 1, 2,
+ 1, 2, 1, 2, 1, 2, 1, 2,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1,
+};
+
+const int8_t ff_mpeg4_intra_run[102] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 2, 2, 2,
+ 2, 2, 3, 3, 3, 3, 4, 4,
+ 4, 5, 5, 5, 6, 6, 6, 7,
+ 7, 7, 8, 8, 9, 9, 10, 11,
+ 12, 13, 14, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1, 1, 1, 2, 2,
+ 3, 3, 4, 4, 5, 5, 6, 6,
+ 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20,
+};
+
+RLTable ff_mpeg4_rl_intra = {
+ 102,
+ 67,
+ ff_mpeg4_intra_vlc,
+ ff_mpeg4_intra_run,
+ ff_mpeg4_intra_level,
+};
+
+/* Note this is identical to the intra rvlc except that it is reordered. */
+const uint16_t inter_rvlc[170][2]={
+{0x0006, 3},{0x0001, 4},{0x0004, 5},{0x001C, 7},
+{0x003C, 8},{0x003D, 8},{0x007C, 9},{0x00FC, 10},
+{0x00FD, 10},{0x01FC, 11},{0x01FD, 11},{0x03FC, 12},
+{0x07FC, 13},{0x07FD, 13},{0x0BFC, 13},{0x0BFD, 13},
+{0x0FFC, 14},{0x0FFD, 14},{0x1FFC, 15},{0x0007, 3},
+{0x000C, 6},{0x005C, 8},{0x007D, 9},{0x017C, 10},
+{0x02FC, 11},{0x03FD, 12},{0x0DFC, 13},{0x17FC, 14},
+{0x17FD, 14},{0x000A, 4},{0x001D, 7},{0x00BC, 9},
+{0x02FD, 11},{0x05FC, 12},{0x1BFC, 14},{0x1BFD, 14},
+{0x0005, 5},{0x005D, 8},{0x017D, 10},{0x05FD, 12},
+{0x0DFD, 13},{0x1DFC, 14},{0x1FFD, 15},{0x0008, 5},
+{0x006C, 8},{0x037C, 11},{0x0EFC, 13},{0x2FFC, 15},
+{0x0009, 5},{0x00BD, 9},{0x037D, 11},{0x0EFD, 13},
+{0x000D, 6},{0x01BC, 10},{0x06FC, 12},{0x1DFD, 14},
+{0x0014, 6},{0x01BD, 10},{0x06FD, 12},{0x2FFD, 15},
+{0x0015, 6},{0x01DC, 10},{0x0F7C, 13},{0x002C, 7},
+{0x01DD, 10},{0x1EFC, 14},{0x002D, 7},{0x03BC, 11},
+{0x0034, 7},{0x077C, 12},{0x006D, 8},{0x0F7D, 13},
+{0x0074, 8},{0x1EFD, 14},{0x0075, 8},{0x1F7C, 14},
+{0x00DC, 9},{0x1F7D, 14},{0x00DD, 9},{0x1FBC, 14},
+{0x00EC, 9},{0x37FC, 15},{0x01EC, 10},{0x01ED, 10},
+{0x01F4, 10},{0x03BD, 11},{0x03DC, 11},{0x03DD, 11},
+{0x03EC, 11},{0x03ED, 11},{0x03F4, 11},{0x077D, 12},
+{0x07BC, 12},{0x07BD, 12},{0x0FBC, 13},{0x0FBD, 13},
+{0x0FDC, 13},{0x0FDD, 13},{0x1FBD, 14},{0x1FDC, 14},
+{0x1FDD, 14},{0x37FD, 15},{0x3BFC, 15},
+{0x000B, 4},{0x0078, 8},{0x03F5, 11},{0x0FEC, 13},
+{0x1FEC, 14},{0x0012, 5},{0x00ED, 9},{0x07DC, 12},
+{0x1FED, 14},{0x3BFD, 15},{0x0013, 5},{0x03F8, 11},
+{0x3DFC, 15},{0x0018, 6},{0x07DD, 12},{0x0019, 6},
+{0x07EC, 12},{0x0022, 6},{0x0FED, 13},{0x0023, 6},
+{0x0FF4, 13},{0x0035, 7},{0x0FF5, 13},{0x0038, 7},
+{0x0FF8, 13},{0x0039, 7},{0x0FF9, 13},{0x0042, 7},
+{0x1FF4, 14},{0x0043, 7},{0x1FF5, 14},{0x0079, 8},
+{0x1FF8, 14},{0x0082, 8},{0x3DFD, 15},{0x0083, 8},
+{0x00F4, 9},{0x00F5, 9},{0x00F8, 9},{0x00F9, 9},
+{0x0102, 9},{0x0103, 9},{0x01F5, 10},{0x01F8, 10},
+{0x01F9, 10},{0x0202, 10},{0x0203, 10},{0x03F9, 11},
+{0x0402, 11},{0x0403, 11},{0x07ED, 12},{0x07F4, 12},
+{0x07F5, 12},{0x07F8, 12},{0x07F9, 12},{0x0802, 12},
+{0x0803, 12},{0x1002, 13},{0x1003, 13},{0x1FF9, 14},
+{0x2002, 14},{0x2003, 14},{0x3EFC, 15},{0x3EFD, 15},
+{0x3F7C, 15},{0x3F7D, 15},{0x0000, 4}
+};
+
+static const int8_t inter_rvlc_run[169]={
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 2, 2, 2,
+ 2, 2, 2, 2, 3, 3, 3, 3,
+ 3, 3, 3, 4, 4, 4, 4, 4,
+ 5, 5, 5, 5, 6, 6, 6, 6,
+ 7, 7, 7, 7, 8, 8, 8, 9,
+ 9, 9, 10, 10, 11, 11, 12, 12,
+13, 13, 14, 14, 15, 15, 16, 16,
+17, 17, 18, 19, 20, 21, 22, 23,
+24, 25, 26, 27, 28, 29, 30, 31,
+32, 33, 34, 35, 36, 37, 38,
+ 0, 0, 0, 0, 0, 1, 1, 1,
+ 1, 1, 2, 2, 2, 3, 3, 4,
+ 4, 5, 5, 6, 6, 7, 7, 8,
+ 8, 9, 9, 10, 10, 11, 11, 12,
+12, 13, 13, 14, 15, 16, 17, 18,
+19, 20, 21, 22, 23, 24, 25, 26,
+27, 28, 29, 30, 31, 32, 33, 34,
+35, 36, 37, 38, 39, 40, 41, 42,
+43, 44,
+};
+
+static const int8_t inter_rvlc_level[169]={
+ 1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16,
+17, 18, 19, 1, 2, 3, 4, 5,
+ 6, 7, 8, 9, 10, 1, 2, 3,
+ 4, 5, 6, 7, 1, 2, 3, 4,
+ 5, 6, 7, 1, 2, 3, 4, 5,
+ 1, 2, 3, 4, 1, 2, 3, 4,
+ 1, 2, 3, 4, 1, 2, 3, 1,
+ 2, 3, 1, 2, 1, 2, 1, 2,
+ 1, 2, 1, 2, 1, 2, 1, 2,
+ 1, 2, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1,
+ 1, 2, 3, 4, 5, 1, 2, 3,
+ 4, 5, 1, 2, 3, 1, 2, 1,
+ 2, 1, 2, 1, 2, 1, 2, 1,
+ 2, 1, 2, 1, 2, 1, 2, 1,
+ 2, 1, 2, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1,
+};
+
+RLTable rvlc_rl_inter = {
+ 169,
+ 103,
+ inter_rvlc,
+ inter_rvlc_run,
+ inter_rvlc_level,
+};
+
+const uint16_t intra_rvlc[170][2]={
+{0x0006, 3},{0x0007, 3},{0x000A, 4},{0x0009, 5},
+{0x0014, 6},{0x0015, 6},{0x0034, 7},{0x0074, 8},
+{0x0075, 8},{0x00DD, 9},{0x00EC, 9},{0x01EC, 10},
+{0x01ED, 10},{0x01F4, 10},{0x03EC, 11},{0x03ED, 11},
+{0x03F4, 11},{0x077D, 12},{0x07BC, 12},{0x0FBD, 13},
+{0x0FDC, 13},{0x07BD, 12},{0x0FDD, 13},{0x1FBD, 14},
+{0x1FDC, 14},{0x1FDD, 14},{0x1FFC, 15},{0x0001, 4},
+{0x0008, 5},{0x002D, 7},{0x006C, 8},{0x006D, 8},
+{0x00DC, 9},{0x01DD, 10},{0x03DC, 11},{0x03DD, 11},
+{0x077C, 12},{0x0FBC, 13},{0x1F7D, 14},{0x1FBC, 14},
+{0x0004, 5},{0x002C, 7},{0x00BC, 9},{0x01DC, 10},
+{0x03BC, 11},{0x03BD, 11},{0x0EFD, 13},{0x0F7C, 13},
+{0x0F7D, 13},{0x1EFD, 14},{0x1F7C, 14},{0x0005, 5},
+{0x005C, 8},{0x00BD, 9},{0x037D, 11},{0x06FC, 12},
+{0x0EFC, 13},{0x1DFD, 14},{0x1EFC, 14},{0x1FFD, 15},
+{0x000C, 6},{0x005D, 8},{0x01BD, 10},{0x03FD, 12},
+{0x06FD, 12},{0x1BFD, 14},{0x000D, 6},{0x007D, 9},
+{0x02FC, 11},{0x05FC, 12},{0x1BFC, 14},{0x1DFC, 14},
+{0x001C, 7},{0x017C, 10},{0x02FD, 11},{0x05FD, 12},
+{0x2FFC, 15},{0x001D, 7},{0x017D, 10},{0x037C, 11},
+{0x0DFD, 13},{0x2FFD, 15},{0x003C, 8},{0x01BC, 10},
+{0x0BFD, 13},{0x17FD, 14},{0x003D, 8},{0x01FD, 11},
+{0x0DFC, 13},{0x37FC, 15},{0x007C, 9},{0x03FC, 12},
+{0x00FC, 10},{0x0BFC, 13},{0x00FD, 10},{0x37FD, 15},
+{0x01FC, 11},{0x07FC, 13},{0x07FD, 13},{0x0FFC, 14},
+{0x0FFD, 14},{0x17FC, 14},{0x3BFC, 15},
+{0x000B, 4},{0x0078, 8},{0x03F5, 11},{0x0FEC, 13},
+{0x1FEC, 14},{0x0012, 5},{0x00ED, 9},{0x07DC, 12},
+{0x1FED, 14},{0x3BFD, 15},{0x0013, 5},{0x03F8, 11},
+{0x3DFC, 15},{0x0018, 6},{0x07DD, 12},{0x0019, 6},
+{0x07EC, 12},{0x0022, 6},{0x0FED, 13},{0x0023, 6},
+{0x0FF4, 13},{0x0035, 7},{0x0FF5, 13},{0x0038, 7},
+{0x0FF8, 13},{0x0039, 7},{0x0FF9, 13},{0x0042, 7},
+{0x1FF4, 14},{0x0043, 7},{0x1FF5, 14},{0x0079, 8},
+{0x1FF8, 14},{0x0082, 8},{0x3DFD, 15},{0x0083, 8},
+{0x00F4, 9},{0x00F5, 9},{0x00F8, 9},{0x00F9, 9},
+{0x0102, 9},{0x0103, 9},{0x01F5, 10},{0x01F8, 10},
+{0x01F9, 10},{0x0202, 10},{0x0203, 10},{0x03F9, 11},
+{0x0402, 11},{0x0403, 11},{0x07ED, 12},{0x07F4, 12},
+{0x07F5, 12},{0x07F8, 12},{0x07F9, 12},{0x0802, 12},
+{0x0803, 12},{0x1002, 13},{0x1003, 13},{0x1FF9, 14},
+{0x2002, 14},{0x2003, 14},{0x3EFC, 15},{0x3EFD, 15},
+{0x3F7C, 15},{0x3F7D, 15},{0x0000, 4}
+};
+
+static const int8_t intra_rvlc_run[169]={
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 4, 4, 4, 4,
+ 4, 4, 5, 5, 5, 5, 5, 5,
+ 6, 6, 6, 6, 6, 7, 7, 7,
+ 7, 7, 8, 8, 8, 8, 9, 9,
+ 9, 9, 10, 10, 11, 11, 12, 12,
+13, 14, 15, 16, 17, 18, 19,
+ 0, 0, 0, 0, 0, 1, 1, 1,
+ 1, 1, 2, 2, 2, 3, 3, 4,
+ 4, 5, 5, 6, 6, 7, 7, 8,
+ 8, 9, 9, 10, 10, 11, 11, 12,
+12, 13, 13, 14, 15, 16, 17, 18,
+19, 20, 21, 22, 23, 24, 25, 26,
+27, 28, 29, 30, 31, 32, 33, 34,
+35, 36, 37, 38, 39, 40, 41, 42,
+43, 44,
+};
+
+static const int8_t intra_rvlc_level[169]={
+ 1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16,
+17, 18, 19, 20, 21, 22, 23, 24,
+25, 26, 27, 1, 2, 3, 4, 5,
+ 6, 7, 8, 9, 10, 11, 12, 13,
+ 1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 1, 2, 3, 4, 5,
+ 6, 7, 8, 9, 1, 2, 3, 4,
+ 5, 6, 1, 2, 3, 4, 5, 6,
+ 1, 2, 3, 4, 5, 1, 2, 3,
+ 4, 5, 1, 2, 3, 4, 1, 2,
+ 3, 4, 1, 2, 1, 2, 1, 2,
+ 1, 1, 1, 1, 1, 1, 1,
+ 1, 2, 3, 4, 5, 1, 2, 3,
+ 4, 5, 1, 2, 3, 1, 2, 1,
+ 2, 1, 2, 1, 2, 1, 2, 1,
+ 2, 1, 2, 1, 2, 1, 2, 1,
+ 2, 1, 2, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1,
+};
+
+RLTable rvlc_rl_intra = {
+ 169,
+ 103,
+ intra_rvlc,
+ intra_rvlc_run,
+ intra_rvlc_level,
+};
+
+const uint16_t sprite_trajectory_tab[15][2] = {
+ {0x00, 2}, {0x02, 3}, {0x03, 3}, {0x04, 3}, {0x05, 3}, {0x06, 3},
+ {0x0E, 4}, {0x1E, 5}, {0x3E, 6}, {0x7E, 7}, {0xFE, 8},
+ {0x1FE, 9},{0x3FE, 10},{0x7FE, 11},{0xFFE, 12},
+};
+
+const uint8_t mb_type_b_tab[4][2] = {
+ {1, 1}, {1, 2}, {1, 3}, {1, 4},
+};
+
+/* these matrices will be permuted for the idct */
+const int16_t ff_mpeg4_default_intra_matrix[64] = {
+ 8, 17, 18, 19, 21, 23, 25, 27,
+ 17, 18, 19, 21, 23, 25, 27, 28,
+ 20, 21, 22, 23, 24, 26, 28, 30,
+ 21, 22, 23, 24, 26, 28, 30, 32,
+ 22, 23, 24, 26, 28, 30, 32, 35,
+ 23, 24, 26, 28, 30, 32, 35, 38,
+ 25, 26, 28, 30, 32, 35, 38, 41,
+ 27, 28, 30, 32, 35, 38, 41, 45,
+};
+
+const int16_t ff_mpeg4_default_non_intra_matrix[64] = {
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 17, 18, 19, 20, 21, 22, 23, 24,
+ 18, 19, 20, 21, 22, 23, 24, 25,
+ 19, 20, 21, 22, 23, 24, 26, 27,
+ 20, 21, 22, 23, 25, 26, 27, 28,
+ 21, 22, 23, 24, 26, 27, 28, 30,
+ 22, 23, 24, 26, 27, 28, 30, 31,
+ 23, 24, 25, 27, 28, 30, 31, 33,
+};
+
+const uint8_t ff_mpeg4_y_dc_scale_table[32]={
+// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ 0, 8, 8, 8, 8,10,12,14,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,34,36,38,40,42,44,46
+};
+const uint8_t ff_mpeg4_c_dc_scale_table[32]={
+// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ 0, 8, 8, 8, 8, 9, 9,10,10,11,11,12,12,13,13,14,14,15,15,16,16,17,17,18,18,19,20,21,22,23,24,25
+};
+
+const uint16_t ff_mpeg4_resync_prefix[8]={
+ 0x7F00, 0x7E00, 0x7C00, 0x7800, 0x7000, 0x6000, 0x4000, 0x0000
+};
+
+const uint8_t mpeg4_dc_threshold[8]={
+ 99, 13, 15, 17, 19, 21, 23, 0
+};
+
+#endif /* AVCODEC_MPEG4DATA_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg4video.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg4video.c
new file mode 100644
index 000000000..a647960a5
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg4video.c
@@ -0,0 +1,171 @@
+/*
+ * MPEG4 decoder / encoder common code.
+ * Copyright (c) 2000,2001 Fabrice Bellard
+ * Copyright (c) 2002-2010 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "mpegvideo.h"
+#include "mpeg4video.h"
+#include "mpeg4data.h"
+
+uint8_t ff_mpeg4_static_rl_table_store[3][2][2*MAX_RUN + MAX_LEVEL + 3];
+
+int ff_mpeg4_get_video_packet_prefix_length(MpegEncContext *s){
+ switch(s->pict_type){
+ case FF_I_TYPE:
+ return 16;
+ case FF_P_TYPE:
+ case FF_S_TYPE:
+ return s->f_code+15;
+ case FF_B_TYPE:
+ return FFMAX3(s->f_code, s->b_code, 2) + 15;
+ default:
+ return -1;
+ }
+}
+
+void ff_mpeg4_clean_buffers(MpegEncContext *s)
+{
+ int c_wrap, c_xy, l_wrap, l_xy;
+
+ l_wrap= s->b8_stride;
+ l_xy= (2*s->mb_y-1)*l_wrap + s->mb_x*2 - 1;
+ c_wrap= s->mb_stride;
+ c_xy= (s->mb_y-1)*c_wrap + s->mb_x - 1;
+
+#if 0
+ /* clean DC */
+ memsetw(s->dc_val[0] + l_xy, 1024, l_wrap*2+1);
+ memsetw(s->dc_val[1] + c_xy, 1024, c_wrap+1);
+ memsetw(s->dc_val[2] + c_xy, 1024, c_wrap+1);
+#endif
+
+ /* clean AC */
+ memset(s->ac_val[0] + l_xy, 0, (l_wrap*2+1)*16*sizeof(int16_t));
+ memset(s->ac_val[1] + c_xy, 0, (c_wrap +1)*16*sizeof(int16_t));
+ memset(s->ac_val[2] + c_xy, 0, (c_wrap +1)*16*sizeof(int16_t));
+
+ /* clean MV */
+ // we can't clear the MVs as they might be needed by a B-frame
+// memset(s->motion_val + l_xy, 0, (l_wrap*2+1)*2*sizeof(int16_t));
+// memset(s->motion_val, 0, 2*sizeof(int16_t)*(2 + s->mb_width*2)*(2 + s->mb_height*2));
+ s->last_mv[0][0][0]=
+ s->last_mv[0][0][1]=
+ s->last_mv[1][0][0]=
+ s->last_mv[1][0][1]= 0;
+}
+
+#define tab_size ((signed)FF_ARRAY_ELEMS(s->direct_scale_mv[0]))
+#define tab_bias (tab_size/2)
+
+// used by the mpeg4 and rv10 decoders
+void ff_mpeg4_init_direct_mv(MpegEncContext *s){
+ int i;
+ for(i=0; i<tab_size; i++){
+ s->direct_scale_mv[0][i] = (i-tab_bias)*s->pb_time/s->pp_time;
+ s->direct_scale_mv[1][i] = (i-tab_bias)*(s->pb_time-s->pp_time)/s->pp_time;
+ }
+}
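+// For reference, the loop above precomputes the direct-mode scaling that
+// ff_mpeg4_set_one_direct_mv() applies below: for a co-located vector
+// component v,
+//   forward MV  ~ v * pb_time / pp_time              (direct_scale_mv[0])
+//   backward MV ~ v * (pb_time - pp_time) / pp_time  (direct_scale_mv[1])
+// so the per-macroblock code can replace two divisions with a table lookup
+// whenever |v| stays inside the table range.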
+
+static inline void ff_mpeg4_set_one_direct_mv(MpegEncContext *s, int mx, int my, int i){
+ int xy= s->block_index[i];
+ uint16_t time_pp= s->pp_time;
+ uint16_t time_pb= s->pb_time;
+ int p_mx, p_my;
+
+ p_mx= s->next_picture.motion_val[0][xy][0];
+ if((unsigned)(p_mx + tab_bias) < tab_size){
+ s->mv[0][i][0] = s->direct_scale_mv[0][p_mx + tab_bias] + mx;
+ s->mv[1][i][0] = mx ? s->mv[0][i][0] - p_mx
+ : s->direct_scale_mv[1][p_mx + tab_bias];
+ }else{
+ s->mv[0][i][0] = p_mx*time_pb/time_pp + mx;
+ s->mv[1][i][0] = mx ? s->mv[0][i][0] - p_mx
+ : p_mx*(time_pb - time_pp)/time_pp;
+ }
+ p_my= s->next_picture.motion_val[0][xy][1];
+ if((unsigned)(p_my + tab_bias) < tab_size){
+ s->mv[0][i][1] = s->direct_scale_mv[0][p_my + tab_bias] + my;
+ s->mv[1][i][1] = my ? s->mv[0][i][1] - p_my
+ : s->direct_scale_mv[1][p_my + tab_bias];
+ }else{
+ s->mv[0][i][1] = p_my*time_pb/time_pp + my;
+ s->mv[1][i][1] = my ? s->mv[0][i][1] - p_my
+ : p_my*(time_pb - time_pp)/time_pp;
+ }
+}
+
+#undef tab_size
+#undef tab_bias
+
+/**
+ * Set the direct-mode MVs from the co-located MB of the next picture.
+ * @return the mb_type
+ */
+int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my){
+ const int mb_index= s->mb_x + s->mb_y*s->mb_stride;
+ const int colocated_mb_type= s->next_picture.mb_type[mb_index];
+ uint16_t time_pp;
+ uint16_t time_pb;
+ int i;
+
+ //FIXME avoid divides
+ // try special case with shifts for 1 and 3 B-frames?
+
+ if(IS_8X8(colocated_mb_type)){
+ s->mv_type = MV_TYPE_8X8;
+ for(i=0; i<4; i++){
+ ff_mpeg4_set_one_direct_mv(s, mx, my, i);
+ }
+ return MB_TYPE_DIRECT2 | MB_TYPE_8x8 | MB_TYPE_L0L1;
+ } else if(IS_INTERLACED(colocated_mb_type)){
+ s->mv_type = MV_TYPE_FIELD;
+ for(i=0; i<2; i++){
+ int field_select= s->next_picture.ref_index[0][s->block_index[2*i]];
+ s->field_select[0][i]= field_select;
+ s->field_select[1][i]= i;
+ if(s->top_field_first){
+ time_pp= s->pp_field_time - field_select + i;
+ time_pb= s->pb_field_time - field_select + i;
+ }else{
+ time_pp= s->pp_field_time + field_select - i;
+ time_pb= s->pb_field_time + field_select - i;
+ }
+ s->mv[0][i][0] = s->p_field_mv_table[i][0][mb_index][0]*time_pb/time_pp + mx;
+ s->mv[0][i][1] = s->p_field_mv_table[i][0][mb_index][1]*time_pb/time_pp + my;
+ s->mv[1][i][0] = mx ? s->mv[0][i][0] - s->p_field_mv_table[i][0][mb_index][0]
+ : s->p_field_mv_table[i][0][mb_index][0]*(time_pb - time_pp)/time_pp;
+ s->mv[1][i][1] = my ? s->mv[0][i][1] - s->p_field_mv_table[i][0][mb_index][1]
+ : s->p_field_mv_table[i][0][mb_index][1]*(time_pb - time_pp)/time_pp;
+ }
+ return MB_TYPE_DIRECT2 | MB_TYPE_16x8 | MB_TYPE_L0L1 | MB_TYPE_INTERLACED;
+ }else{
+ ff_mpeg4_set_one_direct_mv(s, mx, my, 0);
+ s->mv[0][1][0] = s->mv[0][2][0] = s->mv[0][3][0] = s->mv[0][0][0];
+ s->mv[0][1][1] = s->mv[0][2][1] = s->mv[0][3][1] = s->mv[0][0][1];
+ s->mv[1][1][0] = s->mv[1][2][0] = s->mv[1][3][0] = s->mv[1][0][0];
+ s->mv[1][1][1] = s->mv[1][2][1] = s->mv[1][3][1] = s->mv[1][0][1];
+ if((s->avctx->workaround_bugs & FF_BUG_DIRECT_BLOCKSIZE) || !s->quarter_sample)
+ s->mv_type= MV_TYPE_16X16;
+ else
+ s->mv_type= MV_TYPE_8X8;
+ return MB_TYPE_DIRECT2 | MB_TYPE_16x16 | MB_TYPE_L0L1; //Note see prev line
+ }
+}
+
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg4video.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg4video.h
new file mode 100644
index 000000000..bf8762657
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg4video.h
@@ -0,0 +1,197 @@
+/*
+ * MPEG4 encoder/decoder internal header.
+ * Copyright (c) 2000,2001 Fabrice Bellard
+ * Copyright (c) 2002-2010 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_MPEG4VIDEO_H
+#define AVCODEC_MPEG4VIDEO_H
+
+// shapes
+#define RECT_SHAPE 0
+#define BIN_SHAPE 1
+#define BIN_ONLY_SHAPE 2
+#define GRAY_SHAPE 3
+
+#define SIMPLE_VO_TYPE 1
+#define CORE_VO_TYPE 3
+#define MAIN_VO_TYPE 4
+#define NBIT_VO_TYPE 5
+#define ARTS_VO_TYPE 10
+#define ACE_VO_TYPE 12
+#define ADV_SIMPLE_VO_TYPE 17
+
+// aspect_ratio_info
+#define EXTENDED_PAR 15
+
+//vol_sprite_usage / sprite_enable
+#define STATIC_SPRITE 1
+#define GMC_SPRITE 2
+
+#define MOTION_MARKER 0x1F001
+#define DC_MARKER 0x6B001
+
+#define VOS_STARTCODE 0x1B0
+#define USER_DATA_STARTCODE 0x1B2
+#define GOP_STARTCODE 0x1B3
+#define VISUAL_OBJ_STARTCODE 0x1B5
+#define VOP_STARTCODE 0x1B6
+
+/* dc encoding for mpeg4 */
+extern const uint8_t ff_mpeg4_DCtab_lum[13][2];
+extern const uint8_t ff_mpeg4_DCtab_chrom[13][2];
+
+extern const uint16_t ff_mpeg4_intra_vlc[103][2];
+extern RLTable ff_mpeg4_rl_intra;
+
+/* Note this is identical to the intra rvlc except that it is reordered. */
+extern const uint16_t inter_rvlc[170][2];
+extern RLTable rvlc_rl_inter;
+
+extern const uint16_t intra_rvlc[170][2];
+extern RLTable rvlc_rl_intra;
+
+extern const uint16_t sprite_trajectory_tab[15][2];
+extern const uint8_t mb_type_b_tab[4][2];
+
+/* these matrices will be permuted for the idct */
+extern const int16_t ff_mpeg4_default_intra_matrix[64];
+extern const int16_t ff_mpeg4_default_non_intra_matrix[64];
+
+extern const uint8_t ff_mpeg4_y_dc_scale_table[32];
+extern const uint8_t ff_mpeg4_c_dc_scale_table[32];
+extern const uint16_t ff_mpeg4_resync_prefix[8];
+
+extern const uint8_t mpeg4_dc_threshold[8];
+
+void mpeg4_encode_mb(MpegEncContext *s,
+ DCTELEM block[6][64],
+ int motion_x, int motion_y);
+void mpeg4_pred_ac(MpegEncContext * s, DCTELEM *block, int n,
+ int dir);
+void ff_set_mpeg4_time(MpegEncContext * s);
+void mpeg4_encode_picture_header(MpegEncContext *s, int picture_number);
+
+int ff_mpeg4_decode_picture_header(MpegEncContext * s, GetBitContext *gb);
+void ff_mpeg4_encode_video_packet_header(MpegEncContext *s);
+void ff_mpeg4_clean_buffers(MpegEncContext *s);
+void ff_mpeg4_stuffing(PutBitContext * pbc);
+void ff_mpeg4_init_partitions(MpegEncContext *s);
+void ff_mpeg4_merge_partitions(MpegEncContext *s);
+void ff_clean_mpeg4_qscales(MpegEncContext *s);
+int ff_mpeg4_decode_partitions(MpegEncContext *s);
+int ff_mpeg4_get_video_packet_prefix_length(MpegEncContext *s);
+int mpeg4_decode_video_packet_header(MpegEncContext *s);
+void ff_mpeg4_init_direct_mv(MpegEncContext *s);
+
+/**
+ * Set the direct-mode MVs from the co-located MB of the next picture.
+ * @return the mb_type
+ */
+int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my);
+
+extern uint8_t ff_mpeg4_static_rl_table_store[3][2][2*MAX_RUN + MAX_LEVEL + 3];
+
+
+#if 0 //3IV1 is quite rare and it slows things down a tiny bit
+#define IS_3IV1 s->codec_tag == AV_RL32("3IV1")
+#else
+#define IS_3IV1 0
+#endif
+
+
+/**
+ * predicts the dc.
+ * encoding quantized level -> quantized diff
+ * decoding quantized diff -> quantized level
+ * @param n block index (0-3 are luma, 4-5 are chroma)
+ * @param dir_ptr pointer to an integer where the prediction direction will be stored
+ */
+static inline int ff_mpeg4_pred_dc(MpegEncContext * s, int n, int level, int *dir_ptr, int encoding)
+{
+ int a, b, c, wrap, pred, scale, ret;
+ int16_t *dc_val;
+
+ /* find prediction */
+ if (n < 4) {
+ scale = s->y_dc_scale;
+ } else {
+ scale = s->c_dc_scale;
+ }
+ if(IS_3IV1)
+ scale= 8;
+
+ wrap= s->block_wrap[n];
+ dc_val = s->dc_val[0] + s->block_index[n];
+
+ /* B C
+ * A X
+ */
+ a = dc_val[ - 1];
+ b = dc_val[ - 1 - wrap];
+ c = dc_val[ - wrap];
+
+ /* outside slice handling (we can't do that by memset as we need the dc for error resilience) */
+ if(s->first_slice_line && n!=3){
+ if(n!=2) b=c= 1024;
+ if(n!=1 && s->mb_x == s->resync_mb_x) b=a= 1024;
+ }
+ if(s->mb_x == s->resync_mb_x && s->mb_y == s->resync_mb_y+1){
+ if(n==0 || n==4 || n==5)
+ b=1024;
+ }
+
+ if (abs(a - b) < abs(b - c)) {
+ pred = c;
+ *dir_ptr = 1; /* top */
+ } else {
+ pred = a;
+ *dir_ptr = 0; /* left */
+ }
+ /* we assume pred is positive */
+ pred = FASTDIV((pred + (scale >> 1)), scale);
+
+ if(encoding){
+ ret = level - pred;
+ }else{
+ level += pred;
+ ret= level;
+ if(s->error_recognition>=3){
+ if(level<0){
+ av_log(s->avctx, AV_LOG_ERROR, "dc<0 at %dx%d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+ if(level*scale > 2048 + scale){
+ av_log(s->avctx, AV_LOG_ERROR, "dc overflow at %dx%d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+ }
+ }
+ level *=scale;
+ if(level&(~2047)){
+ if(level<0)
+ level=0;
+ else if(!(s->workaround_bugs&FF_BUG_DC_CLIP))
+ level=2047;
+ }
+ dc_val[0]= level;
+
+ return ret;
+}
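+// Worked example (decoding): with all neighbours at the out-of-slice reset
+// value a = b = c = 1024 and, say, scale = 8, |a-b| is not smaller than
+// |b-c|, so the "left" predictor a is chosen and
+// pred = FASTDIV(1024 + 4, 8) = 128; a decoded diff of +2 then yields the
+// returned quantized level 130 and dc_val[0] = 130 * 8 = 1040.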
+#endif
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg4video_parser.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg4video_parser.c
new file mode 100644
index 000000000..cde2499ab
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg4video_parser.c
@@ -0,0 +1,63 @@
+/*
+ * MPEG4 Video frame extraction
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2003 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "parser.h"
+#include "mpegvideo.h"
+#include "mpeg4video_parser.h"
+
+
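+/* Reading note: `state` acts as a 32-bit shift register over the last four
+ * bytes. The first loop looks for the VOP start code 0x000001B6 to mark the
+ * start of a frame; after that, any further start code
+ * ((state & 0xFFFFFF00) == 0x100) terminates the frame, and i-3 is the offset
+ * of the first byte of that next start code. */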
+int ff_mpeg4_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size){
+ int vop_found, i;
+ uint32_t state;
+
+ vop_found= pc->frame_start_found;
+ state= pc->state;
+
+ i=0;
+ if(!vop_found){
+ for(i=0; i<buf_size; i++){
+ state= (state<<8) | buf[i];
+ if(state == 0x1B6){
+ i++;
+ vop_found=1;
+ break;
+ }
+ }
+ }
+
+ if(vop_found){
+ /* EOF considered as end of frame */
+ if (buf_size == 0)
+ return 0;
+ for(; i<buf_size; i++){
+ state= (state<<8) | buf[i];
+ if((state&0xFFFFFF00) == 0x100){
+ pc->frame_start_found=0;
+ pc->state=-1;
+ return i-3;
+ }
+ }
+ }
+ pc->frame_start_found= vop_found;
+ pc->state= state;
+ return END_NOT_FOUND;
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg4video_parser.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg4video_parser.h
new file mode 100644
index 000000000..822a24cb9
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg4video_parser.h
@@ -0,0 +1,34 @@
+/*
+ * MPEG4 video parser prototypes
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2003 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_MPEG4VIDEO_PARSER_H
+#define AVCODEC_MPEG4VIDEO_PARSER_H
+
+#include "parser.h"
+
+/**
+ * finds the end of the current frame in the bitstream.
+ * @return the position of the first byte of the next frame, or -1
+ */
+int ff_mpeg4_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size);
+
+#endif /* AVCODEC_MPEG4VIDEO_PARSER_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg4videodec.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg4videodec.c
new file mode 100644
index 000000000..fc9006413
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpeg4videodec.c
@@ -0,0 +1,2253 @@
+/*
+ * MPEG4 decoder.
+ * Copyright (c) 2000,2001 Fabrice Bellard
+ * Copyright (c) 2002-2010 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "mpegvideo.h"
+#include "mpeg4video.h"
+#include "h263.h"
+
+// The defines below set the number of bits that are read at once when
+// reading VLC values. Changing these may improve speed and data cache usage;
+// be aware, though, that decreasing them may require the number of stages
+// passed to get_vlc* to be increased.
+#define SPRITE_TRAJ_VLC_BITS 6
+#define DC_VLC_BITS 9
+#define MB_TYPE_B_VLC_BITS 4
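+// Rough guide (a sketch of the usual get_vlc2() contract, where max_depth is
+// about ceil(longest_code_length / nb_bits)): if one of the sizes above is
+// reduced, the max_depth argument of the matching get_vlc2() call may have to
+// grow so that nb_bits * max_depth still covers the longest code in the table.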
+
+
+static VLC dc_lum, dc_chrom;
+static VLC sprite_trajectory;
+static VLC mb_type_b_vlc;
+
+static const int mb_type_b_map[4]= {
+ MB_TYPE_DIRECT2 | MB_TYPE_L0L1,
+ MB_TYPE_L0L1 | MB_TYPE_16x16,
+ MB_TYPE_L1 | MB_TYPE_16x16,
+ MB_TYPE_L0 | MB_TYPE_16x16,
+};
+
+/**
+ * predicts the ac.
+ * @param n block index (0-3 are luma, 4-5 are chroma)
+ * @param dir the ac prediction direction
+ */
+void mpeg4_pred_ac(MpegEncContext * s, DCTELEM *block, int n,
+ int dir)
+{
+ int i;
+ int16_t *ac_val, *ac_val1;
+ int8_t * const qscale_table= s->current_picture.qscale_table;
+
+ /* find prediction */
+ ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
+ ac_val1 = ac_val;
+ if (s->ac_pred) {
+ if (dir == 0) {
+ const int xy= s->mb_x-1 + s->mb_y*s->mb_stride;
+ /* left prediction */
+ ac_val -= 16;
+
+ if(s->mb_x==0 || s->qscale == qscale_table[xy] || n==1 || n==3){
+ /* same qscale */
+ for(i=1;i<8;i++) {
+ block[s->dsp.idct_permutation[i<<3]] += ac_val[i];
+ }
+ }else{
+ /* different qscale, we must rescale */
+ for(i=1;i<8;i++) {
+ block[s->dsp.idct_permutation[i<<3]] += ROUNDED_DIV(ac_val[i]*qscale_table[xy], s->qscale);
+ }
+ }
+ } else {
+ const int xy= s->mb_x + s->mb_y*s->mb_stride - s->mb_stride;
+ /* top prediction */
+ ac_val -= 16 * s->block_wrap[n];
+
+ if(s->mb_y==0 || s->qscale == qscale_table[xy] || n==2 || n==3){
+ /* same qscale */
+ for(i=1;i<8;i++) {
+ block[s->dsp.idct_permutation[i]] += ac_val[i + 8];
+ }
+ }else{
+ /* different qscale, we must rescale */
+ for(i=1;i<8;i++) {
+ block[s->dsp.idct_permutation[i]] += ROUNDED_DIV(ac_val[i + 8]*qscale_table[xy], s->qscale);
+ }
+ }
+ }
+ }
+ /* left copy */
+ for(i=1;i<8;i++)
+ ac_val1[i ] = block[s->dsp.idct_permutation[i<<3]];
+
+ /* top copy */
+ for(i=1;i<8;i++)
+ ac_val1[8 + i] = block[s->dsp.idct_permutation[i ]];
+
+}
+
+/**
+ * check whether what follows is a resync marker or the end of the bitstream.
+ * @return 0 if not
+ */
+static inline int mpeg4_is_resync(MpegEncContext *s){
+ int bits_count= get_bits_count(&s->gb);
+ int v= show_bits(&s->gb, 16);
+
+ if(s->workaround_bugs&FF_BUG_NO_PADDING){
+ return 0;
+ }
+
+ while(v<=0xFF){
+ if(s->pict_type==FF_B_TYPE || (v>>(8-s->pict_type)!=1) || s->partitioned_frame)
+ break;
+ skip_bits(&s->gb, 8+s->pict_type);
+ bits_count+= 8+s->pict_type;
+ v= show_bits(&s->gb, 16);
+ }
+
+ if(bits_count + 8 >= s->gb.size_in_bits){
+ v>>=8;
+ v|= 0x7F >> (7-(bits_count&7));
+
+ if(v==0x7F)
+ return 1;
+ }else{
+ if(v == ff_mpeg4_resync_prefix[bits_count&7]){
+ int len;
+ GetBitContext gb= s->gb;
+
+ skip_bits(&s->gb, 1);
+ align_get_bits(&s->gb);
+
+ for(len=0; len<32; len++){
+ if(get_bits1(&s->gb)) break;
+ }
+
+ s->gb= gb;
+
+ if(len>=ff_mpeg4_get_video_packet_prefix_length(s))
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static void mpeg4_decode_sprite_trajectory(MpegEncContext * s, GetBitContext *gb)
+{
+ int i;
+ int a= 2<<s->sprite_warping_accuracy;
+ int rho= 3-s->sprite_warping_accuracy;
+ int r=16/a;
+ const int vop_ref[4][2]= {{0,0}, {s->width,0}, {0, s->height}, {s->width, s->height}}; // only true for rectangle shapes
+ int d[4][2]={{0,0}, {0,0}, {0,0}, {0,0}};
+ int sprite_ref[4][2];
+ int virtual_ref[2][2];
+ int w2, h2, w3, h3;
+ int alpha=0, beta=0;
+ int w= s->width;
+ int h= s->height;
+ int min_ab;
+
+ for(i=0; i<s->num_sprite_warping_points; i++){
+ int length;
+ int x=0, y=0;
+
+ length= get_vlc2(gb, sprite_trajectory.table, SPRITE_TRAJ_VLC_BITS, 3);
+ if(length){
+ x= get_xbits(gb, length);
+ }
+ if(!(s->divx_version==500 && s->divx_build==413)) skip_bits1(gb); /* marker bit */
+
+ length= get_vlc2(gb, sprite_trajectory.table, SPRITE_TRAJ_VLC_BITS, 3);
+ if(length){
+ y=get_xbits(gb, length);
+ }
+ skip_bits1(gb); /* marker bit */
+ s->sprite_traj[i][0]= d[i][0]= x;
+ s->sprite_traj[i][1]= d[i][1]= y;
+ }
+ for(; i<4; i++)
+ s->sprite_traj[i][0]= s->sprite_traj[i][1]= 0;
+
+ while((1<<alpha)<w) alpha++;
+ while((1<<beta )<h) beta++; // there seems to be a typo in the mpeg4 std for the definition of w' and h'
+ w2= 1<<alpha;
+ h2= 1<<beta;
+
+// Note, the 4th point isn't used for GMC
+ if(s->divx_version==500 && s->divx_build==413){
+ sprite_ref[0][0]= a*vop_ref[0][0] + d[0][0];
+ sprite_ref[0][1]= a*vop_ref[0][1] + d[0][1];
+ sprite_ref[1][0]= a*vop_ref[1][0] + d[0][0] + d[1][0];
+ sprite_ref[1][1]= a*vop_ref[1][1] + d[0][1] + d[1][1];
+ sprite_ref[2][0]= a*vop_ref[2][0] + d[0][0] + d[2][0];
+ sprite_ref[2][1]= a*vop_ref[2][1] + d[0][1] + d[2][1];
+ } else {
+ sprite_ref[0][0]= (a>>1)*(2*vop_ref[0][0] + d[0][0]);
+ sprite_ref[0][1]= (a>>1)*(2*vop_ref[0][1] + d[0][1]);
+ sprite_ref[1][0]= (a>>1)*(2*vop_ref[1][0] + d[0][0] + d[1][0]);
+ sprite_ref[1][1]= (a>>1)*(2*vop_ref[1][1] + d[0][1] + d[1][1]);
+ sprite_ref[2][0]= (a>>1)*(2*vop_ref[2][0] + d[0][0] + d[2][0]);
+ sprite_ref[2][1]= (a>>1)*(2*vop_ref[2][1] + d[0][1] + d[2][1]);
+ }
+/* sprite_ref[3][0]= (a>>1)*(2*vop_ref[3][0] + d[0][0] + d[1][0] + d[2][0] + d[3][0]);
+ sprite_ref[3][1]= (a>>1)*(2*vop_ref[3][1] + d[0][1] + d[1][1] + d[2][1] + d[3][1]); */
+
+// this is mostly identical to the mpeg4 std (and is totally unreadable because of that ...)
+// perhaps it should be reordered to be more readable ...
+// the idea behind this virtual_ref mess is to be able to use shifts later per pixel instead of divides
+// so the distance between points is converted from w&h based to w2&h2 based which are of the 2^x form
+ virtual_ref[0][0]= 16*(vop_ref[0][0] + w2)
+ + ROUNDED_DIV(((w - w2)*(r*sprite_ref[0][0] - 16*vop_ref[0][0]) + w2*(r*sprite_ref[1][0] - 16*vop_ref[1][0])),w);
+ virtual_ref[0][1]= 16*vop_ref[0][1]
+ + ROUNDED_DIV(((w - w2)*(r*sprite_ref[0][1] - 16*vop_ref[0][1]) + w2*(r*sprite_ref[1][1] - 16*vop_ref[1][1])),w);
+ virtual_ref[1][0]= 16*vop_ref[0][0]
+ + ROUNDED_DIV(((h - h2)*(r*sprite_ref[0][0] - 16*vop_ref[0][0]) + h2*(r*sprite_ref[2][0] - 16*vop_ref[2][0])),h);
+ virtual_ref[1][1]= 16*(vop_ref[0][1] + h2)
+ + ROUNDED_DIV(((h - h2)*(r*sprite_ref[0][1] - 16*vop_ref[0][1]) + h2*(r*sprite_ref[2][1] - 16*vop_ref[2][1])),h);
+
+ switch(s->num_sprite_warping_points)
+ {
+ case 0:
+ s->sprite_offset[0][0]= 0;
+ s->sprite_offset[0][1]= 0;
+ s->sprite_offset[1][0]= 0;
+ s->sprite_offset[1][1]= 0;
+ s->sprite_delta[0][0]= a;
+ s->sprite_delta[0][1]= 0;
+ s->sprite_delta[1][0]= 0;
+ s->sprite_delta[1][1]= a;
+ s->sprite_shift[0]= 0;
+ s->sprite_shift[1]= 0;
+ break;
+ case 1: //GMC only
+ s->sprite_offset[0][0]= sprite_ref[0][0] - a*vop_ref[0][0];
+ s->sprite_offset[0][1]= sprite_ref[0][1] - a*vop_ref[0][1];
+ s->sprite_offset[1][0]= ((sprite_ref[0][0]>>1)|(sprite_ref[0][0]&1)) - a*(vop_ref[0][0]/2);
+ s->sprite_offset[1][1]= ((sprite_ref[0][1]>>1)|(sprite_ref[0][1]&1)) - a*(vop_ref[0][1]/2);
+ s->sprite_delta[0][0]= a;
+ s->sprite_delta[0][1]= 0;
+ s->sprite_delta[1][0]= 0;
+ s->sprite_delta[1][1]= a;
+ s->sprite_shift[0]= 0;
+ s->sprite_shift[1]= 0;
+ break;
+ case 2:
+ s->sprite_offset[0][0]= (sprite_ref[0][0]<<(alpha+rho))
+ + (-r*sprite_ref[0][0] + virtual_ref[0][0])*(-vop_ref[0][0])
+ + ( r*sprite_ref[0][1] - virtual_ref[0][1])*(-vop_ref[0][1])
+ + (1<<(alpha+rho-1));
+ s->sprite_offset[0][1]= (sprite_ref[0][1]<<(alpha+rho))
+ + (-r*sprite_ref[0][1] + virtual_ref[0][1])*(-vop_ref[0][0])
+ + (-r*sprite_ref[0][0] + virtual_ref[0][0])*(-vop_ref[0][1])
+ + (1<<(alpha+rho-1));
+ s->sprite_offset[1][0]= ( (-r*sprite_ref[0][0] + virtual_ref[0][0])*(-2*vop_ref[0][0] + 1)
+ +( r*sprite_ref[0][1] - virtual_ref[0][1])*(-2*vop_ref[0][1] + 1)
+ +2*w2*r*sprite_ref[0][0]
+ - 16*w2
+ + (1<<(alpha+rho+1)));
+ s->sprite_offset[1][1]= ( (-r*sprite_ref[0][1] + virtual_ref[0][1])*(-2*vop_ref[0][0] + 1)
+ +(-r*sprite_ref[0][0] + virtual_ref[0][0])*(-2*vop_ref[0][1] + 1)
+ +2*w2*r*sprite_ref[0][1]
+ - 16*w2
+ + (1<<(alpha+rho+1)));
+ s->sprite_delta[0][0]= (-r*sprite_ref[0][0] + virtual_ref[0][0]);
+ s->sprite_delta[0][1]= (+r*sprite_ref[0][1] - virtual_ref[0][1]);
+ s->sprite_delta[1][0]= (-r*sprite_ref[0][1] + virtual_ref[0][1]);
+ s->sprite_delta[1][1]= (-r*sprite_ref[0][0] + virtual_ref[0][0]);
+
+ s->sprite_shift[0]= alpha+rho;
+ s->sprite_shift[1]= alpha+rho+2;
+ break;
+ case 3:
+ min_ab= FFMIN(alpha, beta);
+ w3= w2>>min_ab;
+ h3= h2>>min_ab;
+ s->sprite_offset[0][0]= (sprite_ref[0][0]<<(alpha+beta+rho-min_ab))
+ + (-r*sprite_ref[0][0] + virtual_ref[0][0])*h3*(-vop_ref[0][0])
+ + (-r*sprite_ref[0][0] + virtual_ref[1][0])*w3*(-vop_ref[0][1])
+ + (1<<(alpha+beta+rho-min_ab-1));
+ s->sprite_offset[0][1]= (sprite_ref[0][1]<<(alpha+beta+rho-min_ab))
+ + (-r*sprite_ref[0][1] + virtual_ref[0][1])*h3*(-vop_ref[0][0])
+ + (-r*sprite_ref[0][1] + virtual_ref[1][1])*w3*(-vop_ref[0][1])
+ + (1<<(alpha+beta+rho-min_ab-1));
+ s->sprite_offset[1][0]= (-r*sprite_ref[0][0] + virtual_ref[0][0])*h3*(-2*vop_ref[0][0] + 1)
+ + (-r*sprite_ref[0][0] + virtual_ref[1][0])*w3*(-2*vop_ref[0][1] + 1)
+ + 2*w2*h3*r*sprite_ref[0][0]
+ - 16*w2*h3
+ + (1<<(alpha+beta+rho-min_ab+1));
+ s->sprite_offset[1][1]= (-r*sprite_ref[0][1] + virtual_ref[0][1])*h3*(-2*vop_ref[0][0] + 1)
+ + (-r*sprite_ref[0][1] + virtual_ref[1][1])*w3*(-2*vop_ref[0][1] + 1)
+ + 2*w2*h3*r*sprite_ref[0][1]
+ - 16*w2*h3
+ + (1<<(alpha+beta+rho-min_ab+1));
+ s->sprite_delta[0][0]= (-r*sprite_ref[0][0] + virtual_ref[0][0])*h3;
+ s->sprite_delta[0][1]= (-r*sprite_ref[0][0] + virtual_ref[1][0])*w3;
+ s->sprite_delta[1][0]= (-r*sprite_ref[0][1] + virtual_ref[0][1])*h3;
+ s->sprite_delta[1][1]= (-r*sprite_ref[0][1] + virtual_ref[1][1])*w3;
+
+ s->sprite_shift[0]= alpha + beta + rho - min_ab;
+ s->sprite_shift[1]= alpha + beta + rho - min_ab + 2;
+ break;
+ }
+ /* try to simplify the situation */
+ if( s->sprite_delta[0][0] == a<<s->sprite_shift[0]
+ && s->sprite_delta[0][1] == 0
+ && s->sprite_delta[1][0] == 0
+ && s->sprite_delta[1][1] == a<<s->sprite_shift[0])
+ {
+ s->sprite_offset[0][0]>>=s->sprite_shift[0];
+ s->sprite_offset[0][1]>>=s->sprite_shift[0];
+ s->sprite_offset[1][0]>>=s->sprite_shift[1];
+ s->sprite_offset[1][1]>>=s->sprite_shift[1];
+ s->sprite_delta[0][0]= a;
+ s->sprite_delta[0][1]= 0;
+ s->sprite_delta[1][0]= 0;
+ s->sprite_delta[1][1]= a;
+ s->sprite_shift[0]= 0;
+ s->sprite_shift[1]= 0;
+ s->real_sprite_warping_points=1;
+ }
+ else{
+ int shift_y= 16 - s->sprite_shift[0];
+ int shift_c= 16 - s->sprite_shift[1];
+ for(i=0; i<2; i++){
+ s->sprite_offset[0][i]<<= shift_y;
+ s->sprite_offset[1][i]<<= shift_c;
+ s->sprite_delta[0][i]<<= shift_y;
+ s->sprite_delta[1][i]<<= shift_y;
+ s->sprite_shift[i]= 16;
+ }
+ s->real_sprite_warping_points= s->num_sprite_warping_points;
+ }
+}
+
+/**
+ * decodes the next video packet.
+ * @return <0 if something went wrong
+ */
+int mpeg4_decode_video_packet_header(MpegEncContext *s)
+{
+ int mb_num_bits= av_log2(s->mb_num - 1) + 1;
+ int header_extension=0, mb_num, len;
+
+ /* is there enough space left for a video packet + header */
+ if( get_bits_count(&s->gb) > s->gb.size_in_bits-20) return -1;
+
+ for(len=0; len<32; len++){
+ if(get_bits1(&s->gb)) break;
+ }
+
+ if(len!=ff_mpeg4_get_video_packet_prefix_length(s)){
+ av_log(s->avctx, AV_LOG_ERROR, "marker does not match f_code\n");
+ return -1;
+ }
+
+ if(s->shape != RECT_SHAPE){
+ header_extension= get_bits1(&s->gb);
+ //FIXME more stuff here
+ }
+
+ mb_num= get_bits(&s->gb, mb_num_bits);
+ if(mb_num>=s->mb_num){
+ av_log(s->avctx, AV_LOG_ERROR, "illegal mb_num in video packet (%d %d) \n", mb_num, s->mb_num);
+ return -1;
+ }
+ if(s->pict_type == FF_B_TYPE){
+ while(s->next_picture.mbskip_table[ s->mb_index2xy[ mb_num ] ]) mb_num++;
+ if(mb_num >= s->mb_num) return -1; // slice contains just skipped MBs which were already decoded
+ }
+
+ s->mb_x= mb_num % s->mb_width;
+ s->mb_y= mb_num / s->mb_width;
+
+ if(s->shape != BIN_ONLY_SHAPE){
+ int qscale= get_bits(&s->gb, s->quant_precision);
+ if(qscale)
+ s->chroma_qscale=s->qscale= qscale;
+ }
+
+ if(s->shape == RECT_SHAPE){
+ header_extension= get_bits1(&s->gb);
+ }
+ if(header_extension){
+ int time_increment;
+ int time_incr=0;
+
+ while (get_bits1(&s->gb) != 0)
+ time_incr++;
+
+ check_marker(&s->gb, "before time_increment in video packet header");
+ time_increment= get_bits(&s->gb, s->time_increment_bits);
+ check_marker(&s->gb, "before vop_coding_type in video packet header");
+
+ skip_bits(&s->gb, 2); /* vop coding type */
+ //FIXME not rect stuff here
+
+ if(s->shape != BIN_ONLY_SHAPE){
+ skip_bits(&s->gb, 3); /* intra dc vlc threshold */
+//FIXME don't just ignore everything
+ if(s->pict_type == FF_S_TYPE && s->vol_sprite_usage==GMC_SPRITE){
+ mpeg4_decode_sprite_trajectory(s, &s->gb);
+ av_log(s->avctx, AV_LOG_ERROR, "untested\n");
+ }
+
+ //FIXME reduced res stuff here
+
+ if (s->pict_type != FF_I_TYPE) {
+ int f_code = get_bits(&s->gb, 3); /* fcode_for */
+ if(f_code==0){
+ av_log(s->avctx, AV_LOG_ERROR, "Error, video packet header damaged (f_code=0)\n");
+ }
+ }
+ if (s->pict_type == FF_B_TYPE) {
+ int b_code = get_bits(&s->gb, 3);
+ if(b_code==0){
+ av_log(s->avctx, AV_LOG_ERROR, "Error, video packet header damaged (b_code=0)\n");
+ }
+ }
+ }
+ }
+ //FIXME new-pred stuff
+
+ return 0;
+}
+
+/**
+ * gets the average motion vector for a GMC MB.
+ * @param n either 0 for the x component or 1 for y
+ * @returns the average MV for a GMC MB
+ */
+static inline int get_amv(MpegEncContext *s, int n){
+ int x, y, mb_v, sum, dx, dy, shift;
+ int len = 1 << (s->f_code + 4);
+ const int a= s->sprite_warping_accuracy;
+
+ if(s->workaround_bugs & FF_BUG_AMV)
+ len >>= s->quarter_sample;
+
+ if(s->real_sprite_warping_points==1){
+ if(s->divx_version==500 && s->divx_build==413)
+ sum= s->sprite_offset[0][n] / (1<<(a - s->quarter_sample));
+ else
+ sum= RSHIFT(s->sprite_offset[0][n]<<s->quarter_sample, a);
+ }else{
+ dx= s->sprite_delta[n][0];
+ dy= s->sprite_delta[n][1];
+ shift= s->sprite_shift[0];
+ if(n) dy -= 1<<(shift + a + 1);
+ else dx -= 1<<(shift + a + 1);
+ mb_v= s->sprite_offset[0][n] + dx*s->mb_x*16 + dy*s->mb_y*16;
+
+ sum=0;
+ for(y=0; y<16; y++){
+ int v;
+
+ v= mb_v + dy*y;
+ //XXX FIXME optimize
+ for(x=0; x<16; x++){
+ sum+= v>>shift;
+ v+= dx;
+ }
+ }
+ sum= RSHIFT(sum, a+8-s->quarter_sample);
+ }
+
+ if (sum < -len) sum= -len;
+ else if (sum >= len) sum= len-1;
+
+ return sum;
+}
+
+/**
+ * decodes the dc value.
+ * @param n block index (0-3 are luma, 4-5 are chroma)
+ * @param dir_ptr the prediction direction will be stored here
+ * @return the quantized dc
+ */
+static inline int mpeg4_decode_dc(MpegEncContext * s, int n, int *dir_ptr)
+{
+ int level, code;
+
+ if (n < 4)
+ code = get_vlc2(&s->gb, dc_lum.table, DC_VLC_BITS, 1);
+ else
+ code = get_vlc2(&s->gb, dc_chrom.table, DC_VLC_BITS, 1);
+ if (code < 0 || code > 9 /* && s->nbit<9 */){
+ av_log(s->avctx, AV_LOG_ERROR, "illegal dc vlc\n");
+ return -1;
+ }
+ if (code == 0) {
+ level = 0;
+ } else {
+ if(IS_3IV1){
+ if(code==1)
+ level= 2*get_bits1(&s->gb)-1;
+ else{
+ if(get_bits1(&s->gb))
+ level = get_bits(&s->gb, code-1) + (1<<(code-1));
+ else
+ level = -get_bits(&s->gb, code-1) - (1<<(code-1));
+ }
+ }else{
+ level = get_xbits(&s->gb, code);
+ }
+
+ if (code > 8){
+ if(get_bits1(&s->gb)==0){ /* marker */
+ if(s->error_recognition>=2){
+ av_log(s->avctx, AV_LOG_ERROR, "dc marker bit missing\n");
+ return -1;
+ }
+ }
+ }
+ }
+
+ return ff_mpeg4_pred_dc(s, n, level, dir_ptr, 0);
+}
+
+/**
+ * decodes first partition.
+ * @return number of MBs decoded or <0 if an error occurred
+ */
+static int mpeg4_decode_partition_a(MpegEncContext *s){
+ int mb_num;
+ static const int8_t quant_tab[4] = { -1, -2, 1, 2 };
+
+ /* decode first partition */
+ mb_num=0;
+ s->first_slice_line=1;
+ for(; s->mb_y<s->mb_height; s->mb_y++){
+ ff_init_block_index(s);
+ for(; s->mb_x<s->mb_width; s->mb_x++){
+ const int xy= s->mb_x + s->mb_y*s->mb_stride;
+ int cbpc;
+ int dir=0;
+
+ mb_num++;
+ ff_update_block_index(s);
+ if(s->mb_x == s->resync_mb_x && s->mb_y == s->resync_mb_y+1)
+ s->first_slice_line=0;
+
+ if(s->pict_type==FF_I_TYPE){
+ int i;
+
+ do{
+ if(show_bits_long(&s->gb, 19)==DC_MARKER){
+ return mb_num-1;
+ }
+
+ cbpc = get_vlc2(&s->gb, ff_h263_intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 2);
+ if (cbpc < 0){
+ av_log(s->avctx, AV_LOG_ERROR, "cbpc corrupted at %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+ }while(cbpc == 8);
+
+ s->cbp_table[xy]= cbpc & 3;
+ s->current_picture.mb_type[xy]= MB_TYPE_INTRA;
+ s->mb_intra = 1;
+
+ if(cbpc & 4) {
+ ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]);
+ }
+ s->current_picture.qscale_table[xy]= s->qscale;
+
+ s->mbintra_table[xy]= 1;
+ for(i=0; i<6; i++){
+ int dc_pred_dir;
+ int dc= mpeg4_decode_dc(s, i, &dc_pred_dir);
+ if(dc < 0){
+ av_log(s->avctx, AV_LOG_ERROR, "DC corrupted at %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+ dir<<=1;
+ if(dc_pred_dir) dir|=1;
+ }
+ s->pred_dir_table[xy]= dir;
+ }else{ /* P/S_TYPE */
+ int mx, my, pred_x, pred_y, bits;
+ int16_t * const mot_val= s->current_picture.motion_val[0][s->block_index[0]];
+ const int stride= s->b8_stride*2;
+
+try_again:
+ bits= show_bits(&s->gb, 17);
+ if(bits==MOTION_MARKER){
+ return mb_num-1;
+ }
+ skip_bits1(&s->gb);
+ if(bits&0x10000){
+ /* skip mb */
+ if(s->pict_type==FF_S_TYPE && s->vol_sprite_usage==GMC_SPRITE){
+ s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_GMC | MB_TYPE_L0;
+ mx= get_amv(s, 0);
+ my= get_amv(s, 1);
+ }else{
+ s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
+ mx=my=0;
+ }
+ mot_val[0 ]= mot_val[2 ]=
+ mot_val[0+stride]= mot_val[2+stride]= mx;
+ mot_val[1 ]= mot_val[3 ]=
+ mot_val[1+stride]= mot_val[3+stride]= my;
+
+ if(s->mbintra_table[xy])
+ ff_clean_intra_table_entries(s);
+ continue;
+ }
+
+ cbpc = get_vlc2(&s->gb, ff_h263_inter_MCBPC_vlc.table, INTER_MCBPC_VLC_BITS, 2);
+ if (cbpc < 0){
+ av_log(s->avctx, AV_LOG_ERROR, "cbpc corrupted at %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+ if(cbpc == 20)
+ goto try_again;
+
+ s->cbp_table[xy]= cbpc&(8+3); //8 is dquant
+
+ s->mb_intra = ((cbpc & 4) != 0);
+
+ if(s->mb_intra){
+ s->current_picture.mb_type[xy]= MB_TYPE_INTRA;
+ s->mbintra_table[xy]= 1;
+ mot_val[0 ]= mot_val[2 ]=
+ mot_val[0+stride]= mot_val[2+stride]= 0;
+ mot_val[1 ]= mot_val[3 ]=
+ mot_val[1+stride]= mot_val[3+stride]= 0;
+ }else{
+ if(s->mbintra_table[xy])
+ ff_clean_intra_table_entries(s);
+
+ if(s->pict_type==FF_S_TYPE && s->vol_sprite_usage==GMC_SPRITE && (cbpc & 16) == 0)
+ s->mcsel= get_bits1(&s->gb);
+ else s->mcsel= 0;
+
+ if ((cbpc & 16) == 0) {
+ /* 16x16 motion prediction */
+
+ h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
+ if(!s->mcsel){
+ mx = h263_decode_motion(s, pred_x, s->f_code);
+ if (mx >= 0xffff)
+ return -1;
+
+ my = h263_decode_motion(s, pred_y, s->f_code);
+ if (my >= 0xffff)
+ return -1;
+ s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
+ } else {
+ mx = get_amv(s, 0);
+ my = get_amv(s, 1);
+ s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_GMC | MB_TYPE_L0;
+ }
+
+ mot_val[0 ]= mot_val[2 ] =
+ mot_val[0+stride]= mot_val[2+stride]= mx;
+ mot_val[1 ]= mot_val[3 ]=
+ mot_val[1+stride]= mot_val[3+stride]= my;
+ } else {
+ int i;
+ s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0;
+ for(i=0;i<4;i++) {
+ int16_t *mot_val= h263_pred_motion(s, i, 0, &pred_x, &pred_y);
+ mx = h263_decode_motion(s, pred_x, s->f_code);
+ if (mx >= 0xffff)
+ return -1;
+
+ my = h263_decode_motion(s, pred_y, s->f_code);
+ if (my >= 0xffff)
+ return -1;
+ mot_val[0] = mx;
+ mot_val[1] = my;
+ }
+ }
+ }
+ }
+ }
+ s->mb_x= 0;
+ }
+
+ return mb_num;
+}
+
+/**
+ * decode second partition.
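+ * Partition B carries the remaining MB header data (ac_pred, cbpy, dquant) and, for intra MBs in P/S-VOPs, the DC values.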
+ * @return <0 if an error occurred
+ */
+static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count){
+ int mb_num=0;
+ static const int8_t quant_tab[4] = { -1, -2, 1, 2 };
+
+ s->mb_x= s->resync_mb_x;
+ s->first_slice_line=1;
+ for(s->mb_y= s->resync_mb_y; mb_num < mb_count; s->mb_y++){
+ ff_init_block_index(s);
+ for(; mb_num < mb_count && s->mb_x<s->mb_width; s->mb_x++){
+ const int xy= s->mb_x + s->mb_y*s->mb_stride;
+
+ mb_num++;
+ ff_update_block_index(s);
+ if(s->mb_x == s->resync_mb_x && s->mb_y == s->resync_mb_y+1)
+ s->first_slice_line=0;
+
+ if(s->pict_type==FF_I_TYPE){
+ int ac_pred= get_bits1(&s->gb);
+ int cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);
+ if(cbpy<0){
+ av_log(s->avctx, AV_LOG_ERROR, "cbpy corrupted at %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+
+ s->cbp_table[xy]|= cbpy<<2;
+ s->current_picture.mb_type[xy] |= ac_pred*MB_TYPE_ACPRED;
+ }else{ /* P || S_TYPE */
+ if(IS_INTRA(s->current_picture.mb_type[xy])){
+ int dir=0,i;
+ int ac_pred = get_bits1(&s->gb);
+ int cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);
+
+ if(cbpy<0){
+ av_log(s->avctx, AV_LOG_ERROR, "I cbpy corrupted at %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+
+ if(s->cbp_table[xy] & 8) {
+ ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]);
+ }
+ s->current_picture.qscale_table[xy]= s->qscale;
+
+ for(i=0; i<6; i++){
+ int dc_pred_dir;
+ int dc= mpeg4_decode_dc(s, i, &dc_pred_dir);
+ if(dc < 0){
+ av_log(s->avctx, AV_LOG_ERROR, "DC corrupted at %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+ dir<<=1;
+ if(dc_pred_dir) dir|=1;
+ }
+ s->cbp_table[xy]&= 3; //remove dquant
+ s->cbp_table[xy]|= cbpy<<2;
+ s->current_picture.mb_type[xy] |= ac_pred*MB_TYPE_ACPRED;
+ s->pred_dir_table[xy]= dir;
+ }else if(IS_SKIP(s->current_picture.mb_type[xy])){
+ s->current_picture.qscale_table[xy]= s->qscale;
+ s->cbp_table[xy]= 0;
+ }else{
+ int cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);
+
+ if(cbpy<0){
+ av_log(s->avctx, AV_LOG_ERROR, "P cbpy corrupted at %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+
+ if(s->cbp_table[xy] & 8) {
+ ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]);
+ }
+ s->current_picture.qscale_table[xy]= s->qscale;
+
+ s->cbp_table[xy]&= 3; //remove dquant
+ s->cbp_table[xy]|= (cbpy^0xf)<<2;
+ }
+ }
+ }
+ if(mb_num >= mb_count) return 0;
+ s->mb_x= 0;
+ }
+ return 0;
+}
+
+/**
+ * decodes the first & second partition
+ * @return <0 if error (and sets error type in the error_status_table)
+ */
+int ff_mpeg4_decode_partitions(MpegEncContext *s)
+{
+ int mb_num;
+ const int part_a_error= s->pict_type==FF_I_TYPE ? (DC_ERROR|MV_ERROR) : MV_ERROR;
+ const int part_a_end = s->pict_type==FF_I_TYPE ? (DC_END |MV_END) : MV_END;
+
+ mb_num= mpeg4_decode_partition_a(s);
+ if(mb_num<0){
+ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, part_a_error);
+ return -1;
+ }
+
+ if(s->resync_mb_x + s->resync_mb_y*s->mb_width + mb_num > s->mb_num){
+ av_log(s->avctx, AV_LOG_ERROR, "slice below monitor ...\n");
+ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, part_a_error);
+ return -1;
+ }
+
+ s->mb_num_left= mb_num;
+
+ if(s->pict_type==FF_I_TYPE){
+ while(show_bits(&s->gb, 9) == 1)
+ skip_bits(&s->gb, 9);
+ if(get_bits_long(&s->gb, 19)!=DC_MARKER){
+ av_log(s->avctx, AV_LOG_ERROR, "marker missing after first I partition at %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+ }else{
+ while(show_bits(&s->gb, 10) == 1)
+ skip_bits(&s->gb, 10);
+ if(get_bits(&s->gb, 17)!=MOTION_MARKER){
+ av_log(s->avctx, AV_LOG_ERROR, "marker missing after first P partition at %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+ }
+ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, part_a_end);
+
+ if( mpeg4_decode_partition_b(s, mb_num) < 0){
+ if(s->pict_type==FF_P_TYPE)
+ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, DC_ERROR);
+ return -1;
+ }else{
+ if(s->pict_type==FF_P_TYPE)
+ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, DC_END);
+ }
+
+ return 0;
+}
+
+/**
+ * decodes a block.
+ * @return <0 if an error occurred
+ */
+static inline int mpeg4_decode_block(MpegEncContext * s, DCTELEM * block,
+ int n, int coded, int intra, int rvlc)
+{
+ int level, i, last, run;
+ int dc_pred_dir;
+ RLTable * rl;
+ RL_VLC_ELEM * rl_vlc;
+ const uint8_t * scan_table;
+ int qmul, qadd;
+
+ //Note intra & rvlc should be optimized away if this is inlined
+
+ if(intra) {
+ if(s->use_intra_dc_vlc){
+ /* DC coef */
+ if(s->partitioned_frame){
+ level = s->dc_val[0][ s->block_index[n] ];
+ if(n<4) level= FASTDIV((level + (s->y_dc_scale>>1)), s->y_dc_scale);
+ else level= FASTDIV((level + (s->c_dc_scale>>1)), s->c_dc_scale);
+ dc_pred_dir= (s->pred_dir_table[s->mb_x + s->mb_y*s->mb_stride]<<n)&32;
+ }else{
+ level = mpeg4_decode_dc(s, n, &dc_pred_dir);
+ if (level < 0)
+ return -1;
+ }
+ block[0] = level;
+ i = 0;
+ }else{
+ i = -1;
+ ff_mpeg4_pred_dc(s, n, 0, &dc_pred_dir, 0);
+ }
+ if (!coded)
+ goto not_coded;
+
+ if(rvlc){
+ rl = &rvlc_rl_intra;
+ rl_vlc = rvlc_rl_intra.rl_vlc[0];
+ }else{
+ rl = &ff_mpeg4_rl_intra;
+ rl_vlc = ff_mpeg4_rl_intra.rl_vlc[0];
+ }
+ if (s->ac_pred) {
+ if (dc_pred_dir == 0)
+ scan_table = s->intra_v_scantable.permutated; /* left */
+ else
+ scan_table = s->intra_h_scantable.permutated; /* top */
+ } else {
+ scan_table = s->intra_scantable.permutated;
+ }
+ qmul=1;
+ qadd=0;
+ } else {
+ i = -1;
+ if (!coded) {
+ s->block_last_index[n] = i;
+ return 0;
+ }
+ if(rvlc) rl = &rvlc_rl_inter;
+ else rl = &ff_h263_rl_inter;
+
+ scan_table = s->intra_scantable.permutated;
+
+ if(s->mpeg_quant){
+ qmul=1;
+ qadd=0;
+ if(rvlc){
+ rl_vlc = rvlc_rl_inter.rl_vlc[0];
+ }else{
+ rl_vlc = ff_h263_rl_inter.rl_vlc[0];
+ }
+ }else{
+ qmul = s->qscale << 1;
+ qadd = (s->qscale - 1) | 1;
+ if(rvlc){
+ rl_vlc = rvlc_rl_inter.rl_vlc[s->qscale];
+ }else{
+ rl_vlc = ff_h263_rl_inter.rl_vlc[s->qscale];
+ }
+ }
+ }
+ {
+ OPEN_READER(re, &s->gb);
+ for(;;) {
+ UPDATE_CACHE(re, &s->gb);
+ GET_RL_VLC(level, run, re, &s->gb, rl_vlc, TEX_VLC_BITS, 2, 0);
+ if (level==0) {
+ /* escape */
+ if(rvlc){
+ if(SHOW_UBITS(re, &s->gb, 1)==0){
+ av_log(s->avctx, AV_LOG_ERROR, "1. marker bit missing in rvlc esc\n");
+ return -1;
+ }; SKIP_CACHE(re, &s->gb, 1);
+
+ last= SHOW_UBITS(re, &s->gb, 1); SKIP_CACHE(re, &s->gb, 1);
+ run= SHOW_UBITS(re, &s->gb, 6); LAST_SKIP_CACHE(re, &s->gb, 6);
+ SKIP_COUNTER(re, &s->gb, 1+1+6);
+ UPDATE_CACHE(re, &s->gb);
+
+ if(SHOW_UBITS(re, &s->gb, 1)==0){
+ av_log(s->avctx, AV_LOG_ERROR, "2. marker bit missing in rvlc esc\n");
+ return -1;
+ }; SKIP_CACHE(re, &s->gb, 1);
+
+ level= SHOW_UBITS(re, &s->gb, 11); SKIP_CACHE(re, &s->gb, 11);
+
+ if(SHOW_UBITS(re, &s->gb, 5)!=0x10){
+ av_log(s->avctx, AV_LOG_ERROR, "reverse esc missing\n");
+ return -1;
+ }; SKIP_CACHE(re, &s->gb, 5);
+
+ level= level * qmul + qadd;
+ level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); LAST_SKIP_CACHE(re, &s->gb, 1);
+ SKIP_COUNTER(re, &s->gb, 1+11+5+1);
+
+ i+= run + 1;
+ if(last) i+=192;
+ }else{
+ int cache;
+ cache= GET_CACHE(re, &s->gb);
+
+ if(IS_3IV1)
+ cache ^= 0xC0000000;
+
+ if (cache&0x80000000) {
+ if (cache&0x40000000) {
+ /* third escape */
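+ /* fixed-length last (1 bit), run (6 bits) and level (12 bits); the level is bracketed by marker bits unless this is a 3ivx stream */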
+ SKIP_CACHE(re, &s->gb, 2);
+ last= SHOW_UBITS(re, &s->gb, 1); SKIP_CACHE(re, &s->gb, 1);
+ run= SHOW_UBITS(re, &s->gb, 6); LAST_SKIP_CACHE(re, &s->gb, 6);
+ SKIP_COUNTER(re, &s->gb, 2+1+6);
+ UPDATE_CACHE(re, &s->gb);
+
+ if(IS_3IV1){
+ level= SHOW_SBITS(re, &s->gb, 12); LAST_SKIP_BITS(re, &s->gb, 12);
+ }else{
+ if(SHOW_UBITS(re, &s->gb, 1)==0){
+ av_log(s->avctx, AV_LOG_ERROR, "1. marker bit missing in 3. esc\n");
+ return -1;
+ }; SKIP_CACHE(re, &s->gb, 1);
+
+ level= SHOW_SBITS(re, &s->gb, 12); SKIP_CACHE(re, &s->gb, 12);
+
+ if(SHOW_UBITS(re, &s->gb, 1)==0){
+ av_log(s->avctx, AV_LOG_ERROR, "2. marker bit missing in 3. esc\n");
+ return -1;
+ }; LAST_SKIP_CACHE(re, &s->gb, 1);
+
+ SKIP_COUNTER(re, &s->gb, 1+12+1);
+ }
+
+#if 0
+ if(s->error_recognition >= FF_ER_COMPLIANT){
+ const int abs_level= FFABS(level);
+ if(abs_level<=MAX_LEVEL && run<=MAX_RUN){
+ const int run1= run - rl->max_run[last][abs_level] - 1;
+ if(abs_level <= rl->max_level[last][run]){
+ av_log(s->avctx, AV_LOG_ERROR, "illegal 3. esc, vlc encoding possible\n");
+ return -1;
+ }
+ if(s->error_recognition > FF_ER_COMPLIANT){
+ if(abs_level <= rl->max_level[last][run]*2){
+ av_log(s->avctx, AV_LOG_ERROR, "illegal 3. esc, esc 1 encoding possible\n");
+ return -1;
+ }
+ if(run1 >= 0 && abs_level <= rl->max_level[last][run1]){
+ av_log(s->avctx, AV_LOG_ERROR, "illegal 3. esc, esc 2 encoding possible\n");
+ return -1;
+ }
+ }
+ }
+ }
+#endif
+ if (level>0) level= level * qmul + qadd;
+ else level= level * qmul - qadd;
+
+ if((unsigned)(level + 2048) > 4095){
+ if(s->error_recognition > FF_ER_COMPLIANT){
+ if(level > 2560 || level<-2560){
+ av_log(s->avctx, AV_LOG_ERROR, "|level| overflow in 3. esc, qp=%d\n", s->qscale);
+ return -1;
+ }
+ }
+ level= level<0 ? -2048 : 2047;
+ }
+
+ i+= run + 1;
+ if(last) i+=192;
+ } else {
+ /* second escape */
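+ /* the level is coded as usual, the run is extended by max_run+1 */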
+#if MIN_CACHE_BITS < 20
+ LAST_SKIP_BITS(re, &s->gb, 2);
+ UPDATE_CACHE(re, &s->gb);
+#else
+ SKIP_BITS(re, &s->gb, 2);
+#endif
+ GET_RL_VLC(level, run, re, &s->gb, rl_vlc, TEX_VLC_BITS, 2, 1);
+ i+= run + rl->max_run[run>>7][level/qmul] +1; //FIXME opt indexing
+ level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
+ LAST_SKIP_BITS(re, &s->gb, 1);
+ }
+ } else {
+ /* first escape */
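+ /* the run is coded as usual, |level| is extended by max_level */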
+#if MIN_CACHE_BITS < 19
+ LAST_SKIP_BITS(re, &s->gb, 1);
+ UPDATE_CACHE(re, &s->gb);
+#else
+ SKIP_BITS(re, &s->gb, 1);
+#endif
+ GET_RL_VLC(level, run, re, &s->gb, rl_vlc, TEX_VLC_BITS, 2, 1);
+ i+= run;
+ level = level + rl->max_level[run>>7][(run-1)&63] * qmul;//FIXME opt indexing
+ level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
+ LAST_SKIP_BITS(re, &s->gb, 1);
+ }
+ }
+ } else {
+ i+= run;
+ level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
+ LAST_SKIP_BITS(re, &s->gb, 1);
+ }
+ if (i > 62){
+ i-= 192;
+ if(i&(~63)){
+ av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+
+ block[scan_table[i]] = level;
+ break;
+ }
+
+ block[scan_table[i]] = level;
+ }
+ CLOSE_READER(re, &s->gb);
+ }
+ not_coded:
+ if (intra) {
+ if(!s->use_intra_dc_vlc){
+ block[0] = ff_mpeg4_pred_dc(s, n, block[0], &dc_pred_dir, 0);
+
+ i -= i>>31; //if(i == -1) i=0;
+ }
+
+ mpeg4_pred_ac(s, block, n, dc_pred_dir);
+ if (s->ac_pred) {
+ i = 63; /* XXX: not optimal */
+ }
+ }
+ s->block_last_index[n] = i;
+ return 0;
+}
+
+/**
+ * decode partition C of one MB.
+ * @return <0 if an error occurred
+ */
+static int mpeg4_decode_partitioned_mb(MpegEncContext *s, DCTELEM block[6][64])
+{
+ int cbp, mb_type;
+ const int xy= s->mb_x + s->mb_y*s->mb_stride;
+
+ mb_type= s->current_picture.mb_type[xy];
+ cbp = s->cbp_table[xy];
+
+ s->use_intra_dc_vlc= s->qscale < s->intra_dc_threshold;
+
+ if(s->current_picture.qscale_table[xy] != s->qscale){
+ ff_set_qscale(s, s->current_picture.qscale_table[xy] );
+ }
+
+ if (s->pict_type == FF_P_TYPE || s->pict_type==FF_S_TYPE) {
+ int i;
+ for(i=0; i<4; i++){
+ s->mv[0][i][0] = s->current_picture.motion_val[0][ s->block_index[i] ][0];
+ s->mv[0][i][1] = s->current_picture.motion_val[0][ s->block_index[i] ][1];
+ }
+ s->mb_intra = IS_INTRA(mb_type);
+
+ if (IS_SKIP(mb_type)) {
+ /* skip mb */
+ for(i=0;i<6;i++)
+ s->block_last_index[i] = -1;
+ s->mv_dir = MV_DIR_FORWARD;
+ s->mv_type = MV_TYPE_16X16;
+ if(s->pict_type==FF_S_TYPE && s->vol_sprite_usage==GMC_SPRITE){
+ s->mcsel=1;
+ s->mb_skipped = 0;
+ }else{
+ s->mcsel=0;
+ s->mb_skipped = 1;
+ }
+ }else if(s->mb_intra){
+ s->ac_pred = IS_ACPRED(s->current_picture.mb_type[xy]);
+ }else if(!s->mb_intra){
+// s->mcsel= 0; //FIXME do we need to init that
+
+ s->mv_dir = MV_DIR_FORWARD;
+ if (IS_8X8(mb_type)) {
+ s->mv_type = MV_TYPE_8X8;
+ } else {
+ s->mv_type = MV_TYPE_16X16;
+ }
+ }
+ } else { /* I-Frame */
+ s->mb_intra = 1;
+ s->ac_pred = IS_ACPRED(s->current_picture.mb_type[xy]);
+ }
+
+ if (!IS_SKIP(mb_type)) {
+ int i;
+ s->dsp.clear_blocks(s->block[0]);
+ /* decode each block */
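+ /* cbp is shifted left each iteration so bit 5 always selects the current block */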
+ for (i = 0; i < 6; i++) {
+ if(mpeg4_decode_block(s, block[i], i, cbp&32, s->mb_intra, s->rvlc) < 0){
+ av_log(s->avctx, AV_LOG_ERROR, "texture corrupted at %d %d %d\n", s->mb_x, s->mb_y, s->mb_intra);
+ return -1;
+ }
+ cbp+=cbp;
+ }
+ }
+
+ /* per-MB end of slice check */
+
+ if(--s->mb_num_left <= 0){
+ if(mpeg4_is_resync(s))
+ return SLICE_END;
+ else
+ return SLICE_NOEND;
+ }else{
+ if(mpeg4_is_resync(s)){
+ const int delta= s->mb_x + 1 == s->mb_width ? 2 : 1;
+ if(s->cbp_table[xy+delta])
+ return SLICE_END;
+ }
+ return SLICE_OK;
+ }
+}
+
+static int mpeg4_decode_mb(MpegEncContext *s,
+ DCTELEM block[6][64])
+{
+ int cbpc, cbpy, i, cbp, pred_x, pred_y, mx, my, dquant;
+ int16_t *mot_val;
+ static int8_t quant_tab[4] = { -1, -2, 1, 2 };
+ const int xy= s->mb_x + s->mb_y * s->mb_stride;
+
+ assert(s->h263_pred);
+
+ if (s->pict_type == FF_P_TYPE || s->pict_type==FF_S_TYPE) {
+ do{
+ if (get_bits1(&s->gb)) {
+ /* skip mb */
+ s->mb_intra = 0;
+ for(i=0;i<6;i++)
+ s->block_last_index[i] = -1;
+ s->mv_dir = MV_DIR_FORWARD;
+ s->mv_type = MV_TYPE_16X16;
+ if(s->pict_type==FF_S_TYPE && s->vol_sprite_usage==GMC_SPRITE){
+ s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_GMC | MB_TYPE_16x16 | MB_TYPE_L0;
+ s->mcsel=1;
+ s->mv[0][0][0]= get_amv(s, 0);
+ s->mv[0][0][1]= get_amv(s, 1);
+
+ s->mb_skipped = 0;
+ }else{
+ s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
+ s->mcsel=0;
+ s->mv[0][0][0] = 0;
+ s->mv[0][0][1] = 0;
+ s->mb_skipped = 1;
+ }
+ goto end;
+ }
+ cbpc = get_vlc2(&s->gb, ff_h263_inter_MCBPC_vlc.table, INTER_MCBPC_VLC_BITS, 2);
+ if (cbpc < 0){
+ av_log(s->avctx, AV_LOG_ERROR, "cbpc damaged at %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+ }while(cbpc == 20);
+
+ s->dsp.clear_blocks(s->block[0]);
+ dquant = cbpc & 8;
+ s->mb_intra = ((cbpc & 4) != 0);
+ if (s->mb_intra) goto intra;
+
+ if(s->pict_type==FF_S_TYPE && s->vol_sprite_usage==GMC_SPRITE && (cbpc & 16) == 0)
+ s->mcsel= get_bits1(&s->gb);
+ else s->mcsel= 0;
+ cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1) ^ 0x0F;
+
+ cbp = (cbpc & 3) | (cbpy << 2);
+ if (dquant) {
+ ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]);
+ }
+ if((!s->progressive_sequence) && (cbp || (s->workaround_bugs&FF_BUG_XVID_ILACE)))
+ s->interlaced_dct= get_bits1(&s->gb);
+
+ s->mv_dir = MV_DIR_FORWARD;
+ if ((cbpc & 16) == 0) {
+ if(s->mcsel){
+ s->current_picture.mb_type[xy]= MB_TYPE_GMC | MB_TYPE_16x16 | MB_TYPE_L0;
+ /* 16x16 global motion prediction */
+ s->mv_type = MV_TYPE_16X16;
+ mx= get_amv(s, 0);
+ my= get_amv(s, 1);
+ s->mv[0][0][0] = mx;
+ s->mv[0][0][1] = my;
+ }else if((!s->progressive_sequence) && get_bits1(&s->gb)){
+ s->current_picture.mb_type[xy]= MB_TYPE_16x8 | MB_TYPE_L0 | MB_TYPE_INTERLACED;
+ /* 16x8 field motion prediction */
+ s->mv_type= MV_TYPE_FIELD;
+
+ s->field_select[0][0]= get_bits1(&s->gb);
+ s->field_select[0][1]= get_bits1(&s->gb);
+
+ h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
+
+ for(i=0; i<2; i++){
+ mx = h263_decode_motion(s, pred_x, s->f_code);
+ if (mx >= 0xffff)
+ return -1;
+
+ my = h263_decode_motion(s, pred_y/2, s->f_code);
+ if (my >= 0xffff)
+ return -1;
+
+ s->mv[0][i][0] = mx;
+ s->mv[0][i][1] = my;
+ }
+ }else{
+ s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
+ /* 16x16 motion prediction */
+ s->mv_type = MV_TYPE_16X16;
+ h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
+ mx = h263_decode_motion(s, pred_x, s->f_code);
+
+ if (mx >= 0xffff)
+ return -1;
+
+ my = h263_decode_motion(s, pred_y, s->f_code);
+
+ if (my >= 0xffff)
+ return -1;
+ s->mv[0][0][0] = mx;
+ s->mv[0][0][1] = my;
+ }
+ } else {
+ s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0;
+ s->mv_type = MV_TYPE_8X8;
+ for(i=0;i<4;i++) {
+ mot_val = h263_pred_motion(s, i, 0, &pred_x, &pred_y);
+ mx = h263_decode_motion(s, pred_x, s->f_code);
+ if (mx >= 0xffff)
+ return -1;
+
+ my = h263_decode_motion(s, pred_y, s->f_code);
+ if (my >= 0xffff)
+ return -1;
+ s->mv[0][i][0] = mx;
+ s->mv[0][i][1] = my;
+ mot_val[0] = mx;
+ mot_val[1] = my;
+ }
+ }
+ } else if(s->pict_type==FF_B_TYPE) {
+ int modb1; // first bit of modb
+ int modb2; // second bit of modb
+ int mb_type;
+
+ s->mb_intra = 0; //B-frames never contain intra blocks
+ s->mcsel=0; // ... true gmc blocks
+
+ if(s->mb_x==0){
+ for(i=0; i<2; i++){
+ s->last_mv[i][0][0]=
+ s->last_mv[i][0][1]=
+ s->last_mv[i][1][0]=
+ s->last_mv[i][1][1]= 0;
+ }
+ }
+
+ /* if we skipped it in the future P-frame, then skip it now too */
+ s->mb_skipped= s->next_picture.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]; // Note, skiptab=0 if last was GMC
+
+ if(s->mb_skipped){
+ /* skip mb */
+ for(i=0;i<6;i++)
+ s->block_last_index[i] = -1;
+
+ s->mv_dir = MV_DIR_FORWARD;
+ s->mv_type = MV_TYPE_16X16;
+ s->mv[0][0][0] = 0;
+ s->mv[0][0][1] = 0;
+ s->mv[1][0][0] = 0;
+ s->mv[1][0][1] = 0;
+ s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
+ goto end;
+ }
+
+ modb1= get_bits1(&s->gb);
+ if(modb1){
+ mb_type= MB_TYPE_DIRECT2 | MB_TYPE_SKIP | MB_TYPE_L0L1; //like MB_TYPE_B_DIRECT but no vectors coded
+ cbp=0;
+ }else{
+ modb2= get_bits1(&s->gb);
+ mb_type= get_vlc2(&s->gb, mb_type_b_vlc.table, MB_TYPE_B_VLC_BITS, 1);
+ if(mb_type<0){
+ av_log(s->avctx, AV_LOG_ERROR, "illegal MB_type\n");
+ return -1;
+ }
+ mb_type= mb_type_b_map[ mb_type ];
+ if(modb2) cbp= 0;
+ else{
+ s->dsp.clear_blocks(s->block[0]);
+ cbp= get_bits(&s->gb, 6);
+ }
+
+ if ((!IS_DIRECT(mb_type)) && cbp) {
+ if(get_bits1(&s->gb)){
+ ff_set_qscale(s, s->qscale + get_bits1(&s->gb)*4 - 2);
+ }
+ }
+
+ if(!s->progressive_sequence){
+ if(cbp)
+ s->interlaced_dct= get_bits1(&s->gb);
+
+ if(!IS_DIRECT(mb_type) && get_bits1(&s->gb)){
+ mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
+ mb_type &= ~MB_TYPE_16x16;
+
+ if(USES_LIST(mb_type, 0)){
+ s->field_select[0][0]= get_bits1(&s->gb);
+ s->field_select[0][1]= get_bits1(&s->gb);
+ }
+ if(USES_LIST(mb_type, 1)){
+ s->field_select[1][0]= get_bits1(&s->gb);
+ s->field_select[1][1]= get_bits1(&s->gb);
+ }
+ }
+ }
+
+ s->mv_dir = 0;
+ if((mb_type & (MB_TYPE_DIRECT2|MB_TYPE_INTERLACED)) == 0){
+ s->mv_type= MV_TYPE_16X16;
+
+ if(USES_LIST(mb_type, 0)){
+ s->mv_dir = MV_DIR_FORWARD;
+
+ mx = h263_decode_motion(s, s->last_mv[0][0][0], s->f_code);
+ my = h263_decode_motion(s, s->last_mv[0][0][1], s->f_code);
+ s->last_mv[0][1][0]= s->last_mv[0][0][0]= s->mv[0][0][0] = mx;
+ s->last_mv[0][1][1]= s->last_mv[0][0][1]= s->mv[0][0][1] = my;
+ }
+
+ if(USES_LIST(mb_type, 1)){
+ s->mv_dir |= MV_DIR_BACKWARD;
+
+ mx = h263_decode_motion(s, s->last_mv[1][0][0], s->b_code);
+ my = h263_decode_motion(s, s->last_mv[1][0][1], s->b_code);
+ s->last_mv[1][1][0]= s->last_mv[1][0][0]= s->mv[1][0][0] = mx;
+ s->last_mv[1][1][1]= s->last_mv[1][0][1]= s->mv[1][0][1] = my;
+ }
+ }else if(!IS_DIRECT(mb_type)){
+ s->mv_type= MV_TYPE_FIELD;
+
+ if(USES_LIST(mb_type, 0)){
+ s->mv_dir = MV_DIR_FORWARD;
+
+ for(i=0; i<2; i++){
+ mx = h263_decode_motion(s, s->last_mv[0][i][0] , s->f_code);
+ my = h263_decode_motion(s, s->last_mv[0][i][1]/2, s->f_code);
+ s->last_mv[0][i][0]= s->mv[0][i][0] = mx;
+ s->last_mv[0][i][1]= (s->mv[0][i][1] = my)*2;
+ }
+ }
+
+ if(USES_LIST(mb_type, 1)){
+ s->mv_dir |= MV_DIR_BACKWARD;
+
+ for(i=0; i<2; i++){
+ mx = h263_decode_motion(s, s->last_mv[1][i][0] , s->b_code);
+ my = h263_decode_motion(s, s->last_mv[1][i][1]/2, s->b_code);
+ s->last_mv[1][i][0]= s->mv[1][i][0] = mx;
+ s->last_mv[1][i][1]= (s->mv[1][i][1] = my)*2;
+ }
+ }
+ }
+ }
+
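+ /* direct mode: MVs are derived from the co-located MB of the future reference, scaled by pb_time/pp_time, with (mx,my) as the coded delta */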
+ if(IS_DIRECT(mb_type)){
+ if(IS_SKIP(mb_type))
+ mx=my=0;
+ else{
+ mx = h263_decode_motion(s, 0, 1);
+ my = h263_decode_motion(s, 0, 1);
+ }
+
+ s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
+ mb_type |= ff_mpeg4_set_direct_mv(s, mx, my);
+ }
+ s->current_picture.mb_type[xy]= mb_type;
+ } else { /* I-Frame */
+ do{
+ cbpc = get_vlc2(&s->gb, ff_h263_intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 2);
+ if (cbpc < 0){
+ av_log(s->avctx, AV_LOG_ERROR, "I cbpc damaged at %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+ }while(cbpc == 8);
+
+ dquant = cbpc & 4;
+ s->mb_intra = 1;
+intra:
+ s->ac_pred = get_bits1(&s->gb);
+ if(s->ac_pred)
+ s->current_picture.mb_type[xy]= MB_TYPE_INTRA | MB_TYPE_ACPRED;
+ else
+ s->current_picture.mb_type[xy]= MB_TYPE_INTRA;
+
+ cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);
+ if(cbpy<0){
+ av_log(s->avctx, AV_LOG_ERROR, "I cbpy damaged at %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+ cbp = (cbpc & 3) | (cbpy << 2);
+
+ s->use_intra_dc_vlc= s->qscale < s->intra_dc_threshold;
+
+ if (dquant) {
+ ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]);
+ }
+
+ if(!s->progressive_sequence)
+ s->interlaced_dct= get_bits1(&s->gb);
+
+ s->dsp.clear_blocks(s->block[0]);
+ /* decode each block */
+ for (i = 0; i < 6; i++) {
+ if (mpeg4_decode_block(s, block[i], i, cbp&32, 1, 0) < 0)
+ return -1;
+ cbp+=cbp;
+ }
+ goto end;
+ }
+
+ /* decode each block */
+ for (i = 0; i < 6; i++) {
+ if (mpeg4_decode_block(s, block[i], i, cbp&32, 0, 0) < 0)
+ return -1;
+ cbp+=cbp;
+ }
+end:
+
+ /* per-MB end of slice check */
+ if(s->codec_id==CODEC_ID_MPEG4){
+ if(mpeg4_is_resync(s)){
+ const int delta= s->mb_x + 1 == s->mb_width ? 2 : 1;
+ if(s->pict_type==FF_B_TYPE && s->next_picture.mbskip_table[xy + delta])
+ return SLICE_OK;
+ return SLICE_END;
+ }
+ }
+
+ return SLICE_OK;
+}
+
+
+static int mpeg4_decode_gop_header(MpegEncContext * s, GetBitContext *gb){
+ int hours, minutes, seconds;
+
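+ /* time_code: hours (5 bits), minutes (6 bits), marker, seconds (6 bits) */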
+ hours= get_bits(gb, 5);
+ minutes= get_bits(gb, 6);
+ skip_bits1(gb);
+ seconds= get_bits(gb, 6);
+
+ s->time_base= seconds + 60*(minutes + 60*hours);
+
+ skip_bits1(gb);
+ skip_bits1(gb);
+
+ return 0;
+}
+
+static int decode_vol_header(MpegEncContext *s, GetBitContext *gb){
+ int width, height, vo_ver_id;
+
+ /* vol header */
+ skip_bits(gb, 1); /* random access */
+ s->vo_type= get_bits(gb, 8);
+ if (get_bits1(gb) != 0) { /* is_ol_id */
+ vo_ver_id = get_bits(gb, 4); /* vo_ver_id */
+ skip_bits(gb, 3); /* vo_priority */
+ } else {
+ vo_ver_id = 1;
+ }
+ s->aspect_ratio_info= get_bits(gb, 4);
+ if(s->aspect_ratio_info == FF_ASPECT_EXTENDED){
+ s->avctx->sample_aspect_ratio.num= get_bits(gb, 8); // par_width
+ s->avctx->sample_aspect_ratio.den= get_bits(gb, 8); // par_height
+ }else{
+ s->avctx->sample_aspect_ratio= ff_h263_pixel_aspect[s->aspect_ratio_info];
+ }
+
+ if ((s->vol_control_parameters=get_bits1(gb))) { /* vol control parameter */
+ int chroma_format= get_bits(gb, 2);
+ if(chroma_format!=CHROMA_420){
+ av_log(s->avctx, AV_LOG_ERROR, "illegal chroma format\n");
+ }
+ s->low_delay= get_bits1(gb);
+ if(get_bits1(gb)){ /* vbv parameters */
+ get_bits(gb, 15); /* first_half_bitrate */
+ skip_bits1(gb); /* marker */
+ get_bits(gb, 15); /* latter_half_bitrate */
+ skip_bits1(gb); /* marker */
+ get_bits(gb, 15); /* first_half_vbv_buffer_size */
+ skip_bits1(gb); /* marker */
+ get_bits(gb, 3); /* latter_half_vbv_buffer_size */
+ get_bits(gb, 11); /* first_half_vbv_occupancy */
+ skip_bits1(gb); /* marker */
+ get_bits(gb, 15); /* latter_half_vbv_occupancy */
+ skip_bits1(gb); /* marker */
+ }
+ }else{
+ // set the low_delay flag only once so that the smarter low_delay detection is not overridden
+ if(s->picture_number==0)
+ s->low_delay=0;
+ }
+
+ s->shape = get_bits(gb, 2); /* vol shape */
+ if(s->shape != RECT_SHAPE) av_log(s->avctx, AV_LOG_ERROR, "only rectangular vol supported\n");
+ if(s->shape == GRAY_SHAPE && vo_ver_id != 1){
+ av_log(s->avctx, AV_LOG_ERROR, "Gray shape not supported\n");
+ skip_bits(gb, 4); //video_object_layer_shape_extension
+ }
+
+ check_marker(gb, "before time_increment_resolution");
+
+ s->avctx->time_base.den = get_bits(gb, 16);
+ if(!s->avctx->time_base.den){
+ av_log(s->avctx, AV_LOG_ERROR, "time_base.den==0\n");
+ return -1;
+ }
+
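+ /* number of bits needed to code time increments in the range [0, time_base.den-1] */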
+ s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
+ if (s->time_increment_bits < 1)
+ s->time_increment_bits = 1;
+
+ check_marker(gb, "before fixed_vop_rate");
+
+ if (get_bits1(gb) != 0) { /* fixed_vop_rate */
+ s->avctx->time_base.num = get_bits(gb, s->time_increment_bits);
+ }else
+ s->avctx->time_base.num = 1;
+
+ s->t_frame=0;
+
+ if (s->shape != BIN_ONLY_SHAPE) {
+ if (s->shape == RECT_SHAPE) {
+ skip_bits1(gb); /* marker */
+ width = get_bits(gb, 13);
+ skip_bits1(gb); /* marker */
+ height = get_bits(gb, 13);
+ skip_bits1(gb); /* marker */
+ if(width && height && !(s->width && s->codec_tag == AV_RL32("MP4S"))){ /* they should be non zero but who knows ... */
+ s->width = width;
+ s->height = height;
+ }
+ }
+
+ s->progressive_sequence=
+ s->progressive_frame= get_bits1(gb)^1;
+ s->interlaced_dct=0;
+ if(!get_bits1(gb) && (s->avctx->debug & FF_DEBUG_PICT_INFO))
+ av_log(s->avctx, AV_LOG_INFO, "MPEG4 OBMC not supported (very likely buggy encoder)\n"); /* OBMC Disable */
+ if (vo_ver_id == 1) {
+ s->vol_sprite_usage = get_bits1(gb); /* vol_sprite_usage */
+ } else {
+ s->vol_sprite_usage = get_bits(gb, 2); /* vol_sprite_usage */
+ }
+ if(s->vol_sprite_usage==STATIC_SPRITE) av_log(s->avctx, AV_LOG_ERROR, "Static Sprites not supported\n");
+ if(s->vol_sprite_usage==STATIC_SPRITE || s->vol_sprite_usage==GMC_SPRITE){
+ if(s->vol_sprite_usage==STATIC_SPRITE){
+ s->sprite_width = get_bits(gb, 13);
+ skip_bits1(gb); /* marker */
+ s->sprite_height= get_bits(gb, 13);
+ skip_bits1(gb); /* marker */
+ s->sprite_left = get_bits(gb, 13);
+ skip_bits1(gb); /* marker */
+ s->sprite_top = get_bits(gb, 13);
+ skip_bits1(gb); /* marker */
+ }
+ s->num_sprite_warping_points= get_bits(gb, 6);
+ if(s->num_sprite_warping_points > 3){
+ av_log(s->avctx, AV_LOG_ERROR, "%d sprite_warping_points\n", s->num_sprite_warping_points);
+ s->num_sprite_warping_points= 0;
+ return -1;
+ }
+ s->sprite_warping_accuracy = get_bits(gb, 2);
+ s->sprite_brightness_change= get_bits1(gb);
+ if(s->vol_sprite_usage==STATIC_SPRITE)
+ s->low_latency_sprite= get_bits1(gb);
+ }
+ // FIXME sadct disable bit if verid!=1 && shape not rect
+
+ if (get_bits1(gb) == 1) { /* not_8_bit */
+ s->quant_precision = get_bits(gb, 4); /* quant_precision */
+ if(get_bits(gb, 4)!=8) av_log(s->avctx, AV_LOG_ERROR, "N-bit not supported\n"); /* bits_per_pixel */
+ if(s->quant_precision!=5) av_log(s->avctx, AV_LOG_ERROR, "quant precision %d\n", s->quant_precision);
+ } else {
+ s->quant_precision = 5;
+ }
+
+ // FIXME a bunch of grayscale shape things
+
+ if((s->mpeg_quant=get_bits1(gb))){ /* vol_quant_type */
+ int i, v;
+
+ /* load default matrices */
+ for(i=0; i<64; i++){
+ int j= s->dsp.idct_permutation[i];
+ v= ff_mpeg4_default_intra_matrix[i];
+ s->avctx->intra_matrix[i]=v;
+ s->intra_matrix[j]= v;
+ s->chroma_intra_matrix[j]= v;
+
+ v= ff_mpeg4_default_non_intra_matrix[i];
+ s->avctx->inter_matrix[i]=v;
+ s->inter_matrix[j]= v;
+ s->chroma_inter_matrix[j]= v;
+ }
+
+ /* load custom intra matrix */
+ if(get_bits1(gb)){
+ int last=0;
+ for(i=0; i<64; i++){
+ int j;
+ v= get_bits(gb, 8);
+ if(v==0) break;
+
+ last= v;
+ j= s->dsp.idct_permutation[ ff_zigzag_direct[i] ];
+ s->avctx->intra_matrix[ff_zigzag_direct[i]]=v;
+ s->intra_matrix[j]= v;
+ s->chroma_intra_matrix[j]= v;
+ }
+
+ /* replicate last value */
+ for(; i<64; i++){
+ int j= s->dsp.idct_permutation[ ff_zigzag_direct[i] ];
+ s->avctx->intra_matrix[ff_zigzag_direct[i]]=last;
+ s->intra_matrix[j]= last;
+ s->chroma_intra_matrix[j]= last;
+ }
+ }
+
+ /* load custom non intra matrix */
+ if(get_bits1(gb)){
+ int last=0;
+ for(i=0; i<64; i++){
+ int j;
+ v= get_bits(gb, 8);
+ if(v==0) break;
+
+ last= v;
+ j= s->dsp.idct_permutation[ ff_zigzag_direct[i] ];
+ s->avctx->inter_matrix[ff_zigzag_direct[i]]=v;
+ s->inter_matrix[j]= v;
+ s->chroma_inter_matrix[j]= v;
+ }
+
+ /* replicate last value */
+ for(; i<64; i++){
+ int j= s->dsp.idct_permutation[ ff_zigzag_direct[i] ];
+ s->avctx->inter_matrix[ff_zigzag_direct[i]]=last;
+ s->inter_matrix[j]= last;
+ s->chroma_inter_matrix[j]= last;
+ }
+ }
+
+ // FIXME a bunch of grayscale shape things
+ }
+
+ if(vo_ver_id != 1)
+ s->quarter_sample= get_bits1(gb);
+ else s->quarter_sample=0;
+
+ if(!get_bits1(gb)){
+ int pos= get_bits_count(gb);
+ int estimation_method= get_bits(gb, 2);
+ if(estimation_method<2){
+ if(!get_bits1(gb)){
+ s->cplx_estimation_trash_i += 8*get_bits1(gb); //opaque
+ s->cplx_estimation_trash_i += 8*get_bits1(gb); //transparent
+ s->cplx_estimation_trash_i += 8*get_bits1(gb); //intra_cae
+ s->cplx_estimation_trash_i += 8*get_bits1(gb); //inter_cae
+ s->cplx_estimation_trash_i += 8*get_bits1(gb); //no_update
+ s->cplx_estimation_trash_i += 8*get_bits1(gb); //upampling
+ }
+ if(!get_bits1(gb)){
+ s->cplx_estimation_trash_i += 8*get_bits1(gb); //intra_blocks
+ s->cplx_estimation_trash_p += 8*get_bits1(gb); //inter_blocks
+ s->cplx_estimation_trash_p += 8*get_bits1(gb); //inter4v_blocks
+ s->cplx_estimation_trash_i += 8*get_bits1(gb); //not coded blocks
+ }
+ if(!check_marker(gb, "in complexity estimation part 1")){
+ skip_bits_long(gb, pos - get_bits_count(gb));
+ goto no_cplx_est;
+ }
+ if(!get_bits1(gb)){
+ s->cplx_estimation_trash_i += 8*get_bits1(gb); //dct_coeffs
+ s->cplx_estimation_trash_i += 8*get_bits1(gb); //dct_lines
+ s->cplx_estimation_trash_i += 8*get_bits1(gb); //vlc_syms
+ s->cplx_estimation_trash_i += 4*get_bits1(gb); //vlc_bits
+ }
+ if(!get_bits1(gb)){
+ s->cplx_estimation_trash_p += 8*get_bits1(gb); //apm
+ s->cplx_estimation_trash_p += 8*get_bits1(gb); //npm
+ s->cplx_estimation_trash_b += 8*get_bits1(gb); //interpolate_mc_q
+ s->cplx_estimation_trash_p += 8*get_bits1(gb); //forwback_mc_q
+ s->cplx_estimation_trash_p += 8*get_bits1(gb); //halfpel2
+ s->cplx_estimation_trash_p += 8*get_bits1(gb); //halfpel4
+ }
+ if(!check_marker(gb, "in complexity estimation part 2")){
+ skip_bits_long(gb, pos - get_bits_count(gb));
+ goto no_cplx_est;
+ }
+ if(estimation_method==1){
+ s->cplx_estimation_trash_i += 8*get_bits1(gb); //sadct
+ s->cplx_estimation_trash_p += 8*get_bits1(gb); //qpel
+ }
+ }else
+ av_log(s->avctx, AV_LOG_ERROR, "Invalid Complexity estimation method %d\n", estimation_method);
+ }else{
+no_cplx_est:
+ s->cplx_estimation_trash_i=
+ s->cplx_estimation_trash_p=
+ s->cplx_estimation_trash_b= 0;
+ }
+
+ s->resync_marker= !get_bits1(gb); /* resync_marker_disabled */
+
+ s->data_partitioning= get_bits1(gb);
+ if(s->data_partitioning){
+ s->rvlc= get_bits1(gb);
+ }
+
+ if(vo_ver_id != 1) {
+ s->new_pred= get_bits1(gb);
+ if(s->new_pred){
+ av_log(s->avctx, AV_LOG_ERROR, "new pred not supported\n");
+ skip_bits(gb, 2); /* requested upstream message type */
+ skip_bits1(gb); /* newpred segment type */
+ }
+ s->reduced_res_vop= get_bits1(gb);
+ if(s->reduced_res_vop) av_log(s->avctx, AV_LOG_ERROR, "reduced resolution VOP not supported\n");
+ }
+ else{
+ s->new_pred=0;
+ s->reduced_res_vop= 0;
+ }
+
+ s->scalability= get_bits1(gb);
+
+ if (s->scalability) {
+ GetBitContext bak= *gb;
+ int ref_layer_id;
+ int ref_layer_sampling_dir;
+ int h_sampling_factor_n;
+ int h_sampling_factor_m;
+ int v_sampling_factor_n;
+ int v_sampling_factor_m;
+
+ s->hierachy_type= get_bits1(gb);
+ ref_layer_id= get_bits(gb, 4);
+ ref_layer_sampling_dir= get_bits1(gb);
+ h_sampling_factor_n= get_bits(gb, 5);
+ h_sampling_factor_m= get_bits(gb, 5);
+ v_sampling_factor_n= get_bits(gb, 5);
+ v_sampling_factor_m= get_bits(gb, 5);
+ s->enhancement_type= get_bits1(gb);
+
+ if( h_sampling_factor_n==0 || h_sampling_factor_m==0
+ || v_sampling_factor_n==0 || v_sampling_factor_m==0){
+ /* illegal scalability header (VERY broken encoder),
+ * trying to work around it */
+ s->scalability=0;
+ *gb= bak;
+ }else
+ av_log(s->avctx, AV_LOG_ERROR, "scalability not supported\n");
+
+ // bin shape stuff FIXME
+ }
+ }
+ return 0;
+}
+
+/**
+ * decodes the user data in the header.
+ * Also initializes divx/xvid/lavc_version/build.
+ */
+static int decode_user_data(MpegEncContext *s, GetBitContext *gb){
+ char buf[256];
+ int i;
+ int e;
+ int ver = 0, build = 0, ver2 = 0, ver3 = 0;
+ char last;
+
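+ /* copy the user_data string until the next startcode prefix (23 zero bits) or 255 bytes */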
+ for(i=0; i<255 && get_bits_count(gb) < gb->size_in_bits; i++){
+ if(show_bits(gb, 23) == 0) break;
+ buf[i]= get_bits(gb, 8);
+ }
+ buf[i]=0;
+
+ /* divx detection */
+ e=sscanf(buf, "DivX%dBuild%d%c", &ver, &build, &last);
+ if(e<2)
+ e=sscanf(buf, "DivX%db%d%c", &ver, &build, &last);
+ if(e>=2){
+ s->divx_version= ver;
+ s->divx_build= build;
+ s->divx_packed= e==3 && last=='p';
+ if(s->divx_packed && !s->showed_packed_warning) {
+ av_log(s->avctx, AV_LOG_WARNING, "Invalid and inefficient vfw-avi packed B frames detected\n");
+ s->showed_packed_warning=1;
+ }
+ }
+
+ /* ffmpeg detection */
+ e=sscanf(buf, "FFmpe%*[^b]b%d", &build)+3;
+ if(e!=4)
+ e=sscanf(buf, "FFmpeg v%d.%d.%d / libavcodec build: %d", &ver, &ver2, &ver3, &build);
+ if(e!=4){
+ e=sscanf(buf, "Lavc%d.%d.%d", &ver, &ver2, &ver3)+1;
+ if (e>1)
+ build= (ver<<16) + (ver2<<8) + ver3;
+ }
+ if(e!=4){
+ if(strcmp(buf, "ffmpeg")==0){
+ s->lavc_build= 4600;
+ }
+ }
+ if(e==4){
+ s->lavc_build= build;
+ }
+
+ /* Xvid detection */
+ e=sscanf(buf, "XviD%d", &build);
+ if(e==1){
+ s->xvid_build= build;
+ }
+
+ return 0;
+}
+
+static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
+ int time_incr, time_increment;
+
+ s->pict_type = get_bits(gb, 2) + FF_I_TYPE; /* pict type: I = 0 , P = 1 */
+ if(s->pict_type==FF_B_TYPE && s->low_delay && s->vol_control_parameters==0 && !(s->flags & CODEC_FLAG_LOW_DELAY)){
+ av_log(s->avctx, AV_LOG_ERROR, "low_delay flag set incorrectly, clearing it\n");
+ s->low_delay=0;
+ }
+
+ s->partitioned_frame= s->data_partitioning && s->pict_type!=FF_B_TYPE;
+ if(s->partitioned_frame)
+ s->decode_mb= mpeg4_decode_partitioned_mb;
+ else
+ s->decode_mb= mpeg4_decode_mb;
+
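+ /* modulo_time_base: each '1' bit advances the time base by one full second */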
+ time_incr=0;
+ while (get_bits1(gb) != 0)
+ time_incr++;
+
+ check_marker(gb, "before time_increment");
+
+ if(s->time_increment_bits==0 || !(show_bits(gb, s->time_increment_bits+1)&1)){
+ av_log(s->avctx, AV_LOG_ERROR, "hmm, seems the headers are not complete, trying to guess time_increment_bits\n");
+
+ for(s->time_increment_bits=1 ;s->time_increment_bits<16; s->time_increment_bits++){
+ if ( s->pict_type == FF_P_TYPE
+ || (s->pict_type == FF_S_TYPE && s->vol_sprite_usage==GMC_SPRITE)) {
+ if((show_bits(gb, s->time_increment_bits+6)&0x37) == 0x30) break;
+ }else
+ if((show_bits(gb, s->time_increment_bits+5)&0x1F) == 0x18) break;
+ }
+
+ av_log(s->avctx, AV_LOG_ERROR, "my guess is %d bits ;)\n",s->time_increment_bits);
+ }
+
+ if(IS_3IV1) time_increment= get_bits1(gb); //FIXME investigate further
+ else time_increment= get_bits(gb, s->time_increment_bits);
+
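+ /* pp_time: distance between the last two reference (non-B) frames; pb_time: distance from the past reference to the current B-frame */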
+ if(s->pict_type!=FF_B_TYPE){
+ s->last_time_base= s->time_base;
+ s->time_base+= time_incr;
+ s->time= s->time_base*s->avctx->time_base.den + time_increment;
+ if(s->workaround_bugs&FF_BUG_UMP4){
+ if(s->time < s->last_non_b_time){
+ /* header is not mpeg-4-compatible, broken encoder,
+ * trying to work around it */
+ s->time_base++;
+ s->time+= s->avctx->time_base.den;
+ }
+ }
+ s->pp_time= s->time - s->last_non_b_time;
+ s->last_non_b_time= s->time;
+ }else{
+ s->time= (s->last_time_base + time_incr)*s->avctx->time_base.den + time_increment;
+ s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
+ if(s->pp_time <=s->pb_time || s->pp_time <= s->pp_time - s->pb_time || s->pp_time<=0){
+ /* messed up order, maybe after seeking? skipping current b-frame */
+ return FRAME_SKIPPED;
+ }
+ ff_mpeg4_init_direct_mv(s);
+
+ if(s->t_frame==0) s->t_frame= s->pb_time;
+ if(s->t_frame==0) s->t_frame=1; // 1/0 protection
+ s->pp_field_time= ( ROUNDED_DIV(s->last_non_b_time, s->t_frame)
+ - ROUNDED_DIV(s->last_non_b_time - s->pp_time, s->t_frame))*2;
+ s->pb_field_time= ( ROUNDED_DIV(s->time, s->t_frame)
+ - ROUNDED_DIV(s->last_non_b_time - s->pp_time, s->t_frame))*2;
+ if(!s->progressive_sequence){
+ if(s->pp_field_time <= s->pb_field_time || s->pb_field_time <= 1)
+ return FRAME_SKIPPED;
+ }
+ }
+
+ if(s->avctx->time_base.num)
+ s->current_picture_ptr->pts= (s->time + s->avctx->time_base.num/2) / s->avctx->time_base.num;
+ else
+ s->current_picture_ptr->pts= AV_NOPTS_VALUE;
+ if(s->avctx->debug&FF_DEBUG_PTS)
+ av_log(s->avctx, AV_LOG_DEBUG, "MPEG4 PTS: %"PRId64"\n", s->current_picture_ptr->pts);
+
+ check_marker(gb, "before vop_coded");
+
+ /* vop coded */
+ if (get_bits1(gb) != 1){
+ if(s->avctx->debug&FF_DEBUG_PICT_INFO)
+ av_log(s->avctx, AV_LOG_ERROR, "vop not coded\n");
+ return FRAME_SKIPPED;
+ }
+ if (s->shape != BIN_ONLY_SHAPE && ( s->pict_type == FF_P_TYPE
+ || (s->pict_type == FF_S_TYPE && s->vol_sprite_usage==GMC_SPRITE))) {
+ /* rounding type for motion estimation */
+ s->no_rounding = get_bits1(gb);
+ } else {
+ s->no_rounding = 0;
+ }
+//FIXME reduced res stuff
+
+ if (s->shape != RECT_SHAPE) {
+ if (s->vol_sprite_usage != 1 || s->pict_type != FF_I_TYPE) {
+ int width, height, hor_spat_ref, ver_spat_ref;
+
+ width = get_bits(gb, 13);
+ skip_bits1(gb); /* marker */
+ height = get_bits(gb, 13);
+ skip_bits1(gb); /* marker */
+ hor_spat_ref = get_bits(gb, 13); /* hor_spat_ref */
+ skip_bits1(gb); /* marker */
+ ver_spat_ref = get_bits(gb, 13); /* ver_spat_ref */
+ }
+ skip_bits1(gb); /* change_CR_disable */
+
+ if (get_bits1(gb) != 0) {
+ skip_bits(gb, 8); /* constant_alpha_value */
+ }
+ }
+//FIXME complexity estimation stuff
+
+ if (s->shape != BIN_ONLY_SHAPE) {
+ skip_bits_long(gb, s->cplx_estimation_trash_i);
+ if(s->pict_type != FF_I_TYPE)
+ skip_bits_long(gb, s->cplx_estimation_trash_p);
+ if(s->pict_type == FF_B_TYPE)
+ skip_bits_long(gb, s->cplx_estimation_trash_b);
+
+ s->intra_dc_threshold= mpeg4_dc_threshold[ get_bits(gb, 3) ];
+ if(!s->progressive_sequence){
+ s->top_field_first= get_bits1(gb);
+ s->alternate_scan= get_bits1(gb);
+ }else
+ s->alternate_scan= 0;
+ }
+
+ if(s->alternate_scan){
+ ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
+ ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
+ ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_vertical_scan);
+ ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
+ } else{
+ ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
+ ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
+ ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
+ ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
+ }
+
+ if(s->pict_type == FF_S_TYPE && (s->vol_sprite_usage==STATIC_SPRITE || s->vol_sprite_usage==GMC_SPRITE)){
+ mpeg4_decode_sprite_trajectory(s, gb);
+ if(s->sprite_brightness_change) av_log(s->avctx, AV_LOG_ERROR, "sprite_brightness_change not supported\n");
+ if(s->vol_sprite_usage==STATIC_SPRITE) av_log(s->avctx, AV_LOG_ERROR, "static sprite not supported\n");
+ }
+
+ if (s->shape != BIN_ONLY_SHAPE) {
+ s->chroma_qscale= s->qscale = get_bits(gb, s->quant_precision);
+ if(s->qscale==0){
+ av_log(s->avctx, AV_LOG_ERROR, "Error, header damaged or not MPEG4 header (qscale=0)\n");
+ return -1; // makes no sense to continue, as there is nothing left from the image then
+ }
+
+ if (s->pict_type != FF_I_TYPE) {
+ s->f_code = get_bits(gb, 3); /* fcode_for */
+ if(s->f_code==0){
+ av_log(s->avctx, AV_LOG_ERROR, "Error, header damaged or not MPEG4 header (f_code=0)\n");
+ return -1; // makes no sense to continue, as the MV decoding will break very quickly
+ }
+ }else
+ s->f_code=1;
+
+ if (s->pict_type == FF_B_TYPE) {
+ s->b_code = get_bits(gb, 3);
+ }else
+ s->b_code=1;
+
+ if(s->avctx->debug&FF_DEBUG_PICT_INFO){
+ av_log(s->avctx, AV_LOG_DEBUG, "qp:%d fc:%d,%d %s size:%d pro:%d alt:%d top:%d %spel part:%d resync:%d w:%d a:%d rnd:%d vot:%d%s dc:%d ce:%d/%d/%d\n",
+ s->qscale, s->f_code, s->b_code,
+ s->pict_type == FF_I_TYPE ? "I" : (s->pict_type == FF_P_TYPE ? "P" : (s->pict_type == FF_B_TYPE ? "B" : "S")),
+ gb->size_in_bits,s->progressive_sequence, s->alternate_scan, s->top_field_first,
+ s->quarter_sample ? "q" : "h", s->data_partitioning, s->resync_marker, s->num_sprite_warping_points,
+ s->sprite_warping_accuracy, 1-s->no_rounding, s->vo_type, s->vol_control_parameters ? " VOLC" : " ", s->intra_dc_threshold, s->cplx_estimation_trash_i, s->cplx_estimation_trash_p, s->cplx_estimation_trash_b);
+ }
+
+ if(!s->scalability){
+ if (s->shape!=RECT_SHAPE && s->pict_type!=FF_I_TYPE) {
+ skip_bits1(gb); // vop shape coding type
+ }
+ }else{
+ if(s->enhancement_type){
+ int load_backward_shape= get_bits1(gb);
+ if(load_backward_shape){
+ av_log(s->avctx, AV_LOG_ERROR, "load backward shape isn't supported\n");
+ }
+ }
+ skip_bits(gb, 2); //ref_select_code
+ }
+ }
+ /* detect buggy encoders which don't set the low_delay flag (divx4/xvid/opendivx)*/
+ // note we cannot detect divx5 without b-frames easily (although it's buggy too)
+ if(s->vo_type==0 && s->vol_control_parameters==0 && s->divx_version==0 && s->picture_number==0){
+ av_log(s->avctx, AV_LOG_ERROR, "looks like this file was encoded with (divx4/(old)xvid/opendivx) -> forcing low_delay flag\n");
+ s->low_delay=1;
+ }
+
+ s->picture_number++; // better than pic number==0 always ;)
+
+ s->y_dc_scale_table= ff_mpeg4_y_dc_scale_table; //FIXME add short header support
+ s->c_dc_scale_table= ff_mpeg4_c_dc_scale_table;
+
+ if(s->workaround_bugs&FF_BUG_EDGE){
+ s->h_edge_pos= s->width;
+ s->v_edge_pos= s->height;
+ }
+ return 0;
+}
+
+/**
+ * decode mpeg4 headers
+ * @return <0 if no VOP found (or a damaged one)
+ * FRAME_SKIPPED if a not-coded VOP is found
+ * 0 if a VOP is found
+ */
+int ff_mpeg4_decode_picture_header(MpegEncContext * s, GetBitContext *gb)
+{
+ int startcode, v;
+
+ /* search next start code */
+ align_get_bits(gb);
+
+ if(s->codec_tag == AV_RL32("WV1F") && show_bits(gb, 24) == 0x575630){
+ skip_bits(gb, 24);
+ if(get_bits(gb, 8) == 0xF0)
+ goto end;
+ }
+
+ startcode = 0xff;
+ for(;;) {
+ if(get_bits_count(gb) >= gb->size_in_bits){
+ if(gb->size_in_bits==8 && (s->divx_version || s->xvid_build)){
+ av_log(s->avctx, AV_LOG_ERROR, "frame skip %d\n", gb->size_in_bits);
+ return FRAME_SKIPPED; //divx bug
+ }else
+ return -1; //end of stream
+ }
+
+ /* use the bits after the test */
+ v = get_bits(gb, 8);
+ startcode = ((startcode << 8) | v) & 0xffffffff;
+
+ if((startcode&0xFFFFFF00) != 0x100)
+ continue; //no startcode
+
+ if(s->avctx->debug&FF_DEBUG_STARTCODE){
+ av_log(s->avctx, AV_LOG_DEBUG, "startcode: %3X ", startcode);
+ if (startcode<=0x11F) av_log(s->avctx, AV_LOG_DEBUG, "Video Object Start");
+ else if(startcode<=0x12F) av_log(s->avctx, AV_LOG_DEBUG, "Video Object Layer Start");
+ else if(startcode<=0x13F) av_log(s->avctx, AV_LOG_DEBUG, "Reserved");
+ else if(startcode<=0x15F) av_log(s->avctx, AV_LOG_DEBUG, "FGS bp start");
+ else if(startcode<=0x1AF) av_log(s->avctx, AV_LOG_DEBUG, "Reserved");
+ else if(startcode==0x1B0) av_log(s->avctx, AV_LOG_DEBUG, "Visual Object Seq Start");
+ else if(startcode==0x1B1) av_log(s->avctx, AV_LOG_DEBUG, "Visual Object Seq End");
+ else if(startcode==0x1B2) av_log(s->avctx, AV_LOG_DEBUG, "User Data");
+ else if(startcode==0x1B3) av_log(s->avctx, AV_LOG_DEBUG, "Group of VOP start");
+ else if(startcode==0x1B4) av_log(s->avctx, AV_LOG_DEBUG, "Video Session Error");
+ else if(startcode==0x1B5) av_log(s->avctx, AV_LOG_DEBUG, "Visual Object Start");
+ else if(startcode==0x1B6) av_log(s->avctx, AV_LOG_DEBUG, "Video Object Plane start");
+ else if(startcode==0x1B7) av_log(s->avctx, AV_LOG_DEBUG, "slice start");
+ else if(startcode==0x1B8) av_log(s->avctx, AV_LOG_DEBUG, "extension start");
+ else if(startcode==0x1B9) av_log(s->avctx, AV_LOG_DEBUG, "fgs start");
+ else if(startcode==0x1BA) av_log(s->avctx, AV_LOG_DEBUG, "FBA Object start");
+ else if(startcode==0x1BB) av_log(s->avctx, AV_LOG_DEBUG, "FBA Object Plane start");
+ else if(startcode==0x1BC) av_log(s->avctx, AV_LOG_DEBUG, "Mesh Object start");
+ else if(startcode==0x1BD) av_log(s->avctx, AV_LOG_DEBUG, "Mesh Object Plane start");
+ else if(startcode==0x1BE) av_log(s->avctx, AV_LOG_DEBUG, "Still Texture Object start");
+ else if(startcode==0x1BF) av_log(s->avctx, AV_LOG_DEBUG, "Texture Spatial Layer start");
+ else if(startcode==0x1C0) av_log(s->avctx, AV_LOG_DEBUG, "Texture SNR Layer start");
+ else if(startcode==0x1C1) av_log(s->avctx, AV_LOG_DEBUG, "Texture Tile start");
+ else if(startcode==0x1C2) av_log(s->avctx, AV_LOG_DEBUG, "Texture Shape Layer start");
+ else if(startcode==0x1C3) av_log(s->avctx, AV_LOG_DEBUG, "stuffing start");
+ else if(startcode<=0x1C5) av_log(s->avctx, AV_LOG_DEBUG, "reserved");
+ else if(startcode<=0x1FF) av_log(s->avctx, AV_LOG_DEBUG, "System start");
+ av_log(s->avctx, AV_LOG_DEBUG, " at %d\n", get_bits_count(gb));
+ }
+
+ if(startcode >= 0x120 && startcode <= 0x12F){
+ if(decode_vol_header(s, gb) < 0)
+ return -1;
+ }
+ else if(startcode == USER_DATA_STARTCODE){
+ decode_user_data(s, gb);
+ }
+ else if(startcode == GOP_STARTCODE){
+ mpeg4_decode_gop_header(s, gb);
+ }
+ else if(startcode == VOP_STARTCODE){
+ break;
+ }
+
+ align_get_bits(gb);
+ startcode = 0xff;
+ }
+end:
+ if(s->flags& CODEC_FLAG_LOW_DELAY)
+ s->low_delay=1;
+ s->avctx->has_b_frames= !s->low_delay;
+ return decode_vop_header(s, gb);
+}
+
+static av_cold int decode_init(AVCodecContext *avctx)
+{
+ MpegEncContext *s = avctx->priv_data;
+ int ret;
+ static int done = 0;
+
+ if((ret=ff_h263_decode_init(avctx)) < 0)
+ return ret;
+
+ if (!done) {
+ done = 1;
+
+ init_rl(&ff_mpeg4_rl_intra, ff_mpeg4_static_rl_table_store[0]);
+ init_rl(&rvlc_rl_inter, ff_mpeg4_static_rl_table_store[1]);
+ init_rl(&rvlc_rl_intra, ff_mpeg4_static_rl_table_store[2]);
+ INIT_VLC_RL(ff_mpeg4_rl_intra, 554);
+ INIT_VLC_RL(rvlc_rl_inter, 1072);
+ INIT_VLC_RL(rvlc_rl_intra, 1072);
+ INIT_VLC_STATIC(&dc_lum, DC_VLC_BITS, 10 /* 13 */,
+ &ff_mpeg4_DCtab_lum[0][1], 2, 1,
+ &ff_mpeg4_DCtab_lum[0][0], 2, 1, 512);
+ INIT_VLC_STATIC(&dc_chrom, DC_VLC_BITS, 10 /* 13 */,
+ &ff_mpeg4_DCtab_chrom[0][1], 2, 1,
+ &ff_mpeg4_DCtab_chrom[0][0], 2, 1, 512);
+ INIT_VLC_STATIC(&sprite_trajectory, SPRITE_TRAJ_VLC_BITS, 15,
+ &sprite_trajectory_tab[0][1], 4, 2,
+ &sprite_trajectory_tab[0][0], 4, 2, 128);
+ INIT_VLC_STATIC(&mb_type_b_vlc, MB_TYPE_B_VLC_BITS, 4,
+ &mb_type_b_tab[0][1], 2, 1,
+ &mb_type_b_tab[0][0], 2, 1, 16);
+ }
+
+ s->h263_pred = 1;
+ s->low_delay = 0; //default, might be overridden in the vol header during header parsing
+ s->decode_mb= mpeg4_decode_mb;
+ s->time_increment_bits = 4; /* default value for broken headers */
+ avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
+
+ return 0;
+}
+
+AVCodec mpeg4_decoder = {
+ "mpeg4",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_MPEG4,
+ sizeof(MpegEncContext),
+ decode_init,
+ NULL,
+ ff_h263_decode_end,
+ ff_h263_decode_frame,
+ CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY,
+ /*.next= */NULL,
+ /*.flush= */ff_mpeg_flush,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name= */NULL_IF_CONFIG_SMALL("MPEG-4 part 2"),
+};
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpegvideo.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpegvideo.c
new file mode 100644
index 000000000..72c0892d8
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpegvideo.c
@@ -0,0 +1,2012 @@
+/*
+ * The simplest mpeg encoder (well, it was the simplest!)
+ * Copyright (c) 2000,2001 Fabrice Bellard
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/mpegvideo.c
+ * The simplest mpeg encoder (well, it was the simplest!).
+ */
+
+#include "avcodec.h"
+#include "dsputil.h"
+#include "mpegvideo.h"
+#include "mpegvideo_common.h"
+#include "mjpegenc.h"
+#include "msmpeg4.h"
+#include "faandct.h"
+#include <limits.h>
+
+//#undef NDEBUG
+//#include <assert.h>
+
+static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
+ DCTELEM *block, int n, int qscale);
+static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
+ DCTELEM *block, int n, int qscale);
+static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
+ DCTELEM *block, int n, int qscale);
+static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
+ DCTELEM *block, int n, int qscale);
+static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
+ DCTELEM *block, int n, int qscale);
+static void dct_unquantize_h263_intra_c(MpegEncContext *s,
+ DCTELEM *block, int n, int qscale);
+static void dct_unquantize_h263_inter_c(MpegEncContext *s,
+ DCTELEM *block, int n, int qscale);
+
+
+/* enable all paranoid tests for rounding, overflows, etc... */
+//#define PARANOID
+
+//#define DEBUG
+
+
+static const uint8_t ff_default_chroma_qscale_table[32]={
+// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
+};
+
+const uint8_t ff_mpeg1_dc_scale_table[128]={
+// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+};
+
+
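+/**
+ * Scans for the next 00 00 01 startcode prefix in [p, end).
+ * *state keeps the last bytes seen so the search can resume across buffer boundaries.
+ */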
+const uint8_t *ff_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state){
+ int i;
+
+ assert(p<=end);
+ if(p>=end)
+ return end;
+
+ for(i=0; i<3; i++){
+ uint32_t tmp= *state << 8;
+ *state= tmp + *(p++);
+ if(tmp == 0x100 || p==end)
+ return p;
+ }
+
+ while(p<end){
+ if (p[-1] > 1 ) p+= 3;
+ else if(p[-2] ) p+= 2;
+ else if(p[-3]|(p[-1]-1)) p++;
+ else{
+ p++;
+ break;
+ }
+ }
+
+ p= FFMIN(p, end)-4;
+ *state= AV_RB32(p);
+
+ return p+4;
+}
+
+/* init common dct for both encoder and decoder */
+av_cold int ff_dct_common_init(MpegEncContext *s)
+{
+ s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
+ s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
+ s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
+ s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
+ s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
+ if(s->flags & CODEC_FLAG_BITEXACT)
+ s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
+ s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
+
+#if HAVE_MMX
+ MPV_common_init_mmx(s);
+#endif
+
+ /* load & permute the scantables
+ note: only wmv uses different ones
+ */
+ if(s->alternate_scan){
+ ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
+ ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
+ }else{
+ ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
+ ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
+ }
+ ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
+ ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
+
+ return 0;
+}
+
+void ff_copy_picture(Picture *dst, Picture *src){
+ *dst = *src;
+ dst->type= FF_BUFFER_TYPE_COPY;
+}
+
+/**
+ * Releases a frame buffer
+ */
+static void free_frame_buffer(MpegEncContext *s, Picture *pic)
+{
+ s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
+}
+
+/**
+ * Allocates a frame buffer
+ */
+static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
+{
+ int r;
+
+ r = s->avctx->get_buffer(s->avctx, (AVFrame*)pic);
+
+ if (r<0 || !pic->age || !pic->type || !pic->data[0]) {
+ av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
+ return -1;
+ }
+
+ if (s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])) {
+ av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
+ free_frame_buffer(s, pic);
+ return -1;
+ }
+
+ if (pic->linesize[1] != pic->linesize[2]) {
+ av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
+ free_frame_buffer(s, pic);
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * allocates a Picture
+ * The pixels are allocated/set by calling get_buffer() if shared=0
+ */
+int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){
+ const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) does not sig11
+ const int mb_array_size= s->mb_stride*s->mb_height;
+ const int b8_array_size= s->b8_stride*s->mb_height*2;
+ const int b4_array_size= s->b4_stride*s->mb_height*4;
+ int i;
+ int r= -1;
+
+ if(shared){
+ assert(pic->data[0]);
+ assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
+ pic->type= FF_BUFFER_TYPE_SHARED;
+ }else{
+ assert(!pic->data[0]);
+
+ if (alloc_frame_buffer(s, pic) < 0)
+ return -1;
+
+ s->linesize = pic->linesize[0];
+ s->uvlinesize= pic->linesize[1];
+ }
+
+ if(pic->qscale_table==NULL){
+ if (s->encoding) {
+ FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var , mb_array_size * sizeof(int16_t) , fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var, mb_array_size * sizeof(int16_t) , fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean , mb_array_size * sizeof(int8_t ) , fail)
+ }
+
+ FF_ALLOCZ_OR_GOTO(s->avctx, pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2, fail) //the +2 is for the slice end check
+ FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table , mb_array_size * sizeof(uint8_t) , fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t), fail)
+ pic->mb_type= pic->mb_type_base + 2*s->mb_stride+1;
+ if(s->out_format == FMT_H264){
+ for(i=0; i<2; i++){
+ FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b4_array_size+4) * sizeof(int16_t), fail)
+ pic->motion_val[i]= pic->motion_val_base[i]+4;
+ FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], b8_array_size * sizeof(uint8_t), fail)
+ }
+ pic->motion_subsample_log2= 2;
+ }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
+ for(i=0; i<2; i++){
+ FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t), fail)
+ pic->motion_val[i]= pic->motion_val_base[i]+4;
+ FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], b8_array_size * sizeof(uint8_t), fail)
+ }
+ pic->motion_subsample_log2= 3;
+ }
+ if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
+ FF_ALLOCZ_OR_GOTO(s->avctx, pic->dct_coeff, 64 * mb_array_size * sizeof(DCTELEM)*6, fail)
+ }
+ pic->qstride= s->mb_stride;
+ FF_ALLOCZ_OR_GOTO(s->avctx, pic->pan_scan , 1 * sizeof(AVPanScan), fail)
+ }
+
+ /* It might be nicer if the application would keep track of these
+ * but it would require an API change. */
+ memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
+ s->prev_pict_types[0]= s->dropable ? FF_B_TYPE : s->pict_type;
+ if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == FF_B_TYPE)
+ pic->age= INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.
+
+ return 0;
+fail: //for the FF_ALLOCZ_OR_GOTO macro
+ if(r>=0)
+ free_frame_buffer(s, pic);
+ return -1;
+}
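+/* note: pic->age, filled in by get_buffer(), is the number of frames since this
+ * buffer was last handed out.  MPV_decode_mb_internal compares it against
+ * mbskip_table so that a macroblock skipped in every intervening frame does not
+ * have to be copied again; setting age to INT_MAX above disables that shortcut
+ * when the frame that last occupied this buffer was a B-frame. */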
+
+/**
+ * deallocates a picture
+ */
+static void free_picture(MpegEncContext *s, Picture *pic){
+ int i;
+
+ if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
+ free_frame_buffer(s, pic);
+ }
+
+ av_freep(&pic->mb_var);
+ av_freep(&pic->mc_mb_var);
+ av_freep(&pic->mb_mean);
+ av_freep(&pic->mbskip_table);
+ av_freep(&pic->qscale_table);
+ av_freep(&pic->mb_type_base);
+ av_freep(&pic->dct_coeff);
+ av_freep(&pic->pan_scan);
+ pic->mb_type= NULL;
+ for(i=0; i<2; i++){
+ av_freep(&pic->motion_val_base[i]);
+ av_freep(&pic->ref_index[i]);
+ }
+
+ if(pic->type == FF_BUFFER_TYPE_SHARED){
+ for(i=0; i<4; i++){
+ pic->base[i]=
+ pic->data[i]= NULL;
+ }
+ pic->type= 0;
+ }
+}
+
+static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
+ int i;
+
+ // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264)
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->allocated_edge_emu_buffer, (s->width+64)*2*21*2, fail); //(width + edge + align)*interlaced*MBsize*tolerance
+ s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*21;
+
+ //FIXME should be linesize instead of s->width*2 but that is not known before get_buffer()
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, (s->width+64)*4*16*2*sizeof(uint8_t), fail)
+ s->me.temp= s->me.scratchpad;
+ s->rd_scratchpad= s->me.scratchpad;
+ s->b_scratchpad= s->me.scratchpad;
+ s->obmc_scratchpad= s->me.scratchpad + 16;
+ if (s->encoding) {
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map , ME_MAP_SIZE*sizeof(uint32_t), fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t), fail)
+ if(s->avctx->noise_reduction){
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum, 2 * 64 * sizeof(int), fail)
+ }
+ }
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64*12*2 * sizeof(DCTELEM), fail)
+ s->block= s->blocks[0];
+
+ for(i=0;i<12;i++){
+ s->pblocks[i] = &s->block[i];
+ }
+ return 0;
+fail:
+ return -1; //free() through MPV_common_end()
+}
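+/* note: me.temp, rd_scratchpad, b_scratchpad and obmc_scratchpad above are not
+ * separate allocations; they all alias the single me.scratchpad buffer (with
+ * obmc_scratchpad offset by 16 bytes), so each slice thread carries exactly one
+ * scratch allocation plus its own edge emulation buffer and block storage. */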
+
+static void free_duplicate_context(MpegEncContext *s){
+ if(s==NULL) return;
+
+ av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
+ av_freep(&s->me.scratchpad);
+ s->me.temp=
+ s->rd_scratchpad=
+ s->b_scratchpad=
+ s->obmc_scratchpad= NULL;
+
+ av_freep(&s->dct_error_sum);
+ av_freep(&s->me.map);
+ av_freep(&s->me.score_map);
+ av_freep(&s->blocks);
+ s->block= NULL;
+}
+
+static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
+#define COPY(a) bak->a= src->a
+ COPY(allocated_edge_emu_buffer);
+ COPY(edge_emu_buffer);
+ COPY(me.scratchpad);
+ COPY(me.temp);
+ COPY(rd_scratchpad);
+ COPY(b_scratchpad);
+ COPY(obmc_scratchpad);
+ COPY(me.map);
+ COPY(me.score_map);
+ COPY(blocks);
+ COPY(block);
+ COPY(start_mb_y);
+ COPY(end_mb_y);
+ COPY(me.map_generation);
+ COPY(pb);
+ COPY(dct_error_sum);
+ COPY(dct_count[0]);
+ COPY(dct_count[1]);
+#undef COPY
+}
+
+void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
+ MpegEncContext bak;
+ int i;
+ //FIXME copy only needed parts
+//START_TIMER
+ backup_duplicate_context(&bak, dst);
+ memcpy(dst, src, sizeof(MpegEncContext));
+ backup_duplicate_context(dst, &bak);
+ for(i=0;i<12;i++){
+ dst->pblocks[i] = &dst->block[i];
+ }
+//STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000 frames on 1 GHz with 2 threads
+}
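+/* note: the backup/memcpy/restore sequence above copies the complete shared state
+ * from the source context into a slice thread while preserving the thread's
+ * private members (scratch buffers, bitstream writer, start/end_mb_y, ...), and
+ * then re-points pblocks[] at the thread's own block array. */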
+
+/**
+ * sets the given MpegEncContext to common defaults (same for encoding and decoding).
+ * the changed fields will not depend upon the prior state of the MpegEncContext.
+ */
+void MPV_common_defaults(MpegEncContext *s){
+ s->y_dc_scale_table=
+ s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
+ s->chroma_qscale_table= ff_default_chroma_qscale_table;
+ s->progressive_frame= 1;
+ s->progressive_sequence= 1;
+ s->picture_structure= PICT_FRAME;
+
+ s->coded_picture_number = 0;
+ s->picture_number = 0;
+ s->input_picture_number = 0;
+
+ s->picture_in_gop_number = 0;
+
+ s->f_code = 1;
+ s->b_code = 1;
+}
+
+/**
+ * sets the given MpegEncContext to defaults for decoding.
+ * the changed fields will not depend upon the prior state of the MpegEncContext.
+ */
+void MPV_decode_defaults(MpegEncContext *s){
+ MPV_common_defaults(s);
+}
+
+/**
+ * init common structure for both encoder and decoder.
+ * this assumes that some variables like width/height are already set
+ */
+av_cold int MPV_common_init(MpegEncContext *s)
+{
+ int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y, threads;
+
+ if(s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
+ s->mb_height = (s->height + 31) / 32 * 2;
+ else
+ s->mb_height = (s->height + 15) / 16;
+
+ if(s->avctx->pix_fmt == PIX_FMT_NONE){
+ av_log(s->avctx, AV_LOG_ERROR, "decoding to PIX_FMT_NONE is not supported.\n");
+ return -1;
+ }
+
+ if(s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height)){
+ av_log(s->avctx, AV_LOG_ERROR, "too many threads\n");
+ return -1;
+ }
+
+ if((s->width || s->height) && avcodec_check_dimensions(s->avctx, s->width, s->height))
+ return -1;
+
+ dsputil_init(&s->dsp, s->avctx);
+ ff_dct_common_init(s);
+
+ s->flags= s->avctx->flags;
+ s->flags2= s->avctx->flags2;
+
+ s->mb_width = (s->width + 15) / 16;
+ s->mb_stride = s->mb_width + 1;
+ s->b8_stride = s->mb_width*2 + 1;
+ s->b4_stride = s->mb_width*4 + 1;
+ mb_array_size= s->mb_height * s->mb_stride;
+ mv_table_size= (s->mb_height+2) * s->mb_stride + 1;
+
+ /* set chroma shifts */
+ avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
+ &(s->chroma_y_shift) );
+
+    /* set default edge pos, will be overridden in decode_header if needed */
+ s->h_edge_pos= s->mb_width*16;
+ s->v_edge_pos= s->mb_height*16;
+
+ s->mb_num = s->mb_width * s->mb_height;
+
+ s->block_wrap[0]=
+ s->block_wrap[1]=
+ s->block_wrap[2]=
+ s->block_wrap[3]= s->b8_stride;
+ s->block_wrap[4]=
+ s->block_wrap[5]= s->mb_stride;
+
+ y_size = s->b8_stride * (2 * s->mb_height + 1);
+ c_size = s->mb_stride * (s->mb_height + 1);
+ yc_size = y_size + 2 * c_size;
+
+ /* convert fourcc to upper case */
+ s->codec_tag= toupper( s->avctx->codec_tag &0xFF)
+ + (toupper((s->avctx->codec_tag>>8 )&0xFF)<<8 )
+ + (toupper((s->avctx->codec_tag>>16)&0xFF)<<16)
+ + (toupper((s->avctx->codec_tag>>24)&0xFF)<<24);
+
+ s->stream_codec_tag= toupper( s->avctx->stream_codec_tag &0xFF)
+ + (toupper((s->avctx->stream_codec_tag>>8 )&0xFF)<<8 )
+ + (toupper((s->avctx->stream_codec_tag>>16)&0xFF)<<16)
+ + (toupper((s->avctx->stream_codec_tag>>24)&0xFF)<<24);
+
+ s->avctx->coded_frame= (AVFrame*)&s->current_picture;
+
+    FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num+1)*sizeof(int), fail) //error resilience code looks cleaner with this
+ for(y=0; y<s->mb_height; y++){
+ for(x=0; x<s->mb_width; x++){
+ s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
+ }
+ }
+ s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
+
+ if (s->encoding) {
+ /* Allocate MV tables */
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
+ s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
+ s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
+ s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
+ s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
+ s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
+ s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
+
+ if(s->msmpeg4_version){
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
+ }
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
+
+ /* Allocate MB type table */
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type , mb_array_size * sizeof(uint16_t), fail) //needed for encoding
+
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
+
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix , 64*32 * sizeof(int), fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix , 64*32 * sizeof(int), fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t), fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
+
+ if(s->avctx->noise_reduction){
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
+ }
+ }
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->picture, MAX_PICTURE_COUNT * sizeof(Picture), fail)
+ for(i = 0; i < MAX_PICTURE_COUNT; i++) {
+ avcodec_get_frame_defaults((AVFrame *)&s->picture[i]);
+ }
+
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail)
+
+ if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
+ /* interlaced direct mode decoding tables */
+ for(i=0; i<2; i++){
+ int j, k;
+ for(j=0; j<2; j++){
+ for(k=0; k<2; k++){
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail)
+ s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
+ }
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
+ s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]+ s->mb_stride + 1;
+ }
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
+ }
+ }
+ if (s->out_format == FMT_H263) {
+ /* ac values */
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base, yc_size * sizeof(int16_t) * 16, fail);
+ s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
+ s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
+ s->ac_val[2] = s->ac_val[1] + c_size;
+
+ /* cbp values */
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
+ s->coded_block= s->coded_block_base + s->b8_stride + 1;
+
+ /* cbp, ac_pred, pred_dir */
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail)
+ }
+
+ if (s->h263_pred || s->h263_plus || !s->encoding) {
+ /* dc values */
+ //MN: we need these for error resilience of intra-frames
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
+ s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
+ s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
+ s->dc_val[2] = s->dc_val[1] + c_size;
+ for(i=0;i<yc_size;i++)
+ s->dc_val_base[i] = 1024;
+ }
+
+    /* which mb is an intra block */
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
+ memset(s->mbintra_table, 1, mb_array_size);
+
+ /* init macroblock skip table */
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size+2, fail);
+    //Note the +2 is for a quicker mpeg4 slice_end detection
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE, fail);
+
+ s->parse_context.state= -1;
+ if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
+ s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
+ s->visualization_buffer[1] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
+ s->visualization_buffer[2] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
+ }
+
+ s->context_initialized = 1;
+
+ s->thread_context[0]= s;
+ threads = s->avctx->thread_count;
+
+ for(i=1; i<threads; i++){
+ s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
+ memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
+ }
+
+ for(i=0; i<threads; i++){
+ if(init_duplicate_context(s->thread_context[i], s) < 0)
+ goto fail;
+ s->thread_context[i]->start_mb_y= (s->mb_height*(i ) + s->avctx->thread_count/2) / s->avctx->thread_count;
+ s->thread_context[i]->end_mb_y = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
+ }
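+    /* e.g. with mb_height == 36 and 4 threads the macroblock rows are split into
+       [0,9), [9,18), [18,27) and [27,36); the "+ thread_count/2" term merely
+       rounds to the nearest row boundary. */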
+
+ return 0;
+ fail:
+ MPV_common_end(s);
+ return -1;
+}
+
+/* free the common structure for both encoder and decoder */
+void MPV_common_end(MpegEncContext *s)
+{
+ int i, j, k;
+
+ for(i=0; i<s->avctx->thread_count; i++){
+ free_duplicate_context(s->thread_context[i]);
+ }
+ for(i=1; i<s->avctx->thread_count; i++){
+ av_freep(&s->thread_context[i]);
+ }
+
+ av_freep(&s->parse_context.buffer);
+ s->parse_context.buffer_size=0;
+
+ av_freep(&s->mb_type);
+ av_freep(&s->p_mv_table_base);
+ av_freep(&s->b_forw_mv_table_base);
+ av_freep(&s->b_back_mv_table_base);
+ av_freep(&s->b_bidir_forw_mv_table_base);
+ av_freep(&s->b_bidir_back_mv_table_base);
+ av_freep(&s->b_direct_mv_table_base);
+ s->p_mv_table= NULL;
+ s->b_forw_mv_table= NULL;
+ s->b_back_mv_table= NULL;
+ s->b_bidir_forw_mv_table= NULL;
+ s->b_bidir_back_mv_table= NULL;
+ s->b_direct_mv_table= NULL;
+ for(i=0; i<2; i++){
+ for(j=0; j<2; j++){
+ for(k=0; k<2; k++){
+ av_freep(&s->b_field_mv_table_base[i][j][k]);
+ s->b_field_mv_table[i][j][k]=NULL;
+ }
+ av_freep(&s->b_field_select_table[i][j]);
+ av_freep(&s->p_field_mv_table_base[i][j]);
+ s->p_field_mv_table[i][j]=NULL;
+ }
+ av_freep(&s->p_field_select_table[i]);
+ }
+
+ av_freep(&s->dc_val_base);
+ av_freep(&s->ac_val_base);
+ av_freep(&s->coded_block_base);
+ av_freep(&s->mbintra_table);
+ av_freep(&s->cbp_table);
+ av_freep(&s->pred_dir_table);
+
+ av_freep(&s->mbskip_table);
+ av_freep(&s->prev_pict_types);
+ av_freep(&s->bitstream_buffer);
+ s->allocated_bitstream_buffer_size=0;
+
+ av_freep(&s->avctx->stats_out);
+ av_freep(&s->ac_stats);
+ av_freep(&s->error_status_table);
+ av_freep(&s->mb_index2xy);
+ av_freep(&s->lambda_table);
+ av_freep(&s->q_intra_matrix);
+ av_freep(&s->q_inter_matrix);
+ av_freep(&s->q_intra_matrix16);
+ av_freep(&s->q_inter_matrix16);
+ av_freep(&s->input_picture);
+ av_freep(&s->reordered_input_picture);
+ av_freep(&s->dct_offset);
+
+ if(s->picture){
+ for(i=0; i<MAX_PICTURE_COUNT; i++){
+ free_picture(s, &s->picture[i]);
+ }
+ }
+ av_freep(&s->picture);
+ s->context_initialized = 0;
+ s->last_picture_ptr=
+ s->next_picture_ptr=
+ s->current_picture_ptr= NULL;
+ s->linesize= s->uvlinesize= 0;
+
+ for(i=0; i<3; i++)
+ av_freep(&s->visualization_buffer[i]);
+
+ avcodec_default_free_buffers(s->avctx);
+}
+
+void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
+{
+ int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
+ uint8_t index_run[MAX_RUN+1];
+ int last, run, level, start, end, i;
+
+ /* If table is static, we can quit if rl->max_level[0] is not NULL */
+ if(static_store && rl->max_level[0])
+ return;
+
+ /* compute max_level[], max_run[] and index_run[] */
+ for(last=0;last<2;last++) {
+ if (last == 0) {
+ start = 0;
+ end = rl->last;
+ } else {
+ start = rl->last;
+ end = rl->n;
+ }
+
+ memset(max_level, 0, MAX_RUN + 1);
+ memset(max_run, 0, MAX_LEVEL + 1);
+ memset(index_run, rl->n, MAX_RUN + 1);
+ for(i=start;i<end;i++) {
+ run = rl->table_run[i];
+ level = rl->table_level[i];
+ if (index_run[run] == rl->n)
+ index_run[run] = i;
+ if (level > max_level[run])
+ max_level[run] = level;
+ if (run > max_run[level])
+ max_run[level] = run;
+ }
+ if(static_store)
+ rl->max_level[last] = static_store[last];
+ else
+ rl->max_level[last] = av_malloc(MAX_RUN + 1);
+ memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
+ if(static_store)
+ rl->max_run[last] = static_store[last] + MAX_RUN + 1;
+ else
+ rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
+ memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
+ if(static_store)
+ rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
+ else
+ rl->index_run[last] = av_malloc(MAX_RUN + 1);
+ memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
+ }
+}
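+/* note: after init_rl(), max_level[last][run] is the largest level coded with
+ * that run, max_run[last][level] the longest run coded with that level, and
+ * index_run[last][run] the first table index using that run, each for the
+ * "not last" (last==0) and "last" (last==1) halves of the table.  When
+ * static_store is given, the three arrays are packed into that static buffer
+ * instead of being allocated with av_malloc(). */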
+
+void init_vlc_rl(RLTable *rl)
+{
+ int i, q;
+
+ for(q=0; q<32; q++){
+ int qmul= q*2;
+ int qadd= (q-1)|1;
+
+ if(q==0){
+ qmul=1;
+ qadd=0;
+ }
+ for(i=0; i<rl->vlc.table_size; i++){
+ int code= rl->vlc.table[i][0];
+ int len = rl->vlc.table[i][1];
+ int level, run;
+
+ if(len==0){ // illegal code
+ run= 66;
+ level= MAX_LEVEL;
+ }else if(len<0){ //more bits needed
+ run= 0;
+ level= code;
+ }else{
+ if(code==rl->n){ //esc
+ run= 66;
+ level= 0;
+ }else{
+ run= rl->table_run [code] + 1;
+ level= rl->table_level[code] * qmul + qadd;
+ if(code >= rl->last) run+=192;
+ }
+ }
+ rl->rl_vlc[q][i].len= len;
+ rl->rl_vlc[q][i].level= level;
+ rl->rl_vlc[q][i].run= run;
+ }
+ }
+}
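+/* note: this bakes the H.263-style dequantization (level*qmul + qadd with
+ * qmul = 2*q and qadd = (q-1)|1, identity for q == 0) into one RL_VLC table per
+ * qscale, so the decoder can fetch run, dequantized level and length in a
+ * single lookup; run == 66 marks escape/invalid codes and +192 on the run
+ * flags the last coefficient of a block. */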
+
+int ff_find_unused_picture(MpegEncContext *s, int shared){
+ int i;
+
+ if(shared){
+ for(i=0; i<MAX_PICTURE_COUNT; i++){
+ if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i;
+ }
+ }else{
+ for(i=0; i<MAX_PICTURE_COUNT; i++){
+ if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i; //FIXME
+ }
+ for(i=0; i<MAX_PICTURE_COUNT; i++){
+ if(s->picture[i].data[0]==NULL) return i;
+ }
+ }
+
+ av_log(s->avctx, AV_LOG_FATAL, "Internal error, picture buffer overflow\n");
+    /* We could return -1, but the codec would crash trying to draw into a
+     * nonexistent frame anyway. This is safer than waiting for a random crash.
+     * The return value is also never useful here: an encoder must only allocate
+     * as many pictures as the specification allows, which has no relationship
+     * to how much libavcodec could allocate (and MAX_PICTURE_COUNT is always
+     * large enough for such valid streams).
+     * A decoder, in turn, has to check stream validity and drop frames if too
+     * many reference frames are around; waiting for "OOM" is not correct at
+     * all. Similarly, missing reference frames have to be replaced by
+     * interpolated/MC frames; anything else is a bug in the codec ...
+     */
+ abort();
+ return -1;
+}
+
+static void update_noise_reduction(MpegEncContext *s){
+ int intra, i;
+
+ for(intra=0; intra<2; intra++){
+ if(s->dct_count[intra] > (1<<16)){
+ for(i=0; i<64; i++){
+ s->dct_error_sum[intra][i] >>=1;
+ }
+ s->dct_count[intra] >>= 1;
+ }
+
+ for(i=0; i<64; i++){
+ s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
+ }
+ }
+}
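+/* note: the offset computed above is roughly noise_reduction divided by the
+ * running per-coefficient average accumulated in dct_error_sum, so coefficients
+ * that are usually small receive a stronger shrinkage when the encoder applies
+ * these offsets; halving both statistics once dct_count exceeds 1<<16 turns
+ * them into exponentially decaying averages. */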
+
+/**
+ * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
+ */
+int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
+{
+ int i;
+ Picture *pic;
+ s->mb_skipped = 0;
+
+ assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
+
+ /* mark&release old frames */
+ if (s->pict_type != FF_B_TYPE && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
+ if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
+ free_frame_buffer(s, s->last_picture_ptr);
+
+ /* release forgotten pictures */
+ /* if(mpeg124/h263) */
+ if(!s->encoding){
+ for(i=0; i<MAX_PICTURE_COUNT; i++){
+ if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
+ av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
+ free_frame_buffer(s, &s->picture[i]);
+ }
+ }
+ }
+ }
+ }
+
+ if(!s->encoding){
+        /* release non-reference frames */
+ for(i=0; i<MAX_PICTURE_COUNT; i++){
+ if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
+ free_frame_buffer(s, &s->picture[i]);
+ }
+ }
+
+ if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
+            pic= s->current_picture_ptr; //we already have an unused image (maybe it was set before reading the header)
+ else{
+ i= ff_find_unused_picture(s, 0);
+ pic= &s->picture[i];
+ }
+
+ pic->reference= 0;
+ if (!s->dropable){
+ if (s->codec_id == CODEC_ID_H264)
+ pic->reference = s->picture_structure;
+ else if (s->pict_type != FF_B_TYPE)
+ pic->reference = 3;
+ }
+
+ pic->coded_picture_number= s->coded_picture_number++;
+
+ if(ff_alloc_picture(s, pic, 0) < 0)
+ return -1;
+
+ s->current_picture_ptr= pic;
+ s->current_picture_ptr->top_field_first= s->top_field_first; //FIXME use only the vars from current_pic
+ s->current_picture_ptr->interlaced_frame= !s->progressive_frame && !s->progressive_sequence;
+ }
+
+ s->current_picture_ptr->pict_type= s->pict_type;
+// if(s->flags && CODEC_FLAG_QSCALE)
+ // s->current_picture_ptr->quality= s->new_picture_ptr->quality;
+ s->current_picture_ptr->key_frame= s->pict_type == FF_I_TYPE;
+
+ ff_copy_picture(&s->current_picture, s->current_picture_ptr);
+
+ if (s->pict_type != FF_B_TYPE) {
+ s->last_picture_ptr= s->next_picture_ptr;
+ if(!s->dropable)
+ s->next_picture_ptr= s->current_picture_ptr;
+ }
+/* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
+ s->last_picture_ptr ? s->last_picture_ptr->data[0] : NULL,
+ s->next_picture_ptr ? s->next_picture_ptr->data[0] : NULL,
+ s->current_picture_ptr ? s->current_picture_ptr->data[0] : NULL,
+ s->pict_type, s->dropable);*/
+
+ if(s->codec_id != CODEC_ID_H264){
+ if((s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL) && s->pict_type!=FF_I_TYPE){
+ av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
+ /* Allocate a dummy frame */
+ i= ff_find_unused_picture(s, 0);
+ s->last_picture_ptr= &s->picture[i];
+ if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
+ return -1;
+ }
+ if((s->next_picture_ptr==NULL || s->next_picture_ptr->data[0]==NULL) && s->pict_type==FF_B_TYPE){
+ /* Allocate a dummy frame */
+ i= ff_find_unused_picture(s, 0);
+ s->next_picture_ptr= &s->picture[i];
+ if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
+ return -1;
+ }
+ }
+
+ if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
+ if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);
+
+ assert(s->pict_type == FF_I_TYPE || (s->last_picture_ptr && s->last_picture_ptr->data[0]));
+
+ if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
+ int i;
+ for(i=0; i<4; i++){
+ if(s->picture_structure == PICT_BOTTOM_FIELD){
+ s->current_picture.data[i] += s->current_picture.linesize[i];
+ }
+ s->current_picture.linesize[i] *= 2;
+ s->last_picture.linesize[i] *=2;
+ s->next_picture.linesize[i] *=2;
+ }
+ }
+
+ s->hurry_up= s->avctx->hurry_up;
+ s->error_recognition= avctx->error_recognition;
+
+    /* Set the dequantizer; we can't do it during init as it might change for
+       MPEG-4, and we can't do it in the header decode as init is not called for
+       MPEG-4 at that point yet. */
+ if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
+ s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
+ s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
+ }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
+ s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
+ s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
+ }else{
+ s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
+ s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
+ }
+
+ if(s->dct_error_sum){
+ assert(s->avctx->noise_reduction && s->encoding);
+
+ update_noise_reduction(s);
+ }
+
+ return 0;
+}
+
+/* generic function for encode/decode called after a frame has been coded/decoded */
+void MPV_frame_end(MpegEncContext *s)
+{
+ int i;
+ /* draw edge for correct motion prediction if outside */
+
+ if(1
+ && s->unrestricted_mv
+ && s->current_picture.reference
+ && !s->intra_only
+ && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
+ s->dsp.draw_edges(s->current_picture.data[0], s->linesize , s->h_edge_pos , s->v_edge_pos , EDGE_WIDTH );
+ s->dsp.draw_edges(s->current_picture.data[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
+ s->dsp.draw_edges(s->current_picture.data[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
+ }
+ emms_c();
+
+ s->last_pict_type = s->pict_type;
+ s->last_lambda_for[s->pict_type]= s->current_picture_ptr->quality;
+ if(s->pict_type!=FF_B_TYPE){
+ s->last_non_b_pict_type= s->pict_type;
+ }
+#if 0
+ /* copy back current_picture variables */
+ for(i=0; i<MAX_PICTURE_COUNT; i++){
+ if(s->picture[i].data[0] == s->current_picture.data[0]){
+ s->picture[i]= s->current_picture;
+ break;
+ }
+ }
+ assert(i<MAX_PICTURE_COUNT);
+#endif
+
+ if(s->encoding){
+ /* release non-reference frames */
+ for(i=0; i<MAX_PICTURE_COUNT; i++){
+ if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
+ free_frame_buffer(s, &s->picture[i]);
+ }
+ }
+ }
+ // clear copies, to avoid confusion
+#if 0
+ memset(&s->last_picture, 0, sizeof(Picture));
+ memset(&s->next_picture, 0, sizeof(Picture));
+ memset(&s->current_picture, 0, sizeof(Picture));
+#endif
+ s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
+}
+
+/**
+ * prints debugging info for the given picture.
+ */
+void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
+
+ if(!pict || !pict->mb_type) return;
+
+ if (s->avctx->debug_mv && pict->motion_val) {
+ pict->mb_height=s->mb_height;
+ pict->mb_width=s->mb_width;
+ pict->mb_stride=s->mb_stride;
+ pict->b8_stride=s->b8_stride;
+ }
+ if (pict)
+ {
+ pict->play_flags=(s->quarter_sample?CODEC_FLAG_QPEL:0);
+ pict->num_sprite_warping_points=s->num_sprite_warping_points;
+ pict->real_sprite_warping_points=s->real_sprite_warping_points;
+ }
+}
+
+static inline int hpel_motion_lowres(MpegEncContext *s,
+ uint8_t *dest, uint8_t *src,
+ int field_based, int field_select,
+ int src_x, int src_y,
+ int width, int height, int stride,
+ int h_edge_pos, int v_edge_pos,
+ int w, int h, h264_chroma_mc_func *pix_op,
+ int motion_x, int motion_y)
+{
+ const int lowres= s->avctx->lowres;
+ const int op_index= FFMIN(lowres, 2);
+ const int s_mask= (2<<lowres)-1;
+ int emu=0;
+ int sx, sy;
+
+ if(s->quarter_sample){
+ motion_x/=2;
+ motion_y/=2;
+ }
+
+ sx= motion_x & s_mask;
+ sy= motion_y & s_mask;
+ src_x += motion_x >> (lowres+1);
+ src_y += motion_y >> (lowres+1);
+
+ src += src_y * stride + src_x;
+
+ if( (unsigned)src_x > h_edge_pos - (!!sx) - w
+ || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
+ ff_emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
+ src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
+ src= s->edge_emu_buffer;
+ emu=1;
+ }
+
+ sx= (sx << 2) >> lowres;
+ sy= (sy << 2) >> lowres;
+ if(field_select)
+ src += s->linesize;
+ pix_op[op_index](dest, src, stride, h, sx, sy);
+ return emu;
+}
+
+/* apply one mpeg motion vector to the three components */
+static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
+ uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
+ int field_based, int bottom_field, int field_select,
+ uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
+ int motion_x, int motion_y, int h, int mb_y)
+{
+ uint8_t *ptr_y, *ptr_cb, *ptr_cr;
+ int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
+ const int lowres= s->avctx->lowres;
+ const int op_index= FFMIN(lowres, 2);
+ const int block_s= 8>>lowres;
+ const int s_mask= (2<<lowres)-1;
+ const int h_edge_pos = s->h_edge_pos >> lowres;
+ const int v_edge_pos = s->v_edge_pos >> lowres;
+ linesize = s->current_picture.linesize[0] << field_based;
+ uvlinesize = s->current_picture.linesize[1] << field_based;
+
+ if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
+ motion_x/=2;
+ motion_y/=2;
+ }
+
+ if(field_based){
+ motion_y += (bottom_field - field_select)*((1<<lowres)-1);
+ }
+
+ sx= motion_x & s_mask;
+ sy= motion_y & s_mask;
+ src_x = s->mb_x*2*block_s + (motion_x >> (lowres+1));
+ src_y =( mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));
+
+ if (s->out_format == FMT_H263) {
+ uvsx = ((motion_x>>1) & s_mask) | (sx&1);
+ uvsy = ((motion_y>>1) & s_mask) | (sy&1);
+ uvsrc_x = src_x>>1;
+ uvsrc_y = src_y>>1;
+ }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
+ mx = motion_x / 4;
+ my = motion_y / 4;
+ uvsx = (2*mx) & s_mask;
+ uvsy = (2*my) & s_mask;
+ uvsrc_x = s->mb_x*block_s + (mx >> lowres);
+ uvsrc_y = mb_y*block_s + (my >> lowres);
+ } else {
+ mx = motion_x / 2;
+ my = motion_y / 2;
+ uvsx = mx & s_mask;
+ uvsy = my & s_mask;
+ uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
+ uvsrc_y =( mb_y*block_s>>field_based) + (my >> (lowres+1));
+ }
+
+ ptr_y = ref_picture[0] + src_y * linesize + src_x;
+ ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
+ ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
+
+ if( (unsigned)src_x > h_edge_pos - (!!sx) - 2*block_s
+ || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
+ ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
+ src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
+ ptr_y = s->edge_emu_buffer;
+ if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
+ uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
+ ff_emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9, 9+field_based,
+ uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
+ ff_emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
+ uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
+ ptr_cb= uvbuf;
+ ptr_cr= uvbuf+16;
+ }
+ }
+
+ if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
+ dest_y += s->linesize;
+ dest_cb+= s->uvlinesize;
+ dest_cr+= s->uvlinesize;
+ }
+
+ if(field_select){
+ ptr_y += s->linesize;
+ ptr_cb+= s->uvlinesize;
+ ptr_cr+= s->uvlinesize;
+ }
+
+ sx= (sx << 2) >> lowres;
+ sy= (sy << 2) >> lowres;
+ pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);
+
+ if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
+ uvsx= (uvsx << 2) >> lowres;
+ uvsy= (uvsy << 2) >> lowres;
+ pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
+ pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
+ }
+ //FIXME h261 lowres loop filter
+}
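+/* note: in lowres mode block_s == 8 >> lowres and the motion vectors keep
+ * lowres+1 fractional bits (s_mask); the (sx << 2) >> lowres rescaling maps
+ * them onto the 1/8-pel grid expected by the h264_chroma_mc functions, which
+ * are reused here as generic bilinear interpolators for both luma and chroma. */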
+
+static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
+ uint8_t *dest_cb, uint8_t *dest_cr,
+ uint8_t **ref_picture,
+ h264_chroma_mc_func *pix_op,
+ int mx, int my){
+ const int lowres= s->avctx->lowres;
+ const int op_index= FFMIN(lowres, 2);
+ const int block_s= 8>>lowres;
+ const int s_mask= (2<<lowres)-1;
+ const int h_edge_pos = s->h_edge_pos >> (lowres+1);
+ const int v_edge_pos = s->v_edge_pos >> (lowres+1);
+ int emu=0, src_x, src_y, offset, sx, sy;
+ uint8_t *ptr;
+
+ if(s->quarter_sample){
+ mx/=2;
+ my/=2;
+ }
+
+ /* In case of 8X8, we construct a single chroma motion vector
+ with a special rounding */
+ mx= ff_h263_round_chroma(mx);
+ my= ff_h263_round_chroma(my);
+
+ sx= mx & s_mask;
+ sy= my & s_mask;
+ src_x = s->mb_x*block_s + (mx >> (lowres+1));
+ src_y = s->mb_y*block_s + (my >> (lowres+1));
+
+ offset = src_y * s->uvlinesize + src_x;
+ ptr = ref_picture[1] + offset;
+ if(s->flags&CODEC_FLAG_EMU_EDGE){
+ if( (unsigned)src_x > h_edge_pos - (!!sx) - block_s
+ || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
+ ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
+ ptr= s->edge_emu_buffer;
+ emu=1;
+ }
+ }
+ sx= (sx << 2) >> lowres;
+ sy= (sy << 2) >> lowres;
+ pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
+
+ ptr = ref_picture[2] + offset;
+ if(emu){
+ ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
+ ptr= s->edge_emu_buffer;
+ }
+ pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
+}
+
+/**
+ * motion compensation of a single macroblock
+ * @param s context
+ * @param dest_y luma destination pointer
+ * @param dest_cb chroma cb/u destination pointer
+ * @param dest_cr chroma cr/v destination pointer
+ * @param dir direction (0->forward, 1->backward)
+ * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
+ * @param pix_op halfpel motion compensation function (average or put normally)
+ * the motion vectors are taken from s->mv and the MV type from s->mv_type
+ */
+static inline void MPV_motion_lowres(MpegEncContext *s,
+ uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
+ int dir, uint8_t **ref_picture,
+ h264_chroma_mc_func *pix_op)
+{
+ int mx, my;
+ int mb_x, mb_y, i;
+ const int lowres= s->avctx->lowres;
+ const int block_s= 8>>lowres;
+
+ mb_x = s->mb_x;
+ mb_y = s->mb_y;
+
+ switch(s->mv_type) {
+ case MV_TYPE_16X16:
+ mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+ 0, 0, 0,
+ ref_picture, pix_op,
+ s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y);
+ break;
+ case MV_TYPE_8X8:
+ mx = 0;
+ my = 0;
+ for(i=0;i<4;i++) {
+ hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
+ ref_picture[0], 0, 0,
+ (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
+ s->width, s->height, s->linesize,
+ s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
+ block_s, block_s, pix_op,
+ s->mv[dir][i][0], s->mv[dir][i][1]);
+
+ mx += s->mv[dir][i][0];
+ my += s->mv[dir][i][1];
+ }
+
+ if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
+ chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
+ break;
+ case MV_TYPE_FIELD:
+ if (s->picture_structure == PICT_FRAME) {
+ /* top field */
+ mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+ 1, 0, s->field_select[dir][0],
+ ref_picture, pix_op,
+ s->mv[dir][0][0], s->mv[dir][0][1], block_s, mb_y);
+ /* bottom field */
+ mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+ 1, 1, s->field_select[dir][1],
+ ref_picture, pix_op,
+ s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
+ } else {
+ if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != FF_B_TYPE && !s->first_field){
+ ref_picture= s->current_picture_ptr->data;
+ }
+
+ mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+ 0, 0, s->field_select[dir][0],
+ ref_picture, pix_op,
+ s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y>>1);
+ }
+ break;
+ case MV_TYPE_16X8:
+ for(i=0; i<2; i++){
+ uint8_t ** ref2picture;
+
+ if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == FF_B_TYPE || s->first_field){
+ ref2picture= ref_picture;
+ }else{
+ ref2picture= s->current_picture_ptr->data;
+ }
+
+ mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+ 0, 0, s->field_select[dir][i],
+ ref2picture, pix_op,
+ s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s, mb_y>>1);
+
+ dest_y += 2*block_s*s->linesize;
+ dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
+ dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
+ }
+ break;
+ case MV_TYPE_DMV:
+ if(s->picture_structure == PICT_FRAME){
+ for(i=0; i<2; i++){
+ int j;
+ for(j=0; j<2; j++){
+ mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+ 1, j, j^i,
+ ref_picture, pix_op,
+ s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s, mb_y);
+ }
+ pix_op = s->dsp.avg_h264_chroma_pixels_tab;
+ }
+ }else{
+ for(i=0; i<2; i++){
+ mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+ 0, 0, s->picture_structure != i+1,
+ ref_picture, pix_op,
+ s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s, mb_y>>1);
+
+ // after put we make avg of the same block
+ pix_op = s->dsp.avg_h264_chroma_pixels_tab;
+
+                //opposite parity is always in the same frame if this is the second field
+ if(!s->first_field){
+ ref_picture = s->current_picture_ptr->data;
+ }
+ }
+ }
+ break;
+ default: assert(0);
+ }
+}
+
+/* put block[] to dest[] */
+static inline void put_dct(MpegEncContext *s,
+ DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
+{
+ s->dct_unquantize_intra(s, block, i, qscale);
+ s->dsp.idct_put (dest, line_size, block);
+}
+
+/* add block[] to dest[] */
+static inline void add_dct(MpegEncContext *s,
+ DCTELEM *block, int i, uint8_t *dest, int line_size)
+{
+ if (s->block_last_index[i] >= 0) {
+ s->dsp.idct_add (dest, line_size, block);
+ }
+}
+
+static inline void add_dequant_dct(MpegEncContext *s,
+ DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
+{
+ if (s->block_last_index[i] >= 0) {
+ s->dct_unquantize_inter(s, block, i, qscale);
+
+ s->dsp.idct_add (dest, line_size, block);
+ }
+}
+
+/**
+ * cleans dc, ac, coded_block for the current non-intra MB
+ */
+void ff_clean_intra_table_entries(MpegEncContext *s)
+{
+ int wrap = s->b8_stride;
+ int xy = s->block_index[0];
+
+ s->dc_val[0][xy ] =
+ s->dc_val[0][xy + 1 ] =
+ s->dc_val[0][xy + wrap] =
+ s->dc_val[0][xy + 1 + wrap] = 1024;
+ /* ac pred */
+ memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
+ memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
+ if (s->msmpeg4_version>=3) {
+ s->coded_block[xy ] =
+ s->coded_block[xy + 1 ] =
+ s->coded_block[xy + wrap] =
+ s->coded_block[xy + 1 + wrap] = 0;
+ }
+ /* chroma */
+ wrap = s->mb_stride;
+ xy = s->mb_x + s->mb_y * wrap;
+ s->dc_val[1][xy] =
+ s->dc_val[2][xy] = 1024;
+ /* ac pred */
+ memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
+ memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
+
+ s->mbintra_table[xy]= 0;
+}
+
+/* generic function called after a macroblock has been parsed by the
+ decoder or after it has been encoded by the encoder.
+
+ Important variables used:
+ s->mb_intra : true if intra macroblock
+ s->mv_dir : motion vector direction
+ s->mv_type : motion vector type
+ s->mv : motion vector
+ s->interlaced_dct : true if interlaced dct used (mpeg2)
+ */
+static av_always_inline
+void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
+ int lowres_flag, int is_mpeg12)
+{
+ const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
+
+ if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
+ /* save DCT coefficients */
+ int i,j;
+ DCTELEM *dct = &s->current_picture.dct_coeff[mb_xy*64*6];
+ for(i=0; i<6; i++)
+ for(j=0; j<64; j++)
+ *dct++ = block[i][s->dsp.idct_permutation[j]];
+ }
+
+ s->current_picture.qscale_table[mb_xy]= s->qscale;
+
+ /* update DC predictors for P macroblocks */
+ if (!s->mb_intra) {
+ if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
+ if(s->mbintra_table[mb_xy])
+ ff_clean_intra_table_entries(s);
+ } else {
+ s->last_dc[0] =
+ s->last_dc[1] =
+ s->last_dc[2] = 128 << s->intra_dc_precision;
+ }
+ }
+ else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
+ s->mbintra_table[mb_xy]=1;
+
+ if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==FF_B_TYPE) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
+ uint8_t *dest_y, *dest_cb, *dest_cr;
+ int dct_linesize, dct_offset;
+ op_pixels_func (*op_pix)[4];
+ qpel_mc_func (*op_qpix)[16];
+ const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
+ const int uvlinesize= s->current_picture.linesize[1];
+ const int readable= s->pict_type != FF_B_TYPE || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
+ const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
+
+ /* avoid copy if macroblock skipped in last frame too */
+        /* skip only during decoding as we might trash the buffers a bit during encoding */
+ if(!s->encoding){
+ uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
+ const int age= s->current_picture.age;
+
+ assert(age);
+
+ if (s->mb_skipped) {
+ s->mb_skipped= 0;
+ assert(s->pict_type!=FF_I_TYPE);
+
+ (*mbskip_ptr) ++; /* indicate that this time we skipped it */
+ if(*mbskip_ptr >99) *mbskip_ptr= 99;
+
+ /* if previous was skipped too, then nothing to do ! */
+ if (*mbskip_ptr >= age && s->current_picture.reference){
+ return;
+ }
+ } else if(!s->current_picture.reference){
+ (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
+ if(*mbskip_ptr >99) *mbskip_ptr= 99;
+ } else{
+ *mbskip_ptr = 0; /* not skipped */
+ }
+ }
+
+ dct_linesize = linesize << s->interlaced_dct;
+ dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;
+
+ if(readable){
+ dest_y= s->dest[0];
+ dest_cb= s->dest[1];
+ dest_cr= s->dest[2];
+ }else{
+ dest_y = s->b_scratchpad;
+ dest_cb= s->b_scratchpad+16*linesize;
+ dest_cr= s->b_scratchpad+32*linesize;
+ }
+
+ if (!s->mb_intra) {
+ /* motion handling */
+ /* decoding or more than one mb_type (MC was already done otherwise) */
+ if(!s->encoding){
+ if(lowres_flag){
+ h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
+
+ if (s->mv_dir & MV_DIR_FORWARD) {
+ MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix);
+ op_pix = s->dsp.avg_h264_chroma_pixels_tab;
+ }
+ if (s->mv_dir & MV_DIR_BACKWARD) {
+ MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix);
+ }
+ }else{
+ op_qpix= s->me.qpel_put;
+ if ((!s->no_rounding) || s->pict_type==FF_B_TYPE){
+ op_pix = s->dsp.put_pixels_tab;
+ }else{
+ op_pix = s->dsp.put_no_rnd_pixels_tab;
+ }
+ if (s->mv_dir & MV_DIR_FORWARD) {
+ MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
+ op_pix = s->dsp.avg_pixels_tab;
+ op_qpix= s->me.qpel_avg;
+ }
+ if (s->mv_dir & MV_DIR_BACKWARD) {
+ MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
+ }
+ }
+ }
+
+ /* skip dequant / idct if we are really late ;) */
+ if(s->hurry_up>1) goto skip_idct;
+ if(s->avctx->skip_idct){
+ if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == FF_B_TYPE)
+ ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != FF_I_TYPE)
+ || s->avctx->skip_idct >= AVDISCARD_ALL)
+ goto skip_idct;
+ }
+
+ /* add dct residue */
+ if(s->encoding || !( s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
+ || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
+ add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
+ add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
+ add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
+ add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
+
+ if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
+ if (s->chroma_y_shift){
+ add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
+ add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
+ }else{
+ dct_linesize >>= 1;
+ dct_offset >>=1;
+ add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
+ add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
+ add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
+ add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
+ }
+ }
+ } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
+ add_dct(s, block[0], 0, dest_y , dct_linesize);
+ add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
+ add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
+ add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
+
+ if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
+ if(s->chroma_y_shift){//Chroma420
+ add_dct(s, block[4], 4, dest_cb, uvlinesize);
+ add_dct(s, block[5], 5, dest_cr, uvlinesize);
+ }else{
+ //chroma422
+ dct_linesize = uvlinesize << s->interlaced_dct;
+ dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
+
+ add_dct(s, block[4], 4, dest_cb, dct_linesize);
+ add_dct(s, block[5], 5, dest_cr, dct_linesize);
+ add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
+ add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
+ if(!s->chroma_x_shift){//Chroma444
+ add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
+ add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
+ add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
+ add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
+ }
+ }
+ }//fi gray
+ }
+ else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
+ ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
+ }
+ } else {
+ /* dct only in intra block */
+ if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
+ put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
+ put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
+ put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
+ put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
+
+ if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
+ if(s->chroma_y_shift){
+ put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
+ put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
+ }else{
+ dct_offset >>=1;
+ dct_linesize >>=1;
+ put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
+ put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
+ put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
+ put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
+ }
+ }
+ }else{
+ s->dsp.idct_put(dest_y , dct_linesize, block[0]);
+ s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
+ s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
+ s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
+
+ if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
+ if(s->chroma_y_shift){
+ s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
+ s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
+ }else{
+
+ dct_linesize = uvlinesize << s->interlaced_dct;
+ dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
+
+ s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
+ s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
+ s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
+ s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
+ if(!s->chroma_x_shift){//Chroma444
+ s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
+ s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
+ s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
+ s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
+ }
+ }
+ }//gray
+ }
+ }
+skip_idct:
+ if(!readable){
+ s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
+ s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
+ s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
+ }
+ }
+}
+
+void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
+#if !CONFIG_SMALL
+ if(s->out_format == FMT_MPEG1) {
+ if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
+ else MPV_decode_mb_internal(s, block, 0, 1);
+ } else
+#endif
+ if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
+ else MPV_decode_mb_internal(s, block, 0, 0);
+}
+
+/**
+ * Passes a decoded horizontal band to the application's draw_horiz_band callback, if one is set.
+ *
+ * @param h the normal height; it will be reduced automatically if needed for the last row
+ */
+void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
+ if (s->avctx->draw_horiz_band) {
+ AVFrame *src;
+ const int field_pic= s->picture_structure != PICT_FRAME;
+ int offset[4];
+
+ h= FFMIN(h, (s->avctx->height>>field_pic) - y);
+
+ if(field_pic && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)){
+ h <<= 1;
+ y <<= 1;
+ if(s->first_field) return;
+ }
+
+ if(s->pict_type==FF_B_TYPE || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
+ src= (AVFrame*)s->current_picture_ptr;
+ else if(s->last_picture_ptr)
+ src= (AVFrame*)s->last_picture_ptr;
+ else
+ return;
+
+ if(s->pict_type==FF_B_TYPE && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
+ offset[0]=
+ offset[1]=
+ offset[2]=
+ offset[3]= 0;
+ }else{
+ offset[0]= y * s->linesize;
+ offset[1]=
+ offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
+ offset[3]= 0;
+ }
+
+ emms_c();
+
+ s->avctx->draw_horiz_band(s->avctx, src, offset,
+ y, s->picture_structure, h);
+ }
+}
+
+void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
+ const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
+ const int uvlinesize= s->current_picture.linesize[1];
+ const int mb_size= 4 - s->avctx->lowres;
+
+ s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
+ s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
+ s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
+ s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
+ s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
+ s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
+ //block_index is not used by mpeg2, so it is not affected by chroma_format
+
+ s->dest[0] = s->current_picture.data[0] + ((s->mb_x - 1) << mb_size);
+ s->dest[1] = s->current_picture.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
+ s->dest[2] = s->current_picture.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
+
+ if(!(s->pict_type==FF_B_TYPE && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
+ {
+ if(s->picture_structure==PICT_FRAME){
+ s->dest[0] += s->mb_y * linesize << mb_size;
+ s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
+ s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
+ }else{
+ s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
+ s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
+ s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
+ assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
+ }
+ }
+}
+
+void ff_mpeg_flush(AVCodecContext *avctx){
+ int i;
+ MpegEncContext *s = avctx->priv_data;
+
+ if(s==NULL || s->picture==NULL)
+ return;
+
+ for(i=0; i<MAX_PICTURE_COUNT; i++){
+ if(s->picture[i].data[0] && ( s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
+ || s->picture[i].type == FF_BUFFER_TYPE_USER))
+ free_frame_buffer(s, &s->picture[i]);
+ }
+ s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
+
+ s->mb_x= s->mb_y= 0;
+ s->closed_gop= 0;
+
+ s->parse_context.state= -1;
+ s->parse_context.frame_start_found= 0;
+ s->parse_context.overread= 0;
+ s->parse_context.overread_index= 0;
+ s->parse_context.index= 0;
+ s->parse_context.last_index= 0;
+ s->bitstream_buffer_size=0;
+ s->pp_time=0;
+}
+
+static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
+ DCTELEM *block, int n, int qscale)
+{
+ int i, level, nCoeffs;
+ const uint16_t *quant_matrix;
+
+ nCoeffs= s->block_last_index[n];
+
+ if (n < 4)
+ block[0] = block[0] * s->y_dc_scale;
+ else
+ block[0] = block[0] * s->c_dc_scale;
+ /* XXX: only mpeg1 */
+ quant_matrix = s->intra_matrix;
+ for(i=1;i<=nCoeffs;i++) {
+ int j= s->intra_scantable.permutated[i];
+ level = block[j];
+ if (level) {
+ if (level < 0) {
+ level = -level;
+ level = (int)(level * qscale * quant_matrix[j]) >> 3;
+ level = (level - 1) | 1;
+ level = -level;
+ } else {
+ level = (int)(level * qscale * quant_matrix[j]) >> 3;
+ level = (level - 1) | 1;
+ }
+ block[j] = level;
+ }
+ }
+}
+
+static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
+ DCTELEM *block, int n, int qscale)
+{
+ int i, level, nCoeffs;
+ const uint16_t *quant_matrix;
+
+ nCoeffs= s->block_last_index[n];
+
+ quant_matrix = s->inter_matrix;
+ for(i=0; i<=nCoeffs; i++) {
+ int j= s->intra_scantable.permutated[i];
+ level = block[j];
+ if (level) {
+ if (level < 0) {
+ level = -level;
+ level = (((level << 1) + 1) * qscale *
+ ((int) (quant_matrix[j]))) >> 4;
+ level = (level - 1) | 1;
+ level = -level;
+ } else {
+ level = (((level << 1) + 1) * qscale *
+ ((int) (quant_matrix[j]))) >> 4;
+ level = (level - 1) | 1;
+ }
+ block[j] = level;
+ }
+ }
+}
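+/* note: the two MPEG-1 routines above compute (|level| * qscale * Q[j]) >> 3 for
+ * intra and ((2*|level| + 1) * qscale * Q[j]) >> 4 for inter coefficients (with
+ * the sign restored afterwards), then force the result odd via (x - 1) | 1 --
+ * the MPEG-1 "oddification" mismatch control. */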
+
+static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
+ DCTELEM *block, int n, int qscale)
+{
+ int i, level, nCoeffs;
+ const uint16_t *quant_matrix;
+
+ if(s->alternate_scan) nCoeffs= 63;
+ else nCoeffs= s->block_last_index[n];
+
+ if (n < 4)
+ block[0] = block[0] * s->y_dc_scale;
+ else
+ block[0] = block[0] * s->c_dc_scale;
+ quant_matrix = s->intra_matrix;
+ for(i=1;i<=nCoeffs;i++) {
+ int j= s->intra_scantable.permutated[i];
+ level = block[j];
+ if (level) {
+ if (level < 0) {
+ level = -level;
+ level = (int)(level * qscale * quant_matrix[j]) >> 3;
+ level = -level;
+ } else {
+ level = (int)(level * qscale * quant_matrix[j]) >> 3;
+ }
+ block[j] = level;
+ }
+ }
+}
+
+static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
+ DCTELEM *block, int n, int qscale)
+{
+ int i, level, nCoeffs;
+ const uint16_t *quant_matrix;
+ int sum=-1;
+
+ if(s->alternate_scan) nCoeffs= 63;
+ else nCoeffs= s->block_last_index[n];
+
+ if (n < 4)
+ block[0] = block[0] * s->y_dc_scale;
+ else
+ block[0] = block[0] * s->c_dc_scale;
+ quant_matrix = s->intra_matrix;
+ for(i=1;i<=nCoeffs;i++) {
+ int j= s->intra_scantable.permutated[i];
+ level = block[j];
+ if (level) {
+ if (level < 0) {
+ level = -level;
+ level = (int)(level * qscale * quant_matrix[j]) >> 3;
+ level = -level;
+ } else {
+ level = (int)(level * qscale * quant_matrix[j]) >> 3;
+ }
+ block[j] = level;
+ sum+=level;
+ }
+ }
+ block[63]^=sum&1;
+}
+
+static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
+ DCTELEM *block, int n, int qscale)
+{
+ int i, level, nCoeffs;
+ const uint16_t *quant_matrix;
+ int sum=-1;
+
+ if(s->alternate_scan) nCoeffs= 63;
+ else nCoeffs= s->block_last_index[n];
+
+ quant_matrix = s->inter_matrix;
+ for(i=0; i<=nCoeffs; i++) {
+ int j= s->intra_scantable.permutated[i];
+ level = block[j];
+ if (level) {
+ if (level < 0) {
+ level = -level;
+ level = (((level << 1) + 1) * qscale *
+ ((int) (quant_matrix[j]))) >> 4;
+ level = -level;
+ } else {
+ level = (((level << 1) + 1) * qscale *
+ ((int) (quant_matrix[j]))) >> 4;
+ }
+ block[j] = level;
+ sum+=level;
+ }
+ }
+ block[63]^=sum&1;
+}
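+/* note: for MPEG-2 the per-coefficient oddification is dropped; instead the
+ * inter and bit-exact intra routines above track the parity of the sum of the
+ * dequantized levels and toggle the least significant bit of coefficient 63 so
+ * that the sum comes out odd (the MPEG-2 mismatch control step). */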
+
+static void dct_unquantize_h263_intra_c(MpegEncContext *s,
+ DCTELEM *block, int n, int qscale)
+{
+ int i, level, qmul, qadd;
+ int nCoeffs;
+
+ assert(s->block_last_index[n]>=0);
+
+ qmul = qscale << 1;
+
+ if (!s->h263_aic) {
+ if (n < 4)
+ block[0] = block[0] * s->y_dc_scale;
+ else
+ block[0] = block[0] * s->c_dc_scale;
+ qadd = (qscale - 1) | 1;
+ }else{
+ qadd = 0;
+ }
+ if(s->ac_pred)
+ nCoeffs=63;
+ else
+ nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
+
+ for(i=1; i<=nCoeffs; i++) {
+ level = block[i];
+ if (level) {
+ if (level < 0) {
+ level = level * qmul - qadd;
+ } else {
+ level = level * qmul + qadd;
+ }
+ block[i] = level;
+ }
+ }
+}
+
+static void dct_unquantize_h263_inter_c(MpegEncContext *s,
+ DCTELEM *block, int n, int qscale)
+{
+ int i, level, qmul, qadd;
+ int nCoeffs;
+
+ assert(s->block_last_index[n]>=0);
+
+ qadd = (qscale - 1) | 1;
+ qmul = qscale << 1;
+
+ nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
+
+ for(i=0; i<=nCoeffs; i++) {
+ level = block[i];
+ if (level) {
+ if (level < 0) {
+ level = level * qmul - qadd;
+ } else {
+ level = level * qmul + qadd;
+ }
+ block[i] = level;
+ }
+ }
+}
+
+/**
+ * set qscale and update qscale dependent variables.
+ */
+void ff_set_qscale(MpegEncContext * s, int qscale)
+{
+ if (qscale < 1)
+ qscale = 1;
+ else if (qscale > 31)
+ qscale = 31;
+
+ s->qscale = qscale;
+ s->chroma_qscale= s->chroma_qscale_table[qscale];
+
+ s->y_dc_scale= s->y_dc_scale_table[ qscale ];
+ s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpegvideo.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpegvideo.h
new file mode 100644
index 000000000..d011a943e
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpegvideo.h
@@ -0,0 +1,821 @@
+/*
+ * Generic DCT based hybrid video encoder
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
+ * Copyright (c) 2002-2004 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/mpegvideo.h
+ * mpegvideo header.
+ */
+
+#ifndef AVCODEC_MPEGVIDEO_H
+#define AVCODEC_MPEGVIDEO_H
+
+#include "dsputil.h"
+#include "get_bits.h"
+#include "put_bits.h"
+#include "ratecontrol.h"
+#include "parser.h"
+#include "mpeg12data.h"
+#include "rl.h"
+
+#define FRAME_SKIPPED 100 ///< return value for header parsers if frame is not coded
+
+enum OutputFormat {
+ FMT_MPEG1,
+ FMT_H261,
+ FMT_H263,
+ FMT_MJPEG,
+ FMT_H264,
+};
+
+#define MPEG_BUF_SIZE (16 * 1024)
+
+#define QMAT_SHIFT_MMX 16
+#define QMAT_SHIFT 22
+
+#define MAX_FCODE 7
+#define MAX_MV 2048
+
+#define MAX_THREADS 16
+
+#define MAX_PICTURE_COUNT 32
+
+#define ME_MAP_SIZE 64
+#define ME_MAP_SHIFT 3
+#define ME_MAP_MV_BITS 11
+
+#define MAX_MB_BYTES (30*16*16*3/8 + 120)
+
+#define INPLACE_OFFSET 16
+
+/* Start codes. */
+#define SEQ_END_CODE 0x000001b7
+#define SEQ_START_CODE 0x000001b3
+#define GOP_START_CODE 0x000001b8
+#define PICTURE_START_CODE 0x00000100
+#define SLICE_MIN_START_CODE 0x00000101
+#define SLICE_MAX_START_CODE 0x000001af
+#define EXT_START_CODE 0x000001b5
+#define USER_START_CODE 0x000001b2
+
+/**
+ * Picture.
+ */
+typedef struct Picture{
+ FF_COMMON_FRAME
+
+ /**
+ * halfpel luma planes.
+ */
+ uint8_t *interpolated[3];
+ int16_t (*motion_val_base[2])[2];
+ uint32_t *mb_type_base;
+#define MB_TYPE_INTRA MB_TYPE_INTRA4x4 //default mb_type if there is just one type
+#define IS_INTRA4x4(a) ((a)&MB_TYPE_INTRA4x4)
+#define IS_INTRA16x16(a) ((a)&MB_TYPE_INTRA16x16)
+#define IS_PCM(a) ((a)&MB_TYPE_INTRA_PCM)
+#define IS_INTRA(a) ((a)&7)
+#define IS_INTER(a) ((a)&(MB_TYPE_16x16|MB_TYPE_16x8|MB_TYPE_8x16|MB_TYPE_8x8))
+#define IS_SKIP(a) ((a)&MB_TYPE_SKIP)
+#define IS_INTRA_PCM(a) ((a)&MB_TYPE_INTRA_PCM)
+#define IS_INTERLACED(a) ((a)&MB_TYPE_INTERLACED)
+#define IS_DIRECT(a) ((a)&MB_TYPE_DIRECT2)
+#define IS_GMC(a) ((a)&MB_TYPE_GMC)
+#define IS_16X16(a) ((a)&MB_TYPE_16x16)
+#define IS_16X8(a) ((a)&MB_TYPE_16x8)
+#define IS_8X16(a) ((a)&MB_TYPE_8x16)
+#define IS_8X8(a) ((a)&MB_TYPE_8x8)
+#define IS_SUB_8X8(a) ((a)&MB_TYPE_16x16) //note reused
+#define IS_SUB_8X4(a) ((a)&MB_TYPE_16x8) //note reused
+#define IS_SUB_4X8(a) ((a)&MB_TYPE_8x16) //note reused
+#define IS_SUB_4X4(a) ((a)&MB_TYPE_8x8) //note reused
+#define IS_ACPRED(a) ((a)&MB_TYPE_ACPRED)
+#define IS_QUANT(a) ((a)&MB_TYPE_QUANT)
+#define IS_DIR(a, part, list) ((a) & (MB_TYPE_P0L0<<((part)+2*(list))))
+#define USES_LIST(a, list) ((a) & ((MB_TYPE_P0L0|MB_TYPE_P1L0)<<(2*(list)))) ///< does this mb use listX, note does not work if subMBs
+#define HAS_CBP(a) ((a)&MB_TYPE_CBP)
+
+ int field_poc[2]; ///< h264 top/bottom POC
+ int poc; ///< h264 frame POC
+ int frame_num; ///< h264 frame_num (raw frame_num from slice header)
+ int mmco_reset; ///< h264 MMCO_RESET set this 1. Reordering code must not mix pictures before and after MMCO_RESET.
+ int pic_id; /**< h264 pic_num (short -> no wrap version of pic_num,
+ pic_num & max_pic_num; long -> long_pic_num) */
+ int long_ref; ///< 1->long term reference 0->short term reference
+ int ref_poc[2][2][16]; ///< h264 POCs of the frames used as reference (FIXME need per slice)
+ int ref_count[2][2]; ///< number of entries in ref_poc (FIXME need per slice)
+ int mbaff; ///< h264 1 -> MBAFF frame 0-> not MBAFF
+
+ int mb_var_sum; ///< sum of MB variance for current frame
+ int mc_mb_var_sum; ///< motion compensated MB variance for current frame
+ uint16_t *mb_var; ///< Table for MB variances
+ uint16_t *mc_mb_var; ///< Table for motion compensated MB variances
+ uint8_t *mb_mean; ///< Table for MB luminance
+ int32_t *mb_cmp_score; ///< Table for MB cmp scores, for mb decision FIXME remove
+ int b_frame_score; /* */
+} Picture;
+
+struct MpegEncContext;
+
+/**
+ * Motion estimation context.
+ */
+typedef struct MotionEstContext{
+ AVCodecContext *avctx;
+ int skip; ///< set if ME is skipped for the current MB
+ int co_located_mv[4][2]; ///< mv from last P-frame for direct mode ME
+ int direct_basis_mv[4][2];
+ uint8_t *scratchpad; ///< data area for the ME algo, so that the ME does not need to malloc/free
+ uint8_t *best_mb;
+ uint8_t *temp_mb[2];
+ uint8_t *temp;
+ int best_bits;
+ uint32_t *map; ///< map to avoid duplicate evaluations
+ uint32_t *score_map; ///< map to store the scores
+ int map_generation;
+ int pre_penalty_factor;
+ int penalty_factor; /*!< an estimate of the bits required to
+ code a given mv value, e.g. (1,0) takes
+ more bits than (0,0). We have to
+ estimate whether any reduction in
+ residual is worth the extra bits. */
+ int sub_penalty_factor;
+ int mb_penalty_factor;
+ int flags;
+ int sub_flags;
+ int mb_flags;
+ int pre_pass; ///< = 1 for the pre pass
+ int dia_size;
+ int xmin;
+ int xmax;
+ int ymin;
+ int ymax;
+ int pred_x;
+ int pred_y;
+ uint8_t *src[4][4];
+ uint8_t *ref[4][4];
+ int stride;
+ int uvstride;
+ /* temp variables for picture complexity calculation */
+ int mc_mb_var_sum_temp;
+ int mb_var_sum_temp;
+ int scene_change_score;
+/* cmp, chroma_cmp;*/
+ op_pixels_func (*hpel_put)[4];
+ op_pixels_func (*hpel_avg)[4];
+ qpel_mc_func (*qpel_put)[16];
+ qpel_mc_func (*qpel_avg)[16];
+ uint8_t (*mv_penalty)[MAX_MV*2+1]; ///< amount of bits needed to encode a MV
+ uint8_t *current_mv_penalty;
+ int (*sub_motion_search)(struct MpegEncContext * s,
+ int *mx_ptr, int *my_ptr, int dmin,
+ int src_index, int ref_index,
+ int size, int h);
+}MotionEstContext;
+
+/**
+ * MpegEncContext.
+ */
+typedef struct MpegEncContext {
+ struct AVCodecContext *avctx;
+ /* the following parameters must be initialized before encoding */
+ int width, height;///< picture size. must be a multiple of 16
+ int gop_size;
+ int intra_only; ///< if true, only intra pictures are generated
+ int bit_rate; ///< wanted bit rate
+ enum OutputFormat out_format; ///< output format
+ int h263_pred; ///< use mpeg4/h263 ac/dc predictions
+ int pb_frame; ///< PB frame mode (0 = none, 1 = base, 2 = improved)
+
+/* the following codec id fields are deprecated in favor of codec_id */
+ int h263_plus; ///< h263 plus headers
+ int h263_msmpeg4; ///< generate MSMPEG4 compatible stream (deprecated, use msmpeg4_version instead)
+ int h263_flv; ///< use flv h263 header
+
+ enum CodecID codec_id; /* see CODEC_ID_xxx */
+ int fixed_qscale; ///< fixed qscale if non zero
+ int encoding; ///< true if we are encoding (vs decoding)
+ int flags; ///< AVCodecContext.flags (HQ, MV4, ...)
+ int flags2; ///< AVCodecContext.flags2
+ int max_b_frames; ///< max number of b-frames for encoding
+ int luma_elim_threshold;
+ int chroma_elim_threshold;
+ int strict_std_compliance; ///< strictly follow the std (MPEG4, ...)
+ int workaround_bugs; ///< workaround bugs in encoders which cannot be detected automatically
+ int codec_tag; ///< internal codec_tag upper case converted from avctx codec_tag
+ int stream_codec_tag; ///< internal stream_codec_tag upper case converted from avctx stream_codec_tag
+ /* the following fields are managed internally by the encoder */
+
+ /** bit output */
+ PutBitContext pb;
+
+ /* sequence parameters */
+ int context_initialized;
+ int input_picture_number; ///< used to set pic->display_picture_number, should not be used for/by anything else
+ int coded_picture_number; ///< used to set pic->coded_picture_number, should not be used for/by anything else
+ int picture_number; //FIXME remove, unclear definition
+ int picture_in_gop_number; ///< 0-> first pic in gop, ...
+ int b_frames_since_non_b; ///< used for encoding, relative to not yet reordered input
+ int64_t user_specified_pts;///< last non zero pts from AVFrame which was passed into avcodec_encode_video()
+ int mb_width, mb_height; ///< number of MBs horizontally & vertically
+ int mb_stride; ///< mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11
+ int b8_stride; ///< 2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
+ int b4_stride; ///< 4*mb_width+1 used for some 4x4 block arrays to allow simple addressing
+ int h_edge_pos, v_edge_pos;///< horizontal / vertical position of the right/bottom edge (pixel replication)
+ int mb_num; ///< number of MBs of a picture
+ int linesize; ///< line size, in bytes, may be different from width
+ int uvlinesize; ///< line size, for chroma in bytes, may be different from width
+ Picture *picture; ///< main picture buffer
+ Picture **input_picture; ///< next pictures on display order for encoding
+ Picture **reordered_input_picture; ///< pointer to the next pictures in coded order for encoding
+
+ int start_mb_y; ///< start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
+ int end_mb_y; ///< end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
+ struct MpegEncContext *thread_context[MAX_THREADS];
+
+ /**
+ * copy of the previous picture structure.
+ * note, linesize & data, might not match the previous picture (for field pictures)
+ */
+ Picture last_picture;
+
+ /**
+ * copy of the next picture structure.
+ * note, linesize & data, might not match the next picture (for field pictures)
+ */
+ Picture next_picture;
+
+ /**
+ * copy of the source picture structure for encoding.
+ * note, linesize & data, might not match the source picture (for field pictures)
+ */
+ Picture new_picture;
+
+ /**
+ * copy of the current picture structure.
+ * note, linesize & data, might not match the current picture (for field pictures)
+ */
+ Picture current_picture; ///< buffer to store the decompressed current picture
+
+ Picture *last_picture_ptr; ///< pointer to the previous picture.
+ Picture *next_picture_ptr; ///< pointer to the next picture (for bidir pred)
+ Picture *current_picture_ptr; ///< pointer to the current picture
+ uint8_t *visualization_buffer[3]; ///< temporary buffer for MV visualization
+ int last_dc[3]; ///< last DC values for MPEG1
+ int16_t *dc_val_base;
+ int16_t *dc_val[3]; ///< used for mpeg4 DC prediction, all 3 arrays must be contiguous
+ int16_t dc_cache[4*5];
+ int y_dc_scale, c_dc_scale;
+ const uint8_t *y_dc_scale_table; ///< qscale -> y_dc_scale table
+ const uint8_t *c_dc_scale_table; ///< qscale -> c_dc_scale table
+ const uint8_t *chroma_qscale_table; ///< qscale -> chroma_qscale (h263)
+ uint8_t *coded_block_base;
+ uint8_t *coded_block; ///< used for coded block pattern prediction (msmpeg4v3, wmv1)
+ int16_t (*ac_val_base)[16];
+ int16_t (*ac_val[3])[16]; ///< used for mpeg4 AC prediction, all 3 arrays must be contiguous
+ int ac_pred;
+ uint8_t *prev_pict_types; ///< previous picture types in bitstream order, used for mb skip
+#define PREV_PICT_TYPES_BUFFER_SIZE 256
+ int mb_skipped; ///< MUST BE SET only during DECODING
+ uint8_t *mbskip_table; /**< used to avoid copy if macroblock skipped (for black regions for example)
+ and used for b-frame encoding & decoding (contains skip table of next P Frame) */
+ uint8_t *mbintra_table; ///< used to avoid setting {ac, dc, cbp}-pred stuff to zero on inter MB decoding
+ uint8_t *cbp_table; ///< used to store cbp, ac_pred for partitioned decoding
+ uint8_t *pred_dir_table; ///< used to store pred_dir for partitioned decoding
+ uint8_t *allocated_edge_emu_buffer;
+ uint8_t *edge_emu_buffer; ///< points into the middle of allocated_edge_emu_buffer
+ uint8_t *rd_scratchpad; ///< scratchpad for rate distortion mb decision
+ uint8_t *obmc_scratchpad;
+ uint8_t *b_scratchpad; ///< scratchpad used for writing into write only buffers
+
+ int qscale; ///< QP
+ int chroma_qscale; ///< chroma QP
+ unsigned int lambda; ///< Lagrange multiplier used in rate distortion
+ unsigned int lambda2; ///< (lambda*lambda) >> FF_LAMBDA_SHIFT
+ int *lambda_table;
+ int adaptive_quant; ///< use adaptive quantization
+ int dquant; ///< qscale difference to prev qscale
+ int closed_gop; ///< MPEG1/2 GOP is closed
+ int pict_type; ///< FF_I_TYPE, FF_P_TYPE, FF_B_TYPE, ...
+ int last_pict_type; //FIXME remove
+ int last_non_b_pict_type; ///< used for mpeg4 gmc b-frames & ratecontrol
+ int dropable;
+ int frame_rate_index;
+ int last_lambda_for[5]; ///< last lambda for a specific pict type
+ int skipdct; ///< skip dct and code zero residual
+
+ /* motion compensation */
+ int unrestricted_mv; ///< mv can point outside of the coded picture
+ int h263_long_vectors; ///< use horrible h263v1 long vector mode
+ int decode; ///< if 0 then decoding will be skipped (for encoding b frames for example)
+
+ DSPContext dsp; ///< pointers for accelerated dsp functions
+ int f_code; ///< forward MV resolution
+ int b_code; ///< backward MV resolution for B Frames (mpeg4)
+ int16_t (*p_mv_table_base)[2];
+ int16_t (*b_forw_mv_table_base)[2];
+ int16_t (*b_back_mv_table_base)[2];
+ int16_t (*b_bidir_forw_mv_table_base)[2];
+ int16_t (*b_bidir_back_mv_table_base)[2];
+ int16_t (*b_direct_mv_table_base)[2];
+ int16_t (*p_field_mv_table_base[2][2])[2];
+ int16_t (*b_field_mv_table_base[2][2][2])[2];
+ int16_t (*p_mv_table)[2]; ///< MV table (1MV per MB) p-frame encoding
+ int16_t (*b_forw_mv_table)[2]; ///< MV table (1MV per MB) forward mode b-frame encoding
+ int16_t (*b_back_mv_table)[2]; ///< MV table (1MV per MB) backward mode b-frame encoding
+ int16_t (*b_bidir_forw_mv_table)[2]; ///< MV table (1MV per MB) bidir mode b-frame encoding
+ int16_t (*b_bidir_back_mv_table)[2]; ///< MV table (1MV per MB) bidir mode b-frame encoding
+ int16_t (*b_direct_mv_table)[2]; ///< MV table (1MV per MB) direct mode b-frame encoding
+ int16_t (*p_field_mv_table[2][2])[2]; ///< MV table (2MV per MB) interlaced p-frame encoding
+ int16_t (*b_field_mv_table[2][2][2])[2];///< MV table (4MV per MB) interlaced b-frame encoding
+ uint8_t (*p_field_select_table[2]);
+ uint8_t (*b_field_select_table[2][2]);
+ int me_method; ///< ME algorithm
+ int mv_dir;
+#define MV_DIR_FORWARD 1
+#define MV_DIR_BACKWARD 2
+#define MV_DIRECT 4 ///< bidirectional mode where the difference equals the MV of the last P/S/I-Frame (mpeg4)
+ int mv_type;
+#define MV_TYPE_16X16 0 ///< 1 vector for the whole mb
+#define MV_TYPE_8X8 1 ///< 4 vectors (h263, mpeg4 4MV)
+#define MV_TYPE_16X8 2 ///< 2 vectors, one per 16x8 block
+#define MV_TYPE_FIELD 3 ///< 2 vectors, one per field
+#define MV_TYPE_DMV 4 ///< 2 vectors, special mpeg2 Dual Prime Vectors
+ /** motion vectors for a macroblock
+ first coordinate : 0 = forward, 1 = backward
+ second coordinate : depends on the MV type (see mv_type)
+ third coordinate : 0 = x, 1 = y
+ */
+ int mv[2][4][2];
+ int field_select[2][2];
+ int last_mv[2][2][2]; ///< last MV, used for MV prediction in MPEG1 & B-frame MPEG4
+ uint8_t *fcode_tab; ///< smallest fcode needed for each MV
+ int16_t direct_scale_mv[2][64]; ///< precomputed to avoid divisions in ff_mpeg4_set_direct_mv
+
+ MotionEstContext me;
+
+ int no_rounding; /**< apply no rounding to motion compensation (MPEG4, msmpeg4, ...);
+ for b-frames the rounding mode is always 0 */
+
+ int hurry_up; /**< when set to 1 during decoding, b frames will be skipped;
+ when set to 2, idct/dequant will be skipped too */
+
+ /* macroblock layer */
+ int mb_x, mb_y;
+ int mb_skip_run;
+ int mb_intra;
+ uint16_t *mb_type; ///< Table for candidate MB types for encoding
+#define CANDIDATE_MB_TYPE_INTRA 0x01
+#define CANDIDATE_MB_TYPE_INTER 0x02
+#define CANDIDATE_MB_TYPE_INTER4V 0x04
+#define CANDIDATE_MB_TYPE_SKIPPED 0x08
+//#define MB_TYPE_GMC 0x10
+
+#define CANDIDATE_MB_TYPE_DIRECT 0x10
+#define CANDIDATE_MB_TYPE_FORWARD 0x20
+#define CANDIDATE_MB_TYPE_BACKWARD 0x40
+#define CANDIDATE_MB_TYPE_BIDIR 0x80
+
+#define CANDIDATE_MB_TYPE_INTER_I 0x100
+#define CANDIDATE_MB_TYPE_FORWARD_I 0x200
+#define CANDIDATE_MB_TYPE_BACKWARD_I 0x400
+#define CANDIDATE_MB_TYPE_BIDIR_I 0x800
+
+#define CANDIDATE_MB_TYPE_DIRECT0 0x1000
+
+ int block_index[6]; ///< index to current MB in block based arrays with edges
+ int block_wrap[6];
+ uint8_t *dest[3];
+
+ int *mb_index2xy; ///< mb_index -> mb_x + mb_y*mb_stride
+
+ /** matrix transmitted in the bitstream */
+ uint16_t intra_matrix[64];
+ uint16_t chroma_intra_matrix[64];
+ uint16_t inter_matrix[64];
+ uint16_t chroma_inter_matrix[64];
+#define QUANT_BIAS_SHIFT 8
+ int intra_quant_bias; ///< bias for the quantizer
+ int inter_quant_bias; ///< bias for the quantizer
+ int min_qcoeff; ///< minimum encodable coefficient
+ int max_qcoeff; ///< maximum encodable coefficient
+ int ac_esc_length; ///< num of bits needed to encode the longest esc
+ uint8_t *intra_ac_vlc_length;
+ uint8_t *intra_ac_vlc_last_length;
+ uint8_t *inter_ac_vlc_length;
+ uint8_t *inter_ac_vlc_last_length;
+ uint8_t *luma_dc_vlc_length;
+ uint8_t *chroma_dc_vlc_length;
+#define UNI_AC_ENC_INDEX(run,level) ((run)*128 + (level))
+
+ int coded_score[8];
+
+ /** precomputed matrix (combine qscale and DCT renorm) */
+ int (*q_intra_matrix)[64];
+ int (*q_inter_matrix)[64];
+ /** identical to the above but for MMX & these are not permutated, second 64 entries are bias*/
+ uint16_t (*q_intra_matrix16)[2][64];
+ uint16_t (*q_inter_matrix16)[2][64];
+ int block_last_index[12]; ///< last non zero coefficient in block
+ /* scantables */
+ ScanTable intra_scantable;
+ ScanTable intra_h_scantable;
+ ScanTable intra_v_scantable;
+ ScanTable inter_scantable; ///< if inter == intra then intra should be used to reduce the cache usage
+
+ /* noise reduction */
+ int (*dct_error_sum)[64];
+ int dct_count[2];
+ uint16_t (*dct_offset)[64];
+
+ void *opaque; ///< private data for the user
+
+ /* bit rate control */
+ int64_t wanted_bits;
+ int64_t total_bits;
+ int frame_bits; ///< bits used for the current frame
+ int next_lambda; ///< next lambda used for retrying to encode a frame
+ RateControlContext rc_context; ///< contains stuff only accessed in ratecontrol.c
+
+ /* statistics, used for 2-pass encoding */
+ int mv_bits;
+ int header_bits;
+ int i_tex_bits;
+ int p_tex_bits;
+ int i_count;
+ int f_count;
+ int b_count;
+ int skip_count;
+ int misc_bits; ///< cbp, mb_type
+ int last_bits; ///< temp var used for calculating the above vars
+
+ /* error concealment / resync */
+ int error_count;
+ uint8_t *error_status_table; ///< table of the error status of each MB
+#define VP_START 1 ///< current MB is the first after a resync marker
+#define AC_ERROR 2
+#define DC_ERROR 4
+#define MV_ERROR 8
+#define AC_END 16
+#define DC_END 32
+#define MV_END 64
+//FIXME some prefix?
+
+ int resync_mb_x; ///< x position of last resync marker
+ int resync_mb_y; ///< y position of last resync marker
+ GetBitContext last_resync_gb; ///< used to search for the next resync marker
+ int mb_num_left; ///< number of MBs left in this video packet (for partitioned Slices only)
+ int next_p_frame_damaged; ///< set if the next p frame is damaged, to avoid showing trashed b frames
+ int error_recognition;
+
+ ParseContext parse_context;
+
+ /* H.263 specific */
+ int gob_index;
+ int obmc; ///< overlapped block motion compensation
+ int showed_packed_warning; ///< flag for having shown the warning about divxs invalid b frames
+
+ /* H.263+ specific */
+ int umvplus; ///< == H263+ && unrestricted_mv
+ int h263_aic; ///< Advanced INTRA Coding (AIC)
+ int h263_aic_dir; ///< AIC direction: 0 = left, 1 = top
+ int h263_slice_structured;
+ int alt_inter_vlc; ///< alternative inter vlc
+ int modified_quant;
+ int loop_filter;
+ int custom_pcf;
+
+ /* mpeg4 specific */
+ int time_increment_bits; ///< number of bits to represent the fractional part of time
+ int last_time_base;
+ int time_base; ///< time in seconds of last I,P,S Frame
+ int64_t time; ///< time of current frame
+ int64_t last_non_b_time;
+ uint16_t pp_time; ///< time distance between the last 2 p,s,i frames
+ uint16_t pb_time; ///< time distance between the last b and p,s,i frame
+ uint16_t pp_field_time;
+ uint16_t pb_field_time; ///< like above, just for interlaced
+ int shape;
+ int vol_sprite_usage;
+ int sprite_width;
+ int sprite_height;
+ int sprite_left;
+ int sprite_top;
+ int sprite_brightness_change;
+ int num_sprite_warping_points;
+ int real_sprite_warping_points;
+ uint16_t sprite_traj[4][2]; ///< sprite trajectory points
+ int sprite_offset[2][2]; ///< sprite offset[isChroma][isMVY]
+ int sprite_delta[2][2]; ///< sprite_delta [isY][isMVY]
+ int sprite_shift[2]; ///< sprite shift [isChroma]
+ int mcsel;
+ int quant_precision;
+ int quarter_sample; ///< 1->qpel, 0->half pel ME/MC
+ int scalability;
+ int hierachy_type;
+ int enhancement_type;
+ int new_pred;
+ int reduced_res_vop;
+ int aspect_ratio_info; //FIXME remove
+ int sprite_warping_accuracy;
+ int low_latency_sprite;
+ int data_partitioning; ///< data partitioning flag from header
+ int partitioned_frame; ///< is current frame partitioned
+ int rvlc; ///< reversible vlc
+ int resync_marker; ///< could this stream contain resync markers
+ int low_delay; ///< no reordering needed / has no b-frames
+ int vo_type;
+ int vol_control_parameters; ///< does the stream contain the low_delay flag, used to workaround buggy encoders
+ int intra_dc_threshold; ///< QP above which the ac VLC should be used for intra dc
+ int use_intra_dc_vlc;
+ PutBitContext tex_pb; ///< used for data partitioned VOPs
+ PutBitContext pb2; ///< used for data partitioned VOPs
+ int mpeg_quant;
+ int t_frame; ///< time distance of first I -> B, used for interlaced b frames
+ int padding_bug_score; ///< used to detect the VERY common padding bug in MPEG4
+ int cplx_estimation_trash_i;
+ int cplx_estimation_trash_p;
+ int cplx_estimation_trash_b;
+
+ /* divx specific, used to workaround (many) bugs in divx5 */
+ int divx_version;
+ int divx_build;
+ int divx_packed;
+ uint8_t *bitstream_buffer; //Divx 5.01 puts several frames in a single one, this is used to reorder them
+ int bitstream_buffer_size;
+ unsigned int allocated_bitstream_buffer_size;
+
+ int xvid_build;
+
+ /* lavc specific stuff, used to workaround bugs in libavcodec */
+ int lavc_build;
+
+ /* RV10 specific */
+ int rv10_version; ///< RV10 version: 0 or 3
+ int rv10_first_dc_coded[3];
+ int orig_width, orig_height;
+
+ /* MJPEG specific */
+ struct MJpegContext *mjpeg_ctx;
+ int mjpeg_vsample[3]; ///< vertical sampling factors, default = {2, 1, 1}
+ int mjpeg_hsample[3]; ///< horizontal sampling factors, default = {2, 1, 1}
+
+ /* MSMPEG4 specific */
+ int mv_table_index;
+ int rl_table_index;
+ int rl_chroma_table_index;
+ int dc_table_index;
+ int use_skip_mb_code;
+ int slice_height; ///< in macroblocks
+ int first_slice_line; ///< used in mpeg4 too to handle resync markers
+ int flipflop_rounding;
+ int msmpeg4_version; ///< 0=not msmpeg4, 1=mp41, 2=mp42, 3=mp43/divx3 4=wmv1/7 5=wmv2/8
+ int per_mb_rl_table;
+ int esc3_level_length;
+ int esc3_run_length;
+ /** [mb_intra][isChroma][level][run][last] */
+ int (*ac_stats)[2][MAX_LEVEL+1][MAX_RUN+1][2];
+ int inter_intra_pred;
+ int mspel;
+
+ /* decompression specific */
+ GetBitContext gb;
+
+ /* Mpeg1 specific */
+ int gop_picture_number; ///< index of the first picture of a GOP, based on fake_pic_num (MPEG-1 specific)
+ int last_mv_dir; ///< last mv_dir, used for b frame encoding
+ int broken_link; ///< no_output_of_prior_pics_flag
+ uint8_t *vbv_delay_ptr; ///< pointer to vbv_delay in the bitstream
+
+ /* MPEG-2-specific - I wished not to have to support this mess. */
+ int progressive_sequence;
+ int mpeg_f_code[2][2];
+ int picture_structure;
+/* picture type */
+#define PICT_TOP_FIELD 1
+#define PICT_BOTTOM_FIELD 2
+#define PICT_FRAME 3
+
+ int intra_dc_precision;
+ int frame_pred_frame_dct;
+ int top_field_first;
+ int concealment_motion_vectors;
+ int q_scale_type;
+ int intra_vlc_format;
+ int alternate_scan;
+ int repeat_first_field;
+ int chroma_420_type;
+ int chroma_format;
+#define CHROMA_420 1
+#define CHROMA_422 2
+#define CHROMA_444 3
+ int chroma_x_shift;//depends on pix_format, which depends on chroma_format
+ int chroma_y_shift;
+
+ int progressive_frame;
+ int full_pel[2];
+ int interlaced_dct;
+ int first_slice;
+ int first_field; ///< is 1 for the first field of a field picture 0 otherwise
+
+ /* RTP specific */
+ int rtp_mode;
+
+ uint8_t *ptr_lastgob;
+ int swap_uv; //vcr2 codec is an MPEG-2 variant with U and V swapped
+ DCTELEM (*pblocks[12])[64];
+
+ DCTELEM (*block)[64]; ///< points to one of the following blocks
+ DCTELEM (*blocks)[8][64]; // for HQ mode we need to keep the best block
+ int (*decode_mb)(struct MpegEncContext *s, DCTELEM block[6][64]); // used by some codecs to avoid a switch()
+#define SLICE_OK 0
+#define SLICE_ERROR -1
+#define SLICE_END -2 ///<end marker found
+#define SLICE_NOEND -3 ///<no end marker or error found but mb count exceeded
+
+ void (*dct_unquantize_mpeg1_intra)(struct MpegEncContext *s,
+ DCTELEM *block/*align 16*/, int n, int qscale);
+ void (*dct_unquantize_mpeg1_inter)(struct MpegEncContext *s,
+ DCTELEM *block/*align 16*/, int n, int qscale);
+ void (*dct_unquantize_mpeg2_intra)(struct MpegEncContext *s,
+ DCTELEM *block/*align 16*/, int n, int qscale);
+ void (*dct_unquantize_mpeg2_inter)(struct MpegEncContext *s,
+ DCTELEM *block/*align 16*/, int n, int qscale);
+ void (*dct_unquantize_h263_intra)(struct MpegEncContext *s,
+ DCTELEM *block/*align 16*/, int n, int qscale);
+ void (*dct_unquantize_h263_inter)(struct MpegEncContext *s,
+ DCTELEM *block/*align 16*/, int n, int qscale);
+ void (*dct_unquantize_h261_intra)(struct MpegEncContext *s,
+ DCTELEM *block/*align 16*/, int n, int qscale);
+ void (*dct_unquantize_h261_inter)(struct MpegEncContext *s,
+ DCTELEM *block/*align 16*/, int n, int qscale);
+ void (*dct_unquantize_intra)(struct MpegEncContext *s, // unquantizer to use (mpeg4 can use both)
+ DCTELEM *block/*align 16*/, int n, int qscale);
+ void (*dct_unquantize_inter)(struct MpegEncContext *s, // unquantizer to use (mpeg4 can use both)
+ DCTELEM *block/*align 16*/, int n, int qscale);
+ int (*dct_quantize)(struct MpegEncContext *s, DCTELEM *block/*align 16*/, int n, int qscale, int *overflow);
+ int (*fast_dct_quantize)(struct MpegEncContext *s, DCTELEM *block/*align 16*/, int n, int qscale, int *overflow);
+ void (*denoise_dct)(struct MpegEncContext *s, DCTELEM *block);
+} MpegEncContext;
+
+
+void MPV_decode_defaults(MpegEncContext *s);
+int MPV_common_init(MpegEncContext *s);
+void MPV_common_end(MpegEncContext *s);
+void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]);
+int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx);
+void MPV_frame_end(MpegEncContext *s);
+int MPV_encode_init(AVCodecContext *avctx);
+int MPV_encode_end(AVCodecContext *avctx);
+int MPV_encode_picture(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data);
+void MPV_common_init_mmx(MpegEncContext *s);
+void ff_clean_intra_table_entries(MpegEncContext *s);
+void ff_draw_horiz_band(MpegEncContext *s, int y, int h);
+void ff_mpeg_flush(AVCodecContext *avctx);
+void ff_print_debug_info(MpegEncContext *s, AVFrame *pict);
+void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix);
+int ff_find_unused_picture(MpegEncContext *s, int shared);
+void ff_denoise_dct(MpegEncContext *s, DCTELEM *block);
+void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src);
+const uint8_t *ff_find_start_code(const uint8_t *p, const uint8_t *end, uint32_t *state);
+
+void ff_er_frame_start(MpegEncContext *s);
+void ff_er_frame_end(MpegEncContext *s);
+void ff_er_add_slice(MpegEncContext *s, int startx, int starty, int endx, int endy, int status);
+
+int ff_dct_common_init(MpegEncContext *s);
+void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64], uint16_t (*qmat16)[2][64],
+ const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra);
+
+void ff_init_block_index(MpegEncContext *s);
+void ff_copy_picture(Picture *dst, Picture *src);
+
+/**
+ * allocates a Picture
+ * The pixels are allocated/set by calling get_buffer() if shared=0
+ */
+int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared);
+
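+/**
+ * Advance the per-macroblock block indices and destination pointers by one
+ * macroblock to the right: two 8x8 blocks horizontally for luma, one for
+ * each chroma plane (block_size is 8, halved per lowres step).
+ */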
+static inline void ff_update_block_index(MpegEncContext *s){
+ const int block_size= 8>>s->avctx->lowres;
+
+ s->block_index[0]+=2;
+ s->block_index[1]+=2;
+ s->block_index[2]+=2;
+ s->block_index[3]+=2;
+ s->block_index[4]++;
+ s->block_index[5]++;
+ s->dest[0]+= 2*block_size;
+ s->dest[1]+= block_size;
+ s->dest[2]+= block_size;
+}
+
+static inline int get_bits_diff(MpegEncContext *s){
+ const int bits= put_bits_count(&s->pb);
+ const int last= s->last_bits;
+
+ s->last_bits = bits;
+
+ return bits - last;
+}
+
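+/**
+ * Derive one chroma motion vector component from the sum of the four luma
+ * half-pel vectors of a 4MV macroblock (see chroma_4mv_motion()): the sum
+ * is divided by 8 with the H.263-style rounding encoded in the table below,
+ * e.g. a sum of 12 (four vectors of 3 half pels) maps to 1.
+ */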
+static inline int ff_h263_round_chroma(int x){
+ static const uint8_t h263_chroma_roundtab[16] = {
+ // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1,
+ };
+ return h263_chroma_roundtab[x & 0xf] + (x >> 3);
+}
+
+/* motion_est.c */
+void ff_estimate_p_frame_motion(MpegEncContext * s,
+ int mb_x, int mb_y);
+void ff_estimate_b_frame_motion(MpegEncContext * s,
+ int mb_x, int mb_y);
+int ff_get_best_fcode(MpegEncContext * s, int16_t (*mv_table)[2], int type);
+void ff_fix_long_p_mvs(MpegEncContext * s);
+void ff_fix_long_mvs(MpegEncContext * s, uint8_t *field_select_table, int field_select,
+ int16_t (*mv_table)[2], int f_code, int type, int truncate);
+int ff_init_me(MpegEncContext *s);
+int ff_pre_estimate_p_frame_motion(MpegEncContext * s, int mb_x, int mb_y);
+int ff_epzs_motion_search(MpegEncContext * s, int *mx_ptr, int *my_ptr,
+ int P[10][2], int src_index, int ref_index, int16_t (*last_mv)[2],
+ int ref_mv_scale, int size, int h);
+int ff_get_mb_score(MpegEncContext * s, int mx, int my, int src_index,
+ int ref_index, int size, int h, int add_rate);
+
+/* mpeg12.c */
+extern const uint8_t ff_mpeg1_dc_scale_table[128];
+
+void mpeg1_encode_picture_header(MpegEncContext *s, int picture_number);
+void mpeg1_encode_mb(MpegEncContext *s,
+ DCTELEM block[6][64],
+ int motion_x, int motion_y);
+void ff_mpeg1_encode_init(MpegEncContext *s);
+void ff_mpeg1_encode_slice_header(MpegEncContext *s);
+void ff_mpeg1_clean_buffers(MpegEncContext *s);
+int ff_mpeg1_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size, AVCodecParserContext *s, int64_t *rtStart, AVCodecContext *avctx); /* rtStart, avctx: ffdshow custom code */
+
+extern const uint8_t ff_aic_dc_scale_table[32];
+extern const uint8_t ff_h263_chroma_qscale_table[32];
+extern const uint8_t ff_h263_loop_filter_strength[32];
+
+/* h261.c */
+void ff_h261_loop_filter(MpegEncContext *s);
+void ff_h261_reorder_mb_index(MpegEncContext* s);
+void ff_h261_encode_mb(MpegEncContext *s,
+ DCTELEM block[6][64],
+ int motion_x, int motion_y);
+void ff_h261_encode_picture_header(MpegEncContext * s, int picture_number);
+void ff_h261_encode_init(MpegEncContext *s);
+int ff_h261_get_picture_format(int width, int height);
+
+
+/* rv10.c */
+void rv10_encode_picture_header(MpegEncContext *s, int picture_number);
+int rv_decode_dc(MpegEncContext *s, int n);
+void rv20_encode_picture_header(MpegEncContext *s, int picture_number);
+
+
+/* msmpeg4.c */
+void msmpeg4_encode_picture_header(MpegEncContext * s, int picture_number);
+void msmpeg4_encode_ext_header(MpegEncContext * s);
+void msmpeg4_encode_mb(MpegEncContext * s,
+ DCTELEM block[6][64],
+ int motion_x, int motion_y);
+int msmpeg4_decode_picture_header(MpegEncContext * s);
+int msmpeg4_decode_ext_header(MpegEncContext * s, int buf_size);
+int ff_msmpeg4_decode_init(AVCodecContext *avctx);
+void ff_msmpeg4_encode_init(MpegEncContext *s);
+int ff_wmv2_decode_picture_header(MpegEncContext * s);
+int ff_wmv2_decode_secondary_picture_header(MpegEncContext * s);
+void ff_wmv2_add_mb(MpegEncContext *s, DCTELEM block[6][64], uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr);
+void ff_mspel_motion(MpegEncContext *s,
+ uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
+ uint8_t **ref_picture, op_pixels_func (*pix_op)[4],
+ int motion_x, int motion_y, int h);
+int ff_wmv2_encode_picture_header(MpegEncContext * s, int picture_number);
+void ff_wmv2_encode_mb(MpegEncContext * s,
+ DCTELEM block[6][64],
+ int motion_x, int motion_y);
+
+#endif /* AVCODEC_MPEGVIDEO_H */
+
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpegvideo_common.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpegvideo_common.h
new file mode 100644
index 000000000..6174a7219
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/mpegvideo_common.h
@@ -0,0 +1,901 @@
+/*
+ * The simplest mpeg encoder (well, it was the simplest!)
+ * Copyright (c) 2000,2001 Fabrice Bellard
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/mpegvideo_common.h
+ * The simplest mpeg encoder (well, it was the simplest!).
+ */
+
+#ifndef AVCODEC_MPEGVIDEO_COMMON_H
+#define AVCODEC_MPEGVIDEO_COMMON_H
+
+#include <string.h>
+#include "avcodec.h"
+#include "dsputil.h"
+#include "mpegvideo.h"
+#include "mjpegenc.h"
+#include "msmpeg4.h"
+#include "faandct.h"
+#include <limits.h>
+
+int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
+int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
+void denoise_dct_c(MpegEncContext *s, DCTELEM *block);
+
+/**
+ * allocates a Picture
+ * The pixels are allocated/set by calling get_buffer() if shared=0
+ */
+int alloc_picture(MpegEncContext *s, Picture *pic, int shared);
+
+/**
+ * sets the given MpegEncContext to common defaults (same for encoding and decoding).
+ * the changed fields will not depend upon the prior state of the MpegEncContext.
+ */
+void MPV_common_defaults(MpegEncContext *s);
+
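+/**
+ * Global motion compensation for the single-warping-point case: the whole
+ * macroblock is shifted by one translation vector (s->sprite_offset).
+ * After the shift by (3 - accuracy) the vector is in 1/16-pel units; luma
+ * uses dsp.gmc1() for the sub-pel case and plain half-pel copies when the
+ * fractional part is a multiple of a half pel.
+ */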
+static inline void gmc1_motion(MpegEncContext *s,
+ uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
+ uint8_t **ref_picture)
+{
+ uint8_t *ptr;
+ int offset, src_x, src_y, linesize, uvlinesize;
+ int motion_x, motion_y;
+ int emu=0;
+
+ motion_x= s->sprite_offset[0][0];
+ motion_y= s->sprite_offset[0][1];
+ src_x = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy+1));
+ src_y = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy+1));
+ motion_x<<=(3-s->sprite_warping_accuracy);
+ motion_y<<=(3-s->sprite_warping_accuracy);
+ src_x = av_clip(src_x, -16, s->width);
+ if (src_x == s->width)
+ motion_x =0;
+ src_y = av_clip(src_y, -16, s->height);
+ if (src_y == s->height)
+ motion_y =0;
+
+ linesize = s->linesize;
+ uvlinesize = s->uvlinesize;
+
+ ptr = ref_picture[0] + (src_y * linesize) + src_x;
+
+ if(s->flags&CODEC_FLAG_EMU_EDGE){
+ if( (unsigned)src_x >= s->h_edge_pos - 17
+ || (unsigned)src_y >= s->v_edge_pos - 17){
+ ff_emulated_edge_mc(s->edge_emu_buffer, ptr, linesize, 17, 17, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
+ ptr= s->edge_emu_buffer;
+ }
+ }
+
+ if((motion_x|motion_y)&7){
+ s->dsp.gmc1(dest_y , ptr , linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
+ s->dsp.gmc1(dest_y+8, ptr+8, linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
+ }else{
+ int dxy;
+
+ dxy= ((motion_x>>3)&1) | ((motion_y>>2)&2);
+ if (s->no_rounding){
+ s->dsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
+ }else{
+ s->dsp.put_pixels_tab [0][dxy](dest_y, ptr, linesize, 16);
+ }
+ }
+
+ if(CONFIG_GRAY && s->flags&CODEC_FLAG_GRAY) return;
+
+ motion_x= s->sprite_offset[1][0];
+ motion_y= s->sprite_offset[1][1];
+ src_x = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy+1));
+ src_y = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy+1));
+ motion_x<<=(3-s->sprite_warping_accuracy);
+ motion_y<<=(3-s->sprite_warping_accuracy);
+ src_x = av_clip(src_x, -8, s->width>>1);
+ if (src_x == s->width>>1)
+ motion_x =0;
+ src_y = av_clip(src_y, -8, s->height>>1);
+ if (src_y == s->height>>1)
+ motion_y =0;
+
+ offset = (src_y * uvlinesize) + src_x;
+ ptr = ref_picture[1] + offset;
+ if(s->flags&CODEC_FLAG_EMU_EDGE){
+ if( (unsigned)src_x >= (s->h_edge_pos>>1) - 9
+ || (unsigned)src_y >= (s->v_edge_pos>>1) - 9){
+ ff_emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
+ ptr= s->edge_emu_buffer;
+ emu=1;
+ }
+ }
+ s->dsp.gmc1(dest_cb, ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);
+
+ ptr = ref_picture[2] + offset;
+ if(emu){
+ ff_emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
+ ptr= s->edge_emu_buffer;
+ }
+ s->dsp.gmc1(dest_cr, ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);
+
+ return;
+}
+
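+/**
+ * Global motion compensation for the general (affine) case: each plane is
+ * warped with dsp.gmc() using the parameters in s->sprite_offset and
+ * s->sprite_delta; luma is processed as two 8-pixel wide columns.
+ */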
+static inline void gmc_motion(MpegEncContext *s,
+ uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
+ uint8_t **ref_picture)
+{
+ uint8_t *ptr;
+ int linesize, uvlinesize;
+ const int a= s->sprite_warping_accuracy;
+ int ox, oy;
+
+ linesize = s->linesize;
+ uvlinesize = s->uvlinesize;
+
+ ptr = ref_picture[0];
+
+ ox= s->sprite_offset[0][0] + s->sprite_delta[0][0]*s->mb_x*16 + s->sprite_delta[0][1]*s->mb_y*16;
+ oy= s->sprite_offset[0][1] + s->sprite_delta[1][0]*s->mb_x*16 + s->sprite_delta[1][1]*s->mb_y*16;
+
+ s->dsp.gmc(dest_y, ptr, linesize, 16,
+ ox,
+ oy,
+ s->sprite_delta[0][0], s->sprite_delta[0][1],
+ s->sprite_delta[1][0], s->sprite_delta[1][1],
+ a+1, (1<<(2*a+1)) - s->no_rounding,
+ s->h_edge_pos, s->v_edge_pos);
+ s->dsp.gmc(dest_y+8, ptr, linesize, 16,
+ ox + s->sprite_delta[0][0]*8,
+ oy + s->sprite_delta[1][0]*8,
+ s->sprite_delta[0][0], s->sprite_delta[0][1],
+ s->sprite_delta[1][0], s->sprite_delta[1][1],
+ a+1, (1<<(2*a+1)) - s->no_rounding,
+ s->h_edge_pos, s->v_edge_pos);
+
+ if(CONFIG_GRAY && s->flags&CODEC_FLAG_GRAY) return;
+
+ ox= s->sprite_offset[1][0] + s->sprite_delta[0][0]*s->mb_x*8 + s->sprite_delta[0][1]*s->mb_y*8;
+ oy= s->sprite_offset[1][1] + s->sprite_delta[1][0]*s->mb_x*8 + s->sprite_delta[1][1]*s->mb_y*8;
+
+ ptr = ref_picture[1];
+ s->dsp.gmc(dest_cb, ptr, uvlinesize, 8,
+ ox,
+ oy,
+ s->sprite_delta[0][0], s->sprite_delta[0][1],
+ s->sprite_delta[1][0], s->sprite_delta[1][1],
+ a+1, (1<<(2*a+1)) - s->no_rounding,
+ s->h_edge_pos>>1, s->v_edge_pos>>1);
+
+ ptr = ref_picture[2];
+ s->dsp.gmc(dest_cr, ptr, uvlinesize, 8,
+ ox,
+ oy,
+ s->sprite_delta[0][0], s->sprite_delta[0][1],
+ s->sprite_delta[1][0], s->sprite_delta[1][1],
+ a+1, (1<<(2*a+1)) - s->no_rounding,
+ s->h_edge_pos>>1, s->v_edge_pos>>1);
+}
+
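+/**
+ * Half-pel motion compensation of a single w x h block. The source
+ * position is clipped, ff_emulated_edge_mc() is used when the vector
+ * points outside the decoded area, and the return value is 1 if the
+ * edge-emulation buffer was used, 0 otherwise.
+ */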
+static inline int hpel_motion(MpegEncContext *s,
+ uint8_t *dest, uint8_t *src,
+ int field_based, int field_select,
+ int src_x, int src_y,
+ int width, int height, int stride,
+ int h_edge_pos, int v_edge_pos,
+ int w, int h, op_pixels_func *pix_op,
+ int motion_x, int motion_y)
+{
+ int dxy;
+ int emu=0;
+
+ dxy = ((motion_y & 1) << 1) | (motion_x & 1);
+ src_x += motion_x >> 1;
+ src_y += motion_y >> 1;
+
+ /* WARNING: do not forget half pels */
+ src_x = av_clip(src_x, -16, width); //FIXME unneeded for emu?
+ if (src_x == width)
+ dxy &= ~1;
+ src_y = av_clip(src_y, -16, height);
+ if (src_y == height)
+ dxy &= ~2;
+ src += src_y * stride + src_x;
+
+ if(s->unrestricted_mv && (s->flags&CODEC_FLAG_EMU_EDGE)){
+ if( (unsigned)src_x > h_edge_pos - (motion_x&1) - w
+ || (unsigned)src_y > v_edge_pos - (motion_y&1) - h){
+ ff_emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
+ src_x, src_y<<field_based, h_edge_pos, s->v_edge_pos);
+ src= s->edge_emu_buffer;
+ emu=1;
+ }
+ }
+ if(field_select)
+ src += s->linesize;
+ pix_op[dxy](dest, src, stride, h);
+ return emu;
+}
+
+static av_always_inline
+void mpeg_motion_internal(MpegEncContext *s,
+ uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
+ int field_based, int bottom_field, int field_select,
+ uint8_t **ref_picture, op_pixels_func (*pix_op)[4],
+ int motion_x, int motion_y, int h, int is_mpeg12, int mb_y)
+{
+ uint8_t *ptr_y, *ptr_cb, *ptr_cr;
+ int dxy, uvdxy, mx, my, src_x, src_y,
+ uvsrc_x, uvsrc_y, v_edge_pos, uvlinesize, linesize;
+
+#if 0
+if(s->quarter_sample)
+{
+ motion_x>>=1;
+ motion_y>>=1;
+}
+#endif
+
+ v_edge_pos = s->v_edge_pos >> field_based;
+ linesize = s->current_picture.linesize[0] << field_based;
+ uvlinesize = s->current_picture.linesize[1] << field_based;
+
+ dxy = ((motion_y & 1) << 1) | (motion_x & 1);
+ src_x = s->mb_x* 16 + (motion_x >> 1);
+ src_y =( mb_y<<(4-field_based)) + (motion_y >> 1);
+
+ if (!is_mpeg12 && s->out_format == FMT_H263) {
+ if((s->workaround_bugs & FF_BUG_HPEL_CHROMA) && field_based){
+ mx = (motion_x>>1)|(motion_x&1);
+ my = motion_y >>1;
+ uvdxy = ((my & 1) << 1) | (mx & 1);
+ uvsrc_x = s->mb_x* 8 + (mx >> 1);
+ uvsrc_y =( mb_y<<(3-field_based))+ (my >> 1);
+ }else{
+ uvdxy = dxy | (motion_y & 2) | ((motion_x & 2) >> 1);
+ uvsrc_x = src_x>>1;
+ uvsrc_y = src_y>>1;
+ }
+ }else if(!is_mpeg12 && s->out_format == FMT_H261){//even chroma mv's are full pel in H261
+ mx = motion_x / 4;
+ my = motion_y / 4;
+ uvdxy = 0;
+ uvsrc_x = s->mb_x*8 + mx;
+ uvsrc_y = mb_y*8 + my;
+ } else {
+ if(s->chroma_y_shift){
+ mx = motion_x / 2;
+ my = motion_y / 2;
+ uvdxy = ((my & 1) << 1) | (mx & 1);
+ uvsrc_x = s->mb_x* 8 + (mx >> 1);
+ uvsrc_y =( mb_y<<(3-field_based))+ (my >> 1);
+ } else {
+ if(s->chroma_x_shift){
+ //Chroma422
+ mx = motion_x / 2;
+ uvdxy = ((motion_y & 1) << 1) | (mx & 1);
+ uvsrc_x = s->mb_x* 8 + (mx >> 1);
+ uvsrc_y = src_y;
+ } else {
+ //Chroma444
+ uvdxy = dxy;
+ uvsrc_x = src_x;
+ uvsrc_y = src_y;
+ }
+ }
+ }
+
+ ptr_y = ref_picture[0] + src_y * linesize + src_x;
+ ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
+ ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
+
+ if( (unsigned)src_x > s->h_edge_pos - (motion_x&1) - 16
+ || (unsigned)src_y > v_edge_pos - (motion_y&1) - h){
+ if(is_mpeg12 || s->codec_id == CODEC_ID_MPEG2VIDEO ||
+ s->codec_id == CODEC_ID_MPEG1VIDEO){
+ av_log(s->avctx,AV_LOG_DEBUG,
+ "MPEG motion vector out of boundary (%d %d)\n", src_x, src_y);
+ return;
+ }
+ ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize,
+ 17, 17+field_based,
+ src_x, src_y<<field_based,
+ s->h_edge_pos, s->v_edge_pos);
+ ptr_y = s->edge_emu_buffer;
+ if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
+ uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
+ ff_emulated_edge_mc(uvbuf ,
+ ptr_cb, s->uvlinesize,
+ 9, 9+field_based,
+ uvsrc_x, uvsrc_y<<field_based,
+ s->h_edge_pos>>1, s->v_edge_pos>>1);
+ ff_emulated_edge_mc(uvbuf+16,
+ ptr_cr, s->uvlinesize,
+ 9, 9+field_based,
+ uvsrc_x, uvsrc_y<<field_based,
+ s->h_edge_pos>>1, s->v_edge_pos>>1);
+ ptr_cb= uvbuf;
+ ptr_cr= uvbuf+16;
+ }
+ }
+
+ if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
+ dest_y += s->linesize;
+ dest_cb+= s->uvlinesize;
+ dest_cr+= s->uvlinesize;
+ }
+
+ if(field_select){
+ ptr_y += s->linesize;
+ ptr_cb+= s->uvlinesize;
+ ptr_cr+= s->uvlinesize;
+ }
+
+ pix_op[0][dxy](dest_y, ptr_y, linesize, h);
+
+ if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
+ pix_op[s->chroma_x_shift][uvdxy]
+ (dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift);
+ pix_op[s->chroma_x_shift][uvdxy]
+ (dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift);
+ }
+ if(!is_mpeg12 && (CONFIG_H261_ENCODER || CONFIG_H261_DECODER) &&
+ s->out_format == FMT_H261){
+ ff_h261_loop_filter(s);
+ }
+}
+/* apply one mpeg motion vector to the three components */
+static av_always_inline
+void mpeg_motion(MpegEncContext *s,
+ uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
+ int field_based, int bottom_field, int field_select,
+ uint8_t **ref_picture, op_pixels_func (*pix_op)[4],
+ int motion_x, int motion_y, int h, int mb_y)
+{
+#if !CONFIG_SMALL
+ if(s->out_format == FMT_MPEG1)
+ mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, field_based,
+ bottom_field, field_select, ref_picture, pix_op,
+ motion_x, motion_y, h, 1, mb_y);
+ else
+#endif
+ mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, field_based,
+ bottom_field, field_select, ref_picture, pix_op,
+ motion_x, motion_y, h, 0, mb_y);
+}
+
+//FIXME move to dsputil, avg variant, 16x16 version
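+/* Each output pixel of put_obmc() is a weighted sum of the five overlapping
+ * predictions (mid, top, left, right, bottom); the weights in every
+ * OBMC_FILTER invocation add up to 8, so the "+4 >> 3" is a rounded
+ * division that keeps the result in pixel range. */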
+static inline void put_obmc(uint8_t *dst, uint8_t *src[5], int stride){
+ int x;
+ uint8_t * const top = src[1];
+ uint8_t * const left = src[2];
+ uint8_t * const mid = src[0];
+ uint8_t * const right = src[3];
+ uint8_t * const bottom= src[4];
+#define OBMC_FILTER(x, t, l, m, r, b)\
+ dst[x]= (t*top[x] + l*left[x] + m*mid[x] + r*right[x] + b*bottom[x] + 4)>>3
+#define OBMC_FILTER4(x, t, l, m, r, b)\
+ OBMC_FILTER(x , t, l, m, r, b);\
+ OBMC_FILTER(x+1 , t, l, m, r, b);\
+ OBMC_FILTER(x +stride, t, l, m, r, b);\
+ OBMC_FILTER(x+1+stride, t, l, m, r, b);
+
+ x=0;
+ OBMC_FILTER (x , 2, 2, 4, 0, 0);
+ OBMC_FILTER (x+1, 2, 1, 5, 0, 0);
+ OBMC_FILTER4(x+2, 2, 1, 5, 0, 0);
+ OBMC_FILTER4(x+4, 2, 0, 5, 1, 0);
+ OBMC_FILTER (x+6, 2, 0, 5, 1, 0);
+ OBMC_FILTER (x+7, 2, 0, 4, 2, 0);
+ x+= stride;
+ OBMC_FILTER (x , 1, 2, 5, 0, 0);
+ OBMC_FILTER (x+1, 1, 2, 5, 0, 0);
+ OBMC_FILTER (x+6, 1, 0, 5, 2, 0);
+ OBMC_FILTER (x+7, 1, 0, 5, 2, 0);
+ x+= stride;
+ OBMC_FILTER4(x , 1, 2, 5, 0, 0);
+ OBMC_FILTER4(x+2, 1, 1, 6, 0, 0);
+ OBMC_FILTER4(x+4, 1, 0, 6, 1, 0);
+ OBMC_FILTER4(x+6, 1, 0, 5, 2, 0);
+ x+= 2*stride;
+ OBMC_FILTER4(x , 0, 2, 5, 0, 1);
+ OBMC_FILTER4(x+2, 0, 1, 6, 0, 1);
+ OBMC_FILTER4(x+4, 0, 0, 6, 1, 1);
+ OBMC_FILTER4(x+6, 0, 0, 5, 2, 1);
+ x+= 2*stride;
+ OBMC_FILTER (x , 0, 2, 5, 0, 1);
+ OBMC_FILTER (x+1, 0, 2, 5, 0, 1);
+ OBMC_FILTER4(x+2, 0, 1, 5, 0, 2);
+ OBMC_FILTER4(x+4, 0, 0, 5, 1, 2);
+ OBMC_FILTER (x+6, 0, 0, 5, 2, 1);
+ OBMC_FILTER (x+7, 0, 0, 5, 2, 1);
+ x+= stride;
+ OBMC_FILTER (x , 0, 2, 4, 0, 2);
+ OBMC_FILTER (x+1, 0, 1, 5, 0, 2);
+ OBMC_FILTER (x+6, 0, 0, 5, 1, 2);
+ OBMC_FILTER (x+7, 0, 0, 4, 2, 2);
+}
+
+/* obmc for 1 8x8 luma block */
+static inline void obmc_motion(MpegEncContext *s,
+ uint8_t *dest, uint8_t *src,
+ int src_x, int src_y,
+ op_pixels_func *pix_op,
+ int16_t mv[5][2]/* mid top left right bottom*/)
+#define MID 0
+{
+ int i;
+ uint8_t *ptr[5];
+
+ assert(s->quarter_sample==0);
+
+ for(i=0; i<5; i++){
+ if(i && mv[i][0]==mv[MID][0] && mv[i][1]==mv[MID][1]){
+ ptr[i]= ptr[MID];
+ }else{
+ ptr[i]= s->obmc_scratchpad + 8*(i&1) + s->linesize*8*(i>>1);
+ hpel_motion(s, ptr[i], src, 0, 0,
+ src_x, src_y,
+ s->width, s->height, s->linesize,
+ s->h_edge_pos, s->v_edge_pos,
+ 8, 8, pix_op,
+ mv[i][0], mv[i][1]);
+ }
+ }
+
+ put_obmc(dest, ptr, s->linesize);
+}
+
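+/*
+ * Quarter-pel luma motion compensation; the chroma vector is derived from
+ * the luma vector and applied at half-pel precision. The
+ * FF_BUG_QPEL_CHROMA/FF_BUG_QPEL_CHROMA2 branches appear to reproduce the
+ * chroma rounding of certain buggy encoders so that their streams decode
+ * as intended (workaround_bugs).
+ */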
+static inline void qpel_motion(MpegEncContext *s,
+ uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
+ int field_based, int bottom_field, int field_select,
+ uint8_t **ref_picture, op_pixels_func (*pix_op)[4],
+ qpel_mc_func (*qpix_op)[16],
+ int motion_x, int motion_y, int h)
+{
+ uint8_t *ptr_y, *ptr_cb, *ptr_cr;
+ int dxy, uvdxy, mx, my, src_x, src_y, uvsrc_x, uvsrc_y, v_edge_pos, linesize, uvlinesize;
+
+ dxy = ((motion_y & 3) << 2) | (motion_x & 3);
+ src_x = s->mb_x * 16 + (motion_x >> 2);
+ src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);
+
+ v_edge_pos = s->v_edge_pos >> field_based;
+ linesize = s->linesize << field_based;
+ uvlinesize = s->uvlinesize << field_based;
+
+ if(field_based){
+ mx= motion_x/2;
+ my= motion_y>>1;
+ }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA2){
+ static const int rtab[8]= {0,0,1,1,0,0,0,1};
+ mx= (motion_x>>1) + rtab[motion_x&7];
+ my= (motion_y>>1) + rtab[motion_y&7];
+ }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA){
+ mx= (motion_x>>1)|(motion_x&1);
+ my= (motion_y>>1)|(motion_y&1);
+ }else{
+ mx= motion_x/2;
+ my= motion_y/2;
+ }
+ mx= (mx>>1)|(mx&1);
+ my= (my>>1)|(my&1);
+
+ uvdxy= (mx&1) | ((my&1)<<1);
+ mx>>=1;
+ my>>=1;
+
+ uvsrc_x = s->mb_x * 8 + mx;
+ uvsrc_y = s->mb_y * (8 >> field_based) + my;
+
+ ptr_y = ref_picture[0] + src_y * linesize + src_x;
+ ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
+ ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
+
+ if( (unsigned)src_x > s->h_edge_pos - (motion_x&3) - 16
+ || (unsigned)src_y > v_edge_pos - (motion_y&3) - h ){
+ ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize,
+ 17, 17+field_based, src_x, src_y<<field_based,
+ s->h_edge_pos, s->v_edge_pos);
+ ptr_y= s->edge_emu_buffer;
+ if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
+ uint8_t *uvbuf= s->edge_emu_buffer + 18*s->linesize;
+ ff_emulated_edge_mc(uvbuf, ptr_cb, s->uvlinesize,
+ 9, 9 + field_based,
+ uvsrc_x, uvsrc_y<<field_based,
+ s->h_edge_pos>>1, s->v_edge_pos>>1);
+ ff_emulated_edge_mc(uvbuf + 16, ptr_cr, s->uvlinesize,
+ 9, 9 + field_based,
+ uvsrc_x, uvsrc_y<<field_based,
+ s->h_edge_pos>>1, s->v_edge_pos>>1);
+ ptr_cb= uvbuf;
+ ptr_cr= uvbuf + 16;
+ }
+ }
+
+ if(!field_based)
+ qpix_op[0][dxy](dest_y, ptr_y, linesize);
+ else{
+ if(bottom_field){
+ dest_y += s->linesize;
+ dest_cb+= s->uvlinesize;
+ dest_cr+= s->uvlinesize;
+ }
+
+ if(field_select){
+ ptr_y += s->linesize;
+ ptr_cb += s->uvlinesize;
+ ptr_cr += s->uvlinesize;
+ }
+ //damn interlaced mode
+ //FIXME boundary mirroring is not exactly correct here
+ qpix_op[1][dxy](dest_y , ptr_y , linesize);
+ qpix_op[1][dxy](dest_y+8, ptr_y+8, linesize);
+ }
+ if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
+ pix_op[1][uvdxy](dest_cr, ptr_cr, uvlinesize, h >> 1);
+ pix_op[1][uvdxy](dest_cb, ptr_cb, uvlinesize, h >> 1);
+ }
+}
+
+/**
+ * h263 chroma 4mv motion compensation.
+ */
+static inline void chroma_4mv_motion(MpegEncContext *s,
+ uint8_t *dest_cb, uint8_t *dest_cr,
+ uint8_t **ref_picture,
+ op_pixels_func *pix_op,
+ int mx, int my){
+ int dxy, emu=0, src_x, src_y, offset;
+ uint8_t *ptr;
+
+ /* In case of 8X8, we construct a single chroma motion vector
+ with a special rounding */
+ mx= ff_h263_round_chroma(mx);
+ my= ff_h263_round_chroma(my);
+
+ dxy = ((my & 1) << 1) | (mx & 1);
+ mx >>= 1;
+ my >>= 1;
+
+ src_x = s->mb_x * 8 + mx;
+ src_y = s->mb_y * 8 + my;
+ src_x = av_clip(src_x, -8, s->width/2);
+ if (src_x == s->width/2)
+ dxy &= ~1;
+ src_y = av_clip(src_y, -8, s->height/2);
+ if (src_y == s->height/2)
+ dxy &= ~2;
+
+ offset = (src_y * (s->uvlinesize)) + src_x;
+ ptr = ref_picture[1] + offset;
+ if(s->flags&CODEC_FLAG_EMU_EDGE){
+ if( (unsigned)src_x > (s->h_edge_pos>>1) - (dxy &1) - 8
+ || (unsigned)src_y > (s->v_edge_pos>>1) - (dxy>>1) - 8){
+ ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
+ 9, 9, src_x, src_y,
+ s->h_edge_pos>>1, s->v_edge_pos>>1);
+ ptr= s->edge_emu_buffer;
+ emu=1;
+ }
+ }
+ pix_op[dxy](dest_cb, ptr, s->uvlinesize, 8);
+
+ ptr = ref_picture[2] + offset;
+ if(emu){
+ ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
+ 9, 9, src_x, src_y,
+ s->h_edge_pos>>1, s->v_edge_pos>>1);
+ ptr= s->edge_emu_buffer;
+ }
+ pix_op[dxy](dest_cr, ptr, s->uvlinesize, 8);
+}
+
+static inline void prefetch_motion(MpegEncContext *s, uint8_t **pix, int dir){
+ /* fetch pixels for estimated mv 4 macroblocks ahead
+ * optimized for 64byte cache lines */
+ const int shift = s->quarter_sample ? 2 : 1;
+ const int mx= (s->mv[dir][0][0]>>shift) + 16*s->mb_x + 8;
+ const int my= (s->mv[dir][0][1]>>shift) + 16*s->mb_y;
+ int off= mx + (my + (s->mb_x&3)*4)*s->linesize + 64;
+ s->dsp.prefetch(pix[0]+off, s->linesize, 4);
+ off= (mx>>1) + ((my>>1) + (s->mb_x&7))*s->uvlinesize + 64;
+ s->dsp.prefetch(pix[1]+off, pix[2]-pix[1], 2);
+}
+
+/**
+ * motion compensation of a single macroblock
+ * @param s context
+ * @param dest_y luma destination pointer
+ * @param dest_cb chroma cb/u destination pointer
+ * @param dest_cr chroma cr/v destination pointer
+ * @param dir direction (0->forward, 1->backward)
+ * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
+ * @param pix_op halfpel motion compensation function (average or put normally)
+ * @param qpix_op qpel motion compensation function (average or put normally)
+ * the motion vectors are taken from s->mv and the MV type from s->mv_type
+ */
+static av_always_inline void MPV_motion_internal(MpegEncContext *s,
+ uint8_t *dest_y, uint8_t *dest_cb,
+ uint8_t *dest_cr, int dir,
+ uint8_t **ref_picture,
+ op_pixels_func (*pix_op)[4],
+ qpel_mc_func (*qpix_op)[16], int is_mpeg12)
+{
+ int dxy, mx, my, src_x, src_y, motion_x, motion_y;
+ int mb_x, mb_y, i;
+ uint8_t *ptr, *dest;
+
+ mb_x = s->mb_x;
+ mb_y = s->mb_y;
+
+ prefetch_motion(s, ref_picture, dir);
+
+ if(!is_mpeg12 && s->obmc && s->pict_type != FF_B_TYPE){
+ int16_t mv_cache[4][4][2];
+ const int xy= s->mb_x + s->mb_y*s->mb_stride;
+ const int mot_stride= s->b8_stride;
+ const int mot_xy= mb_x*2 + mb_y*2*mot_stride;
+
+ assert(!s->mb_skipped);
+
+ memcpy(mv_cache[1][1], s->current_picture.motion_val[0][mot_xy ], sizeof(int16_t)*4);
+ memcpy(mv_cache[2][1], s->current_picture.motion_val[0][mot_xy+mot_stride], sizeof(int16_t)*4);
+ memcpy(mv_cache[3][1], s->current_picture.motion_val[0][mot_xy+mot_stride], sizeof(int16_t)*4);
+
+ if(mb_y==0 || IS_INTRA(s->current_picture.mb_type[xy-s->mb_stride])){
+ memcpy(mv_cache[0][1], mv_cache[1][1], sizeof(int16_t)*4);
+ }else{
+ memcpy(mv_cache[0][1], s->current_picture.motion_val[0][mot_xy-mot_stride], sizeof(int16_t)*4);
+ }
+
+ if(mb_x==0 || IS_INTRA(s->current_picture.mb_type[xy-1])){
+ *(int32_t*)mv_cache[1][0]= *(int32_t*)mv_cache[1][1];
+ *(int32_t*)mv_cache[2][0]= *(int32_t*)mv_cache[2][1];
+ }else{
+ *(int32_t*)mv_cache[1][0]= *(int32_t*)s->current_picture.motion_val[0][mot_xy-1];
+ *(int32_t*)mv_cache[2][0]= *(int32_t*)s->current_picture.motion_val[0][mot_xy-1+mot_stride];
+ }
+
+ if(mb_x+1>=s->mb_width || IS_INTRA(s->current_picture.mb_type[xy+1])){
+ *(int32_t*)mv_cache[1][3]= *(int32_t*)mv_cache[1][2];
+ *(int32_t*)mv_cache[2][3]= *(int32_t*)mv_cache[2][2];
+ }else{
+ *(int32_t*)mv_cache[1][3]= *(int32_t*)s->current_picture.motion_val[0][mot_xy+2];
+ *(int32_t*)mv_cache[2][3]= *(int32_t*)s->current_picture.motion_val[0][mot_xy+2+mot_stride];
+ }
+
+ mx = 0;
+ my = 0;
+ for(i=0;i<4;i++) {
+ const int x= (i&1)+1;
+ const int y= (i>>1)+1;
+ int16_t mv[5][2]= {
+ {mv_cache[y][x ][0], mv_cache[y][x ][1]},
+ {mv_cache[y-1][x][0], mv_cache[y-1][x][1]},
+ {mv_cache[y][x-1][0], mv_cache[y][x-1][1]},
+ {mv_cache[y][x+1][0], mv_cache[y][x+1][1]},
+ {mv_cache[y+1][x][0], mv_cache[y+1][x][1]}};
+ //FIXME cleanup
+ obmc_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
+ ref_picture[0],
+ mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >>1) * 8,
+ pix_op[1],
+ mv);
+
+ mx += mv[0][0];
+ my += mv[0][1];
+ }
+ if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
+ chroma_4mv_motion(s, dest_cb, dest_cr, ref_picture, pix_op[1], mx, my);
+
+ return;
+ }
+
+ switch(s->mv_type) {
+ case MV_TYPE_16X16:
+ if(s->mcsel){
+ if(s->real_sprite_warping_points==1){
+ gmc1_motion(s, dest_y, dest_cb, dest_cr,
+ ref_picture);
+ }else{
+ gmc_motion(s, dest_y, dest_cb, dest_cr,
+ ref_picture);
+ }
+ }else if(!is_mpeg12 && s->quarter_sample){
+ qpel_motion(s, dest_y, dest_cb, dest_cr,
+ 0, 0, 0,
+ ref_picture, pix_op, qpix_op,
+ s->mv[dir][0][0], s->mv[dir][0][1], 16);
+ }else if(!is_mpeg12 && (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) && s->mspel){
+ ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
+ ref_picture, pix_op,
+ s->mv[dir][0][0], s->mv[dir][0][1], 16);
+ }else
+ {
+ mpeg_motion(s, dest_y, dest_cb, dest_cr,
+ 0, 0, 0,
+ ref_picture, pix_op,
+ s->mv[dir][0][0], s->mv[dir][0][1], 16, mb_y);
+ }
+ break;
+ case MV_TYPE_8X8:
+ if (!is_mpeg12) {
+ mx = 0;
+ my = 0;
+ if(s->quarter_sample){
+ for(i=0;i<4;i++) {
+ motion_x = s->mv[dir][i][0];
+ motion_y = s->mv[dir][i][1];
+
+ dxy = ((motion_y & 3) << 2) | (motion_x & 3);
+ src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8;
+ src_y = mb_y * 16 + (motion_y >> 2) + (i >>1) * 8;
+
+ /* WARNING: do not forget half pels */
+ src_x = av_clip(src_x, -16, s->width);
+ if (src_x == s->width)
+ dxy &= ~3;
+ src_y = av_clip(src_y, -16, s->height);
+ if (src_y == s->height)
+ dxy &= ~12;
+
+ ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
+ if(s->flags&CODEC_FLAG_EMU_EDGE){
+ if( (unsigned)src_x > s->h_edge_pos - (motion_x&3) - 8
+ || (unsigned)src_y > s->v_edge_pos - (motion_y&3) - 8 ){
+ ff_emulated_edge_mc(s->edge_emu_buffer, ptr,
+ s->linesize, 9, 9,
+ src_x, src_y,
+ s->h_edge_pos, s->v_edge_pos);
+ ptr= s->edge_emu_buffer;
+ }
+ }
+ dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
+ qpix_op[1][dxy](dest, ptr, s->linesize);
+
+ mx += s->mv[dir][i][0]/2;
+ my += s->mv[dir][i][1]/2;
+ }
+ }else{
+ for(i=0;i<4;i++) {
+ hpel_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
+ ref_picture[0], 0, 0,
+ mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >>1) * 8,
+ s->width, s->height, s->linesize,
+ s->h_edge_pos, s->v_edge_pos,
+ 8, 8, pix_op[1],
+ s->mv[dir][i][0], s->mv[dir][i][1]);
+
+ mx += s->mv[dir][i][0];
+ my += s->mv[dir][i][1];
+ }
+ }
+
+ if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
+ chroma_4mv_motion(s, dest_cb, dest_cr, ref_picture, pix_op[1], mx, my);
+ }
+ break;
+ case MV_TYPE_FIELD:
+ if (s->picture_structure == PICT_FRAME) {
+ if(!is_mpeg12 && s->quarter_sample){
+ for(i=0; i<2; i++){
+ qpel_motion(s, dest_y, dest_cb, dest_cr,
+ 1, i, s->field_select[dir][i],
+ ref_picture, pix_op, qpix_op,
+ s->mv[dir][i][0], s->mv[dir][i][1], 8);
+ }
+ }else{
+ /* top field */
+ mpeg_motion(s, dest_y, dest_cb, dest_cr,
+ 1, 0, s->field_select[dir][0],
+ ref_picture, pix_op,
+ s->mv[dir][0][0], s->mv[dir][0][1], 8, mb_y);
+ /* bottom field */
+ mpeg_motion(s, dest_y, dest_cb, dest_cr,
+ 1, 1, s->field_select[dir][1],
+ ref_picture, pix_op,
+ s->mv[dir][1][0], s->mv[dir][1][1], 8, mb_y);
+ }
+ } else {
+ if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != FF_B_TYPE && !s->first_field){
+ ref_picture= s->current_picture_ptr->data;
+ }
+
+ mpeg_motion(s, dest_y, dest_cb, dest_cr,
+ 0, 0, s->field_select[dir][0],
+ ref_picture, pix_op,
+ s->mv[dir][0][0], s->mv[dir][0][1], 16, mb_y>>1);
+ }
+ break;
+ case MV_TYPE_16X8:
+ for(i=0; i<2; i++){
+ uint8_t ** ref2picture;
+
+ if(s->picture_structure == s->field_select[dir][i] + 1
+ || s->pict_type == FF_B_TYPE || s->first_field){
+ ref2picture= ref_picture;
+ }else{
+ ref2picture= s->current_picture_ptr->data;
+ }
+
+ mpeg_motion(s, dest_y, dest_cb, dest_cr,
+ 0, 0, s->field_select[dir][i],
+ ref2picture, pix_op,
+ s->mv[dir][i][0], s->mv[dir][i][1] + 16*i, 8, mb_y>>1);
+
+ dest_y += 16*s->linesize;
+ dest_cb+= (16>>s->chroma_y_shift)*s->uvlinesize;
+ dest_cr+= (16>>s->chroma_y_shift)*s->uvlinesize;
+ }
+ break;
+ case MV_TYPE_DMV:
+ if(s->picture_structure == PICT_FRAME){
+ for(i=0; i<2; i++){
+ int j;
+ for(j=0; j<2; j++){
+ mpeg_motion(s, dest_y, dest_cb, dest_cr,
+ 1, j, j^i,
+ ref_picture, pix_op,
+ s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], 8, mb_y);
+ }
+ pix_op = s->dsp.avg_pixels_tab;
+ }
+ }else{
+ for(i=0; i<2; i++){
+ mpeg_motion(s, dest_y, dest_cb, dest_cr,
+ 0, 0, s->picture_structure != i+1,
+ ref_picture, pix_op,
+ s->mv[dir][2*i][0],s->mv[dir][2*i][1],16, mb_y>>1);
+
+ // after put we make avg of the same block
+ pix_op=s->dsp.avg_pixels_tab;
+
+ //opposite parity is always in the same frame if this is second field
+ if(!s->first_field){
+ ref_picture = s->current_picture_ptr->data;
+ }
+ }
+ }
+ break;
+ default: assert(0);
+ }
+}
+
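+/* Dispatch wrapper: unless CONFIG_SMALL is set, the MPEG-1/2 path is
+   instantiated with is_mpeg12 hard-coded to 1, so the av_always_inline body
+   above can drop the branches that only apply to other codecs. */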
+static inline void MPV_motion(MpegEncContext *s,
+ uint8_t *dest_y, uint8_t *dest_cb,
+ uint8_t *dest_cr, int dir,
+ uint8_t **ref_picture,
+ op_pixels_func (*pix_op)[4],
+ qpel_mc_func (*qpix_op)[16])
+{
+#if !CONFIG_SMALL
+ if(s->out_format == FMT_MPEG1)
+ MPV_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
+ ref_picture, pix_op, qpix_op, 1);
+ else
+#endif
+ MPV_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
+ ref_picture, pix_op, qpix_op, 0);
+}
+#endif /* AVCODEC_MPEGVIDEO_COMMON_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/msmpeg4.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/msmpeg4.c
new file mode 100644
index 000000000..32c0a95a6
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/msmpeg4.c
@@ -0,0 +1,1990 @@
+/*
+ * MSMPEG4 backend for ffmpeg encoder and decoder
+ * Copyright (c) 2001 Fabrice Bellard
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * msmpeg4v1 & v2 stuff by Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/msmpeg4.c
+ * MSMPEG4 backend for ffmpeg encoder and decoder.
+ */
+
+#include "avcodec.h"
+#include "dsputil.h"
+#include "mpegvideo.h"
+#include "msmpeg4.h"
+#include "libavutil/x86_cpu.h"
+#include "h263.h"
+#include "mpeg4video.h"
+
+/*
+ * You can also call this codec: MPEG-4 with a twist!
+ *
+ * TODO:
+ * - (encoding) select best mv table (two choices)
+ * - (encoding) select best vlc/dc table
+ */
+//#define DEBUG
+
+#define DC_VLC_BITS 9
+#define V2_INTRA_CBPC_VLC_BITS 3
+#define V2_MB_TYPE_VLC_BITS 7
+#define MV_VLC_BITS 9
+#define V2_MV_VLC_BITS 9
+#define TEX_VLC_BITS 9
+
+#define II_BITRATE 128*1024
+#define MBAC_BITRATE 50*1024
+
+#define DEFAULT_INTER_INDEX 3
+
+static uint32_t v2_dc_lum_table[512][2];
+static uint32_t v2_dc_chroma_table[512][2];
+
+/* vc1 externs */
+extern const uint8_t wmv3_dc_scale_table[32];
+
+#ifdef DEBUG
+int frame_count = 0;
+#endif
+
+#include "msmpeg4data.h"
+
+#if CONFIG_ENCODERS //strangely gcc includes this even if it is not referenced
+static uint8_t rl_length[NB_RL_TABLES][MAX_LEVEL+1][MAX_RUN+1][2];
+#endif //CONFIG_ENCODERS
+
+static uint8_t static_rl_table_store[NB_RL_TABLES][2][2*MAX_RUN + MAX_LEVEL + 3];
+
+/* This table is practically identical to the one from h263
+ * except that it is inverted. */
+static av_cold void init_h263_dc_for_msmpeg4(void)
+{
+ int level, uni_code, uni_len;
+
+ for(level=-256; level<256; level++){
+ int size, v, l;
+ /* find number of bits */
+ size = 0;
+ v = abs(level);
+ while (v) {
+ v >>= 1;
+ size++;
+ }
+
+ if (level < 0)
+ l= (-level) ^ ((1 << size) - 1);
+ else
+ l= level;
+
+ /* luminance h263 */
+ uni_code= ff_mpeg4_DCtab_lum[size][0];
+ uni_len = ff_mpeg4_DCtab_lum[size][1];
+ uni_code ^= (1<<uni_len)-1; //M$ does not like compatibility
+
+ if (size > 0) {
+ uni_code<<=size; uni_code|=l;
+ uni_len+=size;
+ if (size > 8){
+ uni_code<<=1; uni_code|=1;
+ uni_len++;
+ }
+ }
+ v2_dc_lum_table[level+256][0]= uni_code;
+ v2_dc_lum_table[level+256][1]= uni_len;
+
+ /* chrominance h263 */
+ uni_code= ff_mpeg4_DCtab_chrom[size][0];
+ uni_len = ff_mpeg4_DCtab_chrom[size][1];
+ uni_code ^= (1<<uni_len)-1; //M$ does not like compatibility
+
+ if (size > 0) {
+ uni_code<<=size; uni_code|=l;
+ uni_len+=size;
+ if (size > 8){
+ uni_code<<=1; uni_code|=1;
+ uni_len++;
+ }
+ }
+ v2_dc_chroma_table[level+256][0]= uni_code;
+ v2_dc_chroma_table[level+256][1]= uni_len;
+
+ }
+}
+
+static av_cold void common_init(MpegEncContext * s)
+{
+ static int initialized=0;
+
+ switch(s->msmpeg4_version){
+ case 1:
+ case 2:
+ s->y_dc_scale_table=
+ s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
+ break;
+ case 3:
+ if(s->workaround_bugs){
+ s->y_dc_scale_table= old_ff_y_dc_scale_table;
+ s->c_dc_scale_table= wmv1_c_dc_scale_table;
+ } else{
+ s->y_dc_scale_table= ff_mpeg4_y_dc_scale_table;
+ s->c_dc_scale_table= ff_mpeg4_c_dc_scale_table;
+ }
+ break;
+ case 4:
+ case 5:
+ s->y_dc_scale_table= wmv1_y_dc_scale_table;
+ s->c_dc_scale_table= wmv1_c_dc_scale_table;
+ break;
+#if CONFIG_VC1_DECODER
+ case 6:
+ s->y_dc_scale_table= wmv3_dc_scale_table;
+ s->c_dc_scale_table= wmv3_dc_scale_table;
+ break;
+#endif
+
+ }
+
+
+ if(s->msmpeg4_version>=4){
+ ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , wmv1_scantable[1]);
+ ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, wmv1_scantable[2]);
+ ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, wmv1_scantable[3]);
+ ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , wmv1_scantable[0]);
+ }
+ //Note the default tables are set in common_init in mpegvideo.c
+
+ if(!initialized){
+ initialized=1;
+
+ init_h263_dc_for_msmpeg4();
+ }
+}
+
+#if CONFIG_ENCODERS
+
+/* build the table which associates an (x,y) motion vector with a vlc */
+static void init_mv_table(MVTable *tab)
+{
+ int i, x, y;
+
+ tab->table_mv_index = av_malloc(sizeof(uint16_t) * 4096);
+ /* mark all entries as not used */
+ for(i=0;i<4096;i++)
+ tab->table_mv_index[i] = tab->n;
+
+ for(i=0;i<tab->n;i++) {
+ x = tab->table_mvx[i];
+ y = tab->table_mvy[i];
+ tab->table_mv_index[(x << 6) | y] = i;
+ }
+}
+
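+/* Write n in {0,1,2} as a short prefix code: 0 -> "0", 1 -> "10", 2 -> "11". */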
+void ff_msmpeg4_code012(PutBitContext *pb, int n)
+{
+ if (n == 0) {
+ put_bits(pb, 1, 0);
+ } else {
+ put_bits(pb, 1, 1);
+ put_bits(pb, 1, (n >= 2));
+ }
+}
+
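+/* Return the number of bits ff_msmpeg4_encode_block() would spend on a
+   (last, run, level) triple, walking the same escape cascade as the encoder. */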
+static int get_size_of_code(MpegEncContext * s, RLTable *rl, int last, int run, int level, int intra){
+ int size=0;
+ int code;
+ int run_diff= intra ? 0 : 1;
+
+ code = get_rl_index(rl, last, run, level);
+ size+= rl->table_vlc[code][1];
+ if (code == rl->n) {
+ int level1, run1;
+
+ level1 = level - rl->max_level[last][run];
+ if (level1 < 1)
+ goto esc2;
+ code = get_rl_index(rl, last, run, level1);
+ if (code == rl->n) {
+ esc2:
+ size++;
+ if (level > MAX_LEVEL)
+ goto esc3;
+ run1 = run - rl->max_run[last][level] - run_diff;
+ if (run1 < 0)
+ goto esc3;
+ code = get_rl_index(rl, last, run1, level);
+ if (code == rl->n) {
+ esc3:
+ /* third escape */
+ size+=1+1+6+8;
+ } else {
+ /* second escape */
+ size+= 1+1+ rl->table_vlc[code][1];
+ }
+ } else {
+ /* first escape */
+ size+= 1+1+ rl->table_vlc[code][1];
+ }
+ } else {
+ size++;
+ }
+ return size;
+}
+
+av_cold void ff_msmpeg4_encode_init(MpegEncContext *s)
+{
+ static int init_done=0;
+ int i;
+
+ common_init(s);
+ if(s->msmpeg4_version>=4){
+ s->min_qcoeff= -255;
+ s->max_qcoeff= 255;
+ }
+
+ if (!init_done) {
+ /* init various encoding tables */
+ init_done = 1;
+ init_mv_table(&mv_tables[0]);
+ init_mv_table(&mv_tables[1]);
+ for(i=0;i<NB_RL_TABLES;i++)
+ init_rl(&rl_table[i], static_rl_table_store[i]);
+
+ for(i=0; i<NB_RL_TABLES; i++){
+ int level;
+ for(level=0; level<=MAX_LEVEL; level++){
+ int run;
+ for(run=0; run<=MAX_RUN; run++){
+ int last;
+ for(last=0; last<2; last++){
+ rl_length[i][level][run][last]= get_size_of_code(s, &rl_table[ i], last, run, level, 0);
+ }
+ }
+ }
+ }
+ }
+}
+
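+/* Pick the run-level VLC tables for the next frame: estimate the cost of each
+   candidate table from the gathered ac_stats and keep the cheapest choice for
+   luma and for chroma. */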
+static void find_best_tables(MpegEncContext * s)
+{
+ int i;
+ int best =-1, best_size =9999999;
+ int chroma_best=-1, best_chroma_size=9999999;
+
+ for(i=0; i<3; i++){
+ int level;
+ int chroma_size=0;
+ int size=0;
+
+ if(i>0){// ;)
+ size++;
+ chroma_size++;
+ }
+ for(level=0; level<=MAX_LEVEL; level++){
+ int run;
+ for(run=0; run<=MAX_RUN; run++){
+ int last;
+ const int last_size= size + chroma_size;
+ for(last=0; last<2; last++){
+ int inter_count = s->ac_stats[0][0][level][run][last] + s->ac_stats[0][1][level][run][last];
+ int intra_luma_count = s->ac_stats[1][0][level][run][last];
+ int intra_chroma_count= s->ac_stats[1][1][level][run][last];
+
+ if(s->pict_type==FF_I_TYPE){
+ size += intra_luma_count *rl_length[i ][level][run][last];
+ chroma_size+= intra_chroma_count*rl_length[i+3][level][run][last];
+ }else{
+ size+= intra_luma_count *rl_length[i ][level][run][last]
+ +intra_chroma_count*rl_length[i+3][level][run][last]
+ +inter_count *rl_length[i+3][level][run][last];
+ }
+ }
+ if(last_size == size+chroma_size) break;
+ }
+ }
+ if(size<best_size){
+ best_size= size;
+ best= i;
+ }
+ if(chroma_size<best_chroma_size){
+ best_chroma_size= chroma_size;
+ chroma_best= i;
+ }
+ }
+
+// printf("type:%d, best:%d, qp:%d, var:%d, mcvar:%d, size:%d //\n",
+// s->pict_type, best, s->qscale, s->mb_var_sum, s->mc_mb_var_sum, best_size);
+
+ if(s->pict_type==FF_P_TYPE) chroma_best= best;
+
+ memset(s->ac_stats, 0, sizeof(int)*(MAX_LEVEL+1)*(MAX_RUN+1)*2*2*2);
+
+ s->rl_table_index = best;
+ s->rl_chroma_table_index= chroma_best;
+
+ if(s->pict_type != s->last_non_b_pict_type){
+ s->rl_table_index= 2;
+ if(s->pict_type==FF_I_TYPE)
+ s->rl_chroma_table_index= 1;
+ else
+ s->rl_chroma_table_index= 2;
+ }
+
+}
+
+/* write MSMPEG4 compatible frame header */
+void msmpeg4_encode_picture_header(MpegEncContext * s, int picture_number)
+{
+ find_best_tables(s);
+
+ align_put_bits(&s->pb);
+ put_bits(&s->pb, 2, s->pict_type - 1);
+
+ put_bits(&s->pb, 5, s->qscale);
+ if(s->msmpeg4_version<=2){
+ s->rl_table_index = 2;
+ s->rl_chroma_table_index = 2;
+ }
+
+ s->dc_table_index = 1;
+ s->mv_table_index = 1; /* only if P frame */
+ s->use_skip_mb_code = 1; /* only if P frame */
+ s->per_mb_rl_table = 0;
+ if(s->msmpeg4_version==4)
+ s->inter_intra_pred= (s->width*s->height < 320*240 && s->bit_rate<=II_BITRATE && s->pict_type==FF_P_TYPE);
+//printf("%d %d %d %d %d\n", s->pict_type, s->bit_rate, s->inter_intra_pred, s->width, s->height);
+
+ if (s->pict_type == FF_I_TYPE) {
+ s->slice_height= s->mb_height/1;
+ put_bits(&s->pb, 5, 0x16 + s->mb_height/s->slice_height);
+
+ if(s->msmpeg4_version==4){
+ msmpeg4_encode_ext_header(s);
+ if(s->bit_rate>MBAC_BITRATE)
+ put_bits(&s->pb, 1, s->per_mb_rl_table);
+ }
+
+ if(s->msmpeg4_version>2){
+ if(!s->per_mb_rl_table){
+ ff_msmpeg4_code012(&s->pb, s->rl_chroma_table_index);
+ ff_msmpeg4_code012(&s->pb, s->rl_table_index);
+ }
+
+ put_bits(&s->pb, 1, s->dc_table_index);
+ }
+ } else {
+ put_bits(&s->pb, 1, s->use_skip_mb_code);
+
+ if(s->msmpeg4_version==4 && s->bit_rate>MBAC_BITRATE)
+ put_bits(&s->pb, 1, s->per_mb_rl_table);
+
+ if(s->msmpeg4_version>2){
+ if(!s->per_mb_rl_table)
+ ff_msmpeg4_code012(&s->pb, s->rl_table_index);
+
+ put_bits(&s->pb, 1, s->dc_table_index);
+
+ put_bits(&s->pb, 1, s->mv_table_index);
+ }
+ }
+
+ s->esc3_level_length= 0;
+ s->esc3_run_length= 0;
+}
+
+void msmpeg4_encode_ext_header(MpegEncContext * s)
+{
+ put_bits(&s->pb, 5, s->avctx->time_base.den / s->avctx->time_base.num); //yes 29.97 -> 29
+
+ put_bits(&s->pb, 11, FFMIN(s->bit_rate/1024, 2047));
+
+ if(s->msmpeg4_version>=3)
+ put_bits(&s->pb, 1, s->flipflop_rounding);
+ else
+ assert(s->flipflop_rounding==0);
+}
+
+#endif //CONFIG_ENCODERS
+
+/* predict coded block */
+int ff_msmpeg4_coded_block_pred(MpegEncContext * s, int n, uint8_t **coded_block_ptr)
+{
+ int xy, wrap, pred, a, b, c;
+
+ xy = s->block_index[n];
+ wrap = s->b8_stride;
+
+ /* B C
+ * A X
+ */
+ a = s->coded_block[xy - 1 ];
+ b = s->coded_block[xy - 1 - wrap];
+ c = s->coded_block[xy - wrap];
+
+ if (b == c) {
+ pred = a;
+ } else {
+ pred = c;
+ }
+
+ /* store value */
+ *coded_block_ptr = &s->coded_block[xy];
+
+ return pred;
+}
+
+#if CONFIG_ENCODERS
+
+void ff_msmpeg4_encode_motion(MpegEncContext * s,
+ int mx, int my)
+{
+ int code;
+ MVTable *mv;
+
+ /* modulo encoding */
+ /* WARNING: you cannot reach all the MVs even with the modulo
+ encoding. This is a somewhat strange compromise they took! */
+ if (mx <= -64)
+ mx += 64;
+ else if (mx >= 64)
+ mx -= 64;
+ if (my <= -64)
+ my += 64;
+ else if (my >= 64)
+ my -= 64;
+
+ mx += 32;
+ my += 32;
+#if 0
+ if ((unsigned)mx >= 64 ||
+ (unsigned)my >= 64)
+ av_log(s->avctx, AV_LOG_ERROR, "error mx=%d my=%d\n", mx, my);
+#endif
+ mv = &mv_tables[s->mv_table_index];
+
+ code = mv->table_mv_index[(mx << 6) | my];
+ put_bits(&s->pb,
+ mv->table_mv_bits[code],
+ mv->table_mv_code[code]);
+ if (code == mv->n) {
+ /* escape : code literally */
+ put_bits(&s->pb, 6, mx);
+ put_bits(&s->pb, 6, my);
+ }
+}
+
+void ff_msmpeg4_handle_slices(MpegEncContext *s){
+ if (s->mb_x == 0) {
+ if (s->slice_height && (s->mb_y % s->slice_height) == 0) {
+ if(s->msmpeg4_version < 4){
+ ff_mpeg4_clean_buffers(s);
+ }
+ s->first_slice_line = 1;
+ } else {
+ s->first_slice_line = 0;
+ }
+ }
+}
+
+static void msmpeg4v2_encode_motion(MpegEncContext * s, int val)
+{
+ int range, bit_size, sign, code, bits;
+
+ if (val == 0) {
+ /* zero vector */
+ code = 0;
+ put_bits(&s->pb, mvtab[code][1], mvtab[code][0]);
+ } else {
+ bit_size = s->f_code - 1;
+ range = 1 << bit_size;
+ if (val <= -64)
+ val += 64;
+ else if (val >= 64)
+ val -= 64;
+
+ if (val >= 0) {
+ sign = 0;
+ } else {
+ val = -val;
+ sign = 1;
+ }
+ val--;
+ code = (val >> bit_size) + 1;
+ bits = val & (range - 1);
+
+ put_bits(&s->pb, mvtab[code][1] + 1, (mvtab[code][0] << 1) | sign);
+ if (bit_size > 0) {
+ put_bits(&s->pb, bit_size, bits);
+ }
+ }
+}
+
+void msmpeg4_encode_mb(MpegEncContext * s,
+ DCTELEM block[6][64],
+ int motion_x, int motion_y)
+{
+ int cbp, coded_cbp, i;
+ int pred_x, pred_y;
+ uint8_t *coded_block;
+
+ ff_msmpeg4_handle_slices(s);
+
+ if (!s->mb_intra) {
+ /* compute cbp */
+ cbp = 0;
+ for (i = 0; i < 6; i++) {
+ if (s->block_last_index[i] >= 0)
+ cbp |= 1 << (5 - i);
+ }
+ if (s->use_skip_mb_code && (cbp | motion_x | motion_y) == 0) {
+ /* skip macroblock */
+ put_bits(&s->pb, 1, 1);
+ s->last_bits++;
+ s->misc_bits++;
+ s->skip_count++;
+
+ return;
+ }
+ if (s->use_skip_mb_code)
+ put_bits(&s->pb, 1, 0); /* mb coded */
+
+ if(s->msmpeg4_version<=2){
+ put_bits(&s->pb,
+ v2_mb_type[cbp&3][1],
+ v2_mb_type[cbp&3][0]);
+ if((cbp&3) != 3) coded_cbp= cbp ^ 0x3C;
+ else coded_cbp= cbp;
+
+ put_bits(&s->pb,
+ ff_h263_cbpy_tab[coded_cbp>>2][1],
+ ff_h263_cbpy_tab[coded_cbp>>2][0]);
+
+ s->misc_bits += get_bits_diff(s);
+
+ h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
+ msmpeg4v2_encode_motion(s, motion_x - pred_x);
+ msmpeg4v2_encode_motion(s, motion_y - pred_y);
+ }else{
+ put_bits(&s->pb,
+ table_mb_non_intra[cbp + 64][1],
+ table_mb_non_intra[cbp + 64][0]);
+
+ s->misc_bits += get_bits_diff(s);
+
+ /* motion vector */
+ h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
+ ff_msmpeg4_encode_motion(s, motion_x - pred_x,
+ motion_y - pred_y);
+ }
+
+ s->mv_bits += get_bits_diff(s);
+
+ for (i = 0; i < 6; i++) {
+ ff_msmpeg4_encode_block(s, block[i], i);
+ }
+ s->p_tex_bits += get_bits_diff(s);
+ } else {
+ /* compute cbp */
+ cbp = 0;
+ coded_cbp = 0;
+ for (i = 0; i < 6; i++) {
+ int val, pred;
+ val = (s->block_last_index[i] >= 1);
+ cbp |= val << (5 - i);
+ if (i < 4) {
+ /* predict value for close blocks only for luma */
+ pred = ff_msmpeg4_coded_block_pred(s, i, &coded_block);
+ *coded_block = val;
+ val = val ^ pred;
+ }
+ coded_cbp |= val << (5 - i);
+ }
+#if 0
+ if (coded_cbp)
+ printf("cbp=%x %x\n", cbp, coded_cbp);
+#endif
+
+ if(s->msmpeg4_version<=2){
+ if (s->pict_type == FF_I_TYPE) {
+ put_bits(&s->pb,
+ v2_intra_cbpc[cbp&3][1], v2_intra_cbpc[cbp&3][0]);
+ } else {
+ if (s->use_skip_mb_code)
+ put_bits(&s->pb, 1, 0); /* mb coded */
+ put_bits(&s->pb,
+ v2_mb_type[(cbp&3) + 4][1],
+ v2_mb_type[(cbp&3) + 4][0]);
+ }
+ put_bits(&s->pb, 1, 0); /* no AC prediction yet */
+ put_bits(&s->pb,
+ ff_h263_cbpy_tab[cbp>>2][1],
+ ff_h263_cbpy_tab[cbp>>2][0]);
+ }else{
+ if (s->pict_type == FF_I_TYPE) {
+ put_bits(&s->pb,
+ ff_msmp4_mb_i_table[coded_cbp][1], ff_msmp4_mb_i_table[coded_cbp][0]);
+ } else {
+ if (s->use_skip_mb_code)
+ put_bits(&s->pb, 1, 0); /* mb coded */
+ put_bits(&s->pb,
+ table_mb_non_intra[cbp][1],
+ table_mb_non_intra[cbp][0]);
+ }
+ put_bits(&s->pb, 1, 0); /* no AC prediction yet */
+ if(s->inter_intra_pred){
+ s->h263_aic_dir=0;
+ put_bits(&s->pb, table_inter_intra[s->h263_aic_dir][1], table_inter_intra[s->h263_aic_dir][0]);
+ }
+ }
+ s->misc_bits += get_bits_diff(s);
+
+ for (i = 0; i < 6; i++) {
+ ff_msmpeg4_encode_block(s, block[i], i);
+ }
+ s->i_tex_bits += get_bits_diff(s);
+ s->i_count++;
+ }
+}
+
+#endif //CONFIG_ENCODERS
+
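+/* msmpeg4v1 DC prediction: the four luma blocks share last_dc[0], while the
+   two chroma blocks use last_dc[1] and last_dc[2]. */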
+static inline int msmpeg4v1_pred_dc(MpegEncContext * s, int n,
+ int32_t **dc_val_ptr)
+{
+ int i;
+
+ if (n < 4) {
+ i= 0;
+ } else {
+ i= n-3;
+ }
+
+ *dc_val_ptr= &s->last_dc[i];
+ return s->last_dc[i];
+}
+
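+/* Sum the 8x8 block at src and divide by scale with rounding. Callers pass
+   scale*8, so the result roughly corresponds to the quantized DC of the
+   reconstructed block (used for inter-intra DC prediction below). */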
+static int get_dc(uint8_t *src, int stride, int scale)
+{
+ int y;
+ int sum=0;
+ for(y=0; y<8; y++){
+ int x;
+ for(x=0; x<8; x++){
+ sum+=src[x + y*stride];
+ }
+ }
+ return FASTDIV((sum + (scale>>1)), scale);
+}
+
+/* dir = 0: left, dir = 1: top prediction */
+static inline int msmpeg4_pred_dc(MpegEncContext * s, int n,
+ int16_t **dc_val_ptr, int *dir_ptr)
+{
+ int a, b, c, wrap, pred, scale;
+ int16_t *dc_val;
+
+ /* find prediction */
+ if (n < 4) {
+ scale = s->y_dc_scale;
+ } else {
+ scale = s->c_dc_scale;
+ }
+
+ wrap = s->block_wrap[n];
+ dc_val= s->dc_val[0] + s->block_index[n];
+
+ /* B C
+ * A X
+ */
+ a = dc_val[ - 1];
+ b = dc_val[ - 1 - wrap];
+ c = dc_val[ - wrap];
+
+ if(s->first_slice_line && (n&2)==0 && s->msmpeg4_version<4){
+ b=c=1024;
+ }
+
+ /* XXX: the following solution costs divisions, but it does not
+ require modifying mpegvideo.c. The problem comes from the
+ fact that they decided to store the quantized DC (which would lead
+ to problems if Q could vary!) */
+#if ARCH_X86 && HAVE_7REGS && HAVE_EBX_AVAILABLE
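+ /* Rounded division of a, b and c by scale without a div instruction:
+ each value plus scale/2 is multiplied by the precomputed reciprocal
+ ff_inverse[scale] and the high 32 bits of the product (%edx) are kept. */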
+ __asm__ volatile(
+ "movl %3, %%eax \n\t"
+ "shrl $1, %%eax \n\t"
+ "addl %%eax, %2 \n\t"
+ "addl %%eax, %1 \n\t"
+ "addl %0, %%eax \n\t"
+ "mull %4 \n\t"
+ "movl %%edx, %0 \n\t"
+ "movl %1, %%eax \n\t"
+ "mull %4 \n\t"
+ "movl %%edx, %1 \n\t"
+ "movl %2, %%eax \n\t"
+ "mull %4 \n\t"
+ "movl %%edx, %2 \n\t"
+ : "+b" (a), "+c" (b), "+D" (c)
+ : "g" (scale), "S" (ff_inverse[scale])
+ : "%eax", "%edx"
+ );
+#else
+ /* #elif ARCH_ALPHA */
+ /* Divisions are extremely costly on Alpha; optimize the most
+ common case. But they are costly everywhere...
+ */
+ if (scale == 8) {
+ a = (a + (8 >> 1)) / 8;
+ b = (b + (8 >> 1)) / 8;
+ c = (c + (8 >> 1)) / 8;
+ } else {
+ a = FASTDIV((a + (scale >> 1)), scale);
+ b = FASTDIV((b + (scale >> 1)), scale);
+ c = FASTDIV((c + (scale >> 1)), scale);
+ }
+#endif
+ /* XXX: WARNING: they did not choose the same test as MPEG4. This
+ is very important! */
+ if(s->msmpeg4_version>3){
+ if(s->inter_intra_pred){
+ uint8_t *dest;
+ int wrap;
+
+ if(n==1){
+ pred=a;
+ *dir_ptr = 0;
+ }else if(n==2){
+ pred=c;
+ *dir_ptr = 1;
+ }else if(n==3){
+ if (abs(a - b) < abs(b - c)) {
+ pred = c;
+ *dir_ptr = 1;
+ } else {
+ pred = a;
+ *dir_ptr = 0;
+ }
+ }else{
+ if(n<4){
+ wrap= s->linesize;
+ dest= s->current_picture.data[0] + (((n>>1) + 2*s->mb_y) * 8* wrap ) + ((n&1) + 2*s->mb_x) * 8;
+ }else{
+ wrap= s->uvlinesize;
+ dest= s->current_picture.data[n-3] + (s->mb_y * 8 * wrap) + s->mb_x * 8;
+ }
+ if(s->mb_x==0) a= (1024 + (scale>>1))/scale;
+ else a= get_dc(dest-8, wrap, scale*8);
+ if(s->mb_y==0) c= (1024 + (scale>>1))/scale;
+ else c= get_dc(dest-8*wrap, wrap, scale*8);
+
+ if (s->h263_aic_dir==0) {
+ pred= a;
+ *dir_ptr = 0;
+ }else if (s->h263_aic_dir==1) {
+ if(n==0){
+ pred= c;
+ *dir_ptr = 1;
+ }else{
+ pred= a;
+ *dir_ptr = 0;
+ }
+ }else if (s->h263_aic_dir==2) {
+ if(n==0){
+ pred= a;
+ *dir_ptr = 0;
+ }else{
+ pred= c;
+ *dir_ptr = 1;
+ }
+ } else {
+ pred= c;
+ *dir_ptr = 1;
+ }
+ }
+ }else{
+ if (abs(a - b) < abs(b - c)) {
+ pred = c;
+ *dir_ptr = 1;
+ } else {
+ pred = a;
+ *dir_ptr = 0;
+ }
+ }
+ }else{
+ if (abs(a - b) <= abs(b - c)) {
+ pred = c;
+ *dir_ptr = 1;
+ } else {
+ pred = a;
+ *dir_ptr = 0;
+ }
+ }
+
+ /* update predictor */
+ *dc_val_ptr = &dc_val[0];
+ return pred;
+}
+
+#define DC_MAX 119
+
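+/* Encode the DC difference after prediction. For msmpeg4 v3 and later,
+   magnitudes above DC_MAX are escaped: the DC_MAX symbol is written from the
+   VLC table and the magnitude follows literally on 8 (+extrabits) bits,
+   then the sign bit. */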
+static void msmpeg4_encode_dc(MpegEncContext * s, int level, int n, int *dir_ptr)
+{
+ int sign, code;
+ int pred, extquant;
+ int extrabits = 0;
+
+ if(s->msmpeg4_version==1){
+ int32_t *dc_val;
+ pred = msmpeg4v1_pred_dc(s, n, &dc_val);
+
+ /* update predictor */
+ *dc_val= level;
+ }else{
+ int16_t *dc_val;
+ pred = msmpeg4_pred_dc(s, n, &dc_val, dir_ptr);
+
+ /* update predictor */
+ if (n < 4) {
+ *dc_val = level * s->y_dc_scale;
+ } else {
+ *dc_val = level * s->c_dc_scale;
+ }
+ }
+
+ /* do the prediction */
+ level -= pred;
+
+ if(s->msmpeg4_version<=2){
+ if (n < 4) {
+ put_bits(&s->pb,
+ v2_dc_lum_table[level+256][1],
+ v2_dc_lum_table[level+256][0]);
+ }else{
+ put_bits(&s->pb,
+ v2_dc_chroma_table[level+256][1],
+ v2_dc_chroma_table[level+256][0]);
+ }
+ }else{
+ sign = 0;
+ if (level < 0) {
+ level = -level;
+ sign = 1;
+ }
+ code = level;
+ if (code > DC_MAX)
+ code = DC_MAX;
+ else if( s->msmpeg4_version>=6 ) {
+ if( s->qscale == 1 ) {
+ extquant = (level + 3) & 0x3;
+ code = ((level+3)>>2);
+ } else if( s->qscale == 2 ) {
+ extquant = (level + 1) & 0x1;
+ code = ((level+1)>>1);
+ }
+ }
+
+ if (s->dc_table_index == 0) {
+ if (n < 4) {
+ put_bits(&s->pb, ff_table0_dc_lum[code][1], ff_table0_dc_lum[code][0]);
+ } else {
+ put_bits(&s->pb, ff_table0_dc_chroma[code][1], ff_table0_dc_chroma[code][0]);
+ }
+ } else {
+ if (n < 4) {
+ put_bits(&s->pb, ff_table1_dc_lum[code][1], ff_table1_dc_lum[code][0]);
+ } else {
+ put_bits(&s->pb, ff_table1_dc_chroma[code][1], ff_table1_dc_chroma[code][0]);
+ }
+ }
+
+ if(s->msmpeg4_version>=6 && s->qscale<=2)
+ extrabits = 3 - s->qscale;
+
+ if (code == DC_MAX)
+ put_bits(&s->pb, 8 + extrabits, level);
+ else if(extrabits > 0)//== VC1 && s->qscale<=2
+ put_bits(&s->pb, extrabits, extquant);
+
+ if (level != 0) {
+ put_bits(&s->pb, 1, sign);
+ }
+ }
+}
+
+/* Encoding of a block. Very similar to MPEG4 except for a different
+ escape coding (same as H263) and more vlc tables.
+ */
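+/* Escape coding below: escape 1 re-codes the triple with level reduced by
+   max_level, escape 2 re-codes it with run reduced by max_run + run_diff,
+   and escape 3 stores last, run and level literally. */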
+void ff_msmpeg4_encode_block(MpegEncContext * s, DCTELEM * block, int n)
+{
+ int level, run, last, i, j, last_index;
+ int last_non_zero, sign, slevel;
+ int code, run_diff, dc_pred_dir;
+ const RLTable *rl;
+ const uint8_t *scantable;
+
+ if (s->mb_intra) {
+ msmpeg4_encode_dc(s, block[0], n, &dc_pred_dir);
+ i = 1;
+ if (n < 4) {
+ rl = &rl_table[s->rl_table_index];
+ } else {
+ rl = &rl_table[3 + s->rl_chroma_table_index];
+ }
+ run_diff = s->msmpeg4_version>=4;
+ scantable= s->intra_scantable.permutated;
+ } else {
+ i = 0;
+ rl = &rl_table[3 + s->rl_table_index];
+ if(s->msmpeg4_version<=2)
+ run_diff = 0;
+ else
+ run_diff = 1;
+ scantable= s->inter_scantable.permutated;
+ }
+
+ /* recalculate block_last_index for M$ wmv1 */
+ if(s->msmpeg4_version>=4 && s->msmpeg4_version<6 && s->block_last_index[n]>0){
+ for(last_index=63; last_index>=0; last_index--){
+ if(block[scantable[last_index]]) break;
+ }
+ s->block_last_index[n]= last_index;
+ }else
+ last_index = s->block_last_index[n];
+ /* AC coefs */
+ last_non_zero = i - 1;
+ for (; i <= last_index; i++) {
+ j = scantable[i];
+ level = block[j];
+ if (level) {
+ run = i - last_non_zero - 1;
+ last = (i == last_index);
+ sign = 0;
+ slevel = level;
+ if (level < 0) {
+ sign = 1;
+ level = -level;
+ }
+
+ if(level<=MAX_LEVEL && run<=MAX_RUN){
+ s->ac_stats[s->mb_intra][n>3][level][run][last]++;
+ }
+#if 0
+else
+ s->ac_stats[s->mb_intra][n>3][40][63][0]++; //esc3 like
+#endif
+ code = get_rl_index(rl, last, run, level);
+ put_bits(&s->pb, rl->table_vlc[code][1], rl->table_vlc[code][0]);
+ if (code == rl->n) {
+ int level1, run1;
+
+ level1 = level - rl->max_level[last][run];
+ if (level1 < 1)
+ goto esc2;
+ code = get_rl_index(rl, last, run, level1);
+ if (code == rl->n) {
+ esc2:
+ put_bits(&s->pb, 1, 0);
+ if (level > MAX_LEVEL)
+ goto esc3;
+ run1 = run - rl->max_run[last][level] - run_diff;
+ if (run1 < 0)
+ goto esc3;
+ code = get_rl_index(rl, last, run1+1, level);
+ if (s->msmpeg4_version == 4 && code == rl->n)
+ goto esc3;
+ code = get_rl_index(rl, last, run1, level);
+ if (code == rl->n) {
+ esc3:
+ /* third escape */
+ put_bits(&s->pb, 1, 0);
+ put_bits(&s->pb, 1, last);
+ if(s->msmpeg4_version>=4){
+ if(s->esc3_level_length==0){
+ s->esc3_level_length=8;
+ s->esc3_run_length= 6;
+ //ESCLVLSZ + ESCRUNSZ
+ if(s->qscale<8)
+ put_bits(&s->pb, 6 + (s->msmpeg4_version>=6), 3);
+ else
+ put_bits(&s->pb, 8, 3);
+ }
+ put_bits(&s->pb, s->esc3_run_length, run);
+ put_bits(&s->pb, 1, sign);
+ put_bits(&s->pb, s->esc3_level_length, level);
+ }else{
+ put_bits(&s->pb, 6, run);
+ put_sbits(&s->pb, 8, slevel);
+ }
+ } else {
+ /* second escape */
+ put_bits(&s->pb, 1, 1);
+ put_bits(&s->pb, rl->table_vlc[code][1], rl->table_vlc[code][0]);
+ put_bits(&s->pb, 1, sign);
+ }
+ } else {
+ /* first escape */
+ put_bits(&s->pb, 1, 1);
+ put_bits(&s->pb, rl->table_vlc[code][1], rl->table_vlc[code][0]);
+ put_bits(&s->pb, 1, sign);
+ }
+ } else {
+ put_bits(&s->pb, 1, sign);
+ }
+ last_non_zero = i;
+ }
+ }
+}
+
+/****************************************/
+/* decoding stuff */
+
+VLC ff_mb_non_intra_vlc[4];
+static VLC v2_dc_lum_vlc;
+static VLC v2_dc_chroma_vlc;
+static VLC v2_intra_cbpc_vlc;
+static VLC v2_mb_type_vlc;
+static VLC v2_mv_vlc;
+VLC ff_inter_intra_vlc;
+
+/* This is identical to h263 except that its range is multiplied by 2. */
+static int msmpeg4v2_decode_motion(MpegEncContext * s, int pred, int f_code)
+{
+ int code, val, sign, shift;
+
+ code = get_vlc2(&s->gb, v2_mv_vlc.table, V2_MV_VLC_BITS, 2);
+// printf("MV code %d at %d %d pred: %d\n", code, s->mb_x,s->mb_y, pred);
+ if (code < 0)
+ return 0xffff;
+
+ if (code == 0)
+ return pred;
+ sign = get_bits1(&s->gb);
+ shift = f_code - 1;
+ val = code;
+ if (shift) {
+ val = (val - 1) << shift;
+ val |= get_bits(&s->gb, shift);
+ val++;
+ }
+ if (sign)
+ val = -val;
+
+ val += pred;
+ if (val <= -64)
+ val += 64;
+ else if (val >= 64)
+ val -= 64;
+
+ return val;
+}
+
+static int msmpeg4v12_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
+{
+ int cbp, code, i;
+
+ if (s->pict_type == FF_P_TYPE) {
+ if (s->use_skip_mb_code) {
+ if (get_bits1(&s->gb)) {
+ /* skip mb */
+ s->mb_intra = 0;
+ for(i=0;i<6;i++)
+ s->block_last_index[i] = -1;
+ s->mv_dir = MV_DIR_FORWARD;
+ s->mv_type = MV_TYPE_16X16;
+ s->mv[0][0][0] = 0;
+ s->mv[0][0][1] = 0;
+ s->mb_skipped = 1;
+ return 0;
+ }
+ }
+
+ if(s->msmpeg4_version==2)
+ code = get_vlc2(&s->gb, v2_mb_type_vlc.table, V2_MB_TYPE_VLC_BITS, 1);
+ else
+ code = get_vlc2(&s->gb, ff_h263_inter_MCBPC_vlc.table, INTER_MCBPC_VLC_BITS, 2);
+ if(code<0 || code>7){
+ av_log(s->avctx, AV_LOG_ERROR, "cbpc %d invalid at %d %d\n", code, s->mb_x, s->mb_y);
+ return -1;
+ }
+
+ s->mb_intra = code >>2;
+
+ cbp = code & 0x3;
+ } else {
+ s->mb_intra = 1;
+ if(s->msmpeg4_version==2)
+ cbp= get_vlc2(&s->gb, v2_intra_cbpc_vlc.table, V2_INTRA_CBPC_VLC_BITS, 1);
+ else
+ cbp= get_vlc2(&s->gb, ff_h263_intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 1);
+ if(cbp<0 || cbp>3){
+ av_log(s->avctx, AV_LOG_ERROR, "cbpc %d invalid at %d %d\n", cbp, s->mb_x, s->mb_y);
+ return -1;
+ }
+ }
+
+ if (!s->mb_intra) {
+ int mx, my, cbpy;
+
+ cbpy= get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);
+ if(cbpy<0){
+ av_log(s->avctx, AV_LOG_ERROR, "cbpy %d invalid at %d %d\n", cbp, s->mb_x, s->mb_y);
+ return -1;
+ }
+
+ cbp|= cbpy<<2;
+ if(s->msmpeg4_version==1 || (cbp&3) != 3) cbp^= 0x3C;
+
+ h263_pred_motion(s, 0, 0, &mx, &my);
+ mx= msmpeg4v2_decode_motion(s, mx, 1);
+ my= msmpeg4v2_decode_motion(s, my, 1);
+
+ s->mv_dir = MV_DIR_FORWARD;
+ s->mv_type = MV_TYPE_16X16;
+ s->mv[0][0][0] = mx;
+ s->mv[0][0][1] = my;
+ } else {
+ if(s->msmpeg4_version==2){
+ s->ac_pred = get_bits1(&s->gb);
+ cbp|= get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1)<<2; //FIXME check errors
+ } else{
+ s->ac_pred = 0;
+ cbp|= get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1)<<2; //FIXME check errors
+ if(s->pict_type==FF_P_TYPE) cbp^=0x3C;
+ }
+ }
+
+ s->dsp.clear_blocks(s->block[0]);
+ for (i = 0; i < 6; i++) {
+ if (ff_msmpeg4_decode_block(s, block[i], i, (cbp >> (5 - i)) & 1, NULL) < 0)
+ {
+ av_log(s->avctx, AV_LOG_ERROR, "\nerror while decoding block: %d x %d (%d)\n", s->mb_x, s->mb_y, i);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int msmpeg4v34_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
+{
+ int cbp, code, i;
+ uint8_t *coded_val;
+ uint32_t * const mb_type_ptr= &s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ];
+
+ if (s->pict_type == FF_P_TYPE) {
+ if (s->use_skip_mb_code) {
+ if (get_bits1(&s->gb)) {
+ /* skip mb */
+ s->mb_intra = 0;
+ for(i=0;i<6;i++)
+ s->block_last_index[i] = -1;
+ s->mv_dir = MV_DIR_FORWARD;
+ s->mv_type = MV_TYPE_16X16;
+ s->mv[0][0][0] = 0;
+ s->mv[0][0][1] = 0;
+ s->mb_skipped = 1;
+ *mb_type_ptr = MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16;
+
+ return 0;
+ }
+ }
+
+ code = get_vlc2(&s->gb, ff_mb_non_intra_vlc[DEFAULT_INTER_INDEX].table, MB_NON_INTRA_VLC_BITS, 3);
+ if (code < 0)
+ return -1;
+ //s->mb_intra = (code & 0x40) ? 0 : 1;
+ s->mb_intra = (~code & 0x40) >> 6;
+
+ cbp = code & 0x3f;
+ } else {
+ s->mb_intra = 1;
+ code = get_vlc2(&s->gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
+ if (code < 0)
+ return -1;
+ /* predict coded block pattern */
+ cbp = 0;
+ for(i=0;i<6;i++) {
+ int val = ((code >> (5 - i)) & 1);
+ if (i < 4) {
+ int pred = ff_msmpeg4_coded_block_pred(s, i, &coded_val);
+ val = val ^ pred;
+ *coded_val = val;
+ }
+ cbp |= val << (5 - i);
+ }
+ }
+
+ if (!s->mb_intra) {
+ int mx, my;
+//printf("P at %d %d\n", s->mb_x, s->mb_y);
+ if(s->per_mb_rl_table && cbp){
+ s->rl_table_index = decode012(&s->gb);
+ s->rl_chroma_table_index = s->rl_table_index;
+ }
+ h263_pred_motion(s, 0, 0, &mx, &my);
+ if (ff_msmpeg4_decode_motion(s, &mx, &my) < 0)
+ return -1;
+ s->mv_dir = MV_DIR_FORWARD;
+ s->mv_type = MV_TYPE_16X16;
+ s->mv[0][0][0] = mx;
+ s->mv[0][0][1] = my;
+ *mb_type_ptr = MB_TYPE_L0 | MB_TYPE_16x16;
+ } else {
+//printf("I at %d %d %d %06X\n", s->mb_x, s->mb_y, ((cbp&3)? 1 : 0) +((cbp&0x3C)? 2 : 0), show_bits(&s->gb, 24));
+ s->ac_pred = get_bits1(&s->gb);
+ *mb_type_ptr = MB_TYPE_INTRA;
+ if(s->inter_intra_pred){
+ s->h263_aic_dir= get_vlc2(&s->gb, ff_inter_intra_vlc.table, INTER_INTRA_VLC_BITS, 1);
+// printf("%d%d %d %d/", s->ac_pred, s->h263_aic_dir, s->mb_x, s->mb_y);
+ }
+ if(s->per_mb_rl_table && cbp){
+ s->rl_table_index = decode012(&s->gb);
+ s->rl_chroma_table_index = s->rl_table_index;
+ }
+ }
+
+ s->dsp.clear_blocks(s->block[0]);
+ for (i = 0; i < 6; i++) {
+ if (ff_msmpeg4_decode_block(s, block[i], i, (cbp >> (5 - i)) & 1, NULL) < 0)
+ {
+ av_log(s->avctx, AV_LOG_ERROR, "\nerror while decoding block: %d x %d (%d)\n", s->mb_x, s->mb_y, i);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/* init all vlc decoding tables */
+av_cold int ff_msmpeg4_decode_init(AVCodecContext *avctx)
+{
+ MpegEncContext *s = avctx->priv_data;
+ static int done = 0;
+ int i;
+ MVTable *mv;
+
+ ff_h263_decode_init(avctx);
+
+ common_init(s);
+
+ if (!done) {
+ done = 1;
+
+ for(i=0;i<NB_RL_TABLES;i++) {
+ init_rl(&rl_table[i], static_rl_table_store[i]);
+ }
+ INIT_VLC_RL(rl_table[0], 642);
+ INIT_VLC_RL(rl_table[1], 1104);
+ INIT_VLC_RL(rl_table[2], 554);
+ INIT_VLC_RL(rl_table[3], 940);
+ INIT_VLC_RL(rl_table[4], 962);
+ INIT_VLC_RL(rl_table[5], 554);
+
+ mv = &mv_tables[0];
+ INIT_VLC_STATIC(&mv->vlc, MV_VLC_BITS, mv->n + 1,
+ mv->table_mv_bits, 1, 1,
+ mv->table_mv_code, 2, 2, 3714);
+ mv = &mv_tables[1];
+ INIT_VLC_STATIC(&mv->vlc, MV_VLC_BITS, mv->n + 1,
+ mv->table_mv_bits, 1, 1,
+ mv->table_mv_code, 2, 2, 2694);
+
+ INIT_VLC_STATIC(&ff_msmp4_dc_luma_vlc[0], DC_VLC_BITS, 120,
+ &ff_table0_dc_lum[0][1], 8, 4,
+ &ff_table0_dc_lum[0][0], 8, 4, 1158);
+ INIT_VLC_STATIC(&ff_msmp4_dc_chroma_vlc[0], DC_VLC_BITS, 120,
+ &ff_table0_dc_chroma[0][1], 8, 4,
+ &ff_table0_dc_chroma[0][0], 8, 4, 1118);
+ INIT_VLC_STATIC(&ff_msmp4_dc_luma_vlc[1], DC_VLC_BITS, 120,
+ &ff_table1_dc_lum[0][1], 8, 4,
+ &ff_table1_dc_lum[0][0], 8, 4, 1476);
+ INIT_VLC_STATIC(&ff_msmp4_dc_chroma_vlc[1], DC_VLC_BITS, 120,
+ &ff_table1_dc_chroma[0][1], 8, 4,
+ &ff_table1_dc_chroma[0][0], 8, 4, 1216);
+
+ INIT_VLC_STATIC(&v2_dc_lum_vlc, DC_VLC_BITS, 512,
+ &v2_dc_lum_table[0][1], 8, 4,
+ &v2_dc_lum_table[0][0], 8, 4, 1472);
+ INIT_VLC_STATIC(&v2_dc_chroma_vlc, DC_VLC_BITS, 512,
+ &v2_dc_chroma_table[0][1], 8, 4,
+ &v2_dc_chroma_table[0][0], 8, 4, 1506);
+
+ INIT_VLC_STATIC(&v2_intra_cbpc_vlc, V2_INTRA_CBPC_VLC_BITS, 4,
+ &v2_intra_cbpc[0][1], 2, 1,
+ &v2_intra_cbpc[0][0], 2, 1, 8);
+ INIT_VLC_STATIC(&v2_mb_type_vlc, V2_MB_TYPE_VLC_BITS, 8,
+ &v2_mb_type[0][1], 2, 1,
+ &v2_mb_type[0][0], 2, 1, 128);
+ INIT_VLC_STATIC(&v2_mv_vlc, V2_MV_VLC_BITS, 33,
+ &mvtab[0][1], 2, 1,
+ &mvtab[0][0], 2, 1, 538);
+
+ INIT_VLC_STATIC(&ff_mb_non_intra_vlc[0], MB_NON_INTRA_VLC_BITS, 128,
+ &wmv2_inter_table[0][0][1], 8, 4,
+ &wmv2_inter_table[0][0][0], 8, 4, 1636);
+ INIT_VLC_STATIC(&ff_mb_non_intra_vlc[1], MB_NON_INTRA_VLC_BITS, 128,
+ &wmv2_inter_table[1][0][1], 8, 4,
+ &wmv2_inter_table[1][0][0], 8, 4, 2648);
+ INIT_VLC_STATIC(&ff_mb_non_intra_vlc[2], MB_NON_INTRA_VLC_BITS, 128,
+ &wmv2_inter_table[2][0][1], 8, 4,
+ &wmv2_inter_table[2][0][0], 8, 4, 1532);
+ INIT_VLC_STATIC(&ff_mb_non_intra_vlc[3], MB_NON_INTRA_VLC_BITS, 128,
+ &wmv2_inter_table[3][0][1], 8, 4,
+ &wmv2_inter_table[3][0][0], 8, 4, 2488);
+
+ INIT_VLC_STATIC(&ff_msmp4_mb_i_vlc, MB_INTRA_VLC_BITS, 64,
+ &ff_msmp4_mb_i_table[0][1], 4, 2,
+ &ff_msmp4_mb_i_table[0][0], 4, 2, 536);
+
+ INIT_VLC_STATIC(&ff_inter_intra_vlc, INTER_INTRA_VLC_BITS, 4,
+ &table_inter_intra[0][1], 2, 1,
+ &table_inter_intra[0][0], 2, 1, 8);
+ }
+
+ switch(s->msmpeg4_version){
+ case 1:
+ case 2:
+ s->decode_mb= msmpeg4v12_decode_mb;
+ break;
+ case 3:
+ case 4:
+ s->decode_mb= msmpeg4v34_decode_mb;
+ break;
+ case 5:
+ if (CONFIG_WMV2_DECODER)
+ s->decode_mb= ff_wmv2_decode_mb;
+ case 6:
+ //FIXME + TODO VC1 decode mb
+ break;
+ }
+
+ s->slice_height= s->mb_height; //to avoid 1/0 if the first frame is not a keyframe
+
+ return 0;
+}
+
+int msmpeg4_decode_picture_header(MpegEncContext * s)
+{
+ int code;
+
+#if 0
+{
+int i;
+for(i=0; i<s->gb.size_in_bits; i++)
+ av_log(s->avctx, AV_LOG_DEBUG, "%d", get_bits1(&s->gb));
+// get_bits1(&s->gb);
+av_log(s->avctx, AV_LOG_DEBUG, "END\n");
+return -1;
+}
+#endif
+
+ if(s->msmpeg4_version==1){
+ int start_code;
+ start_code = (get_bits(&s->gb, 16)<<16) | get_bits(&s->gb, 16);
+ if(start_code!=0x00000100){
+ av_log(s->avctx, AV_LOG_ERROR, "invalid startcode\n");
+ return -1;
+ }
+
+ skip_bits(&s->gb, 5); // frame number
+ }
+
+ s->pict_type = get_bits(&s->gb, 2) + 1;
+ if (s->pict_type != FF_I_TYPE &&
+ s->pict_type != FF_P_TYPE){
+ av_log(s->avctx, AV_LOG_ERROR, "invalid picture type\n");
+ return -1;
+ }
+#if 0
+{
+ static int had_i=0;
+ if(s->pict_type == FF_I_TYPE) had_i=1;
+ if(!had_i) return -1;
+}
+#endif
+ s->chroma_qscale= s->qscale = get_bits(&s->gb, 5);
+ if(s->qscale==0){
+ av_log(s->avctx, AV_LOG_ERROR, "invalid qscale\n");
+ return -1;
+ }
+
+ if (s->pict_type == FF_I_TYPE) {
+ code = get_bits(&s->gb, 5);
+ if(s->msmpeg4_version==1){
+ if(code==0 || code>s->mb_height){
+ av_log(s->avctx, AV_LOG_ERROR, "invalid slice height %d\n", code);
+ return -1;
+ }
+
+ s->slice_height = code;
+ }else{
+ /* 0x17: one slice, 0x18: two slices, ... */
+ if (code < 0x17){
+ av_log(s->avctx, AV_LOG_ERROR, "error, slice code was %X\n", code);
+ return -1;
+ }
+
+ s->slice_height = s->mb_height / (code - 0x16);
+ }
+
+ switch(s->msmpeg4_version){
+ case 1:
+ case 2:
+ s->rl_chroma_table_index = 2;
+ s->rl_table_index = 2;
+
+ s->dc_table_index = 0; //not used
+ break;
+ case 3:
+ s->rl_chroma_table_index = decode012(&s->gb);
+ s->rl_table_index = decode012(&s->gb);
+
+ s->dc_table_index = get_bits1(&s->gb);
+ break;
+ case 4:
+ msmpeg4_decode_ext_header(s, (2+5+5+17+7)/8);
+
+ if(s->bit_rate > MBAC_BITRATE) s->per_mb_rl_table= get_bits1(&s->gb);
+ else s->per_mb_rl_table= 0;
+
+ if(!s->per_mb_rl_table){
+ s->rl_chroma_table_index = decode012(&s->gb);
+ s->rl_table_index = decode012(&s->gb);
+ }
+
+ s->dc_table_index = get_bits1(&s->gb);
+ s->inter_intra_pred= 0;
+ break;
+ }
+ s->no_rounding = 1;
+ if(s->avctx->debug&FF_DEBUG_PICT_INFO)
+ av_log(s->avctx, AV_LOG_DEBUG, "qscale:%d rlc:%d rl:%d dc:%d mbrl:%d slice:%d \n",
+ s->qscale,
+ s->rl_chroma_table_index,
+ s->rl_table_index,
+ s->dc_table_index,
+ s->per_mb_rl_table,
+ s->slice_height);
+ } else {
+ switch(s->msmpeg4_version){
+ case 1:
+ case 2:
+ if(s->msmpeg4_version==1)
+ s->use_skip_mb_code = 1;
+ else
+ s->use_skip_mb_code = get_bits1(&s->gb);
+ s->rl_table_index = 2;
+ s->rl_chroma_table_index = s->rl_table_index;
+ s->dc_table_index = 0; //not used
+ s->mv_table_index = 0;
+ break;
+ case 3:
+ s->use_skip_mb_code = get_bits1(&s->gb);
+ s->rl_table_index = decode012(&s->gb);
+ s->rl_chroma_table_index = s->rl_table_index;
+
+ s->dc_table_index = get_bits1(&s->gb);
+
+ s->mv_table_index = get_bits1(&s->gb);
+ break;
+ case 4:
+ s->use_skip_mb_code = get_bits1(&s->gb);
+
+ if(s->bit_rate > MBAC_BITRATE) s->per_mb_rl_table= get_bits1(&s->gb);
+ else s->per_mb_rl_table= 0;
+
+ if(!s->per_mb_rl_table){
+ s->rl_table_index = decode012(&s->gb);
+ s->rl_chroma_table_index = s->rl_table_index;
+ }
+
+ s->dc_table_index = get_bits1(&s->gb);
+
+ s->mv_table_index = get_bits1(&s->gb);
+ s->inter_intra_pred= (s->width*s->height < 320*240 && s->bit_rate<=II_BITRATE);
+ break;
+ }
+
+ if(s->avctx->debug&FF_DEBUG_PICT_INFO)
+ av_log(s->avctx, AV_LOG_DEBUG, "skip:%d rl:%d rlc:%d dc:%d mv:%d mbrl:%d qp:%d \n",
+ s->use_skip_mb_code,
+ s->rl_table_index,
+ s->rl_chroma_table_index,
+ s->dc_table_index,
+ s->mv_table_index,
+ s->per_mb_rl_table,
+ s->qscale);
+
+ if(s->flipflop_rounding){
+ s->no_rounding ^= 1;
+ }else{
+ s->no_rounding = 0;
+ }
+ }
+//printf("%d %d %d %d %d\n", s->pict_type, s->bit_rate, s->inter_intra_pred, s->width, s->height);
+
+ s->esc3_level_length= 0;
+ s->esc3_run_length= 0;
+
+ return 0;
+}
+
+int msmpeg4_decode_ext_header(MpegEncContext * s, int buf_size)
+{
+ int left= buf_size*8 - get_bits_count(&s->gb);
+ int length= s->msmpeg4_version>=3 ? 17 : 16;
+ /* the alt_bitstream reader could read over the end so we need to check it */
+ if(left>=length && left<length+8)
+ {
+ int fps;
+
+ fps= get_bits(&s->gb, 5);
+ s->bit_rate= get_bits(&s->gb, 11)*1024;
+ if(s->msmpeg4_version>=3)
+ s->flipflop_rounding= get_bits1(&s->gb);
+ else
+ s->flipflop_rounding= 0;
+
+// printf("fps:%2d bps:%2d roundingType:%1d\n", fps, s->bit_rate/1024, s->flipflop_rounding);
+ }
+ else if(left<length+8)
+ {
+ s->flipflop_rounding= 0;
+ if(s->msmpeg4_version != 2)
+ av_log(s->avctx, AV_LOG_ERROR, "ext header missing, %d left\n", left);
+ }
+ else
+ {
+ av_log(s->avctx, AV_LOG_ERROR, "I frame too long, ignoring ext header\n");
+ }
+
+ return 0;
+}
+
+static int msmpeg4_decode_dc(MpegEncContext * s, int n, int *dir_ptr)
+{
+ int level, pred;
+
+ if(s->msmpeg4_version<=2){
+ if (n < 4) {
+ level = get_vlc2(&s->gb, v2_dc_lum_vlc.table, DC_VLC_BITS, 3);
+ } else {
+ level = get_vlc2(&s->gb, v2_dc_chroma_vlc.table, DC_VLC_BITS, 3);
+ }
+ if (level < 0)
+ return -1;
+ level-=256;
+ }else{ //FIXME optimize use unified tables & index
+ if (n < 4) {
+ level = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
+ } else {
+ level = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
+ }
+ if (level < 0){
+ av_log(s->avctx, AV_LOG_ERROR, "illegal dc vlc\n");
+ return -1;
+ }
+
+ if (level == DC_MAX) {
+ level = get_bits(&s->gb, 8);
+ if (get_bits1(&s->gb))
+ level = -level;
+ } else if (level != 0) {
+ if (get_bits1(&s->gb))
+ level = -level;
+ }
+ }
+
+ if(s->msmpeg4_version==1){
+ int32_t *dc_val;
+ pred = msmpeg4v1_pred_dc(s, n, &dc_val);
+ level += pred;
+
+ /* update predictor */
+ *dc_val= level;
+ }else{
+ int16_t *dc_val;
+ pred = msmpeg4_pred_dc(s, n, &dc_val, dir_ptr);
+ level += pred;
+
+ /* update predictor */
+ if (n < 4) {
+ *dc_val = level * s->y_dc_scale;
+ } else {
+ *dc_val = level * s->c_dc_scale;
+ }
+ }
+
+ return level;
+}
+
+//#define ERROR_DETAILS
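+/* Decode one block of AC (and, for intra, DC) coefficients. The escape
+   handling mirrors the encoder above; a coefficient flagged as 'last' has 192
+   added to its scan index, which the i > 62 check at the end strips again. */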
+int ff_msmpeg4_decode_block(MpegEncContext * s, DCTELEM * block,
+ int n, int coded, const uint8_t *scan_table)
+{
+ int level, i, last, run, run_diff;
+ int av_uninit(dc_pred_dir);
+ RLTable *rl;
+ RL_VLC_ELEM *rl_vlc;
+ int qmul, qadd;
+
+ if (s->mb_intra) {
+ qmul=1;
+ qadd=0;
+
+ /* DC coef */
+ level = msmpeg4_decode_dc(s, n, &dc_pred_dir);
+
+ if (level < 0){
+ av_log(s->avctx, AV_LOG_ERROR, "dc overflow- block: %d qscale: %d//\n", n, s->qscale);
+ if(s->inter_intra_pred) level=0;
+ else return -1;
+ }
+ if (n < 4) {
+ rl = &rl_table[s->rl_table_index];
+ if(level > 256*s->y_dc_scale){
+ av_log(s->avctx, AV_LOG_ERROR, "dc overflow+ L qscale: %d//\n", s->qscale);
+ if(!s->inter_intra_pred) return -1;
+ }
+ } else {
+ rl = &rl_table[3 + s->rl_chroma_table_index];
+ if(level > 256*s->c_dc_scale){
+ av_log(s->avctx, AV_LOG_ERROR, "dc overflow+ C qscale: %d//\n", s->qscale);
+ if(!s->inter_intra_pred) return -1;
+ }
+ }
+ block[0] = level;
+
+ run_diff = s->msmpeg4_version >= 4;
+ i = 0;
+ if (!coded) {
+ goto not_coded;
+ }
+ if (s->ac_pred) {
+ if (dc_pred_dir == 0)
+ scan_table = s->intra_v_scantable.permutated; /* left */
+ else
+ scan_table = s->intra_h_scantable.permutated; /* top */
+ } else {
+ scan_table = s->intra_scantable.permutated;
+ }
+ rl_vlc= rl->rl_vlc[0];
+ } else {
+ qmul = s->qscale << 1;
+ qadd = (s->qscale - 1) | 1;
+ i = -1;
+ rl = &rl_table[3 + s->rl_table_index];
+
+ if(s->msmpeg4_version==2)
+ run_diff = 0;
+ else
+ run_diff = 1;
+
+ if (!coded) {
+ s->block_last_index[n] = i;
+ return 0;
+ }
+ if(!scan_table)
+ scan_table = s->inter_scantable.permutated;
+ rl_vlc= rl->rl_vlc[s->qscale];
+ }
+ {
+ OPEN_READER(re, &s->gb);
+ for(;;) {
+ UPDATE_CACHE(re, &s->gb);
+ GET_RL_VLC(level, run, re, &s->gb, rl_vlc, TEX_VLC_BITS, 2, 0);
+ if (level==0) {
+ int cache;
+ cache= GET_CACHE(re, &s->gb);
+ /* escape */
+ if (s->msmpeg4_version==1 || (cache&0x80000000)==0) {
+ if (s->msmpeg4_version==1 || (cache&0x40000000)==0) {
+ /* third escape */
+ if(s->msmpeg4_version!=1) LAST_SKIP_BITS(re, &s->gb, 2);
+ UPDATE_CACHE(re, &s->gb);
+ if(s->msmpeg4_version<=3){
+ last= SHOW_UBITS(re, &s->gb, 1); SKIP_CACHE(re, &s->gb, 1);
+ run= SHOW_UBITS(re, &s->gb, 6); SKIP_CACHE(re, &s->gb, 6);
+ level= SHOW_SBITS(re, &s->gb, 8); LAST_SKIP_CACHE(re, &s->gb, 8);
+ SKIP_COUNTER(re, &s->gb, 1+6+8);
+ }else{
+ int sign;
+ last= SHOW_UBITS(re, &s->gb, 1); SKIP_BITS(re, &s->gb, 1);
+ if(!s->esc3_level_length){
+ int ll;
+ //printf("ESC-3 %X at %d %d\n", show_bits(&s->gb, 24), s->mb_x, s->mb_y);
+ if(s->qscale<8){
+ ll= SHOW_UBITS(re, &s->gb, 3); SKIP_BITS(re, &s->gb, 3);
+ if(ll==0){
+ if(SHOW_UBITS(re, &s->gb, 1)) av_log(s->avctx, AV_LOG_ERROR, "cool, a new vlc code, contact the ffmpeg developers and upload the file\n");
+ SKIP_BITS(re, &s->gb, 1);
+ ll=8;
+ }
+ }else{
+ ll=2;
+ while(ll<8 && SHOW_UBITS(re, &s->gb, 1)==0){
+ ll++;
+ SKIP_BITS(re, &s->gb, 1);
+ }
+ if(ll<8) SKIP_BITS(re, &s->gb, 1);
+ }
+
+ s->esc3_level_length= ll;
+ s->esc3_run_length= SHOW_UBITS(re, &s->gb, 2) + 3; SKIP_BITS(re, &s->gb, 2);
+//printf("level length:%d, run length: %d\n", ll, s->esc3_run_length);
+ UPDATE_CACHE(re, &s->gb);
+ }
+ run= SHOW_UBITS(re, &s->gb, s->esc3_run_length);
+ SKIP_BITS(re, &s->gb, s->esc3_run_length);
+
+ sign= SHOW_UBITS(re, &s->gb, 1);
+ SKIP_BITS(re, &s->gb, 1);
+
+ level= SHOW_UBITS(re, &s->gb, s->esc3_level_length);
+ SKIP_BITS(re, &s->gb, s->esc3_level_length);
+ if(sign) level= -level;
+ }
+//printf("level: %d, run: %d at %d %d\n", level, run, s->mb_x, s->mb_y);
+#if 0 // waste of time / this will detect very few errors
+ {
+ const int abs_level= FFABS(level);
+ const int run1= run - rl->max_run[last][abs_level] - run_diff;
+ if(abs_level<=MAX_LEVEL && run<=MAX_RUN){
+ if(abs_level <= rl->max_level[last][run]){
+ av_log(s->avctx, AV_LOG_ERROR, "illegal 3. esc, vlc encoding possible\n");
+ return DECODING_AC_LOST;
+ }
+ if(abs_level <= rl->max_level[last][run]*2){
+ av_log(s->avctx, AV_LOG_ERROR, "illegal 3. esc, esc 1 encoding possible\n");
+ return DECODING_AC_LOST;
+ }
+ if(run1>=0 && abs_level <= rl->max_level[last][run1]){
+ av_log(s->avctx, AV_LOG_ERROR, "illegal 3. esc, esc 2 encoding possible\n");
+ return DECODING_AC_LOST;
+ }
+ }
+ }
+#endif
+ //level = level * qmul + (level>0) * qadd - (level<=0) * qadd ;
+ if (level>0) level= level * qmul + qadd;
+ else level= level * qmul - qadd;
+#if 0 // waste of time too :(
+ if(level>2048 || level<-2048){
+ av_log(s->avctx, AV_LOG_ERROR, "|level| overflow in 3. esc\n");
+ return DECODING_AC_LOST;
+ }
+#endif
+ i+= run + 1;
+ if(last) i+=192;
+#ifdef ERROR_DETAILS
+ if(run==66)
+ av_log(s->avctx, AV_LOG_ERROR, "illegal vlc code in ESC3 level=%d\n", level);
+ else if((i>62 && i<192) || i>192+63)
+ av_log(s->avctx, AV_LOG_ERROR, "run overflow in ESC3 i=%d run=%d level=%d\n", i, run, level);
+#endif
+ } else {
+ /* second escape */
+#if MIN_CACHE_BITS < 23
+ LAST_SKIP_BITS(re, &s->gb, 2);
+ UPDATE_CACHE(re, &s->gb);
+#else
+ SKIP_BITS(re, &s->gb, 2);
+#endif
+ GET_RL_VLC(level, run, re, &s->gb, rl_vlc, TEX_VLC_BITS, 2, 1);
+ i+= run + rl->max_run[run>>7][level/qmul] + run_diff; //FIXME opt indexing
+ level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
+ LAST_SKIP_BITS(re, &s->gb, 1);
+#ifdef ERROR_DETAILS
+ if(run==66)
+ av_log(s->avctx, AV_LOG_ERROR, "illegal vlc code in ESC2 level=%d\n", level);
+ else if((i>62 && i<192) || i>192+63)
+ av_log(s->avctx, AV_LOG_ERROR, "run overflow in ESC2 i=%d run=%d level=%d\n", i, run, level);
+#endif
+ }
+ } else {
+ /* first escape */
+#if MIN_CACHE_BITS < 22
+ LAST_SKIP_BITS(re, &s->gb, 1);
+ UPDATE_CACHE(re, &s->gb);
+#else
+ SKIP_BITS(re, &s->gb, 1);
+#endif
+ GET_RL_VLC(level, run, re, &s->gb, rl_vlc, TEX_VLC_BITS, 2, 1);
+ i+= run;
+ level = level + rl->max_level[run>>7][(run-1)&63] * qmul;//FIXME opt indexing
+ level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
+ LAST_SKIP_BITS(re, &s->gb, 1);
+#ifdef ERROR_DETAILS
+ if(run==66)
+ av_log(s->avctx, AV_LOG_ERROR, "illegal vlc code in ESC1 level=%d\n", level);
+ else if((i>62 && i<192) || i>192+63)
+ av_log(s->avctx, AV_LOG_ERROR, "run overflow in ESC1 i=%d run=%d level=%d\n", i, run, level);
+#endif
+ }
+ } else {
+ i+= run;
+ level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
+ LAST_SKIP_BITS(re, &s->gb, 1);
+#ifdef ERROR_DETAILS
+ if(run==66)
+ av_log(s->avctx, AV_LOG_ERROR, "illegal vlc code level=%d\n", level);
+ else if((i>62 && i<192) || i>192+63)
+ av_log(s->avctx, AV_LOG_ERROR, "run overflow i=%d run=%d level=%d\n", i, run, level);
+#endif
+ }
+ if (i > 62){
+ i-= 192;
+ if(i&(~63)){
+ const int left= get_bits_left(&s->gb);
+ if(((i+192 == 64 && level/qmul==-1) || s->error_recognition<=1) && left>=0){
+ av_log(s->avctx, AV_LOG_ERROR, "ignoring overflow at %d %d\n", s->mb_x, s->mb_y);
+ break;
+ }else{
+ av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+ }
+
+ block[scan_table[i]] = level;
+ break;
+ }
+
+ block[scan_table[i]] = level;
+ }
+ CLOSE_READER(re, &s->gb);
+ }
+ not_coded:
+ if (s->mb_intra) {
+ mpeg4_pred_ac(s, block, n, dc_pred_dir);
+ if (s->ac_pred) {
+ i = 63; /* XXX: not optimal */
+ }
+ }
+ if(s->msmpeg4_version>=4 && i>0) i=63; //FIXME/XXX optimize
+ s->block_last_index[n] = i;
+
+ return 0;
+}
+
+int ff_msmpeg4_decode_motion(MpegEncContext * s,
+ int *mx_ptr, int *my_ptr)
+{
+ MVTable *mv;
+ int code, mx, my;
+
+ mv = &mv_tables[s->mv_table_index];
+
+ code = get_vlc2(&s->gb, mv->vlc.table, MV_VLC_BITS, 2);
+ if (code < 0){
+ av_log(s->avctx, AV_LOG_ERROR, "illegal MV code at %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+ if (code == mv->n) {
+//printf("MV ESC %X at %d %d\n", show_bits(&s->gb, 24), s->mb_x, s->mb_y);
+ mx = get_bits(&s->gb, 6);
+ my = get_bits(&s->gb, 6);
+ } else {
+ mx = mv->table_mvx[code];
+ my = mv->table_mvy[code];
+ }
+
+ mx += *mx_ptr - 32;
+ my += *my_ptr - 32;
+ /* WARNING: they do not do exact modulo encoding */
+ if (mx <= -64)
+ mx += 64;
+ else if (mx >= 64)
+ mx -= 64;
+
+ if (my <= -64)
+ my += 64;
+ else if (my >= 64)
+ my -= 64;
+ *mx_ptr = mx;
+ *my_ptr = my;
+ return 0;
+}
+
+AVCodec msmpeg4v1_decoder = {
+ "msmpeg4v1",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_MSMPEG4V1,
+ sizeof(MpegEncContext),
+ ff_msmpeg4_decode_init,
+ NULL,
+ ff_h263_decode_end,
+ ff_h263_decode_frame,
+ CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
+ /*.next = */NULL,
+ /*.flush = */NULL,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name = */NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 1"),
+};
+
+AVCodec msmpeg4v2_decoder = {
+ "msmpeg4v2",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_MSMPEG4V2,
+ sizeof(MpegEncContext),
+ ff_msmpeg4_decode_init,
+ NULL,
+ ff_h263_decode_end,
+ ff_h263_decode_frame,
+ CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
+ /*.next = */NULL,
+ /*.flush = */NULL,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name = */NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
+};
+
+AVCodec msmpeg4v3_decoder = {
+ "msmpeg4",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_MSMPEG4V3,
+ sizeof(MpegEncContext),
+ ff_msmpeg4_decode_init,
+ NULL,
+ ff_h263_decode_end,
+ ff_h263_decode_frame,
+ CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
+ /*.next = */NULL,
+ /*.flush = */NULL,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name = */NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
+};
+
+AVCodec wmv1_decoder = {
+ "wmv1",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_WMV1,
+ sizeof(MpegEncContext),
+ ff_msmpeg4_decode_init,
+ NULL,
+ ff_h263_decode_end,
+ ff_h263_decode_frame,
+ CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
+ /*.next = */NULL,
+ /*.flush = */NULL,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name = */NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
+};
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/msmpeg4.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/msmpeg4.h
new file mode 100644
index 000000000..259be2f37
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/msmpeg4.h
@@ -0,0 +1,62 @@
+/*
+ * MSMPEG4 backend for ffmpeg encoder and decoder
+ * copyright (c) 2007 Aurelien Jacobs <aurel@gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/msmpeg4.h
+ */
+
+#ifndef AVCODEC_MSMPEG4_H
+#define AVCODEC_MSMPEG4_H
+
+#include "config.h"
+#include "avcodec.h"
+#include "dsputil.h"
+#include "mpegvideo.h"
+
+#define INTER_INTRA_VLC_BITS 3
+#define MB_NON_INTRA_VLC_BITS 9
+#define MB_INTRA_VLC_BITS 9
+
+extern VLC ff_mb_non_intra_vlc[4];
+extern VLC ff_inter_intra_vlc;
+
+void ff_msmpeg4_code012(PutBitContext *pb, int n);
+void ff_msmpeg4_encode_block(MpegEncContext * s, DCTELEM * block, int n);
+void ff_msmpeg4_handle_slices(MpegEncContext *s);
+void ff_msmpeg4_encode_motion(MpegEncContext * s, int mx, int my);
+int ff_msmpeg4_coded_block_pred(MpegEncContext * s, int n,
+ uint8_t **coded_block_ptr);
+int ff_msmpeg4_decode_motion(MpegEncContext * s, int *mx_ptr, int *my_ptr);
+int ff_msmpeg4_decode_block(MpegEncContext * s, DCTELEM * block,
+ int n, int coded, const uint8_t *scan_table);
+int ff_wmv2_decode_mb(MpegEncContext *s, DCTELEM block[6][64]);
+
+#define CONFIG_MSMPEG4_DECODER (CONFIG_MSMPEG4V1_DECODER || \
+ CONFIG_MSMPEG4V2_DECODER || \
+ CONFIG_MSMPEG4V3_DECODER || \
+ CONFIG_WMV2_DECODER || \
+ CONFIG_VC1_DECODER)
+#define CONFIG_MSMPEG4_ENCODER (CONFIG_MSMPEG4V1_ENCODER || \
+ CONFIG_MSMPEG4V2_ENCODER || \
+ CONFIG_MSMPEG4V3_ENCODER || \
+ CONFIG_WMV2_ENCODER)
+
+#endif /* AVCODEC_MSMPEG4_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/msmpeg4data.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/msmpeg4data.c
new file mode 100644
index 000000000..cfef62b12
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/msmpeg4data.c
@@ -0,0 +1,2001 @@
+/*
+ * MSMPEG4 backend for ffmpeg encoder and decoder
+ * copyright (c) 2001 Fabrice Bellard
+ * copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * msmpeg4v1 & v2 stuff by Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/msmpeg4data.c
+ * MSMPEG4 data tables.
+ */
+
+#include "msmpeg4data.h"
+
+VLC ff_msmp4_mb_i_vlc;
+VLC ff_msmp4_dc_luma_vlc[2];
+VLC ff_msmp4_dc_chroma_vlc[2];
+
+/* intra picture macroblock coded block pattern */
+const uint16_t ff_msmp4_mb_i_table[64][2] = {
+{ 0x1, 1 },{ 0x17, 6 },{ 0x9, 5 },{ 0x5, 5 },
+{ 0x6, 5 },{ 0x47, 9 },{ 0x20, 7 },{ 0x10, 7 },
+{ 0x2, 5 },{ 0x7c, 9 },{ 0x3a, 7 },{ 0x1d, 7 },
+{ 0x2, 6 },{ 0xec, 9 },{ 0x77, 8 },{ 0x0, 8 },
+{ 0x3, 5 },{ 0xb7, 9 },{ 0x2c, 7 },{ 0x13, 7 },
+{ 0x1, 6 },{ 0x168, 10 },{ 0x46, 8 },{ 0x3f, 8 },
+{ 0x1e, 6 },{ 0x712, 13 },{ 0xb5, 9 },{ 0x42, 8 },
+{ 0x22, 7 },{ 0x1c5, 11 },{ 0x11e, 10 },{ 0x87, 9 },
+{ 0x6, 4 },{ 0x3, 9 },{ 0x1e, 7 },{ 0x1c, 6 },
+{ 0x12, 7 },{ 0x388, 12 },{ 0x44, 9 },{ 0x70, 9 },
+{ 0x1f, 6 },{ 0x23e, 11 },{ 0x39, 8 },{ 0x8e, 9 },
+{ 0x1, 7 },{ 0x1c6, 11 },{ 0xb6, 9 },{ 0x45, 9 },
+{ 0x14, 6 },{ 0x23f, 11 },{ 0x7d, 9 },{ 0x18, 9 },
+{ 0x7, 7 },{ 0x1c7, 11 },{ 0x86, 9 },{ 0x19, 9 },
+{ 0x15, 6 },{ 0x1db, 10 },{ 0x2, 9 },{ 0x46, 9 },
+{ 0xd, 8 },{ 0x713, 13 },{ 0x1da, 10 },{ 0x169, 10 },
+};
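+
+/*
+ * Editorial sketch, not part of the upstream FFmpeg sources: each row of
+ * ff_msmp4_mb_i_table is a { code, length-in-bits } pair for one of the 64
+ * intra coded-block-pattern values.  Assuming the usual init_vlc() and
+ * get_vlc2() helpers from this libavcodec snapshot, the VLC declared above
+ * would be built and read roughly as below; the first pointer/stride triple
+ * addresses the bit lengths (column 1) and the second the codes (column 0),
+ * 4 and 2 being the row stride and element size of the uint16_t [64][2]
+ * array, MB_INTRA_VLC_BITS coming from msmpeg4.h, and s standing for a
+ * MpegEncContext with an open GetBitContext:
+ *
+ *     init_vlc(&ff_msmp4_mb_i_vlc, MB_INTRA_VLC_BITS, 64,
+ *              &ff_msmp4_mb_i_table[0][1], 4, 2,
+ *              &ff_msmp4_mb_i_table[0][0], 4, 2, 0);
+ *
+ *     code = get_vlc2(&s->gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
+ */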
+
+/* non intra picture macroblock coded block pattern + mb type */
+const uint32_t table_mb_non_intra[128][2] = {
+{ 0x40, 7 },{ 0x13c9, 13 },{ 0x9fd, 12 },{ 0x1fc, 15 },
+{ 0x9fc, 12 },{ 0xa83, 18 },{ 0x12d34, 17 },{ 0x83bc, 16 },
+{ 0x83a, 12 },{ 0x7f8, 17 },{ 0x3fd, 16 },{ 0x3ff, 16 },
+{ 0x79, 13 },{ 0xa82, 18 },{ 0x969d, 16 },{ 0x2a4, 16 },
+{ 0x978, 12 },{ 0x543, 17 },{ 0x41df, 15 },{ 0x7f9, 17 },
+{ 0x12f3, 13 },{ 0x25a6b, 18 },{ 0x25ef9, 18 },{ 0x3fa, 16 },
+{ 0x20ee, 14 },{ 0x969ab, 20 },{ 0x969c, 16 },{ 0x25ef8, 18 },
+{ 0x12d2, 13 },{ 0xa85, 18 },{ 0x969e, 16 },{ 0x4bc8, 15 },
+{ 0x3d, 12 },{ 0x12f7f, 17 },{ 0x2a2, 16 },{ 0x969f, 16 },
+{ 0x25ee, 14 },{ 0x12d355, 21 },{ 0x12f7d, 17 },{ 0x12f7e, 17 },
+{ 0x9e5, 12 },{ 0xa81, 18 },{ 0x4b4d4, 19 },{ 0x83bd, 16 },
+{ 0x78, 13 },{ 0x969b, 16 },{ 0x3fe, 16 },{ 0x2a5, 16 },
+{ 0x7e, 13 },{ 0xa80, 18 },{ 0x2a3, 16 },{ 0x3fb, 16 },
+{ 0x1076, 13 },{ 0xa84, 18 },{ 0x153, 15 },{ 0x4bc9, 15 },
+{ 0x55, 13 },{ 0x12d354, 21 },{ 0x4bde, 15 },{ 0x25e5, 14 },
+{ 0x25b, 10 },{ 0x4b4c, 15 },{ 0x96b, 12 },{ 0x96a, 12 },
+{ 0x1, 2 },{ 0x0, 7 },{ 0x26, 6 },{ 0x12b, 9 },
+{ 0x7, 3 },{ 0x20f, 10 },{ 0x4, 9 },{ 0x28, 12 },
+{ 0x6, 3 },{ 0x20a, 10 },{ 0x128, 9 },{ 0x2b, 12 },
+{ 0x11, 5 },{ 0x1b, 11 },{ 0x13a, 9 },{ 0x4ff, 11 },
+{ 0x3, 4 },{ 0x277, 10 },{ 0x106, 9 },{ 0x839, 12 },
+{ 0xb, 4 },{ 0x27b, 10 },{ 0x12c, 9 },{ 0x4bf, 11 },
+{ 0x9, 6 },{ 0x35, 12 },{ 0x27e, 10 },{ 0x13c8, 13 },
+{ 0x1, 6 },{ 0x4aa, 11 },{ 0x208, 10 },{ 0x29, 12 },
+{ 0x1, 4 },{ 0x254, 10 },{ 0x12e, 9 },{ 0x838, 12 },
+{ 0x24, 6 },{ 0x4f3, 11 },{ 0x276, 10 },{ 0x12f6, 13 },
+{ 0x1, 5 },{ 0x27a, 10 },{ 0x13e, 9 },{ 0x3e, 12 },
+{ 0x8, 6 },{ 0x413, 11 },{ 0xc, 10 },{ 0x4be, 11 },
+{ 0x14, 5 },{ 0x412, 11 },{ 0x253, 10 },{ 0x97a, 12 },
+{ 0x21, 6 },{ 0x4ab, 11 },{ 0x20b, 10 },{ 0x34, 12 },
+{ 0x15, 5 },{ 0x278, 10 },{ 0x252, 10 },{ 0x968, 12 },
+{ 0x5, 5 },{ 0xb, 10 },{ 0x9c, 8 },{ 0xe, 10 },
+};
+
+/* dc table 0 */
+
+const uint32_t ff_table0_dc_lum[120][2] = {
+{ 0x1, 1 },{ 0x1, 2 },{ 0x1, 4 },{ 0x1, 5 },
+{ 0x5, 5 },{ 0x7, 5 },{ 0x8, 6 },{ 0xc, 6 },
+{ 0x0, 7 },{ 0x2, 7 },{ 0x12, 7 },{ 0x1a, 7 },
+{ 0x3, 8 },{ 0x7, 8 },{ 0x27, 8 },{ 0x37, 8 },
+{ 0x5, 9 },{ 0x4c, 9 },{ 0x6c, 9 },{ 0x6d, 9 },
+{ 0x8, 10 },{ 0x19, 10 },{ 0x9b, 10 },{ 0x1b, 10 },
+{ 0x9a, 10 },{ 0x13, 11 },{ 0x34, 11 },{ 0x35, 11 },
+{ 0x61, 12 },{ 0x48, 13 },{ 0xc4, 13 },{ 0x4a, 13 },
+{ 0xc6, 13 },{ 0xc7, 13 },{ 0x92, 14 },{ 0x18b, 14 },
+{ 0x93, 14 },{ 0x183, 14 },{ 0x182, 14 },{ 0x96, 14 },
+{ 0x97, 14 },{ 0x180, 14 },{ 0x314, 15 },{ 0x315, 15 },
+{ 0x605, 16 },{ 0x604, 16 },{ 0x606, 16 },{ 0xc0e, 17 },
+{ 0x303cd, 23 },{ 0x303c9, 23 },{ 0x303c8, 23 },{ 0x303ca, 23 },
+{ 0x303cb, 23 },{ 0x303cc, 23 },{ 0x303ce, 23 },{ 0x303cf, 23 },
+{ 0x303d0, 23 },{ 0x303d1, 23 },{ 0x303d2, 23 },{ 0x303d3, 23 },
+{ 0x303d4, 23 },{ 0x303d5, 23 },{ 0x303d6, 23 },{ 0x303d7, 23 },
+{ 0x303d8, 23 },{ 0x303d9, 23 },{ 0x303da, 23 },{ 0x303db, 23 },
+{ 0x303dc, 23 },{ 0x303dd, 23 },{ 0x303de, 23 },{ 0x303df, 23 },
+{ 0x303e0, 23 },{ 0x303e1, 23 },{ 0x303e2, 23 },{ 0x303e3, 23 },
+{ 0x303e4, 23 },{ 0x303e5, 23 },{ 0x303e6, 23 },{ 0x303e7, 23 },
+{ 0x303e8, 23 },{ 0x303e9, 23 },{ 0x303ea, 23 },{ 0x303eb, 23 },
+{ 0x303ec, 23 },{ 0x303ed, 23 },{ 0x303ee, 23 },{ 0x303ef, 23 },
+{ 0x303f0, 23 },{ 0x303f1, 23 },{ 0x303f2, 23 },{ 0x303f3, 23 },
+{ 0x303f4, 23 },{ 0x303f5, 23 },{ 0x303f6, 23 },{ 0x303f7, 23 },
+{ 0x303f8, 23 },{ 0x303f9, 23 },{ 0x303fa, 23 },{ 0x303fb, 23 },
+{ 0x303fc, 23 },{ 0x303fd, 23 },{ 0x303fe, 23 },{ 0x303ff, 23 },
+{ 0x60780, 24 },{ 0x60781, 24 },{ 0x60782, 24 },{ 0x60783, 24 },
+{ 0x60784, 24 },{ 0x60785, 24 },{ 0x60786, 24 },{ 0x60787, 24 },
+{ 0x60788, 24 },{ 0x60789, 24 },{ 0x6078a, 24 },{ 0x6078b, 24 },
+{ 0x6078c, 24 },{ 0x6078d, 24 },{ 0x6078e, 24 },{ 0x6078f, 24 },
+};
+
+const uint32_t ff_table0_dc_chroma[120][2] = {
+{ 0x0, 2 },{ 0x1, 2 },{ 0x5, 3 },{ 0x9, 4 },
+{ 0xd, 4 },{ 0x11, 5 },{ 0x1d, 5 },{ 0x1f, 5 },
+{ 0x21, 6 },{ 0x31, 6 },{ 0x38, 6 },{ 0x33, 6 },
+{ 0x39, 6 },{ 0x3d, 6 },{ 0x61, 7 },{ 0x79, 7 },
+{ 0x80, 8 },{ 0xc8, 8 },{ 0xca, 8 },{ 0xf0, 8 },
+{ 0x81, 8 },{ 0xc0, 8 },{ 0xc9, 8 },{ 0x107, 9 },
+{ 0x106, 9 },{ 0x196, 9 },{ 0x183, 9 },{ 0x1e3, 9 },
+{ 0x1e2, 9 },{ 0x20a, 10 },{ 0x20b, 10 },{ 0x609, 11 },
+{ 0x412, 11 },{ 0x413, 11 },{ 0x60b, 11 },{ 0x411, 11 },
+{ 0x60a, 11 },{ 0x65f, 11 },{ 0x410, 11 },{ 0x65d, 11 },
+{ 0x65e, 11 },{ 0xcb8, 12 },{ 0xc10, 12 },{ 0xcb9, 12 },
+{ 0x1823, 13 },{ 0x3045, 14 },{ 0x6089, 15 },{ 0xc110, 16 },
+{ 0x304448, 22 },{ 0x304449, 22 },{ 0x30444a, 22 },{ 0x30444b, 22 },
+{ 0x30444c, 22 },{ 0x30444d, 22 },{ 0x30444e, 22 },{ 0x30444f, 22 },
+{ 0x304450, 22 },{ 0x304451, 22 },{ 0x304452, 22 },{ 0x304453, 22 },
+{ 0x304454, 22 },{ 0x304455, 22 },{ 0x304456, 22 },{ 0x304457, 22 },
+{ 0x304458, 22 },{ 0x304459, 22 },{ 0x30445a, 22 },{ 0x30445b, 22 },
+{ 0x30445c, 22 },{ 0x30445d, 22 },{ 0x30445e, 22 },{ 0x30445f, 22 },
+{ 0x304460, 22 },{ 0x304461, 22 },{ 0x304462, 22 },{ 0x304463, 22 },
+{ 0x304464, 22 },{ 0x304465, 22 },{ 0x304466, 22 },{ 0x304467, 22 },
+{ 0x304468, 22 },{ 0x304469, 22 },{ 0x30446a, 22 },{ 0x30446b, 22 },
+{ 0x30446c, 22 },{ 0x30446d, 22 },{ 0x30446e, 22 },{ 0x30446f, 22 },
+{ 0x304470, 22 },{ 0x304471, 22 },{ 0x304472, 22 },{ 0x304473, 22 },
+{ 0x304474, 22 },{ 0x304475, 22 },{ 0x304476, 22 },{ 0x304477, 22 },
+{ 0x304478, 22 },{ 0x304479, 22 },{ 0x30447a, 22 },{ 0x30447b, 22 },
+{ 0x30447c, 22 },{ 0x30447d, 22 },{ 0x30447e, 22 },{ 0x30447f, 22 },
+{ 0x608880, 23 },{ 0x608881, 23 },{ 0x608882, 23 },{ 0x608883, 23 },
+{ 0x608884, 23 },{ 0x608885, 23 },{ 0x608886, 23 },{ 0x608887, 23 },
+{ 0x608888, 23 },{ 0x608889, 23 },{ 0x60888a, 23 },{ 0x60888b, 23 },
+{ 0x60888c, 23 },{ 0x60888d, 23 },{ 0x60888e, 23 },{ 0x60888f, 23 },
+};
+
+/* dc table 1 */
+
+const uint32_t ff_table1_dc_lum[120][2] = {
+{ 0x2, 2 },{ 0x3, 2 },{ 0x3, 3 },{ 0x2, 4 },
+{ 0x5, 4 },{ 0x1, 5 },{ 0x3, 5 },{ 0x8, 5 },
+{ 0x0, 6 },{ 0x5, 6 },{ 0xd, 6 },{ 0xf, 6 },
+{ 0x13, 6 },{ 0x8, 7 },{ 0x18, 7 },{ 0x1c, 7 },
+{ 0x24, 7 },{ 0x4, 8 },{ 0x6, 8 },{ 0x12, 8 },
+{ 0x32, 8 },{ 0x3b, 8 },{ 0x4a, 8 },{ 0x4b, 8 },
+{ 0xb, 9 },{ 0x26, 9 },{ 0x27, 9 },{ 0x66, 9 },
+{ 0x74, 9 },{ 0x75, 9 },{ 0x14, 10 },{ 0x1c, 10 },
+{ 0x1f, 10 },{ 0x1d, 10 },{ 0x2b, 11 },{ 0x3d, 11 },
+{ 0x19d, 11 },{ 0x19f, 11 },{ 0x54, 12 },{ 0x339, 12 },
+{ 0x338, 12 },{ 0x33d, 12 },{ 0xab, 13 },{ 0xf1, 13 },
+{ 0x678, 13 },{ 0xf2, 13 },{ 0x1e0, 14 },{ 0x1e1, 14 },
+{ 0x154, 14 },{ 0xcf2, 14 },{ 0x3cc, 15 },{ 0x2ab, 15 },
+{ 0x19e7, 15 },{ 0x3ce, 15 },{ 0x19e6, 15 },{ 0x554, 16 },
+{ 0x79f, 16 },{ 0x555, 16 },{ 0xf3d, 17 },{ 0xf37, 17 },
+{ 0xf3c, 17 },{ 0xf35, 17 },{ 0x1e6d, 18 },{ 0x1e68, 18 },
+{ 0x3cd8, 19 },{ 0x3cd3, 19 },{ 0x3cd9, 19 },{ 0x79a4, 20 },
+{ 0xf34ba, 25 },{ 0xf34b4, 25 },{ 0xf34b5, 25 },{ 0xf34b6, 25 },
+{ 0xf34b7, 25 },{ 0xf34b8, 25 },{ 0xf34b9, 25 },{ 0xf34bb, 25 },
+{ 0xf34bc, 25 },{ 0xf34bd, 25 },{ 0xf34be, 25 },{ 0xf34bf, 25 },
+{ 0x1e6940, 26 },{ 0x1e6941, 26 },{ 0x1e6942, 26 },{ 0x1e6943, 26 },
+{ 0x1e6944, 26 },{ 0x1e6945, 26 },{ 0x1e6946, 26 },{ 0x1e6947, 26 },
+{ 0x1e6948, 26 },{ 0x1e6949, 26 },{ 0x1e694a, 26 },{ 0x1e694b, 26 },
+{ 0x1e694c, 26 },{ 0x1e694d, 26 },{ 0x1e694e, 26 },{ 0x1e694f, 26 },
+{ 0x1e6950, 26 },{ 0x1e6951, 26 },{ 0x1e6952, 26 },{ 0x1e6953, 26 },
+{ 0x1e6954, 26 },{ 0x1e6955, 26 },{ 0x1e6956, 26 },{ 0x1e6957, 26 },
+{ 0x1e6958, 26 },{ 0x1e6959, 26 },{ 0x1e695a, 26 },{ 0x1e695b, 26 },
+{ 0x1e695c, 26 },{ 0x1e695d, 26 },{ 0x1e695e, 26 },{ 0x1e695f, 26 },
+{ 0x1e6960, 26 },{ 0x1e6961, 26 },{ 0x1e6962, 26 },{ 0x1e6963, 26 },
+{ 0x1e6964, 26 },{ 0x1e6965, 26 },{ 0x1e6966, 26 },{ 0x1e6967, 26 },
+};
+
+const uint32_t ff_table1_dc_chroma[120][2] = {
+{ 0x0, 2 },{ 0x1, 2 },{ 0x4, 3 },{ 0x7, 3 },
+{ 0xb, 4 },{ 0xd, 4 },{ 0x15, 5 },{ 0x28, 6 },
+{ 0x30, 6 },{ 0x32, 6 },{ 0x52, 7 },{ 0x62, 7 },
+{ 0x66, 7 },{ 0xa6, 8 },{ 0xc6, 8 },{ 0xcf, 8 },
+{ 0x14f, 9 },{ 0x18e, 9 },{ 0x19c, 9 },{ 0x29d, 10 },
+{ 0x33a, 10 },{ 0x538, 11 },{ 0x63c, 11 },{ 0x63e, 11 },
+{ 0x63f, 11 },{ 0x676, 11 },{ 0xa73, 12 },{ 0xc7a, 12 },
+{ 0xcef, 12 },{ 0x14e5, 13 },{ 0x19dd, 13 },{ 0x29c8, 14 },
+{ 0x29c9, 14 },{ 0x63dd, 15 },{ 0x33b8, 14 },{ 0x33b9, 14 },
+{ 0xc7b6, 16 },{ 0x63d8, 15 },{ 0x63df, 15 },{ 0xc7b3, 16 },
+{ 0xc7b4, 16 },{ 0xc7b5, 16 },{ 0x63de, 15 },{ 0xc7b7, 16 },
+{ 0xc7b8, 16 },{ 0xc7b9, 16 },{ 0x18f65, 17 },{ 0x31ec8, 18 },
+{ 0xc7b248, 24 },{ 0xc7b249, 24 },{ 0xc7b24a, 24 },{ 0xc7b24b, 24 },
+{ 0xc7b24c, 24 },{ 0xc7b24d, 24 },{ 0xc7b24e, 24 },{ 0xc7b24f, 24 },
+{ 0xc7b250, 24 },{ 0xc7b251, 24 },{ 0xc7b252, 24 },{ 0xc7b253, 24 },
+{ 0xc7b254, 24 },{ 0xc7b255, 24 },{ 0xc7b256, 24 },{ 0xc7b257, 24 },
+{ 0xc7b258, 24 },{ 0xc7b259, 24 },{ 0xc7b25a, 24 },{ 0xc7b25b, 24 },
+{ 0xc7b25c, 24 },{ 0xc7b25d, 24 },{ 0xc7b25e, 24 },{ 0xc7b25f, 24 },
+{ 0xc7b260, 24 },{ 0xc7b261, 24 },{ 0xc7b262, 24 },{ 0xc7b263, 24 },
+{ 0xc7b264, 24 },{ 0xc7b265, 24 },{ 0xc7b266, 24 },{ 0xc7b267, 24 },
+{ 0xc7b268, 24 },{ 0xc7b269, 24 },{ 0xc7b26a, 24 },{ 0xc7b26b, 24 },
+{ 0xc7b26c, 24 },{ 0xc7b26d, 24 },{ 0xc7b26e, 24 },{ 0xc7b26f, 24 },
+{ 0xc7b270, 24 },{ 0xc7b271, 24 },{ 0xc7b272, 24 },{ 0xc7b273, 24 },
+{ 0xc7b274, 24 },{ 0xc7b275, 24 },{ 0xc7b276, 24 },{ 0xc7b277, 24 },
+{ 0xc7b278, 24 },{ 0xc7b279, 24 },{ 0xc7b27a, 24 },{ 0xc7b27b, 24 },
+{ 0xc7b27c, 24 },{ 0xc7b27d, 24 },{ 0xc7b27e, 24 },{ 0xc7b27f, 24 },
+{ 0x18f6480, 25 },{ 0x18f6481, 25 },{ 0x18f6482, 25 },{ 0x18f6483, 25 },
+{ 0x18f6484, 25 },{ 0x18f6485, 25 },{ 0x18f6486, 25 },{ 0x18f6487, 25 },
+{ 0x18f6488, 25 },{ 0x18f6489, 25 },{ 0x18f648a, 25 },{ 0x18f648b, 25 },
+{ 0x18f648c, 25 },{ 0x18f648d, 25 },{ 0x18f648e, 25 },{ 0x18f648f, 25 },
+};
+
+/* vlc table 0, for intra luma */
+
+static const uint16_t table0_vlc[133][2] = {
+{ 0x1, 2 },{ 0x6, 3 },{ 0xf, 4 },{ 0x16, 5 },
+{ 0x20, 6 },{ 0x18, 7 },{ 0x8, 8 },{ 0x9a, 8 },
+{ 0x56, 9 },{ 0x13e, 9 },{ 0xf0, 10 },{ 0x3a5, 10 },
+{ 0x77, 11 },{ 0x1ef, 11 },{ 0x9a, 12 },{ 0x5d, 13 },
+{ 0x1, 4 },{ 0x11, 5 },{ 0x2, 7 },{ 0xb, 8 },
+{ 0x12, 9 },{ 0x1d6, 9 },{ 0x27e, 10 },{ 0x191, 11 },
+{ 0xea, 12 },{ 0x3dc, 12 },{ 0x13b, 13 },{ 0x4, 5 },
+{ 0x14, 7 },{ 0x9e, 8 },{ 0x9, 10 },{ 0x1ac, 11 },
+{ 0x1e2, 11 },{ 0x3ca, 12 },{ 0x5f, 13 },{ 0x17, 5 },
+{ 0x4e, 7 },{ 0x5e, 9 },{ 0xf3, 10 },{ 0x1ad, 11 },
+{ 0xec, 12 },{ 0x5f0, 13 },{ 0xe, 6 },{ 0xe1, 8 },
+{ 0x3a4, 10 },{ 0x9c, 12 },{ 0x13d, 13 },{ 0x3b, 6 },
+{ 0x1c, 9 },{ 0x14, 11 },{ 0x9be, 12 },{ 0x6, 7 },
+{ 0x7a, 9 },{ 0x190, 11 },{ 0x137, 13 },{ 0x1b, 7 },
+{ 0x8, 10 },{ 0x75c, 11 },{ 0x71, 7 },{ 0xd7, 10 },
+{ 0x9bf, 12 },{ 0x7, 8 },{ 0xaf, 10 },{ 0x4cc, 11 },
+{ 0x34, 8 },{ 0x265, 10 },{ 0x9f, 12 },{ 0xe0, 8 },
+{ 0x16, 11 },{ 0x327, 12 },{ 0x15, 9 },{ 0x17d, 11 },
+{ 0xebb, 12 },{ 0x14, 9 },{ 0xf6, 10 },{ 0x1e4, 11 },
+{ 0xcb, 10 },{ 0x99d, 12 },{ 0xca, 10 },{ 0x2fc, 12 },
+{ 0x17f, 11 },{ 0x4cd, 11 },{ 0x2fd, 12 },{ 0x4fe, 11 },
+{ 0x13a, 13 },{ 0xa, 4 },{ 0x42, 7 },{ 0x1d3, 9 },
+{ 0x4dd, 11 },{ 0x12, 5 },{ 0xe8, 8 },{ 0x4c, 11 },
+{ 0x136, 13 },{ 0x39, 6 },{ 0x264, 10 },{ 0xeba, 12 },
+{ 0x0, 7 },{ 0xae, 10 },{ 0x99c, 12 },{ 0x1f, 7 },
+{ 0x4de, 11 },{ 0x43, 7 },{ 0x4dc, 11 },{ 0x3, 8 },
+{ 0x3cb, 12 },{ 0x6, 8 },{ 0x99e, 12 },{ 0x2a, 8 },
+{ 0x5f1, 13 },{ 0xf, 8 },{ 0x9fe, 12 },{ 0x33, 8 },
+{ 0x9ff, 12 },{ 0x98, 8 },{ 0x99f, 12 },{ 0xea, 8 },
+{ 0x13c, 13 },{ 0x2e, 8 },{ 0x192, 11 },{ 0x136, 9 },
+{ 0x6a, 9 },{ 0x15, 11 },{ 0x3af, 10 },{ 0x1e3, 11 },
+{ 0x74, 11 },{ 0xeb, 12 },{ 0x2f9, 12 },{ 0x5c, 13 },
+{ 0xed, 12 },{ 0x3dd, 12 },{ 0x326, 12 },{ 0x5e, 13 },
+{ 0x16, 7 },
+};
+
+static const int8_t table0_level[132] = {
+ 1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16,
+ 1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 1, 2, 3, 4, 5,
+ 6, 7, 8, 1, 2, 3, 4, 5,
+ 6, 7, 1, 2, 3, 4, 5, 1,
+ 2, 3, 4, 1, 2, 3, 4, 1,
+ 2, 3, 1, 2, 3, 1, 2, 3,
+ 1, 2, 3, 1, 2, 3, 1, 2,
+ 3, 1, 2, 3, 1, 2, 1, 2,
+ 1, 1, 1, 1, 1, 1, 2, 3,
+ 4, 1, 2, 3, 4, 1, 2, 3,
+ 1, 2, 3, 1, 2, 1, 2, 1,
+ 2, 1, 2, 1, 2, 1, 2, 1,
+ 2, 1, 2, 1, 2, 1, 2, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1,
+};
+
+static const int8_t table0_run[132] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 2, 2, 2, 2, 2,
+ 2, 2, 2, 3, 3, 3, 3, 3,
+ 3, 3, 4, 4, 4, 4, 4, 5,
+ 5, 5, 5, 6, 6, 6, 6, 7,
+ 7, 7, 8, 8, 8, 9, 9, 9,
+ 10, 10, 10, 11, 11, 11, 12, 12,
+ 12, 13, 13, 13, 14, 14, 15, 15,
+ 16, 17, 18, 19, 20, 0, 0, 0,
+ 0, 1, 1, 1, 1, 2, 2, 2,
+ 3, 3, 3, 4, 4, 5, 5, 6,
+ 6, 7, 7, 8, 8, 9, 9, 10,
+ 10, 11, 11, 12, 12, 13, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22,
+ 23, 24, 25, 26,
+};
+
+/* vlc table 1, for intra chroma and P macroblocks */
+
+static const uint16_t table1_vlc[149][2] = {
+{ 0x4, 3 },{ 0x14, 5 },{ 0x17, 7 },{ 0x7f, 8 },
+{ 0x154, 9 },{ 0x1f2, 10 },{ 0xbf, 11 },{ 0x65, 12 },
+{ 0xaaa, 12 },{ 0x630, 13 },{ 0x1597, 13 },{ 0x3b7, 14 },
+{ 0x2b22, 14 },{ 0xbe6, 15 },{ 0xb, 4 },{ 0x37, 7 },
+{ 0x62, 9 },{ 0x7, 11 },{ 0x166, 12 },{ 0xce, 13 },
+{ 0x1590, 13 },{ 0x5f6, 14 },{ 0xbe7, 15 },{ 0x7, 5 },
+{ 0x6d, 8 },{ 0x3, 11 },{ 0x31f, 12 },{ 0x5f2, 14 },
+{ 0x2, 6 },{ 0x61, 9 },{ 0x55, 12 },{ 0x1df, 14 },
+{ 0x1a, 6 },{ 0x1e, 10 },{ 0xac9, 12 },{ 0x2b23, 14 },
+{ 0x1e, 6 },{ 0x1f, 10 },{ 0xac3, 12 },{ 0x2b2b, 14 },
+{ 0x6, 7 },{ 0x4, 11 },{ 0x2f8, 13 },{ 0x19, 7 },
+{ 0x6, 11 },{ 0x63d, 13 },{ 0x57, 7 },{ 0x182, 11 },
+{ 0x2aa2, 14 },{ 0x4, 8 },{ 0x180, 11 },{ 0x59c, 14 },
+{ 0x7d, 8 },{ 0x164, 12 },{ 0x76d, 15 },{ 0x2, 9 },
+{ 0x18d, 11 },{ 0x1581, 13 },{ 0xad, 8 },{ 0x60, 12 },
+{ 0xc67, 14 },{ 0x1c, 9 },{ 0xee, 13 },{ 0x3, 9 },
+{ 0x2cf, 13 },{ 0xd9, 9 },{ 0x1580, 13 },{ 0x2, 11 },
+{ 0x183, 11 },{ 0x57, 12 },{ 0x61, 12 },{ 0x31, 11 },
+{ 0x66, 12 },{ 0x631, 13 },{ 0x632, 13 },{ 0xac, 13 },
+{ 0x31d, 12 },{ 0x76, 12 },{ 0x3a, 11 },{ 0x165, 12 },
+{ 0xc66, 14 },{ 0x3, 2 },{ 0x54, 7 },{ 0x2ab, 10 },
+{ 0x16, 13 },{ 0x5f7, 14 },{ 0x5, 4 },{ 0xf8, 9 },
+{ 0xaa9, 12 },{ 0x5f, 15 },{ 0x4, 4 },{ 0x1c, 10 },
+{ 0x1550, 13 },{ 0x4, 5 },{ 0x77, 11 },{ 0x76c, 15 },
+{ 0xe, 5 },{ 0xa, 12 },{ 0xc, 5 },{ 0x562, 11 },
+{ 0x4, 6 },{ 0x31c, 12 },{ 0x6, 6 },{ 0xc8, 13 },
+{ 0xd, 6 },{ 0x1da, 13 },{ 0x7, 6 },{ 0xc9, 13 },
+{ 0x1, 7 },{ 0x2e, 14 },{ 0x14, 7 },{ 0x1596, 13 },
+{ 0xa, 7 },{ 0xac2, 12 },{ 0x16, 7 },{ 0x15b, 14 },
+{ 0x15, 7 },{ 0x15a, 14 },{ 0xf, 8 },{ 0x5e, 15 },
+{ 0x7e, 8 },{ 0xab, 8 },{ 0x2d, 9 },{ 0xd8, 9 },
+{ 0xb, 9 },{ 0x14, 10 },{ 0x2b3, 10 },{ 0x1f3, 10 },
+{ 0x3a, 10 },{ 0x0, 10 },{ 0x58, 10 },{ 0x2e, 9 },
+{ 0x5e, 10 },{ 0x563, 11 },{ 0xec, 12 },{ 0x54, 12 },
+{ 0xac1, 12 },{ 0x1556, 13 },{ 0x2fa, 13 },{ 0x181, 11 },
+{ 0x1557, 13 },{ 0x59d, 14 },{ 0x2aa3, 14 },{ 0x2b2a, 14 },
+{ 0x1de, 14 },{ 0x63c, 13 },{ 0xcf, 13 },{ 0x1594, 13 },
+{ 0xd, 9 },
+};
+
+static const int8_t table1_level[148] = {
+ 1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 1, 2,
+ 3, 4, 5, 6, 7, 8, 9, 1,
+ 2, 3, 4, 5, 1, 2, 3, 4,
+ 1, 2, 3, 4, 1, 2, 3, 4,
+ 1, 2, 3, 1, 2, 3, 1, 2,
+ 3, 1, 2, 3, 1, 2, 3, 1,
+ 2, 3, 1, 2, 3, 1, 2, 1,
+ 2, 1, 2, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 2, 3, 4, 5, 1, 2,
+ 3, 4, 1, 2, 3, 1, 2, 3,
+ 1, 2, 1, 2, 1, 2, 1, 2,
+ 1, 2, 1, 2, 1, 2, 1, 2,
+ 1, 2, 1, 2, 1, 2, 1, 2,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1,
+};
+
+static const int8_t table1_run[148] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 2,
+ 2, 2, 2, 2, 3, 3, 3, 3,
+ 4, 4, 4, 4, 5, 5, 5, 5,
+ 6, 6, 6, 7, 7, 7, 8, 8,
+ 8, 9, 9, 9, 10, 10, 10, 11,
+ 11, 11, 12, 12, 12, 13, 13, 14,
+ 14, 15, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28,
+ 29, 0, 0, 0, 0, 0, 1, 1,
+ 1, 1, 2, 2, 2, 3, 3, 3,
+ 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 9, 9, 10, 10, 11, 11,
+ 12, 12, 13, 13, 14, 14, 15, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43,
+};
+
+/* third vlc table */
+
+static const uint16_t table2_vlc[186][2] = {
+{ 0x1, 2 },{ 0x5, 3 },{ 0xd, 4 },{ 0x12, 5 },
+{ 0xe, 6 },{ 0x15, 7 },{ 0x13, 8 },{ 0x3f, 8 },
+{ 0x4b, 9 },{ 0x11f, 9 },{ 0xb8, 10 },{ 0x3e3, 10 },
+{ 0x172, 11 },{ 0x24d, 12 },{ 0x3da, 12 },{ 0x2dd, 13 },
+{ 0x1f55, 13 },{ 0x5b9, 14 },{ 0x3eae, 14 },{ 0x0, 4 },
+{ 0x10, 5 },{ 0x8, 7 },{ 0x20, 8 },{ 0x29, 9 },
+{ 0x1f4, 9 },{ 0x233, 10 },{ 0x1e0, 11 },{ 0x12a, 12 },
+{ 0x3dd, 12 },{ 0x50a, 13 },{ 0x1f29, 13 },{ 0xa42, 14 },
+{ 0x1272, 15 },{ 0x1737, 15 },{ 0x3, 5 },{ 0x11, 7 },
+{ 0xc4, 8 },{ 0x4b, 10 },{ 0xb4, 11 },{ 0x7d4, 11 },
+{ 0x345, 12 },{ 0x2d7, 13 },{ 0x7bf, 13 },{ 0x938, 14 },
+{ 0xbbb, 14 },{ 0x95e, 15 },{ 0x13, 5 },{ 0x78, 7 },
+{ 0x69, 9 },{ 0x232, 10 },{ 0x461, 11 },{ 0x3ec, 12 },
+{ 0x520, 13 },{ 0x1f2a, 13 },{ 0x3e50, 14 },{ 0x3e51, 14 },
+{ 0x1486, 15 },{ 0xc, 6 },{ 0x24, 9 },{ 0x94, 11 },
+{ 0x8c0, 12 },{ 0xf09, 14 },{ 0x1ef0, 15 },{ 0x3d, 6 },
+{ 0x53, 9 },{ 0x1a0, 11 },{ 0x2d6, 13 },{ 0xf08, 14 },
+{ 0x13, 7 },{ 0x7c, 9 },{ 0x7c1, 11 },{ 0x4ac, 14 },
+{ 0x1b, 7 },{ 0xa0, 10 },{ 0x344, 12 },{ 0xf79, 14 },
+{ 0x79, 7 },{ 0x3e1, 10 },{ 0x2d4, 13 },{ 0x2306, 14 },
+{ 0x21, 8 },{ 0x23c, 10 },{ 0xfae, 12 },{ 0x23de, 14 },
+{ 0x35, 8 },{ 0x175, 11 },{ 0x7b3, 13 },{ 0xc5, 8 },
+{ 0x174, 11 },{ 0x785, 13 },{ 0x48, 9 },{ 0x1a3, 11 },
+{ 0x49e, 13 },{ 0x2c, 9 },{ 0xfa, 10 },{ 0x7d6, 11 },
+{ 0x92, 10 },{ 0x5cc, 13 },{ 0x1ef1, 15 },{ 0xa3, 10 },
+{ 0x3ed, 12 },{ 0x93e, 14 },{ 0x1e2, 11 },{ 0x1273, 15 },
+{ 0x7c4, 11 },{ 0x1487, 15 },{ 0x291, 12 },{ 0x293, 12 },
+{ 0xf8a, 12 },{ 0x509, 13 },{ 0x508, 13 },{ 0x78d, 13 },
+{ 0x7be, 13 },{ 0x78c, 13 },{ 0x4ae, 14 },{ 0xbba, 14 },
+{ 0x2307, 14 },{ 0xb9a, 14 },{ 0x1736, 15 },{ 0xe, 4 },
+{ 0x45, 7 },{ 0x1f3, 9 },{ 0x47a, 11 },{ 0x5dc, 13 },
+{ 0x23df, 14 },{ 0x19, 5 },{ 0x28, 9 },{ 0x176, 11 },
+{ 0x49d, 13 },{ 0x23dd, 14 },{ 0x30, 6 },{ 0xa2, 10 },
+{ 0x2ef, 12 },{ 0x5b8, 14 },{ 0x3f, 6 },{ 0xa5, 10 },
+{ 0x3db, 12 },{ 0x93f, 14 },{ 0x44, 7 },{ 0x7cb, 11 },
+{ 0x95f, 15 },{ 0x63, 7 },{ 0x3c3, 12 },{ 0x15, 8 },
+{ 0x8f6, 12 },{ 0x17, 8 },{ 0x498, 13 },{ 0x2c, 8 },
+{ 0x7b2, 13 },{ 0x2f, 8 },{ 0x1f54, 13 },{ 0x8d, 8 },
+{ 0x7bd, 13 },{ 0x8e, 8 },{ 0x1182, 13 },{ 0xfb, 8 },
+{ 0x50b, 13 },{ 0x2d, 8 },{ 0x7c0, 11 },{ 0x79, 9 },
+{ 0x1f5f, 13 },{ 0x7a, 9 },{ 0x1f56, 13 },{ 0x231, 10 },
+{ 0x3e4, 10 },{ 0x1a1, 11 },{ 0x143, 11 },{ 0x1f7, 11 },
+{ 0x16f, 12 },{ 0x292, 12 },{ 0x2e7, 12 },{ 0x16c, 12 },
+{ 0x16d, 12 },{ 0x3dc, 12 },{ 0xf8b, 12 },{ 0x499, 13 },
+{ 0x3d8, 12 },{ 0x78e, 13 },{ 0x2d5, 13 },{ 0x1f5e, 13 },
+{ 0x1f2b, 13 },{ 0x78f, 13 },{ 0x4ad, 14 },{ 0x3eaf, 14 },
+{ 0x23dc, 14 },{ 0x4a, 9 },
+};
+
+static const int8_t table2_level[185] = {
+ 1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 1, 2, 3, 4, 5,
+ 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 1, 2, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12, 1, 2,
+ 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 1, 2, 3, 4, 5, 6, 1,
+ 2, 3, 4, 5, 1, 2, 3, 4,
+ 1, 2, 3, 4, 1, 2, 3, 4,
+ 1, 2, 3, 4, 1, 2, 3, 1,
+ 2, 3, 1, 2, 3, 1, 2, 3,
+ 1, 2, 3, 1, 2, 3, 1, 2,
+ 1, 2, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 3, 4, 5, 6, 1, 2, 3,
+ 4, 5, 1, 2, 3, 4, 1, 2,
+ 3, 4, 1, 2, 3, 1, 2, 1,
+ 2, 1, 2, 1, 2, 1, 2, 1,
+ 2, 1, 2, 1, 2, 1, 2, 1,
+ 2, 1, 2, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1,
+};
+
+static const int8_t table2_run[185] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 4, 4, 4, 4, 4, 4, 5,
+ 5, 5, 5, 5, 6, 6, 6, 6,
+ 7, 7, 7, 7, 8, 8, 8, 8,
+ 9, 9, 9, 9, 10, 10, 10, 11,
+ 11, 11, 12, 12, 12, 13, 13, 13,
+ 14, 14, 14, 15, 15, 15, 16, 16,
+ 17, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 0,
+ 0, 0, 0, 0, 0, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 3, 3,
+ 3, 3, 4, 4, 4, 5, 5, 6,
+ 6, 7, 7, 8, 8, 9, 9, 10,
+ 10, 11, 11, 12, 12, 13, 13, 14,
+ 14, 15, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28,
+ 29, 30, 31, 32, 33, 34, 35, 36,
+ 37,
+};
+
+/* second non intra vlc table */
+static const uint16_t table4_vlc[169][2] = {
+{ 0x0, 3 },{ 0x3, 4 },{ 0xb, 5 },{ 0x14, 6 },
+{ 0x3f, 6 },{ 0x5d, 7 },{ 0xa2, 8 },{ 0xac, 9 },
+{ 0x16e, 9 },{ 0x20a, 10 },{ 0x2e2, 10 },{ 0x432, 11 },
+{ 0x5c9, 11 },{ 0x827, 12 },{ 0xb54, 12 },{ 0x4e6, 13 },
+{ 0x105f, 13 },{ 0x172a, 13 },{ 0x20b2, 14 },{ 0x2d4e, 14 },
+{ 0x39f0, 14 },{ 0x4175, 15 },{ 0x5a9e, 15 },{ 0x4, 4 },
+{ 0x1e, 5 },{ 0x42, 7 },{ 0xb6, 8 },{ 0x173, 9 },
+{ 0x395, 10 },{ 0x72e, 11 },{ 0xb94, 12 },{ 0x16a4, 13 },
+{ 0x20b3, 14 },{ 0x2e45, 14 },{ 0x5, 5 },{ 0x40, 7 },
+{ 0x49, 9 },{ 0x28f, 10 },{ 0x5cb, 11 },{ 0x48a, 13 },
+{ 0x9dd, 14 },{ 0x73e2, 15 },{ 0x18, 5 },{ 0x25, 8 },
+{ 0x8a, 10 },{ 0x51b, 11 },{ 0xe5f, 12 },{ 0x9c9, 14 },
+{ 0x139c, 15 },{ 0x29, 6 },{ 0x4f, 9 },{ 0x412, 11 },
+{ 0x48d, 13 },{ 0x2e41, 14 },{ 0x38, 6 },{ 0x10e, 9 },
+{ 0x5a8, 11 },{ 0x105c, 13 },{ 0x39f2, 14 },{ 0x58, 7 },
+{ 0x21f, 10 },{ 0xe7e, 12 },{ 0x39ff, 14 },{ 0x23, 8 },
+{ 0x2e3, 10 },{ 0x4e5, 13 },{ 0x2e40, 14 },{ 0xa1, 8 },
+{ 0x5be, 11 },{ 0x9c8, 14 },{ 0x83, 8 },{ 0x13a, 11 },
+{ 0x1721, 13 },{ 0x44, 9 },{ 0x276, 12 },{ 0x39f6, 14 },
+{ 0x8b, 10 },{ 0x4ef, 13 },{ 0x5a9b, 15 },{ 0x208, 10 },
+{ 0x1cfe, 13 },{ 0x399, 10 },{ 0x1cb4, 13 },{ 0x39e, 10 },
+{ 0x39f3, 14 },{ 0x5ab, 11 },{ 0x73e3, 15 },{ 0x737, 11 },
+{ 0x5a9f, 15 },{ 0x82d, 12 },{ 0xe69, 12 },{ 0xe68, 12 },
+{ 0x433, 11 },{ 0xb7b, 12 },{ 0x2df8, 14 },{ 0x2e56, 14 },
+{ 0x2e57, 14 },{ 0x39f7, 14 },{ 0x51a5, 15 },{ 0x3, 3 },
+{ 0x2a, 6 },{ 0xe4, 8 },{ 0x28e, 10 },{ 0x735, 11 },
+{ 0x1058, 13 },{ 0x1cfa, 13 },{ 0x2df9, 14 },{ 0x4174, 15 },
+{ 0x9, 4 },{ 0x54, 8 },{ 0x398, 10 },{ 0x48b, 13 },
+{ 0x139d, 15 },{ 0xd, 4 },{ 0xad, 9 },{ 0x826, 12 },
+{ 0x2d4c, 14 },{ 0x11, 5 },{ 0x16b, 9 },{ 0xb7f, 12 },
+{ 0x51a4, 15 },{ 0x19, 5 },{ 0x21b, 10 },{ 0x16fd, 13 },
+{ 0x1d, 5 },{ 0x394, 10 },{ 0x28d3, 14 },{ 0x2b, 6 },
+{ 0x5bc, 11 },{ 0x5a9a, 15 },{ 0x2f, 6 },{ 0x247, 12 },
+{ 0x10, 7 },{ 0xa35, 12 },{ 0x3e, 6 },{ 0xb7a, 12 },
+{ 0x59, 7 },{ 0x105e, 13 },{ 0x26, 8 },{ 0x9cf, 14 },
+{ 0x55, 8 },{ 0x1cb5, 13 },{ 0x57, 8 },{ 0xe5b, 12 },
+{ 0xa0, 8 },{ 0x1468, 13 },{ 0x170, 9 },{ 0x90, 10 },
+{ 0x1ce, 9 },{ 0x21a, 10 },{ 0x218, 10 },{ 0x168, 9 },
+{ 0x21e, 10 },{ 0x244, 12 },{ 0x736, 11 },{ 0x138, 11 },
+{ 0x519, 11 },{ 0xe5e, 12 },{ 0x72c, 11 },{ 0xb55, 12 },
+{ 0x9dc, 14 },{ 0x20bb, 14 },{ 0x48c, 13 },{ 0x1723, 13 },
+{ 0x2e44, 14 },{ 0x16a5, 13 },{ 0x518, 11 },{ 0x39fe, 14 },
+{ 0x169, 9 },
+};
+
+static const int8_t table4_level[168] = {
+ 1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 1,
+ 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 1, 2, 3, 4, 5, 6,
+ 7, 8, 1, 2, 3, 4, 5, 6,
+ 7, 1, 2, 3, 4, 5, 1, 2,
+ 3, 4, 5, 1, 2, 3, 4, 1,
+ 2, 3, 4, 1, 2, 3, 1, 2,
+ 3, 1, 2, 3, 1, 2, 3, 1,
+ 2, 1, 2, 1, 2, 1, 2, 1,
+ 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 2, 3, 4, 5,
+ 6, 7, 8, 9, 1, 2, 3, 4,
+ 5, 1, 2, 3, 4, 1, 2, 3,
+ 4, 1, 2, 3, 1, 2, 3, 1,
+ 2, 3, 1, 2, 1, 2, 1, 2,
+ 1, 2, 1, 2, 1, 2, 1, 2,
+ 1, 2, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+};
+
+static const int8_t table4_run[168] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2,
+ 2, 2, 3, 3, 3, 3, 3, 3,
+ 3, 4, 4, 4, 4, 4, 5, 5,
+ 5, 5, 5, 6, 6, 6, 6, 7,
+ 7, 7, 7, 8, 8, 8, 9, 9,
+ 9, 10, 10, 10, 11, 11, 11, 12,
+ 12, 13, 13, 14, 14, 15, 15, 16,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 1, 1, 1, 1,
+ 1, 2, 2, 2, 2, 3, 3, 3,
+ 3, 4, 4, 4, 5, 5, 5, 6,
+ 6, 6, 7, 7, 8, 8, 9, 9,
+ 10, 10, 11, 11, 12, 12, 13, 13,
+ 14, 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28,
+ 29, 30, 31, 32, 33, 34, 35, 36,
+};
+
+extern const uint16_t inter_vlc[103][2];
+extern const int8_t inter_level[102];
+extern const int8_t inter_run[102];
+
+extern const uint16_t ff_mpeg4_intra_vlc[103][2];
+extern const int8_t ff_mpeg4_intra_level[102];
+extern const int8_t ff_mpeg4_intra_run[102];
+
+RLTable rl_table[NB_RL_TABLES] = {
+ /* intra luminance tables */
+ /* low motion */
+ {
+ 132,
+ 85,
+ table0_vlc,
+ table0_run,
+ table0_level,
+ },
+ /* high motion */
+ {
+ 185,
+ 119,
+ table2_vlc,
+ table2_run,
+ table2_level,
+ },
+ /* mid rate */
+ {
+ 102,
+ 67,
+ ff_mpeg4_intra_vlc,
+ ff_mpeg4_intra_run,
+ ff_mpeg4_intra_level,
+ },
+ /* intra chrominance / non intra tables */
+ /* low motion inter */
+ {
+ 148,
+ 81,
+ table1_vlc,
+ table1_run,
+ table1_level,
+ },
+ /* high motion inter */
+ {
+ 168,
+ 99,
+ table4_vlc,
+ table4_run,
+ table4_level,
+ },
+ /* mid rate inter */
+ {
+ 102,
+ 58,
+ inter_vlc,
+ inter_run,
+ inter_level,
+ },
+};
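+
+/*
+ * Editorial note, a sketch not present in the upstream sources: in each
+ * RLTable entry above, the first number is the count of regular VLC codes
+ * (the vlc table holds one extra entry at the end, the escape code) and the
+ * second is how many of those codes have last == 0.  A decoded index i from
+ * one of these tables would therefore map to a run/level/last triple roughly
+ * like this, with rl pointing at the chosen RLTable:
+ *
+ *     run   = rl->table_run[i];
+ *     level = rl->table_level[i];
+ *     last  = i >= rl->last;
+ */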
+
+/* motion vector table 0 */
+
+static const uint16_t table0_mv_code[1100] = {
+ 0x0001, 0x0003, 0x0005, 0x0007, 0x0003, 0x0008, 0x000c, 0x0001,
+ 0x0002, 0x001b, 0x0006, 0x000b, 0x0015, 0x0002, 0x000e, 0x000f,
+ 0x0014, 0x0020, 0x0022, 0x0025, 0x0027, 0x0029, 0x002d, 0x004b,
+ 0x004d, 0x0003, 0x0022, 0x0023, 0x0025, 0x0027, 0x0042, 0x0048,
+ 0x0049, 0x0050, 0x005c, 0x0091, 0x009f, 0x000e, 0x0043, 0x004c,
+ 0x0054, 0x0056, 0x008c, 0x0098, 0x009a, 0x009b, 0x00b1, 0x00b2,
+ 0x0120, 0x0121, 0x0126, 0x0133, 0x0139, 0x01a1, 0x01a4, 0x01a5,
+ 0x01a6, 0x01a7, 0x01ae, 0x01af, 0x000b, 0x0019, 0x0085, 0x0090,
+ 0x009b, 0x00aa, 0x00af, 0x010c, 0x010e, 0x011c, 0x011e, 0x0133,
+ 0x0144, 0x0160, 0x0174, 0x0175, 0x0177, 0x0178, 0x0249, 0x024b,
+ 0x0252, 0x0261, 0x0265, 0x0270, 0x0352, 0x0353, 0x0355, 0x0359,
+ 0x0010, 0x0011, 0x0013, 0x0034, 0x0035, 0x0036, 0x0037, 0x003d,
+ 0x003e, 0x0109, 0x0126, 0x0156, 0x021a, 0x021e, 0x023a, 0x023e,
+ 0x028e, 0x028f, 0x02cf, 0x0491, 0x0494, 0x049f, 0x04a0, 0x04a3,
+ 0x04a6, 0x04a7, 0x04ad, 0x04ae, 0x04c0, 0x04c4, 0x04c6, 0x04c8,
+ 0x04c9, 0x04f5, 0x04f6, 0x04f7, 0x0680, 0x0682, 0x0683, 0x0688,
+ 0x0689, 0x068d, 0x068e, 0x068f, 0x06a2, 0x06a3, 0x06a9, 0x06b0,
+ 0x06b1, 0x06b4, 0x06b5, 0x0024, 0x0060, 0x0063, 0x0078, 0x0079,
+ 0x0211, 0x0244, 0x0245, 0x0247, 0x0248, 0x0249, 0x024a, 0x024b,
+ 0x026b, 0x02af, 0x02b8, 0x02bb, 0x0436, 0x0476, 0x0477, 0x047e,
+ 0x04c8, 0x04c9, 0x04ca, 0x0514, 0x0586, 0x0587, 0x0598, 0x059d,
+ 0x05d9, 0x05da, 0x0920, 0x0921, 0x093b, 0x093c, 0x093d, 0x0942,
+ 0x0943, 0x0944, 0x0945, 0x0959, 0x095e, 0x095f, 0x0982, 0x0983,
+ 0x098e, 0x098f, 0x09c4, 0x09e7, 0x09e8, 0x09e9, 0x0d02, 0x0d17,
+ 0x0d18, 0x0d19, 0x0d41, 0x0d42, 0x0d43, 0x0d50, 0x0d5f, 0x0d6d,
+ 0x0d6e, 0x0d6f, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057,
+ 0x00c2, 0x00c3, 0x00c4, 0x00c5, 0x041e, 0x041f, 0x0420, 0x0421,
+ 0x048c, 0x048d, 0x04d3, 0x04d4, 0x04d5, 0x055c, 0x055d, 0x0572,
+ 0x0573, 0x0574, 0x0575, 0x08de, 0x08df, 0x08fe, 0x08ff, 0x0996,
+ 0x0a36, 0x0a37, 0x0b08, 0x0b09, 0x0b0a, 0x0b0b, 0x0b32, 0x0b33,
+ 0x0b34, 0x0b35, 0x0b36, 0x0b37, 0x0b38, 0x0b39, 0x0bb0, 0x0bf7,
+ 0x0bf8, 0x0bf9, 0x0bfa, 0x0bfb, 0x0bfc, 0x0bfd, 0x0bfe, 0x0bff,
+ 0x1254, 0x1255, 0x1256, 0x1257, 0x1270, 0x1271, 0x1272, 0x1273,
+ 0x1274, 0x1275, 0x12ab, 0x12ac, 0x12ad, 0x12ae, 0x12af, 0x12b0,
+ 0x12b1, 0x1315, 0x1316, 0x1317, 0x13bf, 0x13c0, 0x13c1, 0x13c2,
+ 0x13c3, 0x13c4, 0x13c5, 0x13c6, 0x13c7, 0x13c8, 0x13c9, 0x13ca,
+ 0x13cb, 0x13cc, 0x13cd, 0x1a06, 0x1a07, 0x1a28, 0x1a29, 0x1a2a,
+ 0x1a2b, 0x1a2c, 0x1a2d, 0x1a80, 0x1abb, 0x1abc, 0x1abd, 0x1ad8,
+ 0x1ad9, 0x0094, 0x0095, 0x0096, 0x0097, 0x00a0, 0x00a1, 0x00a2,
+ 0x00a3, 0x0831, 0x0832, 0x0833, 0x0834, 0x0835, 0x0836, 0x0837,
+ 0x0838, 0x0839, 0x083a, 0x083b, 0x0939, 0x093a, 0x093b, 0x093c,
+ 0x093d, 0x093e, 0x093f, 0x09a0, 0x09a1, 0x09a2, 0x09a3, 0x09a4,
+ 0x09a5, 0x11ac, 0x11ad, 0x11ae, 0x11af, 0x11b0, 0x11b1, 0x11b2,
+ 0x11b3, 0x11b4, 0x11b5, 0x11b6, 0x11b7, 0x11b8, 0x11b9, 0x11ba,
+ 0x11bb, 0x132f, 0x1454, 0x1455, 0x1456, 0x1457, 0x1458, 0x1459,
+ 0x145a, 0x145b, 0x145c, 0x145d, 0x145e, 0x145f, 0x1460, 0x1461,
+ 0x1462, 0x1463, 0x1464, 0x1465, 0x1466, 0x1467, 0x1468, 0x1469,
+ 0x146a, 0x146b, 0x17de, 0x17df, 0x17e0, 0x17e1, 0x17e2, 0x17e3,
+ 0x17e4, 0x17e5, 0x17e6, 0x17e7, 0x17e8, 0x17e9, 0x17ea, 0x17eb,
+ 0x17ec, 0x17ed, 0x2540, 0x2541, 0x2542, 0x2543, 0x2544, 0x2545,
+ 0x2546, 0x2547, 0x2548, 0x2549, 0x254a, 0x254b, 0x254c, 0x254d,
+ 0x254e, 0x254f, 0x2550, 0x2551, 0x2552, 0x2553, 0x2554, 0x2555,
+ 0x2628, 0x2766, 0x2767, 0x2768, 0x2769, 0x276a, 0x276b, 0x276c,
+ 0x276d, 0x276e, 0x276f, 0x2770, 0x2771, 0x2772, 0x2773, 0x2774,
+ 0x2775, 0x2776, 0x2777, 0x2778, 0x2779, 0x277a, 0x277b, 0x277c,
+ 0x277d, 0x3503, 0x3544, 0x3545, 0x3546, 0x3547, 0x3560, 0x3561,
+ 0x3562, 0x3563, 0x3564, 0x3565, 0x3566, 0x3567, 0x3568, 0x3569,
+ 0x356a, 0x356b, 0x356c, 0x356d, 0x356e, 0x356f, 0x3570, 0x3571,
+ 0x3572, 0x3573, 0x3574, 0x3575, 0x03f0, 0x103d, 0x103e, 0x103f,
+ 0x1040, 0x1041, 0x1042, 0x1043, 0x1044, 0x1045, 0x1046, 0x1047,
+ 0x1048, 0x1049, 0x104a, 0x104b, 0x104c, 0x104d, 0x104e, 0x104f,
+ 0x1050, 0x1051, 0x1052, 0x1053, 0x1054, 0x1055, 0x1056, 0x1057,
+ 0x1058, 0x1059, 0x105a, 0x105b, 0x105c, 0x105d, 0x105e, 0x105f,
+ 0x1060, 0x1061, 0x1270, 0x1271, 0x21b8, 0x21b9, 0x21ba, 0x21bb,
+ 0x21bc, 0x21bd, 0x21be, 0x21bf, 0x21f0, 0x21f1, 0x21f2, 0x21f3,
+ 0x21f4, 0x21f5, 0x21f6, 0x21f7, 0x21f8, 0x21f9, 0x21fa, 0x21fb,
+ 0x21fc, 0x21fd, 0x21fe, 0x21ff, 0x2340, 0x2341, 0x2342, 0x2343,
+ 0x2344, 0x2345, 0x2346, 0x2347, 0x2348, 0x2349, 0x234a, 0x234b,
+ 0x234c, 0x234d, 0x234e, 0x234f, 0x2350, 0x2351, 0x2352, 0x2353,
+ 0x2354, 0x2355, 0x2356, 0x2357, 0x265c, 0x2f88, 0x2f89, 0x2f8a,
+ 0x2f8b, 0x2f8c, 0x2f8d, 0x2f8e, 0x2f8f, 0x2f90, 0x2f91, 0x2f92,
+ 0x2f93, 0x2f94, 0x2f95, 0x2f96, 0x2f97, 0x2f98, 0x2f99, 0x2f9a,
+ 0x2f9b, 0x2f9c, 0x2f9d, 0x2f9e, 0x2f9f, 0x2fa0, 0x2fa1, 0x2fa2,
+ 0x2fa3, 0x2fa4, 0x2fa5, 0x2fa6, 0x2fa7, 0x2fa8, 0x2fa9, 0x2faa,
+ 0x2fab, 0x2fac, 0x2fad, 0x2fae, 0x2faf, 0x2fb0, 0x2fb1, 0x2fb2,
+ 0x2fb3, 0x2fb4, 0x2fb5, 0x2fb6, 0x2fb7, 0x2fb8, 0x2fb9, 0x2fba,
+ 0x2fbb, 0x4c52, 0x4c53, 0x4e28, 0x4e29, 0x4e2a, 0x4e2b, 0x4e2c,
+ 0x4e2d, 0x4e2e, 0x4e2f, 0x4e30, 0x4e31, 0x4e32, 0x4e33, 0x4e34,
+ 0x4e35, 0x4e36, 0x4e37, 0x4e38, 0x4e39, 0x4e3a, 0x4e3b, 0x4e3c,
+ 0x4e3d, 0x4e3e, 0x4e3f, 0x4e80, 0x4e81, 0x4e82, 0x4e83, 0x4e84,
+ 0x4e85, 0x4e86, 0x4e87, 0x4e88, 0x4e89, 0x4e8a, 0x4e8b, 0x4e8c,
+ 0x4e8d, 0x4e8e, 0x4e8f, 0x4e90, 0x4e91, 0x4e92, 0x4e93, 0x4e94,
+ 0x4e95, 0x4e96, 0x4e97, 0x4e98, 0x4e99, 0x4e9a, 0x4e9b, 0x4e9c,
+ 0x4e9d, 0x4e9e, 0x4e9f, 0x4ea0, 0x4ea1, 0x4ea2, 0x4ea3, 0x4ea4,
+ 0x4ea5, 0x4ea6, 0x4ea7, 0x4ea8, 0x4ea9, 0x4eaa, 0x4eab, 0x4eac,
+ 0x4ead, 0x4eae, 0x4eaf, 0x4eb0, 0x4eb1, 0x4eb2, 0x4eb3, 0x4eb4,
+ 0x4eb5, 0x4eb6, 0x4eb7, 0x4eb8, 0x4eb9, 0x4eba, 0x4ebb, 0x4ebc,
+ 0x4ebd, 0x4ebe, 0x4ebf, 0x4ec0, 0x4ec1, 0x4ec2, 0x4ec3, 0x4ec4,
+ 0x4ec5, 0x4ec6, 0x4ec7, 0x4ec8, 0x4ec9, 0x4eca, 0x4ecb, 0x6a04,
+ 0x6a05, 0x07e2, 0x07e3, 0x07e4, 0x07e5, 0x07e6, 0x07e7, 0x07e8,
+ 0x07e9, 0x07ea, 0x07eb, 0x07ec, 0x07ed, 0x07ee, 0x07ef, 0x07f0,
+ 0x07f1, 0x07f2, 0x07f3, 0x07f4, 0x07f5, 0x07f6, 0x07f7, 0x07f8,
+ 0x07f9, 0x07fa, 0x07fb, 0x07fc, 0x07fd, 0x07fe, 0x07ff, 0x2000,
+ 0x2001, 0x2002, 0x2003, 0x2004, 0x2005, 0x2006, 0x2007, 0x2008,
+ 0x2009, 0x200a, 0x200b, 0x200c, 0x200d, 0x200e, 0x200f, 0x2010,
+ 0x2011, 0x2012, 0x2013, 0x2014, 0x2015, 0x2016, 0x2017, 0x2018,
+ 0x2019, 0x201a, 0x201b, 0x201c, 0x201d, 0x201e, 0x201f, 0x2020,
+ 0x2021, 0x2022, 0x2023, 0x2024, 0x2025, 0x2026, 0x2027, 0x2028,
+ 0x2029, 0x202a, 0x202b, 0x202c, 0x202d, 0x202e, 0x202f, 0x2030,
+ 0x2031, 0x2032, 0x2033, 0x2034, 0x2035, 0x2036, 0x2037, 0x2038,
+ 0x2039, 0x203a, 0x203b, 0x203c, 0x203d, 0x203e, 0x203f, 0x2040,
+ 0x2041, 0x2042, 0x2043, 0x2044, 0x2045, 0x2046, 0x2047, 0x2048,
+ 0x2049, 0x204a, 0x204b, 0x204c, 0x204d, 0x204e, 0x204f, 0x2050,
+ 0x2051, 0x2052, 0x2053, 0x2054, 0x2055, 0x2056, 0x2057, 0x2058,
+ 0x2059, 0x205a, 0x205b, 0x205c, 0x205d, 0x205e, 0x205f, 0x2060,
+ 0x2061, 0x2062, 0x2063, 0x2064, 0x2065, 0x2066, 0x2067, 0x2068,
+ 0x2069, 0x206a, 0x206b, 0x206c, 0x206d, 0x206e, 0x206f, 0x2070,
+ 0x2071, 0x2072, 0x2073, 0x2074, 0x2075, 0x2076, 0x2077, 0x2078,
+ 0x2079, 0x4cba, 0x4cbb, 0x5d88, 0x5d89, 0x5d8a, 0x5d8b, 0x5d8c,
+ 0x5d8d, 0x5d8e, 0x5d8f, 0x5db0, 0x5db1, 0x5db2, 0x5db3, 0x5db4,
+ 0x5db5, 0x5db6, 0x5db7, 0x5db8, 0x5db9, 0x5dba, 0x5dbb, 0x5dbc,
+ 0x5dbd, 0x5dbe, 0x5dbf, 0x5e40, 0x5e41, 0x5e42, 0x5e43, 0x5e44,
+ 0x5e45, 0x5e46, 0x5e47, 0x5e48, 0x5e49, 0x5e4a, 0x5e4b, 0x5e4c,
+ 0x5e4d, 0x5e4e, 0x5e4f, 0x5e50, 0x5e51, 0x5e52, 0x5e53, 0x5e54,
+ 0x5e55, 0x5e56, 0x5e57, 0x5e58, 0x5e59, 0x5e5a, 0x5e5b, 0x5e5c,
+ 0x5e5d, 0x5e5e, 0x5e5f, 0x5e60, 0x5e61, 0x5e62, 0x5e63, 0x5e64,
+ 0x5e65, 0x5e66, 0x5e67, 0x5e68, 0x5e69, 0x5e6a, 0x5e6b, 0x5e6c,
+ 0x5e6d, 0x5e6e, 0x5e6f, 0x5e70, 0x5e71, 0x5e72, 0x5e73, 0x5e74,
+ 0x5e75, 0x5e76, 0x5e77, 0x5e78, 0x5e79, 0x5e7a, 0x5e7b, 0x5e7c,
+ 0x5e7d, 0x5e7e, 0x5e7f, 0x5e80, 0x5e81, 0x5e82, 0x5e83, 0x5e84,
+ 0x5e85, 0x5e86, 0x5e87, 0x5e88, 0x5e89, 0x5e8a, 0x5e8b, 0x5e8c,
+ 0x5e8d, 0x5e8e, 0x5e8f, 0x5e90, 0x5e91, 0x5e92, 0x5e93, 0x5e94,
+ 0x5e95, 0x5e96, 0x5e97, 0x5e98, 0x5e99, 0x5e9a, 0x5e9b, 0x5e9c,
+ 0x5e9d, 0x5e9e, 0x5e9f, 0x5ea0, 0x5ea1, 0x5ea2, 0x5ea3, 0x5ea4,
+ 0x5ea5, 0x5ea6, 0x5ea7, 0x5ea8, 0x5ea9, 0x5eaa, 0x5eab, 0x5eac,
+ 0x5ead, 0x5eae, 0x5eaf, 0x5eb0, 0x5eb1, 0x5eb2, 0x5eb3, 0x5eb4,
+ 0x5eb5, 0x5eb6, 0x5eb7, 0x5eb8, 0x5eb9, 0x5eba, 0x5ebb, 0x5ebc,
+ 0x5ebd, 0x5ebe, 0x5ebf, 0x5ec0, 0x5ec1, 0x5ec2, 0x5ec3, 0x5ec4,
+ 0x5ec5, 0x5ec6, 0x5ec7, 0x5ec8, 0x5ec9, 0x5eca, 0x5ecb, 0x5ecc,
+ 0x5ecd, 0x5ece, 0x5ecf, 0x5ed0, 0x5ed1, 0x5ed2, 0x5ed3, 0x5ed4,
+ 0x5ed5, 0x5ed6, 0x5ed7, 0x5ed8, 0x5ed9, 0x5eda, 0x5edb, 0x5edc,
+ 0x5edd, 0x5ede, 0x5edf, 0x5ee0, 0x5ee1, 0x5ee2, 0x5ee3, 0x5ee4,
+ 0x5ee5, 0x5ee6, 0x5ee7, 0x5ee8, 0x5ee9, 0x5eea, 0x5eeb, 0x5eec,
+ 0x5eed, 0x5eee, 0x5eef, 0x5ef0, 0x5ef1, 0x5ef2, 0x5ef3, 0x5ef4,
+ 0x5ef5, 0x5ef6, 0x5ef7, 0x5ef8, 0x5ef9, 0x5efa, 0x5efb, 0x5efc,
+ 0x5efd, 0x5efe, 0x5eff, 0x5f00, 0x5f01, 0x5f02, 0x5f03, 0x5f04,
+ 0x5f05, 0x5f06, 0x5f07, 0x5f08, 0x5f09, 0x5f0a, 0x5f0b, 0x5f0c,
+ 0x5f0d, 0x5f0e, 0x5f0f, 0x0000,
+};
+
+static const uint8_t table0_mv_bits[1100] = {
+ 1, 4, 4, 4, 5, 5, 5, 6,
+ 6, 6, 7, 7, 7, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 8,
+};
+
+static const uint8_t table0_mvx[1099] = {
+ 32, 32, 31, 32, 33, 31, 33, 31,
+ 33, 32, 34, 32, 30, 32, 31, 34,
+ 35, 32, 34, 33, 29, 33, 30, 30,
+ 31, 31, 35, 29, 33, 35, 33, 34,
+ 31, 29, 30, 34, 30, 36, 28, 32,
+ 34, 37, 30, 27, 32, 25, 39, 32,
+ 34, 32, 35, 35, 35, 31, 35, 29,
+ 32, 29, 30, 29, 37, 27, 36, 38,
+ 37, 33, 32, 31, 29, 31, 28, 36,
+ 33, 30, 34, 33, 33, 28, 27, 25,
+ 31, 26, 39, 32, 32, 31, 33, 39,
+ 31, 38, 28, 36, 21, 23, 43, 36,
+ 34, 41, 30, 25, 28, 31, 30, 34,
+ 38, 35, 61, 34, 28, 30, 37, 37,
+ 35, 27, 36, 3, 59, 38, 37, 32,
+ 31, 29, 26, 33, 37, 33, 27, 27,
+ 35, 34, 34, 40, 42, 33, 32, 29,
+ 4, 5, 28, 24, 25, 35, 39, 38,
+ 32, 23, 27, 32, 30, 35, 26, 34,
+ 60, 36, 29, 22, 26, 41, 7, 30,
+ 38, 30, 36, 29, 30, 41, 26, 25,
+ 32, 34, 24, 39, 1, 25, 39, 32,
+ 28, 29, 32, 38, 26, 36, 28, 63,
+ 28, 39, 23, 21, 26, 35, 31, 35,
+ 57, 31, 29, 29, 28, 30, 27, 35,
+ 2, 38, 40, 34, 37, 29, 38, 43,
+ 26, 32, 33, 42, 24, 40, 28, 32,
+ 32, 32, 36, 32, 43, 25, 21, 31,
+ 30, 31, 41, 29, 33, 37, 26, 37,
+ 27, 59, 23, 33, 35, 31, 31, 37,
+ 38, 39, 32, 23, 32, 27, 37, 36,
+ 31, 40, 25, 27, 38, 31, 36, 28,
+ 31, 36, 25, 45, 3, 34, 38, 39,
+ 40, 38, 30, 32, 19, 24, 25, 26,
+ 45, 20, 24, 33, 33, 31, 41, 34,
+ 39, 47, 40, 58, 59, 41, 33, 3,
+ 17, 61, 42, 30, 26, 29, 36, 61,
+ 33, 37, 62, 28, 25, 38, 25, 38,
+ 17, 23, 34, 33, 21, 33, 49, 27,
+ 32, 23, 27, 22, 24, 22, 39, 43,
+ 27, 37, 6, 42, 47, 26, 30, 31,
+ 41, 39, 33, 22, 45, 36, 32, 45,
+ 19, 22, 30, 5, 5, 17, 29, 22,
+ 31, 31, 43, 37, 27, 32, 32, 32,
+ 33, 34, 43, 35, 29, 26, 22, 32,
+ 19, 32, 25, 31, 41, 49, 28, 34,
+ 28, 39, 34, 19, 37, 38, 29, 21,
+ 36, 42, 24, 48, 16, 28, 49, 22,
+ 34, 31, 38, 39, 44, 11, 35, 30,
+ 33, 33, 23, 28, 33, 46, 15, 13,
+ 24, 41, 24, 34, 34, 30, 26, 24,
+ 14, 60, 21, 29, 39, 23, 35, 37,
+ 63, 45, 33, 34, 47, 41, 22, 42,
+ 35, 35, 23, 32, 35, 43, 32, 7,
+ 31, 41, 20, 31, 16, 13, 63, 25,
+ 30, 32, 35, 30, 30, 31, 42, 47,
+ 39, 38, 40, 40, 51, 55, 56, 18,
+ 21, 39, 39, 33, 17, 41, 23, 24,
+ 43, 25, 31, 20, 19, 45, 1, 34,
+ 31, 22, 35, 15, 46, 46, 35, 31,
+ 28, 29, 29, 23, 41, 27, 14, 53,
+ 53, 27, 24, 32, 57, 32, 17, 42,
+ 37, 29, 33, 1, 25, 32, 32, 63,
+ 26, 40, 44, 36, 31, 39, 20, 20,
+ 44, 23, 33, 34, 35, 33, 33, 28,
+ 41, 23, 41, 41, 29, 25, 26, 49,
+ 29, 24, 37, 49, 50, 51, 51, 26,
+ 39, 25, 26, 15, 39, 18, 42, 17,
+ 4, 31, 32, 32, 60, 1, 42, 32,
+ 0, 12, 19, 35, 21, 41, 17, 26,
+ 20, 45, 46, 32, 37, 22, 47, 29,
+ 31, 27, 29, 30, 21, 33, 35, 18,
+ 25, 33, 50, 51, 42, 2, 15, 51,
+ 53, 33, 25, 29, 55, 37, 38, 33,
+ 38, 59, 38, 33, 39, 13, 32, 40,
+ 61, 61, 32, 9, 44, 3, 31, 29,
+ 25, 31, 27, 23, 9, 25, 9, 29,
+ 20, 30, 30, 42, 18, 28, 25, 28,
+ 28, 21, 29, 43, 29, 43, 26, 44,
+ 44, 21, 38, 21, 24, 45, 45, 35,
+ 39, 22, 35, 36, 34, 34, 45, 34,
+ 29, 31, 46, 25, 46, 16, 17, 31,
+ 20, 32, 47, 47, 47, 32, 49, 49,
+ 49, 31, 1, 27, 28, 39, 39, 21,
+ 36, 23, 51, 2, 40, 51, 32, 53,
+ 24, 30, 24, 30, 21, 40, 57, 57,
+ 31, 41, 58, 32, 12, 4, 32, 34,
+ 59, 31, 32, 13, 9, 35, 26, 35,
+ 37, 61, 37, 63, 26, 29, 41, 38,
+ 23, 20, 41, 26, 41, 42, 42, 42,
+ 26, 26, 26, 26, 1, 26, 37, 37,
+ 37, 23, 34, 42, 27, 43, 34, 27,
+ 31, 24, 33, 16, 3, 31, 24, 33,
+ 24, 4, 44, 44, 11, 44, 31, 13,
+ 13, 44, 45, 13, 25, 22, 38, 26,
+ 38, 38, 39, 32, 30, 39, 30, 22,
+ 32, 26, 30, 47, 47, 47, 19, 47,
+ 30, 31, 35, 8, 23, 47, 47, 27,
+ 35, 47, 31, 48, 35, 19, 36, 49,
+ 49, 33, 31, 39, 27, 39, 49, 49,
+ 50, 50, 50, 39, 31, 51, 51, 39,
+ 28, 33, 33, 21, 40, 31, 52, 53,
+ 40, 53, 9, 33, 31, 53, 54, 54,
+ 54, 55, 55, 34, 15, 56, 25, 56,
+ 21, 21, 40, 40, 25, 40, 58, 36,
+ 5, 41, 41, 12, 60, 41, 41, 37,
+ 22, 61, 18, 29, 29, 30, 61, 30,
+ 61, 62, 62, 30, 30, 63, 18, 13,
+ 30, 23, 19, 20, 20, 41, 13, 2,
+ 5, 5, 1, 5, 32, 6, 32, 35,
+ 20, 35, 27, 35, 35, 36, 36, 13,
+ 36, 41, 41, 41, 3, 30, 42, 27,
+ 20, 30, 27, 28, 30, 21, 33, 33,
+ 14, 24, 30, 42, 24, 33, 25, 42,
+ 43, 14, 43, 43, 14, 43, 7, 36,
+ 37, 37, 37, 37, 7, 14, 25, 43,
+ 43, 44, 15, 37, 7, 7, 3, 1,
+ 8, 15, 15, 8, 44, 44, 44, 45,
+ 45, 45, 45, 8, 8, 45, 21, 45,
+ 28, 28, 28, 21, 28, 28, 22, 37,
+ 46, 46, 37, 8, 29, 37, 29, 22,
+ 46, 37, 22, 29, 47, 47, 38, 38,
+ 16, 38, 38, 33, 38, 22, 47, 47,
+ 29, 25, 16, 0, 48, 1, 34, 48,
+ 48, 34, 25, 26, 26, 49, 49, 26,
+ 1, 49, 4, 26, 4, 49, 1, 9,
+ 49, 49, 49, 10, 49, 17, 38, 17,
+ 17, 50, 38, 50, 50, 22, 38, 51,
+ 38, 38, 51, 39, 39, 18, 22, 39,
+ 51, 22, 52, 52, 52, 39, 53, 53,
+ 10, 23, 18, 29, 10, 53, 29, 54,
+ 11, 54, 11, 11, 55, 1, 18, 55,
+ 55, 55, 55, 55, 55, 29, 34, 18,
+ 29, 56, 56, 34, 57, 34, 34, 29,
+ 29, 57, 57, 35, 35, 35, 35, 35,
+ 39, 35, 59, 59, 18, 59, 39, 30,
+ 18, 40, 60, 60, 61, 30, 18, 61,
+ 61, 19, 19,
+};
+
+static const uint8_t table0_mvy[1099] = {
+ 32, 31, 32, 33, 32, 31, 31, 33,
+ 33, 34, 32, 30, 32, 35, 34, 31,
+ 32, 29, 33, 30, 32, 34, 33, 31,
+ 30, 35, 31, 31, 29, 33, 35, 30,
+ 29, 33, 34, 34, 30, 32, 32, 36,
+ 29, 32, 35, 32, 28, 32, 32, 27,
+ 35, 37, 34, 29, 30, 36, 35, 34,
+ 25, 30, 29, 35, 33, 31, 31, 32,
+ 31, 28, 39, 28, 29, 37, 31, 33,
+ 27, 36, 28, 36, 37, 33, 33, 31,
+ 27, 32, 31, 38, 26, 25, 25, 33,
+ 39, 31, 34, 30, 32, 32, 32, 34,
+ 36, 32, 28, 33, 30, 38, 37, 27,
+ 33, 28, 32, 37, 35, 38, 29, 34,
+ 27, 29, 29, 32, 32, 34, 35, 3,
+ 26, 36, 31, 38, 30, 26, 35, 34,
+ 37, 26, 25, 32, 32, 39, 23, 37,
+ 32, 32, 29, 32, 29, 36, 29, 30,
+ 41, 31, 30, 21, 39, 25, 34, 38,
+ 32, 35, 39, 32, 33, 33, 32, 27,
+ 29, 25, 28, 27, 26, 31, 30, 35,
+ 24, 24, 31, 34, 32, 30, 35, 40,
+ 28, 38, 5, 35, 29, 36, 36, 32,
+ 38, 30, 33, 31, 35, 26, 23, 38,
+ 32, 41, 28, 25, 37, 40, 37, 39,
+ 32, 36, 33, 39, 25, 26, 28, 31,
+ 28, 42, 23, 31, 33, 31, 39, 1,
+ 59, 22, 27, 4, 33, 34, 33, 24,
+ 41, 3, 35, 41, 41, 28, 36, 36,
+ 28, 33, 35, 21, 23, 21, 22, 37,
+ 27, 27, 43, 29, 60, 39, 27, 25,
+ 59, 34, 27, 27, 26, 40, 37, 27,
+ 61, 26, 39, 33, 31, 22, 37, 25,
+ 30, 25, 24, 61, 31, 34, 25, 38,
+ 32, 32, 30, 3, 61, 43, 29, 23,
+ 28, 32, 28, 32, 31, 34, 5, 33,
+ 32, 33, 33, 42, 37, 23, 38, 31,
+ 40, 26, 32, 26, 37, 38, 36, 24,
+ 29, 30, 20, 22, 29, 24, 32, 41,
+ 2, 34, 25, 33, 29, 31, 39, 35,
+ 36, 24, 32, 30, 33, 27, 44, 60,
+ 30, 36, 19, 34, 31, 24, 16, 35,
+ 32, 38, 21, 33, 31, 31, 21, 35,
+ 5, 17, 29, 38, 38, 18, 58, 19,
+ 43, 41, 30, 41, 43, 39, 29, 7,
+ 29, 17, 28, 19, 28, 31, 25, 19,
+ 40, 26, 21, 33, 39, 23, 40, 30,
+ 39, 34, 35, 32, 32, 24, 33, 30,
+ 40, 47, 39, 37, 32, 33, 24, 23,
+ 45, 47, 27, 23, 42, 32, 32, 33,
+ 36, 37, 37, 17, 18, 22, 40, 38,
+ 32, 31, 35, 24, 17, 25, 17, 23,
+ 33, 34, 51, 42, 31, 36, 36, 29,
+ 21, 22, 37, 44, 43, 25, 47, 33,
+ 45, 27, 31, 58, 31, 32, 31, 38,
+ 43, 20, 47, 45, 54, 1, 26, 34,
+ 38, 14, 22, 24, 33, 34, 32, 32,
+ 37, 21, 23, 49, 35, 23, 28, 39,
+ 39, 23, 55, 33, 30, 30, 63, 16,
+ 42, 28, 13, 33, 33, 35, 19, 46,
+ 43, 17, 19, 36, 39, 24, 31, 32,
+ 33, 26, 28, 62, 33, 63, 33, 39,
+ 19, 49, 17, 31, 43, 13, 15, 29,
+ 25, 35, 33, 23, 49, 41, 28, 29,
+ 34, 38, 7, 61, 11, 50, 13, 41,
+ 19, 47, 25, 26, 15, 42, 41, 29,
+ 45, 27, 17, 35, 32, 29, 32, 24,
+ 13, 26, 26, 31, 24, 33, 28, 30,
+ 31, 11, 45, 46, 33, 33, 35, 57,
+ 32, 32, 35, 45, 34, 11, 37, 42,
+ 39, 37, 31, 49, 21, 27, 29, 47,
+ 53, 40, 51, 16, 26, 1, 40, 30,
+ 41, 44, 34, 25, 27, 31, 35, 35,
+ 31, 15, 49, 1, 35, 40, 5, 58,
+ 21, 29, 22, 59, 45, 31, 9, 26,
+ 9, 29, 11, 32, 30, 3, 13, 20,
+ 18, 20, 11, 3, 29, 40, 31, 53,
+ 30, 17, 20, 37, 31, 42, 47, 47,
+ 54, 38, 9, 34, 13, 37, 21, 25,
+ 27, 43, 42, 45, 40, 25, 27, 46,
+ 22, 25, 53, 20, 2, 14, 39, 15,
+ 22, 44, 34, 21, 38, 33, 27, 48,
+ 34, 52, 35, 47, 49, 54, 2, 13,
+ 23, 52, 29, 45, 22, 49, 54, 21,
+ 40, 42, 31, 30, 29, 34, 0, 25,
+ 23, 51, 24, 59, 28, 38, 29, 31,
+ 2, 13, 31, 8, 31, 33, 12, 45,
+ 41, 7, 14, 30, 25, 18, 43, 20,
+ 43, 35, 44, 1, 49, 42, 42, 18,
+ 41, 38, 41, 44, 53, 11, 20, 25,
+ 45, 46, 47, 48, 39, 52, 46, 49,
+ 63, 55, 44, 38, 13, 13, 57, 22,
+ 51, 16, 12, 28, 35, 57, 25, 20,
+ 26, 28, 28, 29, 32, 31, 62, 34,
+ 35, 35, 19, 49, 48, 39, 40, 18,
+ 43, 46, 11, 6, 48, 19, 49, 41,
+ 10, 23, 58, 17, 21, 23, 34, 30,
+ 60, 0, 44, 34, 26, 37, 46, 43,
+ 49, 59, 4, 34, 59, 37, 22, 25,
+ 28, 46, 6, 40, 59, 42, 36, 61,
+ 28, 30, 31, 43, 10, 22, 23, 47,
+ 20, 52, 55, 36, 25, 16, 1, 11,
+ 27, 29, 5, 63, 18, 41, 31, 34,
+ 38, 1, 5, 13, 28, 31, 17, 38,
+ 39, 41, 36, 37, 22, 39, 33, 43,
+ 43, 15, 17, 49, 30, 21, 22, 20,
+ 10, 17, 25, 54, 57, 3, 34, 8,
+ 36, 25, 31, 14, 15, 19, 29, 25,
+ 18, 39, 53, 22, 27, 20, 29, 33,
+ 41, 42, 35, 62, 50, 29, 53, 50,
+ 35, 55, 42, 61, 63, 4, 7, 42,
+ 21, 46, 47, 49, 27, 46, 17, 55,
+ 41, 50, 63, 4, 56, 18, 8, 10,
+ 18, 51, 63, 36, 55, 18, 5, 55,
+ 9, 29, 17, 21, 30, 27, 1, 59,
+ 7, 11, 12, 15, 5, 42, 24, 41,
+ 43, 7, 27, 22, 25, 31, 30, 37,
+ 22, 39, 53, 29, 36, 37, 48, 0,
+ 5, 13, 17, 31, 32, 26, 46, 28,
+ 44, 45, 46, 53, 49, 51, 3, 41,
+ 3, 22, 42, 33, 5, 45, 7, 22,
+ 40, 53, 24, 14, 25, 27, 10, 12,
+ 34, 16, 17, 53, 20, 26, 39, 45,
+ 18, 45, 35, 33, 31, 49, 4, 39,
+ 42, 11, 51, 5, 13, 26, 27, 17,
+ 52, 30, 0, 22, 12, 34, 62, 36,
+ 38, 41, 47, 30, 63, 38, 41, 43,
+ 59, 33, 45, 37, 38, 40, 47, 24,
+ 48, 49, 30, 1, 10, 22, 49, 15,
+ 39, 59, 31, 32, 33, 18, 13, 15,
+ 31, 21, 27, 44, 42, 39, 46, 17,
+ 26, 32, 30, 31, 0, 30, 34, 9,
+ 12, 13, 25, 31, 32, 55, 43, 35,
+ 61, 33, 35, 46, 25, 47, 48, 62,
+ 63, 38, 61, 1, 2, 5, 7, 9,
+ 46, 10, 34, 35, 36, 55, 51, 7,
+ 40, 23, 34, 37, 5, 13, 42, 18,
+ 25, 27, 28,
+};
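+
+/*
+ * Editorial note, a sketch not present in the upstream sources: the four
+ * arrays above form motion vector table 0: 1100 { code, bits } pairs, the
+ * last of which is the escape code, plus 1099 x/y values stored with a +32
+ * bias.  For a non-escape index i the differential motion vector components
+ * would be recovered roughly as follows (the actual reader lives in
+ * msmpeg4.c, which is not part of this hunk):
+ *
+ *     mx = table0_mvx[i] - 32;
+ *     my = table0_mvy[i] - 32;
+ */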
+
+/* motion vector table 1 */
+static const uint16_t table1_mv_code[1100] = {
+ 0x0000, 0x0007, 0x0009, 0x000f, 0x000a, 0x0011, 0x001a, 0x001c,
+ 0x0011, 0x0031, 0x0025, 0x002d, 0x002f, 0x006f, 0x0075, 0x0041,
+ 0x004c, 0x004e, 0x005c, 0x0060, 0x0062, 0x0066, 0x0068, 0x0069,
+ 0x006b, 0x00a6, 0x00c1, 0x00cb, 0x00cc, 0x00ce, 0x00da, 0x00e8,
+ 0x00ee, 0x0087, 0x0090, 0x009e, 0x009f, 0x00ba, 0x00ca, 0x00d8,
+ 0x00db, 0x00df, 0x0104, 0x0109, 0x010c, 0x0143, 0x0145, 0x014a,
+ 0x0156, 0x015c, 0x01b3, 0x01d3, 0x01da, 0x0103, 0x0109, 0x010b,
+ 0x0122, 0x0127, 0x0134, 0x0161, 0x0164, 0x0176, 0x0184, 0x018d,
+ 0x018e, 0x018f, 0x0190, 0x0193, 0x0196, 0x019d, 0x019e, 0x019f,
+ 0x01a9, 0x01b2, 0x01b4, 0x01ba, 0x01bb, 0x01bc, 0x0201, 0x0202,
+ 0x0205, 0x0207, 0x020d, 0x0210, 0x0211, 0x0215, 0x021b, 0x021f,
+ 0x0281, 0x0285, 0x0290, 0x029c, 0x029d, 0x02a2, 0x02a7, 0x02a8,
+ 0x02aa, 0x02b0, 0x02b1, 0x02b4, 0x02bc, 0x02bf, 0x0320, 0x0326,
+ 0x0327, 0x0329, 0x032a, 0x0336, 0x0360, 0x0362, 0x0363, 0x0372,
+ 0x03b2, 0x03bc, 0x03bd, 0x0203, 0x0205, 0x021a, 0x0249, 0x024a,
+ 0x024c, 0x02c7, 0x02ca, 0x02ce, 0x02ef, 0x030d, 0x0322, 0x0325,
+ 0x0338, 0x0373, 0x037a, 0x0409, 0x0415, 0x0416, 0x0418, 0x0428,
+ 0x042d, 0x042f, 0x0434, 0x0508, 0x0509, 0x0510, 0x0511, 0x051c,
+ 0x051e, 0x0524, 0x0541, 0x0543, 0x0546, 0x0547, 0x054d, 0x0557,
+ 0x055f, 0x056a, 0x056c, 0x056d, 0x056f, 0x0576, 0x0577, 0x057a,
+ 0x057b, 0x057c, 0x057d, 0x0600, 0x0601, 0x0603, 0x0614, 0x0616,
+ 0x0617, 0x061c, 0x061f, 0x0642, 0x0648, 0x0649, 0x064a, 0x064b,
+ 0x0657, 0x0668, 0x0669, 0x066b, 0x066e, 0x067f, 0x06c2, 0x06c8,
+ 0x06cb, 0x06de, 0x06df, 0x06e2, 0x06e3, 0x06ef, 0x0748, 0x074b,
+ 0x076e, 0x076f, 0x077c, 0x0409, 0x0423, 0x0428, 0x0429, 0x042a,
+ 0x042b, 0x0432, 0x0433, 0x0496, 0x049a, 0x04d5, 0x04db, 0x0581,
+ 0x0582, 0x058b, 0x058c, 0x058d, 0x0598, 0x0599, 0x059a, 0x059e,
+ 0x05dd, 0x0619, 0x0632, 0x0633, 0x0648, 0x0672, 0x06a1, 0x06a2,
+ 0x06a3, 0x06af, 0x06e2, 0x06e3, 0x06e4, 0x0800, 0x0801, 0x0802,
+ 0x0803, 0x081a, 0x081b, 0x0829, 0x082f, 0x0832, 0x083e, 0x083f,
+ 0x0852, 0x0853, 0x0858, 0x086b, 0x0877, 0x0878, 0x0879, 0x087a,
+ 0x087b, 0x0a00, 0x0a01, 0x0a0d, 0x0a0e, 0x0a0f, 0x0a24, 0x0a37,
+ 0x0a3a, 0x0a3b, 0x0a3e, 0x0a46, 0x0a47, 0x0a4a, 0x0a4b, 0x0a5f,
+ 0x0a79, 0x0a7a, 0x0a7b, 0x0a80, 0x0a81, 0x0a84, 0x0a85, 0x0a99,
+ 0x0aa5, 0x0aa6, 0x0ab8, 0x0aba, 0x0abb, 0x0abc, 0x0abd, 0x0ac8,
+ 0x0ace, 0x0acf, 0x0ad7, 0x0adc, 0x0aeb, 0x0c04, 0x0c25, 0x0c26,
+ 0x0c27, 0x0c2a, 0x0c2b, 0x0c3a, 0x0c3b, 0x0c3c, 0x0c3d, 0x0ca0,
+ 0x0cad, 0x0cd4, 0x0cd5, 0x0cfc, 0x0cfd, 0x0d86, 0x0d92, 0x0d93,
+ 0x0d94, 0x0d95, 0x0db0, 0x0db8, 0x0db9, 0x0dba, 0x0dbb, 0x0dc0,
+ 0x0dc2, 0x0dc3, 0x0dda, 0x0ddb, 0x0ddc, 0x0ddd, 0x0e92, 0x0e93,
+ 0x0e94, 0x0e95, 0x0ec7, 0x0ecc, 0x0ece, 0x0ecf, 0x0ed8, 0x0ed9,
+ 0x0eda, 0x0edb, 0x0808, 0x0809, 0x080a, 0x0810, 0x0811, 0x0844,
+ 0x0845, 0x0861, 0x0862, 0x0863, 0x086c, 0x0922, 0x0923, 0x092e,
+ 0x092f, 0x0936, 0x0937, 0x09b1, 0x09b2, 0x09b3, 0x09b4, 0x09b5,
+ 0x09b8, 0x09b9, 0x09ba, 0x09bb, 0x09bc, 0x09bd, 0x09be, 0x09bf,
+ 0x0b00, 0x0b15, 0x0b2c, 0x0b2d, 0x0b2e, 0x0b2f, 0x0b36, 0x0bb9,
+ 0x0c28, 0x0c2a, 0x0c2b, 0x0c2c, 0x0c2d, 0x0c2e, 0x0c2f, 0x0c30,
+ 0x0c31, 0x0c38, 0x0c60, 0x0c61, 0x0c62, 0x0c63, 0x0c8d, 0x0c8e,
+ 0x0c8f, 0x0c92, 0x0cbe, 0x0cbf, 0x0ce6, 0x0ce7, 0x0d40, 0x0d41,
+ 0x0d57, 0x0d58, 0x0d59, 0x0d5a, 0x0d5b, 0x0d5c, 0x0d5d, 0x0d98,
+ 0x0d99, 0x0d9a, 0x0d9b, 0x0d9c, 0x0d9d, 0x0dad, 0x0dae, 0x0daf,
+ 0x0dc0, 0x0dc1, 0x0dc2, 0x0dc3, 0x0dca, 0x0dcb, 0x0dec, 0x0ded,
+ 0x0dee, 0x0def, 0x1018, 0x1022, 0x1023, 0x1030, 0x1031, 0x1032,
+ 0x1033, 0x1050, 0x1051, 0x105c, 0x1074, 0x1075, 0x1076, 0x1077,
+ 0x1078, 0x1079, 0x107a, 0x107b, 0x10b2, 0x10b3, 0x10b8, 0x10b9,
+ 0x10ba, 0x10bb, 0x10d4, 0x10ea, 0x10eb, 0x10ec, 0x10ed, 0x1404,
+ 0x1405, 0x1406, 0x1407, 0x1410, 0x1411, 0x1412, 0x1413, 0x1414,
+ 0x1415, 0x1416, 0x1417, 0x1418, 0x1419, 0x1466, 0x1467, 0x1468,
+ 0x1469, 0x146a, 0x146b, 0x146c, 0x146d, 0x147e, 0x147f, 0x1488,
+ 0x1489, 0x148a, 0x148b, 0x14b6, 0x14b7, 0x14b8, 0x14b9, 0x14ba,
+ 0x14bb, 0x14bc, 0x14bd, 0x14f0, 0x14f1, 0x14f8, 0x14f9, 0x14fa,
+ 0x14fb, 0x14fc, 0x14fd, 0x14fe, 0x14ff, 0x152a, 0x152b, 0x152c,
+ 0x152d, 0x152e, 0x152f, 0x1530, 0x1531, 0x1548, 0x1549, 0x154e,
+ 0x154f, 0x1558, 0x1559, 0x155a, 0x155b, 0x1572, 0x159a, 0x159b,
+ 0x15ac, 0x15ba, 0x15bb, 0x15d0, 0x15d1, 0x15d2, 0x15d3, 0x15d4,
+ 0x15d5, 0x181d, 0x181e, 0x181f, 0x1840, 0x1841, 0x1842, 0x1843,
+ 0x1844, 0x1845, 0x1846, 0x1847, 0x1848, 0x1849, 0x1861, 0x1862,
+ 0x1863, 0x1864, 0x1865, 0x1866, 0x1867, 0x1868, 0x1869, 0x186a,
+ 0x186b, 0x186c, 0x186d, 0x186e, 0x191b, 0x191c, 0x191d, 0x191e,
+ 0x191f, 0x1942, 0x1943, 0x1944, 0x1945, 0x1946, 0x1947, 0x1958,
+ 0x1959, 0x19ed, 0x19ee, 0x19ef, 0x19f0, 0x19f1, 0x19f2, 0x19f3,
+ 0x19f4, 0x19f5, 0x19f6, 0x19f7, 0x1b0e, 0x1b0f, 0x1b62, 0x1b63,
+ 0x1b64, 0x1b65, 0x1b66, 0x1b67, 0x1b68, 0x1b69, 0x1b6a, 0x1b6b,
+ 0x1b6c, 0x1b6d, 0x1b6e, 0x1b6f, 0x1b82, 0x1ba8, 0x1ba9, 0x1baa,
+ 0x1bab, 0x1bac, 0x1bad, 0x1bae, 0x1baf, 0x1bb0, 0x1bb1, 0x1bb2,
+ 0x1bb3, 0x1d80, 0x1d81, 0x1d82, 0x1d83, 0x1d84, 0x1d85, 0x1d86,
+ 0x1d87, 0x1d88, 0x1d89, 0x1d8a, 0x1d8b, 0x1d8c, 0x1d8d, 0x1007,
+ 0x1008, 0x1009, 0x100a, 0x100b, 0x100c, 0x100d, 0x100e, 0x100f,
+ 0x1016, 0x1080, 0x1081, 0x1082, 0x1083, 0x1084, 0x1085, 0x1086,
+ 0x1087, 0x10c0, 0x123a, 0x123b, 0x123c, 0x123d, 0x123e, 0x123f,
+ 0x1240, 0x1241, 0x1242, 0x1243, 0x1350, 0x1352, 0x1353, 0x1358,
+ 0x1359, 0x135a, 0x135b, 0x135c, 0x135d, 0x135e, 0x135f, 0x1360,
+ 0x1361, 0x1602, 0x1603, 0x160c, 0x160d, 0x160e, 0x160f, 0x1620,
+ 0x1621, 0x1622, 0x1623, 0x1624, 0x1625, 0x1626, 0x1627, 0x1628,
+ 0x1629, 0x166e, 0x166f, 0x167c, 0x167d, 0x167e, 0x167f, 0x1770,
+ 0x1771, 0x1852, 0x1853, 0x1872, 0x1873, 0x1874, 0x1875, 0x1876,
+ 0x1877, 0x1878, 0x1879, 0x187a, 0x187b, 0x187c, 0x187d, 0x187e,
+ 0x187f, 0x1918, 0x1919, 0x1926, 0x1927, 0x1970, 0x1971, 0x1972,
+ 0x1973, 0x1974, 0x1975, 0x1976, 0x1977, 0x1978, 0x1979, 0x197a,
+ 0x197b, 0x1aa0, 0x1aa1, 0x1aa2, 0x1aa3, 0x1aa4, 0x1aa5, 0x1aa6,
+ 0x1aa7, 0x1aa8, 0x1aa9, 0x1aaa, 0x1aab, 0x1aac, 0x1aad, 0x1b3c,
+ 0x1b3d, 0x1b3e, 0x1b3f, 0x1b50, 0x1b51, 0x1b52, 0x1b53, 0x1b54,
+ 0x1b55, 0x1b56, 0x1b57, 0x1b58, 0x1b59, 0x2032, 0x2033, 0x2034,
+ 0x2035, 0x2036, 0x2037, 0x2038, 0x2039, 0x203a, 0x203b, 0x203c,
+ 0x203d, 0x203e, 0x203f, 0x2040, 0x2041, 0x2042, 0x2043, 0x20ba,
+ 0x20bb, 0x20cc, 0x20cd, 0x20ce, 0x20cf, 0x20e0, 0x20e1, 0x20e2,
+ 0x20e3, 0x20e4, 0x20e5, 0x20e6, 0x20e7, 0x21aa, 0x21ab, 0x21c0,
+ 0x21c1, 0x21c2, 0x21c3, 0x21c4, 0x21c5, 0x21c6, 0x21c7, 0x21c8,
+ 0x21c9, 0x21ca, 0x21cb, 0x21cc, 0x21cd, 0x21ce, 0x21cf, 0x21d0,
+ 0x21d1, 0x21d2, 0x21d3, 0x2894, 0x2895, 0x2896, 0x2897, 0x2898,
+ 0x2899, 0x289a, 0x289b, 0x289c, 0x289d, 0x289e, 0x289f, 0x28c0,
+ 0x28c1, 0x28c2, 0x28c3, 0x28c4, 0x28c5, 0x28c6, 0x28c7, 0x28c8,
+ 0x28c9, 0x28ca, 0x28cb, 0x2930, 0x2931, 0x2932, 0x2933, 0x2934,
+ 0x2935, 0x2936, 0x2937, 0x2938, 0x2939, 0x293a, 0x293b, 0x293c,
+ 0x293d, 0x293e, 0x293f, 0x2960, 0x2961, 0x2962, 0x2963, 0x2964,
+ 0x2965, 0x2966, 0x2967, 0x2968, 0x2969, 0x296a, 0x296b, 0x2a40,
+ 0x2a41, 0x2a42, 0x2a43, 0x2a44, 0x2a45, 0x2a46, 0x2a47, 0x2a48,
+ 0x2a49, 0x2a4a, 0x2a4b, 0x2a4c, 0x2a4d, 0x2a4e, 0x2a4f, 0x2a50,
+ 0x2a51, 0x2a52, 0x2a53, 0x2ae6, 0x2ae7, 0x2b24, 0x2b25, 0x2b26,
+ 0x2b27, 0x2b28, 0x2b29, 0x2b2a, 0x2b2b, 0x2b2c, 0x2b2d, 0x2b2e,
+ 0x2b2f, 0x2b30, 0x2b31, 0x2b32, 0x2b33, 0x2b5a, 0x2b5b, 0x3014,
+ 0x3015, 0x3016, 0x3017, 0x3020, 0x3021, 0x3022, 0x3023, 0x3024,
+ 0x3025, 0x3026, 0x3027, 0x3028, 0x3029, 0x302a, 0x302b, 0x302c,
+ 0x302d, 0x302e, 0x302f, 0x3030, 0x3031, 0x3032, 0x3033, 0x3034,
+ 0x3035, 0x3036, 0x3037, 0x3038, 0x3039, 0x30c0, 0x30c1, 0x30de,
+ 0x30df, 0x3218, 0x3219, 0x321a, 0x321b, 0x321c, 0x321d, 0x321e,
+ 0x321f, 0x3220, 0x3221, 0x3222, 0x3223, 0x3224, 0x3225, 0x3226,
+ 0x3227, 0x3228, 0x3229, 0x322a, 0x322b, 0x322c, 0x322d, 0x322e,
+ 0x322f, 0x3230, 0x3231, 0x3232, 0x3233, 0x3234, 0x3235, 0x3378,
+ 0x3379, 0x337a, 0x337b, 0x337c, 0x337d, 0x337e, 0x337f, 0x33c0,
+ 0x33c1, 0x33c2, 0x33c3, 0x33c4, 0x33c5, 0x33c6, 0x33c7, 0x33c8,
+ 0x33c9, 0x33ca, 0x33cb, 0x33cc, 0x33cd, 0x33ce, 0x33cf, 0x33d0,
+ 0x33d1, 0x33d2, 0x33d3, 0x33d4, 0x33d5, 0x33d6, 0x33d7, 0x33d8,
+ 0x33d9, 0x3706, 0x3707, 0x3730, 0x3731, 0x3732, 0x3733, 0x3734,
+ 0x3735, 0x3736, 0x3737, 0x3738, 0x3739, 0x373a, 0x373b, 0x373c,
+ 0x373d, 0x373e, 0x373f, 0x3740, 0x3741, 0x3742, 0x3743, 0x3744,
+ 0x3745, 0x3746, 0x3747, 0x3748, 0x3749, 0x374a, 0x374b, 0x374c,
+ 0x374d, 0x374e, 0x374f, 0x3b34, 0x3b35, 0x3b36, 0x3b37, 0x3be8,
+ 0x3be9, 0x3bea, 0x3beb, 0x3bec, 0x3bed, 0x3bee, 0x3bef, 0x3bf0,
+ 0x3bf1, 0x3bf2, 0x3bf3, 0x3bf4, 0x3bf5, 0x3bf6, 0x3bf7, 0x3bf8,
+ 0x3bf9, 0x3bfa, 0x3bfb, 0x3bfc, 0x3bfd, 0x3bfe, 0x3bff, 0x2000,
+ 0x2001, 0x2002, 0x2003, 0x2004, 0x2005, 0x2006, 0x2007, 0x2008,
+ 0x2009, 0x200a, 0x200b, 0x200c, 0x200d, 0x202e, 0x202f, 0x2182,
+ 0x2183, 0x21b4, 0x21b5, 0x21b6, 0x21b7, 0x21b8, 0x21b9, 0x21ba,
+ 0x21bb, 0x21bc, 0x21bd, 0x21be, 0x21bf, 0x2460, 0x2461, 0x2462,
+ 0x2463, 0x2464, 0x2465, 0x2466, 0x2467, 0x2468, 0x2469, 0x246a,
+ 0x246b, 0x246c, 0x246d, 0x246e, 0x246f, 0x2470, 0x2471, 0x2472,
+ 0x2473, 0x26a2, 0x26a3, 0x000b,
+};
+
+static const uint8_t table1_mv_bits[1100] = {
+ 2, 4, 4, 4, 5, 5, 5, 5,
+ 6, 6, 7, 7, 7, 7, 7, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 11, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 4,
+};
+
+static const uint8_t table1_mvx[1099] = {
+ 32, 31, 32, 31, 33, 32, 33, 33,
+ 31, 34, 30, 32, 32, 34, 35, 32,
+ 34, 33, 29, 30, 30, 32, 31, 31,
+ 33, 35, 35, 33, 31, 29, 29, 33,
+ 34, 30, 31, 28, 36, 30, 34, 32,
+ 32, 37, 32, 32, 25, 27, 39, 32,
+ 32, 32, 38, 35, 36, 32, 37, 61,
+ 26, 32, 34, 35, 3, 35, 27, 28,
+ 29, 34, 28, 37, 31, 36, 32, 27,
+ 31, 30, 29, 39, 33, 29, 33, 35,
+ 25, 25, 29, 33, 31, 31, 31, 33,
+ 32, 30, 32, 32, 41, 39, 33, 36,
+ 32, 28, 34, 36, 38, 24, 60, 31,
+ 23, 28, 32, 33, 59, 32, 40, 30,
+ 5, 34, 32, 38, 32, 30, 43, 4,
+ 32, 32, 42, 31, 31, 32, 26, 38,
+ 26, 22, 21, 37, 61, 63, 37, 31,
+ 32, 33, 2, 1, 23, 33, 41, 27,
+ 35, 30, 38, 23, 33, 3, 28, 34,
+ 34, 27, 41, 29, 39, 35, 36, 29,
+ 32, 27, 30, 32, 24, 61, 37, 26,
+ 59, 25, 35, 27, 36, 37, 30, 31,
+ 34, 40, 3, 28, 34, 39, 32, 31,
+ 32, 30, 24, 28, 35, 36, 26, 32,
+ 31, 33, 29, 33, 39, 25, 30, 24,
+ 35, 59, 29, 34, 25, 30, 21, 35,
+ 43, 40, 32, 29, 5, 28, 31, 62,
+ 33, 33, 25, 31, 21, 31, 43, 31,
+ 34, 33, 20, 40, 39, 31, 31, 57,
+ 38, 32, 42, 33, 32, 31, 32, 29,
+ 30, 44, 5, 31, 22, 34, 36, 17,
+ 38, 58, 38, 35, 32, 60, 35, 24,
+ 32, 38, 16, 45, 42, 32, 31, 29,
+ 4, 30, 17, 40, 46, 48, 63, 32,
+ 42, 19, 41, 22, 28, 36, 45, 33,
+ 33, 32, 29, 7, 41, 42, 18, 33,
+ 33, 32, 22, 37, 1, 26, 22, 23,
+ 49, 28, 26, 27, 32, 33, 27, 23,
+ 28, 36, 15, 6, 34, 27, 31, 26,
+ 23, 2, 33, 32, 34, 41, 28, 32,
+ 41, 0, 36, 38, 34, 31, 47, 32,
+ 17, 31, 39, 33, 37, 51, 30, 47,
+ 32, 50, 32, 19, 63, 30, 25, 27,
+ 33, 62, 24, 31, 27, 30, 37, 31,
+ 45, 32, 39, 20, 46, 47, 35, 19,
+ 34, 1, 49, 21, 21, 14, 51, 26,
+ 23, 31, 36, 35, 58, 29, 29, 21,
+ 20, 42, 13, 28, 12, 40, 31, 33,
+ 39, 60, 32, 44, 33, 31, 28, 37,
+ 29, 32, 30, 49, 43, 28, 39, 25,
+ 32, 48, 2, 15, 20, 25, 31, 28,
+ 21, 24, 25, 15, 31, 17, 37, 43,
+ 18, 32, 33, 24, 33, 36, 13, 33,
+ 31, 39, 11, 31, 33, 32, 39, 37,
+ 32, 32, 29, 17, 44, 46, 36, 35,
+ 26, 37, 58, 32, 34, 38, 8, 38,
+ 38, 22, 29, 25, 16, 35, 32, 35,
+ 33, 43, 18, 46, 38, 50, 33, 18,
+ 53, 60, 13, 32, 36, 33, 51, 36,
+ 43, 45, 27, 42, 29, 24, 30, 25,
+ 31, 52, 31, 35, 38, 9, 22, 34,
+ 4, 17, 28, 55, 42, 25, 17, 20,
+ 47, 34, 33, 16, 40, 25, 16, 30,
+ 53, 29, 10, 11, 14, 26, 33, 4,
+ 35, 44, 26, 16, 31, 26, 34, 38,
+ 29, 31, 30, 24, 22, 61, 32, 9,
+ 45, 34, 31, 19, 9, 31, 46, 31,
+ 35, 54, 29, 57, 30, 50, 3, 31,
+ 63, 34, 47, 41, 51, 18, 31, 14,
+ 37, 38, 31, 24, 32, 31, 50, 33,
+ 31, 54, 27, 9, 33, 23, 19, 32,
+ 29, 29, 33, 28, 47, 49, 30, 47,
+ 33, 27, 25, 54, 44, 45, 50, 58,
+ 51, 48, 33, 59, 33, 34, 57, 13,
+ 26, 33, 13, 48, 30, 11, 7, 56,
+ 34, 55, 26, 0, 26, 35, 1, 51,
+ 33, 53, 31, 45, 12, 29, 29, 51,
+ 31, 48, 2, 6, 34, 30, 28, 33,
+ 60, 40, 27, 46, 31, 9, 35, 29,
+ 31, 39, 55, 46, 19, 37, 62, 34,
+ 30, 16, 19, 49, 41, 41, 39, 37,
+ 14, 5, 13, 35, 55, 30, 40, 40,
+ 42, 8, 20, 25, 45, 35, 33, 36,
+ 54, 38, 27, 37, 62, 40, 15, 59,
+ 49, 31, 29, 34, 34, 39, 24, 29,
+ 25, 29, 21, 29, 10, 61, 33, 49,
+ 35, 34, 3, 38, 39, 29, 7, 41,
+ 1, 35, 4, 23, 15, 23, 11, 37,
+ 28, 35, 30, 30, 24, 1, 43, 56,
+ 8, 34, 42, 24, 45, 30, 20, 23,
+ 8, 38, 22, 33, 17, 52, 34, 22,
+ 53, 43, 44, 1, 27, 31, 41, 43,
+ 41, 30, 31, 36, 30, 5, 55, 31,
+ 33, 30, 40, 23, 15, 29, 34, 34,
+ 59, 34, 30, 11, 13, 38, 5, 0,
+ 30, 42, 5, 30, 29, 34, 10, 44,
+ 30, 63, 35, 12, 3, 26, 15, 17,
+ 25, 34, 43, 39, 34, 56, 29, 23,
+ 30, 12, 30, 10, 35, 9, 24, 58,
+ 10, 12, 54, 33, 37, 20, 41, 35,
+ 29, 18, 61, 30, 40, 24, 39, 53,
+ 62, 26, 29, 33, 34, 53, 49, 21,
+ 27, 11, 63, 20, 26, 23, 7, 13,
+ 6, 47, 29, 30, 9, 51, 22, 34,
+ 21, 25, 33, 56, 57, 30, 38, 51,
+ 51, 38, 63, 28, 40, 35, 33, 18,
+ 33, 33, 24, 58, 58, 34, 49, 29,
+ 43, 4, 1, 4, 42, 35, 35, 30,
+ 17, 5, 56, 61, 25, 37, 36, 55,
+ 28, 35, 29, 50, 48, 52, 2, 42,
+ 34, 40, 46, 46, 43, 35, 29, 48,
+ 20, 29, 31, 41, 7, 30, 35, 19,
+ 14, 21, 8, 39, 39, 40, 46, 55,
+ 34, 6, 30, 34, 37, 25, 37, 33,
+ 22, 44, 52, 17, 35, 29, 36, 35,
+ 40, 37, 28, 30, 50, 14, 28, 55,
+ 6, 23, 19, 14, 30, 3, 30, 28,
+ 28, 61, 61, 47, 45, 48, 40, 40,
+ 34, 34, 25, 30, 29, 35, 4, 26,
+ 53, 50, 26, 41, 27, 59, 27, 38,
+ 39, 3, 50, 43, 47, 23, 33, 55,
+ 35, 21, 23, 35, 61, 33, 46, 52,
+ 35, 34, 24, 30, 43, 16, 37, 21,
+ 2, 24, 45, 34, 30, 55, 55, 1,
+ 29, 29, 26, 28, 25, 31, 36, 22,
+ 17, 30, 52, 2, 44, 44, 57, 26,
+ 62, 41, 39, 57, 26, 46, 49, 11,
+ 16, 19, 5, 59, 38, 39, 58, 38,
+ 25, 49, 50, 22, 28, 59, 9, 59,
+ 7, 28, 55, 17, 4, 35, 50, 21,
+ 29, 44, 47, 18, 24, 19, 25, 42,
+ 35, 3, 51, 35, 16, 35, 30, 63,
+ 57, 39, 39, 25, 35, 38, 9, 16,
+ 36, 45, 31, 60, 14, 34, 42, 24,
+ 0, 37, 18, 61, 57, 37, 28, 53,
+ 20, 46, 14, 47, 38, 38, 38, 9,
+ 34, 39, 43, 17, 39, 59, 5, 27,
+ 0, 12, 27,
+};
+
+static const uint8_t table1_mvy[1099] = {
+ 32, 32, 31, 31, 32, 33, 31, 33,
+ 33, 32, 32, 30, 34, 31, 32, 29,
+ 33, 30, 32, 33, 31, 35, 34, 30,
+ 34, 31, 33, 29, 29, 31, 33, 35,
+ 30, 30, 35, 32, 32, 34, 34, 28,
+ 25, 32, 36, 27, 32, 32, 32, 37,
+ 39, 3, 32, 30, 31, 26, 31, 32,
+ 32, 38, 29, 29, 32, 34, 31, 31,
+ 34, 35, 33, 33, 28, 33, 1, 33,
+ 27, 29, 30, 31, 28, 29, 37, 35,
+ 31, 33, 35, 27, 36, 37, 25, 25,
+ 61, 35, 4, 5, 32, 33, 36, 30,
+ 23, 30, 28, 34, 31, 32, 32, 39,
+ 32, 34, 21, 39, 32, 59, 32, 28,
+ 32, 36, 60, 33, 24, 36, 32, 32,
+ 41, 2, 32, 38, 26, 22, 33, 30,
+ 31, 32, 32, 30, 31, 32, 29, 3,
+ 40, 38, 32, 32, 33, 26, 31, 34,
+ 28, 38, 34, 31, 3, 31, 35, 38,
+ 27, 35, 33, 28, 29, 27, 29, 27,
+ 43, 29, 37, 63, 31, 33, 34, 30,
+ 31, 30, 37, 30, 35, 35, 26, 41,
+ 37, 31, 33, 28, 26, 30, 42, 24,
+ 7, 27, 33, 29, 36, 28, 34, 57,
+ 23, 41, 36, 23, 35, 34, 25, 30,
+ 25, 33, 25, 25, 29, 24, 33, 39,
+ 33, 33, 0, 37, 31, 36, 21, 32,
+ 61, 24, 35, 61, 31, 5, 31, 59,
+ 39, 21, 32, 30, 34, 22, 40, 32,
+ 29, 16, 31, 5, 62, 2, 20, 39,
+ 39, 32, 33, 1, 31, 24, 36, 32,
+ 36, 32, 28, 26, 6, 31, 38, 34,
+ 58, 35, 32, 33, 33, 17, 43, 26,
+ 31, 40, 31, 34, 32, 32, 31, 19,
+ 30, 32, 29, 33, 38, 38, 32, 59,
+ 40, 18, 38, 32, 35, 34, 32, 17,
+ 1, 15, 30, 28, 31, 28, 34, 29,
+ 32, 27, 35, 27, 49, 22, 37, 34,
+ 37, 26, 32, 32, 22, 28, 45, 29,
+ 30, 31, 43, 46, 41, 30, 26, 13,
+ 34, 32, 27, 38, 42, 42, 33, 47,
+ 33, 60, 27, 42, 25, 32, 22, 32,
+ 48, 32, 45, 33, 33, 41, 27, 25,
+ 19, 31, 35, 19, 36, 42, 27, 17,
+ 31, 44, 28, 33, 33, 31, 23, 31,
+ 40, 33, 31, 34, 30, 32, 33, 36,
+ 35, 47, 37, 41, 31, 23, 41, 29,
+ 30, 35, 32, 25, 32, 28, 58, 2,
+ 37, 33, 14, 33, 49, 20, 39, 36,
+ 21, 9, 23, 33, 35, 24, 39, 37,
+ 11, 33, 30, 31, 31, 28, 51, 40,
+ 35, 29, 25, 33, 46, 35, 37, 30,
+ 30, 8, 63, 28, 15, 40, 33, 45,
+ 49, 25, 32, 4, 47, 51, 36, 39,
+ 53, 10, 24, 29, 30, 31, 25, 40,
+ 38, 38, 33, 56, 23, 27, 32, 37,
+ 26, 29, 43, 36, 33, 24, 55, 43,
+ 9, 29, 34, 34, 24, 33, 18, 33,
+ 33, 30, 31, 50, 24, 60, 30, 39,
+ 34, 30, 39, 28, 22, 38, 2, 26,
+ 63, 32, 57, 21, 39, 33, 28, 18,
+ 30, 34, 22, 33, 29, 41, 30, 34,
+ 35, 21, 13, 34, 35, 39, 30, 46,
+ 32, 42, 32, 31, 33, 26, 11, 33,
+ 22, 31, 25, 31, 53, 27, 43, 25,
+ 40, 50, 21, 36, 38, 30, 12, 31,
+ 34, 20, 15, 29, 32, 62, 30, 13,
+ 17, 32, 19, 31, 20, 31, 30, 7,
+ 1, 17, 34, 37, 31, 31, 44, 34,
+ 26, 40, 16, 37, 52, 48, 30, 20,
+ 18, 33, 38, 29, 7, 25, 30, 54,
+ 45, 47, 46, 41, 29, 29, 16, 30,
+ 14, 26, 38, 34, 34, 29, 34, 30,
+ 29, 30, 57, 30, 4, 46, 33, 29,
+ 39, 44, 30, 31, 50, 33, 31, 32,
+ 19, 32, 40, 31, 37, 47, 1, 35,
+ 16, 31, 0, 35, 33, 1, 17, 34,
+ 9, 34, 33, 31, 49, 43, 42, 51,
+ 34, 29, 23, 29, 14, 30, 45, 49,
+ 11, 24, 31, 28, 35, 41, 30, 44,
+ 18, 29, 34, 35, 36, 25, 26, 21,
+ 31, 30, 34, 19, 34, 44, 36, 38,
+ 25, 31, 28, 23, 37, 3, 55, 41,
+ 30, 22, 41, 24, 33, 26, 35, 35,
+ 30, 55, 51, 47, 48, 38, 24, 15,
+ 21, 50, 25, 46, 30, 29, 10, 34,
+ 42, 45, 29, 42, 22, 3, 33, 27,
+ 34, 1, 34, 28, 34, 36, 35, 23,
+ 23, 13, 58, 3, 26, 63, 25, 31,
+ 34, 61, 38, 39, 25, 61, 29, 37,
+ 30, 41, 26, 48, 28, 33, 50, 35,
+ 30, 37, 29, 29, 40, 6, 39, 28,
+ 28, 19, 8, 22, 45, 34, 35, 10,
+ 58, 17, 37, 39, 30, 18, 54, 14,
+ 29, 16, 59, 30, 35, 23, 35, 30,
+ 47, 36, 29, 55, 20, 12, 31, 35,
+ 14, 29, 18, 34, 34, 24, 29, 26,
+ 22, 2, 27, 23, 8, 30, 55, 38,
+ 60, 31, 4, 34, 49, 34, 27, 34,
+ 33, 30, 31, 54, 42, 35, 38, 46,
+ 44, 26, 27, 9, 39, 25, 21, 29,
+ 28, 42, 13, 0, 5, 34, 37, 28,
+ 24, 29, 63, 26, 22, 27, 29, 25,
+ 33, 25, 61, 0, 35, 25, 36, 15,
+ 27, 40, 53, 33, 3, 10, 16, 37,
+ 38, 18, 30, 46, 27, 9, 6, 29,
+ 62, 8, 42, 28, 29, 3, 25, 16,
+ 26, 29, 35, 28, 27, 51, 61, 48,
+ 37, 9, 34, 7, 49, 45, 20, 29,
+ 21, 5, 5, 29, 28, 34, 29, 24,
+ 10, 24, 35, 36, 38, 55, 11, 36,
+ 38, 53, 54, 26, 30, 49, 20, 27,
+ 30, 39, 33, 41, 49, 22, 38, 38,
+ 4, 30, 8, 9, 3, 24, 22, 50,
+ 37, 36, 31, 27, 2, 9, 42, 63,
+ 25, 19, 44, 1, 28, 28, 48, 30,
+ 34, 41, 41, 38, 12, 27, 15, 0,
+ 16, 34, 35, 38, 28, 29, 40, 42,
+ 51, 52, 45, 54, 59, 59, 42, 44,
+ 37, 26, 46, 24, 15, 39, 22, 46,
+ 19, 35, 38, 17, 37, 23, 52, 55,
+ 50, 37, 26, 11, 37, 12, 24, 30,
+ 16, 13, 22, 13, 36, 35, 40, 41,
+ 34, 41, 26, 53, 51, 5, 21, 30,
+ 2, 63, 41, 20, 1, 56, 21, 24,
+ 25, 5, 28, 35, 26, 28, 30, 18,
+ 29, 23, 40, 34, 20, 42, 39, 34,
+ 28, 61, 38, 27, 62, 9, 36, 17,
+ 9, 49, 24, 25, 54, 34, 39, 37,
+ 3, 1, 25, 38, 38, 44, 35, 36,
+ 12, 60, 36, 38, 40, 25, 43, 39,
+ 53, 28, 39, 57, 46, 10, 52, 27,
+ 35, 42, 45, 59, 15, 60, 38, 24,
+ 23, 39, 12, 29, 24, 0, 20, 16,
+ 28, 43, 35, 28, 1, 49, 4, 21,
+ 42, 39, 29, 3, 44, 21, 53, 55,
+ 11, 5, 3, 39, 53, 28, 25, 19,
+ 34, 28, 21,
+};
+
+MVTable mv_tables[2] = {
+ {
+ 1099,
+ table0_mv_code,
+ table0_mv_bits,
+ table0_mvx,
+ table0_mvy,
+ },
+ {
+ 1099,
+ table1_mv_code,
+ table1_mv_bits,
+ table1_mvx,
+ table1_mvy,
+ }
+};
+
+const uint8_t v2_mb_type[8][2] = {
+ {1, 1}, {0 , 2}, {3 , 3}, {9 , 5},
+ {5, 4}, {0x21, 7}, {0x20, 7}, {0x11, 6},
+};
+
+const uint8_t v2_intra_cbpc[4][2] = {
+ {1, 1}, {0, 3}, {1, 3}, {1, 2},
+};
+
+const uint8_t wmv1_y_dc_scale_table[32]={
+// 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
+ 0, 8, 8, 8, 8, 8, 9, 9,10,10,11,11,12,12,13,13,14,14,15,15,16,16,17,17,18,18,19,19,20,20,21,21
+};
+const uint8_t wmv1_c_dc_scale_table[32]={
+// 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
+ 0, 8, 8, 8, 8, 9, 9,10,10,11,11,12,12,13,13,14,14,15,15,16,16,17,17,18,18,19,19,20,20,21,21,22
+};
+
+const uint8_t old_ff_y_dc_scale_table[32]={
+// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ 0, 8, 8, 8, 8,10,12,14,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39
+};
+
+const uint8_t wmv1_scantable[WMV1_SCANTABLE_COUNT][64]={
+ {
+ 0x00, 0x08, 0x01, 0x02, 0x09, 0x10, 0x18, 0x11,
+ 0x0A, 0x03, 0x04, 0x0B, 0x12, 0x19, 0x20, 0x28,
+ 0x30, 0x38, 0x29, 0x21, 0x1A, 0x13, 0x0C, 0x05,
+ 0x06, 0x0D, 0x14, 0x1B, 0x22, 0x31, 0x39, 0x3A,
+ 0x32, 0x2A, 0x23, 0x1C, 0x15, 0x0E, 0x07, 0x0F,
+ 0x16, 0x1D, 0x24, 0x2B, 0x33, 0x3B, 0x3C, 0x34,
+ 0x2C, 0x25, 0x1E, 0x17, 0x1F, 0x26, 0x2D, 0x35,
+ 0x3D, 0x3E, 0x36, 0x2E, 0x27, 0x2F, 0x37, 0x3F,
+ },
+ {
+ 0x00, 0x08, 0x01, 0x02, 0x09, 0x10, 0x18, 0x11,
+ 0x0A, 0x03, 0x04, 0x0B, 0x12, 0x19, 0x20, 0x28,
+ 0x21, 0x30, 0x1A, 0x13, 0x0C, 0x05, 0x06, 0x0D,
+ 0x14, 0x1B, 0x22, 0x29, 0x38, 0x31, 0x39, 0x2A,
+ 0x23, 0x1C, 0x15, 0x0E, 0x07, 0x0F, 0x16, 0x1D,
+ 0x24, 0x2B, 0x32, 0x3A, 0x33, 0x3B, 0x2C, 0x25,
+ 0x1E, 0x17, 0x1F, 0x26, 0x2D, 0x34, 0x3C, 0x35,
+ 0x3D, 0x2E, 0x27, 0x2F, 0x36, 0x3E, 0x37, 0x3F,
+ },
+ {
+ 0x00, 0x01, 0x08, 0x02, 0x03, 0x09, 0x10, 0x18,
+ 0x11, 0x0A, 0x04, 0x05, 0x0B, 0x12, 0x19, 0x20,
+ 0x28, 0x30, 0x21, 0x1A, 0x13, 0x0C, 0x06, 0x07,
+ 0x0D, 0x14, 0x1B, 0x22, 0x29, 0x38, 0x31, 0x39,
+ 0x2A, 0x23, 0x1C, 0x15, 0x0E, 0x0F, 0x16, 0x1D,
+ 0x24, 0x2B, 0x32, 0x3A, 0x33, 0x2C, 0x25, 0x1E,
+ 0x17, 0x1F, 0x26, 0x2D, 0x34, 0x3B, 0x3C, 0x35,
+ 0x2E, 0x27, 0x2F, 0x36, 0x3D, 0x3E, 0x37, 0x3F,
+ },
+ {
+ 0x00, 0x08, 0x10, 0x01, 0x18, 0x20, 0x28, 0x09,
+ 0x02, 0x03, 0x0A, 0x11, 0x19, 0x30, 0x38, 0x29,
+ 0x21, 0x1A, 0x12, 0x0B, 0x04, 0x05, 0x0C, 0x13,
+ 0x1B, 0x22, 0x31, 0x39, 0x32, 0x2A, 0x23, 0x1C,
+ 0x14, 0x0D, 0x06, 0x07, 0x0E, 0x15, 0x1D, 0x24,
+ 0x2B, 0x33, 0x3A, 0x3B, 0x34, 0x2C, 0x25, 0x1E,
+ 0x16, 0x0F, 0x17, 0x1F, 0x26, 0x2D, 0x3C, 0x35,
+ 0x2E, 0x27, 0x2F, 0x36, 0x3D, 0x3E, 0x37, 0x3F,
+ }
+};
+
+const uint8_t table_inter_intra[4][2]={
+ {0,1} /*Luma-Left Chroma-Left*/,
+ {2,2} /*Luma-Top Chroma-Left*/,
+ {6,3} /*Luma-Left Chroma-Top */,
+ {7,3} /*Luma-Top Chroma-Top */
+};
+
+static const uint32_t table_mb_non_intra2[128][2] = {
+{0x0000A7, 14}, {0x01B2B8, 18}, {0x01B28E, 18}, {0x036575, 19},
+{0x006CAC, 16}, {0x000A69, 18}, {0x002934, 20}, {0x00526B, 21},
+{0x006CA1, 16}, {0x01B2B9, 18}, {0x0029AD, 20}, {0x029353, 24},
+{0x006CA7, 16}, {0x006CAB, 16}, {0x01B2BB, 18}, {0x00029B, 16},
+{0x00D944, 17}, {0x000A6A, 18}, {0x0149A8, 23}, {0x03651F, 19},
+{0x006CAF, 16}, {0x000A4C, 18}, {0x03651E, 19}, {0x000A48, 18},
+{0x00299C, 20}, {0x00299F, 20}, {0x029352, 24}, {0x0029AC, 20},
+{0x000296, 16}, {0x00D946, 17}, {0x000A68, 18}, {0x000298, 16},
+{0x000527, 17}, {0x00D94D, 17}, {0x0014D7, 19}, {0x036574, 19},
+{0x000A5C, 18}, {0x01B299, 18}, {0x00299D, 20}, {0x00299E, 20},
+{0x000525, 17}, {0x000A66, 18}, {0x00A4D5, 22}, {0x00149B, 19},
+{0x000295, 16}, {0x006CAD, 16}, {0x000A49, 18}, {0x000521, 17},
+{0x006CAA, 16}, {0x00D945, 17}, {0x01B298, 18}, {0x00052F, 17},
+{0x003654, 15}, {0x006CA0, 16}, {0x000532, 17}, {0x000291, 16},
+{0x003652, 15}, {0x000520, 17}, {0x000A5D, 18}, {0x000294, 16},
+{0x00009B, 11}, {0x0006E2, 12}, {0x000028, 12}, {0x0001B0, 10},
+{0x000001, 3}, {0x000010, 8}, {0x00002F, 6}, {0x00004C, 10},
+{0x00000D, 4}, {0x000000, 10}, {0x000006, 9}, {0x000134, 12},
+{0x00000C, 4}, {0x000007, 10}, {0x000007, 9}, {0x0006E1, 12},
+{0x00000E, 5}, {0x0000DA, 9}, {0x000022, 9}, {0x000364, 11},
+{0x00000F, 4}, {0x000006, 10}, {0x00000F, 9}, {0x000135, 12},
+{0x000014, 5}, {0x0000DD, 9}, {0x000004, 9}, {0x000015, 11},
+{0x00001A, 6}, {0x0001B3, 10}, {0x000005, 10}, {0x0006E3, 12},
+{0x00000C, 5}, {0x0000B9, 8}, {0x000004, 8}, {0x0000DB, 9},
+{0x00000E, 4}, {0x00000B, 10}, {0x000023, 9}, {0x0006CB, 12},
+{0x000005, 6}, {0x0001B1, 10}, {0x000001, 10}, {0x0006E0, 12},
+{0x000011, 5}, {0x0000DF, 9}, {0x00000E, 9}, {0x000373, 11},
+{0x000003, 5}, {0x0000B8, 8}, {0x000006, 8}, {0x000175, 9},
+{0x000015, 5}, {0x000174, 9}, {0x000027, 9}, {0x000372, 11},
+{0x000010, 5}, {0x0000BB, 8}, {0x000005, 8}, {0x0000DE, 9},
+{0x00000F, 5}, {0x000001, 9}, {0x000012, 8}, {0x000004, 10},
+{0x000002, 3}, {0x000016, 5}, {0x000009, 4}, {0x000001, 5},
+};
+
+static const uint32_t table_mb_non_intra3[128][2] = {
+{0x0002A1, 10}, {0x005740, 15}, {0x01A0BF, 18}, {0x015D19, 17},
+{0x001514, 13}, {0x00461E, 15}, {0x015176, 17}, {0x015177, 17},
+{0x0011AD, 13}, {0x00682E, 16}, {0x0682F9, 20}, {0x03417D, 19},
+{0x001A36, 14}, {0x002A2D, 14}, {0x00D05E, 17}, {0x006824, 16},
+{0x001515, 13}, {0x00545C, 15}, {0x0230E9, 18}, {0x011AFA, 17},
+{0x0015D7, 13}, {0x005747, 15}, {0x008D79, 16}, {0x006825, 16},
+{0x002BA2, 14}, {0x00A8BA, 16}, {0x0235F6, 18}, {0x015D18, 17},
+{0x0011AE, 13}, {0x00346F, 15}, {0x008C3B, 16}, {0x00346E, 15},
+{0x000D1A, 13}, {0x00461F, 15}, {0x0682F8, 20}, {0x011875, 17},
+{0x002BA1, 14}, {0x008D61, 16}, {0x0235F7, 18}, {0x0230E8, 18},
+{0x001513, 13}, {0x008D7B, 16}, {0x011AF4, 17}, {0x011AF5, 17},
+{0x001185, 13}, {0x0046BF, 15}, {0x008D60, 16}, {0x008D7C, 16},
+{0x001512, 13}, {0x00461C, 15}, {0x00AE8D, 16}, {0x008D78, 16},
+{0x000D0E, 13}, {0x003413, 15}, {0x0046B1, 15}, {0x003416, 15},
+{0x000AEA, 12}, {0x002A2C, 14}, {0x005741, 15}, {0x002A2F, 14},
+{0x000158, 9}, {0x0008D2, 12}, {0x00054C, 11}, {0x000686, 12},
+{0x000000, 2}, {0x000069, 8}, {0x00006B, 8}, {0x00068C, 12},
+{0x000007, 3}, {0x00015E, 9}, {0x0002A3, 10}, {0x000AE9, 12},
+{0x000006, 3}, {0x000231, 10}, {0x0002B8, 10}, {0x001A08, 14},
+{0x000010, 5}, {0x0001A9, 10}, {0x000342, 11}, {0x000A88, 12},
+{0x000004, 4}, {0x0001A2, 10}, {0x0002A4, 10}, {0x001184, 13},
+{0x000012, 5}, {0x000232, 10}, {0x0002B2, 10}, {0x000680, 12},
+{0x00001B, 6}, {0x00046A, 11}, {0x00068E, 12}, {0x002359, 14},
+{0x000016, 5}, {0x00015F, 9}, {0x0002A0, 10}, {0x00054D, 11},
+{0x000005, 4}, {0x000233, 10}, {0x0002B9, 10}, {0x0015D6, 13},
+{0x000022, 6}, {0x000468, 11}, {0x000683, 12}, {0x001A0A, 14},
+{0x000013, 5}, {0x000236, 10}, {0x0002BB, 10}, {0x001186, 13},
+{0x000017, 5}, {0x0001AB, 10}, {0x0002A7, 10}, {0x0008D3, 12},
+{0x000014, 5}, {0x000237, 10}, {0x000460, 11}, {0x000D0F, 13},
+{0x000019, 6}, {0x0001AA, 10}, {0x0002B3, 10}, {0x000681, 12},
+{0x000018, 6}, {0x0001A8, 10}, {0x0002A5, 10}, {0x00068F, 12},
+{0x000007, 4}, {0x000055, 7}, {0x000047, 7}, {0x0000AD, 8},
+};
+
+static const uint32_t table_mb_non_intra4[128][2] = {
+{0x0000D4, 8}, {0x0021C5, 14}, {0x00F18A, 16}, {0x00D5BC, 16},
+{0x000879, 12}, {0x00354D, 14}, {0x010E3F, 17}, {0x010F54, 17},
+{0x000866, 12}, {0x00356E, 14}, {0x010F55, 17}, {0x010E3E, 17},
+{0x0010CE, 13}, {0x003C84, 14}, {0x00D5BD, 16}, {0x00F18B, 16},
+{0x000868, 12}, {0x00438C, 15}, {0x0087AB, 16}, {0x00790B, 15},
+{0x000F10, 12}, {0x00433D, 15}, {0x006AD3, 15}, {0x00790A, 15},
+{0x001AA7, 13}, {0x0043D4, 15}, {0x00871E, 16}, {0x006ADF, 15},
+{0x000D7C, 12}, {0x003C94, 14}, {0x00438D, 15}, {0x006AD2, 15},
+{0x0006BC, 11}, {0x0021E9, 14}, {0x006ADA, 15}, {0x006A99, 15},
+{0x0010F7, 13}, {0x004389, 15}, {0x006ADB, 15}, {0x0078C4, 15},
+{0x000D56, 12}, {0x0035F7, 14}, {0x00438E, 15}, {0x006A98, 15},
+{0x000D52, 12}, {0x003C95, 14}, {0x004388, 15}, {0x00433C, 15},
+{0x000D54, 12}, {0x001E4B, 13}, {0x003C63, 14}, {0x003C83, 14},
+{0x000861, 12}, {0x0021EB, 14}, {0x00356C, 14}, {0x0035F6, 14},
+{0x000863, 12}, {0x00219F, 14}, {0x003568, 14}, {0x003C82, 14},
+{0x0001AE, 9}, {0x0010C0, 13}, {0x000F11, 12}, {0x001AFA, 13},
+{0x000000, 1}, {0x0000F0, 8}, {0x0001AD, 9}, {0x0010C1, 13},
+{0x00000A, 4}, {0x0003C5, 10}, {0x000789, 11}, {0x001AB5, 13},
+{0x000009, 4}, {0x000435, 11}, {0x000793, 11}, {0x001E40, 13},
+{0x00001D, 5}, {0x0003CB, 10}, {0x000878, 12}, {0x001AAF, 13},
+{0x00000B, 4}, {0x0003C7, 10}, {0x000791, 11}, {0x001AAB, 13},
+{0x00001F, 5}, {0x000436, 11}, {0x0006BF, 11}, {0x000F19, 12},
+{0x00003D, 6}, {0x000D51, 12}, {0x0010C4, 13}, {0x0021E8, 14},
+{0x000036, 6}, {0x000437, 11}, {0x0006AF, 11}, {0x0010C5, 13},
+{0x00000C, 4}, {0x000432, 11}, {0x000794, 11}, {0x001E30, 13},
+{0x000042, 7}, {0x000870, 12}, {0x000F24, 12}, {0x001E43, 13},
+{0x000020, 6}, {0x00043E, 11}, {0x000795, 11}, {0x001AAA, 13},
+{0x000037, 6}, {0x0006AC, 11}, {0x0006AE, 11}, {0x0010F6, 13},
+{0x000034, 6}, {0x00043A, 11}, {0x000D50, 12}, {0x001AAE, 13},
+{0x000039, 6}, {0x00043F, 11}, {0x00078D, 11}, {0x0010D2, 13},
+{0x000038, 6}, {0x00043B, 11}, {0x0006BD, 11}, {0x0010D3, 13},
+{0x000011, 5}, {0x0001AC, 9}, {0x0000F3, 8}, {0x000439, 11},
+};
+
+const uint32_t (* const wmv2_inter_table[WMV2_INTER_CBP_TABLE_COUNT])[2]={
+ table_mb_non_intra2,
+ table_mb_non_intra3,
+ table_mb_non_intra4,
+ table_mb_non_intra,
+};
+
+const uint8_t wmv2_scantableA[64]={
+0x00, 0x01, 0x02, 0x08, 0x03, 0x09, 0x0A, 0x10,
+0x04, 0x0B, 0x11, 0x18, 0x12, 0x0C, 0x05, 0x13,
+0x19, 0x0D, 0x14, 0x1A, 0x1B, 0x06, 0x15, 0x1C,
+0x0E, 0x16, 0x1D, 0x07, 0x1E, 0x0F, 0x17, 0x1F,
+};
+
+const uint8_t wmv2_scantableB[64]={
+0x00, 0x08, 0x01, 0x10, 0x09, 0x18, 0x11, 0x02,
+0x20, 0x0A, 0x19, 0x28, 0x12, 0x30, 0x21, 0x1A,
+0x38, 0x29, 0x22, 0x03, 0x31, 0x39, 0x0B, 0x2A,
+0x13, 0x32, 0x1B, 0x3A, 0x23, 0x2B, 0x33, 0x3B,
+};
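A minimal sketch (illustration only, not part of the patch) of how the parallel arrays referenced by mv_tables pair up, based on the MVTable struct declared in msmpeg4data.h below; the 32-centred vector components and the extra 1100th bits/code entry acting as an escape code are assumptions, not shown in this diff:

/* Hypothetical helper, for illustration only. */
static void mv_table_entry(const MVTable *t, int idx,
                           uint16_t *code, uint8_t *len, int *dx, int *dy)
{
    *code = t->table_mv_code[idx];  /* VLC code word for this vector  */
    *len  = t->table_mv_bits[idx];  /* its length in bits             */
    *dx   = t->table_mvx[idx] - 32; /* assumed zero offset of 32,     */
    *dy   = t->table_mvy[idx] - 32; /* i.e. stored value is mv + 32   */
}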
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/msmpeg4data.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/msmpeg4data.h
new file mode 100644
index 000000000..9baa9370d
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/msmpeg4data.h
@@ -0,0 +1,86 @@
+/*
+ * MSMPEG4 backend for ffmpeg encoder and decoder
+ * copyright (c) 2001 Fabrice Bellard
+ * copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * msmpeg4v1 & v2 stuff by Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/msmpeg4data.h
+ * MSMPEG4 data tables.
+ */
+
+#ifndef AVCODEC_MSMPEG4DATA_H
+#define AVCODEC_MSMPEG4DATA_H
+
+#include "libavutil/common.h"
+#include "get_bits.h"
+#include "rl.h"
+
+/* motion vector table */
+typedef struct MVTable {
+ int n;
+ const uint16_t *table_mv_code;
+ const uint8_t *table_mv_bits;
+ const uint8_t *table_mvx;
+ const uint8_t *table_mvy;
+ uint16_t *table_mv_index; /* encoding: convert mv to index in table_mv */
+ VLC vlc; /* decoding: vlc */
+} MVTable;
+
+extern VLC ff_msmp4_mb_i_vlc;
+extern VLC ff_msmp4_dc_luma_vlc[2];
+extern VLC ff_msmp4_dc_chroma_vlc[2];
+
+/* intra picture macroblock coded block pattern */
+extern const uint16_t ff_msmp4_mb_i_table[64][2];
+
+#define WMV1_SCANTABLE_COUNT 4
+
+extern const uint8_t wmv1_scantable[WMV1_SCANTABLE_COUNT][64];
+
+#define NB_RL_TABLES 6
+
+extern RLTable rl_table[NB_RL_TABLES];
+
+extern const uint8_t wmv1_y_dc_scale_table[32];
+extern const uint8_t wmv1_c_dc_scale_table[32];
+extern const uint8_t old_ff_y_dc_scale_table[32];
+
+extern MVTable mv_tables[2];
+
+extern const uint8_t v2_mb_type[8][2];
+extern const uint8_t v2_intra_cbpc[4][2];
+
+extern const uint32_t table_mb_non_intra[128][2];
+extern const uint8_t table_inter_intra[4][2];
+
+extern const uint32_t ff_table0_dc_lum[120][2];
+extern const uint32_t ff_table1_dc_lum[120][2];
+extern const uint32_t ff_table0_dc_chroma[120][2];
+extern const uint32_t ff_table1_dc_chroma[120][2];
+
+#define WMV2_INTER_CBP_TABLE_COUNT 4
+extern const uint32_t (* const wmv2_inter_table[WMV2_INTER_CBP_TABLE_COUNT])[2];
+
+extern const uint8_t wmv2_scantableA[64];
+extern const uint8_t wmv2_scantableB[64];
+
+#endif /* AVCODEC_MSMPEG4DATA_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/nellymoser.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/nellymoser.c
new file mode 100644
index 000000000..08559e68c
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/nellymoser.c
@@ -0,0 +1,226 @@
+/*
+ * Common code between Nellymoser encoder and decoder
+ * Copyright (c) 2007 a840bda5870ba11f19698ff6eb9581dfb0f95fa5,
+ * 539459aeb7d425140b62a3ec7dbf6dc8e408a306, and
+ * 520e17cd55896441042b14df2566a6eb610ed444
+ * Copyright (c) 2007 Loic Minier <lool at dooz.org>
+ * Benjamin Larsson
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * @file libavcodec/nellymoser.c
+ * The 3 alphanumeric copyright notices are md5summed; they are from the original
+ * implementors. The original code is available from http://code.google.com/p/nelly2pcm/
+ */
+
+#include "nellymoser.h"
+#include "avcodec.h"
+#include "dsputil.h"
+
+#define ALT_BITSTREAM_READER_LE
+#include "get_bits.h"
+
+const float ff_nelly_dequantization_table[127] = {
+ 0.0000000000,
+
+-0.8472560048, 0.7224709988,
+
+-1.5247479677,-0.4531480074, 0.3753609955, 1.4717899561,
+
+-1.9822579622,-1.1929379702,-0.5829370022,-0.0693780035, 0.3909569979, 0.9069200158, 1.4862740040, 2.2215409279,
+
+-2.3887870312,-1.8067539930,-1.4105420113,-1.0773609877,-0.7995010018,-0.5558109879,-0.3334020078,-0.1324490011,
+ 0.0568020009, 0.2548770010, 0.4773550034, 0.7386850119, 1.0443060398, 1.3954459429, 1.8098750114, 2.3918759823,
+
+-2.3893830776,-1.9884680510,-1.7514040470,-1.5643119812,-1.3922129869,-1.2164649963,-1.0469499826,-0.8905100226,
+-0.7645580173,-0.6454579830,-0.5259280205,-0.4059549868,-0.3029719889,-0.2096900046,-0.1239869967,-0.0479229987,
+ 0.0257730000, 0.1001340002, 0.1737180054, 0.2585540116, 0.3522900045, 0.4569880068, 0.5767750144, 0.7003160119,
+ 0.8425520062, 1.0093879700, 1.1821349859, 1.3534560204, 1.5320819616, 1.7332619429, 1.9722349644, 2.3978140354,
+
+-2.5756309032,-2.0573320389,-1.8984919786,-1.7727810144,-1.6662600040,-1.5742180347,-1.4993319511,-1.4316639900,
+-1.3652280569,-1.3000990152,-1.2280930281,-1.1588579416,-1.0921250582,-1.0135740042,-0.9202849865,-0.8287050128,
+-0.7374889851,-0.6447759867,-0.5590940118,-0.4857139885,-0.4110319912,-0.3459700048,-0.2851159871,-0.2341620028,
+-0.1870580018,-0.1442500055,-0.1107169986,-0.0739680007,-0.0365610011,-0.0073290002, 0.0203610007, 0.0479039997,
+ 0.0751969963, 0.0980999991, 0.1220389977, 0.1458999962, 0.1694349945, 0.1970459968, 0.2252430022, 0.2556869984,
+ 0.2870100141, 0.3197099864, 0.3525829911, 0.3889069855, 0.4334920049, 0.4769459963, 0.5204820037, 0.5644530058,
+ 0.6122040153, 0.6685929894, 0.7341650128, 0.8032159805, 0.8784040213, 0.9566209912, 1.0397069454, 1.1293770075,
+ 1.2211159468, 1.3080279827, 1.4024800062, 1.5056819916, 1.6227730513, 1.7724959850, 1.9430880547, 2.2903931141
+};
+
+const uint8_t ff_nelly_band_sizes_table[NELLY_BANDS] = {
+2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 9, 10, 12, 14, 15
+};
+
+const uint16_t ff_nelly_init_table[64] = {
+3134, 5342, 6870, 7792, 8569, 9185, 9744, 10191, 10631, 11061, 11434, 11770,
+12116, 12513, 12925, 13300, 13674, 14027, 14352, 14716, 15117, 15477, 15824,
+16157, 16513, 16804, 17090, 17401, 17679, 17948, 18238, 18520, 18764, 19078,
+19381, 19640, 19921, 20205, 20500, 20813, 21162, 21465, 21794, 22137, 22453,
+22756, 23067, 23350, 23636, 23926, 24227, 24521, 24819, 25107, 25414, 25730,
+26120, 26497, 26895, 27344, 27877, 28463, 29426, 31355
+};
+
+const int16_t ff_nelly_delta_table[32] = {
+-11725, -9420, -7910, -6801, -5948, -5233, -4599, -4039, -3507, -3030, -2596,
+-2170, -1774, -1383, -1016, -660, -329, -1, 337, 696, 1085, 1512, 1962, 2433,
+2968, 3569, 4314, 5279, 6622, 8154, 10076, 12975
+};
+
+static inline int signed_shift(int i, int shift) {
+ if (shift > 0)
+ return i << shift;
+ return i >> -shift;
+}
+
+static int sum_bits(short *buf, short shift, short off)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < NELLY_FILL_LEN; i++) {
+ int b = buf[i]-off;
+ b = ((b>>(shift-1))+1)>>1;
+ ret += av_clip(b, 0, NELLY_BIT_CAP);
+ }
+
+ return ret;
+}
+
+static int headroom(int *la)
+{
+ int l;
+ if (*la == 0) {
+ return 31;
+ }
+ l = 30 - av_log2(FFABS(*la));
+ *la <<= l;
+ return l;
+}
+
+
+void ff_nelly_get_sample_bits(const float *buf, int *bits)
+{
+ int i, j;
+ short sbuf[128];
+ int bitsum = 0, last_bitsum, small_bitsum, big_bitsum;
+ short shift, shift_saved;
+ int max, sum, last_off, tmp;
+ int big_off, small_off;
+ int off;
+
+ max = 0;
+ for (i = 0; i < NELLY_FILL_LEN; i++) {
+ max = FFMAX(max, buf[i]);
+ }
+ shift = -16;
+ shift += headroom(&max);
+
+ sum = 0;
+ for (i = 0; i < NELLY_FILL_LEN; i++) {
+ sbuf[i] = signed_shift(buf[i], shift);
+ sbuf[i] = (3*sbuf[i])>>2;
+ sum += sbuf[i];
+ }
+
+ shift += 11;
+ shift_saved = shift;
+ sum -= NELLY_DETAIL_BITS << shift;
+ shift += headroom(&sum);
+ small_off = (NELLY_BASE_OFF * (sum>>16)) >> 15;
+ shift = shift_saved - (NELLY_BASE_SHIFT+shift-31);
+
+ small_off = signed_shift(small_off, shift);
+
+ bitsum = sum_bits(sbuf, shift_saved, small_off);
+
+ if (bitsum != NELLY_DETAIL_BITS) {
+ off = bitsum - NELLY_DETAIL_BITS;
+
+ for(shift=0; FFABS(off) <= 16383; shift++)
+ off *= 2;
+
+ off = (off * NELLY_BASE_OFF) >> 15;
+ shift = shift_saved-(NELLY_BASE_SHIFT+shift-15);
+
+ off = signed_shift(off, shift);
+
+ for (j = 1; j < 20; j++) {
+ last_off = small_off;
+ small_off += off;
+ last_bitsum = bitsum;
+
+ bitsum = sum_bits(sbuf, shift_saved, small_off);
+
+ if ((bitsum-NELLY_DETAIL_BITS) * (last_bitsum-NELLY_DETAIL_BITS) <= 0)
+ break;
+ }
+
+ if (bitsum > NELLY_DETAIL_BITS) {
+ big_off = small_off;
+ small_off = last_off;
+ big_bitsum=bitsum;
+ small_bitsum=last_bitsum;
+ } else {
+ big_off = last_off;
+ big_bitsum=last_bitsum;
+ small_bitsum=bitsum;
+ }
+
+ while (bitsum != NELLY_DETAIL_BITS && j <= 19) {
+ off = (big_off+small_off)>>1;
+ bitsum = sum_bits(sbuf, shift_saved, off);
+ if (bitsum > NELLY_DETAIL_BITS) {
+ big_off=off;
+ big_bitsum=bitsum;
+ } else {
+ small_off = off;
+ small_bitsum=bitsum;
+ }
+ j++;
+ }
+
+ if (abs(big_bitsum-NELLY_DETAIL_BITS) >=
+ abs(small_bitsum-NELLY_DETAIL_BITS)) {
+ bitsum = small_bitsum;
+ } else {
+ small_off = big_off;
+ bitsum = big_bitsum;
+ }
+ }
+
+ for (i = 0; i < NELLY_FILL_LEN; i++) {
+ tmp = sbuf[i]-small_off;
+ tmp = ((tmp>>(shift_saved-1))+1)>>1;
+ bits[i] = av_clip(tmp, 0, NELLY_BIT_CAP);
+ }
+
+ if (bitsum > NELLY_DETAIL_BITS) {
+ tmp = i = 0;
+ while (tmp < NELLY_DETAIL_BITS) {
+ tmp += bits[i];
+ i++;
+ }
+
+ bits[i-1] -= tmp - NELLY_DETAIL_BITS;
+ for(; i < NELLY_FILL_LEN; i++)
+ bits[i] = 0;
+ }
+}
+
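To make the fixed-point helpers above concrete, a small sketch (illustration only, not part of the patch) of what signed_shift() and headroom() compute:

/* Illustration only. */
static void fixed_point_helpers_example(void)
{
    int a  = signed_shift(5, 2);   /* positive shift: 5 << 2  == 20 */
    int b  = signed_shift(20, -2); /* negative shift: 20 >> 2 == 5  */
    int la = 1;
    int l  = headroom(&la);        /* l == 30 - av_log2(1) == 30, la == 1 << 30 */
    (void)a; (void)b; (void)l;
}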
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/nellymoser.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/nellymoser.h
new file mode 100644
index 000000000..6a7526dbf
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/nellymoser.h
@@ -0,0 +1,57 @@
+/*
+ * Common code between Nellymoser encoder and decoder
+ * Copyright (c) 2007 a840bda5870ba11f19698ff6eb9581dfb0f95fa5,
+ * 539459aeb7d425140b62a3ec7dbf6dc8e408a306, and
+ * 520e17cd55896441042b14df2566a6eb610ed444
+ * Copyright (c) 2007 Loic Minier <lool at dooz.org>
+ * Benjamin Larsson
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * @file nellymoser.h
+ * The 3 alphanumeric copyright notices are md5summed; they are from the original
+ * implementors. The original code is available from http://code.google.com/p/nelly2pcm/
+ */
+
+#ifndef AVCODEC_NELLYMOSER_H
+#define AVCODEC_NELLYMOSER_H
+
+#include "avcodec.h"
+
+#define NELLY_BANDS 23
+#define NELLY_BLOCK_LEN 64
+#define NELLY_HEADER_BITS 116
+#define NELLY_DETAIL_BITS 198
+#define NELLY_BUF_LEN 128
+#define NELLY_FILL_LEN 124
+#define NELLY_BIT_CAP 6
+#define NELLY_BASE_OFF 4228
+#define NELLY_BASE_SHIFT 19
+#define NELLY_SAMPLES (2 * NELLY_BUF_LEN)
+
+extern const float ff_nelly_dequantization_table[127];
+extern const uint8_t ff_nelly_band_sizes_table[NELLY_BANDS];
+extern const uint16_t ff_nelly_init_table[64];
+extern const int16_t ff_nelly_delta_table[32];
+
+void ff_nelly_get_sample_bits(const float *buf, int *bits);
+
+#endif
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/nellymoserdec.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/nellymoserdec.c
new file mode 100644
index 000000000..942f8de30
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/nellymoserdec.c
@@ -0,0 +1,218 @@
+/*
+ * NellyMoser audio decoder
+ * Copyright (c) 2007 a840bda5870ba11f19698ff6eb9581dfb0f95fa5,
+ * 539459aeb7d425140b62a3ec7dbf6dc8e408a306, and
+ * 520e17cd55896441042b14df2566a6eb610ed444
+ * Copyright (c) 2007 Loic Minier <lool at dooz.org>
+ * Benjamin Larsson
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * @file libavcodec/nellymoserdec.c
+ * The 3 alphanumeric copyright notices are md5summed; they are from the original
+ * implementors. The original code is available from http://code.google.com/p/nelly2pcm/
+ */
+
+#include "nellymoser.h"
+#include "libavutil/lfg.h"
+#include "libavutil/random_seed.h"
+#include "avcodec.h"
+#include "dsputil.h"
+
+#define ALT_BITSTREAM_READER_LE
+#include "get_bits.h"
+
+#ifndef M_SQRT1_2
+#define M_SQRT1_2 0.7071067812
+#endif
+
+
+typedef struct NellyMoserDecodeContext {
+ AVCodecContext* avctx;
+ DECLARE_ALIGNED_16(float,float_buf)[NELLY_SAMPLES];
+ float state[128];
+ AVLFG random_state;
+ GetBitContext gb;
+ int add_bias;
+ float scale_bias;
+ DSPContext dsp;
+ FFTContext imdct_ctx;
+ DECLARE_ALIGNED_16(float,imdct_out)[NELLY_BUF_LEN * 2];
+} NellyMoserDecodeContext;
+
+static void overlap_and_window(NellyMoserDecodeContext *s, float *state, float *audio, float *a_in)
+{
+ int bot, top;
+
+ bot = 0;
+ top = NELLY_BUF_LEN-1;
+
+ while (bot < NELLY_BUF_LEN) {
+ audio[bot] = a_in [bot]*ff_sine_128[bot]
+ +state[bot]*ff_sine_128[top] + s->add_bias;
+
+ bot++;
+ top--;
+ }
+ memcpy(state, a_in + NELLY_BUF_LEN, sizeof(float)*NELLY_BUF_LEN);
+}
+
+static void nelly_decode_block(NellyMoserDecodeContext *s,
+ const unsigned char block[NELLY_BLOCK_LEN],
+ float audio[NELLY_SAMPLES])
+{
+ int i,j;
+ float buf[NELLY_FILL_LEN], pows[NELLY_FILL_LEN];
+ float *aptr, *bptr, *pptr, val, pval;
+ int bits[NELLY_BUF_LEN];
+ unsigned char v;
+
+ init_get_bits(&s->gb, block, NELLY_BLOCK_LEN * 8);
+
+ bptr = buf;
+ pptr = pows;
+ val = ff_nelly_init_table[get_bits(&s->gb, 6)];
+ for (i=0 ; i<NELLY_BANDS ; i++) {
+ if (i > 0)
+ val += ff_nelly_delta_table[get_bits(&s->gb, 5)];
+ pval = -pow(2, val/2048) * s->scale_bias;
+ for (j = 0; j < ff_nelly_band_sizes_table[i]; j++) {
+ *bptr++ = val;
+ *pptr++ = pval;
+ }
+
+ }
+
+ ff_nelly_get_sample_bits(buf, bits);
+
+ for (i = 0; i < 2; i++) {
+ aptr = audio + i * NELLY_BUF_LEN;
+
+ init_get_bits(&s->gb, block, NELLY_BLOCK_LEN * 8);
+ skip_bits_long(&s->gb, NELLY_HEADER_BITS + i*NELLY_DETAIL_BITS);
+
+ for (j = 0; j < NELLY_FILL_LEN; j++) {
+ if (bits[j] <= 0) {
+ aptr[j] = M_SQRT1_2*pows[j];
+ if (av_lfg_get(&s->random_state) & 1)
+ aptr[j] *= -1.0;
+ } else {
+ v = get_bits(&s->gb, bits[j]);
+ aptr[j] = ff_nelly_dequantization_table[(1<<bits[j])-1+v]*pows[j];
+ }
+ }
+ memset(&aptr[NELLY_FILL_LEN], 0,
+ (NELLY_BUF_LEN - NELLY_FILL_LEN) * sizeof(float));
+
+ ff_imdct_calc(&s->imdct_ctx, s->imdct_out, aptr);
+ /* XXX: overlapping and windowing should be part of a more
+ generic imdct function */
+ overlap_and_window(s, s->state, aptr, s->imdct_out);
+ }
+}
+
+static av_cold int decode_init(AVCodecContext * avctx) {
+ NellyMoserDecodeContext *s = avctx->priv_data;
+
+ s->avctx = avctx;
+ av_lfg_init(&s->random_state, 0);
+ ff_mdct_init(&s->imdct_ctx, 8, 1, 1.0);
+
+ dsputil_init(&s->dsp, avctx);
+
+ if(s->dsp.float_to_int16 == ff_float_to_int16_c) {
+ s->add_bias = 385;
+ s->scale_bias = 1.0/(8*32768);
+ } else {
+ s->add_bias = 0;
+ s->scale_bias = 1.0/(1*8);
+ }
+
+ /* Generate overlap window */
+ if (!ff_sine_128[127])
+ ff_init_ff_sine_windows(7);
+
+ avctx->sample_fmt = SAMPLE_FMT_S16;
+ avctx->channel_layout = CH_LAYOUT_MONO;
+ return 0;
+}
+
+static int decode_tag(AVCodecContext * avctx,
+ void *data, int *data_size,
+ const uint8_t * buf, int buf_size) {
+ NellyMoserDecodeContext *s = avctx->priv_data;
+ int blocks, i;
+ int16_t* samples;
+ *data_size = 0;
+ samples = (int16_t*)data;
+
+ if (buf_size < avctx->block_align)
+ return buf_size;
+
+ switch (buf_size) {
+ case 64: // 8000Hz
+ blocks = 1; break;
+ case 128: // 11025Hz
+ blocks = 2; break;
+ case 192: // 16000Hz
+ blocks = 3; break;
+ case 256: // 22050Hz
+ blocks = 4; break;
+ case 512: // 44100Hz
+ blocks = 8; break;
+ default:
+ av_log(avctx, AV_LOG_DEBUG, "Tag size %d.\n", buf_size);
+ return buf_size;
+ }
+
+ for (i=0 ; i<blocks ; i++) {
+ nelly_decode_block(s, &buf[i*NELLY_BLOCK_LEN], s->float_buf);
+ s->dsp.float_to_int16(&samples[i*NELLY_SAMPLES], s->float_buf, NELLY_SAMPLES);
+ *data_size += NELLY_SAMPLES*sizeof(int16_t);
+ }
+
+ return buf_size;
+}
+
+static av_cold int decode_end(AVCodecContext * avctx) {
+ NellyMoserDecodeContext *s = avctx->priv_data;
+
+ ff_mdct_end(&s->imdct_ctx);
+ return 0;
+}
+
+AVCodec nellymoser_decoder = {
+ "nellymoser",
+ CODEC_TYPE_AUDIO,
+ CODEC_ID_NELLYMOSER,
+ sizeof(NellyMoserDecodeContext),
+ decode_init,
+ NULL,
+ decode_end,
+ decode_tag,
+ /*.capabilities = */0,
+ /*.next = */NULL,
+ /*.flush = */NULL,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name = */NULL_IF_CONFIG_SMALL("Nellymoser Asao"),
+};
+
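A worked size example (not part of the patch), using only the constants from nellymoser.h and the switch in decode_tag() above: a 512-byte tag (the 44100 Hz case) is split into 512 / NELLY_BLOCK_LEN = 8 blocks of 64 bytes; each block decodes to NELLY_SAMPLES = 2 * NELLY_BUF_LEN = 256 samples, so *data_size ends up as 8 * 256 * sizeof(int16_t) = 4096 bytes of mono 16-bit output.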
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/options.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/options.c
new file mode 100644
index 000000000..ae6ee4785
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/options.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2001 Fabrice Bellard
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/options.c
+ * Options definition for AVCodecContext.
+ */
+
+#include "avcodec.h"
+
+static const char* context_to_name(void* ptr) {
+ AVCodecContext *avc= ptr;
+
+ if(avc && avc->codec && avc->codec->name)
+ return avc->codec->name;
+ else
+ return "NULL";
+}
+
+static AVClass av_codec_context_class = { "AVCodecContext", context_to_name };
+
+void avcodec_get_context_defaults(AVCodecContext *s){
+ memset(s, 0, sizeof(AVCodecContext));
+
+ s->av_class= &av_codec_context_class;
+
+ s->time_base.num=0; s->time_base.den=1;
+
+ s->get_buffer= avcodec_default_get_buffer;
+ s->release_buffer= avcodec_default_release_buffer;
+ s->get_format= avcodec_default_get_format;
+ s->execute= avcodec_default_execute;
+ s->execute2= avcodec_default_execute2;
+ s->sample_aspect_ratio.num=0; s->sample_aspect_ratio.den=1;
+ s->pix_fmt= PIX_FMT_NONE;
+ s->sample_fmt= SAMPLE_FMT_S16; // FIXME: set to NONE
+
+ s->palctrl = NULL;
+ s->reget_buffer= avcodec_default_reget_buffer;
+
+ s->bit_rate= 800*1000;
+ s->bit_rate_tolerance= s->bit_rate*10;
+ s->qmin= 2;
+ s->qmax= 31;
+ s->mb_lmin= FF_QP2LAMBDA * 2;
+ s->mb_lmax= FF_QP2LAMBDA * 31;
+ s->cqp = -1;
+ s->refs = 1;
+ s->directpred = 2;
+ s->qcompress= 0.5;
+ s->complexityblur = 20.0;
+ s->keyint_min = 25;
+ s->flags2 = CODEC_FLAG2_FASTPSKIP;
+ s->max_qdiff= 3;
+ s->b_quant_factor=1.25;
+ s->b_quant_offset=1.25;
+ s->i_quant_factor=-0.8;
+ s->i_quant_offset=0.0;
+ s->error_concealment= 3;
+ s->error_recognition= 1;
+ s->workaround_bugs= FF_BUG_AUTODETECT;
+ s->gop_size= 50;
+ s->me_method= ME_EPZS;
+ s->thread_count=1;
+ s->me_subpel_quality=8;
+ s->lmin= FF_QP2LAMBDA * s->qmin;
+ s->lmax= FF_QP2LAMBDA * s->qmax;
+ s->ildct_cmp= FF_CMP_VSAD;
+ s->profile= FF_PROFILE_UNKNOWN;
+ s->level= FF_LEVEL_UNKNOWN;
+ s->me_penalty_compensation= 256;
+ s->frame_skip_cmp= FF_CMP_DCTMAX;
+ s->nsse_weight= 8;
+ s->mv0_threshold= 256;
+ s->b_sensitivity= 40;
+ s->compression_level = FF_COMPRESSION_DEFAULT;
+ s->use_lpc = -1;
+ s->min_prediction_order = -1;
+ s->max_prediction_order = -1;
+ s->prediction_order_method = -1;
+ s->min_partition_order = -1;
+ s->max_partition_order = -1;
+ s->intra_quant_bias= FF_DEFAULT_QUANT_BIAS;
+ s->inter_quant_bias= FF_DEFAULT_QUANT_BIAS;
+ s->rc_max_available_vbv_use = 1.0/3;
+ s->rc_min_vbv_overflow_use = 3;
+}
+
+AVCodecContext *avcodec_alloc_context(void){
+ AVCodecContext *avctx= av_malloc(sizeof(AVCodecContext));
+
+ if(avctx==NULL) return NULL;
+
+ avcodec_get_context_defaults(avctx);
+
+ return avctx;
+}
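A minimal usage sketch of the allocator above (illustration only, not part of the patch); releasing the context with plain av_free() is an assumption based on the av_malloc() call in avcodec_alloc_context():

/* Illustration only: allocate a context, override one default, release it. */
static AVCodecContext *make_context(void)
{
    AVCodecContext *avctx = avcodec_alloc_context();
    if (!avctx)
        return NULL;
    avctx->time_base = (AVRational){1, 25}; /* replace the 0/1 default */
    return avctx;
}
/* ... and later: av_free(avctx); */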
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/parser.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/parser.c
new file mode 100644
index 000000000..3b5097950
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/parser.c
@@ -0,0 +1,343 @@
+/*
+ * Audio and Video frame extraction
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2003 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "parser.h"
+
+static AVCodecParser *av_first_parser = NULL;
+
+AVCodecParser* av_parser_next(AVCodecParser *p){
+ if(p) return p->next;
+ else return av_first_parser;
+}
+
+void av_register_codec_parser(AVCodecParser *parser)
+{
+ parser->next = av_first_parser;
+ av_first_parser = parser;
+}
+
+AVCodecParserContext *av_parser_init(int codec_id)
+{
+ AVCodecParserContext *s;
+ AVCodecParser *parser;
+ int ret;
+
+ if(codec_id == CODEC_ID_NONE)
+ return NULL;
+
+ for(parser = av_first_parser; parser != NULL; parser = parser->next) {
+ if (parser->codec_ids[0] == codec_id ||
+ parser->codec_ids[1] == codec_id ||
+ parser->codec_ids[2] == codec_id ||
+ parser->codec_ids[3] == codec_id ||
+ parser->codec_ids[4] == codec_id)
+ goto found;
+ }
+ return NULL;
+ found:
+ s = av_mallocz(sizeof(AVCodecParserContext));
+ if (!s)
+ return NULL;
+ s->parser = parser;
+ s->priv_data = av_mallocz(parser->priv_data_size);
+ if (!s->priv_data) {
+ av_free(s);
+ return NULL;
+ }
+ if (parser->parser_init) {
+ ret = parser->parser_init(s);
+ if (ret != 0) {
+ av_free(s->priv_data);
+ av_free(s);
+ return NULL;
+ }
+ }
+ s->fetch_timestamp=1;
+ s->pict_type = FF_I_TYPE;
+ s->key_frame = -1;
+ s->convergence_duration = AV_NOPTS_VALUE;
+ s->dts_sync_point = INT_MIN;
+ s->dts_ref_dts_delta = INT_MIN;
+ s->pts_dts_delta = INT_MIN;
+ return s;
+}
+
+void ff_fetch_timestamp(AVCodecParserContext *s, int off, int remove){
+ int i;
+
+ s->dts= s->pts= AV_NOPTS_VALUE;
+ s->pos= -1;
+ s->offset= 0;
+ for(i = 0; i < AV_PARSER_PTS_NB; i++) {
+ if ( s->cur_offset + off >= s->cur_frame_offset[i]
+ && (s->frame_offset < s->cur_frame_offset[i] ||
+ (!s->frame_offset && !s->next_frame_offset)) // first field/frame
+ //check is disabled because mpeg-ts doesn't send complete PES packets
+ && /*s->next_frame_offset + off <*/ s->cur_frame_end[i]){
+ s->dts= s->cur_frame_dts[i];
+ s->pts= s->cur_frame_pts[i];
+ s->pos= s->cur_frame_pos[i];
+ s->offset = s->next_frame_offset - s->cur_frame_offset[i];
+ if(remove)
+ s->cur_frame_offset[i]= INT64_MAX;
+ if(s->cur_offset + off < s->cur_frame_end[i])
+ break;
+ }
+ }
+}
+
+/**
+ *
+ * @param buf input
+ * @param buf_size input length; to signal EOF, this should be 0 (so that the last frame can be output)
+ * @param pts input presentation timestamp
+ * @param dts input decoding timestamp
+ * @param poutbuf will contain a pointer to the first byte of the output frame
+ * @param poutbuf_size will contain the length of the output frame
+ * @return the number of bytes of the input bitstream used
+ *
+ * Example:
+ * @code
+ * while(in_len){
+ * len = av_parser_parse(myparser, AVCodecContext, &data, &size,
+ * in_data, in_len,
+ * pts, dts);
+ * in_data += len;
+ * in_len -= len;
+ *
+ * if(size)
+ * decode_frame(data, size);
+ * }
+ * @endcode
+ *
+ * @deprecated Use av_parser_parse2() instead.
+ */
+int av_parser_parse(AVCodecParserContext *s,
+ AVCodecContext *avctx,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size,
+ int64_t pts, int64_t dts)
+{
+ return av_parser_parse2(s, avctx, poutbuf, poutbuf_size, buf, buf_size, pts, dts, AV_NOPTS_VALUE);
+}
+
+int av_parser_parse2(AVCodecParserContext *s,
+ AVCodecContext *avctx,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size,
+ int64_t pts, int64_t dts,
+ int64_t pos)
+{
+ int index, i;
+ uint8_t dummy_buf[FF_INPUT_BUFFER_PADDING_SIZE];
+
+ if (buf_size == 0) {
+ /* padding is always necessary even if EOF, so we add it here */
+ memset(dummy_buf, 0, sizeof(dummy_buf));
+ buf = dummy_buf;
+ } else if (s->cur_offset + buf_size !=
+ s->cur_frame_end[s->cur_frame_start_index]) { /* skip remainder packets */
+ /* add a new packet descriptor */
+ i = (s->cur_frame_start_index + 1) & (AV_PARSER_PTS_NB - 1);
+ s->cur_frame_start_index = i;
+ s->cur_frame_offset[i] = s->cur_offset;
+ s->cur_frame_end[i] = s->cur_offset + buf_size;
+ s->cur_frame_pts[i] = pts;
+ s->cur_frame_dts[i] = dts;
+ s->cur_frame_pos[i] = pos;
+ }
+
+ if (s->fetch_timestamp){
+ s->fetch_timestamp=0;
+ s->last_pts = s->pts;
+ s->last_dts = s->dts;
+ s->last_pos = s->pos;
+ ff_fetch_timestamp(s, 0, 0);
+ }
+
+ /* WARNING: the returned index can be negative */
+ index = s->parser->parser_parse(s, avctx, (const uint8_t **)poutbuf, poutbuf_size, buf, buf_size);
+//av_log(NULL, AV_LOG_DEBUG, "parser: in:%"PRId64", %"PRId64", out:%"PRId64", %"PRId64", in:%d out:%d id:%d\n", pts, dts, s->last_pts, s->last_dts, buf_size, *poutbuf_size, avctx->codec_id);
+ /* update the file pointer */
+ if (*poutbuf_size) {
+ /* fill the data for the current frame */
+ s->frame_offset = s->next_frame_offset;
+
+ /* offset of the next frame */
+ s->next_frame_offset = s->cur_offset + index;
+ s->fetch_timestamp=1;
+ }
+ if (index < 0)
+ index = 0;
+ s->cur_offset += index;
+ return index;
+}
+
+/**
+ *
+ * @return 0 if the output buffer is a subset of the input, 1 if it is allocated and must be freed
+ * @deprecated use AVBitstreamFilter
+ */
+int av_parser_change(AVCodecParserContext *s,
+ AVCodecContext *avctx,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size, int keyframe){
+
+ if(s && s->parser->split){
+ if((avctx->flags & CODEC_FLAG_GLOBAL_HEADER) || (avctx->flags2 & CODEC_FLAG2_LOCAL_HEADER)){
+ int i= s->parser->split(avctx, buf, buf_size);
+ buf += i;
+ buf_size -= i;
+ }
+ }
+
+ /* cast to avoid warning about discarding qualifiers */
+ *poutbuf= (uint8_t *) buf;
+ *poutbuf_size= buf_size;
+ if(avctx->extradata){
+ if( (keyframe && (avctx->flags2 & CODEC_FLAG2_LOCAL_HEADER))
+ /*||(s->pict_type != FF_I_TYPE && (s->flags & PARSER_FLAG_DUMP_EXTRADATA_AT_NOKEY))*/
+ /*||(? && (s->flags & PARSER_FLAG_DUMP_EXTRADATA_AT_BEGIN)*/){
+ int size= buf_size + avctx->extradata_size;
+ *poutbuf_size= size;
+ *poutbuf= av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
+
+ memcpy(*poutbuf, avctx->extradata, avctx->extradata_size);
+ memcpy((*poutbuf) + avctx->extradata_size, buf, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+void av_parser_close(AVCodecParserContext *s)
+{
+ if(s){
+ if (s->parser->parser_close)
+ s->parser->parser_close(s);
+ av_free(s->priv_data);
+ av_free(s);
+ }
+}
+
+/*****************************************************/
+
+/**
+ * Combines the (truncated) bitstream into a complete frame.
+ * @return 0 on success, -1 if no complete frame could be created yet, AVERROR(ENOMEM) on a memory allocation error
+ */
+int ff_combine_frame(ParseContext *pc, int next, const uint8_t **buf, int *buf_size)
+{
+#if 0
+ if(pc->overread){
+ printf("overread %d, state:%X next:%d index:%d o_index:%d\n", pc->overread, pc->state, next, pc->index, pc->overread_index);
+ printf("%X %X %X %X\n", (*buf)[0], (*buf)[1],(*buf)[2],(*buf)[3]);
+ }
+#endif
+
+ /* Copy overread bytes from last frame into buffer. */
+ for(; pc->overread>0; pc->overread--){
+ pc->buffer[pc->index++]= pc->buffer[pc->overread_index++];
+ }
+
+ /* flush remaining if EOF */
+ if(!*buf_size && next == END_NOT_FOUND){
+ next= 0;
+ }
+
+ pc->last_index= pc->index;
+
+ /* copy into buffer and return */
+ if(next == END_NOT_FOUND){
+ void* new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size, (*buf_size) + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
+
+ if(!new_buffer)
+ return AVERROR(ENOMEM);
+ pc->buffer = new_buffer;
+ memcpy(&pc->buffer[pc->index], *buf, *buf_size);
+ pc->index += *buf_size;
+ return -1;
+ }
+
+ *buf_size=
+ pc->overread_index= pc->index + next;
+
+ /* append to buffer */
+ if(pc->index){
+ void* new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size, next + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
+
+ if(!new_buffer)
+ return AVERROR(ENOMEM);
+ pc->buffer = new_buffer;
+ memcpy(&pc->buffer[pc->index], *buf, next + FF_INPUT_BUFFER_PADDING_SIZE );
+ pc->index = 0;
+ *buf= pc->buffer;
+ }
+
+ /* store overread bytes */
+ for(;next < 0; next++){
+ pc->state = (pc->state<<8) | pc->buffer[pc->last_index + next];
+ pc->state64 = (pc->state64<<8) | pc->buffer[pc->last_index + next];
+ pc->overread++;
+ }
+
+#if 0
+ if(pc->overread){
+ printf("overread %d, state:%X next:%d index:%d o_index:%d\n", pc->overread, pc->state, next, pc->index, pc->overread_index);
+ printf("%X %X %X %X\n", (*buf)[0], (*buf)[1],(*buf)[2],(*buf)[3]);
+ }
+#endif
+
+ return 0;
+}
+
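+/*
+ * A minimal sketch of how a parser callback typically drives ff_combine_frame();
+ * find_frame_end() stands for a hypothetical scanner that returns the offset of
+ * the next frame boundary in buf, or END_NOT_FOUND while the frame is incomplete:
+ *
+ *     static int foo_parse(AVCodecParserContext *s, AVCodecContext *avctx,
+ *                          const uint8_t **poutbuf, int *poutbuf_size,
+ *                          const uint8_t *buf, int buf_size)
+ *     {
+ *         ParseContext *pc = s->priv_data;
+ *         int next = find_frame_end(pc, buf, buf_size);
+ *
+ *         if (ff_combine_frame(pc, next, &buf, &buf_size) < 0) {
+ *             *poutbuf      = NULL;
+ *             *poutbuf_size = 0;
+ *             return buf_size;    // no complete frame yet, consume all input
+ *         }
+ *         *poutbuf      = buf;
+ *         *poutbuf_size = buf_size;
+ *         return next;
+ *     }
+ */
+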
+void ff_parse_close(AVCodecParserContext *s)
+{
+ ParseContext *pc = s->priv_data;
+
+ av_freep(&pc->buffer);
+}
+
+void ff_parse1_close(AVCodecParserContext *s)
+{
+ ParseContext1 *pc1 = s->priv_data;
+
+ av_free(pc1->pc.buffer);
+ av_free(pc1->enc);
+}
+
+/*************************/
+
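+/**
+ * Scans the buffer for the first Group-of-VOP (0x000001B3) or VOP (0x000001B6)
+ * start code, i.e. for the end of any headers preceding the picture data.
+ * @return the byte offset of that start code, or 0 if none is found
+ */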
+int ff_mpeg4video_split(AVCodecContext *avctx,
+ const uint8_t *buf, int buf_size)
+{
+ int i;
+ uint32_t state= -1;
+
+ for(i=0; i<buf_size; i++){
+ state= (state<<8) | buf[i];
+ if(state == 0x1B3 || state == 0x1B6)
+ return i-3;
+ }
+ return 0;
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/parser.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/parser.h
new file mode 100644
index 000000000..200b13be5
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/parser.h
@@ -0,0 +1,71 @@
+/*
+ * AVCodecParser prototypes and definitions
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2003 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_PARSER_H
+#define AVCODEC_PARSER_H
+
+#include "avcodec.h"
+
+typedef struct ParseContext{
+ uint8_t *buffer;
+ int index;
+ int last_index;
+ unsigned int buffer_size;
+ uint32_t state; ///< contains the last few bytes in MSB order
+ int frame_start_found;
+ int overread; ///< the number of bytes which were irreversibly read from the next frame
+ int overread_index; ///< the index into ParseContext.buffer of the overread bytes
+ uint64_t state64; ///< contains the last 8 bytes in MSB order
+ int64_t rtStart; /* ffdshow custom code */
+} ParseContext;
+
+struct MpegEncContext;
+
+typedef struct ParseContext1{
+ ParseContext pc;
+/* XXX/FIXME PC1 vs. PC */
+ /* MPEG-2-specific */
+ AVRational frame_rate;
+ int progressive_sequence;
+ int width, height;
+
+ /* XXX: suppress that, needed by MPEG-4 */
+ struct MpegEncContext *enc;
+ int first_picture;
+} ParseContext1;
+
+#define END_NOT_FOUND (-100)
+
+int ff_combine_frame(ParseContext *pc, int next, const uint8_t **buf, int *buf_size);
+int ff_mpeg4video_split(AVCodecContext *avctx, const uint8_t *buf,
+ int buf_size);
+void ff_parse_close(AVCodecParserContext *s);
+void ff_parse1_close(AVCodecParserContext *s);
+
+/**
+ * Fetches timestamps for a specific byte within the current access unit.
+ * @param off byte position within the access unit
+ * @param remove Found timestamps will be removed if set to 1, kept if set to 0.
+ */
+void ff_fetch_timestamp(AVCodecParserContext *s, int off, int remove);
+
+#endif /* AVCODEC_PARSER_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/put_bits.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/put_bits.h
new file mode 100644
index 000000000..c02027857
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/put_bits.h
@@ -0,0 +1,343 @@
+/*
+ * copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/put_bits.h
+ * bitstream writer API
+ */
+
+#ifndef AVCODEC_PUT_BITS_H
+#define AVCODEC_PUT_BITS_H
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <assert.h>
+#include "libavutil/bswap.h"
+#include "libavutil/common.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/log.h"
+#include "mathops.h"
+
+//#define ALT_BITSTREAM_WRITER
+//#define ALIGNED_BITSTREAM_WRITER
+
+/* buf and buf_end must be present and used by every alternative writer. */
+typedef struct PutBitContext {
+#ifdef ALT_BITSTREAM_WRITER
+ uint8_t *buf, *buf_end;
+ int index;
+#else
+ uint32_t bit_buf;
+ int bit_left;
+ uint8_t *buf, *buf_ptr, *buf_end;
+#endif
+ int size_in_bits;
+} PutBitContext;
+
+/**
+ * Initializes the PutBitContext s.
+ *
+ * @param buffer the buffer where to put bits
+ * @param buffer_size the size in bytes of buffer
+ */
+static inline void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
+{
+ if(buffer_size < 0) {
+ buffer_size = 0;
+ buffer = NULL;
+ }
+
+ s->size_in_bits= 8*buffer_size;
+ s->buf = buffer;
+ s->buf_end = s->buf + buffer_size;
+#ifdef ALT_BITSTREAM_WRITER
+ s->index=0;
+ ((uint32_t*)(s->buf))[0]=0;
+// memset(buffer, 0, buffer_size);
+#else
+ s->buf_ptr = s->buf;
+ s->bit_left=32;
+ s->bit_buf=0;
+#endif
+}
+
+/**
+ * Returns the total number of bits written to the bitstream.
+ */
+static inline int put_bits_count(PutBitContext *s)
+{
+#ifdef ALT_BITSTREAM_WRITER
+ return s->index;
+#else
+ return (s->buf_ptr - s->buf) * 8 + 32 - s->bit_left;
+#endif
+}
+
+/**
+ * Pads the end of the output stream with zeros.
+ */
+static inline void flush_put_bits(PutBitContext *s)
+{
+#ifdef ALT_BITSTREAM_WRITER
+ align_put_bits(s);
+#else
+#ifndef BITSTREAM_WRITER_LE
+ s->bit_buf<<= s->bit_left;
+#endif
+ while (s->bit_left < 32) {
+ /* XXX: should test end of buffer */
+#ifdef BITSTREAM_WRITER_LE
+ *s->buf_ptr++=s->bit_buf;
+ s->bit_buf>>=8;
+#else
+ *s->buf_ptr++=s->bit_buf >> 24;
+ s->bit_buf<<=8;
+#endif
+ s->bit_left+=8;
+ }
+ s->bit_left=32;
+ s->bit_buf=0;
+#endif
+}
+
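+/*
+ * Typical write sequence (a minimal sketch; "buf" is a caller-provided scratch
+ * buffer and the values written are arbitrary):
+ *
+ *     PutBitContext pb;
+ *     uint8_t buf[64];
+ *     int bits_used;
+ *
+ *     init_put_bits(&pb, buf, sizeof(buf));
+ *     put_bits(&pb, 3, 5);            // write the 3-bit value 0b101
+ *     put_bits(&pb, 11, 0x4af);       // up to 31 bits per call
+ *     put_bits32(&pb, 0xdeadbeef);    // exactly 32 bits
+ *     bits_used = put_bits_count(&pb);
+ *     flush_put_bits(&pb);            // pad the last partial byte with zeros
+ */
+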
+#if defined(ALT_BITSTREAM_WRITER) || defined(BITSTREAM_WRITER_LE)
+#define align_put_bits align_put_bits_unsupported_here
+#define ff_put_string ff_put_string_unsupported_here
+#define ff_copy_bits ff_copy_bits_unsupported_here
+#else
+/**
+ * Pads the bitstream with zeros up to the next byte boundary.
+ */
+void align_put_bits(PutBitContext *s);
+
+/**
+ * Puts the string @p string in the bitstream.
+ *
+ * @param terminate_string 0-terminates the written string if value is 1
+ */
+void ff_put_string(PutBitContext *pb, const char *string, int terminate_string);
+
+/**
+ * Copies the content of src to the bitstream.
+ *
+ * @param length the number of bits of src to copy
+ */
+void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length);
+#endif
+
+/**
+ * Writes up to 31 bits into a bitstream.
+ * Use put_bits32 to write 32 bits.
+ */
+static inline void put_bits(PutBitContext *s, int n, unsigned int value)
+#ifndef ALT_BITSTREAM_WRITER
+{
+ unsigned int bit_buf;
+ int bit_left;
+
+ // printf("put_bits=%d %x\n", n, value);
+ assert(n <= 31 && value < (1U << n));
+
+ bit_buf = s->bit_buf;
+ bit_left = s->bit_left;
+
+ // printf("n=%d value=%x cnt=%d buf=%x\n", n, value, bit_cnt, bit_buf);
+ /* XXX: optimize */
+#ifdef BITSTREAM_WRITER_LE
+ bit_buf |= value << (32 - bit_left);
+ if (n >= bit_left) {
+#if !HAVE_FAST_UNALIGNED
+ if (3 & (intptr_t) s->buf_ptr) {
+ AV_WL32(s->buf_ptr, bit_buf);
+ } else
+#endif
+ *(uint32_t *)s->buf_ptr = le2me_32(bit_buf);
+ s->buf_ptr+=4;
+ bit_buf = (bit_left==32)?0:value >> bit_left;
+ bit_left+=32;
+ }
+ bit_left-=n;
+#else
+ if (n < bit_left) {
+ bit_buf = (bit_buf<<n) | value;
+ bit_left-=n;
+ } else {
+ bit_buf<<=bit_left;
+ bit_buf |= value >> (n - bit_left);
+#if !HAVE_FAST_UNALIGNED
+ if (3 & (intptr_t) s->buf_ptr) {
+ AV_WB32(s->buf_ptr, bit_buf);
+ } else
+#endif
+ *(uint32_t *)s->buf_ptr = be2me_32(bit_buf);
+ //printf("bitbuf = %08x\n", bit_buf);
+ s->buf_ptr+=4;
+ bit_left+=32 - n;
+ bit_buf = value;
+ }
+#endif
+
+ s->bit_buf = bit_buf;
+ s->bit_left = bit_left;
+}
+#else /* ALT_BITSTREAM_WRITER defined */
+{
+# ifdef ALIGNED_BITSTREAM_WRITER
+# if ARCH_X86
+ __asm__ volatile(
+ "movl %0, %%ecx \n\t"
+ "xorl %%eax, %%eax \n\t"
+ "shrdl %%cl, %1, %%eax \n\t"
+ "shrl %%cl, %1 \n\t"
+ "movl %0, %%ecx \n\t"
+ "shrl $3, %%ecx \n\t"
+ "andl $0xFFFFFFFC, %%ecx \n\t"
+ "bswapl %1 \n\t"
+ "orl %1, (%2, %%ecx) \n\t"
+ "bswapl %%eax \n\t"
+ "addl %3, %0 \n\t"
+ "movl %%eax, 4(%2, %%ecx) \n\t"
+ : "=&r" (s->index), "=&r" (value)
+ : "r" (s->buf), "r" (n), "0" (s->index), "1" (value<<(-n))
+ : "%eax", "%ecx"
+ );
+# else
+ int index= s->index;
+ uint32_t *ptr= ((uint32_t *)s->buf)+(index>>5);
+
+ value<<= 32-n;
+
+ ptr[0] |= be2me_32(value>>(index&31));
+ ptr[1] = be2me_32(value<<(32-(index&31)));
+//if(n>24) printf("%d %d\n", n, value);
+ index+= n;
+ s->index= index;
+# endif
+# else //ALIGNED_BITSTREAM_WRITER
+# if ARCH_X86
+ __asm__ volatile(
+ "movl $7, %%ecx \n\t"
+ "andl %0, %%ecx \n\t"
+ "addl %3, %%ecx \n\t"
+ "negl %%ecx \n\t"
+ "shll %%cl, %1 \n\t"
+ "bswapl %1 \n\t"
+ "movl %0, %%ecx \n\t"
+ "shrl $3, %%ecx \n\t"
+ "orl %1, (%%ecx, %2) \n\t"
+ "addl %3, %0 \n\t"
+ "movl $0, 4(%%ecx, %2) \n\t"
+ : "=&r" (s->index), "=&r" (value)
+ : "r" (s->buf), "r" (n), "0" (s->index), "1" (value)
+ : "%ecx"
+ );
+# else
+ int index= s->index;
+ uint32_t *ptr= (uint32_t*)(((uint8_t *)s->buf)+(index>>3));
+
+ ptr[0] |= be2me_32(value<<(32-n-(index&7) ));
+ ptr[1] = 0;
+//if(n>24) printf("%d %d\n", n, value);
+ index+= n;
+ s->index= index;
+# endif
+# endif //!ALIGNED_BITSTREAM_WRITER
+}
+#endif
+
+static inline void put_sbits(PutBitContext *pb, int n, int32_t value)
+{
+ assert(n >= 0 && n <= 31);
+
+ put_bits(pb, n, value & ((1<<n)-1));
+}
+
+/**
+ * Writes exactly 32 bits into a bitstream.
+ */
+static void av_unused put_bits32(PutBitContext *s, uint32_t value)
+{
+ int lo = value & 0xffff;
+ int hi = value >> 16;
+#ifdef BITSTREAM_WRITER_LE
+ put_bits(s, 16, lo);
+ put_bits(s, 16, hi);
+#else
+ put_bits(s, 16, hi);
+ put_bits(s, 16, lo);
+#endif
+}
+
+/**
+ * Returns the pointer to the byte where the bitstream writer will put
+ * the next bit.
+ */
+static inline uint8_t* put_bits_ptr(PutBitContext *s)
+{
+#ifdef ALT_BITSTREAM_WRITER
+ return s->buf + (s->index>>3);
+#else
+ return s->buf_ptr;
+#endif
+}
+
+/**
+ * Skips the given number of bytes.
+ * PutBitContext must be flushed & aligned to a byte boundary before calling this.
+ */
+static inline void skip_put_bytes(PutBitContext *s, int n)
+{
+ assert((put_bits_count(s)&7)==0);
+#ifdef ALT_BITSTREAM_WRITER
+ /* FIXME: may need some cleaning of the buffer */
+ s->index += n<<3;
+#else
+ assert(s->bit_left==32);
+ s->buf_ptr += n;
+#endif
+}
+
+/**
+ * Skips the given number of bits.
+ * Must only be used if the actual values in the bitstream do not matter.
+ * If n is 0 the behavior is undefined.
+ */
+static inline void skip_put_bits(PutBitContext *s, int n)
+{
+#ifdef ALT_BITSTREAM_WRITER
+ s->index += n;
+#else
+ s->bit_left -= n;
+ s->buf_ptr-= 4*(s->bit_left>>5);
+ s->bit_left &= 31;
+#endif
+}
+
+/**
+ * Changes the end of the buffer.
+ *
+ * @param size the new size in bytes of the buffer where to put bits
+ */
+static inline void set_put_bits_buffer_size(PutBitContext *s, int size)
+{
+ s->buf_end= s->buf + size;
+}
+
+#endif /* AVCODEC_PUT_BITS_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rangecoder.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rangecoder.h
new file mode 100644
index 000000000..f5ce3684f
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rangecoder.h
@@ -0,0 +1,141 @@
+/*
+ * Range coder
+ * Copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file rangecoder.h
+ * Range coder.
+ */
+
+#ifndef AVCODEC_RANGECODER_H
+#define AVCODEC_RANGECODER_H
+
+#include <stdint.h>
+#include <assert.h>
+#include "libavutil/common.h"
+
+typedef struct RangeCoder{
+ int low;
+ int range;
+ int outstanding_count;
+ int outstanding_byte;
+ uint8_t zero_state[256];
+ uint8_t one_state[256];
+ uint8_t *bytestream_start;
+ uint8_t *bytestream;
+ uint8_t *bytestream_end;
+}RangeCoder;
+
+void ff_init_range_encoder(RangeCoder *c, uint8_t *buf, int buf_size);
+void ff_init_range_decoder(RangeCoder *c, const uint8_t *buf, int buf_size);
+int ff_rac_terminate(RangeCoder *c);
+void ff_build_rac_states(RangeCoder *c, int factor, int max_p);
+
+static inline void renorm_encoder(RangeCoder *c){
+ //FIXME optimize
+ while(c->range < 0x100){
+ if(c->outstanding_byte < 0){
+ c->outstanding_byte= c->low>>8;
+ }else if(c->low <= 0xFF00){
+ *c->bytestream++ = c->outstanding_byte;
+ for(;c->outstanding_count; c->outstanding_count--)
+ *c->bytestream++ = 0xFF;
+ c->outstanding_byte= c->low>>8;
+ }else if(c->low >= 0x10000){
+ *c->bytestream++ = c->outstanding_byte + 1;
+ for(;c->outstanding_count; c->outstanding_count--)
+ *c->bytestream++ = 0x00;
+ c->outstanding_byte= (c->low>>8) & 0xFF;
+ }else{
+ c->outstanding_count++;
+ }
+
+ c->low = (c->low & 0xFF)<<8;
+ c->range <<= 8;
+ }
+}
+
+static inline int get_rac_count(RangeCoder *c){
+ int x= c->bytestream - c->bytestream_start + c->outstanding_count;
+ if(c->outstanding_byte >= 0)
+ x++;
+ return 8*x - av_log2(c->range);
+}
+
+static inline void put_rac(RangeCoder *c, uint8_t * const state, int bit){
+ int range1= (c->range * (*state)) >> 8;
+
+ assert(*state);
+ assert(range1 < c->range);
+ assert(range1 > 0);
+ if(!bit){
+ c->range -= range1;
+ *state= c->zero_state[*state];
+ }else{
+ c->low += c->range - range1;
+ c->range = range1;
+ *state= c->one_state[*state];
+ }
+
+ renorm_encoder(c);
+}
+
+static inline void refill(RangeCoder *c){
+ if(c->range < 0x100){
+ c->range <<= 8;
+ c->low <<= 8;
+ if(c->bytestream < c->bytestream_end)
+ c->low+= c->bytestream[0];
+ c->bytestream++;
+ }
+}
+
+static inline int get_rac(RangeCoder *c, uint8_t * const state){
+ int range1= (c->range * (*state)) >> 8;
+ int av_unused one_mask;
+
+ c->range -= range1;
+#if 1
+ if(c->low < c->range){
+ *state= c->zero_state[*state];
+ refill(c);
+ return 0;
+ }else{
+ c->low -= c->range;
+ *state= c->one_state[*state];
+ c->range = range1;
+ refill(c);
+ return 1;
+ }
+#else
+ one_mask= (c->range - c->low-1)>>31;
+
+ c->low -= c->range & one_mask;
+ c->range += (range1 - c->range) & one_mask;
+
+ *state= c->zero_state[(*state) + (256&one_mask)];
+
+ refill(c);
+
+ return one_mask&1;
+#endif
+}
+
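+/*
+ * A minimal encode/decode round-trip sketch. The state starts at 128 (an even
+ * probability); the ff_build_rac_states() parameters are illustrative values
+ * and must be identical on the encoder and decoder side:
+ *
+ *     RangeCoder c;
+ *     uint8_t buf[1024], state = 128;
+ *     int bytes, bit;
+ *
+ *     ff_init_range_encoder(&c, buf, sizeof(buf));
+ *     ff_build_rac_states(&c, 0.05*(1LL<<32), 256-8);
+ *     put_rac(&c, &state, 1);              // encode a single bit
+ *     bytes = ff_rac_terminate(&c);        // flush, returns the coded size
+ *
+ *     state = 128;                         // decoder starts from the same state
+ *     ff_init_range_decoder(&c, buf, bytes);
+ *     ff_build_rac_states(&c, 0.05*(1LL<<32), 256-8);
+ *     bit = get_rac(&c, &state);           // bit == 1
+ */
+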
+#endif /* AVCODEC_RANGECODER_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ratecontrol.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ratecontrol.h
new file mode 100644
index 000000000..9a070e997
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/ratecontrol.h
@@ -0,0 +1,106 @@
+/*
+ * Ratecontrol
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
+ * Copyright (c) 2002-2004 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_RATECONTROL_H
+#define AVCODEC_RATECONTROL_H
+
+/**
+ * @file ratecontrol.h
+ * ratecontrol header.
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include "eval.h"
+
+typedef struct Predictor{
+ double coeff;
+ double count;
+ double decay;
+} Predictor;
+
+typedef struct RateControlEntry{
+ int rcOverrideIndex1,rcOverrideQscale; //Milan Cutka - don't obey qmin/qmax in RcOverride sections
+ int pict_type;
+ float qscale;
+ int mv_bits;
+ int i_tex_bits;
+ int p_tex_bits;
+ int misc_bits;
+ int header_bits;
+ uint64_t expected_bits;
+ int new_pict_type;
+ float new_qscale;
+ int mc_mb_var_sum;
+ int mb_var_sum;
+ int i_count;
+ int skip_count;
+ int f_code;
+ int b_code;
+}RateControlEntry;
+
+/**
+ * rate control context.
+ */
+typedef struct RateControlContext{
+ FILE *stats_file;
+ int num_entries; ///< number of RateControlEntries
+ RateControlEntry *entry;
+ double buffer_index; ///< amount of bits in the video/audio buffer
+ Predictor pred[5];
+ double short_term_qsum; ///< sum of recent qscales
+ double short_term_qcount; ///< count of recent qscales
+ double pass1_rc_eq_output_sum;///< sum of the output of the rc equation, this is used for normalization
+ double pass1_wanted_bits; ///< bits which should have been output by the pass1 code (including complexity init)
+ double last_qscale;
+ double last_qscale_for[5]; ///< last qscale for a specific pict type, used for max_diff & ipb factor stuff
+ int last_mc_mb_var_sum;
+ int last_mb_var_sum;
+ uint64_t i_cplx_sum[5];
+ uint64_t p_cplx_sum[5];
+ uint64_t mv_bits_sum[5];
+ uint64_t qscale_sum[5];
+ int frame_count[5];
+ int last_non_b_pict_type;
+
+ void *non_lavc_opaque; ///< context for non lavc rc code (for example xvid)
+ float dry_run_qscale; ///< for xvid rc
+ int last_picture_number; ///< for xvid rc
+ AVEvalExpr * rc_eq_eval;
+}RateControlContext;
+
+struct MpegEncContext;
+
+/* rate control */
+int ff_rate_control_init(struct MpegEncContext *s);
+float ff_rate_estimate_qscale(struct MpegEncContext *s, int dry_run);
+void ff_write_pass1_stats(struct MpegEncContext *s);
+void ff_rate_control_uninit(struct MpegEncContext *s);
+int ff_vbv_update(struct MpegEncContext *s, int frame_size);
+void ff_get_2pass_fcode(struct MpegEncContext *s);
+
+int ff_xvid_rate_control_init(struct MpegEncContext *s);
+void ff_xvid_rate_control_uninit(struct MpegEncContext *s);
+float ff_xvid_rate_estimate_qscale(struct MpegEncContext *s, int dry_run);
+
+#endif /* AVCODEC_RATECONTROL_H */
+
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rectangle.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rectangle.h
new file mode 100644
index 000000000..f0574261e
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rectangle.h
@@ -0,0 +1,124 @@
+/*
+ * rectangle filling function
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file rectangle.h
+ * useful rectangle filling function
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#ifndef AVCODEC_RECTANGLE_H
+#define AVCODEC_RECTANGLE_H
+
+#include <assert.h>
+#include "config.h"
+#include "libavutil/common.h"
+#include "dsputil.h"
+
+/**
+ * fill a rectangle.
+ * @param h height of the rectangle, should be a constant
+ * @param w width of the rectangle, should be a constant
+ * @param size the size of val (1 or 4), should be a constant
+ */
+static av_always_inline void fill_rectangle(void *vp, int w, int h, int stride, uint32_t val, int size){
+ uint8_t *p= (uint8_t*)vp;
+ assert(size==1 || size==4);
+ assert(w<=4);
+
+ w *= size;
+ stride *= size;
+
+ assert((((long)vp)&(FFMIN(w, STRIDE_ALIGN)-1)) == 0);
+ assert((stride&(w-1))==0);
+ if(w==2){
+ const uint16_t v= size==4 ? val : val*0x0101;
+ *(uint16_t*)(p + 0*stride)= v;
+ if(h==1) return;
+ *(uint16_t*)(p + 1*stride)= v;
+ if(h==2) return;
+ *(uint16_t*)(p + 2*stride)= v;
+ *(uint16_t*)(p + 3*stride)= v;
+ }else if(w==4){
+ const uint32_t v= size==4 ? val : val*0x01010101;
+ *(uint32_t*)(p + 0*stride)= v;
+ if(h==1) return;
+ *(uint32_t*)(p + 1*stride)= v;
+ if(h==2) return;
+ *(uint32_t*)(p + 2*stride)= v;
+ *(uint32_t*)(p + 3*stride)= v;
+ }else if(w==8){
+ //gcc can't optimize 64bit math on x86_32
+#if HAVE_FAST_64BIT
+ const uint64_t v= val*0x0100000001ULL;
+ *(uint64_t*)(p + 0*stride)= v;
+ if(h==1) return;
+ *(uint64_t*)(p + 1*stride)= v;
+ if(h==2) return;
+ *(uint64_t*)(p + 2*stride)= v;
+ *(uint64_t*)(p + 3*stride)= v;
+ }else if(w==16){
+ const uint64_t v= val*0x0100000001ULL;
+ *(uint64_t*)(p + 0+0*stride)= v;
+ *(uint64_t*)(p + 8+0*stride)= v;
+ *(uint64_t*)(p + 0+1*stride)= v;
+ *(uint64_t*)(p + 8+1*stride)= v;
+ if(h==2) return;
+ *(uint64_t*)(p + 0+2*stride)= v;
+ *(uint64_t*)(p + 8+2*stride)= v;
+ *(uint64_t*)(p + 0+3*stride)= v;
+ *(uint64_t*)(p + 8+3*stride)= v;
+#else
+ *(uint32_t*)(p + 0+0*stride)= val;
+ *(uint32_t*)(p + 4+0*stride)= val;
+ if(h==1) return;
+ *(uint32_t*)(p + 0+1*stride)= val;
+ *(uint32_t*)(p + 4+1*stride)= val;
+ if(h==2) return;
+ *(uint32_t*)(p + 0+2*stride)= val;
+ *(uint32_t*)(p + 4+2*stride)= val;
+ *(uint32_t*)(p + 0+3*stride)= val;
+ *(uint32_t*)(p + 4+3*stride)= val;
+ }else if(w==16){
+ *(uint32_t*)(p + 0+0*stride)= val;
+ *(uint32_t*)(p + 4+0*stride)= val;
+ *(uint32_t*)(p + 8+0*stride)= val;
+ *(uint32_t*)(p +12+0*stride)= val;
+ *(uint32_t*)(p + 0+1*stride)= val;
+ *(uint32_t*)(p + 4+1*stride)= val;
+ *(uint32_t*)(p + 8+1*stride)= val;
+ *(uint32_t*)(p +12+1*stride)= val;
+ if(h==2) return;
+ *(uint32_t*)(p + 0+2*stride)= val;
+ *(uint32_t*)(p + 4+2*stride)= val;
+ *(uint32_t*)(p + 8+2*stride)= val;
+ *(uint32_t*)(p +12+2*stride)= val;
+ *(uint32_t*)(p + 0+3*stride)= val;
+ *(uint32_t*)(p + 4+3*stride)= val;
+ *(uint32_t*)(p + 8+3*stride)= val;
+ *(uint32_t*)(p +12+3*stride)= val;
+#endif
+ }else
+ assert(0);
+ assert(h==4);
+}
+
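+/*
+ * Usage sketch: clear a 4x4 block of byte flags inside a larger, suitably
+ * aligned cache that is laid out with a stride of 8 bytes ("cache" is a
+ * hypothetical scratch array):
+ *
+ *     uint8_t cache[8*8];
+ *     fill_rectangle(cache, 4, 4, 8, 0, 1);   // w=4, h=4, stride=8, val=0, size=1
+ */
+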
+#endif /* AVCODEC_RECTANGLE_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rl.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rl.h
new file mode 100644
index 000000000..773d96e49
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rl.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2000-2002 Fabrice Bellard
+ * Copyright (c) 2002-2004 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/rl.h
+ * rl header.
+ */
+
+#ifndef AVCODEC_RL_H
+#define AVCODEC_RL_H
+
+#include <stdint.h>
+#include "get_bits.h"
+
+/* run length table */
+#define MAX_RUN 64
+#define MAX_LEVEL 64
+
+/** RLTable. */
+typedef struct RLTable {
+ int n; ///< number of entries of table_vlc minus 1
+ int last; ///< number of values for last = 0
+ const uint16_t (*table_vlc)[2];
+ const int8_t *table_run;
+ const int8_t *table_level;
+ uint8_t *index_run[2]; ///< encoding only
+ int8_t *max_level[2]; ///< encoding & decoding
+ int8_t *max_run[2]; ///< encoding & decoding
+ VLC vlc; ///< decoding only deprecated FIXME remove
+ RL_VLC_ELEM *rl_vlc[32]; ///< decoding only
+} RLTable;
+
+/**
+ *
+ * @param static_store static uint8_t array[2][2*MAX_RUN + MAX_LEVEL + 3] which will hold
+ * the level and run tables; if this is NULL, av_malloc() will be used
+ */
+void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3]);
+void init_vlc_rl(RLTable *rl);
+
+#define INIT_VLC_RL(rl, static_size)\
+{\
+ int q;\
+ static RL_VLC_ELEM rl_vlc_table[32][static_size];\
+ INIT_VLC_STATIC(&rl.vlc, 9, rl.n + 1,\
+ &rl.table_vlc[0][1], 4, 2,\
+ &rl.table_vlc[0][0], 4, 2, static_size);\
+\
+ if(!rl.rl_vlc[0]){\
+ for(q=0; q<32; q++)\
+ rl.rl_vlc[q]= rl_vlc_table[q];\
+\
+ init_vlc_rl(&rl);\
+ }\
+}
+
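+/*
+ * A minimal initialization sketch for decoding; "my_rl" stands for a fully
+ * populated RLTable and the static VLC size of 554 entries is only an
+ * illustrative value:
+ *
+ *     static uint8_t rl_store[2][2*MAX_RUN + MAX_LEVEL + 3];
+ *
+ *     init_rl(&my_rl, rl_store);     // builds index_run/max_level/max_run
+ *     INIT_VLC_RL(my_rl, 554);       // builds the static VLC and rl_vlc tables
+ */
+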
+static inline int get_rl_index(const RLTable *rl, int last, int run, int level)
+{
+ int index;
+ index = rl->index_run[last][run];
+ if (index >= rl->n)
+ return rl->n;
+ if (level > rl->max_level[last][run])
+ return rl->n;
+ return index + level - 1;
+}
+
+#endif /* AVCODEC_RL_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv10.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv10.c
new file mode 100644
index 000000000..11397049b
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv10.c
@@ -0,0 +1,739 @@
+/*
+ * RV10/RV20 decoder
+ * Copyright (c) 2000,2001 Fabrice Bellard
+ * Copyright (c) 2002-2004 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/rv10.c
+ * RV10/RV20 decoder
+ */
+
+#include "avcodec.h"
+#include "dsputil.h"
+#include "mpegvideo.h"
+#include "h263.h"
+
+//#define DEBUG
+
+#define DC_VLC_BITS 14 //FIXME find a better solution
+
+static const uint16_t rv_lum_code[256] =
+{
+ 0x3e7f, 0x0f00, 0x0f01, 0x0f02, 0x0f03, 0x0f04, 0x0f05, 0x0f06,
+ 0x0f07, 0x0f08, 0x0f09, 0x0f0a, 0x0f0b, 0x0f0c, 0x0f0d, 0x0f0e,
+ 0x0f0f, 0x0f10, 0x0f11, 0x0f12, 0x0f13, 0x0f14, 0x0f15, 0x0f16,
+ 0x0f17, 0x0f18, 0x0f19, 0x0f1a, 0x0f1b, 0x0f1c, 0x0f1d, 0x0f1e,
+ 0x0f1f, 0x0f20, 0x0f21, 0x0f22, 0x0f23, 0x0f24, 0x0f25, 0x0f26,
+ 0x0f27, 0x0f28, 0x0f29, 0x0f2a, 0x0f2b, 0x0f2c, 0x0f2d, 0x0f2e,
+ 0x0f2f, 0x0f30, 0x0f31, 0x0f32, 0x0f33, 0x0f34, 0x0f35, 0x0f36,
+ 0x0f37, 0x0f38, 0x0f39, 0x0f3a, 0x0f3b, 0x0f3c, 0x0f3d, 0x0f3e,
+ 0x0f3f, 0x0380, 0x0381, 0x0382, 0x0383, 0x0384, 0x0385, 0x0386,
+ 0x0387, 0x0388, 0x0389, 0x038a, 0x038b, 0x038c, 0x038d, 0x038e,
+ 0x038f, 0x0390, 0x0391, 0x0392, 0x0393, 0x0394, 0x0395, 0x0396,
+ 0x0397, 0x0398, 0x0399, 0x039a, 0x039b, 0x039c, 0x039d, 0x039e,
+ 0x039f, 0x00c0, 0x00c1, 0x00c2, 0x00c3, 0x00c4, 0x00c5, 0x00c6,
+ 0x00c7, 0x00c8, 0x00c9, 0x00ca, 0x00cb, 0x00cc, 0x00cd, 0x00ce,
+ 0x00cf, 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056,
+ 0x0057, 0x0020, 0x0021, 0x0022, 0x0023, 0x000c, 0x000d, 0x0004,
+ 0x0000, 0x0005, 0x000e, 0x000f, 0x0024, 0x0025, 0x0026, 0x0027,
+ 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f,
+ 0x00d0, 0x00d1, 0x00d2, 0x00d3, 0x00d4, 0x00d5, 0x00d6, 0x00d7,
+ 0x00d8, 0x00d9, 0x00da, 0x00db, 0x00dc, 0x00dd, 0x00de, 0x00df,
+ 0x03a0, 0x03a1, 0x03a2, 0x03a3, 0x03a4, 0x03a5, 0x03a6, 0x03a7,
+ 0x03a8, 0x03a9, 0x03aa, 0x03ab, 0x03ac, 0x03ad, 0x03ae, 0x03af,
+ 0x03b0, 0x03b1, 0x03b2, 0x03b3, 0x03b4, 0x03b5, 0x03b6, 0x03b7,
+ 0x03b8, 0x03b9, 0x03ba, 0x03bb, 0x03bc, 0x03bd, 0x03be, 0x03bf,
+ 0x0f40, 0x0f41, 0x0f42, 0x0f43, 0x0f44, 0x0f45, 0x0f46, 0x0f47,
+ 0x0f48, 0x0f49, 0x0f4a, 0x0f4b, 0x0f4c, 0x0f4d, 0x0f4e, 0x0f4f,
+ 0x0f50, 0x0f51, 0x0f52, 0x0f53, 0x0f54, 0x0f55, 0x0f56, 0x0f57,
+ 0x0f58, 0x0f59, 0x0f5a, 0x0f5b, 0x0f5c, 0x0f5d, 0x0f5e, 0x0f5f,
+ 0x0f60, 0x0f61, 0x0f62, 0x0f63, 0x0f64, 0x0f65, 0x0f66, 0x0f67,
+ 0x0f68, 0x0f69, 0x0f6a, 0x0f6b, 0x0f6c, 0x0f6d, 0x0f6e, 0x0f6f,
+ 0x0f70, 0x0f71, 0x0f72, 0x0f73, 0x0f74, 0x0f75, 0x0f76, 0x0f77,
+ 0x0f78, 0x0f79, 0x0f7a, 0x0f7b, 0x0f7c, 0x0f7d, 0x0f7e, 0x0f7f,
+};
+
+static const uint8_t rv_lum_bits[256] =
+{
+ 14, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 7, 7, 7, 7, 7, 7, 7,
+ 7, 6, 6, 6, 6, 5, 5, 4,
+ 2, 4, 5, 5, 6, 6, 6, 6,
+ 7, 7, 7, 7, 7, 7, 7, 7,
+ 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8,
+ 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+};
+
+static const uint16_t rv_chrom_code[256] =
+{
+ 0xfe7f, 0x3f00, 0x3f01, 0x3f02, 0x3f03, 0x3f04, 0x3f05, 0x3f06,
+ 0x3f07, 0x3f08, 0x3f09, 0x3f0a, 0x3f0b, 0x3f0c, 0x3f0d, 0x3f0e,
+ 0x3f0f, 0x3f10, 0x3f11, 0x3f12, 0x3f13, 0x3f14, 0x3f15, 0x3f16,
+ 0x3f17, 0x3f18, 0x3f19, 0x3f1a, 0x3f1b, 0x3f1c, 0x3f1d, 0x3f1e,
+ 0x3f1f, 0x3f20, 0x3f21, 0x3f22, 0x3f23, 0x3f24, 0x3f25, 0x3f26,
+ 0x3f27, 0x3f28, 0x3f29, 0x3f2a, 0x3f2b, 0x3f2c, 0x3f2d, 0x3f2e,
+ 0x3f2f, 0x3f30, 0x3f31, 0x3f32, 0x3f33, 0x3f34, 0x3f35, 0x3f36,
+ 0x3f37, 0x3f38, 0x3f39, 0x3f3a, 0x3f3b, 0x3f3c, 0x3f3d, 0x3f3e,
+ 0x3f3f, 0x0f80, 0x0f81, 0x0f82, 0x0f83, 0x0f84, 0x0f85, 0x0f86,
+ 0x0f87, 0x0f88, 0x0f89, 0x0f8a, 0x0f8b, 0x0f8c, 0x0f8d, 0x0f8e,
+ 0x0f8f, 0x0f90, 0x0f91, 0x0f92, 0x0f93, 0x0f94, 0x0f95, 0x0f96,
+ 0x0f97, 0x0f98, 0x0f99, 0x0f9a, 0x0f9b, 0x0f9c, 0x0f9d, 0x0f9e,
+ 0x0f9f, 0x03c0, 0x03c1, 0x03c2, 0x03c3, 0x03c4, 0x03c5, 0x03c6,
+ 0x03c7, 0x03c8, 0x03c9, 0x03ca, 0x03cb, 0x03cc, 0x03cd, 0x03ce,
+ 0x03cf, 0x00e0, 0x00e1, 0x00e2, 0x00e3, 0x00e4, 0x00e5, 0x00e6,
+ 0x00e7, 0x0030, 0x0031, 0x0032, 0x0033, 0x0008, 0x0009, 0x0002,
+ 0x0000, 0x0003, 0x000a, 0x000b, 0x0034, 0x0035, 0x0036, 0x0037,
+ 0x00e8, 0x00e9, 0x00ea, 0x00eb, 0x00ec, 0x00ed, 0x00ee, 0x00ef,
+ 0x03d0, 0x03d1, 0x03d2, 0x03d3, 0x03d4, 0x03d5, 0x03d6, 0x03d7,
+ 0x03d8, 0x03d9, 0x03da, 0x03db, 0x03dc, 0x03dd, 0x03de, 0x03df,
+ 0x0fa0, 0x0fa1, 0x0fa2, 0x0fa3, 0x0fa4, 0x0fa5, 0x0fa6, 0x0fa7,
+ 0x0fa8, 0x0fa9, 0x0faa, 0x0fab, 0x0fac, 0x0fad, 0x0fae, 0x0faf,
+ 0x0fb0, 0x0fb1, 0x0fb2, 0x0fb3, 0x0fb4, 0x0fb5, 0x0fb6, 0x0fb7,
+ 0x0fb8, 0x0fb9, 0x0fba, 0x0fbb, 0x0fbc, 0x0fbd, 0x0fbe, 0x0fbf,
+ 0x3f40, 0x3f41, 0x3f42, 0x3f43, 0x3f44, 0x3f45, 0x3f46, 0x3f47,
+ 0x3f48, 0x3f49, 0x3f4a, 0x3f4b, 0x3f4c, 0x3f4d, 0x3f4e, 0x3f4f,
+ 0x3f50, 0x3f51, 0x3f52, 0x3f53, 0x3f54, 0x3f55, 0x3f56, 0x3f57,
+ 0x3f58, 0x3f59, 0x3f5a, 0x3f5b, 0x3f5c, 0x3f5d, 0x3f5e, 0x3f5f,
+ 0x3f60, 0x3f61, 0x3f62, 0x3f63, 0x3f64, 0x3f65, 0x3f66, 0x3f67,
+ 0x3f68, 0x3f69, 0x3f6a, 0x3f6b, 0x3f6c, 0x3f6d, 0x3f6e, 0x3f6f,
+ 0x3f70, 0x3f71, 0x3f72, 0x3f73, 0x3f74, 0x3f75, 0x3f76, 0x3f77,
+ 0x3f78, 0x3f79, 0x3f7a, 0x3f7b, 0x3f7c, 0x3f7d, 0x3f7e, 0x3f7f,
+};
+
+static const uint8_t rv_chrom_bits[256] =
+{
+ 16, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 8, 8, 8, 8, 8, 8, 8,
+ 8, 6, 6, 6, 6, 4, 4, 3,
+ 2, 3, 4, 4, 6, 6, 6, 6,
+ 8, 8, 8, 8, 8, 8, 8, 8,
+ 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14,
+};
+
+static VLC rv_dc_lum, rv_dc_chrom;
+
+int rv_decode_dc(MpegEncContext *s, int n)
+{
+ int code;
+
+ if (n < 4) {
+ code = get_vlc2(&s->gb, rv_dc_lum.table, DC_VLC_BITS, 2);
+ if (code < 0) {
+ /* XXX: I don't understand why they use LONGER codes than
+ necessary. The following code would be completely useless
+ if they had thought about it !!! */
+ code = get_bits(&s->gb, 7);
+ if (code == 0x7c) {
+ code = (int8_t)(get_bits(&s->gb, 7) + 1);
+ } else if (code == 0x7d) {
+ code = -128 + get_bits(&s->gb, 7);
+ } else if (code == 0x7e) {
+ if (get_bits1(&s->gb) == 0)
+ code = (int8_t)(get_bits(&s->gb, 8) + 1);
+ else
+ code = (int8_t)(get_bits(&s->gb, 8));
+ } else if (code == 0x7f) {
+ skip_bits(&s->gb, 11);
+ code = 1;
+ }
+ } else {
+ code -= 128;
+ }
+ } else {
+ code = get_vlc2(&s->gb, rv_dc_chrom.table, DC_VLC_BITS, 2);
+ /* same remark */
+ if (code < 0) {
+ code = get_bits(&s->gb, 9);
+ if (code == 0x1fc) {
+ code = (int8_t)(get_bits(&s->gb, 7) + 1);
+ } else if (code == 0x1fd) {
+ code = -128 + get_bits(&s->gb, 7);
+ } else if (code == 0x1fe) {
+ skip_bits(&s->gb, 9);
+ code = 1;
+ } else {
+ av_log(s->avctx, AV_LOG_ERROR, "chroma dc error\n");
+ return 0xffff;
+ }
+ } else {
+ code -= 128;
+ }
+ }
+ return -code;
+}
+
+/* read RV 1.0 compatible frame header */
+static int rv10_decode_picture_header(MpegEncContext *s)
+{
+ int mb_count, pb_frame, marker, unk, mb_xy;
+
+ marker = get_bits1(&s->gb);
+
+ if (get_bits1(&s->gb))
+ s->pict_type = FF_P_TYPE;
+ else
+ s->pict_type = FF_I_TYPE;
+ if(!marker) av_log(s->avctx, AV_LOG_ERROR, "marker missing\n");
+ pb_frame = get_bits1(&s->gb);
+
+ dprintf(s->avctx, "pict_type=%d pb_frame=%d\n", s->pict_type, pb_frame);
+
+ if (pb_frame){
+ av_log(s->avctx, AV_LOG_ERROR, "pb frame not supported\n");
+ return -1;
+ }
+
+ s->qscale = get_bits(&s->gb, 5);
+ if(s->qscale==0){
+ av_log(s->avctx, AV_LOG_ERROR, "error, qscale:0\n");
+ return -1;
+ }
+
+ if (s->pict_type == FF_I_TYPE) {
+ if (s->rv10_version == 3) {
+ /* specific MPEG like DC coding not used */
+ s->last_dc[0] = get_bits(&s->gb, 8);
+ s->last_dc[1] = get_bits(&s->gb, 8);
+ s->last_dc[2] = get_bits(&s->gb, 8);
+ dprintf(s->avctx, "DC:%d %d %d\n", s->last_dc[0],
+ s->last_dc[1], s->last_dc[2]);
+ }
+ }
+ /* if multiple packets per frame are sent, the position at which
+ to display the macroblocks is coded here */
+
+ mb_xy= s->mb_x + s->mb_y*s->mb_width;
+ if(show_bits(&s->gb, 12)==0 || (mb_xy && mb_xy < s->mb_num)){
+ s->mb_x = get_bits(&s->gb, 6); /* mb_x */
+ s->mb_y = get_bits(&s->gb, 6); /* mb_y */
+ mb_count = get_bits(&s->gb, 12);
+ } else {
+ s->mb_x = 0;
+ s->mb_y = 0;
+ mb_count = s->mb_width * s->mb_height;
+ }
+ unk= get_bits(&s->gb, 3); /* ignored */
+ s->f_code = 1;
+ s->unrestricted_mv = 1;
+
+ return mb_count;
+}
+
+static int rv20_decode_picture_header(MpegEncContext *s)
+{
+ int seq, mb_pos, i;
+
+#if 0
+ GetBitContext gb= s->gb;
+ for(i=0; i<64; i++){
+ av_log(s->avctx, AV_LOG_DEBUG, "%d", get_bits1(&gb));
+ if(i%4==3) av_log(s->avctx, AV_LOG_DEBUG, " ");
+ }
+ av_log(s->avctx, AV_LOG_DEBUG, "\n");
+#endif
+#if 0
+ av_log(s->avctx, AV_LOG_DEBUG, "%3dx%03d/%02Xx%02X ", s->width, s->height, s->width/4, s->height/4);
+ for(i=0; i<s->avctx->extradata_size; i++){
+ av_log(s->avctx, AV_LOG_DEBUG, "%02X ", ((uint8_t*)s->avctx->extradata)[i]);
+ if(i%4==3) av_log(s->avctx, AV_LOG_DEBUG, " ");
+ }
+ av_log(s->avctx, AV_LOG_DEBUG, "\n");
+#endif
+
+ if(s->avctx->sub_id == 0x30202002 || s->avctx->sub_id == 0x30203002){
+ if (get_bits(&s->gb, 3)){
+ av_log(s->avctx, AV_LOG_ERROR, "unknown triplet set\n");
+ return -1;
+ }
+ }
+
+ i= get_bits(&s->gb, 2);
+ switch(i){
+ case 0: s->pict_type= FF_I_TYPE; break;
+ case 1: s->pict_type= FF_I_TYPE; break; //hmm ...
+ case 2: s->pict_type= FF_P_TYPE; break;
+ case 3: s->pict_type= FF_B_TYPE; break;
+ default:
+ av_log(s->avctx, AV_LOG_ERROR, "unknown frame type\n");
+ return -1;
+ }
+
+ if(s->last_picture_ptr==NULL && s->pict_type==FF_B_TYPE){
+ av_log(s->avctx, AV_LOG_ERROR, "early B pix\n");
+ return -1;
+ }
+
+ if (get_bits1(&s->gb)){
+ av_log(s->avctx, AV_LOG_ERROR, "unknown bit set\n");
+ return -1;
+ }
+
+ s->qscale = get_bits(&s->gb, 5);
+ if(s->qscale==0){
+ av_log(s->avctx, AV_LOG_ERROR, "error, qscale:0\n");
+ return -1;
+ }
+ if(s->avctx->sub_id == 0x30203002){
+ if (get_bits1(&s->gb)){
+ av_log(s->avctx, AV_LOG_ERROR, "unknown bit2 set\n");
+ return -1;
+ }
+ }
+
+ if(s->avctx->has_b_frames){
+ int f, new_w, new_h;
+ int v= s->avctx->extradata_size >= 4 ? 7&((uint8_t*)s->avctx->extradata)[1] : 0;
+
+ if (get_bits1(&s->gb)){
+ av_log(s->avctx, AV_LOG_ERROR, "unknown bit3 set\n");
+ }
+ seq= get_bits(&s->gb, 13)<<2;
+
+ f= get_bits(&s->gb, av_log2(v)+1);
+
+ if(f){
+ new_w= 4*((uint8_t*)s->avctx->extradata)[6+2*f];
+ new_h= 4*((uint8_t*)s->avctx->extradata)[7+2*f];
+ }else{
+ new_w= s->orig_width ;
+ new_h= s->orig_height;
+ }
+ if(new_w != s->width || new_h != s->height){
+ av_log(s->avctx, AV_LOG_DEBUG, "attempting to change resolution to %dx%d\n", new_w, new_h);
+ if (avcodec_check_dimensions(s->avctx, new_w, new_h) < 0)
+ return -1;
+ MPV_common_end(s);
+ avcodec_set_dimensions(s->avctx, new_w, new_h);
+ s->width = new_w;
+ s->height = new_h;
+ if (MPV_common_init(s) < 0)
+ return -1;
+ }
+
+ if(s->avctx->debug & FF_DEBUG_PICT_INFO){
+ av_log(s->avctx, AV_LOG_DEBUG, "F %d/%d\n", f, v);
+ }
+ }else{
+ seq= get_bits(&s->gb, 8)*128;
+ }
+
+// if(s->avctx->sub_id <= 0x20201002){ //0x20201002 definitely needs this
+ mb_pos= ff_h263_decode_mba(s);
+/* }else{
+ mb_pos= get_bits(&s->gb, av_log2(s->mb_num-1)+1);
+ s->mb_x= mb_pos % s->mb_width;
+ s->mb_y= mb_pos / s->mb_width;
+ }*/
+//av_log(s->avctx, AV_LOG_DEBUG, "%d\n", seq);
+ seq |= s->time &~0x7FFF;
+ if(seq - s->time > 0x4000) seq -= 0x8000;
+ if(seq - s->time < -0x4000) seq += 0x8000;
+ if(seq != s->time){
+ if(s->pict_type!=FF_B_TYPE){
+ s->time= seq;
+ s->pp_time= s->time - s->last_non_b_time;
+ s->last_non_b_time= s->time;
+ }else{
+ s->time= seq;
+ s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
+ if(s->pp_time <=s->pb_time || s->pp_time <= s->pp_time - s->pb_time || s->pp_time<=0){
+ av_log(s->avctx, AV_LOG_DEBUG, "messed up order, possible from seeking? skipping current b frame\n");
+ return FRAME_SKIPPED;
+ }
+ ff_mpeg4_init_direct_mv(s);
+ }
+ }
+// printf("%d %d %d %d %d\n", seq, (int)s->time, (int)s->last_non_b_time, s->pp_time, s->pb_time);
+/*for(i=0; i<32; i++){
+ av_log(s->avctx, AV_LOG_DEBUG, "%d", get_bits1(&s->gb));
+}
+av_log(s->avctx, AV_LOG_DEBUG, "\n");*/
+ s->no_rounding= get_bits1(&s->gb);
+
+ s->f_code = 1;
+ s->unrestricted_mv = 1;
+ s->h263_aic= s->pict_type == FF_I_TYPE;
+// s->alt_inter_vlc=1;
+// s->obmc=1;
+// s->umvplus=1;
+ s->modified_quant=1;
+ if(!s->avctx->lowres)
+ s->loop_filter=1;
+
+ if(s->avctx->debug & FF_DEBUG_PICT_INFO){
+ av_log(s->avctx, AV_LOG_INFO, "num:%5d x:%2d y:%2d type:%d qscale:%2d rnd:%d\n",
+ seq, s->mb_x, s->mb_y, s->pict_type, s->qscale, s->no_rounding);
+ }
+
+ assert(s->pict_type != FF_B_TYPE || !s->low_delay);
+
+ return s->mb_width*s->mb_height - mb_pos;
+}
+
+static av_cold int rv10_decode_init(AVCodecContext *avctx)
+{
+ MpegEncContext *s = avctx->priv_data;
+ static int done=0;
+
+ if (avctx->extradata_size < 8) {
+ av_log(avctx, AV_LOG_ERROR, "Extradata is too small.\n");
+ return -1;
+ }
+
+ MPV_decode_defaults(s);
+
+ s->avctx= avctx;
+ s->out_format = FMT_H263;
+ s->codec_id= avctx->codec_id;
+
+ s->orig_width = s->width = avctx->coded_width;
+ s->orig_height= s->height = avctx->coded_height;
+
+ s->h263_long_vectors= ((uint8_t*)avctx->extradata)[3] & 1;
+ avctx->sub_id= AV_RB32((uint8_t*)avctx->extradata + 4);
+
+ if (avctx->sub_id == 0x10000000) {
+ s->rv10_version= 0;
+ s->low_delay=1;
+ } else if (avctx->sub_id == 0x10001000) {
+ s->rv10_version= 3;
+ s->low_delay=1;
+ } else if (avctx->sub_id == 0x10002000) {
+ s->rv10_version= 3;
+ s->low_delay=1;
+ s->obmc=1;
+ } else if (avctx->sub_id == 0x10003000) {
+ s->rv10_version= 3;
+ s->low_delay=1;
+ } else if (avctx->sub_id == 0x10003001) {
+ s->rv10_version= 3;
+ s->low_delay=1;
+ } else if ( avctx->sub_id == 0x20001000
+ || (avctx->sub_id >= 0x20100000 && avctx->sub_id < 0x201a0000)) {
+ s->low_delay=1;
+ } else if ( avctx->sub_id == 0x30202002
+ || avctx->sub_id == 0x30203002
+ || (avctx->sub_id >= 0x20200002 && avctx->sub_id < 0x20300000)) {
+ s->low_delay=0;
+ s->avctx->has_b_frames=1;
+ } else
+ av_log(s->avctx, AV_LOG_ERROR, "unknown header %X\n", avctx->sub_id);
+
+ if(avctx->debug & FF_DEBUG_PICT_INFO){
+ av_log(avctx, AV_LOG_DEBUG, "ver:%X ver0:%X\n", avctx->sub_id, avctx->extradata_size >= 4 ? ((uint32_t*)avctx->extradata)[0] : -1);
+ }
+
+ avctx->pix_fmt = PIX_FMT_YUV420P;
+
+ if (MPV_common_init(s) < 0)
+ return -1;
+
+ h263_decode_init_vlc(s);
+
+ /* init rv vlc */
+ if (!done) {
+ INIT_VLC_STATIC(&rv_dc_lum, DC_VLC_BITS, 256,
+ rv_lum_bits, 1, 1,
+ rv_lum_code, 2, 2, 16384);
+ INIT_VLC_STATIC(&rv_dc_chrom, DC_VLC_BITS, 256,
+ rv_chrom_bits, 1, 1,
+ rv_chrom_code, 2, 2, 16388);
+ done = 1;
+ }
+
+ return 0;
+}
+
+static av_cold int rv10_decode_end(AVCodecContext *avctx)
+{
+ MpegEncContext *s = avctx->priv_data;
+
+ MPV_common_end(s);
+ return 0;
+}
+
+static int rv10_decode_packet(AVCodecContext *avctx,
+ const uint8_t *buf, int buf_size, int buf_size2)
+{
+ MpegEncContext *s = avctx->priv_data;
+ int mb_count, mb_pos, left, start_mb_x;
+
+ init_get_bits(&s->gb, buf, buf_size*8);
+ if(s->codec_id ==CODEC_ID_RV10)
+ mb_count = rv10_decode_picture_header(s);
+ else
+ mb_count = rv20_decode_picture_header(s);
+ if (mb_count < 0) {
+ av_log(s->avctx, AV_LOG_ERROR, "HEADER ERROR\n");
+ return -1;
+ }
+
+ if (s->mb_x >= s->mb_width ||
+ s->mb_y >= s->mb_height) {
+ av_log(s->avctx, AV_LOG_ERROR, "POS ERROR %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+ mb_pos = s->mb_y * s->mb_width + s->mb_x;
+ left = s->mb_width * s->mb_height - mb_pos;
+ if (mb_count > left) {
+ av_log(s->avctx, AV_LOG_ERROR, "COUNT ERROR\n");
+ return -1;
+ }
+
+ if ((s->mb_x == 0 && s->mb_y == 0) || s->current_picture_ptr==NULL) {
+ if(s->current_picture_ptr){ //FIXME write parser so we always have complete frames?
+ ff_er_frame_end(s);
+ MPV_frame_end(s);
+ s->mb_x= s->mb_y = s->resync_mb_x = s->resync_mb_y= 0;
+ }
+ if(MPV_frame_start(s, avctx) < 0)
+ return -1;
+ ff_er_frame_start(s);
+ }
+
+ dprintf(avctx, "qscale=%d\n", s->qscale);
+
+ /* default quantization values */
+ if(s->codec_id== CODEC_ID_RV10){
+ if(s->mb_y==0) s->first_slice_line=1;
+ }else{
+ s->first_slice_line=1;
+ s->resync_mb_x= s->mb_x;
+ }
+ start_mb_x= s->mb_x;
+ s->resync_mb_y= s->mb_y;
+ if(s->h263_aic){
+ s->y_dc_scale_table=
+ s->c_dc_scale_table= ff_aic_dc_scale_table;
+ }else{
+ s->y_dc_scale_table=
+ s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
+ }
+
+ if(s->modified_quant)
+ s->chroma_qscale_table= ff_h263_chroma_qscale_table;
+
+ ff_set_qscale(s, s->qscale);
+
+ s->rv10_first_dc_coded[0] = 0;
+ s->rv10_first_dc_coded[1] = 0;
+ s->rv10_first_dc_coded[2] = 0;
+ s->block_wrap[0]=
+ s->block_wrap[1]=
+ s->block_wrap[2]=
+ s->block_wrap[3]= s->b8_stride;
+ s->block_wrap[4]=
+ s->block_wrap[5]= s->mb_stride;
+ ff_init_block_index(s);
+ /* decode each macroblock */
+
+ for(s->mb_num_left= mb_count; s->mb_num_left>0; s->mb_num_left--) {
+ int ret;
+ ff_update_block_index(s);
+ dprintf(avctx, "**mb x=%d y=%d\n", s->mb_x, s->mb_y);
+
+ s->mv_dir = MV_DIR_FORWARD;
+ s->mv_type = MV_TYPE_16X16;
+ ret=ff_h263_decode_mb(s, s->block);
+
+ if (ret != SLICE_ERROR && s->gb.size_in_bits < get_bits_count(&s->gb) && 8*buf_size2 >= get_bits_count(&s->gb)){
+ av_log(avctx, AV_LOG_DEBUG, "update size from %d to %d\n", s->gb.size_in_bits, 8*buf_size2);
+ s->gb.size_in_bits= 8*buf_size2;
+ ret= SLICE_OK;
+ }
+
+ if (ret == SLICE_ERROR || s->gb.size_in_bits < get_bits_count(&s->gb)) {
+ av_log(s->avctx, AV_LOG_ERROR, "ERROR at MB %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+ if(s->pict_type != FF_B_TYPE)
+ ff_h263_update_motion_val(s);
+ MPV_decode_mb(s, s->block);
+ if(s->loop_filter)
+ ff_h263_loop_filter(s);
+
+ if (++s->mb_x == s->mb_width) {
+ s->mb_x = 0;
+ s->mb_y++;
+ ff_init_block_index(s);
+ }
+ if(s->mb_x == s->resync_mb_x)
+ s->first_slice_line=0;
+ if(ret == SLICE_END) break;
+ }
+
+ ff_er_add_slice(s, start_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, AC_END|DC_END|MV_END);
+
+ return s->gb.size_in_bits;
+}
+
+static int get_slice_offset(AVCodecContext *avctx, const uint8_t *buf, int n)
+{
+ if(avctx->slice_count) return avctx->slice_offset[n];
+ else return AV_RL32(buf + n*8);
+}
+
+static int rv10_decode_frame(AVCodecContext *avctx,
+ void *data, int *data_size,
+ const uint8_t *buf, int buf_size)
+{
+ MpegEncContext *s = avctx->priv_data;
+ int i;
+ AVFrame *pict = data;
+ int slice_count;
+ const uint8_t *slices_hdr = NULL;
+
+ dprintf(avctx, "*****frame %d size=%d\n", avctx->frame_number, buf_size);
+
+ /* no supplementary picture */
+ if (buf_size == 0) {
+ return 0;
+ }
+
+ if(!avctx->slice_count){
+ slice_count = (*buf++) + 1;
+ slices_hdr = buf + 4;
+ buf += 8 * slice_count;
+ }else
+ slice_count = avctx->slice_count;
+
+ for(i=0; i<slice_count; i++){
+ int offset= get_slice_offset(avctx, slices_hdr, i);
+ int size, size2;
+
+ if(i+1 == slice_count)
+ size= buf_size - offset;
+ else
+ size= get_slice_offset(avctx, slices_hdr, i+1) - offset;
+
+ if(i+2 >= slice_count)
+ size2= buf_size - offset;
+ else
+ size2= get_slice_offset(avctx, slices_hdr, i+2) - offset;
+
+ if(rv10_decode_packet(avctx, buf+offset, size, size2) > 8*size)
+ i++;
+ }
+
+ if(s->current_picture_ptr != NULL && s->mb_y>=s->mb_height){
+ ff_er_frame_end(s);
+ MPV_frame_end(s);
+
+ if (s->pict_type == FF_B_TYPE || s->low_delay) {
+ *pict= *(AVFrame*)s->current_picture_ptr;
+ } else if (s->last_picture_ptr != NULL) {
+ *pict= *(AVFrame*)s->last_picture_ptr;
+ }
+
+ if(s->last_picture_ptr || s->low_delay){
+ *data_size = sizeof(AVFrame);
+ ff_print_debug_info(s, pict);
+ }
+ s->current_picture_ptr= NULL; //so we can detect if frame_end wasn't called (find some nicer solution...)
+ }
+
+ return buf_size;
+}
+
+AVCodec rv10_decoder = {
+ "rv10",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_RV10,
+ sizeof(MpegEncContext),
+ rv10_decode_init,
+ NULL,
+ rv10_decode_end,
+ rv10_decode_frame,
+ /*.capabilities = */CODEC_CAP_DR1,
+ /*.next = */NULL,
+ /*.flush = */NULL,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name = */NULL_IF_CONFIG_SMALL("RealVideo 1.0"),
+};
+
+AVCodec rv20_decoder = {
+ "rv20",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_RV20,
+ sizeof(MpegEncContext),
+ rv10_decode_init,
+ NULL,
+ rv10_decode_end,
+ rv10_decode_frame,
+ /*.capabilities = */CODEC_CAP_DR1 | CODEC_CAP_DELAY,
+ /*.next = */NULL,
+ /*.flush = */ff_mpeg_flush,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name = */NULL_IF_CONFIG_SMALL("RealVideo 2.0"),
+};
+
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv30.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv30.c
new file mode 100644
index 000000000..f97c326f8
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv30.c
@@ -0,0 +1,284 @@
+/*
+ * RV30 decoder
+ * Copyright (c) 2007 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/rv30.c
+ * RV30 decoder
+ */
+
+#include "avcodec.h"
+#include "dsputil.h"
+#include "mpegvideo.h"
+#include "golomb.h"
+
+#include "rv34.h"
+#include "rv30data.h"
+
+
+static int rv30_parse_slice_header(RV34DecContext *r, GetBitContext *gb, SliceInfo *si)
+{
+ int mb_bits;
+ int w = r->s.width, h = r->s.height;
+ int mb_size;
+ int rpr;
+
+ memset(si, 0, sizeof(SliceInfo));
+ if(get_bits(gb, 3))
+ return -1;
+ si->type = get_bits(gb, 2);
+ if(si->type == 1) si->type = 0;
+ if(get_bits1(gb))
+ return -1;
+ si->quant = get_bits(gb, 5);
+ skip_bits1(gb);
+ si->pts = get_bits(gb, 13);
+ rpr = get_bits(gb, r->rpr);
+ if(rpr){
+ w = r->s.avctx->extradata[6 + rpr*2] << 2;
+ h = r->s.avctx->extradata[7 + rpr*2] << 2;
+ }
+ si->width = w;
+ si->height = h;
+ mb_size = ((w + 15) >> 4) * ((h + 15) >> 4);
+ mb_bits = ff_rv34_get_start_offset(gb, mb_size);
+ si->start = get_bits(gb, mb_bits);
+ skip_bits1(gb);
+ return 0;
+}
+
+/**
+ * Decode 4x4 intra types array.
+ */
+static int rv30_decode_intra_types(RV34DecContext *r, GetBitContext *gb, int8_t *dst)
+{
+ int i, j, k;
+
+ for(i = 0; i < 4; i++, dst += r->intra_types_stride - 4){
+ for(j = 0; j < 4; j+= 2){
+ int code = svq3_get_ue_golomb(gb) << 1;
+ if(code >= 81*2){
+ av_log(r->s.avctx, AV_LOG_ERROR, "Incorrect intra prediction code\n");
+ return -1;
+ }
+ for(k = 0; k < 2; k++){
+ int A = dst[-r->intra_types_stride] + 1;
+ int B = dst[-1] + 1;
+ *dst++ = rv30_itype_from_context[A * 90 + B * 9 + rv30_itype_code[code + k]];
+ if(dst[-1] == 9){
+ av_log(r->s.avctx, AV_LOG_ERROR, "Incorrect intra prediction mode\n");
+ return -1;
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+/**
+ * Decode macroblock information.
+ */
+static int rv30_decode_mb_info(RV34DecContext *r)
+{
+ static const int rv30_p_types[6] = { RV34_MB_SKIP, RV34_MB_P_16x16, RV34_MB_P_8x8, -1, RV34_MB_TYPE_INTRA, RV34_MB_TYPE_INTRA16x16 };
+ static const int rv30_b_types[6] = { RV34_MB_SKIP, RV34_MB_B_DIRECT, RV34_MB_B_FORWARD, RV34_MB_B_BACKWARD, RV34_MB_TYPE_INTRA, RV34_MB_TYPE_INTRA16x16 };
+ MpegEncContext *s = &r->s;
+ GetBitContext *gb = &s->gb;
+ int code = svq3_get_ue_golomb(gb);
+
+ if(code > 11){
+ av_log(s->avctx, AV_LOG_ERROR, "Incorrect MB type code\n");
+ return -1;
+ }
+ if(code > 5){
+ av_log(s->avctx, AV_LOG_ERROR, "dquant needed\n");
+ code -= 6;
+ }
+ if(s->pict_type != FF_B_TYPE)
+ return rv30_p_types[code];
+ else
+ return rv30_b_types[code];
+}
+
+static inline void rv30_weak_loop_filter(uint8_t *src, const int step,
+ const int stride, const int lim)
+{
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+ int i, diff;
+
+ for(i = 0; i < 4; i++){
+ diff = ((src[-2*step] - src[1*step]) - (src[-1*step] - src[0*step])*4) >> 3;
+ diff = av_clip(diff, -lim, lim);
+ src[-1*step] = cm[src[-1*step] + diff];
+ src[ 0*step] = cm[src[ 0*step] - diff];
+ src += stride;
+ }
+}
+
+static void rv30_loop_filter(RV34DecContext *r, int row)
+{
+ MpegEncContext *s = &r->s;
+ int mb_pos, mb_x;
+ int i, j, k;
+ uint8_t *Y, *C;
+ int loc_lim, cur_lim, left_lim = 0, top_lim = 0;
+
+ mb_pos = row * s->mb_stride;
+ for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
+ int mbtype = s->current_picture_ptr->mb_type[mb_pos];
+ if(IS_INTRA(mbtype) || IS_SEPARATE_DC(mbtype))
+ r->deblock_coefs[mb_pos] = 0xFFFF;
+ if(IS_INTRA(mbtype))
+ r->cbp_chroma[mb_pos] = 0xFF;
+ }
+
+ /* all vertical edges are filtered first
+ * and horizontal edges are filtered on the next iteration
+ */
+ mb_pos = row * s->mb_stride;
+ for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
+ cur_lim = rv30_loop_filt_lim[s->current_picture_ptr->qscale_table[mb_pos]];
+ if(mb_x)
+ left_lim = rv30_loop_filt_lim[s->current_picture_ptr->qscale_table[mb_pos - 1]];
+ for(j = 0; j < 16; j += 4){
+ Y = s->current_picture_ptr->data[0] + mb_x*16 + (row*16 + j) * s->linesize + 4 * !mb_x;
+ for(i = !mb_x; i < 4; i++, Y += 4){
+ int ij = i + j;
+ loc_lim = 0;
+ if(r->deblock_coefs[mb_pos] & (1 << ij))
+ loc_lim = cur_lim;
+ else if(!i && r->deblock_coefs[mb_pos - 1] & (1 << (ij + 3)))
+ loc_lim = left_lim;
+ else if( i && r->deblock_coefs[mb_pos] & (1 << (ij - 1)))
+ loc_lim = cur_lim;
+ if(loc_lim)
+ rv30_weak_loop_filter(Y, 1, s->linesize, loc_lim);
+ }
+ }
+ for(k = 0; k < 2; k++){
+ int cur_cbp, left_cbp = 0;
+ cur_cbp = (r->cbp_chroma[mb_pos] >> (k*4)) & 0xF;
+ if(mb_x)
+ left_cbp = (r->cbp_chroma[mb_pos - 1] >> (k*4)) & 0xF;
+ for(j = 0; j < 8; j += 4){
+ C = s->current_picture_ptr->data[k+1] + mb_x*8 + (row*8 + j) * s->uvlinesize + 4 * !mb_x;
+ for(i = !mb_x; i < 2; i++, C += 4){
+ int ij = i + (j >> 1);
+ loc_lim = 0;
+ if(cur_cbp & (1 << ij))
+ loc_lim = cur_lim;
+ else if(!i && left_cbp & (1 << (ij + 1)))
+ loc_lim = left_lim;
+ else if( i && cur_cbp & (1 << (ij - 1)))
+ loc_lim = cur_lim;
+ if(loc_lim)
+ rv30_weak_loop_filter(C, 1, s->uvlinesize, loc_lim);
+ }
+ }
+ }
+ }
+ mb_pos = row * s->mb_stride;
+ for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
+ cur_lim = rv30_loop_filt_lim[s->current_picture_ptr->qscale_table[mb_pos]];
+ if(row)
+ top_lim = rv30_loop_filt_lim[s->current_picture_ptr->qscale_table[mb_pos - s->mb_stride]];
+ for(j = 4*!row; j < 16; j += 4){
+ Y = s->current_picture_ptr->data[0] + mb_x*16 + (row*16 + j) * s->linesize;
+ for(i = 0; i < 4; i++, Y += 4){
+ int ij = i + j;
+ loc_lim = 0;
+ if(r->deblock_coefs[mb_pos] & (1 << ij))
+ loc_lim = cur_lim;
+ else if(!j && r->deblock_coefs[mb_pos - s->mb_stride] & (1 << (ij + 12)))
+ loc_lim = top_lim;
+ else if( j && r->deblock_coefs[mb_pos] & (1 << (ij - 4)))
+ loc_lim = cur_lim;
+ if(loc_lim)
+ rv30_weak_loop_filter(Y, s->linesize, 1, loc_lim);
+ }
+ }
+ for(k = 0; k < 2; k++){
+ int cur_cbp, top_cbp = 0;
+ cur_cbp = (r->cbp_chroma[mb_pos] >> (k*4)) & 0xF;
+ if(row)
+ top_cbp = (r->cbp_chroma[mb_pos - s->mb_stride] >> (k*4)) & 0xF;
+ for(j = 4*!row; j < 8; j += 4){
+ C = s->current_picture_ptr->data[k+1] + mb_x*8 + (row*8 + j) * s->uvlinesize;
+ for(i = 0; i < 2; i++, C += 4){
+ int ij = i + (j >> 1);
+ loc_lim = 0;
+ if(cur_cbp & (1 << ij))
+ loc_lim = cur_lim;
+ else if(!j && top_cbp & (1 << (ij + 2)))
+ loc_lim = top_lim;
+ else if( j && cur_cbp & (1 << (ij - 2)))
+ loc_lim = cur_lim;
+ if(loc_lim)
+ rv30_weak_loop_filter(C, s->uvlinesize, 1, loc_lim);
+ }
+ }
+ }
+ }
+}
+
+/**
+ * Initialize decoder.
+ */
+static av_cold int rv30_decode_init(AVCodecContext *avctx)
+{
+ RV34DecContext *r = avctx->priv_data;
+
+ r->rv30 = 1;
+ ff_rv34_decode_init(avctx);
+ if(avctx->extradata_size < 2){
+ av_log(avctx, AV_LOG_ERROR, "Extradata is too small.\n");
+ return -1;
+ }
+ r->rpr = (avctx->extradata[1] & 7) >> 1;
+ r->rpr = FFMIN(r->rpr + 1, 3);
+ if(avctx->extradata_size - 8 < (r->rpr - 1) * 2){
+ av_log(avctx, AV_LOG_ERROR, "Insufficient extradata - need at least %d bytes, got %d\n",
+ 6 + r->rpr * 2, avctx->extradata_size);
+ return -1;
+ }
+ r->parse_slice_header = rv30_parse_slice_header;
+ r->decode_intra_types = rv30_decode_intra_types;
+ r->decode_mb_info = rv30_decode_mb_info;
+ r->loop_filter = rv30_loop_filter;
+ r->luma_dc_quant_i = rv30_luma_dc_quant;
+ r->luma_dc_quant_p = rv30_luma_dc_quant;
+ return 0;
+}
+
+AVCodec rv30_decoder = {
+ "rv30",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_RV30,
+ sizeof(RV34DecContext),
+ /*.init = */rv30_decode_init,
+ /*.encode = */NULL,
+ /*.close = */ff_rv34_decode_end,
+ /*.decode = */ff_rv34_decode_frame,
+ /*.capabilities = */CODEC_CAP_DR1 | CODEC_CAP_DELAY,
+ /*.next = */NULL,
+ /*.flush = */ff_mpeg_flush,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name = */NULL_IF_CONFIG_SMALL("RealVideo 3.0"),
+};
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv30data.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv30data.h
new file mode 100644
index 000000000..1662fd7af
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv30data.h
@@ -0,0 +1,181 @@
+/*
+ * RealVideo 3 decoder
+ * copyright (c) 2007 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file rv30data.h
+ * miscellaneous RV30 tables
+ */
+
+#ifndef AVCODEC_RV30DATA_H
+#define AVCODEC_RV30DATA_H
+
+#include <stdint.h>
+
+/** DC quantizer mapping for RV30 */
+static const uint8_t rv30_luma_dc_quant[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 22, 22, 23, 23, 23, 24, 24, 25, 25
+};
+
+/**
+ * This table is used for storing the differences
+ * between the predicted and the real intra type.
+ */
+static const uint8_t rv30_itype_code[9*9*2] = {
+ 0, 0, 0, 1, 1, 0, 1, 1, 0, 2, 2, 0, 0, 3, 3, 0, 1, 2,
+ 2, 1, 0, 4, 4, 0, 3, 1, 1, 3, 0, 5, 5, 0, 2, 2, 1, 4,
+ 4, 1, 0, 6, 3, 2, 1, 5, 2, 3, 5, 1, 6, 0, 0, 7, 4, 2,
+ 2, 4, 3, 3, 6, 1, 1, 6, 7, 0, 0, 8, 5, 2, 4, 3, 2, 5,
+ 3, 4, 1, 7, 4, 4, 7, 1, 8, 0, 6, 2, 3, 5, 5, 3, 2, 6,
+ 1, 8, 2, 7, 7, 2, 8, 1, 5, 4, 4, 5, 3, 6, 6, 3, 8, 2,
+ 4, 6, 5, 5, 6, 4, 2, 8, 7, 3, 3, 7, 6, 5, 5, 6, 7, 4,
+ 4, 7, 8, 3, 3, 8, 7, 5, 8, 4, 5, 7, 4, 8, 6, 6, 7, 6,
+ 5, 8, 8, 5, 6, 7, 8, 6, 7, 7, 6, 8, 8, 7, 7, 8, 8, 8,
+};
+
+/**
+ * This table is used for retrieving the current intra type
+ * based on its neighbors and adjustment provided by
+ * code read and decoded before.
+ *
+ * This is really a three-dimensional matrix with dimensions
+ * [-1..8][-1..8][0..8]. The first and second coordinates are
+ * determined by the top and left neighbors (-1 if unavailable).
+ */
+static const uint8_t rv30_itype_from_context[900] = {
+ 0, 9, 9, 9, 9, 9, 9, 9, 9,
+ 0, 2, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 2, 0, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9,
+
+ 0, 1, 9, 9, 9, 9, 9, 9, 9,
+ 0, 2, 1, 6, 4, 8, 5, 7, 3,
+ 1, 0, 2, 6, 5, 4, 3, 8, 7,
+ 2, 8, 0, 1, 7, 4, 3, 6, 5,
+ 2, 0, 1, 3, 8, 5, 4, 7, 6,
+ 2, 0, 1, 4, 6, 7, 8, 3, 5,
+ 0, 1, 5, 2, 6, 3, 8, 4, 7,
+ 0, 1, 6, 2, 4, 7, 5, 8, 3,
+ 2, 7, 0, 1, 4, 8, 6, 3, 5,
+ 2, 8, 0, 1, 7, 3, 4, 5, 6,
+
+ 1, 0, 9, 9, 9, 9, 9, 9, 9,
+ 1, 2, 5, 6, 3, 0, 4, 8, 7,
+ 1, 6, 2, 5, 3, 0, 4, 8, 7,
+ 2, 1, 7, 6, 8, 3, 5, 0, 4,
+ 1, 2, 5, 3, 6, 8, 4, 7, 0,
+ 1, 6, 2, 0, 4, 5, 8, 7, 3,
+ 1, 5, 2, 6, 3, 8, 4, 0, 7,
+ 1, 6, 0, 2, 4, 5, 7, 3, 8,
+ 2, 1, 7, 6, 0, 8, 5, 4, 3,
+ 1, 2, 7, 8, 3, 4, 5, 6, 0,
+
+ 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 0, 2, 1, 8, 7, 6, 5, 4, 3,
+ 1, 2, 0, 6, 5, 7, 4, 8, 3,
+ 2, 8, 7, 1, 0, 6, 4, 3, 5,
+ 2, 0, 8, 1, 3, 7, 5, 4, 6,
+ 2, 0, 4, 1, 7, 8, 6, 3, 5,
+ 2, 0, 1, 5, 8, 4, 6, 7, 3,
+ 2, 0, 6, 1, 4, 7, 8, 5, 3,
+ 2, 7, 8, 1, 0, 5, 4, 6, 3,
+ 2, 8, 7, 1, 0, 4, 3, 6, 5,
+
+ 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 0, 2, 1, 3, 5, 8, 6, 4, 7,
+ 1, 0, 2, 5, 3, 6, 4, 8, 7,
+ 2, 8, 1, 0, 3, 5, 7, 6, 4,
+ 3, 2, 5, 8, 1, 4, 6, 7, 0,
+ 4, 2, 0, 6, 1, 5, 8, 3, 7,
+ 5, 3, 1, 2, 8, 6, 4, 0, 7,
+ 1, 6, 0, 2, 4, 5, 8, 3, 7,
+ 2, 7, 0, 1, 5, 4, 8, 6, 3,
+ 2, 8, 3, 5, 1, 0, 7, 6, 4,
+
+ 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 2, 0, 6, 1, 4, 7, 5, 8, 3,
+ 1, 6, 2, 0, 4, 5, 3, 7, 8,
+ 2, 8, 7, 6, 4, 0, 1, 5, 3,
+ 4, 2, 1, 0, 6, 8, 3, 5, 7,
+ 4, 2, 6, 0, 1, 5, 7, 8, 3,
+ 1, 2, 5, 0, 6, 3, 4, 7, 8,
+ 6, 4, 0, 1, 2, 7, 5, 3, 8,
+ 2, 7, 4, 6, 0, 1, 8, 5, 3,
+ 2, 8, 7, 4, 6, 1, 3, 5, 0,
+
+ 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 5, 1, 2, 3, 6, 8, 0, 4, 7,
+ 1, 5, 6, 3, 2, 0, 4, 8, 7,
+ 2, 1, 5, 3, 6, 8, 7, 4, 0,
+ 5, 3, 1, 2, 6, 8, 4, 7, 0,
+ 1, 6, 2, 4, 5, 8, 0, 3, 7,
+ 5, 1, 3, 6, 2, 0, 8, 4, 7,
+ 1, 6, 5, 2, 0, 4, 3, 7, 8,
+ 2, 7, 1, 6, 5, 0, 8, 3, 4,
+ 2, 5, 1, 3, 6, 8, 4, 0, 7,
+
+ 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 1, 6, 2, 0, 5, 4, 3, 7, 8,
+ 1, 6, 5, 4, 2, 3, 0, 7, 8,
+ 2, 1, 6, 7, 4, 8, 5, 3, 0,
+ 2, 1, 6, 5, 8, 4, 3, 0, 7,
+ 6, 4, 1, 2, 0, 5, 7, 8, 3,
+ 1, 6, 5, 2, 3, 0, 4, 8, 7,
+ 6, 1, 4, 0, 2, 7, 5, 3, 8,
+ 2, 7, 4, 6, 1, 5, 0, 8, 3,
+ 2, 1, 6, 8, 4, 7, 3, 5, 0,
+
+ 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 2, 0, 4, 7, 6, 1, 8, 5, 3,
+ 6, 1, 2, 0, 4, 7, 5, 8, 3,
+ 2, 7, 8, 0, 1, 6, 4, 3, 5,
+ 2, 4, 0, 8, 3, 1, 7, 6, 5,
+ 4, 2, 7, 0, 6, 1, 8, 5, 3,
+ 2, 1, 0, 8, 5, 6, 7, 4, 3,
+ 2, 6, 4, 1, 7, 0, 5, 8, 3,
+ 2, 7, 4, 0, 8, 6, 1, 5, 3,
+ 2, 8, 7, 4, 1, 0, 3, 6, 5,
+
+ 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 2, 0, 8, 1, 3, 4, 6, 5, 7,
+ 1, 2, 0, 6, 8, 5, 7, 3, 4,
+ 2, 8, 7, 1, 0, 3, 6, 5, 4,
+ 8, 3, 2, 5, 1, 0, 4, 7, 6,
+ 2, 0, 4, 8, 5, 1, 7, 6, 3,
+ 2, 1, 0, 8, 5, 3, 6, 4, 7,
+ 2, 1, 6, 0, 8, 4, 5, 7, 3,
+ 2, 7, 8, 4, 0, 6, 1, 5, 3,
+ 2, 8, 3, 0, 7, 4, 1, 6, 5,
+};
+
+/**
+ * Loop filter limits are taken from this table.
+ */
+static const uint8_t rv30_loop_filt_lim[32] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5
+};
+#endif /* AVCODEC_RV30DATA_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv30dsp.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv30dsp.c
new file mode 100644
index 000000000..bf2dc18b8
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv30dsp.c
@@ -0,0 +1,291 @@
+/*
+ * RV30 decoder motion compensation functions
+ * Copyright (c) 2007 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/rv30dsp.c
+ * RV30 decoder motion compensation functions
+ */
+
+#include "avcodec.h"
+#include "dsputil.h"
+
+#define RV30_LOWPASS(OPNAME, OP) \
+static av_unused void OPNAME ## rv30_tpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, const int C1, const int C2){\
+ const int h=8;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
+ int i;\
+ for(i=0; i<h; i++)\
+ {\
+ OP(dst[0], (-(src[-1]+src[2]) + src[0]*C1 + src[1]*C2 + 8)>>4);\
+ OP(dst[1], (-(src[ 0]+src[3]) + src[1]*C1 + src[2]*C2 + 8)>>4);\
+ OP(dst[2], (-(src[ 1]+src[4]) + src[2]*C1 + src[3]*C2 + 8)>>4);\
+ OP(dst[3], (-(src[ 2]+src[5]) + src[3]*C1 + src[4]*C2 + 8)>>4);\
+ OP(dst[4], (-(src[ 3]+src[6]) + src[4]*C1 + src[5]*C2 + 8)>>4);\
+ OP(dst[5], (-(src[ 4]+src[7]) + src[5]*C1 + src[6]*C2 + 8)>>4);\
+ OP(dst[6], (-(src[ 5]+src[8]) + src[6]*C1 + src[7]*C2 + 8)>>4);\
+ OP(dst[7], (-(src[ 6]+src[9]) + src[7]*C1 + src[8]*C2 + 8)>>4);\
+ dst+=dstStride;\
+ src+=srcStride;\
+ }\
+}\
+\
+static void OPNAME ## rv30_tpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, const int C1, const int C2){\
+ const int w=8;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
+ int i;\
+ for(i=0; i<w; i++)\
+ {\
+ const int srcA= src[-1*srcStride];\
+ const int src0= src[0 *srcStride];\
+ const int src1= src[1 *srcStride];\
+ const int src2= src[2 *srcStride];\
+ const int src3= src[3 *srcStride];\
+ const int src4= src[4 *srcStride];\
+ const int src5= src[5 *srcStride];\
+ const int src6= src[6 *srcStride];\
+ const int src7= src[7 *srcStride];\
+ const int src8= src[8 *srcStride];\
+ const int src9= src[9 *srcStride];\
+ OP(dst[0*dstStride], (-(srcA+src2) + src0*C1 + src1*C2 + 8)>>4);\
+ OP(dst[1*dstStride], (-(src0+src3) + src1*C1 + src2*C2 + 8)>>4);\
+ OP(dst[2*dstStride], (-(src1+src4) + src2*C1 + src3*C2 + 8)>>4);\
+ OP(dst[3*dstStride], (-(src2+src5) + src3*C1 + src4*C2 + 8)>>4);\
+ OP(dst[4*dstStride], (-(src3+src6) + src4*C1 + src5*C2 + 8)>>4);\
+ OP(dst[5*dstStride], (-(src4+src7) + src5*C1 + src6*C2 + 8)>>4);\
+ OP(dst[6*dstStride], (-(src5+src8) + src6*C1 + src7*C2 + 8)>>4);\
+ OP(dst[7*dstStride], (-(src6+src9) + src7*C1 + src8*C2 + 8)>>4);\
+ dst++;\
+ src++;\
+ }\
+}\
+\
+static void OPNAME ## rv30_tpel8_hv_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ const int w = 8;\
+ const int h = 8;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
+ int i, j;\
+ for(j = 0; j < h; j++){\
+ for(i = 0; i < w; i++){\
+ OP(dst[i], (\
+ src[srcStride*-1+i-1] -12*src[srcStride*-1+i] -6*src[srcStride*-1+i+1] +src[srcStride*-1+i+2]+\
+ -12*src[srcStride* 0+i-1] +144*src[srcStride* 0+i] +72*src[srcStride* 0+i+1] -12*src[srcStride* 0+i+2] +\
+ -6*src[srcStride* 1+i-1] +72*src[srcStride* 1+i] +36*src[srcStride* 1+i+1] -6*src[srcStride* 1+i+2] +\
+ src[srcStride* 2+i-1] -12*src[srcStride* 2+i] -6*src[srcStride* 2+i+1] +src[srcStride* 2+i+2] +\
+ 128)>>8);\
+ }\
+ src += srcStride;\
+ dst += dstStride;\
+ }\
+}\
+\
+static void OPNAME ## rv30_tpel8_hhv_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ const int w = 8;\
+ const int h = 8;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
+ int i, j;\
+ for(j = 0; j < h; j++){\
+ for(i = 0; i < w; i++){\
+ OP(dst[i], (\
+ src[srcStride*-1+i-1] -12*src[srcStride*-1+i+1] -6*src[srcStride*-1+i] +src[srcStride*-1+i+2]+\
+ -12*src[srcStride* 0+i-1] +144*src[srcStride* 0+i+1] +72*src[srcStride* 0+i] -12*src[srcStride* 0+i+2]+\
+ -6*src[srcStride* 1+i-1] +72*src[srcStride* 1+i+1] +36*src[srcStride* 1+i] -6*src[srcStride* 1+i+2]+\
+ src[srcStride* 2+i-1] -12*src[srcStride* 2+i+1] -6*src[srcStride* 2+i] +src[srcStride* 2+i+2]+\
+ 128)>>8);\
+ }\
+ src += srcStride;\
+ dst += dstStride;\
+ }\
+}\
+\
+static void OPNAME ## rv30_tpel8_hvv_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ const int w = 8;\
+ const int h = 8;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
+ int i, j;\
+ for(j = 0; j < h; j++){\
+ for(i = 0; i < w; i++){\
+ OP(dst[i], (\
+ src[srcStride*-1+i-1] -12*src[srcStride*-1+i] -6*src[srcStride*-1+i+1] +src[srcStride*-1+i+2]+\
+ -6*src[srcStride* 0+i-1] +72*src[srcStride* 0+i] +36*src[srcStride* 0+i+1] -6*src[srcStride* 0+i+2]+\
+ -12*src[srcStride* 1+i-1] +144*src[srcStride* 1+i] +72*src[srcStride* 1+i+1] -12*src[srcStride* 1+i+2]+\
+ src[srcStride* 2+i-1] -12*src[srcStride* 2+i] -6*src[srcStride* 2+i+1] +src[srcStride* 2+i+2]+\
+ 128)>>8);\
+ }\
+ src += srcStride;\
+ dst += dstStride;\
+ }\
+}\
+\
+static void OPNAME ## rv30_tpel8_hhvv_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ const int w = 8;\
+ const int h = 8;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
+ int i, j;\
+ for(j = 0; j < h; j++){\
+ for(i = 0; i < w; i++){\
+ OP(dst[i], (\
+ 36*src[i+srcStride*0] +54*src[i+1+srcStride*0] +6*src[i+2+srcStride*0]+\
+ 54*src[i+srcStride*1] +81*src[i+1+srcStride*1] +9*src[i+2+srcStride*1]+\
+ 6*src[i+srcStride*2] + 9*src[i+1+srcStride*2] + src[i+2+srcStride*2]+\
+ 128)>>8);\
+ }\
+ src += srcStride;\
+ dst += dstStride;\
+ }\
+}\
+\
+static void OPNAME ## rv30_tpel16_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, const int C1, const int C2){\
+ OPNAME ## rv30_tpel8_v_lowpass(dst , src , dstStride, srcStride, C1, C2);\
+ OPNAME ## rv30_tpel8_v_lowpass(dst+8, src+8, dstStride, srcStride, C1, C2);\
+ src += 8*srcStride;\
+ dst += 8*dstStride;\
+ OPNAME ## rv30_tpel8_v_lowpass(dst , src , dstStride, srcStride, C1, C2);\
+ OPNAME ## rv30_tpel8_v_lowpass(dst+8, src+8, dstStride, srcStride, C1, C2);\
+}\
+\
+static void OPNAME ## rv30_tpel16_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, const int C1, const int C2){\
+ OPNAME ## rv30_tpel8_h_lowpass(dst , src , dstStride, srcStride, C1, C2);\
+ OPNAME ## rv30_tpel8_h_lowpass(dst+8, src+8, dstStride, srcStride, C1, C2);\
+ src += 8*srcStride;\
+ dst += 8*dstStride;\
+ OPNAME ## rv30_tpel8_h_lowpass(dst , src , dstStride, srcStride, C1, C2);\
+ OPNAME ## rv30_tpel8_h_lowpass(dst+8, src+8, dstStride, srcStride, C1, C2);\
+}\
+\
+static void OPNAME ## rv30_tpel16_hv_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ OPNAME ## rv30_tpel8_hv_lowpass(dst , src , dstStride, srcStride);\
+ OPNAME ## rv30_tpel8_hv_lowpass(dst+8, src+8, dstStride, srcStride);\
+ src += 8*srcStride;\
+ dst += 8*dstStride;\
+ OPNAME ## rv30_tpel8_hv_lowpass(dst , src , dstStride, srcStride);\
+ OPNAME ## rv30_tpel8_hv_lowpass(dst+8, src+8, dstStride, srcStride);\
+}\
+\
+static void OPNAME ## rv30_tpel16_hhv_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ OPNAME ## rv30_tpel8_hhv_lowpass(dst , src , dstStride, srcStride);\
+ OPNAME ## rv30_tpel8_hhv_lowpass(dst+8, src+8, dstStride, srcStride);\
+ src += 8*srcStride;\
+ dst += 8*dstStride;\
+ OPNAME ## rv30_tpel8_hhv_lowpass(dst , src , dstStride, srcStride);\
+ OPNAME ## rv30_tpel8_hhv_lowpass(dst+8, src+8, dstStride, srcStride);\
+}\
+\
+static void OPNAME ## rv30_tpel16_hvv_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ OPNAME ## rv30_tpel8_hvv_lowpass(dst , src , dstStride, srcStride);\
+ OPNAME ## rv30_tpel8_hvv_lowpass(dst+8, src+8, dstStride, srcStride);\
+ src += 8*srcStride;\
+ dst += 8*dstStride;\
+ OPNAME ## rv30_tpel8_hvv_lowpass(dst , src , dstStride, srcStride);\
+ OPNAME ## rv30_tpel8_hvv_lowpass(dst+8, src+8, dstStride, srcStride);\
+}\
+\
+static void OPNAME ## rv30_tpel16_hhvv_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ OPNAME ## rv30_tpel8_hhvv_lowpass(dst , src , dstStride, srcStride);\
+ OPNAME ## rv30_tpel8_hhvv_lowpass(dst+8, src+8, dstStride, srcStride);\
+ src += 8*srcStride;\
+ dst += 8*dstStride;\
+ OPNAME ## rv30_tpel8_hhvv_lowpass(dst , src , dstStride, srcStride);\
+ OPNAME ## rv30_tpel8_hhvv_lowpass(dst+8, src+8, dstStride, srcStride);\
+}\
+\
+
+#define RV30_MC(OPNAME, SIZE) \
+static void OPNAME ## rv30_tpel ## SIZE ## _mc10_c(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## rv30_tpel ## SIZE ## _h_lowpass(dst, src, stride, stride, 12, 6);\
+}\
+\
+static void OPNAME ## rv30_tpel ## SIZE ## _mc20_c(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## rv30_tpel ## SIZE ## _h_lowpass(dst, src, stride, stride, 6, 12);\
+}\
+\
+static void OPNAME ## rv30_tpel ## SIZE ## _mc01_c(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## rv30_tpel ## SIZE ## _v_lowpass(dst, src, stride, stride, 12, 6);\
+}\
+\
+static void OPNAME ## rv30_tpel ## SIZE ## _mc02_c(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## rv30_tpel ## SIZE ## _v_lowpass(dst, src, stride, stride, 6, 12);\
+}\
+\
+static void OPNAME ## rv30_tpel ## SIZE ## _mc11_c(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## rv30_tpel ## SIZE ## _hv_lowpass(dst, src, stride, stride);\
+}\
+\
+static void OPNAME ## rv30_tpel ## SIZE ## _mc12_c(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## rv30_tpel ## SIZE ## _hvv_lowpass(dst, src, stride, stride);\
+}\
+\
+static void OPNAME ## rv30_tpel ## SIZE ## _mc21_c(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## rv30_tpel ## SIZE ## _hhv_lowpass(dst, src, stride, stride);\
+}\
+\
+static void OPNAME ## rv30_tpel ## SIZE ## _mc22_c(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## rv30_tpel ## SIZE ## _hhvv_lowpass(dst, src, stride, stride);\
+}\
+\
+
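+/* Naming note: the mc{x}{y} suffixes give the horizontal/vertical third-pel
+ * position, matching the table indices used in ff_rv30dsp_init() below.
+ * The 1-D filters use the taps (-1, 12, 6, -1)/16 for the 1/3 position and
+ * (-1, 6, 12, -1)/16 for the 2/3 position; the hv/hhv/hvv variants are the
+ * corresponding separable 2-D kernels (normalized by 256), while mc22
+ * (2/3, 2/3) falls back to a smoothing (6, 9, 1)/16 kernel in both
+ * directions. */
+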
+#define op_avg(a, b) a = (((a)+cm[b]+1)>>1)
+#define op_put(a, b) a = cm[b]
+
+RV30_LOWPASS(put_ , op_put)
+RV30_LOWPASS(avg_ , op_avg)
+RV30_MC(put_, 8)
+RV30_MC(put_, 16)
+RV30_MC(avg_, 8)
+RV30_MC(avg_, 16)
+
+av_cold void ff_rv30dsp_init(DSPContext* c, AVCodecContext *avctx) {
+ c->put_rv30_tpel_pixels_tab[0][ 0] = c->put_h264_qpel_pixels_tab[0][0];
+ c->put_rv30_tpel_pixels_tab[0][ 1] = put_rv30_tpel16_mc10_c;
+ c->put_rv30_tpel_pixels_tab[0][ 2] = put_rv30_tpel16_mc20_c;
+ c->put_rv30_tpel_pixels_tab[0][ 4] = put_rv30_tpel16_mc01_c;
+ c->put_rv30_tpel_pixels_tab[0][ 5] = put_rv30_tpel16_mc11_c;
+ c->put_rv30_tpel_pixels_tab[0][ 6] = put_rv30_tpel16_mc21_c;
+ c->put_rv30_tpel_pixels_tab[0][ 8] = put_rv30_tpel16_mc02_c;
+ c->put_rv30_tpel_pixels_tab[0][ 9] = put_rv30_tpel16_mc12_c;
+ c->put_rv30_tpel_pixels_tab[0][10] = put_rv30_tpel16_mc22_c;
+ c->avg_rv30_tpel_pixels_tab[0][ 0] = c->avg_h264_qpel_pixels_tab[0][0];
+ c->avg_rv30_tpel_pixels_tab[0][ 1] = avg_rv30_tpel16_mc10_c;
+ c->avg_rv30_tpel_pixels_tab[0][ 2] = avg_rv30_tpel16_mc20_c;
+ c->avg_rv30_tpel_pixels_tab[0][ 4] = avg_rv30_tpel16_mc01_c;
+ c->avg_rv30_tpel_pixels_tab[0][ 5] = avg_rv30_tpel16_mc11_c;
+ c->avg_rv30_tpel_pixels_tab[0][ 6] = avg_rv30_tpel16_mc21_c;
+ c->avg_rv30_tpel_pixels_tab[0][ 8] = avg_rv30_tpel16_mc02_c;
+ c->avg_rv30_tpel_pixels_tab[0][ 9] = avg_rv30_tpel16_mc12_c;
+ c->avg_rv30_tpel_pixels_tab[0][10] = avg_rv30_tpel16_mc22_c;
+ c->put_rv30_tpel_pixels_tab[1][ 0] = c->put_h264_qpel_pixels_tab[1][0];
+ c->put_rv30_tpel_pixels_tab[1][ 1] = put_rv30_tpel8_mc10_c;
+ c->put_rv30_tpel_pixels_tab[1][ 2] = put_rv30_tpel8_mc20_c;
+ c->put_rv30_tpel_pixels_tab[1][ 4] = put_rv30_tpel8_mc01_c;
+ c->put_rv30_tpel_pixels_tab[1][ 5] = put_rv30_tpel8_mc11_c;
+ c->put_rv30_tpel_pixels_tab[1][ 6] = put_rv30_tpel8_mc21_c;
+ c->put_rv30_tpel_pixels_tab[1][ 8] = put_rv30_tpel8_mc02_c;
+ c->put_rv30_tpel_pixels_tab[1][ 9] = put_rv30_tpel8_mc12_c;
+ c->put_rv30_tpel_pixels_tab[1][10] = put_rv30_tpel8_mc22_c;
+ c->avg_rv30_tpel_pixels_tab[1][ 0] = c->avg_h264_qpel_pixels_tab[1][0];
+ c->avg_rv30_tpel_pixels_tab[1][ 1] = avg_rv30_tpel8_mc10_c;
+ c->avg_rv30_tpel_pixels_tab[1][ 2] = avg_rv30_tpel8_mc20_c;
+ c->avg_rv30_tpel_pixels_tab[1][ 4] = avg_rv30_tpel8_mc01_c;
+ c->avg_rv30_tpel_pixels_tab[1][ 5] = avg_rv30_tpel8_mc11_c;
+ c->avg_rv30_tpel_pixels_tab[1][ 6] = avg_rv30_tpel8_mc21_c;
+ c->avg_rv30_tpel_pixels_tab[1][ 8] = avg_rv30_tpel8_mc02_c;
+ c->avg_rv30_tpel_pixels_tab[1][ 9] = avg_rv30_tpel8_mc12_c;
+ c->avg_rv30_tpel_pixels_tab[1][10] = avg_rv30_tpel8_mc22_c;
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv34.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv34.c
new file mode 100644
index 000000000..8948c90b9
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv34.c
@@ -0,0 +1,1535 @@
+/*
+ * RV30/40 decoder common data
+ * Copyright (c) 2007 Mike Melanson, Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/rv34.c
+ * RV30/40 decoder common data
+ */
+
+#include "avcodec.h"
+#include "dsputil.h"
+#include "mpegvideo.h"
+#include "golomb.h"
+#include "mathops.h"
+#include "rectangle.h"
+
+#include "rv34vlc.h"
+#include "rv34data.h"
+#include "rv34.h"
+
+//#define DEBUG
+
+static inline void ZERO8x2(void* dst, int stride)
+{
+ fill_rectangle(dst, 1, 2, stride, 0, 4);
+ fill_rectangle(((uint8_t*)(dst))+4, 1, 2, stride, 0, 4);
+}
+
+/** translation of RV30/40 macroblock types to lavc ones */
+static const int rv34_mb_type_to_lavc[12] = {
+ MB_TYPE_INTRA,
+ MB_TYPE_INTRA16x16 | MB_TYPE_SEPARATE_DC,
+ MB_TYPE_16x16 | MB_TYPE_L0,
+ MB_TYPE_8x8 | MB_TYPE_L0,
+ MB_TYPE_16x16 | MB_TYPE_L0,
+ MB_TYPE_16x16 | MB_TYPE_L1,
+ MB_TYPE_SKIP,
+ MB_TYPE_DIRECT2 | MB_TYPE_16x16,
+ MB_TYPE_16x8 | MB_TYPE_L0,
+ MB_TYPE_8x16 | MB_TYPE_L0,
+ MB_TYPE_16x16 | MB_TYPE_L0L1,
+ MB_TYPE_16x16 | MB_TYPE_L0 | MB_TYPE_SEPARATE_DC
+};
+
+
+static RV34VLC intra_vlcs[NUM_INTRA_TABLES], inter_vlcs[NUM_INTER_TABLES];
+
+/**
+ * @defgroup vlc RV30/40 VLC generating functions
+ * @{
+ */
+
+static const int table_offs[] = {
+ 0, 1818, 3622, 4144, 4698, 5234, 5804, 5868, 5900, 5932,
+ 5996, 6252, 6316, 6348, 6380, 7674, 8944, 10274, 11668, 12250,
+ 14060, 15846, 16372, 16962, 17512, 18148, 18180, 18212, 18244, 18308,
+ 18564, 18628, 18660, 18692, 20036, 21314, 22648, 23968, 24614, 26384,
+ 28190, 28736, 29366, 29938, 30608, 30640, 30672, 30704, 30768, 31024,
+ 31088, 31120, 31184, 32570, 33898, 35236, 36644, 37286, 39020, 40802,
+ 41368, 42052, 42692, 43348, 43380, 43412, 43444, 43476, 43604, 43668,
+ 43700, 43732, 45100, 46430, 47778, 49160, 49802, 51550, 53340, 53972,
+ 54648, 55348, 55994, 56122, 56154, 56186, 56218, 56346, 56410, 56442,
+ 56474, 57878, 59290, 60636, 62036, 62682, 64460, 64524, 64588, 64716,
+ 64844, 66076, 67466, 67978, 68542, 69064, 69648, 70296, 72010, 72074,
+ 72138, 72202, 72330, 73572, 74936, 75454, 76030, 76566, 77176, 77822,
+ 79582, 79646, 79678, 79742, 79870, 81180, 82536, 83064, 83672, 84242,
+ 84934, 85576, 87384, 87448, 87480, 87544, 87672, 88982, 90340, 90902,
+ 91598, 92182, 92846, 93488, 95246, 95278, 95310, 95374, 95502, 96878,
+ 98266, 98848, 99542, 100234, 100884, 101524, 103320, 103352, 103384, 103416,
+ 103480, 104874, 106222, 106910, 107584, 108258, 108902, 109544, 111366, 111398,
+ 111430, 111462, 111494, 112878, 114320, 114988, 115660, 116310, 116950, 117592
+};
+
+static VLC_TYPE table_data[117592][2];
+
+/**
+ * Generate VLC from codeword lengths.
+ * @param bits codeword lengths (zeroes are accepted)
+ * @param size length of input data
+ * @param vlc output VLC
+ * @param insyms symbols for input codes (NULL for default ones)
+ * @param num VLC table number (for static initialization)
+ */
+static void rv34_gen_vlc(const uint8_t *bits, int size, VLC *vlc, const uint8_t *insyms,
+ const int num)
+{
+ int i;
+ int counts[17] = {0}, codes[17];
+ #if __STDC_VERSION__ >= 199901L
+ uint16_t cw[size], syms[size];
+ uint8_t bits2[size];
+ #else
+ uint16_t *cw = _alloca(sizeof(uint16_t) * size);
+ uint16_t *syms = _alloca(sizeof(uint16_t) * size);
+ uint8_t *bits2 = _alloca(sizeof(uint8_t) * size);
+ #endif
+ int maxbits = 0, realsize = 0;
+
+ for(i = 0; i < size; i++){
+ if(bits[i]){
+ bits2[realsize] = bits[i];
+ syms[realsize] = insyms ? insyms[i] : i;
+ realsize++;
+ maxbits = FFMAX(maxbits, bits[i]);
+ counts[bits[i]]++;
+ }
+ }
+
+ codes[0] = 0;
+ for(i = 0; i < 16; i++)
+ codes[i+1] = (codes[i] + counts[i]) << 1;
+ for(i = 0; i < realsize; i++)
+ cw[i] = codes[bits2[i]]++;
+
+ vlc->table = &table_data[table_offs[num]];
+ vlc->table_allocated = table_offs[num + 1] - table_offs[num];
+ init_vlc_sparse(vlc, FFMIN(maxbits, 9), realsize,
+ bits2, 1, 1,
+ cw, 2, 2,
+ syms, 2, 2, INIT_VLC_USE_NEW_STATIC);
+}
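+
+/* The loop above builds a canonical prefix code from the lengths alone.
+ * Small worked example: for lengths {1, 2, 3, 3} it assigns the codewords
+ * 0, 10, 110 and 111 respectively (counts[] = {1, 1, 2}, so codes[] start
+ * at 0, 2 and 6 for lengths 1, 2 and 3). */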
+
+/**
+ * Initialize all tables.
+ */
+static av_cold void rv34_init_tables(void)
+{
+ int i, j, k;
+
+ for(i = 0; i < NUM_INTRA_TABLES; i++){
+ for(j = 0; j < 2; j++){
+ rv34_gen_vlc(rv34_table_intra_cbppat [i][j], CBPPAT_VLC_SIZE, &intra_vlcs[i].cbppattern[j], NULL, 19*i + 0 + j);
+ rv34_gen_vlc(rv34_table_intra_secondpat[i][j], OTHERBLK_VLC_SIZE, &intra_vlcs[i].second_pattern[j], NULL, 19*i + 2 + j);
+ rv34_gen_vlc(rv34_table_intra_thirdpat [i][j], OTHERBLK_VLC_SIZE, &intra_vlcs[i].third_pattern[j], NULL, 19*i + 4 + j);
+ for(k = 0; k < 4; k++){
+ rv34_gen_vlc(rv34_table_intra_cbp[i][j+k*2], CBP_VLC_SIZE, &intra_vlcs[i].cbp[j][k], rv34_cbp_code, 19*i + 6 + j*4 + k);
+ }
+ }
+ for(j = 0; j < 4; j++){
+ rv34_gen_vlc(rv34_table_intra_firstpat[i][j], FIRSTBLK_VLC_SIZE, &intra_vlcs[i].first_pattern[j], NULL, 19*i + 14 + j);
+ }
+ rv34_gen_vlc(rv34_intra_coeff[i], COEFF_VLC_SIZE, &intra_vlcs[i].coefficient, NULL, 19*i + 18);
+ }
+
+ for(i = 0; i < NUM_INTER_TABLES; i++){
+ rv34_gen_vlc(rv34_inter_cbppat[i], CBPPAT_VLC_SIZE, &inter_vlcs[i].cbppattern[0], NULL, i*12 + 95);
+ for(j = 0; j < 4; j++){
+ rv34_gen_vlc(rv34_inter_cbp[i][j], CBP_VLC_SIZE, &inter_vlcs[i].cbp[0][j], rv34_cbp_code, i*12 + 96 + j);
+ }
+ for(j = 0; j < 2; j++){
+ rv34_gen_vlc(rv34_table_inter_firstpat [i][j], FIRSTBLK_VLC_SIZE, &inter_vlcs[i].first_pattern[j], NULL, i*12 + 100 + j);
+ rv34_gen_vlc(rv34_table_inter_secondpat[i][j], OTHERBLK_VLC_SIZE, &inter_vlcs[i].second_pattern[j], NULL, i*12 + 102 + j);
+ rv34_gen_vlc(rv34_table_inter_thirdpat [i][j], OTHERBLK_VLC_SIZE, &inter_vlcs[i].third_pattern[j], NULL, i*12 + 104 + j);
+ }
+ rv34_gen_vlc(rv34_inter_coeff[i], COEFF_VLC_SIZE, &inter_vlcs[i].coefficient, NULL, i*12 + 106);
+ }
+}
+
+/** @} */ // vlc group
+
+
+/**
+ * @defgroup transform RV30/40 inverse transform functions
+ * @{
+ */
+
+static av_always_inline void rv34_row_transform(int temp[16], DCTELEM *block)
+{
+ int i;
+
+ for(i=0; i<4; i++){
+ const int z0= 13*(block[i+8*0] + block[i+8*2]);
+ const int z1= 13*(block[i+8*0] - block[i+8*2]);
+ const int z2= 7* block[i+8*1] - 17*block[i+8*3];
+ const int z3= 17* block[i+8*1] + 7*block[i+8*3];
+
+ temp[4*i+0]= z0+z3;
+ temp[4*i+1]= z1+z2;
+ temp[4*i+2]= z1-z2;
+ temp[4*i+3]= z0-z3;
+ }
+}
+
+/**
+ * RealVideo 3.0/4.0 inverse transform
+ * Code is almost the same as in SVQ3, only scaling is different.
+ */
+static void rv34_inv_transform(DCTELEM *block){
+ int temp[16];
+ int i;
+
+ rv34_row_transform(temp, block);
+
+ for(i=0; i<4; i++){
+ const int z0= 13*(temp[4*0+i] + temp[4*2+i]) + 0x200;
+ const int z1= 13*(temp[4*0+i] - temp[4*2+i]) + 0x200;
+ const int z2= 7* temp[4*1+i] - 17*temp[4*3+i];
+ const int z3= 17* temp[4*1+i] + 7*temp[4*3+i];
+
+ block[i*8+0]= (z0 + z3)>>10;
+ block[i*8+1]= (z1 + z2)>>10;
+ block[i*8+2]= (z1 - z2)>>10;
+ block[i*8+3]= (z0 - z3)>>10;
+ }
+
+}
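+
+/* Scaling check: the 13/17/7 basis is applied once per dimension, so a block
+ * containing only a DC value d reconstructs to (13*13*d + 0x200) >> 10 in
+ * every position, i.e. the transform has an overall DC gain of 169/1024. */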
+
+/**
+ * RealVideo 3.0/4.0 inverse transform for DC block
+ *
+ * Code is almost the same as rv34_inv_transform()
+ * but final coefficients are multiplied by 1.5 and have no rounding.
+ */
+static void rv34_inv_transform_noround(DCTELEM *block){
+ int temp[16];
+ int i;
+
+ rv34_row_transform(temp, block);
+
+ for(i=0; i<4; i++){
+ const int z0= 13*(temp[4*0+i] + temp[4*2+i]);
+ const int z1= 13*(temp[4*0+i] - temp[4*2+i]);
+ const int z2= 7* temp[4*1+i] - 17*temp[4*3+i];
+ const int z3= 17* temp[4*1+i] + 7*temp[4*3+i];
+
+ block[i*8+0]= ((z0 + z3)*3)>>11;
+ block[i*8+1]= ((z1 + z2)*3)>>11;
+ block[i*8+2]= ((z1 - z2)*3)>>11;
+ block[i*8+3]= ((z0 - z3)*3)>>11;
+ }
+
+}
+
+/** @} */ // transform
+
+
+/**
+ * @defgroup block RV30/40 4x4 block decoding functions
+ * @{
+ */
+
+/**
+ * Decode coded block pattern.
+ */
+static int rv34_decode_cbp(GetBitContext *gb, RV34VLC *vlc, int table)
+{
+ int pattern, code, cbp=0;
+ int ones;
+ static const int cbp_masks[3] = {0x100000, 0x010000, 0x110000};
+ static const int shifts[4] = { 0, 2, 8, 10 };
+ const int *curshift = shifts;
+ int i, t, mask;
+
+ code = get_vlc2(gb, vlc->cbppattern[table].table, 9, 2);
+ pattern = code & 0xF;
+ code >>= 4;
+
+ ones = rv34_count_ones[pattern];
+
+ for(mask = 8; mask; mask >>= 1, curshift++){
+ if(pattern & mask)
+ cbp |= get_vlc2(gb, vlc->cbp[table][ones].table, vlc->cbp[table][ones].bits, 1) << curshift[0];
+ }
+
+ for(i = 0; i < 4; i++){
+ t = modulo_three_table[code][i];
+ if(t == 1)
+ cbp |= cbp_masks[get_bits1(gb)] << i;
+ if(t == 2)
+ cbp |= cbp_masks[2] << i;
+ }
+ return cbp;
+}
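+
+/* Layout of the returned value: bits 0-15 flag the 16 luma 4x4 blocks,
+ * bits 16-19 the U blocks and bits 20-23 the V blocks (cf. U_CBP_MASK and
+ * V_CBP_MASK). The first VLC packs a 4-bit pattern of coded 8x8 luma blocks
+ * in its low nibble and a four-digit base-3 chroma descriptor above it:
+ * digit 1 means only one of the two chroma planes has this block coded
+ * (an extra flag bit selects which one), digit 2 means both are coded. */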
+
+/**
+ * Get one coefficient value from the bitstream and store it.
+ */
+static inline void decode_coeff(DCTELEM *dst, int coef, int esc, GetBitContext *gb, VLC* vlc)
+{
+ if(coef){
+ if(coef == esc){
+ coef = get_vlc2(gb, vlc->table, 9, 2);
+ if(coef > 23){
+ coef -= 23;
+ coef = 22 + ((1 << coef) | get_bits(gb, coef));
+ }
+ coef += esc;
+ }
+ if(get_bits1(gb))
+ coef = -coef;
+ *dst = coef;
+ }
+}
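+
+/* The escape mechanism above: small magnitudes are stored directly; a digit
+ * equal to esc (3 for the first coefficient of a subblock, 2 for the others)
+ * marks an escape, in which case the magnitude is read from the coefficient
+ * VLC, and VLC values above 23 extend the range with (value - 23) extra raw
+ * bits. A trailing bit gives the sign. */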
+
+/**
+ * Decode 2x2 subblock of coefficients.
+ */
+static inline void decode_subblock(DCTELEM *dst, int code, const int is_block2, GetBitContext *gb, VLC *vlc)
+{
+ int coeffs[4];
+
+ coeffs[0] = modulo_three_table[code][0];
+ coeffs[1] = modulo_three_table[code][1];
+ coeffs[2] = modulo_three_table[code][2];
+ coeffs[3] = modulo_three_table[code][3];
+ decode_coeff(dst , coeffs[0], 3, gb, vlc);
+ if(is_block2){
+ decode_coeff(dst+8, coeffs[1], 2, gb, vlc);
+ decode_coeff(dst+1, coeffs[2], 2, gb, vlc);
+ }else{
+ decode_coeff(dst+1, coeffs[1], 2, gb, vlc);
+ decode_coeff(dst+8, coeffs[2], 2, gb, vlc);
+ }
+ decode_coeff(dst+9, coeffs[3], 2, gb, vlc);
+}
+
+/**
+ * Decode coefficients for 4x4 block.
+ *
+ * This is done by filling 2x2 subblocks with decoded coefficients
+ * in this order (the same for subblocks and subblock coefficients):
+ * o--o
+ * /
+ * /
+ * o--o
+ */
+
+static inline void rv34_decode_block(DCTELEM *dst, GetBitContext *gb, RV34VLC *rvlc, int fc, int sc)
+{
+ int code, pattern;
+
+ code = get_vlc2(gb, rvlc->first_pattern[fc].table, 9, 2);
+
+ pattern = code & 0x7;
+
+ code >>= 3;
+ decode_subblock(dst, code, 0, gb, &rvlc->coefficient);
+
+ if(pattern & 4){
+ code = get_vlc2(gb, rvlc->second_pattern[sc].table, 9, 2);
+ decode_subblock(dst + 2, code, 0, gb, &rvlc->coefficient);
+ }
+ if(pattern & 2){ // Looks like coefficients 1 and 2 are swapped for this block
+ code = get_vlc2(gb, rvlc->second_pattern[sc].table, 9, 2);
+ decode_subblock(dst + 8*2, code, 1, gb, &rvlc->coefficient);
+ }
+ if(pattern & 1){
+ code = get_vlc2(gb, rvlc->third_pattern[sc].table, 9, 2);
+ decode_subblock(dst + 8*2+2, code, 0, gb, &rvlc->coefficient);
+ }
+
+}
+
+/**
+ * Dequantize ordinary 4x4 block.
+ * @todo optimize
+ */
+static inline void rv34_dequant4x4(DCTELEM *block, int Qdc, int Q)
+{
+ int i, j;
+
+ block[0] = (block[0] * Qdc + 8) >> 4;
+ for(i = 0; i < 4; i++)
+ for(j = !i; j < 4; j++)
+ block[j + i*8] = (block[j + i*8] * Q + 8) >> 4;
+}
+
+/**
+ * Dequantize 4x4 block of DC values for 16x16 macroblock.
+ * @todo optimize
+ */
+static inline void rv34_dequant4x4_16x16(DCTELEM *block, int Qdc, int Q)
+{
+ int i;
+
+ for(i = 0; i < 3; i++)
+ block[rv34_dezigzag[i]] = (block[rv34_dezigzag[i]] * Qdc + 8) >> 4;
+ for(; i < 16; i++)
+ block[rv34_dezigzag[i]] = (block[rv34_dezigzag[i]] * Q + 8) >> 4;
+}
+/** @} */ //block functions
+
+
+/**
+ * @defgroup bitstream RV30/40 bitstream parsing
+ * @{
+ */
+
+/**
+ * Decode starting slice position.
+ * @todo Maybe replace with ff_h263_decode_mba() ?
+ */
+int ff_rv34_get_start_offset(GetBitContext *gb, int mb_size)
+{
+ int i;
+ for(i = 0; i < 5; i++)
+ if(rv34_mb_max_sizes[i] >= mb_size - 1)
+ break;
+ return rv34_mb_bits_sizes[i];
+}
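+
+/* In other words: pick the smallest entry of rv34_mb_bits_sizes whose
+ * companion rv34_mb_max_sizes value covers the largest valid macroblock
+ * index (mb_size - 1); the caller then reads that many bits to obtain the
+ * starting macroblock number of the slice. */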
+
+/**
+ * Select VLC set for decoding from current quantizer, modifier and frame type.
+ */
+static inline RV34VLC* choose_vlc_set(int quant, int mod, int type)
+{
+ if(mod == 2 && quant < 19) quant += 10;
+ else if(mod && quant < 26) quant += 5;
+ return type ? &inter_vlcs[rv34_quant_to_vlc_set[1][av_clip(quant, 0, 30)]]
+ : &intra_vlcs[rv34_quant_to_vlc_set[0][av_clip(quant, 0, 30)]];
+}
+
+/**
+ * Decode quantizer difference and return modified quantizer.
+ */
+static inline int rv34_decode_dquant(GetBitContext *gb, int quant)
+{
+ if(get_bits1(gb))
+ return rv34_dquant_tab[get_bits1(gb)][quant];
+ else
+ return get_bits(gb, 5);
+}
+
+/** @} */ //bitstream functions
+
+/**
+ * @defgroup mv motion vector related code (prediction, reconstruction, motion compensation)
+ * @{
+ */
+
+/** macroblock partition width in 8x8 blocks */
+static const uint8_t part_sizes_w[RV34_MB_TYPES] = { 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2 };
+
+/** macroblock partition height in 8x8 blocks */
+static const uint8_t part_sizes_h[RV34_MB_TYPES] = { 2, 2, 2, 1, 2, 2, 2, 2, 1, 2, 2, 2 };
+
+/** availability index for subblocks */
+static const uint8_t avail_indexes[4] = { 6, 7, 10, 11 };
+
+/**
+ * motion vector prediction
+ *
+ * Motion prediction performed for the block by using median prediction of
+ * motion vectors from the left, top and right top blocks but in corner cases
+ * some other vectors may be used instead.
+ */
+static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int dmv_no)
+{
+ MpegEncContext *s = &r->s;
+ int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
+ int A[2] = {0}, B[2], C[2];
+ int i, j;
+ int mx, my;
+ int avail_index = avail_indexes[subblock_no];
+ int c_off = part_sizes_w[block_type];
+
+ mv_pos += (subblock_no & 1) + (subblock_no >> 1)*s->b8_stride;
+ if(subblock_no == 3)
+ c_off = -1;
+
+ if(r->avail_cache[avail_index - 1]){
+ A[0] = s->current_picture_ptr->motion_val[0][mv_pos-1][0];
+ A[1] = s->current_picture_ptr->motion_val[0][mv_pos-1][1];
+ }
+ if(r->avail_cache[avail_index - 4]){
+ B[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][0];
+ B[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][1];
+ }else{
+ B[0] = A[0];
+ B[1] = A[1];
+ }
+ if(!r->avail_cache[avail_index - 4 + c_off]){
+ if(r->avail_cache[avail_index - 4] && (r->avail_cache[avail_index - 1] || r->rv30)){
+ C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][0];
+ C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][1];
+ }else{
+ C[0] = A[0];
+ C[1] = A[1];
+ }
+ }else{
+ C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][0];
+ C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][1];
+ }
+ mx = mid_pred(A[0], B[0], C[0]);
+ my = mid_pred(A[1], B[1], C[1]);
+ mx += r->dmv[dmv_no][0];
+ my += r->dmv[dmv_no][1];
+ for(j = 0; j < part_sizes_h[block_type]; j++){
+ for(i = 0; i < part_sizes_w[block_type]; i++){
+ s->current_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][0] = mx;
+ s->current_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][1] = my;
+ }
+ }
+}
+
+#define GET_PTS_DIFF(a, b) ((a - b + 8192) & 0x1FFF)
+
+/**
+ * Calculate motion vector component that should be added for direct blocks.
+ */
+static int calc_add_mv(RV34DecContext *r, int dir, int val)
+{
+ int refdist = GET_PTS_DIFF(r->next_pts, r->last_pts);
+ int dist = dir ? -GET_PTS_DIFF(r->next_pts, r->cur_pts) : GET_PTS_DIFF(r->cur_pts, r->last_pts);
+ int mul;
+
+ if(!refdist) return 0;
+ mul = (dist << 14) / refdist;
+ return (val * mul + 0x2000) >> 14;
+}
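+
+/* PTS values are 13-bit, so GET_PTS_DIFF() is a difference modulo 8192.
+ * The scale factor mul is a 2.14 fixed-point ratio of temporal distances;
+ * e.g. if the current frame lies exactly halfway between the references,
+ * mul = 8192 and a stored component of 6 becomes (6*8192 + 0x2000) >> 14 = 3. */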
+
+/**
+ * Predict motion vector for B-frame macroblock.
+ */
+static inline void rv34_pred_b_vector(int A[2], int B[2], int C[2],
+ int A_avail, int B_avail, int C_avail,
+ int *mx, int *my)
+{
+ if(A_avail + B_avail + C_avail != 3){
+ *mx = A[0] + B[0] + C[0];
+ *my = A[1] + B[1] + C[1];
+ if(A_avail + B_avail + C_avail == 2){
+ *mx /= 2;
+ *my /= 2;
+ }
+ }else{
+ *mx = mid_pred(A[0], B[0], C[0]);
+ *my = mid_pred(A[1], B[1], C[1]);
+ }
+}
+
+/**
+ * motion vector prediction for B-frames
+ */
+static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir)
+{
+ MpegEncContext *s = &r->s;
+ int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
+ int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
+ int A[2], B[2], C[2];
+ int has_A = 0, has_B = 0, has_C = 0;
+ int mx, my;
+ int i, j;
+ Picture *cur_pic = s->current_picture_ptr;
+ const int mask = dir ? MB_TYPE_L1 : MB_TYPE_L0;
+ int type = cur_pic->mb_type[mb_pos];
+
+ memset(A, 0, sizeof(A));
+ memset(B, 0, sizeof(B));
+ memset(C, 0, sizeof(C));
+ if((r->avail_cache[6-1] & type) & mask){
+ A[0] = cur_pic->motion_val[dir][mv_pos - 1][0];
+ A[1] = cur_pic->motion_val[dir][mv_pos - 1][1];
+ has_A = 1;
+ }
+ if((r->avail_cache[6-4] & type) & mask){
+ B[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride][0];
+ B[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride][1];
+ has_B = 1;
+ }
+ if(r->avail_cache[6-4] && (r->avail_cache[6-2] & type) & mask){
+ C[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride + 2][0];
+ C[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride + 2][1];
+ has_C = 1;
+ }else if((s->mb_x+1) == s->mb_width && (r->avail_cache[6-5] & type) & mask){
+ C[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride - 1][0];
+ C[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride - 1][1];
+ has_C = 1;
+ }
+
+ rv34_pred_b_vector(A, B, C, has_A, has_B, has_C, &mx, &my);
+
+ mx += r->dmv[dir][0];
+ my += r->dmv[dir][1];
+
+ for(j = 0; j < 2; j++){
+ for(i = 0; i < 2; i++){
+ cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][0] = mx;
+ cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][1] = my;
+ }
+ }
+ if(block_type == RV34_MB_B_BACKWARD || block_type == RV34_MB_B_FORWARD){
+ ZERO8x2(cur_pic->motion_val[!dir][mv_pos], s->b8_stride);
+ }
+}
+
+/**
+ * motion vector prediction - RV3 version
+ */
+static void rv34_pred_mv_rv3(RV34DecContext *r, int block_type, int dir)
+{
+ MpegEncContext *s = &r->s;
+ int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
+ int A[2] = {0}, B[2], C[2];
+ int i, j, k;
+ int mx, my;
+ int avail_index = avail_indexes[0];
+
+ if(r->avail_cache[avail_index - 1]){
+ A[0] = s->current_picture_ptr->motion_val[0][mv_pos-1][0];
+ A[1] = s->current_picture_ptr->motion_val[0][mv_pos-1][1];
+ }
+ if(r->avail_cache[avail_index - 4]){
+ B[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][0];
+ B[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][1];
+ }else{
+ B[0] = A[0];
+ B[1] = A[1];
+ }
+ if(!r->avail_cache[avail_index - 4 + 2]){
+ if(r->avail_cache[avail_index - 4] && (r->avail_cache[avail_index - 1])){
+ C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][0];
+ C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][1];
+ }else{
+ C[0] = A[0];
+ C[1] = A[1];
+ }
+ }else{
+ C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+2][0];
+ C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+2][1];
+ }
+ mx = mid_pred(A[0], B[0], C[0]);
+ my = mid_pred(A[1], B[1], C[1]);
+ mx += r->dmv[0][0];
+ my += r->dmv[0][1];
+ for(j = 0; j < 2; j++){
+ for(i = 0; i < 2; i++){
+ for(k = 0; k < 2; k++){
+ s->current_picture_ptr->motion_val[k][mv_pos + i + j*s->b8_stride][0] = mx;
+ s->current_picture_ptr->motion_val[k][mv_pos + i + j*s->b8_stride][1] = my;
+ }
+ }
+ }
+}
+
+static const int chroma_coeffs[3] = { 0, 3, 5 };
+
+/**
+ * generic motion compensation function
+ *
+ * @param r decoder context
+ * @param block_type type of the current block
+ * @param xoff horizontal offset from the start of the current block
+ * @param yoff vertical offset from the start of the current block
+ * @param mv_off offset to the motion vector information
+ * @param width width of the current partition in 8x8 blocks
+ * @param height height of the current partition in 8x8 blocks
+ * @param dir motion compensation direction (i.e. from the last or the next reference frame)
+ * @param thirdpel motion vectors are specified in 1/3-pel units
+ * @param qpel_mc a set of functions used to perform luma motion compensation
+ * @param chroma_mc a set of functions used to perform chroma motion compensation
+ */
+static inline void rv34_mc(RV34DecContext *r, const int block_type,
+ const int xoff, const int yoff, int mv_off,
+ const int width, const int height, int dir,
+ const int thirdpel,
+ qpel_mc_func (*qpel_mc)[16],
+ h264_chroma_mc_func (*chroma_mc))
+{
+ MpegEncContext *s = &r->s;
+ uint8_t *Y, *U, *V, *srcY, *srcU, *srcV;
+ int dxy, mx, my, umx, umy, lx, ly, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
+ int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride + mv_off;
+ int is16x16 = 1;
+
+ if(thirdpel){
+ int chroma_mx, chroma_my;
+ mx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) / 3 - (1 << 24);
+ my = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) / 3 - (1 << 24);
+ lx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) % 3;
+ ly = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) % 3;
+ chroma_mx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + 1) >> 1;
+ chroma_my = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + 1) >> 1;
+ umx = (chroma_mx + (3 << 24)) / 3 - (1 << 24);
+ umy = (chroma_my + (3 << 24)) / 3 - (1 << 24);
+ uvmx = chroma_coeffs[(chroma_mx + (3 << 24)) % 3];
+ uvmy = chroma_coeffs[(chroma_my + (3 << 24)) % 3];
+ }else{
+ int cx, cy;
+ mx = s->current_picture_ptr->motion_val[dir][mv_pos][0] >> 2;
+ my = s->current_picture_ptr->motion_val[dir][mv_pos][1] >> 2;
+ lx = s->current_picture_ptr->motion_val[dir][mv_pos][0] & 3;
+ ly = s->current_picture_ptr->motion_val[dir][mv_pos][1] & 3;
+ cx = s->current_picture_ptr->motion_val[dir][mv_pos][0] / 2;
+ cy = s->current_picture_ptr->motion_val[dir][mv_pos][1] / 2;
+ umx = cx >> 2;
+ umy = cy >> 2;
+ uvmx = (cx & 3) << 1;
+ uvmy = (cy & 3) << 1;
+ //due to some flaw, RV40 uses the same chroma MC routine for the H2V2 and H3V3 cases
+ if(uvmx == 6 && uvmy == 6)
+ uvmx = uvmy = 4;
+ }
+ dxy = ly*4 + lx;
+ srcY = dir ? s->next_picture_ptr->data[0] : s->last_picture_ptr->data[0];
+ srcU = dir ? s->next_picture_ptr->data[1] : s->last_picture_ptr->data[1];
+ srcV = dir ? s->next_picture_ptr->data[2] : s->last_picture_ptr->data[2];
+ src_x = s->mb_x * 16 + xoff + mx;
+ src_y = s->mb_y * 16 + yoff + my;
+ uvsrc_x = s->mb_x * 8 + (xoff >> 1) + umx;
+ uvsrc_y = s->mb_y * 8 + (yoff >> 1) + umy;
+ srcY += src_y * s->linesize + src_x;
+ srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
+ srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
+ if( (unsigned)(src_x - !!lx*2) > s->h_edge_pos - !!lx*2 - (width <<3) - 4
+ || (unsigned)(src_y - !!ly*2) > s->v_edge_pos - !!ly*2 - (height<<3) - 4){
+ uint8_t *uvbuf= s->edge_emu_buffer + 22 * s->linesize;
+
+ srcY -= 2 + 2*s->linesize;
+ ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, (width<<3)+6, (height<<3)+6,
+ src_x - 2, src_y - 2, s->h_edge_pos, s->v_edge_pos);
+ srcY = s->edge_emu_buffer + 2 + 2*s->linesize;
+ ff_emulated_edge_mc(uvbuf , srcU, s->uvlinesize, (width<<2)+1, (height<<2)+1,
+ uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
+ ff_emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, (width<<2)+1, (height<<2)+1,
+ uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
+ srcU = uvbuf;
+ srcV = uvbuf + 16;
+ }
+ Y = s->dest[0] + xoff + yoff *s->linesize;
+ U = s->dest[1] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
+ V = s->dest[2] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
+
+ if(block_type == RV34_MB_P_16x8){
+ qpel_mc[1][dxy](Y, srcY, s->linesize);
+ Y += 8;
+ srcY += 8;
+ }else if(block_type == RV34_MB_P_8x16){
+ qpel_mc[1][dxy](Y, srcY, s->linesize);
+ Y += 8 * s->linesize;
+ srcY += 8 * s->linesize;
+ }
+ is16x16 = (block_type != RV34_MB_P_8x8) && (block_type != RV34_MB_P_16x8) && (block_type != RV34_MB_P_8x16);
+ qpel_mc[!is16x16][dxy](Y, srcY, s->linesize);
+ chroma_mc[2-width] (U, srcU, s->uvlinesize, height*4, uvmx, uvmy);
+ chroma_mc[2-width] (V, srcV, s->uvlinesize, height*4, uvmx, uvmy);
+}
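+
+/* Note on the thirdpel branch above: adding 3 << 24 before the /3 and %3
+ * keeps division and remainder well-behaved for negative vector components,
+ * so mx/my are the integer parts and lx/ly the fractional thirds selecting
+ * the tpel function via dxy. Chroma vectors are halved and their thirds are
+ * mapped by chroma_coeffs[] to 0, 3 or 5, approximating 1/3 and 2/3 with the
+ * eighth-pel steps expected by the shared H.264 chroma MC routines. */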
+
+static void rv34_mc_1mv(RV34DecContext *r, const int block_type,
+ const int xoff, const int yoff, int mv_off,
+ const int width, const int height, int dir)
+{
+ rv34_mc(r, block_type, xoff, yoff, mv_off, width, height, dir, r->rv30,
+ r->rv30 ? r->s.dsp.put_rv30_tpel_pixels_tab
+ : r->s.dsp.put_rv40_qpel_pixels_tab,
+ r->rv30 ? r->s.dsp.put_h264_chroma_pixels_tab
+ : r->s.dsp.put_rv40_chroma_pixels_tab);
+}
+
+static void rv34_mc_2mv(RV34DecContext *r, const int block_type)
+{
+ rv34_mc(r, block_type, 0, 0, 0, 2, 2, 0, r->rv30,
+ r->rv30 ? r->s.dsp.put_rv30_tpel_pixels_tab
+ : r->s.dsp.put_rv40_qpel_pixels_tab,
+ r->rv30 ? r->s.dsp.put_h264_chroma_pixels_tab
+ : r->s.dsp.put_rv40_chroma_pixels_tab);
+ rv34_mc(r, block_type, 0, 0, 0, 2, 2, 1, r->rv30,
+ r->rv30 ? r->s.dsp.avg_rv30_tpel_pixels_tab
+ : r->s.dsp.avg_rv40_qpel_pixels_tab,
+ r->rv30 ? r->s.dsp.avg_h264_chroma_pixels_tab
+ : r->s.dsp.avg_rv40_chroma_pixels_tab);
+}
+
+static void rv34_mc_2mv_skip(RV34DecContext *r)
+{
+ int i, j;
+ for(j = 0; j < 2; j++)
+ for(i = 0; i < 2; i++){
+ rv34_mc(r, RV34_MB_P_8x8, i*8, j*8, i+j*r->s.b8_stride, 1, 1, 0, r->rv30,
+ r->rv30 ? r->s.dsp.put_rv30_tpel_pixels_tab
+ : r->s.dsp.put_rv40_qpel_pixels_tab,
+ r->rv30 ? r->s.dsp.put_h264_chroma_pixels_tab
+ : r->s.dsp.put_rv40_chroma_pixels_tab);
+ rv34_mc(r, RV34_MB_P_8x8, i*8, j*8, i+j*r->s.b8_stride, 1, 1, 1, r->rv30,
+ r->rv30 ? r->s.dsp.avg_rv30_tpel_pixels_tab
+ : r->s.dsp.avg_rv40_qpel_pixels_tab,
+ r->rv30 ? r->s.dsp.avg_h264_chroma_pixels_tab
+ : r->s.dsp.avg_rv40_chroma_pixels_tab);
+ }
+}
+
+/** number of motion vectors in each macroblock type */
+static const int num_mvs[RV34_MB_TYPES] = { 0, 0, 1, 4, 1, 1, 0, 0, 2, 2, 2, 1 };
+
+/**
+ * Decode motion vector differences
+ * and perform motion vector reconstruction and motion compensation.
+ */
+static int rv34_decode_mv(RV34DecContext *r, int block_type)
+{
+ MpegEncContext *s = &r->s;
+ GetBitContext *gb = &s->gb;
+ int i, j, k, l;
+ int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
+ int next_bt;
+
+ memset(r->dmv, 0, sizeof(r->dmv));
+ for(i = 0; i < num_mvs[block_type]; i++){
+ r->dmv[i][0] = svq3_get_se_golomb(gb);
+ r->dmv[i][1] = svq3_get_se_golomb(gb);
+ }
+ switch(block_type){
+ case RV34_MB_TYPE_INTRA:
+ case RV34_MB_TYPE_INTRA16x16:
+ ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
+ return 0;
+ case RV34_MB_SKIP:
+ if(s->pict_type == FF_P_TYPE){
+ ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
+ rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0);
+ break;
+ }
+ case RV34_MB_B_DIRECT:
+ //surprisingly, it uses the motion scheme from the next reference frame
+ next_bt = s->next_picture_ptr->mb_type[s->mb_x + s->mb_y * s->mb_stride];
+ if(IS_INTRA(next_bt) || IS_SKIP(next_bt)){
+ ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
+ ZERO8x2(s->current_picture_ptr->motion_val[1][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
+ }else
+ for(j = 0; j < 2; j++)
+ for(i = 0; i < 2; i++)
+ for(k = 0; k < 2; k++)
+ for(l = 0; l < 2; l++)
+ s->current_picture_ptr->motion_val[l][mv_pos + i + j*s->b8_stride][k] = calc_add_mv(r, l, s->next_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][k]);
+ if(!(IS_16X8(next_bt) || IS_8X16(next_bt) || IS_8X8(next_bt))) //we can use whole macroblock MC
+ rv34_mc_2mv(r, block_type);
+ else
+ rv34_mc_2mv_skip(r);
+ ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
+ break;
+ case RV34_MB_P_16x16:
+ case RV34_MB_P_MIX16x16:
+ rv34_pred_mv(r, block_type, 0, 0);
+ rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0);
+ break;
+ case RV34_MB_B_FORWARD:
+ case RV34_MB_B_BACKWARD:
+ r->dmv[1][0] = r->dmv[0][0];
+ r->dmv[1][1] = r->dmv[0][1];
+ if(r->rv30)
+ rv34_pred_mv_rv3(r, block_type, block_type == RV34_MB_B_BACKWARD);
+ else
+ rv34_pred_mv_b (r, block_type, block_type == RV34_MB_B_BACKWARD);
+ rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, block_type == RV34_MB_B_BACKWARD);
+ break;
+ case RV34_MB_P_16x8:
+ case RV34_MB_P_8x16:
+ rv34_pred_mv(r, block_type, 0, 0);
+ rv34_pred_mv(r, block_type, 1 + (block_type == RV34_MB_P_16x8), 1);
+ if(block_type == RV34_MB_P_16x8){
+ rv34_mc_1mv(r, block_type, 0, 0, 0, 2, 1, 0);
+ rv34_mc_1mv(r, block_type, 0, 8, s->b8_stride, 2, 1, 0);
+ }
+ if(block_type == RV34_MB_P_8x16){
+ rv34_mc_1mv(r, block_type, 0, 0, 0, 1, 2, 0);
+ rv34_mc_1mv(r, block_type, 8, 0, 1, 1, 2, 0);
+ }
+ break;
+ case RV34_MB_B_BIDIR:
+ rv34_pred_mv_b (r, block_type, 0);
+ rv34_pred_mv_b (r, block_type, 1);
+ rv34_mc_2mv (r, block_type);
+ break;
+ case RV34_MB_P_8x8:
+ for(i=0;i< 4;i++){
+ rv34_pred_mv(r, block_type, i, i);
+ rv34_mc_1mv (r, block_type, (i&1)<<3, (i&2)<<2, (i&1)+(i>>1)*s->b8_stride, 1, 1, 0);
+ }
+ break;
+ }
+
+ return 0;
+}
+/** @} */ // mv group
+
+/**
+ * @defgroup recons Macroblock reconstruction functions
+ * @{
+ */
+/** mapping of RV30/40 intra prediction types to standard H.264 types */
+static const int ittrans[9] = {
+ DC_PRED, VERT_PRED, HOR_PRED, DIAG_DOWN_RIGHT_PRED, DIAG_DOWN_LEFT_PRED,
+ VERT_RIGHT_PRED, VERT_LEFT_PRED, HOR_UP_PRED, HOR_DOWN_PRED,
+};
+
+/** mapping of RV30/40 intra 16x16 prediction types to standard H.264 types */
+static const int ittrans16[4] = {
+ DC_PRED8x8, VERT_PRED8x8, HOR_PRED8x8, PLANE_PRED8x8,
+};
+
+/**
+ * Perform 4x4 intra prediction.
+ */
+static void rv34_pred_4x4_block(RV34DecContext *r, uint8_t *dst, int stride, int itype, int up, int left, int down, int right)
+{
+ uint8_t *prev = dst - stride + 4;
+ uint32_t topleft;
+
+ if(!up && !left)
+ itype = DC_128_PRED;
+ else if(!up){
+ if(itype == VERT_PRED) itype = HOR_PRED;
+ if(itype == DC_PRED) itype = LEFT_DC_PRED;
+ }else if(!left){
+ if(itype == HOR_PRED) itype = VERT_PRED;
+ if(itype == DC_PRED) itype = TOP_DC_PRED;
+ if(itype == DIAG_DOWN_LEFT_PRED) itype = DIAG_DOWN_LEFT_PRED_RV40_NODOWN;
+ }
+ if(!down){
+ if(itype == DIAG_DOWN_LEFT_PRED) itype = DIAG_DOWN_LEFT_PRED_RV40_NODOWN;
+ if(itype == HOR_UP_PRED) itype = HOR_UP_PRED_RV40_NODOWN;
+ if(itype == VERT_LEFT_PRED) itype = VERT_LEFT_PRED_RV40_NODOWN;
+ }
+ if(!right && up){
+ topleft = dst[-stride + 3] * 0x01010101;
+ prev = (uint8_t*)&topleft;
+ }
+ r->h.pred4x4[itype](dst, prev, stride);
+}
+
+/** add_pixels_clamped for 4x4 block */
+static void rv34_add_4x4_block(uint8_t *dst, int stride, DCTELEM block[64], int off)
+{
+ int x, y;
+ for(y = 0; y < 4; y++)
+ for(x = 0; x < 4; x++)
+ dst[x + y*stride] = av_clip_uint8(dst[x + y*stride] + block[off + x+y*8]);
+}
+
+static inline int adjust_pred16(int itype, int up, int left)
+{
+ if(!up && !left)
+ itype = DC_128_PRED8x8;
+ else if(!up){
+ if(itype == PLANE_PRED8x8)itype = HOR_PRED8x8;
+ if(itype == VERT_PRED8x8) itype = HOR_PRED8x8;
+ if(itype == DC_PRED8x8) itype = LEFT_DC_PRED8x8;
+ }else if(!left){
+ if(itype == PLANE_PRED8x8)itype = VERT_PRED8x8;
+ if(itype == HOR_PRED8x8) itype = VERT_PRED8x8;
+ if(itype == DC_PRED8x8) itype = TOP_DC_PRED8x8;
+ }
+ return itype;
+}
+
+static void rv34_output_macroblock(RV34DecContext *r, int8_t *intra_types, int cbp, int is16)
+{
+ MpegEncContext *s = &r->s;
+ DSPContext *dsp = &s->dsp;
+ int i, j;
+ uint8_t *Y, *U, *V;
+ int itype;
+ int avail[6*8] = {0};
+ int idx;
+
+ // Set neighbour information.
+ if(r->avail_cache[1])
+ avail[0] = 1;
+ if(r->avail_cache[2])
+ avail[1] = avail[2] = 1;
+ if(r->avail_cache[3])
+ avail[3] = avail[4] = 1;
+ if(r->avail_cache[4])
+ avail[5] = 1;
+ if(r->avail_cache[5])
+ avail[8] = avail[16] = 1;
+ if(r->avail_cache[9])
+ avail[24] = avail[32] = 1;
+
+ Y = s->dest[0];
+ U = s->dest[1];
+ V = s->dest[2];
+ if(!is16){
+ for(j = 0; j < 4; j++){
+ idx = 9 + j*8;
+ for(i = 0; i < 4; i++, cbp >>= 1, Y += 4, idx++){
+ rv34_pred_4x4_block(r, Y, s->linesize, ittrans[intra_types[i]], avail[idx-8], avail[idx-1], avail[idx+7], avail[idx-7]);
+ avail[idx] = 1;
+ if(cbp & 1)
+ rv34_add_4x4_block(Y, s->linesize, s->block[(i>>1)+(j&2)], (i&1)*4+(j&1)*32);
+ }
+ Y += s->linesize * 4 - 4*4;
+ intra_types += r->intra_types_stride;
+ }
+ intra_types -= r->intra_types_stride * 4;
+ fill_rectangle(r->avail_cache + 6, 2, 2, 4, 0, 4);
+ for(j = 0; j < 2; j++){
+ idx = 6 + j*4;
+ for(i = 0; i < 2; i++, cbp >>= 1, idx++){
+ rv34_pred_4x4_block(r, U + i*4 + j*4*s->uvlinesize, s->uvlinesize, ittrans[intra_types[i*2+j*2*r->intra_types_stride]], r->avail_cache[idx-4], r->avail_cache[idx-1], !i && !j, r->avail_cache[idx-3]);
+ rv34_pred_4x4_block(r, V + i*4 + j*4*s->uvlinesize, s->uvlinesize, ittrans[intra_types[i*2+j*2*r->intra_types_stride]], r->avail_cache[idx-4], r->avail_cache[idx-1], !i && !j, r->avail_cache[idx-3]);
+ r->avail_cache[idx] = 1;
+ if(cbp & 0x01)
+ rv34_add_4x4_block(U + i*4 + j*4*s->uvlinesize, s->uvlinesize, s->block[4], i*4+j*32);
+ if(cbp & 0x10)
+ rv34_add_4x4_block(V + i*4 + j*4*s->uvlinesize, s->uvlinesize, s->block[5], i*4+j*32);
+ }
+ }
+ }else{
+ itype = ittrans16[intra_types[0]];
+ itype = adjust_pred16(itype, r->avail_cache[6-4], r->avail_cache[6-1]);
+ r->h.pred16x16[itype](Y, s->linesize);
+ dsp->add_pixels_clamped(s->block[0], Y, s->linesize);
+ dsp->add_pixels_clamped(s->block[1], Y + 8, s->linesize);
+ Y += s->linesize * 8;
+ dsp->add_pixels_clamped(s->block[2], Y, s->linesize);
+ dsp->add_pixels_clamped(s->block[3], Y + 8, s->linesize);
+
+ itype = ittrans16[intra_types[0]];
+ if(itype == PLANE_PRED8x8) itype = DC_PRED8x8;
+ itype = adjust_pred16(itype, r->avail_cache[6-4], r->avail_cache[6-1]);
+ r->h.pred8x8[itype](U, s->uvlinesize);
+ dsp->add_pixels_clamped(s->block[4], U, s->uvlinesize);
+ r->h.pred8x8[itype](V, s->uvlinesize);
+ dsp->add_pixels_clamped(s->block[5], V, s->uvlinesize);
+ }
+}
+
+/** @} */ // recons group
+
+/**
+ * @addtogroup bitstream
+ * Decode macroblock header and return CBP in case of success, -1 otherwise.
+ */
+static int rv34_decode_mb_header(RV34DecContext *r, int8_t *intra_types)
+{
+ MpegEncContext *s = &r->s;
+ GetBitContext *gb = &s->gb;
+ int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
+ int i, t;
+
+ if(!r->si.type){
+ r->is16 = get_bits1(gb);
+ if(!r->is16 && !r->rv30){
+ if(!get_bits1(gb))
+ av_log(s->avctx, AV_LOG_ERROR, "Need DQUANT\n");
+ }
+ s->current_picture_ptr->mb_type[mb_pos] = r->is16 ? MB_TYPE_INTRA16x16 : MB_TYPE_INTRA;
+ r->block_type = r->is16 ? RV34_MB_TYPE_INTRA16x16 : RV34_MB_TYPE_INTRA;
+ }else{
+ r->block_type = r->decode_mb_info(r);
+ if(r->block_type == -1)
+ return -1;
+ s->current_picture_ptr->mb_type[mb_pos] = rv34_mb_type_to_lavc[r->block_type];
+ r->mb_type[mb_pos] = r->block_type;
+ if(r->block_type == RV34_MB_SKIP){
+ if(s->pict_type == FF_P_TYPE)
+ r->mb_type[mb_pos] = RV34_MB_P_16x16;
+ if(s->pict_type == FF_B_TYPE)
+ r->mb_type[mb_pos] = RV34_MB_B_DIRECT;
+ }
+ r->is16 = !!IS_INTRA16x16(s->current_picture_ptr->mb_type[mb_pos]);
+ rv34_decode_mv(r, r->block_type);
+ if(r->block_type == RV34_MB_SKIP){
+ fill_rectangle(intra_types, 4, 4, r->intra_types_stride, 0, sizeof(intra_types[0]));
+ return 0;
+ }
+ r->chroma_vlc = 1;
+ r->luma_vlc = 0;
+ }
+ if(IS_INTRA(s->current_picture_ptr->mb_type[mb_pos])){
+ if(r->is16){
+ t = get_bits(gb, 2);
+ fill_rectangle(intra_types, 4, 4, r->intra_types_stride, t, sizeof(intra_types[0]));
+ r->luma_vlc = 2;
+ }else{
+ if(r->decode_intra_types(r, gb, intra_types) < 0)
+ return -1;
+ r->luma_vlc = 1;
+ }
+ r->chroma_vlc = 0;
+ r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0);
+ }else{
+ for(i = 0; i < 16; i++)
+ intra_types[(i & 3) + (i>>2) * r->intra_types_stride] = 0;
+ r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 1);
+ if(r->mb_type[mb_pos] == RV34_MB_P_MIX16x16){
+ r->is16 = 1;
+ r->chroma_vlc = 1;
+ r->luma_vlc = 2;
+ r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0);
+ }
+ }
+
+ return rv34_decode_cbp(gb, r->cur_vlcs, r->is16);
+}
+
+/**
+ * @addtogroup recons
+ * @{
+ */
+/**
+ * mask for retrieving all coded block pattern bits
+ * that correspond to one 8x8 luma block
+ */
+#define LUMA_CBP_BLOCK_MASK 0x33
+
+#define U_CBP_MASK 0x0F0000
+#define V_CBP_MASK 0xF00000
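+
+/*
+ * The 24-bit coded block pattern used below has the sixteen luma 4x4 blocks
+ * in raster order in bits 0-15, the four U blocks in bits 16-19 and the four
+ * V blocks in bits 20-23; LUMA_CBP_BLOCK_MASK shifted by 0, 2, 8 or 10
+ * selects the bits belonging to one 8x8 luma block.
+ */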
+
+
+static void rv34_apply_differences(RV34DecContext *r, int cbp)
+{
+ static const int shifts[4] = { 0, 2, 8, 10 };
+ MpegEncContext *s = &r->s;
+ int i;
+
+ for(i = 0; i < 4; i++)
+ if((cbp & (LUMA_CBP_BLOCK_MASK << shifts[i])) || r->block_type == RV34_MB_P_MIX16x16)
+ s->dsp.add_pixels_clamped(s->block[i], s->dest[0] + (i & 1)*8 + (i&2)*4*s->linesize, s->linesize);
+ if(cbp & U_CBP_MASK)
+ s->dsp.add_pixels_clamped(s->block[4], s->dest[1], s->uvlinesize);
+ if(cbp & V_CBP_MASK)
+ s->dsp.add_pixels_clamped(s->block[5], s->dest[2], s->uvlinesize);
+}
+
+static int is_mv_diff_gt_3(int16_t (*motion_val)[2], int step)
+{
+ int d;
+ d = motion_val[0][0] - motion_val[-step][0];
+ if(d < -3 || d > 3)
+ return 1;
+ d = motion_val[0][1] - motion_val[-step][1];
+ if(d < -3 || d > 3)
+ return 1;
+ return 0;
+}
+
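+/**
+ * Build a 16-bit mask of luma 4x4 subblocks whose edges should be deblocked
+ * because a motion vector component differs from the left (vmvmask, vertical
+ * edges) or top (hmvmask, horizontal edges) neighbour by more than 3.
+ */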
+static int rv34_set_deblock_coef(RV34DecContext *r)
+{
+ MpegEncContext *s = &r->s;
+ int hmvmask = 0, vmvmask = 0, i, j;
+ int midx = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
+ int16_t (*motion_val)[2] = s->current_picture_ptr->motion_val[0][midx];
+ for(j = 0; j < 16; j += 8){
+ for(i = 0; i < 2; i++){
+ if(is_mv_diff_gt_3(motion_val + i, 1))
+ vmvmask |= 0x11 << (j + i*2);
+ if((j || s->mb_y) && is_mv_diff_gt_3(motion_val + i, s->b8_stride))
+ hmvmask |= 0x03 << (j + i*2);
+ }
+ motion_val += s->b8_stride;
+ }
+ if(s->first_slice_line)
+ hmvmask &= ~0x000F;
+ if(!s->mb_x)
+ vmvmask &= ~0x1111;
+ if(r->rv30){ //RV30 marks both subblocks on the edge for filtering
+ vmvmask |= (vmvmask & 0x4444) >> 1;
+ hmvmask |= (hmvmask & 0x0F00) >> 4;
+ if(s->mb_x)
+ r->deblock_coefs[s->mb_x - 1 + s->mb_y*s->mb_stride] |= (vmvmask & 0x1111) << 3;
+ if(!s->first_slice_line)
+ r->deblock_coefs[s->mb_x + (s->mb_y - 1)*s->mb_stride] |= (hmvmask & 0xF) << 12;
+ }
+ return hmvmask | vmvmask;
+}
+
+static int rv34_decode_macroblock(RV34DecContext *r, int8_t *intra_types)
+{
+ MpegEncContext *s = &r->s;
+ GetBitContext *gb = &s->gb;
+ int cbp, cbp2;
+ int i, blknum, blkoff;
+ DCTELEM block16[64];
+ int luma_dc_quant;
+ int dist;
+ int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
+
+ // Calculate which neighbouring macroblocks are available. This could probably be optimized as well.
+ memset(r->avail_cache, 0, sizeof(r->avail_cache));
+ fill_rectangle(r->avail_cache + 6, 2, 2, 4, 1, 4);
+ dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width;
+ if(s->mb_x && dist)
+ r->avail_cache[5] =
+ r->avail_cache[9] = s->current_picture_ptr->mb_type[mb_pos - 1];
+ if(dist >= s->mb_width)
+ r->avail_cache[2] =
+ r->avail_cache[3] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride];
+ if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1)
+ r->avail_cache[4] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride + 1];
+ if(s->mb_x && dist > s->mb_width)
+ r->avail_cache[1] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride - 1];
+
+ s->qscale = r->si.quant;
+ cbp = cbp2 = rv34_decode_mb_header(r, intra_types);
+ r->cbp_luma [mb_pos] = cbp;
+ r->cbp_chroma[mb_pos] = cbp >> 16;
+ if(s->pict_type == FF_I_TYPE)
+ r->deblock_coefs[mb_pos] = 0xFFFF;
+ else
+ r->deblock_coefs[mb_pos] = rv34_set_deblock_coef(r) | r->cbp_luma[mb_pos];
+ s->current_picture_ptr->qscale_table[mb_pos] = s->qscale;
+
+ if(cbp == -1)
+ return -1;
+
+ luma_dc_quant = r->block_type == RV34_MB_P_MIX16x16 ? r->luma_dc_quant_p[s->qscale] : r->luma_dc_quant_i[s->qscale];
+ if(r->is16){
+ memset(block16, 0, sizeof(block16));
+ rv34_decode_block(block16, gb, r->cur_vlcs, 3, 0);
+ rv34_dequant4x4_16x16(block16, rv34_qscale_tab[luma_dc_quant],rv34_qscale_tab[s->qscale]);
+ rv34_inv_transform_noround(block16);
+ }
+
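+ // Decode and reconstruct the sixteen luma 4x4 blocks: blknum selects one of
+ // the four 8x8 blocks in s->block[], blkoff is the offset of the 4x4
+ // subblock inside it, and in 16x16 mode the DC is taken from block16.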
+ for(i = 0; i < 16; i++, cbp >>= 1){
+ if(!r->is16 && !(cbp & 1)) continue;
+ blknum = ((i & 2) >> 1) + ((i & 8) >> 2);
+ blkoff = ((i & 1) << 2) + ((i & 4) << 3);
+ if(cbp & 1)
+ rv34_decode_block(s->block[blknum] + blkoff, gb, r->cur_vlcs, r->luma_vlc, 0);
+ rv34_dequant4x4(s->block[blknum] + blkoff, rv34_qscale_tab[s->qscale],rv34_qscale_tab[s->qscale]);
+ if(r->is16) //FIXME: optimize
+ s->block[blknum][blkoff] = block16[(i & 3) | ((i & 0xC) << 1)];
+ rv34_inv_transform(s->block[blknum] + blkoff);
+ }
+ if(r->block_type == RV34_MB_P_MIX16x16)
+ r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 1);
+ for(; i < 24; i++, cbp >>= 1){
+ if(!(cbp & 1)) continue;
+ blknum = ((i & 4) >> 2) + 4;
+ blkoff = ((i & 1) << 2) + ((i & 2) << 4);
+ rv34_decode_block(s->block[blknum] + blkoff, gb, r->cur_vlcs, r->chroma_vlc, 1);
+ rv34_dequant4x4(s->block[blknum] + blkoff, rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]],rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]]);
+ rv34_inv_transform(s->block[blknum] + blkoff);
+ }
+ if(IS_INTRA(s->current_picture_ptr->mb_type[mb_pos]))
+ rv34_output_macroblock(r, intra_types, cbp2, r->is16);
+ else
+ rv34_apply_differences(r, cbp2);
+
+ return 0;
+}
+
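+/**
+ * Return 1 if the current slice is finished, i.e. all of its macroblocks have
+ * been decoded or only zero padding (fewer than 8 bits) is left in the
+ * bitstream; a pending macroblock skip run keeps the slice alive.
+ */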
+static int check_slice_end(RV34DecContext *r, MpegEncContext *s)
+{
+ int bits;
+ if(s->mb_y >= s->mb_height)
+ return 1;
+ if(!s->mb_num_left)
+ return 1;
+ if(r->s.mb_skip_run > 1)
+ return 0;
+ bits = r->bits - get_bits_count(&s->gb);
+ if(bits < 0 || (bits < 8 && !show_bits(&s->gb, bits)))
+ return 1;
+ return 0;
+}
+
+static inline int slice_compare(SliceInfo *si1, SliceInfo *si2)
+{
+ return si1->type != si2->type ||
+ si1->start >= si2->start ||
+ si1->width != si2->width ||
+ si1->height != si2->height||
+ si1->pts != si2->pts;
+}
+
+static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int buf_size)
+{
+ MpegEncContext *s = &r->s;
+ GetBitContext *gb = &s->gb;
+ int mb_pos;
+ int res;
+
+ init_get_bits(&r->s.gb, buf, buf_size*8);
+ res = r->parse_slice_header(r, gb, &r->si);
+ if(res < 0){
+ av_log(s->avctx, AV_LOG_ERROR, "Incorrect or unknown slice header\n");
+ return -1;
+ }
+
+ if ((s->mb_x == 0 && s->mb_y == 0) || s->current_picture_ptr==NULL) {
+ if(s->width != r->si.width || s->height != r->si.height){
+ av_log(s->avctx, AV_LOG_DEBUG, "Changing dimensions to %dx%d\n", r->si.width,r->si.height);
+ MPV_common_end(s);
+ s->width = r->si.width;
+ s->height = r->si.height;
+ avcodec_set_dimensions(s->avctx, s->width, s->height);
+ if(MPV_common_init(s) < 0)
+ return -1;
+ r->intra_types_stride = s->mb_width*4 + 4;
+ r->intra_types_hist = av_realloc(r->intra_types_hist, r->intra_types_stride * 4 * 2 * sizeof(*r->intra_types_hist));
+ r->intra_types = r->intra_types_hist + r->intra_types_stride * 4;
+ r->mb_type = av_realloc(r->mb_type, r->s.mb_stride * r->s.mb_height * sizeof(*r->mb_type));
+ r->cbp_luma = av_realloc(r->cbp_luma, r->s.mb_stride * r->s.mb_height * sizeof(*r->cbp_luma));
+ r->cbp_chroma = av_realloc(r->cbp_chroma, r->s.mb_stride * r->s.mb_height * sizeof(*r->cbp_chroma));
+ r->deblock_coefs = av_realloc(r->deblock_coefs, r->s.mb_stride * r->s.mb_height * sizeof(*r->deblock_coefs));
+ }
+ s->pict_type = r->si.type ? r->si.type : FF_I_TYPE;
+ if(MPV_frame_start(s, s->avctx) < 0)
+ return -1;
+ ff_er_frame_start(s);
+ r->cur_pts = r->si.pts;
+ if(s->pict_type != FF_B_TYPE){
+ r->last_pts = r->next_pts;
+ r->next_pts = r->cur_pts;
+ }
+ s->mb_x = s->mb_y = 0;
+ }
+
+ r->si.end = end;
+ s->qscale = r->si.quant;
+ r->bits = buf_size*8;
+ s->mb_num_left = r->si.end - r->si.start;
+ r->s.mb_skip_run = 0;
+
+ mb_pos = s->mb_x + s->mb_y * s->mb_width;
+ if(r->si.start != mb_pos){
+ av_log(s->avctx, AV_LOG_ERROR, "Slice indicates MB offset %d, got %d\n", r->si.start, mb_pos);
+ s->mb_x = r->si.start % s->mb_width;
+ s->mb_y = r->si.start / s->mb_width;
+ }
+ memset(r->intra_types_hist, -1, r->intra_types_stride * 4 * 2 * sizeof(*r->intra_types_hist));
+ s->first_slice_line = 1;
+ s->resync_mb_x= s->mb_x;
+ s->resync_mb_y= s->mb_y;
+
+ ff_init_block_index(s);
+ while(!check_slice_end(r, s)) {
+ ff_update_block_index(s);
+ s->dsp.clear_blocks(s->block[0]);
+
+ if(rv34_decode_macroblock(r, r->intra_types + s->mb_x * 4 + 4) < 0){
+ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, AC_ERROR|DC_ERROR|MV_ERROR);
+ return -1;
+ }
+ if (++s->mb_x == s->mb_width) {
+ s->mb_x = 0;
+ s->mb_y++;
+ ff_init_block_index(s);
+
+ memmove(r->intra_types_hist, r->intra_types, r->intra_types_stride * 4 * sizeof(*r->intra_types_hist));
+ memset(r->intra_types, -1, r->intra_types_stride * 4 * sizeof(*r->intra_types_hist));
+
+ if(r->loop_filter && s->mb_y >= 2)
+ r->loop_filter(r, s->mb_y - 2);
+ }
+ if(s->mb_x == s->resync_mb_x)
+ s->first_slice_line=0;
+ s->mb_num_left--;
+ }
+ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, AC_END|DC_END|MV_END);
+
+ return s->mb_y == s->mb_height;
+}
+
+/** @} */ // recons group end
+
+/**
+ * Initialize decoder.
+ */
+av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
+{
+ RV34DecContext *r = avctx->priv_data;
+ MpegEncContext *s = &r->s;
+
+ MPV_decode_defaults(s);
+ s->avctx= avctx;
+ s->out_format = FMT_H263;
+ s->codec_id= avctx->codec_id;
+
+ s->width = avctx->width;
+ s->height = avctx->height;
+
+ r->s.avctx = avctx;
+ avctx->flags |= CODEC_FLAG_EMU_EDGE;
+ r->s.flags |= CODEC_FLAG_EMU_EDGE;
+ avctx->pix_fmt = PIX_FMT_YUV420P;
+ avctx->has_b_frames = 1;
+ s->low_delay = 0;
+
+ if (MPV_common_init(s) < 0)
+ return -1;
+
+ ff_h264_pred_init(&r->h, CODEC_ID_RV40);
+
+ r->intra_types_stride = 4*s->mb_stride + 4;
+ r->intra_types_hist = av_malloc(r->intra_types_stride * 4 * 2 * sizeof(*r->intra_types_hist));
+ r->intra_types = r->intra_types_hist + r->intra_types_stride * 4;
+
+ r->mb_type = av_mallocz(r->s.mb_stride * r->s.mb_height * sizeof(*r->mb_type));
+
+ r->cbp_luma = av_malloc(r->s.mb_stride * r->s.mb_height * sizeof(*r->cbp_luma));
+ r->cbp_chroma = av_malloc(r->s.mb_stride * r->s.mb_height * sizeof(*r->cbp_chroma));
+ r->deblock_coefs = av_malloc(r->s.mb_stride * r->s.mb_height * sizeof(*r->deblock_coefs));
+
+ if(!intra_vlcs[0].cbppattern[0].bits)
+ rv34_init_tables();
+
+ return 0;
+}
+
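+/**
+ * Return the offset of slice n, either from the offsets supplied by the
+ * demuxer or from the 8-byte slice records embedded in the packet; the stored
+ * offset is read little-endian when the preceding 4-byte marker equals 1 and
+ * big-endian otherwise.
+ */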
+static int get_slice_offset(AVCodecContext *avctx, const uint8_t *buf, int n)
+{
+ if(avctx->slice_count) return avctx->slice_offset[n];
+ else return AV_RL32(buf + n*8 - 4) == 1 ? AV_RL32(buf + n*8) : AV_RB32(buf + n*8);
+}
+
+int ff_rv34_decode_frame(AVCodecContext *avctx,
+ void *data, int *data_size,
+ const uint8_t *buf, int buf_size)
+{
+ RV34DecContext *r = avctx->priv_data;
+ MpegEncContext *s = &r->s;
+ AVFrame *pict = data;
+ SliceInfo si;
+ int i;
+ int slice_count;
+ const uint8_t *slices_hdr = NULL;
+ int last = 0;
+
+ /* no supplementary picture */
+ if (buf_size == 0) {
+ /* special case for last picture */
+ if (s->low_delay==0 && s->next_picture_ptr) {
+ *pict= *(AVFrame*)s->next_picture_ptr;
+ s->next_picture_ptr= NULL;
+
+ *data_size = sizeof(AVFrame);
+ }
+ return 0;
+ }
+
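+ /* Without demuxer-supplied slice offsets the packet starts with a slice
+ * table: one byte holding slice_count-1 followed by slice_count 8-byte
+ * records (4-byte marker + 4-byte offset, see get_slice_offset()). */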
+ if(!avctx->slice_count){
+ slice_count = (*buf++) + 1;
+ slices_hdr = buf + 4;
+ buf += 8 * slice_count;
+ }else
+ slice_count = avctx->slice_count;
+
+ //parse first slice header to check whether this frame can be decoded
+ if(get_slice_offset(avctx, slices_hdr, 0) > buf_size){
+ av_log(avctx, AV_LOG_ERROR, "Slice offset is greater than frame size\n");
+ return -1;
+ }
+ init_get_bits(&s->gb, buf+get_slice_offset(avctx, slices_hdr, 0), buf_size-get_slice_offset(avctx, slices_hdr, 0));
+ if(r->parse_slice_header(r, &r->s.gb, &si) < 0 || si.start){
+ av_log(avctx, AV_LOG_ERROR, "First slice header is incorrect\n");
+ return -1;
+ }
+ if((!s->last_picture_ptr || !s->last_picture_ptr->data[0]) && si.type == FF_B_TYPE)
+ return -1;
+ /* skip B-frames if we are in a hurry */
+ if(avctx->hurry_up && si.type==FF_B_TYPE) return buf_size;
+ if( (avctx->skip_frame >= AVDISCARD_NONREF && si.type==FF_B_TYPE)
+ || (avctx->skip_frame >= AVDISCARD_NONKEY && si.type!=FF_I_TYPE)
+ || avctx->skip_frame >= AVDISCARD_ALL)
+ return buf_size;
+ /* skip everything if hurry_up >= 5 */
+ if(avctx->hurry_up>=5)
+ return buf_size;
+
+ for(i=0; i<slice_count; i++){
+ int offset= get_slice_offset(avctx, slices_hdr, i);
+ int size;
+ if(i+1 == slice_count)
+ size= buf_size - offset;
+ else
+ size= get_slice_offset(avctx, slices_hdr, i+1) - offset;
+
+ if(offset > buf_size){
+ av_log(avctx, AV_LOG_ERROR, "Slice offset is greater than frame size\n");
+ break;
+ }
+
+ r->si.end = s->mb_width * s->mb_height;
+ if(i+1 < slice_count){
+ init_get_bits(&s->gb, buf+get_slice_offset(avctx, slices_hdr, i+1), (buf_size-get_slice_offset(avctx, slices_hdr, i+1))*8);
+ if(r->parse_slice_header(r, &r->s.gb, &si) < 0){
+ if(i+2 < slice_count)
+ size = get_slice_offset(avctx, slices_hdr, i+2) - offset;
+ else
+ size = buf_size - offset;
+ }else
+ r->si.end = si.start;
+ }
+ last = rv34_decode_slice(r, r->si.end, buf + offset, size);
+ s->mb_num_left = r->s.mb_x + r->s.mb_y*r->s.mb_width - r->si.start;
+ if(last)
+ break;
+ }
+
+ if(last){
+ if(r->loop_filter)
+ r->loop_filter(r, s->mb_height - 1);
+ ff_er_frame_end(s);
+ MPV_frame_end(s);
+ if (s->pict_type == FF_B_TYPE || s->low_delay) {
+ *pict= *(AVFrame*)s->current_picture_ptr;
+ } else if (s->last_picture_ptr != NULL) {
+ *pict= *(AVFrame*)s->last_picture_ptr;
+ }
+
+ if(s->last_picture_ptr || s->low_delay){
+ *data_size = sizeof(AVFrame);
+ ff_print_debug_info(s, pict);
+ }
+ s->current_picture_ptr= NULL; // so we can detect if frame_end wasn't called (find some nicer solution...)
+ }
+ return buf_size;
+}
+
+av_cold int ff_rv34_decode_end(AVCodecContext *avctx)
+{
+ RV34DecContext *r = avctx->priv_data;
+
+ MPV_common_end(&r->s);
+
+ av_freep(&r->intra_types_hist);
+ r->intra_types = NULL;
+ av_freep(&r->mb_type);
+ av_freep(&r->cbp_luma);
+ av_freep(&r->cbp_chroma);
+ av_freep(&r->deblock_coefs);
+
+ return 0;
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv34.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv34.h
new file mode 100644
index 000000000..c199ea97f
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv34.h
@@ -0,0 +1,130 @@
+/*
+ * RV30/40 decoder common data declarations
+ * Copyright (c) 2007 Mike Melanson, Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/rv34.h
+ * RV30 and RV40 decoder common data declarations
+ */
+
+#ifndef AVCODEC_RV34_H
+#define AVCODEC_RV34_H
+
+#include "avcodec.h"
+#include "dsputil.h"
+#include "mpegvideo.h"
+
+#include "h264pred.h"
+
+#define MB_TYPE_SEPARATE_DC 0x01000000
+#define IS_SEPARATE_DC(a) ((a) & MB_TYPE_SEPARATE_DC)
+
+/**
+ * RV30 and RV40 Macroblock types
+ */
+enum RV40BlockTypes{
+ RV34_MB_TYPE_INTRA, ///< Intra macroblock
+ RV34_MB_TYPE_INTRA16x16, ///< Intra macroblock with DCs in a separate 4x4 block
+ RV34_MB_P_16x16, ///< P-frame macroblock, one motion vector
+ RV34_MB_P_8x8, ///< P-frame macroblock, 8x8 motion compensation partitions
+ RV34_MB_B_FORWARD, ///< B-frame macroblock, forward prediction
+ RV34_MB_B_BACKWARD, ///< B-frame macroblock, backward prediction
+ RV34_MB_SKIP, ///< Skipped block
+ RV34_MB_B_DIRECT, ///< Bidirectionally predicted B-frame macroblock, no motion vectors
+ RV34_MB_P_16x8, ///< P-frame macroblock, 16x8 motion compensation partitions
+ RV34_MB_P_8x16, ///< P-frame macroblock, 8x16 motion compensation partitions
+ RV34_MB_B_BIDIR, ///< Bidirectionally predicted B-frame macroblock, two motion vectors
+ RV34_MB_P_MIX16x16, ///< P-frame macroblock with DCs in a separate 4x4 block, one motion vector
+ RV34_MB_TYPES
+};
+
+/**
+ * VLC tables used by the decoder
+ *
+ * Intra-frame VLC sets do not contain some of these tables.
+ */
+typedef struct RV34VLC{
+ VLC cbppattern[2]; ///< VLCs used for decoding the pattern of coded block patterns
+ VLC cbp[2][4]; ///< VLCs used for decoding coded block patterns
+ VLC first_pattern[4]; ///< VLCs used for decoding coefficients in the first subblock
+ VLC second_pattern[2]; ///< VLCs used for decoding coefficients in the subblocks 2 and 3
+ VLC third_pattern[2]; ///< VLCs used for decoding coefficients in the last subblock
+ VLC coefficient; ///< VLCs used for decoding big coefficients
+}RV34VLC;
+
+/** essential slice information */
+typedef struct SliceInfo{
+ int type; ///< slice type (intra, inter)
+ int quant; ///< quantizer used for this slice
+ int vlc_set; ///< VLCs used for this slice
+ int start, end; ///< start and end macroblocks of the slice
+ int width; ///< coded width
+ int height; ///< coded height
+ int pts; ///< frame timestamp
+}SliceInfo;
+
+/** decoder context */
+typedef struct RV34DecContext{
+ MpegEncContext s;
+ int8_t *intra_types_hist;///< old block types, used for prediction
+ int8_t *intra_types; ///< block types
+ int intra_types_stride;///< block types array stride
+ const uint8_t *luma_dc_quant_i;///< luma subblock DC quantizer for intraframes
+ const uint8_t *luma_dc_quant_p;///< luma subblock DC quantizer for interframes
+
+ RV34VLC *cur_vlcs; ///< VLC set used for current frame decoding
+ int bits; ///< slice size in bits
+ H264PredContext h; ///< functions for 4x4 and 16x16 intra block prediction
+ SliceInfo si; ///< current slice information
+
+ int *mb_type; ///< internal macroblock types
+ int block_type; ///< current block type
+ int luma_vlc; ///< which VLC set will be used for decoding luma blocks
+ int chroma_vlc; ///< which VLC set will be used for decoding chroma blocks
+ int is16; ///< whether the current macroblock is coded in a 16x16 mode with a separate DC block
+ int dmv[4][2]; ///< differential motion vectors for the current macroblock
+
+ int rv30; ///< indicates which RV variant is currently decoded
+ int rpr; ///< one field size in RV30 slice header
+
+ int cur_pts, last_pts, next_pts;
+
+ uint16_t *cbp_luma; ///< CBP values for luma subblocks
+ uint8_t *cbp_chroma; ///< CBP values for chroma subblocks
+ int *deblock_coefs; ///< deblock coefficients for each macroblock
+
+ /** 8x8 block available flags (for MV prediction) */
+ DECLARE_ALIGNED_8(uint32_t, avail_cache)[3*4];
+
+ int (*parse_slice_header)(struct RV34DecContext *r, GetBitContext *gb, SliceInfo *si);
+ int (*decode_mb_info)(struct RV34DecContext *r);
+ int (*decode_intra_types)(struct RV34DecContext *r, GetBitContext *gb, int8_t *dst);
+ void (*loop_filter)(struct RV34DecContext *r, int row);
+}RV34DecContext;
+
+/**
+ * common decoding functions
+ */
+int ff_rv34_get_start_offset(GetBitContext *gb, int blocks);
+int ff_rv34_decode_init(AVCodecContext *avctx);
+int ff_rv34_decode_frame(AVCodecContext *avctx, void *data, int *data_size, const uint8_t *buf, int buf_size);
+int ff_rv34_decode_end(AVCodecContext *avctx);
+
+#endif /* AVCODEC_RV34_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv34data.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv34data.h
new file mode 100644
index 000000000..95e5572df
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv34data.h
@@ -0,0 +1,148 @@
+/*
+ * RealVideo 4 decoder
+ * copyright (c) 2007 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file rv34data.h
+ * miscellaneous RV30/40 tables
+ */
+
+#ifndef AVCODEC_RV34DATA_H
+#define AVCODEC_RV34DATA_H
+
+#include <stdint.h>
+
+/**
+ * number of ones in nibble minus one
+ */
+static const uint8_t rv34_count_ones[16] = {
+ 0, 0, 0, 1, 0, 1, 1, 2, 0, 1, 1, 2, 1, 2, 2, 3
+};
+
+/**
+ * values used to reconstruct coded block pattern
+ */
+static const uint8_t rv34_cbp_code[16] = {
+ 0x00, 0x20, 0x10, 0x30, 0x02, 0x22, 0x12, 0x32,
+ 0x01, 0x21, 0x11, 0x31, 0x03, 0x23, 0x13, 0x33
+};
+
+/**
+ * precalculated results of division by three and modulo three for values 0-107
+ *
+ * A lot of four-tuples in RV40 are represented as c0*27+c1*9+c2*3+c3.
+ * This table allows conversion from a value back to a vector.
+ */
+static const uint8_t modulo_three_table[108][4] = {
+ { 0, 0, 0, 0 }, { 0, 0, 0, 1 }, { 0, 0, 0, 2 }, { 0, 0, 1, 0 },
+ { 0, 0, 1, 1 }, { 0, 0, 1, 2 }, { 0, 0, 2, 0 }, { 0, 0, 2, 1 },
+ { 0, 0, 2, 2 }, { 0, 1, 0, 0 }, { 0, 1, 0, 1 }, { 0, 1, 0, 2 },
+ { 0, 1, 1, 0 }, { 0, 1, 1, 1 }, { 0, 1, 1, 2 }, { 0, 1, 2, 0 },
+ { 0, 1, 2, 1 }, { 0, 1, 2, 2 }, { 0, 2, 0, 0 }, { 0, 2, 0, 1 },
+ { 0, 2, 0, 2 }, { 0, 2, 1, 0 }, { 0, 2, 1, 1 }, { 0, 2, 1, 2 },
+ { 0, 2, 2, 0 }, { 0, 2, 2, 1 }, { 0, 2, 2, 2 }, { 1, 0, 0, 0 },
+ { 1, 0, 0, 1 }, { 1, 0, 0, 2 }, { 1, 0, 1, 0 }, { 1, 0, 1, 1 },
+ { 1, 0, 1, 2 }, { 1, 0, 2, 0 }, { 1, 0, 2, 1 }, { 1, 0, 2, 2 },
+ { 1, 1, 0, 0 }, { 1, 1, 0, 1 }, { 1, 1, 0, 2 }, { 1, 1, 1, 0 },
+ { 1, 1, 1, 1 }, { 1, 1, 1, 2 }, { 1, 1, 2, 0 }, { 1, 1, 2, 1 },
+ { 1, 1, 2, 2 }, { 1, 2, 0, 0 }, { 1, 2, 0, 1 }, { 1, 2, 0, 2 },
+ { 1, 2, 1, 0 }, { 1, 2, 1, 1 }, { 1, 2, 1, 2 }, { 1, 2, 2, 0 },
+ { 1, 2, 2, 1 }, { 1, 2, 2, 2 }, { 2, 0, 0, 0 }, { 2, 0, 0, 1 },
+ { 2, 0, 0, 2 }, { 2, 0, 1, 0 }, { 2, 0, 1, 1 }, { 2, 0, 1, 2 },
+ { 2, 0, 2, 0 }, { 2, 0, 2, 1 }, { 2, 0, 2, 2 }, { 2, 1, 0, 0 },
+ { 2, 1, 0, 1 }, { 2, 1, 0, 2 }, { 2, 1, 1, 0 }, { 2, 1, 1, 1 },
+ { 2, 1, 1, 2 }, { 2, 1, 2, 0 }, { 2, 1, 2, 1 }, { 2, 1, 2, 2 },
+ { 2, 2, 0, 0 }, { 2, 2, 0, 1 }, { 2, 2, 0, 2 }, { 2, 2, 1, 0 },
+ { 2, 2, 1, 1 }, { 2, 2, 1, 2 }, { 2, 2, 2, 0 }, { 2, 2, 2, 1 },
+ { 2, 2, 2, 2 }, { 3, 0, 0, 0 }, { 3, 0, 0, 1 }, { 3, 0, 0, 2 },
+ { 3, 0, 1, 0 }, { 3, 0, 1, 1 }, { 3, 0, 1, 2 }, { 3, 0, 2, 0 },
+ { 3, 0, 2, 1 }, { 3, 0, 2, 2 }, { 3, 1, 0, 0 }, { 3, 1, 0, 1 },
+ { 3, 1, 0, 2 }, { 3, 1, 1, 0 }, { 3, 1, 1, 1 }, { 3, 1, 1, 2 },
+ { 3, 1, 2, 0 }, { 3, 1, 2, 1 }, { 3, 1, 2, 2 }, { 3, 2, 0, 0 },
+ { 3, 2, 0, 1 }, { 3, 2, 0, 2 }, { 3, 2, 1, 0 }, { 3, 2, 1, 1 },
+ { 3, 2, 1, 2 }, { 3, 2, 2, 0 }, { 3, 2, 2, 1 }, { 3, 2, 2, 2 },
+};
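+
+/*
+ * For reference, modulo_three_table[v] is the mixed-radix decomposition
+ * { v/27, (v/9)%3, (v/3)%3, v%3 } (the leading digit may reach 3 because the
+ * table covers 0-107); e.g. v = 35 = 1*27 + 0*9 + 2*3 + 2 gives { 1, 0, 2, 2 }.
+ */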
+
+/**
+ * quantizer values used for AC and DC coefficients in chroma blocks
+ */
+static const uint8_t rv34_chroma_quant[2][32] = {
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 17, 18, 19, 20, 20, 21, 22, 22, 23, 23, 24, 24, 25, 25 },
+ { 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 15, 16, 17, 18, 18, 19, 20, 20, 21, 21, 22, 22, 23, 23 }
+};
+
+/**
+ * This table is used for dequantizing.
+ */
+static const uint16_t rv34_qscale_tab[32] = {
+ 60, 67, 76, 85, 96, 108, 121, 136,
+ 152, 171, 192, 216, 242, 272, 305, 341,
+ 383, 432, 481, 544, 606, 683, 767, 854,
+ 963, 1074, 1212, 1392, 1566, 1708, 1978, 2211
+};
+
+/**
+ * 4x4 dezigzag pattern
+ */
+static const uint8_t rv34_dezigzag[16] = {
+ 0, 1, 8, 16,
+ 9, 2, 3, 10,
+ 17, 24, 25, 18,
+ 11, 19, 26, 27
+};
+
+/**
+ * tables used to translate a quantizer value into a VLC set for decoding
+ * The first table is used for intraframes.
+ */
+static const uint8_t rv34_quant_to_vlc_set[2][31] = {
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 0 },
+ { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3,
+ 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6 },
+};
+
+/**
+ * table for obtaining the quantizer difference
+ * @todo Use with modified_quant_tab from h263data.h.
+ */
+static const uint8_t rv34_dquant_tab[2][32]={
+// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+{
+ 0, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9,10,11,12,13,14,15,16,17,18,18,19,20,21,22,23,24,25,26,27,28
+},{
+ 0, 2, 3, 4, 5, 6, 7, 8, 9,10,11,13,14,15,16,17,18,19,20,21,22,24,25,26,27,28,29,30,31,31,31,26
+}
+};
+
+/**
+ * maximum number of macroblocks for each of the possible slice offset sizes
+ * @todo This is the same as ff_mba_max, maybe use it instead.
+ */
+static const uint16_t rv34_mb_max_sizes[6] = { 0x2F, 0x62, 0x18B, 0x62F, 0x18BF, 0x23FF };
+/**
+ * bits needed to code the slice offset for the given size
+ * @todo This is the same as ff_mba_length, maybe use it instead.
+ */
+static const uint8_t rv34_mb_bits_sizes[6] = { 6, 7, 9, 11, 13, 14 };
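+
+/*
+ * The two tables above are presumably used together: for a picture with
+ * mb_count macroblocks the slice start offset is coded with
+ * rv34_mb_bits_sizes[i] bits, where i is the smallest index such that
+ * rv34_mb_max_sizes[i] >= mb_count - 1 (cf. ff_rv34_get_start_offset()).
+ */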
+
+#endif /* AVCODEC_RV34DATA_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv34vlc.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv34vlc.h
new file mode 100644
index 000000000..ffee907f9
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv34vlc.h
@@ -0,0 +1,4054 @@
+/*
+ * RealVideo 3/4 decoder
+ * Copyright (c) 2007 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file rv34vlc.h
+ * RV30/40 VLC tables
+ */
+
+#ifndef AVCODEC_RV34VLC_H
+#define AVCODEC_RV34VLC_H
+
+#include <stdint.h>
+
+#define NUM_INTRA_TABLES 5
+#define NUM_INTER_TABLES 7
+
+#define CBPPAT_VLC_SIZE 1296
+#define CBP_VLC_SIZE 16
+#define FIRSTBLK_VLC_SIZE 864
+#define OTHERBLK_VLC_SIZE 108
+#define COEFF_VLC_SIZE 32
+
+static const uint8_t rv34_table_intra_cbppat[NUM_INTRA_TABLES][2][CBPPAT_VLC_SIZE] = {
+ {
+ {
+ 8, 10, 10, 10, 10, 10, 11, 10, 10, 11, 10, 10, 10, 10, 10, 6,
+ 12, 12, 13, 12, 13, 12, 13, 11, 13, 13, 13, 12, 13, 12, 12, 8,
+ 14, 13, 16, 13, 15, 13, 16, 12, 16, 16, 16, 14, 16, 13, 14, 10,
+ 12, 13, 12, 12, 13, 13, 13, 12, 13, 13, 12, 12, 13, 12, 12, 8,
+ 13, 14, 14, 12, 14, 14, 14, 12, 14, 15, 14, 12, 14, 13, 13, 8,
+ 16, 16, 16, 12, 16, 16, 16, 13, 16, 16, 16, 13, 16, 14, 14, 9,
+ 14, 16, 13, 13, 16, 16, 16, 14, 15, 16, 14, 13, 15, 15, 14, 10,
+ 16, 16, 14, 13, 16, 16, 16, 13, 16, 16, 16, 13, 16, 15, 14, 10,
+ 16, 16, 16, 11, 16, 16, 16, 12, 16, 16, 16, 12, 16, 16, 15, 9,
+ 12, 13, 13, 13, 12, 12, 14, 12, 12, 14, 13, 12, 12, 12, 12, 8,
+ 14, 14, 16, 14, 13, 12, 14, 12, 14, 15, 14, 13, 13, 12, 13, 8,
+ 16, 16, 16, 15, 16, 13, 16, 13, 16, 16, 16, 15, 16, 13, 15, 10,
+ 14, 16, 14, 14, 14, 14, 15, 13, 14, 16, 14, 13, 13, 13, 13, 9,
+ 16, 16, 16, 14, 16, 14, 16, 12, 16, 16, 14, 13, 14, 13, 13, 8,
+ 16, 16, 16, 14, 16, 14, 16, 13, 16, 16, 16, 14, 16, 14, 14, 9,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14, 16, 16, 14, 10,
+ 16, 16, 16, 14, 16, 16, 16, 13, 16, 16, 16, 14, 16, 15, 14, 9,
+ 16, 16, 16, 13, 16, 16, 16, 12, 16, 16, 16, 13, 16, 15, 15, 8,
+ 14, 16, 16, 16, 14, 14, 16, 14, 16, 16, 16, 15, 13, 13, 14, 10,
+ 16, 16, 16, 16, 15, 13, 16, 13, 16, 16, 16, 16, 16, 13, 14, 10,
+ 16, 16, 16, 16, 16, 11, 16, 12, 16, 16, 16, 16, 16, 12, 16, 9,
+ 16, 16, 16, 16, 16, 16, 16, 15, 16, 16, 16, 16, 16, 14, 14, 10,
+ 16, 16, 16, 16, 16, 15, 16, 13, 16, 16, 16, 15, 16, 13, 14, 9,
+ 16, 16, 16, 16, 16, 13, 16, 12, 16, 16, 16, 16, 16, 13, 14, 8,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 15, 11,
+ 16, 16, 16, 16, 16, 16, 16, 14, 16, 16, 16, 16, 16, 16, 14, 9,
+ 16, 16, 16, 14, 16, 15, 16, 11, 16, 16, 16, 14, 16, 14, 14, 8,
+ 12, 13, 13, 13, 13, 13, 14, 12, 12, 13, 12, 12, 12, 12, 12, 8,
+ 14, 14, 16, 14, 15, 14, 16, 13, 14, 15, 14, 13, 14, 13, 13, 9,
+ 16, 16, 16, 15, 16, 16, 16, 14, 16, 16, 16, 14, 16, 14, 16, 10,
+ 14, 15, 14, 14, 15, 14, 15, 13, 14, 14, 13, 12, 13, 13, 13, 9,
+ 15, 16, 15, 14, 16, 16, 16, 13, 15, 16, 14, 12, 14, 13, 13, 8,
+ 16, 16, 16, 14, 16, 16, 16, 13, 16, 16, 16, 13, 16, 14, 14, 9,
+ 16, 16, 16, 16, 16, 16, 16, 15, 16, 16, 13, 13, 16, 16, 14, 10,
+ 16, 16, 16, 14, 16, 16, 16, 14, 16, 16, 15, 13, 16, 14, 14, 9,
+ 16, 16, 16, 12, 16, 16, 16, 13, 16, 16, 16, 12, 16, 16, 15, 8,
+ 13, 14, 14, 14, 14, 14, 16, 13, 13, 14, 14, 13, 12, 12, 12, 8,
+ 16, 16, 16, 14, 15, 14, 16, 13, 15, 16, 14, 13, 13, 12, 13, 8,
+ 16, 16, 16, 16, 16, 14, 16, 14, 16, 16, 16, 14, 16, 13, 14, 9,
+ 15, 16, 16, 15, 16, 15, 16, 13, 14, 16, 14, 13, 13, 13, 12, 8,
+ 16, 16, 16, 14, 16, 14, 15, 12, 15, 15, 14, 12, 13, 12, 12, 7,
+ 16, 16, 16, 14, 16, 14, 16, 12, 16, 16, 16, 13, 16, 13, 13, 7,
+ 16, 16, 16, 16, 16, 16, 16, 14, 16, 16, 14, 14, 16, 14, 13, 9,
+ 16, 16, 16, 14, 16, 16, 16, 13, 16, 16, 14, 12, 16, 13, 12, 7,
+ 16, 16, 16, 12, 16, 16, 16, 11, 16, 16, 15, 12, 16, 13, 13, 6,
+ 16, 16, 16, 16, 16, 16, 16, 15, 16, 16, 16, 15, 13, 13, 14, 10,
+ 16, 16, 16, 16, 16, 14, 16, 14, 16, 16, 16, 16, 14, 13, 14, 9,
+ 16, 16, 16, 16, 16, 13, 16, 13, 16, 16, 16, 16, 16, 12, 14, 8,
+ 16, 16, 16, 16, 16, 16, 16, 14, 16, 16, 16, 14, 14, 13, 13, 9,
+ 16, 16, 16, 16, 16, 14, 16, 13, 16, 16, 16, 13, 14, 12, 12, 7,
+ 16, 16, 16, 16, 16, 13, 16, 12, 16, 16, 16, 13, 14, 12, 13, 6,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 15, 14, 9,
+ 16, 16, 16, 16, 16, 16, 16, 13, 16, 16, 16, 13, 15, 13, 12, 7,
+ 16, 16, 16, 13, 16, 14, 16, 11, 16, 16, 16, 12, 16, 12, 12, 5,
+ 14, 16, 15, 16, 16, 16, 16, 15, 14, 15, 14, 14, 13, 14, 13, 10,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14, 16, 14, 16, 10,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 11,
+ 16, 16, 16, 16, 16, 16, 16, 16, 14, 16, 14, 14, 15, 14, 13, 10,
+ 16, 16, 16, 16, 16, 16, 16, 15, 16, 16, 14, 13, 16, 14, 14, 9,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14, 16, 16, 15, 9,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 12, 13, 16, 16, 13, 10,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 13, 13, 16, 16, 13, 9,
+ 16, 16, 16, 14, 16, 16, 16, 14, 16, 16, 15, 12, 16, 16, 14, 8,
+ 16, 16, 16, 16, 16, 16, 16, 16, 14, 16, 15, 14, 13, 13, 13, 9,
+ 16, 16, 16, 16, 16, 16, 16, 14, 16, 16, 16, 14, 14, 13, 13, 9,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14, 15, 9,
+ 16, 16, 16, 16, 16, 16, 16, 15, 15, 16, 14, 14, 14, 13, 13, 9,
+ 16, 16, 16, 16, 16, 16, 16, 13, 16, 16, 14, 12, 13, 12, 12, 7,
+ 16, 16, 16, 16, 16, 16, 16, 13, 16, 16, 16, 13, 16, 13, 13, 7,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 13, 13, 16, 14, 13, 9,
+ 16, 16, 16, 15, 16, 16, 16, 13, 16, 16, 13, 12, 14, 13, 12, 6,
+ 16, 16, 16, 13, 16, 16, 16, 12, 16, 16, 14, 10, 15, 12, 12, 5,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 12, 13, 13, 9,
+ 16, 16, 16, 16, 16, 16, 16, 14, 16, 16, 16, 15, 13, 12, 13, 8,
+ 16, 16, 16, 16, 16, 14, 16, 14, 16, 16, 16, 16, 15, 12, 14, 8,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14, 13, 13, 13, 8,
+ 16, 16, 16, 16, 16, 16, 16, 13, 16, 16, 15, 13, 12, 11, 12, 6,
+ 16, 16, 16, 15, 16, 14, 16, 12, 16, 16, 16, 12, 13, 10, 12, 5,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14, 14, 13, 14, 12, 8,
+ 16, 16, 16, 16, 16, 16, 16, 13, 16, 16, 13, 12, 13, 12, 10, 5,
+ 16, 16, 16, 13, 16, 13, 16, 10, 16, 16, 13, 10, 13, 10, 10, 1,
+ },
+ {
+ 2, 7, 7, 8, 7, 8, 9, 8, 7, 9, 8, 8, 8, 8, 9, 7,
+ 6, 9, 10, 10, 10, 10, 11, 10, 10, 11, 11, 11, 10, 11, 11, 9,
+ 9, 11, 12, 12, 12, 13, 14, 13, 13, 14, 14, 13, 13, 13, 14, 11,
+ 6, 10, 9, 10, 10, 11, 11, 11, 10, 11, 10, 11, 11, 11, 11, 9,
+ 6, 9, 10, 10, 10, 11, 12, 11, 10, 12, 11, 11, 11, 11, 11, 8,
+ 9, 11, 12, 12, 12, 13, 13, 13, 12, 14, 14, 13, 13, 13, 13, 10,
+ 9, 13, 11, 13, 13, 14, 14, 13, 13, 14, 13, 13, 14, 14, 14, 12,
+ 9, 12, 12, 12, 12, 14, 14, 13, 13, 14, 13, 13, 13, 14, 13, 11,
+ 8, 12, 12, 11, 12, 14, 14, 12, 13, 14, 14, 13, 13, 13, 14, 11,
+ 6, 10, 10, 11, 9, 10, 12, 11, 10, 12, 11, 11, 10, 11, 11, 9,
+ 7, 10, 10, 11, 10, 11, 12, 11, 11, 12, 11, 11, 11, 11, 11, 9,
+ 9, 12, 13, 13, 12, 12, 14, 13, 13, 14, 14, 13, 14, 13, 14, 11,
+ 8, 11, 11, 12, 11, 12, 12, 12, 11, 13, 12, 12, 12, 12, 12, 10,
+ 7, 10, 10, 11, 10, 11, 12, 11, 10, 12, 11, 11, 11, 11, 11, 8,
+ 9, 11, 12, 12, 12, 12, 13, 12, 12, 13, 13, 12, 13, 12, 13, 10,
+ 10, 13, 13, 14, 14, 14, 15, 14, 14, 15, 14, 15, 14, 14, 14, 12,
+ 9, 12, 12, 13, 12, 13, 14, 13, 12, 13, 13, 12, 13, 13, 13, 10,
+ 9, 12, 12, 12, 12, 13, 14, 12, 12, 14, 13, 12, 13, 13, 13, 10,
+ 9, 12, 13, 13, 11, 13, 14, 13, 13, 14, 14, 14, 12, 13, 13, 11,
+ 10, 12, 13, 13, 12, 12, 14, 13, 13, 14, 14, 14, 13, 13, 14, 11,
+ 10, 13, 14, 14, 13, 12, 15, 13, 14, 14, 14, 14, 15, 13, 14, 11,
+ 11, 14, 14, 14, 13, 14, 15, 14, 14, 15, 15, 14, 13, 14, 14, 12,
+ 10, 13, 12, 13, 12, 12, 14, 13, 13, 14, 13, 13, 13, 13, 13, 10,
+ 10, 12, 13, 13, 13, 12, 14, 12, 13, 14, 14, 13, 13, 13, 13, 10,
+ 13, 15, 16, 16, 15, 15, 16, 16, 15, 16, 15, 16, 16, 16, 16, 14,
+ 11, 14, 14, 14, 14, 14, 15, 14, 14, 15, 15, 14, 14, 14, 15, 11,
+ 10, 13, 13, 13, 13, 13, 14, 12, 13, 14, 14, 13, 13, 13, 13, 10,
+ 6, 10, 10, 11, 10, 11, 12, 11, 10, 12, 10, 11, 10, 11, 11, 9,
+ 8, 11, 11, 12, 11, 12, 13, 12, 11, 12, 12, 12, 12, 12, 12, 10,
+ 11, 13, 14, 14, 13, 14, 15, 14, 13, 15, 15, 14, 14, 14, 15, 12,
+ 7, 11, 10, 12, 11, 12, 12, 12, 11, 12, 11, 12, 11, 12, 12, 10,
+ 7, 10, 10, 11, 10, 11, 12, 11, 11, 12, 11, 11, 11, 11, 11, 9,
+ 10, 12, 13, 13, 12, 13, 14, 13, 13, 14, 13, 13, 13, 13, 13, 10,
+ 10, 13, 12, 14, 13, 14, 14, 14, 13, 14, 12, 14, 15, 14, 14, 11,
+ 10, 12, 12, 12, 12, 13, 14, 13, 13, 14, 13, 12, 13, 13, 13, 10,
+ 9, 12, 13, 13, 13, 14, 14, 13, 13, 14, 14, 13, 13, 13, 13, 10,
+ 7, 10, 10, 11, 10, 11, 12, 11, 10, 12, 12, 11, 9, 11, 11, 9,
+ 7, 10, 11, 11, 10, 11, 12, 11, 10, 12, 12, 11, 11, 11, 11, 9,
+ 10, 12, 13, 13, 13, 13, 15, 13, 13, 14, 13, 13, 13, 13, 13, 10,
+ 8, 11, 11, 11, 11, 11, 12, 11, 11, 12, 12, 11, 11, 12, 11, 9,
+ 6, 9, 9, 10, 9, 10, 10, 10, 9, 11, 10, 10, 9, 10, 10, 7,
+ 8, 10, 11, 11, 11, 11, 12, 11, 11, 12, 12, 11, 11, 11, 11, 8,
+ 10, 13, 13, 13, 13, 14, 14, 13, 13, 14, 13, 13, 13, 13, 13, 11,
+ 8, 11, 11, 11, 11, 12, 12, 11, 11, 12, 11, 11, 11, 11, 11, 8,
+ 8, 11, 11, 11, 11, 12, 12, 10, 11, 12, 12, 11, 11, 11, 11, 8,
+ 10, 13, 13, 13, 12, 13, 14, 13, 12, 14, 14, 14, 10, 13, 13, 11,
+ 10, 12, 12, 13, 12, 13, 14, 12, 12, 13, 13, 13, 12, 12, 13, 10,
+ 11, 13, 14, 14, 13, 13, 14, 13, 13, 15, 14, 13, 13, 13, 13, 10,
+ 10, 12, 13, 13, 12, 13, 14, 13, 13, 14, 14, 13, 12, 13, 13, 11,
+ 8, 11, 11, 11, 11, 11, 12, 11, 11, 12, 12, 11, 11, 11, 11, 8,
+ 9, 11, 12, 12, 11, 11, 12, 11, 12, 12, 12, 11, 12, 11, 11, 8,
+ 12, 15, 14, 14, 14, 15, 15, 14, 14, 15, 15, 14, 14, 14, 15, 12,
+ 10, 12, 12, 12, 12, 12, 13, 12, 12, 13, 13, 12, 12, 12, 12, 9,
+ 9, 11, 11, 11, 11, 11, 12, 10, 11, 12, 12, 11, 11, 11, 11, 7,
+ 10, 13, 13, 13, 13, 14, 15, 14, 13, 14, 14, 14, 12, 14, 15, 12,
+ 11, 14, 14, 14, 14, 15, 15, 14, 14, 15, 15, 15, 14, 15, 15, 12,
+ 13, 16, 16, 16, 15, 16, 16, 16, 15, 16, 16, 16, 16, 16, 16, 14,
+ 10, 13, 13, 14, 13, 15, 14, 14, 13, 15, 13, 14, 14, 14, 14, 12,
+ 10, 13, 13, 13, 13, 13, 14, 13, 13, 14, 13, 13, 13, 13, 14, 11,
+ 12, 14, 14, 14, 14, 15, 15, 14, 14, 15, 15, 14, 15, 14, 14, 12,
+ 11, 14, 14, 15, 14, 15, 15, 14, 14, 15, 12, 14, 15, 16, 15, 12,
+ 11, 13, 13, 14, 13, 14, 14, 14, 14, 14, 13, 13, 14, 14, 14, 11,
+ 11, 14, 14, 14, 14, 15, 15, 14, 14, 16, 14, 13, 14, 14, 14, 11,
+ 10, 13, 13, 13, 12, 14, 14, 14, 12, 15, 14, 14, 11, 13, 13, 12,
+ 10, 12, 13, 14, 12, 13, 14, 13, 13, 14, 14, 13, 12, 13, 13, 11,
+ 12, 14, 14, 15, 14, 15, 16, 15, 15, 15, 15, 15, 14, 14, 15, 12,
+ 10, 13, 13, 13, 12, 13, 14, 13, 13, 14, 13, 13, 12, 13, 13, 11,
+ 9, 11, 11, 12, 11, 12, 12, 11, 11, 12, 12, 11, 11, 11, 11, 9,
+ 10, 12, 12, 12, 12, 12, 13, 12, 12, 13, 13, 12, 13, 12, 12, 9,
+ 11, 13, 13, 15, 14, 14, 15, 14, 14, 15, 14, 14, 14, 14, 14, 11,
+ 10, 12, 12, 12, 12, 12, 13, 12, 12, 13, 11, 11, 12, 12, 12, 8,
+ 9, 12, 12, 11, 12, 12, 13, 11, 12, 12, 12, 11, 12, 11, 11, 8,
+ 10, 13, 13, 14, 12, 14, 15, 14, 13, 15, 15, 14, 10, 13, 13, 11,
+ 11, 13, 14, 13, 13, 14, 14, 13, 13, 14, 14, 14, 11, 13, 13, 11,
+ 12, 14, 14, 14, 14, 14, 15, 14, 15, 16, 15, 14, 13, 13, 14, 11,
+ 11, 14, 13, 14, 13, 14, 15, 14, 13, 15, 14, 14, 11, 13, 13, 11,
+ 9, 12, 12, 12, 11, 12, 13, 11, 12, 13, 12, 11, 10, 11, 11, 8,
+ 10, 12, 12, 12, 12, 12, 13, 11, 12, 12, 12, 11, 11, 11, 11, 8,
+ 12, 15, 14, 15, 14, 15, 16, 15, 15, 15, 15, 14, 14, 14, 14, 12,
+ 10, 12, 12, 12, 12, 12, 13, 11, 12, 13, 12, 11, 11, 11, 11, 8,
+ 8, 10, 10, 10, 10, 10, 11, 9, 10, 11, 10, 9, 10, 9, 9, 5,
+ },
+ },
+ {
+ {
+ 12, 12, 11, 9, 11, 10, 11, 9, 11, 11, 10, 9, 9, 8, 9, 5,
+ 14, 13, 14, 11, 14, 11, 13, 10, 14, 13, 12, 10, 12, 10, 11, 6,
+ 16, 13, 16, 12, 16, 12, 16, 11, 16, 14, 16, 12, 15, 12, 13, 8,
+ 14, 14, 12, 11, 14, 12, 13, 10, 13, 13, 11, 10, 12, 11, 10, 6,
+ 16, 15, 14, 11, 16, 13, 14, 10, 15, 14, 13, 10, 13, 11, 11, 7,
+ 16, 16, 16, 11, 16, 14, 16, 11, 16, 16, 15, 12, 15, 13, 13, 8,
+ 16, 16, 13, 12, 16, 16, 15, 12, 16, 16, 12, 11, 15, 13, 12, 8,
+ 16, 16, 14, 11, 16, 16, 16, 11, 16, 16, 14, 11, 15, 14, 13, 8,
+ 16, 16, 15, 10, 16, 16, 16, 10, 16, 16, 15, 11, 16, 14, 14, 8,
+ 14, 14, 14, 12, 13, 11, 13, 10, 13, 13, 12, 11, 11, 10, 10, 6,
+ 16, 15, 16, 13, 13, 11, 14, 11, 15, 14, 13, 11, 12, 10, 11, 7,
+ 16, 15, 16, 14, 16, 11, 16, 11, 16, 16, 16, 13, 16, 12, 13, 8,
+ 16, 16, 14, 13, 15, 13, 14, 11, 14, 15, 13, 11, 13, 11, 11, 7,
+ 16, 16, 15, 13, 15, 13, 14, 11, 16, 15, 14, 11, 13, 11, 11, 7,
+ 16, 16, 16, 13, 16, 13, 16, 11, 16, 16, 16, 12, 16, 12, 13, 8,
+ 16, 16, 16, 14, 16, 16, 16, 13, 16, 16, 14, 13, 15, 14, 13, 9,
+ 16, 16, 16, 13, 16, 16, 16, 12, 16, 16, 14, 12, 15, 13, 13, 8,
+ 16, 16, 16, 12, 16, 16, 16, 11, 16, 16, 15, 12, 16, 13, 13, 7,
+ 16, 16, 16, 16, 13, 12, 16, 12, 16, 16, 14, 13, 12, 11, 12, 8,
+ 16, 16, 16, 15, 14, 11, 16, 11, 16, 16, 16, 13, 14, 11, 13, 8,
+ 16, 16, 16, 16, 15, 10, 16, 11, 16, 16, 16, 14, 15, 11, 13, 8,
+ 16, 16, 16, 16, 16, 14, 16, 13, 16, 16, 14, 14, 14, 12, 13, 9,
+ 16, 16, 16, 15, 16, 13, 16, 12, 16, 16, 16, 13, 14, 12, 13, 8,
+ 16, 16, 16, 14, 16, 12, 16, 11, 16, 16, 16, 13, 15, 12, 13, 7,
+ 16, 16, 16, 16, 16, 16, 16, 14, 16, 16, 16, 15, 16, 14, 13, 9,
+ 16, 16, 16, 16, 16, 16, 16, 12, 16, 16, 16, 14, 16, 13, 13, 8,
+ 16, 16, 16, 14, 16, 14, 16, 10, 16, 16, 16, 13, 16, 13, 13, 7,
+ 14, 14, 13, 12, 13, 12, 13, 11, 12, 13, 11, 10, 11, 10, 10, 6,
+ 16, 16, 15, 13, 16, 13, 15, 11, 14, 14, 13, 11, 13, 11, 11, 7,
+ 16, 16, 16, 14, 16, 14, 16, 12, 16, 16, 16, 13, 16, 13, 14, 9,
+ 16, 16, 13, 13, 15, 14, 14, 11, 13, 14, 11, 11, 12, 11, 11, 7,
+ 16, 16, 15, 12, 16, 14, 15, 11, 14, 14, 12, 11, 13, 11, 11, 7,
+ 16, 16, 16, 13, 16, 14, 16, 12, 16, 16, 14, 12, 16, 13, 13, 8,
+ 16, 16, 14, 14, 16, 16, 16, 13, 16, 16, 12, 11, 15, 13, 12, 8,
+ 16, 16, 15, 13, 16, 16, 16, 12, 16, 16, 13, 11, 16, 13, 12, 8,
+ 16, 16, 16, 11, 16, 16, 16, 11, 16, 16, 14, 11, 16, 14, 13, 7,
+ 16, 16, 15, 13, 14, 13, 14, 11, 14, 14, 12, 11, 11, 10, 11, 7,
+ 16, 16, 16, 13, 14, 12, 15, 11, 15, 14, 13, 11, 12, 11, 11, 7,
+ 16, 16, 16, 14, 16, 13, 16, 12, 16, 16, 16, 13, 14, 12, 13, 8,
+ 16, 16, 15, 13, 15, 14, 14, 12, 14, 14, 12, 11, 12, 11, 11, 7,
+ 16, 16, 14, 12, 15, 13, 14, 11, 15, 14, 13, 11, 12, 11, 11, 6,
+ 16, 16, 16, 13, 16, 13, 16, 11, 16, 15, 14, 11, 14, 11, 12, 6,
+ 16, 16, 16, 14, 16, 16, 16, 13, 16, 16, 13, 12, 14, 13, 12, 8,
+ 16, 16, 15, 13, 16, 14, 15, 11, 16, 16, 13, 11, 14, 12, 11, 6,
+ 16, 16, 16, 12, 16, 14, 15, 11, 16, 16, 13, 10, 14, 12, 12, 6,
+ 16, 16, 16, 16, 16, 14, 16, 13, 16, 16, 14, 13, 12, 11, 12, 8,
+ 16, 16, 16, 16, 16, 13, 16, 12, 16, 16, 15, 13, 13, 11, 12, 8,
+ 16, 16, 16, 15, 16, 12, 16, 12, 16, 16, 16, 14, 14, 11, 13, 7,
+ 16, 16, 16, 16, 16, 15, 16, 13, 16, 16, 14, 13, 13, 12, 12, 8,
+ 16, 16, 16, 14, 15, 13, 15, 11, 16, 15, 14, 12, 13, 11, 11, 6,
+ 16, 16, 16, 14, 16, 12, 15, 11, 16, 16, 15, 12, 14, 11, 12, 6,
+ 16, 16, 16, 16, 16, 16, 16, 13, 16, 16, 15, 13, 14, 13, 12, 8,
+ 16, 16, 16, 14, 16, 14, 16, 12, 16, 16, 14, 12, 13, 12, 11, 6,
+ 16, 16, 16, 13, 16, 13, 15, 10, 16, 16, 14, 11, 14, 11, 11, 5,
+ 16, 16, 15, 14, 16, 16, 16, 13, 14, 14, 12, 12, 12, 12, 12, 8,
+ 16, 16, 16, 15, 16, 16, 16, 14, 16, 16, 14, 13, 14, 13, 13, 9,
+ 16, 16, 16, 16, 16, 16, 16, 14, 16, 16, 16, 14, 16, 13, 15, 9,
+ 16, 16, 14, 15, 16, 16, 16, 14, 14, 16, 12, 12, 13, 13, 12, 8,
+ 16, 16, 16, 14, 16, 16, 16, 13, 16, 15, 13, 12, 14, 12, 12, 8,
+ 16, 16, 16, 16, 16, 16, 16, 13, 16, 16, 14, 12, 16, 13, 13, 8,
+ 16, 16, 16, 15, 16, 16, 16, 14, 16, 16, 11, 11, 15, 14, 12, 8,
+ 16, 16, 16, 14, 16, 16, 16, 13, 16, 16, 12, 11, 15, 13, 12, 8,
+ 16, 16, 16, 13, 16, 16, 16, 12, 16, 16, 13, 10, 16, 13, 12, 7,
+ 16, 16, 16, 16, 16, 15, 16, 13, 14, 16, 13, 13, 12, 12, 12, 8,
+ 16, 16, 16, 15, 16, 14, 16, 13, 16, 16, 14, 13, 13, 12, 12, 8,
+ 16, 16, 16, 16, 16, 15, 16, 13, 16, 16, 16, 13, 15, 12, 14, 8,
+ 16, 16, 16, 15, 16, 16, 16, 13, 14, 16, 13, 12, 12, 12, 11, 8,
+ 16, 16, 16, 14, 16, 14, 16, 12, 14, 14, 13, 11, 13, 11, 11, 6,
+ 16, 16, 16, 14, 16, 14, 16, 12, 16, 15, 14, 11, 14, 11, 12, 6,
+ 16, 16, 16, 15, 16, 16, 16, 14, 16, 16, 12, 12, 14, 13, 11, 8,
+ 16, 16, 15, 14, 16, 16, 16, 12, 16, 15, 12, 11, 13, 12, 11, 6,
+ 16, 16, 16, 13, 16, 14, 16, 11, 16, 14, 13, 10, 14, 11, 11, 5,
+ 16, 16, 16, 16, 16, 16, 16, 14, 14, 16, 15, 13, 11, 11, 11, 8,
+ 16, 16, 16, 16, 16, 15, 16, 13, 16, 16, 16, 13, 12, 11, 12, 7,
+ 16, 16, 16, 16, 16, 13, 16, 12, 16, 16, 16, 13, 13, 11, 13, 7,
+ 16, 16, 16, 16, 16, 16, 16, 13, 16, 16, 14, 13, 12, 12, 11, 7,
+ 16, 16, 16, 15, 16, 14, 15, 12, 16, 14, 13, 12, 12, 11, 11, 6,
+ 16, 16, 16, 14, 16, 13, 15, 11, 16, 14, 14, 11, 13, 10, 11, 5,
+ 16, 16, 16, 16, 16, 16, 16, 13, 16, 16, 13, 12, 12, 12, 11, 7,
+ 16, 16, 16, 14, 16, 14, 15, 12, 16, 15, 12, 11, 12, 11, 10, 5,
+ 16, 16, 16, 13, 16, 13, 14, 10, 16, 14, 12, 9, 12, 10, 9, 3,
+ },
+ {
+ 2, 6, 6, 7, 6, 7, 8, 7, 7, 8, 7, 8, 7, 8, 8, 5,
+ 5, 8, 9, 9, 9, 9, 12, 10, 10, 11, 10, 10, 10, 11, 11, 8,
+ 9, 10, 13, 12, 13, 12, 15, 13, 13, 14, 13, 14, 13, 13, 14, 11,
+ 5, 10, 9, 10, 10, 10, 12, 10, 10, 12, 10, 11, 11, 11, 11, 8,
+ 6, 9, 10, 9, 10, 11, 12, 10, 10, 12, 11, 11, 10, 11, 11, 8,
+ 9, 11, 12, 11, 12, 13, 14, 12, 13, 14, 14, 12, 13, 13, 13, 11,
+ 10, 13, 11, 12, 14, 14, 15, 13, 13, 15, 12, 13, 14, 14, 14, 12,
+ 9, 12, 12, 12, 13, 13, 15, 13, 13, 14, 13, 13, 14, 13, 15, 11,
+ 8, 11, 12, 10, 12, 13, 14, 12, 13, 14, 14, 13, 13, 13, 14, 11,
+ 5, 9, 10, 10, 9, 10, 12, 11, 10, 12, 11, 11, 9, 11, 11, 9,
+ 6, 10, 10, 11, 10, 10, 12, 11, 11, 12, 11, 11, 11, 11, 11, 9,
+ 9, 11, 13, 13, 12, 11, 14, 12, 13, 15, 13, 13, 14, 13, 14, 11,
+ 8, 11, 11, 12, 11, 12, 13, 12, 12, 13, 12, 13, 12, 12, 12, 10,
+ 7, 10, 10, 11, 10, 11, 12, 11, 11, 12, 11, 11, 11, 11, 12, 9,
+ 9, 12, 12, 12, 12, 12, 14, 12, 13, 14, 13, 13, 13, 13, 13, 11,
+ 11, 14, 13, 15, 15, 16, 16, 15, 15, 16, 15, 15, 16, 16, 15, 13,
+ 10, 12, 13, 13, 13, 14, 15, 13, 13, 14, 13, 13, 14, 14, 14, 11,
+ 9, 12, 12, 12, 13, 13, 14, 12, 13, 14, 14, 13, 13, 13, 14, 11,
+ 9, 13, 13, 13, 11, 12, 15, 13, 13, 15, 14, 14, 11, 13, 14, 11,
+ 10, 13, 13, 13, 12, 12, 15, 13, 13, 15, 14, 14, 13, 13, 14, 11,
+ 10, 12, 13, 13, 12, 11, 14, 12, 13, 15, 13, 13, 13, 13, 14, 11,
+ 11, 14, 15, 15, 13, 14, 16, 14, 14, 16, 16, 14, 14, 15, 15, 13,
+ 10, 13, 13, 13, 12, 13, 14, 13, 13, 14, 14, 14, 13, 13, 14, 11,
+ 10, 12, 13, 13, 13, 12, 14, 13, 13, 14, 14, 13, 13, 13, 13, 11,
+ 13, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 15,
+ 12, 15, 15, 15, 14, 15, 16, 14, 15, 16, 16, 15, 16, 15, 15, 13,
+ 10, 12, 12, 12, 13, 13, 14, 12, 13, 14, 13, 13, 13, 13, 13, 11,
+ 6, 10, 10, 11, 10, 11, 12, 11, 10, 12, 11, 11, 10, 11, 11, 9,
+ 8, 11, 12, 12, 12, 12, 13, 12, 12, 13, 12, 13, 12, 13, 13, 10,
+ 11, 13, 15, 14, 15, 14, 16, 14, 15, 16, 16, 14, 15, 15, 15, 13,
+ 7, 11, 10, 12, 11, 11, 13, 11, 11, 13, 10, 11, 12, 12, 12, 10,
+ 7, 11, 11, 11, 11, 11, 13, 11, 11, 13, 11, 12, 12, 12, 12, 9,
+ 10, 12, 13, 13, 13, 13, 15, 13, 14, 15, 14, 14, 14, 14, 15, 11,
+ 10, 13, 12, 14, 14, 14, 15, 13, 13, 15, 12, 13, 15, 15, 14, 12,
+ 10, 13, 12, 12, 13, 13, 15, 14, 13, 15, 13, 13, 14, 14, 14, 11,
+ 10, 13, 13, 12, 13, 14, 15, 13, 13, 15, 13, 13, 14, 14, 14, 11,
+ 7, 10, 11, 11, 10, 11, 12, 11, 10, 12, 12, 12, 9, 11, 12, 9,
+ 7, 11, 11, 11, 11, 11, 13, 11, 11, 13, 12, 12, 11, 12, 12, 9,
+ 10, 12, 14, 13, 13, 13, 16, 13, 14, 16, 14, 14, 13, 13, 14, 11,
+ 8, 11, 11, 12, 11, 12, 13, 12, 12, 13, 12, 12, 12, 12, 12, 10,
+ 6, 9, 9, 10, 9, 10, 11, 10, 10, 11, 10, 10, 10, 10, 10, 8,
+ 8, 11, 11, 11, 12, 11, 13, 11, 12, 13, 12, 12, 12, 12, 12, 10,
+ 11, 14, 13, 14, 14, 14, 16, 14, 14, 16, 14, 14, 15, 15, 14, 12,
+ 9, 12, 11, 12, 12, 12, 13, 12, 12, 13, 12, 12, 12, 12, 12, 10,
+ 8, 11, 11, 11, 11, 11, 13, 11, 12, 12, 12, 12, 12, 12, 12, 9,
+ 10, 13, 14, 13, 11, 13, 14, 14, 13, 15, 15, 14, 10, 13, 14, 11,
+ 10, 13, 13, 13, 12, 13, 14, 13, 13, 14, 14, 14, 13, 13, 13, 11,
+ 10, 13, 14, 13, 13, 12, 15, 13, 14, 15, 14, 14, 14, 13, 14, 12,
+ 11, 14, 14, 14, 13, 13, 15, 14, 14, 15, 14, 15, 13, 14, 14, 12,
+ 9, 11, 12, 12, 11, 11, 13, 12, 12, 13, 12, 12, 12, 12, 12, 10,
+ 9, 11, 12, 12, 12, 11, 13, 11, 12, 13, 12, 12, 12, 12, 12, 10,
+ 13, 15, 15, 16, 15, 16, 16, 15, 16, 16, 16, 15, 15, 15, 16, 14,
+ 10, 13, 13, 13, 13, 13, 14, 13, 13, 14, 13, 13, 13, 13, 13, 11,
+ 8, 11, 11, 11, 11, 11, 12, 11, 11, 12, 12, 11, 12, 11, 12, 9,
+ 11, 14, 14, 15, 14, 15, 15, 14, 13, 15, 14, 15, 12, 14, 15, 13,
+ 12, 15, 15, 15, 15, 15, 16, 15, 15, 16, 16, 16, 15, 16, 15, 13,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 15,
+ 11, 14, 13, 15, 14, 14, 16, 14, 14, 16, 13, 14, 15, 14, 15, 12,
+ 11, 14, 13, 14, 14, 14, 16, 15, 14, 16, 14, 14, 15, 15, 15, 12,
+ 13, 15, 15, 15, 15, 16, 16, 15, 16, 16, 16, 15, 16, 16, 16, 13,
+ 11, 14, 13, 14, 14, 14, 15, 14, 14, 16, 12, 14, 16, 16, 14, 12,
+ 11, 14, 14, 14, 14, 15, 16, 15, 14, 16, 13, 14, 16, 15, 14, 12,
+ 12, 14, 14, 14, 14, 14, 16, 14, 15, 16, 14, 14, 14, 15, 15, 12,
+ 11, 14, 14, 14, 13, 14, 16, 15, 13, 16, 15, 15, 11, 14, 14, 12,
+ 11, 14, 14, 15, 14, 14, 16, 14, 14, 15, 14, 14, 13, 15, 15, 12,
+ 13, 15, 16, 15, 15, 15, 16, 15, 16, 16, 16, 16, 15, 15, 16, 13,
+ 11, 14, 14, 14, 14, 14, 15, 14, 14, 16, 14, 14, 14, 15, 14, 12,
+ 9, 12, 12, 12, 12, 12, 13, 12, 12, 13, 12, 13, 12, 12, 12, 10,
+ 11, 13, 13, 13, 13, 13, 14, 13, 13, 14, 13, 14, 13, 13, 14, 11,
+ 12, 15, 14, 15, 15, 15, 16, 15, 14, 16, 14, 14, 16, 16, 14, 13,
+ 10, 12, 12, 12, 12, 12, 14, 12, 13, 13, 12, 12, 13, 13, 13, 10,
+ 10, 12, 12, 12, 12, 12, 14, 12, 12, 13, 12, 12, 12, 12, 12, 10,
+ 10, 14, 14, 14, 12, 14, 16, 14, 13, 16, 16, 16, 10, 13, 14, 12,
+ 11, 14, 14, 14, 13, 14, 16, 14, 14, 16, 15, 14, 12, 13, 14, 12,
+ 12, 14, 14, 14, 14, 14, 16, 14, 14, 16, 15, 15, 14, 14, 15, 12,
+ 12, 14, 15, 15, 14, 15, 16, 14, 15, 15, 15, 15, 13, 15, 14, 12,
+ 9, 12, 12, 12, 12, 13, 13, 12, 12, 13, 13, 12, 11, 12, 12, 10,
+ 10, 12, 12, 12, 12, 12, 13, 12, 12, 13, 12, 12, 12, 12, 12, 10,
+ 13, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 15, 16, 15, 13,
+ 10, 12, 12, 13, 12, 13, 13, 12, 13, 14, 13, 13, 12, 13, 13, 10,
+ 7, 10, 10, 10, 10, 10, 11, 10, 10, 11, 10, 10, 10, 10, 10, 7,
+ },
+ },
+ {
+ {
+ 10, 10, 9, 8, 9, 8, 9, 7, 9, 9, 8, 7, 8, 7, 7, 4,
+ 13, 11, 12, 9, 12, 9, 12, 9, 12, 11, 11, 9, 10, 9, 9, 6,
+ 15, 12, 15, 11, 14, 10, 14, 10, 14, 13, 13, 11, 13, 11, 12, 7,
+ 13, 12, 11, 9, 12, 11, 12, 9, 12, 12, 10, 9, 10, 10, 9, 6,
+ 14, 13, 12, 10, 13, 12, 13, 9, 13, 12, 11, 10, 12, 11, 10, 6,
+ 16, 14, 14, 10, 15, 13, 14, 10, 15, 15, 14, 11, 14, 12, 12, 8,
+ 15, 14, 12, 11, 15, 14, 13, 11, 14, 14, 11, 10, 13, 12, 11, 8,
+ 15, 16, 13, 11, 16, 14, 14, 11, 15, 15, 12, 10, 14, 13, 12, 8,
+ 16, 15, 14, 10, 16, 14, 14, 10, 16, 15, 14, 10, 14, 13, 12, 7,
+ 13, 12, 12, 11, 11, 9, 12, 9, 12, 12, 11, 10, 10, 9, 9, 6,
+ 13, 13, 14, 12, 12, 10, 12, 10, 14, 13, 12, 11, 11, 10, 10, 7,
+ 16, 14, 16, 13, 14, 11, 15, 10, 16, 15, 14, 12, 14, 11, 12, 8,
+ 14, 14, 13, 12, 13, 12, 13, 10, 13, 13, 12, 11, 11, 10, 10, 7,
+ 15, 14, 14, 12, 14, 12, 13, 10, 14, 13, 12, 11, 12, 11, 11, 7,
+ 16, 15, 16, 13, 15, 13, 15, 10, 16, 15, 14, 12, 14, 12, 12, 7,
+ 15, 16, 14, 13, 16, 14, 14, 12, 15, 15, 12, 12, 13, 12, 12, 8,
+ 16, 16, 14, 13, 16, 14, 14, 11, 15, 15, 14, 11, 14, 12, 12, 8,
+ 16, 16, 15, 12, 16, 14, 15, 10, 16, 16, 13, 12, 14, 13, 12, 7,
+ 14, 14, 14, 13, 13, 11, 13, 11, 14, 14, 13, 12, 11, 10, 11, 8,
+ 16, 15, 16, 13, 13, 11, 14, 11, 15, 14, 14, 13, 12, 11, 12, 8,
+ 15, 15, 16, 14, 14, 10, 14, 10, 16, 15, 15, 13, 14, 10, 12, 8,
+ 16, 16, 16, 14, 15, 13, 14, 12, 15, 15, 13, 13, 13, 12, 12, 8,
+ 16, 16, 16, 14, 15, 13, 14, 11, 16, 16, 14, 13, 13, 12, 12, 8,
+ 16, 16, 16, 14, 16, 12, 15, 11, 16, 15, 15, 13, 14, 12, 12, 8,
+ 16, 16, 16, 16, 16, 15, 15, 13, 16, 16, 14, 13, 14, 13, 12, 9,
+ 16, 16, 16, 14, 16, 15, 15, 11, 16, 16, 14, 13, 15, 13, 12, 8,
+ 16, 16, 16, 14, 16, 14, 14, 10, 16, 16, 15, 13, 14, 12, 12, 7,
+ 12, 12, 12, 11, 12, 11, 12, 10, 11, 11, 10, 9, 9, 9, 9, 6,
+ 14, 13, 14, 12, 13, 12, 13, 10, 13, 13, 12, 10, 12, 10, 11, 7,
+ 16, 14, 16, 13, 15, 13, 16, 12, 15, 14, 14, 12, 14, 12, 13, 8,
+ 14, 14, 13, 11, 14, 12, 13, 11, 12, 12, 10, 10, 11, 10, 10, 7,
+ 14, 14, 13, 12, 14, 12, 13, 11, 13, 13, 12, 10, 12, 11, 10, 7,
+ 16, 15, 15, 12, 16, 14, 15, 11, 16, 14, 13, 11, 14, 12, 12, 8,
+ 16, 16, 13, 13, 16, 15, 14, 12, 14, 14, 11, 11, 13, 12, 11, 8,
+ 16, 16, 14, 12, 16, 14, 14, 12, 15, 14, 12, 11, 14, 12, 12, 8,
+ 16, 15, 14, 11, 16, 15, 15, 11, 16, 15, 13, 11, 14, 13, 12, 8,
+ 14, 13, 13, 12, 13, 11, 13, 10, 12, 13, 11, 10, 10, 10, 10, 7,
+ 15, 14, 14, 13, 13, 12, 13, 11, 14, 13, 12, 11, 12, 10, 11, 7,
+ 16, 15, 16, 14, 15, 12, 15, 11, 16, 14, 14, 12, 14, 11, 12, 8,
+ 14, 15, 13, 12, 14, 13, 13, 11, 13, 13, 11, 11, 11, 10, 10, 7,
+ 14, 14, 14, 12, 14, 13, 13, 10, 14, 13, 12, 10, 12, 10, 10, 6,
+ 16, 15, 15, 13, 16, 13, 15, 11, 15, 14, 13, 11, 13, 11, 11, 7,
+ 16, 16, 14, 13, 16, 15, 14, 12, 15, 15, 12, 11, 13, 12, 11, 8,
+ 16, 16, 14, 13, 16, 14, 14, 11, 15, 14, 12, 11, 13, 12, 11, 7,
+ 16, 16, 15, 12, 16, 14, 14, 11, 15, 15, 13, 11, 14, 12, 11, 6,
+ 16, 15, 15, 14, 14, 12, 14, 12, 13, 14, 13, 12, 11, 11, 11, 8,
+ 16, 16, 16, 14, 14, 12, 15, 12, 15, 14, 14, 12, 12, 11, 12, 8,
+ 16, 16, 16, 15, 14, 12, 15, 12, 16, 15, 14, 13, 13, 11, 12, 8,
+ 16, 16, 16, 15, 15, 14, 15, 12, 14, 14, 13, 12, 12, 11, 11, 8,
+ 16, 16, 15, 14, 14, 12, 14, 11, 14, 14, 13, 12, 12, 11, 11, 7,
+ 16, 16, 16, 14, 15, 12, 14, 11, 15, 15, 14, 12, 13, 11, 12, 7,
+ 16, 16, 16, 16, 16, 15, 16, 13, 15, 15, 14, 12, 13, 12, 11, 8,
+ 16, 16, 16, 14, 15, 14, 14, 12, 16, 15, 13, 12, 13, 12, 11, 7,
+ 16, 16, 16, 13, 16, 13, 14, 10, 16, 15, 14, 11, 13, 11, 11, 6,
+ 14, 15, 13, 13, 14, 13, 14, 12, 12, 13, 11, 11, 11, 11, 10, 8,
+ 16, 16, 15, 13, 16, 14, 16, 13, 14, 14, 13, 12, 13, 12, 12, 8,
+ 16, 16, 16, 14, 16, 14, 16, 13, 16, 14, 15, 13, 15, 13, 13, 9,
+ 15, 15, 14, 14, 15, 14, 14, 12, 13, 14, 11, 11, 12, 12, 11, 8,
+ 15, 16, 15, 13, 15, 14, 14, 12, 14, 14, 12, 11, 13, 12, 12, 8,
+ 16, 16, 16, 13, 16, 15, 15, 13, 16, 15, 14, 11, 15, 12, 13, 8,
+ 16, 16, 14, 13, 16, 15, 15, 13, 14, 14, 10, 11, 14, 12, 11, 8,
+ 16, 16, 15, 13, 16, 16, 15, 13, 15, 14, 12, 11, 14, 13, 12, 8,
+ 16, 16, 15, 13, 16, 15, 16, 12, 16, 14, 13, 10, 15, 13, 12, 7,
+ 15, 15, 15, 14, 14, 14, 15, 12, 13, 14, 12, 12, 11, 11, 11, 8,
+ 16, 15, 16, 14, 15, 13, 15, 12, 14, 14, 13, 12, 12, 11, 12, 8,
+ 16, 16, 16, 15, 16, 14, 16, 13, 16, 15, 14, 12, 14, 11, 13, 8,
+ 16, 16, 15, 14, 16, 14, 15, 13, 14, 14, 12, 11, 12, 11, 11, 8,
+ 15, 16, 15, 14, 15, 14, 14, 12, 14, 13, 12, 11, 12, 11, 11, 7,
+ 16, 16, 16, 14, 16, 13, 16, 12, 15, 14, 13, 11, 13, 11, 12, 7,
+ 16, 16, 15, 14, 16, 15, 15, 13, 14, 15, 11, 11, 13, 12, 11, 8,
+ 16, 16, 15, 13, 16, 14, 15, 12, 15, 14, 12, 11, 13, 11, 11, 7,
+ 16, 16, 15, 13, 16, 14, 16, 12, 15, 14, 13, 10, 13, 11, 11, 6,
+ 16, 16, 16, 14, 14, 14, 15, 13, 14, 14, 14, 12, 11, 11, 11, 8,
+ 16, 16, 16, 14, 15, 14, 16, 13, 15, 14, 14, 13, 12, 11, 11, 7,
+ 16, 16, 16, 16, 15, 13, 16, 12, 15, 15, 14, 12, 13, 10, 12, 7,
+ 16, 16, 16, 14, 15, 15, 14, 13, 14, 14, 13, 12, 12, 11, 11, 8,
+ 16, 15, 16, 14, 16, 13, 15, 12, 14, 14, 13, 12, 12, 10, 10, 6,
+ 16, 15, 16, 14, 16, 13, 16, 11, 16, 14, 13, 11, 13, 10, 11, 6,
+ 16, 16, 16, 15, 16, 16, 15, 13, 14, 16, 12, 12, 12, 12, 10, 7,
+ 16, 16, 16, 14, 16, 14, 14, 12, 15, 15, 12, 11, 12, 11, 10, 6,
+ 16, 16, 16, 13, 16, 13, 15, 10, 15, 14, 13, 10, 13, 10, 10, 4,
+ },
+ {
+ 1, 6, 6, 7, 6, 7, 9, 8, 7, 9, 7, 8, 7, 8, 8, 6,
+ 6, 9, 10, 10, 10, 10, 12, 11, 10, 12, 11, 11, 11, 11, 12, 9,
+ 9, 10, 13, 11, 13, 12, 14, 13, 14, 14, 14, 14, 14, 14, 14, 12,
+ 6, 10, 9, 10, 10, 11, 13, 11, 11, 13, 10, 12, 11, 12, 12, 9,
+ 6, 10, 10, 10, 10, 11, 13, 11, 11, 13, 12, 12, 11, 12, 12, 9,
+ 9, 11, 13, 12, 13, 14, 15, 13, 14, 16, 14, 14, 14, 14, 15, 12,
+ 10, 13, 11, 13, 14, 14, 16, 14, 14, 15, 13, 14, 15, 15, 16, 12,
+ 9, 13, 12, 12, 14, 14, 16, 14, 14, 15, 14, 14, 15, 15, 15, 12,
+ 8, 11, 12, 11, 13, 14, 15, 13, 13, 15, 14, 14, 13, 15, 15, 11,
+ 6, 10, 10, 11, 9, 10, 13, 11, 10, 13, 11, 12, 10, 12, 12, 9,
+ 6, 10, 10, 11, 11, 10, 13, 11, 11, 13, 11, 12, 12, 12, 13, 10,
+ 9, 12, 13, 13, 13, 12, 16, 13, 14, 15, 14, 14, 15, 14, 15, 12,
+ 8, 12, 12, 13, 12, 13, 15, 14, 13, 15, 13, 14, 13, 13, 14, 11,
+ 7, 11, 11, 12, 11, 12, 13, 12, 12, 13, 12, 13, 12, 13, 13, 10,
+ 9, 12, 13, 13, 13, 13, 16, 13, 13, 15, 14, 14, 14, 15, 15, 12,
+ 11, 15, 14, 15, 15, 16, 16, 16, 15, 16, 15, 16, 16, 16, 16, 14,
+ 10, 13, 13, 14, 14, 14, 16, 15, 14, 16, 15, 15, 15, 15, 16, 13,
+ 9, 12, 13, 13, 13, 14, 16, 14, 13, 15, 14, 14, 14, 16, 15, 12,
+ 10, 13, 14, 14, 11, 13, 16, 14, 14, 16, 15, 15, 12, 14, 15, 12,
+ 10, 13, 14, 14, 12, 12, 16, 15, 14, 16, 15, 15, 14, 14, 16, 12,
+ 9, 12, 13, 14, 13, 11, 16, 13, 14, 15, 13, 14, 14, 14, 15, 12,
+ 11, 15, 16, 16, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 10, 13, 14, 14, 13, 14, 16, 15, 14, 16, 16, 16, 14, 15, 16, 13,
+ 10, 13, 13, 14, 13, 13, 16, 13, 13, 14, 14, 15, 15, 14, 15, 13,
+ 13, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 15,
+ 12, 16, 16, 16, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 10, 12, 13, 14, 13, 13, 14, 13, 13, 14, 13, 14, 14, 14, 15, 12,
+ 6, 10, 11, 11, 10, 11, 13, 12, 11, 13, 11, 12, 11, 12, 12, 10,
+ 8, 12, 13, 13, 12, 13, 14, 14, 13, 15, 14, 14, 14, 14, 15, 12,
+ 12, 14, 16, 15, 15, 15, 16, 15, 16, 16, 16, 16, 16, 16, 16, 14,
+ 7, 11, 11, 12, 12, 12, 14, 13, 12, 14, 11, 12, 13, 13, 13, 11,
+ 8, 11, 12, 12, 12, 12, 14, 13, 12, 14, 12, 13, 13, 14, 14, 11,
+ 11, 13, 14, 14, 14, 14, 16, 15, 15, 16, 15, 15, 16, 16, 16, 13,
+ 10, 14, 12, 14, 14, 15, 16, 15, 13, 16, 12, 14, 16, 16, 15, 13,
+ 10, 13, 13, 14, 14, 15, 16, 15, 14, 16, 14, 15, 15, 16, 16, 12,
+ 10, 13, 14, 13, 14, 14, 16, 15, 14, 16, 15, 15, 14, 16, 16, 13,
+ 7, 11, 11, 11, 10, 12, 14, 13, 11, 14, 13, 13, 10, 12, 13, 10,
+ 8, 11, 12, 12, 11, 12, 14, 13, 12, 15, 13, 13, 12, 13, 14, 11,
+ 11, 13, 14, 14, 14, 14, 16, 15, 14, 16, 15, 16, 16, 16, 16, 14,
+ 8, 12, 12, 13, 12, 13, 15, 14, 12, 15, 13, 13, 13, 14, 14, 11,
+ 6, 10, 10, 11, 10, 11, 13, 12, 11, 13, 11, 12, 11, 12, 12, 9,
+ 9, 12, 13, 13, 13, 13, 14, 13, 13, 15, 14, 14, 14, 14, 14, 12,
+ 11, 15, 14, 15, 14, 15, 16, 16, 16, 16, 15, 16, 16, 16, 16, 14,
+ 9, 13, 12, 13, 13, 13, 15, 14, 13, 14, 13, 14, 14, 15, 14, 12,
+ 9, 12, 12, 12, 12, 13, 14, 13, 13, 14, 13, 13, 13, 13, 14, 11,
+ 10, 13, 15, 14, 12, 14, 16, 14, 14, 16, 15, 15, 12, 14, 16, 12,
+ 10, 14, 14, 14, 13, 14, 16, 15, 14, 16, 16, 16, 13, 14, 16, 13,
+ 11, 13, 14, 14, 14, 13, 16, 14, 14, 16, 15, 15, 15, 15, 16, 13,
+ 11, 15, 15, 15, 14, 15, 16, 16, 15, 16, 16, 16, 14, 16, 16, 13,
+ 9, 13, 13, 13, 12, 13, 15, 14, 13, 15, 14, 14, 13, 14, 15, 11,
+ 9, 12, 12, 13, 12, 12, 14, 13, 13, 14, 13, 14, 14, 14, 14, 11,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 15,
+ 11, 14, 14, 15, 15, 14, 16, 16, 14, 16, 14, 15, 15, 16, 16, 12,
+ 9, 12, 12, 13, 12, 12, 14, 12, 12, 14, 13, 13, 13, 13, 14, 11,
+ 11, 14, 14, 16, 14, 16, 16, 16, 13, 16, 14, 16, 14, 16, 16, 13,
+ 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 11, 15, 14, 16, 14, 15, 16, 16, 15, 16, 14, 15, 16, 16, 16, 13,
+ 11, 15, 14, 16, 15, 16, 16, 16, 15, 16, 15, 16, 16, 16, 16, 14,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 11, 14, 13, 14, 15, 14, 16, 15, 14, 16, 12, 14, 16, 16, 15, 13,
+ 11, 14, 14, 16, 14, 15, 16, 16, 15, 16, 14, 15, 16, 16, 16, 14,
+ 12, 14, 14, 15, 14, 16, 16, 15, 14, 16, 15, 15, 15, 16, 16, 13,
+ 11, 14, 15, 15, 13, 15, 16, 16, 14, 16, 16, 16, 12, 15, 15, 13,
+ 11, 15, 16, 16, 15, 16, 16, 16, 15, 16, 16, 16, 14, 16, 16, 14,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 15,
+ 11, 15, 14, 16, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 13,
+ 10, 13, 13, 14, 14, 14, 16, 15, 14, 16, 14, 15, 14, 14, 15, 12,
+ 12, 15, 14, 16, 14, 15, 16, 15, 15, 16, 15, 15, 16, 15, 16, 13,
+ 12, 16, 14, 16, 15, 16, 16, 16, 16, 16, 14, 15, 16, 16, 16, 14,
+ 10, 13, 13, 14, 14, 13, 16, 14, 13, 16, 13, 14, 15, 15, 15, 12,
+ 10, 13, 13, 14, 13, 13, 16, 14, 14, 15, 14, 14, 14, 14, 15, 12,
+ 10, 14, 15, 14, 13, 15, 16, 15, 14, 16, 16, 16, 11, 14, 16, 12,
+ 11, 14, 14, 16, 14, 15, 16, 15, 15, 16, 16, 16, 13, 15, 16, 13,
+ 12, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 15, 16, 13,
+ 12, 16, 16, 16, 15, 16, 16, 16, 16, 16, 16, 16, 14, 16, 16, 13,
+ 10, 13, 14, 14, 13, 14, 16, 14, 13, 16, 15, 14, 12, 14, 16, 11,
+ 10, 13, 13, 14, 13, 14, 16, 14, 14, 15, 14, 14, 13, 14, 14, 11,
+ 13, 16, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 11, 13, 14, 15, 14, 14, 16, 15, 14, 16, 14, 15, 14, 15, 16, 12,
+ 8, 11, 11, 11, 11, 12, 13, 12, 11, 13, 11, 12, 11, 12, 12, 9,
+ },
+ },
+ {
+ {
+ 8, 8, 8, 7, 8, 7, 8, 6, 8, 8, 7, 6, 7, 6, 6, 4,
+ 11, 10, 11, 9, 11, 9, 11, 8, 11, 10, 10, 9, 10, 8, 9, 6,
+ 13, 11, 13, 10, 12, 10, 13, 9, 13, 12, 13, 10, 12, 10, 11, 7,
+ 11, 11, 10, 9, 11, 10, 11, 9, 10, 11, 9, 8, 10, 9, 9, 6,
+ 12, 12, 11, 9, 12, 11, 12, 9, 12, 12, 11, 9, 11, 10, 10, 7,
+ 14, 13, 13, 10, 15, 12, 13, 10, 15, 13, 13, 10, 13, 12, 12, 8,
+ 13, 13, 11, 10, 14, 13, 12, 10, 13, 13, 10, 10, 12, 11, 10, 8,
+ 15, 14, 13, 10, 14, 13, 13, 10, 14, 13, 12, 10, 13, 12, 11, 8,
+ 14, 14, 13, 10, 16, 13, 13, 10, 15, 14, 13, 10, 13, 12, 12, 8,
+ 11, 11, 11, 10, 10, 9, 10, 9, 10, 10, 10, 9, 9, 8, 9, 6,
+ 12, 12, 12, 11, 11, 9, 12, 9, 12, 12, 11, 10, 11, 9, 10, 7,
+ 14, 13, 14, 12, 13, 10, 13, 10, 15, 13, 14, 12, 12, 10, 12, 8,
+ 13, 13, 12, 11, 12, 11, 12, 10, 12, 12, 11, 10, 11, 10, 10, 7,
+ 14, 13, 13, 11, 13, 12, 12, 10, 13, 13, 12, 11, 12, 10, 10, 7,
+ 16, 15, 15, 12, 14, 12, 13, 10, 15, 14, 13, 12, 13, 12, 12, 8,
+ 15, 15, 13, 13, 14, 14, 14, 12, 14, 14, 12, 11, 13, 12, 11, 8,
+ 15, 16, 14, 12, 15, 14, 13, 11, 15, 15, 13, 12, 13, 12, 11, 8,
+ 16, 15, 15, 12, 16, 14, 14, 10, 15, 15, 14, 12, 14, 12, 12, 8,
+ 13, 13, 13, 13, 11, 10, 12, 10, 12, 13, 12, 11, 10, 10, 10, 8,
+ 14, 13, 14, 13, 12, 10, 13, 10, 14, 14, 13, 12, 12, 10, 11, 8,
+ 15, 14, 16, 14, 13, 10, 14, 10, 16, 14, 14, 13, 13, 10, 12, 8,
+ 15, 15, 14, 14, 14, 13, 13, 12, 14, 14, 13, 12, 12, 11, 11, 9,
+ 15, 15, 15, 14, 14, 12, 14, 11, 15, 14, 13, 13, 13, 11, 11, 8,
+ 16, 15, 16, 14, 15, 12, 14, 11, 16, 16, 15, 13, 14, 12, 12, 8,
+ 16, 16, 16, 14, 15, 14, 14, 12, 15, 15, 14, 13, 13, 12, 11, 9,
+ 16, 16, 15, 15, 16, 14, 14, 11, 16, 16, 14, 13, 14, 12, 12, 8,
+ 16, 16, 16, 13, 15, 13, 14, 10, 16, 16, 15, 13, 14, 12, 12, 8,
+ 11, 11, 11, 10, 11, 10, 11, 9, 10, 10, 9, 8, 9, 9, 9, 6,
+ 12, 12, 13, 11, 12, 11, 12, 10, 12, 12, 11, 10, 11, 10, 10, 7,
+ 15, 13, 15, 12, 14, 13, 14, 11, 14, 13, 13, 11, 13, 11, 12, 8,
+ 12, 13, 12, 11, 13, 11, 12, 10, 11, 12, 10, 9, 11, 10, 10, 7,
+ 14, 13, 13, 11, 14, 12, 13, 11, 12, 12, 11, 10, 12, 11, 10, 7,
+ 15, 14, 15, 12, 15, 13, 15, 11, 15, 14, 13, 11, 13, 12, 12, 8,
+ 14, 14, 13, 12, 15, 14, 13, 12, 13, 13, 11, 10, 13, 12, 11, 8,
+ 16, 15, 13, 12, 15, 14, 14, 12, 14, 14, 12, 11, 14, 12, 11, 8,
+ 16, 15, 14, 12, 16, 15, 15, 11, 15, 14, 13, 11, 14, 13, 12, 8,
+ 12, 12, 12, 11, 12, 11, 12, 10, 11, 11, 11, 10, 10, 9, 9, 7,
+ 13, 13, 14, 12, 13, 11, 13, 11, 13, 12, 12, 11, 11, 10, 10, 7,
+ 15, 14, 16, 13, 14, 12, 14, 11, 14, 14, 14, 12, 13, 11, 12, 8,
+ 13, 13, 13, 12, 13, 12, 13, 11, 12, 12, 11, 10, 11, 10, 10, 7,
+ 14, 14, 13, 12, 13, 12, 13, 10, 13, 13, 11, 10, 12, 10, 10, 7,
+ 16, 15, 15, 13, 15, 12, 14, 11, 15, 14, 13, 11, 13, 11, 11, 7,
+ 15, 16, 14, 13, 15, 14, 14, 12, 14, 14, 12, 11, 13, 12, 11, 8,
+ 16, 15, 14, 13, 15, 14, 14, 11, 14, 14, 12, 11, 13, 12, 11, 7,
+ 16, 15, 15, 12, 16, 14, 14, 11, 15, 14, 13, 11, 14, 12, 11, 7,
+ 14, 15, 14, 14, 13, 12, 13, 12, 13, 13, 12, 12, 11, 10, 11, 8,
+ 15, 15, 15, 14, 13, 12, 14, 12, 14, 14, 13, 12, 12, 11, 11, 8,
+ 16, 15, 16, 14, 14, 12, 15, 12, 16, 14, 14, 13, 13, 11, 12, 8,
+ 15, 15, 15, 14, 14, 13, 14, 12, 14, 14, 13, 12, 12, 11, 11, 8,
+ 15, 15, 15, 14, 14, 13, 14, 12, 14, 14, 13, 12, 12, 11, 11, 7,
+ 16, 15, 16, 14, 15, 12, 15, 11, 15, 14, 14, 12, 13, 11, 12, 7,
+ 16, 16, 16, 15, 16, 15, 14, 13, 15, 15, 13, 12, 13, 12, 11, 9,
+ 16, 16, 16, 14, 15, 14, 14, 12, 15, 15, 13, 12, 14, 12, 11, 8,
+ 16, 16, 16, 14, 16, 14, 14, 11, 15, 15, 14, 12, 14, 12, 11, 7,
+ 13, 13, 13, 12, 13, 12, 13, 11, 11, 12, 11, 10, 10, 10, 10, 8,
+ 15, 14, 14, 13, 14, 13, 14, 12, 13, 13, 12, 11, 13, 11, 11, 8,
+ 16, 15, 16, 14, 16, 14, 16, 13, 15, 14, 14, 12, 14, 12, 13, 9,
+ 14, 15, 13, 13, 14, 13, 14, 12, 12, 13, 11, 11, 12, 11, 11, 8,
+ 15, 15, 14, 13, 15, 14, 14, 12, 13, 13, 12, 11, 13, 12, 11, 8,
+ 16, 16, 16, 13, 16, 15, 16, 13, 15, 14, 14, 12, 14, 13, 13, 9,
+ 14, 15, 13, 13, 16, 15, 15, 13, 13, 14, 11, 11, 13, 12, 11, 9,
+ 16, 16, 14, 13, 16, 15, 16, 13, 14, 14, 12, 11, 14, 13, 12, 8,
+ 16, 16, 15, 12, 16, 15, 15, 12, 15, 14, 13, 11, 14, 13, 12, 8,
+ 14, 14, 14, 13, 14, 13, 14, 12, 12, 13, 12, 11, 11, 11, 11, 8,
+ 15, 15, 15, 14, 14, 13, 15, 12, 14, 13, 13, 12, 12, 11, 11, 8,
+ 16, 16, 16, 15, 15, 14, 16, 13, 15, 14, 14, 12, 14, 12, 12, 9,
+ 15, 15, 14, 14, 14, 14, 14, 13, 13, 14, 12, 11, 12, 11, 11, 8,
+ 15, 15, 15, 13, 15, 14, 14, 12, 13, 13, 12, 11, 12, 11, 11, 7,
+ 16, 15, 16, 14, 16, 14, 15, 12, 15, 14, 14, 12, 13, 12, 12, 8,
+ 16, 16, 15, 14, 16, 15, 15, 13, 14, 14, 12, 11, 13, 12, 11, 8,
+ 16, 16, 15, 13, 16, 14, 14, 12, 14, 15, 12, 11, 13, 12, 11, 7,
+ 16, 16, 16, 13, 16, 15, 15, 12, 15, 14, 13, 11, 14, 12, 11, 7,
+ 15, 15, 15, 14, 13, 13, 14, 13, 13, 14, 13, 12, 11, 11, 11, 8,
+ 16, 16, 16, 14, 15, 13, 15, 12, 14, 14, 14, 13, 12, 11, 12, 8,
+ 16, 16, 16, 14, 15, 13, 15, 12, 15, 14, 14, 12, 13, 11, 12, 8,
+ 15, 16, 16, 14, 15, 14, 15, 13, 14, 14, 12, 12, 11, 11, 11, 8,
+ 16, 15, 15, 14, 15, 14, 14, 12, 14, 14, 13, 12, 12, 11, 11, 7,
+ 16, 16, 16, 13, 15, 13, 15, 12, 15, 14, 14, 12, 13, 11, 11, 7,
+ 16, 16, 16, 15, 15, 16, 15, 13, 14, 14, 12, 12, 12, 12, 11, 8,
+ 16, 16, 16, 14, 16, 14, 14, 12, 15, 15, 13, 11, 12, 11, 10, 7,
+ 16, 16, 15, 13, 16, 14, 14, 11, 15, 14, 13, 10, 13, 11, 10, 5,
+ },
+ {
+ 1, 6, 6, 7, 6, 7, 9, 7, 6, 9, 7, 8, 7, 8, 8, 5,
+ 5, 8, 10, 10, 10, 10, 12, 11, 11, 12, 11, 11, 11, 12, 12, 9,
+ 9, 10, 12, 11, 13, 12, 15, 13, 14, 15, 15, 14, 14, 15, 15, 12,
+ 6, 10, 9, 10, 10, 11, 13, 12, 11, 13, 11, 12, 12, 12, 12, 10,
+ 6, 10, 10, 10, 11, 11, 13, 11, 11, 13, 12, 12, 11, 12, 12, 10,
+ 9, 12, 13, 12, 13, 13, 16, 13, 14, 16, 15, 14, 14, 15, 16, 12,
+ 9, 13, 11, 13, 14, 14, 16, 15, 14, 16, 13, 15, 15, 15, 15, 12,
+ 9, 13, 12, 13, 14, 15, 16, 15, 14, 16, 15, 15, 15, 15, 16, 12,
+ 8, 12, 12, 11, 13, 14, 15, 13, 13, 15, 14, 14, 14, 14, 14, 12,
+ 6, 10, 10, 11, 9, 10, 13, 11, 11, 13, 12, 12, 10, 12, 12, 9,
+ 6, 10, 11, 11, 11, 10, 13, 12, 11, 13, 12, 12, 12, 12, 13, 10,
+ 9, 12, 13, 13, 13, 12, 16, 13, 14, 16, 14, 15, 16, 14, 15, 12,
+ 8, 12, 13, 13, 13, 13, 16, 14, 13, 16, 13, 14, 14, 14, 14, 12,
+ 7, 11, 11, 12, 11, 12, 14, 13, 12, 14, 13, 13, 12, 13, 13, 11,
+ 9, 12, 13, 13, 13, 13, 15, 14, 14, 16, 16, 15, 15, 15, 16, 12,
+ 11, 16, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 10, 14, 14, 15, 15, 16, 16, 16, 14, 16, 14, 16, 16, 16, 16, 13,
+ 9, 13, 13, 13, 14, 14, 16, 14, 14, 16, 15, 14, 14, 16, 16, 13,
+ 9, 13, 14, 14, 11, 13, 16, 14, 13, 16, 15, 16, 13, 14, 15, 12,
+ 10, 13, 14, 15, 13, 12, 16, 14, 14, 16, 15, 15, 14, 14, 16, 13,
+ 9, 12, 13, 14, 12, 11, 15, 13, 13, 15, 13, 14, 15, 14, 16, 12,
+ 11, 15, 16, 16, 14, 16, 16, 16, 15, 16, 16, 16, 16, 16, 16, 13,
+ 11, 14, 14, 15, 13, 14, 16, 15, 15, 16, 16, 16, 16, 16, 16, 13,
+ 10, 12, 13, 14, 13, 13, 16, 14, 14, 14, 14, 16, 15, 14, 16, 13,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 15,
+ 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 9, 12, 13, 14, 13, 13, 16, 13, 13, 15, 15, 16, 15, 15, 16, 12,
+ 6, 11, 11, 12, 10, 12, 13, 12, 11, 13, 11, 12, 11, 12, 13, 10,
+ 9, 12, 13, 13, 13, 13, 16, 14, 14, 15, 14, 14, 14, 14, 14, 12,
+ 12, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 7, 11, 11, 12, 12, 12, 14, 13, 12, 14, 11, 13, 13, 13, 13, 11,
+ 8, 12, 12, 13, 12, 13, 14, 13, 13, 14, 13, 13, 13, 14, 14, 11,
+ 11, 14, 14, 15, 16, 15, 16, 16, 15, 16, 16, 16, 16, 16, 16, 13,
+ 10, 14, 12, 14, 15, 15, 16, 16, 14, 16, 12, 15, 16, 16, 16, 13,
+ 11, 14, 13, 15, 15, 15, 16, 16, 14, 16, 14, 14, 16, 16, 16, 13,
+ 11, 14, 14, 14, 15, 16, 16, 16, 16, 16, 16, 15, 16, 16, 16, 13,
+ 7, 11, 12, 11, 10, 12, 14, 13, 12, 14, 13, 13, 10, 12, 13, 10,
+ 8, 12, 12, 13, 12, 12, 15, 13, 13, 14, 13, 13, 13, 13, 14, 11,
+ 11, 13, 15, 16, 14, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 13,
+ 9, 12, 13, 13, 13, 13, 15, 14, 13, 15, 13, 14, 13, 14, 14, 12,
+ 7, 11, 11, 11, 11, 11, 13, 12, 11, 13, 11, 12, 11, 12, 12, 10,
+ 9, 12, 13, 13, 13, 13, 16, 13, 14, 16, 15, 14, 14, 14, 16, 12,
+ 12, 14, 14, 16, 16, 16, 16, 16, 16, 16, 15, 16, 16, 16, 16, 14,
+ 10, 13, 13, 14, 14, 14, 16, 15, 14, 16, 14, 14, 16, 15, 15, 12,
+ 9, 12, 13, 13, 13, 15, 16, 14, 13, 16, 14, 13, 13, 14, 14, 11,
+ 10, 14, 15, 14, 12, 14, 16, 15, 13, 16, 16, 16, 12, 14, 16, 12,
+ 11, 14, 14, 14, 14, 14, 16, 15, 15, 16, 16, 16, 14, 15, 16, 13,
+ 11, 14, 14, 16, 14, 13, 16, 15, 14, 16, 15, 16, 15, 15, 16, 13,
+ 12, 16, 16, 16, 14, 16, 16, 16, 16, 16, 16, 16, 15, 16, 16, 14,
+ 10, 13, 14, 14, 13, 14, 16, 14, 13, 16, 15, 15, 13, 14, 14, 12,
+ 9, 12, 13, 14, 13, 12, 16, 14, 13, 16, 14, 14, 14, 14, 15, 12,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 12, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 13,
+ 9, 12, 13, 13, 13, 13, 14, 13, 12, 15, 14, 14, 14, 14, 14, 11,
+ 11, 14, 14, 16, 14, 16, 16, 16, 13, 16, 14, 16, 14, 16, 16, 13,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 11, 14, 14, 16, 16, 15, 16, 16, 15, 16, 14, 16, 16, 16, 16, 14,
+ 12, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 10, 14, 12, 15, 15, 15, 16, 16, 14, 16, 12, 14, 16, 16, 15, 14,
+ 12, 15, 14, 16, 16, 16, 16, 16, 15, 16, 14, 16, 16, 16, 16, 14,
+ 12, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 15, 16, 16, 16, 14,
+ 11, 15, 15, 16, 14, 16, 16, 16, 14, 16, 16, 16, 13, 16, 16, 13,
+ 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 11, 15, 14, 16, 14, 14, 16, 15, 14, 16, 15, 16, 15, 16, 16, 12,
+ 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 13,
+ 12, 16, 15, 16, 16, 16, 16, 16, 16, 16, 15, 16, 16, 16, 16, 14,
+ 10, 14, 13, 14, 15, 14, 16, 15, 14, 16, 13, 16, 16, 16, 15, 13,
+ 10, 14, 14, 14, 13, 14, 16, 15, 15, 16, 14, 14, 14, 16, 16, 12,
+ 10, 14, 15, 14, 13, 16, 16, 15, 13, 16, 16, 16, 12, 14, 16, 12,
+ 11, 16, 16, 16, 14, 15, 16, 16, 16, 16, 16, 16, 14, 16, 16, 13,
+ 12, 15, 14, 16, 15, 16, 16, 16, 16, 16, 16, 16, 16, 14, 16, 14,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 15, 16, 16, 14,
+ 11, 14, 14, 14, 14, 15, 16, 14, 14, 16, 16, 16, 13, 15, 15, 12,
+ 10, 14, 13, 14, 14, 14, 16, 15, 14, 16, 15, 15, 14, 14, 16, 12,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 12, 14, 14, 16, 16, 16, 16, 16, 16, 16, 15, 16, 16, 16, 16, 13,
+ 8, 11, 11, 12, 12, 12, 13, 12, 11, 13, 12, 12, 12, 13, 12, 10,
+ },
+ },
+ {
+ {
+ 5, 6, 6, 6, 6, 6, 7, 6, 6, 7, 6, 6, 6, 6, 6, 4,
+ 9, 9, 9, 8, 9, 8, 10, 8, 10, 10, 9, 9, 9, 8, 9, 6,
+ 11, 10, 12, 10, 11, 10, 12, 9, 12, 11, 11, 10, 11, 10, 11, 8,
+ 9, 10, 9, 8, 10, 10, 10, 9, 9, 10, 8, 8, 9, 9, 8, 7,
+ 10, 11, 10, 9, 11, 11, 11, 9, 11, 11, 10, 9, 10, 10, 10, 7,
+ 13, 12, 12, 10, 13, 12, 13, 10, 13, 12, 12, 11, 13, 12, 11, 9,
+ 11, 12, 10, 10, 12, 12, 11, 10, 11, 12, 10, 10, 11, 11, 10, 8,
+ 12, 12, 11, 10, 13, 13, 13, 10, 13, 13, 12, 11, 13, 12, 11, 9,
+ 12, 12, 12, 10, 13, 13, 13, 10, 13, 13, 12, 10, 13, 12, 12, 9,
+ 9, 9, 10, 10, 9, 8, 10, 9, 9, 10, 9, 9, 8, 8, 9, 6,
+ 10, 11, 11, 11, 10, 9, 11, 9, 11, 11, 11, 10, 10, 9, 10, 7,
+ 12, 12, 13, 12, 12, 10, 13, 10, 13, 13, 13, 12, 12, 11, 11, 9,
+ 11, 12, 11, 11, 11, 11, 11, 10, 11, 12, 10, 10, 10, 10, 10, 8,
+ 12, 12, 12, 11, 12, 11, 11, 10, 12, 12, 11, 11, 11, 11, 10, 8,
+ 14, 13, 13, 12, 13, 12, 13, 10, 14, 14, 13, 12, 13, 12, 12, 9,
+ 13, 14, 13, 12, 13, 13, 13, 12, 13, 13, 12, 12, 12, 12, 11, 9,
+ 14, 14, 13, 12, 13, 13, 13, 11, 14, 14, 13, 12, 13, 12, 12, 9,
+ 14, 15, 14, 12, 15, 13, 13, 11, 15, 14, 14, 12, 14, 13, 12, 9,
+ 11, 11, 12, 12, 10, 10, 12, 11, 11, 12, 11, 11, 10, 10, 10, 8,
+ 12, 12, 14, 13, 11, 10, 12, 11, 13, 13, 13, 12, 12, 10, 11, 9,
+ 13, 13, 14, 14, 12, 10, 13, 11, 14, 14, 14, 13, 12, 11, 12, 9,
+ 13, 13, 13, 13, 12, 13, 13, 12, 13, 14, 12, 12, 12, 12, 11, 9,
+ 13, 14, 14, 13, 13, 12, 13, 11, 14, 14, 13, 12, 13, 12, 12, 9,
+ 14, 14, 14, 14, 14, 12, 13, 11, 15, 15, 15, 13, 14, 12, 12, 9,
+ 14, 15, 15, 13, 14, 14, 13, 12, 13, 14, 13, 13, 12, 12, 11, 10,
+ 16, 16, 15, 14, 15, 14, 13, 11, 15, 15, 14, 13, 13, 13, 12, 9,
+ 15, 15, 15, 13, 14, 13, 13, 11, 15, 15, 15, 13, 14, 13, 12, 9,
+ 8, 9, 9, 9, 9, 9, 10, 9, 8, 10, 9, 9, 8, 8, 9, 7,
+ 11, 11, 11, 11, 11, 11, 12, 10, 11, 11, 11, 10, 10, 10, 10, 8,
+ 13, 13, 14, 12, 13, 12, 14, 11, 13, 13, 13, 12, 13, 11, 12, 9,
+ 10, 11, 10, 11, 11, 11, 12, 10, 10, 11, 10, 10, 10, 10, 10, 8,
+ 12, 12, 12, 11, 12, 12, 12, 11, 11, 12, 11, 10, 11, 11, 10, 8,
+ 14, 13, 14, 12, 14, 13, 14, 12, 14, 13, 13, 11, 13, 12, 12, 9,
+ 12, 13, 12, 12, 13, 13, 13, 12, 12, 13, 11, 11, 12, 12, 11, 9,
+ 13, 14, 13, 12, 14, 14, 14, 12, 14, 13, 12, 11, 13, 12, 12, 9,
+ 14, 14, 13, 12, 15, 14, 15, 12, 14, 14, 13, 11, 13, 13, 12, 9,
+ 10, 11, 11, 11, 10, 10, 12, 10, 10, 11, 10, 10, 9, 9, 10, 7,
+ 12, 12, 13, 12, 12, 11, 12, 11, 12, 12, 12, 11, 11, 10, 10, 8,
+ 14, 13, 14, 13, 14, 12, 13, 12, 14, 13, 14, 12, 13, 11, 12, 9,
+ 12, 13, 12, 12, 12, 12, 12, 11, 11, 12, 11, 10, 10, 10, 10, 8,
+ 12, 12, 12, 12, 12, 12, 12, 11, 12, 12, 11, 10, 11, 10, 10, 7,
+ 14, 14, 14, 12, 14, 12, 14, 11, 14, 13, 13, 11, 13, 11, 11, 8,
+ 13, 15, 13, 13, 14, 14, 14, 12, 13, 14, 12, 12, 12, 12, 11, 9,
+ 14, 15, 13, 12, 14, 13, 13, 11, 13, 13, 12, 11, 13, 12, 11, 8,
+ 15, 15, 15, 12, 15, 14, 14, 11, 14, 14, 13, 11, 13, 12, 12, 8,
+ 12, 13, 13, 13, 12, 12, 13, 12, 12, 13, 12, 12, 11, 11, 11, 9,
+ 13, 14, 15, 14, 13, 12, 14, 12, 13, 13, 14, 12, 12, 11, 12, 9,
+ 14, 14, 15, 14, 14, 12, 14, 12, 14, 14, 14, 13, 13, 11, 12, 9,
+ 13, 14, 14, 14, 13, 13, 14, 13, 13, 13, 12, 12, 12, 12, 11, 9,
+ 14, 14, 14, 13, 13, 13, 13, 12, 13, 14, 13, 12, 12, 11, 11, 8,
+ 15, 14, 15, 14, 14, 13, 14, 11, 15, 14, 14, 12, 13, 11, 12, 8,
+ 14, 15, 14, 14, 15, 14, 14, 13, 14, 15, 13, 13, 12, 12, 11, 10,
+ 16, 15, 14, 14, 14, 14, 13, 12, 14, 14, 13, 12, 13, 12, 11, 9,
+ 15, 15, 15, 14, 16, 14, 14, 11, 15, 15, 14, 12, 13, 12, 11, 8,
+ 11, 12, 11, 12, 12, 12, 12, 11, 10, 11, 10, 10, 10, 10, 10, 8,
+ 13, 13, 13, 13, 13, 13, 14, 12, 12, 12, 12, 12, 12, 11, 12, 9,
+ 14, 14, 14, 13, 15, 13, 15, 13, 14, 14, 14, 12, 14, 12, 13, 10,
+ 12, 13, 12, 13, 13, 13, 13, 12, 11, 12, 11, 11, 12, 11, 11, 9,
+ 14, 14, 13, 13, 14, 14, 14, 12, 12, 13, 12, 11, 13, 12, 12, 9,
+ 14, 14, 15, 13, 15, 15, 15, 13, 15, 13, 13, 12, 14, 12, 13, 10,
+ 13, 15, 12, 13, 14, 14, 14, 13, 12, 13, 11, 11, 13, 12, 11, 10,
+ 14, 15, 14, 13, 15, 14, 15, 13, 14, 14, 12, 11, 13, 13, 12, 9,
+ 14, 15, 14, 13, 15, 14, 15, 13, 14, 14, 13, 11, 14, 13, 12, 9,
+ 12, 13, 13, 13, 12, 13, 13, 12, 11, 12, 12, 11, 11, 11, 11, 9,
+ 13, 14, 14, 13, 14, 13, 14, 12, 13, 13, 13, 12, 12, 11, 11, 9,
+ 15, 15, 16, 14, 15, 14, 14, 13, 15, 14, 14, 13, 13, 12, 13, 10,
+ 13, 14, 14, 13, 13, 14, 14, 13, 12, 13, 12, 12, 11, 11, 11, 9,
+ 14, 14, 14, 13, 14, 13, 14, 12, 13, 13, 12, 11, 12, 11, 11, 8,
+ 15, 15, 15, 13, 15, 14, 14, 12, 14, 13, 13, 12, 13, 12, 12, 9,
+ 14, 15, 14, 14, 15, 15, 14, 13, 13, 14, 12, 12, 13, 12, 12, 9,
+ 15, 15, 14, 13, 15, 14, 14, 13, 14, 14, 12, 11, 13, 12, 11, 8,
+ 15, 16, 14, 13, 15, 15, 15, 12, 14, 14, 13, 11, 14, 12, 12, 8,
+ 12, 14, 13, 13, 13, 13, 14, 12, 12, 13, 12, 12, 10, 11, 11, 9,
+ 14, 15, 15, 14, 13, 13, 15, 13, 13, 14, 14, 12, 12, 11, 12, 9,
+ 15, 15, 16, 14, 14, 13, 15, 13, 14, 14, 14, 13, 13, 11, 12, 9,
+ 14, 15, 14, 14, 14, 14, 14, 13, 13, 14, 13, 12, 12, 12, 11, 9,
+ 14, 15, 15, 14, 14, 14, 14, 12, 13, 14, 13, 12, 12, 11, 11, 8,
+ 15, 15, 15, 14, 14, 13, 15, 12, 15, 14, 14, 12, 13, 11, 11, 8,
+ 14, 16, 14, 14, 14, 15, 14, 13, 13, 14, 12, 12, 12, 12, 11, 9,
+ 15, 15, 15, 14, 15, 14, 14, 12, 14, 14, 13, 12, 12, 11, 11, 8,
+ 15, 15, 14, 13, 15, 13, 14, 12, 14, 14, 13, 11, 13, 11, 11, 7,
+ },
+ {
+ 1, 5, 6, 7, 6, 7, 9, 8, 6, 9, 8, 8, 7, 8, 8, 6,
+ 5, 8, 10, 10, 10, 11, 13, 12, 11, 13, 12, 12, 12, 12, 13, 10,
+ 8, 10, 13, 12, 13, 13, 16, 14, 14, 16, 16, 14, 16, 16, 16, 12,
+ 5, 10, 9, 11, 11, 12, 13, 12, 11, 13, 11, 12, 12, 12, 13, 10,
+ 6, 10, 11, 11, 11, 12, 14, 12, 11, 13, 13, 13, 12, 13, 13, 11,
+ 8, 12, 13, 12, 14, 14, 16, 14, 14, 16, 16, 16, 16, 16, 16, 13,
+ 9, 13, 11, 14, 14, 16, 16, 16, 14, 16, 14, 16, 16, 16, 16, 13,
+ 9, 13, 13, 13, 14, 16, 16, 16, 16, 14, 16, 16, 16, 16, 16, 14,
+ 8, 12, 13, 12, 13, 14, 16, 14, 14, 16, 16, 16, 14, 16, 16, 13,
+ 5, 10, 11, 12, 9, 11, 13, 12, 11, 13, 13, 13, 11, 12, 13, 10,
+ 6, 10, 11, 12, 11, 11, 14, 13, 12, 14, 12, 13, 13, 13, 13, 11,
+ 9, 12, 14, 15, 13, 13, 16, 16, 14, 16, 16, 16, 16, 16, 16, 13,
+ 8, 13, 13, 14, 13, 14, 16, 16, 14, 16, 14, 16, 14, 16, 14, 13,
+ 7, 11, 12, 13, 12, 12, 14, 13, 12, 14, 13, 14, 13, 14, 14, 12,
+ 9, 13, 14, 14, 14, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 13,
+ 11, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 10, 15, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 9, 13, 14, 14, 14, 16, 16, 16, 14, 16, 16, 16, 16, 16, 16, 14,
+ 9, 13, 14, 15, 11, 13, 16, 14, 14, 16, 16, 16, 13, 14, 16, 13,
+ 9, 13, 14, 16, 13, 13, 16, 16, 14, 16, 16, 16, 16, 15, 16, 14,
+ 8, 12, 13, 16, 13, 12, 16, 14, 13, 16, 14, 16, 16, 16, 16, 13,
+ 11, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 10, 14, 16, 16, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 9, 13, 16, 16, 14, 14, 16, 16, 14, 16, 16, 16, 16, 16, 16, 14,
+ 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 9, 13, 14, 15, 14, 14, 16, 14, 13, 16, 16, 16, 14, 16, 16, 14,
+ 6, 11, 11, 12, 11, 12, 14, 13, 11, 14, 12, 13, 12, 13, 13, 11,
+ 9, 13, 13, 14, 13, 14, 16, 16, 14, 16, 16, 16, 15, 16, 16, 13,
+ 11, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 7, 11, 11, 13, 12, 13, 16, 14, 12, 16, 12, 14, 14, 14, 14, 12,
+ 8, 12, 12, 13, 12, 14, 16, 14, 13, 16, 14, 14, 14, 14, 14, 12,
+ 11, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 10, 14, 13, 16, 16, 16, 16, 16, 14, 16, 14, 16, 16, 16, 16, 16,
+ 10, 16, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 10, 16, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 7, 11, 12, 12, 11, 13, 16, 14, 12, 15, 14, 14, 11, 13, 13, 12,
+ 8, 12, 12, 13, 13, 13, 16, 14, 13, 16, 13, 15, 13, 14, 14, 12,
+ 11, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 8, 12, 13, 14, 13, 14, 16, 16, 14, 16, 14, 16, 14, 16, 15, 13,
+ 6, 11, 11, 12, 11, 12, 13, 13, 11, 13, 12, 13, 12, 13, 13, 11,
+ 9, 13, 14, 14, 14, 14, 16, 14, 14, 16, 16, 16, 16, 16, 14, 13,
+ 11, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 10, 14, 13, 14, 14, 16, 16, 16, 14, 16, 14, 16, 16, 16, 16, 13,
+ 9, 13, 14, 13, 13, 16, 16, 14, 13, 16, 16, 16, 13, 16, 14, 13,
+ 10, 14, 14, 16, 13, 16, 16, 16, 16, 16, 16, 16, 13, 14, 16, 14,
+ 11, 16, 16, 16, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 10, 14, 16, 16, 14, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 10, 14, 14, 14, 14, 16, 16, 16, 14, 16, 16, 16, 14, 16, 16, 13,
+ 9, 13, 13, 16, 14, 14, 16, 16, 14, 16, 14, 16, 16, 16, 16, 13,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 9, 13, 14, 14, 13, 16, 16, 14, 13, 16, 14, 16, 14, 14, 16, 12,
+ 10, 14, 14, 16, 16, 16, 16, 16, 13, 16, 16, 16, 14, 16, 16, 14,
+ 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 11, 16, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 10, 14, 13, 16, 16, 16, 16, 16, 14, 16, 12, 16, 16, 16, 16, 14,
+ 11, 16, 16, 16, 16, 16, 16, 16, 16, 16, 15, 16, 16, 16, 16, 16,
+ 11, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 11, 16, 16, 16, 16, 16, 16, 16, 14, 16, 16, 16, 14, 16, 16, 14,
+ 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 15,
+ 11, 16, 16, 16, 16, 16, 16, 16, 14, 16, 16, 16, 16, 16, 16, 13,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 10, 15, 14, 16, 16, 16, 16, 16, 16, 16, 14, 16, 16, 16, 16, 14,
+ 10, 14, 14, 14, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 13,
+ 10, 14, 16, 16, 13, 16, 16, 16, 14, 16, 16, 16, 12, 16, 16, 14,
+ 11, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 15, 16, 16, 16,
+ 11, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 10, 15, 14, 16, 14, 16, 16, 16, 16, 16, 16, 16, 14, 16, 16, 13,
+ 10, 15, 14, 16, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 13,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 11, 14, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 8, 12, 12, 13, 12, 14, 14, 14, 12, 16, 13, 14, 12, 14, 14, 11,
+ },
+ },
+};
+
+
+static const uint8_t rv34_table_intra_cbp[NUM_INTRA_TABLES][8][CBP_VLC_SIZE] = {
+ {
+ { 0, 3, 3, 4, 3, 5, 5, 5, 2, 5, 4, 6, 4, 6, 6, 6, },
+ { 0, 2, 3, 4, 2, 5, 6, 7, 3, 6, 5, 7, 4, 7, 8, 8, },
+ { 0, 3, 4, 4, 3, 4, 5, 5, 3, 5, 4, 5, 4, 5, 5, 3, },
+ { 0, 3, 3, 4, 3, 4, 5, 5, 3, 5, 4, 6, 3, 5, 6, 5, },
+ { 0, 4, 4, 4, 4, 5, 5, 4, 4, 5, 4, 5, 4, 4, 4, 2, },
+ { 0, 3, 3, 4, 3, 4, 5, 5, 3, 5, 4, 5, 4, 5, 5, 4, },
+ { 0, 6, 6, 5, 6, 5, 6, 4, 6, 6, 5, 4, 4, 4, 4, 1, },
+ { 0, 4, 4, 4, 4, 5, 5, 4, 4, 5, 5, 4, 4, 4, 4, 2, },
+ },
+ {
+ { 0, 4, 3, 4, 3, 4, 5, 4, 3, 5, 4, 5, 3, 5, 5, 5, },
+ { 0, 2, 3, 4, 2, 5, 6, 7, 3, 6, 5, 7, 4, 7, 8, 8, },
+ { 0, 4, 4, 4, 4, 4, 5, 4, 4, 5, 4, 4, 3, 4, 4, 3, },
+ { 0, 3, 3, 4, 3, 4, 5, 5, 3, 5, 4, 6, 3, 5, 6, 5, },
+ { 0, 4, 4, 4, 4, 4, 5, 4, 4, 5, 5, 5, 4, 4, 4, 2, },
+ { 0, 3, 3, 4, 3, 4, 5, 5, 3, 5, 4, 5, 4, 5, 5, 4, },
+ { 0, 5, 6, 5, 5, 5, 6, 4, 6, 6, 5, 4, 5, 4, 4, 1, },
+ { 0, 4, 4, 4, 4, 4, 5, 4, 4, 5, 5, 4, 4, 4, 5, 2, },
+ },
+ {
+ { 0, 3, 3, 4, 3, 4, 4, 5, 3, 5, 4, 5, 4, 5, 5, 5, },
+ { 0, 2, 3, 4, 2, 4, 6, 7, 3, 6, 5, 7, 5, 7, 8, 8, },
+ { 0, 4, 4, 4, 4, 4, 5, 4, 3, 5, 4, 4, 4, 4, 4, 3, },
+ { 0, 3, 3, 4, 3, 3, 6, 6, 3, 6, 4, 6, 3, 6, 6, 5, },
+ { 0, 4, 4, 4, 3, 4, 5, 4, 4, 5, 4, 4, 4, 4, 4, 3, },
+ { 0, 3, 3, 4, 3, 4, 5, 5, 3, 5, 4, 5, 4, 5, 5, 4, },
+ { 0, 5, 5, 5, 5, 5, 6, 4, 5, 6, 5, 5, 5, 4, 4, 1, },
+ { 0, 4, 4, 4, 4, 4, 6, 4, 4, 6, 5, 4, 4, 4, 4, 2, },
+ },
+ {
+ { 0, 3, 3, 4, 3, 4, 4, 5, 3, 5, 4, 5, 4, 5, 5, 5, },
+ { 0, 2, 3, 4, 2, 4, 7, 6, 3, 7, 5, 7, 5, 7, 7, 7, },
+ { 0, 3, 3, 4, 3, 4, 5, 5, 3, 5, 4, 5, 4, 5, 5, 4, },
+ { 0, 3, 3, 3, 3, 4, 6, 6, 3, 6, 4, 6, 3, 6, 6, 5, },
+ { 0, 3, 4, 4, 3, 4, 5, 4, 4, 5, 4, 5, 4, 5, 4, 3, },
+ { 0, 3, 3, 4, 3, 4, 5, 5, 3, 5, 4, 5, 4, 5, 5, 4, },
+ { 0, 4, 5, 4, 4, 4, 5, 4, 4, 5, 5, 4, 4, 4, 4, 2, },
+ { 0, 4, 4, 4, 4, 5, 5, 4, 4, 5, 5, 4, 4, 4, 4, 2, },
+ },
+ {
+ { 0, 3, 3, 4, 3, 4, 5, 6, 2, 5, 4, 7, 4, 6, 6, 7, },
+ { 0, 2, 3, 4, 2, 4, 6, 7, 3, 7, 5, 7, 5, 7, 7, 7, },
+ { 0, 3, 3, 4, 3, 4, 5, 5, 3, 5, 4, 5, 4, 5, 5, 4, },
+ { 0, 2, 3, 4, 3, 4, 6, 5, 3, 6, 4, 6, 4, 6, 6, 6, },
+ { 0, 3, 3, 4, 3, 4, 5, 5, 3, 5, 4, 5, 4, 5, 5, 4, },
+ { 0, 3, 3, 4, 3, 4, 5, 5, 3, 5, 4, 5, 4, 5, 5, 4, },
+ { 0, 4, 4, 4, 4, 5, 5, 4, 4, 5, 5, 4, 4, 4, 4, 2, },
+ { 0, 3, 4, 4, 4, 4, 5, 4, 4, 5, 4, 4, 4, 4, 4, 3, },
+ },
+};
+
+
+static const uint8_t rv34_table_intra_firstpat[NUM_INTRA_TABLES][4][FIRSTBLK_VLC_SIZE] = {
+ {
+ {
+ 0, 10, 5, 10, 7, 12, 9, 11, 8, 13, 9, 12, 10, 13, 11, 12,
+ 16, 16, 14, 15, 15, 16, 13, 14, 5, 12, 6, 11, 9, 13, 10, 11,
+ 9, 14, 9, 12, 11, 14, 11, 12, 16, 16, 14, 15, 15, 16, 13, 13,
+ 10, 15, 9, 12, 12, 16, 11, 12, 12, 16, 10, 13, 13, 16, 11, 12,
+ 16, 16, 13, 14, 15, 16, 13, 12, 6, 12, 8, 11, 8, 12, 10, 11,
+ 9, 14, 10, 12, 10, 13, 11, 12, 15, 16, 14, 15, 14, 16, 13, 13,
+ 8, 13, 9, 12, 10, 13, 10, 12, 10, 14, 9, 12, 11, 14, 10, 12,
+ 15, 16, 13, 15, 14, 16, 13, 13, 11, 16, 10, 13, 13, 16, 11, 12,
+ 12, 16, 11, 13, 13, 16, 11, 12, 16, 16, 13, 14, 15, 16, 12, 12,
+ 10, 16, 12, 14, 10, 14, 11, 12, 12, 16, 13, 14, 11, 14, 12, 12,
+ 16, 16, 15, 16, 14, 15, 13, 13, 11, 16, 12, 14, 11, 14, 11, 12,
+ 12, 16, 12, 14, 11, 14, 11, 12, 16, 16, 14, 15, 13, 15, 13, 12,
+ 14, 16, 13, 14, 13, 16, 12, 12, 14, 16, 13, 14, 13, 16, 12, 12,
+ 16, 16, 14, 14, 14, 15, 12, 11, 2, 10, 6, 10, 7, 12, 9, 11,
+ 8, 12, 9, 11, 10, 13, 10, 11, 15, 16, 14, 15, 14, 16, 13, 13,
+ 5, 12, 6, 11, 9, 13, 10, 11, 9, 13, 9, 11, 10, 13, 10, 11,
+ 15, 16, 13, 14, 14, 16, 13, 13, 9, 15, 8, 12, 12, 15, 11, 11,
+ 11, 16, 10, 12, 13, 15, 11, 11, 15, 16, 13, 14, 15, 16, 12, 12,
+ 6, 12, 8, 11, 8, 12, 9, 11, 9, 14, 9, 12, 10, 13, 10, 11,
+ 15, 16, 14, 15, 14, 16, 13, 13, 7, 13, 8, 11, 9, 13, 10, 11,
+ 9, 14, 9, 12, 10, 13, 10, 11, 14, 16, 13, 14, 13, 16, 12, 12,
+ 11, 16, 10, 12, 12, 15, 11, 11, 11, 16, 10, 12, 12, 15, 11, 11,
+ 15, 16, 12, 13, 14, 16, 12, 11, 9, 15, 11, 13, 9, 13, 11, 12,
+ 11, 16, 12, 14, 10, 14, 11, 12, 16, 16, 14, 15, 13, 15, 12, 12,
+ 11, 16, 11, 14, 10, 14, 11, 12, 11, 16, 12, 13, 11, 14, 11, 11,
+ 15, 16, 14, 15, 13, 14, 12, 12, 13, 16, 12, 14, 13, 15, 11, 11,
+ 13, 16, 12, 14, 13, 15, 11, 11, 16, 16, 13, 14, 13, 15, 11, 10,
+ 5, 12, 7, 11, 8, 13, 10, 11, 9, 13, 9, 12, 10, 14, 11, 12,
+ 16, 16, 14, 15, 14, 16, 13, 13, 7, 13, 7, 11, 9, 13, 10, 11,
+ 9, 14, 9, 12, 11, 14, 11, 12, 16, 16, 14, 14, 14, 16, 13, 13,
+ 9, 15, 8, 12, 12, 15, 11, 12, 11, 16, 10, 12, 13, 16, 11, 12,
+ 16, 16, 13, 14, 15, 16, 12, 12, 7, 13, 8, 12, 9, 13, 10, 11,
+ 10, 14, 10, 12, 10, 14, 11, 12, 16, 16, 14, 15, 14, 16, 13, 13,
+ 8, 14, 9, 12, 10, 13, 10, 11, 9, 14, 9, 12, 10, 14, 10, 11,
+ 15, 16, 13, 14, 14, 16, 12, 12, 11, 16, 10, 12, 12, 15, 11, 12,
+ 11, 16, 10, 12, 12, 15, 11, 11, 15, 16, 12, 14, 14, 16, 12, 11,
+ 10, 16, 11, 13, 9, 14, 11, 12, 12, 16, 12, 14, 11, 14, 11, 12,
+ 16, 16, 14, 16, 14, 15, 13, 12, 11, 16, 11, 14, 10, 14, 11, 12,
+ 11, 16, 12, 14, 11, 14, 11, 11, 15, 16, 14, 15, 13, 15, 12, 12,
+ 13, 16, 12, 14, 13, 15, 11, 11, 13, 16, 12, 14, 12, 14, 11, 11,
+ 15, 16, 12, 13, 13, 14, 11, 10, 6, 13, 8, 11, 9, 13, 10, 11,
+ 10, 14, 10, 12, 10, 13, 10, 11, 15, 16, 13, 13, 13, 14, 12, 11,
+ 7, 13, 8, 11, 9, 13, 9, 11, 10, 14, 9, 11, 10, 13, 10, 11,
+ 15, 16, 13, 13, 13, 14, 11, 11, 9, 14, 8, 11, 10, 13, 9, 10,
+ 11, 15, 9, 11, 11, 13, 9, 10, 15, 16, 12, 13, 13, 14, 10, 9,
+ 7, 13, 8, 11, 9, 13, 9, 11, 10, 14, 10, 12, 10, 13, 10, 11,
+ 15, 16, 13, 13, 13, 14, 11, 11, 8, 13, 8, 11, 9, 13, 9, 10,
+ 9, 14, 9, 11, 10, 13, 9, 10, 14, 16, 12, 13, 13, 14, 11, 10,
+ 9, 14, 8, 11, 10, 13, 9, 9, 10, 14, 8, 11, 10, 13, 9, 9,
+ 14, 16, 11, 12, 12, 14, 10, 9, 9, 14, 9, 12, 8, 12, 9, 10,
+ 11, 15, 10, 12, 10, 13, 9, 10, 15, 16, 13, 13, 12, 13, 11, 10,
+ 9, 14, 9, 12, 9, 12, 9, 10, 10, 14, 10, 12, 9, 12, 9, 9,
+ 14, 16, 12, 13, 11, 13, 10, 9, 10, 14, 9, 11, 10, 12, 8, 8,
+ 10, 14, 9, 11, 10, 12, 8, 8, 12, 14, 9, 10, 10, 11, 8, 7,
+ },
+ {
+ 0, 9, 6, 9, 6, 10, 8, 9, 7, 11, 8, 11, 9, 11, 9, 10,
+ 14, 16, 13, 14, 13, 14, 12, 11, 5, 11, 7, 10, 8, 10, 8, 9,
+ 8, 12, 8, 11, 9, 12, 9, 10, 14, 16, 12, 13, 13, 14, 11, 11,
+ 10, 14, 9, 11, 11, 13, 10, 10, 11, 15, 9, 11, 12, 13, 10, 10,
+ 15, 16, 12, 12, 13, 14, 11, 9, 6, 11, 7, 10, 7, 10, 8, 9,
+ 8, 12, 9, 11, 9, 11, 9, 10, 14, 16, 13, 13, 13, 14, 11, 11,
+ 7, 12, 8, 11, 8, 11, 9, 9, 9, 13, 9, 11, 9, 12, 9, 10,
+ 14, 16, 12, 13, 12, 14, 11, 10, 11, 14, 10, 12, 11, 13, 10, 10,
+ 12, 15, 10, 12, 12, 13, 10, 10, 15, 16, 12, 12, 13, 14, 10, 9,
+ 10, 14, 11, 13, 9, 12, 10, 10, 11, 15, 12, 13, 10, 12, 10, 10,
+ 14, 16, 13, 14, 12, 13, 11, 10, 11, 14, 11, 13, 10, 12, 10, 10,
+ 12, 15, 11, 13, 10, 12, 10, 10, 15, 16, 13, 13, 12, 13, 11, 9,
+ 13, 16, 12, 13, 12, 13, 10, 9, 14, 16, 12, 13, 12, 13, 10, 9,
+ 16, 16, 12, 12, 13, 13, 10, 7, 4, 10, 6, 9, 7, 10, 8, 9,
+ 8, 12, 9, 11, 9, 11, 9, 9, 14, 16, 13, 13, 13, 14, 11, 11,
+ 6, 11, 7, 10, 8, 11, 8, 9, 9, 12, 9, 11, 9, 12, 9, 9,
+ 14, 16, 12, 13, 13, 14, 11, 10, 10, 14, 9, 11, 11, 13, 9, 9,
+ 11, 14, 9, 11, 11, 13, 10, 9, 14, 16, 11, 12, 13, 14, 10, 9,
+ 6, 11, 8, 10, 7, 10, 8, 9, 9, 12, 9, 11, 9, 11, 9, 9,
+ 14, 16, 13, 13, 12, 13, 11, 10, 8, 12, 8, 10, 8, 11, 9, 9,
+ 9, 12, 9, 11, 9, 11, 9, 9, 14, 16, 12, 13, 12, 13, 11, 10,
+ 11, 14, 10, 11, 11, 13, 9, 9, 11, 14, 10, 11, 11, 13, 9, 9,
+ 14, 16, 11, 12, 13, 14, 10, 8, 10, 14, 11, 12, 9, 12, 10, 10,
+ 11, 14, 11, 13, 10, 12, 10, 10, 14, 16, 13, 14, 12, 13, 11, 9,
+ 11, 14, 11, 12, 10, 12, 10, 10, 11, 14, 11, 12, 10, 12, 10, 9,
+ 14, 16, 13, 13, 11, 12, 10, 9, 13, 16, 12, 13, 12, 13, 10, 9,
+ 13, 16, 11, 12, 11, 13, 10, 8, 15, 16, 12, 12, 12, 12, 9, 7,
+ 8, 12, 8, 11, 9, 12, 9, 10, 10, 14, 10, 12, 11, 13, 10, 10,
+ 16, 16, 14, 14, 14, 14, 12, 11, 8, 13, 8, 11, 9, 12, 10, 10,
+ 11, 14, 10, 12, 11, 13, 10, 10, 16, 16, 13, 14, 14, 14, 12, 11,
+ 11, 14, 9, 12, 11, 13, 10, 10, 12, 15, 10, 12, 12, 14, 10, 10,
+ 15, 16, 12, 12, 14, 14, 11, 9, 9, 13, 9, 11, 9, 12, 10, 10,
+ 11, 14, 10, 12, 10, 12, 10, 10, 15, 16, 14, 14, 13, 14, 12, 11,
+ 9, 13, 9, 11, 10, 12, 10, 10, 10, 14, 10, 12, 10, 12, 10, 10,
+ 15, 16, 13, 13, 13, 14, 11, 10, 11, 15, 10, 12, 11, 13, 10, 10,
+ 11, 15, 10, 12, 12, 13, 10, 9, 15, 16, 11, 12, 13, 14, 10, 9,
+ 11, 15, 11, 13, 10, 12, 10, 10, 12, 16, 12, 13, 11, 13, 10, 10,
+ 16, 16, 14, 14, 12, 13, 11, 9, 11, 15, 11, 13, 10, 13, 10, 10,
+ 12, 15, 12, 13, 10, 12, 10, 10, 14, 16, 13, 13, 12, 13, 10, 9,
+ 13, 16, 12, 13, 12, 13, 10, 9, 13, 16, 11, 12, 11, 13, 10, 9,
+ 14, 16, 11, 12, 12, 12, 9, 7, 10, 15, 10, 12, 11, 13, 10, 10,
+ 12, 16, 12, 13, 12, 13, 11, 10, 16, 16, 14, 14, 14, 15, 12, 10,
+ 10, 14, 10, 12, 10, 13, 10, 10, 12, 15, 11, 12, 11, 13, 10, 10,
+ 16, 16, 14, 13, 14, 14, 11, 9, 11, 14, 10, 11, 11, 12, 9, 9,
+ 12, 15, 10, 11, 11, 13, 9, 8, 16, 16, 12, 12, 13, 13, 10, 7,
+ 10, 15, 10, 12, 10, 13, 10, 10, 12, 15, 11, 12, 11, 13, 10, 10,
+ 16, 16, 14, 13, 14, 14, 11, 9, 10, 14, 10, 12, 10, 12, 10, 10,
+ 12, 15, 11, 12, 11, 13, 10, 10, 16, 16, 13, 13, 13, 14, 11, 9,
+ 11, 14, 10, 11, 10, 12, 9, 8, 11, 14, 9, 11, 11, 12, 9, 8,
+ 14, 16, 10, 11, 12, 13, 9, 7, 11, 15, 11, 12, 10, 12, 10, 9,
+ 13, 16, 11, 12, 11, 12, 10, 9, 16, 16, 13, 13, 12, 13, 10, 7,
+ 11, 15, 10, 12, 10, 12, 9, 8, 12, 15, 11, 12, 10, 12, 9, 8,
+ 14, 16, 12, 12, 11, 12, 9, 7, 11, 14, 10, 11, 10, 12, 8, 7,
+ 11, 14, 9, 10, 10, 11, 8, 6, 12, 15, 9, 9, 9, 10, 7, 4,
+ },
+ {
+ 0, 6, 3, 7, 3, 7, 6, 7, 5, 9, 6, 9, 7, 9, 8, 8,
+ 16, 16, 16, 16, 16, 16, 16, 11, 3, 8, 5, 8, 6, 8, 7, 7,
+ 7, 11, 7, 10, 8, 10, 8, 9, 16, 16, 16, 16, 16, 16, 14, 10,
+ 8, 16, 7, 11, 10, 16, 9, 9, 11, 16, 9, 14, 16, 16, 10, 9,
+ 16, 16, 16, 16, 16, 16, 16, 10, 3, 8, 5, 8, 5, 8, 7, 7,
+ 7, 11, 8, 10, 8, 10, 8, 9, 16, 16, 16, 16, 16, 16, 16, 11,
+ 6, 10, 7, 9, 7, 10, 8, 8, 8, 11, 8, 10, 8, 11, 8, 8,
+ 16, 16, 16, 16, 16, 16, 11, 10, 10, 16, 9, 13, 11, 16, 10, 9,
+ 11, 16, 9, 11, 16, 16, 10, 9, 16, 16, 11, 16, 16, 16, 11, 9,
+ 9, 16, 10, 11, 8, 11, 9, 9, 11, 16, 12, 16, 10, 16, 10, 10,
+ 16, 16, 16, 16, 16, 16, 16, 10, 10, 16, 11, 16, 10, 16, 10, 10,
+ 11, 16, 11, 16, 10, 16, 10, 9, 16, 16, 16, 16, 16, 16, 11, 9,
+ 16, 16, 16, 16, 16, 16, 11, 9, 16, 16, 16, 16, 16, 16, 11, 9,
+ 16, 16, 11, 16, 16, 16, 9, 7, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ },
+ {
+ 2, 8, 5, 9, 6, 9, 8, 8, 8, 12, 9, 11, 9, 11, 10, 10,
+ 16, 16, 14, 16, 14, 16, 14, 12, 5, 10, 6, 9, 8, 10, 8, 9,
+ 9, 12, 9, 11, 10, 12, 10, 10, 16, 16, 14, 15, 15, 16, 13, 12,
+ 10, 13, 9, 12, 11, 12, 10, 10, 12, 15, 11, 12, 12, 13, 11, 10,
+ 16, 16, 15, 14, 15, 16, 13, 12, 6, 10, 8, 10, 7, 10, 8, 9,
+ 9, 13, 10, 11, 10, 12, 10, 10, 16, 16, 14, 16, 14, 16, 13, 12,
+ 7, 11, 8, 11, 9, 11, 9, 9, 10, 13, 10, 11, 10, 12, 10, 9,
+ 16, 16, 14, 14, 14, 15, 12, 11, 11, 14, 11, 12, 11, 13, 10, 10,
+ 12, 15, 11, 13, 12, 13, 11, 10, 16, 16, 14, 16, 15, 15, 13, 11,
+ 10, 13, 11, 12, 10, 12, 10, 10, 12, 15, 12, 13, 11, 13, 11, 11,
+ 16, 16, 15, 16, 14, 15, 13, 11, 11, 14, 11, 13, 11, 12, 11, 10,
+ 12, 16, 12, 13, 12, 13, 11, 10, 16, 16, 15, 16, 13, 15, 12, 11,
+ 13, 15, 12, 13, 13, 14, 11, 11, 14, 16, 13, 13, 13, 14, 11, 11,
+ 16, 16, 15, 14, 15, 15, 12, 10, 3, 8, 6, 9, 7, 9, 8, 8,
+ 8, 12, 9, 11, 9, 11, 9, 9, 16, 16, 15, 15, 15, 16, 13, 12,
+ 6, 10, 7, 9, 8, 10, 8, 8, 9, 12, 9, 11, 10, 12, 10, 9,
+ 16, 16, 14, 14, 14, 15, 13, 11, 10, 13, 9, 11, 11, 12, 10, 10,
+ 12, 14, 11, 12, 12, 13, 11, 10, 16, 16, 14, 14, 15, 15, 13, 11,
+ 6, 10, 8, 10, 7, 10, 8, 9, 10, 12, 10, 11, 10, 11, 10, 9,
+ 16, 16, 14, 15, 14, 15, 13, 11, 8, 11, 8, 10, 9, 11, 9, 9,
+ 9, 13, 9, 11, 10, 11, 9, 9, 16, 16, 13, 14, 14, 14, 12, 10,
+ 11, 14, 10, 12, 11, 12, 10, 10, 12, 14, 11, 12, 12, 13, 10, 10,
+ 16, 16, 13, 14, 15, 15, 12, 10, 10, 13, 11, 12, 10, 12, 10, 10,
+ 12, 14, 12, 13, 11, 13, 11, 10, 16, 16, 15, 14, 14, 14, 12, 11,
+ 11, 14, 11, 12, 10, 12, 10, 10, 12, 15, 12, 12, 11, 13, 10, 10,
+ 16, 16, 14, 15, 13, 14, 12, 10, 13, 15, 12, 13, 12, 13, 11, 10,
+ 13, 16, 12, 13, 12, 13, 11, 10, 16, 16, 13, 14, 13, 15, 11, 9,
+ 6, 10, 8, 10, 8, 11, 9, 10, 11, 13, 11, 12, 11, 13, 11, 10,
+ 16, 16, 16, 16, 16, 16, 13, 12, 8, 11, 9, 11, 9, 11, 10, 10,
+ 11, 14, 11, 12, 11, 12, 11, 10, 16, 16, 15, 15, 16, 16, 13, 12,
+ 11, 14, 10, 12, 12, 13, 11, 10, 13, 16, 12, 13, 13, 14, 11, 11,
+ 16, 16, 15, 16, 16, 16, 13, 12, 8, 12, 9, 11, 9, 11, 10, 10,
+ 11, 14, 11, 12, 11, 12, 11, 10, 16, 16, 16, 16, 16, 16, 13, 12,
+ 9, 12, 10, 11, 10, 12, 10, 10, 11, 14, 11, 12, 11, 13, 10, 10,
+ 16, 16, 15, 14, 15, 15, 13, 11, 12, 14, 11, 13, 12, 13, 11, 10,
+ 12, 15, 11, 12, 13, 13, 11, 10, 16, 16, 14, 15, 16, 15, 13, 11,
+ 11, 15, 12, 13, 11, 13, 11, 10, 13, 16, 13, 14, 12, 14, 11, 11,
+ 16, 16, 16, 16, 15, 15, 13, 12, 12, 14, 12, 13, 11, 13, 11, 10,
+ 13, 15, 12, 13, 11, 13, 11, 10, 16, 16, 15, 15, 13, 15, 13, 11,
+ 13, 16, 13, 13, 13, 13, 12, 11, 13, 16, 13, 13, 13, 13, 11, 10,
+ 16, 16, 13, 15, 14, 14, 12, 9, 9, 13, 10, 12, 11, 13, 11, 11,
+ 13, 16, 13, 14, 13, 14, 12, 11, 16, 16, 16, 16, 16, 16, 14, 12,
+ 10, 14, 11, 13, 11, 13, 11, 10, 13, 16, 13, 13, 13, 14, 12, 11,
+ 16, 16, 16, 16, 16, 16, 14, 12, 11, 15, 11, 13, 12, 13, 11, 10,
+ 14, 16, 12, 13, 13, 14, 12, 10, 16, 16, 15, 16, 16, 16, 13, 11,
+ 10, 14, 11, 12, 11, 13, 11, 10, 13, 16, 12, 13, 12, 14, 12, 11,
+ 16, 16, 16, 16, 16, 16, 14, 12, 11, 14, 11, 12, 11, 13, 11, 10,
+ 13, 15, 12, 13, 12, 13, 11, 10, 16, 16, 15, 15, 16, 16, 13, 11,
+ 12, 15, 12, 13, 12, 13, 11, 10, 13, 16, 12, 13, 13, 13, 11, 10,
+ 16, 16, 14, 14, 16, 15, 13, 10, 12, 15, 12, 13, 12, 13, 11, 10,
+ 14, 16, 13, 14, 13, 14, 12, 11, 16, 16, 16, 16, 15, 16, 13, 11,
+ 12, 16, 12, 13, 12, 13, 11, 10, 13, 16, 13, 13, 12, 14, 11, 10,
+ 16, 16, 15, 16, 14, 15, 13, 10, 12, 15, 12, 14, 12, 13, 11, 10,
+ 13, 16, 12, 13, 12, 13, 11, 10, 16, 16, 13, 14, 13, 14, 11, 8,
+ },
+ },
+ {
+ {
+ 0, 11, 5, 11, 7, 13, 10, 12, 7, 13, 9, 13, 10, 14, 12, 13,
+ 16, 16, 15, 16, 16, 16, 15, 15, 4, 13, 6, 12, 10, 14, 11, 12,
+ 8, 14, 9, 13, 11, 15, 12, 13, 16, 16, 15, 16, 15, 16, 15, 14,
+ 9, 16, 9, 13, 13, 16, 12, 13, 12, 16, 10, 14, 14, 16, 13, 13,
+ 16, 16, 14, 16, 16, 16, 14, 14, 5, 13, 8, 13, 8, 13, 11, 12,
+ 9, 14, 10, 13, 10, 14, 12, 13, 16, 16, 15, 16, 15, 16, 14, 15,
+ 7, 14, 9, 13, 10, 14, 11, 13, 9, 15, 10, 13, 11, 14, 12, 13,
+ 16, 16, 14, 16, 15, 16, 14, 14, 11, 16, 11, 14, 13, 16, 12, 13,
+ 12, 16, 11, 14, 14, 16, 12, 13, 16, 16, 14, 15, 16, 16, 14, 13,
+ 10, 16, 12, 15, 10, 15, 12, 14, 12, 16, 13, 16, 11, 15, 13, 14,
+ 16, 16, 16, 16, 14, 16, 14, 14, 11, 16, 12, 15, 11, 16, 12, 13,
+ 12, 16, 13, 15, 12, 16, 12, 13, 16, 16, 16, 16, 14, 16, 14, 14,
+ 14, 16, 13, 15, 14, 16, 13, 13, 14, 16, 14, 15, 14, 16, 13, 13,
+ 16, 16, 15, 16, 15, 16, 13, 13, 2, 12, 6, 11, 7, 13, 10, 12,
+ 7, 13, 9, 12, 10, 14, 11, 12, 16, 16, 15, 16, 15, 16, 14, 15,
+ 5, 13, 6, 12, 9, 13, 10, 12, 8, 14, 9, 13, 11, 14, 11, 13,
+ 16, 16, 14, 16, 15, 16, 14, 14, 9, 16, 8, 13, 12, 16, 11, 13,
+ 11, 16, 10, 13, 13, 16, 12, 13, 16, 16, 13, 15, 16, 16, 13, 13,
+ 5, 13, 8, 12, 7, 13, 10, 12, 8, 14, 10, 13, 10, 14, 11, 13,
+ 16, 16, 14, 16, 15, 16, 14, 14, 7, 14, 8, 12, 9, 14, 11, 12,
+ 8, 14, 9, 13, 10, 14, 11, 12, 15, 16, 14, 15, 14, 16, 13, 14,
+ 11, 16, 10, 13, 13, 16, 12, 13, 11, 16, 10, 13, 13, 16, 12, 13,
+ 16, 16, 13, 15, 15, 16, 13, 13, 9, 16, 12, 15, 9, 14, 11, 13,
+ 11, 16, 13, 15, 11, 14, 12, 13, 16, 16, 15, 16, 14, 16, 14, 14,
+ 11, 16, 12, 14, 11, 15, 12, 13, 11, 16, 12, 14, 11, 15, 12, 13,
+ 16, 16, 15, 16, 14, 16, 13, 13, 13, 16, 13, 15, 13, 16, 12, 13,
+ 14, 16, 13, 15, 13, 16, 12, 12, 16, 16, 14, 15, 14, 16, 12, 12,
+ 4, 13, 7, 12, 8, 14, 11, 12, 9, 14, 10, 13, 11, 14, 12, 13,
+ 16, 16, 15, 16, 16, 16, 15, 15, 6, 14, 7, 12, 10, 14, 11, 12,
+ 9, 15, 10, 13, 11, 15, 12, 13, 16, 16, 15, 16, 16, 16, 14, 14,
+ 9, 16, 8, 13, 12, 16, 11, 13, 12, 16, 10, 14, 13, 16, 12, 13,
+ 16, 16, 14, 16, 16, 16, 14, 14, 6, 14, 8, 13, 8, 14, 11, 13,
+ 9, 15, 10, 13, 11, 14, 12, 13, 16, 16, 15, 16, 16, 16, 14, 14,
+ 7, 15, 9, 13, 10, 14, 11, 13, 9, 15, 10, 13, 11, 14, 11, 13,
+ 16, 16, 14, 16, 15, 16, 14, 14, 10, 16, 10, 13, 12, 16, 12, 13,
+ 11, 16, 10, 13, 13, 16, 12, 13, 16, 16, 13, 14, 15, 16, 13, 13,
+ 9, 16, 12, 14, 9, 14, 11, 13, 12, 16, 12, 15, 11, 15, 12, 13,
+ 16, 16, 16, 16, 15, 16, 14, 14, 10, 16, 12, 15, 11, 15, 12, 13,
+ 11, 16, 12, 14, 11, 15, 12, 13, 16, 16, 14, 16, 13, 16, 13, 13,
+ 13, 16, 13, 15, 13, 16, 12, 13, 13, 16, 12, 14, 13, 16, 12, 12,
+ 15, 16, 13, 14, 13, 16, 12, 12, 6, 14, 8, 13, 9, 14, 10, 12,
+ 10, 15, 10, 12, 11, 14, 11, 12, 16, 16, 14, 14, 14, 16, 13, 13,
+ 7, 15, 8, 13, 9, 14, 10, 12, 10, 15, 10, 13, 11, 14, 11, 12,
+ 16, 16, 14, 14, 14, 16, 13, 12, 9, 16, 8, 12, 11, 14, 10, 11,
+ 11, 16, 10, 13, 11, 14, 10, 11, 16, 16, 13, 14, 14, 16, 12, 11,
+ 7, 14, 9, 13, 9, 14, 10, 12, 10, 16, 10, 13, 11, 14, 11, 12,
+ 16, 16, 14, 14, 14, 15, 13, 12, 7, 14, 9, 13, 9, 14, 10, 12,
+ 9, 14, 10, 12, 10, 14, 11, 12, 15, 16, 13, 14, 14, 15, 12, 12,
+ 9, 15, 9, 12, 11, 14, 10, 11, 10, 15, 9, 12, 11, 14, 10, 11,
+ 14, 16, 11, 13, 13, 15, 11, 11, 9, 16, 10, 13, 9, 14, 10, 11,
+ 11, 16, 11, 13, 10, 14, 10, 11, 16, 16, 14, 15, 13, 15, 12, 12,
+ 9, 16, 10, 13, 9, 13, 10, 11, 10, 15, 10, 13, 10, 13, 10, 11,
+ 14, 16, 13, 14, 12, 14, 11, 11, 11, 16, 10, 13, 11, 13, 9, 10,
+ 11, 14, 10, 12, 10, 13, 9, 9, 13, 15, 10, 11, 11, 12, 9, 8,
+ },
+ {
+ 0, 10, 5, 10, 6, 11, 8, 10, 7, 12, 8, 11, 9, 12, 9, 10,
+ 14, 16, 13, 13, 13, 14, 12, 11, 5, 12, 6, 10, 8, 12, 9, 10,
+ 8, 13, 8, 11, 9, 12, 9, 10, 14, 16, 12, 13, 13, 14, 11, 11,
+ 9, 15, 8, 12, 11, 14, 10, 10, 11, 16, 9, 12, 12, 14, 10, 10,
+ 14, 16, 11, 12, 13, 14, 11, 10, 5, 12, 8, 11, 7, 11, 9, 10,
+ 8, 13, 9, 11, 9, 12, 9, 10, 14, 16, 12, 13, 13, 14, 11, 11,
+ 7, 13, 8, 11, 9, 12, 9, 10, 9, 13, 9, 11, 9, 12, 9, 10,
+ 14, 16, 12, 13, 12, 13, 11, 10, 11, 15, 10, 12, 12, 14, 10, 10,
+ 12, 16, 10, 12, 12, 14, 10, 10, 14, 16, 11, 12, 13, 14, 10, 9,
+ 10, 15, 11, 13, 9, 13, 10, 10, 11, 15, 12, 13, 10, 12, 10, 10,
+ 14, 16, 13, 14, 12, 13, 11, 10, 11, 16, 11, 13, 10, 13, 10, 10,
+ 11, 16, 11, 13, 10, 13, 10, 10, 14, 16, 13, 14, 12, 13, 11, 9,
+ 13, 16, 12, 13, 12, 14, 11, 10, 14, 16, 12, 13, 12, 14, 10, 9,
+ 16, 16, 12, 13, 13, 13, 10, 8, 3, 11, 6, 10, 7, 11, 9, 10,
+ 8, 12, 8, 11, 9, 12, 9, 10, 14, 16, 13, 13, 13, 14, 11, 11,
+ 5, 12, 6, 10, 8, 12, 9, 10, 8, 13, 8, 11, 9, 12, 9, 10,
+ 14, 16, 12, 13, 13, 14, 11, 10, 9, 14, 8, 11, 11, 14, 10, 10,
+ 11, 15, 9, 11, 12, 14, 10, 10, 14, 16, 11, 12, 13, 14, 11, 9,
+ 6, 12, 8, 11, 7, 11, 9, 10, 8, 13, 9, 11, 9, 12, 9, 10,
+ 14, 16, 12, 13, 13, 13, 11, 10, 7, 13, 8, 11, 8, 12, 9, 10,
+ 9, 13, 9, 11, 9, 12, 9, 9, 14, 16, 12, 13, 12, 13, 11, 10,
+ 10, 15, 9, 12, 11, 14, 10, 10, 11, 15, 9, 11, 11, 13, 10, 9,
+ 14, 16, 11, 12, 13, 14, 10, 9, 9, 15, 11, 13, 9, 12, 10, 10,
+ 11, 15, 11, 13, 9, 12, 10, 10, 14, 16, 13, 14, 12, 13, 11, 10,
+ 10, 15, 11, 13, 10, 13, 10, 10, 11, 15, 11, 13, 10, 12, 10, 10,
+ 14, 16, 12, 13, 11, 12, 10, 9, 13, 16, 12, 13, 12, 14, 10, 9,
+ 13, 16, 11, 12, 12, 13, 10, 9, 14, 16, 11, 12, 12, 13, 9, 8,
+ 7, 13, 8, 12, 9, 13, 10, 11, 10, 14, 10, 12, 11, 13, 11, 11,
+ 16, 16, 13, 14, 14, 14, 12, 11, 8, 14, 8, 12, 9, 13, 10, 10,
+ 10, 14, 10, 12, 11, 13, 10, 10, 16, 16, 13, 13, 14, 14, 12, 11,
+ 10, 15, 9, 12, 11, 14, 10, 10, 12, 16, 10, 12, 12, 14, 10, 10,
+ 16, 16, 12, 13, 14, 15, 11, 10, 8, 14, 9, 12, 9, 13, 10, 11,
+ 10, 15, 10, 12, 10, 13, 10, 11, 16, 16, 13, 14, 14, 14, 12, 11,
+ 8, 14, 9, 12, 9, 13, 10, 10, 10, 14, 10, 12, 10, 13, 10, 10,
+ 15, 16, 13, 13, 13, 14, 11, 10, 10, 15, 10, 12, 12, 14, 10, 10,
+ 11, 16, 9, 12, 12, 14, 10, 10, 14, 16, 11, 12, 13, 14, 10, 9,
+ 11, 16, 11, 13, 10, 13, 10, 10, 12, 16, 12, 13, 10, 13, 11, 10,
+ 15, 16, 14, 14, 12, 13, 11, 10, 11, 16, 11, 13, 10, 13, 10, 10,
+ 11, 16, 12, 13, 10, 12, 10, 10, 14, 16, 13, 14, 11, 13, 11, 9,
+ 13, 16, 12, 13, 12, 14, 10, 10, 12, 16, 11, 12, 12, 13, 10, 9,
+ 14, 16, 11, 12, 11, 12, 9, 8, 10, 16, 10, 13, 11, 14, 11, 11,
+ 12, 16, 11, 13, 12, 14, 11, 11, 16, 16, 14, 13, 14, 14, 12, 10,
+ 10, 15, 10, 13, 10, 13, 10, 11, 12, 16, 11, 13, 11, 13, 11, 10,
+ 16, 16, 14, 13, 14, 14, 12, 10, 11, 15, 9, 12, 11, 13, 10, 9,
+ 12, 16, 10, 12, 12, 13, 10, 9, 16, 16, 12, 12, 13, 14, 10, 8,
+ 10, 16, 10, 13, 10, 14, 11, 11, 12, 16, 11, 13, 12, 14, 11, 10,
+ 16, 16, 14, 13, 14, 14, 12, 10, 9, 15, 9, 12, 10, 13, 10, 10,
+ 11, 16, 10, 12, 11, 13, 10, 10, 16, 16, 13, 13, 13, 14, 11, 9,
+ 10, 15, 9, 11, 11, 13, 9, 9, 11, 15, 9, 11, 11, 13, 9, 8,
+ 14, 16, 10, 11, 13, 13, 10, 8, 11, 16, 11, 13, 10, 13, 10, 9,
+ 13, 16, 11, 13, 11, 13, 10, 9, 16, 16, 13, 13, 13, 13, 10, 8,
+ 11, 16, 10, 12, 10, 13, 10, 9, 11, 16, 11, 12, 10, 12, 9, 9,
+ 15, 16, 12, 13, 11, 12, 10, 8, 11, 16, 10, 12, 11, 12, 9, 8,
+ 11, 15, 9, 11, 10, 12, 9, 7, 13, 15, 9, 9, 10, 10, 7, 5,
+ },
+ {
+ 0, 7, 3, 8, 4, 9, 7, 8, 5, 10, 7, 10, 8, 11, 8, 9,
+ 16, 16, 16, 16, 16, 16, 11, 10, 2, 10, 4, 9, 7, 10, 7, 8,
+ 7, 16, 7, 10, 9, 16, 8, 9, 16, 16, 16, 16, 16, 16, 11, 10,
+ 8, 16, 7, 10, 10, 16, 9, 8, 10, 16, 9, 11, 16, 16, 9, 9,
+ 16, 16, 16, 16, 16, 16, 11, 9, 3, 10, 6, 9, 6, 11, 8, 8,
+ 7, 16, 8, 10, 9, 16, 9, 9, 16, 16, 16, 16, 16, 16, 11, 10,
+ 5, 16, 7, 10, 8, 11, 8, 8, 8, 16, 8, 10, 9, 16, 8, 8,
+ 16, 16, 11, 16, 16, 16, 10, 9, 9, 16, 9, 11, 11, 16, 9, 9,
+ 11, 16, 9, 11, 11, 16, 9, 8, 16, 16, 10, 16, 16, 16, 10, 9,
+ 8, 16, 10, 16, 8, 16, 10, 9, 12, 16, 11, 16, 10, 16, 10, 9,
+ 16, 16, 16, 16, 16, 16, 12, 10, 10, 16, 11, 16, 10, 16, 10, 9,
+ 11, 16, 11, 16, 10, 16, 10, 9, 16, 16, 16, 16, 16, 16, 11, 9,
+ 16, 16, 16, 16, 16, 16, 10, 9, 16, 16, 11, 16, 16, 16, 10, 9,
+ 16, 16, 10, 11, 11, 16, 9, 7, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ },
+ {
+ 2, 9, 6, 9, 6, 10, 8, 9, 8, 12, 9, 11, 9, 12, 10, 10,
+ 16, 16, 14, 14, 15, 15, 13, 12, 5, 11, 7, 10, 8, 11, 9, 9,
+ 9, 13, 9, 11, 10, 12, 10, 10, 15, 16, 14, 14, 14, 16, 12, 11,
+ 10, 14, 9, 11, 11, 13, 10, 10, 12, 15, 10, 12, 12, 13, 11, 10,
+ 16, 16, 14, 15, 14, 15, 12, 11, 5, 11, 8, 10, 7, 11, 9, 9,
+ 9, 13, 9, 12, 9, 12, 10, 10, 15, 16, 13, 14, 13, 16, 12, 11,
+ 7, 12, 8, 11, 9, 11, 9, 10, 9, 13, 9, 11, 10, 12, 9, 9,
+ 16, 16, 13, 14, 13, 14, 11, 11, 10, 14, 10, 12, 11, 13, 10, 10,
+ 11, 16, 11, 12, 12, 13, 10, 10, 16, 16, 13, 14, 13, 14, 12, 11,
+ 10, 14, 11, 13, 10, 13, 10, 11, 12, 16, 12, 13, 10, 12, 11, 11,
+ 16, 16, 14, 15, 13, 14, 12, 11, 11, 15, 11, 13, 11, 13, 10, 10,
+ 12, 15, 12, 13, 11, 13, 11, 10, 15, 16, 14, 15, 13, 15, 12, 11,
+ 12, 16, 12, 13, 12, 14, 11, 11, 13, 16, 12, 13, 12, 14, 11, 10,
+ 16, 16, 13, 14, 13, 14, 11, 10, 3, 10, 6, 9, 7, 10, 8, 9,
+ 9, 12, 9, 11, 10, 12, 9, 10, 15, 16, 14, 14, 14, 14, 12, 11,
+ 6, 11, 7, 10, 8, 10, 9, 9, 9, 13, 9, 11, 10, 12, 9, 9,
+ 15, 16, 13, 14, 14, 15, 12, 11, 10, 14, 9, 11, 11, 12, 10, 10,
+ 12, 14, 10, 12, 11, 13, 10, 10, 15, 16, 13, 14, 14, 16, 12, 11,
+ 6, 11, 8, 10, 7, 11, 9, 9, 9, 13, 9, 11, 9, 12, 9, 9,
+ 15, 16, 14, 14, 13, 14, 12, 11, 7, 11, 8, 11, 8, 11, 9, 9,
+ 9, 12, 9, 11, 9, 12, 9, 9, 15, 16, 13, 13, 13, 14, 11, 10,
+ 10, 13, 10, 12, 11, 13, 10, 10, 11, 15, 10, 12, 11, 13, 10, 10,
+ 14, 16, 12, 13, 13, 14, 11, 10, 10, 14, 11, 12, 9, 12, 10, 10,
+ 11, 15, 11, 13, 10, 13, 10, 10, 15, 16, 14, 14, 13, 14, 12, 11,
+ 10, 14, 10, 12, 10, 12, 10, 10, 11, 15, 11, 12, 10, 12, 10, 10,
+ 15, 16, 13, 14, 12, 14, 11, 10, 12, 16, 11, 13, 12, 14, 11, 10,
+ 12, 16, 12, 13, 11, 13, 10, 10, 15, 16, 12, 14, 12, 14, 11, 9,
+ 7, 12, 9, 11, 9, 12, 10, 10, 11, 14, 11, 12, 11, 13, 11, 11,
+ 16, 16, 15, 16, 15, 16, 13, 12, 8, 12, 9, 11, 9, 12, 10, 10,
+ 11, 14, 11, 12, 11, 13, 10, 10, 16, 16, 14, 15, 15, 16, 13, 12,
+ 11, 14, 10, 12, 11, 13, 10, 10, 12, 16, 11, 13, 12, 14, 11, 10,
+ 16, 16, 14, 15, 14, 16, 12, 11, 8, 13, 9, 11, 9, 12, 10, 10,
+ 11, 14, 11, 12, 11, 13, 10, 10, 16, 16, 15, 15, 14, 15, 13, 12,
+ 9, 13, 9, 12, 9, 12, 10, 10, 11, 14, 10, 12, 10, 12, 10, 10,
+ 16, 16, 14, 15, 14, 14, 12, 11, 11, 15, 11, 12, 11, 13, 11, 10,
+ 12, 16, 11, 12, 12, 13, 11, 10, 16, 16, 13, 15, 14, 15, 11, 11,
+ 11, 16, 11, 13, 10, 13, 11, 11, 12, 16, 12, 14, 11, 13, 11, 11,
+ 16, 16, 14, 15, 14, 15, 13, 11, 11, 16, 11, 13, 11, 13, 11, 10,
+ 12, 16, 12, 13, 10, 13, 11, 10, 16, 16, 14, 14, 12, 14, 12, 10,
+ 12, 16, 12, 14, 12, 14, 11, 11, 13, 16, 12, 14, 12, 14, 11, 10,
+ 15, 16, 12, 14, 12, 14, 11, 9, 9, 14, 11, 13, 11, 13, 11, 11,
+ 13, 16, 12, 14, 13, 14, 12, 11, 16, 16, 16, 16, 16, 16, 14, 12,
+ 9, 14, 10, 12, 10, 13, 11, 11, 12, 16, 12, 13, 13, 14, 11, 11,
+ 16, 16, 16, 16, 16, 14, 13, 12, 10, 15, 11, 13, 11, 14, 11, 10,
+ 13, 16, 12, 13, 12, 15, 11, 10, 16, 16, 14, 16, 15, 16, 13, 11,
+ 10, 14, 10, 13, 11, 14, 11, 11, 13, 16, 12, 13, 12, 14, 11, 11,
+ 16, 16, 16, 16, 15, 16, 13, 12, 10, 14, 10, 12, 10, 13, 10, 11,
+ 12, 15, 12, 13, 12, 13, 11, 10, 16, 16, 14, 14, 15, 15, 13, 11,
+ 11, 16, 11, 13, 11, 14, 11, 10, 12, 16, 11, 13, 12, 14, 11, 10,
+ 16, 16, 13, 14, 14, 15, 12, 10, 11, 16, 12, 13, 11, 14, 11, 10,
+ 13, 16, 13, 14, 12, 14, 11, 11, 16, 16, 15, 16, 15, 15, 12, 11,
+ 11, 16, 12, 13, 11, 14, 11, 10, 13, 16, 12, 13, 11, 14, 11, 10,
+ 16, 16, 14, 15, 13, 14, 12, 10, 12, 16, 12, 14, 12, 14, 10, 10,
+ 12, 16, 11, 13, 11, 14, 10, 10, 14, 16, 11, 13, 12, 13, 10, 8,
+ },
+ },
+ {
+ {
+ 0, 12, 6, 13, 7, 14, 11, 14, 8, 14, 10, 14, 11, 15, 13, 15,
+ 16, 16, 16, 16, 16, 16, 16, 16, 5, 14, 7, 13, 10, 16, 12, 14,
+ 9, 16, 10, 14, 12, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 10, 16, 9, 14, 14, 16, 13, 16, 12, 16, 11, 16, 16, 16, 14, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 5, 14, 9, 14, 8, 14, 12, 14,
+ 9, 16, 11, 14, 11, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 8, 16, 10, 14, 11, 16, 12, 14, 10, 16, 11, 15, 12, 16, 13, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 12, 16, 12, 16, 14, 16, 14, 16,
+ 13, 16, 12, 16, 15, 16, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 10, 16, 13, 16, 11, 16, 14, 16, 13, 16, 14, 16, 13, 16, 14, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 12, 16, 14, 16, 13, 16, 14, 16,
+ 13, 16, 14, 16, 13, 16, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 14, 16, 16, 16, 15, 16, 16, 16, 14, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 1, 12, 6, 12, 8, 14, 11, 13,
+ 8, 14, 10, 13, 11, 14, 13, 14, 16, 16, 16, 16, 16, 16, 16, 16,
+ 5, 14, 7, 13, 10, 14, 11, 14, 9, 16, 10, 14, 12, 16, 13, 15,
+ 16, 16, 16, 16, 16, 16, 16, 16, 9, 16, 9, 14, 13, 16, 13, 14,
+ 12, 16, 11, 15, 14, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 5, 14, 9, 13, 8, 14, 12, 14, 9, 16, 11, 14, 11, 16, 13, 14,
+ 16, 16, 16, 16, 16, 16, 16, 16, 7, 15, 9, 14, 10, 16, 12, 14,
+ 9, 16, 10, 14, 11, 16, 12, 14, 16, 16, 16, 16, 16, 16, 15, 16,
+ 11, 16, 11, 15, 14, 16, 13, 15, 12, 16, 11, 15, 14, 16, 13, 14,
+ 16, 16, 14, 16, 16, 16, 14, 16, 10, 16, 13, 16, 10, 16, 13, 16,
+ 12, 16, 14, 16, 12, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 11, 16, 13, 16, 12, 16, 13, 16, 12, 16, 13, 16, 12, 16, 13, 16,
+ 16, 16, 16, 16, 16, 16, 15, 16, 14, 16, 14, 16, 16, 16, 14, 16,
+ 15, 16, 14, 16, 14, 16, 14, 16, 16, 16, 16, 16, 16, 16, 14, 14,
+ 4, 14, 8, 13, 9, 16, 12, 14, 9, 16, 11, 14, 11, 16, 13, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 6, 15, 8, 13, 10, 16, 12, 14,
+ 10, 16, 11, 14, 12, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 9, 16, 9, 14, 13, 16, 13, 15, 12, 16, 11, 16, 14, 16, 13, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 6, 16, 9, 14, 9, 16, 12, 14,
+ 10, 16, 11, 15, 12, 16, 13, 15, 16, 16, 16, 16, 16, 16, 16, 16,
+ 8, 16, 10, 14, 11, 16, 12, 14, 10, 16, 11, 14, 12, 16, 13, 15,
+ 16, 16, 16, 16, 16, 16, 16, 16, 11, 16, 11, 15, 14, 16, 13, 15,
+ 12, 16, 11, 15, 14, 16, 13, 14, 16, 16, 14, 16, 16, 16, 14, 16,
+ 10, 16, 13, 16, 10, 16, 13, 15, 13, 16, 14, 16, 12, 16, 14, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 11, 16, 13, 16, 12, 16, 13, 16,
+ 12, 16, 13, 16, 11, 16, 13, 16, 16, 16, 16, 16, 15, 16, 15, 16,
+ 14, 16, 14, 16, 15, 16, 14, 16, 14, 16, 14, 16, 14, 16, 14, 14,
+ 16, 16, 14, 16, 15, 16, 14, 14, 6, 16, 9, 14, 10, 16, 12, 14,
+ 10, 16, 11, 13, 12, 16, 13, 14, 16, 16, 16, 16, 16, 16, 14, 14,
+ 7, 16, 9, 14, 10, 16, 12, 13, 11, 16, 11, 14, 12, 16, 12, 14,
+ 16, 16, 16, 16, 16, 16, 14, 14, 9, 16, 9, 14, 12, 16, 11, 13,
+ 12, 16, 11, 14, 12, 16, 12, 13, 16, 16, 14, 16, 16, 16, 13, 14,
+ 7, 16, 10, 14, 10, 16, 12, 14, 11, 16, 11, 14, 11, 16, 12, 14,
+ 16, 16, 16, 16, 16, 16, 14, 14, 8, 16, 10, 14, 10, 16, 11, 13,
+ 10, 16, 11, 13, 11, 16, 12, 13, 16, 16, 14, 16, 16, 16, 14, 14,
+ 10, 16, 9, 13, 12, 16, 11, 13, 11, 16, 10, 13, 12, 16, 11, 12,
+ 16, 16, 13, 15, 15, 16, 13, 13, 9, 16, 11, 14, 9, 16, 11, 13,
+ 12, 16, 12, 16, 12, 16, 12, 13, 16, 16, 16, 16, 15, 16, 14, 14,
+ 10, 16, 11, 14, 10, 16, 11, 13, 11, 16, 12, 14, 10, 15, 11, 13,
+ 16, 16, 15, 16, 13, 16, 13, 13, 12, 16, 11, 13, 12, 16, 11, 12,
+ 12, 16, 11, 13, 11, 14, 11, 11, 13, 16, 12, 13, 12, 14, 11, 11,
+ },
+ {
+ 0, 10, 5, 10, 6, 11, 8, 10, 7, 12, 8, 11, 8, 12, 9, 10,
+ 14, 16, 13, 13, 13, 14, 12, 11, 4, 12, 6, 10, 8, 12, 9, 10,
+ 8, 13, 8, 11, 9, 12, 9, 10, 14, 16, 12, 13, 13, 14, 11, 11,
+ 9, 15, 8, 12, 12, 14, 10, 11, 11, 16, 9, 12, 12, 14, 10, 10,
+ 14, 16, 11, 13, 13, 15, 11, 11, 4, 12, 8, 11, 6, 11, 9, 10,
+ 8, 13, 9, 11, 8, 12, 9, 10, 14, 16, 12, 13, 13, 14, 11, 11,
+ 7, 13, 8, 11, 9, 12, 9, 10, 8, 13, 9, 11, 9, 12, 9, 10,
+ 14, 16, 12, 13, 12, 13, 11, 10, 11, 16, 10, 12, 12, 14, 10, 11,
+ 11, 16, 10, 12, 12, 14, 10, 10, 15, 16, 11, 13, 13, 14, 11, 10,
+ 10, 16, 11, 13, 9, 13, 10, 11, 11, 15, 12, 13, 10, 12, 10, 11,
+ 15, 16, 13, 14, 12, 13, 11, 11, 11, 16, 11, 13, 10, 13, 10, 11,
+ 12, 16, 11, 13, 10, 13, 10, 10, 15, 16, 13, 14, 12, 13, 11, 10,
+ 13, 16, 12, 13, 13, 14, 11, 11, 14, 16, 12, 13, 12, 14, 11, 10,
+ 16, 16, 13, 13, 13, 14, 11, 9, 3, 11, 6, 10, 6, 11, 9, 10,
+ 8, 12, 8, 11, 9, 12, 9, 10, 14, 16, 13, 13, 13, 13, 12, 11,
+ 5, 12, 6, 10, 8, 12, 9, 10, 8, 13, 8, 11, 9, 12, 9, 10,
+ 14, 16, 12, 13, 13, 14, 11, 11, 9, 15, 8, 11, 11, 14, 10, 10,
+ 11, 15, 9, 11, 12, 14, 10, 10, 14, 16, 11, 12, 13, 14, 11, 10,
+ 5, 12, 8, 11, 7, 11, 9, 10, 8, 13, 9, 11, 9, 12, 9, 10,
+ 14, 16, 12, 13, 13, 13, 11, 11, 7, 13, 8, 11, 8, 12, 9, 10,
+ 8, 13, 8, 11, 9, 12, 9, 9, 13, 16, 11, 13, 12, 13, 11, 10,
+ 10, 15, 9, 12, 11, 14, 10, 10, 11, 15, 9, 11, 11, 13, 10, 10,
+ 14, 16, 11, 12, 13, 14, 10, 10, 9, 16, 11, 13, 9, 12, 10, 10,
+ 11, 16, 11, 13, 9, 12, 10, 10, 15, 16, 13, 14, 12, 13, 11, 10,
+ 11, 16, 11, 13, 10, 13, 10, 10, 11, 15, 11, 13, 9, 12, 10, 10,
+ 14, 16, 12, 13, 11, 13, 10, 10, 13, 16, 12, 13, 12, 14, 11, 10,
+ 13, 16, 11, 13, 12, 13, 10, 10, 14, 16, 11, 12, 12, 13, 10, 9,
+ 7, 14, 8, 12, 9, 13, 10, 11, 10, 14, 10, 12, 11, 13, 11, 11,
+ 16, 16, 14, 14, 14, 14, 12, 12, 7, 14, 8, 12, 9, 13, 10, 11,
+ 10, 14, 10, 12, 11, 13, 10, 11, 16, 16, 13, 13, 14, 14, 12, 11,
+ 10, 15, 9, 12, 12, 14, 10, 10, 12, 16, 9, 12, 12, 14, 10, 10,
+ 16, 16, 12, 13, 14, 15, 12, 11, 8, 14, 9, 12, 9, 13, 10, 11,
+ 10, 15, 10, 12, 10, 13, 10, 11, 16, 16, 14, 14, 14, 14, 12, 11,
+ 8, 14, 9, 12, 9, 13, 10, 11, 10, 14, 10, 12, 10, 13, 10, 10,
+ 15, 16, 13, 13, 13, 14, 12, 11, 10, 15, 10, 12, 12, 14, 10, 10,
+ 11, 16, 9, 12, 12, 14, 10, 10, 14, 16, 11, 12, 13, 14, 11, 10,
+ 11, 16, 11, 14, 9, 13, 10, 11, 12, 16, 12, 14, 10, 13, 11, 11,
+ 16, 16, 14, 15, 13, 14, 12, 11, 11, 16, 12, 14, 10, 13, 11, 11,
+ 11, 16, 11, 13, 10, 13, 10, 10, 15, 16, 13, 14, 12, 13, 11, 10,
+ 13, 16, 12, 14, 13, 14, 11, 10, 12, 16, 11, 13, 12, 13, 10, 10,
+ 14, 16, 11, 12, 11, 13, 10, 9, 10, 16, 10, 13, 11, 14, 11, 11,
+ 12, 16, 11, 13, 12, 14, 11, 11, 16, 16, 14, 13, 14, 15, 12, 11,
+ 10, 16, 10, 13, 10, 14, 11, 11, 12, 16, 11, 13, 11, 14, 11, 11,
+ 16, 16, 14, 13, 14, 14, 12, 11, 11, 15, 9, 12, 11, 14, 10, 10,
+ 13, 16, 10, 12, 12, 14, 10, 10, 16, 16, 13, 13, 14, 14, 11, 10,
+ 10, 16, 10, 13, 11, 14, 11, 11, 12, 16, 11, 13, 12, 14, 11, 11,
+ 16, 16, 14, 14, 14, 14, 12, 11, 9, 16, 10, 13, 10, 14, 11, 11,
+ 11, 15, 11, 12, 11, 13, 11, 11, 16, 16, 13, 13, 14, 14, 12, 10,
+ 10, 15, 9, 12, 11, 14, 10, 10, 11, 16, 9, 11, 11, 13, 10, 9,
+ 15, 16, 11, 12, 13, 14, 10, 9, 11, 16, 11, 13, 10, 13, 10, 10,
+ 13, 16, 12, 13, 11, 13, 10, 10, 16, 16, 14, 14, 13, 13, 11, 10,
+ 11, 16, 11, 13, 10, 13, 10, 10, 12, 16, 11, 13, 10, 13, 10, 10,
+ 15, 16, 13, 13, 12, 13, 11, 9, 11, 16, 11, 12, 11, 13, 10, 9,
+ 11, 15, 10, 11, 11, 12, 9, 8, 13, 15, 10, 10, 10, 11, 8, 7,
+ },
+ {
+ 0, 9, 3, 8, 5, 9, 7, 8, 5, 11, 6, 9, 8, 11, 8, 9,
+ 16, 16, 16, 16, 16, 16, 11, 10, 2, 10, 4, 9, 7, 10, 7, 8,
+ 7, 16, 7, 10, 9, 11, 8, 9, 16, 16, 11, 16, 16, 16, 11, 10,
+ 7, 16, 7, 10, 10, 16, 8, 9, 10, 16, 8, 10, 11, 16, 9, 9,
+ 16, 16, 16, 16, 16, 16, 11, 10, 3, 11, 6, 9, 6, 11, 8, 8,
+ 7, 16, 8, 10, 8, 11, 8, 9, 16, 16, 15, 16, 16, 16, 11, 10,
+ 5, 11, 7, 9, 8, 11, 8, 8, 7, 16, 7, 10, 8, 11, 8, 8,
+ 16, 16, 11, 16, 16, 16, 10, 9, 9, 16, 8, 11, 11, 16, 9, 9,
+ 10, 16, 8, 11, 11, 16, 9, 9, 16, 16, 10, 16, 16, 16, 10, 9,
+ 8, 16, 10, 11, 8, 16, 9, 9, 11, 16, 11, 16, 10, 16, 10, 9,
+ 16, 16, 16, 16, 16, 16, 11, 10, 9, 16, 10, 16, 10, 16, 9, 9,
+ 10, 16, 10, 16, 9, 16, 9, 9, 16, 16, 16, 16, 15, 16, 10, 9,
+ 16, 16, 11, 16, 16, 16, 10, 10, 13, 16, 11, 16, 11, 16, 10, 9,
+ 16, 16, 10, 11, 11, 16, 9, 8, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ },
+ {
+ 1, 10, 6, 10, 7, 11, 9, 10, 8, 12, 9, 12, 9, 12, 10, 10,
+ 16, 16, 14, 16, 14, 15, 13, 12, 5, 12, 7, 10, 9, 11, 9, 10,
+ 9, 13, 9, 12, 10, 13, 10, 10, 16, 16, 14, 15, 14, 14, 13, 12,
+ 10, 15, 10, 12, 12, 14, 11, 11, 12, 16, 11, 13, 12, 14, 11, 11,
+ 16, 16, 14, 16, 15, 16, 13, 12, 6, 12, 8, 11, 8, 11, 10, 10,
+ 9, 13, 10, 12, 10, 12, 10, 10, 15, 16, 13, 15, 13, 14, 12, 12,
+ 7, 13, 9, 11, 9, 12, 10, 10, 9, 14, 10, 12, 10, 13, 10, 10,
+ 15, 16, 13, 15, 13, 14, 12, 11, 11, 15, 11, 13, 12, 14, 11, 11,
+ 12, 16, 11, 13, 12, 14, 11, 11, 16, 16, 13, 15, 14, 16, 12, 11,
+ 10, 16, 12, 14, 10, 13, 11, 11, 12, 16, 12, 14, 11, 13, 11, 11,
+ 16, 16, 15, 16, 14, 16, 13, 12, 11, 16, 12, 14, 11, 14, 11, 11,
+ 12, 16, 12, 14, 11, 14, 11, 11, 16, 16, 14, 15, 13, 15, 12, 12,
+ 13, 16, 13, 15, 13, 15, 12, 12, 13, 16, 12, 15, 12, 15, 11, 11,
+ 16, 16, 13, 16, 13, 15, 12, 11, 4, 11, 7, 10, 7, 11, 9, 10,
+ 9, 13, 9, 12, 10, 12, 10, 10, 15, 16, 14, 15, 14, 15, 13, 12,
+ 6, 12, 7, 11, 9, 11, 9, 10, 9, 13, 9, 12, 10, 12, 10, 10,
+ 15, 16, 14, 15, 14, 15, 12, 12, 10, 15, 9, 12, 12, 13, 11, 11,
+ 12, 15, 11, 13, 12, 14, 11, 11, 16, 16, 13, 15, 14, 16, 12, 12,
+ 6, 12, 8, 11, 8, 11, 9, 10, 9, 14, 10, 12, 10, 12, 10, 10,
+ 14, 16, 13, 14, 13, 14, 12, 12, 8, 13, 9, 11, 9, 12, 10, 10,
+ 9, 13, 9, 12, 9, 12, 9, 10, 14, 16, 13, 14, 13, 14, 12, 11,
+ 11, 15, 11, 13, 11, 14, 11, 11, 12, 16, 10, 13, 12, 13, 11, 10,
+ 15, 16, 12, 15, 13, 16, 12, 11, 10, 15, 11, 13, 10, 13, 11, 11,
+ 12, 16, 12, 14, 11, 13, 11, 11, 16, 16, 14, 15, 13, 16, 13, 12,
+ 11, 16, 11, 14, 11, 13, 11, 11, 12, 16, 12, 14, 10, 13, 11, 11,
+ 15, 16, 13, 16, 12, 14, 12, 11, 12, 16, 12, 14, 12, 15, 11, 11,
+ 13, 16, 12, 14, 12, 14, 11, 11, 15, 16, 13, 15, 13, 15, 11, 10,
+ 7, 13, 9, 12, 10, 13, 11, 11, 11, 15, 12, 13, 12, 13, 11, 11,
+ 16, 16, 15, 16, 16, 16, 14, 13, 8, 13, 9, 12, 10, 13, 11, 11,
+ 12, 15, 11, 13, 12, 13, 11, 11, 16, 16, 14, 15, 15, 16, 13, 12,
+ 11, 16, 11, 13, 12, 14, 11, 11, 13, 16, 12, 14, 13, 15, 12, 11,
+ 16, 16, 14, 16, 15, 16, 13, 12, 9, 15, 10, 13, 10, 13, 11, 11,
+ 12, 15, 11, 13, 11, 13, 11, 11, 16, 16, 14, 16, 16, 16, 13, 12,
+ 9, 14, 10, 13, 10, 13, 11, 11, 11, 14, 11, 13, 11, 13, 11, 11,
+ 16, 16, 14, 16, 14, 16, 12, 12, 11, 16, 11, 14, 12, 14, 12, 11,
+ 12, 16, 11, 13, 13, 14, 11, 11, 16, 16, 13, 15, 14, 16, 12, 11,
+ 12, 16, 12, 14, 11, 14, 11, 11, 13, 16, 13, 14, 12, 15, 12, 11,
+ 16, 16, 16, 16, 14, 16, 13, 12, 12, 16, 12, 14, 11, 14, 12, 11,
+ 12, 16, 12, 15, 11, 14, 11, 11, 16, 16, 14, 16, 13, 15, 12, 12,
+ 13, 16, 13, 16, 13, 16, 12, 12, 13, 16, 12, 15, 12, 16, 11, 11,
+ 14, 16, 12, 15, 12, 15, 11, 10, 9, 16, 11, 14, 12, 14, 12, 12,
+ 13, 16, 13, 15, 14, 16, 12, 12, 16, 16, 16, 16, 16, 16, 14, 13,
+ 10, 16, 11, 14, 12, 14, 12, 12, 13, 16, 13, 14, 13, 16, 12, 12,
+ 16, 16, 15, 16, 16, 16, 13, 12, 11, 16, 11, 14, 12, 15, 12, 11,
+ 13, 16, 12, 14, 13, 16, 12, 11, 16, 16, 14, 16, 16, 16, 13, 12,
+ 11, 16, 11, 14, 12, 14, 12, 12, 13, 16, 12, 15, 13, 16, 12, 12,
+ 16, 16, 16, 16, 16, 16, 14, 13, 10, 16, 11, 14, 11, 15, 11, 11,
+ 13, 16, 12, 14, 12, 14, 12, 11, 16, 16, 14, 16, 16, 16, 13, 12,
+ 11, 16, 11, 15, 12, 15, 12, 11, 13, 16, 11, 14, 13, 15, 12, 11,
+ 16, 16, 13, 16, 14, 16, 12, 11, 12, 16, 12, 15, 11, 15, 12, 11,
+ 14, 16, 13, 15, 12, 16, 12, 11, 16, 16, 15, 16, 14, 16, 13, 12,
+ 11, 16, 12, 15, 11, 15, 11, 11, 13, 16, 13, 16, 11, 15, 11, 11,
+ 16, 16, 14, 16, 13, 15, 12, 11, 12, 16, 12, 15, 12, 16, 11, 11,
+ 12, 16, 11, 15, 12, 14, 11, 11, 13, 16, 12, 13, 11, 13, 10, 9,
+ },
+ },
+ {
+ {
+ 0, 13, 6, 13, 8, 14, 12, 16, 8, 16, 11, 16, 12, 16, 14, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 4, 16, 7, 14, 11, 16, 13, 16,
+ 9, 16, 11, 16, 12, 16, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 10, 16, 10, 16, 16, 16, 16, 16, 13, 16, 12, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 5, 16, 10, 16, 8, 16, 13, 16,
+ 10, 16, 12, 16, 12, 16, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 8, 16, 10, 16, 11, 16, 13, 16, 10, 16, 12, 16, 12, 16, 14, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 12, 16, 12, 16, 16, 16, 16, 16,
+ 14, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 11, 16, 16, 16, 11, 16, 16, 16, 13, 16, 16, 16, 13, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 13, 16, 16, 16, 13, 16, 16, 16,
+ 14, 16, 16, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 1, 13, 7, 13, 8, 16, 12, 16,
+ 8, 16, 10, 16, 11, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 4, 16, 7, 14, 10, 16, 12, 16, 9, 16, 10, 16, 12, 16, 14, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 10, 16, 9, 16, 14, 16, 14, 16,
+ 13, 16, 11, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 5, 16, 9, 16, 8, 16, 12, 16, 9, 16, 11, 16, 11, 16, 13, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 7, 16, 10, 16, 11, 16, 13, 16,
+ 9, 16, 11, 16, 11, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 12, 16, 12, 16, 16, 16, 14, 16, 13, 16, 12, 16, 16, 16, 14, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 10, 16, 13, 16, 11, 16, 14, 16,
+ 13, 16, 16, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 12, 16, 14, 16, 13, 16, 14, 16, 13, 16, 14, 16, 13, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 4, 16, 8, 16, 9, 16, 13, 16, 10, 16, 11, 16, 12, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 6, 16, 8, 16, 11, 16, 13, 16,
+ 10, 16, 11, 16, 13, 16, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 9, 16, 9, 16, 13, 16, 13, 16, 13, 16, 12, 16, 16, 16, 14, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 6, 16, 10, 16, 10, 16, 13, 16,
+ 10, 16, 12, 16, 12, 16, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 8, 16, 10, 16, 11, 16, 13, 16, 10, 16, 11, 16, 12, 16, 14, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 11, 16, 11, 16, 16, 16, 14, 16,
+ 12, 16, 11, 16, 16, 16, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 10, 16, 13, 16, 10, 16, 13, 16, 13, 16, 16, 16, 13, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 12, 16, 13, 16, 12, 16, 14, 16,
+ 12, 16, 15, 16, 12, 16, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 14, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 6, 16, 10, 16, 10, 16, 13, 16,
+ 11, 16, 11, 16, 12, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 8, 16, 9, 16, 11, 16, 12, 16, 11, 16, 11, 16, 12, 16, 13, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 9, 16, 9, 16, 12, 16, 12, 16,
+ 13, 16, 11, 16, 13, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 8, 16, 10, 16, 10, 16, 12, 16, 11, 16, 12, 16, 12, 16, 13, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 8, 16, 10, 16, 11, 16, 12, 16,
+ 10, 16, 11, 16, 12, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 10, 16, 10, 14, 12, 16, 12, 16, 11, 16, 10, 14, 13, 16, 12, 14,
+ 16, 16, 14, 16, 16, 16, 14, 16, 9, 16, 12, 16, 10, 16, 12, 16,
+ 13, 16, 13, 16, 12, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 10, 16, 12, 16, 11, 16, 12, 16, 11, 16, 12, 16, 11, 16, 12, 15,
+ 16, 16, 16, 16, 16, 16, 16, 16, 13, 16, 12, 16, 13, 16, 12, 13,
+ 12, 16, 12, 16, 12, 16, 12, 13, 16, 16, 13, 14, 13, 16, 13, 13,
+ },
+ {
+ 0, 10, 5, 10, 5, 10, 8, 10, 6, 11, 8, 11, 8, 11, 9, 10,
+ 14, 16, 13, 14, 13, 14, 12, 12, 4, 12, 5, 10, 8, 12, 9, 10,
+ 7, 12, 8, 11, 9, 12, 9, 10, 14, 16, 12, 13, 13, 14, 12, 12,
+ 9, 16, 8, 12, 12, 14, 10, 11, 11, 16, 9, 12, 12, 14, 11, 11,
+ 14, 16, 12, 13, 14, 15, 12, 12, 4, 12, 7, 11, 6, 11, 9, 10,
+ 8, 12, 9, 11, 8, 11, 9, 10, 14, 16, 12, 14, 13, 14, 12, 12,
+ 7, 13, 8, 11, 8, 12, 9, 10, 8, 13, 8, 11, 9, 12, 9, 10,
+ 14, 16, 12, 13, 12, 13, 11, 11, 11, 16, 10, 12, 12, 14, 11, 11,
+ 12, 16, 10, 12, 12, 14, 11, 11, 16, 16, 12, 13, 14, 15, 12, 11,
+ 10, 16, 11, 14, 9, 13, 10, 11, 11, 16, 12, 14, 10, 13, 11, 11,
+ 15, 16, 13, 14, 13, 14, 12, 12, 11, 16, 12, 14, 10, 13, 11, 11,
+ 12, 16, 12, 14, 11, 13, 11, 11, 15, 16, 13, 14, 13, 14, 12, 11,
+ 14, 16, 13, 14, 13, 15, 12, 12, 14, 16, 12, 14, 13, 15, 11, 11,
+ 16, 16, 13, 14, 14, 15, 12, 11, 3, 11, 5, 10, 6, 11, 9, 10,
+ 7, 12, 8, 11, 9, 11, 9, 10, 14, 16, 13, 13, 13, 14, 12, 12,
+ 5, 12, 6, 10, 8, 12, 9, 10, 8, 13, 8, 11, 9, 12, 9, 10,
+ 14, 16, 12, 13, 13, 14, 12, 12, 9, 16, 8, 11, 11, 14, 10, 11,
+ 11, 16, 9, 12, 12, 14, 10, 11, 15, 16, 12, 13, 14, 15, 12, 11,
+ 5, 12, 7, 11, 6, 11, 9, 10, 8, 13, 9, 11, 8, 11, 9, 10,
+ 14, 16, 12, 14, 13, 13, 12, 12, 7, 13, 8, 11, 8, 12, 9, 10,
+ 8, 12, 8, 11, 9, 12, 9, 10, 13, 16, 12, 13, 12, 13, 11, 11,
+ 10, 16, 9, 12, 12, 14, 10, 11, 11, 15, 9, 12, 12, 14, 10, 11,
+ 14, 16, 11, 13, 13, 14, 11, 11, 9, 16, 11, 14, 8, 13, 10, 11,
+ 11, 16, 12, 13, 10, 13, 10, 11, 16, 16, 13, 15, 13, 14, 12, 12,
+ 11, 16, 11, 14, 10, 13, 10, 11, 11, 16, 11, 13, 10, 13, 10, 11,
+ 14, 16, 13, 14, 12, 13, 11, 11, 14, 16, 12, 14, 13, 15, 11, 11,
+ 13, 16, 12, 13, 12, 14, 11, 11, 14, 16, 12, 13, 12, 13, 11, 10,
+ 6, 14, 8, 12, 9, 13, 10, 11, 10, 14, 10, 12, 11, 13, 11, 12,
+ 16, 16, 14, 14, 14, 14, 13, 12, 7, 14, 8, 12, 9, 13, 10, 11,
+ 10, 14, 10, 12, 11, 13, 11, 11, 16, 16, 13, 14, 14, 15, 13, 12,
+ 10, 16, 8, 12, 12, 14, 10, 11, 12, 16, 10, 12, 13, 15, 11, 11,
+ 16, 16, 13, 14, 15, 16, 12, 12, 8, 15, 9, 13, 9, 13, 10, 11,
+ 10, 15, 10, 13, 10, 13, 11, 11, 16, 16, 14, 14, 14, 14, 13, 12,
+ 8, 15, 9, 12, 10, 13, 10, 11, 10, 14, 10, 12, 10, 13, 10, 11,
+ 16, 16, 13, 14, 13, 14, 12, 12, 10, 16, 10, 13, 12, 15, 11, 11,
+ 11, 16, 9, 12, 12, 14, 11, 11, 14, 16, 11, 13, 14, 16, 12, 11,
+ 10, 16, 12, 14, 9, 13, 11, 11, 12, 16, 12, 14, 11, 13, 11, 11,
+ 16, 16, 15, 16, 14, 14, 13, 12, 11, 16, 12, 14, 11, 14, 11, 11,
+ 11, 16, 12, 14, 10, 13, 11, 11, 15, 16, 13, 15, 12, 14, 12, 11,
+ 14, 16, 13, 14, 13, 16, 12, 12, 12, 16, 12, 13, 12, 14, 11, 11,
+ 14, 16, 11, 12, 12, 13, 10, 10, 9, 16, 10, 14, 11, 15, 12, 12,
+ 12, 16, 11, 13, 12, 15, 12, 12, 16, 16, 14, 13, 15, 15, 13, 12,
+ 10, 16, 10, 14, 11, 15, 11, 12, 12, 16, 11, 13, 12, 14, 12, 12,
+ 16, 16, 14, 13, 15, 14, 13, 12, 11, 16, 9, 12, 12, 14, 11, 11,
+ 13, 16, 11, 13, 13, 15, 11, 11, 16, 16, 14, 14, 16, 16, 12, 12,
+ 10, 16, 10, 14, 11, 14, 11, 12, 12, 16, 11, 13, 12, 14, 12, 12,
+ 16, 16, 14, 14, 15, 15, 13, 12, 10, 16, 10, 13, 11, 14, 11, 12,
+ 11, 16, 11, 13, 11, 14, 11, 12, 16, 16, 14, 14, 14, 14, 13, 12,
+ 11, 16, 10, 12, 12, 15, 11, 11, 12, 16, 9, 12, 12, 14, 10, 11,
+ 16, 16, 12, 13, 14, 15, 11, 11, 11, 16, 12, 14, 10, 14, 11, 11,
+ 13, 16, 12, 14, 12, 14, 11, 11, 16, 16, 15, 15, 14, 15, 12, 12,
+ 11, 16, 12, 14, 11, 14, 11, 11, 12, 16, 12, 14, 11, 13, 11, 11,
+ 16, 16, 14, 14, 13, 14, 12, 11, 12, 16, 11, 13, 12, 14, 10, 10,
+ 12, 15, 11, 12, 12, 13, 10, 10, 14, 15, 11, 11, 12, 12, 10, 9,
+ },
+ {
+ 0, 8, 3, 8, 5, 9, 7, 8, 5, 10, 6, 10, 8, 11, 8, 9,
+ 16, 16, 15, 16, 16, 16, 11, 11, 2, 10, 4, 9, 7, 10, 7, 8,
+ 7, 16, 7, 10, 9, 11, 8, 9, 16, 16, 15, 16, 16, 16, 11, 11,
+ 7, 16, 6, 11, 10, 16, 9, 10, 10, 16, 9, 11, 11, 16, 9, 10,
+ 16, 16, 16, 16, 16, 16, 16, 11, 3, 10, 6, 9, 5, 10, 7, 9,
+ 7, 16, 8, 10, 8, 11, 8, 9, 16, 16, 16, 16, 16, 16, 11, 11,
+ 5, 11, 7, 10, 8, 11, 8, 9, 7, 14, 7, 10, 8, 11, 8, 9,
+ 16, 16, 11, 16, 16, 16, 10, 11, 9, 16, 9, 11, 11, 16, 9, 10,
+ 10, 16, 8, 11, 11, 16, 9, 10, 16, 16, 11, 16, 16, 16, 11, 10,
+ 8, 16, 9, 16, 7, 16, 9, 10, 11, 16, 11, 16, 10, 16, 10, 10,
+ 16, 16, 16, 16, 16, 16, 16, 11, 9, 16, 10, 16, 9, 16, 9, 10,
+ 10, 16, 10, 16, 9, 16, 9, 10, 16, 16, 16, 16, 11, 16, 11, 11,
+ 16, 16, 16, 16, 16, 16, 10, 10, 11, 16, 11, 16, 11, 16, 10, 10,
+ 16, 16, 11, 16, 11, 16, 10, 9, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ },
+ {
+ 1, 10, 6, 10, 7, 11, 9, 10, 8, 12, 9, 12, 9, 12, 10, 10,
+ 15, 16, 14, 15, 14, 16, 13, 12, 5, 12, 7, 11, 8, 11, 9, 10,
+ 9, 13, 9, 12, 10, 12, 10, 10, 16, 16, 14, 15, 13, 16, 13, 12,
+ 10, 15, 10, 13, 12, 14, 11, 11, 12, 16, 11, 13, 12, 14, 11, 11,
+ 16, 16, 14, 16, 14, 16, 13, 12, 5, 12, 8, 11, 7, 11, 9, 10,
+ 9, 13, 10, 12, 9, 12, 10, 10, 14, 16, 14, 15, 13, 14, 13, 12,
+ 7, 13, 9, 12, 9, 12, 10, 10, 9, 13, 9, 12, 9, 12, 10, 10,
+ 14, 16, 13, 14, 13, 15, 12, 12, 11, 16, 11, 13, 12, 14, 11, 11,
+ 12, 16, 11, 14, 12, 14, 11, 11, 16, 16, 14, 16, 14, 16, 13, 12,
+ 10, 16, 11, 14, 10, 13, 11, 11, 12, 16, 12, 14, 11, 13, 11, 11,
+ 15, 16, 15, 16, 14, 16, 13, 12, 11, 16, 12, 14, 11, 14, 11, 12,
+ 12, 16, 12, 14, 11, 14, 11, 11, 15, 16, 14, 16, 13, 16, 13, 12,
+ 13, 16, 13, 16, 13, 16, 12, 12, 13, 16, 13, 16, 13, 16, 12, 12,
+ 16, 16, 14, 15, 14, 16, 13, 12, 4, 11, 7, 10, 7, 11, 9, 10,
+ 9, 13, 9, 11, 9, 12, 10, 10, 15, 16, 14, 15, 14, 15, 13, 12,
+ 6, 12, 7, 11, 8, 11, 9, 10, 9, 13, 9, 12, 10, 12, 10, 10,
+ 15, 16, 13, 14, 14, 15, 12, 12, 10, 14, 10, 12, 12, 13, 11, 11,
+ 12, 16, 11, 13, 12, 14, 11, 11, 16, 16, 14, 15, 15, 16, 13, 12,
+ 6, 12, 8, 11, 8, 11, 9, 10, 9, 13, 10, 12, 9, 12, 10, 10,
+ 16, 16, 14, 15, 13, 14, 12, 12, 8, 13, 9, 11, 9, 12, 9, 10,
+ 9, 13, 9, 12, 9, 12, 9, 10, 14, 16, 13, 14, 13, 14, 12, 11,
+ 11, 15, 11, 13, 12, 14, 11, 11, 11, 16, 10, 13, 12, 14, 11, 11,
+ 15, 16, 13, 15, 14, 16, 12, 11, 10, 16, 12, 13, 10, 13, 11, 11,
+ 11, 16, 12, 14, 10, 13, 11, 11, 16, 16, 14, 16, 13, 16, 13, 12,
+ 11, 16, 12, 14, 10, 14, 11, 11, 11, 16, 12, 14, 10, 13, 11, 11,
+ 16, 16, 14, 16, 12, 15, 12, 11, 13, 16, 13, 15, 13, 15, 12, 12,
+ 13, 16, 12, 15, 12, 15, 12, 11, 15, 16, 13, 16, 13, 16, 12, 11,
+ 8, 14, 9, 12, 10, 13, 11, 11, 11, 16, 11, 13, 11, 13, 11, 11,
+ 16, 16, 16, 16, 16, 16, 14, 13, 9, 14, 10, 12, 10, 13, 11, 11,
+ 11, 16, 11, 13, 12, 13, 11, 11, 16, 16, 15, 16, 15, 16, 14, 12,
+ 11, 16, 11, 13, 12, 14, 12, 11, 13, 16, 12, 14, 13, 14, 12, 11,
+ 16, 16, 15, 16, 16, 16, 14, 13, 9, 14, 10, 13, 10, 13, 11, 11,
+ 11, 16, 11, 13, 11, 13, 11, 11, 16, 16, 15, 16, 14, 15, 14, 13,
+ 9, 14, 10, 13, 10, 13, 11, 11, 11, 15, 11, 13, 11, 13, 11, 11,
+ 16, 16, 14, 16, 14, 16, 13, 12, 12, 16, 12, 14, 13, 14, 12, 11,
+ 12, 16, 11, 14, 12, 15, 12, 11, 16, 16, 13, 16, 15, 16, 13, 11,
+ 11, 16, 12, 14, 11, 14, 12, 11, 13, 16, 13, 15, 12, 14, 12, 12,
+ 16, 16, 16, 16, 14, 16, 14, 13, 12, 16, 13, 15, 11, 14, 12, 12,
+ 12, 16, 13, 14, 11, 15, 12, 11, 16, 16, 15, 16, 13, 15, 13, 12,
+ 13, 16, 13, 15, 13, 16, 12, 12, 13, 16, 13, 15, 13, 15, 12, 12,
+ 15, 16, 13, 16, 13, 15, 11, 10, 10, 16, 12, 14, 12, 15, 12, 12,
+ 14, 16, 13, 15, 13, 16, 13, 12, 16, 16, 16, 16, 16, 16, 15, 13,
+ 10, 16, 12, 14, 12, 15, 12, 12, 13, 16, 13, 15, 13, 15, 13, 12,
+ 16, 16, 16, 16, 16, 16, 14, 13, 11, 16, 12, 15, 13, 16, 12, 12,
+ 14, 16, 13, 16, 13, 16, 12, 12, 16, 16, 15, 16, 15, 16, 14, 12,
+ 11, 16, 12, 14, 12, 16, 12, 12, 14, 16, 13, 15, 13, 15, 13, 12,
+ 16, 16, 16, 16, 16, 16, 15, 13, 11, 16, 12, 14, 12, 15, 12, 12,
+ 13, 16, 12, 14, 12, 15, 12, 12, 16, 16, 16, 16, 16, 16, 14, 12,
+ 12, 16, 12, 15, 13, 16, 12, 12, 13, 16, 12, 15, 13, 15, 12, 11,
+ 16, 16, 14, 16, 15, 16, 13, 12, 12, 16, 13, 16, 12, 16, 12, 12,
+ 15, 16, 14, 16, 13, 16, 12, 12, 16, 16, 16, 16, 16, 16, 14, 13,
+ 12, 16, 13, 15, 12, 16, 12, 12, 13, 16, 13, 15, 12, 15, 12, 12,
+ 16, 16, 15, 16, 14, 16, 13, 12, 13, 16, 13, 16, 12, 16, 12, 12,
+ 13, 16, 12, 16, 12, 15, 12, 12, 14, 16, 12, 14, 12, 14, 11, 10,
+ },
+ },
+ {
+ {
+ 0, 16, 6, 16, 8, 16, 16, 16, 9, 16, 11, 16, 12, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 4, 16, 7, 16, 11, 16, 16, 16,
+ 10, 16, 11, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 10, 16, 10, 16, 16, 16, 16, 16, 16, 16, 13, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 5, 16, 10, 16, 9, 16, 16, 16,
+ 10, 16, 13, 16, 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 8, 16, 11, 16, 12, 16, 16, 16, 10, 16, 12, 16, 13, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 13, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 11, 16, 16, 16, 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 13, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 1, 16, 7, 16, 8, 16, 13, 16,
+ 9, 16, 11, 16, 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 4, 16, 7, 16, 11, 16, 13, 16, 9, 16, 11, 16, 13, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 9, 16, 9, 16, 16, 16, 16, 16,
+ 13, 16, 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 4, 16, 10, 16, 8, 16, 16, 16, 10, 16, 12, 16, 12, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 7, 16, 10, 16, 11, 16, 16, 16,
+ 9, 16, 11, 16, 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 12, 16, 12, 16, 16, 16, 16, 16, 13, 16, 12, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 10, 16, 16, 16, 11, 16, 16, 16,
+ 16, 16, 16, 16, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 12, 16, 16, 16, 13, 16, 16, 16, 13, 16, 16, 16, 13, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 4, 16, 8, 16, 10, 16, 16, 16, 10, 16, 11, 16, 12, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 6, 16, 8, 16, 11, 16, 16, 16,
+ 10, 16, 12, 16, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 9, 16, 9, 16, 16, 16, 16, 16, 16, 16, 13, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 6, 16, 10, 16, 10, 16, 16, 16,
+ 11, 16, 12, 16, 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 8, 16, 11, 16, 11, 16, 16, 16, 10, 16, 12, 16, 12, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 11, 16, 12, 16, 16, 16, 16, 16,
+ 12, 16, 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 10, 16, 16, 16, 10, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 11, 16, 16, 16, 12, 16, 16, 16,
+ 12, 16, 16, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 6, 16, 10, 16, 11, 16, 16, 16,
+ 11, 16, 12, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 8, 16, 9, 16, 11, 16, 13, 16, 11, 16, 12, 16, 13, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 9, 16, 9, 16, 13, 16, 12, 16,
+ 13, 16, 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 8, 16, 10, 16, 10, 16, 14, 16, 12, 16, 12, 16, 12, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 9, 16, 11, 16, 11, 16, 13, 16,
+ 10, 16, 12, 16, 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 11, 16, 10, 16, 13, 16, 12, 16, 12, 16, 11, 16, 16, 16, 13, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 9, 16, 12, 16, 10, 16, 13, 16,
+ 16, 16, 16, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 10, 16, 13, 16, 11, 16, 13, 16, 12, 16, 13, 16, 12, 16, 13, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 13, 16, 13, 16, 16, 16, 12, 16,
+ 13, 16, 12, 16, 16, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ },
+ {
+ 0, 10, 4, 10, 5, 11, 9, 11, 6, 11, 8, 11, 8, 12, 10, 12,
+ 15, 16, 13, 15, 14, 15, 13, 14, 4, 12, 5, 11, 8, 12, 9, 11,
+ 7, 13, 8, 12, 9, 13, 10, 12, 15, 16, 13, 15, 14, 16, 13, 14,
+ 10, 16, 9, 13, 12, 16, 11, 13, 11, 16, 10, 14, 13, 16, 12, 13,
+ 16, 16, 14, 16, 15, 16, 14, 15, 4, 12, 7, 12, 6, 12, 9, 11,
+ 8, 13, 9, 12, 9, 12, 10, 12, 15, 16, 13, 15, 14, 15, 13, 14,
+ 7, 13, 8, 12, 9, 13, 10, 12, 8, 13, 9, 12, 9, 13, 10, 12,
+ 14, 16, 13, 15, 13, 15, 13, 14, 12, 16, 11, 14, 13, 16, 12, 13,
+ 12, 16, 11, 14, 13, 16, 12, 13, 16, 16, 14, 16, 15, 16, 14, 15,
+ 10, 16, 12, 16, 10, 14, 12, 13, 12, 16, 13, 16, 11, 14, 12, 13,
+ 16, 16, 16, 16, 15, 16, 14, 15, 12, 16, 13, 16, 11, 15, 12, 13,
+ 13, 16, 13, 16, 12, 15, 12, 14, 16, 16, 16, 16, 14, 16, 13, 14,
+ 16, 16, 14, 16, 14, 16, 13, 15, 16, 16, 14, 16, 14, 16, 13, 15,
+ 16, 16, 16, 16, 16, 16, 14, 16, 2, 11, 5, 11, 6, 11, 9, 11,
+ 7, 12, 8, 11, 9, 12, 10, 12, 15, 16, 14, 14, 14, 14, 13, 14,
+ 4, 13, 6, 11, 8, 12, 9, 11, 8, 13, 8, 12, 10, 13, 10, 12,
+ 15, 16, 13, 15, 14, 15, 13, 14, 9, 16, 8, 13, 12, 15, 11, 12,
+ 11, 16, 10, 14, 13, 16, 12, 13, 16, 16, 14, 16, 16, 16, 14, 15,
+ 5, 13, 8, 12, 7, 12, 9, 11, 8, 13, 9, 12, 9, 12, 10, 12,
+ 16, 16, 14, 15, 14, 15, 13, 13, 7, 13, 8, 12, 9, 13, 10, 12,
+ 8, 13, 8, 12, 9, 13, 10, 12, 14, 16, 13, 14, 13, 15, 12, 13,
+ 11, 16, 10, 14, 13, 16, 12, 13, 11, 16, 10, 13, 12, 15, 11, 13,
+ 14, 16, 12, 14, 14, 16, 13, 14, 10, 16, 12, 15, 9, 14, 11, 13,
+ 12, 16, 12, 16, 11, 14, 12, 13, 16, 16, 15, 16, 16, 16, 14, 15,
+ 11, 16, 12, 15, 11, 15, 12, 13, 12, 16, 12, 15, 11, 14, 12, 13,
+ 16, 16, 14, 16, 13, 15, 13, 14, 15, 16, 14, 16, 14, 16, 13, 14,
+ 14, 16, 13, 16, 14, 16, 13, 14, 16, 16, 14, 16, 14, 16, 13, 14,
+ 7, 15, 8, 13, 9, 14, 11, 13, 10, 14, 10, 13, 11, 14, 12, 13,
+ 16, 16, 15, 16, 15, 16, 15, 14, 7, 16, 8, 13, 10, 14, 11, 13,
+ 11, 15, 10, 13, 12, 14, 12, 13, 16, 16, 16, 16, 16, 16, 15, 15,
+ 10, 16, 9, 13, 13, 16, 12, 13, 13, 16, 11, 14, 14, 16, 13, 14,
+ 16, 16, 16, 16, 16, 16, 15, 16, 8, 16, 10, 14, 9, 14, 11, 13,
+ 11, 16, 11, 14, 11, 14, 12, 13, 16, 16, 16, 16, 16, 16, 14, 15,
+ 8, 16, 10, 14, 10, 14, 11, 13, 10, 15, 10, 14, 11, 14, 12, 13,
+ 16, 16, 14, 16, 15, 16, 14, 14, 11, 16, 10, 14, 13, 16, 12, 13,
+ 11, 16, 10, 13, 13, 16, 12, 13, 16, 16, 13, 16, 16, 16, 14, 14,
+ 11, 16, 12, 16, 10, 15, 12, 13, 14, 16, 14, 16, 12, 16, 13, 14,
+ 16, 16, 16, 16, 16, 16, 16, 16, 12, 16, 13, 16, 11, 16, 12, 14,
+ 12, 16, 13, 16, 11, 14, 12, 13, 16, 16, 16, 16, 14, 16, 14, 14,
+ 15, 16, 14, 16, 16, 16, 13, 14, 13, 16, 13, 15, 14, 16, 13, 14,
+ 14, 16, 13, 14, 14, 15, 13, 14, 9, 16, 11, 16, 12, 16, 13, 14,
+ 12, 16, 12, 15, 13, 16, 13, 14, 16, 16, 16, 15, 16, 16, 15, 15,
+ 10, 16, 10, 16, 12, 16, 12, 14, 12, 16, 12, 14, 13, 16, 13, 14,
+ 16, 16, 16, 16, 16, 16, 15, 15, 11, 16, 10, 14, 14, 16, 12, 13,
+ 14, 16, 12, 16, 14, 16, 13, 14, 16, 16, 16, 16, 16, 16, 16, 15,
+ 10, 16, 12, 16, 11, 16, 12, 14, 13, 16, 12, 15, 13, 16, 13, 14,
+ 16, 16, 16, 16, 16, 16, 16, 15, 10, 16, 11, 16, 12, 16, 12, 14,
+ 12, 16, 12, 15, 13, 16, 13, 14, 16, 16, 16, 16, 16, 16, 14, 14,
+ 11, 16, 10, 14, 14, 16, 12, 13, 13, 16, 11, 14, 14, 16, 12, 13,
+ 16, 16, 15, 16, 16, 16, 14, 14, 12, 16, 13, 16, 11, 16, 12, 14,
+ 15, 16, 14, 16, 13, 16, 13, 14, 16, 16, 16, 16, 16, 16, 14, 16,
+ 12, 16, 13, 16, 12, 16, 12, 14, 13, 16, 13, 16, 12, 16, 12, 13,
+ 16, 16, 16, 16, 16, 16, 14, 15, 13, 16, 13, 16, 14, 16, 12, 13,
+ 13, 16, 13, 14, 14, 16, 12, 13, 16, 16, 14, 14, 14, 15, 13, 13,
+ },
+ {
+ 0, 9, 3, 9, 5, 9, 7, 9, 5, 10, 7, 11, 8, 11, 9, 10,
+ 16, 16, 16, 16, 16, 16, 16, 16, 2, 11, 4, 10, 7, 11, 8, 10,
+ 7, 16, 7, 11, 9, 16, 9, 11, 16, 16, 16, 16, 16, 16, 16, 16,
+ 8, 16, 7, 16, 11, 16, 10, 11, 11, 16, 10, 16, 16, 16, 11, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 2, 11, 6, 10, 6, 10, 8, 10,
+ 7, 16, 8, 16, 8, 11, 9, 11, 16, 16, 16, 16, 16, 16, 16, 16,
+ 5, 13, 7, 11, 8, 11, 9, 11, 6, 12, 8, 12, 8, 12, 9, 11,
+ 16, 16, 16, 16, 16, 16, 16, 16, 10, 16, 10, 16, 12, 16, 10, 13,
+ 10, 16, 9, 16, 11, 16, 11, 12, 16, 16, 16, 16, 16, 16, 16, 16,
+ 8, 16, 10, 16, 8, 16, 10, 12, 11, 16, 12, 16, 10, 16, 11, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 10, 16, 11, 16, 10, 16, 11, 16,
+ 11, 16, 12, 16, 10, 16, 11, 12, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ },
+ {
+ 1, 10, 5, 10, 6, 10, 9, 10, 7, 12, 9, 12, 9, 12, 10, 10,
+ 16, 16, 15, 16, 14, 16, 13, 13, 5, 12, 7, 11, 8, 11, 9, 10,
+ 8, 13, 9, 12, 10, 12, 10, 11, 16, 16, 16, 16, 14, 16, 13, 12,
+ 10, 16, 10, 13, 12, 14, 12, 12, 12, 16, 11, 14, 13, 15, 12, 12,
+ 16, 16, 16, 15, 16, 16, 14, 13, 5, 12, 8, 11, 7, 11, 9, 10,
+ 8, 13, 10, 12, 9, 12, 10, 10, 15, 16, 14, 15, 14, 14, 13, 13,
+ 7, 13, 9, 12, 9, 12, 10, 10, 9, 13, 10, 13, 10, 13, 10, 10,
+ 14, 16, 14, 15, 14, 16, 13, 12, 11, 16, 12, 14, 12, 15, 12, 12,
+ 12, 16, 12, 15, 13, 16, 12, 12, 16, 16, 15, 16, 16, 16, 14, 13,
+ 10, 16, 12, 14, 10, 14, 12, 12, 12, 16, 13, 14, 11, 14, 12, 12,
+ 16, 16, 16, 16, 14, 16, 14, 13, 11, 16, 12, 14, 11, 14, 12, 12,
+ 12, 16, 13, 16, 11, 15, 12, 12, 16, 16, 16, 16, 14, 16, 14, 13,
+ 13, 16, 14, 16, 14, 16, 13, 13, 13, 16, 13, 16, 13, 16, 13, 12,
+ 16, 16, 16, 16, 16, 16, 13, 13, 4, 11, 7, 10, 7, 11, 9, 10,
+ 8, 12, 9, 12, 9, 12, 10, 11, 15, 16, 14, 15, 14, 16, 13, 13,
+ 6, 12, 7, 11, 8, 11, 9, 10, 9, 13, 9, 12, 10, 12, 10, 10,
+ 16, 16, 15, 16, 15, 16, 13, 12, 10, 16, 10, 13, 12, 14, 12, 11,
+ 12, 16, 11, 14, 13, 14, 12, 12, 16, 16, 16, 14, 16, 16, 14, 13,
+ 6, 12, 8, 11, 7, 11, 9, 10, 9, 14, 10, 12, 9, 12, 10, 10,
+ 15, 16, 14, 16, 14, 16, 14, 13, 7, 13, 9, 12, 9, 12, 10, 10,
+ 9, 13, 9, 12, 9, 12, 10, 10, 14, 16, 13, 14, 14, 16, 12, 11,
+ 11, 16, 11, 14, 12, 14, 12, 12, 12, 16, 11, 14, 12, 14, 12, 11,
+ 16, 16, 13, 16, 14, 16, 13, 12, 10, 16, 12, 14, 10, 14, 11, 11,
+ 12, 16, 13, 14, 11, 14, 12, 12, 16, 16, 16, 16, 14, 16, 13, 13,
+ 11, 16, 12, 16, 11, 14, 12, 12, 12, 16, 12, 14, 11, 14, 12, 11,
+ 16, 16, 15, 16, 13, 15, 13, 12, 13, 16, 14, 16, 13, 16, 13, 12,
+ 13, 16, 13, 16, 13, 16, 13, 12, 16, 16, 14, 16, 14, 16, 12, 11,
+ 8, 13, 10, 12, 10, 13, 11, 11, 11, 14, 12, 14, 12, 14, 12, 12,
+ 16, 16, 16, 16, 16, 16, 14, 13, 9, 14, 10, 13, 10, 13, 11, 11,
+ 12, 16, 12, 13, 12, 13, 12, 11, 16, 16, 16, 16, 16, 16, 15, 14,
+ 12, 16, 11, 14, 13, 15, 12, 12, 13, 16, 12, 15, 14, 15, 13, 12,
+ 16, 16, 16, 16, 16, 16, 15, 13, 9, 14, 11, 13, 10, 13, 11, 11,
+ 11, 16, 12, 14, 12, 14, 12, 12, 16, 16, 16, 16, 16, 16, 16, 13,
+ 10, 15, 11, 13, 11, 14, 12, 11, 11, 16, 12, 14, 11, 14, 12, 11,
+ 16, 16, 16, 16, 16, 16, 14, 12, 12, 16, 12, 14, 13, 15, 13, 12,
+ 12, 16, 12, 14, 13, 15, 12, 12, 16, 16, 14, 16, 16, 16, 14, 12,
+ 12, 16, 13, 16, 11, 14, 12, 12, 14, 16, 14, 16, 12, 16, 13, 12,
+ 16, 16, 16, 16, 16, 16, 16, 13, 12, 16, 13, 16, 12, 16, 13, 12,
+ 12, 16, 13, 16, 12, 16, 12, 12, 16, 16, 16, 16, 14, 16, 14, 12,
+ 14, 16, 14, 16, 14, 16, 14, 13, 13, 16, 13, 16, 13, 16, 13, 12,
+ 16, 16, 13, 16, 13, 16, 12, 11, 9, 16, 12, 15, 13, 16, 13, 12,
+ 13, 16, 14, 16, 14, 16, 14, 13, 16, 16, 16, 16, 16, 16, 16, 14,
+ 10, 16, 12, 15, 12, 15, 13, 12, 13, 16, 14, 16, 13, 16, 13, 12,
+ 16, 16, 16, 16, 16, 16, 16, 13, 12, 16, 12, 16, 13, 16, 13, 12,
+ 14, 16, 13, 16, 14, 16, 13, 12, 16, 16, 16, 16, 16, 16, 16, 13,
+ 11, 16, 12, 15, 12, 16, 13, 12, 14, 16, 14, 16, 13, 16, 13, 13,
+ 16, 16, 16, 16, 16, 16, 15, 13, 11, 16, 12, 16, 12, 16, 13, 12,
+ 13, 16, 13, 15, 13, 16, 13, 12, 16, 16, 16, 16, 16, 16, 15, 13,
+ 12, 16, 12, 16, 13, 16, 13, 12, 13, 16, 12, 16, 13, 16, 13, 12,
+ 16, 16, 14, 16, 16, 16, 14, 12, 12, 16, 14, 16, 11, 16, 13, 12,
+ 14, 16, 14, 16, 13, 16, 13, 12, 16, 16, 16, 16, 16, 16, 16, 13,
+ 12, 16, 13, 16, 12, 16, 13, 12, 13, 16, 13, 16, 12, 16, 13, 12,
+ 16, 16, 16, 16, 14, 16, 14, 12, 13, 16, 13, 16, 13, 16, 13, 13,
+ 13, 16, 12, 16, 12, 16, 12, 12, 14, 16, 13, 15, 13, 16, 12, 11,
+ },
+ },
+};
+
+
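+/* Assumption based on the table name and value range (all entries <= 16): these are
+   VLC code lengths, in bits, for the second coefficient pattern of intra blocks; the
+   decoder presumably builds canonical codes from these lengths at init time. */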
+static const uint8_t rv34_table_intra_secondpat[NUM_INTRA_TABLES][2][OTHERBLK_VLC_SIZE] = {
+ {
+ {
+ 0, 5, 10, 3, 6, 10, 7, 8, 9, 4, 6, 10, 6, 7, 9, 8,
+ 8, 9, 8, 8, 9, 8, 9, 9, 9, 9, 8, 3, 6, 10, 4, 6,
+ 10, 7, 7, 9, 5, 7, 10, 6, 7, 9, 7, 7, 8, 7, 8, 9,
+ 8, 8, 9, 8, 8, 7, 6, 8, 10, 6, 8, 10, 7, 8, 9, 7,
+ 8, 10, 7, 8, 10, 8, 8, 8, 8, 9, 9, 8, 8, 9, 9, 8,
+ 7, 7, 8, 9, 7, 8, 9, 7, 7, 7, 8, 8, 9, 7, 8, 9,
+ 7, 7, 7, 8, 8, 8, 7, 7, 7, 7, 6, 5,
+ },
+ {
+ 0, 5, 11, 3, 6, 11, 8, 9, 11, 3, 6, 10, 6, 7, 11, 9,
+ 9, 11, 7, 9, 11, 9, 9, 11, 10, 10, 11, 2, 6, 10, 4, 7,
+ 10, 7, 9, 11, 4, 7, 11, 6, 7, 10, 9, 9, 11, 7, 9, 11,
+ 8, 9, 10, 10, 10, 10, 5, 8, 11, 6, 8, 11, 8, 9, 11, 6,
+ 8, 11, 7, 8, 11, 9, 9, 11, 8, 10, 11, 9, 9, 11, 10, 10,
+ 10, 8, 9, 11, 8, 9, 11, 9, 9, 10, 8, 9, 11, 8, 9, 11,
+ 9, 9, 10, 8, 9, 10, 9, 9, 10, 9, 9, 8,
+ },
+ },
+ {
+ {
+ 0, 5, 10, 4, 6, 10, 7, 8, 10, 4, 6, 10, 6, 7, 9, 8,
+ 8, 9, 8, 8, 9, 8, 9, 9, 9, 9, 9, 2, 6, 10, 4, 6,
+ 10, 7, 7, 9, 5, 7, 10, 6, 7, 9, 7, 7, 9, 7, 8, 9,
+ 8, 8, 9, 9, 8, 8, 6, 8, 10, 6, 8, 10, 7, 8, 9, 6,
+ 8, 10, 7, 8, 10, 8, 8, 9, 8, 9, 10, 8, 8, 9, 9, 9,
+ 8, 8, 8, 10, 7, 8, 9, 7, 8, 8, 7, 8, 10, 7, 8, 9,
+ 7, 7, 8, 8, 8, 9, 8, 8, 8, 7, 7, 6,
+ },
+ {
+ 0, 5, 12, 4, 7, 12, 8, 10, 13, 4, 7, 12, 6, 8, 12, 10,
+ 10, 12, 8, 9, 12, 10, 10, 12, 12, 12, 12, 1, 6, 12, 4, 7,
+ 12, 8, 9, 12, 4, 7, 12, 6, 8, 11, 9, 10, 12, 8, 9, 12,
+ 9, 10, 11, 11, 11, 12, 6, 8, 12, 7, 9, 12, 9, 10, 13, 6,
+ 9, 12, 8, 9, 12, 10, 10, 12, 9, 10, 12, 10, 10, 12, 12, 12,
+ 12, 8, 10, 12, 9, 10, 12, 10, 10, 12, 8, 10, 12, 9, 10, 12,
+ 10, 10, 11, 9, 10, 12, 10, 10, 11, 11, 10, 10,
+ },
+ },
+ {
+ {
+ 0, 5, 10, 3, 6, 10, 7, 8, 11, 4, 6, 10, 6, 7, 10, 8,
+ 9, 10, 8, 8, 10, 9, 9, 10, 10, 10, 10, 2, 6, 10, 4, 6,
+ 10, 7, 8, 10, 4, 7, 10, 6, 7, 10, 8, 8, 10, 7, 8, 10,
+ 8, 8, 9, 10, 9, 9, 5, 8, 11, 6, 8, 10, 7, 9, 10, 6,
+ 8, 11, 7, 8, 10, 8, 8, 10, 8, 9, 11, 9, 9, 10, 10, 9,
+ 9, 8, 9, 10, 8, 9, 10, 8, 9, 10, 8, 9, 10, 8, 8, 10,
+ 8, 8, 9, 8, 9, 10, 8, 8, 9, 9, 8, 8,
+ },
+ {
+ 0, 6, 13, 4, 7, 14, 9, 11, 14, 3, 7, 13, 7, 8, 13, 11,
+ 11, 14, 8, 10, 13, 10, 11, 13, 13, 13, 14, 1, 6, 12, 4, 8,
+ 13, 9, 10, 15, 4, 8, 13, 7, 8, 12, 11, 11, 14, 8, 10, 13,
+ 10, 10, 13, 13, 13, 14, 5, 9, 13, 7, 9, 13, 10, 11, 14, 6,
+ 10, 14, 8, 10, 14, 11, 11, 14, 9, 11, 14, 11, 11, 13, 13, 13,
+ 14, 9, 10, 14, 9, 11, 13, 11, 12, 14, 9, 11, 13, 9, 11, 14,
+ 11, 12, 13, 10, 12, 15, 11, 11, 13, 13, 12, 13,
+ },
+ },
+ {
+ {
+ 0, 5, 11, 3, 6, 11, 7, 9, 12, 3, 6, 11, 6, 7, 11, 9,
+ 9, 11, 8, 9, 11, 9, 9, 11, 11, 11, 12, 2, 6, 11, 4, 6,
+ 11, 7, 9, 11, 4, 7, 11, 5, 7, 10, 9, 9, 11, 7, 8, 11,
+ 9, 9, 10, 11, 11, 11, 5, 8, 11, 6, 8, 11, 8, 9, 12, 6,
+ 8, 11, 7, 8, 11, 9, 9, 11, 8, 9, 12, 9, 9, 11, 11, 11,
+ 11, 8, 10, 12, 8, 10, 11, 9, 10, 12, 8, 10, 12, 8, 9, 12,
+ 10, 10, 12, 9, 10, 12, 9, 9, 11, 11, 10, 11,
+ },
+ {
+ 0, 6, 13, 3, 8, 14, 10, 12, 16, 3, 8, 15, 7, 9, 15, 12,
+ 13, 15, 9, 11, 15, 11, 12, 16, 14, 16, 16, 1, 7, 13, 4, 8,
+ 14, 9, 11, 15, 4, 8, 14, 7, 9, 14, 12, 13, 15, 8, 10, 14,
+ 11, 11, 14, 16, 14, 16, 6, 9, 14, 7, 10, 14, 11, 13, 15, 7,
+ 10, 14, 9, 10, 13, 12, 12, 15, 10, 11, 14, 11, 11, 14, 14, 14,
+ 16, 9, 11, 14, 10, 11, 14, 13, 14, 15, 9, 12, 14, 10, 12, 16,
+ 13, 14, 16, 10, 13, 16, 12, 12, 14, 15, 14, 15,
+ },
+ },
+ {
+ {
+ 0, 6, 12, 3, 7, 12, 9, 11, 13, 4, 7, 12, 6, 8, 12, 10,
+ 11, 13, 8, 10, 13, 10, 11, 13, 13, 13, 14, 1, 6, 12, 4, 7,
+ 12, 9, 10, 14, 4, 7, 12, 6, 7, 12, 10, 11, 13, 8, 9, 13,
+ 10, 10, 12, 13, 13, 14, 6, 9, 13, 7, 9, 13, 10, 12, 14, 7,
+ 9, 13, 8, 10, 13, 11, 11, 14, 9, 11, 13, 11, 11, 14, 13, 13,
+ 14, 10, 12, 14, 10, 12, 14, 12, 13, 15, 10, 12, 14, 10, 12, 14,
+ 12, 13, 15, 11, 13, 15, 12, 12, 15, 14, 14, 14,
+ },
+ {
+ 0, 6, 16, 3, 8, 16, 10, 13, 16, 3, 8, 16, 7, 9, 16, 13,
+ 16, 16, 8, 10, 16, 11, 13, 16, 16, 16, 16, 1, 7, 14, 4, 8,
+ 16, 10, 12, 16, 4, 8, 13, 7, 9, 16, 13, 14, 16, 8, 10, 16,
+ 11, 11, 14, 16, 16, 16, 6, 9, 14, 8, 10, 14, 12, 16, 16, 6,
+ 10, 13, 9, 11, 16, 13, 14, 16, 9, 12, 16, 12, 11, 16, 16, 16,
+ 16, 10, 12, 16, 11, 12, 16, 16, 14, 16, 9, 12, 16, 11, 12, 16,
+ 16, 15, 16, 10, 13, 16, 12, 13, 16, 16, 16, 16,
+ },
+ },
+};
+
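+/* Presumably the matching code-length table for the third intra coefficient pattern,
+   mirroring rv34_table_intra_secondpat above (same dimensions and value range). */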
+static const uint8_t rv34_table_intra_thirdpat[NUM_INTRA_TABLES][2][OTHERBLK_VLC_SIZE] = {
+ {
+ {
+ 0, 5, 10, 3, 6, 10, 7, 8, 10, 4, 7, 10, 6, 7, 10, 8,
+ 8, 10, 8, 9, 10, 9, 9, 10, 9, 9, 9, 2, 6, 10, 4, 7,
+ 10, 7, 8, 9, 5, 7, 10, 6, 7, 10, 8, 8, 9, 8, 9, 10,
+ 8, 8, 9, 9, 9, 8, 6, 8, 11, 6, 8, 10, 7, 8, 10, 6,
+ 8, 11, 7, 8, 10, 8, 8, 9, 8, 9, 10, 9, 9, 10, 9, 9,
+ 9, 7, 8, 10, 7, 8, 10, 7, 8, 8, 7, 8, 10, 7, 8, 9,
+ 7, 8, 8, 8, 8, 9, 8, 8, 8, 7, 7, 7,
+ },
+ {
+ 0, 4, 10, 3, 6, 10, 7, 8, 11, 3, 6, 10, 5, 7, 10, 9,
+ 9, 11, 9, 10, 11, 9, 10, 11, 11, 11, 11, 2, 6, 10, 4, 6,
+ 10, 7, 8, 10, 4, 7, 10, 6, 7, 10, 8, 9, 10, 8, 9, 11,
+ 9, 9, 11, 10, 10, 11, 6, 8, 11, 6, 8, 11, 8, 9, 11, 7,
+ 9, 11, 7, 8, 11, 9, 9, 11, 9, 10, 12, 10, 10, 12, 11, 11,
+ 11, 8, 9, 11, 8, 9, 11, 9, 9, 11, 9, 10, 11, 9, 10, 11,
+ 9, 10, 11, 10, 11, 12, 10, 10, 12, 10, 10, 10,
+ },
+ },
+ {
+ {
+ 0, 5, 10, 3, 6, 10, 7, 8, 10, 4, 7, 10, 6, 7, 10, 8,
+ 9, 10, 8, 9, 11, 8, 9, 10, 10, 10, 10, 2, 6, 10, 4, 6,
+ 10, 7, 8, 10, 4, 7, 10, 5, 7, 10, 8, 8, 10, 8, 9, 10,
+ 8, 9, 10, 9, 9, 9, 5, 7, 11, 6, 8, 11, 7, 8, 11, 6,
+ 8, 11, 7, 8, 10, 8, 9, 10, 8, 9, 11, 9, 9, 10, 10, 9,
+ 10, 7, 8, 10, 7, 8, 10, 8, 9, 10, 8, 9, 10, 8, 9, 10,
+ 8, 8, 10, 9, 9, 10, 9, 9, 10, 9, 9, 9,
+ },
+ {
+ 0, 5, 11, 3, 6, 11, 8, 9, 12, 4, 7, 12, 6, 7, 12, 9,
+ 10, 13, 10, 11, 13, 10, 11, 14, 12, 13, 14, 1, 6, 11, 4, 7,
+ 11, 8, 9, 12, 5, 7, 11, 6, 8, 12, 9, 10, 13, 10, 11, 14,
+ 10, 11, 13, 12, 12, 14, 6, 8, 12, 7, 9, 13, 9, 10, 14, 7,
+ 10, 13, 8, 10, 12, 11, 11, 13, 11, 13, 14, 11, 12, 14, 13, 13,
+ 15, 9, 10, 12, 9, 11, 14, 10, 11, 14, 11, 11, 13, 10, 11, 13,
+ 11, 12, 14, 12, 14, 15, 13, 13, 14, 13, 13, 14,
+ },
+ },
+ {
+ {
+ 0, 5, 11, 3, 6, 11, 7, 9, 11, 4, 6, 11, 5, 7, 10, 9,
+ 9, 11, 8, 9, 11, 9, 10, 11, 11, 11, 11, 2, 6, 10, 3, 6,
+ 10, 7, 9, 11, 4, 7, 10, 5, 7, 10, 8, 9, 11, 8, 9, 11,
+ 9, 9, 11, 11, 11, 11, 5, 8, 11, 6, 8, 11, 8, 10, 12, 6,
+ 8, 11, 7, 8, 11, 9, 10, 11, 9, 10, 12, 9, 10, 11, 11, 11,
+ 11, 8, 9, 11, 8, 10, 12, 9, 11, 12, 8, 10, 12, 9, 10, 12,
+ 10, 11, 12, 10, 11, 12, 10, 10, 11, 11, 11, 11,
+ },
+ {
+ 0, 5, 13, 2, 7, 16, 9, 11, 16, 4, 8, 16, 7, 9, 16, 12,
+ 12, 16, 12, 16, 16, 12, 16, 16, 16, 16, 16, 1, 6, 13, 4, 8,
+ 16, 9, 11, 16, 6, 9, 16, 7, 10, 16, 13, 13, 16, 13, 15, 16,
+ 12, 16, 16, 16, 16, 16, 7, 9, 16, 8, 11, 15, 11, 13, 16, 10,
+ 12, 16, 10, 12, 16, 16, 13, 16, 16, 16, 16, 14, 16, 16, 16, 16,
+ 16, 12, 12, 16, 12, 16, 16, 16, 16, 16, 13, 14, 16, 12, 13, 16,
+ 16, 16, 16, 16, 16, 16, 14, 16, 16, 16, 16, 16,
+ },
+ },
+ {
+ {
+ 0, 6, 11, 3, 7, 11, 8, 10, 12, 4, 7, 11, 6, 8, 11, 10,
+ 11, 12, 9, 10, 12, 10, 10, 12, 12, 12, 13, 1, 6, 11, 4, 7,
+ 11, 8, 10, 12, 4, 7, 11, 6, 8, 11, 10, 10, 12, 9, 10, 12,
+ 10, 10, 12, 13, 13, 13, 6, 8, 12, 7, 10, 12, 10, 12, 13, 7,
+ 9, 12, 8, 10, 12, 11, 11, 13, 11, 12, 14, 11, 11, 13, 13, 13,
+ 13, 9, 11, 13, 10, 12, 14, 12, 13, 15, 10, 12, 14, 11, 12, 14,
+ 13, 13, 14, 12, 13, 15, 13, 13, 14, 14, 14, 14,
+ },
+ {
+ 0, 5, 16, 2, 6, 16, 10, 14, 16, 4, 8, 16, 7, 9, 16, 11,
+ 16, 16, 11, 16, 16, 16, 16, 16, 16, 16, 16, 1, 6, 12, 4, 8,
+ 12, 12, 12, 16, 6, 8, 16, 8, 10, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 12, 16, 16, 7, 10, 16, 8, 11, 14, 16, 16, 16, 10,
+ 12, 16, 10, 16, 16, 16, 12, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 12, 16, 16, 16, 16, 16, 16, 16, 16, 12, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ },
+ },
+ {
+ {
+ 0, 5, 11, 3, 6, 11, 10, 10, 12, 3, 7, 11, 6, 8, 11, 11,
+ 11, 12, 10, 10, 12, 11, 11, 13, 14, 13, 14, 1, 6, 11, 4, 7,
+ 11, 10, 11, 13, 5, 7, 11, 7, 8, 11, 11, 11, 13, 10, 11, 13,
+ 11, 11, 12, 13, 13, 14, 7, 10, 12, 9, 11, 13, 12, 13, 14, 9,
+ 10, 13, 9, 10, 13, 12, 11, 13, 12, 13, 16, 12, 13, 13, 14, 14,
+ 14, 11, 14, 16, 12, 14, 15, 14, 13, 16, 13, 13, 15, 13, 14, 16,
+ 14, 13, 16, 13, 13, 16, 13, 14, 15, 15, 14, 15,
+ },
+ {
+ 0, 4, 16, 2, 7, 16, 10, 16, 16, 4, 10, 16, 7, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 1, 6, 13, 4, 11,
+ 16, 16, 16, 16, 6, 10, 16, 8, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 8, 16, 16, 10, 16, 16, 16, 16, 16, 10,
+ 16, 16, 10, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ },
+ },
+};
+
+
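+/* Assumption: code lengths for the remaining intra coefficient values, one 32-entry
+   list per intra table set (indexed by NUM_INTRA_TABLES like the pattern tables). */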
+static const uint8_t rv34_intra_coeff[NUM_INTRA_TABLES][COEFF_VLC_SIZE] = {
+{
+ 1, 3, 3, 4, 4, 5, 6, 6, 6, 7, 7, 7, 8, 8, 9, 9,
+ 9, 9, 10, 10, 10, 11, 11, 11, 10, 10, 10, 12, 13, 14, 15, 15,
+},
+{
+ 1, 2, 3, 5, 5, 6, 6, 7, 7, 8, 8, 9, 10, 10, 10, 11,
+ 11, 11, 12, 12, 13, 13, 13, 13, 13, 13, 13, 14, 16, 16, 16, 16,
+},
+{
+ 1, 2, 3, 4, 5, 6, 8, 8, 9, 9, 10, 10, 11, 12, 12, 12,
+ 13, 13, 14, 14, 14, 14, 16, 16, 14, 16, 16, 16, 14, 16, 16, 16,
+},
+{
+ 1, 2, 3, 4, 5, 6, 8, 8, 9, 9, 10, 10, 11, 12, 12, 12,
+ 13, 13, 16, 16, 16, 16, 16, 16, 16, 16, 14, 12, 16, 16, 16, 16,
+},
+{
+ 1, 2, 3, 4, 5, 7, 7, 8, 8, 9, 10, 10, 12, 11, 13, 12,
+ 15, 13, 14, 13, 12, 15, 14, 13, 12, 12, 10, 11, 16, 16, 16, 16,
+}
+};
+
+
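+/* Assumption: code lengths for the coded-block-pattern syntax of inter blocks, with
+   NUM_INTER_TABLES sets paralleling the intra CBP tables earlier in this file. */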
+static const uint8_t rv34_inter_cbppat[NUM_INTER_TABLES][CBPPAT_VLC_SIZE] = {
+{
+ 7, 9, 9, 8, 9, 8, 9, 8, 9, 9, 8, 8, 8, 8, 8, 4,
+ 7, 10, 11, 10, 11, 10, 12, 10, 12, 11, 11, 10, 11, 10, 10, 7,
+ 10, 11, 15, 12, 15, 12, 15, 12, 15, 14, 14, 12, 14, 12, 14, 9,
+ 7, 11, 10, 10, 12, 11, 11, 10, 11, 12, 10, 10, 11, 10, 10, 7,
+ 8, 12, 12, 11, 13, 12, 12, 10, 13, 13, 12, 10, 12, 11, 11, 7,
+ 11, 13, 15, 11, 15, 13, 15, 12, 16, 14, 14, 12, 15, 13, 13, 9,
+ 10, 15, 11, 12, 15, 14, 14, 12, 15, 15, 12, 12, 14, 14, 12, 9,
+ 11, 15, 13, 12, 16, 15, 14, 12, 15, 15, 13, 12, 15, 14, 13, 9,
+ 13, 15, 14, 10, 16, 15, 16, 11, 16, 16, 15, 12, 16, 15, 15, 9,
+ 7, 11, 11, 11, 11, 10, 11, 10, 11, 12, 11, 10, 10, 10, 10, 7,
+ 9, 12, 13, 12, 12, 11, 13, 10, 13, 13, 12, 11, 12, 10, 11, 7,
+ 12, 13, 16, 14, 15, 12, 16, 12, 16, 15, 15, 13, 15, 12, 14, 9,
+ 9, 13, 13, 12, 13, 12, 13, 11, 13, 13, 12, 11, 12, 11, 11, 7,
+ 9, 13, 13, 12, 13, 12, 13, 11, 13, 13, 13, 11, 12, 11, 11, 7,
+ 12, 14, 15, 13, 16, 13, 15, 11, 16, 14, 15, 12, 15, 12, 13, 8,
+ 12, 16, 14, 14, 16, 15, 15, 13, 16, 15, 14, 13, 15, 14, 13, 9,
+ 12, 15, 14, 13, 15, 14, 15, 12, 16, 15, 14, 12, 14, 13, 13, 8,
+ 13, 16, 16, 12, 16, 14, 16, 11, 16, 16, 15, 12, 16, 14, 14, 8,
+ 10, 15, 15, 15, 12, 12, 14, 12, 14, 15, 15, 14, 12, 12, 13, 9,
+ 11, 15, 16, 14, 13, 12, 15, 12, 16, 15, 15, 14, 14, 12, 13, 9,
+ 14, 15, 16, 16, 15, 11, 16, 12, 16, 16, 16, 15, 16, 12, 15, 9,
+ 12, 16, 16, 15, 14, 14, 14, 13, 16, 16, 15, 14, 14, 13, 13, 9,
+ 12, 15, 15, 14, 14, 13, 15, 12, 16, 15, 14, 13, 14, 13, 13, 8,
+ 13, 16, 16, 15, 16, 12, 16, 11, 16, 16, 16, 14, 16, 13, 14, 8,
+ 14, 16, 16, 16, 16, 16, 15, 14, 16, 16, 16, 15, 16, 15, 14, 11,
+ 13, 16, 16, 15, 16, 15, 15, 12, 16, 16, 16, 14, 15, 14, 14, 9,
+ 14, 16, 16, 13, 16, 14, 16, 10, 16, 16, 16, 13, 16, 14, 14, 8,
+ 7, 12, 11, 11, 11, 11, 12, 10, 11, 11, 10, 10, 10, 10, 10, 7,
+ 9, 13, 13, 12, 13, 12, 13, 11, 13, 13, 12, 11, 12, 11, 11, 8,
+ 12, 14, 16, 14, 16, 14, 16, 13, 16, 14, 15, 13, 15, 13, 14, 9,
+ 9, 13, 12, 12, 13, 12, 13, 11, 12, 13, 11, 10, 12, 11, 11, 7,
+ 9, 13, 13, 12, 13, 12, 13, 11, 13, 13, 12, 11, 12, 11, 11, 7,
+ 12, 14, 16, 13, 16, 14, 15, 12, 15, 15, 14, 12, 15, 13, 13, 8,
+ 11, 15, 13, 14, 15, 15, 14, 13, 15, 15, 12, 12, 14, 14, 12, 9,
+ 11, 15, 14, 13, 15, 14, 14, 12, 15, 14, 13, 11, 14, 13, 12, 8,
+ 13, 16, 15, 12, 16, 15, 16, 12, 16, 16, 14, 11, 15, 14, 14, 8,
+ 8, 13, 13, 12, 12, 12, 13, 11, 12, 13, 12, 11, 11, 10, 10, 7,
+ 9, 13, 14, 12, 13, 12, 13, 11, 13, 13, 13, 11, 12, 11, 11, 7,
+ 12, 14, 16, 14, 15, 13, 15, 12, 15, 15, 15, 13, 14, 12, 13, 8,
+ 9, 13, 13, 12, 13, 12, 13, 11, 13, 13, 12, 11, 12, 11, 11, 7,
+ 9, 13, 12, 12, 13, 12, 12, 10, 13, 13, 12, 10, 12, 10, 10, 6,
+ 11, 14, 14, 12, 14, 12, 14, 11, 14, 14, 13, 11, 13, 11, 12, 7,
+ 12, 16, 14, 14, 15, 15, 14, 13, 15, 15, 13, 12, 14, 13, 12, 8,
+ 11, 14, 13, 12, 14, 13, 13, 11, 14, 14, 13, 11, 13, 12, 11, 7,
+ 11, 14, 14, 12, 15, 13, 14, 11, 15, 14, 13, 11, 14, 12, 12, 6,
+ 11, 16, 15, 15, 13, 14, 15, 13, 14, 15, 15, 13, 12, 12, 12, 9,
+ 12, 15, 15, 14, 14, 13, 15, 12, 15, 15, 14, 13, 13, 11, 12, 8,
+ 13, 16, 16, 15, 16, 13, 16, 13, 16, 16, 15, 14, 14, 12, 14, 8,
+ 11, 16, 15, 14, 14, 14, 14, 13, 15, 15, 14, 13, 13, 12, 12, 8,
+ 11, 14, 14, 13, 13, 12, 14, 11, 14, 14, 13, 12, 12, 11, 11, 7,
+ 12, 14, 15, 13, 14, 12, 14, 11, 15, 14, 14, 12, 13, 11, 12, 7,
+ 13, 16, 16, 16, 16, 15, 16, 14, 16, 16, 15, 14, 15, 14, 13, 9,
+ 12, 15, 14, 13, 15, 13, 14, 12, 15, 15, 13, 12, 13, 12, 12, 7,
+ 11, 15, 14, 12, 14, 13, 14, 10, 15, 14, 13, 11, 13, 11, 11, 5,
+ 10, 15, 15, 15, 15, 14, 15, 13, 12, 14, 12, 12, 12, 13, 12, 9,
+ 12, 16, 16, 15, 16, 15, 16, 14, 14, 15, 14, 13, 14, 13, 13, 9,
+ 14, 16, 16, 16, 16, 16, 16, 15, 16, 16, 16, 14, 16, 14, 15, 11,
+ 11, 15, 15, 14, 15, 15, 15, 14, 14, 15, 12, 12, 13, 13, 12, 9,
+ 12, 15, 15, 14, 16, 14, 15, 13, 14, 14, 13, 12, 14, 13, 12, 8,
+ 13, 16, 16, 15, 16, 16, 16, 14, 16, 16, 15, 12, 16, 14, 14, 9,
+ 13, 16, 14, 16, 16, 16, 16, 15, 15, 16, 11, 12, 16, 15, 12, 9,
+ 13, 16, 15, 14, 16, 15, 16, 14, 15, 16, 12, 11, 15, 14, 13, 8,
+ 13, 16, 16, 13, 16, 16, 16, 13, 16, 16, 13, 11, 16, 14, 14, 8,
+ 11, 15, 15, 15, 14, 14, 15, 13, 13, 15, 14, 13, 12, 12, 12, 9,
+ 11, 15, 16, 14, 15, 14, 15, 13, 14, 14, 14, 13, 13, 12, 13, 8,
+ 13, 16, 16, 16, 16, 15, 16, 14, 16, 16, 16, 13, 15, 12, 14, 9,
+ 11, 16, 15, 14, 14, 14, 15, 13, 14, 14, 13, 12, 13, 12, 11, 8,
+ 11, 14, 14, 13, 14, 13, 14, 12, 13, 13, 12, 11, 12, 11, 11, 7,
+ 12, 15, 15, 13, 15, 14, 15, 12, 15, 14, 14, 11, 13, 12, 12, 7,
+ 13, 16, 15, 15, 16, 16, 16, 14, 15, 16, 12, 12, 14, 14, 12, 8,
+ 11, 15, 14, 13, 15, 13, 14, 12, 14, 14, 12, 11, 13, 12, 11, 6,
+ 11, 14, 14, 12, 15, 13, 14, 11, 15, 14, 12, 10, 13, 11, 11, 5,
+ 12, 16, 16, 16, 15, 15, 16, 15, 14, 16, 15, 15, 10, 12, 12, 9,
+ 13, 16, 16, 16, 15, 14, 16, 13, 15, 15, 15, 14, 12, 11, 13, 8,
+ 14, 16, 16, 16, 16, 14, 16, 13, 16, 16, 16, 14, 14, 11, 14, 8,
+ 13, 16, 16, 15, 15, 15, 15, 14, 15, 16, 14, 13, 12, 12, 11, 8,
+ 11, 15, 15, 13, 14, 13, 14, 12, 14, 14, 13, 12, 12, 11, 11, 6,
+ 11, 15, 15, 13, 15, 12, 14, 11, 14, 14, 13, 11, 12, 10, 11, 5,
+ 13, 16, 16, 15, 16, 16, 16, 14, 16, 16, 14, 14, 14, 13, 11, 8,
+ 11, 14, 14, 13, 14, 13, 14, 11, 14, 14, 12, 11, 12, 11, 10, 5,
+ 10, 13, 13, 11, 13, 12, 13, 9, 13, 13, 12, 9, 12, 10, 10, 3,
+},
+{
+ 5, 7, 7, 7, 7, 7, 8, 7, 7, 8, 7, 7, 7, 7, 7, 4,
+ 7, 9, 11, 9, 11, 9, 11, 9, 11, 10, 10, 9, 10, 9, 10, 6,
+ 11, 11, 14, 11, 14, 11, 14, 11, 15, 13, 14, 12, 14, 12, 13, 9,
+ 6, 11, 10, 9, 11, 10, 11, 9, 11, 11, 9, 9, 10, 10, 9, 6,
+ 8, 11, 11, 10, 12, 11, 12, 10, 12, 12, 11, 10, 12, 11, 11, 7,
+ 11, 13, 14, 11, 15, 13, 15, 11, 15, 14, 14, 12, 14, 13, 13, 9,
+ 10, 14, 11, 11, 15, 14, 13, 12, 14, 14, 11, 11, 14, 13, 12, 9,
+ 11, 14, 13, 11, 15, 14, 14, 11, 15, 15, 13, 11, 14, 14, 13, 9,
+ 12, 14, 14, 10, 16, 15, 16, 11, 16, 16, 15, 11, 16, 15, 14, 9,
+ 6, 10, 11, 10, 10, 9, 11, 9, 10, 11, 10, 10, 9, 9, 9, 6,
+ 9, 12, 12, 11, 12, 10, 12, 10, 12, 12, 12, 11, 11, 10, 11, 7,
+ 12, 13, 15, 13, 14, 11, 15, 11, 15, 15, 14, 13, 14, 12, 14, 9,
+ 9, 12, 12, 11, 12, 11, 12, 11, 12, 13, 11, 11, 12, 11, 11, 7,
+ 9, 12, 12, 11, 13, 11, 12, 10, 13, 13, 12, 11, 12, 11, 11, 7,
+ 12, 14, 15, 12, 15, 12, 14, 11, 15, 15, 14, 12, 14, 13, 13, 8,
+ 12, 15, 14, 13, 15, 14, 14, 13, 16, 16, 14, 13, 15, 14, 13, 9,
+ 12, 15, 14, 13, 15, 14, 14, 12, 15, 15, 13, 12, 14, 13, 13, 9,
+ 13, 15, 15, 12, 16, 14, 15, 11, 16, 16, 15, 12, 15, 14, 14, 9,
+ 10, 14, 14, 14, 12, 11, 13, 12, 14, 15, 14, 13, 12, 11, 12, 9,
+ 12, 14, 15, 14, 13, 11, 14, 12, 15, 15, 15, 14, 13, 11, 13, 9,
+ 13, 15, 16, 15, 14, 11, 16, 11, 16, 16, 16, 14, 15, 12, 15, 9,
+ 12, 15, 15, 14, 14, 14, 14, 13, 15, 15, 14, 14, 14, 13, 13, 9,
+ 12, 15, 15, 14, 14, 13, 14, 12, 15, 15, 14, 13, 14, 13, 13, 9,
+ 13, 15, 16, 14, 15, 13, 16, 11, 16, 16, 15, 14, 15, 13, 14, 9,
+ 14, 16, 16, 16, 16, 16, 15, 14, 16, 16, 16, 16, 16, 16, 14, 11,
+ 14, 16, 16, 14, 16, 15, 15, 12, 16, 16, 16, 14, 15, 14, 14, 9,
+ 14, 16, 16, 14, 16, 14, 16, 11, 16, 16, 16, 14, 16, 14, 14, 9,
+ 6, 11, 10, 10, 10, 10, 11, 10, 10, 11, 9, 9, 9, 9, 9, 6,
+ 9, 12, 12, 11, 13, 11, 13, 11, 12, 12, 11, 11, 12, 11, 11, 7,
+ 12, 14, 16, 13, 16, 14, 16, 13, 15, 14, 15, 12, 15, 13, 14, 9,
+ 8, 12, 11, 11, 12, 12, 12, 11, 11, 12, 10, 10, 11, 11, 10, 7,
+ 9, 12, 12, 11, 13, 12, 13, 11, 13, 12, 11, 10, 12, 11, 11, 7,
+ 12, 14, 15, 12, 15, 14, 15, 12, 15, 14, 14, 12, 14, 13, 13, 9,
+ 11, 15, 13, 13, 15, 14, 14, 13, 14, 15, 11, 11, 14, 14, 12, 9,
+ 11, 14, 13, 12, 15, 14, 14, 12, 14, 14, 12, 11, 14, 13, 12, 8,
+ 13, 15, 15, 12, 16, 15, 15, 12, 15, 15, 14, 11, 15, 14, 14, 8,
+ 8, 12, 12, 11, 11, 11, 12, 11, 11, 12, 11, 11, 10, 10, 10, 7,
+ 9, 13, 13, 12, 13, 11, 13, 11, 12, 13, 12, 11, 11, 10, 11, 7,
+ 12, 14, 15, 14, 15, 13, 15, 12, 15, 14, 14, 13, 14, 12, 13, 9,
+ 9, 13, 12, 12, 12, 12, 12, 11, 12, 13, 11, 11, 11, 11, 10, 7,
+ 9, 12, 12, 11, 12, 11, 12, 10, 12, 12, 11, 10, 11, 10, 10, 7,
+ 11, 13, 14, 12, 14, 12, 14, 11, 14, 13, 13, 11, 13, 11, 12, 7,
+ 12, 15, 14, 13, 15, 14, 14, 13, 15, 15, 13, 12, 13, 13, 12, 9,
+ 11, 14, 13, 12, 14, 13, 13, 11, 14, 14, 12, 11, 13, 12, 11, 7,
+ 11, 14, 14, 12, 14, 13, 14, 11, 14, 14, 13, 11, 13, 12, 12, 7,
+ 11, 15, 15, 14, 13, 13, 14, 13, 14, 15, 14, 13, 11, 11, 12, 9,
+ 12, 15, 15, 14, 14, 12, 14, 12, 14, 14, 14, 13, 12, 11, 12, 8,
+ 13, 16, 16, 15, 15, 12, 16, 13, 16, 15, 15, 14, 14, 12, 14, 9,
+ 12, 15, 15, 14, 14, 14, 14, 13, 15, 15, 14, 13, 12, 12, 12, 9,
+ 11, 14, 14, 13, 13, 12, 13, 11, 14, 13, 13, 12, 12, 11, 11, 7,
+ 12, 14, 15, 13, 14, 12, 14, 11, 15, 14, 13, 12, 13, 11, 12, 7,
+ 13, 16, 16, 15, 16, 15, 15, 14, 16, 16, 15, 14, 14, 14, 12, 9,
+ 12, 15, 14, 13, 14, 13, 14, 12, 15, 14, 13, 12, 13, 12, 12, 8,
+ 12, 14, 14, 13, 15, 13, 14, 11, 14, 14, 13, 12, 13, 12, 12, 6,
+ 10, 14, 14, 13, 14, 14, 14, 13, 12, 13, 12, 12, 12, 12, 11, 9,
+ 12, 15, 15, 14, 15, 14, 16, 14, 14, 14, 13, 12, 14, 13, 13, 9,
+ 14, 16, 16, 16, 16, 16, 16, 15, 16, 16, 16, 14, 16, 14, 16, 11,
+ 11, 15, 14, 14, 15, 14, 14, 14, 13, 14, 11, 12, 13, 13, 12, 9,
+ 12, 15, 14, 14, 15, 14, 15, 13, 14, 14, 13, 12, 14, 13, 13, 9,
+ 13, 16, 16, 14, 16, 15, 16, 14, 16, 15, 15, 12, 16, 14, 14, 9,
+ 13, 16, 14, 15, 16, 16, 16, 14, 14, 16, 11, 12, 15, 14, 12, 9,
+ 13, 16, 15, 14, 16, 15, 16, 14, 15, 15, 12, 11, 15, 14, 13, 9,
+ 14, 16, 16, 13, 16, 16, 16, 14, 16, 15, 13, 11, 16, 14, 14, 9,
+ 11, 15, 15, 14, 14, 14, 14, 13, 13, 14, 13, 13, 11, 11, 11, 9,
+ 12, 15, 15, 14, 15, 14, 15, 13, 14, 14, 13, 13, 13, 12, 12, 9,
+ 13, 16, 16, 16, 16, 14, 16, 14, 16, 15, 16, 14, 15, 12, 14, 9,
+ 11, 15, 14, 14, 15, 14, 14, 13, 14, 14, 12, 12, 12, 12, 11, 8,
+ 11, 14, 14, 13, 14, 13, 13, 12, 13, 13, 12, 11, 12, 11, 11, 7,
+ 12, 14, 15, 13, 15, 13, 14, 13, 14, 14, 13, 12, 13, 12, 12, 8,
+ 13, 16, 15, 15, 16, 15, 15, 14, 15, 16, 12, 12, 14, 14, 11, 9,
+ 12, 15, 14, 13, 15, 13, 14, 12, 14, 14, 12, 11, 13, 12, 11, 7,
+ 12, 14, 14, 13, 15, 13, 14, 12, 15, 14, 13, 10, 13, 12, 12, 6,
+ 12, 16, 16, 15, 14, 14, 15, 14, 13, 15, 14, 14, 10, 11, 11, 9,
+ 13, 16, 16, 15, 15, 14, 15, 14, 15, 15, 15, 14, 12, 11, 12, 8,
+ 14, 16, 16, 16, 16, 14, 16, 14, 16, 15, 15, 14, 14, 11, 14, 8,
+ 12, 16, 16, 15, 15, 14, 15, 14, 14, 16, 14, 14, 12, 12, 11, 8,
+ 11, 14, 14, 13, 14, 13, 14, 12, 14, 14, 13, 12, 12, 11, 11, 7,
+ 12, 14, 15, 13, 14, 13, 14, 12, 14, 14, 13, 12, 13, 11, 12, 6,
+ 14, 16, 16, 16, 16, 16, 15, 14, 16, 16, 14, 13, 13, 13, 11, 8,
+ 12, 15, 15, 13, 15, 13, 14, 12, 14, 14, 13, 12, 13, 12, 10, 6,
+ 11, 14, 13, 12, 14, 12, 13, 10, 14, 13, 12, 10, 12, 10, 10, 4,
+},
+{
+ 4, 6, 6, 6, 6, 6, 7, 6, 6, 7, 6, 6, 6, 6, 6, 3,
+ 6, 9, 10, 9, 10, 9, 11, 9, 10, 10, 10, 9, 10, 9, 10, 6,
+ 10, 11, 14, 11, 14, 11, 14, 11, 14, 13, 14, 11, 14, 11, 13, 9,
+ 6, 10, 9, 9, 10, 10, 10, 9, 10, 11, 9, 9, 10, 10, 9, 6,
+ 8, 11, 11, 9, 12, 11, 12, 10, 12, 12, 11, 10, 12, 11, 11, 7,
+ 11, 13, 14, 11, 15, 13, 15, 11, 15, 14, 14, 11, 15, 13, 14, 9,
+ 10, 13, 11, 11, 14, 14, 13, 11, 14, 14, 11, 11, 13, 13, 11, 9,
+ 11, 14, 12, 11, 15, 14, 14, 11, 15, 15, 13, 11, 14, 14, 13, 9,
+ 12, 14, 13, 10, 16, 15, 16, 11, 16, 16, 14, 11, 16, 14, 14, 9,
+ 6, 10, 10, 10, 9, 9, 10, 9, 10, 11, 10, 10, 9, 9, 9, 6,
+ 8, 11, 12, 11, 11, 10, 12, 10, 12, 12, 12, 11, 11, 10, 11, 7,
+ 11, 13, 15, 13, 14, 11, 15, 11, 15, 14, 14, 13, 14, 12, 14, 9,
+ 8, 12, 12, 12, 12, 12, 12, 11, 12, 13, 11, 11, 11, 11, 11, 8,
+ 9, 12, 12, 11, 12, 12, 13, 11, 13, 13, 12, 11, 12, 11, 11, 8,
+ 11, 14, 15, 13, 14, 13, 15, 11, 15, 15, 14, 13, 15, 13, 14, 9,
+ 12, 16, 14, 14, 15, 15, 14, 12, 15, 16, 14, 13, 14, 14, 13, 10,
+ 11, 15, 14, 13, 15, 14, 15, 12, 15, 16, 14, 13, 15, 14, 13, 9,
+ 13, 15, 15, 12, 16, 15, 16, 12, 16, 16, 15, 13, 15, 14, 14, 9,
+ 10, 14, 14, 14, 11, 11, 13, 11, 14, 14, 14, 13, 11, 11, 11, 9,
+ 11, 14, 15, 14, 13, 11, 14, 12, 15, 15, 15, 14, 13, 11, 13, 9,
+ 13, 14, 16, 15, 14, 11, 16, 12, 16, 16, 16, 14, 15, 12, 15, 10,
+ 12, 16, 15, 15, 14, 14, 14, 12, 16, 16, 14, 14, 14, 13, 13, 10,
+ 12, 15, 15, 14, 14, 13, 14, 12, 15, 16, 14, 14, 14, 13, 13, 9,
+ 13, 16, 16, 14, 16, 13, 16, 12, 16, 16, 16, 14, 16, 13, 15, 10,
+ 14, 16, 16, 16, 16, 16, 15, 14, 16, 16, 16, 16, 16, 16, 14, 11,
+ 13, 16, 16, 15, 16, 16, 16, 13, 16, 16, 16, 15, 16, 15, 14, 10,
+ 14, 16, 16, 14, 16, 14, 16, 12, 16, 16, 16, 15, 16, 15, 15, 10,
+ 6, 10, 10, 10, 10, 10, 11, 10, 9, 10, 9, 9, 9, 9, 9, 6,
+ 9, 12, 12, 11, 12, 11, 13, 11, 12, 12, 11, 10, 12, 11, 11, 8,
+ 12, 14, 15, 14, 15, 14, 16, 13, 15, 14, 14, 12, 15, 13, 14, 10,
+ 8, 12, 11, 11, 12, 12, 12, 11, 11, 12, 10, 10, 11, 11, 10, 7,
+ 9, 12, 12, 11, 13, 12, 13, 11, 12, 13, 11, 10, 12, 12, 11, 8,
+ 11, 14, 14, 13, 15, 14, 15, 13, 15, 14, 14, 12, 15, 13, 14, 9,
+ 11, 15, 12, 13, 15, 15, 14, 13, 14, 15, 11, 11, 14, 14, 12, 9,
+ 11, 14, 13, 13, 15, 14, 15, 13, 15, 15, 13, 11, 15, 14, 13, 9,
+ 13, 15, 15, 12, 16, 15, 16, 13, 16, 15, 14, 11, 16, 15, 14, 9,
+ 8, 12, 12, 11, 11, 11, 12, 11, 11, 12, 11, 11, 9, 10, 10, 7,
+ 9, 12, 13, 12, 12, 11, 13, 11, 12, 13, 12, 12, 11, 11, 11, 8,
+ 12, 14, 15, 14, 15, 13, 16, 13, 15, 14, 15, 13, 14, 12, 14, 9,
+ 9, 13, 12, 12, 12, 12, 13, 12, 12, 13, 11, 11, 11, 11, 10, 8,
+ 9, 12, 12, 12, 12, 12, 13, 11, 12, 13, 11, 11, 12, 11, 11, 7,
+ 11, 13, 14, 13, 14, 13, 15, 12, 14, 14, 14, 12, 14, 12, 13, 8,
+ 12, 15, 14, 14, 15, 15, 14, 13, 15, 16, 13, 13, 14, 14, 12, 9,
+ 11, 14, 13, 13, 14, 14, 14, 12, 14, 15, 13, 12, 14, 13, 12, 8,
+ 11, 14, 14, 13, 15, 14, 15, 12, 15, 15, 14, 12, 14, 13, 13, 8,
+ 11, 14, 14, 14, 13, 13, 14, 13, 13, 14, 14, 13, 11, 11, 11, 9,
+ 11, 15, 15, 14, 14, 13, 15, 13, 14, 15, 14, 14, 13, 11, 13, 9,
+ 13, 16, 16, 16, 15, 13, 16, 13, 16, 16, 16, 15, 15, 12, 15, 10,
+ 11, 15, 15, 15, 14, 14, 14, 13, 15, 15, 14, 14, 13, 13, 12, 9,
+ 11, 14, 14, 13, 13, 13, 14, 12, 14, 14, 13, 13, 13, 12, 12, 8,
+ 12, 15, 15, 14, 15, 13, 15, 12, 15, 15, 14, 13, 14, 12, 13, 8,
+ 13, 16, 16, 16, 16, 16, 16, 14, 16, 16, 15, 15, 15, 15, 13, 10,
+ 12, 15, 15, 14, 15, 14, 15, 13, 15, 16, 14, 13, 14, 14, 13, 9,
+ 12, 15, 15, 14, 15, 14, 15, 12, 15, 15, 14, 13, 14, 13, 13, 8,
+ 10, 14, 13, 13, 14, 13, 14, 13, 11, 13, 11, 11, 11, 11, 11, 9,
+ 12, 15, 16, 14, 15, 14, 16, 14, 14, 14, 14, 13, 14, 13, 13, 10,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 15, 16, 13, 16, 14, 16, 11,
+ 11, 15, 14, 14, 15, 14, 15, 14, 13, 14, 11, 12, 13, 13, 11, 9,
+ 12, 15, 15, 14, 15, 15, 16, 14, 14, 14, 13, 12, 14, 13, 13, 9,
+ 13, 16, 16, 15, 16, 16, 16, 15, 16, 15, 15, 12, 16, 14, 15, 10,
+ 12, 16, 14, 15, 16, 16, 16, 14, 14, 16, 11, 12, 14, 15, 12, 9,
+ 13, 16, 15, 14, 16, 16, 16, 14, 15, 16, 13, 12, 15, 15, 13, 9,
+ 14, 16, 16, 14, 16, 16, 16, 15, 16, 16, 14, 12, 16, 15, 15, 10,
+ 11, 14, 14, 14, 14, 14, 14, 13, 12, 14, 13, 13, 11, 11, 11, 9,
+ 11, 15, 15, 14, 14, 14, 16, 14, 14, 14, 14, 13, 13, 12, 13, 9,
+ 13, 16, 16, 16, 16, 15, 16, 15, 16, 15, 16, 14, 15, 13, 15, 10,
+ 11, 15, 15, 14, 14, 14, 15, 14, 14, 15, 13, 13, 12, 13, 11, 9,
+ 11, 14, 14, 13, 14, 14, 14, 13, 13, 14, 13, 12, 13, 12, 12, 8,
+ 12, 15, 15, 14, 16, 14, 16, 14, 15, 15, 15, 13, 14, 13, 14, 9,
+ 13, 16, 15, 16, 16, 16, 16, 15, 15, 16, 13, 13, 14, 14, 12, 9,
+ 12, 15, 14, 14, 15, 15, 15, 13, 14, 15, 13, 12, 14, 13, 12, 8,
+ 12, 15, 14, 14, 15, 15, 15, 13, 15, 15, 14, 12, 14, 13, 13, 8,
+ 12, 16, 15, 15, 13, 14, 15, 14, 13, 15, 14, 14, 10, 11, 11, 9,
+ 12, 16, 16, 15, 15, 14, 16, 14, 15, 15, 15, 14, 13, 12, 13, 9,
+ 14, 16, 16, 16, 16, 14, 16, 15, 16, 15, 16, 15, 14, 12, 15, 10,
+ 12, 16, 15, 15, 15, 15, 15, 14, 15, 16, 14, 14, 12, 13, 11, 9,
+ 11, 15, 15, 14, 14, 14, 15, 13, 14, 15, 14, 13, 13, 12, 12, 8,
+ 12, 15, 15, 14, 15, 14, 15, 13, 15, 15, 14, 13, 14, 12, 13, 8,
+ 14, 16, 16, 16, 16, 16, 16, 15, 16, 16, 14, 14, 14, 14, 11, 9,
+ 12, 15, 15, 14, 15, 15, 15, 13, 15, 15, 14, 13, 14, 13, 12, 8,
+ 11, 14, 14, 13, 14, 13, 14, 12, 13, 14, 13, 12, 13, 12, 12, 7,
+},
+{
+ 2, 6, 6, 5, 6, 6, 7, 6, 6, 7, 6, 6, 6, 6, 6, 3,
+ 6, 9, 10, 9, 10, 9, 11, 9, 10, 10, 10, 9, 10, 9, 10, 7,
+ 10, 11, 14, 11, 14, 11, 14, 11, 14, 13, 14, 12, 14, 12, 13, 9,
+ 6, 10, 9, 9, 10, 10, 10, 9, 10, 11, 9, 9, 10, 10, 9, 7,
+ 8, 11, 11, 9, 12, 11, 12, 10, 12, 12, 11, 10, 12, 11, 11, 8,
+ 11, 13, 14, 11, 16, 13, 15, 12, 16, 14, 14, 12, 15, 13, 14, 10,
+ 10, 13, 11, 11, 14, 14, 13, 11, 13, 14, 11, 11, 13, 13, 11, 9,
+ 11, 13, 13, 11, 15, 14, 14, 12, 15, 15, 13, 12, 15, 14, 13, 10,
+ 12, 14, 14, 11, 16, 15, 16, 12, 16, 16, 15, 12, 16, 15, 15, 10,
+ 6, 10, 10, 10, 9, 9, 10, 9, 10, 11, 10, 10, 9, 9, 9, 7,
+ 8, 11, 12, 11, 11, 10, 12, 10, 12, 12, 12, 11, 11, 10, 11, 8,
+ 12, 13, 16, 13, 14, 11, 16, 12, 16, 15, 15, 13, 14, 12, 14, 10,
+ 9, 13, 12, 12, 12, 12, 12, 11, 13, 13, 12, 12, 12, 12, 11, 8,
+ 10, 13, 13, 12, 13, 12, 13, 11, 14, 14, 13, 12, 13, 12, 12, 9,
+ 12, 14, 16, 13, 15, 13, 15, 12, 16, 16, 16, 13, 16, 14, 14, 10,
+ 12, 16, 14, 14, 16, 15, 14, 13, 16, 16, 14, 14, 15, 15, 13, 11,
+ 12, 16, 15, 14, 16, 15, 15, 12, 16, 16, 15, 14, 16, 15, 14, 10,
+ 14, 16, 16, 14, 16, 15, 16, 13, 16, 16, 16, 14, 16, 16, 15, 11,
+ 10, 14, 14, 13, 11, 11, 13, 12, 14, 14, 13, 13, 11, 11, 12, 9,
+ 12, 14, 16, 14, 13, 11, 14, 12, 16, 15, 15, 14, 14, 12, 13, 10,
+ 13, 14, 16, 15, 14, 11, 16, 12, 16, 16, 16, 15, 16, 13, 15, 11,
+ 12, 16, 15, 15, 14, 14, 14, 13, 16, 16, 15, 15, 14, 14, 13, 11,
+ 13, 16, 16, 15, 14, 14, 15, 13, 16, 16, 16, 15, 15, 14, 14, 11,
+ 14, 16, 16, 15, 16, 14, 16, 13, 16, 16, 16, 15, 16, 14, 15, 11,
+ 15, 16, 16, 16, 16, 16, 16, 14, 16, 16, 16, 16, 16, 16, 14, 12,
+ 15, 16, 16, 16, 16, 16, 16, 13, 16, 16, 16, 16, 16, 16, 15, 12,
+ 15, 16, 16, 15, 16, 15, 16, 13, 16, 16, 16, 16, 16, 16, 16, 11,
+ 6, 10, 10, 10, 10, 10, 11, 10, 9, 10, 9, 9, 9, 9, 9, 7,
+ 9, 12, 13, 12, 13, 12, 14, 12, 12, 12, 12, 11, 12, 11, 11, 8,
+ 12, 14, 16, 14, 16, 14, 16, 14, 15, 14, 15, 13, 16, 13, 14, 11,
+ 8, 12, 11, 11, 12, 12, 12, 11, 11, 12, 10, 10, 11, 11, 10, 8,
+ 10, 13, 13, 12, 14, 13, 14, 12, 13, 13, 12, 11, 13, 12, 12, 9,
+ 12, 15, 15, 13, 16, 15, 16, 14, 16, 15, 15, 12, 16, 14, 15, 10,
+ 11, 15, 13, 13, 16, 15, 14, 13, 14, 15, 11, 12, 14, 14, 12, 10,
+ 12, 16, 14, 13, 16, 16, 16, 14, 16, 15, 13, 12, 15, 15, 14, 10,
+ 14, 16, 16, 14, 16, 16, 16, 14, 16, 16, 15, 13, 16, 16, 15, 11,
+ 8, 12, 12, 12, 11, 11, 12, 11, 11, 12, 11, 11, 9, 10, 10, 8,
+ 10, 13, 14, 13, 13, 12, 14, 12, 13, 13, 13, 12, 12, 11, 12, 9,
+ 13, 15, 16, 15, 16, 14, 16, 14, 16, 15, 16, 14, 15, 13, 15, 11,
+ 10, 13, 13, 13, 13, 13, 13, 12, 13, 14, 12, 12, 12, 12, 11, 9,
+ 10, 13, 13, 13, 13, 13, 14, 12, 13, 14, 13, 12, 12, 12, 12, 9,
+ 12, 15, 15, 14, 16, 14, 16, 13, 16, 15, 15, 13, 15, 13, 14, 10,
+ 13, 16, 15, 15, 16, 16, 15, 14, 16, 16, 13, 14, 15, 15, 12, 10,
+ 12, 16, 14, 14, 16, 16, 15, 13, 16, 16, 14, 13, 15, 14, 13, 10,
+ 13, 16, 16, 14, 16, 15, 16, 13, 16, 16, 16, 13, 16, 15, 15, 10,
+ 11, 15, 15, 14, 13, 13, 14, 13, 13, 15, 14, 14, 11, 12, 12, 10,
+ 12, 15, 16, 15, 14, 13, 16, 14, 16, 15, 16, 14, 13, 12, 13, 10,
+ 14, 16, 16, 16, 16, 14, 16, 14, 16, 16, 16, 16, 16, 13, 16, 11,
+ 12, 16, 16, 16, 15, 15, 15, 14, 15, 16, 14, 14, 13, 14, 12, 10,
+ 12, 16, 16, 15, 15, 14, 16, 13, 16, 16, 15, 14, 14, 13, 13, 10,
+ 13, 16, 16, 15, 16, 14, 16, 13, 16, 16, 16, 15, 16, 14, 15, 10,
+ 15, 16, 16, 16, 16, 16, 16, 15, 16, 16, 16, 16, 16, 16, 13, 12,
+ 14, 16, 16, 16, 16, 16, 16, 14, 16, 16, 16, 15, 16, 15, 14, 11,
+ 14, 16, 16, 15, 16, 16, 16, 13, 16, 16, 16, 15, 16, 15, 14, 10,
+ 10, 14, 13, 13, 13, 13, 14, 13, 11, 13, 11, 11, 11, 11, 11, 9,
+ 12, 15, 16, 15, 16, 15, 16, 14, 14, 14, 14, 13, 14, 13, 14, 11,
+ 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14, 16, 14, 16, 12,
+ 11, 15, 14, 14, 15, 15, 15, 14, 13, 14, 11, 12, 13, 13, 12, 10,
+ 13, 16, 15, 15, 16, 16, 16, 15, 15, 15, 13, 12, 15, 14, 13, 10,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 13, 16, 16, 16, 12,
+ 13, 16, 14, 15, 16, 16, 16, 15, 14, 16, 11, 12, 15, 15, 12, 10,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 13, 13, 16, 16, 14, 11,
+ 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 13, 16, 16, 16, 11,
+ 11, 15, 15, 14, 13, 14, 14, 14, 13, 14, 13, 13, 11, 12, 11, 10,
+ 12, 16, 16, 16, 16, 15, 16, 15, 15, 15, 15, 14, 13, 12, 14, 10,
+ 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 15, 16, 14, 16, 12,
+ 12, 16, 15, 15, 16, 16, 16, 14, 14, 15, 13, 13, 13, 13, 12, 10,
+ 12, 16, 16, 15, 15, 15, 16, 14, 14, 15, 14, 13, 14, 13, 13, 10,
+ 14, 16, 16, 16, 16, 16, 16, 15, 16, 16, 16, 14, 16, 14, 15, 11,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14, 14, 15, 15, 12, 11,
+ 13, 16, 16, 15, 16, 16, 16, 14, 16, 16, 14, 13, 16, 14, 13, 10,
+ 14, 16, 16, 16, 16, 16, 16, 15, 16, 16, 16, 14, 16, 14, 15, 10,
+ 12, 16, 16, 15, 14, 15, 16, 14, 13, 15, 14, 14, 11, 12, 12, 10,
+ 13, 16, 16, 16, 16, 15, 16, 16, 16, 16, 16, 15, 13, 12, 14, 11,
+ 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 13, 16, 11,
+ 14, 16, 16, 16, 16, 16, 16, 15, 16, 16, 15, 15, 13, 14, 12, 11,
+ 13, 16, 16, 16, 16, 16, 16, 15, 16, 16, 16, 14, 14, 13, 13, 10,
+ 14, 16, 16, 16, 16, 16, 16, 15, 16, 16, 16, 15, 15, 14, 14, 10,
+ 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 15, 16, 15, 16, 13, 11,
+ 14, 16, 16, 16, 16, 16, 16, 14, 16, 16, 15, 14, 15, 14, 13, 10,
+ 12, 15, 15, 14, 15, 14, 16, 14, 14, 16, 15, 13, 14, 13, 13, 9,
+},
+{
+ 2, 5, 5, 5, 5, 5, 6, 6, 5, 6, 5, 6, 5, 6, 6, 4,
+ 6, 8, 10, 8, 10, 9, 11, 9, 10, 10, 10, 9, 10, 9, 10, 8,
+ 10, 11, 13, 11, 13, 11, 14, 11, 14, 13, 13, 12, 13, 12, 13, 10,
+ 6, 10, 8, 9, 10, 10, 10, 9, 10, 11, 9, 9, 10, 10, 9, 7,
+ 8, 11, 11, 10, 12, 11, 12, 10, 12, 12, 11, 10, 12, 12, 11, 9,
+ 11, 13, 14, 11, 15, 14, 15, 12, 16, 14, 14, 12, 15, 14, 14, 11,
+ 10, 13, 11, 11, 14, 13, 13, 12, 13, 14, 11, 11, 13, 13, 12, 10,
+ 11, 14, 13, 11, 16, 14, 14, 12, 15, 15, 14, 12, 15, 14, 14, 11,
+ 12, 14, 14, 11, 16, 16, 16, 13, 16, 16, 16, 13, 16, 16, 15, 12,
+ 6, 10, 10, 10, 8, 9, 10, 9, 10, 11, 10, 10, 9, 9, 9, 8,
+ 8, 11, 12, 12, 11, 10, 12, 11, 12, 12, 12, 12, 12, 11, 12, 9,
+ 11, 13, 16, 14, 14, 12, 15, 12, 16, 15, 16, 14, 14, 13, 14, 11,
+ 9, 13, 12, 12, 12, 12, 12, 11, 13, 13, 12, 12, 12, 12, 11, 10,
+ 10, 13, 13, 12, 13, 12, 13, 11, 14, 14, 13, 13, 13, 13, 12, 10,
+ 13, 14, 16, 14, 15, 14, 16, 13, 16, 16, 16, 14, 16, 14, 15, 12,
+ 12, 16, 14, 14, 16, 15, 14, 13, 16, 16, 14, 14, 15, 15, 13, 12,
+ 13, 16, 15, 14, 16, 16, 15, 13, 16, 16, 15, 14, 16, 16, 14, 12,
+ 14, 16, 16, 14, 16, 16, 16, 14, 16, 16, 16, 16, 16, 16, 16, 12,
+ 10, 13, 14, 13, 11, 11, 13, 12, 13, 14, 13, 13, 11, 12, 12, 10,
+ 11, 14, 15, 15, 13, 12, 14, 13, 16, 16, 16, 15, 14, 13, 14, 11,
+ 12, 14, 16, 16, 14, 12, 16, 13, 16, 16, 16, 16, 15, 13, 16, 12,
+ 12, 16, 15, 16, 14, 15, 14, 14, 16, 16, 15, 16, 14, 14, 13, 12,
+ 13, 16, 16, 16, 16, 14, 16, 13, 16, 16, 16, 16, 16, 14, 15, 12,
+ 14, 16, 16, 16, 16, 14, 16, 14, 16, 16, 16, 16, 16, 15, 16, 13,
+ 15, 16, 16, 16, 16, 16, 16, 15, 16, 16, 16, 16, 16, 16, 15, 13,
+ 16, 16, 16, 16, 16, 16, 16, 15, 16, 16, 16, 16, 16, 16, 16, 13,
+ 15, 16, 16, 16, 16, 16, 16, 14, 16, 16, 16, 16, 16, 16, 16, 13,
+ 6, 10, 10, 10, 10, 10, 10, 10, 8, 10, 9, 9, 8, 9, 9, 7,
+ 9, 12, 13, 12, 13, 12, 13, 12, 12, 12, 12, 11, 12, 11, 12, 10,
+ 12, 14, 16, 14, 16, 14, 16, 14, 16, 15, 15, 14, 16, 14, 15, 12,
+ 8, 12, 11, 11, 12, 12, 12, 11, 11, 12, 10, 10, 11, 12, 10, 9,
+ 10, 13, 13, 12, 14, 13, 14, 12, 13, 13, 12, 11, 13, 13, 12, 10,
+ 13, 15, 16, 14, 16, 16, 16, 14, 16, 15, 15, 13, 16, 15, 15, 12,
+ 11, 15, 13, 13, 15, 15, 15, 14, 14, 14, 11, 12, 14, 14, 12, 11,
+ 13, 16, 14, 14, 16, 16, 16, 14, 16, 15, 13, 13, 16, 14, 14, 11,
+ 14, 16, 16, 14, 16, 16, 16, 15, 16, 16, 16, 14, 16, 16, 16, 12,
+ 8, 12, 12, 12, 11, 11, 12, 12, 11, 12, 11, 11, 9, 10, 10, 9,
+ 10, 13, 14, 13, 13, 12, 14, 13, 13, 13, 13, 13, 12, 11, 12, 10,
+ 13, 15, 16, 15, 16, 14, 16, 14, 16, 16, 16, 15, 16, 13, 15, 12,
+ 10, 14, 13, 13, 13, 13, 13, 13, 13, 14, 12, 12, 12, 12, 11, 10,
+ 10, 13, 13, 13, 13, 13, 14, 12, 13, 14, 13, 12, 13, 12, 12, 10,
+ 13, 16, 16, 14, 16, 15, 16, 14, 16, 16, 15, 14, 16, 14, 15, 11,
+ 13, 16, 15, 16, 16, 16, 15, 14, 16, 16, 14, 14, 15, 15, 13, 12,
+ 13, 16, 15, 14, 16, 16, 16, 14, 16, 16, 14, 14, 15, 15, 14, 11,
+ 14, 16, 16, 15, 16, 16, 16, 14, 16, 16, 16, 15, 16, 16, 15, 12,
+ 11, 14, 15, 14, 13, 13, 14, 14, 13, 15, 14, 14, 11, 12, 12, 11,
+ 13, 16, 16, 16, 14, 14, 16, 14, 16, 16, 16, 15, 14, 13, 14, 12,
+ 14, 16, 16, 16, 16, 14, 16, 16, 16, 16, 16, 16, 16, 14, 16, 13,
+ 13, 16, 16, 16, 15, 16, 15, 15, 16, 16, 15, 16, 14, 14, 13, 12,
+ 13, 16, 16, 15, 15, 14, 16, 14, 16, 16, 16, 15, 14, 14, 14, 11,
+ 14, 16, 16, 16, 16, 16, 16, 15, 16, 16, 16, 16, 16, 15, 16, 12,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14, 13,
+ 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 15, 12,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 12,
+ 9, 13, 13, 13, 13, 13, 14, 13, 10, 12, 11, 12, 11, 12, 11, 10,
+ 12, 15, 16, 15, 16, 16, 16, 16, 14, 14, 14, 13, 14, 13, 14, 12,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 15, 16, 16, 16, 14,
+ 11, 15, 14, 14, 15, 14, 15, 14, 13, 14, 11, 12, 13, 13, 12, 11,
+ 13, 16, 16, 15, 16, 16, 16, 15, 15, 15, 14, 13, 16, 15, 14, 12,
+ 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14, 16, 16, 16, 13,
+ 12, 16, 14, 15, 16, 16, 16, 16, 14, 16, 11, 13, 15, 16, 13, 12,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14, 14, 16, 16, 15, 12,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14, 16, 16, 16, 13,
+ 11, 15, 14, 14, 13, 14, 15, 14, 12, 14, 13, 13, 11, 12, 12, 11,
+ 13, 16, 16, 16, 16, 15, 16, 16, 15, 15, 15, 15, 14, 13, 14, 12,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 15, 16, 13,
+ 13, 16, 16, 16, 16, 16, 16, 15, 14, 16, 13, 14, 13, 14, 13, 11,
+ 13, 16, 16, 16, 16, 16, 16, 15, 15, 16, 15, 14, 14, 14, 14, 11,
+ 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 12,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14, 15, 16, 16, 13, 12,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14, 14, 16, 16, 15, 12,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 12,
+ 11, 16, 16, 15, 13, 15, 16, 15, 13, 15, 15, 15, 11, 12, 12, 11,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14, 14, 15, 12,
+ 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14, 16, 13,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 15, 16, 14, 14, 13, 12,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14, 14, 14, 12,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 15, 16, 12,
+ 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 15, 16, 14, 13,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 15, 12,
+ 12, 14, 14, 14, 14, 15, 16, 14, 14, 16, 15, 14, 14, 15, 14, 11,
+},
+{
+ 1, 5, 5, 6, 5, 6, 7, 7, 5, 7, 6, 7, 5, 6, 6, 6,
+ 6, 9, 10, 9, 10, 9, 11, 10, 11, 11, 11, 10, 11, 10, 11, 9,
+ 10, 11, 14, 12, 14, 12, 16, 12, 16, 13, 16, 13, 14, 13, 16, 12,
+ 6, 10, 9, 9, 10, 11, 11, 10, 10, 11, 9, 10, 10, 11, 10, 9,
+ 8, 11, 11, 10, 13, 12, 13, 12, 13, 13, 12, 12, 13, 13, 13, 11,
+ 11, 13, 16, 12, 16, 16, 16, 13, 16, 16, 16, 14, 16, 16, 16, 13,
+ 10, 14, 11, 12, 14, 14, 13, 13, 13, 16, 12, 13, 14, 16, 13, 12,
+ 11, 14, 13, 12, 16, 16, 16, 14, 16, 16, 14, 14, 16, 16, 16, 13,
+ 12, 14, 16, 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 6, 10, 10, 11, 9, 9, 11, 10, 10, 11, 11, 11, 9, 10, 10, 9,
+ 9, 12, 13, 12, 12, 11, 13, 12, 13, 13, 13, 13, 12, 12, 13, 11,
+ 12, 13, 16, 16, 16, 13, 16, 14, 16, 16, 16, 16, 16, 14, 16, 13,
+ 9, 13, 13, 13, 13, 13, 13, 13, 13, 14, 13, 13, 13, 13, 12, 11,
+ 10, 14, 14, 13, 14, 13, 14, 13, 16, 16, 14, 15, 14, 14, 14, 12,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 13,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 10, 14, 14, 16, 11, 12, 14, 13, 14, 16, 16, 16, 12, 13, 13, 12,
+ 12, 16, 16, 16, 13, 13, 16, 14, 16, 16, 16, 16, 16, 14, 16, 13,
+ 13, 16, 16, 16, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 6, 10, 10, 10, 10, 11, 11, 11, 9, 11, 9, 10, 9, 10, 10, 9,
+ 9, 13, 13, 13, 13, 13, 14, 13, 12, 13, 13, 12, 13, 12, 13, 11,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 8, 13, 12, 12, 13, 13, 13, 13, 11, 13, 10, 12, 12, 13, 12, 11,
+ 10, 14, 13, 13, 16, 16, 16, 14, 14, 14, 13, 13, 14, 14, 14, 12,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 11, 16, 13, 16, 16, 16, 16, 16, 14, 16, 12, 13, 16, 16, 14, 13,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 8, 13, 13, 13, 11, 12, 13, 13, 11, 13, 12, 13, 10, 12, 12, 11,
+ 10, 14, 16, 16, 14, 13, 16, 14, 14, 16, 16, 14, 13, 13, 14, 12,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 10, 16, 14, 16, 14, 14, 14, 14, 13, 16, 13, 14, 13, 14, 12, 12,
+ 10, 14, 14, 14, 14, 16, 16, 14, 14, 16, 14, 14, 14, 14, 14, 12,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 11, 16, 16, 16, 13, 16, 16, 16, 14, 16, 16, 16, 12, 14, 14, 13,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 9, 14, 13, 14, 13, 14, 16, 16, 11, 13, 12, 13, 11, 13, 12, 12,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 11, 16, 14, 16, 16, 16, 16, 16, 13, 16, 12, 13, 14, 16, 13, 13,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 12, 16, 16, 16, 16, 16, 16, 16, 14, 16, 12, 16, 16, 16, 14, 14,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 11, 16, 16, 16, 14, 16, 16, 16, 13, 16, 14, 16, 12, 13, 13, 13,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14, 16, 16, 16, 14, 14,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 11, 16, 16, 16, 14, 16, 16, 16, 14, 16, 16, 16, 12, 16, 14, 14,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 13,
+},
+{
+ 1, 5, 5, 6, 5, 6, 7, 8, 5, 7, 6, 8, 6, 7, 7, 7,
+ 5, 9, 10, 10, 10, 10, 12, 11, 10, 11, 11, 11, 10, 11, 12, 10,
+ 9, 11, 13, 12, 13, 12, 16, 14, 16, 14, 16, 16, 16, 13, 16, 13,
+ 5, 10, 9, 10, 10, 11, 11, 11, 10, 11, 9, 11, 10, 11, 11, 10,
+ 8, 11, 11, 11, 12, 13, 13, 13, 12, 13, 12, 12, 13, 13, 13, 12,
+ 11, 13, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 9, 14, 11, 12, 14, 16, 13, 14, 13, 16, 12, 14, 16, 16, 13, 13,
+ 11, 16, 13, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 11, 14, 14, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 5, 10, 11, 11, 9, 10, 11, 11, 10, 12, 11, 12, 9, 11, 11, 11,
+ 8, 12, 13, 13, 11, 11, 14, 13, 13, 14, 13, 16, 12, 12, 13, 12,
+ 11, 13, 16, 16, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 9, 13, 12, 13, 12, 13, 13, 14, 13, 16, 13, 16, 13, 16, 13, 13,
+ 10, 14, 13, 14, 13, 13, 16, 14, 16, 16, 16, 16, 16, 16, 16, 13,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 9, 14, 16, 16, 11, 12, 14, 16, 13, 16, 16, 16, 12, 14, 13, 13,
+ 11, 16, 16, 16, 13, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 12, 16, 16, 16, 14, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 5, 10, 10, 11, 10, 11, 12, 12, 8, 11, 10, 11, 9, 11, 11, 11,
+ 9, 12, 13, 13, 13, 13, 16, 16, 12, 13, 13, 13, 13, 13, 16, 13,
+ 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 8, 13, 11, 13, 12, 13, 13, 14, 11, 13, 10, 13, 12, 14, 12, 12,
+ 10, 14, 13, 14, 16, 16, 16, 16, 13, 16, 13, 14, 16, 16, 16, 14,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 11, 16, 13, 16, 16, 16, 16, 16, 13, 16, 12, 16, 16, 16, 16, 16,
+ 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 8, 13, 13, 13, 11, 13, 14, 16, 11, 13, 13, 14, 10, 12, 12, 12,
+ 10, 14, 16, 16, 13, 16, 16, 16, 13, 16, 16, 16, 13, 13, 16, 14,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 10, 16, 14, 16, 13, 16, 16, 16, 13, 16, 13, 16, 13, 16, 13, 14,
+ 10, 16, 16, 16, 14, 16, 16, 16, 14, 16, 16, 16, 13, 16, 16, 13,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 10, 16, 16, 16, 13, 16, 16, 16, 14, 16, 16, 16, 12, 16, 16, 16,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 9, 14, 13, 16, 13, 16, 16, 16, 10, 14, 12, 14, 11, 13, 13, 13,
+ 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 11, 16, 16, 16, 16, 16, 16, 16, 12, 16, 12, 16, 16, 16, 16, 16,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 11, 16, 16, 16, 16, 16, 16, 16, 13, 16, 13, 16, 16, 16, 16, 16,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 10, 16, 16, 16, 14, 16, 16, 16, 12, 16, 16, 16, 12, 16, 16, 16,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 11, 16, 16, 16, 14, 16, 16, 16, 14, 16, 16, 16, 12, 16, 16, 16,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 10, 16, 16, 16, 16, 16, 16, 16, 14, 16, 16, 16, 16, 16, 16, 14,
+}
+};
+
+
+static const uint8_t rv34_inter_cbp[NUM_INTER_TABLES][4][CBP_VLC_SIZE] = {
+{
+ { 0, 6, 6, 3, 6, 4, 5, 3, 6, 5, 4, 3, 3, 4, 4, 3 },
+ { 0, 6, 6, 4, 6, 4, 5, 3, 6, 5, 4, 3, 4, 4, 4, 2 },
+ { 0, 7, 7, 4, 7, 5, 5, 4, 7, 5, 5, 4, 5, 4, 4, 1 },
+ { 0, 7, 7, 5, 7, 5, 6, 4, 7, 6, 5, 3, 5, 4, 4, 1 }
+},
+{
+ { 0, 6, 6, 3, 6, 3, 5, 4, 6, 5, 3, 4, 3, 4, 4, 3 },
+ { 0, 6, 6, 4, 6, 4, 4, 4, 6, 4, 4, 3, 4, 4, 4, 2 },
+ { 0, 6, 6, 4, 6, 4, 5, 4, 6, 5, 4, 3, 4, 4, 3, 2 },
+ { 0, 7, 7, 5, 7, 5, 6, 4, 7, 6, 5, 3, 5, 4, 4, 1 }
+},
+{
+ { 0, 6, 6, 3, 6, 3, 5, 4, 6, 5, 3, 4, 3, 4, 4, 3 },
+ { 0, 5, 5, 3, 5, 4, 5, 4, 5, 5, 4, 4, 4, 4, 4, 2 },
+ { 0, 6, 6, 4, 6, 4, 5, 3, 6, 5, 4, 3, 4, 4, 4, 2 },
+ { 0, 7, 7, 5, 7, 5, 6, 4, 7, 6, 5, 3, 5, 4, 4, 1 }
+},
+{
+ { 0, 6, 6, 3, 6, 3, 5, 4, 6, 5, 3, 4, 3, 4, 4, 3 },
+ { 0, 5, 5, 3, 5, 4, 5, 4, 5, 5, 4, 4, 4, 4, 4, 2 },
+ { 0, 6, 6, 4, 6, 4, 5, 3, 6, 5, 4, 3, 4, 4, 4, 2 },
+ { 0, 7, 7, 4, 7, 5, 6, 4, 7, 6, 5, 4, 4, 4, 4, 1 }
+},
+{
+ { 0, 5, 5, 3, 5, 3, 5, 4, 5, 5, 3, 4, 3, 4, 4, 4 },
+ { 0, 5, 5, 3, 5, 4, 5, 4, 5, 5, 3, 4, 3, 4, 4, 3 },
+ { 0, 6, 6, 4, 6, 4, 5, 4, 6, 5, 4, 3, 4, 4, 3, 2 },
+ { 0, 7, 7, 4, 7, 5, 6, 4, 7, 6, 5, 4, 4, 4, 4, 1 }
+},
+{
+ { 0, 5, 5, 3, 5, 3, 5, 4, 5, 5, 3, 4, 3, 4, 4, 4 },
+ { 0, 5, 5, 3, 5, 4, 5, 4, 5, 5, 3, 4, 3, 4, 4, 3 },
+ { 0, 5, 5, 3, 5, 4, 4, 4, 5, 4, 4, 4, 3, 4, 4, 3 },
+ { 0, 6, 6, 4, 6, 4, 5, 4, 6, 5, 4, 3, 4, 4, 3, 2 }
+},
+{
+ { 0, 4, 4, 3, 4, 3, 5, 5, 4, 5, 3, 5, 3, 5, 4, 5 },
+ { 0, 4, 4, 3, 4, 4, 5, 4, 4, 5, 3, 5, 3, 5, 4, 4 },
+ { 0, 4, 4, 3, 4, 4, 5, 4, 4, 5, 4, 4, 3, 4, 4, 4 },
+ { 0, 4, 4, 3, 5, 4, 5, 4, 5, 5, 4, 4, 3, 4, 4, 3 }
+}
+};
+
+
+static const uint8_t rv34_table_inter_firstpat[NUM_INTER_TABLES][2][FIRSTBLK_VLC_SIZE] = {
+ {
+ {
+ 0, 7, 5, 7, 5, 7, 6, 6, 7, 10, 7, 9, 8, 9, 8, 7,
+ 12, 14, 11, 12, 12, 12, 11, 9, 6, 9, 6, 8, 7, 9, 7, 7,
+ 8, 11, 8, 9, 9, 10, 9, 8, 13, 15, 12, 12, 12, 13, 11, 9,
+ 10, 13, 9, 10, 11, 12, 9, 8, 12, 14, 10, 11, 12, 13, 10, 9,
+ 16, 16, 12, 12, 14, 13, 11, 9, 6, 9, 7, 9, 7, 9, 8, 7,
+ 9, 11, 9, 10, 9, 10, 9, 8, 14, 16, 12, 12, 13, 13, 11, 9,
+ 8, 11, 8, 10, 9, 10, 9, 8, 10, 13, 10, 11, 10, 11, 9, 8,
+ 14, 16, 12, 12, 13, 13, 11, 9, 12, 14, 10, 11, 12, 13, 10, 9,
+ 13, 16, 11, 12, 13, 13, 10, 9, 16, 16, 13, 12, 14, 14, 11, 9,
+ 11, 13, 11, 12, 10, 11, 10, 9, 13, 14, 12, 12, 11, 12, 10, 9,
+ 16, 16, 13, 13, 13, 13, 11, 9, 12, 15, 12, 12, 11, 12, 10, 9,
+ 13, 16, 13, 13, 12, 12, 11, 9, 16, 16, 14, 13, 13, 13, 11, 9,
+ 14, 16, 13, 13, 13, 14, 11, 9, 16, 16, 13, 13, 14, 14, 11, 9,
+ 16, 16, 13, 13, 14, 13, 11, 8, 4, 9, 6, 8, 6, 9, 7, 7,
+ 8, 11, 8, 9, 9, 10, 8, 8, 13, 15, 12, 12, 13, 13, 11, 9,
+ 7, 10, 7, 9, 8, 10, 8, 8, 9, 12, 9, 10, 10, 11, 9, 8,
+ 14, 16, 12, 12, 13, 13, 11, 9, 11, 13, 9, 10, 11, 12, 9, 8,
+ 12, 14, 10, 11, 12, 13, 10, 9, 16, 16, 13, 12, 14, 14, 11, 9,
+ 7, 10, 8, 9, 8, 10, 8, 8, 10, 12, 10, 11, 10, 11, 9, 8,
+ 14, 16, 13, 13, 13, 13, 11, 9, 9, 12, 9, 10, 9, 11, 9, 8,
+ 11, 13, 10, 11, 10, 11, 10, 9, 15, 16, 13, 13, 13, 13, 11, 9,
+ 12, 14, 11, 11, 12, 13, 10, 9, 13, 16, 11, 12, 13, 13, 10, 9,
+ 16, 16, 12, 12, 14, 13, 11, 8, 11, 14, 11, 12, 10, 11, 10, 9,
+ 13, 15, 12, 13, 11, 12, 10, 9, 16, 16, 14, 13, 13, 13, 11, 9,
+ 12, 15, 12, 13, 11, 12, 10, 9, 13, 16, 13, 13, 12, 12, 11, 9,
+ 16, 16, 14, 13, 13, 13, 11, 9, 15, 16, 13, 13, 13, 13, 11, 9,
+ 16, 16, 13, 13, 13, 13, 11, 9, 16, 16, 13, 12, 13, 13, 10, 7,
+ 8, 11, 8, 10, 9, 11, 9, 9, 10, 13, 10, 11, 11, 12, 10, 9,
+ 15, 16, 13, 13, 14, 14, 12, 10, 9, 12, 9, 11, 10, 11, 9, 9,
+ 12, 14, 11, 11, 11, 12, 10, 9, 16, 16, 13, 13, 14, 14, 12, 10,
+ 12, 14, 10, 11, 12, 13, 10, 9, 14, 16, 11, 12, 13, 14, 10, 9,
+ 16, 16, 13, 13, 15, 14, 11, 9, 9, 12, 10, 11, 9, 11, 10, 9,
+ 12, 14, 11, 12, 11, 12, 10, 9, 16, 16, 14, 13, 14, 14, 12, 10,
+ 11, 14, 10, 12, 11, 12, 10, 9, 12, 15, 11, 12, 12, 13, 11, 10,
+ 16, 16, 14, 13, 14, 14, 12, 10, 13, 16, 11, 12, 13, 14, 11, 9,
+ 14, 16, 12, 12, 13, 14, 11, 9, 16, 16, 13, 13, 14, 14, 11, 9,
+ 12, 15, 12, 13, 10, 12, 10, 9, 14, 16, 13, 13, 11, 12, 11, 10,
+ 16, 16, 14, 14, 14, 13, 12, 9, 13, 16, 13, 13, 12, 13, 11, 10,
+ 14, 16, 13, 13, 12, 13, 11, 10, 16, 16, 14, 14, 13, 13, 12, 9,
+ 15, 16, 13, 13, 13, 14, 11, 9, 16, 16, 13, 13, 13, 14, 11, 9,
+ 16, 16, 13, 12, 13, 13, 10, 8, 10, 13, 10, 11, 10, 12, 10, 9,
+ 12, 14, 11, 12, 12, 13, 11, 10, 16, 16, 13, 13, 14, 14, 12, 9,
+ 11, 14, 10, 11, 11, 12, 10, 9, 13, 16, 11, 12, 12, 13, 11, 10,
+ 16, 16, 14, 13, 14, 14, 12, 9, 12, 15, 10, 11, 12, 13, 9, 8,
+ 14, 16, 11, 11, 13, 14, 10, 8, 16, 16, 12, 12, 14, 14, 10, 8,
+ 11, 14, 11, 12, 11, 12, 10, 9, 13, 16, 12, 13, 12, 13, 11, 10,
+ 16, 16, 14, 13, 14, 14, 12, 9, 12, 15, 11, 12, 11, 13, 10, 10,
+ 13, 16, 12, 13, 12, 13, 11, 10, 16, 16, 14, 13, 14, 14, 12, 9,
+ 13, 16, 11, 11, 13, 13, 10, 8, 14, 16, 11, 12, 13, 14, 10, 8,
+ 16, 16, 12, 12, 14, 14, 10, 8, 12, 15, 12, 13, 10, 11, 10, 9,
+ 14, 16, 13, 13, 11, 12, 10, 9, 16, 16, 14, 13, 13, 13, 11, 8,
+ 13, 16, 12, 13, 11, 12, 10, 9, 14, 16, 13, 13, 12, 12, 10, 9,
+ 16, 16, 14, 13, 13, 12, 10, 8, 14, 16, 12, 12, 12, 13, 10, 8,
+ 14, 16, 12, 12, 12, 13, 10, 7, 16, 16, 11, 11, 12, 11, 8, 5,
+ },
+ {
+ 0, 7, 4, 8, 5, 8, 7, 8, 6, 10, 7, 10, 8, 10, 9, 9,
+ 13, 16, 12, 13, 13, 14, 12, 12, 4, 10, 6, 9, 8, 11, 8, 9,
+ 8, 12, 8, 11, 10, 12, 10, 10, 14, 16, 12, 13, 14, 15, 12, 12,
+ 9, 14, 9, 11, 12, 14, 11, 11, 11, 15, 10, 12, 13, 14, 11, 11,
+ 15, 16, 13, 14, 15, 16, 13, 12, 5, 10, 7, 10, 7, 10, 9, 9,
+ 8, 12, 9, 11, 10, 11, 10, 10, 14, 16, 13, 14, 14, 14, 12, 12,
+ 8, 12, 8, 11, 10, 12, 10, 10, 10, 14, 10, 12, 11, 13, 10, 11,
+ 15, 16, 13, 14, 14, 15, 13, 12, 11, 16, 10, 12, 13, 15, 11, 11,
+ 13, 16, 11, 13, 14, 15, 12, 12, 16, 16, 14, 14, 16, 16, 13, 12,
+ 11, 15, 11, 13, 11, 13, 11, 11, 13, 16, 12, 14, 12, 13, 12, 12,
+ 16, 16, 14, 15, 15, 15, 13, 12, 12, 16, 12, 14, 12, 14, 12, 12,
+ 14, 16, 13, 14, 13, 14, 12, 12, 16, 16, 14, 16, 16, 16, 13, 12,
+ 14, 16, 13, 14, 15, 16, 13, 12, 16, 16, 14, 15, 16, 16, 13, 12,
+ 16, 16, 15, 16, 16, 16, 13, 12, 2, 9, 5, 8, 6, 9, 8, 9,
+ 7, 11, 8, 10, 9, 11, 9, 10, 13, 16, 12, 13, 14, 14, 12, 12,
+ 5, 11, 6, 10, 9, 11, 9, 9, 9, 13, 9, 11, 10, 12, 10, 10,
+ 14, 16, 12, 14, 14, 15, 12, 12, 9, 14, 9, 11, 12, 14, 10, 11,
+ 11, 16, 10, 12, 13, 14, 11, 11, 16, 16, 13, 14, 15, 16, 13, 12,
+ 6, 11, 7, 10, 8, 11, 9, 9, 9, 13, 9, 11, 10, 12, 10, 10,
+ 14, 16, 13, 14, 14, 14, 12, 12, 8, 13, 8, 11, 10, 12, 10, 10,
+ 10, 13, 10, 12, 11, 13, 10, 11, 14, 16, 13, 14, 14, 15, 12, 12,
+ 11, 15, 10, 12, 13, 15, 11, 11, 12, 16, 11, 13, 13, 15, 12, 11,
+ 16, 16, 13, 14, 15, 16, 13, 12, 11, 15, 11, 13, 10, 13, 11, 11,
+ 13, 16, 12, 14, 12, 13, 12, 11, 16, 16, 14, 15, 15, 15, 13, 12,
+ 12, 16, 12, 13, 12, 14, 12, 12, 13, 16, 12, 14, 13, 14, 12, 12,
+ 16, 16, 14, 15, 15, 15, 13, 12, 14, 16, 13, 14, 15, 16, 12, 12,
+ 16, 16, 13, 14, 15, 16, 12, 12, 16, 16, 14, 15, 16, 16, 13, 12,
+ 6, 12, 7, 10, 9, 12, 9, 10, 9, 13, 9, 12, 11, 13, 11, 11,
+ 14, 16, 13, 14, 15, 15, 13, 12, 8, 13, 8, 11, 10, 13, 10, 10,
+ 10, 14, 10, 12, 12, 14, 11, 11, 15, 16, 13, 14, 16, 16, 13, 12,
+ 10, 15, 9, 12, 12, 15, 11, 11, 12, 16, 11, 13, 14, 16, 12, 12,
+ 16, 16, 14, 14, 16, 16, 13, 12, 8, 13, 9, 11, 10, 12, 10, 11,
+ 11, 14, 11, 12, 11, 13, 11, 11, 16, 16, 14, 15, 15, 16, 13, 12,
+ 10, 14, 10, 12, 11, 13, 11, 11, 11, 15, 11, 13, 12, 14, 11, 11,
+ 15, 16, 13, 14, 15, 16, 13, 12, 12, 16, 11, 13, 13, 16, 12, 12,
+ 13, 16, 11, 13, 14, 16, 12, 12, 16, 16, 13, 14, 16, 16, 13, 12,
+ 12, 16, 12, 14, 11, 13, 11, 11, 13, 16, 13, 14, 12, 14, 12, 12,
+ 16, 16, 15, 16, 16, 16, 14, 13, 13, 16, 12, 14, 12, 14, 12, 12,
+ 14, 16, 13, 14, 13, 14, 12, 12, 16, 16, 14, 16, 14, 16, 13, 12,
+ 15, 16, 13, 15, 15, 16, 13, 12, 15, 16, 13, 15, 14, 16, 13, 12,
+ 16, 16, 14, 15, 15, 16, 13, 11, 8, 13, 8, 11, 10, 13, 10, 11,
+ 11, 15, 10, 12, 12, 14, 11, 11, 15, 16, 13, 14, 15, 15, 13, 12,
+ 9, 14, 9, 12, 11, 14, 10, 11, 11, 16, 10, 12, 13, 14, 11, 11,
+ 16, 16, 13, 14, 15, 16, 13, 12, 11, 15, 9, 12, 12, 14, 10, 10,
+ 12, 16, 11, 12, 14, 15, 11, 11, 16, 16, 13, 14, 16, 16, 12, 11,
+ 9, 14, 10, 12, 11, 13, 11, 11, 12, 16, 11, 13, 12, 14, 11, 11,
+ 16, 16, 14, 14, 15, 15, 13, 12, 10, 15, 10, 12, 12, 14, 11, 11,
+ 12, 16, 11, 13, 13, 14, 11, 11, 16, 16, 14, 14, 15, 16, 13, 12,
+ 12, 16, 10, 12, 13, 15, 11, 11, 13, 16, 11, 13, 14, 15, 11, 11,
+ 16, 16, 13, 13, 15, 16, 12, 11, 12, 16, 11, 13, 10, 13, 11, 11,
+ 14, 16, 13, 14, 12, 14, 11, 11, 16, 16, 15, 16, 14, 15, 13, 11,
+ 13, 16, 12, 14, 12, 14, 11, 11, 13, 16, 12, 14, 12, 14, 11, 11,
+ 16, 16, 14, 15, 14, 14, 12, 11, 14, 16, 12, 13, 13, 15, 11, 11,
+ 14, 16, 12, 13, 13, 14, 11, 11, 15, 16, 12, 13, 13, 13, 10, 9,
+ },
+ },
+ {
+ {
+ 0, 7, 4, 7, 5, 7, 6, 6, 6, 10, 7, 8, 8, 9, 8, 7,
+ 13, 14, 11, 12, 12, 12, 11, 9, 5, 9, 6, 8, 7, 9, 7, 7,
+ 8, 11, 8, 9, 9, 10, 9, 8, 13, 16, 12, 12, 12, 13, 11, 9,
+ 10, 13, 8, 10, 11, 12, 9, 9, 12, 14, 10, 11, 12, 13, 10, 9,
+ 15, 16, 12, 12, 14, 14, 11, 9, 6, 10, 7, 9, 7, 9, 8, 7,
+ 8, 11, 9, 10, 9, 10, 9, 8, 14, 16, 12, 12, 13, 12, 11, 9,
+ 8, 11, 8, 10, 9, 10, 9, 8, 10, 13, 10, 11, 10, 11, 9, 9,
+ 14, 16, 12, 12, 13, 13, 11, 9, 12, 15, 10, 11, 12, 13, 10, 9,
+ 13, 16, 11, 12, 13, 13, 10, 9, 16, 16, 12, 13, 14, 14, 11, 9,
+ 10, 14, 11, 12, 9, 11, 10, 9, 12, 15, 12, 13, 11, 12, 11, 9,
+ 16, 16, 13, 13, 13, 13, 11, 9, 12, 15, 12, 13, 11, 12, 11, 9,
+ 13, 16, 12, 13, 12, 13, 11, 10, 16, 16, 13, 13, 13, 13, 11, 9,
+ 14, 16, 13, 13, 13, 14, 11, 10, 16, 16, 13, 13, 13, 14, 11, 10,
+ 16, 16, 13, 13, 14, 14, 11, 9, 4, 9, 6, 8, 6, 9, 7, 7,
+ 8, 11, 8, 9, 9, 10, 9, 8, 13, 15, 12, 12, 13, 13, 11, 9,
+ 6, 10, 7, 9, 8, 10, 8, 8, 9, 12, 9, 10, 10, 11, 9, 8,
+ 14, 16, 12, 12, 13, 13, 11, 10, 10, 13, 8, 10, 11, 12, 9, 9,
+ 12, 15, 10, 11, 12, 13, 10, 9, 16, 16, 12, 12, 14, 14, 11, 9,
+ 7, 11, 8, 9, 7, 10, 8, 8, 9, 12, 10, 11, 9, 11, 9, 9,
+ 14, 16, 12, 13, 13, 13, 11, 10, 9, 12, 9, 10, 9, 11, 9, 9,
+ 10, 13, 10, 11, 10, 11, 10, 9, 14, 16, 12, 13, 13, 13, 11, 9,
+ 12, 15, 10, 11, 12, 13, 10, 9, 13, 16, 11, 12, 13, 13, 10, 9,
+ 16, 16, 12, 12, 14, 14, 11, 9, 10, 14, 11, 12, 9, 11, 10, 9,
+ 12, 16, 12, 13, 11, 12, 11, 9, 16, 16, 14, 14, 13, 13, 11, 9,
+ 12, 16, 12, 13, 11, 12, 10, 10, 13, 16, 12, 13, 11, 12, 11, 10,
+ 16, 16, 13, 13, 13, 13, 11, 9, 14, 16, 13, 13, 13, 14, 11, 9,
+ 15, 16, 13, 13, 13, 14, 11, 9, 16, 16, 13, 13, 13, 13, 10, 8,
+ 7, 11, 8, 10, 9, 11, 9, 9, 10, 13, 10, 11, 11, 12, 10, 10,
+ 15, 16, 13, 13, 14, 14, 12, 10, 9, 13, 9, 11, 10, 12, 10, 9,
+ 11, 14, 10, 12, 12, 13, 10, 10, 16, 16, 13, 13, 14, 14, 12, 10,
+ 11, 15, 9, 11, 12, 13, 10, 9, 13, 16, 11, 12, 13, 14, 11, 10,
+ 16, 16, 13, 13, 15, 15, 11, 10, 9, 13, 10, 11, 9, 11, 10, 9,
+ 11, 14, 11, 12, 11, 12, 11, 10, 16, 16, 14, 14, 14, 14, 12, 10,
+ 10, 14, 10, 12, 11, 12, 10, 10, 12, 15, 11, 12, 12, 13, 11, 10,
+ 16, 16, 13, 13, 14, 14, 12, 10, 13, 16, 11, 12, 13, 14, 11, 10,
+ 13, 16, 11, 12, 13, 14, 11, 10, 16, 16, 12, 13, 14, 14, 11, 9,
+ 11, 15, 12, 13, 10, 12, 10, 10, 13, 16, 13, 14, 11, 13, 11, 10,
+ 16, 16, 14, 14, 14, 14, 12, 10, 13, 16, 13, 13, 11, 13, 11, 10,
+ 14, 16, 13, 14, 12, 13, 11, 10, 16, 16, 14, 14, 13, 13, 12, 10,
+ 15, 16, 13, 14, 14, 14, 11, 10, 15, 16, 13, 13, 13, 14, 11, 10,
+ 16, 16, 12, 13, 13, 13, 10, 8, 9, 13, 10, 11, 10, 12, 10, 10,
+ 12, 15, 11, 12, 12, 13, 11, 10, 16, 16, 14, 13, 14, 14, 12, 10,
+ 10, 14, 10, 12, 11, 13, 10, 10, 13, 16, 11, 12, 12, 14, 11, 10,
+ 16, 16, 13, 13, 14, 14, 12, 10, 12, 16, 9, 11, 12, 14, 10, 9,
+ 13, 16, 10, 12, 13, 14, 10, 9, 16, 16, 12, 12, 14, 14, 11, 9,
+ 10, 14, 11, 12, 10, 12, 10, 10, 13, 16, 12, 13, 12, 13, 11, 10,
+ 16, 16, 14, 14, 14, 14, 12, 10, 11, 16, 11, 12, 11, 13, 11, 10,
+ 13, 16, 12, 13, 12, 14, 11, 10, 16, 16, 14, 14, 14, 14, 12, 10,
+ 13, 16, 11, 12, 13, 14, 10, 9, 14, 16, 11, 12, 13, 14, 10, 9,
+ 16, 16, 12, 12, 14, 14, 10, 8, 12, 16, 12, 13, 10, 12, 10, 9,
+ 14, 16, 13, 13, 11, 12, 11, 9, 16, 16, 14, 14, 13, 13, 11, 9,
+ 13, 16, 12, 13, 11, 12, 10, 9, 14, 16, 13, 13, 11, 13, 11, 9,
+ 16, 16, 14, 14, 13, 13, 11, 9, 14, 16, 12, 13, 12, 13, 10, 8,
+ 14, 16, 12, 12, 12, 13, 10, 8, 15, 16, 11, 11, 11, 12, 9, 6,
+ },
+ {
+ 0, 7, 4, 7, 5, 8, 7, 8, 5, 10, 7, 10, 8, 10, 9, 10,
+ 13, 16, 12, 14, 14, 14, 13, 12, 4, 10, 6, 9, 8, 11, 9, 9,
+ 8, 12, 8, 11, 10, 12, 10, 10, 14, 16, 13, 14, 14, 15, 13, 12,
+ 9, 14, 9, 12, 12, 14, 11, 11, 12, 16, 11, 13, 13, 15, 12, 12,
+ 15, 16, 14, 15, 15, 16, 13, 13, 5, 10, 7, 10, 7, 10, 9, 9,
+ 8, 12, 9, 11, 9, 11, 10, 10, 14, 16, 13, 14, 14, 15, 13, 12,
+ 7, 12, 8, 11, 10, 12, 10, 10, 10, 13, 10, 12, 11, 13, 11, 11,
+ 15, 16, 13, 15, 14, 16, 13, 13, 11, 16, 11, 13, 13, 16, 12, 12,
+ 13, 16, 12, 14, 14, 16, 12, 12, 16, 16, 14, 16, 16, 16, 14, 13,
+ 11, 15, 12, 14, 11, 13, 11, 12, 13, 16, 12, 14, 12, 14, 12, 12,
+ 16, 16, 14, 16, 14, 16, 13, 13, 13, 16, 12, 14, 12, 14, 12, 12,
+ 14, 16, 13, 15, 13, 15, 13, 13, 16, 16, 15, 16, 16, 16, 14, 13,
+ 15, 16, 13, 16, 15, 16, 13, 13, 16, 16, 14, 16, 16, 16, 14, 13,
+ 16, 16, 16, 16, 16, 16, 14, 13, 2, 9, 5, 8, 6, 9, 8, 9,
+ 7, 11, 8, 10, 9, 11, 9, 10, 14, 16, 13, 14, 14, 15, 13, 12,
+ 5, 11, 6, 10, 9, 11, 9, 10, 8, 13, 9, 11, 11, 12, 10, 11,
+ 14, 16, 13, 14, 14, 16, 13, 13, 9, 15, 9, 12, 12, 14, 11, 11,
+ 12, 16, 11, 13, 13, 15, 12, 12, 16, 16, 14, 15, 16, 16, 14, 13,
+ 6, 11, 7, 10, 8, 11, 9, 10, 9, 13, 9, 12, 10, 12, 10, 11,
+ 14, 16, 13, 14, 14, 15, 13, 13, 8, 12, 8, 11, 10, 12, 10, 11,
+ 9, 13, 10, 12, 11, 13, 11, 11, 14, 16, 13, 14, 14, 16, 13, 13,
+ 12, 16, 11, 13, 13, 15, 12, 12, 13, 16, 11, 13, 14, 16, 12, 12,
+ 16, 16, 14, 15, 16, 16, 13, 13, 11, 15, 11, 14, 10, 13, 11, 12,
+ 13, 16, 13, 15, 12, 14, 12, 12, 16, 16, 15, 16, 15, 16, 14, 13,
+ 12, 16, 12, 14, 12, 14, 12, 12, 13, 16, 13, 15, 13, 14, 12, 13,
+ 16, 16, 15, 16, 15, 16, 13, 13, 15, 16, 13, 16, 15, 16, 13, 13,
+ 16, 16, 14, 16, 16, 16, 13, 13, 16, 16, 15, 16, 16, 16, 14, 13,
+ 5, 12, 7, 10, 9, 12, 10, 10, 9, 13, 9, 12, 11, 13, 11, 11,
+ 15, 16, 13, 14, 15, 15, 13, 13, 7, 13, 8, 11, 10, 13, 10, 11,
+ 10, 14, 10, 12, 12, 14, 11, 12, 16, 16, 14, 15, 16, 16, 14, 13,
+ 10, 16, 9, 12, 13, 15, 11, 12, 13, 16, 11, 13, 14, 16, 12, 12,
+ 16, 16, 14, 16, 16, 16, 14, 13, 8, 13, 9, 12, 9, 12, 10, 11,
+ 11, 15, 11, 13, 11, 13, 11, 12, 16, 16, 14, 16, 16, 16, 14, 13,
+ 9, 14, 10, 12, 11, 13, 11, 12, 11, 15, 11, 13, 12, 14, 12, 12,
+ 16, 16, 14, 16, 15, 16, 14, 13, 12, 16, 11, 14, 14, 16, 12, 12,
+ 13, 16, 12, 14, 14, 16, 13, 13, 16, 16, 13, 15, 16, 16, 14, 13,
+ 11, 16, 12, 14, 10, 13, 12, 12, 13, 16, 13, 15, 12, 14, 12, 13,
+ 16, 16, 16, 16, 16, 16, 14, 14, 13, 16, 13, 15, 12, 15, 12, 13,
+ 13, 16, 13, 15, 12, 15, 13, 13, 16, 16, 15, 16, 14, 16, 14, 13,
+ 16, 16, 14, 16, 16, 16, 14, 13, 15, 16, 14, 16, 15, 16, 14, 13,
+ 16, 16, 14, 16, 15, 16, 13, 12, 8, 14, 9, 12, 10, 14, 11, 12,
+ 11, 16, 10, 13, 12, 14, 12, 12, 16, 16, 14, 15, 15, 16, 14, 13,
+ 9, 15, 9, 12, 12, 14, 11, 12, 12, 16, 11, 13, 13, 15, 12, 12,
+ 16, 16, 14, 15, 16, 16, 14, 13, 11, 16, 9, 12, 13, 15, 11, 11,
+ 13, 16, 11, 13, 14, 16, 12, 12, 16, 16, 14, 14, 16, 16, 13, 12,
+ 9, 15, 10, 13, 11, 14, 11, 12, 12, 16, 11, 14, 12, 14, 12, 12,
+ 16, 16, 14, 16, 16, 16, 14, 13, 10, 16, 10, 13, 12, 15, 12, 12,
+ 12, 16, 12, 14, 13, 15, 12, 12, 16, 16, 14, 16, 16, 16, 14, 13,
+ 12, 16, 11, 13, 13, 16, 12, 12, 13, 16, 11, 13, 14, 16, 12, 12,
+ 16, 16, 13, 14, 16, 16, 13, 12, 11, 16, 12, 14, 10, 13, 11, 12,
+ 13, 16, 13, 15, 12, 14, 12, 12, 16, 16, 16, 16, 15, 16, 13, 12,
+ 12, 16, 12, 15, 12, 14, 12, 12, 13, 16, 13, 15, 12, 14, 12, 12,
+ 16, 16, 15, 16, 14, 15, 13, 12, 14, 16, 13, 14, 13, 16, 12, 12,
+ 13, 16, 12, 14, 13, 15, 12, 12, 14, 16, 12, 13, 13, 14, 11, 10,
+ },
+ },
+ {
+ {
+ 0, 7, 4, 7, 5, 7, 6, 6, 6, 10, 7, 8, 8, 9, 8, 8,
+ 13, 14, 11, 12, 12, 12, 11, 10, 5, 9, 6, 8, 7, 9, 7, 7,
+ 8, 11, 8, 9, 9, 10, 9, 8, 13, 16, 12, 12, 13, 13, 11, 10,
+ 10, 14, 8, 10, 11, 13, 9, 9, 12, 15, 10, 11, 12, 13, 10, 10,
+ 16, 16, 12, 13, 14, 14, 11, 10, 5, 10, 7, 9, 6, 9, 8, 8,
+ 8, 11, 9, 10, 9, 10, 9, 8, 14, 16, 12, 12, 13, 13, 11, 10,
+ 8, 12, 8, 10, 9, 10, 9, 9, 10, 13, 9, 11, 10, 11, 9, 9,
+ 14, 16, 12, 13, 13, 13, 11, 10, 12, 16, 10, 12, 12, 13, 10, 10,
+ 13, 16, 11, 12, 13, 14, 11, 10, 16, 16, 12, 13, 14, 14, 11, 10,
+ 10, 14, 11, 13, 9, 11, 10, 10, 12, 16, 12, 13, 11, 12, 11, 10,
+ 16, 16, 13, 14, 13, 13, 12, 10, 12, 16, 12, 13, 11, 13, 11, 10,
+ 13, 16, 12, 13, 12, 13, 11, 10, 16, 16, 13, 14, 13, 14, 12, 10,
+ 14, 16, 13, 14, 13, 14, 12, 11, 16, 16, 13, 14, 14, 15, 12, 11,
+ 16, 16, 13, 14, 14, 14, 11, 10, 3, 9, 5, 8, 6, 9, 7, 7,
+ 8, 11, 8, 10, 9, 10, 9, 8, 14, 15, 12, 12, 13, 13, 11, 10,
+ 6, 11, 6, 9, 8, 10, 8, 8, 9, 12, 9, 10, 10, 11, 9, 9,
+ 14, 16, 12, 13, 13, 13, 11, 10, 10, 14, 8, 11, 12, 13, 9, 9,
+ 12, 16, 10, 11, 12, 13, 10, 10, 16, 16, 12, 13, 14, 14, 11, 10,
+ 6, 11, 8, 10, 7, 10, 8, 8, 9, 12, 10, 11, 9, 11, 9, 9,
+ 14, 16, 12, 13, 13, 13, 12, 10, 9, 12, 9, 11, 9, 11, 9, 9,
+ 10, 13, 10, 11, 10, 12, 10, 9, 14, 16, 12, 13, 13, 13, 11, 10,
+ 12, 16, 10, 12, 12, 14, 10, 10, 13, 16, 10, 12, 13, 14, 11, 10,
+ 16, 16, 12, 13, 14, 14, 11, 10, 10, 15, 11, 13, 9, 11, 10, 10,
+ 12, 16, 12, 13, 11, 12, 11, 10, 16, 16, 14, 14, 13, 14, 12, 10,
+ 12, 16, 12, 13, 11, 13, 11, 10, 13, 16, 12, 13, 11, 13, 11, 10,
+ 16, 16, 13, 14, 13, 13, 12, 10, 14, 16, 13, 14, 14, 14, 11, 11,
+ 15, 16, 13, 14, 13, 14, 11, 10, 16, 16, 13, 13, 13, 14, 11, 9,
+ 7, 12, 8, 11, 9, 11, 9, 10, 10, 14, 10, 12, 11, 12, 11, 10,
+ 16, 16, 13, 13, 14, 14, 12, 11, 9, 13, 9, 11, 10, 12, 10, 10,
+ 11, 15, 10, 12, 12, 13, 11, 10, 16, 16, 13, 14, 14, 14, 12, 11,
+ 11, 16, 9, 11, 12, 14, 10, 10, 13, 16, 10, 12, 13, 14, 11, 10,
+ 16, 16, 13, 14, 16, 16, 12, 11, 9, 13, 10, 12, 9, 12, 10, 10,
+ 11, 15, 11, 13, 11, 13, 11, 11, 16, 16, 14, 14, 14, 14, 13, 11,
+ 10, 14, 10, 12, 11, 13, 11, 10, 12, 16, 11, 13, 12, 13, 11, 11,
+ 16, 16, 13, 14, 14, 14, 12, 11, 13, 16, 11, 13, 13, 14, 11, 11,
+ 13, 16, 11, 13, 13, 14, 11, 11, 16, 16, 12, 13, 14, 15, 12, 10,
+ 11, 16, 12, 14, 10, 12, 11, 10, 13, 16, 13, 14, 11, 13, 11, 11,
+ 16, 16, 15, 16, 14, 14, 13, 11, 13, 16, 13, 14, 12, 13, 11, 11,
+ 13, 16, 13, 14, 12, 13, 11, 11, 16, 16, 14, 14, 13, 14, 12, 11,
+ 15, 16, 13, 14, 14, 16, 12, 11, 14, 16, 13, 14, 13, 14, 12, 11,
+ 16, 16, 12, 13, 13, 14, 11, 9, 9, 14, 10, 12, 10, 13, 11, 11,
+ 12, 16, 12, 13, 12, 14, 12, 11, 16, 16, 14, 14, 14, 14, 13, 11,
+ 10, 16, 10, 13, 12, 14, 11, 11, 13, 16, 12, 13, 13, 14, 12, 11,
+ 16, 16, 14, 14, 15, 15, 13, 11, 12, 16, 9, 12, 13, 14, 10, 10,
+ 14, 16, 11, 12, 13, 15, 11, 10, 16, 16, 13, 13, 15, 16, 11, 10,
+ 10, 16, 11, 13, 11, 13, 11, 11, 13, 16, 12, 14, 12, 14, 12, 11,
+ 16, 16, 14, 14, 14, 14, 13, 11, 11, 16, 11, 13, 12, 14, 11, 11,
+ 13, 16, 12, 14, 13, 14, 12, 11, 16, 16, 14, 14, 14, 15, 13, 11,
+ 13, 16, 11, 13, 13, 14, 11, 10, 14, 16, 11, 13, 13, 14, 11, 10,
+ 16, 16, 12, 13, 14, 15, 11, 9, 12, 16, 12, 14, 10, 13, 11, 10,
+ 14, 16, 13, 14, 11, 13, 11, 10, 16, 16, 14, 15, 13, 14, 12, 10,
+ 13, 16, 13, 14, 11, 13, 11, 10, 14, 16, 13, 14, 12, 13, 11, 10,
+ 16, 16, 14, 14, 13, 13, 11, 10, 14, 16, 12, 13, 13, 14, 11, 9,
+ 14, 16, 12, 13, 12, 13, 10, 9, 14, 16, 11, 11, 12, 12, 9, 7,
+ },
+ {
+ 0, 7, 3, 8, 5, 8, 7, 9, 5, 10, 7, 10, 8, 11, 10, 10,
+ 14, 16, 14, 15, 14, 16, 14, 14, 4, 10, 6, 10, 8, 11, 9, 10,
+ 8, 12, 9, 11, 10, 12, 11, 11, 15, 16, 14, 16, 15, 16, 14, 14,
+ 10, 16, 10, 13, 13, 16, 12, 13, 13, 16, 12, 14, 14, 16, 13, 13,
+ 16, 16, 16, 16, 16, 16, 14, 15, 4, 10, 7, 10, 7, 10, 9, 10,
+ 8, 12, 9, 12, 10, 12, 11, 12, 14, 16, 14, 16, 15, 16, 14, 14,
+ 8, 12, 9, 12, 10, 13, 11, 12, 10, 14, 11, 13, 11, 14, 12, 13,
+ 16, 16, 14, 16, 16, 16, 14, 15, 12, 16, 12, 14, 14, 16, 13, 14,
+ 14, 16, 13, 16, 16, 16, 14, 14, 16, 16, 16, 16, 16, 16, 16, 16,
+ 11, 16, 12, 16, 11, 14, 13, 13, 13, 16, 13, 16, 13, 15, 13, 14,
+ 16, 16, 16, 16, 16, 16, 15, 16, 13, 16, 13, 16, 13, 16, 13, 14,
+ 14, 16, 14, 16, 14, 16, 14, 15, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 15, 16, 16, 16, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 2, 9, 5, 9, 6, 10, 8, 10,
+ 7, 11, 8, 11, 9, 12, 10, 11, 14, 16, 14, 16, 15, 16, 14, 14,
+ 5, 11, 6, 10, 9, 12, 10, 11, 9, 13, 9, 12, 11, 13, 11, 12,
+ 16, 16, 14, 16, 16, 16, 14, 14, 10, 16, 9, 13, 13, 16, 12, 13,
+ 13, 16, 12, 14, 14, 16, 13, 13, 16, 16, 16, 16, 16, 16, 16, 14,
+ 5, 11, 8, 11, 7, 11, 10, 11, 9, 13, 10, 13, 10, 13, 11, 12,
+ 16, 16, 14, 16, 16, 16, 14, 14, 8, 13, 9, 12, 10, 13, 11, 12,
+ 10, 14, 10, 13, 11, 14, 12, 12, 16, 16, 14, 16, 15, 16, 14, 14,
+ 12, 16, 12, 14, 14, 16, 13, 14, 14, 16, 12, 16, 15, 16, 13, 14,
+ 16, 16, 15, 16, 16, 16, 15, 15, 11, 16, 12, 16, 10, 14, 12, 13,
+ 13, 16, 14, 16, 13, 16, 13, 14, 16, 16, 16, 16, 16, 16, 15, 16,
+ 13, 16, 13, 16, 13, 16, 13, 14, 14, 16, 14, 16, 13, 16, 13, 14,
+ 16, 16, 16, 16, 16, 16, 15, 16, 16, 16, 15, 16, 16, 16, 15, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 15, 16,
+ 5, 13, 7, 12, 9, 13, 11, 12, 10, 14, 10, 13, 11, 13, 12, 13,
+ 16, 16, 16, 16, 16, 16, 16, 15, 7, 14, 8, 12, 11, 14, 11, 12,
+ 11, 16, 11, 13, 13, 16, 13, 13, 16, 16, 16, 16, 16, 16, 16, 16,
+ 11, 16, 10, 14, 14, 16, 12, 13, 13, 16, 12, 15, 16, 16, 14, 14,
+ 16, 16, 16, 16, 16, 16, 16, 16, 8, 14, 10, 13, 9, 13, 11, 12,
+ 11, 16, 12, 14, 12, 14, 13, 13, 16, 16, 16, 16, 16, 16, 16, 16,
+ 10, 16, 11, 14, 12, 14, 12, 13, 11, 16, 12, 14, 12, 15, 13, 13,
+ 16, 16, 15, 16, 16, 16, 15, 16, 13, 16, 12, 16, 15, 16, 14, 14,
+ 13, 16, 12, 16, 16, 16, 14, 14, 16, 16, 14, 16, 16, 16, 14, 16,
+ 11, 16, 13, 16, 10, 14, 12, 13, 14, 16, 14, 16, 13, 16, 14, 14,
+ 16, 16, 16, 16, 16, 16, 16, 16, 13, 16, 14, 16, 13, 16, 14, 15,
+ 13, 16, 14, 16, 13, 16, 14, 15, 16, 16, 16, 16, 15, 16, 15, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 15, 16, 16, 16, 15, 16,
+ 16, 16, 15, 16, 16, 16, 14, 14, 8, 16, 10, 14, 11, 16, 12, 13,
+ 12, 16, 12, 14, 13, 16, 13, 14, 16, 16, 16, 16, 16, 16, 16, 14,
+ 10, 16, 10, 14, 12, 16, 12, 13, 13, 16, 12, 15, 14, 16, 13, 14,
+ 16, 16, 16, 16, 16, 16, 16, 15, 11, 16, 10, 13, 13, 16, 12, 13,
+ 14, 16, 12, 14, 15, 16, 13, 13, 16, 16, 16, 16, 16, 16, 15, 14,
+ 10, 16, 11, 14, 11, 16, 12, 13, 13, 16, 13, 16, 13, 16, 13, 14,
+ 16, 16, 16, 16, 16, 16, 16, 15, 11, 16, 12, 14, 13, 16, 13, 14,
+ 13, 16, 13, 16, 14, 16, 13, 14, 16, 16, 16, 16, 16, 16, 16, 15,
+ 13, 16, 12, 14, 14, 16, 13, 13, 13, 16, 12, 15, 14, 16, 13, 13,
+ 16, 16, 14, 16, 16, 16, 14, 13, 11, 16, 12, 16, 11, 15, 12, 13,
+ 14, 16, 14, 16, 13, 16, 13, 14, 16, 16, 16, 16, 16, 16, 14, 14,
+ 12, 16, 13, 16, 12, 16, 13, 14, 13, 16, 14, 16, 13, 16, 13, 14,
+ 16, 16, 16, 16, 14, 16, 14, 14, 14, 16, 13, 16, 14, 16, 13, 13,
+ 13, 16, 13, 16, 14, 16, 13, 13, 15, 16, 13, 14, 13, 15, 12, 12,
+ },
+ },
+ {
+ {
+ 0, 7, 4, 6, 4, 7, 6, 7, 6, 9, 7, 8, 7, 9, 8, 8,
+ 13, 14, 12, 12, 12, 13, 11, 11, 5, 9, 5, 8, 7, 9, 7, 8,
+ 8, 11, 8, 10, 9, 10, 9, 9, 13, 15, 12, 13, 12, 13, 11, 11,
+ 9, 14, 8, 11, 11, 13, 10, 10, 11, 15, 10, 12, 12, 13, 10, 11,
+ 14, 16, 12, 13, 14, 14, 12, 11, 5, 9, 7, 9, 6, 9, 8, 8,
+ 8, 11, 8, 10, 8, 10, 9, 9, 13, 16, 12, 13, 13, 13, 11, 11,
+ 7, 11, 8, 10, 9, 11, 9, 9, 9, 13, 9, 11, 10, 11, 10, 10,
+ 14, 16, 12, 13, 13, 13, 12, 11, 11, 16, 10, 12, 12, 14, 11, 11,
+ 13, 16, 11, 13, 13, 14, 11, 11, 16, 16, 13, 14, 14, 15, 12, 11,
+ 10, 15, 11, 13, 9, 12, 10, 10, 12, 16, 12, 13, 11, 12, 11, 11,
+ 15, 16, 13, 14, 13, 14, 12, 11, 12, 16, 12, 14, 11, 13, 11, 11,
+ 13, 16, 12, 14, 12, 13, 12, 11, 16, 16, 13, 14, 14, 14, 12, 11,
+ 14, 16, 13, 14, 14, 15, 12, 12, 16, 16, 13, 14, 14, 16, 12, 12,
+ 16, 16, 14, 14, 14, 15, 12, 11, 3, 9, 5, 8, 6, 9, 7, 8,
+ 7, 11, 8, 10, 9, 10, 9, 9, 13, 14, 12, 13, 13, 13, 12, 11,
+ 6, 11, 6, 9, 8, 10, 8, 9, 9, 12, 8, 10, 10, 11, 9, 10,
+ 14, 16, 12, 13, 13, 14, 12, 11, 9, 14, 8, 11, 11, 13, 10, 10,
+ 12, 16, 10, 12, 12, 14, 11, 11, 16, 16, 12, 13, 14, 15, 12, 11,
+ 6, 11, 8, 10, 7, 10, 8, 9, 9, 12, 9, 11, 9, 11, 10, 10,
+ 14, 16, 13, 13, 13, 14, 12, 11, 8, 12, 9, 11, 9, 11, 9, 10,
+ 10, 13, 9, 11, 10, 12, 10, 10, 14, 16, 12, 13, 13, 14, 12, 11,
+ 12, 16, 10, 12, 12, 14, 11, 11, 12, 16, 10, 12, 13, 14, 11, 11,
+ 15, 16, 12, 13, 14, 14, 11, 11, 10, 15, 11, 13, 9, 12, 10, 10,
+ 12, 16, 12, 14, 11, 13, 11, 11, 16, 16, 14, 14, 14, 14, 12, 11,
+ 12, 16, 12, 14, 11, 13, 11, 11, 13, 16, 12, 14, 11, 13, 11, 11,
+ 16, 16, 13, 14, 13, 14, 12, 11, 14, 16, 13, 14, 14, 15, 12, 11,
+ 14, 16, 13, 14, 13, 15, 12, 11, 16, 16, 13, 14, 13, 14, 11, 10,
+ 6, 13, 8, 11, 9, 12, 10, 10, 10, 14, 10, 12, 11, 13, 11, 11,
+ 16, 16, 13, 14, 14, 14, 13, 12, 8, 14, 8, 12, 10, 13, 10, 11,
+ 11, 15, 10, 12, 12, 13, 11, 11, 16, 16, 14, 14, 15, 16, 13, 12,
+ 11, 16, 9, 12, 12, 14, 11, 11, 13, 16, 11, 13, 13, 16, 11, 11,
+ 16, 16, 13, 14, 16, 16, 13, 12, 8, 14, 10, 12, 9, 12, 10, 11,
+ 11, 15, 11, 13, 11, 13, 11, 11, 16, 16, 14, 15, 14, 15, 13, 12,
+ 10, 15, 10, 13, 11, 13, 11, 11, 11, 15, 11, 13, 12, 13, 11, 11,
+ 16, 16, 13, 15, 14, 15, 13, 12, 12, 16, 11, 13, 13, 15, 11, 11,
+ 13, 16, 11, 13, 13, 15, 11, 11, 16, 16, 12, 14, 14, 16, 12, 11,
+ 11, 16, 12, 14, 10, 13, 11, 11, 13, 16, 13, 15, 12, 14, 12, 12,
+ 16, 16, 15, 16, 14, 15, 13, 12, 12, 16, 13, 14, 12, 14, 12, 12,
+ 13, 16, 13, 14, 12, 14, 12, 12, 16, 16, 14, 15, 13, 14, 12, 12,
+ 15, 16, 13, 15, 14, 16, 12, 12, 14, 16, 13, 14, 13, 15, 12, 12,
+ 15, 16, 12, 13, 13, 14, 11, 10, 9, 15, 10, 13, 11, 14, 11, 12,
+ 12, 16, 12, 14, 12, 14, 12, 12, 16, 16, 14, 14, 14, 15, 13, 12,
+ 10, 16, 10, 13, 12, 14, 12, 12, 13, 16, 12, 14, 13, 15, 12, 12,
+ 16, 16, 14, 14, 15, 16, 13, 12, 11, 16, 9, 12, 13, 15, 11, 11,
+ 14, 16, 11, 13, 14, 16, 11, 11, 16, 16, 13, 14, 16, 16, 12, 11,
+ 10, 16, 11, 14, 11, 14, 12, 12, 13, 16, 13, 14, 12, 14, 12, 12,
+ 16, 16, 14, 15, 15, 15, 13, 12, 11, 16, 12, 14, 12, 15, 12, 12,
+ 13, 16, 12, 14, 13, 15, 12, 12, 16, 16, 14, 15, 15, 16, 13, 12,
+ 12, 16, 11, 13, 13, 15, 11, 11, 13, 16, 11, 13, 13, 15, 11, 11,
+ 16, 16, 12, 13, 14, 16, 12, 10, 11, 16, 12, 14, 10, 13, 11, 11,
+ 14, 16, 13, 14, 12, 14, 11, 11, 16, 16, 15, 16, 14, 14, 12, 11,
+ 12, 16, 13, 14, 11, 14, 11, 11, 13, 16, 13, 14, 12, 14, 11, 11,
+ 16, 16, 14, 15, 13, 14, 12, 11, 14, 16, 12, 14, 13, 14, 11, 10,
+ 13, 16, 12, 13, 12, 14, 11, 10, 14, 16, 11, 12, 12, 12, 10, 8,
+ },
+ {
+ 0, 8, 4, 9, 5, 9, 8, 10, 6, 11, 8, 11, 9, 12, 11, 12,
+ 16, 16, 16, 16, 16, 16, 16, 16, 4, 11, 6, 11, 9, 12, 10, 12,
+ 9, 13, 10, 13, 11, 16, 12, 13, 16, 16, 16, 16, 16, 16, 16, 16,
+ 11, 16, 11, 16, 16, 16, 14, 16, 14, 16, 13, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 4, 11, 8, 12, 7, 12, 10, 12,
+ 8, 13, 10, 13, 10, 13, 12, 14, 16, 16, 16, 16, 16, 16, 16, 16,
+ 8, 13, 10, 14, 11, 16, 12, 14, 11, 16, 12, 16, 12, 16, 13, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 14, 16, 13, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 12, 16, 14, 16, 12, 16, 16, 16, 13, 16, 16, 16, 14, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 14, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 1, 10, 5, 10, 7, 11, 9, 11,
+ 8, 12, 9, 12, 10, 13, 12, 13, 16, 16, 16, 16, 16, 16, 16, 16,
+ 5, 12, 7, 12, 10, 13, 11, 12, 9, 16, 10, 13, 12, 16, 13, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 11, 16, 10, 16, 16, 16, 14, 16,
+ 14, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 6, 12, 9, 13, 8, 12, 11, 13, 10, 16, 11, 16, 11, 16, 13, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 9, 16, 10, 14, 11, 16, 12, 16,
+ 10, 16, 11, 16, 12, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 14, 16, 13, 16, 16, 16, 16, 16, 16, 16, 13, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 12, 16, 14, 16, 11, 16, 14, 16,
+ 16, 16, 16, 16, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 6, 16, 9, 13, 10, 16, 12, 14, 11, 16, 12, 16, 12, 16, 13, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 8, 16, 9, 14, 12, 16, 13, 16,
+ 12, 16, 12, 16, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 12, 16, 10, 16, 16, 16, 14, 16, 16, 16, 13, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 8, 16, 11, 16, 10, 16, 12, 16,
+ 12, 16, 13, 16, 13, 16, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 10, 16, 12, 16, 13, 16, 14, 16, 11, 16, 13, 16, 13, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 14, 16, 13, 16, 16, 16, 16, 16,
+ 14, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 12, 16, 16, 16, 11, 16, 14, 16, 16, 16, 16, 16, 14, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 14, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 8, 16, 11, 16, 12, 16, 13, 16,
+ 13, 16, 13, 16, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 10, 16, 11, 16, 13, 16, 14, 16, 14, 16, 13, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 11, 16, 10, 16, 16, 16, 13, 16,
+ 16, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 10, 16, 12, 16, 12, 16, 13, 16, 13, 16, 16, 16, 14, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 11, 16, 13, 16, 14, 16, 16, 16,
+ 13, 16, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 13, 16, 12, 16, 16, 16, 14, 16, 14, 16, 13, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 11, 16, 13, 16, 11, 16, 13, 16,
+ 16, 16, 16, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 13, 16, 16, 16, 13, 16, 16, 16, 13, 16, 16, 16, 13, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14, 16, 16, 16, 14, 15,
+ },
+ },
+ {
+ {
+ 0, 7, 3, 7, 4, 7, 6, 7, 6, 9, 7, 9, 7, 9, 8, 9,
+ 13, 14, 12, 13, 13, 13, 12, 12, 4, 9, 5, 9, 7, 9, 8, 9,
+ 7, 11, 8, 10, 9, 11, 9, 10, 13, 16, 12, 14, 13, 14, 12, 12,
+ 9, 14, 8, 12, 12, 14, 10, 11, 11, 16, 10, 13, 13, 14, 11, 12,
+ 15, 16, 13, 14, 14, 16, 12, 12, 5, 10, 7, 9, 6, 9, 8, 9,
+ 8, 11, 8, 11, 8, 10, 9, 10, 14, 16, 12, 14, 13, 14, 12, 12,
+ 7, 12, 8, 11, 9, 11, 9, 10, 9, 13, 9, 12, 10, 12, 10, 11,
+ 14, 16, 12, 14, 13, 14, 12, 12, 12, 16, 11, 13, 13, 15, 11, 12,
+ 13, 16, 11, 14, 13, 15, 12, 12, 15, 16, 13, 15, 14, 16, 13, 13,
+ 10, 15, 12, 14, 9, 13, 11, 12, 12, 16, 12, 14, 11, 13, 12, 12,
+ 15, 16, 13, 15, 14, 15, 13, 13, 12, 16, 12, 14, 12, 14, 12, 12,
+ 13, 16, 13, 14, 12, 14, 12, 12, 16, 16, 14, 15, 14, 16, 13, 13,
+ 15, 16, 13, 15, 14, 16, 12, 13, 16, 16, 14, 16, 14, 16, 13, 13,
+ 16, 16, 14, 16, 15, 16, 13, 13, 3, 9, 5, 9, 6, 9, 8, 9,
+ 7, 11, 8, 10, 9, 11, 9, 10, 14, 15, 13, 14, 13, 14, 12, 12,
+ 5, 11, 6, 10, 8, 11, 9, 10, 9, 12, 9, 11, 10, 12, 10, 11,
+ 14, 16, 13, 14, 14, 15, 13, 13, 9, 15, 8, 12, 12, 14, 10, 11,
+ 12, 16, 10, 13, 13, 15, 11, 12, 15, 16, 13, 14, 14, 16, 13, 13,
+ 6, 11, 8, 11, 7, 10, 9, 10, 9, 13, 10, 12, 9, 12, 10, 11,
+ 14, 16, 13, 14, 14, 14, 13, 13, 8, 13, 9, 12, 9, 12, 10, 11,
+ 9, 13, 9, 12, 10, 12, 10, 11, 14, 16, 12, 14, 13, 14, 12, 12,
+ 12, 16, 11, 13, 13, 15, 11, 12, 12, 16, 11, 13, 13, 15, 11, 12,
+ 14, 16, 12, 14, 14, 15, 12, 12, 10, 15, 11, 14, 9, 13, 11, 12,
+ 12, 16, 12, 14, 11, 14, 12, 12, 16, 16, 14, 16, 14, 15, 13, 13,
+ 12, 16, 12, 14, 11, 14, 12, 12, 12, 16, 13, 14, 11, 14, 12, 12,
+ 15, 16, 14, 15, 13, 15, 12, 13, 14, 16, 13, 15, 14, 16, 13, 13,
+ 14, 16, 13, 15, 14, 16, 12, 13, 15, 16, 13, 15, 13, 15, 12, 12,
+ 6, 13, 8, 12, 9, 12, 10, 11, 10, 14, 11, 13, 11, 13, 12, 12,
+ 15, 16, 14, 15, 15, 15, 14, 13, 8, 14, 9, 12, 11, 13, 11, 12,
+ 11, 15, 11, 13, 12, 14, 12, 12, 16, 16, 14, 15, 15, 16, 14, 14,
+ 10, 16, 9, 13, 12, 15, 11, 12, 13, 16, 11, 14, 13, 16, 12, 12,
+ 16, 16, 14, 15, 16, 16, 13, 13, 8, 14, 10, 13, 9, 13, 11, 12,
+ 11, 15, 12, 14, 11, 14, 12, 12, 16, 16, 15, 16, 15, 16, 14, 14,
+ 10, 15, 10, 13, 11, 14, 11, 12, 11, 15, 11, 14, 12, 14, 12, 12,
+ 15, 16, 14, 15, 14, 16, 13, 13, 12, 16, 11, 14, 13, 16, 12, 12,
+ 12, 16, 11, 14, 13, 15, 12, 12, 15, 16, 12, 14, 15, 16, 13, 13,
+ 11, 16, 12, 14, 10, 14, 11, 12, 13, 16, 13, 16, 12, 14, 12, 13,
+ 16, 16, 16, 16, 15, 16, 14, 13, 12, 16, 13, 15, 12, 14, 12, 13,
+ 13, 16, 13, 15, 12, 14, 12, 13, 16, 16, 14, 16, 13, 15, 13, 13,
+ 15, 16, 14, 16, 14, 16, 13, 13, 14, 16, 13, 15, 13, 16, 13, 13,
+ 14, 16, 12, 14, 13, 14, 12, 12, 9, 16, 11, 14, 11, 15, 12, 13,
+ 13, 16, 12, 14, 12, 15, 13, 13, 16, 16, 14, 15, 15, 15, 14, 13,
+ 11, 16, 11, 14, 12, 16, 12, 13, 13, 16, 12, 14, 13, 16, 13, 13,
+ 16, 16, 14, 15, 16, 16, 14, 13, 11, 16, 10, 13, 13, 16, 11, 12,
+ 14, 16, 11, 14, 14, 16, 12, 12, 16, 16, 14, 14, 16, 16, 13, 12,
+ 11, 16, 12, 14, 11, 15, 12, 13, 13, 16, 13, 15, 13, 16, 13, 13,
+ 16, 16, 15, 16, 15, 16, 14, 13, 11, 16, 12, 15, 12, 16, 12, 13,
+ 13, 16, 13, 15, 13, 16, 13, 13, 16, 16, 15, 16, 15, 16, 14, 13,
+ 12, 16, 11, 14, 13, 16, 12, 12, 13, 16, 11, 14, 13, 16, 12, 12,
+ 16, 16, 13, 14, 15, 16, 12, 12, 11, 16, 12, 14, 10, 14, 11, 12,
+ 14, 16, 13, 15, 12, 15, 12, 12, 16, 16, 16, 16, 14, 15, 13, 12,
+ 12, 16, 13, 15, 12, 14, 12, 12, 13, 16, 13, 15, 12, 14, 12, 12,
+ 16, 16, 14, 16, 14, 15, 12, 12, 14, 16, 13, 15, 13, 16, 11, 12,
+ 13, 16, 12, 14, 13, 15, 11, 11, 14, 16, 12, 13, 12, 13, 11, 10,
+ },
+ {
+ 0, 8, 4, 9, 5, 10, 9, 11, 5, 11, 9, 12, 9, 13, 12, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 4, 11, 6, 12, 9, 13, 11, 13,
+ 9, 16, 10, 16, 12, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 12, 16, 11, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 4, 12, 8, 13, 7, 12, 11, 16,
+ 8, 16, 11, 16, 11, 16, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 8, 16, 10, 16, 11, 16, 13, 16, 11, 16, 12, 16, 13, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 12, 16, 16, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 1, 10, 6, 11, 7, 12, 10, 13,
+ 7, 12, 10, 13, 10, 16, 12, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 5, 13, 7, 12, 10, 16, 12, 16, 10, 16, 11, 16, 12, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 11, 16, 11, 16, 16, 16, 16, 16,
+ 16, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 5, 13, 9, 16, 8, 16, 12, 16, 9, 16, 12, 16, 11, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 9, 16, 11, 16, 12, 16, 13, 16,
+ 10, 16, 12, 16, 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 12, 16, 16, 16, 12, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 6, 16, 9, 16, 10, 16, 13, 16, 11, 16, 12, 16, 13, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 8, 16, 9, 16, 12, 16, 13, 16,
+ 12, 16, 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 11, 16, 10, 16, 16, 16, 16, 16, 16, 16, 13, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 8, 16, 11, 16, 11, 16, 13, 16,
+ 12, 16, 13, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 10, 16, 12, 16, 13, 16, 16, 16, 11, 16, 13, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 15, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 12, 16, 16, 16, 11, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 7, 16, 11, 16, 12, 16, 16, 16,
+ 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 10, 16, 11, 16, 13, 16, 16, 16, 16, 16, 13, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 11, 16, 10, 16, 16, 16, 16, 16,
+ 16, 16, 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 10, 16, 12, 16, 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 11, 16, 13, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 13, 16, 12, 16, 16, 16, 16, 16, 16, 16, 12, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 11, 16, 16, 16, 11, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 13, 16, 16, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ },
+ },
+ {
+ {
+ 0, 7, 4, 8, 4, 8, 7, 8, 6, 10, 7, 10, 8, 10, 9, 10,
+ 13, 16, 13, 15, 13, 15, 13, 14, 4, 10, 5, 10, 7, 10, 9, 10,
+ 7, 12, 8, 11, 9, 12, 10, 11, 13, 16, 13, 15, 13, 16, 13, 14,
+ 9, 16, 9, 13, 12, 16, 11, 13, 11, 16, 11, 14, 13, 16, 12, 14,
+ 15, 16, 14, 16, 15, 16, 14, 14, 4, 10, 7, 10, 6, 10, 9, 10,
+ 8, 12, 9, 12, 9, 11, 10, 12, 13, 16, 13, 16, 14, 16, 13, 14,
+ 7, 12, 8, 12, 9, 12, 10, 12, 9, 13, 10, 13, 10, 13, 11, 12,
+ 14, 16, 13, 16, 14, 16, 13, 14, 12, 16, 11, 14, 13, 16, 12, 14,
+ 13, 16, 12, 16, 14, 16, 13, 14, 16, 16, 14, 16, 15, 16, 14, 14,
+ 10, 16, 12, 15, 10, 14, 12, 13, 12, 16, 13, 16, 12, 14, 13, 14,
+ 15, 16, 14, 16, 14, 16, 14, 14, 12, 16, 13, 16, 12, 16, 13, 14,
+ 13, 16, 13, 16, 13, 16, 13, 14, 16, 16, 14, 16, 15, 16, 14, 15,
+ 15, 16, 14, 16, 15, 16, 13, 15, 16, 16, 14, 16, 15, 16, 14, 15,
+ 16, 16, 16, 16, 16, 16, 15, 15, 2, 9, 5, 10, 6, 10, 8, 10,
+ 7, 11, 8, 11, 9, 11, 10, 11, 14, 16, 13, 15, 14, 15, 14, 14,
+ 5, 11, 6, 11, 8, 12, 9, 11, 8, 13, 9, 12, 10, 13, 11, 12,
+ 14, 16, 13, 15, 14, 16, 14, 14, 9, 16, 8, 13, 12, 16, 11, 13,
+ 12, 16, 11, 14, 13, 16, 12, 13, 16, 16, 14, 16, 15, 16, 14, 15,
+ 5, 12, 8, 11, 7, 11, 9, 11, 9, 13, 10, 13, 10, 13, 11, 12,
+ 14, 16, 14, 16, 14, 16, 14, 15, 8, 13, 9, 13, 10, 13, 11, 12,
+ 9, 13, 10, 13, 10, 13, 11, 13, 13, 16, 13, 15, 13, 16, 13, 14,
+ 12, 16, 11, 14, 13, 16, 12, 14, 12, 16, 11, 14, 13, 16, 12, 14,
+ 14, 16, 13, 16, 14, 16, 13, 14, 10, 16, 12, 15, 9, 14, 11, 13,
+ 12, 16, 13, 16, 12, 15, 12, 14, 16, 16, 15, 16, 15, 16, 14, 14,
+ 12, 16, 13, 16, 12, 16, 12, 14, 12, 16, 13, 16, 12, 15, 13, 14,
+ 15, 16, 14, 16, 14, 16, 14, 14, 15, 16, 14, 16, 14, 16, 14, 15,
+ 14, 16, 13, 16, 14, 16, 13, 14, 16, 16, 14, 16, 14, 16, 13, 14,
+ 6, 14, 9, 13, 9, 14, 11, 12, 10, 14, 11, 13, 11, 14, 12, 13,
+ 16, 16, 15, 16, 15, 16, 14, 15, 8, 15, 9, 13, 11, 14, 11, 13,
+ 11, 16, 11, 14, 12, 15, 12, 14, 16, 16, 15, 16, 16, 16, 15, 16,
+ 10, 16, 9, 14, 12, 16, 12, 13, 13, 16, 11, 15, 14, 16, 13, 14,
+ 16, 16, 15, 16, 16, 16, 15, 16, 8, 15, 10, 13, 10, 14, 11, 13,
+ 11, 16, 12, 14, 12, 14, 12, 14, 16, 16, 16, 16, 16, 16, 15, 16,
+ 10, 16, 11, 14, 11, 15, 12, 13, 11, 16, 11, 14, 12, 15, 12, 14,
+ 16, 16, 14, 16, 15, 16, 14, 15, 12, 16, 11, 15, 13, 16, 13, 14,
+ 12, 16, 11, 14, 13, 16, 13, 14, 15, 16, 13, 16, 16, 16, 14, 15,
+ 10, 16, 12, 15, 10, 15, 12, 13, 13, 16, 13, 16, 12, 16, 13, 14,
+ 16, 16, 16, 16, 16, 16, 15, 15, 12, 16, 13, 16, 12, 16, 13, 14,
+ 12, 16, 13, 16, 12, 16, 13, 14, 16, 16, 15, 16, 14, 16, 14, 15,
+ 15, 16, 14, 16, 15, 16, 14, 15, 14, 16, 13, 16, 14, 16, 13, 14,
+ 14, 16, 13, 15, 14, 16, 13, 14, 9, 16, 11, 16, 11, 16, 12, 14,
+ 13, 16, 12, 16, 13, 16, 13, 14, 16, 16, 15, 16, 16, 16, 15, 15,
+ 11, 16, 11, 16, 12, 16, 13, 14, 13, 16, 12, 16, 13, 16, 13, 14,
+ 16, 16, 15, 16, 16, 16, 15, 15, 11, 16, 10, 15, 13, 16, 12, 13,
+ 14, 16, 12, 16, 14, 16, 13, 14, 16, 16, 15, 16, 16, 16, 14, 14,
+ 11, 16, 12, 16, 11, 16, 13, 14, 13, 16, 13, 16, 13, 16, 13, 14,
+ 16, 16, 16, 16, 16, 16, 15, 15, 11, 16, 12, 16, 12, 16, 13, 14,
+ 13, 16, 13, 16, 13, 16, 13, 14, 16, 16, 15, 16, 16, 16, 15, 14,
+ 12, 16, 12, 15, 13, 16, 12, 14, 13, 16, 12, 16, 14, 16, 13, 14,
+ 16, 16, 14, 16, 16, 16, 14, 14, 11, 16, 12, 16, 11, 16, 12, 14,
+ 14, 16, 14, 16, 12, 16, 13, 14, 16, 16, 16, 16, 16, 16, 15, 14,
+ 12, 16, 13, 16, 12, 16, 12, 14, 13, 16, 13, 16, 12, 16, 13, 14,
+ 16, 16, 16, 16, 15, 16, 14, 14, 14, 16, 13, 16, 14, 16, 12, 14,
+ 13, 16, 13, 16, 13, 16, 12, 13, 15, 16, 13, 14, 14, 15, 13, 13,
+ },
+ {
+ 0, 8, 4, 10, 5, 11, 10, 16, 5, 12, 9, 16, 10, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 4, 12, 7, 12, 9, 16, 12, 16,
+ 9, 16, 11, 16, 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 12, 16, 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 4, 12, 9, 16, 8, 16, 12, 16,
+ 8, 16, 11, 16, 11, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 8, 16, 11, 16, 12, 16, 16, 16, 11, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 1, 11, 6, 12, 7, 16, 11, 16,
+ 7, 16, 10, 16, 11, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 5, 16, 7, 16, 10, 16, 12, 16, 9, 16, 11, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 11, 16, 11, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 5, 16, 10, 16, 8, 16, 12, 16, 9, 16, 12, 16, 12, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 9, 16, 11, 16, 12, 16, 16, 16,
+ 10, 16, 12, 16, 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 12, 16, 16, 16, 12, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 5, 16, 9, 16, 10, 16, 16, 16, 11, 16, 12, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 8, 16, 9, 16, 12, 16, 16, 16,
+ 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 11, 16, 10, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 8, 16, 11, 16, 11, 16, 16, 16,
+ 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 10, 16, 12, 16, 16, 16, 16, 16, 11, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 11, 16, 16, 16, 11, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 6, 16, 10, 16, 12, 16, 16, 16,
+ 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 9, 16, 10, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 11, 16, 9, 16, 16, 16, 16, 16,
+ 16, 16, 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 9, 16, 16, 16, 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 11, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 13, 16, 12, 16, 16, 16, 16, 16, 16, 16, 12, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 11, 16, 16, 16, 11, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ },
+ },
+ {
+ {
+ 0, 8, 4, 10, 5, 9, 8, 10, 6, 11, 8, 12, 8, 11, 10, 13,
+ 14, 16, 14, 16, 14, 16, 16, 16, 3, 11, 5, 11, 8, 12, 10, 12,
+ 7, 12, 9, 13, 10, 13, 11, 13, 14, 16, 14, 16, 16, 16, 16, 16,
+ 9, 16, 9, 16, 12, 16, 12, 16, 11, 16, 11, 16, 14, 16, 13, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 4, 11, 7, 12, 6, 11, 10, 12,
+ 8, 13, 9, 13, 9, 13, 11, 14, 13, 16, 14, 16, 14, 16, 16, 16,
+ 7, 13, 9, 13, 9, 13, 11, 13, 9, 14, 10, 16, 11, 16, 12, 16,
+ 15, 16, 14, 16, 16, 16, 16, 16, 12, 16, 12, 16, 14, 16, 13, 16,
+ 13, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 10, 16, 12, 16, 10, 16, 13, 16, 12, 16, 13, 16, 12, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 12, 16, 13, 16, 13, 16, 14, 16,
+ 13, 16, 14, 16, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 2, 10, 5, 11, 6, 11, 9, 11,
+ 7, 12, 9, 13, 9, 13, 11, 13, 14, 16, 16, 16, 16, 16, 16, 16,
+ 4, 12, 6, 12, 8, 13, 10, 13, 8, 13, 9, 13, 11, 16, 12, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 9, 16, 8, 16, 12, 16, 12, 16,
+ 12, 16, 11, 16, 13, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 5, 12, 8, 13, 7, 13, 10, 13, 8, 14, 10, 14, 10, 14, 12, 14,
+ 16, 16, 16, 16, 16, 16, 16, 16, 7, 14, 9, 16, 10, 16, 11, 14,
+ 8, 14, 10, 16, 11, 16, 12, 16, 13, 16, 14, 16, 16, 16, 16, 16,
+ 11, 16, 11, 16, 13, 16, 13, 16, 12, 16, 12, 16, 14, 16, 14, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 9, 16, 12, 16, 10, 16, 12, 16,
+ 12, 16, 13, 16, 12, 16, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 12, 16, 13, 16, 12, 16, 14, 16, 12, 16, 13, 16, 12, 16, 14, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 6, 16, 8, 16, 9, 16, 11, 13, 10, 16, 11, 16, 11, 16, 13, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 7, 16, 9, 16, 10, 16, 12, 16,
+ 11, 16, 11, 16, 12, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 10, 16, 9, 16, 12, 16, 13, 16, 13, 16, 12, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 7, 16, 10, 16, 9, 16, 12, 16,
+ 11, 16, 12, 16, 12, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 9, 16, 11, 16, 11, 16, 12, 16, 11, 16, 12, 16, 13, 16, 13, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 12, 16, 12, 16, 14, 16, 14, 16,
+ 12, 16, 12, 16, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 10, 16, 12, 16, 10, 16, 13, 16, 13, 16, 14, 16, 13, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 12, 16, 13, 16, 12, 16, 14, 16,
+ 12, 16, 14, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 14, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 9, 16, 11, 16, 11, 16, 13, 16,
+ 12, 16, 13, 16, 13, 16, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 10, 16, 11, 16, 13, 16, 13, 16, 13, 16, 13, 16, 14, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 11, 16, 11, 16, 14, 16, 13, 16,
+ 14, 16, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 10, 16, 12, 16, 12, 16, 13, 16, 13, 16, 13, 16, 13, 16, 14, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 11, 16, 12, 16, 13, 16, 14, 16,
+ 13, 16, 13, 16, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 12, 16, 12, 16, 14, 16, 14, 16, 14, 16, 14, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 11, 16, 13, 16, 11, 16, 14, 16,
+ 16, 16, 16, 16, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 12, 16, 14, 16, 13, 16, 14, 16, 16, 16, 16, 16, 14, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ },
+ {
+ 0, 10, 4, 12, 5, 16, 11, 16, 6, 16, 10, 16, 11, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 3, 16, 7, 16, 10, 16, 16, 16,
+ 9, 16, 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 4, 16, 10, 16, 9, 16, 16, 16,
+ 9, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 9, 16, 12, 16, 16, 16, 16, 16, 11, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 1, 16, 6, 16, 8, 16, 16, 16,
+ 8, 16, 11, 16, 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 5, 16, 8, 16, 11, 16, 16, 16, 10, 16, 12, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 12, 16, 11, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 5, 16, 11, 16, 9, 16, 16, 16, 10, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 9, 16, 12, 16, 16, 16, 16, 16,
+ 10, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 5, 16, 9, 16, 11, 16, 16, 16, 11, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 8, 16, 10, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 12, 16, 11, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 8, 16, 16, 16, 11, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 10, 16, 16, 16, 16, 16, 16, 16, 12, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 12, 16, 16, 16, 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 6, 16, 11, 16, 15, 16, 16, 16,
+ 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 9, 16, 10, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 11, 16, 9, 16, 16, 16, 16, 16,
+ 16, 16, 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 10, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 11, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 12, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 12, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ },
+ },
+};
+
+static const uint8_t rv34_table_inter_secondpat[NUM_INTER_TABLES][2][OTHERBLK_VLC_SIZE] = {
+ {
+ {
+ 0, 4, 8, 3, 6, 8, 6, 7, 8, 4, 6, 8, 6, 7, 8, 7,
+ 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 8, 3, 6, 8, 4, 6,
+ 9, 7, 7, 8, 5, 7, 9, 6, 7, 9, 8, 8, 8, 7, 8, 8,
+ 8, 8, 8, 8, 8, 7, 6, 8, 9, 7, 8, 9, 7, 8, 9, 7,
+ 8, 9, 7, 8, 9, 8, 8, 9, 8, 8, 9, 8, 9, 9, 8, 8,
+ 8, 8, 9, 9, 8, 9, 9, 7, 8, 8, 8, 9, 9, 8, 9, 9,
+ 8, 8, 8, 7, 8, 8, 8, 8, 8, 7, 7, 6,
+ },
+ {
+ 0, 4, 9, 3, 6, 9, 7, 8, 10, 3, 6, 9, 5, 7, 10, 9,
+ 9, 10, 7, 8, 10, 8, 9, 10, 10, 10, 10, 2, 6, 9, 4, 7,
+ 10, 8, 9, 10, 4, 7, 10, 6, 7, 10, 9, 9, 10, 7, 8, 10,
+ 8, 9, 10, 10, 10, 10, 6, 8, 10, 7, 9, 11, 9, 10, 11, 7,
+ 9, 11, 8, 9, 11, 10, 10, 11, 8, 9, 11, 9, 10, 11, 11, 11,
+ 10, 8, 10, 11, 9, 10, 11, 9, 10, 11, 8, 10, 11, 9, 10, 11,
+ 10, 10, 11, 8, 10, 11, 9, 10, 10, 10, 10, 9,
+ },
+ },
+ {
+ {
+ 0, 4, 8, 3, 6, 8, 6, 7, 9, 4, 6, 8, 5, 7, 8, 8,
+ 8, 9, 7, 7, 8, 8, 8, 8, 8, 9, 8, 3, 6, 8, 4, 6,
+ 9, 7, 7, 9, 5, 6, 9, 6, 7, 9, 8, 8, 9, 7, 8, 8,
+ 8, 8, 8, 8, 8, 8, 6, 8, 9, 7, 8, 10, 7, 8, 9, 7,
+ 8, 10, 7, 8, 10, 8, 8, 9, 7, 8, 9, 8, 9, 9, 9, 9,
+ 8, 7, 9, 10, 8, 9, 10, 8, 8, 8, 8, 9, 10, 8, 9, 9,
+ 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 7, 6,
+ },
+ {
+ 0, 4, 9, 3, 6, 10, 8, 9, 11, 3, 5, 9, 5, 7, 10, 9,
+ 10, 11, 7, 8, 10, 9, 9, 11, 11, 11, 12, 2, 5, 10, 4, 7,
+ 10, 8, 9, 11, 4, 6, 10, 6, 7, 10, 9, 10, 11, 7, 9, 10,
+ 9, 9, 11, 11, 11, 11, 6, 8, 11, 7, 9, 11, 9, 10, 12, 7,
+ 9, 11, 8, 9, 12, 10, 10, 12, 8, 10, 11, 10, 10, 11, 12, 11,
+ 11, 8, 10, 12, 9, 11, 12, 10, 11, 12, 9, 10, 12, 10, 11, 12,
+ 11, 11, 12, 9, 10, 12, 10, 10, 11, 11, 11, 10,
+ },
+ },
+ {
+ {
+ 0, 4, 8, 3, 6, 9, 7, 8, 9, 4, 6, 8, 5, 7, 9, 8,
+ 9, 9, 7, 8, 9, 8, 8, 9, 9, 9, 9, 2, 6, 9, 4, 6,
+ 9, 7, 8, 10, 5, 7, 9, 6, 7, 9, 8, 8, 9, 7, 8, 9,
+ 8, 8, 9, 9, 9, 9, 6, 8, 10, 7, 8, 10, 8, 9, 10, 6,
+ 8, 10, 8, 8, 10, 9, 9, 10, 8, 9, 10, 9, 9, 10, 10, 10,
+ 9, 8, 9, 10, 8, 9, 10, 8, 9, 10, 8, 9, 10, 9, 9, 10,
+ 9, 9, 9, 8, 9, 9, 8, 9, 9, 9, 9, 8,
+ },
+ {
+ 0, 4, 10, 3, 6, 10, 8, 10, 12, 2, 6, 10, 6, 8, 11, 10,
+ 11, 12, 7, 9, 11, 9, 10, 12, 12, 13, 13, 2, 6, 10, 4, 7,
+ 11, 9, 10, 13, 4, 7, 11, 7, 8, 11, 10, 11, 12, 8, 9, 12,
+ 10, 10, 12, 12, 12, 13, 6, 9, 12, 8, 10, 13, 10, 12, 14, 7,
+ 10, 13, 9, 10, 13, 11, 11, 13, 9, 11, 13, 11, 11, 13, 13, 13,
+ 13, 9, 11, 13, 10, 12, 14, 11, 12, 14, 9, 11, 14, 11, 12, 14,
+ 12, 12, 14, 9, 12, 13, 11, 12, 13, 13, 12, 12,
+ },
+ },
+ {
+ {
+ 0, 4, 9, 3, 6, 9, 7, 8, 10, 3, 6, 9, 6, 7, 9, 9,
+ 9, 10, 7, 8, 9, 8, 9, 10, 10, 10, 11, 2, 6, 9, 4, 7,
+ 10, 7, 9, 10, 4, 7, 10, 6, 7, 10, 9, 9, 10, 7, 8, 10,
+ 8, 9, 10, 10, 10, 10, 6, 8, 11, 7, 9, 11, 8, 10, 11, 6,
+ 9, 11, 8, 9, 11, 9, 9, 11, 8, 9, 11, 9, 10, 11, 11, 10,
+ 10, 8, 10, 11, 9, 10, 11, 9, 10, 11, 8, 10, 11, 9, 10, 11,
+ 10, 10, 11, 8, 10, 11, 9, 10, 11, 10, 10, 10,
+ },
+ {
+ 0, 4, 12, 3, 7, 12, 10, 11, 14, 3, 6, 12, 7, 9, 13, 12,
+ 13, 14, 8, 11, 13, 11, 12, 14, 14, 14, 14, 1, 7, 12, 5, 8,
+ 13, 10, 12, 14, 4, 8, 13, 8, 9, 13, 12, 13, 14, 9, 11, 14,
+ 11, 12, 14, 14, 14, 14, 7, 10, 14, 9, 11, 14, 11, 13, 16, 8,
+ 11, 14, 10, 12, 14, 13, 13, 16, 10, 12, 15, 12, 13, 15, 15, 15,
+ 15, 10, 13, 15, 12, 13, 14, 13, 15, 15, 10, 13, 15, 12, 13, 15,
+ 13, 14, 15, 10, 13, 14, 12, 13, 14, 14, 14, 14,
+ },
+ },
+ {
+ {
+ 0, 4, 9, 3, 6, 10, 7, 9, 11, 3, 5, 9, 5, 7, 10, 9,
+ 10, 12, 7, 8, 10, 9, 10, 11, 11, 12, 12, 2, 6, 10, 4, 7,
+ 10, 7, 9, 12, 4, 7, 10, 6, 7, 11, 9, 10, 12, 7, 9, 11,
+ 9, 9, 11, 11, 11, 12, 5, 8, 11, 7, 9, 12, 9, 10, 13, 6,
+ 9, 12, 8, 9, 12, 10, 10, 12, 8, 10, 12, 10, 10, 12, 12, 12,
+ 12, 8, 10, 12, 9, 11, 13, 10, 11, 13, 9, 11, 13, 10, 11, 13,
+ 11, 11, 13, 9, 11, 12, 10, 11, 12, 11, 11, 12,
+ },
+ {
+ 0, 4, 12, 3, 7, 13, 10, 12, 15, 3, 7, 13, 7, 9, 14, 12,
+ 12, 13, 8, 11, 14, 11, 13, 15, 15, 14, 14, 1, 6, 13, 5, 8,
+ 13, 10, 13, 15, 4, 8, 13, 8, 9, 14, 13, 13, 15, 8, 11, 14,
+ 12, 12, 15, 15, 14, 14, 7, 10, 13, 9, 11, 13, 12, 14, 16, 8,
+ 11, 14, 10, 12, 15, 13, 13, 16, 10, 12, 15, 12, 13, 15, 15, 14,
+ 15, 11, 12, 14, 12, 14, 14, 13, 15, 15, 10, 12, 14, 12, 13, 15,
+ 14, 15, 15, 10, 13, 13, 12, 13, 15, 14, 14, 15,
+ },
+ },
+ {
+ {
+ 0, 5, 10, 3, 7, 11, 9, 11, 14, 3, 7, 11, 7, 8, 12, 11,
+ 12, 14, 7, 9, 12, 10, 11, 14, 13, 14, 16, 1, 7, 11, 5, 8,
+ 12, 9, 11, 15, 4, 8, 12, 7, 9, 13, 11, 12, 15, 8, 10, 13,
+ 10, 11, 14, 14, 14, 16, 6, 9, 13, 8, 11, 14, 10, 13, 16, 7,
+ 10, 14, 9, 11, 15, 12, 13, 16, 9, 11, 15, 12, 12, 15, 14, 14,
+ 16, 10, 12, 14, 11, 13, 15, 12, 14, 16, 10, 12, 15, 11, 13, 16,
+ 13, 14, 16, 10, 13, 16, 12, 13, 15, 14, 15, 16,
+ },
+ {
+ 0, 5, 16, 3, 8, 14, 11, 13, 14, 2, 8, 14, 8, 10, 16, 13,
+ 13, 14, 9, 13, 16, 12, 13, 16, 16, 14, 16, 1, 7, 14, 6, 10,
+ 14, 12, 16, 16, 5, 9, 14, 9, 11, 16, 15, 16, 16, 10, 12, 16,
+ 13, 13, 16, 16, 14, 16, 8, 11, 14, 11, 13, 14, 14, 14, 16, 8,
+ 12, 14, 11, 13, 16, 16, 16, 16, 10, 12, 15, 13, 14, 16, 16, 16,
+ 16, 11, 14, 14, 14, 15, 16, 16, 15, 16, 10, 13, 16, 13, 14, 14,
+ 16, 16, 16, 10, 13, 16, 13, 14, 16, 16, 16, 16,
+ },
+ },
+ {
+ {
+ 0, 5, 11, 3, 7, 13, 9, 12, 16, 3, 7, 12, 6, 9, 14, 11,
+ 13, 16, 7, 10, 16, 11, 12, 16, 16, 16, 16, 1, 6, 12, 5, 9,
+ 16, 9, 13, 16, 4, 8, 16, 7, 10, 16, 12, 15, 16, 7, 11, 16,
+ 11, 12, 16, 16, 16, 16, 6, 10, 15, 8, 11, 16, 11, 14, 16, 7,
+ 11, 16, 10, 12, 16, 13, 16, 16, 9, 13, 16, 13, 14, 16, 16, 16,
+ 16, 10, 12, 16, 12, 16, 16, 16, 16, 16, 11, 13, 16, 13, 16, 16,
+ 16, 16, 16, 12, 16, 16, 16, 16, 16, 16, 16, 16,
+ },
+ {
+ 0, 5, 16, 3, 8, 16, 12, 12, 16, 2, 8, 16, 8, 10, 16, 13,
+ 13, 16, 9, 13, 16, 12, 13, 16, 16, 16, 16, 1, 8, 16, 6, 10,
+ 16, 12, 16, 16, 5, 9, 16, 9, 11, 16, 13, 16, 16, 9, 12, 14,
+ 12, 12, 16, 16, 16, 16, 8, 11, 13, 11, 12, 16, 14, 16, 16, 8,
+ 12, 16, 11, 13, 16, 16, 15, 16, 9, 13, 14, 12, 13, 16, 16, 16,
+ 16, 10, 12, 13, 14, 13, 16, 16, 16, 16, 9, 13, 16, 13, 12, 16,
+ 16, 16, 16, 10, 12, 16, 14, 15, 16, 16, 16, 16,
+ },
+ },
+};
+
+static const uint8_t rv34_table_inter_thirdpat[NUM_INTER_TABLES][2][OTHERBLK_VLC_SIZE] = {
+ {
+ {
+ 0, 5, 8, 3, 6, 9, 6, 7, 9, 4, 6, 9, 6, 7, 9, 8,
+ 8, 9, 7, 8, 9, 8, 9, 9, 9, 9, 9, 2, 6, 9, 4, 7,
+ 9, 7, 8, 9, 5, 7, 9, 6, 7, 9, 8, 8, 9, 7, 8, 9,
+ 8, 9, 9, 9, 9, 8, 5, 8, 10, 6, 8, 10, 8, 9, 9, 7,
+ 8, 10, 7, 9, 10, 8, 9, 9, 8, 9, 10, 9, 9, 10, 9, 9,
+ 9, 7, 9, 10, 8, 9, 10, 8, 8, 9, 8, 9, 10, 8, 9, 10,
+ 8, 8, 9, 8, 9, 9, 8, 9, 9, 8, 8, 7,
+ },
+ {
+ 0, 4, 9, 2, 6, 10, 7, 8, 10, 3, 6, 10, 6, 7, 10, 9,
+ 9, 10, 8, 9, 11, 9, 10, 11, 10, 11, 11, 2, 6, 10, 4, 7,
+ 10, 8, 9, 10, 5, 7, 10, 7, 8, 10, 9, 9, 10, 9, 10, 11,
+ 10, 10, 11, 11, 11, 11, 6, 9, 11, 7, 9, 11, 9, 10, 12, 8,
+ 9, 11, 8, 10, 11, 10, 10, 11, 10, 11, 12, 11, 11, 12, 11, 11,
+ 11, 9, 11, 12, 10, 11, 12, 10, 11, 12, 10, 11, 12, 10, 11, 12,
+ 11, 11, 12, 11, 12, 12, 11, 12, 12, 12, 11, 11,
+ },
+ },
+ {
+ {
+ 0, 4, 9, 3, 6, 9, 6, 8, 9, 4, 6, 9, 5, 7, 9, 8,
+ 8, 9, 7, 8, 10, 8, 9, 10, 9, 9, 9, 2, 6, 9, 4, 7,
+ 9, 7, 8, 9, 5, 7, 9, 6, 7, 10, 8, 9, 9, 7, 9, 10,
+ 8, 9, 10, 9, 9, 9, 5, 8, 10, 6, 8, 10, 8, 9, 10, 7,
+ 8, 10, 7, 9, 11, 9, 9, 10, 8, 9, 10, 9, 10, 10, 10, 10,
+ 9, 7, 9, 10, 8, 9, 11, 8, 9, 10, 8, 9, 11, 8, 9, 11,
+ 9, 9, 10, 9, 9, 10, 9, 9, 10, 9, 9, 8,
+ },
+ {
+ 0, 4, 9, 2, 5, 10, 7, 8, 11, 3, 6, 10, 6, 7, 10, 9,
+ 10, 11, 8, 9, 11, 9, 10, 11, 11, 11, 12, 2, 6, 10, 4, 7,
+ 10, 8, 9, 11, 5, 7, 10, 6, 8, 10, 9, 10, 11, 9, 10, 12,
+ 10, 10, 12, 11, 12, 12, 6, 9, 11, 8, 9, 12, 9, 11, 13, 8,
+ 10, 12, 9, 10, 12, 11, 11, 12, 10, 12, 13, 11, 12, 13, 13, 12,
+ 13, 10, 11, 13, 10, 12, 13, 11, 12, 13, 11, 12, 13, 11, 12, 13,
+ 12, 12, 13, 12, 13, 14, 13, 13, 14, 13, 13, 13,
+ },
+ },
+ {
+ {
+ 0, 4, 9, 3, 6, 9, 7, 8, 10, 3, 6, 9, 5, 7, 10, 8,
+ 9, 10, 7, 9, 10, 8, 9, 10, 10, 10, 10, 2, 6, 9, 4, 7,
+ 10, 7, 9, 10, 4, 7, 10, 6, 8, 10, 8, 9, 10, 8, 9, 10,
+ 9, 9, 10, 10, 10, 10, 5, 8, 11, 7, 9, 11, 8, 10, 11, 7,
+ 9, 11, 8, 9, 11, 9, 10, 11, 9, 10, 11, 10, 10, 11, 11, 11,
+ 11, 8, 10, 11, 9, 10, 11, 9, 10, 11, 9, 10, 12, 9, 10, 12,
+ 10, 11, 11, 9, 10, 11, 10, 11, 11, 10, 10, 10,
+ },
+ {
+ 0, 4, 10, 3, 6, 11, 8, 10, 12, 3, 6, 11, 6, 8, 11, 10,
+ 11, 13, 9, 10, 13, 11, 12, 14, 13, 13, 14, 1, 6, 10, 5, 8,
+ 12, 9, 10, 13, 5, 8, 11, 7, 9, 12, 11, 11, 13, 10, 12, 13,
+ 11, 12, 14, 14, 13, 15, 7, 10, 12, 9, 11, 14, 11, 12, 15, 9,
+ 11, 13, 10, 11, 14, 12, 12, 14, 12, 14, 16, 13, 13, 16, 14, 14,
+ 16, 12, 13, 15, 12, 14, 15, 13, 14, 16, 13, 14, 16, 14, 14, 16,
+ 14, 15, 16, 14, 16, 16, 15, 16, 16, 15, 15, 16,
+ },
+ },
+ {
+ {
+ 0, 4, 9, 2, 6, 9, 7, 9, 11, 3, 6, 10, 6, 7, 10, 9,
+ 10, 11, 7, 9, 10, 9, 10, 11, 11, 11, 12, 2, 6, 10, 4, 7,
+ 10, 8, 9, 11, 5, 7, 10, 7, 8, 10, 9, 10, 11, 8, 9, 11,
+ 9, 10, 11, 11, 12, 11, 6, 9, 11, 7, 10, 12, 9, 11, 12, 7,
+ 10, 12, 9, 10, 12, 11, 11, 12, 9, 11, 12, 10, 11, 12, 12, 12,
+ 12, 9, 11, 12, 9, 11, 13, 11, 12, 13, 9, 11, 13, 10, 12, 13,
+ 11, 12, 13, 11, 12, 13, 11, 12, 13, 12, 13, 12,
+ },
+ {
+ 0, 4, 11, 2, 6, 12, 9, 11, 16, 4, 7, 12, 7, 9, 15, 11,
+ 12, 16, 10, 11, 16, 11, 13, 16, 16, 16, 16, 1, 6, 11, 5, 8,
+ 16, 9, 12, 16, 6, 9, 15, 8, 10, 16, 12, 13, 16, 12, 14, 16,
+ 12, 16, 16, 16, 16, 16, 8, 11, 14, 10, 12, 16, 12, 16, 16, 10,
+ 13, 16, 12, 16, 16, 13, 14, 16, 14, 16, 16, 16, 16, 16, 16, 16,
+ 16, 13, 13, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ },
+ },
+ {
+ {
+ 0, 5, 9, 3, 6, 10, 8, 10, 12, 3, 6, 10, 6, 8, 10, 10,
+ 11, 12, 8, 9, 11, 10, 10, 12, 13, 13, 13, 1, 6, 10, 5, 8,
+ 11, 9, 11, 13, 5, 8, 11, 7, 9, 11, 11, 11, 13, 8, 9, 11,
+ 10, 10, 12, 13, 13, 14, 6, 9, 12, 8, 11, 13, 11, 13, 15, 8,
+ 10, 13, 10, 11, 13, 12, 13, 15, 10, 12, 13, 12, 12, 13, 14, 14,
+ 14, 9, 12, 14, 11, 13, 15, 13, 15, 16, 11, 13, 15, 12, 14, 15,
+ 14, 15, 16, 13, 14, 15, 14, 14, 15, 15, 16, 16,
+ },
+ {
+ 0, 4, 16, 2, 7, 16, 10, 13, 16, 3, 8, 16, 7, 10, 16, 16,
+ 16, 16, 12, 16, 16, 15, 16, 16, 16, 16, 16, 1, 7, 16, 6, 9,
+ 16, 10, 16, 16, 7, 12, 16, 9, 13, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 9, 16, 16, 11, 13, 16, 16, 16, 16, 12,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ },
+ },
+ {
+ {
+ 0, 4, 9, 3, 6, 11, 9, 12, 16, 3, 6, 10, 6, 8, 11, 12,
+ 13, 16, 8, 9, 12, 10, 11, 13, 16, 16, 16, 1, 6, 10, 5, 8,
+ 12, 10, 13, 16, 5, 8, 11, 8, 9, 13, 13, 14, 16, 9, 10, 14,
+ 11, 12, 15, 16, 16, 16, 6, 10, 13, 9, 12, 16, 14, 16, 16, 9,
+ 12, 14, 11, 13, 16, 16, 16, 16, 12, 14, 16, 14, 16, 16, 16, 16,
+ 16, 11, 16, 16, 13, 16, 16, 16, 16, 16, 12, 16, 16, 13, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ },
+ {
+ 0, 4, 16, 2, 8, 16, 10, 16, 16, 3, 9, 16, 8, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 1, 7, 16, 5, 10,
+ 16, 16, 16, 16, 7, 16, 16, 11, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 10, 15, 16, 10, 16, 16, 16, 16, 16, 14,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ },
+ },
+ {
+ {
+ 0, 3, 9, 3, 7, 11, 11, 15, 16, 3, 6, 11, 7, 9, 12, 16,
+ 16, 16, 8, 10, 16, 11, 16, 16, 16, 16, 16, 1, 6, 11, 6, 9,
+ 15, 16, 16, 16, 5, 8, 16, 9, 11, 16, 16, 16, 16, 10, 16, 16,
+ 16, 16, 16, 16, 16, 16, 7, 11, 16, 11, 16, 16, 16, 16, 16, 11,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ },
+ {
+ 0, 4, 16, 2, 8, 16, 16, 16, 16, 3, 12, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 1, 7, 16, 5, 12,
+ 16, 16, 16, 16, 6, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 9, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ },
+ },
+};
+
+
+static const uint8_t rv34_inter_coeff[NUM_INTER_TABLES][COEFF_VLC_SIZE] = {
+{
+ 1, 2, 4, 4, 5, 5, 6, 7, 7, 7, 8, 8, 8, 9, 9, 10,
+ 10, 10, 10, 11, 11, 11, 11, 12, 11, 11, 11, 13, 14, 15, 16, 16,
+},
+{
+ 1, 2, 3, 5, 5, 6, 6, 7, 7, 8, 9, 9, 9, 10, 10, 10,
+ 11, 11, 12, 12, 12, 12, 13, 13, 12, 12, 13, 14, 16, 16, 16, 16,
+},
+{
+ 1, 2, 3, 4, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 12,
+ 12, 12, 13, 13, 13, 14, 14, 15, 14, 14, 16, 16, 16, 16, 16, 16,
+},
+{
+ 1, 2, 3, 4, 5, 6, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12,
+ 13, 13, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+},
+{
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+},
+{
+ 1, 2, 3, 4, 5, 6, 7, 8, 10, 10, 11, 11, 13, 12, 12, 13,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+},
+{
+ 1, 2, 3, 4, 5, 6, 7, 8, 10, 10, 10, 11, 14, 13, 15, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 15, 16, 16, 16,
+}
+};
+
+#endif /* AVCODEC_RV34VLC_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv40.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv40.c
new file mode 100644
index 000000000..3738d2d45
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv40.c
@@ -0,0 +1,684 @@
+/*
+ * RV40 decoder
+ * Copyright (c) 2007 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/rv40.c
+ * RV40 decoder
+ */
+
+#include "avcodec.h"
+#include "dsputil.h"
+#include "mpegvideo.h"
+#include "golomb.h"
+
+#include "rv34.h"
+#include "rv40vlc2.h"
+#include "rv40data.h"
+
+static VLC aic_top_vlc;
+static VLC aic_mode1_vlc[AIC_MODE1_NUM], aic_mode2_vlc[AIC_MODE2_NUM];
+static VLC ptype_vlc[NUM_PTYPE_VLCS], btype_vlc[NUM_BTYPE_VLCS];
+
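+/* Start offsets of the individual AIC mode-2 VLCs inside the shared static
+ * aic_mode2_table[]; the difference between consecutive entries is the space
+ * reserved for one table, and the final entry (11814) is the total size
+ * (see rv40_init_tables() below). */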
+static const int16_t mode2_offs[] = {
+ 0, 614, 1222, 1794, 2410, 3014, 3586, 4202, 4792, 5382, 5966, 6542,
+ 7138, 7716, 8292, 8864, 9444, 10030, 10642, 11212, 11814
+};
+
+/**
+ * Initialize all tables.
+ */
+static av_cold void rv40_init_tables(void)
+{
+ int i;
+ static VLC_TYPE aic_table[1 << AIC_TOP_BITS][2];
+ static VLC_TYPE aic_mode1_table[AIC_MODE1_NUM << AIC_MODE1_BITS][2];
+ static VLC_TYPE aic_mode2_table[11814][2];
+ static VLC_TYPE ptype_table[NUM_PTYPE_VLCS << PTYPE_VLC_BITS][2];
+ static VLC_TYPE btype_table[NUM_BTYPE_VLCS << BTYPE_VLC_BITS][2];
+
+ aic_top_vlc.table = aic_table;
+ aic_top_vlc.table_allocated = 1 << AIC_TOP_BITS;
+ init_vlc(&aic_top_vlc, AIC_TOP_BITS, AIC_TOP_SIZE,
+ rv40_aic_top_vlc_bits, 1, 1,
+ rv40_aic_top_vlc_codes, 1, 1, INIT_VLC_USE_NEW_STATIC);
+ for(i = 0; i < AIC_MODE1_NUM; i++){
+ // Every tenth VLC table is empty
+ if((i % 10) == 9) continue;
+ aic_mode1_vlc[i].table = &aic_mode1_table[i << AIC_MODE1_BITS];
+ aic_mode1_vlc[i].table_allocated = 1 << AIC_MODE1_BITS;
+ init_vlc(&aic_mode1_vlc[i], AIC_MODE1_BITS, AIC_MODE1_SIZE,
+ aic_mode1_vlc_bits[i], 1, 1,
+ aic_mode1_vlc_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
+ }
+ for(i = 0; i < AIC_MODE2_NUM; i++){
+ aic_mode2_vlc[i].table = &aic_mode2_table[mode2_offs[i]];
+ aic_mode2_vlc[i].table_allocated = mode2_offs[i + 1] - mode2_offs[i];
+ init_vlc(&aic_mode2_vlc[i], AIC_MODE2_BITS, AIC_MODE2_SIZE,
+ aic_mode2_vlc_bits[i], 1, 1,
+ aic_mode2_vlc_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
+ }
+ for(i = 0; i < NUM_PTYPE_VLCS; i++){
+ ptype_vlc[i].table = &ptype_table[i << PTYPE_VLC_BITS];
+ ptype_vlc[i].table_allocated = 1 << PTYPE_VLC_BITS;
+ init_vlc_sparse(&ptype_vlc[i], PTYPE_VLC_BITS, PTYPE_VLC_SIZE,
+ ptype_vlc_bits[i], 1, 1,
+ ptype_vlc_codes[i], 1, 1,
+ ptype_vlc_syms, 1, 1, INIT_VLC_USE_NEW_STATIC);
+ }
+ for(i = 0; i < NUM_BTYPE_VLCS; i++){
+ btype_vlc[i].table = &btype_table[i << BTYPE_VLC_BITS];
+ btype_vlc[i].table_allocated = 1 << BTYPE_VLC_BITS;
+ init_vlc_sparse(&btype_vlc[i], BTYPE_VLC_BITS, BTYPE_VLC_SIZE,
+ btype_vlc_bits[i], 1, 1,
+ btype_vlc_codes[i], 1, 1,
+ btype_vlc_syms, 1, 1, INIT_VLC_USE_NEW_STATIC);
+ }
+}
+
+/**
+ * Get stored dimension from bitstream.
+ *
+ * If the width/height is the standard one then it's coded as a 3-bit index.
+ * Otherwise it is coded as escaped 8-bit portions.
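+ * In the escape case every 8-bit chunk contributes (chunk << 2) to the value
+ * and a chunk of 0xFF keeps the loop going, so e.g. the bytes 0xFF 0x0A
+ * decode to (0xFF + 0x0A) << 2 = 1060.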
+ */
+static int get_dimension(GetBitContext *gb, const int *dim)
+{
+ int t = get_bits(gb, 3);
+ int val = dim[t];
+ if(val < 0)
+ val = dim[get_bits1(gb) - val];
+ if(!val){
+ do{
+ t = get_bits(gb, 8);
+ val += t << 2;
+ }while(t == 0xFF);
+ }
+ return val;
+}
+
+/**
+ * Get encoded picture size - usually this is called from rv40_parse_slice_header.
+ */
+static void rv40_parse_picture_size(GetBitContext *gb, int *w, int *h)
+{
+ *w = get_dimension(gb, rv40_standard_widths);
+ *h = get_dimension(gb, rv40_standard_heights);
+}
+
+static int rv40_parse_slice_header(RV34DecContext *r, GetBitContext *gb, SliceInfo *si)
+{
+ int mb_bits;
+ int w = r->s.width, h = r->s.height;
+ int mb_size;
+
+ memset(si, 0, sizeof(SliceInfo));
+ if(get_bits1(gb))
+ return -1;
+ si->type = get_bits(gb, 2);
+ if(si->type == 1) si->type = 0;
+ si->quant = get_bits(gb, 5);
+ if(get_bits(gb, 2))
+ return -1;
+ si->vlc_set = get_bits(gb, 2);
+ skip_bits1(gb);
+ si->pts = get_bits(gb, 13);
+ if(!si->type || !get_bits1(gb))
+ rv40_parse_picture_size(gb, &w, &h);
+ if(avcodec_check_dimensions(r->s.avctx, w, h) < 0)
+ return -1;
+ si->width = w;
+ si->height = h;
+ mb_size = ((w + 15) >> 4) * ((h + 15) >> 4);
+ mb_bits = ff_rv34_get_start_offset(gb, mb_size);
+ si->start = get_bits(gb, mb_bits);
+
+ return 0;
+}
+
+/**
+ * Decode 4x4 intra types array.
+ */
+static int rv40_decode_intra_types(RV34DecContext *r, GetBitContext *gb, int8_t *dst)
+{
+ MpegEncContext *s = &r->s;
+ int i, j, k, v;
+ int A, B, C;
+ int pattern;
+ int8_t *ptr;
+
+ for(i = 0; i < 4; i++, dst += r->intra_types_stride){
+ if(!i && s->first_slice_line){
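+ // on the first line of a slice the four intra types of the top row are
+ // decoded from a single VLC code; each resulting value is either 0 or 2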
+ pattern = get_vlc2(gb, aic_top_vlc.table, AIC_TOP_BITS, 1);
+ dst[0] = (pattern >> 2) & 2;
+ dst[1] = (pattern >> 1) & 2;
+ dst[2] = pattern & 2;
+ dst[3] = (pattern << 1) & 2;
+ continue;
+ }
+ ptr = dst;
+ for(j = 0; j < 4; j++){
+ /* Coefficients are read using VLC chosen by the prediction pattern
+ * The first one (used for retrieving a pair of coefficients) is
+ * constructed from the top, top right and left coefficients
+ * The second one (used for retrieving only one coefficient) is
+ * top + 10 * left.
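+ * For example, top-right = 1, top = 1 and left = 2 give the pattern
+ * 1 + (1 << 4) + (2 << 8) = 0x211, which is listed in rv40_aic_table_index,
+ * so a pair of intra types is decoded at once with the matching mode-2 VLC.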
+ */
+ A = ptr[-r->intra_types_stride + 1]; // it won't be used for the last coefficient in a row
+ B = ptr[-r->intra_types_stride];
+ C = ptr[-1];
+ pattern = A + (B << 4) + (C << 8);
+ for(k = 0; k < MODE2_PATTERNS_NUM; k++)
+ if(pattern == rv40_aic_table_index[k])
+ break;
+ if(j < 3 && k < MODE2_PATTERNS_NUM){ // pattern found, decode two coefficients at once
+ v = get_vlc2(gb, aic_mode2_vlc[k].table, AIC_MODE2_BITS, 2);
+ *ptr++ = v/9;
+ *ptr++ = v%9;
+ j++;
+ }else{
+ if(B != -1 && C != -1)
+ v = get_vlc2(gb, aic_mode1_vlc[B + C*10].table, AIC_MODE1_BITS, 1);
+ else{ // tricky decoding
+ v = 0;
+ switch(C){
+ case -1: // code 0 -> 1, 1 -> 0
+ if(B < 2)
+ v = get_bits1(gb) ^ 1;
+ break;
+ case 0:
+ case 2: // code 0 -> 2, 1 -> 0
+ v = (get_bits1(gb) ^ 1) << 1;
+ break;
+ }
+ }
+ *ptr++ = v;
+ }
+ }
+ }
+ return 0;
+}
+
+/**
+ * Decode macroblock information.
+ */
+static int rv40_decode_mb_info(RV34DecContext *r)
+{
+ MpegEncContext *s = &r->s;
+ GetBitContext *gb = &s->gb;
+ int q, i;
+ int prev_type = 0;
+ int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
+ int blocks[RV34_MB_TYPES] = {0};
+ int count = 0;
+
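+ // the skip run is coded as an unsigned Exp-Golomb value; while the stored
+ // counter stays above zero after decrementing, the macroblock is skipped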
+ if(!r->s.mb_skip_run)
+ r->s.mb_skip_run = svq3_get_ue_golomb(gb) + 1;
+
+ if(--r->s.mb_skip_run)
+ return RV34_MB_SKIP;
+
+ if(r->avail_cache[6-1])
+ blocks[r->mb_type[mb_pos - 1]]++;
+ if(r->avail_cache[6-4]){
+ blocks[r->mb_type[mb_pos - s->mb_stride]]++;
+ if(r->avail_cache[6-2])
+ blocks[r->mb_type[mb_pos - s->mb_stride + 1]]++;
+ if(r->avail_cache[6-5])
+ blocks[r->mb_type[mb_pos - s->mb_stride - 1]]++;
+ }
+
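+ // the most frequent type among the already decoded neighbouring macroblocks
+ // selects which VLC is used for the current macroblock type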
+ for(i = 0; i < RV34_MB_TYPES; i++){
+ if(blocks[i] > count){
+ count = blocks[i];
+ prev_type = i;
+ }
+ }
+ if(s->pict_type == FF_P_TYPE){
+ prev_type = block_num_to_ptype_vlc_num[prev_type];
+ q = get_vlc2(gb, ptype_vlc[prev_type].table, PTYPE_VLC_BITS, 1);
+ if(q < PBTYPE_ESCAPE)
+ return q;
+ q = get_vlc2(gb, ptype_vlc[prev_type].table, PTYPE_VLC_BITS, 1);
+ av_log(s->avctx, AV_LOG_ERROR, "Dquant for P-frame\n");
+ }else{
+ prev_type = block_num_to_btype_vlc_num[prev_type];
+ q = get_vlc2(gb, btype_vlc[prev_type].table, BTYPE_VLC_BITS, 1);
+ if(q < PBTYPE_ESCAPE)
+ return q;
+ q = get_vlc2(gb, btype_vlc[prev_type].table, BTYPE_VLC_BITS, 1);
+ av_log(s->avctx, AV_LOG_ERROR, "Dquant for B-frame\n");
+ }
+ return 0;
+}
+
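+/* clip a into the symmetric range [-b, b] */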
+#define CLIP_SYMM(a, b) av_clip(a, -(b), b)
+/**
+ * Weaker deblocking, very similar to the one described in section 4.4.2 of JVT-A003r1.
+ */
+static inline void rv40_weak_loop_filter(uint8_t *src, const int step,
+ const int filter_p1, const int filter_q1,
+ const int alpha, const int beta,
+ const int lim_p0q0,
+ const int lim_q1, const int lim_p1,
+ const int diff_p1p0, const int diff_q1q0,
+ const int diff_p1p2, const int diff_q1q2)
+{
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+ int t, u, diff;
+
+ t = src[0*step] - src[-1*step];
+ if(!t)
+ return;
+ u = (alpha * FFABS(t)) >> 7;
+ if(u > 3 - (filter_p1 && filter_q1))
+ return;
+
+ t <<= 2;
+ if(filter_p1 && filter_q1)
+ t += src[-2*step] - src[1*step];
+ diff = CLIP_SYMM((t + 4) >> 3, lim_p0q0);
+ src[-1*step] = cm[src[-1*step] + diff];
+ src[ 0*step] = cm[src[ 0*step] - diff];
+ if(FFABS(diff_p1p2) <= beta && filter_p1){
+ t = (diff_p1p0 + diff_p1p2 - diff) >> 1;
+ src[-2*step] = cm[src[-2*step] - CLIP_SYMM(t, lim_p1)];
+ }
+ if(FFABS(diff_q1q2) <= beta && filter_q1){
+ t = (diff_q1q0 + diff_q1q2 + diff) >> 1;
+ src[ 1*step] = cm[src[ 1*step] - CLIP_SYMM(t, lim_q1)];
+ }
+}
+
+static inline void rv40_adaptive_loop_filter(uint8_t *src, const int step,
+ const int stride, const int dmode,
+ const int lim_q1, const int lim_p1,
+ const int alpha,
+ const int beta, const int beta2,
+ const int chroma, const int edge)
+{
+ int diff_p1p0[4], diff_q1q0[4], diff_p1p2[4], diff_q1q2[4];
+ int sum_p1p0 = 0, sum_q1q0 = 0, sum_p1p2 = 0, sum_q1q2 = 0;
+ uint8_t *ptr;
+ int flag_strong0 = 1, flag_strong1 = 1;
+ int filter_p1, filter_q1;
+ int i;
+ int lims;
+
+ for(i = 0, ptr = src; i < 4; i++, ptr += stride){
+ diff_p1p0[i] = ptr[-2*step] - ptr[-1*step];
+ diff_q1q0[i] = ptr[ 1*step] - ptr[ 0*step];
+ sum_p1p0 += diff_p1p0[i];
+ sum_q1q0 += diff_q1q0[i];
+ }
+ filter_p1 = FFABS(sum_p1p0) < (beta<<2);
+ filter_q1 = FFABS(sum_q1q0) < (beta<<2);
+ if(!filter_p1 && !filter_q1)
+ return;
+
+ for(i = 0, ptr = src; i < 4; i++, ptr += stride){
+ diff_p1p2[i] = ptr[-2*step] - ptr[-3*step];
+ diff_q1q2[i] = ptr[ 1*step] - ptr[ 2*step];
+ sum_p1p2 += diff_p1p2[i];
+ sum_q1q2 += diff_q1q2[i];
+ }
+
+ if(edge){
+ flag_strong0 = filter_p1 && (FFABS(sum_p1p2) < beta2);
+ flag_strong1 = filter_q1 && (FFABS(sum_q1q2) < beta2);
+ }else{
+ flag_strong0 = flag_strong1 = 0;
+ }
+
+ lims = filter_p1 + filter_q1 + ((lim_q1 + lim_p1) >> 1) + 1;
+ if(flag_strong0 && flag_strong1){ /* strong filtering */
+ for(i = 0; i < 4; i++, src += stride){
+ int sflag, p0, q0, p1, q1;
+ int t = src[0*step] - src[-1*step];
+
+ if(!t) continue;
+ sflag = (alpha * FFABS(t)) >> 7;
+ if(sflag > 1) continue;
+
+ p0 = (25*src[-3*step] + 26*src[-2*step]
+ + 26*src[-1*step]
+ + 26*src[ 0*step] + 25*src[ 1*step] + rv40_dither_l[dmode + i]) >> 7;
+ q0 = (25*src[-2*step] + 26*src[-1*step]
+ + 26*src[ 0*step]
+ + 26*src[ 1*step] + 25*src[ 2*step] + rv40_dither_r[dmode + i]) >> 7;
+ if(sflag){
+ p0 = av_clip(p0, src[-1*step] - lims, src[-1*step] + lims);
+ q0 = av_clip(q0, src[ 0*step] - lims, src[ 0*step] + lims);
+ }
+ p1 = (25*src[-4*step] + 26*src[-3*step]
+ + 26*src[-2*step]
+ + 26*p0 + 25*src[ 0*step] + rv40_dither_l[dmode + i]) >> 7;
+ q1 = (25*src[-1*step] + 26*q0
+ + 26*src[ 1*step]
+ + 26*src[ 2*step] + 25*src[ 3*step] + rv40_dither_r[dmode + i]) >> 7;
+ if(sflag){
+ p1 = av_clip(p1, src[-2*step] - lims, src[-2*step] + lims);
+ q1 = av_clip(q1, src[ 1*step] - lims, src[ 1*step] + lims);
+ }
+ src[-2*step] = p1;
+ src[-1*step] = p0;
+ src[ 0*step] = q0;
+ src[ 1*step] = q1;
+ if(!chroma){
+ src[-3*step] = (25*src[-1*step] + 26*src[-2*step] + 51*src[-3*step] + 26*src[-4*step] + 64) >> 7;
+ src[ 2*step] = (25*src[ 0*step] + 26*src[ 1*step] + 51*src[ 2*step] + 26*src[ 3*step] + 64) >> 7;
+ }
+ }
+ }else if(filter_p1 && filter_q1){
+ for(i = 0; i < 4; i++, src += stride)
+ rv40_weak_loop_filter(src, step, 1, 1, alpha, beta, lims, lim_q1, lim_p1,
+ diff_p1p0[i], diff_q1q0[i], diff_p1p2[i], diff_q1q2[i]);
+ }else{
+ for(i = 0; i < 4; i++, src += stride)
+ rv40_weak_loop_filter(src, step, filter_p1, filter_q1,
+ alpha, beta, lims>>1, lim_q1>>1, lim_p1>>1,
+ diff_p1p0[i], diff_q1q0[i], diff_p1p2[i], diff_q1q2[i]);
+ }
+}
+
+static void rv40_v_loop_filter(uint8_t *src, int stride, int dmode,
+ int lim_q1, int lim_p1,
+ int alpha, int beta, int beta2, int chroma, int edge){
+ rv40_adaptive_loop_filter(src, 1, stride, dmode, lim_q1, lim_p1,
+ alpha, beta, beta2, chroma, edge);
+}
+static void rv40_h_loop_filter(uint8_t *src, int stride, int dmode,
+ int lim_q1, int lim_p1,
+ int alpha, int beta, int beta2, int chroma, int edge){
+ rv40_adaptive_loop_filter(src, stride, 1, dmode, lim_q1, lim_p1,
+ alpha, beta, beta2, chroma, edge);
+}
+
+enum RV40BlockPos{
+ POS_CUR,
+ POS_TOP,
+ POS_LEFT,
+ POS_BOTTOM,
+};
+
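+/* Deblocking patterns for luma are 16-bit masks with one bit per 4x4 subblock
+ * (each nibble is one row of four subblocks); chroma patterns use two bits per
+ * row of subblocks. MASK_Y_LEFT_COL = 0x1111, for example, selects the leftmost
+ * subblock of every luma row. */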
+#define MASK_CUR 0x0001
+#define MASK_RIGHT 0x0008
+#define MASK_BOTTOM 0x0010
+#define MASK_TOP 0x1000
+#define MASK_Y_TOP_ROW 0x000F
+#define MASK_Y_LAST_ROW 0xF000
+#define MASK_Y_LEFT_COL 0x1111
+#define MASK_Y_RIGHT_COL 0x8888
+#define MASK_C_TOP_ROW 0x0003
+#define MASK_C_LAST_ROW 0x000C
+#define MASK_C_LEFT_COL 0x0005
+#define MASK_C_RIGHT_COL 0x000A
+
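+/* macroblock offsets matching enum RV40BlockPos:
+ * POS_CUR (0,0), POS_TOP (0,-1), POS_LEFT (-1,0), POS_BOTTOM (0,1) */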
+static const int neighbour_offs_x[4] = { 0, 0, -1, 0 };
+static const int neighbour_offs_y[4] = { 0, -1, 0, 1 };
+
+/**
+ * RV40 loop filtering function
+ */
+static void rv40_loop_filter(RV34DecContext *r, int row)
+{
+ MpegEncContext *s = &r->s;
+ int mb_pos, mb_x;
+ int i, j, k;
+ uint8_t *Y, *C;
+ int alpha, beta, betaY, betaC;
+ int q;
+ int mbtype[4]; ///< types of the current macroblock and its neighbours
+ /**
+ * flags indicating that the macroblock can be filtered with the strong filter;
+ * they are set only for intra-coded MBs and MBs with DCs coded separately
+ */
+ int mb_strong[4];
+ int clip[4]; ///< MB filter clipping value calculated from filtering strength
+ /**
+ * coded block patterns for luma part of current macroblock and its neighbours
+ * Format:
+ * LSB corresponds to the top left block,
+ * each nibble represents one row of subblocks.
+ */
+ int cbp[4];
+ /**
+ * coded block patterns for chroma part of current macroblock and its neighbours
+ * Format is the same as for luma with two subblocks in a row.
+ */
+ int uvcbp[4][2];
+ /**
+ * This mask represents the pattern of luma subblocks that should be filtered
+ * in addition to the coded ones because because they lie at the edge of
+ * 8x8 block with different enough motion vectors
+ */
+ int mvmasks[4];
+
+ mb_pos = row * s->mb_stride;
+ for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
+ int mbtype = s->current_picture_ptr->mb_type[mb_pos];
+ if(IS_INTRA(mbtype) || IS_SEPARATE_DC(mbtype))
+ r->cbp_luma [mb_pos] = r->deblock_coefs[mb_pos] = 0xFFFF;
+ if(IS_INTRA(mbtype))
+ r->cbp_chroma[mb_pos] = 0xFF;
+ }
+ mb_pos = row * s->mb_stride;
+ for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
+ int y_h_deblock, y_v_deblock;
+ int c_v_deblock[2], c_h_deblock[2];
+ int clip_left;
+ int avail[4];
+ int y_to_deblock, c_to_deblock[2];
+
+ q = s->current_picture_ptr->qscale_table[mb_pos];
+ alpha = rv40_alpha_tab[q];
+ beta = rv40_beta_tab [q];
+ betaY = betaC = beta * 3;
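+ // for small frames (QCIF, i.e. 176x144, and below) the luma threshold is raised further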
+ if(s->width * s->height <= 176*144)
+ betaY += beta;
+
+ avail[0] = 1;
+ avail[1] = row;
+ avail[2] = mb_x;
+ avail[3] = row < s->mb_height - 1;
+ for(i = 0; i < 4; i++){
+ if(avail[i]){
+ int pos = mb_pos + neighbour_offs_x[i] + neighbour_offs_y[i]*s->mb_stride;
+ mvmasks[i] = r->deblock_coefs[pos];
+ mbtype [i] = s->current_picture_ptr->mb_type[pos];
+ cbp [i] = r->cbp_luma[pos];
+ uvcbp[i][0] = r->cbp_chroma[pos] & 0xF;
+ uvcbp[i][1] = r->cbp_chroma[pos] >> 4;
+ }else{
+ mvmasks[i] = 0;
+ mbtype [i] = mbtype[0];
+ cbp [i] = 0;
+ uvcbp[i][0] = uvcbp[i][1] = 0;
+ }
+ mb_strong[i] = IS_INTRA(mbtype[i]) || IS_SEPARATE_DC(mbtype[i]);
+ clip[i] = rv40_filter_clip_tbl[mb_strong[i] + 1][q];
+ }
+ y_to_deblock = mvmasks[POS_CUR]
+ | (mvmasks[POS_BOTTOM] << 16);
+ /* This pattern contains bits signalling that horizontal edges of
+ * the current block can be filtered.
+ * That happens when either of the adjacent subblocks is coded or lies on
+ * the edge of 8x8 blocks with motion vectors differing by more than
+ * 3/4 pel in any component (any edge orientation for some reason).
+ */
+ y_h_deblock = y_to_deblock
+ | ((cbp[POS_CUR] << 4) & ~MASK_Y_TOP_ROW)
+ | ((cbp[POS_TOP] & MASK_Y_LAST_ROW) >> 12);
+ /* This pattern contains bits signalling that vertical edges of
+ * the current block can be filtered.
+ * That happens when either of the adjacent subblocks is coded or lies on
+ * the edge of 8x8 blocks with motion vectors differing by more than
+ * 3/4 pel in any component (any edge orientation for some reason).
+ */
+ y_v_deblock = y_to_deblock
+ | ((cbp[POS_CUR] << 1) & ~MASK_Y_LEFT_COL)
+ | ((cbp[POS_LEFT] & MASK_Y_RIGHT_COL) >> 3);
+ if(!mb_x)
+ y_v_deblock &= ~MASK_Y_LEFT_COL;
+ if(!row)
+ y_h_deblock &= ~MASK_Y_TOP_ROW;
+ if(row == s->mb_height - 1 || (mb_strong[POS_CUR] || mb_strong[POS_BOTTOM]))
+ y_h_deblock &= ~(MASK_Y_TOP_ROW << 16);
+ /* Calculating chroma patterns is similar and easier since there is
+ * no motion vector pattern for them.
+ */
+ for(i = 0; i < 2; i++){
+ c_to_deblock[i] = (uvcbp[POS_BOTTOM][i] << 4) | uvcbp[POS_CUR][i];
+ c_v_deblock[i] = c_to_deblock[i]
+ | ((uvcbp[POS_CUR] [i] << 1) & ~MASK_C_LEFT_COL)
+ | ((uvcbp[POS_LEFT][i] & MASK_C_RIGHT_COL) >> 1);
+ c_h_deblock[i] = c_to_deblock[i]
+ | ((uvcbp[POS_TOP][i] & MASK_C_LAST_ROW) >> 2)
+ | (uvcbp[POS_CUR][i] << 2);
+ if(!mb_x)
+ c_v_deblock[i] &= ~MASK_C_LEFT_COL;
+ if(!row)
+ c_h_deblock[i] &= ~MASK_C_TOP_ROW;
+ if(row == s->mb_height - 1 || mb_strong[POS_CUR] || mb_strong[POS_BOTTOM])
+ c_h_deblock[i] &= ~(MASK_C_TOP_ROW << 4);
+ }
+
+ for(j = 0; j < 16; j += 4){
+ Y = s->current_picture_ptr->data[0] + mb_x*16 + (row*16 + j) * s->linesize;
+ for(i = 0; i < 4; i++, Y += 4){
+ int ij = i + j;
+ int clip_cur = y_to_deblock & (MASK_CUR << ij) ? clip[POS_CUR] : 0;
+ int dither = j ? ij : i*4;
+
+ // if bottom block is coded then we can filter its top edge
+ // (or bottom edge of this block, which is the same)
+ if(y_h_deblock & (MASK_BOTTOM << ij)){
+ rv40_h_loop_filter(Y+4*s->linesize, s->linesize, dither,
+ y_to_deblock & (MASK_BOTTOM << ij) ? clip[POS_CUR] : 0,
+ clip_cur,
+ alpha, beta, betaY, 0, 0);
+ }
+ // filter left block edge in ordinary mode (with low filtering strength)
+ if(y_v_deblock & (MASK_CUR << ij) && (i || !(mb_strong[POS_CUR] || mb_strong[POS_LEFT]))){
+ if(!i)
+ clip_left = mvmasks[POS_LEFT] & (MASK_RIGHT << j) ? clip[POS_LEFT] : 0;
+ else
+ clip_left = y_to_deblock & (MASK_CUR << (ij-1)) ? clip[POS_CUR] : 0;
+ rv40_v_loop_filter(Y, s->linesize, dither,
+ clip_cur,
+ clip_left,
+ alpha, beta, betaY, 0, 0);
+ }
+ // filter top edge of the current macroblock when filtering strength is high
+ if(!j && y_h_deblock & (MASK_CUR << i) && (mb_strong[POS_CUR] || mb_strong[POS_TOP])){
+ rv40_h_loop_filter(Y, s->linesize, dither,
+ clip_cur,
+ mvmasks[POS_TOP] & (MASK_TOP << i) ? clip[POS_TOP] : 0,
+ alpha, beta, betaY, 0, 1);
+ }
+ // filter left block edge in edge mode (with high filtering strength)
+ if(y_v_deblock & (MASK_CUR << ij) && !i && (mb_strong[POS_CUR] || mb_strong[POS_LEFT])){
+ clip_left = mvmasks[POS_LEFT] & (MASK_RIGHT << j) ? clip[POS_LEFT] : 0;
+ rv40_v_loop_filter(Y, s->linesize, dither,
+ clip_cur,
+ clip_left,
+ alpha, beta, betaY, 0, 1);
+ }
+ }
+ }
+ for(k = 0; k < 2; k++){
+ for(j = 0; j < 2; j++){
+ C = s->current_picture_ptr->data[k+1] + mb_x*8 + (row*8 + j*4) * s->uvlinesize;
+ for(i = 0; i < 2; i++, C += 4){
+ int ij = i + j*2;
+ int clip_cur = c_to_deblock[k] & (MASK_CUR << ij) ? clip[POS_CUR] : 0;
+ if(c_h_deblock[k] & (MASK_CUR << (ij+2))){
+ int clip_bot = c_to_deblock[k] & (MASK_CUR << (ij+2)) ? clip[POS_CUR] : 0;
+ rv40_h_loop_filter(C+4*s->uvlinesize, s->uvlinesize, i*8,
+ clip_bot,
+ clip_cur,
+ alpha, beta, betaC, 1, 0);
+ }
+ if((c_v_deblock[k] & (MASK_CUR << ij)) && (i || !(mb_strong[POS_CUR] || mb_strong[POS_LEFT]))){
+ if(!i)
+ clip_left = uvcbp[POS_LEFT][k] & (MASK_CUR << (2*j+1)) ? clip[POS_LEFT] : 0;
+ else
+ clip_left = c_to_deblock[k] & (MASK_CUR << (ij-1)) ? clip[POS_CUR] : 0;
+ rv40_v_loop_filter(C, s->uvlinesize, j*8,
+ clip_cur,
+ clip_left,
+ alpha, beta, betaC, 1, 0);
+ }
+ if(!j && c_h_deblock[k] & (MASK_CUR << ij) && (mb_strong[POS_CUR] || mb_strong[POS_TOP])){
+ int clip_top = uvcbp[POS_TOP][k] & (MASK_CUR << (ij+2)) ? clip[POS_TOP] : 0;
+ rv40_h_loop_filter(C, s->uvlinesize, i*8,
+ clip_cur,
+ clip_top,
+ alpha, beta, betaC, 1, 1);
+ }
+ if(c_v_deblock[k] & (MASK_CUR << ij) && !i && (mb_strong[POS_CUR] || mb_strong[POS_LEFT])){
+ clip_left = uvcbp[POS_LEFT][k] & (MASK_CUR << (2*j+1)) ? clip[POS_LEFT] : 0;
+ rv40_v_loop_filter(C, s->uvlinesize, j*8,
+ clip_cur,
+ clip_left,
+ alpha, beta, betaC, 1, 1);
+ }
+ }
+ }
+ }
+ }
+}
+
+/**
+ * Initialize decoder.
+ */
+static av_cold int rv40_decode_init(AVCodecContext *avctx)
+{
+ RV34DecContext *r = avctx->priv_data;
+
+ r->rv30 = 0;
+ ff_rv34_decode_init(avctx);
+ if(!aic_top_vlc.bits)
+ rv40_init_tables();
+ r->parse_slice_header = rv40_parse_slice_header;
+ r->decode_intra_types = rv40_decode_intra_types;
+ r->decode_mb_info = rv40_decode_mb_info;
+ r->loop_filter = rv40_loop_filter;
+ r->luma_dc_quant_i = rv40_luma_dc_quant[0];
+ r->luma_dc_quant_p = rv40_luma_dc_quant[1];
+ return 0;
+}
+
+AVCodec rv40_decoder = {
+ "rv40",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_RV40,
+ sizeof(RV34DecContext),
+ /*.init = */rv40_decode_init,
+ /*.encode = */NULL,
+ /*.close = */ff_rv34_decode_end,
+ /*.decode = */ff_rv34_decode_frame,
+ /*.capabilities = */CODEC_CAP_DR1 | CODEC_CAP_DELAY,
+ /*.next = */NULL,
+ /*.flush = */ff_mpeg_flush,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name = */NULL_IF_CONFIG_SMALL("RealVideo 4.0"),
+};
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv40data.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv40data.h
new file mode 100644
index 000000000..b81b39318
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv40data.h
@@ -0,0 +1,115 @@
+/*
+ * RealVideo 4 decoder
+ * copyright (c) 2007 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file rv40data.h
+ * miscellaneous RV40 tables
+ */
+
+#ifndef AVCODEC_RV40DATA_H
+#define AVCODEC_RV40DATA_H
+
+#include <stdint.h>
+
+/**
+ * standard widths and heights coded in RV40
+ */
+//@{
+static const int rv40_standard_widths[] = { 160, 172, 240, 320, 352, 640, 704, 0};
+static const int rv40_standard_heights[] = { 120, 132, 144, 240, 288, 480, -8, -10, 180, 360, 576, 0};
+//@}
+
+#define MODE2_PATTERNS_NUM 20
+/**
+ * intra types table
+ *
+ * These values are actually coded 3-tuples
+ * used for detecting standard block configurations.
+ */
+static const uint16_t rv40_aic_table_index[MODE2_PATTERNS_NUM] = {
+ 0x000, 0x100, 0x200,
+ 0x011, 0x111, 0x211, 0x511, 0x611,
+ 0x022, 0x122, 0x222, 0x722,
+ 0x272, 0x227,
+ 0x822, 0x282, 0x228,
+ 0x112, 0x116, 0x221
+};
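+
+/* Reading the table above: each entry appears to pack the coded 3-tuple of
+ * intra prediction modes one nibble per mode, so 0x272 stands for the mode
+ * triple (2, 7, 2) and 0x000 for three blocks all predicted with mode 0. */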
+
+/**
+ * luma quantizer values
+ * The second table is used for inter blocks.
+ */
+static const uint8_t rv40_luma_dc_quant[2][32] = {
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 17, 18, 18, 18, 19, 19, 19, 20, 20, 20, 22, 22, 22, 22 },
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 20, 21, 21, 22, 23, 23, 23, 24, 24, 24, 24 }
+};
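+
+/* Usage note: rv40_decode_init() in rv40.c (earlier in this diff) selects
+ * these rows directly, r->luma_dc_quant_i = rv40_luma_dc_quant[0] for intra
+ * blocks and r->luma_dc_quant_p = rv40_luma_dc_quant[1] for inter blocks. */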
+
+/**
+ * @defgroup loopfilter coefficients used by the RV40 loop filter
+ * @{
+ */
+/**
+ * dither values for deblocking filter - left/top values
+ */
+static const uint8_t rv40_dither_l[16] = {
+ 0x40, 0x50, 0x20, 0x60, 0x30, 0x50, 0x40, 0x30,
+ 0x50, 0x40, 0x50, 0x30, 0x60, 0x20, 0x50, 0x40
+};
+/**
+ * dither values for deblocking filter - right/bottom values
+ */
+static const uint8_t rv40_dither_r[16] = {
+ 0x40, 0x30, 0x60, 0x20, 0x50, 0x30, 0x30, 0x40,
+ 0x40, 0x40, 0x50, 0x30, 0x20, 0x60, 0x30, 0x40
+};
+
+/** alpha parameter for RV40 loop filter - almost the same as in JVT-A003r1 */
+static const uint8_t rv40_alpha_tab[32] = {
+ 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 122, 96, 75, 59, 47, 37,
+ 29, 23, 18, 15, 13, 11, 10, 9,
+ 8, 7, 6, 5, 4, 3, 2, 1
+};
+/** beta parameter for RV40 loop filter - almost the same as in JVT-A003r1 */
+static const uint8_t rv40_beta_tab[32] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 4, 4, 4, 6, 6,
+ 6, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 13, 14, 15, 16, 17
+};
+/** clip table for RV40 loop filter - the same as in JVT-A003r1 */
+static const uint8_t rv40_filter_clip_tbl[3][32] = {
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ },
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 5, 5
+ },
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1,
+ 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 7, 8, 9
+ }
+};
+/** @} */ // end loopfilter group
+
+#endif /* AVCODEC_RV40DATA_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv40dsp.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv40dsp.c
new file mode 100644
index 000000000..b48c4e85b
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv40dsp.c
@@ -0,0 +1,353 @@
+/*
+ * RV40 decoder motion compensation functions
+ * Copyright (c) 2008 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file rv40dsp.c
+ * RV40 decoder motion compensation functions
+ */
+
+#include "avcodec.h"
+#include "dsputil.h"
+
+#define RV40_LOWPASS(OPNAME, OP) \
+static av_unused void OPNAME ## rv40_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride,\
+ const int h, const int C1, const int C2, const int SHIFT){\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
+ int i;\
+ for(i=0; i<h; i++)\
+ {\
+ OP(dst[0], (src[-2] + src[ 3] - 5*(src[-1]+src[2]) + src[0]*C1 + src[1]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
+ OP(dst[1], (src[-1] + src[ 4] - 5*(src[ 0]+src[3]) + src[1]*C1 + src[2]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
+ OP(dst[2], (src[ 0] + src[ 5] - 5*(src[ 1]+src[4]) + src[2]*C1 + src[3]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
+ OP(dst[3], (src[ 1] + src[ 6] - 5*(src[ 2]+src[5]) + src[3]*C1 + src[4]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
+ OP(dst[4], (src[ 2] + src[ 7] - 5*(src[ 3]+src[6]) + src[4]*C1 + src[5]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
+ OP(dst[5], (src[ 3] + src[ 8] - 5*(src[ 4]+src[7]) + src[5]*C1 + src[6]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
+ OP(dst[6], (src[ 4] + src[ 9] - 5*(src[ 5]+src[8]) + src[6]*C1 + src[7]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
+ OP(dst[7], (src[ 5] + src[10] - 5*(src[ 6]+src[9]) + src[7]*C1 + src[8]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
+ dst+=dstStride;\
+ src+=srcStride;\
+ }\
+}\
+\
+static void OPNAME ## rv40_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride,\
+ const int w, const int C1, const int C2, const int SHIFT){\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
+ int i;\
+ for(i=0; i<w; i++)\
+ {\
+ const int srcB = src[-2*srcStride];\
+ const int srcA = src[-1*srcStride];\
+ const int src0 = src[0 *srcStride];\
+ const int src1 = src[1 *srcStride];\
+ const int src2 = src[2 *srcStride];\
+ const int src3 = src[3 *srcStride];\
+ const int src4 = src[4 *srcStride];\
+ const int src5 = src[5 *srcStride];\
+ const int src6 = src[6 *srcStride];\
+ const int src7 = src[7 *srcStride];\
+ const int src8 = src[8 *srcStride];\
+ const int src9 = src[9 *srcStride];\
+ const int src10= src[10*srcStride];\
+ OP(dst[0*dstStride], (srcB + src3 - 5*(srcA+src2) + src0*C1 + src1*C2 + (1<<(SHIFT-1))) >> SHIFT);\
+ OP(dst[1*dstStride], (srcA + src4 - 5*(src0+src3) + src1*C1 + src2*C2 + (1<<(SHIFT-1))) >> SHIFT);\
+ OP(dst[2*dstStride], (src0 + src5 - 5*(src1+src4) + src2*C1 + src3*C2 + (1<<(SHIFT-1))) >> SHIFT);\
+ OP(dst[3*dstStride], (src1 + src6 - 5*(src2+src5) + src3*C1 + src4*C2 + (1<<(SHIFT-1))) >> SHIFT);\
+ OP(dst[4*dstStride], (src2 + src7 - 5*(src3+src6) + src4*C1 + src5*C2 + (1<<(SHIFT-1))) >> SHIFT);\
+ OP(dst[5*dstStride], (src3 + src8 - 5*(src4+src7) + src5*C1 + src6*C2 + (1<<(SHIFT-1))) >> SHIFT);\
+ OP(dst[6*dstStride], (src4 + src9 - 5*(src5+src8) + src6*C1 + src7*C2 + (1<<(SHIFT-1))) >> SHIFT);\
+ OP(dst[7*dstStride], (src5 + src10 - 5*(src6+src9) + src7*C1 + src8*C2 + (1<<(SHIFT-1))) >> SHIFT);\
+ dst++;\
+ src++;\
+ }\
+}\
+\
+static void OPNAME ## rv40_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride,\
+ const int w, const int C1, const int C2, const int SHIFT){\
+ OPNAME ## rv40_qpel8_v_lowpass(dst , src , dstStride, srcStride, 8, C1, C2, SHIFT);\
+ OPNAME ## rv40_qpel8_v_lowpass(dst+8, src+8, dstStride, srcStride, 8, C1, C2, SHIFT);\
+ src += 8*srcStride;\
+ dst += 8*dstStride;\
+ OPNAME ## rv40_qpel8_v_lowpass(dst , src , dstStride, srcStride, w-8, C1, C2, SHIFT);\
+ OPNAME ## rv40_qpel8_v_lowpass(dst+8, src+8, dstStride, srcStride, w-8, C1, C2, SHIFT);\
+}\
+\
+static void OPNAME ## rv40_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride,\
+ const int h, const int C1, const int C2, const int SHIFT){\
+ OPNAME ## rv40_qpel8_h_lowpass(dst , src , dstStride, srcStride, 8, C1, C2, SHIFT);\
+ OPNAME ## rv40_qpel8_h_lowpass(dst+8, src+8, dstStride, srcStride, 8, C1, C2, SHIFT);\
+ src += 8*srcStride;\
+ dst += 8*dstStride;\
+ OPNAME ## rv40_qpel8_h_lowpass(dst , src , dstStride, srcStride, h-8, C1, C2, SHIFT);\
+ OPNAME ## rv40_qpel8_h_lowpass(dst+8, src+8, dstStride, srcStride, h-8, C1, C2, SHIFT);\
+}\
+\
+
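+/* For reference: each pass above applies the 6-tap kernel
+ * (1, -5, C1, C2, -5, 1) with rounding by (1 << (SHIFT-1)) and a final
+ * >> SHIFT.  The coefficients always sum to 1 << SHIFT:
+ * 1 - 5 + 20 + 20 - 5 + 1 = 32 for the half-pel taps (C1 = C2 = 20, SHIFT = 5)
+ * and 1 - 5 + 52 + 20 - 5 + 1 = 64 for the quarter-pel taps
+ * (C1/C2 = 52/20 or 20/52, SHIFT = 6), so flat areas pass through unchanged. */
+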
+#define RV40_MC(OPNAME, SIZE) \
+static void OPNAME ## rv40_qpel ## SIZE ## _mc10_c(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## rv40_qpel ## SIZE ## _h_lowpass(dst, src, stride, stride, SIZE, 52, 20, 6);\
+}\
+\
+static void OPNAME ## rv40_qpel ## SIZE ## _mc20_c(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## rv40_qpel ## SIZE ## _h_lowpass(dst, src, stride, stride, SIZE, 20, 20, 5);\
+}\
+\
+static void OPNAME ## rv40_qpel ## SIZE ## _mc30_c(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## rv40_qpel ## SIZE ## _h_lowpass(dst, src, stride, stride, SIZE, 20, 52, 6);\
+}\
+\
+static void OPNAME ## rv40_qpel ## SIZE ## _mc01_c(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, src, stride, stride, SIZE, 52, 20, 6);\
+}\
+\
+static void OPNAME ## rv40_qpel ## SIZE ## _mc11_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[SIZE*(SIZE+5)];\
+ uint8_t * const full_mid= full + SIZE*2;\
+ put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 52, 20, 6);\
+ OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 52, 20, 6);\
+}\
+\
+static void OPNAME ## rv40_qpel ## SIZE ## _mc21_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[SIZE*(SIZE+5)];\
+ uint8_t * const full_mid= full + SIZE*2;\
+ put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 20, 5);\
+ OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 52, 20, 6);\
+}\
+\
+static void OPNAME ## rv40_qpel ## SIZE ## _mc31_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[SIZE*(SIZE+5)];\
+ uint8_t * const full_mid= full + SIZE*2;\
+ put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 52, 6);\
+ OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 52, 20, 6);\
+}\
+\
+static void OPNAME ## rv40_qpel ## SIZE ## _mc02_c(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, src, stride, stride, SIZE, 20, 20, 5);\
+}\
+\
+static void OPNAME ## rv40_qpel ## SIZE ## _mc12_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[SIZE*(SIZE+5)];\
+ uint8_t * const full_mid= full + SIZE*2;\
+ put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 52, 20, 6);\
+ OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 20, 20, 5);\
+}\
+\
+static void OPNAME ## rv40_qpel ## SIZE ## _mc22_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[SIZE*(SIZE+5)];\
+ uint8_t * const full_mid= full + SIZE*2;\
+ put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 20, 5);\
+ OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 20, 20, 5);\
+}\
+\
+static void OPNAME ## rv40_qpel ## SIZE ## _mc32_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[SIZE*(SIZE+5)];\
+ uint8_t * const full_mid= full + SIZE*2;\
+ put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 52, 6);\
+ OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 20, 20, 5);\
+}\
+\
+static void OPNAME ## rv40_qpel ## SIZE ## _mc03_c(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, src, stride, stride, SIZE, 20, 52, 6);\
+}\
+\
+static void OPNAME ## rv40_qpel ## SIZE ## _mc13_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[SIZE*(SIZE+5)];\
+ uint8_t * const full_mid= full + SIZE*2;\
+ put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 52, 20, 6);\
+ OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 20, 52, 6);\
+}\
+\
+static void OPNAME ## rv40_qpel ## SIZE ## _mc23_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[SIZE*(SIZE+5)];\
+ uint8_t * const full_mid= full + SIZE*2;\
+ put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 20, 5);\
+ OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 20, 52, 6);\
+}\
+\
+
+#define op_avg(a, b) a = (((a)+cm[b]+1)>>1)
+#define op_put(a, b) a = cm[b]
+
+RV40_LOWPASS(put_ , op_put)
+RV40_LOWPASS(avg_ , op_avg)
+
+#undef op_avg
+#undef op_put
+
+RV40_MC(put_, 8)
+RV40_MC(put_, 16)
+RV40_MC(avg_, 8)
+RV40_MC(avg_, 16)
+
+static const int rv40_bias[4][4] = {
+ { 0, 16, 32, 16 },
+ { 32, 28, 32, 28 },
+ { 0, 32, 16, 32 },
+ { 32, 28, 32, 28 }
+};
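+
+/* In the chroma macros below the bilinear weights always satisfy
+ * A + B + C + D = (8-x)*(8-y) + x*(8-y) + (8-x)*y + x*y = 64, matching the
+ * final >> 6 in op_put/op_avg; rv40_bias[y>>1][x>>1] supplies the rounding
+ * term, which here depends on the subpel position rather than being a
+ * constant 32. */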
+
+#define RV40_CHROMA_MC(OPNAME, OP)\
+static void OPNAME ## rv40_chroma_mc4_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
+ const int A=(8-x)*(8-y);\
+ const int B=( x)*(8-y);\
+ const int C=(8-x)*( y);\
+ const int D=( x)*( y);\
+ int i;\
+ int bias = rv40_bias[y>>1][x>>1];\
+ \
+ assert(x<8 && y<8 && x>=0 && y>=0);\
+\
+ if(D){\
+ for(i=0; i<h; i++){\
+ OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + bias));\
+ OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + bias));\
+ OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + bias));\
+ OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + bias));\
+ dst+= stride;\
+ src+= stride;\
+ }\
+ }else{\
+ const int E= B+C;\
+ const int step= C ? stride : 1;\
+ for(i=0; i<h; i++){\
+ OP(dst[0], (A*src[0] + E*src[step+0] + bias));\
+ OP(dst[1], (A*src[1] + E*src[step+1] + bias));\
+ OP(dst[2], (A*src[2] + E*src[step+2] + bias));\
+ OP(dst[3], (A*src[3] + E*src[step+3] + bias));\
+ dst+= stride;\
+ src+= stride;\
+ }\
+ }\
+}\
+\
+static void OPNAME ## rv40_chroma_mc8_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
+ const int A=(8-x)*(8-y);\
+ const int B=( x)*(8-y);\
+ const int C=(8-x)*( y);\
+ const int D=( x)*( y);\
+ int i;\
+ int bias = rv40_bias[y>>1][x>>1];\
+ \
+ assert(x<8 && y<8 && x>=0 && y>=0);\
+\
+ if(D){\
+ for(i=0; i<h; i++){\
+ OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + bias));\
+ OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + bias));\
+ OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + bias));\
+ OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + bias));\
+ OP(dst[4], (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5] + bias));\
+ OP(dst[5], (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6] + bias));\
+ OP(dst[6], (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7] + bias));\
+ OP(dst[7], (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8] + bias));\
+ dst+= stride;\
+ src+= stride;\
+ }\
+ }else{\
+ const int E= B+C;\
+ const int step= C ? stride : 1;\
+ for(i=0; i<h; i++){\
+ OP(dst[0], (A*src[0] + E*src[step+0] + bias));\
+ OP(dst[1], (A*src[1] + E*src[step+1] + bias));\
+ OP(dst[2], (A*src[2] + E*src[step+2] + bias));\
+ OP(dst[3], (A*src[3] + E*src[step+3] + bias));\
+ OP(dst[4], (A*src[4] + E*src[step+4] + bias));\
+ OP(dst[5], (A*src[5] + E*src[step+5] + bias));\
+ OP(dst[6], (A*src[6] + E*src[step+6] + bias));\
+ OP(dst[7], (A*src[7] + E*src[step+7] + bias));\
+ dst+= stride;\
+ src+= stride;\
+ }\
+ }\
+}
+
+#define op_avg(a, b) a = (((a)+((b)>>6)+1)>>1)
+#define op_put(a, b) a = ((b)>>6)
+
+RV40_CHROMA_MC(put_, op_put)
+RV40_CHROMA_MC(avg_, op_avg)
+
+void ff_rv40dsp_init(DSPContext* c, AVCodecContext *avctx) {
+ c->put_rv40_qpel_pixels_tab[0][ 0] = c->put_h264_qpel_pixels_tab[0][0];
+ c->put_rv40_qpel_pixels_tab[0][ 1] = put_rv40_qpel16_mc10_c;
+ c->put_rv40_qpel_pixels_tab[0][ 2] = put_rv40_qpel16_mc20_c;
+ c->put_rv40_qpel_pixels_tab[0][ 3] = put_rv40_qpel16_mc30_c;
+ c->put_rv40_qpel_pixels_tab[0][ 4] = put_rv40_qpel16_mc01_c;
+ c->put_rv40_qpel_pixels_tab[0][ 5] = put_rv40_qpel16_mc11_c;
+ c->put_rv40_qpel_pixels_tab[0][ 6] = put_rv40_qpel16_mc21_c;
+ c->put_rv40_qpel_pixels_tab[0][ 7] = put_rv40_qpel16_mc31_c;
+ c->put_rv40_qpel_pixels_tab[0][ 8] = put_rv40_qpel16_mc02_c;
+ c->put_rv40_qpel_pixels_tab[0][ 9] = put_rv40_qpel16_mc12_c;
+ c->put_rv40_qpel_pixels_tab[0][10] = put_rv40_qpel16_mc22_c;
+ c->put_rv40_qpel_pixels_tab[0][11] = put_rv40_qpel16_mc32_c;
+ c->put_rv40_qpel_pixels_tab[0][12] = put_rv40_qpel16_mc03_c;
+ c->put_rv40_qpel_pixels_tab[0][13] = put_rv40_qpel16_mc13_c;
+ c->put_rv40_qpel_pixels_tab[0][14] = put_rv40_qpel16_mc23_c;
+ c->avg_rv40_qpel_pixels_tab[0][ 0] = c->avg_h264_qpel_pixels_tab[0][0];
+ c->avg_rv40_qpel_pixels_tab[0][ 1] = avg_rv40_qpel16_mc10_c;
+ c->avg_rv40_qpel_pixels_tab[0][ 2] = avg_rv40_qpel16_mc20_c;
+ c->avg_rv40_qpel_pixels_tab[0][ 3] = avg_rv40_qpel16_mc30_c;
+ c->avg_rv40_qpel_pixels_tab[0][ 4] = avg_rv40_qpel16_mc01_c;
+ c->avg_rv40_qpel_pixels_tab[0][ 5] = avg_rv40_qpel16_mc11_c;
+ c->avg_rv40_qpel_pixels_tab[0][ 6] = avg_rv40_qpel16_mc21_c;
+ c->avg_rv40_qpel_pixels_tab[0][ 7] = avg_rv40_qpel16_mc31_c;
+ c->avg_rv40_qpel_pixels_tab[0][ 8] = avg_rv40_qpel16_mc02_c;
+ c->avg_rv40_qpel_pixels_tab[0][ 9] = avg_rv40_qpel16_mc12_c;
+ c->avg_rv40_qpel_pixels_tab[0][10] = avg_rv40_qpel16_mc22_c;
+ c->avg_rv40_qpel_pixels_tab[0][11] = avg_rv40_qpel16_mc32_c;
+ c->avg_rv40_qpel_pixels_tab[0][12] = avg_rv40_qpel16_mc03_c;
+ c->avg_rv40_qpel_pixels_tab[0][13] = avg_rv40_qpel16_mc13_c;
+ c->avg_rv40_qpel_pixels_tab[0][14] = avg_rv40_qpel16_mc23_c;
+ c->put_rv40_qpel_pixels_tab[1][ 0] = c->put_h264_qpel_pixels_tab[1][0];
+ c->put_rv40_qpel_pixels_tab[1][ 1] = put_rv40_qpel8_mc10_c;
+ c->put_rv40_qpel_pixels_tab[1][ 2] = put_rv40_qpel8_mc20_c;
+ c->put_rv40_qpel_pixels_tab[1][ 3] = put_rv40_qpel8_mc30_c;
+ c->put_rv40_qpel_pixels_tab[1][ 4] = put_rv40_qpel8_mc01_c;
+ c->put_rv40_qpel_pixels_tab[1][ 5] = put_rv40_qpel8_mc11_c;
+ c->put_rv40_qpel_pixels_tab[1][ 6] = put_rv40_qpel8_mc21_c;
+ c->put_rv40_qpel_pixels_tab[1][ 7] = put_rv40_qpel8_mc31_c;
+ c->put_rv40_qpel_pixels_tab[1][ 8] = put_rv40_qpel8_mc02_c;
+ c->put_rv40_qpel_pixels_tab[1][ 9] = put_rv40_qpel8_mc12_c;
+ c->put_rv40_qpel_pixels_tab[1][10] = put_rv40_qpel8_mc22_c;
+ c->put_rv40_qpel_pixels_tab[1][11] = put_rv40_qpel8_mc32_c;
+ c->put_rv40_qpel_pixels_tab[1][12] = put_rv40_qpel8_mc03_c;
+ c->put_rv40_qpel_pixels_tab[1][13] = put_rv40_qpel8_mc13_c;
+ c->put_rv40_qpel_pixels_tab[1][14] = put_rv40_qpel8_mc23_c;
+ c->avg_rv40_qpel_pixels_tab[1][ 0] = c->avg_h264_qpel_pixels_tab[1][0];
+ c->avg_rv40_qpel_pixels_tab[1][ 1] = avg_rv40_qpel8_mc10_c;
+ c->avg_rv40_qpel_pixels_tab[1][ 2] = avg_rv40_qpel8_mc20_c;
+ c->avg_rv40_qpel_pixels_tab[1][ 3] = avg_rv40_qpel8_mc30_c;
+ c->avg_rv40_qpel_pixels_tab[1][ 4] = avg_rv40_qpel8_mc01_c;
+ c->avg_rv40_qpel_pixels_tab[1][ 5] = avg_rv40_qpel8_mc11_c;
+ c->avg_rv40_qpel_pixels_tab[1][ 6] = avg_rv40_qpel8_mc21_c;
+ c->avg_rv40_qpel_pixels_tab[1][ 7] = avg_rv40_qpel8_mc31_c;
+ c->avg_rv40_qpel_pixels_tab[1][ 8] = avg_rv40_qpel8_mc02_c;
+ c->avg_rv40_qpel_pixels_tab[1][ 9] = avg_rv40_qpel8_mc12_c;
+ c->avg_rv40_qpel_pixels_tab[1][10] = avg_rv40_qpel8_mc22_c;
+ c->avg_rv40_qpel_pixels_tab[1][11] = avg_rv40_qpel8_mc32_c;
+ c->avg_rv40_qpel_pixels_tab[1][12] = avg_rv40_qpel8_mc03_c;
+ c->avg_rv40_qpel_pixels_tab[1][13] = avg_rv40_qpel8_mc13_c;
+ c->avg_rv40_qpel_pixels_tab[1][14] = avg_rv40_qpel8_mc23_c;
+
+ c->put_rv40_chroma_pixels_tab[0]= put_rv40_chroma_mc8_c;
+ c->put_rv40_chroma_pixels_tab[1]= put_rv40_chroma_mc4_c;
+ c->avg_rv40_chroma_pixels_tab[0]= avg_rv40_chroma_mc8_c;
+ c->avg_rv40_chroma_pixels_tab[1]= avg_rv40_chroma_mc4_c;
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv40vlc2.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv40vlc2.h
new file mode 100644
index 000000000..3b17d8faa
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/rv40vlc2.h
@@ -0,0 +1,706 @@
+/*
+ * RealVideo 4 decoder
+ * copyright (c) 2007 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file rv40vlc2.h
+ * RV40 VLC tables used for macroblock information decoding
+ */
+
+#ifndef AVCODEC_RV40VLC2_H
+#define AVCODEC_RV40VLC2_H
+
+#include <stdint.h>
+
+/**
+ * codes used for the first four block types
+ */
+//@{
+#define AIC_TOP_BITS 8
+#define AIC_TOP_SIZE 16
+static const uint8_t rv40_aic_top_vlc_codes[AIC_TOP_SIZE] = {
+ 0x01, 0x05, 0x01, 0x00, 0x03, 0x3D, 0x1D, 0x02,
+ 0x04, 0x3C, 0x3F, 0x1C, 0x0D, 0x3E, 0x0C, 0x01
+};
+
+static const uint8_t rv40_aic_top_vlc_bits[AIC_TOP_SIZE] = {
+ 1, 4, 5, 5, 5, 7, 6, 5, 4, 7, 7, 6, 5, 7, 5, 3
+};
+//@}
+
+/**
+ * codes used for determining a pair of block types
+ */
+//@{
+#define AIC_MODE2_NUM 20
+#define AIC_MODE2_SIZE 81
+#define AIC_MODE2_BITS 9
+
+static const uint16_t aic_mode2_vlc_codes[AIC_MODE2_NUM][AIC_MODE2_SIZE] = {
+{ 0x0001, 0x0001, 0x0005, 0x01F5, 0x0011, 0x0049, 0x0000, 0x0048, 0x004B,
+ 0x0035, 0x0003, 0x0034, 0x03C9, 0x01F4, 0x00C9, 0x004A, 0x0FD9, 0x03C8,
+ 0x0010, 0x0037, 0x0001, 0x00C8, 0x0075, 0x01F7, 0x00CB, 0x0074, 0x0002,
+ 0x01F6, 0x00CA, 0x01F1, 0x01F0, 0x1F81, 0x07F9, 0x1F80, 0x1F83, 0x07F8,
+ 0x0077, 0x00F5, 0x0036, 0x07FB, 0x0076, 0x1F82, 0x00F4, 0x00F7, 0x07FA,
+ 0x0071, 0x00F6, 0x03CB, 0x03CA, 0x0FD8, 0x00F1, 0x03F5, 0x1F8D, 0x07E5,
+ 0x0013, 0x0031, 0x00F0, 0x0FDB, 0x00F3, 0x07E4, 0x0030, 0x01F3, 0x07E7,
+ 0x03F4, 0x07E6, 0x0070, 0x3F19, 0x01F2, 0x3F18, 0x0FDA, 0x0033, 0x07E1,
+ 0x01FD, 0x01FC, 0x0073, 0x01FF, 0x0FC5, 0x0FC4, 0x0FC7, 0x03F7, 0x0072, },
+{ 0x0005, 0x0005, 0x0005, 0x0079, 0x0005, 0x000D, 0x001D, 0x0078, 0x0069,
+ 0x0004, 0x0001, 0x0007, 0x0068, 0x001C, 0x001F, 0x0004, 0x006B, 0x000C,
+ 0x0004, 0x001E, 0x0006, 0x006A, 0x0015, 0x000F, 0x0014, 0x0017, 0x0007,
+ 0x0016, 0x000E, 0x0011, 0x0009, 0x00D1, 0x00D0, 0x0181, 0x00D3, 0x007B,
+ 0x0010, 0x0013, 0x0004, 0x00D2, 0x0007, 0x0319, 0x0008, 0x007A, 0x00DD,
+ 0x0019, 0x0006, 0x000B, 0x0065, 0x00DC, 0x0012, 0x0064, 0x0180, 0x00DF,
+ 0x0006, 0x0018, 0x0001, 0x00DE, 0x001D, 0x00D9, 0x001B, 0x0067, 0x000A,
+ 0x00D8, 0x00DB, 0x001C, 0x0318, 0x00DA, 0x0635, 0x0183, 0x0000, 0x00C5,
+ 0x0066, 0x0061, 0x0035, 0x00C4, 0x0182, 0x0634, 0x031B, 0x00C7, 0x001F, },
+{ 0x0005, 0x0001, 0x001D, 0x01C1, 0x0035, 0x00F1, 0x006D, 0x00F0, 0x0049,
+ 0x0000, 0x0004, 0x0003, 0x00F3, 0x0048, 0x0034, 0x006C, 0x01C0, 0x01C3,
+ 0x0007, 0x0006, 0x0001, 0x006F, 0x0002, 0x004B, 0x006E, 0x001C, 0x0005,
+ 0x0069, 0x0068, 0x006B, 0x0037, 0x01C2, 0x00F2, 0x0395, 0x01CD, 0x00FD,
+ 0x006A, 0x0036, 0x0015, 0x01CC, 0x0014, 0x0394, 0x004A, 0x00FC, 0x00FF,
+ 0x0017, 0x0031, 0x00FE, 0x01CF, 0x0397, 0x00F9, 0x01CE, 0x0725, 0x0396,
+ 0x0016, 0x0030, 0x0075, 0x0724, 0x00F8, 0x0727, 0x0033, 0x0391, 0x0390,
+ 0x0011, 0x0032, 0x001F, 0x00FB, 0x0074, 0x0726, 0x00FA, 0x001E, 0x0077,
+ 0x0019, 0x0018, 0x0004, 0x0010, 0x003D, 0x0076, 0x0071, 0x0013, 0x0001, },
+{ 0x000D, 0x0019, 0x0011, 0x0015, 0x0061, 0x0019, 0x0014, 0x01AD, 0x0060,
+ 0x0018, 0x0001, 0x0005, 0x001B, 0x0010, 0x0019, 0x0005, 0x0017, 0x0018,
+ 0x0016, 0x0004, 0x0004, 0x0013, 0x000C, 0x0012, 0x001A, 0x0018, 0x0005,
+ 0x000F, 0x001B, 0x0004, 0x001D, 0x0011, 0x001C, 0x0010, 0x000E, 0x001B,
+ 0x0013, 0x001F, 0x001A, 0x0029, 0x0005, 0x0063, 0x001E, 0x0009, 0x0062,
+ 0x0008, 0x0007, 0x0007, 0x0019, 0x0004, 0x001A, 0x0018, 0x006D, 0x0007,
+ 0x001B, 0x0007, 0x001A, 0x006C, 0x0006, 0x0012, 0x0005, 0x006F, 0x000B,
+ 0x006E, 0x0069, 0x001D, 0x0359, 0x0028, 0x002B, 0x002A, 0x001C, 0x00D5,
+ 0x0358, 0x001F, 0x0001, 0x001E, 0x0068, 0x00D4, 0x00D7, 0x0019, 0x0000, },
+{ 0x00B9, 0x0061, 0x0060, 0x00B8, 0x02B5, 0x01AD, 0x00BB, 0x0AF5, 0x0151,
+ 0x0001, 0x0001, 0x0005, 0x0000, 0x0003, 0x0005, 0x0004, 0x0063, 0x0025,
+ 0x00BA, 0x0004, 0x0007, 0x0062, 0x00A5, 0x0024, 0x006D, 0x0002, 0x006C,
+ 0x02B4, 0x000D, 0x006F, 0x0027, 0x00A4, 0x0026, 0x01AC, 0x0150, 0x01AF,
+ 0x01AE, 0x0021, 0x006E, 0x02B7, 0x0020, 0x0153, 0x0023, 0x00A7, 0x0152,
+ 0x00A6, 0x0006, 0x000C, 0x0022, 0x01A9, 0x0019, 0x002D, 0x02B6, 0x01A8,
+ 0x000F, 0x0007, 0x000E, 0x00A1, 0x0069, 0x002C, 0x0001, 0x01AB, 0x00A0,
+ 0x02B1, 0x00A3, 0x002F, 0x0AF4, 0x02B0, 0x0AF7, 0x02B3, 0x0068, 0x015D,
+ 0x0AF6, 0x01AA, 0x0055, 0x015C, 0x02B2, 0x0579, 0x0578, 0x015F, 0x00A2, },
+{ 0x0905, 0x013D, 0x013C, 0x0904, 0x121D, 0x049D, 0x049C, 0x243D, 0x0907,
+ 0x00ED, 0x0001, 0x0015, 0x0041, 0x013F, 0x0031, 0x0014, 0x025D, 0x025C,
+ 0x013E, 0x000D, 0x0000, 0x0040, 0x0139, 0x0043, 0x0030, 0x0017, 0x0033,
+ 0x0906, 0x0032, 0x0042, 0x00EC, 0x025F, 0x00EF, 0x025E, 0x049F, 0x0138,
+ 0x0901, 0x013B, 0x0259, 0x121C, 0x049E, 0x0900, 0x0258, 0x243C, 0x121F,
+ 0x0903, 0x003D, 0x00EE, 0x025B, 0x025A, 0x004D, 0x013A, 0x0902, 0x0245,
+ 0x00E9, 0x0016, 0x00E8, 0x0499, 0x0125, 0x0244, 0x004C, 0x0498, 0x090D,
+ 0x00EB, 0x003C, 0x0011, 0x049B, 0x049A, 0x0485, 0x00EA, 0x003F, 0x0124,
+ 0x090C, 0x003E, 0x0039, 0x0095, 0x0247, 0x0246, 0x0484, 0x0094, 0x0038, },
+{ 0x0F09, 0x00CD, 0x01FD, 0x0791, 0x1E6D, 0x0790, 0x03D9, 0x3CD1, 0x3CD0,
+ 0x0075, 0x0001, 0x0001, 0x0035, 0x00CC, 0x0011, 0x0000, 0x03D8, 0x01FC,
+ 0x03DB, 0x0010, 0x0003, 0x00CF, 0x03DA, 0x00CE, 0x0074, 0x0034, 0x0077,
+ 0x0793, 0x0013, 0x0076, 0x0071, 0x03C5, 0x0070, 0x01FF, 0x0792, 0x01FE,
+ 0x01F9, 0x0037, 0x00C9, 0x0F08, 0x01F8, 0x03C4, 0x00C8, 0x0F0B, 0x079D,
+ 0x03C7, 0x0001, 0x0012, 0x0073, 0x00CB, 0x0005, 0x0036, 0x03C6, 0x0072,
+ 0x007D, 0x0002, 0x00CA, 0x079C, 0x01FB, 0x00F5, 0x0031, 0x079F, 0x0F0A,
+ 0x0F35, 0x079E, 0x01FA, 0x1E6C, 0x1E6F, 0x3CD3, 0x0799, 0x03C1, 0x1E6E,
+ 0x3CD2, 0x0030, 0x00F4, 0x007C, 0x03C0, 0x03C3, 0x0798, 0x01E5, 0x00F7, },
+{ 0x01A5, 0x0001, 0x001D, 0x0021, 0x00A1, 0x000D, 0x0061, 0x06B9, 0x00A0,
+ 0x0060, 0x0001, 0x0005, 0x000C, 0x0020, 0x001C, 0x0004, 0x01A4, 0x01A7,
+ 0x00A3, 0x001F, 0x001E, 0x0023, 0x0022, 0x002D, 0x002C, 0x0063, 0x0062,
+ 0x1A81, 0x01A6, 0x01A1, 0x06B8, 0x06BB, 0x00A2, 0x06BA, 0x0D59, 0x06A5,
+ 0x01A0, 0x000F, 0x006D, 0x06A4, 0x002F, 0x00AD, 0x006C, 0x06A7, 0x00AC,
+ 0x0D58, 0x000E, 0x01A3, 0x00AF, 0x00AE, 0x006F, 0x01A2, 0x0D5B, 0x00A9,
+ 0x0019, 0x0001, 0x0009, 0x00A8, 0x006E, 0x002E, 0x0000, 0x01AD, 0x00AB,
+ 0x00AA, 0x0355, 0x0029, 0x1A80, 0x1A83, 0x1A82, 0x0354, 0x01AC, 0x0D5A,
+ 0x1A8D, 0x01AF, 0x0357, 0x0D45, 0x0D44, 0x0D47, 0x1A8C, 0x06A6, 0x06A1, },
+{ 0x0001, 0x0011, 0x0005, 0x0775, 0x00F9, 0x00F8, 0x0031, 0x0030, 0x0049,
+ 0x00FB, 0x0010, 0x0033, 0x0EC9, 0x038D, 0x038C, 0x00FA, 0x038F, 0x0774,
+ 0x0048, 0x0032, 0x0000, 0x01D5, 0x00E5, 0x038E, 0x00E4, 0x0013, 0x000D,
+ 0x0389, 0x0777, 0x0388, 0x038B, 0x1DF9, 0x0EC8, 0x3BC9, 0x1DF8, 0x038A,
+ 0x03B5, 0x0776, 0x00E7, 0x3BC8, 0x01D4, 0x3BCB, 0x0ECB, 0x0771, 0x0ECA,
+ 0x01D7, 0x03B4, 0x01D6, 0x1DFB, 0x0EF5, 0x0770, 0x0EF4, 0x3BCA, 0x0773,
+ 0x00E6, 0x03B7, 0x004B, 0x1DFA, 0x03B6, 0x0EF7, 0x00E1, 0x0EF6, 0x0EF1,
+ 0x03B1, 0x01D1, 0x003D, 0x0EF0, 0x0772, 0x077D, 0x077C, 0x003C, 0x01D0,
+ 0x03B0, 0x01D3, 0x003F, 0x03B3, 0x01D2, 0x0EF3, 0x077F, 0x00E0, 0x004A, },
+{ 0x0015, 0x0049, 0x0014, 0x07D1, 0x03FD, 0x03FC, 0x01C1, 0x01C0, 0x00F1,
+ 0x0017, 0x0001, 0x0001, 0x01C3, 0x0048, 0x004B, 0x0016, 0x0031, 0x01C2,
+ 0x004A, 0x0011, 0x0000, 0x01CD, 0x00F0, 0x01CC, 0x0075, 0x0010, 0x000D,
+ 0x03FF, 0x01CF, 0x01CE, 0x07D0, 0x0F81, 0x07D3, 0x1F1D, 0x0F80, 0x07D2,
+ 0x01C9, 0x03FE, 0x0074, 0x07DD, 0x00F3, 0x1F1C, 0x07DC, 0x03F9, 0x07DF,
+ 0x00F2, 0x00FD, 0x0077, 0x07DE, 0x07D9, 0x01C8, 0x07D8, 0x0F83, 0x03F8,
+ 0x0030, 0x0076, 0x0013, 0x0F82, 0x00FC, 0x03FB, 0x0033, 0x03FA, 0x03E5,
+ 0x03E4, 0x01CB, 0x0032, 0x1F1F, 0x03E7, 0x07DB, 0x07DA, 0x003D, 0x01CA,
+ 0x07C5, 0x03E6, 0x0071, 0x0F8D, 0x07C4, 0x1F1E, 0x0F8C, 0x03E1, 0x01F5, },
+{ 0x0019, 0x0065, 0x0018, 0x0351, 0x0350, 0x0353, 0x0021, 0x0020, 0x0064,
+ 0x001D, 0x0005, 0x0005, 0x01A5, 0x0023, 0x0067, 0x0005, 0x0066, 0x0022,
+ 0x001B, 0x0004, 0x0001, 0x0004, 0x001C, 0x0061, 0x001A, 0x0005, 0x0004,
+ 0x0007, 0x002D, 0x0006, 0x002C, 0x01A4, 0x002F, 0x0352, 0x035D, 0x0060,
+ 0x0001, 0x002E, 0x001F, 0x035C, 0x0000, 0x06B1, 0x01A7, 0x0029, 0x01A6,
+ 0x0028, 0x0063, 0x0062, 0x035F, 0x01A1, 0x002B, 0x06B0, 0x06B3, 0x01A0,
+ 0x0003, 0x006D, 0x001E, 0x035E, 0x006C, 0x06B2, 0x0002, 0x01A3, 0x01A2,
+ 0x000D, 0x0005, 0x0007, 0x01AD, 0x006F, 0x002A, 0x006E, 0x0004, 0x0004,
+ 0x000C, 0x0007, 0x0006, 0x000F, 0x000E, 0x00D5, 0x0009, 0x0006, 0x0007, },
+{ 0x0065, 0x0181, 0x0064, 0x36C9, 0x06D5, 0x0DB5, 0x0379, 0x0180, 0x0183,
+ 0x00D5, 0x001D, 0x001C, 0x0DB4, 0x0182, 0x0378, 0x00D4, 0x00D7, 0x06D4,
+ 0x0067, 0x001F, 0x0001, 0x00D6, 0x00D1, 0x018D, 0x0066, 0x0001, 0x0000,
+ 0x037B, 0x06D7, 0x037A, 0x0DB7, 0x36C8, 0x06D6, 0x0DB6, 0x1B79, 0x0DB1,
+ 0x018C, 0x0365, 0x00D0, 0x1B78, 0x00D3, 0x1B7B, 0x0364, 0x06D1, 0x06D0,
+ 0x018F, 0x018E, 0x00D2, 0x36CB, 0x0367, 0x0366, 0x06D3, 0x0DB0, 0x06D2,
+ 0x0361, 0x06DD, 0x0189, 0x36CA, 0x0360, 0x36F5, 0x0188, 0x0DB3, 0x36F4,
+ 0x0009, 0x0008, 0x0005, 0x06DC, 0x00DD, 0x018B, 0x00DC, 0x0004, 0x000B,
+ 0x018A, 0x0061, 0x0003, 0x0363, 0x00DF, 0x06DF, 0x0362, 0x000A, 0x001E, },
+{ 0x001D, 0x0061, 0x000D, 0x0D55, 0x06B9, 0x06B8, 0x01A5, 0x0021, 0x0020,
+ 0x0023, 0x000C, 0x0060, 0x0D54, 0x00AD, 0x00AC, 0x0022, 0x00AF, 0x06BB,
+ 0x000F, 0x001C, 0x0001, 0x002D, 0x0063, 0x01A4, 0x000E, 0x0001, 0x0005,
+ 0x01A7, 0x06BA, 0x01A6, 0x06A5, 0x0D57, 0x0D56, 0x1ABD, 0x0D51, 0x00AE,
+ 0x002C, 0x00A9, 0x002F, 0x0D50, 0x01A1, 0x1ABC, 0x06A4, 0x06A7, 0x06A6,
+ 0x00A8, 0x06A1, 0x01A0, 0x1ABF, 0x0D53, 0x06A0, 0x0D52, 0x1ABE, 0x06A3,
+ 0x0062, 0x002E, 0x0009, 0x0D5D, 0x01A3, 0x0D5C, 0x006D, 0x00AB, 0x06A2,
+ 0x006C, 0x001F, 0x0001, 0x06AD, 0x0029, 0x01A2, 0x0028, 0x0004, 0x001E,
+ 0x01AD, 0x006F, 0x0000, 0x01AC, 0x01AF, 0x06AC, 0x00AA, 0x006E, 0x0019, },
+{ 0x0019, 0x007D, 0x0018, 0x01B5, 0x000D, 0x01B4, 0x007C, 0x007F, 0x01B7,
+ 0x000C, 0x001B, 0x001A, 0x01B6, 0x000F, 0x00D5, 0x0019, 0x007E, 0x00D4,
+ 0x0018, 0x001B, 0x0001, 0x000E, 0x0011, 0x0009, 0x0005, 0x0005, 0x0005,
+ 0x00D7, 0x01B1, 0x0008, 0x01B0, 0x0079, 0x06FD, 0x0371, 0x0370, 0x00D6,
+ 0x0078, 0x01B3, 0x0010, 0x0373, 0x0013, 0x06FC, 0x007B, 0x007A, 0x00D1,
+ 0x00D0, 0x00D3, 0x0065, 0x0372, 0x06FF, 0x0064, 0x06FE, 0x037D, 0x00D2,
+ 0x00DD, 0x0067, 0x0004, 0x037C, 0x0012, 0x01B2, 0x0007, 0x0066, 0x01BD,
+ 0x0006, 0x0061, 0x0004, 0x01BC, 0x001A, 0x0060, 0x001D, 0x0004, 0x001C,
+ 0x0063, 0x0001, 0x0007, 0x000B, 0x0000, 0x0062, 0x000A, 0x0005, 0x0007, },
+{ 0x0069, 0x0045, 0x0068, 0x04BD, 0x0255, 0x04BC, 0x00E5, 0x00E4, 0x0031,
+ 0x0030, 0x0019, 0x0001, 0x0121, 0x00E7, 0x00E6, 0x0033, 0x00E1, 0x00E0,
+ 0x006B, 0x0018, 0x0001, 0x0044, 0x0032, 0x0047, 0x006A, 0x001B, 0x0005,
+ 0x003D, 0x0046, 0x0015, 0x0041, 0x0120, 0x0123, 0x04BF, 0x0122, 0x0040,
+ 0x003C, 0x00E3, 0x0014, 0x0254, 0x0043, 0x0975, 0x012D, 0x00E2, 0x00ED,
+ 0x0042, 0x00EC, 0x004D, 0x0257, 0x0256, 0x0251, 0x04BE, 0x0974, 0x0250,
+ 0x00EF, 0x00EE, 0x004C, 0x04B9, 0x012C, 0x04B8, 0x004F, 0x04BB, 0x0253,
+ 0x003F, 0x0017, 0x0001, 0x0252, 0x00E9, 0x00E8, 0x00EB, 0x0000, 0x0003,
+ 0x0016, 0x0002, 0x0004, 0x004E, 0x003E, 0x00EA, 0x0049, 0x000D, 0x0007, },
+{ 0x000D, 0x01BD, 0x000C, 0x0D31, 0x0D30, 0x0D33, 0x0359, 0x0358, 0x002D,
+ 0x0065, 0x001D, 0x001C, 0x0D32, 0x035B, 0x035A, 0x002C, 0x01BC, 0x0345,
+ 0x000F, 0x001F, 0x0001, 0x002F, 0x0064, 0x01BF, 0x0067, 0x0001, 0x0005,
+ 0x0066, 0x002E, 0x0061, 0x0029, 0x0695, 0x0694, 0x0697, 0x0696, 0x0060,
+ 0x01BE, 0x0D3D, 0x0028, 0x1A49, 0x0344, 0x1A48, 0x1A4B, 0x0D3C, 0x0691,
+ 0x002B, 0x01B9, 0x002A, 0x0D3F, 0x0690, 0x0347, 0x0D3E, 0x1A4A, 0x0346,
+ 0x00D5, 0x0341, 0x0063, 0x0D39, 0x0340, 0x0D38, 0x01B8, 0x0D3B, 0x0D3A,
+ 0x00D4, 0x0062, 0x0000, 0x0693, 0x01BB, 0x0343, 0x0342, 0x001E, 0x000E,
+ 0x006D, 0x0009, 0x0001, 0x006C, 0x00D7, 0x034D, 0x01BA, 0x0008, 0x0004, },
+{ 0x0075, 0x00CD, 0x0035, 0x03C1, 0x03C0, 0x07F9, 0x03C3, 0x1F8D, 0x00CC,
+ 0x0074, 0x0011, 0x0010, 0x03C2, 0x0FD9, 0x01F1, 0x00CF, 0x03CD, 0x00CE,
+ 0x0034, 0x0001, 0x0001, 0x0037, 0x00C9, 0x00C8, 0x0036, 0x0000, 0x0001,
+ 0x0FD8, 0x03CC, 0x00CB, 0x01F0, 0x07F8, 0x03CF, 0x07FB, 0x07FA, 0x00CA,
+ 0x01F3, 0x03CE, 0x00F5, 0x0FDB, 0x00F4, 0x07E5, 0x07E4, 0x07E7, 0x01F2,
+ 0x07E6, 0x03C9, 0x01FD, 0x0FDA, 0x1F8C, 0x07E1, 0x1F8F, 0x1F8E, 0x03C8,
+ 0x03CB, 0x0077, 0x0076, 0x0FC5, 0x03CA, 0x07E0, 0x00F7, 0x0FC4, 0x03F5,
+ 0x00F6, 0x01FC, 0x0003, 0x03F4, 0x0071, 0x03F7, 0x00F1, 0x0013, 0x0031,
+ 0x0030, 0x0070, 0x0005, 0x0012, 0x0073, 0x01FF, 0x0072, 0x007D, 0x0002, },
+{ 0x0061, 0x0055, 0x0060, 0x02C9, 0x02C8, 0x02CB, 0x0171, 0x00B5, 0x0054,
+ 0x0001, 0x0001, 0x0001, 0x0057, 0x0001, 0x0063, 0x001D, 0x0062, 0x0039,
+ 0x006D, 0x0000, 0x0005, 0x0038, 0x0056, 0x00B4, 0x006C, 0x0003, 0x001C,
+ 0x006F, 0x003B, 0x0002, 0x003A, 0x0170, 0x00B7, 0x0173, 0x0051, 0x006E,
+ 0x0025, 0x0050, 0x0069, 0x02CA, 0x0024, 0x0027, 0x0172, 0x00B6, 0x00B1,
+ 0x000D, 0x000C, 0x001F, 0x017D, 0x0026, 0x0068, 0x0053, 0x017C, 0x006B,
+ 0x001E, 0x000F, 0x0004, 0x017F, 0x006A, 0x02F5, 0x0019, 0x0021, 0x0052,
+ 0x02F4, 0x02F7, 0x0020, 0x0BCD, 0x05E5, 0x05E4, 0x0BCC, 0x0023, 0x00B0,
+ 0x02F6, 0x00B3, 0x0022, 0x02F1, 0x02F0, 0x0BCF, 0x0BCE, 0x017E, 0x005D, },
+{ 0x00BD, 0x0025, 0x01A1, 0x0159, 0x0299, 0x00BC, 0x0024, 0x0505, 0x0504,
+ 0x01A0, 0x0001, 0x001D, 0x006D, 0x001C, 0x0001, 0x0005, 0x0027, 0x01A3,
+ 0x0158, 0x001F, 0x001E, 0x01A2, 0x0026, 0x0021, 0x000D, 0x0020, 0x0023,
+ 0x0298, 0x006C, 0x0022, 0x00BF, 0x00BE, 0x01AD, 0x002D, 0x029B, 0x00B9,
+ 0x01AC, 0x00B8, 0x01AF, 0x029A, 0x006F, 0x015B, 0x006E, 0x0285, 0x0284,
+ 0x01AE, 0x0019, 0x002C, 0x01A9, 0x01A8, 0x000C, 0x000F, 0x015A, 0x00BB,
+ 0x000E, 0x0000, 0x0069, 0x01AB, 0x0018, 0x01AA, 0x0004, 0x0055, 0x00BA,
+ 0x0507, 0x0145, 0x0054, 0x0506, 0x00A5, 0x0501, 0x00A4, 0x0057, 0x0500,
+ 0x0A05, 0x0144, 0x00A7, 0x0287, 0x0286, 0x0503, 0x0147, 0x0A04, 0x0146, },
+{ 0x0759, 0x0041, 0x00E5, 0x03BD, 0x0E9D, 0x012D, 0x012C, 0x3A1D, 0x03BC,
+ 0x012F, 0x000D, 0x0040, 0x00E4, 0x03BF, 0x0043, 0x0042, 0x0758, 0x03BE,
+ 0x00E7, 0x0001, 0x0000, 0x003D, 0x00E6, 0x0015, 0x0014, 0x0017, 0x003C,
+ 0x743D, 0x012E, 0x03B9, 0x03B8, 0x0E9C, 0x03BB, 0x075B, 0x3A1C, 0x0E9F,
+ 0x0129, 0x00E1, 0x0128, 0x0E9E, 0x012B, 0x075A, 0x00E0, 0x0E99, 0x0745,
+ 0x3A1F, 0x03BA, 0x0744, 0x0E98, 0x1D0D, 0x03A5, 0x0E9B, 0x743C, 0x0E9A,
+ 0x012A, 0x004D, 0x00E3, 0x0E85, 0x01D5, 0x0E84, 0x004C, 0x0747, 0x1D0C,
+ 0x01D4, 0x003F, 0x0016, 0x0746, 0x03A4, 0x0741, 0x004F, 0x003E, 0x01D7,
+ 0x0740, 0x000C, 0x0011, 0x004E, 0x00E2, 0x00ED, 0x00EC, 0x0049, 0x0048, },
+};
+
+static const uint8_t aic_mode2_vlc_bits[AIC_MODE2_NUM][AIC_MODE2_SIZE] = {
+{ 1, 5, 4, 10, 6, 8, 5, 8, 8,
+ 7, 5, 7, 11, 10, 9, 8, 13, 11,
+ 6, 7, 3, 9, 8, 10, 9, 8, 5,
+ 10, 9, 10, 10, 14, 12, 14, 14, 12,
+ 8, 9, 7, 12, 8, 14, 9, 9, 12,
+ 8, 9, 11, 11, 13, 9, 11, 14, 12,
+ 6, 7, 9, 13, 9, 12, 7, 10, 12,
+ 11, 12, 8, 15, 10, 15, 13, 7, 12,
+ 10, 10, 8, 10, 13, 13, 13, 11, 8, },
+{ 4, 6, 5, 11, 8, 10, 7, 11, 9,
+ 4, 1, 4, 9, 7, 7, 5, 9, 10,
+ 6, 7, 4, 9, 9, 10, 9, 9, 6,
+ 9, 10, 9, 10, 12, 12, 13, 12, 11,
+ 9, 9, 8, 12, 8, 14, 10, 11, 12,
+ 7, 8, 10, 11, 12, 9, 11, 13, 12,
+ 6, 7, 8, 12, 9, 12, 7, 11, 10,
+ 12, 12, 9, 14, 12, 15, 13, 8, 12,
+ 11, 11, 10, 12, 13, 15, 14, 12, 9, },
+{ 5, 7, 6, 12, 9, 11, 8, 11, 10,
+ 7, 5, 7, 11, 10, 9, 8, 12, 12,
+ 5, 5, 1, 8, 7, 10, 8, 6, 4,
+ 8, 8, 8, 9, 12, 11, 13, 12, 11,
+ 8, 9, 8, 12, 8, 13, 10, 11, 11,
+ 8, 9, 11, 12, 13, 11, 12, 14, 13,
+ 8, 9, 10, 14, 11, 14, 9, 13, 13,
+ 8, 9, 6, 11, 10, 14, 11, 6, 10,
+ 6, 6, 4, 8, 9, 10, 10, 8, 5, },
+{ 11, 7, 8, 10, 12, 9, 10, 14, 12,
+ 7, 1, 5, 7, 8, 6, 4, 10, 9,
+ 10, 5, 4, 8, 11, 8, 7, 6, 7,
+ 11, 6, 7, 8, 10, 8, 10, 11, 9,
+ 10, 8, 9, 13, 9, 12, 8, 11, 12,
+ 11, 4, 7, 8, 9, 6, 8, 12, 9,
+ 8, 5, 8, 12, 9, 10, 6, 12, 11,
+ 12, 12, 10, 15, 13, 13, 13, 10, 13,
+ 15, 10, 9, 10, 12, 13, 13, 10, 9, },
+{ 11, 8, 8, 11, 13, 10, 11, 15, 12,
+ 7, 1, 4, 7, 7, 5, 4, 8, 9,
+ 11, 5, 5, 8, 11, 9, 8, 7, 8,
+ 13, 7, 8, 9, 11, 9, 10, 12, 10,
+ 10, 9, 8, 13, 9, 12, 9, 11, 12,
+ 11, 5, 7, 9, 10, 6, 9, 13, 10,
+ 7, 4, 7, 11, 8, 9, 5, 10, 11,
+ 13, 11, 9, 15, 13, 15, 13, 8, 12,
+ 15, 10, 10, 12, 13, 14, 14, 12, 11, },
+{ 12, 9, 9, 12, 13, 11, 11, 14, 12,
+ 8, 2, 5, 7, 9, 6, 5, 10, 10,
+ 9, 4, 2, 7, 9, 7, 6, 5, 6,
+ 12, 6, 7, 8, 10, 8, 10, 11, 9,
+ 12, 9, 10, 13, 11, 12, 10, 14, 13,
+ 12, 6, 8, 10, 10, 7, 9, 12, 10,
+ 8, 5, 8, 11, 9, 10, 7, 11, 12,
+ 8, 6, 5, 11, 11, 11, 8, 6, 9,
+ 12, 6, 6, 8, 10, 10, 11, 8, 6, },
+{ 13, 9, 10, 12, 14, 12, 11, 15, 15,
+ 8, 1, 5, 7, 9, 6, 5, 11, 10,
+ 11, 6, 5, 9, 11, 9, 8, 7, 8,
+ 12, 6, 8, 8, 11, 8, 10, 12, 10,
+ 10, 7, 9, 13, 10, 11, 9, 13, 12,
+ 11, 3, 6, 8, 9, 4, 7, 11, 8,
+ 8, 5, 9, 12, 10, 9, 7, 12, 13,
+ 13, 12, 10, 14, 14, 15, 12, 11, 14,
+ 15, 7, 9, 8, 11, 11, 12, 10, 9, },
+{ 10, 5, 6, 9, 11, 7, 8, 12, 11,
+ 8, 1, 4, 7, 9, 6, 4, 10, 10,
+ 11, 6, 6, 9, 9, 9, 9, 8, 8,
+ 14, 10, 10, 12, 12, 11, 12, 13, 12,
+ 10, 7, 8, 12, 9, 11, 8, 12, 11,
+ 13, 7, 10, 11, 11, 8, 10, 13, 11,
+ 6, 3, 7, 11, 8, 9, 5, 10, 11,
+ 11, 11, 9, 14, 14, 14, 11, 10, 13,
+ 14, 10, 11, 13, 13, 13, 14, 12, 12, },
+{ 2, 5, 3, 11, 8, 8, 6, 6, 7,
+ 8, 5, 6, 12, 10, 10, 8, 10, 11,
+ 7, 6, 2, 9, 8, 10, 8, 5, 4,
+ 10, 11, 10, 10, 13, 12, 14, 13, 10,
+ 10, 11, 8, 14, 9, 14, 12, 11, 12,
+ 9, 10, 9, 13, 12, 11, 12, 14, 11,
+ 8, 10, 7, 13, 10, 12, 8, 12, 12,
+ 10, 9, 6, 12, 11, 11, 11, 6, 9,
+ 10, 9, 6, 10, 9, 12, 11, 8, 7, },
+{ 6, 8, 6, 12, 11, 11, 10, 10, 9,
+ 6, 1, 3, 10, 8, 8, 6, 7, 10,
+ 8, 6, 3, 10, 9, 10, 8, 6, 5,
+ 11, 10, 10, 12, 13, 12, 14, 13, 12,
+ 10, 11, 8, 12, 9, 14, 12, 11, 12,
+ 9, 9, 8, 12, 12, 10, 12, 13, 11,
+ 7, 8, 6, 13, 9, 11, 7, 11, 11,
+ 11, 10, 7, 14, 11, 12, 12, 7, 10,
+ 12, 11, 8, 13, 12, 14, 13, 11, 10, },
+{ 7, 10, 7, 13, 13, 13, 11, 11, 10,
+ 8, 5, 6, 12, 11, 10, 9, 10, 11,
+ 7, 5, 1, 9, 8, 10, 7, 4, 4,
+ 9, 11, 9, 11, 12, 11, 13, 13, 10,
+ 9, 11, 8, 13, 9, 14, 12, 11, 12,
+ 11, 10, 10, 13, 12, 11, 14, 14, 12,
+ 9, 10, 8, 13, 10, 14, 9, 12, 12,
+ 9, 7, 4, 12, 10, 11, 10, 6, 7,
+ 9, 7, 4, 9, 9, 11, 9, 7, 5, },
+{ 7, 9, 7, 14, 11, 12, 10, 9, 9,
+ 8, 5, 5, 12, 9, 10, 8, 8, 11,
+ 7, 5, 2, 8, 8, 9, 7, 4, 4,
+ 10, 11, 10, 12, 14, 11, 12, 13, 12,
+ 9, 10, 8, 13, 8, 13, 10, 11, 11,
+ 9, 9, 8, 14, 10, 10, 11, 12, 11,
+ 10, 11, 9, 14, 10, 14, 9, 12, 14,
+ 6, 6, 3, 11, 8, 9, 8, 3, 6,
+ 9, 7, 4, 10, 8, 11, 10, 6, 5, },
+{ 6, 8, 7, 13, 12, 12, 10, 9, 9,
+ 9, 7, 8, 13, 11, 11, 9, 11, 12,
+ 7, 6, 1, 9, 8, 10, 7, 5, 4,
+ 10, 12, 10, 12, 13, 13, 14, 13, 11,
+ 9, 11, 9, 13, 10, 14, 12, 12, 12,
+ 11, 12, 10, 14, 13, 12, 13, 14, 12,
+ 8, 9, 7, 13, 10, 13, 8, 11, 12,
+ 8, 6, 3, 12, 9, 10, 9, 4, 6,
+ 10, 8, 5, 10, 10, 12, 11, 8, 6, },
+{ 7, 10, 7, 12, 9, 12, 10, 10, 12,
+ 9, 7, 7, 12, 9, 11, 6, 10, 11,
+ 6, 6, 1, 9, 8, 9, 7, 4, 5,
+ 11, 12, 9, 12, 10, 14, 13, 13, 11,
+ 10, 12, 8, 13, 8, 14, 10, 10, 11,
+ 11, 11, 10, 13, 14, 10, 14, 13, 11,
+ 11, 10, 7, 13, 8, 12, 7, 10, 12,
+ 7, 10, 4, 12, 6, 10, 8, 5, 8,
+ 10, 7, 4, 9, 7, 10, 9, 6, 5, },
+{ 7, 9, 7, 13, 12, 13, 10, 10, 8,
+ 8, 5, 6, 11, 10, 10, 8, 10, 10,
+ 7, 5, 2, 9, 8, 9, 7, 5, 3,
+ 8, 9, 7, 9, 11, 11, 13, 11, 9,
+ 8, 10, 7, 12, 9, 14, 11, 10, 10,
+ 9, 10, 9, 12, 12, 12, 13, 14, 12,
+ 10, 10, 9, 13, 11, 13, 9, 13, 12,
+ 8, 7, 4, 12, 10, 10, 10, 6, 6,
+ 7, 6, 3, 9, 8, 10, 9, 6, 3, },
+{ 7, 10, 7, 13, 13, 13, 11, 11, 9,
+ 8, 6, 6, 13, 11, 11, 9, 10, 11,
+ 7, 6, 1, 9, 8, 10, 8, 5, 4,
+ 8, 9, 8, 9, 12, 12, 12, 12, 8,
+ 10, 13, 9, 14, 11, 14, 14, 13, 12,
+ 9, 10, 9, 13, 12, 11, 13, 14, 11,
+ 9, 11, 8, 13, 11, 13, 10, 13, 13,
+ 9, 8, 5, 12, 10, 11, 11, 6, 7,
+ 8, 7, 3, 8, 9, 11, 10, 7, 4, },
+{ 8, 9, 7, 11, 11, 12, 11, 14, 9,
+ 8, 6, 6, 11, 13, 10, 9, 11, 9,
+ 7, 5, 1, 7, 9, 9, 7, 5, 3,
+ 13, 11, 9, 10, 12, 11, 12, 12, 9,
+ 10, 11, 9, 13, 9, 12, 12, 12, 10,
+ 12, 11, 10, 13, 14, 12, 14, 14, 11,
+ 11, 8, 8, 13, 11, 12, 9, 13, 11,
+ 9, 10, 5, 11, 8, 11, 9, 6, 7,
+ 7, 8, 4, 6, 8, 10, 8, 8, 5, },
+{ 8, 10, 8, 13, 13, 13, 12, 11, 10,
+ 5, 1, 3, 10, 7, 8, 6, 8, 9,
+ 8, 7, 4, 9, 10, 11, 8, 7, 6,
+ 8, 9, 7, 9, 12, 11, 12, 10, 8,
+ 9, 10, 8, 13, 9, 9, 12, 11, 11,
+ 7, 7, 6, 12, 9, 8, 10, 12, 8,
+ 6, 7, 4, 12, 8, 13, 6, 9, 10,
+ 13, 13, 9, 15, 14, 14, 15, 9, 11,
+ 13, 11, 9, 13, 13, 15, 15, 12, 10, },
+{ 10, 8, 9, 11, 12, 10, 8, 13, 13,
+ 9, 2, 5, 7, 5, 4, 3, 8, 9,
+ 11, 5, 5, 9, 8, 8, 6, 8, 8,
+ 12, 7, 8, 10, 10, 9, 8, 12, 10,
+ 9, 10, 9, 12, 7, 11, 7, 12, 12,
+ 9, 5, 8, 9, 9, 6, 6, 11, 10,
+ 6, 4, 7, 9, 5, 9, 3, 9, 10,
+ 13, 11, 9, 13, 10, 13, 10, 9, 13,
+ 14, 11, 10, 12, 12, 13, 11, 14, 11, },
+{ 11, 7, 8, 10, 12, 9, 9, 14, 10,
+ 9, 4, 7, 8, 10, 7, 7, 11, 10,
+ 8, 2, 2, 6, 8, 5, 5, 5, 6,
+ 15, 9, 10, 10, 12, 10, 11, 14, 12,
+ 9, 8, 9, 12, 9, 11, 8, 12, 11,
+ 14, 10, 11, 12, 13, 10, 12, 15, 12,
+ 9, 7, 8, 12, 9, 12, 7, 11, 13,
+ 9, 6, 5, 11, 10, 11, 7, 6, 9,
+ 11, 4, 5, 7, 8, 8, 8, 7, 7, },
+};
+//@}
+
+/**
+ * Codes used for determining block type
+ */
+//@{
+#define AIC_MODE1_NUM 90
+#define AIC_MODE1_SIZE 9
+#define AIC_MODE1_BITS 7
+
+static const uint8_t aic_mode1_vlc_codes[AIC_MODE1_NUM][AIC_MODE1_SIZE] = {
+ { 0x01, 0x01, 0x01, 0x11, 0x00, 0x09, 0x03, 0x10, 0x05,},
+ { 0x09, 0x01, 0x01, 0x05, 0x11, 0x00, 0x03, 0x21, 0x20,},
+ { 0x01, 0x01, 0x01, 0x11, 0x09, 0x10, 0x05, 0x00, 0x03,},
+ { 0x01, 0x01, 0x00, 0x03, 0x21, 0x05, 0x09, 0x20, 0x11,},
+ { 0x01, 0x09, 0x00, 0x29, 0x08, 0x15, 0x03, 0x0B, 0x28,},
+ { 0x01, 0x01, 0x01, 0x01, 0x00, 0x01, 0x01, 0x03, 0x02,},
+ { 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, 0x01, 0x09, 0x08,},
+ { 0x01, 0x01, 0x01, 0x09, 0x01, 0x08, 0x00, 0x03, 0x05,},
+ { 0x01, 0x01, 0x01, 0x00, 0x05, 0x11, 0x09, 0x10, 0x03,},
+ { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,},
+
+ { 0x01, 0x01, 0x01, 0x05, 0x01, 0x00, 0x03, 0x09, 0x08,},
+ { 0x09, 0x01, 0x01, 0x05, 0x11, 0x00, 0x03, 0x21, 0x20,},
+ { 0x01, 0x01, 0x01, 0x0D, 0x05, 0x04, 0x00, 0x07, 0x0C,},
+ { 0x01, 0x01, 0x00, 0x05, 0x11, 0x03, 0x09, 0x21, 0x20,},
+ { 0x05, 0x01, 0x01, 0x11, 0x00, 0x09, 0x03, 0x21, 0x20,},
+ { 0x09, 0x01, 0x01, 0x00, 0x05, 0x01, 0x03, 0x11, 0x10,},
+ { 0x01, 0x01, 0x01, 0x00, 0x01, 0x01, 0x01, 0x03, 0x02,},
+ { 0x01, 0x01, 0x01, 0x09, 0x00, 0x05, 0x01, 0x03, 0x08,},
+ { 0x01, 0x01, 0x01, 0x09, 0x11, 0x05, 0x00, 0x10, 0x03,},
+ { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,},
+
+ { 0x01, 0x00, 0x01, 0x09, 0x08, 0x15, 0x14, 0x0B, 0x03,},
+ { 0x0D, 0x01, 0x01, 0x05, 0x0C, 0x04, 0x01, 0x00, 0x07,},
+ { 0x01, 0x01, 0x01, 0x05, 0x00, 0x04, 0x03, 0x01, 0x01,},
+ { 0x05, 0x01, 0x01, 0x04, 0x19, 0x07, 0x18, 0x0D, 0x00,},
+ { 0x11, 0x09, 0x01, 0x21, 0x05, 0x20, 0x01, 0x00, 0x03,},
+ { 0x41, 0x01, 0x00, 0x05, 0x40, 0x03, 0x09, 0x21, 0x11,},
+ { 0x29, 0x01, 0x00, 0x28, 0x09, 0x15, 0x03, 0x08, 0x0B,},
+ { 0x01, 0x00, 0x01, 0x11, 0x09, 0x10, 0x05, 0x01, 0x03,},
+ { 0x05, 0x01, 0x01, 0x04, 0x0D, 0x0C, 0x07, 0x00, 0x01,},
+ { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,},
+
+ { 0x01, 0x00, 0x03, 0x05, 0x11, 0x10, 0x25, 0x24, 0x13,},
+ { 0x21, 0x01, 0x01, 0x00, 0x11, 0x03, 0x05, 0x20, 0x09,},
+ { 0x01, 0x01, 0x01, 0x00, 0x09, 0x11, 0x10, 0x05, 0x03,},
+ { 0x21, 0x05, 0x01, 0x01, 0x09, 0x00, 0x11, 0x20, 0x03,},
+ { 0x05, 0x01, 0x00, 0x04, 0x01, 0x19, 0x07, 0x18, 0x0D,},
+ { 0x11, 0x01, 0x00, 0x01, 0x09, 0x01, 0x03, 0x10, 0x05,},
+ { 0x1D, 0x01, 0x05, 0x0D, 0x0C, 0x04, 0x00, 0x1C, 0x0F,},
+ { 0x05, 0x19, 0x01, 0x04, 0x00, 0x18, 0x1B, 0x1A, 0x07,},
+ { 0x09, 0x01, 0x00, 0x01, 0x05, 0x03, 0x11, 0x10, 0x01,},
+ { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,},
+
+ { 0x01, 0x00, 0x03, 0x41, 0x05, 0x40, 0x09, 0x11, 0x21,},
+ { 0x05, 0x01, 0x01, 0x19, 0x04, 0x07, 0x00, 0x18, 0x0D,},
+ { 0x01, 0x01, 0x01, 0x05, 0x01, 0x04, 0x01, 0x00, 0x03,},
+ { 0x01, 0x05, 0x00, 0x0D, 0x01, 0x04, 0x07, 0x19, 0x18,},
+ { 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x01, 0x03, 0x02,},
+ { 0x31, 0x01, 0x05, 0x19, 0x04, 0x07, 0x00, 0x30, 0x0D,},
+ { 0x01, 0x00, 0x03, 0x11, 0x01, 0x05, 0x01, 0x09, 0x10,},
+ { 0x01, 0x05, 0x01, 0x11, 0x01, 0x10, 0x00, 0x03, 0x09,},
+ { 0x01, 0x09, 0x00, 0x29, 0x03, 0x08, 0x28, 0x15, 0x0B,},
+ { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,},
+
+ { 0x01, 0x01, 0x00, 0x09, 0x15, 0x03, 0x08, 0x14, 0x0B,},
+ { 0x11, 0x01, 0x01, 0x00, 0x09, 0x01, 0x03, 0x10, 0x05,},
+ { 0x01, 0x00, 0x03, 0x25, 0x11, 0x05, 0x10, 0x24, 0x13,},
+ { 0x11, 0x01, 0x00, 0x01, 0x09, 0x01, 0x05, 0x10, 0x03,},
+ { 0x05, 0x01, 0x00, 0x0D, 0x0C, 0x04, 0x0F, 0x1D, 0x1C,},
+ { 0x01, 0x01, 0x01, 0x01, 0x00, 0x01, 0x01, 0x03, 0x02,},
+ { 0x21, 0x01, 0x05, 0x09, 0x11, 0x00, 0x03, 0x41, 0x40,},
+ { 0x05, 0x01, 0x00, 0x1D, 0x1C, 0x0D, 0x0C, 0x0F, 0x04,},
+ { 0x05, 0x01, 0x00, 0x0D, 0x31, 0x04, 0x19, 0x30, 0x07,},
+ { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,},
+
+ { 0x01, 0x01, 0x00, 0x21, 0x05, 0x11, 0x03, 0x09, 0x20,},
+ { 0x01, 0x01, 0x00, 0x11, 0x03, 0x05, 0x01, 0x09, 0x10,},
+ { 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x01, 0x03, 0x02,},
+ { 0x05, 0x01, 0x04, 0x19, 0x07, 0x0D, 0x00, 0x31, 0x30,},
+ { 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x01, 0x03, 0x02,},
+ { 0x05, 0x01, 0x01, 0x11, 0x09, 0x00, 0x03, 0x21, 0x20,},
+ { 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x01, 0x03, 0x02,},
+ { 0x01, 0x01, 0x01, 0x00, 0x01, 0x03, 0x01, 0x01, 0x02,},
+ { 0x09, 0x01, 0x00, 0x29, 0x08, 0x15, 0x03, 0x28, 0x0B,},
+ { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,},
+
+ { 0x01, 0x01, 0x01, 0x05, 0x01, 0x04, 0x00, 0x01, 0x03,},
+ { 0x09, 0x01, 0x00, 0x29, 0x28, 0x15, 0x08, 0x03, 0x0B,},
+ { 0x01, 0x00, 0x01, 0x11, 0x05, 0x10, 0x09, 0x01, 0x03,},
+ { 0x05, 0x04, 0x01, 0x1D, 0x0D, 0x0C, 0x1C, 0x00, 0x0F,},
+ { 0x09, 0x11, 0x01, 0x41, 0x00, 0x40, 0x05, 0x03, 0x21,},
+ { 0x0D, 0x05, 0x01, 0x1D, 0x1C, 0x0C, 0x04, 0x00, 0x0F,},
+ { 0x41, 0x09, 0x01, 0x40, 0x00, 0x11, 0x05, 0x03, 0x21,},
+ { 0x01, 0x01, 0x01, 0x05, 0x01, 0x04, 0x00, 0x01, 0x03,},
+ { 0x05, 0x04, 0x01, 0x0D, 0x01, 0x0C, 0x07, 0x01, 0x00,},
+ { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,},
+
+ { 0x05, 0x04, 0x01, 0x07, 0x19, 0x31, 0x30, 0x0D, 0x00,},
+ { 0x21, 0x01, 0x01, 0x00, 0x11, 0x09, 0x20, 0x05, 0x03,},
+ { 0x05, 0x01, 0x01, 0x04, 0x07, 0x0D, 0x0C, 0x00, 0x01,},
+ { 0x21, 0x09, 0x01, 0x00, 0x20, 0x05, 0x23, 0x22, 0x03,},
+ { 0x31, 0x0D, 0x01, 0x19, 0x05, 0x30, 0x04, 0x07, 0x00,},
+ { 0x31, 0x05, 0x01, 0x04, 0x19, 0x00, 0x0D, 0x30, 0x07,},
+ { 0x31, 0x01, 0x00, 0x0D, 0x05, 0x19, 0x04, 0x30, 0x07,},
+ { 0x01, 0x01, 0x01, 0x00, 0x01, 0x03, 0x02, 0x01, 0x01,},
+ { 0x01, 0x00, 0x01, 0x01, 0x05, 0x09, 0x08, 0x03, 0x01,},
+ { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,},
+};
+
+static const uint8_t aic_mode1_vlc_bits[AIC_MODE1_NUM][AIC_MODE1_SIZE] = {
+ { 1, 4, 2, 7, 4, 6, 4, 7, 5,},
+ { 5, 1, 3, 4, 6, 3, 3, 7, 7,},
+ { 1, 4, 2, 7, 6, 7, 5, 4, 4,},
+ { 1, 3, 3, 3, 7, 4, 5, 7, 6,},
+ { 2, 4, 2, 6, 4, 5, 2, 4, 6,},
+ { 7, 2, 3, 4, 7, 1, 5, 7, 7,},
+ { 5, 1, 3, 6, 5, 5, 2, 7, 7,},
+ { 2, 5, 1, 7, 3, 7, 5, 5, 6,},
+ { 2, 4, 1, 4, 5, 7, 6, 7, 4,},
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,},
+
+ { 2, 1, 3, 6, 5, 5, 5, 7, 7,},
+ { 5, 1, 3, 4, 6, 3, 3, 7, 7,},
+ { 4, 1, 2, 6, 5, 5, 4, 5, 6,},
+ { 3, 1, 3, 4, 6, 3, 5, 7, 7,},
+ { 4, 1, 3, 6, 3, 5, 3, 7, 7,},
+ { 6, 1, 4, 4, 5, 2, 4, 7, 7,},
+ { 7, 1, 5, 7, 4, 3, 2, 7, 7,},
+ { 5, 3, 2, 7, 5, 6, 1, 5, 7,},
+ { 4, 1, 2, 6, 7, 5, 4, 7, 4,},
+ { 1, 0, 1, 0, 0, 0, 0, 0, 0,},
+
+ { 3, 3, 1, 5, 5, 6, 6, 5, 3,},
+ { 6, 2, 1, 5, 6, 5, 4, 4, 5,},
+ { 6, 4, 1, 7, 6, 7, 6, 3, 2,},
+ { 4, 3, 1, 4, 6, 4, 6, 5, 3,},
+ { 6, 5, 1, 7, 4, 7, 3, 3, 3,},
+ { 7, 2, 2, 3, 7, 2, 4, 6, 5,},
+ { 6, 2, 2, 6, 4, 5, 2, 4, 4,},
+ { 4, 4, 1, 7, 6, 7, 5, 2, 4,},
+ { 5, 4, 1, 5, 6, 6, 5, 4, 2,},
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,},
+
+ { 2, 2, 2, 3, 5, 5, 6, 6, 5,},
+ { 7, 1, 3, 3, 6, 3, 4, 7, 5,},
+ { 2, 4, 1, 4, 6, 7, 7, 5, 4,},
+ { 7, 4, 3, 1, 5, 3, 6, 7, 3,},
+ { 4, 3, 3, 4, 1, 6, 4, 6, 5,},
+ { 7, 4, 4, 2, 6, 1, 4, 7, 5,},
+ { 5, 2, 3, 4, 4, 3, 2, 5, 4,},
+ { 3, 5, 2, 3, 2, 5, 5, 5, 3,},
+ { 6, 4, 4, 2, 5, 4, 7, 7, 1,},
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,},
+
+ { 2, 2, 2, 7, 3, 7, 4, 5, 6,},
+ { 4, 1, 3, 6, 4, 4, 3, 6, 5,},
+ { 2, 4, 1, 7, 3, 7, 6, 6, 6,},
+ { 3, 4, 3, 5, 1, 4, 4, 6, 6,},
+ { 4, 5, 2, 7, 1, 7, 3, 7, 7,},
+ { 6, 2, 3, 5, 3, 3, 2, 6, 4,},
+ { 4, 4, 4, 7, 2, 5, 1, 6, 7,},
+ { 4, 5, 2, 7, 1, 7, 4, 4, 6,},
+ { 2, 4, 2, 6, 2, 4, 6, 5, 4,},
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,},
+
+ { 1, 3, 3, 5, 6, 3, 5, 6, 5,},
+ { 7, 1, 4, 4, 6, 2, 4, 7, 5,},
+ { 2, 2, 2, 6, 5, 3, 5, 6, 5,},
+ { 7, 4, 4, 2, 6, 1, 5, 7, 4,},
+ { 3, 2, 2, 4, 4, 3, 4, 5, 5,},
+ { 7, 2, 5, 3, 7, 1, 4, 7, 7,},
+ { 6, 2, 3, 4, 5, 2, 2, 7, 7,},
+ { 3, 2, 2, 5, 5, 4, 4, 4, 3,},
+ { 3, 2, 2, 4, 6, 3, 5, 6, 3,},
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,},
+
+ { 1, 3, 3, 7, 4, 6, 3, 5, 7,},
+ { 4, 1, 4, 7, 4, 5, 2, 6, 7,},
+ { 2, 4, 1, 7, 5, 7, 3, 7, 7,},
+ { 3, 2, 3, 5, 3, 4, 2, 6, 6,},
+ { 3, 5, 4, 7, 2, 7, 1, 7, 7,},
+ { 4, 1, 3, 6, 5, 3, 3, 7, 7,},
+ { 4, 2, 5, 7, 3, 7, 1, 7, 7,},
+ { 7, 4, 1, 7, 3, 7, 2, 5, 7,},
+ { 4, 2, 2, 6, 4, 5, 2, 6, 4,},
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,},
+
+ { 3, 4, 1, 7, 6, 7, 6, 2, 6,},
+ { 4, 2, 2, 6, 6, 5, 4, 2, 4,},
+ { 4, 4, 1, 7, 5, 7, 6, 2, 4,},
+ { 3, 3, 2, 5, 4, 4, 5, 2, 4,},
+ { 4, 5, 2, 7, 2, 7, 3, 2, 6,},
+ { 4, 3, 2, 5, 5, 4, 3, 2, 4,},
+ { 7, 4, 2, 7, 2, 5, 3, 2, 6,},
+ { 4, 6, 2, 7, 3, 7, 6, 1, 6,},
+ { 5, 5, 1, 6, 4, 6, 5, 2, 4,},
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,},
+
+ { 3, 3, 2, 3, 5, 6, 6, 4, 2,},
+ { 7, 1, 3, 3, 6, 5, 7, 4, 3,},
+ { 5, 4, 1, 5, 5, 6, 6, 4, 2,},
+ { 6, 4, 2, 2, 6, 3, 6, 6, 2,},
+ { 6, 4, 2, 5, 3, 6, 3, 3, 2,},
+ { 6, 3, 2, 3, 5, 2, 4, 6, 3,},
+ { 6, 2, 2, 4, 3, 5, 3, 6, 3,},
+ { 7, 5, 1, 7, 4, 7, 7, 3, 2,},
+ { 5, 5, 2, 3, 6, 7, 7, 5, 1,},
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,},
+};
+
+//@}
+
+#define PBTYPE_ESCAPE 0xFF
+
+/** tables used for P-frame macroblock type decoding */
+//@{
+#define NUM_PTYPE_VLCS 7
+#define PTYPE_VLC_SIZE 8
+#define PTYPE_VLC_BITS 7
+
+static const uint8_t ptype_vlc_codes[NUM_PTYPE_VLCS][PTYPE_VLC_SIZE] = {
+ { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00 },
+ { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00 },
+ { 0x0D, 0x05, 0x01, 0x04, 0x01, 0x00, 0x07, 0x0C },
+ { 0x09, 0x11, 0x01, 0x00, 0x05, 0x03, 0x21, 0x20 },
+ { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00 },
+ { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00 },
+ { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00 }
+};
+
+static const uint8_t ptype_vlc_bits[NUM_PTYPE_VLCS][PTYPE_VLC_SIZE] = {
+ { 1, 2, 3, 6, 5, 4, 7, 7 },
+ { 3, 1, 2, 7, 6, 5, 4, 7 },
+ { 5, 4, 1, 4, 3, 3, 4, 5 },
+ { 4, 5, 2, 2, 3, 2, 6, 6 },
+ { 5, 6, 1, 4, 2, 3, 7, 7 },
+ { 5, 6, 1, 4, 3, 2, 7, 7 },
+ { 6, 3, 2, 7, 5, 4, 1, 7 }
+};
+
+static const uint8_t ptype_vlc_syms[PTYPE_VLC_SIZE] = {
+ 0, 1, 2, 3, 8, 9, 11, PBTYPE_ESCAPE
+};
+
+/** reverse of ptype_vlc_syms */
+static const uint8_t block_num_to_ptype_vlc_num[12] = {
+ 0, 1, 2, 3, 0, 0, 2, 0, 4, 5, 0, 6
+};
+//@}
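+
+/* Example of the mapping above: block type 8 maps to VLC set
+ * block_num_to_ptype_vlc_num[8] == 4, consistent with ptype_vlc_syms[4] == 8;
+ * likewise type 11 maps to set 6, with ptype_vlc_syms[6] == 11. */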
+
+/** tables used for B-frame macroblock type decoding */
+//@{
+#define NUM_BTYPE_VLCS 6
+#define BTYPE_VLC_SIZE 7
+#define BTYPE_VLC_BITS 6
+
+static const uint8_t btype_vlc_codes[NUM_BTYPE_VLCS][BTYPE_VLC_SIZE] = {
+ { 0x01, 0x05, 0x00, 0x03, 0x11, 0x09, 0x10 },
+ { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00 },
+ { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00 },
+ { 0x09, 0x01, 0x00, 0x01, 0x05, 0x03, 0x08 },
+ { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00 },
+ { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00 }
+};
+
+static const uint8_t btype_vlc_bits[NUM_BTYPE_VLCS][BTYPE_VLC_SIZE] = {
+ { 2, 3, 2, 2, 5, 4, 5 },
+ { 4, 1, 3, 2, 6, 5, 6 },
+ { 6, 4, 1, 2, 5, 3, 6 },
+ { 5, 3, 3, 1, 4, 3, 5 },
+ { 6, 5, 3, 2, 4, 1, 6 },
+ { 6, 5, 3, 1, 4, 2, 6 }
+};
+
+static const uint8_t btype_vlc_syms[BTYPE_VLC_SIZE] = {
+ 0, 1, 4, 5, 10, 7, PBTYPE_ESCAPE
+};
+
+/** reverse of btype_vlc_syms */
+static const uint8_t block_num_to_btype_vlc_num[12] = {
+ 0, 1, 0, 0, 2, 3, 0, 5, 0, 0, 4, 0
+};
+//@}
+#endif /* AVCODEC_RV40VLC2_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/simple_idct.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/simple_idct.c
new file mode 100644
index 000000000..659863d6a
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/simple_idct.c
@@ -0,0 +1,582 @@
+/*
+ * Simple IDCT
+ *
+ * Copyright (c) 2001 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/simple_idct.c
+ * Simple IDCT in C.
+ */
+
+/*
+  based upon some commented-out C code from mpeg2dec (idct_mmx.c,
+  written by Aaron Holtzman <aholtzma@ess.engr.uvic.ca>)
+ */
+#include "avcodec.h"
+#include "dsputil.h"
+#include "mathops.h"
+#include "simple_idct.h"
+
+#if 0
+#define W1 2841 /* 2048*sqrt (2)*cos (1*pi/16) */
+#define W2 2676 /* 2048*sqrt (2)*cos (2*pi/16) */
+#define W3 2408 /* 2048*sqrt (2)*cos (3*pi/16) */
+#define W4 2048 /* 2048*sqrt (2)*cos (4*pi/16) */
+#define W5 1609 /* 2048*sqrt (2)*cos (5*pi/16) */
+#define W6 1108 /* 2048*sqrt (2)*cos (6*pi/16) */
+#define W7 565 /* 2048*sqrt (2)*cos (7*pi/16) */
+#define ROW_SHIFT 8
+#define COL_SHIFT 17
+#else
+#define W1 22725 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+#define W2 21407 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+#define W3 19266 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+#define W4 16383 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+#define W5 12873 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+#define W6 8867 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+#define W7 4520 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+#define ROW_SHIFT 11
+#define COL_SHIFT 20 // 6
+#endif
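+
+/* Quick check of the constants above:
+ * floor(cos(1*M_PI/16)*sqrt(2)*(1<<14) + 0.5) = floor(22725.76) = 22725 = W1
+ * and floor(cos(6*M_PI/16)*sqrt(2)*(1<<14) + 0.5) = floor(8867.46) = 8867 = W6.
+ * W4 is kept at 16383, one below the exact 16384 the formula would give,
+ * presumably to stay strictly below 1<<14. */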
+
+static inline void idctRowCondDC (DCTELEM * row)
+{
+ int a0, a1, a2, a3, b0, b1, b2, b3;
+#if HAVE_FAST_64BIT
+ uint64_t temp;
+#else
+ uint32_t temp;
+#endif
+
+#if HAVE_FAST_64BIT
+#if HAVE_BIGENDIAN
+#define ROW0_MASK 0xffff000000000000LL
+#else
+#define ROW0_MASK 0xffffLL
+#endif
+ if(sizeof(DCTELEM)==2){
+ if ( ((((uint64_t *)row)[0] & ~ROW0_MASK) |
+ ((uint64_t *)row)[1]) == 0) {
+ temp = (row[0] << 3) & 0xffff;
+ temp += temp << 16;
+ temp += temp << 32;
+ ((uint64_t *)row)[0] = temp;
+ ((uint64_t *)row)[1] = temp;
+ return;
+ }
+ }else{
+ if (!(row[1]|row[2]|row[3]|row[4]|row[5]|row[6]|row[7])) {
+ row[0]=row[1]=row[2]=row[3]=row[4]=row[5]=row[6]=row[7]= row[0] << 3;
+ return;
+ }
+ }
+#else
+ if(sizeof(DCTELEM)==2){
+ if (!(((uint32_t*)row)[1] |
+ ((uint32_t*)row)[2] |
+ ((uint32_t*)row)[3] |
+ row[1])) {
+ temp = (row[0] << 3) & 0xffff;
+ temp += temp << 16;
+ ((uint32_t*)row)[0]=((uint32_t*)row)[1] =
+ ((uint32_t*)row)[2]=((uint32_t*)row)[3] = temp;
+ return;
+ }
+ }else{
+ if (!(row[1]|row[2]|row[3]|row[4]|row[5]|row[6]|row[7])) {
+ row[0]=row[1]=row[2]=row[3]=row[4]=row[5]=row[6]=row[7]= row[0] << 3;
+ return;
+ }
+ }
+#endif
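+ /* For reference, the DC-only shortcuts above agree with the general path
+ below: with an ideal W4 of 1 << 14, (W4 * row[0] + (1 << (ROW_SHIFT - 1)))
+ >> ROW_SHIFT is exactly row[0] << 3, so a row whose AC coefficients are all
+ zero can simply be filled with row[0] << 3 without any multiplies. */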
+
+ a0 = (W4 * row[0]) + (1 << (ROW_SHIFT - 1));
+ a1 = a0;
+ a2 = a0;
+ a3 = a0;
+
+ /* no need to optimize : gcc does it */
+ a0 += W2 * row[2];
+ a1 += W6 * row[2];
+ a2 -= W6 * row[2];
+ a3 -= W2 * row[2];
+
+ b0 = MUL16(W1, row[1]);
+ MAC16(b0, W3, row[3]);
+ b1 = MUL16(W3, row[1]);
+ MAC16(b1, -W7, row[3]);
+ b2 = MUL16(W5, row[1]);
+ MAC16(b2, -W1, row[3]);
+ b3 = MUL16(W7, row[1]);
+ MAC16(b3, -W5, row[3]);
+
+#if HAVE_FAST_64BIT
+ temp = ((uint64_t*)row)[1];
+#else
+ temp = ((uint32_t*)row)[2] | ((uint32_t*)row)[3];
+#endif
+ if (temp != 0) {
+ a0 += W4*row[4] + W6*row[6];
+ a1 += - W4*row[4] - W2*row[6];
+ a2 += - W4*row[4] + W2*row[6];
+ a3 += W4*row[4] - W6*row[6];
+
+ MAC16(b0, W5, row[5]);
+ MAC16(b0, W7, row[7]);
+
+ MAC16(b1, -W1, row[5]);
+ MAC16(b1, -W5, row[7]);
+
+ MAC16(b2, W7, row[5]);
+ MAC16(b2, W3, row[7]);
+
+ MAC16(b3, W3, row[5]);
+ MAC16(b3, -W1, row[7]);
+ }
+
+ row[0] = (a0 + b0) >> ROW_SHIFT;
+ row[7] = (a0 - b0) >> ROW_SHIFT;
+ row[1] = (a1 + b1) >> ROW_SHIFT;
+ row[6] = (a1 - b1) >> ROW_SHIFT;
+ row[2] = (a2 + b2) >> ROW_SHIFT;
+ row[5] = (a2 - b2) >> ROW_SHIFT;
+ row[3] = (a3 + b3) >> ROW_SHIFT;
+ row[4] = (a3 - b3) >> ROW_SHIFT;
+}
+
+static inline void idctSparseColPut (uint8_t *dest, int line_size,
+ DCTELEM * col)
+{
+ int a0, a1, a2, a3, b0, b1, b2, b3;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+
+ /* XXX: I did that only to give the same values as the previous code */
+ a0 = W4 * (col[8*0] + ((1<<(COL_SHIFT-1))/W4));
+ a1 = a0;
+ a2 = a0;
+ a3 = a0;
+
+ a0 += + W2*col[8*2];
+ a1 += + W6*col[8*2];
+ a2 += - W6*col[8*2];
+ a3 += - W2*col[8*2];
+
+ b0 = MUL16(W1, col[8*1]);
+ b1 = MUL16(W3, col[8*1]);
+ b2 = MUL16(W5, col[8*1]);
+ b3 = MUL16(W7, col[8*1]);
+
+ MAC16(b0, + W3, col[8*3]);
+ MAC16(b1, - W7, col[8*3]);
+ MAC16(b2, - W1, col[8*3]);
+ MAC16(b3, - W5, col[8*3]);
+
+ if(col[8*4]){
+ a0 += + W4*col[8*4];
+ a1 += - W4*col[8*4];
+ a2 += - W4*col[8*4];
+ a3 += + W4*col[8*4];
+ }
+
+ if (col[8*5]) {
+ MAC16(b0, + W5, col[8*5]);
+ MAC16(b1, - W1, col[8*5]);
+ MAC16(b2, + W7, col[8*5]);
+ MAC16(b3, + W3, col[8*5]);
+ }
+
+ if(col[8*6]){
+ a0 += + W6*col[8*6];
+ a1 += - W2*col[8*6];
+ a2 += + W2*col[8*6];
+ a3 += - W6*col[8*6];
+ }
+
+ if (col[8*7]) {
+ MAC16(b0, + W7, col[8*7]);
+ MAC16(b1, - W5, col[8*7]);
+ MAC16(b2, + W3, col[8*7]);
+ MAC16(b3, - W1, col[8*7]);
+ }
+
+ dest[0] = cm[(a0 + b0) >> COL_SHIFT];
+ dest += line_size;
+ dest[0] = cm[(a1 + b1) >> COL_SHIFT];
+ dest += line_size;
+ dest[0] = cm[(a2 + b2) >> COL_SHIFT];
+ dest += line_size;
+ dest[0] = cm[(a3 + b3) >> COL_SHIFT];
+ dest += line_size;
+ dest[0] = cm[(a3 - b3) >> COL_SHIFT];
+ dest += line_size;
+ dest[0] = cm[(a2 - b2) >> COL_SHIFT];
+ dest += line_size;
+ dest[0] = cm[(a1 - b1) >> COL_SHIFT];
+ dest += line_size;
+ dest[0] = cm[(a0 - b0) >> COL_SHIFT];
+}
+
+static inline void idctSparseColAdd (uint8_t *dest, int line_size,
+ DCTELEM * col)
+{
+ int a0, a1, a2, a3, b0, b1, b2, b3;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+
+ /* XXX: I did that only to give the same values as the previous code */
+ a0 = W4 * (col[8*0] + ((1<<(COL_SHIFT-1))/W4));
+ a1 = a0;
+ a2 = a0;
+ a3 = a0;
+
+ a0 += + W2*col[8*2];
+ a1 += + W6*col[8*2];
+ a2 += - W6*col[8*2];
+ a3 += - W2*col[8*2];
+
+ b0 = MUL16(W1, col[8*1]);
+ b1 = MUL16(W3, col[8*1]);
+ b2 = MUL16(W5, col[8*1]);
+ b3 = MUL16(W7, col[8*1]);
+
+ MAC16(b0, + W3, col[8*3]);
+ MAC16(b1, - W7, col[8*3]);
+ MAC16(b2, - W1, col[8*3]);
+ MAC16(b3, - W5, col[8*3]);
+
+ if(col[8*4]){
+ a0 += + W4*col[8*4];
+ a1 += - W4*col[8*4];
+ a2 += - W4*col[8*4];
+ a3 += + W4*col[8*4];
+ }
+
+ if (col[8*5]) {
+ MAC16(b0, + W5, col[8*5]);
+ MAC16(b1, - W1, col[8*5]);
+ MAC16(b2, + W7, col[8*5]);
+ MAC16(b3, + W3, col[8*5]);
+ }
+
+ if(col[8*6]){
+ a0 += + W6*col[8*6];
+ a1 += - W2*col[8*6];
+ a2 += + W2*col[8*6];
+ a3 += - W6*col[8*6];
+ }
+
+ if (col[8*7]) {
+ MAC16(b0, + W7, col[8*7]);
+ MAC16(b1, - W5, col[8*7]);
+ MAC16(b2, + W3, col[8*7]);
+ MAC16(b3, - W1, col[8*7]);
+ }
+
+ dest[0] = cm[dest[0] + ((a0 + b0) >> COL_SHIFT)];
+ dest += line_size;
+ dest[0] = cm[dest[0] + ((a1 + b1) >> COL_SHIFT)];
+ dest += line_size;
+ dest[0] = cm[dest[0] + ((a2 + b2) >> COL_SHIFT)];
+ dest += line_size;
+ dest[0] = cm[dest[0] + ((a3 + b3) >> COL_SHIFT)];
+ dest += line_size;
+ dest[0] = cm[dest[0] + ((a3 - b3) >> COL_SHIFT)];
+ dest += line_size;
+ dest[0] = cm[dest[0] + ((a2 - b2) >> COL_SHIFT)];
+ dest += line_size;
+ dest[0] = cm[dest[0] + ((a1 - b1) >> COL_SHIFT)];
+ dest += line_size;
+ dest[0] = cm[dest[0] + ((a0 - b0) >> COL_SHIFT)];
+}
+
+static inline void idctSparseCol (DCTELEM * col)
+{
+ int a0, a1, a2, a3, b0, b1, b2, b3;
+
+ /* XXX: I did that only to give the same values as the previous code */
+ a0 = W4 * (col[8*0] + ((1<<(COL_SHIFT-1))/W4));
+ a1 = a0;
+ a2 = a0;
+ a3 = a0;
+
+ a0 += + W2*col[8*2];
+ a1 += + W6*col[8*2];
+ a2 += - W6*col[8*2];
+ a3 += - W2*col[8*2];
+
+ b0 = MUL16(W1, col[8*1]);
+ b1 = MUL16(W3, col[8*1]);
+ b2 = MUL16(W5, col[8*1]);
+ b3 = MUL16(W7, col[8*1]);
+
+ MAC16(b0, + W3, col[8*3]);
+ MAC16(b1, - W7, col[8*3]);
+ MAC16(b2, - W1, col[8*3]);
+ MAC16(b3, - W5, col[8*3]);
+
+ if(col[8*4]){
+ a0 += + W4*col[8*4];
+ a1 += - W4*col[8*4];
+ a2 += - W4*col[8*4];
+ a3 += + W4*col[8*4];
+ }
+
+ if (col[8*5]) {
+ MAC16(b0, + W5, col[8*5]);
+ MAC16(b1, - W1, col[8*5]);
+ MAC16(b2, + W7, col[8*5]);
+ MAC16(b3, + W3, col[8*5]);
+ }
+
+ if(col[8*6]){
+ a0 += + W6*col[8*6];
+ a1 += - W2*col[8*6];
+ a2 += + W2*col[8*6];
+ a3 += - W6*col[8*6];
+ }
+
+ if (col[8*7]) {
+ MAC16(b0, + W7, col[8*7]);
+ MAC16(b1, - W5, col[8*7]);
+ MAC16(b2, + W3, col[8*7]);
+ MAC16(b3, - W1, col[8*7]);
+ }
+
+ col[0 ] = ((a0 + b0) >> COL_SHIFT);
+ col[8 ] = ((a1 + b1) >> COL_SHIFT);
+ col[16] = ((a2 + b2) >> COL_SHIFT);
+ col[24] = ((a3 + b3) >> COL_SHIFT);
+ col[32] = ((a3 - b3) >> COL_SHIFT);
+ col[40] = ((a2 - b2) >> COL_SHIFT);
+ col[48] = ((a1 - b1) >> COL_SHIFT);
+ col[56] = ((a0 - b0) >> COL_SHIFT);
+}
+
+void ff_simple_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ int i;
+ for(i=0; i<8; i++)
+ idctRowCondDC(block + i*8);
+
+ for(i=0; i<8; i++)
+ idctSparseColPut(dest + i, line_size, block + i);
+}
+
+void ff_simple_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ int i;
+ for(i=0; i<8; i++)
+ idctRowCondDC(block + i*8);
+
+ for(i=0; i<8; i++)
+ idctSparseColAdd(dest + i, line_size, block + i);
+}
+
+void ff_simple_idct(DCTELEM *block)
+{
+ int i;
+ for(i=0; i<8; i++)
+ idctRowCondDC(block + i*8);
+
+ for(i=0; i<8; i++)
+ idctSparseCol(block + i);
+}
+
+/* 2x4x8 idct */
+
+#define CN_SHIFT 12
+#define C_FIX(x) ((int)((x) * (1 << CN_SHIFT) + 0.5))
+#define C1 C_FIX(0.6532814824)
+#define C2 C_FIX(0.2705980501)
+
+/* the row idct is multiplied by 16 * sqrt(2.0), the col idct4 is normalized,
+ and the butterfly must be multiplied by 0.5 * sqrt(2.0) */
+#define C_SHIFT (4+1+12)
+
+static inline void idct4col_put(uint8_t *dest, int line_size, const DCTELEM *col)
+{
+ int c0, c1, c2, c3, a0, a1, a2, a3;
+ const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+
+ a0 = col[8*0];
+ a1 = col[8*2];
+ a2 = col[8*4];
+ a3 = col[8*6];
+ c0 = ((a0 + a2) << (CN_SHIFT - 1)) + (1 << (C_SHIFT - 1));
+ c2 = ((a0 - a2) << (CN_SHIFT - 1)) + (1 << (C_SHIFT - 1));
+ c1 = a1 * C1 + a3 * C2;
+ c3 = a1 * C2 - a3 * C1;
+ dest[0] = cm[(c0 + c1) >> C_SHIFT];
+ dest += line_size;
+ dest[0] = cm[(c2 + c3) >> C_SHIFT];
+ dest += line_size;
+ dest[0] = cm[(c2 - c3) >> C_SHIFT];
+ dest += line_size;
+ dest[0] = cm[(c0 - c1) >> C_SHIFT];
+}
+
+#define BF(k) \
+{\
+ int a0, a1;\
+ a0 = ptr[k];\
+ a1 = ptr[8 + k];\
+ ptr[k] = a0 + a1;\
+ ptr[8 + k] = a0 - a1;\
+}
+
+/* Only used by the DV codec. The input must be interlaced. 128 is added
+ to the pixels before clamping to avoid systematic error; a
+ (1024*sqrt(2)) offset would be needed otherwise. */
+/* XXX: I think a 1.0/sqrt(2) normalization should be needed to
+ compensate for the extra butterfly stage - I don't have the full DV
+ specification */
+void ff_simple_idct248_put(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ int i;
+ DCTELEM *ptr;
+
+ /* butterfly */
+ ptr = block;
+ for(i=0;i<4;i++) {
+ BF(0);
+ BF(1);
+ BF(2);
+ BF(3);
+ BF(4);
+ BF(5);
+ BF(6);
+ BF(7);
+ ptr += 2 * 8;
+ }
+
+ /* IDCT8 on each line */
+ for(i=0; i<8; i++) {
+ idctRowCondDC(block + i*8);
+ }
+
+ /* IDCT4 and store */
+ for(i=0;i<8;i++) {
+ idct4col_put(dest + i, 2 * line_size, block + i);
+ idct4col_put(dest + line_size + i, 2 * line_size, block + 8 + i);
+ }
+}
+
+/* 8x4 & 4x8 WMV2 IDCT */
+#undef CN_SHIFT
+#undef C_SHIFT
+#undef C_FIX
+#undef C1
+#undef C2
+#define CN_SHIFT 12
+#define C_FIX(x) ((int)((x) * 1.414213562 * (1 << CN_SHIFT) + 0.5))
+#define C1 C_FIX(0.6532814824)
+#define C2 C_FIX(0.2705980501)
+#define C3 C_FIX(0.5)
+#define C_SHIFT (4+1+12)
+static inline void idct4col_add(uint8_t *dest, int line_size, const DCTELEM *col)
+{
+ int c0, c1, c2, c3, a0, a1, a2, a3;
+ const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+
+ a0 = col[8*0];
+ a1 = col[8*1];
+ a2 = col[8*2];
+ a3 = col[8*3];
+ c0 = (a0 + a2)*C3 + (1 << (C_SHIFT - 1));
+ c2 = (a0 - a2)*C3 + (1 << (C_SHIFT - 1));
+ c1 = a1 * C1 + a3 * C2;
+ c3 = a1 * C2 - a3 * C1;
+ dest[0] = cm[dest[0] + ((c0 + c1) >> C_SHIFT)];
+ dest += line_size;
+ dest[0] = cm[dest[0] + ((c2 + c3) >> C_SHIFT)];
+ dest += line_size;
+ dest[0] = cm[dest[0] + ((c2 - c3) >> C_SHIFT)];
+ dest += line_size;
+ dest[0] = cm[dest[0] + ((c0 - c1) >> C_SHIFT)];
+}
+
+#define RN_SHIFT 15
+#define R_FIX(x) ((int)((x) * 1.414213562 * (1 << RN_SHIFT) + 0.5))
+#define R1 R_FIX(0.6532814824)
+#define R2 R_FIX(0.2705980501)
+#define R3 R_FIX(0.5)
+#define R_SHIFT 11
+static inline void idct4row(DCTELEM *row)
+{
+ int c0, c1, c2, c3, a0, a1, a2, a3;
+ //const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+
+ a0 = row[0];
+ a1 = row[1];
+ a2 = row[2];
+ a3 = row[3];
+ c0 = (a0 + a2)*R3 + (1 << (R_SHIFT - 1));
+ c2 = (a0 - a2)*R3 + (1 << (R_SHIFT - 1));
+ c1 = a1 * R1 + a3 * R2;
+ c3 = a1 * R2 - a3 * R1;
+ row[0]= (c0 + c1) >> R_SHIFT;
+ row[1]= (c2 + c3) >> R_SHIFT;
+ row[2]= (c2 - c3) >> R_SHIFT;
+ row[3]= (c0 - c1) >> R_SHIFT;
+}
+
+void ff_simple_idct84_add(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ int i;
+
+ /* IDCT8 on each line */
+ for(i=0; i<4; i++) {
+ idctRowCondDC(block + i*8);
+ }
+
+ /* IDCT4 and store */
+ for(i=0;i<8;i++) {
+ idct4col_add(dest + i, line_size, block + i);
+ }
+}
+
+void ff_simple_idct48_add(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ int i;
+
+ /* IDCT4 on each line */
+ for(i=0; i<8; i++) {
+ idct4row(block + i*8);
+ }
+
+ /* IDCT8 and store */
+ for(i=0; i<4; i++){
+ idctSparseColAdd(dest + i, line_size, block + i);
+ }
+}
+
+void ff_simple_idct44_add(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ int i;
+
+ /* IDCT4 on each line */
+ for(i=0; i<4; i++) {
+ idct4row(block + i*8);
+ }
+
+ /* IDCT4 and store */
+ for(i=0; i<4; i++){
+ idct4col_add(dest + i, line_size, block + i);
+ }
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/simple_idct.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/simple_idct.h
new file mode 100644
index 000000000..12b96f6ed
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/simple_idct.h
@@ -0,0 +1,46 @@
+/*
+ * Simple IDCT
+ *
+ * Copyright (c) 2001 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file simple_idct.h
+ * simple idct header.
+ */
+
+#ifndef AVCODEC_SIMPLE_IDCT_H
+#define AVCODEC_SIMPLE_IDCT_H
+
+#include "dsputil.h"
+
+void ff_simple_idct_put(uint8_t *dest, int line_size, DCTELEM *block);
+void ff_simple_idct_add(uint8_t *dest, int line_size, DCTELEM *block);
+void ff_simple_idct_mmx(int16_t *block);
+void ff_simple_idct_add_mmx(uint8_t *dest, int line_size, int16_t *block);
+void ff_simple_idct_put_mmx(uint8_t *dest, int line_size, int16_t *block);
+void ff_simple_idct(DCTELEM *block);
+
+void ff_simple_idct248_put(uint8_t *dest, int line_size, DCTELEM *block);
+
+void ff_simple_idct84_add(uint8_t *dest, int line_size, DCTELEM *block);
+void ff_simple_idct48_add(uint8_t *dest, int line_size, DCTELEM *block);
+void ff_simple_idct44_add(uint8_t *dest, int line_size, DCTELEM *block);
+
+#endif /* AVCODEC_SIMPLE_IDCT_H */
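
As a usage note for the declarations above: each ff_simple_idct* entry point
takes a DCTELEM[64] block holding an 8x8 set of dequantized coefficients in
row-major order; the *_put variants write the clamped 8-bit result into a
destination plane, while the *_add variants accumulate into the existing
pixels. A minimal sketch, assuming DCTELEM is the 16-bit coefficient type
defined via dsputil.h and using purely illustrative function and buffer names:

    #include "simple_idct.h"  /* pulls in dsputil.h for DCTELEM */

    /* Run the 8x8 inverse DCT on one block of dequantized coefficients and
     * write the clamped 8-bit samples into an image plane. */
    static void idct_put_block_example(uint8_t *plane, int line_size,
                                       DCTELEM coeffs[64])
    {
        ff_simple_idct_put(plane, line_size, coeffs);
    }
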
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/sp5x.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/sp5x.h
new file mode 100644
index 000000000..fab31e269
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/sp5x.h
@@ -0,0 +1,334 @@
+/*
+ * Sunplus JPEG tables
+ * Copyright (c) 2003 the ffmpeg project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_SP5X_H
+#define AVCODEC_SP5X_H
+
+#include <stdint.h>
+
+static const uint8_t sp5x_data_sof[] =
+{
+ 0xFF, 0xC0, /* SOF */
+ 0x00, 0x11, /* len */
+ 0x08, /* bits */
+ 0x00, 0xf0, /* height (default: 240) */
+ 0x01, 0x40, /* width (default: 320) */
+ 0x03, /* nb components */
+ 0x01, 0x22, 0x00, /* 21 vs 22 ? */
+ 0x02, 0x11, 0x01,
+ 0x03, 0x11, 0x01
+};
+
+static const uint8_t sp5x_data_sos[] =
+{
+ 0xFF, 0xDA, /* SOS */
+ 0x00, 0x0C, /* len */
+ 0x03, /* nb components */
+ 0x01, 0x00,
+ 0x02, 0x11,
+ 0x03, 0x11,
+ 0x00, /* Ss */
+ 0x3F, /* Se */
+ 0x00 /* Ah/Al */
+};
+
+static const uint8_t sp5x_data_dqt[] =
+{
+ 0xFF, 0xDB, /* DQT */
+ 0x00, 0x84, /* len */
+ 0x00,
+ 0x05, 0x03, 0x04, 0x04, 0x04, 0x03, 0x05, 0x04,
+ 0x04, 0x04, 0x06, 0x05, 0x05, 0x06, 0x08, 0x0D,
+ 0x08, 0x08, 0x07, 0x07, 0x08, 0x10, 0x0C, 0x0C,
+ 0x0A, 0x0D, 0x14, 0x11, 0x15, 0x14, 0x13, 0x11,
+ 0x13, 0x13, 0x16, 0x18, 0x1F, 0x1A, 0x16, 0x17,
+ 0x1E, 0x17, 0x13, 0x13, 0x1B, 0x25, 0x1C, 0x1E,
+ 0x20, 0x21, 0x23, 0x23, 0x23, 0x15, 0x1A, 0x27,
+ 0x29, 0x26, 0x22, 0x29, 0x1F, 0x22, 0x23, 0x22,
+ 0x01,
+ 0x05, 0x06, 0x06, 0x08, 0x07, 0x08, 0x10, 0x08,
+ 0x08, 0x10, 0x22, 0x16, 0x13, 0x16, 0x22, 0x22,
+ 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22,
+ 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22,
+ 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22,
+ 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22,
+ 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22,
+ 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22
+};
+
+static const uint8_t sp5x_data_dht[] = {
+ 0xFF, 0xC4, /* DHT */
+ 0x01, 0xA2, /* len */
+ 0x00, 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
+ 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x01, 0x00, 0x03,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09,
+ 0x0A, 0x0B, 0x10, 0x00, 0x02, 0x01, 0x03, 0x03,
+ 0x02, 0x04, 0x03, 0x05, 0x05, 0x04, 0x04, 0x00,
+ 0x00, 0x01, 0x7D, 0x01, 0x02, 0x03, 0x00, 0x04,
+ 0x11, 0x05, 0x12, 0x21, 0x31, 0x41, 0x06, 0x13,
+ 0x51, 0x61, 0x07, 0x22, 0x71, 0x14, 0x32, 0x81,
+ 0x91, 0xA1, 0x08, 0x23, 0x42, 0xB1, 0xC1, 0x15,
+ 0x52, 0xD1, 0xF0, 0x24, 0x33, 0x62, 0x72, 0x82,
+ 0x09, 0x0A, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x25,
+ 0x26, 0x27, 0x28, 0x29, 0x2A, 0x34, 0x35, 0x36,
+ 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44, 0x45, 0x46,
+ 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54, 0x55, 0x56,
+ 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66,
+ 0x67, 0x68, 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76,
+ 0x77, 0x78, 0x79, 0x7A, 0x83, 0x84, 0x85, 0x86,
+ 0x87, 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95,
+ 0x96, 0x97, 0x98, 0x99, 0x9A, 0xA2, 0xA3, 0xA4,
+ 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xB2, 0xB3,
+ 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xC2,
+ 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA,
+ 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9,
+ 0xDA, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
+ 0xE8, 0xE9, 0xEA, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5,
+ 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0x11, 0x00, 0x02,
+ 0x01, 0x02, 0x04, 0x04, 0x03, 0x04, 0x07, 0x05,
+ 0x04, 0x04, 0x00, 0x01, 0x02, 0x77, 0x00, 0x01,
+ 0x02, 0x03, 0x11, 0x04, 0x05, 0x21, 0x31, 0x06,
+ 0x12, 0x41, 0x51, 0x07, 0x61, 0x71, 0x13, 0x22,
+ 0x32, 0x81, 0x08, 0x14, 0x42, 0x91, 0xA1, 0xB1,
+ 0xC1, 0x09, 0x23, 0x33, 0x52, 0xF0, 0x15, 0x62,
+ 0x72, 0xD1, 0x0A, 0x16, 0x24, 0x34, 0xE1, 0x25,
+ 0xF1, 0x17, 0x18, 0x19, 0x1A, 0x26, 0x27, 0x28,
+ 0x29, 0x2A, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A,
+ 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A,
+ 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A,
+ 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A,
+ 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A,
+ 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
+ 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
+ 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7,
+ 0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6,
+ 0xB7, 0xB8, 0xB9, 0xBA, 0xC2, 0xC3, 0xC4, 0xC5,
+ 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4,
+ 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xE2, 0xE3,
+ 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xF2,
+ 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA
+};
+
+
+static const uint8_t sp5x_quant_table[20][64]=
+{
+ /* index 0, Q50 */
+ { 16, 11, 12, 14, 12, 10, 16, 14, 13, 14, 18, 17, 16, 19, 24, 40,
+ 26, 24, 22, 22, 24, 49, 35, 37, 29, 40, 58, 51, 61, 60, 57, 51,
+ 56, 55, 64, 72, 92, 78, 64, 68, 87, 69, 55, 56, 80,109, 81, 87,
+ 95, 98,103,104,103, 62, 77,113,121,112,100,120, 92,101,103, 99 },
+ { 17, 18, 18, 24, 21, 24, 47, 26, 26, 47, 99, 66, 56, 66, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99 },
+
+ /* index 1, Q70 */
+ { 10, 7, 7, 8, 7, 6, 10, 8, 8, 8, 11, 10, 10, 11, 14, 24,
+ 16, 14, 13, 13, 14, 29, 21, 22, 17, 24, 35, 31, 37, 36, 34, 31,
+ 34, 33, 38, 43, 55, 47, 38, 41, 52, 41, 33, 34, 48, 65, 49, 52,
+ 57, 59, 62, 62, 62, 37, 46, 68, 73, 67, 60, 72, 55, 61, 62, 59 },
+ { 10, 11, 11, 14, 13, 14, 28, 16, 16, 28, 59, 40, 34, 40, 59, 59,
+ 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59,
+ 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59,
+ 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59 },
+
+ /* index 2, Q80 */
+ { 6, 4, 5, 6, 5, 4, 6, 6, 5, 6, 7, 7, 6, 8, 10, 16,
+ 10, 10, 9, 9, 10, 20, 14, 15, 12, 16, 23, 20, 24, 24, 23, 20,
+ 22, 22, 26, 29, 37, 31, 26, 27, 35, 28, 22, 22, 32, 44, 32, 35,
+ 38, 39, 41, 42, 41, 25, 31, 45, 48, 45, 40, 48, 37, 40, 41, 40 },
+ { 7, 7, 7, 10, 8, 10, 19, 10, 10, 19, 40, 26, 22, 26, 40, 40,
+ 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
+ 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
+ 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40 },
+
+ /* index 3, Q85 */
+ { 5, 3, 4, 4, 4, 3, 5, 4, 4, 4, 5, 5, 5, 6, 7, 12,
+ 8, 7, 7, 7, 7, 15, 11, 11, 9, 12, 17, 15, 18, 18, 17, 15,
+ 17, 17, 19, 22, 28, 23, 19, 20, 26, 21, 17, 17, 24, 33, 24, 26,
+ 29, 29, 31, 31, 31, 19, 23, 34, 36, 34, 30, 36, 28, 30, 31, 30 },
+ { 5, 5, 5, 7, 6, 7, 14, 8, 8, 14, 30, 20, 17, 20, 30, 30,
+ 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30,
+ 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30,
+ 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30 },
+
+ /* index 4, Q90 */
+ { 3, 2, 2, 3, 2, 2, 3, 3, 3, 3, 4, 3, 3, 4, 5, 8,
+ 5, 5, 4, 4, 5, 10, 7, 7, 6, 8, 12, 10, 12, 12, 11, 10,
+ 11, 11, 13, 14, 18, 16, 13, 14, 17, 14, 11, 11, 16, 22, 16, 17,
+ 19, 20, 21, 21, 21, 12, 15, 23, 24, 22, 20, 24, 18, 20, 21, 20 },
+ { 3, 4, 4, 5, 4, 5, 9, 5, 5, 9, 20, 13, 11, 13, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20 },
+
+ /* index 5, Q60 */
+ { 13, 9, 10, 11, 10, 8, 13, 11, 10, 11, 14, 14, 13, 15, 19, 32,
+ 21, 19, 18, 18, 19, 39, 28, 30, 23, 32, 46, 41, 49, 48, 46, 41,
+ 45, 44, 51, 58, 74, 62, 51, 54, 70, 55, 44, 45, 64, 87, 65, 70,
+ 76, 78, 82, 83, 82, 50, 62, 90, 97, 90, 80, 96, 74, 81, 82, 79 },
+ { 14, 14, 14, 19, 17, 19, 38, 21, 21, 38, 79, 53, 45, 53, 79, 79,
+ 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79,
+ 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79,
+ 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79 },
+
+ /* index 6, Q25 */
+ { 32, 22, 24, 28, 24, 20, 32, 28, 26, 28, 36, 34, 32, 38, 48, 80,
+ 52, 48, 44, 44, 48, 98, 70, 74, 58, 80,116,102,122,120,114,102,
+ 112,110,128,144,184,156,128,136,174,138,110,112,160,218,162,174,
+ 190,196,206,208,206,124,154,226,242,224,200,240,184,202,206,198 },
+ { 34, 36, 36, 48, 42, 48, 94, 52, 52, 94,198,132,112,132,198,198,
+ 198,198,198,198,198,198,198,198,198,198,198,198,198,198,198,198,
+ 198,198,198,198,198,198,198,198,198,198,198,198,198,198,198,198,
+ 198,198,198,198,198,198,198,198,198,198,198,198,198,198,198,198 },
+
+ /* index 7, Q95 */
+ { 2, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 2, 2, 2, 2, 4,
+ 3, 2, 2, 2, 2, 5, 4, 4, 3, 4, 6, 5, 6, 6, 6, 5,
+ 6, 6, 6, 7, 9, 8, 6, 7, 9, 7, 6, 6, 8, 11, 8, 9,
+ 10, 10, 10, 10, 10, 6, 8, 11, 12, 11, 10, 12, 9, 10, 10, 10 },
+ { 2, 2, 2, 2, 2, 2, 5, 3, 3, 5, 10, 7, 6, 7, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10 },
+
+ /* index 8, Q93 */
+ { 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 3, 2, 2, 3, 3, 6,
+ 4, 3, 3, 3, 3, 7, 5, 5, 4, 6, 8, 7, 9, 8, 8, 7,
+ 8, 8, 9, 10, 13, 11, 9, 10, 12, 10, 8, 8, 11, 15, 11, 12,
+ 13, 14, 14, 15, 14, 9, 11, 16, 17, 16, 14, 17, 13, 14, 14, 14 },
+ { 2, 3, 3, 3, 3, 3, 7, 4, 4, 7, 14, 9, 8, 9, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14 },
+
+ /* index 9, Q40 */
+ { 20, 14, 15, 18, 15, 13, 20, 18, 16, 18, 23, 21, 20, 24, 30, 50,
+ 33, 30, 28, 28, 30, 61, 44, 46, 36, 50, 73, 64, 76, 75, 71, 64,
+ 70, 69, 80, 90,115, 98, 80, 85,109, 86, 69, 70,100,136,101,109,
+ 119,123,129,130,129, 78, 96,141,151,140,125,150,115,126,129,124 },
+ { 21, 23, 23, 30, 26, 30, 59, 33, 33, 59,124, 83, 70, 83,124,124,
+ 124,124,124,124,124,124,124,124,124,124,124,124,124,124,124,124,
+ 124,124,124,124,124,124,124,124,124,124,124,124,124,124,124,124,
+ 124,124,124,124,124,124,124,124,124,124,124,124,124,124,124,124 }
+};
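+/* Each quality index above supplies a pair of 64-entry tables (luma followed
+ * by chroma) stored in zigzag scan order; sp5x_decode_frame() selects the
+ * pair as sp5x_quant_table[qscale * 2] and sp5x_quant_table[qscale * 2 + 1]
+ * when it rebuilds the DQT segment. */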
+
+#if 0
+/* 4NF-M, not ZigZag */
+static const uint8_t sp5x_quant_table_orig[18][64] =
+{
+ /* index 0, Q50 */
+ { 16, 11, 10, 16, 24, 40, 51, 61, 12, 12, 14, 19, 26, 58, 60, 55,
+ 14, 13, 16, 24, 40, 57, 69, 56, 14, 17, 22, 29, 51, 87, 80, 62,
+ 18, 22, 37, 56, 68,109,103, 77, 24, 35, 55, 64, 81,104,113, 92,
+ 49, 64, 78, 87,103,121,120,101, 72, 92, 95, 98,112,100,103, 99 },
+ { 17, 18, 24, 47, 99, 99, 99, 99, 18, 21, 26, 66, 99, 99, 99, 99,
+ 24, 26, 56, 99, 99, 99, 99, 99, 47, 66, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99 },
+
+ /* index 1, Q70 */
+ { 10, 7, 6, 10, 14, 24, 31, 37, 7, 7, 8, 11, 16, 35, 36, 33,
+ 8, 8, 10, 14, 24, 34, 41, 34, 8, 10, 13, 17, 31, 52, 48, 37,
+ 11, 13, 22, 34, 41, 65, 62, 46, 14, 21, 33, 38, 49, 62, 68, 55,
+ 29, 38, 47, 52, 62, 73, 72, 61, 43, 55, 57, 59, 67, 60, 62, 59 },
+ { 10, 11, 14, 28, 59, 59, 59, 59, 11, 13, 16, 40, 59, 59, 59, 59,
+ 14, 16, 34, 59, 59, 59, 59, 59, 28, 40, 59, 59, 59, 59, 59, 59,
+ 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59,
+ 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59 },
+
+ /* index 2, Q80 */
+ { 6, 4, 4, 6, 10, 16, 20, 24, 5, 5, 6, 8, 10, 23, 24, 22,
+ 6, 5, 6, 10, 16, 23, 28, 22, 6, 7, 9, 12, 20, 35, 32, 25,
+ 7, 9, 15, 22, 27, 44, 41, 31, 10, 14, 22, 26, 32, 42, 45, 37,
+ 20, 26, 31, 35, 41, 48, 48, 40, 29, 37, 38, 39, 45, 40, 41, 40 },
+ { 7, 7, 10, 19, 40, 40, 40, 40, 7, 8, 10, 26, 40, 40, 40, 40,
+ 10, 10, 22, 40, 40, 40, 40, 40, 19, 26, 40, 40, 40, 40, 40, 40,
+ 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
+ 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40 },
+
+ /* index 3, Q85 */
+ { 5, 3, 3, 5, 7, 12, 15, 18, 4, 4, 4, 6, 8, 17, 18, 17,
+ 4, 4, 5, 7, 12, 17, 21, 17, 4, 5, 7, 9, 15, 26, 24, 19,
+ 5, 7, 11, 17, 20, 33, 31, 23, 7, 11, 17, 19, 24, 31, 34, 28,
+ 15, 19, 23, 26, 31, 36, 36, 30, 22, 28, 29, 29, 34, 30, 31, 30 },
+ { 5, 5, 7, 14, 30, 30, 30, 30, 5, 6, 8, 20, 30, 30, 30, 30,
+ 7, 8, 17, 30, 30, 30, 30, 30, 14, 20, 30, 30, 30, 30, 30, 30,
+ 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30,
+ 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30 },
+
+ /* index 4, Q90 */
+ { 3, 2, 2, 3, 5, 8, 10, 12, 2, 2, 3, 4, 5, 12, 12, 11,
+ 3, 3, 3, 5, 8, 11, 14, 11, 3, 3, 4, 6, 10, 17, 16, 12,
+ 4, 4, 7, 11, 14, 22, 21, 15, 5, 7, 11, 13, 16, 21, 23, 18,
+ 10, 13, 16, 17, 21, 24, 24, 20, 14, 18, 19, 20, 22, 20, 21, 20 },
+ { 3, 4, 5, 9, 20, 20, 20, 20, 4, 4, 5, 13, 20, 20, 20, 20,
+ 5, 5, 11, 20, 20, 20, 20, 20, 9, 13, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20 },
+
+ /* index 5, Q60 */
+ { 13, 9, 8, 13, 19, 32, 41, 49, 10, 10, 11, 15, 21, 46, 48, 44,
+ 11, 10, 13, 19, 32, 46, 55, 45, 11, 14, 18, 23, 41, 70, 64, 50,
+ 14, 18, 30, 45, 54, 87, 82, 62, 19, 28, 44, 51, 65, 83, 90, 74,
+ 39, 51, 62, 70, 82, 97, 96, 81, 58, 74, 76, 78, 90, 80, 82, 79 },
+ { 14, 14, 19, 38, 79, 79, 79, 79, 14, 17, 21, 53, 79, 79, 79, 79,
+ 19, 21, 45, 79, 79, 79, 79, 79, 38, 53, 79, 79, 79, 79, 79, 79,
+ 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79,
+ 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79 },
+
+ /* index 6, Q25 */
+ { 32, 22, 20, 32, 48, 80,102,122, 24, 24, 28, 38, 52,116,120,110,
+ 28, 26, 32, 48, 80,114,138,112, 28, 34, 44, 58,102,174,160,124,
+ 36, 44, 74,112,136,218,206,154, 48, 70,110,128,162,208,226,184,
+ 98,128,156,174,206,242,240,202,144,184,190,196,224,200,206,198 },
+ { 34, 36, 48, 94,198,198,198,198, 36, 42, 52,132,198,198,198,198,
+ 48, 52,112,198,198,198,198,198, 94,132,198,198,198,198,198,198,
+ 198,198,198,198,198,198,198,198,198,198,198,198,198,198,198,198,
+ 198,198,198,198,198,198,198,198,198,198,198,198,198,198,198,198 },
+
+ /* index 7, Q95 */
+ { 2, 1, 1, 2, 2, 4, 5, 6, 1, 1, 1, 2, 3, 6, 6, 6,
+ 1, 1, 2, 2, 4, 6, 7, 6, 1, 2, 2, 3, 5, 9, 8, 6,
+ 2, 2, 4, 6, 7, 11, 10, 8, 2, 4, 6, 6, 8, 10, 11, 9,
+ 5, 6, 8, 9, 10, 12, 12, 10, 7, 9, 10, 10, 11, 10, 10, 10 },
+ { 2, 2, 2, 5, 10, 10, 10, 10, 2, 2, 3, 7, 10, 10, 10, 10,
+ 2, 3, 6, 10, 10, 10, 10, 10, 5, 7, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10 },
+
+ /* index 8, Q93 */
+ { 2, 2, 1, 2, 3, 6, 7, 9, 2, 2, 2, 3, 4, 8, 8, 8,
+ 2, 2, 2, 3, 6, 8, 10, 8, 2, 2, 3, 4, 7, 12, 11, 9,
+ 3, 3, 5, 8, 10, 15, 14, 11, 3, 5, 8, 9, 11, 15, 16, 13,
+ 7, 9, 11, 12, 14, 17, 17, 14, 10, 13, 13, 14, 16, 14, 14, 14 },
+ { 2, 3, 3, 7, 14, 14, 14, 14, 3, 3, 4, 9, 14, 14, 14, 14,
+ 3, 4, 8, 14, 14, 14, 14, 14, 7, 9, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14 }
+};
+#endif
+
+#endif /* AVCODEC_SP5X_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/sp5xdec.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/sp5xdec.c
new file mode 100644
index 000000000..4d7c9f765
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/sp5xdec.c
@@ -0,0 +1,222 @@
+/*
+ * Sunplus JPEG decoder (SP5X)
+ * Copyright (c) 2003 Alex Beregszaszi
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/sp5xdec.c
+ * Sunplus JPEG decoder (SP5X).
+ */
+
+#include "avcodec.h"
+#include "mjpeg.h"
+#include "mjpegdec.h"
+#include "sp5x.h"
+
+
+static int sp5x_decode_frame(AVCodecContext *avctx,
+ void *data, int *data_size,
+ const uint8_t *buf, int buf_size)
+{
+#if 0
+ MJpegDecodeContext *s = avctx->priv_data;
+#endif
+ const int qscale = 5;
+ const uint8_t *buf_ptr;
+ uint8_t *recoded;
+ int i = 0, j = 0;
+
+ if (!avctx->width || !avctx->height)
+ return -1;
+
+ buf_ptr = buf;
+
+#if 1
+ recoded = av_mallocz(buf_size + 1024);
+ if (!recoded)
+ return -1;
+
+ /* SOI */
+ recoded[j++] = 0xFF;
+ recoded[j++] = 0xD8;
+
+ memcpy(recoded+j, &sp5x_data_dqt[0], sizeof(sp5x_data_dqt));
+ memcpy(recoded+j+5, &sp5x_quant_table[qscale * 2], 64);
+ memcpy(recoded+j+70, &sp5x_quant_table[(qscale * 2) + 1], 64);
+ j += sizeof(sp5x_data_dqt);
+
+ memcpy(recoded+j, &sp5x_data_dht[0], sizeof(sp5x_data_dht));
+ j += sizeof(sp5x_data_dht);
+
+ memcpy(recoded+j, &sp5x_data_sof[0], sizeof(sp5x_data_sof));
+ AV_WB16(recoded+j+5, avctx->coded_height);
+ AV_WB16(recoded+j+7, avctx->coded_width);
+ j += sizeof(sp5x_data_sof);
+
+ memcpy(recoded+j, &sp5x_data_sos[0], sizeof(sp5x_data_sos));
+ j += sizeof(sp5x_data_sos);
+
+ if(avctx->codec_id==CODEC_ID_AMV)
+ for (i = 2; i < buf_size-2 && j < buf_size+1024-2; i++)
+ recoded[j++] = buf[i];
+ else
+ for (i = 14; i < buf_size && j < buf_size+1024-2; i++)
+ {
+ recoded[j++] = buf[i];
+ if (buf[i] == 0xff)
+ recoded[j++] = 0;
+ }
+
+ /* EOI */
+ recoded[j++] = 0xFF;
+ recoded[j++] = 0xD9;
+
+ avctx->flags &= ~CODEC_FLAG_EMU_EDGE;
+ i = ff_mjpeg_decode_frame(avctx, data, data_size, recoded, j);
+
+ av_free(recoded);
+
+#else
+ /* SOF */
+ s->bits = 8;
+ s->width = avctx->coded_width;
+ s->height = avctx->coded_height;
+ s->nb_components = 3;
+ s->component_id[0] = 0;
+ s->h_count[0] = 2;
+ s->v_count[0] = 2;
+ s->quant_index[0] = 0;
+ s->component_id[1] = 1;
+ s->h_count[1] = 1;
+ s->v_count[1] = 1;
+ s->quant_index[1] = 1;
+ s->component_id[2] = 2;
+ s->h_count[2] = 1;
+ s->v_count[2] = 1;
+ s->quant_index[2] = 1;
+ s->h_max = 2;
+ s->v_max = 2;
+
+ s->qscale_table = av_mallocz((s->width+15)/16);
+ avctx->pix_fmt = s->cs_itu601 ? PIX_FMT_YUV420P : PIX_FMT_YUVJ420P;
+ s->interlaced = 0;
+
+ s->picture.reference = 0;
+ if (avctx->get_buffer(avctx, &s->picture) < 0)
+ {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return -1;
+ }
+
+ s->picture.pict_type = FF_I_TYPE;
+ s->picture.key_frame = 1;
+
+ for (i = 0; i < 3; i++)
+ s->linesize[i] = s->picture.linesize[i] << s->interlaced;
+
+ /* DQT */
+ for (i = 0; i < 64; i++)
+ {
+ j = s->scantable.permutated[i];
+ s->quant_matrixes[0][j] = sp5x_quant_table[(qscale * 2) + i];
+ }
+ s->qscale[0] = FFMAX(
+ s->quant_matrixes[0][s->scantable.permutated[1]],
+ s->quant_matrixes[0][s->scantable.permutated[8]]) >> 1;
+
+ for (i = 0; i < 64; i++)
+ {
+ j = s->scantable.permutated[i];
+ s->quant_matrixes[1][j] = sp5x_quant_table[(qscale * 2) + 1 + i];
+ }
+ s->qscale[1] = FFMAX(
+ s->quant_matrixes[1][s->scantable.permutated[1]],
+ s->quant_matrixes[1][s->scantable.permutated[8]]) >> 1;
+
+ /* DHT */
+
+ /* SOS */
+ s->comp_index[0] = 0;
+ s->nb_blocks[0] = s->h_count[0] * s->v_count[0];
+ s->h_scount[0] = s->h_count[0];
+ s->v_scount[0] = s->v_count[0];
+ s->dc_index[0] = 0;
+ s->ac_index[0] = 0;
+
+ s->comp_index[1] = 1;
+ s->nb_blocks[1] = s->h_count[1] * s->v_count[1];
+ s->h_scount[1] = s->h_count[1];
+ s->v_scount[1] = s->v_count[1];
+ s->dc_index[1] = 1;
+ s->ac_index[1] = 1;
+
+ s->comp_index[2] = 2;
+ s->nb_blocks[2] = s->h_count[2] * s->v_count[2];
+ s->h_scount[2] = s->h_count[2];
+ s->v_scount[2] = s->v_count[2];
+ s->dc_index[2] = 1;
+ s->ac_index[2] = 1;
+
+ for (i = 0; i < 3; i++)
+ s->last_dc[i] = 1024;
+
+ s->mb_width = (s->width * s->h_max * 8 -1) / (s->h_max * 8);
+ s->mb_height = (s->height * s->v_max * 8 -1) / (s->v_max * 8);
+
+ init_get_bits(&s->gb, buf+14, (buf_size-14)*8);
+
+ return mjpeg_decode_scan(s);
+#endif
+
+ return i;
+}
+
+AVCodec sp5x_decoder = {
+ "sp5x",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_SP5X,
+ sizeof(MJpegDecodeContext),
+ /*.init = */ff_mjpeg_decode_init,
+ /*.encode = */NULL,
+ /*.close = */ff_mjpeg_decode_end,
+ /*.decode = */sp5x_decode_frame,
+ /*.capabilities = */CODEC_CAP_DR1,
+ /*.next = */NULL,
+ /*.flush = */NULL,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name = */NULL_IF_CONFIG_SMALL("Sunplus JPEG (SP5X)"),
+};
+
+AVCodec amv_decoder = {
+ "amv",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_AMV,
+ sizeof(MJpegDecodeContext),
+ /*.init = */ff_mjpeg_decode_init,
+ /*.encode = */NULL,
+ /*.close = */ff_mjpeg_decode_end,
+ /*.decode = */sp5x_decode_frame,
+ /*.capabilities = */CODEC_CAP_DR1,
+ /*.next = */NULL,
+ /*.flush = */NULL,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name = */NULL_IF_CONFIG_SMALL("AMV Video"),
+};
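
To summarize the SP5X path above: sp5x_decode_frame() synthesizes a complete
JPEG stream in a scratch buffer (SOI, a DQT segment filled from the selected
sp5x_quant_table pair, DHT, an SOF with the coded dimensions patched in, SOS),
copies the payload with JPEG byte stuffing, appends EOI, and hands the result
to ff_mjpeg_decode_frame(). The stuffing step inserts a 0x00 after every 0xFF
so that entropy-coded bytes cannot be mistaken for a marker. A minimal sketch
of just that step, with a hypothetical helper name and the bounds checks left
to the caller:

    #include <stddef.h>
    #include <stdint.h>

    /* Copy n bytes of entropy-coded data into dst starting at offset j,
     * inserting a 0x00 stuffing byte after every 0xFF; returns the new
     * write offset. */
    static size_t jpeg_stuff_bytes(uint8_t *dst, size_t j,
                                   const uint8_t *src, size_t n)
    {
        size_t i;
        for (i = 0; i < n; i++) {
            dst[j++] = src[i];
            if (src[i] == 0xff)
                dst[j++] = 0x00;
        }
        return j;
    }
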
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/svq1.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/svq1.c
new file mode 100644
index 000000000..a1583fef8
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/svq1.c
@@ -0,0 +1,44 @@
+/*
+ * SVQ1 decoder
+ * ported to MPlayer by Arpi <arpi@thot.banki.hu>
+ * ported to libavcodec by Nick Kurshev <nickols_k@mail.ru>
+ *
+ * Copyright (C) 2002 the xine project
+ * Copyright (C) 2002 the ffmpeg project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/svq1.c
+ * Sorenson Vector Quantizer #1 (SVQ1) video codec.
+ * For more information on the SVQ1 algorithm, visit:
+ * http://www.pcisys.net/~melanson/codecs/
+ */
+#ifdef _MSC_VER
+#include "libavutil/common.h"
+#endif
+
+#include "svq1.h"
+#include "svq1_cb.h"
+#include "svq1_vlc.h"
+
+/* standard video sizes */
+const struct svq1_frame_size ff_svq1_frame_size_table[7] = {
+ { 160, 120 }, { 128, 96 }, { 176, 144 }, { 352, 288 },
+ { 704, 576 }, { 240, 180 }, { 320, 240 }
+};
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/svq1.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/svq1.h
new file mode 100644
index 000000000..e89d1c384
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/svq1.h
@@ -0,0 +1,62 @@
+/*
+ * SVQ1 decoder
+ * ported to MPlayer by Arpi <arpi@thot.banki.hu>
+ * ported to libavcodec by Nick Kurshev <nickols_k@mail.ru>
+ *
+ * Copyright (C) 2002 the xine project
+ * Copyright (C) 2002 the ffmpeg project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/svq1.h
+ * Sorenson Vector Quantizer #1 (SVQ1) video codec.
+ * For more information on the SVQ1 algorithm, visit:
+ * http://www.pcisys.net/~melanson/codecs/
+ */
+
+#ifndef AVCODEC_SVQ1_H
+#define AVCODEC_SVQ1_H
+
+#include <stdint.h>
+
+#define SVQ1_BLOCK_SKIP 0
+#define SVQ1_BLOCK_INTER 1
+#define SVQ1_BLOCK_INTER_4V 2
+#define SVQ1_BLOCK_INTRA 3
+
+struct svq1_frame_size {
+ uint16_t width;
+ uint16_t height;
+};
+
+uint16_t ff_svq1_packet_checksum (const uint8_t *data, const int length,
+ int value);
+
+extern const int8_t* const ff_svq1_inter_codebooks[6];
+extern const int8_t* const ff_svq1_intra_codebooks[6];
+
+extern const uint8_t ff_svq1_block_type_vlc[4][2];
+extern const uint8_t ff_svq1_intra_multistage_vlc[6][8][2];
+extern const uint8_t ff_svq1_inter_multistage_vlc[6][8][2];
+extern const uint16_t ff_svq1_intra_mean_vlc[256][2];
+extern const uint16_t ff_svq1_inter_mean_vlc[512][2];
+
+extern const struct svq1_frame_size ff_svq1_frame_size_table[7];
+
+#endif /* AVCODEC_SVQ1_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/svq1_cb.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/svq1_cb.h
new file mode 100644
index 000000000..504066961
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/svq1_cb.h
@@ -0,0 +1,1525 @@
+/*
+ * SVQ1 decoder
+ * ported to MPlayer by Arpi <arpi@thot.banki.hu>
+ * ported to libavcodec by Nick Kurshev <nickols_k@mail.ru>
+ *
+ * Copyright (C) 2002 the xine project
+ * Copyright (C) 2002 the ffmpeg project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/svq1_cb.h
+ * SVQ1 codebooks.
+ */
+
+#ifndef AVCODEC_SVQ1_CB_H
+#define AVCODEC_SVQ1_CB_H
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "libavutil/mem.h"
+
+/* 6x16-entry codebook for inter-coded 4x2 vectors */
+DECLARE_ALIGNED(4, static const int8_t, svq1_inter_codebook_4x2)[768] = {
+ 7, 2, -6, -7, 7, 3, -3, -4, -7, -2, 7, 8, -8, -4, 3, 4,
+ 19, 17, 9, 3,-14,-16,-12, -8,-18,-16, -8, -3, 11, 14, 12, 8,
+ 7,-16,-10, 20, 7,-17,-10, 20, -6, 18, 8,-21, -7, 18, 9,-20,
+ 25, 3,-20,-14, 29, 7,-18,-13,-29, -4, 21, 14,-31, -6, 20, 14,
+ -19,-26,-28,-24, 31, 32, 22, 10, 15, 24, 31, 28,-32,-32,-22,-13,
+ 2, -8,-23,-26, -9, 3, 27, 35, 3, 11, 21, 21, 8, -4,-27,-34,
+ -30,-31, 12, 47,-29,-30, 13, 47, 38, 30,-17,-46, 34, 26,-19,-46,
+ -42,-50,-51,-43, 34, 48, 55, 48, 48, 54, 51, 42,-44,-52,-53,-47,
+ 4, 5, 0, -6, -2, -2, 0, 1,-11, -6, -1, -2, 1, 8, 9, 1,
+ 0, 1, -6, 5, 8, 1,-12, 2, 7,-14, -7, 8, 5, -8, 0, 8,
+ 1, 4, 11, 8,-12, -8, 0, -5, -1, 1, 0, 4,-15, -8, 3, 16,
+ 17, 8, -4, -6, 9, -4,-13, -8, 2, 6, 1,-18, -1, 11, 11,-12,
+ 6, 0, 2, 0, 14, 6, -7,-21, 1, -1,-13,-20, 1, 1, 10, 21,
+ -22, -5, 7, 13,-11, -1, 4, 12, -7, 0, 14, 19, -4, 3, -5,-19,
+ -26,-14, 10, 15, 18, 4, -6, -2, 25, 19, -5,-18,-20, -7, 4, 2,
+ -13, -6, -1, -4, 25, 37, -2,-35, 5, 4, 1, 1,-21,-36, 2, 43,
+ 2, -2, -1, 3, 8, -2, -6, -1, -2, -3, 2, 12, -5, -2, -2, -1,
+ -3, -1, -1, -5, -1, 7, 8, -2, 2, 7, 5, -3, 1, 1, -3, -8,
+ -3, -1, -3, -2, -2, -3, 2, 13, 15, 0,-11, -6, 3, 0, 0, 0,
+ -6, -9, -5, -4, 18, 4, 1, 3, 12, 3, 0, 4,-16, -3, 3, -3,
+ -17, 3, 18, 2, -1, -3, -1, -1, -6, 16, -8, 0, -9, 14, -7, 0,
+ 3,-13, 14, -5, 3,-13, 14, -4, -7, 20, 14,-23, 8, -7, -8, 4,
+ 8,-15,-19, 16,-10, 13, 11, -3, 9, -1, 1, 26, 5,-15,-27, 2,
+ -20, 7, 16, -4,-40, 9, 31, 1, 26,-12,-30, -7, 40, -2,-19, 4,
+ 6, 0, 0, 0, -6, -2, 1, 2, 0, -1, 0, -6, 9, 0, -2, -1,
+ -7, 8, 2, -3, -1, 2, -3, 2, 7, -4, -2, 4, 2, 0, 0, -6,
+ -3, -2, 9, 2, -2, -1, 0, -4, -3, -3, 0, -3, -6, 2, 10, 4,
+ 3, 0,-10, 8, 0, 0, -4, 4, -1, 1, 4, 2, 3, -7, -9, 7,
+ 2, 1, -9, -4, -1, 12, 0, 0, 3, -1, 7, -4, 3,-14, 4, 2,
+ -12, -9, 1, 11, 2, 5, 1, 0, 3, 1, 0, 2, 0, 8, 6,-19,
+ -6,-10, -7, -4, 9, 7, 5, 7, 6, 21, 3, -3,-11, -9, -5, -2,
+ -4, -9,-16, -1, -2, -5, 1, 36, 8, 11, 19, 0, 2, 5, -4,-41,
+ -1, -1, -2, -1, -2, -2, 1, 6, 0, 4, 1, -8, 1, 1, 1, 0,
+ -2, -3, 4, 0, 2, -1, 3, -3, 1, 3, -4, 1, -1, 3, 0, -5,
+ 3, 4, 2, 3, -2, -3, -6, -1, -2, -3, -2, 2, -4, 8, 1, 0,
+ -7, 4, 2, 6, -7, -1, 1, 0, -2, 2, -4, 1, 8, -6, 2, -1,
+ -6, 2, 0, 2, 5, 4, -8, -1, -1,-11, 0, 9, 0, -2, 2, 2,
+ 17, -5, -4, -1, -1, -4, -2, -2, 0,-13, 9, -3, -1, 12, -7, 2,
+ 0, -2, -5, 2, -7, -5, 20, -3, 7, 7, -1,-30, 3, 5, 8, 1,
+ -6, 3, -1, -4, 2, -2,-11, 18, 0, -7, 3, 14, 20, -3,-18, -9,
+ 7, -2, 0, -1, -2, 0, 0, -1, -4, -1, 1, 0, -2, 2, 0, 4,
+ 1, -3, 2, 1, 3, 1, -5, 1, -3, 0, -1, -2, 7, 1, 0, -3,
+ 2, 5, 0, -2, 2, -5, -1, 1, -1, -2, 4, -1, 0, -3, 5, 0,
+ 0, 3, -1, -2, -4, 1, 5, -1, -1, 0, -1, 9, -1, -2, -1, -1,
+ -2, 5, 5, -1, -2, 2, -3, -2, 1, 2,-11, 1, 2, 1, 3, 2,
+ 2,-10, -1, -2, 4, 2, 4, 1, 4, 5, -5, 1, 0, 6,-11, 1,
+ 1, 0, 6, 6, 0, 2, 1,-15, 7, 3, 5, 9,-30, 2, 2, 2,
+ -34, 1, 9, 2, 5, 8, 8, 2, 7, 2, 6, 6, 2,-27, 1, 4
+};
+
+/* 6x16-entry codebook for inter-coded 4x4 vectors */
+DECLARE_ALIGNED(4, static const int8_t, svq1_inter_codebook_4x4)[1536] = {
+ 4, 0, -6, -7, -4, -8,-13, -9, -8, -8, -1, 6, -2, 5, 22, 27,
+ -16, -7, 11, 10,-18, -7, 13, 10,-15, -4, 12, 8, -9, -1, 9, 5,
+ -2, 2, 15,-16, -3, 2, 19,-19, -3, 2, 19,-19, -2, 3, 15,-14,
+ 17, 22, 22, 16, -6, -7, -5, -2,-12,-16,-16,-12, 1, 1, -1, -3,
+ 11,-17, 0, 8, 14,-21, -1, 9, 14,-21, -2, 8, 11,-16, -2, 6,
+ 7, -2,-16, 11, 9, -2,-21, 14, 10, -1,-22, 14, 8, -1,-18, 10,
+ -10, 16, 3, -9,-13, 20, 4,-11,-14, 21, 4,-10,-11, 16, 3, -8,
+ 11, 4, -9, -9, 15, 6,-12,-14, 17, 8,-12,-14, 16, 10, -7,-11,
+ 4, 10, 14, 13, -1, 7, 15, 16,-12, -7, 3, 8,-20,-23,-18,-10,
+ -10,-18,-26,-25, 4, 1, -6,-11, 13, 15, 11, 3, 12, 15, 13, 8,
+ -16,-19,-16,-11, 7, 12, 15, 11, 11, 16, 16, 11, -6, -9,-11,-10,
+ 18, 19, 12, 5, 18, 16, 5, -4, 6, 0,-10,-15, -9,-17,-23,-22,
+ -10,-14, -1, 21,-11,-17, 0, 29,-11,-16, 1, 30,-10,-14, 0, 23,
+ -16,-17,-12, -6,-19,-19,-14, -7, -3, -1, 1, 2, 27, 35, 29, 19,
+ -37, -8, 23, 23,-42, -9, 28, 29,-43,-10, 26, 28,-38,-11, 19, 22,
+ 32, 16,-16,-33, 39, 20,-18,-37, 38, 19,-19,-38, 32, 15,-17,-34,
+ 24, 9, -6, -4, -1,-10, -6, 3, -8, -9, -1, 3, 3, 7, 2, -6,
+ -1, -3, -1, 0, -1, 4, 2, -7, -3, 11, 3,-16, 1, 20, 9,-18,
+ -3, -8, 6, 12, -5,-10, 7, 13, -6, -9, 5, 7, -5, -5, 2, -1,
+ -8, 12, -3, -1,-10, 15, -3, 1,-11, 13, -4, 1,-11, 8, -3, 2,
+ 9, 6, -5,-12, 3, 0, -8,-13, -4, -4, -1, -1, -4, 1, 15, 18,
+ 9, 13, 14, 12, 4, 3, -1, -2, -2, -5, -8, -5, -7,-11, -9, -4,
+ 7, -5, -7, -4, 14, -2, -7, -4, 17, 0, -8, -5, 15, 1, -7, -5,
+ -10, -1, 6, 4,-15, -9, 2, 4, 2, -1, -3, 0, 25, 13, -8,-10,
+ 7, 11, -3,-16, 7, 11, -3,-15, 6, 7, -2, -9, 4, 2, -3, -5,
+ -7, -1, -1, 0, -9, -2, 2, 6,-12, -4, 6, 14,-13, -6, 8, 19,
+ -18,-18,-11, -5, -3, 0, 3, 4, 6, 8, 6, 6, 6, 6, 6, 6,
+ -5, 3, 13,-10, -6, 1, 15, -9, -6, -3, 15, -6, -6, -6, 10, -3,
+ 9, 1, -9, -9, 11, 9, 6, 5, 0, 3, 8, 7,-15,-14, -6, -5,
+ -11, -6, 11, 19, -2, -5, -9, -8, 6, 2, -9,-10, 6, 5, 4, 5,
+ -7, -3, 8, 15, -1, 3, 10, 15, 5, 5, -1, -2, 4, -2,-21,-25,
+ 6, -6, -6, 5, 8, -9, -7, 9, 8,-12, -7, 13, 4,-14, -7, 14,
+ -4, -3, 1, 1, -3, -5, -2, -3, 7, 0, -2, -4, 20, 7, -4, -4,
+ -3,-20, -6, 10, 6, 0, 0, 1, 5, 8, 5, -1, -3, 0, 0, -2,
+ 13, 6, -1, 2, 5, 3, 2, 3, -3, 0, 3, 0,-16, -8, -2, -5,
+ -2, -7, -6, 0, -3, -6, -3, 1, -5, -1, 2, -1, -1, 12, 16, 5,
+ -7, 1, 9, 8,-10, -2, 5, 3, -6, 2, 7, 3, -4, 0, -1, -7,
+ 3, 4, -9,-24, 0, 2, 6, 3, -1, -1, 4, 7, 5, 3, -1, -2,
+ 3, 6, -9, 2, 1, 6,-13, 1, 1, 8,-10, 2, 1, 8, -7, 1,
+ -3, -3, 2, 22, -2, -3, -5, 12, -2, -3,-10, 2, -3, -1, -4, 2,
+ 11, 12, 8, 2, -5, -5, -5, -8, -6, -4, 0, -3, -2, -1, 3, 3,
+ 12, -6, -2, -1, 12, -8, -2, -2, 9, -7, 0, -3, 4, -6, 2, -2,
+ -19, 1, 12, -3, -4, 4, 5, -4, 6, 1, -2, -1, 4, -4, -2, 7,
+ -3, -4, -7, -8, -4, -4, -2, 0, -1, 2, 14, 16, -4, -2, 4, 4,
+ -1, 7, 2, -5, -2, 0, -1, 1, 4, -3, -1, 13, 6,-12,-14, 8,
+ -1, 5, 4, -5, -2, 5, 3, -9, -2, 7, 4,-12, -1, 7, 4, -9,
+ -6, -3, 1, 1, 11, 11, 0, -6, 6, 4, -2, -7,-12,-10, 3, 10,
+ -2, -3, -3, -2, 6, 11, 14, 10, -9,-11,-10,-10, 2, 2, 3, 2,
+ -7, -5, -7, -1, -1, 2, 0, 7, -1, 1, 0, 9, 3, 4, -5, -1,
+ 10, -1,-15, -1, 4, 1, -5, 2, -3, 1, -1, 1, -3, 1, 4, 4,
+ 2, -1, 4, 10, 6, 2, -1, 0, 2, 2, -7,-12, -4, 2, 0, -3,
+ -1, -4, -1, -8, 3, -1, 2, -9, 4, 0, 5, -5, 2, 0, 8, 3,
+ 3, 2, 1, 1, 4, -2, 0, 3, 2, -1, 4, 1, 0, 6, -1,-25,
+ -1, -2, -2, -4, -3, 0, -1, -4, -1, -1, -4, 2, 0, -6, 2, 25,
+ -11, -1, 5, 0, 7, 0, -2, 2, 10, -1, -3, 4, -5, -5, -2, -1,
+ 0, 6, 3, -1, -2, -1, -1, 1, -1, -7,-12, -5, 8, 6, 2, 4,
+ 2, 6, -1, -6, 9, 10, -1, -4, 1, 0, -4, 0, 3, -2, -9, -5,
+ -4, 3, 4, 0, -4, 3, 3, 0,-11, 0, 3, 2,-11, 3, 7, 2,
+ 2, -4, 7, 3, 1, -8, 7, 1, -1,-12, 4, 1, 3, -9, 2, 2,
+ 2, -2, -2, 9,-17, -3, 3, 1, -4, 7, 1, -6, 5, 4, -1, 3,
+ -1, 2, 0, -4, -7, 8, 12, -1, -2, 5, 4, -5, 3, -5, -8, -2,
+ 0, 0, -5, -2, -2, -8, 3, 27, -1, -4, -3, 6, -3, 1, -2, -7,
+ 4, 4, 1, -1, -7,-10, -7, -3, 10, 10, 5, 3, -2, -2, -4, -3,
+ 0, 1, 5, 7, 4, -2,-16,-20, 0, 4, 7, 8, 2, 0, -2, -1,
+ -2, 1, 3, 17, -3, 1, -2, -1, -1, -2, -1, -2, -1, -5, -1, 0,
+ 5, -3, 1, 0, 6, -2, 0, 0, -1, -2, 0, -3,-11, 1, 8, -1,
+ 3, 0, 0, 0, 0, 2, 4, 1, 2, 0, 6, 1, -2,-18, -3, 2,
+ -14, 0, 6, 1, -5, -2, -1, 1, -1, 1, 0, 1, 1, 7, 4, 0,
+ -1, 0, 1, -4, 1, 8, 3, -4, -3, 4, 1, 3, -6, 1, -4, 1,
+ 1,-12, 3, 3, -1,-10, 0, -1, 2, 0, 2, 1, 3, 2, 2, 4,
+ 3, 0, 0, 3, 2, 0, -2, 1, 5, 2, -5, 0, 6, -1,-14, -1,
+ -2, -6, -3, -3, 2, -1, 4, 5, 6, -1, -2, 0, 4, 4, -1, -5,
+ -4, 1,-11, 0, -1, 2, -4, 1, 2, -3, 3, -1, 1, -2, 15, 0,
+ 1, -1, 0, -2, 1, -4, -7, 1, -2, -6, -1, 21, -2, 2, -1, 1,
+ 21, -1, -2, 0, -1, -3, 1, -2, -9, -2, 2, -1, 2, 1, -4, -1,
+ 1, 8, 2, -6,-10, -1, 4, 0, -4, -3, 3, 3, 5, 0, -1, -1,
+ 3, 2, 1, -2, -2, -2, 4, 3, 5, 2, -4,-17, 0, -2, 4, 3,
+ -7, -4, 0, 3, 9, 9, 2, -1,-11, -6, 0, -1, 5, 1, 0, 1,
+ 0, 17, 5,-11, 3, -2, -6, 0, 2, -2, -4, 1, -4, 1, 2, -1,
+ -5, -1, -5, -3, -3, 5, -3, -2, 4, 16, 2, -5, -2, 5, -1, -1,
+ 0, 0, -4, 1, -1, 2, 5, 11, -1, -1, -2, 1, -4, -2, -3, -1,
+ -5, -1, 10, 0, 6, 1, 0, -3, 0, -4, 1, 0, -2, -4, 3, -1,
+ 6, 9, 3, 0, -2, 1, -2, 0, -2, -3, -2, -2, 1, 0, 1, -6,
+ 1, 0, 2, 1, -1, 3, -2, 1, 0, -1,-15, 0, -1, 5, 2, 6,
+ 2, 0, 2, 2, 0,-12, -4, 6, 0, 1, 4, -1, 1, 2, 1, -4,
+ 1, -2, -7, 0, 0, 0, 0, -1, -5, 2, 11, 3, 1, 3, 0, -6,
+ 0, -3, -9, -4, 1, 3, -1, 0, 4, 1, -2, 0, 7, -3, -1, 6,
+ 1, -2, 6, 2, 0, -1, 3, -2, -2, 4, 0, 2, -1, 2,-14, 2,
+ 2, 2, 0, -1, -2, 3, -3,-14, 0, 2, 3, -3, 5, 1, 3, 2,
+ 1, -3, 4,-14, 1, -2, 11, -1, 0, -1, 3, 0, -1, 1, 0, 2,
+ -2, 3, -3, 2, -4, -1, -4, 3, -1, 2, 1, 3, -6, -2, 2, 7,
+ -2, 1, 2, 0, -2, 0, 0, -1, 12, 5, -1, 2, -8, -1, 1, -7,
+ 2, -2, -4, 2, 11, 0,-11, -2, 3, 1, -3, -1, 0, 3, 1, -1,
+ 0, 3, 0, -2, 0, -6, -1, -3, 12, -7, -2, 0, 7, -2, 1, 1,
+ 1, 2, 2, 2, -1, 2, 0, 2,-23, 0, 4, 0, 3, 2, 1, 3,
+ -4, -5, -1, 5, -3, 5, 10, -1, 0, 0, 3, -4, 1, -1, 2, -5
+};
+
+/* 6x16-entry codebook for inter-coded 8x4 vectors */
+DECLARE_ALIGNED(4, static const int8_t, svq1_inter_codebook_8x4)[3072] = {
+ 9, 8, 4, 0, -3, -4, -4, -3, 9, 8, 4, -1, -4, -5, -5, -3,
+ 8, 7, 3, -2, -5, -5, -5, -4, 6, 4, 1, -2, -4, -5, -4, -3,
+ -12,-14,-11, -4, 1, 5, 6, 6, -8,-10, -7, -5, -2, 1, 1, 1,
+ 5, 4, 3, 1, 0, 0, -1, -1, 13, 13, 9, 6, 3, 0, -1, -2,
+ -4, -4, -3, -1, 1, 4, 8, 11, -5, -6, -4, -2, 0, 3, 8, 12,
+ -7, -7, -6, -4, -2, 2, 7, 10, -7, -7, -5, -4, -2, 1, 5, 8,
+ -3, -2, -1, 1, 3, 6, 7, 6, 2, 3, 5, 7, 8, 8, 6, 4,
+ 4, 5, 4, 3, 1, -2, -6, -7, 1, 0, -2, -7,-10,-14,-17,-16,
+ -5, -4, 1, 8, 9, 3, -3, -7, -7, -6, 1, 11, 12, 5, -3, -8,
+ -8, -7, 0, 9, 11, 5, -3, -7, -8, -6, -1, 5, 8, 4, -2, -6,
+ -4, -5, -7, -8, -9, -9, -8, -6, -4, -5, -6, -7, -7, -6, -4, -2,
+ 0, 1, 2, 3, 5, 8, 10, 9, 1, 2, 3, 6, 9, 12, 14, 13,
+ 5, 6, 6, 5, 4, 3, 2, 1, 5, 6, 7, 7, 6, 6, 6, 4,
+ -1, 0, 1, 1, 3, 5, 5, 5,-13,-16,-17,-17,-14,-10, -6, -4,
+ 9, 11, 13, 16, 15, 13, 12, 10, -4, -5, -6, -7, -7, -7, -6, -5,
+ -6, -6, -7, -7, -7, -7, -6, -5, -2, -1, 0, 0, 0, 0, 0, -1,
+ -11,-13,-15,-16,-16,-14,-12,-10, 2, 3, 4, 5, 4, 3, 3, 3,
+ 6, 7, 8, 8, 8, 7, 6, 5, 3, 4, 3, 3, 3, 3, 3, 3,
+ 3, 4, 4, 1, -2, -7,-13,-17, 5, 7, 7, 5, 1, -5,-13,-19,
+ 6, 8, 9, 8, 5, -1, -9,-16, 6, 8, 10, 10, 7, 2, -4,-11,
+ 18, 9, -1,-10,-13, -9, -4, 0, 22, 12, -1,-12,-15,-10, -4, 2,
+ 23, 13, 0,-10,-13, -9, -3, 2, 20, 12, 2, -6, -9, -6, -2, 2,
+ -6, -6, -6, -7, -7, -7, -7, -6, -6, -7, -8, -8, -9, -9, -9, -8,
+ -3, -3, -3, -3, -3, -3, -3, -3, 12, 15, 18, 21, 21, 19, 17, 14,
+ 14, 16, 18, 18, 18, 16, 15, 13, 5, 6, 6, 5, 5, 4, 4, 3,
+ -6, -7, -9,-10,-10,-10, -9, -7,-10,-11,-13,-14,-14,-13,-12,-10,
+ -27,-17, -4, 5, 9, 10, 10, 7,-32,-19, -3, 7, 11, 12, 11, 8,
+ -30,-16, -2, 8, 12, 12, 10, 7,-23,-12, 0, 7, 10, 11, 9, 6,
+ 16, 17, 16, 12, 6, -1, -8,-12, 17, 18, 15, 10, 1, -8,-15,-18,
+ 15, 14, 10, 4, -5,-14,-20,-23, 10, 8, 4, -1, -9,-16,-21,-22,
+ -10,-12,-12,-11, -5, 4, 14, 20,-11,-13,-15,-12, -4, 7, 19, 27,
+ -11,-13,-14,-11, -3, 8, 21, 28,-10,-11,-12, -9, -2, 8, 18, 25,
+ -1, -1, -1, 1, 4, 6, 6, 5, 0, 0, 0, 2, 4, 3, 1, -2,
+ 0, 0, 2, 4, 4, -1, -7,-10, 0, 0, 3, 5, 3, -3,-11,-15,
+ -14,-13, -8, -1, 3, 3, -1, -4, -5, -4, -1, 4, 8, 8, 3, 0,
+ 3, 2, 2, 3, 4, 5, 3, 1, 5, 3, 0, -2, -2, -1, -1, -1,
+ 9, 1, -6, -6, -5, -3, -2, -1, 12, 1, -6, -6, -4, -2, -1, 0,
+ 14, 4, -4, -4, -2, -2, -1, -1, 14, 6, -1, -1, -1, -1, -1, -1,
+ 4, 6, 8, 10, 11, 9, 7, 5, -1, -1, -1, 0, 0, -1, -1, -2,
+ -2, -4, -4, -5, -5, -5, -5, -4, -2, -3, -3, -4, -4, -3, -2, -1,
+ 2, 3, 4, 4, 3, 1, 0, 0, -1, 1, 4, 5, 6, 5, 4, 3,
+ -8, -6, -2, 2, 3, 4, 4, 3,-14,-13, -9, -5, -2, -1, 0, 0,
+ -3, -4, -5, -4, 0, 7, 12, 13, -3, -4, -5, -5, -2, 4, 9, 10,
+ -2, -3, -4, -5, -4, -1, 3, 4, -1, -1, -2, -3, -3, -2, 0, 1,
+ 9, 5, -2, -8,-11,-10, -7, -4, 12, 10, 6, 2, 0, -1, 0, 0,
+ 2, 2, 3, 4, 3, 1, 1, 1, -9, -8, -4, 0, 1, 2, 1, 0,
+ 6, 8, 8, 5, 1, -5,-11,-13, 0, 1, 2, 2, -1, -4, -8,-11,
+ -3, -2, 1, 3, 3, 1, -1, -4, -2, -1, 2, 5, 6, 6, 4, 1,
+ 3, 4, 5, 5, 4, 1, -3, -6, 5, 6, 4, 2, 2, 2, 0, -3,
+ 6, 5, 0, -5, -5, -2, -1, -2, 7, 4, -3,-11,-12, -7, -3, -2,
+ 1, 0, -1, -1, -1, 0, 0, 0, 2, 3, 4, 4, 5, 5, 4, 3,
+ -7, -9, -9,-10,-10, -9, -7, -6, 3, 4, 5, 6, 5, 5, 5, 5,
+ -7, -7, -7, -7, -6, -6, -5, -4, -5, -4, -3, -1, -1, -1, 0, 0,
+ -3, -2, 1, 4, 5, 5, 5, 5, -2, -1, 3, 6, 9, 10, 10, 9,
+ -14, 1, 10, 3, -2, 0, 1, 1,-16, 2, 13, 3, -3, -1, 1, 0,
+ -15, 2, 12, 3, -4, -2, 1, 1,-10, 3, 10, 2, -3, -1, 1, 1,
+ 0, 1, 4, 2, -5,-10, -3, 11, -1, 1, 4, 2, -6,-13, -2, 15,
+ -1, 0, 3, 1, -6,-12, -1, 15, -1, 1, 2, 1, -4, -8, 0, 11,
+ 10, 5, -2, -2, 2, 5, 1, -4, 7, 0, -8, -6, 1, 5, 2, -4,
+ 2, -5,-12, -7, 2, 7, 4, -1, -1, -7,-10, -4, 4, 9, 7, 2,
+ -5, -5, -4, -6, -6, -5, -5, -3, -1, -2, -2, -4, -5, -6, -5, -4,
+ 6, 7, 7, 4, 0, -2, -3, -3, 13, 14, 13, 10, 5, 1, -1, -2,
+ 1, 1, 2, 2, 2, 2, 2, 2, -5, -6, -8, -9, -9, -8, -7, -6,
+ 7, 9, 10, 11, 11, 9, 7, 5, -1, -2, -3, -3, -4, -4, -4, -3,
+ -1, -1, 0, 0, 0, 0, -1, -1, -3, -3, -4, -5, -4, -3, -3, -2,
+ 2, 1, -1, -3, -3, -2, -1, 0, 12, 12, 8, 3, 1, 0, 0, 1,
+ -6, -8, -8, -6, -2, 2, 6, 8, 1, 1, -1, -2, 0, 3, 5, 7,
+ 3, 3, 1, -1, -1, 0, 0, 2, 0, 1, 0, -1, -1, -1, -2, -1,
+ 1, 0, 0, 0, 0, 0, 2, 4, 2, 1, 3, 4, 3, 1, 0, 2,
+ 2, 1, 0, 0, -1, -1, 0, 3, 5, 1, -6,-12,-13, -8, -1, 4,
+ -2, 0, -1, -2, -1, 0, 2, 3, -6, -3, -2, 0, 1, 1, 1, 1,
+ -9, -5, 0, 4, 5, 3, 1, 0, -8, -3, 3, 7, 8, 4, 1, 0,
+ 1, 2, 2, 3, 3, 1, -1, -3, 4, 5, 5, 6, 6, 5, 2, 0,
+ 0, 0, 0, 0, 1, 0, -2, -4, -3, -3, -4, -3, -3, -4, -7, -8,
+ 14, 12, 6, -1, -3, -3, 0, 0, 7, 5, 1, -3, -5, -4, -2, -1,
+ -2, -2, -2, -2, -2, -2, -1, -1, -6, -4, -1, 1, 1, 1, 0, -1,
+ 2, 2, 1, -3, -6, -7, -6, -3, 1, 0, -1, -3, -2, 1, 4, 6,
+ 0, 0, 1, 2, 4, 7, 8, 7, 0, 0, 0, 0, -1, -4, -7, -8,
+ 0, 2, 1, -2, -3, -3, -2, -1, -1, 1, 0, -3, -5, -2, 0, 2,
+ -2, -1, -2, -5, -4, 1, 6, 9, -3, -2, -3, -4, -2, 5, 11, 13,
+ -4, -2, 2, 6, 4, -3,-10,-14, -2, -1, 1, 4, 4, 1, -1, -2,
+ 0, 0, -1, -2, -2, 0, 4, 6, 2, 2, 0, -3, -3, 0, 5, 9,
+ -4, -4, -2, 1, 6, 9, 3, -7, -2, -2, -2, -1, 4, 8, 0,-11,
+ 1, 1, 0, 0, 2, 6, -1,-10, 2, 2, 1, 0, 2, 4, 0, -7,
+ -1, -2, -3, -6, -7, -8, -8, -8, 2, 3, 3, 1, -1, -2, -3, -4,
+ 5, 5, 5, 4, 3, 2, 0, -1, 3, 3, 3, 3, 2, 2, 1, 1,
+ 3, 3, 2, -2, -3, 0, 7, 10, 1, 2, 2, -2, -5, -4, 0, 3,
+ 0, 3, 4, 2, -3, -5, -6, -4, 0, 2, 4, 4, 1, -4, -7, -7,
+ 2, 4, 5, 5, 5, 5, 6, 6, -4, -4, -3, -5, -5, -3, -3, -2,
+ -3, -4, -4, -5, -4, -2, -2, -2, 1, 1, 0, 0, 2, 4, 5, 4,
+ -2, 0, 3, 4, 4, 3, 2, 2, -9, -7, -4, 0, 3, 6, 6, 6,
+ -5, -5, -3, -2, 0, 1, 3, 4, 5, 5, 2, -2, -4, -6, -5, -3,
+ 1, -6, -4, 7, 5, -2, -2, 1, 5, -5, -4, 6, 4, -5, -4, 1,
+ 5, -5, -4, 6, 4, -5, -3, 1, 1, -7, -3, 8, 7, -1, -3, 1,
+ -8, -7, -4, 0, 2, 4, 5, 5, 5, 6, 5, 2, -1, -5, -7, -7,
+ 5, 6, 4, 1, -3, -5, -6, -5, -7, -7, -5, -2, 1, 6, 9, 10,
+ 6, 3, 0, 1, 3, 0, -8,-14, 3, 0, -1, 1, 4, 3, 0, -4,
+ 1, 0, 0, 1, 2, 1, 1, 1, -1, -1, 1, 2, 1, -1, -1, 0,
+ 1, 1, 1, 1, 0, -2, -3, 0, 1, 2, 1, 0, -2, -8, -9, -4,
+ 1, 3, 3, 2, 1, -3, -3, 1, 0, 1, 1, 1, 1, 1, 4, 8,
+ 2, 5, 9, 7, 2, -1, -1, 1, -4, -1, 1, 0, -3, -4, -1, 2,
+ -3, 0, 3, 3, 0, -1, 0, 2, -4, -1, 1, 1, -2, -4, -5, -4,
+ 1, -1, -2, -2, -1, 2, 4, 5, 2, 1, 1, 0, -1, -1, 0, 0,
+ 2, 3, 4, 5, 4, 2, 1, 0, -9, -9, -6, -3, -1, -1, -1, -1,
+ -6, -6, 4, 7, 0, -2, -1, -2, -1, -2, 5, 6, -1, -2, 0, -1,
+ 4, -1, 1, 0, -4, -2, 0, -2, 7, 1, -1, -2, -3, 1, 3, 1,
+ 4, 2, 1, 3, 3, 1, 1, 2, 2, -2, -4, 0, 3, 1, 0, 0,
+ 1, -4, -8, -4, 1, 2, 1, 0, 2, -3, -9, -6, 0, 3, 3, 2,
+ -1, -1, 0, -1, -1, 0, 1, 2, 3, 1, -4, -8, -7, -3, 1, 2,
+ 2, -1, -3, -2, -1, 0, 1, 0, -1, 0, 5, 11, 9, 3, -1, -3,
+ -1, -2, -2, -1, 1, 1, 1, 1, 0, -1, 0, 3, 6, 6, 5, 5,
+ 2, 1, -1, -1, -2, -5, -6, -4, 2, 2, 2, 1, -1, -4, -5, -5,
+ -1, -3, -6, -7, -6, -4, -1, 1, 5, 5, 3, 4, 4, 3, 4, 5,
+ -1, -2, -3, -2, -2, -2, 0, 1, 0, 0, 0, 0, 0, 1, 2, 3,
+ -6, -6, -4, -1, 2, 2, 2, 2, -6, -7, -5, -2, 0, -1, -1, 0,
+ 2, 2, 2, 4, 4, 3, 3, 4, 2, 1, 0, -1, 0, 0, 2, 4,
+ 12, 5, -5, -8, -5, 0, 2, 2, 2, -3, -6, -3, 0, 0, -1, -2,
+ -2, -3, -1, 3, 4, 1, -2, -3, 2, 2, 3, 4, 3, 1, -1, -1,
+ 3, 2, 1, 0, 1, 4, 3, 0, 4, 3, 0, -5, -6, 0, 3, 3,
+ 2, 3, 1, -7,-12, -6, 1, 3, 1, 3, 4, -1, -6, -4, 0, 1,
+ -9, -4, 2, 6, 7, 4, 1, 0, -7, -1, 4, 6, 4, 0, -3, -3,
+ -6, 0, 4, 4, 1, -2, -3, -2, -4, 1, 3, 2, 0, -2, -1, 0,
+ 0, 5, 2, -5, -3, 3, 1, -4, -2, 4, 2, -6, -3, 6, 4, -3,
+ -1, 5, 3, -5, -1, 7, 3, -4, -1, 2, 0, -6, -3, 5, 3, -3,
+ -8, -3, 3, 5, 3, 1, -2, -2, 2, 4, 4, -2, -4, -3, 1, 3,
+ 2, 1, -3, -5, -3, 3, 4, 3, -5, -6, -5, 3, 10, 8, -1, -5,
+ 0, 3, 2, -4, -9, -7, 0, 6, -5, -1, 5, 7, 4, -1, -3, -3,
+ -5, -5, -2, 3, 6, 5, -1, -4, 9, 6, 0, -4, -2, 1, 1, -1,
+ -1, -1, -1, 1, 1, 0, -1, 0, -1, 0, 0, 0, 0, -1, -1, 0,
+ 2, 1, -2, -1, 1, 1, 0, 0, 12, 8, 2, -1, -1, -4, -7, -7,
+ 2, 1, 3, 6, 7, 4, 2, 0, 1, 0, -1, 0, -1, -4, -7, -8,
+ 0, 0, -1, 0, 0, 0, -1, -3, 0, 0, 0, 0, 1, 1, 0, -2,
+ -1, 0, 1, 1, 0, 0, -1, -2, 0, 0, -1, -3, -4, -3, -1, 1,
+ -1, 0, 0, 0, 1, 4, 10, 12, -1, 0, -2, -2, -3, -3, -1, 1,
+ -3, -1, -2, -4, 2, 9, 9, 7, -3, 0, -1, -3, 0, 2, -1, 1,
+ -1, 1, -2, -3, 0, -1, -3, 0, 0, 0, -3, -2, 0, -1, -1, 1,
+ -1, -2, -1, -1, -2, -1, -1, -2, 2, -1, -2, -1, 0, 1, 0, -2,
+ 3, -1, -2, 2, 5, 3, -1, -3, 1, -5, -5, 1, 6, 6, 2, 0,
+ 1, 2, 0, -1, 0, 1, 0, -2, -5, -3, -1, 0, 1, 2, 1, -2,
+ -7, -5, -2, -2, -2, -2, 0, 1, -1, 0, 1, 1, 0, 3, 9, 12,
+ 0, 6, 5, 1, -2, -3, 0, 3, 0, 6, 5, 1, 1, 1, 2, 3,
+ -5, -2, -2, -3, 0, 0, 0, 0, -6, -3, -3, -2, 0, 0, -1, -2,
+ 4, 4, 2, 1, 0, -1, -1, 0, -2, -2, 0, 1, 2, 1, 1, 0,
+ 2, 2, 1, -1, -3, -5, -9,-10, 2, 1, -1, -1, 1, 4, 4, 1,
+ 4, 0, -2, -2, -2, -2, -1, 0, 7, 1, -4, -3, -2, 0, 1, 1,
+ 10, 5, -1, -2, 0, 1, 1, 0, 5, 1, -3, -4, -3, -1, -1, -2,
+ 2, 1, -1, -3, -3, 1, 1, -1, -2, -1, 3, 0, -1, 1, 1, 0,
+ -3, 1, 7, 2, -3, -2, -1, 0, -2, 4, 8, -1, -8, -5, 0, 2,
+ -4, -1, 1, 2, 1, -3, -4, -2, -5, -3, -2, 1, 4, 4, 4, 6,
+ -3, -2, -4, -3, 0, 1, 1, 2, 2, 2, 2, 1, 2, 1, -1, -1,
+ -4, -1, 0, -1, -3, -3, -1, -1, 1, 4, 4, 2, 0, -1, -2, -3,
+ 4, 6, 5, 3, 2, 1, -2, -4, 0, 1, 1, 1, 1, -1, -4, -6,
+ 1, 2, 2, -1, -6, -5, -1, 2, -3, -2, 1, 1, -4, -3, 2, 5,
+ -2, -1, 2, 2, -3, -4, 0, 3, -2, -2, 2, 6, 5, 2, 1, 2,
+ 2, -3, -3, 0, 0, 2, 3, 1, 3, -1, 1, 3, 1, 2, -1, -5,
+ -5, -7, -4, -2, 1, 8, 8, 1, -1, 0, 2, 0, -3, 0, 1, -3,
+ -2, -5, -5, -2, -3, -1, 0, -2, -1, -4, 0, 4, 0, 2, 4, 0,
+ 0, 0, 8, 10, 2, 1, 3, -1, -4, -3, 2, 3, -3, -3, 1, -1,
+ 1, -2, -4, 2, 7, 3, -2, -1, 6, 4, -2, -1, 2, 0, -1, 3,
+ 1, 1, -2, -2, -2, -5, -3, 4, -6, -2, 1, 1, -1, -4, -2, 4,
+ -2, -1, -2, -2, 0, 1, 0, -2, -1, 1, 0, -1, 0, 0, -1, -3,
+ 0, 1, -2, -4, -3, -1, 0, 0, 6, 8, 5, 0, 0, 1, 2, 3,
+ -2, -2, 2, 5, 2, 0, 0, 1, 2, -2, -2, -1, -1, 1, 2, 4,
+ 2, -1, 0, 1, 0, 0, 0, 1, -8, -7, -1, 1, -1, -1, 1, 3,
+ 0, 3, 6, 2, -2, 1, 2, 0,-10, -7, -1, 0, -3, -1, 2, 1,
+ 0, 0, 2, 2, 1, 1, 1, -1, 3, 0, -2, -2, 0, 2, 1, 0,
+ 8, 1, 0, 0, -2, -3, -1, 0, 2, -2, 2, 5, 1, -2, -1, 1,
+ -3, -6, -3, -1, -3, -3, -1, 2, 2, 0, 1, 2, 2, 1, 0, 0,
+ 1, -1, -1, -2, -1, 0, 1, 0, 15, 9, 2, -1, -2, -3, -3, -3,
+ 0, -3, -2, 0, 0, -1, -1, -1, 1, 0, 1, 0, 0, -1, -1, -1,
+ 0, 2, 2, -2, -3, -3, -7, -8, 0, 2, 2, 0, 1, 2, 1, 1,
+ 1, 2, 2, 2, 3, 1, 0, 3, 1, 0, -1, -2, -1, -2, 0, 5,
+ -11, -6, -1, 1, 2, 3, 1, -3, 1, 4, 3, -1, -2, 1, 2, -1,
+ 2, 2, 1, -1, -2, 0, 1, -1, 0, 0, -1, -1, 0, 2, 3, 2,
+ 1, 1, 2, 1, -1, 1, 0, -4, 0, 0, 0, -2, -2, 2, 4, -2,
+ -2, -3, 0, 0, -1, 2, 1, -6, 0, 2, 5, 5, 3, 2, -1, -7,
+ 4, 2, 0, 0, 3, 3, 1, -1, 0, -1, -1, 3, 6, 4, 1, -1,
+ -2, -2, 0, 2, 2, 0, -2, -2, -1, 0, -1, -5, -7, -5, -1, 1,
+ 5, -1, -2, 0, 2, 4, 2, -5, 0, -5, -2, 2, 1, 2, 0, -6,
+ 6, 1, 0, 1, -2, -1, 4, 2, 2, -3, -3, 0, -1, -2, 0, 0,
+ 1, -1, 0, 2, 0, 0, 6, 11, 2, -1, -1, 0, -3, -2, 3, 5,
+ 0, -2, -1, 0, -1, 0, 0, -3, 1, -1, -1, -1, -2, -1, -3, -7,
+ 1, 1, -2, -2, 1, 3, 1, -2, -1, 2, 0, -1, -1, 1, 0, 0,
+ -4, 2, 3, -1, -2, -2, 0, 1,-11, -2, 4, 5, 6, 2, -1, -2,
+ -6, -2, 1, -1, -3, -4, 1, 9, -3, 0, 3, 3, 2, -3, -3, 3,
+ 1, 1, 0, 0, 1, -1, -2, 3, 2, 0, -3, -3, 0, -1, -1, 3,
+ 1, -1, -3, 1, 2, -6, -4, 6, 0, -2, -5, -2, 0, -3, -2, 3,
+ 2, 2, 1, -2, -2, 1, 2, -1, -1, 1, 1, -2, -1, 6, 7, -1,
+ 1, 0, -4, -2, 1, -2, -3, 1, -4, 0, -3, -2, 2, 0, -3, 0,
+ -3, 4, 3, 1, 8, 7, 0, -1, -3, 4, 1, -4, 2, 3, -2, -3,
+ -3, 6, 1, -4, 1, 1, -1, -1, -2, 4, -3, -3, 3, 0, -1, -1,
+ 1, 2, -4, 2, 4, -3, -1, 2, 3, -1, -4, 5, 4, -6, -3, 2
+};
+
+/* 6x16-entry codebook for inter-coded 8x8 vectors */
+DECLARE_ALIGNED(4, static const int8_t, svq1_inter_codebook_8x8)[6144] = {
+ -4, -3, 4, 5, 2, 1, 1, 0, -5, -3, 5, 5, 2, 1, 0, 0,
+ -6, -4, 5, 5, 2, 1, 0, 0, -7, -4, 4, 5, 2, 1, 0, 0,
+ -8, -5, 3, 4, 2, 1, 0, 0, -8, -6, 3, 4, 1, 1, 1, 0,
+ -8, -6, 2, 4, 2, 1, 1, 0, -8, -6, 2, 4, 1, 1, 1, 1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, -2, -2, -2, -2, -2, -2,
+ -2, -3, -3, -3, -3, -3, -3, -3, -2, -3, -3, -3, -3, -3, -4, -3,
+ -2, -2, -2, -2, -2, -3, -3, -2, 1, 1, 1, 1, 1, 0, -1, -1,
+ 4, 5, 5, 5, 4, 3, 3, 2, 7, 7, 8, 8, 8, 7, 6, 5,
+ 2, 1, 2, 4, 4, 0, -4, -6, 1, 1, 2, 5, 5, 1, -5, -7,
+ 1, 2, 1, 4, 5, 1, -5, -8, 1, 1, 1, 5, 5, 0, -6, -8,
+ 0, 1, 1, 5, 6, 1, -6, -9, 0, 0, 1, 4, 5, 0, -5, -8,
+ 0, 0, 1, 4, 5, 0, -5, -7, 0, 0, 1, 4, 4, 1, -4, -7,
+ 1, 2, 3, 0, -3, -4, -3, -1, 1, 3, 4, 0, -3, -4, -3, -1,
+ 2, 4, 5, 1, -3, -4, -3, -2, 2, 5, 6, 1, -3, -5, -4, -2,
+ 3, 6, 6, 1, -3, -5, -4, -2, 3, 6, 6, 1, -3, -5, -4, -2,
+ 3, 6, 6, 1, -3, -5, -4, -2, 3, 5, 5, 1, -3, -4, -4, -2,
+ 2, 2, 2, 2, 1, 0, 0, -1, 4, 4, 4, 3, 2, 1, 1, 0,
+ 4, 5, 4, 4, 3, 3, 2, 1, 4, 4, 4, 4, 4, 3, 2, 2,
+ 2, 3, 3, 3, 3, 3, 2, 1, -1, -1, -1, -1, 0, 0, 0, 0,
+ -5, -6, -6, -5, -5, -4, -3, -3, -7, -9, -9, -8, -7, -6, -6, -5,
+ 6, 6, 6, 6, 6, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 2,
+ 0, -1, -1, -1, -2, -2, -1, -1, -3, -5, -6, -6, -6, -6, -5, -4,
+ -3, -5, -6, -7, -6, -6, -5, -4, -1, -2, -2, -2, -2, -2, -1, -1,
+ 0, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3,
+ 2, 1, -2, -5, -4, 0, 2, 5, 2, 1, -2, -6, -5, 0, 3, 5,
+ 2, 1, -2, -6, -6, -1, 3, 6, 3, 2, -2, -7, -6, 0, 4, 7,
+ 2, 1, -2, -7, -5, 0, 5, 7, 2, 1, -2, -6, -5, 0, 4, 7,
+ 2, 1, -2, -6, -4, 0, 4, 6, 1, 1, -2, -5, -4, 0, 3, 6,
+ -10, -9, -6, -4, -1, 2, 3, 2,-10, -9, -5, -3, 0, 4, 4, 3,
+ -9, -7, -3, -1, 2, 5, 5, 3, -7, -5, -2, 0, 3, 5, 5, 3,
+ -6, -3, 0, 1, 4, 6, 5, 3, -4, -2, 1, 2, 3, 5, 4, 2,
+ -2, 0, 1, 2, 2, 4, 3, 1, -1, 1, 2, 2, 2, 3, 3, 1,
+ -4, -5, -5, -6, -6, -6, -6, -5, -3, -3, -4, -4, -4, -4, -4, -4,
+ 0, 0, 0, 0, -1, -1, -1, -1, 5, 5, 6, 5, 5, 4, 3, 2,
+ 5, 6, 7, 7, 7, 6, 5, 4, 3, 3, 4, 4, 4, 4, 3, 2,
+ 0, -1, 0, 0, -1, -1, 0, -1, -3, -3, -4, -4, -4, -4, -3, -3,
+ 1, -2, -5, 1, 5, 4, 2, 0, 1, -3, -6, 1, 6, 5, 2, 0,
+ 0, -4, -7, 0, 6, 6, 2, 1, -1, -5, -9, -1, 6, 6, 3, 1,
+ -1, -6,-10, -2, 6, 6, 3, 1, -1, -6, -9, -2, 5, 6, 3, 1,
+ -2, -6, -9, -2, 5, 5, 3, 1, -2, -6, -7, -2, 4, 4, 2, 1,
+ -5, -7, -8, -9, -9, -8, -7, -6, -5, -6, -6, -7, -7, -6, -6, -5,
+ -3, -3, -3, -4, -5, -5, -4, -4, -1, 0, 0, -1, -1, -1, -1, -1,
+ 0, 1, 2, 2, 2, 2, 2, 1, 2, 3, 4, 5, 5, 5, 5, 4,
+ 3, 4, 5, 6, 8, 8, 8, 7, 3, 4, 5, 6, 7, 7, 7, 6,
+ 5, 6, 7, 8, 9, 10, 10, 9, 3, 4, 6, 7, 8, 9, 9, 8,
+ 0, 1, 2, 3, 4, 5, 5, 5, -1, -2, -1, -1, 0, 1, 2, 2,
+ -2, -3, -3, -3, -3, -2, -1, 0, -3, -4, -5, -5, -5, -5, -5, -4,
+ -4, -5, -5, -6, -7, -7, -6, -5, -3, -4, -5, -6, -7, -7, -6, -6,
+ 13, 7, 0, -3, -3, -4, -4, -5, 14, 7, 0, -3, -3, -4, -4, -4,
+ 15, 8, -1, -4, -4, -4, -5, -4, 15, 8, -1, -4, -4, -5, -4, -3,
+ 15, 7, -1, -4, -5, -5, -5, -4, 14, 7, -1, -4, -4, -4, -4, -3,
+ 12, 6, -1, -4, -4, -4, -4, -3, 11, 5, -1, -4, -4, -4, -4, -3,
+ -17, -4, 5, 4, 4, 4, 3, 3,-18, -5, 5, 4, 4, 4, 3, 3,
+ -19, -5, 6, 4, 4, 4, 3, 2,-20, -5, 6, 4, 4, 4, 3, 3,
+ -20, -4, 6, 4, 4, 5, 3, 3,-19, -5, 6, 4, 4, 5, 3, 3,
+ -18, -4, 5, 4, 4, 4, 3, 2,-17, -5, 4, 3, 4, 4, 3, 3,
+ -6, -6, -6, -4, -2, 1, 6, 11, -6, -7, -7, -4, -2, 2, 8, 13,
+ -8, -8, -7, -4, -2, 3, 9, 14, -8, -8, -7, -5, -1, 4, 10, 16,
+ -8, -8, -7, -5, -1, 4, 10, 17, -8, -8, -7, -4, 0, 5, 10, 16,
+ -8, -8, -6, -3, 0, 4, 9, 15, -7, -7, -5, -3, 0, 4, 8, 12,
+ 8, 7, 7, 5, 2, -2, -8,-14, 8, 8, 7, 5, 2, -2, -8,-15,
+ 8, 8, 7, 5, 1, -3, -9,-16, 8, 8, 7, 5, 1, -3,-10,-17,
+ 8, 9, 8, 5, 1, -3,-10,-17, 8, 8, 7, 4, 1, -4,-10,-16,
+ 7, 7, 7, 4, 1, -3, -9,-14, 6, 7, 6, 3, 0, -3, -9,-13,
+ 5, 1, -4, -4, -3, -1, 0, 0, 7, 2, -3, -3, -2, -1, 1, 0,
+ 7, 1, -3, -3, -1, 0, 1, 1, 6, 1, -3, -2, -1, 1, 1, 0,
+ 6, 0, -4, -2, -1, 0, 1, 0, 5, 0, -4, -3, -1, 0, 0, -1,
+ 5, 0, -3, -1, 0, 0, 0, -2, 4, 1, -2, -1, 0, 1, 0, -1,
+ 2, 2, 1, 1, -2, -6, -8, -8, 1, 1, 1, 1, -2, -5, -8, -8,
+ 1, 1, 1, 0, -1, -3, -5, -5, 0, 0, 0, 0, -1, -1, -1, -2,
+ 0, -1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 2, 3, 2,
+ 2, 1, 1, 1, 2, 3, 4, 3, 3, 3, 3, 3, 4, 4, 5, 4,
+ -4, -4, -3, -2, 0, 0, 1, 1, -4, -4, -3, -2, -1, 0, 0, 1,
+ -2, -2, -2, -1, -1, -1, 0, 0, 0, 1, 0, 0, 0, 0, 0, -1,
+ 2, 2, 2, 2, 2, 2, 1, 1, 3, 4, 4, 4, 4, 4, 4, 3,
+ 1, 1, 1, 3, 3, 4, 3, 3, -5, -6, -5, -4, -3, -3, -2, -2,
+ -4, -2, -1, -1, -1, -1, 0, 1, -4, -2, -1, -1, -1, -1, 0, 1,
+ -3, -2, -1, -1, -1, 0, 1, 2, -4, -3, -2, -1, -1, 1, 3, 3,
+ -4, -3, -3, -1, -1, 1, 4, 5, -4, -3, -2, -2, -1, 1, 4, 7,
+ -2, -2, -1, -1, 0, 2, 6, 8, -1, 0, 0, 1, 1, 4, 7, 8,
+ -3, -3, -3, -2, -2, -1, -1, 0, -1, -1, 0, 1, 2, 2, 3, 3,
+ 0, 1, 2, 4, 5, 6, 6, 5, -1, 0, 2, 3, 5, 6, 5, 3,
+ -1, -1, 0, 2, 3, 3, 2, 1, -2, -2, -1, 0, -1, -3, -4, -4,
+ 0, 0, -1, -1, -2, -4, -8, -7, 1, 2, 1, 0, -1, -4, -6, -7,
+ -2, 4, 1, -6, 0, 3, 0, 0, -2, 5, 1, -7, 0, 3, 0, 0,
+ -3, 5, 1, -8, 0, 3, -1, -1, -2, 6, 1, -9, 0, 3, 0, -1,
+ -2, 6, 2, -8, 0, 4, 0, -1, -3, 5, 1, -7, 1, 4, 0, 0,
+ -2, 4, 1, -7, 0, 4, 1, 0, -1, 4, 1, -6, 0, 3, 1, 0,
+ 0, 0, 0, 3, 4, 5, 4, 1, 1, 1, 1, 2, 3, 3, 2, 0,
+ 2, 2, 1, 2, 2, 1, -1, -2, 4, 3, 1, 1, 0, -1, -3, -5,
+ 5, 3, 1, -1, -2, -3, -4, -6, 5, 3, 0, -2, -3, -5, -6, -7,
+ 4, 3, 0, -2, -3, -4, -5, -5, 4, 3, 0, -1, -2, -2, -3, -3,
+ 0, 0, 0, 0, -1, -5, -2, 6, 0, 0, 0, 1, -1, -6, -2, 8,
+ 0, 0, 0, 2, 0, -6, -3, 9, 0, -1, 0, 2, 0, -7, -2, 10,
+ 0, -1, 0, 2, -1, -8, -3, 10, 0, -1, -1, 2, -1, -7, -3, 9,
+ 0, -1, 0, 1, -1, -6, -3, 8, 0, 0, 0, 1, 0, -5, -2, 7,
+ 2, 3, 3, 2, 1, 0, -1, -1, 3, 4, 3, 2, 1, 0, -1, -2,
+ 3, 4, 4, 2, 1, -1, -2, -3, 2, 3, 3, 2, 0, -1, -2, -3,
+ -1, 0, 1, 1, 0, -1, -2, -2, -5, -4, -3, -1, 0, 1, 1, 1,
+ -8, -8, -5, -1, 1, 3, 4, 3,-10, -9, -5, 0, 3, 5, 6, 5,
+ -5, -1, 4, 5, 3, 1, 0, 0, -6, -1, 4, 5, 2, 0, -1, -2,
+ -6, -1, 5, 4, 2, -1, -2, -2, -7, -1, 4, 4, 1, -2, -3, -3,
+ -6, -1, 5, 4, 1, -2, -3, -3, -5, 0, 4, 4, 1, -1, -2, -2,
+ -4, 0, 5, 4, 1, -1, -1, -2, -3, 1, 4, 3, 1, -1, -1, -2,
+ -2, -3, -2, 1, 4, 6, 5, 3, -3, -4, -4, 0, 3, 5, 4, 2,
+ -3, -5, -5, -1, 2, 4, 3, 1, -4, -6, -4, -1, 2, 4, 2, -1,
+ -2, -4, -3, 1, 2, 4, 2, -1, -2, -4, -2, 1, 3, 3, 1, -2,
+ -2, -3, -2, 1, 3, 3, 1, -2, -2, -2, -1, 1, 3, 3, 0, -2,
+ -4, -4, -3, -2, -1, 2, 5, 7, -4, -4, -3, -3, -2, 1, 5, 7,
+ -2, -3, -2, -3, -3, -1, 3, 5, -1, -1, 0, -2, -3, -2, 2, 4,
+ 1, 1, 1, -1, -4, -3, 1, 3, 4, 3, 2, -1, -4, -3, -1, 1,
+ 6, 4, 3, 0, -3, -3, -2, 0, 6, 5, 3, 1, -2, -3, -2, -1,
+ 12, 11, 8, 4, 0, -2, -2, -1, 10, 9, 6, 2, -1, -2, -1, 0,
+ 4, 3, 2, 0, -1, -1, 0, 1, -1, -1, -1, -1, -2, 0, 1, 2,
+ -3, -5, -4, -2, -2, 0, 2, 3, -5, -5, -4, -2, -1, 0, 1, 2,
+ -5, -5, -4, -2, -1, 0, 1, 1, -4, -4, -3, -2, -2, -1, 0, 0,
+ 3, 3, 2, -1, -3, -4, -3, -2, 3, 2, 0, -2, -4, -4, -3, -2,
+ 2, 2, 1, -1, -3, -5, -4, -3, 3, 3, 3, 1, -2, -3, -3, -3,
+ 4, 4, 4, 3, 0, -2, -2, -2, 5, 5, 5, 3, 0, -1, -2, -2,
+ 5, 5, 4, 2, -1, -2, -3, -2, 3, 3, 3, 0, -2, -4, -4, -4,
+ -1, -1, 4, -2, -2, 6, 2, -5, -1, 0, 4, -2, -3, 6, 2, -6,
+ -1, 0, 4, -2, -3, 7, 3, -7, -1, -1, 4, -3, -4, 8, 3, -7,
+ 0, -1, 4, -3, -4, 7, 3, -6, -1, -1, 4, -3, -4, 7, 3, -6,
+ -1, -1, 3, -3, -4, 6, 3, -6, -1, 0, 3, -2, -3, 6, 3, -5,
+ 1, -2, -7, 2, 5, -2, -1, 1, 1, -2, -8, 3, 6, -3, -1, 2,
+ 2, -2, -9, 4, 7, -4, -2, 2, 3, -1, -9, 5, 7, -4, -1, 3,
+ 3, -1, -9, 4, 7, -4, -2, 2, 3, -1, -7, 4, 6, -4, -2, 1,
+ 2, 0, -6, 4, 6, -4, -1, 1, 2, 0, -5, 3, 4, -3, -1, 1,
+ -2, 2, 2, 0, 0, -1, -3, -4, -2, 2, 2, 1, 1, 0, -2, -4,
+ -2, 2, 2, 2, 2, 1, -1, -2, -3, 2, 3, 3, 4, 2, 0, -2,
+ -3, 2, 3, 2, 4, 2, 0, -3, -4, 1, 2, 1, 2, 1, -1, -3,
+ -5, 0, 1, 0, 1, 1, -2, -3, -4, 0, 0, 0, 1, 0, -2, -3,
+ 0, 0, -1, -2, -2, 2, 7, 8, 0, 0, -1, -3, -2, 1, 6, 7,
+ 0, 1, -1, -3, -3, 0, 4, 5, 0, 1, 0, -1, -1, 0, 1, 3,
+ 0, 2, 1, 1, 0, -1, 0, 1, -2, 0, 1, 2, 1, 0, -1, -1,
+ -5, -2, 0, 1, 1, 0, -3, -3, -6, -4, -1, 1, 1, -1, -3, -4,
+ -4, -2, 2, 5, 6, 4, 3, 2, -5, -3, 1, 4, 4, 2, 0, 0,
+ -4, -2, 0, 2, 1, -1, -2, -2, -2, -1, 0, 1, 0, -2, -3, -2,
+ -2, 0, 0, 0, -1, -1, -2, -1, -2, -1, -1, 0, 0, 0, 1, 2,
+ -2, -2, -1, -1, 0, 1, 3, 4, -2, -3, -2, -1, 0, 2, 4, 5,
+ 2, 1, -2, -2, -1, 0, 1, 0, 1, 0, -3, -3, -1, 0, 1, 0,
+ 0, -1, -3, -3, -1, 1, 1, 1, 0, 0, -3, -1, 1, 2, 3, 3,
+ 0, -1, -3, -1, 1, 3, 3, 3, -2, -2, -4, -2, 1, 3, 4, 4,
+ -3, -3, -4, -2, 1, 3, 3, 4, -2, -3, -5, -2, 1, 2, 3, 3,
+ 4, 5, 3, 4, 4, 4, 4, 5, 3, 3, 1, 0, 0, 0, 0, 1,
+ 1, 1, -1, -2, -3, -4, -3, -2, 2, 2, 0, -2, -2, -4, -3, -2,
+ 2, 3, 1, -1, -1, -3, -3, -2, 1, 2, 0, 0, -1, -2, -2, -1,
+ 0, 1, 0, -1, -1, -3, -2, -1, 1, 1, 0, -1, -1, -2, -2, -2,
+ -2, -1, -1, 0, 1, 2, 1, 0, 1, 2, 3, 5, 6, 5, 5, 3,
+ 1, 2, 3, 4, 5, 5, 4, 3, -2, -2, -3, -3, -2, -1, 0, 0,
+ -3, -3, -4, -5, -4, -3, -2, -1, -1, -1, -2, -2, -2, -1, 0, 0,
+ 0, 1, 0, -1, -1, 0, 0, 1, -1, 0, -1, -2, -3, -2, -2, -1,
+ 7, 7, 6, 5, 4, 2, -1, -2, 3, 3, 2, 2, 1, 0, -2, -3,
+ 0, -1, -1, -1, 0, -1, -2, -2, -1, -3, -2, -1, 0, 0, 0, 1,
+ 0, -2, -2, -1, -1, 1, 2, 2, 3, 1, -1, -1, -1, 1, 2, 2,
+ 3, 1, -2, -3, -2, -1, 1, 2, 1, -2, -5, -6, -5, -3, -2, 0,
+ 0, -1, -2, -3, -1, 0, -2, -2, 0, 0, -1, -1, 0, 1, -1, -2,
+ 0, 0, -2, -1, 0, 0, 0, -2, -1, -2, -3, -3, -2, -1, -3, -3,
+ -1, -2, -3, -3, -2, -2, -3, -4, 2, 2, 0, 0, 0, 0, -1, -2,
+ 5, 5, 3, 2, 2, 2, 0, -1, 8, 8, 6, 5, 4, 4, 2, 1,
+ -7, -8, -6, -3, -1, -1, -2, -1, -5, -5, -3, 0, 2, 1, 0, 0,
+ -1, -1, 0, 3, 4, 3, 1, 1, 2, 1, 1, 3, 4, 3, 2, 2,
+ 3, 2, 0, 2, 3, 2, 1, 2, 4, 2, -1, -1, 0, 1, 1, 1,
+ 3, 2, -2, -3, -2, -1, 0, 1, 3, 1, -3, -4, -3, -2, 0, 1,
+ -4, -2, -1, 2, 3, 3, 1, 0, -7, -5, -4, -2, 0, 0, -1, -2,
+ -6, -5, -5, -4, -2, -2, -2, -3, -1, 0, -1, -1, 0, 0, 0, -1,
+ 2, 3, 2, 2, 2, 2, 1, 0, 3, 5, 4, 3, 1, 0, 1, 0,
+ 3, 4, 3, 2, 0, -1, -1, -1, 5, 5, 3, 1, 0, -1, -1, -1,
+ 1, 1, 0, -1, -3, -5, -6, -4, 1, 1, 0, 0, 0, -3, -3, -1,
+ 0, -1, -1, 0, 1, 0, 1, 3, -2, -2, -3, -1, 2, 2, 4, 7,
+ -2, -2, -2, 0, 2, 2, 3, 6, -1, 0, 0, 1, 1, 0, 0, 3,
+ 0, 3, 3, 3, 1, -2, -3, -1, 1, 3, 4, 3, 0, -3, -5, -4,
+ 0, 2, 0, -1, -3, -4, -2, -2, 1, 4, 2, 0, -2, -3, -2, -1,
+ 3, 6, 3, 1, -2, -2, 0, -1, 4, 7, 4, 1, -2, -3, -1, 0,
+ 3, 6, 3, 0, -3, -3, -1, 0, 1, 3, 0, -1, -3, -2, 1, 1,
+ 0, 1, -1, -2, -3, -1, 2, 2, -2, -1, -3, -3, -3, -1, 1, 2,
+ 3, 1, -1, 0, 1, 0, 0, 0, 2, -1, -2, -1, 1, 0, -1, -1,
+ 1, -1, -2, 0, 1, 0, -2, -3, 0, -2, -1, 1, 3, 1, -3, -5,
+ 0, -2, -1, 2, 5, 2, -3, -5, 0, -2, -1, 4, 6, 3, -2, -5,
+ 0, -2, 0, 4, 7, 4, -2, -4, 0, -2, 0, 4, 6, 4, -2, -4,
+ -2, -2, -3, -4, -3, -2, -1, 0, 1, 1, 0, -1, -1, -1, 0, 1,
+ 3, 3, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2, 1, 0, 0, 1,
+ 0, 0, 0, 0, -1, -1, -1, -1, -4, -4, -4, -4, -4, -4, -4, -3,
+ -3, -3, -2, -3, -2, -1, -1, 0, 3, 4, 4, 5, 5, 6, 6, 7,
+ -1, -2, 7, -2, -4, -1, -1, 0, -1, -2, 9, -1, -4, -1, -1, 0,
+ -1, -3, 10, -1, -4, -1, -1, 1, -1, -3, 10, -2, -3, -1, -1, 2,
+ -1, -2, 10, -2, -4, -1, -1, 2, -1, -2, 9, -2, -4, -1, -1, 2,
+ -1, -2, 8, -2, -4, 0, -1, 1, 0, -2, 7, -2, -3, -1, 0, 2,
+ 3, -4, 1, 3, -3, -2, 1, 0, 3, -5, 1, 4, -3, -2, 1, 0,
+ 3, -6, 2, 5, -3, -1, 3, 0, 3, -6, 2, 5, -3, -1, 2, 0,
+ 3, -6, 1, 5, -4, -2, 3, 0, 3, -6, 1, 5, -3, -2, 2, 0,
+ 2, -6, 1, 4, -3, -1, 1, 0, 2, -6, 1, 4, -2, -1, 1, 0,
+ 0, 0, 1, 1, 1, 0, 0, 2, 0, -1, 1, 1, 1, 0, 0, 2,
+ 0, -1, 0, 0, 0, 0, 0, 2, 0, -1, 0, 0, 0, 0, -1, 0,
+ 1, 0, 1, 0, 0, -1, -2, -1, 3, 1, 1, 0, 0, -2, -4, -3,
+ 5, 3, 2, 1, 0, -3, -5, -4, 5, 4, 2, 0, -1, -4, -5, -5,
+ 1, 0, -1, -2, -2, -3, -6, -9, 2, 0, -1, -1, 0, 0, -3, -6,
+ 1, 0, 0, -1, 0, 0, -2, -5, 2, 1, 1, 1, 1, 2, -1, -3,
+ 1, 1, 2, 1, 2, 2, 1, -1, 1, 1, 2, 1, 1, 1, 1, 1,
+ 0, 0, 2, 1, 0, 0, 2, 2, 0, 1, 2, 2, 0, 0, 2, 2,
+ -4, -3, 0, 1, 4, 6, 4, 3, -3, -2, 0, 0, 2, 4, 1, 0,
+ -1, -1, 0, 0, 1, 1, -2, -3, 1, 1, 1, 0, 1, 1, -3, -5,
+ 1, 1, 1, 0, 1, 1, -3, -5, -1, 0, 0, -1, 1, 1, -2, -4,
+ -1, 0, 0, -1, 1, 2, 0, -2, -1, 0, 0, 0, 2, 3, 1, 0,
+ -1, 0, 3, 4, 0, -4, -5, -5, 0, 0, 4, 5, 2, -2, -3, -2,
+ 0, -1, 2, 4, 2, -1, -1, 0, 0, -2, -1, 1, 0, -2, 0, 1,
+ 1, -2, -2, 0, 0, -1, -1, 1, 1, -2, -3, 0, 1, 0, -1, 0,
+ 1, -2, -2, 1, 3, 1, 0, 0, 1, -2, -1, 2, 4, 2, 0, 0,
+ 1, 2, 3, 2, 0, 2, 2, 1, -1, 0, 1, 0, -3, 1, 1, 1,
+ -1, 0, 0, -2, -4, 0, 2, 1, -1, 2, 2, -1, -5, 0, 2, 1,
+ -1, 3, 4, -1, -5, 0, 2, 1, -2, 2, 4, 0, -4, -1, 0, 0,
+ -4, 0, 2, 0, -4, -2, 0, 0, -5, -1, 2, 1, -2, 1, 3, 2,
+ 1, 0, 1, 0, 1, 2, -1, -2, 2, 0, -1, -2, 1, 3, 0, -1,
+ 3, 0, -2, -4, 0, 3, 1, 0, 5, 1, -3, -5, -2, 2, 1, 1,
+ 6, 1, -2, -5, -2, 1, 0, 1, 5, 1, -1, -5, -2, 0, -1, 0,
+ 3, 0, -2, -4, -2, 0, -1, 0, 1, -1, 0, -2, 0, 1, 0, 1,
+ 1, 1, 2, 3, 2, 1, 1, 2, -1, -1, 0, 1, 1, 0, 1, 1,
+ -4, -3, 0, 0, 1, 1, 1, 2, -4, -3, 0, 2, 2, 2, 3, 2,
+ -5, -4, 0, 1, 1, 1, 1, 2, -5, -4, -1, -1, -2, -2, -1, 0,
+ -3, -2, 0, 0, -2, -3, -2, -1, 2, 3, 4, 4, 2, 0, 0, 0,
+ -4, -2, 0, 1, 0, 0, 0, 0, -3, -1, 1, 1, 0, 0, 0, 0,
+ -2, 0, 2, 2, 0, 0, 0, 2, -1, 1, 2, 1, -1, 0, 3, 5,
+ 0, 2, 1, -1, -2, 0, 5, 6, 0, 1, 0, -3, -3, 0, 4, 6,
+ 1, 1, -2, -4, -4, -3, 1, 2, 1, 0, -2, -4, -5, -4, -2, 0,
+ -1, -3, -3, -3, -3, -2, -1, -1, 3, 2, 1, 0, 0, 1, 1, 1,
+ 5, 4, 3, 2, 1, 1, 2, 2, 2, 1, 0, -2, -2, -2, -1, -1,
+ 0, 0, 0, -1, -2, -2, -2, -2, 0, 1, 3, 3, 2, 1, -1, -1,
+ 0, 1, 3, 4, 3, 2, 1, -1, -4, -3, -1, 1, 0, -2, -3, -3,
+ -3, -4, -7, -8, -7, -4, -1, 2, 0, -1, -3, -4, -4, -2, 0, 2,
+ 1, 0, 0, -1, -3, -2, 0, 2, 2, 1, 1, 0, -1, -1, 0, 2,
+ 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 0, 0, 1, 2, 3, 3, 2, 2, 0, 0, 1, 3, 4, 4, 3, 2,
+ 3, 3, 3, 0, -1, 0, 1, 2, 1, 1, 1, -1, -2, -1, -1, 1,
+ -2, -2, -1, -3, -3, -2, -2, 0, -4, -4, -2, -2, -2, -2, -3, 0,
+ -4, -4, -1, 1, 1, 0, -1, 2, -3, -1, 2, 3, 4, 3, 3, 5,
+ -2, 0, 2, 3, 3, 3, 3, 3, -2, -2, 0, 0, 0, 0, 0, 1,
+ 0, 2, 1, -1, -3, -1, 3, -2, -1, 0, -1, -1, -3, 0, 4, -2,
+ -2, -2, -2, -2, -2, 1, 5, -2, -3, -2, -3, -1, -2, 1, 4, -3,
+ -2, 0, -1, 0, -1, 0, 3, -5, 1, 2, 1, 2, 0, 0, 2, -5,
+ 2, 4, 2, 3, 1, 1, 3, -3, 1, 2, 1, 1, 0, 1, 4, -2,
+ 4, -3, -4, -1, 3, 3, 1, 3, 4, -4, -4, -1, 3, 2, 0, 2,
+ 4, -3, -4, 0, 2, 2, -1, 1, 4, -3, -2, 1, 2, 1, -2, 0,
+ 2, -4, -2, 1, 2, 0, -3, 0, 2, -3, -2, 0, 1, 0, -2, 2,
+ 3, -1, -1, 0, 0, 0, 0, 3, 2, -2, -2, -2, -1, -1, -1, 2,
+ 2, 2, 3, 4, 3, 1, 0, -1, 1, 0, 1, 2, 1, -1, -2, -2,
+ 2, 1, 2, 1, 1, 0, -1, -1, 4, 3, 4, 3, 2, 1, 1, 1,
+ 3, 2, 2, 2, 1, 1, 1, 1, -1, -2, -1, 0, -1, -1, -1, -1,
+ -3, -3, -2, -1, -2, -2, -2, -2, -4, -4, -3, -3, -4, -4, -3, -3,
+ 2, 1, -1, -3, -4, -2, 3, 4, 2, 2, 1, -1, -3, -2, 1, 2,
+ 1, 2, 3, 3, 0, -2, -1, -2, -1, 0, 2, 4, 2, 0, -1, -3,
+ -2, -2, 0, 3, 3, 2, 0, -3, 0, -2, -3, -1, 1, 2, 2, -1,
+ 3, -1, -4, -5, -3, 0, 2, 0, 6, 3, -2, -6, -5, 0, 3, 1,
+ -2, 3, -2, 0, 3, -2, -2, 1, -3, 4, -3, 0, 3, -2, -1, 2,
+ -3, 5, -3, 0, 4, -2, -1, 2, -2, 4, -4, -1, 3, -3, -2, 2,
+ -3, 4, -3, 0, 3, -3, -1, 2, -2, 5, -2, 0, 3, -3, -1, 2,
+ -2, 4, -3, 1, 3, -2, -1, 2, -2, 3, -2, 1, 3, -2, 0, 2,
+ 1, 0, 0, -1, 1, 2, -4, -1, 2, 0, 0, -1, 1, 2, -4, -2,
+ 1, 1, 1, -1, 2, 4, -2, 0, 0, -1, 1, -1, 2, 5, -1, 1,
+ 0, -1, 0, -2, 1, 5, -1, 1, 0, -1, -1, -2, 0, 3, -3, -1,
+ 1, 1, 0, -2, 0, 3, -3, -1, 1, 1, 0, -3, 0, 3, -2, 0,
+ 1, 0, -1, 1, 1, 2, 4, 5, 1, 0, -1, 1, 1, 1, 5, 7,
+ 0, 0, -2, -1, -1, 0, 3, 5, 0, -1, -2, -1, -1, -1, 2, 3,
+ 0, -1, -3, -1, -1, -1, 1, 2, -1, -2, -4, -2, -2, -2, 0, 0,
+ -1, -2, -2, -1, -2, -2, 0, 0, 0, -1, -1, 0, -1, -1, 0, 0,
+ 3, 3, 0, -1, -1, 1, 4, 4, 2, 3, 0, -2, -2, 0, 1, 1,
+ 2, 3, 1, -1, -1, 0, 1, 0, 1, 2, 0, -1, -1, -1, 0, -2,
+ 0, 1, 0, -1, -2, -1, 0, -2, 0, 1, 0, -1, -2, -1, 1, 0,
+ 1, 1, -1, -3, -4, -3, 1, 3, 1, 2, -1, -3, -5, -4, 1, 3,
+ -3, -2, 0, 1, 1, 1, 0, -2, 0, 1, 1, 1, 0, 0, -1, -3,
+ 1, 2, 1, 1, 0, -1, -1, -2, 0, -1, -3, -1, -1, -1, 0, -1,
+ 0, -3, -6, -3, -2, -1, 1, 1, 2, -1, -4, -3, -2, 0, 2, 2,
+ 5, 4, 1, 1, 0, 1, 3, 2, 5, 4, 2, 1, 0, -1, 0, 1,
+ -2, 0, -2, -5, -6, -3, 0, 0, -2, 0, 1, 0, -1, 1, 2, 2,
+ -2, 0, 1, 3, 2, 2, 2, 1, -2, 0, 2, 4, 3, 2, 1, 1,
+ -2, 0, 2, 3, 2, 0, -1, 0, -3, -1, 1, 1, 0, -1, -1, 1,
+ -4, -1, 1, 0, -1, -2, 0, 2, -4, -1, 0, -1, -1, -2, 1, 4,
+ -3, 0, 0, -1, 1, 1, 1, 0, -3, 1, 0, -1, 0, 0, -1, -1,
+ -1, 3, 3, 0, 1, 0, 0, 1, -3, 2, 2, -2, -1, 0, 0, 1,
+ -5, 0, 0, -2, -1, 1, 0, 2, -7, -2, 1, 0, 1, 2, 2, 2,
+ -5, 0, 3, 2, 3, 3, 2, 2, -3, 2, 4, 1, 0, 0, -2, -3,
+ 5, 2, -2, -2, 0, -1, -1, -1, 2, -1, -4, -3, -1, -2, -1, -1,
+ 0, -2, -2, 1, 2, -1, 0, 1, -1, -2, -1, 3, 3, -1, 0, 2,
+ 1, 0, 0, 3, 3, -2, -1, 2, 2, 1, 1, 3, 2, -2, -2, 0,
+ 1, 0, -1, 1, 1, -3, -3, -2, 1, 0, 1, 2, 3, 0, 0, 0,
+ -4, -5, -3, 0, 1, -1, -2, -1, -2, -3, -1, 1, 2, 0, 0, 0,
+ 1, 1, 2, 1, 2, 1, 1, 1, 3, 4, 3, 1, 0, -2, -1, -1,
+ 3, 3, 2, 0, -2, -3, -3, -2, 1, 1, 0, -1, -2, -4, -2, -2,
+ 2, 1, 0, 0, 0, -1, 0, 1, 2, 1, 1, 1, 1, 1, 1, 3,
+ 0, 0, 0, -1, -2, -1, 1, 0, -2, -1, -1, -2, -3, -2, 0, 0,
+ -1, 0, 0, -1, -2, 0, 1, 1, 1, 1, 0, -1, -1, 1, 3, 1,
+ 2, 2, 0, -2, -1, 2, 3, 0, 3, 1, -1, -1, 1, 4, 2, -2,
+ 2, 0, -3, -1, 3, 5, 0, -5, 1, -1, -2, 0, 3, 3, -1, -6,
+ -1, 0, 3, 4, 2, 0, 1, 2, -2, -1, 0, 1, -1, -2, 0, 1,
+ -2, -3, -2, -3, -6, -7, -6, -3, 2, 2, 3, 1, -1, -2, -3, -2,
+ 2, 2, 3, 1, 0, 0, 0, 0, 2, 1, 1, 0, 1, 1, 0, 1,
+ 1, 0, 0, 0, 0, 1, 1, 2, 1, 0, -1, 0, 0, 2, 2, 1,
+ 1, 1, 3, 1, -1, -1, -1, 1, -2, -1, 0, 0, -2, -2, -1, 2,
+ -2, -2, 1, 1, 1, 0, 1, 3, -2, -2, 0, -1, 0, -1, 0, 2,
+ 0, 0, 1, 0, -1, -1, -2, 1, 3, 2, 2, 1, 0, -2, -2, 1,
+ 5, 3, 3, 2, 1, 1, 1, 4, 0, -3, -4, -5, -4, -3, -1, 1,
+ -6, -4, -1, 2, 2, 0, 0, -1, -4, -2, 1, 3, 3, 2, 2, 0,
+ -3, -2, -1, 2, 3, 3, 2, 0, -3, -2, -2, 1, 2, 1, 1, -1,
+ -2, -2, -2, 0, 2, 2, 1, -1, -1, -1, -1, 1, 2, 3, 2, 0,
+ -1, -1, -2, 1, 2, 2, 2, -1, 0, -1, -2, 0, 2, 1, 0, -1,
+ 6, 4, 2, 1, 0, 0, 0, 1, 4, 2, -1, -2, -2, -2, -1, -1,
+ 2, 1, -1, -2, -2, -2, -2, -1, 2, 2, 0, -2, -2, -2, -1, 0,
+ 0, 0, -1, -2, -2, -1, 0, 1, -3, -3, -2, -1, -1, -2, -1, 0,
+ -3, -2, 2, 3, 2, 0, -1, -2, -2, 0, 4, 5, 5, 2, 0, -1,
+ 5, 4, 2, 0, -1, -2, -1, -1, 4, 3, 2, 1, 0, -1, 0, -1,
+ 1, 1, 0, 1, 1, 0, 1, -1, -2, -1, -1, 0, 0, -2, -2, -3,
+ -1, 0, 0, 0, -1, -3, -3, -5, 0, 1, 1, -1, -1, -2, -2, -3,
+ -1, -1, -1, -2, -1, 1, 3, 1, -1, -2, -2, -1, 2, 5, 6, 5,
+ -3, -3, -2, 1, 1, -2, -1, -1, 1, 2, 3, 4, 1, -3, -1, -3,
+ 3, 2, 0, 1, -1, -3, -1, -3, 1, 0, -1, 0, -1, -1, 1, 0,
+ 1, 1, 0, 1, 2, 2, 5, 3, 1, 1, 1, 2, 2, 2, 3, 0,
+ -3, -1, -2, -2, -3, -3, -1, -3, -1, 1, 1, 0, -1, -1, 0, -2,
+ 2, 0, -2, -2, 2, 4, 1, -2, 1, 0, -2, -1, 3, 5, 2, -1,
+ -1, -2, -3, -2, 1, 3, 1, -2, -1, -2, -1, -1, 0, 2, 1, -1,
+ 0, 0, 1, 1, 1, 2, 2, 0, 0, 1, 4, 4, 2, 2, 3, 1,
+ -2, -1, 2, 1, -2, -3, -2, -3, -1, 0, 1, 0, -3, -4, -4, -5,
+ 4, 0, -3, -4, -4, -4, -2, -1, 5, 0, -1, 0, -1, -3, -2, -1,
+ 4, 0, 0, 1, 1, 0, 0, 0, 0, -3, -2, -1, 0, 0, 1, 0,
+ 0, -2, 0, 0, 1, 1, 2, 1, 2, 0, 0, 0, 1, 1, 1, 0,
+ 2, 0, -1, -1, 1, 1, 1, 0, 1, -1, -2, -2, 0, 2, 2, 2,
+ -3, -5, -2, 0, -1, -3, -3, 0, 0, -2, 0, 2, 2, 0, 0, 3,
+ 2, -1, -2, 0, 0, -1, -1, 2, 5, 2, -1, -1, -1, -1, -1, 2,
+ 5, 2, 0, -1, -1, 0, -1, 2, 2, 1, 0, 0, 0, 1, 0, 2,
+ -1, -1, 1, 1, 2, 2, 1, 2, -3, -2, 0, 0, 0, 0, -2, -1,
+ 0, 3, 2, 0, -2, -3, -3, -3, 0, 3, 3, 1, 0, 0, 1, 2,
+ -1, 0, -1, -2, -1, -1, 1, 3, -1, 0, -1, -2, -1, -1, 0, 2,
+ -1, 0, -1, -2, 0, 0, -1, 2, -1, 0, -1, -2, -1, -1, -2, 1,
+ 0, 1, 0, -3, -1, -1, -1, 2, 5, 5, 2, -1, -1, -1, 1, 3,
+ 0, 0, 1, -1, -3, -2, 0, 2, 1, 1, 3, 0, -2, -2, 0, 1,
+ 1, 1, 3, 1, 0, 0, -1, -1, 0, -1, 2, 1, 1, 0, -1, -3,
+ -1, -2, 1, 1, 1, 0, -2, -4, -1, 0, 2, 1, 1, 0, -1, -3,
+ 1, 1, 3, 2, 1, 0, -2, -3, 2, 2, 4, 2, 1, -1, -2, -4,
+ 1, 2, 2, 2, 0, -2, 0, 2, -1, -1, -2, -3, -4, -5, -3, 1,
+ 0, 1, 1, 0, -1, -1, -1, 1, 0, 1, 1, 1, 0, 0, 0, 2,
+ 0, 1, 1, 2, 1, 1, 1, 2, -1, -1, 0, 2, 2, 2, 2, 3,
+ -2, -4, -4, -1, -2, -2, -2, 0, 1, 0, 0, 1, 0, 0, 0, 1,
+ 0, -1, -3, -2, 0, 2, 2, 1, 0, -1, -2, -3, 0, 1, 1, 2,
+ 1, 0, -2, -3, -1, 0, 0, 1, -1, 0, -1, -2, 0, 0, -1, 0,
+ -1, 1, 1, 0, 2, 2, 0, 0, 0, 2, 3, 1, 3, 5, 3, 2,
+ -1, 1, 1, -2, 0, 3, 1, 1, -1, 0, 0, -4, -4, -1, -1, -1,
+ -1, 1, 1, 0, 1, 2, 1, 2, -3, 0, 1, 0, 1, 1, 0, 2,
+ -5, -3, -1, -1, 0, 1, 0, 1, -4, -3, -2, -3, -2, -1, -1, 0,
+ 0, 0, -1, -2, -2, -2, -2, 0, 3, 4, 2, 0, 0, 0, 0, 1,
+ 2, 1, 0, 0, 0, 0, -1, 0, 0, 1, 2, 3, 4, 4, 3, 2,
+ -1, 4, 7, 4, 0, 0, 0, 0, -1, 4, 6, 3, 0, 1, 1, 1,
+ 0, 3, 4, 0, -1, 0, 0, 1, 0, 1, 1, -2, -1, 0, -1, -1,
+ -1, 0, -1, -1, -1, 0, 0, 0, -1, -1, -1, 0, 0, 0, 0, 0,
+ -1, -3, -3, 0, 1, -1, -2, -1, -3, -4, -4, -2, -1, -2, -2, -1,
+ 2, 2, 1, 0, 1, 1, 0, -3, -2, -1, 0, 0, 1, 1, 0, -3,
+ -2, -1, 0, 1, 2, 1, 1, -2, 1, 2, 2, 2, 3, 3, 2, -1,
+ 1, 2, 1, 0, 1, 1, 2, -1, 0, 1, -2, -4, -2, 0, 1, -1,
+ 1, 1, -1, -3, -2, 0, -1, -3, 1, 2, 0, -1, 0, 1, -1, -4,
+ -1, -1, -2, -2, 0, 3, 4, 3, 1, 1, -1, -3, -2, 0, 0, 0,
+ 2, 2, 2, 2, 2, 1, -1, -1, 1, 1, 1, 3, 3, 0, -2, -2,
+ 0, -1, -1, -1, 0, -2, -1, -1, -1, -3, -4, -3, -2, -2, 0, 2,
+ -1, -1, 0, 1, 2, 2, 3, 5, -2, -1, -1, 0, 0, 0, 0, 1,
+ -2, -3, 2, 0, 0, 1, 1, -1, -1, -4, 1, -2, -1, 2, 2, 0,
+ 1, -4, 0, -2, -2, 1, 1, -1, 2, -3, 1, -1, -1, 1, 1, -1,
+ 3, -2, 3, 1, 0, 1, 1, -1, 1, -3, 2, 1, 0, 1, 0, -1,
+ -1, -5, 1, 0, -1, 0, 1, 1, 0, -3, 3, 3, 1, 2, 3, 3,
+ 0, -1, -2, 1, 5, 5, 2, -1, 1, -1, -2, -1, 1, 1, -2, -5,
+ 1, 1, -1, -2, -1, -1, -1, -3, 1, 1, -1, -1, -1, 2, 4, 3,
+ -1, -1, -1, -1, -1, 0, 4, 3, -1, -1, 0, 1, -1, -3, -1, -1,
+ 0, 0, 0, 2, 2, 0, 0, -1, 0, -2, -3, 0, 1, 1, 3, 2,
+ 2, 3, 2, 1, 0, 0, -2, -2, 2, 3, 0, 1, 1, 3, 3, 2,
+ 0, 0, -3, -1, -1, 2, 2, 3, -2, -2, -3, 1, 1, 2, 1, 1,
+ -2, -1, -2, 2, 1, 1, -1, -2, 0, 1, 0, 2, 0, 0, -2, -2,
+ 0, 1, 0, 2, 0, 0, -2, -2, -3, -2, -2, 0, -1, -2, -2, -3,
+ 0, 1, -1, 3, -1, 1, 3, -1, 0, 1, -1, 3, -1, -1, 2, -3,
+ 1, 1, -2, 3, -1, -3, 0, -3, 2, 2, -2, 3, 0, -2, 1, -2,
+ 1, 1, -3, 3, -1, -2, 1, -3, 1, 1, -3, 3, 0, -1, 1, -2,
+ 1, 2, -1, 4, 0, -1, 1, -2, 0, 1, -1, 3, -1, -3, 0, -3,
+ -3, -3, -1, 1, 2, 1, -1, -2, -2, -2, 0, 2, 1, 0, -2, -2,
+ -3, -2, 1, 2, 1, -1, -2, -1, -3, -2, 2, 4, 0, -2, -2, 1,
+ -3, -1, 2, 4, 0, -2, -2, 2, -1, 1, 4, 3, -1, -3, -2, 2,
+ 0, 2, 4, 2, -1, -2, -1, 2, 0, 1, 2, 0, -1, 0, 1, 3,
+ 3, 0, -5, 1, 4, 0, 0, 1, 1, -2, -5, 2, 5, -1, -2, 1,
+ -1, 0, 0, 3, 3, 1, 0, -1, -2, 3, 4, -2, -3, -1, 0, -2,
+ -3, 3, 5, -3, -3, 0, 0, -2, -1, 3, 2, -2, -2, 2, 2, -1,
+ 2, 0, 0, -1, 0, 0, 0, 0, 0, -3, -2, 1, 3, 0, -2, -2
+};
+
+/* list of codebooks for inter-coded vectors */
+const int8_t* const ff_svq1_inter_codebooks[6] = {
+ svq1_inter_codebook_4x2, svq1_inter_codebook_4x4,
+ svq1_inter_codebook_8x4, svq1_inter_codebook_8x8,
+ NULL, NULL,
+};
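+
+/* Each non-NULL table above holds 6 codebook stages of 16 entries apiece; for a
+   vector of N pixels, stage s entry e starts at byte offset (s * 16 + e) * N
+   (layout inferred from the table sizes and the "6x16-entry" comments). The two
+   NULL slots are block levels that carry no shared codebook in this list. */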
+
+/* 6x16-entry codebook for intra-coded 4x2 vectors */
+DECLARE_ALIGNED(4, static const int8_t, svq1_intra_codebook_4x2)[768] = {
+ 12, 13, 13, 11, -7,-10,-15,-17,-16,-15,-12,-10, 11, 15, 15, 12,
+ 2, 17, 20, 15,-45,-24, 2, 13, 21, 20, -6,-36, 12, 16, -1,-27,
+ -18,-21, 10, 45,-11,-20, -7, 21, 43, -8,-28, 0, 33,-16,-28, 3,
+ -12,-18,-18, -6,-20,-10, 28, 55, -5,-18,-21,-18, 56, 30, -6,-20,
+ -34, 27, 29,-22,-30, 29, 26,-25, 30, 34, 33, 26,-25,-31,-35,-33,
+ -31,-35,-36,-32, 29, 36, 37, 31,-71,-12, 38, 34,-63, -1, 42, 33,
+ 58, 37,-31,-60, 55, 34,-33,-61,-57,-57, 22, 93,-57,-58, 21, 93,
+ 59, 69, 70, 62,-63,-68,-68,-60,-64,-71,-71,-64, 63, 73, 72, 62,
+ -2, 0, 7, 15,-11,-10, -3, 5, -5, -8,-10,-10, 1, 9, 14, 9,
+ 15, 8, -4,-11, 12, 2,-11,-12, -8, 0, 19, 28, 4, -1,-15,-26,
+ -15, 27, 2,-14,-14, 22, 1, -9, -4, -6,-13,-10, -6,-14, 6, 47,
+ -35,-20, 6, 23, 6, 9, 6, 4, -6, 2, 23,-22, -7, 4, 28,-21,
+ 20,-22, -2, 6, 22,-28, -5, 8,-10,-18,-16,-12, 36, 19, 2, -1,
+ -3, 0, 4, 8,-45,-10, 23, 23, 40, 15,-20,-35, -4, -1, 4, 1,
+ 9, -5,-33, 24, 8, 3,-26, 19, -1, 4, 6, -3, 32, 25,-13,-49,
+ 24, 24, 15, 7,-17,-27,-19, -7,-47, 0, 39, 24,-21, -6, 7, 4,
+ -1, 0,-10,-13, 1, 1, 5, 16, 20, 5, -3, -9, -1, -4, -2, -6,
+ -17, -7, 1, 4, 12, 7, 0, 0, 3, 0, 12, 11, -3, 1, 0,-23,
+ 4, 17, -6, 0, 6, 3,-25, 0,-17, 10, 8, 5,-14, 4, 1, 4,
+ 13, 10, 4, 2,-23, -9, 1, 2, 3, -3, 1, 7, 1,-23, -7, 20,
+ -7,-18, 2, 12, -5, -4, 10, 9, 4, 10, 7,-24, 6, 3, 4,-10,
+ 22,-14,-22, 6, 0, 5, 5, -1, -4, 3,-11, -4, -7, 31, 7,-14,
+ -5,-16, -1, 42, -4, -2, -9, -5, 5, -8, -6, -3, 42, -4,-21, -5,
+ -18, 12, 20,-12, 13,-13,-10, 7, -8, -9, -2,-18,-16, 6, 40, 8,
+ 10, -1, 0, 4, -3, 4, -1,-13, -2, 6, 1,-15, 5, 3, 1, 2,
+ -4, -2, 1, 3, 15, 0, -9, -4, -3, -4, -4, -4, -3, 5, 16, -3,
+ 2, 13, 3, 4, -3, -8,-10, 0, -6, -2, -4, -1, -2, -3, -6, 23,
+ 6, -6, 7, 1, 4,-18, 5, 1, -1, 1,-15, 14, -5, 6, -4, 4,
+ 2, 2, 2, 6,-24, 2, 7, 3,-26, 0, 3, 3, 5, 7, 1, 6,
+ 14, -2,-18, -3, 7, 5, -4, 2, -6, 3, 32, 1, -6, -6, -6,-12,
+ 5,-36, 7, 6, 9, -1, 11, 0, 4, 4, 5, 3, 4, 15, 3,-38,
+ 10, 23, -5,-42, 0, 4, 4, 4, 23, 17, -6,-13,-13,-37, 1, 29,
+ 5,-14, -1, 1, 5, 0, 3, 1, 0, 4, -5, 2, 8, 0, 0,-10,
+ 4, 7, -2, -3,-10, 3, 1, 1,-12, -1, 13, 3, 0, -1, 1, -3,
+ 0, -1, 3, 1, -6, -9, 3, 9, -6, 1, -4, -6, 8, -1, 0, 8,
+ -3, -3, 0, 18, -5, -1, -4, -1, -8, -2, 3, -4, 0, 17, -1, -5,
+ 5, -2, 9,-10, 1, -5, 6, -5, 4, 2, 2, 3, 10,-14, -8, 1,
+ -1, -2,-18, -1, -1, 20, 1, 2, -1, 1, -9, 1, -1, -9, 22, -4,
+ 6, -4, 8, -3, -1, 7,-19, 5, -7, 31, -4, -4, -6, 0, -5, -5,
+ -7, -8,-19, -4, 1, 1, 4, 32, 38, -1, -8, 4, -7, -8, -6,-12,
+ -1, 0, -7, 1, -1, 9, -1, 0, 9, -1, -1, 0, 2, -6, 1, -3,
+ -12, 0, 2, 1, 1, 1, 8, 0, 9, 1, 0, 2, -2, 1,-11, 0,
+ 0, 8, 2,-10, -1, 2, -1, 0, -2, -4, 0, -5, -2, -1, -1, 14,
+ -3, 7, -1, 5, 0,-10, 1, 1, -1, -5, 14, -1, -2, 1, -3, -2,
+ -6, 0, 0, 6, 2, 3, -9, 4, 4, -5, -1, -1, -7, 3, 8, -1,
+ 2, -4, -1,-11, 11, 2, 1, 0, -1, 2, 3, 9, 0, 2, 0,-15,
+ 3, 5,-20, 3, 3, -1, 3, 3, 1, -1, 16, 1, 2,-29, 9, 2,
+ -13, -6, -1, -3, 36, -1, -8, -3, 2, 5, 4, 2,-37, 9, 11, 3
+};
+
+/* 6x16-entry codebook for intra-coded 4x4 vectors */
+DECLARE_ALIGNED(4, static const int8_t, svq1_intra_codebook_4x4)[1536] = {
+ -11, -3, 3, 6,-10, -1, 5, 7, -9, -1, 6, 7, -9, -1, 4, 6,
+ 5, 7, 0,-14, 6, 9, 2,-15, 6, 9, 2,-15, 4, 6, 0,-14,
+ 16, 3, -5, -6, 16, 1, -8, -8, 14, -1, -9, -9, 12, 0, -8, -8,
+ 8, 12, 16, 17, -2, 2, 6, 9,-10, -8, -4, 0,-15,-14,-11, -7,
+ -7,-10, -2, 16, -7,-11, -3, 18, -7,-11, -1, 20, -6, -8, 1, 19,
+ -9,-13,-16,-17, 2, -2, -7, -9, 11, 8, 4, -1, 16, 15, 11, 7,
+ -22, -2, 13, 15,-24, -2, 14, 16,-25, -4, 13, 15,-25, -6, 10, 13,
+ 26, 26, 22, 16, 17, 15, 9, 3, -2, -6,-11,-14,-20,-25,-28,-28,
+ -27,-27,-25,-21,-16,-15,-11, -7, 3, 8, 12, 13, 23, 28, 31, 30,
+ 20, 16, -7,-33, 22, 19, -6,-35, 22, 19, -6,-34, 20, 17, -6,-32,
+ -20,-20, 2, 38,-21,-22, 2, 40,-21,-22, 2, 40,-20,-20, 3, 38,
+ -47, -4, 24, 26,-50, -3, 26, 27,-50, -3, 26, 27,-47, -4, 24, 26,
+ 45, 6,-23,-27, 48, 5,-25,-28, 48, 5,-26,-28, 44, 6,-24,-27,
+ -30,-36,-10, 76,-31,-37,-11, 78,-31,-37,-11, 78,-31,-36,-10, 77,
+ -53,-32, 35, 52,-54,-34, 36, 52,-54,-34, 36, 52,-53,-33, 34, 51,
+ -93,-34, 62, 65,-93,-34, 62, 66,-93,-34, 62, 65,-93,-34, 60, 64,
+ -7, 0, 2, 2, -8, -1, 3, 3, -8, 0, 4, 5, -6, 1, 5, 5,
+ 3, 7, 11, 11, 2, 2, 3, 3, 1, -2, -6, -7, 1, -5,-11,-13,
+ 3, -2, -4, -3, 7, 0, -5, -5, 12, 4, -5, -7, 14, 6, -4, -7,
+ 18, 14, 3, -2, 6, 4, 0, -3, -8, -5, -2, 0,-16,-11, -2, 2,
+ -8, -6, 7, 18, -7, -8, 2, 13, -4, -6, -2, 6, 0, -4, -3, 1,
+ 1, -3,-13,-18, 0, -1, -5, -7, -1, 1, 6, 7, -2, 4, 15, 17,
+ -15,-14, -7, -2, -6, -5, -1, 0, 6, 6, 3, 1, 15, 13, 6, 1,
+ 2, -2,-11, 10, 2, -1,-12, 11, 3, -1,-12, 11, 2, -2,-11, 11,
+ -9, 14, -1, -5, -9, 15, -2, -5, -8, 16, -2, -5, -7, 15, -1, -4,
+ 2, 6, 8, 8, -2, 3, 9, 12,-11, -5, 4, 10,-19,-16, -8, 0,
+ 14, 8, -7,-15, 12, 7, -7,-14, 8, 5, -4, -9, 5, 3, -1, -4,
+ 12,-14, -2, 2, 13,-15, -1, 3, 14,-15, -1, 3, 13,-14, -1, 3,
+ 0, 6, 10,-13, 0, 6, 10,-15, 0, 7, 9,-17, 1, 6, 8,-16,
+ -8, -5, 15, -2, -8, -6, 17, -2, -8, -6, 16, -3, -8, -5, 15, -2,
+ -9,-11,-11,-10, 9, 10, 9, 8, 8, 10, 10, 9, -8, -9, -8, -7,
+ 9, 10, 9, 7, -8,-10,-10,-10, -7,-10,-11,-11, 11, 12, 11, 8,
+ 0, 10, 7, 0, 0, 7, 0, -6, 0, 2, -5, -6, -2, -1, -4, -1,
+ 5, 0, -6, -9, 2, 2, 2, 1, -2, 0, 5, 7, -6, -5, 1, 4,
+ 3, -8, 2, -1, 4, -9, 3, 0, 5, -7, 3, 0, 7, -5, 3, 0,
+ -5, -3, 2, 9, -6, -3, 1, 8, -6, -3, 1, 7, -5, -2, 0, 4,
+ 13, 8, 3, 1, -3, -5, -4, -1, -8, -7, -3, 0, -1, 1, 3, 2,
+ 3, 2, -5,-12, 4, 3, -2, -9, 3, 4, 1, -4, 3, 5, 4, -1,
+ -9, -8, -4, 0, 8, 6, 2, 0, 10, 8, 3, 0, -6, -5, -3, -1,
+ -3, -9,-12, -5, 0, -3, -5, 0, 2, 3, 2, 4, 5, 8, 7, 6,
+ -1, -2, 5, 12, -1, -1, 5, 9, 2, 1, -1, -2, 2, -1,-11,-17,
+ -7, 3, 3, -1, -9, 3, 4, -1,-10, 4, 6, -1, -9, 5, 7, 0,
+ -18, -7, 2, 2, -8, 1, 5, 3, 3, 4, 1, 0, 9, 5, -2, -3,
+ -2, 0, 6, 8, -4, -5, -5, -3, 1, -2, -6, -8, 10, 9, 3, -1,
+ 0, -2, -2, 0, 0, -4, -5, 0, -2, -8, -4, 8, -5, -7, 6, 24,
+ 9, 1, -7, 1, 9, 1, -8, 1, 8, 0,-10, 1, 8, -1,-11, -1,
+ 8, 8, 6, 3, 5, 4, 3, 2, -2, -3, -1, 0,-10,-13, -8, -4,
+ 0, 4, 2, -3, 0, 6, 3, -5, 3, 10, 2,-12, 5, 10, -4,-22,
+ 0, -4, -1, 3, 1, -4, -1, 5, 1, -5, 0, 8, -1, -6, -2, 7,
+ -1, -1, -2, -4, -1, -2, -4, -6, -1, -1, -1, -2, 1, 5, 10, 9,
+ 10, 3, 0, -2, 6, -1, -2, -5, 3, -1, -2, -6, 2, 0, 0, -5,
+ 6, 3, 0, 0, 6, 3, 1, 1, 4, -2, -2, 1, 0, -9, -9, -2,
+ -11, -3, 1, 2, -6, 2, 4, 5, -3, 2, 3, 4, -2, 1, 1, 2,
+ -6, -4, -1, -2, 2, -1, -1, -2, 10, 2, -2, -2, 11, 2, -4, -1,
+ 6, 0, -2, 2, 3, 3, 0, 0, -6, 3, 3, 0,-17, -1, 5, 0,
+ -1, 4, 10, 11, -3, -2, 0, 1, -3, -4, -5, -3, -1, -2, -2, -1,
+ 2, -3, -9,-12, 3, 3, 3, 2, 2, 2, 4, 4, 2, 1, -1, -2,
+ -2, 9, 5,-10, -3, 5, 5, -5, -2, 1, 2, 0, -1, -2, -2, 1,
+ -2, -3, 7, -2, -1, -3, 7, -3, -1, -2, 8, -4, -2, -2, 7, -3,
+ 1, -8, -3, 12, 2, -2, -2, 4, 1, 3, 0, -5, -1, 5, 2, -7,
+ -1, 3, 1, -5, -7, -2, 3, 1, -2, -7, -2, 2, 20, 3, -5, -1,
+ 5, 0, -3, -2, -7, -7, 0, 6, -6, 0, 7, 6, 2, 6, 0, -7,
+ -2, 6, -7, 1, -2, 7, -8, 3, -2, 7, -7, 3, -1, 7, -6, 2,
+ -5, -2, 5, 7, 4, 1, -4, -8, 6, 3, -2, -5, -7, -5, 3, 7,
+ -1, -1, 6, 5, 0, -1, 1, -4, 2, 1, 0, -7, 1, 0, 0, -4,
+ -8, 0, 3, 1, -2, 1, -1, -1, 1, -1, -3, 1, 1, -2, 1, 9,
+ 5, 2, -3, -4, -1, 0, -1, -3, -3, 1, 3, 1, -4, 0, 4, 2,
+ 2, -2, -2, 12, 0, -2, -5, 3, -1, 0, -3, 1, -3, -1, -2, 1,
+ 1, 5, 3, 0, -6, -4, -2, 1, 0, -2, -2, 2, 6, 1, -4, -1,
+ -3, -5, -5, -1, 3, 5, 5, 4, 0, 3, 1, -1, -2, 1, -2, -3,
+ 2, -4, -5, -3, 4, -2, -3, -2, 6, 0, -1, -1, 7, 1, 0, 0,
+ -3, -2, -2, 0, -2, -3, -5, -1, -2, 2, 0, -1, -1, 11, 9, -1,
+ 0, 1, -1,-10, -1, 1, 0, -6, 1, 0, 1, 4, 2, -5, -1, 13,
+ -2, 4, 5, 0, -5, 1, 6, 3, -6, -2, 3, 2, -5, -2, 0, -2,
+ -1, 1, 1, -2, -1, -2, 0, 2, 5, 5, 5, 7, 0, -4, -8, -7,
+ 0, 2, -1, -5, -1, 2, 2, -3, 0, 5, 3, -5, 3, 8, 2,-12,
+ 8, 4, 0, -2, 10, -1, -4, -1, 3, -6, -3, 0, -4, -5, 0, 0,
+ 0,-10, -4, 2, -1, -6, 3, 5, -1, -3, 6, 4, 0, -2, 4, 2,
+ 0, 8, 1, -1, 0, 11, 1, -3, -1, 6, -2, -4, -3, -2, -7, -4,
+ 0, -1, -1, -1, 4, 5, 6, 5, -5, -9, -8, -5, 2, 2, 3, 2,
+ 0, 2, 6, 1, 2, 0, 3, 0, 1, -2, -1, -2, 0, -1, -3, -6,
+ 0, 0, 2, 0, 4, 0, 2, 1, 5, -2, 0, 0, -2, -9, -1, 2,
+ 0, 1, 0,-10, -1, 1, 8, 0, -1, -2, 4, 0, 1, -1, 2, -1,
+ -3, -2, 2, -1, -3, -1, 2, -3, 0, -1, 1, 0, 8, 1, -1, 3,
+ 0, 1, 1, 2, 0, -4, -2, 0, -1, -5, 1, -1, -2, -1, 11, 2,
+ 1, 5, -2, -2, 0, 2, -4, 0, -2, 1, -5, 1, 0, 5, 0, 1,
+ -5, -3, 0, 6, -4, 2, 0, 0, -3, 5, 1, 0, -3, 3, 0, 0,
+ 3, -2, -3, 1, 1, -4, 0, 8, -2, -3, -2, 3, 1, 2, -1, -1,
+ 1, 1, 0, 2, 2, 0, 1, 6, 1, -1, 2, 1, 0, 3, 0,-19,
+ 1, -3, -2, 2, 6, 5, -2, -7, -3, 1, 3, 1, -1, -1, 0, 2,
+ -8, -1, -1, -4, 1, 1, -1, 2, 4, 3, 2, 3, -5, 1, 3, 0,
+ 0, 2, -1, 1, -3, 0, 0, 5, -5, -2, 0, 8, -4, -4, -4, 6,
+ 1, 2, 1, 2, 2, 2, -3, 2, 4, 0, -9, 0, 7, 0,-11, 1,
+ 0, 0, 0, -2, 3, 3, -1, -6, 4, 3, -3,-10, -1, 2, 6, 2,
+ 7, -2, -3, 5, -4, 0, 3, -1, -4, 2, 1, -7, 2, -1, -1, 3,
+ 3, 2, 2, 2, -5, -7, -7, -5, 5, 6, 4, 2, -2, -1, 0, 1
+};
+
+/* 6x16-entry codebook for intra-coded 8x4 vectors */
+DECLARE_ALIGNED(4, static const int8_t, svq1_intra_codebook_8x4)[3072] = {
+ 5, 6, 6, 6, 7, 7, 8, 8, 0, 0, 0, 0, 0, 1, 2, 3,
+ -3, -4, -4, -5, -5, -4, -3, -2, -4, -4, -4, -5, -4, -4, -3, -3,
+ 1, 2, 2, 2, 2, 3, 3, 3, 2, 3, 3, 4, 4, 5, 5, 5,
+ -1, 0, 1, 1, 2, 3, 4, 4, -9,-10, -9, -9, -8, -7, -6, -5,
+ -4, -4, -5, -6, -6, -7, -7, -7, 0, -1, -2, -2, -3, -3, -4, -4,
+ 4, 4, 3, 3, 2, 1, 1, 0, 7, 7, 7, 6, 6, 5, 4, 4,
+ 2, 4, 5, 6, 4, 1, -3, -6, 3, 4, 5, 5, 4, 0, -5, -8,
+ 2, 3, 4, 4, 2, -2, -7,-10, 2, 2, 2, 1, 0, -4, -9,-12,
+ -9, -7, -3, 1, 4, 4, 3, 3,-10, -7, -2, 3, 5, 5, 3, 3,
+ -9, -6, -2, 3, 6, 5, 4, 3, -8, -6, -1, 3, 4, 4, 3, 2,
+ -5, -5, -5, -5, -3, 1, 4, 7, -5, -5, -5, -4, -2, 1, 6, 8,
+ -4, -5, -4, -3, -1, 3, 8, 10, -3, -4, -3, -2, 1, 5, 9, 11,
+ -2, -2, -2, -2, -2, -2, -2, -2, -4, -5, -5, -5, -5, -5, -5, -4,
+ -3, -4, -4, -4, -4, -4, -4, -3, 9, 10, 10, 11, 11, 11, 10, 10,
+ 7, 4, 1, -2, -4, -6, -9,-10, 9, 7, 3, 0, -2, -4, -8, -9,
+ 11, 8, 4, 2, 0, -3, -6, -8, 11, 9, 5, 3, 1, -2, -5, -7,
+ -13,-13,-13,-12,-11,-10, -8, -8, 0, 1, 2, 3, 4, 4, 4, 3,
+ 3, 4, 5, 6, 6, 6, 5, 4, 3, 4, 4, 4, 3, 3, 3, 2,
+ 10, 10, 11, 10, 9, 9, 8, 7, 6, 6, 6, 6, 5, 4, 3, 2,
+ 0, 0, 0, -1, -2, -3, -4, -4,-10,-10,-11,-12,-13,-14,-14,-14,
+ 16, 16, 17, 16, 15, 13, 12, 11, -1, -2, -3, -4, -4, -4, -4, -3,
+ -4, -5, -6, -6, -6, -6, -6, -6, -5, -6, -6, -6, -6, -6, -5, -5,
+ -13,-13,-13,-12,-11,-10, -8, -6, -9, -8, -7, -6, -4, -2, 0, 1,
+ -2, -1, 1, 3, 5, 7, 8, 9, 5, 7, 9, 11, 13, 14, 15, 15,
+ 16, 14, 11, 7, 2, -3, -7, -9, 14, 12, 8, 3, -1, -6, -9,-11,
+ 11, 9, 4, 0, -4, -8,-11,-13, 8, 5, 1, -3, -6,-10,-12,-14,
+ -18,-15, -9, -3, 1, 6, 9, 11,-17,-13, -7, -1, 3, 7, 11, 12,
+ -15,-11, -5, 1, 5, 9, 12, 13,-13, -9, -3, 2, 5, 9, 11, 13,
+ 22, 21, 19, 15, 10, 3, -4, -9, 20, 18, 15, 9, 2, -5,-12,-17,
+ 16, 13, 8, 1, -7,-14,-20,-24, 10, 6, -1, -8,-15,-21,-25,-27,
+ -25,-23,-20,-14, -7, 1, 9, 14,-23,-21,-16, -9, 0, 9, 16, 21,
+ -20,-16,-10, -1, 8, 16, 22, 25,-15,-11, -3, 6, 14, 20, 25, 27,
+ -4, -2, 0, 1, 2, 2, 2, 2, -5, -2, 0, 2, 3, 3, 3, 3,
+ -6, -4, -1, 1, 2, 3, 3, 3, -7, -5, -2, 0, 1, 1, 2, 2,
+ 2, 1, 1, 1, 1, 0, -2, -3, 3, 3, 2, 1, 0, -1, -3, -4,
+ 4, 3, 2, 1, 0, -2, -4, -6, 5, 4, 3, 1, -1, -3, -5, -6,
+ 5, 6, 6, 4, 2, 0, -2, -3, 3, 4, 4, 4, 3, 1, 0, -1,
+ -2, -2, -1, -1, -1, -1, -2, -2, -5, -4, -3, -2, -2, -2, -3, -3,
+ -1, -1, -1, -1, -1, -1, -1, -1, -3, -4, -4, -4, -3, -3, -3, -3,
+ -1, -1, -1, -1, -1, -1, -1, -2, 5, 6, 6, 6, 6, 5, 4, 3,
+ 4, 4, 4, 4, 4, 5, 6, 7, 0, -1, -1, -1, -1, 0, 1, 2,
+ -2, -3, -3, -3, -3, -2, -1, 0, -3, -3, -4, -4, -4, -3, -2, -1,
+ 0, -2, -4, -4, -2, 0, 2, 3, 0, -2, -3, -3, -1, 2, 4, 5,
+ -1, -2, -4, -3, 0, 3, 5, 6, -2, -3, -4, -3, -1, 2, 4, 5,
+ 9, 4, 0, -3, -3, -1, 0, 1, 8, 4, -1, -4, -3, -1, 1, 2,
+ 6, 2, -3, -5, -4, -2, 0, 1, 5, 1, -3, -4, -4, -2, 0, 1,
+ 5, 3, 1, -1, -4, -8,-10,-10, 3, 3, 2, 1, 0, -2, -3, -4,
+ 1, 1, 1, 2, 3, 2, 1, 0, -1, 0, 1, 2, 3, 4, 3, 2,
+ 0, 1, 2, 2, 1, -1, -3, -3, 0, 1, 1, 1, -1, -2, -4, -3,
+ -3, -3, -3, -3, -3, -3, -1, 2, -4, -4, -3, 0, 3, 7, 12, 14,
+ -5, -5, -6, -6, -6, -6, -6, -5, 2, 2, 2, 1, 0, 0, 0, 0,
+ 4, 4, 3, 2, 1, 0, 0, 0, 6, 6, 5, 4, 2, 2, 1, 1,
+ -7, -7, -6, -3, 0, 4, 7, 8, -1, -2, -3, -3, -2, -1, 1, 2,
+ 3, 3, 1, -1, -2, -2, -2, -1, 6, 6, 4, 2, 0, -2, -2, -2,
+ -6, -5, -2, 2, 5, 9, 11, 12, -4, -4, -2, 0, 2, 4, 5, 6,
+ -3, -2, -2, -2, -2, -1, 0, 1, -2, -2, -2, -3, -3, -3, -3, -2,
+ -7, -3, 1, 3, 3, 0, -3, -5, -6, -2, 3, 5, 4, 1, -3, -5,
+ -5, -1, 4, 6, 5, 2, -3, -4, -4, 0, 5, 7, 6, 3, -1, -3,
+ 0, 0, 0, 0, 0, 0, 0, 0, -2, -2, -3, -3, -3, -3, -2, -1,
+ 6, 7, 8, 9, 9, 8, 7, 6, -4, -4, -5, -5, -6, -6, -5, -4,
+ -9, -8, -6, -4, 0, 3, 6, 6, -5, -4, -1, 3, 5, 6, 5, 3,
+ 1, 3, 6, 6, 4, 1, -2, -5, 6, 7, 5, 1, -3, -7,-10,-11,
+ 10, 9, 5, 1, -3, -6, -6, -4, 5, 3, -1, -5, -6, -5, -2, 2,
+ -2, -4, -6, -6, -4, 1, 6, 10, -6, -7, -7, -4, 1, 7, 11, 12,
+ 6, 5, 3, 2, 0, 0, 0, 0, 2, 1, -1, -2, -3, -2, -1, -1,
+ 0, -1, -2, -4, -4, -2, -1, 1, 0, 0, -1, -2, -1, 0, 2, 3,
+ 0, -1, -2, -2, -2, -2, -1, -1, 5, 4, 2, 1, 0, 0, 0, 0,
+ 6, 5, 3, 1, 0, 0, 0, 0, 2, 0, -2, -4, -4, -3, -2, -2,
+ -7, -4, 0, 2, 2, 2, 2, 1, -7, -3, 0, 0, 0, 0, 0, 0,
+ -4, -1, 1, 1, 0, 0, 0, 1, -1, 1, 2, 2, 2, 2, 3, 3,
+ -2, 0, 2, 2, 1, 1, 1, 1, -1, 1, 2, 2, 1, 0, 0, -1,
+ 0, 2, 4, 2, 0, -1, -2, -3, 1, 2, 3, 1, -2, -4, -6, -6,
+ 1, 2, 2, 4, 5, 6, 4, 1, 0, -1, -1, -1, 0, 0, -2, -4,
+ 0, 0, -1, -2, -2, -2, -4, -6, 2, 1, 0, 0, 1, 1, -1, -3,
+ 1, 1, 1, 1, 1, 2, 3, 3, 0, 0, 1, 0, 1, 2, 4, 4,
+ -1, -1, -1, -1, 0, 1, 2, 3, -4, -4, -5, -5, -5, -3, -1, 0,
+ -6, -5, -5, -4, -3, -2, -1, -1, -1, 0, 0, 1, 1, 2, 3, 3,
+ 0, 1, 1, 1, 2, 2, 3, 4, 0, 0, -1, -1, 0, 1, 2, 3,
+ 0, 1, 1, 1, 0, 0, -1, -1, 1, 3, 3, 2, 1, -1, -2, -2,
+ -2, 0, 2, 2, 2, 2, 1, 1, -9, -8, -4, -2, 1, 3, 3, 3,
+ -1, -1, -1, -2, -3, -3, -3, -4, 0, 0, 0, -1, -2, -2, -3, -3,
+ 2, 2, 2, 0, -1, -1, -1, -1, 5, 5, 4, 3, 2, 2, 2, 2,
+ 6, 3, -1, -4, -3, -1, 1, 1, 2, -1, -3, -4, -1, 2, 2, 0,
+ -1, -2, -2, 1, 4, 4, 1, -3, -2, -1, 1, 4, 6, 3, -3, -8,
+ 3, 3, 2, 1, -1, -2, -2, -2, -4, -4, -2, -1, 1, 3, 4, 4,
+ -4, -5, -5, -4, -2, 0, 2, 2, 7, 7, 4, 1, -1, -2, -3, -2,
+ -1, 1, 3, 0, -4, -6, 0, 6, -2, 1, 4, 1, -4, -6, -1, 7,
+ -3, 1, 4, 2, -3, -6, -1, 6, -2, 0, 3, 2, -2, -5, -1, 4,
+ 1, -1, -2, 1, 4, 4, -1, -7, 1, -1, -4, -1, 5, 6, 0, -6,
+ 3, 0, -4, -3, 3, 6, 2, -4, 3, 0, -5, -4, 1, 4, 1, -3,
+ 2, 2, 3, 3, 3, 3, 2, 2, -4, -5, -6, -7, -7, -7, -7, -6,
+ 1, 2, 3, 3, 3, 3, 2, 2, 0, 0, 1, 1, 1, 2, 2, 1,
+ 3, -3, -3, 3, 4, -2, -2, 2, 3, -4, -4, 4, 4, -4, -4, 2,
+ 4, -4, -4, 4, 4, -4, -3, 3, 3, -3, -4, 3, 3, -3, -3, 3,
+ -2, -2, -2, -2, -2, -2, -1, -1, 6, 7, 8, 8, 8, 7, 6, 5,
+ -5, -6, -7, -7, -8, -7, -6, -5, 1, 1, 2, 2, 2, 2, 1, 1,
+ 0, 0, 0, 0, 0, -1, 0, 0, -1, 0, 0, 0, 0, -1, 0, 0,
+ -2, -3, -2, -2, -2, -3, -3, -3, 2, 3, 5, 6, 4, 2, 1, 0,
+ 8, 6, 2, 0, 0, 0, 0, 0, 4, 1, 0, 0, 0, -1, -1, -1,
+ 1, -1, 0, 0, 0, -1, -2, -3, -2, -2, -1, 0, 0, -2, -4, -5,
+ 3, 1, -1, -2, -3, -4, -5, -5, 2, 1, 0, 0, 1, 1, 0, 0,
+ 0, -1, -1, 0, 2, 2, 2, 2, -1, -2, -1, 1, 2, 2, 2, 2,
+ 0, -1, -2, -1, -1, -1, -1, 0, -1, -2, -2, -1, -1, 0, 0, 1,
+ 2, 1, 1, 2, 2, 1, 1, 0, 6, 5, 3, 1, 0, -2, -4, -4,
+ -3, -2, -1, 0, 1, 1, 0, -1, 0, 1, 3, 4, 5, 5, 3, 1,
+ -1, -1, -1, 0, 1, 0, -1, -2, -2, -2, -2, -1, 0, -1, -2, -3,
+ 0, -1, -2, -2, -1, -1, 0, 2, 1, -1, -2, -1, -1, -1, 0, 2,
+ 1, 0, -2, -2, -2, -2, 1, 5, 1, -1, -2, -2, -2, 0, 5, 10,
+ 0, 0, 0, 0, 0, -1, -1, -1, -1, -1, -1, 0, 0, 0, 1, 2,
+ 1, 2, 2, 3, 4, 4, 6, 5, -3, -3, -3, -2, -2, -3, -3, -3,
+ 1, -1, -2, -2, 0, 3, 5, 7, 2, 0, -2, -3, -2, 0, 2, 3,
+ 3, 1, -2, -3, -3, -2, -1, -1, 3, 1, 0, -1, -1, -1, -1, -1,
+ 1, 3, 5, 4, 2, -1, -3, -4, -3, -2, 1, 2, 1, 0, -1, -2,
+ -5, -3, 0, 2, 2, 1, 0, 0, -3, -1, 1, 2, 2, 1, 0, 0,
+ 0, -1, -1, -1, 1, 2, 3, 4, -3, -4, -4, -3, -1, 0, 0, 1,
+ -2, -3, -2, -1, 1, 1, 1, 1, -2, -2, 0, 3, 4, 4, 3, 2,
+ -4, -4, -3, -2, -1, 1, 2, 3, 0, 1, 1, 1, -1, -2, -3, -3,
+ 3, 4, 5, 4, 2, -1, -3, -3, -2, -2, 0, 2, 2, 2, 1, 0,
+ -4, 0, 5, 7, 4, -1, -4, -4, -1, 2, 4, 3, 0, -3, -3, -2,
+ 2, 1, 0, -1, -2, -2, 0, 1, 0, 0, -1, -2, -2, -1, 1, 2,
+ -4, -3, -2, -1, 0, 1, 2, 2, 10, 9, 5, 0, -3, -4, -3, -2,
+ 1, -1, -2, -2, -1, 0, 0, 0, -2, -2, -1, 1, 1, 1, 0, -1,
+ -5, -3, 0, 3, 4, 2, 0, -2, -2, -1, 0, 1, 1, 0, -1, -1,
+ 3, 2, -1, -2, -2, -1, 1, 1, 7, 5, -1, -5, -6, -2, 2, 4,
+ -2, 3, 3, -3, -4, 1, 2, -2, -3, 3, 4, -3, -4, 2, 3, -2,
+ -3, 3, 4, -3, -4, 2, 3, -2, -4, 2, 4, -2, -3, 1, 2, -1,
+ 4, 3, -1, -3, -3, -1, 1, 2, -4, -6, -4, 0, 4, 5, 4, 1,
+ 0, 2, 5, 6, 2, -3, -5, -4, 1, 1, -1, -3, -5, -2, 2, 4,
+ -1, 0, 1, 2, 2, 3, 3, 4, -1, 0, 1, 1, 0, -1, -1, -1,
+ -1, 0, 1, 2, 2, 1, -1, -2, -3, -2, -1, 0, 0, -1, -2, -3,
+ 1, 1, 1, 1, 0, 0, 1, 2, 1, 0, -1, 0, 0, 1, 1, 0,
+ 1, -2, -4, -1, 1, 2, 1, 0, 1, -4, -7, -3, 1, 3, 2, 1,
+ 1, 1, 1, 1, 1, 1, 0, -1, 1, 1, 1, 0, 1, 2, 2, 0,
+ 1, 1, 0, 0, 0, 2, 0, -3, 3, 2, 0, -1, -1, -2, -6, -9,
+ 0, 0, 0, 1, 0, 0, 1, 2, 1, 0, 0, 0, -1, -1, 0, 2,
+ 0, 1, 1, 1, -1, -3, -2, 0, -7, -5, 1, 6, 6, 2, -1, -1,
+ 3, 1, -1, -3, -4, -2, 1, 4, 2, 0, -2, -3, -4, -3, -1, 2,
+ 2, 2, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1,
+ -1, 1, 1, -2, -5, -6, -4, -1, -1, 1, 4, 3, 2, 0, 1, 2,
+ -1, 0, 2, 3, 1, 0, 0, 1, -1, 0, 1, 0, 0, -1, -1, 0,
+ 0, 1, 2, 2, 0, -2, -1, 1, -2, -1, -1, -2, -1, 2, 6, 8,
+ -1, -1, -2, -3, -2, 0, 1, 2, -1, 0, 0, -1, -1, 0, -1, -1,
+ 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, -1, -1, 1,
+ -1, 0, 2, 2, -1, -3, -2, 3, 0, 2, 3, 0, -5, -7, -2, 4,
+ -1, 0, 0, 0, -1, -2, -3, -3, -1, 0, -1, -2, -2, -2, -2, -2,
+ 1, 1, 0, 0, 1, 2, 0, -1, 1, 2, 1, 2, 5, 6, 2, 0,
+ -2, -4, -3, 0, 2, 2, 0, -3, 3, 1, 0, 1, 2, 1, -2, -3,
+ 3, 1, 0, 0, 0, 0, 0, -1, 1, -1, -2, -2, -1, 1, 3, 3,
+ 3, 2, 1, 2, 4, 3, 1, -2, -2, -4, -4, -3, -1, 0, -2, -3,
+ 1, 0, -1, -1, 0, 1, 0, -1, 3, 2, 0, 0, 0, 1, 1, 0,
+ 1, 1, 0, 0, 0, 0, 0, 0, 2, 3, 3, 2, 2, 2, 1, 1,
+ 0, -1, -2, -3, -5, -5, -5, -4, 1, 1, 0, -1, 0, 1, 3, 3,
+ -9, -6, -2, 0, 1, 1, 2, 2, -6, -2, 1, 2, 1, 1, 0, 1,
+ -2, 1, 2, 2, 1, 1, 1, 1, 0, 2, 2, 1, 0, 1, 1, 1,
+ 1, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, -1, -3, -2, 0,
+ -3, -3, -3, -2, -1, 3, 7, 9, 1, 2, 2, 2, 0, -2, -4, -3,
+ 2, 0, -2, -1, 3, 4, -1, -6, 1, 0, -2, -3, -1, 3, 3, 0,
+ 0, 3, 3, 0, -2, -1, 1, 1, -6, -1, 3, 2, -1, -2, 0, 1,
+ 5, 3, 0, -2, -3, 0, 2, 1, 1, 1, 2, 2, 0, -2, -4, -7,
+ -3, -2, 1, 2, 2, 1, -1, -4, 2, 2, 0, -2, -2, 0, 2, 2,
+ 0, 0, -2, -3, -2, -1, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2,
+ -2, -1, 0, 1, 0, 1, 2, 3, -4, -2, 0, 0, -1, 0, 2, 3,
+ -2, -2, -2, -1, -1, 0, 2, 4, 0, 0, 0, 0, -1, -1, 0, 1,
+ 0, -1, -1, -1, -1, -1, 0, 0, 6, 4, 2, 0, -1, -2, -1, -1,
+ 0, 1, 1, 1, 1, -1, -5,-10, 1, 1, 1, 1, 1, 1, 0, -4,
+ 1, 0, 1, 1, 1, 1, 1, -1, 2, 1, 1, 1, 0, 0, 0, 0,
+ -3, 1, 4, 3, 3, 1, -1, 0, -4, 0, 1, 0, -1, 0, 0, 0,
+ -5, 0, 2, 1, 1, 1, 0, -1, -1, 2, 1, -2, -2, -1, 0, -1,
+ 2, 4, 5, 3, 0, -1, 1, 2, 0, 0, 1, 0, -2, -2, -1, -1,
+ -2, -2, -2, -2, -3, -2, -1, 0, 0, 0, 1, 0, 0, 0, 1, 2,
+ 0, -2, -2, -3, -1, 2, 2, -1, 1, 0, 0, 0, 1, 5, 3, -2,
+ -1, -1, 0, -1, 0, 2, 0, -5, -1, 0, 1, 0, 0, 2, 2, -2,
+ 3, 1, -1, -1, 0, 1, 1, 2, 1, 0, 0, 1, 1, 1, 1, 1,
+ -10, -8, -2, 1, 2, 1, 1, 1, -1, 1, 2, 1, 0, 0, 0, 0,
+ -1, -1, 0, 1, 2, 2, 2, 1, -1, -1, -1, 0, -1, -3, -5, -4,
+ 1, 1, 2, 1, 1, 0, 0, 2, -1, -2, -1, -1, -1, 0, 2, 4,
+ -3, -7, -5, 0, 2, 0, 0, 0, 3, -1, -2, 1, 2, 1, 1, 2,
+ 1, -2, -1, 1, 2, 1, 0, 1, 0, -1, 0, 3, 2, -1, -1, -1,
+ 2, 1, 1, 0, 0, 0, 0, 0, -9, -7, -2, 3, 3, 2, 1, 1,
+ 3, 2, 0, -2, -2, -1, 1, 1, 0, -1, 0, 0, 1, 1, 0, 0,
+ -2, -1, 1, 1, 1, 0, 0, 0, 1, 2, 1, -2, -4, -3, 1, 2,
+ 1, 2, 1, -2, -3, 0, 3, 1, -1, -1, 0, 0, 1, 3, 0, -4,
+ 2, 0, -1, 1, 2, -2, -2, 3, 2, 0, -1, 2, 3, -2, -4, 1,
+ 0, 1, 1, 1, 2, -2, -6, -2, -1, 0, 0, 0, 2, 0, -2, -1,
+ -1, -1, 1, 2, 1, -2, -3, -2, 3, -1, -2, -1, -1, 0, 1, 2,
+ 10, 4, 0, 0, -1, -2, -2, -1, 3, -1, -2, -1, 0, -1, -1, 0,
+ -5, 2, 7, 1, -4, -2, 1, 0, -2, 2, 3, -1, -3, 0, 2, 0,
+ 2, 1, 0, 0, 1, 1, -1, -2, 1, -2, -2, -1, -1, -2, 0, 0,
+ 0, 3, -2, -7, -1, 3, 0, 0, 1, 3, -3, -5, 2, 3, -1, 0,
+ 0, 2, -2, -2, 4, 2, -2, 0, -1, 1, -1, 0, 2, -1, -2, 1,
+ 4, 0, -3, -4, -2, 1, 2, 1, 0, 0, 3, 5, 3, 1, -1, -2,
+ 1, 1, 1, -1, -3, -1, 1, 1, 1, -1, -2, -2, 0, 0, -1, -2
+};
+
+/* 6x16-entry codebook for intra-coded 8x8 vectors */
+DECLARE_ALIGNED(4, static const int8_t, svq1_intra_codebook_8x8)[6144] = {
+ 4, 4, 3, 2, 2, 1, 0, -1, 4, 3, 3, 2, 1, 0, -1, -1,
+ 3, 3, 2, 2, 1, 0, -1, -2, 3, 2, 2, 1, 0, -1, -2, -3,
+ 2, 2, 1, 0, -1, -1, -2, -3, 2, 1, 0, 0, -1, -2, -3, -4,
+ 1, 0, 0, -1, -2, -3, -4, -4, 0, 0, -1, -2, -2, -3, -4, -4,
+ 2, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 3, 3,
+ 1, 2, 2, 2, 2, 2, 2, 2, 0, 1, 1, 1, 1, 1, 1, 1,
+ -1, 0, 0, 0, 0, 0, 1, 1, -2, -2, -1, -1, -1, -1, -1, -1,
+ -3, -3, -3, -3, -3, -3, -2, -2, -5, -4, -4, -4, -4, -4, -4, -3,
+ -4, -2, -1, 0, 1, 2, 2, 3, -4, -2, -1, 0, 1, 2, 3, 3,
+ -4, -3, -1, 0, 1, 2, 3, 3, -4, -3, -1, 0, 1, 2, 3, 3,
+ -5, -3, -1, 0, 1, 2, 3, 3, -5, -3, -1, 0, 1, 2, 3, 3,
+ -5, -3, -1, 0, 1, 1, 2, 3, -5, -3, -2, -1, 0, 1, 2, 3,
+ 4, 4, 5, 5, 6, 6, 7, 7, 2, 2, 2, 3, 3, 4, 4, 4,
+ 0, 0, 0, 0, 1, 1, 1, 2, -2, -2, -2, -2, -1, -1, -1, 0,
+ -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
+ -1, -2, -2, -2, -2, -2, -2, -2, -1, -1, -1, -1, -2, -2, -2, -2,
+ 5, 3, 1, -1, -2, -3, -3, -3, 5, 3, 1, -1, -2, -3, -3, -3,
+ 5, 3, 1, -1, -2, -3, -3, -3, 5, 3, 1, -1, -2, -3, -3, -3,
+ 5, 4, 1, 0, -2, -3, -3, -3, 6, 4, 2, 0, -2, -2, -3, -3,
+ 6, 4, 2, 0, -1, -2, -2, -3, 6, 4, 2, 1, -1, -2, -2, -2,
+ -1, 1, 3, 3, 2, 0, -3, -6, -1, 1, 3, 4, 3, 0, -3, -6,
+ -1, 1, 4, 4, 3, 1, -3, -6, -1, 1, 3, 4, 3, 1, -3, -6,
+ -2, 1, 3, 4, 3, 1, -3, -6, -2, 1, 3, 4, 3, 1, -3, -7,
+ -2, 1, 3, 3, 2, 0, -3, -7, -2, 0, 2, 3, 2, 0, -3, -6,
+ 10, 9, 8, 6, 6, 5, 4, 4, 6, 5, 4, 3, 2, 2, 2, 1,
+ 2, 1, 0, -1, -2, -2, -2, -1, -1, -2, -3, -4, -4, -4, -4, -3,
+ -2, -3, -4, -4, -5, -4, -4, -3, -2, -2, -3, -3, -3, -3, -2, -2,
+ -1, -1, -1, -1, -1, -1, -1, 0, 1, 1, 1, 1, 1, 1, 1, 2,
+ -2, -1, 1, 2, 4, 5, 7, 8, -3, -2, 0, 1, 3, 5, 7, 8,
+ -4, -3, -1, 0, 2, 4, 6, 7, -5, -4, -2, -1, 1, 3, 5, 7,
+ -6, -5, -3, -2, 0, 2, 4, 6, -6, -5, -4, -2, -1, 1, 3, 5,
+ -7, -6, -5, -3, -2, 0, 2, 3, -8, -7, -5, -4, -3, -1, 1, 2,
+ 11, 9, 7, 5, 3, 1, -1, -1, 10, 8, 6, 3, 1, 0, -2, -2,
+ 9, 7, 5, 2, 0, -2, -3, -4, 8, 6, 3, 1, -1, -3, -4, -4,
+ 6, 4, 2, -1, -3, -4, -5, -5, 5, 3, 0, -2, -4, -5, -6, -6,
+ 3, 1, -1, -3, -5, -6, -7, -7, 2, 0, -2, -4, -6, -6, -7, -7,
+ 5, 6, 7, 7, 7, 8, 8, 8, 3, 4, 5, 5, 6, 6, 6, 6,
+ 0, 2, 2, 3, 4, 4, 4, 5, -2, -1, 0, 1, 2, 2, 3, 3,
+ -4, -3, -2, -1, 0, 1, 1, 2, -6, -5, -4, -3, -2, -2, -1, 0,
+ -8, -7, -6, -6, -5, -4, -3, -3,-10, -9, -8, -8, -7, -6, -6, -5,
+ 6, 5, 3, 1, -1, -3, -6, -8, 6, 5, 4, 2, -1, -3, -6, -8,
+ 6, 5, 4, 2, 0, -3, -6, -8, 6, 5, 4, 2, 0, -3, -6, -8,
+ 6, 6, 4, 2, 0, -3, -6, -8, 6, 5, 4, 2, 0, -3, -6, -8,
+ 6, 5, 4, 2, 0, -3, -6, -8, 6, 5, 4, 2, -1, -3, -5, -8,
+ 11, 10, 9, 8, 7, 6, 5, 4, 8, 8, 7, 6, 5, 4, 3, 2,
+ 6, 5, 4, 4, 2, 2, 1, 0, 3, 3, 2, 1, 0, 0, -1, -2,
+ 1, 1, 0, -1, -2, -2, -3, -3, -1, -1, -2, -3, -4, -4, -5, -5,
+ -3, -4, -4, -5, -6, -6, -7, -7, -5, -5, -6, -7, -8, -8, -8, -8,
+ -14,-13,-12,-11, -9, -7, -6, -4,-12,-11,-10, -9, -7, -5, -3, -1,
+ -10, -9, -7, -6, -3, -2, 0, 2, -8, -6, -4, -2, 0, 2, 4, 5,
+ -5, -3, 0, 2, 4, 5, 7, 8, -2, 0, 2, 4, 6, 8, 9, 10,
+ 0, 3, 5, 7, 8, 10, 11, 12, 3, 5, 7, 8, 10, 11, 12, 12,
+ -19,-19,-18,-18,-17,-16,-15,-14,-15,-15,-14,-13,-12,-11,-10, -9,
+ -11,-10, -9, -8, -6, -5, -4, -3, -6, -5, -3, -2, -1, 0, 1, 2,
+ -1, 0, 2, 3, 4, 5, 6, 6, 4, 6, 7, 8, 9, 10, 10, 10,
+ 9, 10, 11, 12, 13, 14, 14, 14, 12, 14, 14, 15, 16, 16, 16, 16,
+ 22, 21, 19, 17, 14, 11, 9, 5, 20, 19, 17, 14, 11, 8, 4, 1,
+ 17, 15, 13, 10, 6, 3, 0, -4, 13, 11, 8, 5, 1, -2, -5, -9,
+ 9, 6, 3, -1, -4, -7,-11,-13, 4, 0, -3, -6, -9,-12,-15,-17,
+ -2, -5, -8,-11,-14,-16,-18,-20, -8,-10,-13,-16,-17,-19,-21,-22,
+ 17, 18, 18, 18, 17, 16, 16, 14, 16, 16, 15, 15, 14, 13, 12, 11,
+ 12, 12, 11, 10, 9, 8, 7, 5, 7, 6, 6, 4, 3, 2, 1, -1,
+ 1, 0, -1, -2, -3, -4, -5, -6, -5, -6, -7, -8, -9,-10,-11,-12,
+ -11,-12,-13,-14,-15,-16,-16,-17,-16,-17,-17,-18,-19,-20,-20,-20,
+ 0, 0, 0, 0, -1, -1, -2, -3, 1, 0, 0, 0, 0, -1, -2, -3,
+ 1, 1, 0, 0, -1, -1, -2, -2, 1, 1, 1, 0, 0, -1, -1, -2,
+ 2, 1, 1, 1, 0, -1, -1, -2, 2, 2, 1, 1, 0, 0, -1, -2,
+ 2, 2, 1, 1, 1, 0, -1, -1, 2, 2, 1, 1, 1, 0, 0, -2,
+ 0, -1, -1, 0, 0, 1, 2, 3, 0, -1, -1, 0, 1, 1, 2, 2,
+ -1, -1, -1, -1, 0, 1, 2, 2, -1, -1, -2, -1, 0, 1, 1, 2,
+ -1, -2, -2, -1, 0, 0, 1, 2, -1, -2, -2, -2, -1, 0, 1, 2,
+ -1, -1, -2, -1, 0, 0, 1, 2, -1, -1, -1, -1, 0, 1, 1, 2,
+ 3, 2, 2, 2, 1, 1, 0, 0, 3, 2, 2, 2, 2, 1, 0, 0,
+ 2, 2, 2, 1, 1, 1, 0, 0, 2, 2, 1, 1, 1, 0, 0, -1,
+ 1, 1, 1, 0, 0, 0, -1, -1, 0, 0, -1, -1, -1, -1, -1, -1,
+ -2, -2, -2, -2, -2, -2, -2, -2, -2, -3, -3, -3, -2, -2, -2, -2,
+ 5, 2, 0, 0, -1, 0, 0, 0, 4, 2, 0, -1, -1, -1, 0, -1,
+ 4, 1, -1, -1, -2, -1, -1, -1, 4, 1, -1, -1, -2, -1, -1, -1,
+ 4, 1, -1, -2, -2, -1, -1, -1, 4, 1, -1, -2, -2, -1, -1, -1,
+ 4, 1, -1, -1, -1, -1, -1, -1, 4, 2, 0, -1, 0, 0, 0, -1,
+ -2, -1, 0, 1, 1, 1, 1, 1, -3, -1, 0, 1, 1, 1, 1, 1,
+ -3, -1, 0, 1, 1, 1, 1, 1, -3, -1, 0, 1, 1, 1, 1, 1,
+ -3, -2, 0, 1, 2, 2, 1, 1, -4, -2, 0, 1, 2, 2, 2, 2,
+ -5, -3, -1, 1, 1, 2, 1, 2, -5, -3, -2, 0, 1, 1, 1, 1,
+ 3, 3, 1, 0, -2, -4, -4, -5, 3, 3, 2, 0, -1, -2, -3, -4,
+ 2, 2, 1, 1, 0, -1, -2, -2, 1, 1, 1, 1, 1, 0, 0, 0,
+ 0, 0, 0, 1, 1, 1, 1, 1, -2, -1, -1, 0, 0, 1, 2, 2,
+ -3, -2, -2, -1, 0, 1, 2, 3, -3, -3, -2, -1, 0, 1, 2, 3,
+ -3, -3, -3, -3, -3, -2, -2, -2, -3, -3, -2, -2, -2, -1, -1, -1,
+ -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 2, 2, 2, 2,
+ 1, 1, 1, 2, 2, 3, 3, 3, 2, 2, 2, 2, 3, 3, 3, 3,
+ -8, -7, -5, -3, -2, -1, 0, -1, -4, -3, -1, 0, 1, 2, 1, 1,
+ -1, 1, 2, 3, 3, 2, 2, 1, 1, 2, 3, 3, 2, 2, 1, 0,
+ 2, 3, 3, 2, 1, 0, 0, -1, 1, 2, 1, 0, -1, -1, -1, -1,
+ 1, 1, 0, -1, -1, -2, -2, -1, 1, 1, 0, 0, -1, -1, 0, -1,
+ -4, -3, -2, 0, 1, 2, 3, 3, -4, -3, -2, 0, 1, 2, 2, 2,
+ -3, -3, -2, -1, 0, 1, 1, 1, -2, -2, -2, -1, -1, 0, 0, 0,
+ 0, -1, -1, -1, -1, -1, -1, -1, 2, 1, 1, 0, 0, -1, -1, -2,
+ 3, 3, 3, 1, 0, -1, -2, -2, 5, 4, 4, 2, 1, 0, -1, -2,
+ 0, 0, 0, 0, 1, 2, 3, 3, 0, -1, 0, 0, 1, 2, 3, 3,
+ 0, -1, 0, 0, 1, 2, 3, 2, 0, 0, 0, 1, 1, 2, 2, 2,
+ 2, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 1, 0, 0, -1, -2,
+ 2, 1, 0, 0, -2, -3, -5, -6, 0, -1, -1, -3, -5, -6, -8, -9,
+ -2, 0, 1, 2, 2, 1, -1, -4, -2, 0, 2, 2, 2, 1, -1, -4,
+ -2, 0, 2, 2, 2, 1, -1, -3, -2, 0, 2, 2, 2, 1, -1, -3,
+ -2, -1, 2, 2, 2, 1, -1, -3, -2, -1, 1, 2, 2, 1, -1, -3,
+ -3, -1, 1, 2, 2, 1, -1, -3, -2, -1, 1, 2, 2, 1, -1, -3,
+ -1, 1, 1, -1, -3, -3, 0, 4, -1, 1, 1, -1, -3, -3, 0, 4,
+ -1, 1, 1, 0, -3, -3, 0, 4, -1, 1, 2, 0, -3, -3, 0, 5,
+ 0, 1, 2, 0, -3, -4, 0, 4, 0, 1, 2, 0, -3, -4, 0, 5,
+ 0, 1, 2, 0, -3, -3, 0, 4, 0, 1, 2, -1, -2, -2, 0, 4,
+ 6, 6, 5, 6, 5, 5, 5, 5, 2, 2, 2, 2, 2, 2, 2, 2,
+ 0, 0, 0, 0, 0, 0, 0, 0, -1, -1, -1, -1, -2, -2, -2, -2,
+ -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
+ -1, -1, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 0, 1, 1, 0, 0, 0, 0, 0,
+ -1, -2, -2, -2, -2, -2, -2, -1, -3, -3, -3, -3, -3, -3, -3, -2,
+ -3, -4, -4, -3, -3, -3, -2, -2, -2, -2, -2, -2, -1, -1, 0, 0,
+ 0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6,
+ 4, 1, -2, -3, -3, -1, 1, 3, 4, 1, -2, -4, -3, -1, 1, 3,
+ 5, 1, -2, -4, -3, -1, 1, 4, 5, 1, -2, -3, -3, -1, 2, 4,
+ 5, 1, -2, -3, -3, -1, 2, 4, 4, 0, -3, -4, -3, -1, 2, 4,
+ 4, 0, -3, -3, -3, -1, 1, 3, 3, 0, -2, -3, -2, -1, 1, 3,
+ -3, -4, -4, -4, -4, -4, -4, -4, -1, -1, -1, -1, -1, -1, -2, -2,
+ 2, 1, 1, 2, 2, 1, 1, 1, 3, 3, 3, 4, 4, 3, 3, 3,
+ 3, 3, 3, 4, 4, 4, 3, 3, 1, 2, 1, 2, 2, 2, 2, 2,
+ -2, -2, -2, -1, -1, -1, 0, 0, -4, -4, -4, -4, -3, -3, -3, -3,
+ -1, -2, -3, -3, -2, -2, -1, 0, 0, -1, -2, -2, -2, -1, 0, 1,
+ 2, 1, -1, -1, -1, -1, 0, 1, 3, 1, 0, -1, -1, 0, 0, 1,
+ 3, 2, 0, -1, 0, 0, 0, 1, 3, 1, 0, -1, 0, 0, 0, 1,
+ 3, 1, 0, -1, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 1,
+ 0, 0, 0, 1, 1, 2, 3, 4, 0, 0, -1, 0, 0, 0, 2, 3,
+ 0, -1, -1, -1, -1, -1, 0, 1, 0, -1, -1, -1, -1, -1, -1, 0,
+ 0, 0, -1, -1, -1, -2, -2, -1, 1, 0, 0, -1, -1, -2, -2, -1,
+ 2, 2, 1, 0, -1, -1, -1, -1, 3, 3, 2, 1, 0, -1, -1, 0,
+ 1, 0, 1, 0, 0, -1, -2, -1, 0, 0, 0, 0, -1, -1, -2, -1,
+ 0, -1, 0, 0, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0,
+ -1, -1, -1, 0, 0, 0, 1, 1, -1, -1, -1, 0, 1, 1, 2, 3,
+ -2, -2, -1, 0, 1, 2, 3, 4, -2, -2, -1, 0, 1, 2, 4, 5,
+ -3, -1, 1, 0, 0, -1, 0, 1, -3, 0, 1, 0, -1, -1, 0, 2,
+ -3, 0, 1, 0, -1, -1, 0, 2, -2, 1, 2, 0, -1, -1, 0, 2,
+ -2, 1, 2, 0, -1, -1, 0, 2, -2, 1, 2, 0, -1, -1, 0, 2,
+ -1, 2, 2, 0, -1, -1, 0, 2, -1, 1, 1, 0, -1, -1, -1, 1,
+ -2, -2, -1, 1, 3, 4, 3, 1, -2, -2, -1, 0, 2, 3, 2, 0,
+ -2, -2, -1, 0, 1, 2, 1, -1, -1, -1, -1, 0, 1, 2, 1, -1,
+ -1, -1, -1, 0, 1, 1, 0, -2, 0, -1, -1, 0, 1, 1, 0, -1,
+ 0, -1, -1, 0, 1, 1, 1, -1, 0, -1, -1, 0, 0, 1, 0, -1,
+ -2, -1, 0, 1, 1, 1, 1, 1, -2, -1, 0, 0, 0, 0, 0, 0,
+ -2, -1, -1, 0, -1, -1, -2, -2, -2, -1, -1, -1, -1, -2, -2, -3,
+ -1, 0, 1, 1, 0, -1, -2, -2, 1, 2, 3, 3, 2, 1, 0, 0,
+ 1, 2, 3, 3, 3, 2, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0,
+ 0, -1, -1, -1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, -1, 0, 0, 1, 1, 0, 0, 0,
+ -3, -2, -1, -1, -1, -1, 0, -1, -5, -5, -4, -3, -2, -2, -2, -1,
+ 1, 1, 1, 1, 2, 1, 0, -1, 1, 1, 1, 2, 1, 1, 0, -1,
+ 1, 1, 1, 1, 1, 1, 0, -2, 2, 1, 1, 1, 1, 1, 0, -2,
+ 1, 1, 0, 0, 0, 0, -1, -3, 1, 1, 0, 0, 0, -1, -2, -3,
+ 1, 1, 0, 0, -1, -1, -2, -4, 1, 0, 0, -1, -2, -2, -3, -4,
+ 8, 7, 5, 3, 2, 1, 1, 1, 2, 1, 0, 0, -1, -1, -2, -1,
+ -1, -1, -1, -2, -2, -2, -2, -1, -1, -1, -1, -1, 0, -1, -1, 0,
+ 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
+ -1, 0, 0, 0, 0, 0, -1, -1, -2, -2, -1, -1, -1, -2, -2, -1,
+ 9, 4, 0, -2, -2, -2, -1, -1, 7, 2, -1, -2, -2, -1, 0, 0,
+ 4, 0, -2, -2, -1, 0, 1, 1, 1, -2, -2, -2, -1, 0, 1, 1,
+ -1, -2, -2, -1, 0, 1, 1, 1, -1, -2, -1, 0, 1, 1, 1, 0,
+ -1, -1, 0, 1, 1, 1, 0, -1, 0, -1, 0, 1, 0, 0, -1, -1,
+ 0, 1, 1, 1, 1, 1, 0, 0, 1, 2, 2, 2, 1, 0, 0, 0,
+ 2, 2, 2, 2, 1, 0, -1, -1, 1, 1, 1, 0, -1, -2, -2, -2,
+ 0, 0, 0, -1, -2, -3, -2, -2, -1, -1, -1, -2, -2, -2, -1, 0,
+ -1, -1, -1, -1, 0, 0, 1, 2, -1, -1, -1, 0, 1, 2, 3, 4,
+ -1, -1, 0, 0, -1, -2, -3, -3, -1, -1, 0, 0, 0, -1, -1, -1,
+ -2, -2, -1, 0, 1, 1, 1, 1, -2, -2, -2, 0, 1, 2, 3, 3,
+ -1, -1, -1, 0, 1, 3, 3, 3, 1, 0, 0, 0, 1, 1, 2, 2,
+ 2, 2, 1, 0, 0, -1, -1, -1, 3, 2, 1, 0, -1, -2, -3, -3,
+ -1, -1, -1, -2, -2, -3, -4, -5, 0, 0, 0, -1, -1, -3, -3, -4,
+ 1, 1, 1, 0, 0, -1, -2, -3, 2, 2, 2, 1, 1, 0, -1, -1,
+ 2, 2, 2, 2, 1, 1, 0, -1, 2, 2, 2, 2, 2, 1, 0, 0,
+ 1, 1, 2, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, -1,
+ -2, 2, 3, 1, -1, 1, 1, -1, -3, 2, 3, 0, -1, 1, 1, -1,
+ -3, 2, 3, 0, -1, 1, 1, -1, -4, 2, 3, 0, -1, 1, 1, -2,
+ -4, 1, 3, 0, -1, 1, 1, -2, -4, 1, 3, -1, -2, 1, 1, -2,
+ -3, 1, 2, 0, -1, 1, 1, -2, -3, 1, 2, 0, -1, 1, 1, -1,
+ -1, -1, -1, -2, -2, -2, -2, -2, 1, 1, 1, 1, 0, 0, 0, 0,
+ 1, 2, 2, 2, 2, 2, 2, 2, 0, 0, 1, 1, 1, 2, 2, 2,
+ -2, -2, -1, -1, -1, 0, 0, 0, -3, -3, -3, -3, -3, -3, -3, -2,
+ -1, -1, -1, -1, -2, -2, -2, -2, 4, 4, 4, 4, 4, 3, 3, 2,
+ -3, -3, -2, -1, 0, 1, 2, 5, -3, -3, -3, -2, -1, 1, 3, 6,
+ -3, -3, -2, -2, 0, 2, 3, 5, -3, -2, -2, -2, 0, 1, 3, 5,
+ -2, -2, -2, -1, -1, 1, 3, 5, -2, -2, -1, -1, 0, 1, 2, 4,
+ -1, -1, -1, -1, 0, 1, 1, 4, -1, -1, -1, -1, 0, 1, 2, 3,
+ 0, -1, 0, 1, 1, 0, -1, -1, 0, 0, 0, 1, 2, 0, -1, -1,
+ 1, 0, -1, 0, 1, 0, 0, 0, 1, -1, -2, -1, 0, 0, 0, 0,
+ 1, -2, -3, -1, 0, 0, 0, 1, 1, -1, -3, -2, 0, 1, 1, 2,
+ 1, -1, -2, -1, 0, 1, 1, 2, 2, 0, -1, 0, 1, 1, 2, 2,
+ 1, 1, 1, 1, 0, 0, 1, 2, -1, 0, 0, -1, 0, 0, 0, 1,
+ -3, -2, -1, -1, -1, 0, 1, 1, -4, -2, -1, 0, 0, 1, 1, 1,
+ -3, -2, 0, 0, 1, 1, 1, 1, -3, -1, 0, 1, 1, 1, 0, 0,
+ -1, 0, 1, 1, 1, 0, 0, -1, 0, 1, 2, 2, 1, 0, 0, -1,
+ -4, -4, -4, -3, -2, -1, -1, -1, -2, -2, -2, -1, 0, 0, 0, 0,
+ -1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1,
+ 0, 0, 1, 1, 2, 2, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0,
+ 0, 0, 0, 1, 1, 1, 1, 0, -1, 0, 0, 1, 1, 1, 0, 0,
+ 1, 2, 2, 2, 1, -1, -2, -4, 1, 1, 2, 2, 1, 0, -2, -4,
+ 0, 1, 1, 1, 1, 0, -1, -3, -1, 0, 1, 1, 0, 0, -1, -2,
+ -1, 0, 1, 1, 1, 0, 0, -1, -2, -1, 0, 0, 0, 0, 0, -1,
+ -1, -1, 0, 1, 1, 0, 0, 0, -1, 0, 1, 1, 1, 1, 1, 0,
+ 2, 2, 0, -1, -2, -1, -1, -2, 1, 1, -1, -2, -2, -1, -1, -2,
+ 1, 1, -1, -2, -2, 0, 0, -1, 1, 1, 0, -2, -1, 1, 1, 0,
+ 1, 1, 0, -1, -1, 1, 2, 1, 1, 1, 0, -1, -1, 1, 2, 1,
+ 1, 1, 0, -1, -1, 1, 1, 1, 1, 1, 0, -1, 0, 1, 1, 1,
+ 0, 0, -1, -2, -4, -4, -4, -4, 3, 3, 3, 2, 1, 0, 0, 0,
+ 3, 3, 3, 3, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 1,
+ -1, -1, -1, -1, -1, -1, -1, 0, 0, -1, 0, 0, -1, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, -1, 0,
+ -1, -1, 0, -1, -1, 1, 2, -1, 1, 1, 0, 0, 0, 2, 3, -1,
+ 1, 1, 0, -1, -1, 1, 3, -1, 1, 1, 0, -2, -2, 0, 1, -2,
+ 1, 0, 0, -2, -2, 0, 1, -3, 0, 0, 0, 0, -1, 1, 1, -3,
+ 0, 1, 1, 0, 1, 2, 1, -3, -1, 0, 1, 1, 1, 2, 1, -4,
+ -4, -3, 0, 1, 1, 1, 0, 0, -4, -2, 0, 1, 1, 1, 0, -1,
+ -3, -1, 1, 1, 1, 0, -1, -1, -1, 1, 1, 1, 1, 0, -1, 0,
+ 1, 2, 2, 1, 0, -1, 0, 0, 2, 2, 1, 0, -1, -1, 0, 1,
+ 2, 1, 0, -1, -2, -1, 0, 1, 2, 2, 0, -1, -2, -1, 1, 1,
+ 1, 1, 0, 0, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, -1, -1, -1, -1, -1, -1,
+ 1, 0, 0, -1, -1, -1, -1, -1, 2, 1, 0, 0, -1, -1, -1, -1,
+ 5, 3, 2, 1, 0, 0, 0, 0, 6, 5, 3, 2, 1, 0, 0, 0,
+ 4, 4, 3, 1, 0, 0, 0, 1, 3, 3, 2, 1, 0, 0, 0, 1,
+ 2, 2, 1, 0, -1, -1, 0, 1, 0, 0, 0, -1, -1, -1, 0, 1,
+ 0, 0, -1, -1, -2, -1, 0, 2, 0, -1, -1, -2, -2, -2, 0, 1,
+ 0, -1, -1, -2, -2, -2, -1, 0, 0, 0, -1, -2, -2, -2, -1, 0,
+ 0, 0, -1, -1, -1, 0, 2, 3, 0, -1, -2, -2, -1, -1, 1, 2,
+ 1, 0, -1, -1, -1, 0, 0, 0, 1, 1, 1, 0, 0, 0, -1, -1,
+ 1, 2, 1, 0, 0, -1, -1, -1, -1, 0, 0, 0, -1, -1, -1, -1,
+ -3, -2, -1, -1, 0, 1, 1, 2, -4, -3, -1, 1, 2, 3, 5, 5,
+ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
+ 0, 0, 0, -1, 0, 0, 0, 1, -1, -1, -2, -2, -2, -1, -1, 0,
+ 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3,
+ 1, 1, 1, 1, 2, 2, 1, 1, -4, -3, -4, -4, -4, -4, -3, -3,
+ -1, 0, 1, 2, 2, 3, 3, 3, -1, -1, -1, -1, 0, 0, 0, 0,
+ 0, 0, -1, -2, -2, -3, -3, -2, 3, 2, 1, 0, -1, -2, -2, -2,
+ 4, 3, 2, 1, 1, 0, 0, 0, 2, 2, 1, 1, 0, 1, 1, 1,
+ 0, -1, -1, -1, -1, 0, 0, 1, -2, -2, -2, -2, -2, -1, 0, 0,
+ 1, -1, 0, 2, 1, -2, -1, 1, 1, -1, 0, 2, 1, -2, -2, 1,
+ 1, -1, 0, 3, 2, -2, -1, 1, 0, -2, 0, 3, 2, -2, -2, 1,
+ 0, -2, 0, 3, 2, -2, -2, 1, 0, -2, 0, 3, 1, -2, -1, 1,
+ 0, -2, 0, 2, 1, -2, -2, 1, 0, -1, 0, 2, 1, -2, -1, 1,
+ 0, 1, 2, 2, 3, 3, 2, 2, 0, 1, 1, 2, 3, 3, 2, 1,
+ 0, 0, 1, 2, 2, 2, 2, 1, -1, 0, 0, 1, 1, 1, 1, 1,
+ -1, -1, 0, 0, 0, 0, 0, 0, -1, -1, -1, -1, -1, -1, -1, -1,
+ -2, -2, -2, -2, -2, -2, -2, -1, -2, -2, -2, -2, -2, -2, -2, -1,
+ 0, 0, -1, -2, -1, 0, 3, 5, 0, 0, -1, -1, -1, 0, 2, 4,
+ 1, 1, 0, 0, -1, -1, 1, 2, 1, 2, 1, 1, 0, -1, -1, 0,
+ 0, 1, 2, 1, 0, -1, -2, -2, -1, 0, 1, 2, 1, 0, -3, -3,
+ -2, -1, 1, 2, 2, 0, -2, -4, -2, -1, 0, 2, 2, 1, -1, -3,
+ 0, 0, 0, 0, 0, 0, -1, -1, 0, 0, -1, 0, 0, 0, 0, 0,
+ -1, -1, -1, -1, 0, 0, 0, 0, -1, -1, -1, -1, -1, -1, -1, 0,
+ -1, -1, -1, -1, -1, -1, -1, 0, -1, 0, 0, 0, 0, -1, -1, 0,
+ 0, 0, 1, 1, 0, 0, 0, 1, 3, 3, 3, 4, 3, 3, 3, 3,
+ 5, 1, -2, -2, 0, 0, 0, -1, 4, -1, -3, -1, 0, 0, 0, -1,
+ 3, -1, -1, 0, 1, 1, 0, -1, 2, 0, 0, 1, 1, 1, 0, -2,
+ 1, 0, 0, 1, 1, 1, 0, -2, 0, -1, -1, -1, 0, 0, 0, -1,
+ 0, -1, -1, -1, -1, 0, 0, -1, 2, 1, 0, 0, 0, 1, 0, 0,
+ 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0,
+ 1, -1, -1, 0, 0, 0, 0, 0, 2, 0, -1, -1, -1, -1, -1, 0,
+ 3, 1, -1, -1, -2, -2, -2, -1, 4, 2, 1, 0, -1, -2, -2, -1,
+ 2, 1, 0, 0, -1, -1, 0, 0, 0, -1, -1, -1, -1, 0, 1, 1,
+ 0, 1, 2, 2, 2, 1, -1, -3, 0, 0, 1, 1, 1, 0, -1, -2,
+ 0, 0, 0, 0, 0, 0, -1, -1, 0, 0, -1, 0, 0, 1, 1, 0,
+ 0, 0, -1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0,
+ 0, 0, 1, 1, 2, 1, -1, -3, 0, 0, 0, 1, 1, -1, -4, -5,
+ -2, -2, -2, -1, 0, 2, 2, 2, 0, 0, 0, 0, 1, 1, 1, 0,
+ 1, 1, 1, 1, 1, 0, -2, -3, 0, 0, 1, 1, 0, -1, -3, -4,
+ -1, -1, 0, 1, 0, 0, -2, -3, -1, -1, 0, 1, 1, 1, 0, -1,
+ 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0,
+ 0, 1, 0, 0, 1, 1, 1, 2, 1, 2, 0, 0, 0, 0, -1, 1,
+ 0, 2, 0, -1, 1, 0, -1, 0, 0, 1, 0, 0, 2, 1, 0, 1,
+ 0, 1, -1, 0, 2, 2, 0, 1, -1, 0, -1, -1, 2, 1, 1, 2,
+ -2, -2, -3, -2, 0, 1, 1, 1, -2, -2, -3, -3, -1, -1, -1, 0,
+ -3, -1, 0, 1, 2, 1, 1, 0, -3, -1, 0, 1, 2, 1, 1, 1,
+ -2, 0, 0, 1, 1, 1, 1, 1, -1, 0, 0, 0, 0, 0, 0, 0,
+ -2, 0, 0, 0, 0, -1, -1, 0, -2, 0, 0, 0, 0, 0, -1, -1,
+ -3, 0, 1, 1, 1, 1, 0, 1, -5, -2, 0, 1, 2, 2, 1, 2,
+ -2, -1, -1, 0, 0, 1, 2, 3, 0, 0, 1, 1, 0, 0, 1, 2,
+ 0, 0, 1, 0, -1, -1, 0, 1, -1, -1, -1, -1, -2, -2, -1, 0,
+ -2, -2, -2, -2, -2, -1, 0, 1, 0, 0, 0, -1, 0, 1, 2, 2,
+ 2, 1, 0, 0, 0, 1, 2, 2, 2, 1, 0, -1, -1, -1, 0, 0,
+ 0, 1, 1, 1, 1, 1, -1, -4, -1, -1, 0, 1, 1, 1, 0, -3,
+ -2, -1, 0, 0, 1, 2, 2, -2, -1, 0, 0, 0, 0, 2, 3, -1,
+ -1, 0, 0, 0, 0, 1, 2, 0, 0, 0, -1, -2, -1, 1, 1, 0,
+ 0, 0, -1, -2, -2, 0, 2, 1, 0, 0, -1, -2, -1, 1, 2, 2,
+ 1, 0, 0, 0, -2, -3, -2, -3, 0, 0, 1, 0, -2, -2, -1, -1,
+ 0, -1, 1, 1, -1, -1, 0, 0, 0, -1, 1, 1, -1, -1, 0, 0,
+ 0, 1, 2, 1, -1, -1, 0, 1, 1, 2, 3, 2, 0, 0, 1, 2,
+ -1, 0, 2, 1, 0, 0, 2, 3, -2, -1, 0, 0, -1, 0, 1, 2,
+ 1, 1, 0, -1, -2, -2, -1, 1, 1, 1, 1, -1, -2, -2, 0, 2,
+ 1, 1, 1, -1, -1, -1, 0, 2, 0, 0, 0, 0, 0, 0, 1, 2,
+ -1, -1, -1, 0, 0, 0, 1, 2, -1, -2, -1, 1, 1, 1, 0, 0,
+ -1, -2, -1, 1, 2, 2, 0, -1, -1, -2, -1, 2, 2, 2, 0, -1,
+ -1, -1, -1, -2, -1, -1, 0, 1, 0, 0, -1, -1, -1, 0, 1, 2,
+ 1, 0, 0, 0, 0, 1, 1, 2, 1, 1, 0, 0, 1, 1, 1, 1,
+ 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, -1, -1, -1,
+ 1, 2, 1, 0, -1, -2, -2, -3, 2, 2, 1, 0, -2, -3, -4, -4,
+ -4, -2, 1, 1, 1, 1, 0, 0, -2, 0, 1, 0, 0, 0, 0, 0,
+ 0, 1, 1, -2, -2, -1, 0, 1, 2, 2, 1, -2, -2, -1, 1, 2,
+ 1, 2, 1, -2, -2, -1, 1, 2, -1, 1, 1, -1, -1, -1, 0, 1,
+ -2, 0, 1, 1, 0, -1, -1, 0, -2, 0, 2, 2, 1, -1, -1, 0,
+ 1, 1, 0, 0, 0, 1, 0, 0, -2, -3, -3, -2, -2, -1, 0, 0,
+ -3, -4, -3, -2, -1, 0, 0, 0, -1, -1, 0, 1, 2, 3, 2, 1,
+ 0, 1, 2, 3, 3, 3, 2, 1, 1, 1, 1, 2, 1, 0, 0, -1,
+ 0, 0, 0, 0, -1, -1, -1, -1, 0, -1, -1, 0, 0, 0, 0, 0,
+ 1, 1, 0, 0, -1, -1, 0, 2, 0, 0, 1, 0, -1, -1, 1, 1,
+ -2, -1, 0, 1, 1, 1, 1, 1, -3, -3, 0, 2, 2, 1, 1, 0,
+ -2, -2, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, -1, -1,
+ 3, 1, -1, -3, -2, -1, 0, 1, 4, 2, -1, -3, -3, -1, 1, 2,
+ 0, 0, 0, -1, -1, -1, -1, -1, 1, 2, 1, 0, 0, 0, -1, -1,
+ 2, 3, 3, 2, 1, 0, -1, -1, 3, 4, 4, 2, 1, 0, -1, -2,
+ 3, 3, 2, 1, 0, -1, -2, -2, 1, 1, 0, -1, -1, -2, -2, -3,
+ 0, 0, 0, -1, -1, -2, -2, -2, -1, -1, -1, -1, -1, -2, -2, -1,
+ 1, 2, 2, 2, 2, 1, 2, 2, 0, 1, 1, 1, 1, 0, 0, 0,
+ 0, 0, 0, 1, 1, 0, -1, -2, 0, 0, 0, 0, 1, 0, -1, -4,
+ 1, 0, 0, 0, 0, 0, -2, -5, 1, 0, 0, 0, 0, 0, -1, -4,
+ 1, 0, -1, 0, 0, 0, -1, -3, 0, -1, -1, 0, 1, 1, 1, -1,
+ -2, -1, 0, 0, -1, -1, -1, -2, -1, 0, 0, 0, -1, -1, -2, -2,
+ 0, 1, 1, 0, -1, -1, -1, -2, 0, 1, 1, 0, 0, 0, -1, -1,
+ 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 2, 2, 1,
+ 1, 1, 0, 0, 1, 2, 2, 1, 1, 1, 0, -1, 0, 1, 1, 0,
+ 4, 2, 1, 0, 0, 1, 1, 1, 4, 2, 1, 0, 0, 0, 0, 1,
+ 3, 1, 0, 0, -1, -1, -1, 0, 1, 0, 0, -1, -1, -2, -1, 0,
+ 0, 0, 0, 0, -1, -1, -1, 0, -1, -1, 0, 0, -1, -1, 0, 1,
+ -2, -1, 0, -1, -1, 0, 0, 1, -2, -2, -1, -2, -1, 0, 0, 1,
+ 0, 1, 1, 1, 2, 1, 0, -1, -1, -1, -1, 0, 0, -1, -2, -2,
+ -1, 0, -1, 0, 0, -1, -2, -1, 0, 0, 0, 0, 0, 0, 1, 2,
+ 0, 0, 0, 0, 0, 0, 2, 3, -1, 0, -1, -1, -1, -1, 0, 3,
+ -1, 0, 0, -1, -1, -2, 0, 3, 0, 0, 0, 0, -1, -1, 1, 4,
+ 2, 2, 0, 0, 0, 0, 0, 1, 1, 1, -1, -2, -1, -2, -1, 1,
+ -1, -1, -2, -2, -2, -3, -2, 0, -1, 0, -1, -1, -1, -2, -1, 1,
+ 1, 1, 0, 0, 1, 0, 0, 1, 2, 2, 0, 0, 1, 0, 0, 1,
+ 2, 2, 0, 0, 0, 0, -1, -1, 2, 2, 0, 0, 1, 0, -1, -1,
+ -1, 0, 1, 1, 0, -1, -1, -1, 1, 2, 3, 2, 1, 0, 0, 0,
+ 0, 1, 1, 1, 0, -1, 0, 0, -2, -2, -1, 0, 1, 0, 0, 0,
+ -2, -2, -1, 2, 2, 2, 1, 0, -2, -1, 0, 1, 1, 0, 0, -1,
+ -1, -1, 0, 0, -1, -2, -1, -2, 0, 1, 1, 1, 0, 0, 1, 1,
+ -3, -3, -3, -2, -1, -1, -2, -2, -1, -1, 0, 1, 2, 1, 0, 0,
+ 1, 1, 1, 2, 2, 1, 0, 0, 1, 1, 1, 1, 1, 0, -1, 1,
+ 1, 0, -1, -1, 0, 0, -1, 1, 0, -1, -1, -1, 0, -1, -1, 1,
+ 1, 0, -1, 0, 0, -1, 0, 2, 2, 0, -1, 0, 0, 0, 0, 2,
+ 1, 0, -2, -1, 0, 1, 1, 0, 2, 0, -1, -1, 0, 1, 1, 0,
+ 1, 0, -2, -1, 0, 1, 0, -1, 1, 0, -1, -1, 0, 1, 0, -1,
+ 0, 1, 1, 0, 1, 1, 0, 0, -2, 1, 2, 1, 0, 0, 0, 1,
+ -5, 0, 2, 1, 0, -1, 0, 1, -6, -1, 2, 1, 0, -1, 0, 0,
+ 5, 3, 0, -1, -2, -1, -1, -1, 1, 1, 0, -1, -1, 0, -1, -1,
+ -1, 0, 1, 1, 2, 2, 1, 0, -2, -1, 0, 1, 2, 1, 1, 1,
+ -2, -1, -1, -1, 0, -1, 0, 1, 0, 1, 0, 0, -1, -1, 0, 0,
+ 0, 1, 1, 1, 1, 0, 0, 0, -3, -2, 0, 1, 1, 0, 0, -1,
+ -1, 0, 1, 0, -1, 0, 2, 3, -1, 0, 0, -2, -4, -2, -1, 0,
+ 0, 1, 1, 0, -2, -1, 0, -1, 1, 2, 3, 1, 0, 1, 1, 0,
+ -1, 0, 1, 1, 1, 1, 1, 0, -2, -3, -2, 0, 0, 0, 1, 0,
+ -1, -2, -2, 0, 1, 0, 0, -1, 3, 1, 0, 0, 1, 0, -1, -1,
+ -2, -1, 0, 0, -1, -1, 0, 0, -1, 0, 0, 0, 0, 1, 1, 1,
+ -1, -1, -1, 0, 1, 1, 1, 1, 0, -2, -3, -1, 1, 0, 0, 0,
+ 1, -1, -3, -1, 1, 1, 0, -1, 3, 1, -1, 1, 2, 2, 0, -1,
+ 3, 1, 0, 1, 2, 1, 1, 0, 0, -2, -2, -1, -1, 0, 0, 0,
+ 1, 0, -1, -1, 1, 2, 1, 0, 0, -1, -2, -1, 1, 2, 2, 1,
+ -1, -1, -1, 0, 0, 1, 2, 0, -2, 0, 0, 0, 0, 0, 1, -1,
+ -1, 0, 1, 0, -1, -1, -1, -1, 0, 1, 1, 2, 0, -2, -1, 0,
+ 1, 2, 2, 2, 1, -1, -1, 0, 0, 1, 1, 1, 0, -2, -2, -1,
+ 0, 0, -1, -1, -1, -1, -2, -2, 0, 0, -1, 0, 1, 2, 2, 1,
+ 0, 0, -1, -1, 0, 1, 2, 2, 1, 1, -1, -2, -1, -1, -1, -1,
+ 2, 2, 1, 0, 0, -1, -2, -2, 1, 2, 2, 1, 0, 0, -2, -2,
+ 0, 0, 0, 0, 1, 1, 0, -1, 0, -1, -1, -1, 2, 3, 2, 1,
+ 0, -2, 1, 2, -1, 0, 0, 1, -1, -2, 2, 3, -1, 0, 0, 0,
+ 0, -2, 2, 3, -1, -1, 0, 0, 0, -1, 3, 2, -2, 0, 1, 0,
+ 0, -1, 3, 1, -2, 0, 1, 0, 0, -1, 2, 1, -1, 1, 0, -1,
+ 0, 0, 1, -1, -2, 0, 0, -1, 1, 0, 0, -2, -2, -1, -1, -1,
+ 1, 1, 1, 1, 1, -1, -1, -2, 0, 0, 0, 1, 1, 1, 1, 1,
+ 0, 0, 0, 1, 1, 1, 2, 3, 1, 0, 0, -1, 0, 0, 1, 2,
+ 0, -1, -1, -2, -1, 0, 1, 2, -2, -2, -2, -2, -1, 0, 1, 1,
+ -1, -1, -1, -1, 0, 0, 0, -1, 2, 2, 2, 0, -1, -1, -2, -4,
+ -1, -2, -1, -1, 0, 1, 2, 3, -1, -1, -1, -1, 0, 1, 2, 3,
+ 1, 0, -1, 0, -1, 0, 1, 2, 1, 0, 0, 0, -1, 0, 2, 2,
+ 1, 0, -1, -1, -2, 0, 1, 2, 0, -2, -2, -2, -3, -1, 0, 1,
+ 0, -2, -2, -2, -2, -1, 1, 1, 0, 0, 0, 0, 0, 1, 2, 2
+};
+
+/* list of codebooks for intra-coded vectors */
+const int8_t* const ff_svq1_intra_codebooks[6] = {
+ svq1_intra_codebook_4x2, svq1_intra_codebook_4x4,
+ svq1_intra_codebook_8x4, svq1_intra_codebook_8x8,
+ NULL, NULL,
+};
+
+#endif /* AVCODEC_SVQ1_CB_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/svq1_vlc.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/svq1_vlc.h
new file mode 100644
index 000000000..b89562620
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/svq1_vlc.h
@@ -0,0 +1,283 @@
+/*
+ * copyright (C) 2003 the ffmpeg project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_SVQ1_VLC_H
+#define AVCODEC_SVQ1_VLC_H
+
+#include <stdint.h>
+
+/* values in this table range from 0..3; adjust retrieved value by +0 */
+const uint8_t ff_svq1_block_type_vlc[4][2] = {
+ /* { code, length } */
+ { 0x1, 1 }, { 0x1, 2 }, { 0x1, 3 }, { 0x0, 3 }
+
+};
+
+/* values in this table range from -1..6; adjust retrieved value by -1 */
+const uint8_t ff_svq1_intra_multistage_vlc[6][8][2] = {
+ /* { code, length } */
+{
+ { 0x1, 5 }, { 0x1, 1 }, { 0x3, 3 }, { 0x2, 3 },
+ { 0x3, 4 }, { 0x2, 4 }, { 0x0, 5 }, { 0x1, 4 }
+},{
+ { 0x1, 4 }, { 0x3, 2 }, { 0x5, 3 }, { 0x4, 3 },
+ { 0x3, 3 }, { 0x2, 3 }, { 0x0, 4 }, { 0x1, 3 }
+},{
+ { 0x1, 5 }, { 0x1, 1 }, { 0x3, 3 }, { 0x0, 5 },
+ { 0x3, 4 }, { 0x2, 3 }, { 0x2, 4 }, { 0x1, 4 }
+},{
+ { 0x1, 6 }, { 0x1, 1 }, { 0x1, 2 }, { 0x0, 6 },
+ { 0x3, 4 }, { 0x2, 4 }, { 0x1, 5 }, { 0x1, 4 }
+},{
+ { 0x1, 6 }, { 0x1, 1 }, { 0x1, 2 }, { 0x3, 5 },
+ { 0x2, 5 }, { 0x0, 6 }, { 0x1, 5 }, { 0x1, 3 }
+},{
+ { 0x1, 7 }, { 0x1, 1 }, { 0x1, 2 }, { 0x1, 3 },
+ { 0x1, 4 }, { 0x1, 6 }, { 0x0, 7 }, { 0x1, 5 }
+}
+};
+
+/* values in this table range from -1..6; adjust retrieved value by -1 */
+const uint8_t ff_svq1_inter_multistage_vlc[6][8][2] = {
+ /* { code, length } */
+{
+ { 0x3, 2 }, { 0x5, 3 }, { 0x4, 3 }, { 0x3, 3 },
+ { 0x2, 3 }, { 0x1, 3 }, { 0x1, 4 }, { 0x0, 4 }
+},{
+ { 0x3, 2 }, { 0x5, 3 }, { 0x4, 3 }, { 0x3, 3 },
+ { 0x2, 3 }, { 0x1, 3 }, { 0x1, 4 }, { 0x0, 4 }
+},{
+ { 0x1, 1 }, { 0x3, 3 }, { 0x2, 3 }, { 0x3, 4 },
+ { 0x2, 4 }, { 0x1, 4 }, { 0x1, 5 }, { 0x0, 5 }
+},{
+ { 0x1, 1 }, { 0x3, 3 }, { 0x2, 3 }, { 0x3, 4 },
+ { 0x2, 4 }, { 0x1, 4 }, { 0x1, 5 }, { 0x0, 5 }
+},{
+ { 0x1, 1 }, { 0x3, 3 }, { 0x2, 3 }, { 0x3, 4 },
+ { 0x2, 4 }, { 0x1, 4 }, { 0x1, 5 }, { 0x0, 5 }
+},{
+ { 0x1, 1 }, { 0x1, 2 }, { 0x1, 3 }, { 0x3, 5 },
+ { 0x2, 5 }, { 0x1, 5 }, { 0x1, 6 }, { 0x0, 6 }
+}
+};
+
+/* values in this table range from 0..255; adjust retrieved value by +0 */
+const uint16_t ff_svq1_intra_mean_vlc[256][2] = {
+ /* { code, length } */
+ { 0x37, 6 }, { 0x56, 7 }, { 0x1, 17 }, { 0x1, 20 },
+ { 0x2, 20 }, { 0x3, 20 }, { 0x0, 20 }, { 0x4, 20 },
+ { 0x5, 20 }, { 0x3, 19 }, { 0x15, 11 }, { 0x42, 9 },
+ { 0x14, 11 }, { 0x3, 14 }, { 0x2, 14 }, { 0x1, 15 },
+ { 0x1, 16 }, { 0x1, 12 }, { 0x2B, 10 }, { 0x18, 11 },
+ { 0xC, 11 }, { 0x41, 9 }, { 0x78, 8 }, { 0x6C, 8 },
+ { 0x55, 7 }, { 0xF, 4 }, { 0xE, 4 }, { 0x34, 6 },
+ { 0x51, 7 }, { 0x72, 8 }, { 0x6E, 8 }, { 0x40, 9 },
+ { 0x3F, 9 }, { 0x3E, 9 }, { 0x3D, 9 }, { 0x3C, 9 },
+ { 0x3B, 9 }, { 0x3A, 9 }, { 0x39, 9 }, { 0x38, 9 },
+ { 0x37, 9 }, { 0x43, 9 }, { 0x46, 9 }, { 0x47, 9 },
+ { 0x45, 9 }, { 0x44, 9 }, { 0x49, 9 }, { 0x48, 9 },
+ { 0x4A, 8 }, { 0x79, 8 }, { 0x76, 8 }, { 0x77, 8 },
+ { 0x71, 8 }, { 0x75, 8 }, { 0x74, 8 }, { 0x73, 8 },
+ { 0x6A, 8 }, { 0x55, 8 }, { 0x70, 8 }, { 0x6F, 8 },
+ { 0x52, 8 }, { 0x6D, 8 }, { 0x4C, 8 }, { 0x6B, 8 },
+ { 0x40, 7 }, { 0x69, 8 }, { 0x68, 8 }, { 0x67, 8 },
+ { 0x66, 8 }, { 0x65, 8 }, { 0x64, 8 }, { 0x63, 8 },
+ { 0x62, 8 }, { 0x61, 8 }, { 0x60, 8 }, { 0x5F, 8 },
+ { 0x5E, 8 }, { 0x5D, 8 }, { 0x5C, 8 }, { 0x5B, 8 },
+ { 0x5A, 8 }, { 0x59, 8 }, { 0x58, 8 }, { 0x57, 8 },
+ { 0x56, 8 }, { 0x3D, 7 }, { 0x54, 8 }, { 0x53, 8 },
+ { 0x3F, 7 }, { 0x51, 8 }, { 0x50, 8 }, { 0x4F, 8 },
+ { 0x4E, 8 }, { 0x4D, 8 }, { 0x41, 7 }, { 0x4B, 8 },
+ { 0x53, 7 }, { 0x3E, 7 }, { 0x48, 8 }, { 0x4F, 7 },
+ { 0x52, 7 }, { 0x45, 8 }, { 0x50, 7 }, { 0x43, 8 },
+ { 0x42, 8 }, { 0x41, 8 }, { 0x42, 7 }, { 0x43, 7 },
+ { 0x3E, 8 }, { 0x44, 7 }, { 0x3C, 8 }, { 0x45, 7 },
+ { 0x46, 7 }, { 0x47, 7 }, { 0x48, 7 }, { 0x49, 7 },
+ { 0x4A, 7 }, { 0x4B, 7 }, { 0x4C, 7 }, { 0x4D, 7 },
+ { 0x4E, 7 }, { 0x58, 7 }, { 0x59, 7 }, { 0x5A, 7 },
+ { 0x5B, 7 }, { 0x5C, 7 }, { 0x5D, 7 }, { 0x44, 8 },
+ { 0x49, 8 }, { 0x29, 8 }, { 0x3F, 8 }, { 0x3D, 8 },
+ { 0x3B, 8 }, { 0x2C, 8 }, { 0x28, 8 }, { 0x25, 8 },
+ { 0x26, 8 }, { 0x5E, 7 }, { 0x57, 7 }, { 0x54, 7 },
+ { 0x5F, 7 }, { 0x62, 7 }, { 0x63, 7 }, { 0x64, 7 },
+ { 0x61, 7 }, { 0x65, 7 }, { 0x67, 7 }, { 0x66, 7 },
+ { 0x35, 6 }, { 0x36, 6 }, { 0x60, 7 }, { 0x39, 8 },
+ { 0x3A, 8 }, { 0x38, 8 }, { 0x37, 8 }, { 0x36, 8 },
+ { 0x35, 8 }, { 0x34, 8 }, { 0x33, 8 }, { 0x32, 8 },
+ { 0x31, 8 }, { 0x30, 8 }, { 0x2D, 8 }, { 0x2B, 8 },
+ { 0x2A, 8 }, { 0x27, 8 }, { 0x40, 8 }, { 0x46, 8 },
+ { 0x47, 8 }, { 0x26, 9 }, { 0x25, 9 }, { 0x24, 9 },
+ { 0x23, 9 }, { 0x22, 9 }, { 0x2E, 8 }, { 0x2F, 8 },
+ { 0x1F, 9 }, { 0x36, 9 }, { 0x1D, 9 }, { 0x21, 9 },
+ { 0x1B, 9 }, { 0x1C, 9 }, { 0x19, 9 }, { 0x1A, 9 },
+ { 0x18, 9 }, { 0x17, 9 }, { 0x16, 9 }, { 0x1E, 9 },
+ { 0x20, 9 }, { 0x27, 9 }, { 0x28, 9 }, { 0x29, 9 },
+ { 0x2A, 9 }, { 0x2B, 9 }, { 0x2C, 9 }, { 0x2D, 9 },
+ { 0x2E, 9 }, { 0x2F, 9 }, { 0x30, 9 }, { 0x35, 9 },
+ { 0x31, 9 }, { 0x32, 9 }, { 0x33, 9 }, { 0x34, 9 },
+ { 0x19, 10 }, { 0x2A, 10 }, { 0x17, 10 }, { 0x16, 10 },
+ { 0x15, 10 }, { 0x28, 10 }, { 0x26, 10 }, { 0x25, 10 },
+ { 0x22, 10 }, { 0x21, 10 }, { 0x18, 10 }, { 0x14, 10 },
+ { 0x29, 10 }, { 0x12, 10 }, { 0xD, 10 }, { 0xE, 10 },
+ { 0xF, 10 }, { 0x10, 10 }, { 0x11, 10 }, { 0x1A, 10 },
+ { 0x1B, 10 }, { 0x1C, 10 }, { 0x1D, 10 }, { 0x1E, 10 },
+ { 0x1F, 10 }, { 0x20, 10 }, { 0x13, 10 }, { 0x23, 10 },
+ { 0x24, 10 }, { 0x9, 11 }, { 0x8, 11 }, { 0x7, 11 },
+ { 0x27, 10 }, { 0x5, 11 }, { 0xB, 11 }, { 0x6, 11 },
+ { 0x4, 11 }, { 0x3, 11 }, { 0x2, 11 }, { 0x1, 11 },
+ { 0xA, 11 }, { 0x16, 11 }, { 0x19, 11 }, { 0x17, 11 },
+ { 0xD, 11 }, { 0xE, 11 }, { 0xF, 11 }, { 0x10, 11 },
+ { 0x11, 11 }, { 0x12, 11 }, { 0x13, 11 }, { 0x1, 14 }
+};
+
+/* values in this table range from -256..255; adjust retrieved value by -256 */
+const uint16_t ff_svq1_inter_mean_vlc[512][2] = {
+ /* { code, length } */
+ { 0x5A, 22 }, { 0xD4, 22 }, { 0xD5, 22 }, { 0xD6, 22 },
+ { 0xD7, 22 }, { 0xD8, 22 }, { 0xD9, 22 }, { 0xDA, 22 },
+ { 0xDB, 22 }, { 0xDC, 22 }, { 0xDD, 22 }, { 0xDE, 22 },
+ { 0xDF, 22 }, { 0xE0, 22 }, { 0xE1, 22 }, { 0xE2, 22 },
+ { 0xE3, 22 }, { 0xE4, 22 }, { 0xE5, 22 }, { 0xE6, 22 },
+ { 0xE8, 22 }, { 0xCB, 22 }, { 0xE9, 22 }, { 0xEA, 22 },
+ { 0xE7, 22 }, { 0xEC, 22 }, { 0xED, 22 }, { 0xEE, 22 },
+ { 0xEF, 22 }, { 0xF0, 22 }, { 0xF1, 22 }, { 0xF2, 22 },
+ { 0xF3, 22 }, { 0xF4, 22 }, { 0xF5, 22 }, { 0xF6, 22 },
+ { 0xF7, 22 }, { 0xF8, 22 }, { 0x102, 22 }, { 0xEB, 22 },
+ { 0xF9, 22 }, { 0xFC, 22 }, { 0xFD, 22 }, { 0xFE, 22 },
+ { 0x100, 22 }, { 0x5C, 22 }, { 0x60, 22 }, { 0x101, 22 },
+ { 0x71, 22 }, { 0x104, 22 }, { 0x105, 22 }, { 0xFB, 22 },
+ { 0xFF, 22 }, { 0x86, 21 }, { 0xFA, 22 }, { 0x7C, 22 },
+ { 0x75, 22 }, { 0x103, 22 }, { 0x78, 22 }, { 0xD3, 22 },
+ { 0x7B, 22 }, { 0x82, 22 }, { 0xD2, 22 }, { 0xD1, 22 },
+ { 0xD0, 22 }, { 0xCF, 22 }, { 0xCE, 22 }, { 0xCD, 22 },
+ { 0xCC, 22 }, { 0xC3, 22 }, { 0xCA, 22 }, { 0xC9, 22 },
+ { 0xC8, 22 }, { 0xC7, 22 }, { 0xC6, 22 }, { 0xC5, 22 },
+ { 0x8B, 22 }, { 0xC4, 22 }, { 0xC2, 22 }, { 0xC1, 22 },
+ { 0xC0, 22 }, { 0xBF, 22 }, { 0xBE, 22 }, { 0xBD, 22 },
+ { 0xBC, 22 }, { 0xBB, 22 }, { 0xBA, 22 }, { 0xB9, 22 },
+ { 0x61, 22 }, { 0x84, 22 }, { 0x85, 22 }, { 0x86, 22 },
+ { 0x87, 22 }, { 0x88, 22 }, { 0x89, 22 }, { 0x8A, 22 },
+ { 0x8C, 22 }, { 0x8D, 22 }, { 0x8E, 22 }, { 0x8F, 22 },
+ { 0x90, 22 }, { 0x91, 22 }, { 0x92, 22 }, { 0x93, 22 },
+ { 0x94, 22 }, { 0x95, 22 }, { 0x96, 22 }, { 0x97, 22 },
+ { 0x98, 22 }, { 0x99, 22 }, { 0x9A, 22 }, { 0x9B, 22 },
+ { 0x9C, 22 }, { 0x9D, 22 }, { 0x9E, 22 }, { 0x9F, 22 },
+ { 0xA0, 22 }, { 0xA1, 22 }, { 0xA2, 22 }, { 0xA3, 22 },
+ { 0xA4, 22 }, { 0xA5, 22 }, { 0xA6, 22 }, { 0xA7, 22 },
+ { 0xA8, 22 }, { 0xA9, 22 }, { 0xAA, 22 }, { 0xAB, 22 },
+ { 0x7F, 22 }, { 0x8F, 21 }, { 0xAC, 22 }, { 0xAD, 22 },
+ { 0xAE, 22 }, { 0xAF, 22 }, { 0xB0, 22 }, { 0xB1, 22 },
+ { 0x53, 20 }, { 0x90, 21 }, { 0xB2, 22 }, { 0x91, 21 },
+ { 0xB3, 22 }, { 0xB4, 22 }, { 0x54, 20 }, { 0xB5, 22 },
+ { 0xB6, 22 }, { 0x8C, 21 }, { 0x34, 19 }, { 0x3D, 18 },
+ { 0x55, 20 }, { 0xB7, 22 }, { 0xB8, 22 }, { 0x8B, 21 },
+ { 0x56, 20 }, { 0x3D, 19 }, { 0x57, 20 }, { 0x58, 20 },
+ { 0x40, 19 }, { 0x43, 19 }, { 0x47, 19 }, { 0x2A, 18 },
+ { 0x2E, 19 }, { 0x2C, 18 }, { 0x46, 19 }, { 0x59, 20 },
+ { 0x49, 19 }, { 0x2D, 19 }, { 0x38, 18 }, { 0x36, 18 },
+ { 0x39, 18 }, { 0x45, 19 }, { 0x28, 18 }, { 0x30, 18 },
+ { 0x35, 18 }, { 0x20, 17 }, { 0x44, 19 }, { 0x32, 18 },
+ { 0x31, 18 }, { 0x1F, 17 }, { 0x2F, 18 }, { 0x2E, 18 },
+ { 0x2D, 18 }, { 0x21, 17 }, { 0x22, 17 }, { 0x23, 17 },
+ { 0x24, 17 }, { 0x27, 16 }, { 0x23, 16 }, { 0x20, 16 },
+ { 0x1D, 16 }, { 0x25, 16 }, { 0x1E, 16 }, { 0x24, 16 },
+ { 0x2A, 16 }, { 0x26, 16 }, { 0x21, 15 }, { 0x29, 16 },
+ { 0x22, 15 }, { 0x23, 15 }, { 0x24, 15 }, { 0x1B, 15 },
+ { 0x1A, 15 }, { 0x1D, 15 }, { 0x1F, 15 }, { 0x27, 15 },
+ { 0x17, 14 }, { 0x18, 14 }, { 0x19, 14 }, { 0x1B, 14 },
+ { 0x1C, 14 }, { 0x1E, 14 }, { 0x25, 14 }, { 0x20, 14 },
+ { 0x21, 14 }, { 0x13, 13 }, { 0x14, 13 }, { 0x15, 13 },
+ { 0x16, 13 }, { 0x17, 13 }, { 0x18, 13 }, { 0x19, 13 },
+ { 0x1A, 13 }, { 0x18, 12 }, { 0x17, 12 }, { 0x15, 12 },
+ { 0x14, 12 }, { 0x13, 12 }, { 0x12, 12 }, { 0xF, 11 },
+ { 0x10, 11 }, { 0x12, 11 }, { 0x13, 11 }, { 0x1B, 11 },
+ { 0x1A, 11 }, { 0xE, 10 }, { 0x13, 10 }, { 0xF, 10 },
+ { 0x10, 10 }, { 0x11, 10 }, { 0x12, 10 }, { 0xD, 9 },
+ { 0x14, 9 }, { 0x15, 9 }, { 0xC, 9 }, { 0x13, 9 },
+ { 0xF, 8 }, { 0xE, 8 }, { 0x10, 8 }, { 0x11, 8 },
+ { 0xC, 7 }, { 0x9, 7 }, { 0xA, 7 }, { 0x8, 6 },
+ { 0x9, 6 }, { 0x9, 5 }, { 0x8, 5 }, { 0x5, 4 },
+ { 0x1, 1 }, { 0x3, 3 }, { 0x7, 5 }, { 0x6, 5 },
+ { 0xB, 6 }, { 0xA, 6 }, { 0xE, 7 }, { 0xF, 7 },
+ { 0xB, 7 }, { 0xD, 7 }, { 0xB, 8 }, { 0xD, 8 },
+ { 0xC, 8 }, { 0xF, 9 }, { 0x10, 9 }, { 0x11, 9 },
+ { 0xE, 9 }, { 0x12, 9 }, { 0x17, 10 }, { 0x14, 10 },
+ { 0x16, 10 }, { 0x15, 10 }, { 0x19, 11 }, { 0x18, 11 },
+ { 0x17, 11 }, { 0x16, 11 }, { 0x15, 11 }, { 0x14, 11 },
+ { 0x11, 11 }, { 0x19, 12 }, { 0x1A, 12 }, { 0x16, 12 },
+ { 0x1D, 12 }, { 0x1B, 12 }, { 0x1C, 12 }, { 0x20, 13 },
+ { 0x1C, 13 }, { 0x23, 13 }, { 0x22, 13 }, { 0x21, 13 },
+ { 0x1F, 13 }, { 0x1E, 13 }, { 0x1B, 13 }, { 0x1D, 13 },
+ { 0x24, 14 }, { 0x16, 14 }, { 0x1A, 14 }, { 0x22, 14 },
+ { 0x1D, 14 }, { 0x1F, 14 }, { 0x15, 14 }, { 0x23, 14 },
+ { 0x18, 15 }, { 0x20, 15 }, { 0x29, 15 }, { 0x28, 15 },
+ { 0x26, 15 }, { 0x25, 15 }, { 0x19, 15 }, { 0x1C, 15 },
+ { 0x1E, 15 }, { 0x17, 15 }, { 0x2C, 16 }, { 0x2B, 16 },
+ { 0x1C, 16 }, { 0x21, 16 }, { 0x2D, 16 }, { 0x28, 16 },
+ { 0x1F, 16 }, { 0x1B, 16 }, { 0x1A, 16 }, { 0x22, 16 },
+ { 0x2D, 17 }, { 0x32, 17 }, { 0x2C, 17 }, { 0x27, 17 },
+ { 0x31, 17 }, { 0x33, 17 }, { 0x2F, 17 }, { 0x2B, 17 },
+ { 0x37, 18 }, { 0x2A, 17 }, { 0x2E, 17 }, { 0x30, 17 },
+ { 0x29, 17 }, { 0x28, 17 }, { 0x26, 17 }, { 0x25, 17 },
+ { 0x2F, 19 }, { 0x33, 18 }, { 0x34, 18 }, { 0x30, 19 },
+ { 0x3A, 18 }, { 0x3B, 18 }, { 0x31, 19 }, { 0x3C, 18 },
+ { 0x2B, 18 }, { 0x29, 18 }, { 0x48, 19 }, { 0x27, 18 },
+ { 0x42, 19 }, { 0x41, 19 }, { 0x26, 18 }, { 0x52, 20 },
+ { 0x51, 20 }, { 0x3F, 19 }, { 0x3E, 19 }, { 0x39, 19 },
+ { 0x3C, 19 }, { 0x3B, 19 }, { 0x3A, 19 }, { 0x25, 18 },
+ { 0x38, 19 }, { 0x50, 20 }, { 0x37, 19 }, { 0x36, 19 },
+ { 0x87, 21 }, { 0x4F, 20 }, { 0x35, 19 }, { 0x4E, 20 },
+ { 0x33, 19 }, { 0x32, 19 }, { 0x4D, 20 }, { 0x4C, 20 },
+ { 0x83, 22 }, { 0x4B, 20 }, { 0x81, 22 }, { 0x80, 22 },
+ { 0x8E, 21 }, { 0x7E, 22 }, { 0x7D, 22 }, { 0x84, 21 },
+ { 0x8D, 21 }, { 0x7A, 22 }, { 0x79, 22 }, { 0x4A, 20 },
+ { 0x77, 22 }, { 0x76, 22 }, { 0x89, 21 }, { 0x74, 22 },
+ { 0x73, 22 }, { 0x72, 22 }, { 0x49, 20 }, { 0x70, 22 },
+ { 0x6F, 22 }, { 0x6E, 22 }, { 0x6D, 22 }, { 0x6C, 22 },
+ { 0x6B, 22 }, { 0x6A, 22 }, { 0x69, 22 }, { 0x68, 22 },
+ { 0x67, 22 }, { 0x66, 22 }, { 0x65, 22 }, { 0x64, 22 },
+ { 0x63, 22 }, { 0x62, 22 }, { 0x8A, 21 }, { 0x88, 21 },
+ { 0x5F, 22 }, { 0x5E, 22 }, { 0x5D, 22 }, { 0x85, 21 },
+ { 0x5B, 22 }, { 0x83, 21 }, { 0x59, 22 }, { 0x58, 22 },
+ { 0x57, 22 }, { 0x56, 22 }, { 0x55, 22 }, { 0x54, 22 },
+ { 0x53, 22 }, { 0x52, 22 }, { 0x51, 22 }, { 0x50, 22 },
+ { 0x4F, 22 }, { 0x4E, 22 }, { 0x4D, 22 }, { 0x4C, 22 },
+ { 0x4B, 22 }, { 0x4A, 22 }, { 0x49, 22 }, { 0x48, 22 },
+ { 0x47, 22 }, { 0x46, 22 }, { 0x45, 22 }, { 0x44, 22 },
+ { 0x43, 22 }, { 0x42, 22 }, { 0x41, 22 }, { 0x40, 22 },
+ { 0x3F, 22 }, { 0x3E, 22 }, { 0x3D, 22 }, { 0x3C, 22 },
+ { 0x3B, 22 }, { 0x3A, 22 }, { 0x39, 22 }, { 0x38, 22 },
+ { 0x37, 22 }, { 0x36, 22 }, { 0x35, 22 }, { 0x34, 22 },
+ { 0x33, 22 }, { 0x32, 22 }, { 0x31, 22 }, { 0x30, 22 },
+ { 0x2F, 22 }, { 0x2E, 22 }, { 0x2D, 22 }, { 0x2C, 22 },
+ { 0x2B, 22 }, { 0x2A, 22 }, { 0x29, 22 }, { 0x28, 22 },
+ { 0x27, 22 }, { 0x26, 22 }, { 0x25, 22 }, { 0x24, 22 },
+ { 0x23, 22 }, { 0x22, 22 }, { 0x21, 22 }, { 0x20, 22 },
+ { 0x1F, 22 }, { 0x1E, 22 }, { 0x1D, 22 }, { 0x1C, 22 },
+ { 0x1B, 22 }, { 0x1A, 22 }, { 0x19, 22 }, { 0x18, 22 },
+ { 0x17, 22 }, { 0x16, 22 }, { 0x15, 22 }, { 0x14, 22 },
+ { 0x13, 22 }, { 0x12, 22 }, { 0x11, 22 }, { 0x10, 22 },
+ { 0xF, 22 }, { 0xE, 22 }, { 0xD, 22 }, { 0xC, 22 },
+ { 0xB, 22 }, { 0xA, 22 }, { 0x9, 22 }, { 0x8, 22 },
+ { 0x7, 22 }, { 0x6, 22 }, { 0x5, 22 }, { 0x4, 22 },
+ { 0x3, 22 }, { 0x2, 22 }, { 0x1, 22 }, { 0x0, 22 }
+};
+
+#endif /* AVCODEC_SVQ1_VLC_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/svq1dec.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/svq1dec.c
new file mode 100644
index 000000000..9c810cdc7
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/svq1dec.c
@@ -0,0 +1,848 @@
+/*
+ * SVQ1 decoder
+ * ported to MPlayer by Arpi <arpi@thot.banki.hu>
+ * ported to libavcodec by Nick Kurshev <nickols_k@mail.ru>
+ *
+ * Copyright (C) 2002 the xine project
+ * Copyright (C) 2002 the ffmpeg project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/svq1.c
+ * Sorenson Vector Quantizer #1 (SVQ1) video codec.
+ * For more information on the SVQ1 algorithm, visit:
+ * http://www.pcisys.net/~melanson/codecs/
+ */
+
+
+//#define DEBUG_SVQ1
+#include "avcodec.h"
+#include "dsputil.h"
+#include "mpegvideo.h"
+#include "mathops.h"
+
+#include "svq1.h"
+
+#undef NDEBUG
+#include <assert.h>
+
+extern const uint8_t mvtab[33][2];
+
+static VLC svq1_block_type;
+static VLC svq1_motion_component;
+static VLC svq1_intra_multistage[6];
+static VLC svq1_inter_multistage[6];
+static VLC svq1_intra_mean;
+static VLC svq1_inter_mean;
+
+/* motion vector (prediction) */
+typedef struct svq1_pmv_s {
+ int x;
+ int y;
+} svq1_pmv;
+
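+/* CRC-16/CCITT lookup table (polynomial 0x1021), used by
+ * ff_svq1_packet_checksum() below to verify packet data */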
+static const uint16_t checksum_table[256] = {
+ 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50A5, 0x60C6, 0x70E7,
+ 0x8108, 0x9129, 0xA14A, 0xB16B, 0xC18C, 0xD1AD, 0xE1CE, 0xF1EF,
+ 0x1231, 0x0210, 0x3273, 0x2252, 0x52B5, 0x4294, 0x72F7, 0x62D6,
+ 0x9339, 0x8318, 0xB37B, 0xA35A, 0xD3BD, 0xC39C, 0xF3FF, 0xE3DE,
+ 0x2462, 0x3443, 0x0420, 0x1401, 0x64E6, 0x74C7, 0x44A4, 0x5485,
+ 0xA56A, 0xB54B, 0x8528, 0x9509, 0xE5EE, 0xF5CF, 0xC5AC, 0xD58D,
+ 0x3653, 0x2672, 0x1611, 0x0630, 0x76D7, 0x66F6, 0x5695, 0x46B4,
+ 0xB75B, 0xA77A, 0x9719, 0x8738, 0xF7DF, 0xE7FE, 0xD79D, 0xC7BC,
+ 0x48C4, 0x58E5, 0x6886, 0x78A7, 0x0840, 0x1861, 0x2802, 0x3823,
+ 0xC9CC, 0xD9ED, 0xE98E, 0xF9AF, 0x8948, 0x9969, 0xA90A, 0xB92B,
+ 0x5AF5, 0x4AD4, 0x7AB7, 0x6A96, 0x1A71, 0x0A50, 0x3A33, 0x2A12,
+ 0xDBFD, 0xCBDC, 0xFBBF, 0xEB9E, 0x9B79, 0x8B58, 0xBB3B, 0xAB1A,
+ 0x6CA6, 0x7C87, 0x4CE4, 0x5CC5, 0x2C22, 0x3C03, 0x0C60, 0x1C41,
+ 0xEDAE, 0xFD8F, 0xCDEC, 0xDDCD, 0xAD2A, 0xBD0B, 0x8D68, 0x9D49,
+ 0x7E97, 0x6EB6, 0x5ED5, 0x4EF4, 0x3E13, 0x2E32, 0x1E51, 0x0E70,
+ 0xFF9F, 0xEFBE, 0xDFDD, 0xCFFC, 0xBF1B, 0xAF3A, 0x9F59, 0x8F78,
+ 0x9188, 0x81A9, 0xB1CA, 0xA1EB, 0xD10C, 0xC12D, 0xF14E, 0xE16F,
+ 0x1080, 0x00A1, 0x30C2, 0x20E3, 0x5004, 0x4025, 0x7046, 0x6067,
+ 0x83B9, 0x9398, 0xA3FB, 0xB3DA, 0xC33D, 0xD31C, 0xE37F, 0xF35E,
+ 0x02B1, 0x1290, 0x22F3, 0x32D2, 0x4235, 0x5214, 0x6277, 0x7256,
+ 0xB5EA, 0xA5CB, 0x95A8, 0x8589, 0xF56E, 0xE54F, 0xD52C, 0xC50D,
+ 0x34E2, 0x24C3, 0x14A0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
+ 0xA7DB, 0xB7FA, 0x8799, 0x97B8, 0xE75F, 0xF77E, 0xC71D, 0xD73C,
+ 0x26D3, 0x36F2, 0x0691, 0x16B0, 0x6657, 0x7676, 0x4615, 0x5634,
+ 0xD94C, 0xC96D, 0xF90E, 0xE92F, 0x99C8, 0x89E9, 0xB98A, 0xA9AB,
+ 0x5844, 0x4865, 0x7806, 0x6827, 0x18C0, 0x08E1, 0x3882, 0x28A3,
+ 0xCB7D, 0xDB5C, 0xEB3F, 0xFB1E, 0x8BF9, 0x9BD8, 0xABBB, 0xBB9A,
+ 0x4A75, 0x5A54, 0x6A37, 0x7A16, 0x0AF1, 0x1AD0, 0x2AB3, 0x3A92,
+ 0xFD2E, 0xED0F, 0xDD6C, 0xCD4D, 0xBDAA, 0xAD8B, 0x9DE8, 0x8DC9,
+ 0x7C26, 0x6C07, 0x5C64, 0x4C45, 0x3CA2, 0x2C83, 0x1CE0, 0x0CC1,
+ 0xEF1F, 0xFF3E, 0xCF5D, 0xDF7C, 0xAF9B, 0xBFBA, 0x8FD9, 0x9FF8,
+ 0x6E17, 0x7E36, 0x4E55, 0x5E74, 0x2E93, 0x3EB2, 0x0ED1, 0x1EF0
+};
+
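+/* byte-substitution table used by svq1_parse_string() to descramble the
+ * embedded message string carried in keyframe headers */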
+static const uint8_t string_table[256] = {
+ 0x00, 0xD5, 0x7F, 0xAA, 0xFE, 0x2B, 0x81, 0x54,
+ 0x29, 0xFC, 0x56, 0x83, 0xD7, 0x02, 0xA8, 0x7D,
+ 0x52, 0x87, 0x2D, 0xF8, 0xAC, 0x79, 0xD3, 0x06,
+ 0x7B, 0xAE, 0x04, 0xD1, 0x85, 0x50, 0xFA, 0x2F,
+ 0xA4, 0x71, 0xDB, 0x0E, 0x5A, 0x8F, 0x25, 0xF0,
+ 0x8D, 0x58, 0xF2, 0x27, 0x73, 0xA6, 0x0C, 0xD9,
+ 0xF6, 0x23, 0x89, 0x5C, 0x08, 0xDD, 0x77, 0xA2,
+ 0xDF, 0x0A, 0xA0, 0x75, 0x21, 0xF4, 0x5E, 0x8B,
+ 0x9D, 0x48, 0xE2, 0x37, 0x63, 0xB6, 0x1C, 0xC9,
+ 0xB4, 0x61, 0xCB, 0x1E, 0x4A, 0x9F, 0x35, 0xE0,
+ 0xCF, 0x1A, 0xB0, 0x65, 0x31, 0xE4, 0x4E, 0x9B,
+ 0xE6, 0x33, 0x99, 0x4C, 0x18, 0xCD, 0x67, 0xB2,
+ 0x39, 0xEC, 0x46, 0x93, 0xC7, 0x12, 0xB8, 0x6D,
+ 0x10, 0xC5, 0x6F, 0xBA, 0xEE, 0x3B, 0x91, 0x44,
+ 0x6B, 0xBE, 0x14, 0xC1, 0x95, 0x40, 0xEA, 0x3F,
+ 0x42, 0x97, 0x3D, 0xE8, 0xBC, 0x69, 0xC3, 0x16,
+ 0xEF, 0x3A, 0x90, 0x45, 0x11, 0xC4, 0x6E, 0xBB,
+ 0xC6, 0x13, 0xB9, 0x6C, 0x38, 0xED, 0x47, 0x92,
+ 0xBD, 0x68, 0xC2, 0x17, 0x43, 0x96, 0x3C, 0xE9,
+ 0x94, 0x41, 0xEB, 0x3E, 0x6A, 0xBF, 0x15, 0xC0,
+ 0x4B, 0x9E, 0x34, 0xE1, 0xB5, 0x60, 0xCA, 0x1F,
+ 0x62, 0xB7, 0x1D, 0xC8, 0x9C, 0x49, 0xE3, 0x36,
+ 0x19, 0xCC, 0x66, 0xB3, 0xE7, 0x32, 0x98, 0x4D,
+ 0x30, 0xE5, 0x4F, 0x9A, 0xCE, 0x1B, 0xB1, 0x64,
+ 0x72, 0xA7, 0x0D, 0xD8, 0x8C, 0x59, 0xF3, 0x26,
+ 0x5B, 0x8E, 0x24, 0xF1, 0xA5, 0x70, 0xDA, 0x0F,
+ 0x20, 0xF5, 0x5F, 0x8A, 0xDE, 0x0B, 0xA1, 0x74,
+ 0x09, 0xDC, 0x76, 0xA3, 0xF7, 0x22, 0x88, 0x5D,
+ 0xD6, 0x03, 0xA9, 0x7C, 0x28, 0xFD, 0x57, 0x82,
+ 0xFF, 0x2A, 0x80, 0x55, 0x01, 0xD4, 0x7E, 0xAB,
+ 0x84, 0x51, 0xFB, 0x2E, 0x7A, 0xAF, 0x05, 0xD0,
+ 0xAD, 0x78, 0xD2, 0x07, 0x53, 0x86, 0x2C, 0xF9
+};
+
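+/* helper macros for the hierarchical vector decode: SVQ1_PROCESS_VECTOR()
+ * walks the split tree (a block is subdivided while the "divide" bit is set),
+ * SVQ1_CALC_CODEBOOK_ENTRIES() selects up to six codebook entries plus a mean,
+ * and SVQ1_ADD_CODEBOOK() accumulates them four packed pixels at a time,
+ * clipping the result to [0..255] */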
+#define SVQ1_PROCESS_VECTOR()\
+ for (; level > 0; i++) {\
+ /* process next depth */\
+ if (i == m) {\
+ m = n;\
+ if (--level == 0)\
+ break;\
+ }\
+ /* divide block if next bit set */\
+ if (get_bits1 (bitbuf) == 0)\
+ break;\
+ /* add child nodes */\
+ list[n++] = list[i];\
+ list[n++] = list[i] + (((level & 1) ? pitch : 1) << ((level / 2) + 1));\
+ }
+
+#define SVQ1_ADD_CODEBOOK()\
+ /* add codebook entries to vector */\
+ for (j=0; j < stages; j++) {\
+ n3 = codebook[entries[j]] ^ 0x80808080;\
+ n1 += ((n3 & 0xFF00FF00) >> 8);\
+ n2 += (n3 & 0x00FF00FF);\
+ }\
+\
+ /* clip to [0..255] */\
+ if (n1 & 0xFF00FF00) {\
+ n3 = ((( n1 >> 15) & 0x00010001) | 0x01000100) - 0x00010001;\
+ n1 += 0x7F007F00;\
+ n1 |= (((~n1 >> 15) & 0x00010001) | 0x01000100) - 0x00010001;\
+ n1 &= (n3 & 0x00FF00FF);\
+ }\
+\
+ if (n2 & 0xFF00FF00) {\
+ n3 = ((( n2 >> 15) & 0x00010001) | 0x01000100) - 0x00010001;\
+ n2 += 0x7F007F00;\
+ n2 |= (((~n2 >> 15) & 0x00010001) | 0x01000100) - 0x00010001;\
+ n2 &= (n3 & 0x00FF00FF);\
+ }
+
+#define SVQ1_DO_CODEBOOK_INTRA()\
+ for (y=0; y < height; y++) {\
+ for (x=0; x < (width / 4); x++, codebook++) {\
+ n1 = n4;\
+ n2 = n4;\
+ SVQ1_ADD_CODEBOOK()\
+ /* store result */\
+ dst[x] = (n1 << 8) | n2;\
+ }\
+ dst += (pitch / 4);\
+ }
+
+#define SVQ1_DO_CODEBOOK_NONINTRA()\
+ for (y=0; y < height; y++) {\
+ for (x=0; x < (width / 4); x++, codebook++) {\
+ n3 = dst[x];\
+ /* add mean value to vector */\
+ n1 = ((n3 & 0xFF00FF00) >> 8) + n4;\
+ n2 = (n3 & 0x00FF00FF) + n4;\
+ SVQ1_ADD_CODEBOOK()\
+ /* store result */\
+ dst[x] = (n1 << 8) | n2;\
+ }\
+ dst += (pitch / 4);\
+ }
+
+#define SVQ1_CALC_CODEBOOK_ENTRIES(cbook)\
+ codebook = (const uint32_t *) cbook[level];\
+ bit_cache = get_bits (bitbuf, 4*stages);\
+ /* calculate codebook entries for this vector */\
+ for (j=0; j < stages; j++) {\
+ entries[j] = (((bit_cache >> (4*(stages - j - 1))) & 0xF) + 16*j) << (level + 1);\
+ }\
+ mean -= (stages * 128);\
+ n4 = ((mean + (mean >> 31)) << 16) | (mean & 0xFFFF);
+
+static int svq1_decode_block_intra (GetBitContext *bitbuf, uint8_t *pixels, int pitch ) {
+ uint32_t bit_cache;
+ uint8_t *list[63];
+ uint32_t *dst;
+ const uint32_t *codebook;
+ int entries[6];
+ int i, j, m, n;
+ int mean, stages;
+ unsigned x, y, width, height, level;
+ uint32_t n1, n2, n3, n4;
+
+ /* initialize list for breadth first processing of vectors */
+ list[0] = pixels;
+
+ /* recursively process vector */
+ for (i=0, m=1, n=1, level=5; i < n; i++) {
+ SVQ1_PROCESS_VECTOR();
+
+ /* destination address and vector size */
+ dst = (uint32_t *) list[i];
+ width = 1 << ((4 + level) /2);
+ height = 1 << ((3 + level) /2);
+
+ /* get number of stages (-1 skips vector, 0 for mean only) */
+ stages = get_vlc2(bitbuf, svq1_intra_multistage[level].table, 3, 3) - 1;
+
+ if (stages == -1) {
+ for (y=0; y < height; y++) {
+ memset (&dst[y*(pitch / 4)], 0, width);
+ }
+ continue; /* skip vector */
+ }
+
+ if ((stages > 0) && (level >= 4)) {
+#ifdef DEBUG_SVQ1
+ av_log(s->avctx, AV_LOG_INFO, "Error (svq1_decode_block_intra): invalid vector: stages=%i level=%i\n",stages,level);
+#endif
+ return -1; /* invalid vector */
+ }
+
+ mean = get_vlc2(bitbuf, svq1_intra_mean.table, 8, 3);
+
+ if (stages == 0) {
+ for (y=0; y < height; y++) {
+ memset (&dst[y*(pitch / 4)], mean, width);
+ }
+ } else {
+ SVQ1_CALC_CODEBOOK_ENTRIES(ff_svq1_intra_codebooks);
+ SVQ1_DO_CODEBOOK_INTRA()
+ }
+ }
+
+ return 0;
+}
+
+static int svq1_decode_block_non_intra (GetBitContext *bitbuf, uint8_t *pixels, int pitch ) {
+ uint32_t bit_cache;
+ uint8_t *list[63];
+ uint32_t *dst;
+ const uint32_t *codebook;
+ int entries[6];
+ int i, j, m, n;
+ int mean, stages;
+ int x, y, width, height, level;
+ uint32_t n1, n2, n3, n4;
+
+ /* initialize list for breadth first processing of vectors */
+ list[0] = pixels;
+
+ /* recursively process vector */
+ for (i=0, m=1, n=1, level=5; i < n; i++) {
+ SVQ1_PROCESS_VECTOR();
+
+ /* destination address and vector size */
+ dst = (uint32_t *) list[i];
+ width = 1 << ((4 + level) /2);
+ height = 1 << ((3 + level) /2);
+
+ /* get number of stages (-1 skips vector, 0 for mean only) */
+ stages = get_vlc2(bitbuf, svq1_inter_multistage[level].table, 3, 2) - 1;
+
+ if (stages == -1) continue; /* skip vector */
+
+ if ((stages > 0) && (level >= 4)) {
+#ifdef DEBUG_SVQ1
+ av_log(s->avctx, AV_LOG_INFO, "Error (svq1_decode_block_non_intra): invalid vector: stages=%i level=%i\n",stages,level);
+#endif
+ return -1; /* invalid vector */
+ }
+
+ mean = get_vlc2(bitbuf, svq1_inter_mean.table, 9, 3) - 256;
+
+ SVQ1_CALC_CODEBOOK_ENTRIES(ff_svq1_inter_codebooks);
+ SVQ1_DO_CODEBOOK_NONINTRA()
+ }
+ return 0;
+}
+
+static int svq1_decode_motion_vector (GetBitContext *bitbuf, svq1_pmv *mv, svq1_pmv **pmv) {
+ int diff;
+ int i;
+
+ for (i=0; i < 2; i++) {
+
+ /* get motion code */
+ diff = get_vlc2(bitbuf, svq1_motion_component.table, 7, 2);
+ if(diff<0)
+ return -1;
+ else if(diff){
+ if(get_bits1(bitbuf)) diff= -diff;
+ }
+
+ /* add median of motion vector predictors and clip result */
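+ /* (the <<26 / >>26 pair below folds the sum into the signed 6-bit range [-32, 31]) */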
+ if (i == 1)
+ mv->y = ((diff + mid_pred(pmv[0]->y, pmv[1]->y, pmv[2]->y)) << 26) >> 26;
+ else
+ mv->x = ((diff + mid_pred(pmv[0]->x, pmv[1]->x, pmv[2]->x)) << 26) >> 26;
+ }
+
+ return 0;
+}
+
+static void svq1_skip_block (uint8_t *current, uint8_t *previous, int pitch, int x, int y) {
+ uint8_t *src;
+ uint8_t *dst;
+ int i;
+
+ src = &previous[x + y*pitch];
+ dst = current;
+
+ for (i=0; i < 16; i++) {
+ memcpy (dst, src, 16);
+ src += pitch;
+ dst += pitch;
+ }
+}
+
+static int svq1_motion_inter_block (MpegEncContext *s, GetBitContext *bitbuf,
+ uint8_t *current, uint8_t *previous, int pitch,
+ svq1_pmv *motion, int x, int y) {
+ uint8_t *src;
+ uint8_t *dst;
+ svq1_pmv mv;
+ svq1_pmv *pmv[3];
+ int result;
+
+ /* predict and decode motion vector */
+ pmv[0] = &motion[0];
+ if (y == 0) {
+ pmv[1] =
+ pmv[2] = pmv[0];
+ }
+ else {
+ pmv[1] = &motion[(x / 8) + 2];
+ pmv[2] = &motion[(x / 8) + 4];
+ }
+
+ result = svq1_decode_motion_vector (bitbuf, &mv, pmv);
+
+ if (result != 0)
+ return result;
+
+ motion[0].x =
+ motion[(x / 8) + 2].x =
+ motion[(x / 8) + 3].x = mv.x;
+ motion[0].y =
+ motion[(x / 8) + 2].y =
+ motion[(x / 8) + 3].y = mv.y;
+
+ if(y + (mv.y >> 1)<0)
+ mv.y= 0;
+ if(x + (mv.x >> 1)<0)
+ mv.x= 0;
+
+#if 0
+ int w= (s->width+15)&~15;
+ int h= (s->height+15)&~15;
+ if(x + (mv.x >> 1)<0 || y + (mv.y >> 1)<0 || x + (mv.x >> 1) + 16 > w || y + (mv.y >> 1) + 16> h)
+ av_log(s->avctx, AV_LOG_INFO, "%d %d %d %d\n", x, y, x + (mv.x >> 1), y + (mv.y >> 1));
+#endif
+
+ src = &previous[(x + (mv.x >> 1)) + (y + (mv.y >> 1))*pitch];
+ dst = current;
+
+ s->dsp.put_pixels_tab[0][((mv.y & 1) << 1) | (mv.x & 1)](dst,src,pitch,16);
+
+ return 0;
+}
+
+static int svq1_motion_inter_4v_block (MpegEncContext *s, GetBitContext *bitbuf,
+ uint8_t *current, uint8_t *previous, int pitch,
+ svq1_pmv *motion,int x, int y) {
+ uint8_t *src;
+ uint8_t *dst;
+ svq1_pmv mv;
+ svq1_pmv *pmv[4];
+ int i, result;
+
+ /* predict and decode motion vector (0) */
+ pmv[0] = &motion[0];
+ if (y == 0) {
+ pmv[1] =
+ pmv[2] = pmv[0];
+ }
+ else {
+ pmv[1] = &motion[(x / 8) + 2];
+ pmv[2] = &motion[(x / 8) + 4];
+ }
+
+ result = svq1_decode_motion_vector (bitbuf, &mv, pmv);
+
+ if (result != 0)
+ return result;
+
+ /* predict and decode motion vector (1) */
+ pmv[0] = &mv;
+ if (y == 0) {
+ pmv[1] =
+ pmv[2] = pmv[0];
+ }
+ else {
+ pmv[1] = &motion[(x / 8) + 3];
+ }
+ result = svq1_decode_motion_vector (bitbuf, &motion[0], pmv);
+
+ if (result != 0)
+ return result;
+
+ /* predict and decode motion vector (2) */
+ pmv[1] = &motion[0];
+ pmv[2] = &motion[(x / 8) + 1];
+
+ result = svq1_decode_motion_vector (bitbuf, &motion[(x / 8) + 2], pmv);
+
+ if (result != 0)
+ return result;
+
+ /* predict and decode motion vector (3) */
+ pmv[2] = &motion[(x / 8) + 2];
+ pmv[3] = &motion[(x / 8) + 3];
+
+ result = svq1_decode_motion_vector (bitbuf, pmv[3], pmv);
+
+ if (result != 0)
+ return result;
+
+ /* form predictions */
+ for (i=0; i < 4; i++) {
+ int mvx= pmv[i]->x + (i&1)*16;
+ int mvy= pmv[i]->y + (i>>1)*16;
+
+ // XXX/FIXME: clipping or padding?
+ if(y + (mvy >> 1)<0)
+ mvy= 0;
+ if(x + (mvx >> 1)<0)
+ mvx= 0;
+
+#if 0
+ int w= (s->width+15)&~15;
+ int h= (s->height+15)&~15;
+ if(x + (mvx >> 1)<0 || y + (mvy >> 1)<0 || x + (mvx >> 1) + 8 > w || y + (mvy >> 1) + 8> h)
+ av_log(s->avctx, AV_LOG_INFO, "%d %d %d %d\n", x, y, x + (mvx >> 1), y + (mvy >> 1));
+#endif
+ src = &previous[(x + (mvx >> 1)) + (y + (mvy >> 1))*pitch];
+ dst = current;
+
+ s->dsp.put_pixels_tab[1][((mvy & 1) << 1) | (mvx & 1)](dst,src,pitch,8);
+
+ /* select next block */
+ if (i & 1) {
+ current += 8*(pitch - 1);
+ } else {
+ current += 8;
+ }
+ }
+
+ return 0;
+}
+
+static int svq1_decode_delta_block (MpegEncContext *s, GetBitContext *bitbuf,
+ uint8_t *current, uint8_t *previous, int pitch,
+ svq1_pmv *motion, int x, int y) {
+ uint32_t block_type;
+ int result = 0;
+
+ /* get block type */
+ block_type = get_vlc2(bitbuf, svq1_block_type.table, 2, 2);
+
+ /* reset motion vectors */
+ if (block_type == SVQ1_BLOCK_SKIP || block_type == SVQ1_BLOCK_INTRA) {
+ motion[0].x =
+ motion[0].y =
+ motion[(x / 8) + 2].x =
+ motion[(x / 8) + 2].y =
+ motion[(x / 8) + 3].x =
+ motion[(x / 8) + 3].y = 0;
+ }
+
+ switch (block_type) {
+ case SVQ1_BLOCK_SKIP:
+ svq1_skip_block (current, previous, pitch, x, y);
+ break;
+
+ case SVQ1_BLOCK_INTER:
+ result = svq1_motion_inter_block (s, bitbuf, current, previous, pitch, motion, x, y);
+
+ if (result != 0)
+ {
+#ifdef DEBUG_SVQ1
+ av_log(s->avctx, AV_LOG_INFO, "Error in svq1_motion_inter_block %i\n",result);
+#endif
+ break;
+ }
+ result = svq1_decode_block_non_intra (bitbuf, current, pitch);
+ break;
+
+ case SVQ1_BLOCK_INTER_4V:
+ result = svq1_motion_inter_4v_block (s, bitbuf, current, previous, pitch, motion, x, y);
+
+ if (result != 0)
+ {
+#ifdef DEBUG_SVQ1
+ av_log(s->avctx, AV_LOG_INFO, "Error in svq1_motion_inter_4v_block %i\n",result);
+#endif
+ break;
+ }
+ result = svq1_decode_block_non_intra (bitbuf, current, pitch);
+ break;
+
+ case SVQ1_BLOCK_INTRA:
+ result = svq1_decode_block_intra (bitbuf, current, pitch);
+ break;
+ }
+
+ return result;
+}
+
+uint16_t ff_svq1_packet_checksum (const uint8_t *data, const int length, int value) {
+ int i;
+
+ for (i=0; i < length; i++) {
+ value = checksum_table[data[i] ^ (value >> 8)] ^ ((value & 0xFF) << 8);
+ }
+
+ return value;
+}
+
+static void svq1_parse_string (GetBitContext *bitbuf, uint8_t *out) {
+ uint8_t seed;
+ int i;
+
+ out[0] = get_bits (bitbuf, 8);
+
+ seed = string_table[out[0]];
+
+ for (i=1; i <= out[0]; i++) {
+ out[i] = get_bits (bitbuf, 8) ^ seed;
+ seed = string_table[out[i] ^ seed];
+ }
+}
+
+static int svq1_decode_frame_header (GetBitContext *bitbuf,MpegEncContext *s) {
+ int frame_size_code;
+ int temporal_reference;
+
+ temporal_reference = get_bits (bitbuf, 8);
+
+ /* frame type */
+ s->pict_type= get_bits (bitbuf, 2)+1;
+ if(s->pict_type==4)
+ return -1;
+
+ if (s->pict_type == FF_I_TYPE) {
+
+ /* unknown fields */
+ if (s->f_code == 0x50 || s->f_code == 0x60) {
+ int csum = get_bits (bitbuf, 16);
+
+ csum = ff_svq1_packet_checksum (bitbuf->buffer, bitbuf->size_in_bits>>3, csum);
+
+// av_log(s->avctx, AV_LOG_INFO, "%s checksum (%02x) for packet data\n",
+// (csum == 0) ? "correct" : "incorrect", csum);
+ }
+
+ if ((s->f_code ^ 0x10) >= 0x50) {
+ uint8_t msg[256];
+
+ svq1_parse_string (bitbuf, msg);
+
+ av_log(s->avctx, AV_LOG_INFO, "embedded message: \"%s\"\n", (char *) msg);
+ }
+
+ skip_bits (bitbuf, 2);
+ skip_bits (bitbuf, 2);
+ skip_bits1 (bitbuf);
+
+ /* load frame size */
+ frame_size_code = get_bits (bitbuf, 3);
+
+ if (frame_size_code == 7) {
+ /* load width, height (12 bits each) */
+ s->width = get_bits (bitbuf, 12);
+ s->height = get_bits (bitbuf, 12);
+
+ if (!s->width || !s->height)
+ return -1;
+ } else {
+ /* get width, height from table */
+ s->width = ff_svq1_frame_size_table[frame_size_code].width;
+ s->height = ff_svq1_frame_size_table[frame_size_code].height;
+ }
+ }
+
+ /* unknown fields */
+ if (get_bits1 (bitbuf) == 1) {
+ skip_bits1 (bitbuf); /* use packet checksum if (1) */
+ skip_bits1 (bitbuf); /* component checksums after image data if (1) */
+
+ if (get_bits (bitbuf, 2) != 0)
+ return -1;
+ }
+
+ if (get_bits1 (bitbuf) == 1) {
+ skip_bits1 (bitbuf);
+ skip_bits (bitbuf, 4);
+ skip_bits1 (bitbuf);
+ skip_bits (bitbuf, 2);
+
+ while (get_bits1 (bitbuf) == 1) {
+ skip_bits (bitbuf, 8);
+ }
+ }
+
+ return 0;
+}
+
+static int svq1_decode_frame(AVCodecContext *avctx,
+ void *data, int *data_size,
+ const uint8_t *buf, int buf_size)
+{
+ MpegEncContext *s=avctx->priv_data;
+ uint8_t *current, *previous;
+ int result, i, x, y, width, height;
+ AVFrame *pict = data;
+
+ /* initialize bit buffer */
+ init_get_bits(&s->gb,buf,buf_size*8);
+
+ /* decode frame header */
+ s->f_code = get_bits (&s->gb, 22);
+
+ if ((s->f_code & ~0x70) || !(s->f_code & 0x60))
+ return -1;
+
+ /* swap some header bytes (why?) */
+ if (s->f_code != 0x20) {
+ uint32_t *src = (uint32_t *) (buf + 4);
+
+ for (i=0; i < 4; i++) {
+ src[i] = ((src[i] << 16) | (src[i] >> 16)) ^ src[7 - i];
+ }
+ }
+
+ result = svq1_decode_frame_header (&s->gb, s);
+
+ if (result != 0)
+ {
+#ifdef DEBUG_SVQ1
+ av_log(s->avctx, AV_LOG_INFO, "Error in svq1_decode_frame_header %i\n",result);
+#endif
+ return result;
+ }
+
+ //FIXME this avoids some confusion for "B frames" without 2 references
+ //this should be removed after libavcodec can handle more flexible picture types & ordering
+ if(s->pict_type==FF_B_TYPE && s->last_picture_ptr==NULL) return buf_size;
+
+ if(avctx->hurry_up && s->pict_type==FF_B_TYPE) return buf_size;
+ if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==FF_B_TYPE)
+ ||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=FF_I_TYPE)
+ || avctx->skip_frame >= AVDISCARD_ALL)
+ return buf_size;
+
+ if(MPV_frame_start(s, avctx) < 0)
+ return -1;
+
+ /* decode y, u and v components */
+ for (i=0; i < 3; i++) {
+ int linesize;
+ if (i == 0) {
+ width = FFALIGN(s->width, 16);
+ height = FFALIGN(s->height, 16);
+ linesize= s->linesize;
+ } else {
+ if(s->flags&CODEC_FLAG_GRAY) break;
+ width = FFALIGN(s->width/4, 16);
+ height = FFALIGN(s->height/4, 16);
+ linesize= s->uvlinesize;
+ }
+
+ current = s->current_picture.data[i];
+
+ if(s->pict_type==FF_B_TYPE){
+ previous = s->next_picture.data[i];
+ }else{
+ previous = s->last_picture.data[i];
+ }
+
+ if (s->pict_type == FF_I_TYPE) {
+ /* keyframe */
+ for (y=0; y < height; y+=16) {
+ for (x=0; x < width; x+=16) {
+ result = svq1_decode_block_intra (&s->gb, &current[x], linesize);
+ if (result != 0)
+ {
+//#ifdef DEBUG_SVQ1
+ av_log(s->avctx, AV_LOG_INFO, "Error in svq1_decode_block %i (keyframe)\n",result);
+//#endif
+ return result;
+ }
+ }
+ current += 16*linesize;
+ }
+ } else {
+ #if __STDC_VERSION__ >= 199901L
+ svq1_pmv pmv[width/8+3];
+ #else
+ svq1_pmv *pmv=_alloca(sizeof(svq1_pmv)*(width/8+3));
+ #endif
+ /* delta frame */
+ memset (pmv, 0, ((width / 8) + 3) * sizeof(svq1_pmv));
+
+ for (y=0; y < height; y+=16) {
+ for (x=0; x < width; x+=16) {
+ result = svq1_decode_delta_block (s, &s->gb, &current[x], previous,
+ linesize, pmv, x, y);
+ if (result != 0)
+ {
+#ifdef DEBUG_SVQ1
+ av_log(s->avctx, AV_LOG_INFO, "Error in svq1_decode_delta_block %i\n",result);
+#endif
+ return result;
+ }
+ }
+
+ pmv[0].x =
+ pmv[0].y = 0;
+
+ current += 16*linesize;
+ }
+ }
+ }
+
+ *pict = *(AVFrame*)&s->current_picture;
+
+
+ MPV_frame_end(s);
+
+ *data_size=sizeof(AVFrame);
+ return buf_size;
+}
+
+static av_cold int svq1_decode_init(AVCodecContext *avctx)
+{
+ MpegEncContext *s = avctx->priv_data;
+ int i;
+ int offset = 0;
+
+ MPV_decode_defaults(s);
+
+ s->avctx = avctx;
+ s->width = (avctx->width+3)&~3;
+ s->height = (avctx->height+3)&~3;
+ s->codec_id= avctx->codec->id;
+ avctx->pix_fmt = PIX_FMT_YUV410P;
+ avctx->has_b_frames= 1; // not true, but DP frames and these behave like unidirectional b frames
+ s->flags= avctx->flags;
+ if (MPV_common_init(s) < 0) return -1;
+
+ INIT_VLC_STATIC(&svq1_block_type, 2, 4,
+ &ff_svq1_block_type_vlc[0][1], 2, 1,
+ &ff_svq1_block_type_vlc[0][0], 2, 1, 6);
+
+ INIT_VLC_STATIC(&svq1_motion_component, 7, 33,
+ &mvtab[0][1], 2, 1,
+ &mvtab[0][0], 2, 1, 176);
+
+ for (i = 0; i < 6; i++) {
+ static const uint8_t sizes[2][6] = {{14, 10, 14, 18, 16, 18}, {10, 10, 14, 14, 14, 16}};
+ static VLC_TYPE table[168][2];
+ svq1_intra_multistage[i].table = &table[offset];
+ svq1_intra_multistage[i].table_allocated = sizes[0][i];
+ offset += sizes[0][i];
+ init_vlc(&svq1_intra_multistage[i], 3, 8,
+ &ff_svq1_intra_multistage_vlc[i][0][1], 2, 1,
+ &ff_svq1_intra_multistage_vlc[i][0][0], 2, 1, INIT_VLC_USE_NEW_STATIC);
+ svq1_inter_multistage[i].table = &table[offset];
+ svq1_inter_multistage[i].table_allocated = sizes[1][i];
+ offset += sizes[1][i];
+ init_vlc(&svq1_inter_multistage[i], 3, 8,
+ &ff_svq1_inter_multistage_vlc[i][0][1], 2, 1,
+ &ff_svq1_inter_multistage_vlc[i][0][0], 2, 1, INIT_VLC_USE_NEW_STATIC);
+ }
+
+ INIT_VLC_STATIC(&svq1_intra_mean, 8, 256,
+ &ff_svq1_intra_mean_vlc[0][1], 4, 2,
+ &ff_svq1_intra_mean_vlc[0][0], 4, 2, 632);
+
+ INIT_VLC_STATIC(&svq1_inter_mean, 9, 512,
+ &ff_svq1_inter_mean_vlc[0][1], 4, 2,
+ &ff_svq1_inter_mean_vlc[0][0], 4, 2, 1434);
+
+ return 0;
+}
+
+static av_cold int svq1_decode_end(AVCodecContext *avctx)
+{
+ MpegEncContext *s = avctx->priv_data;
+
+ MPV_common_end(s);
+ return 0;
+}
+
+
+AVCodec svq1_decoder = {
+ "svq1",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_SVQ1,
+ sizeof(MpegEncContext),
+ /*.init=*/svq1_decode_init,
+ /*.encode=*/NULL,
+ /*.close=*/svq1_decode_end,
+ /*.decode=*/svq1_decode_frame,
+ /*.capabilities=*/CODEC_CAP_DR1,
+ /*.next=*/NULL,
+ /*.flush=*/ff_mpeg_flush,
+ /*.supported_framerates = */NULL,
+#if __STDC_VERSION__ >= 199901L
+ .pix_fmts=(const enum PixelFormat[]){PIX_FMT_YUV410P, PIX_FMT_NONE},
+#else
+ /*.pix_fmts = */NULL,
+#endif
+ /*.long_name= */NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 1 / Sorenson Video 1 / SVQ1"),
+};
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/svq3.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/svq3.c
new file mode 100644
index 000000000..36d263f39
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/svq3.c
@@ -0,0 +1,1084 @@
+/*
+ * Copyright (c) 2003 The FFmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * How to use this decoder:
+ * SVQ3 data is transported within Apple Quicktime files. Quicktime files
+ * have stsd atoms to describe media trak properties. A stsd atom for a
+ * video trak contains 1 or more ImageDescription atoms. These atoms begin
+ * with the 4-byte length of the atom followed by the codec fourcc. Some
+ * decoders need information in this atom to operate correctly. Such
+ * is the case with SVQ3. In order to get the best use out of this decoder,
+ * the calling app must make the SVQ3 ImageDescription atom available
+ * via the AVCodecContext's extradata[_size] field:
+ *
+ * AVCodecContext.extradata = pointer to ImageDescription, first characters
+ * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
+ * AVCodecContext.extradata_size = size of ImageDescription atom memory
+ * buffer (which will be the same as the ImageDescription atom size field
+ * from the QT file, minus 4 bytes since the length is missing)
+ *
+ * You will know you have these parameters passed correctly when the decoder
+ * correctly decodes this file:
+ * http://samples.mplayerhq.hu/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
+ */
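+/* A minimal sketch of the hand-off described above, assuming the caller holds
+ * the raw ImageDescription atom in a hypothetical buffer "imagedesc" of
+ * "imagedesc_size" bytes (both names are illustrative, not from this file):
+ *
+ *     avctx->extradata_size = imagedesc_size - 4;
+ *     avctx->extradata      = av_mallocz(avctx->extradata_size +
+ *                                        FF_INPUT_BUFFER_PADDING_SIZE);
+ *     memcpy(avctx->extradata, imagedesc + 4, avctx->extradata_size);
+ *
+ * i.e. the leading 4-byte atom length is stripped so that extradata starts
+ * with the 'SVQ3' fourcc, as required above.
+ */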
+#include "internal.h"
+#include "dsputil.h"
+#include "avcodec.h"
+#include "mpegvideo.h"
+#include "h264.h"
+
+#include "h264data.h" //FIXME FIXME FIXME
+
+#include "h264_mvpred.h"
+#include "golomb.h"
+#include "rectangle.h"
+//#include "vdpau_internal.h"
+
+#if CONFIG_ZLIB
+#include <zlib.h>
+#endif
+
+#include "svq1.h"
+
+/**
+ * @file libavcodec/svq3.c
+ * svq3 decoder.
+ */
+
+#define FULLPEL_MODE 1
+#define HALFPEL_MODE 2
+#define THIRDPEL_MODE 3
+#define PREDICT_MODE 4
+
+/* dual scan (from some older h264 draft)
+ o-->o-->o   o
+         |  /|
+ o   o   o / o
+ | / |   |/  |
+ o   o   o   o
+   /
+ o-->o-->o-->o
+*/
+static const uint8_t svq3_scan[16] = {
+ 0+0*4, 1+0*4, 2+0*4, 2+1*4,
+ 2+2*4, 3+0*4, 3+1*4, 3+2*4,
+ 0+1*4, 0+2*4, 1+1*4, 1+2*4,
+ 0+3*4, 1+3*4, 2+3*4, 3+3*4,
+};
+
+static const uint8_t svq3_pred_0[25][2] = {
+ { 0, 0 },
+ { 1, 0 }, { 0, 1 },
+ { 0, 2 }, { 1, 1 }, { 2, 0 },
+ { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
+ { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
+ { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
+ { 2, 4 }, { 3, 3 }, { 4, 2 },
+ { 4, 3 }, { 3, 4 },
+ { 4, 4 }
+};
+
+static const int8_t svq3_pred_1[6][6][5] = {
+ { { 2,-1,-1,-1,-1 }, { 2, 1,-1,-1,-1 }, { 1, 2,-1,-1,-1 },
+ { 2, 1,-1,-1,-1 }, { 1, 2,-1,-1,-1 }, { 1, 2,-1,-1,-1 } },
+ { { 0, 2,-1,-1,-1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
+ { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
+ { { 2, 0,-1,-1,-1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
+ { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
+ { { 2, 0,-1,-1,-1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
+ { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
+ { { 0, 2,-1,-1,-1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
+ { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
+ { { 0, 2,-1,-1,-1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
+ { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
+};
+
+static const struct { uint8_t run; uint8_t level; } svq3_dct_tables[2][16] = {
+ { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
+ { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
+ { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
+ { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
+};
+
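+/* per-qp dequantization scale; as in H.264 the factor roughly doubles every
+ * six qp steps (e.g. 17435 at index 13 vs. 34870 at index 19) */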
+static const uint32_t svq3_dequant_coeff[32] = {
+ 3881, 4351, 4890, 5481, 6154, 6914, 7761, 8718,
+ 9781, 10987, 12339, 13828, 15523, 17435, 19561, 21873,
+ 24552, 27656, 30847, 34870, 38807, 43747, 49103, 54683,
+ 61694, 68745, 77615, 89113,100253,109366,126635,141533
+};
+
+
+void ff_svq3_luma_dc_dequant_idct_c(DCTELEM *block, int qp)
+{
+ const int qmul = svq3_dequant_coeff[qp];
+#define stride 16
+ int i;
+ int temp[16];
+ static const int x_offset[4] = {0, 1*stride, 4* stride, 5*stride};
+ static const int y_offset[4] = {0, 2*stride, 8* stride, 10*stride};
+
+ for (i = 0; i < 4; i++){
+ const int offset = y_offset[i];
+ const int z0 = 13*(block[offset+stride*0] + block[offset+stride*4]);
+ const int z1 = 13*(block[offset+stride*0] - block[offset+stride*4]);
+ const int z2 = 7* block[offset+stride*1] - 17*block[offset+stride*5];
+ const int z3 = 17* block[offset+stride*1] + 7*block[offset+stride*5];
+
+ temp[4*i+0] = z0+z3;
+ temp[4*i+1] = z1+z2;
+ temp[4*i+2] = z1-z2;
+ temp[4*i+3] = z0-z3;
+ }
+
+ for (i = 0; i < 4; i++){
+ const int offset = x_offset[i];
+ const int z0 = 13*(temp[4*0+i] + temp[4*2+i]);
+ const int z1 = 13*(temp[4*0+i] - temp[4*2+i]);
+ const int z2 = 7* temp[4*1+i] - 17*temp[4*3+i];
+ const int z3 = 17* temp[4*1+i] + 7*temp[4*3+i];
+
+ block[stride*0 +offset] = ((z0 + z3)*qmul + 0x80000) >> 20;
+ block[stride*2 +offset] = ((z1 + z2)*qmul + 0x80000) >> 20;
+ block[stride*8 +offset] = ((z1 - z2)*qmul + 0x80000) >> 20;
+ block[stride*10+offset] = ((z0 - z3)*qmul + 0x80000) >> 20;
+ }
+}
+#undef stride
+
+void ff_svq3_add_idct_c(uint8_t *dst, DCTELEM *block, int stride, int qp,
+ int dc)
+{
+ const int qmul = svq3_dequant_coeff[qp];
+ int i;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+
+ if (dc) {
+ dc = 13*13*((dc == 1) ? 1538*block[0] : ((qmul*(block[0] >> 3)) / 2));
+ block[0] = 0;
+ }
+
+ for (i = 0; i < 4; i++) {
+ const int z0 = 13*(block[0 + 4*i] + block[2 + 4*i]);
+ const int z1 = 13*(block[0 + 4*i] - block[2 + 4*i]);
+ const int z2 = 7* block[1 + 4*i] - 17*block[3 + 4*i];
+ const int z3 = 17* block[1 + 4*i] + 7*block[3 + 4*i];
+
+ block[0 + 4*i] = z0 + z3;
+ block[1 + 4*i] = z1 + z2;
+ block[2 + 4*i] = z1 - z2;
+ block[3 + 4*i] = z0 - z3;
+ }
+
+ for (i = 0; i < 4; i++) {
+ const int z0 = 13*(block[i + 4*0] + block[i + 4*2]);
+ const int z1 = 13*(block[i + 4*0] - block[i + 4*2]);
+ const int z2 = 7* block[i + 4*1] - 17*block[i + 4*3];
+ const int z3 = 17* block[i + 4*1] + 7*block[i + 4*3];
+ const int rr = (dc + 0x80000);
+
+ dst[i + stride*0] = cm[ dst[i + stride*0] + (((z0 + z3)*qmul + rr) >> 20) ];
+ dst[i + stride*1] = cm[ dst[i + stride*1] + (((z1 + z2)*qmul + rr) >> 20) ];
+ dst[i + stride*2] = cm[ dst[i + stride*2] + (((z1 - z2)*qmul + rr) >> 20) ];
+ dst[i + stride*3] = cm[ dst[i + stride*3] + (((z0 - z3)*qmul + rr) >> 20) ];
+ }
+}
+
+static inline int svq3_decode_block(GetBitContext *gb, DCTELEM *block,
+ int index, const int type)
+{
+ static const uint8_t *const scan_patterns[4] =
+ { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };
+
+ int run, level, sign, vlc, limit;
+ const int intra = (3 * type) >> 2;
+ const uint8_t *const scan = scan_patterns[type];
+
+ for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
+ for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {
+
+ if (vlc == INVALID_VLC)
+ return -1;
+
+ sign = (vlc & 0x1) - 1;
+ vlc = (vlc + 1) >> 1;
+
+ if (type == 3) {
+ if (vlc < 3) {
+ run = 0;
+ level = vlc;
+ } else if (vlc < 4) {
+ run = 1;
+ level = 1;
+ } else {
+ run = (vlc & 0x3);
+ level = ((vlc + 9) >> 2) - run;
+ }
+ } else {
+ if (vlc < 16) {
+ run = svq3_dct_tables[intra][vlc].run;
+ level = svq3_dct_tables[intra][vlc].level;
+ } else if (intra) {
+ run = (vlc & 0x7);
+ level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
+ } else {
+ run = (vlc & 0xF);
+ level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
+ }
+ }
+
+ if ((index += run) >= limit)
+ return -1;
+
+ block[scan[index]] = (level ^ sign) - sign;
+ }
+
+ if (type != 2) {
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static inline void svq3_mc_dir_part(MpegEncContext *s,
+ int x, int y, int width, int height,
+ int mx, int my, int dxy,
+ int thirdpel, int dir, int avg)
+{
+ const Picture *pic = (dir == 0) ? &s->last_picture : &s->next_picture;
+ uint8_t *src, *dest;
+ int i, emu = 0;
+ int blocksize = 2 - (width>>3); //16->0, 8->1, 4->2
+
+ mx += x;
+ my += y;
+
+ if (mx < 0 || mx >= (s->h_edge_pos - width - 1) ||
+ my < 0 || my >= (s->v_edge_pos - height - 1)) {
+
+ if ((s->flags & CODEC_FLAG_EMU_EDGE)) {
+ emu = 1;
+ }
+
+ mx = av_clip (mx, -16, (s->h_edge_pos - width + 15));
+ my = av_clip (my, -16, (s->v_edge_pos - height + 15));
+ }
+
+ /* form component predictions */
+ dest = s->current_picture.data[0] + x + y*s->linesize;
+ src = pic->data[0] + mx + my*s->linesize;
+
+ if (emu) {
+ ff_emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, (width + 1), (height + 1),
+ mx, my, s->h_edge_pos, s->v_edge_pos);
+ src = s->edge_emu_buffer;
+ }
+ if (thirdpel)
+ (avg ? s->dsp.avg_tpel_pixels_tab : s->dsp.put_tpel_pixels_tab)[dxy](dest, src, s->linesize, width, height);
+ else
+ (avg ? s->dsp.avg_pixels_tab : s->dsp.put_pixels_tab)[blocksize][dxy](dest, src, s->linesize, height);
+
+ if (!(s->flags & CODEC_FLAG_GRAY)) {
+ mx = (mx + (mx < (int) x)) >> 1;
+ my = (my + (my < (int) y)) >> 1;
+ width = (width >> 1);
+ height = (height >> 1);
+ blocksize++;
+
+ for (i = 1; i < 3; i++) {
+ dest = s->current_picture.data[i] + (x >> 1) + (y >> 1)*s->uvlinesize;
+ src = pic->data[i] + mx + my*s->uvlinesize;
+
+ if (emu) {
+ ff_emulated_edge_mc(s->edge_emu_buffer, src, s->uvlinesize, (width + 1), (height + 1),
+ mx, my, (s->h_edge_pos >> 1), (s->v_edge_pos >> 1));
+ src = s->edge_emu_buffer;
+ }
+ if (thirdpel)
+ (avg ? s->dsp.avg_tpel_pixels_tab : s->dsp.put_tpel_pixels_tab)[dxy](dest, src, s->uvlinesize, width, height);
+ else
+ (avg ? s->dsp.avg_pixels_tab : s->dsp.put_pixels_tab)[blocksize][dxy](dest, src, s->uvlinesize, height);
+ }
+ }
+}
+
+static inline int svq3_mc_dir(H264Context *h, int size, int mode, int dir,
+ int avg)
+{
+ int i, j, k, mx, my, dx, dy, x, y;
+ MpegEncContext *const s = (MpegEncContext *) h;
+ const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
+ const int part_height = 16 >> ((unsigned) (size + 1) / 3);
+ const int extra_width = (mode == PREDICT_MODE) ? -16*6 : 0;
+ const int h_edge_pos = 6*(s->h_edge_pos - part_width ) - extra_width;
+ const int v_edge_pos = 6*(s->v_edge_pos - part_height) - extra_width;
+
+ for (i = 0; i < 16; i += part_height) {
+ for (j = 0; j < 16; j += part_width) {
+ const int b_xy = (4*s->mb_x + (j >> 2)) + (4*s->mb_y + (i >> 2))*h->b_stride;
+ int dxy;
+ x = 16*s->mb_x + j;
+ y = 16*s->mb_y + i;
+ k = ((j >> 2) & 1) + ((i >> 1) & 2) + ((j >> 1) & 4) + (i & 8);
+
+ if (mode != PREDICT_MODE) {
+ pred_motion(h, k, (part_width >> 2), dir, 1, &mx, &my);
+ } else {
+ mx = s->next_picture.motion_val[0][b_xy][0]<<1;
+ my = s->next_picture.motion_val[0][b_xy][1]<<1;
+
+ if (dir == 0) {
+ mx = ((mx * h->frame_num_offset) / h->prev_frame_num_offset + 1) >> 1;
+ my = ((my * h->frame_num_offset) / h->prev_frame_num_offset + 1) >> 1;
+ } else {
+ mx = ((mx * (h->frame_num_offset - h->prev_frame_num_offset)) / h->prev_frame_num_offset + 1) >> 1;
+ my = ((my * (h->frame_num_offset - h->prev_frame_num_offset)) / h->prev_frame_num_offset + 1) >> 1;
+ }
+ }
+
+ /* clip motion vector prediction to frame border */
+ mx = av_clip(mx, extra_width - 6*x, h_edge_pos - 6*x);
+ my = av_clip(my, extra_width - 6*y, v_edge_pos - 6*y);
+
+ /* get (optional) motion vector differential */
+ if (mode == PREDICT_MODE) {
+ dx = dy = 0;
+ } else {
+ dy = svq3_get_se_golomb(&s->gb);
+ dx = svq3_get_se_golomb(&s->gb);
+
+ if (dx == INVALID_VLC || dy == INVALID_VLC) {
+ av_log(h->s.avctx, AV_LOG_ERROR, "invalid MV vlc\n");
+ return -1;
+ }
+ }
+
+ /* compute motion vector */
+ if (mode == THIRDPEL_MODE) {
+ int fx, fy;
+ mx = ((mx + 1)>>1) + dx;
+ my = ((my + 1)>>1) + dy;
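+                /* The 0x3000 bias (== 3 * 0x1000) keeps the operand non-negative, so
+                 * the unsigned division by 3 truncates like a floor division; removing
+                 * the bias afterwards gives fx == floor(mx/3) for any mx >= -0x3000. */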
+ fx = ((unsigned)(mx + 0x3000))/3 - 0x1000;
+ fy = ((unsigned)(my + 0x3000))/3 - 0x1000;
+ dxy = (mx - 3*fx) + 4*(my - 3*fy);
+
+ svq3_mc_dir_part(s, x, y, part_width, part_height, fx, fy, dxy, 1, dir, avg);
+ mx += mx;
+ my += my;
+ } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
+ mx = ((unsigned)(mx + 1 + 0x3000))/3 + dx - 0x1000;
+ my = ((unsigned)(my + 1 + 0x3000))/3 + dy - 0x1000;
+ dxy = (mx&1) + 2*(my&1);
+
+ svq3_mc_dir_part(s, x, y, part_width, part_height, mx>>1, my>>1, dxy, 0, dir, avg);
+ mx *= 3;
+ my *= 3;
+ } else {
+ mx = ((unsigned)(mx + 3 + 0x6000))/6 + dx - 0x1000;
+ my = ((unsigned)(my + 3 + 0x6000))/6 + dy - 0x1000;
+
+ svq3_mc_dir_part(s, x, y, part_width, part_height, mx, my, 0, 0, dir, avg);
+ mx *= 6;
+ my *= 6;
+ }
+
+ /* update mv_cache */
+ if (mode != PREDICT_MODE) {
+ int32_t mv = pack16to32(mx,my);
+
+ if (part_height == 8 && i < 8) {
+ *(int32_t *) h->mv_cache[dir][scan8[k] + 1*8] = mv;
+
+ if (part_width == 8 && j < 8) {
+ *(int32_t *) h->mv_cache[dir][scan8[k] + 1 + 1*8] = mv;
+ }
+ }
+ if (part_width == 8 && j < 8) {
+ *(int32_t *) h->mv_cache[dir][scan8[k] + 1] = mv;
+ }
+ if (part_width == 4 || part_height == 4) {
+ *(int32_t *) h->mv_cache[dir][scan8[k]] = mv;
+ }
+ }
+
+ /* write back motion vectors */
+ fill_rectangle(s->current_picture.motion_val[dir][b_xy], part_width>>2, part_height>>2, h->b_stride, pack16to32(mx,my), 4);
+ }
+ }
+
+ return 0;
+}
+
+static int svq3_decode_mb(H264Context *h, unsigned int mb_type)
+{
+ int i, j, k, m, dir, mode;
+ int cbp = 0;
+ uint32_t vlc;
+ int8_t *top, *left;
+ MpegEncContext *const s = (MpegEncContext *) h;
+ const int mb_xy = h->mb_xy;
+ const int b_xy = 4*s->mb_x + 4*s->mb_y*h->b_stride;
+
+ h->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
+ h->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
+ h->topright_samples_available = 0xFFFF;
+
+ if (mb_type == 0) { /* SKIP */
+ if (s->pict_type == FF_P_TYPE || s->next_picture.mb_type[mb_xy] == -1) {
+ svq3_mc_dir_part(s, 16*s->mb_x, 16*s->mb_y, 16, 16, 0, 0, 0, 0, 0, 0);
+
+ if (s->pict_type == FF_B_TYPE) {
+ svq3_mc_dir_part(s, 16*s->mb_x, 16*s->mb_y, 16, 16, 0, 0, 0, 0, 1, 1);
+ }
+
+ mb_type = MB_TYPE_SKIP;
+ } else {
+ mb_type = FFMIN(s->next_picture.mb_type[mb_xy], 6);
+ if (svq3_mc_dir(h, mb_type, PREDICT_MODE, 0, 0) < 0)
+ return -1;
+ if (svq3_mc_dir(h, mb_type, PREDICT_MODE, 1, 1) < 0)
+ return -1;
+
+ mb_type = MB_TYPE_16x16;
+ }
+ } else if (mb_type < 8) { /* INTER */
+ if (h->thirdpel_flag && h->halfpel_flag == !get_bits1 (&s->gb)) {
+ mode = THIRDPEL_MODE;
+ } else if (h->halfpel_flag && h->thirdpel_flag == !get_bits1 (&s->gb)) {
+ mode = HALFPEL_MODE;
+ } else {
+ mode = FULLPEL_MODE;
+ }
+
+ /* fill caches */
+ /* note ref_cache should contain here:
+ ????????
+ ???11111
+ N??11111
+ N??11111
+ N??11111
+ */
+
+ for (m = 0; m < 2; m++) {
+ if (s->mb_x > 0 && h->intra4x4_pred_mode[mb_xy - 1][0] != -1) {
+ for (i = 0; i < 4; i++) {
+ *(uint32_t *) h->mv_cache[m][scan8[0] - 1 + i*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - 1 + i*h->b_stride];
+ }
+ } else {
+ for (i = 0; i < 4; i++) {
+ *(uint32_t *) h->mv_cache[m][scan8[0] - 1 + i*8] = 0;
+ }
+ }
+ if (s->mb_y > 0) {
+ memcpy(h->mv_cache[m][scan8[0] - 1*8], s->current_picture.motion_val[m][b_xy - h->b_stride], 4*2*sizeof(int16_t));
+ memset(&h->ref_cache[m][scan8[0] - 1*8], (h->intra4x4_pred_mode[mb_xy - s->mb_stride][4] == -1) ? PART_NOT_AVAILABLE : 1, 4);
+
+ if (s->mb_x < (s->mb_width - 1)) {
+ *(uint32_t *) h->mv_cache[m][scan8[0] + 4 - 1*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - h->b_stride + 4];
+ h->ref_cache[m][scan8[0] + 4 - 1*8] =
+ (h->intra4x4_pred_mode[mb_xy - s->mb_stride + 1][0] == -1 ||
+ h->intra4x4_pred_mode[mb_xy - s->mb_stride ][4] == -1) ? PART_NOT_AVAILABLE : 1;
+ }else
+ h->ref_cache[m][scan8[0] + 4 - 1*8] = PART_NOT_AVAILABLE;
+ if (s->mb_x > 0) {
+ *(uint32_t *) h->mv_cache[m][scan8[0] - 1 - 1*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - h->b_stride - 1];
+ h->ref_cache[m][scan8[0] - 1 - 1*8] = (h->intra4x4_pred_mode[mb_xy - s->mb_stride - 1][3] == -1) ? PART_NOT_AVAILABLE : 1;
+ }else
+ h->ref_cache[m][scan8[0] - 1 - 1*8] = PART_NOT_AVAILABLE;
+ }else
+ memset(&h->ref_cache[m][scan8[0] - 1*8 - 1], PART_NOT_AVAILABLE, 8);
+
+ if (s->pict_type != FF_B_TYPE)
+ break;
+ }
+
+ /* decode motion vector(s) and form prediction(s) */
+ if (s->pict_type == FF_P_TYPE) {
+ if (svq3_mc_dir(h, (mb_type - 1), mode, 0, 0) < 0)
+ return -1;
+ } else { /* FF_B_TYPE */
+ if (mb_type != 2) {
+ if (svq3_mc_dir(h, 0, mode, 0, 0) < 0)
+ return -1;
+ } else {
+ for (i = 0; i < 4; i++) {
+ memset(s->current_picture.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
+ }
+ }
+ if (mb_type != 1) {
+ if (svq3_mc_dir(h, 0, mode, 1, (mb_type == 3)) < 0)
+ return -1;
+ } else {
+ for (i = 0; i < 4; i++) {
+ memset(s->current_picture.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
+ }
+ }
+ }
+
+ mb_type = MB_TYPE_16x16;
+ } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
+ memset(h->intra4x4_pred_mode_cache, -1, 8*5*sizeof(int8_t));
+
+ if (mb_type == 8) {
+ if (s->mb_x > 0) {
+ for (i = 0; i < 4; i++) {
+ h->intra4x4_pred_mode_cache[scan8[0] - 1 + i*8] = h->intra4x4_pred_mode[mb_xy - 1][i];
+ }
+ if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1) {
+ h->left_samples_available = 0x5F5F;
+ }
+ }
+ if (s->mb_y > 0) {
+ h->intra4x4_pred_mode_cache[4+8*0] = h->intra4x4_pred_mode[mb_xy - s->mb_stride][4];
+ h->intra4x4_pred_mode_cache[5+8*0] = h->intra4x4_pred_mode[mb_xy - s->mb_stride][5];
+ h->intra4x4_pred_mode_cache[6+8*0] = h->intra4x4_pred_mode[mb_xy - s->mb_stride][6];
+ h->intra4x4_pred_mode_cache[7+8*0] = h->intra4x4_pred_mode[mb_xy - s->mb_stride][3];
+
+ if (h->intra4x4_pred_mode_cache[4+8*0] == -1) {
+ h->top_samples_available = 0x33FF;
+ }
+ }
+
+ /* decode prediction codes for luma blocks */
+ for (i = 0; i < 16; i+=2) {
+ vlc = svq3_get_ue_golomb(&s->gb);
+
+ if (vlc >= 25){
+ av_log(h->s.avctx, AV_LOG_ERROR, "luma prediction:%d\n", vlc);
+ return -1;
+ }
+
+ left = &h->intra4x4_pred_mode_cache[scan8[i] - 1];
+ top = &h->intra4x4_pred_mode_cache[scan8[i] - 8];
+
+ left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
+ left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
+
+ if (left[1] == -1 || left[2] == -1){
+ av_log(h->s.avctx, AV_LOG_ERROR, "weird prediction\n");
+ return -1;
+ }
+ }
+ } else { /* mb_type == 33, DC_128_PRED block type */
+ for (i = 0; i < 4; i++) {
+ memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8*i], DC_PRED, 4);
+ }
+ }
+
+ ff_h264_write_back_intra_pred_mode(h);
+
+ if (mb_type == 8) {
+ ff_h264_check_intra4x4_pred_mode(h);
+
+ h->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
+ h->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
+ } else {
+ for (i = 0; i < 4; i++) {
+ memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8*i], DC_128_PRED, 4);
+ }
+
+ h->top_samples_available = 0x33FF;
+ h->left_samples_available = 0x5F5F;
+ }
+
+ mb_type = MB_TYPE_INTRA4x4;
+ } else { /* INTRA16x16 */
+ dir = i_mb_type_info[mb_type - 8].pred_mode;
+ dir = (dir >> 1) ^ 3*(dir & 1) ^ 1;
+
+ if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir)) == -1){
+ av_log(h->s.avctx, AV_LOG_ERROR, "check_intra_pred_mode = -1\n");
+ return -1;
+ }
+
+ cbp = i_mb_type_info[mb_type - 8].cbp;
+ mb_type = MB_TYPE_INTRA16x16;
+ }
+
+ if (!IS_INTER(mb_type) && s->pict_type != FF_I_TYPE) {
+ for (i = 0; i < 4; i++) {
+ memset(s->current_picture.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
+ }
+ if (s->pict_type == FF_B_TYPE) {
+ for (i = 0; i < 4; i++) {
+ memset(s->current_picture.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
+ }
+ }
+ }
+ if (!IS_INTRA4x4(mb_type)) {
+ memset(h->intra4x4_pred_mode[mb_xy], DC_PRED, 8);
+ }
+ if (!IS_SKIP(mb_type) || s->pict_type == FF_B_TYPE) {
+ memset(h->non_zero_count_cache + 8, 0, 4*9*sizeof(uint8_t));
+ s->dsp.clear_blocks(h->mb);
+ }
+
+ if (!IS_INTRA16x16(mb_type) && (!IS_SKIP(mb_type) || s->pict_type == FF_B_TYPE)) {
+ if ((vlc = svq3_get_ue_golomb(&s->gb)) >= 48){
+ av_log(h->s.avctx, AV_LOG_ERROR, "cbp_vlc=%d\n", vlc);
+ return -1;
+ }
+
+ cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc] : golomb_to_inter_cbp[vlc];
+ }
+ if (IS_INTRA16x16(mb_type) || (s->pict_type != FF_I_TYPE && s->adaptive_quant && cbp)) {
+ s->qscale += svq3_get_se_golomb(&s->gb);
+
+ if (s->qscale > 31){
+ av_log(h->s.avctx, AV_LOG_ERROR, "qscale:%d\n", s->qscale);
+ return -1;
+ }
+ }
+ if (IS_INTRA16x16(mb_type)) {
+ if (svq3_decode_block(&s->gb, h->mb, 0, 0)){
+ av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding intra luma dc\n");
+ return -1;
+ }
+ }
+
+ if (cbp) {
+ const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
+ const int type = ((s->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
+
+ for (i = 0; i < 4; i++) {
+ if ((cbp & (1 << i))) {
+ for (j = 0; j < 4; j++) {
+ k = index ? ((j&1) + 2*(i&1) + 2*(j&2) + 4*(i&2)) : (4*i + j);
+ h->non_zero_count_cache[ scan8[k] ] = 1;
+
+ if (svq3_decode_block(&s->gb, &h->mb[16*k], index, type)){
+ av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding block\n");
+ return -1;
+ }
+ }
+ }
+ }
+
+ if ((cbp & 0x30)) {
+ for (i = 0; i < 2; ++i) {
+ if (svq3_decode_block(&s->gb, &h->mb[16*(16 + 4*i)], 0, 3)){
+ av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding chroma dc block\n");
+ return -1;
+ }
+ }
+
+ if ((cbp & 0x20)) {
+ for (i = 0; i < 8; i++) {
+ h->non_zero_count_cache[ scan8[16+i] ] = 1;
+
+ if (svq3_decode_block(&s->gb, &h->mb[16*(16 + i)], 1, 1)){
+ av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding chroma ac block\n");
+ return -1;
+ }
+ }
+ }
+ }
+ }
+
+ h->cbp= cbp;
+ s->current_picture.mb_type[mb_xy] = mb_type;
+
+ if (IS_INTRA(mb_type)) {
+ h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8);
+ }
+
+ return 0;
+}
+
+static int svq3_decode_slice_header(H264Context *h)
+{
+ MpegEncContext *const s = (MpegEncContext *) h;
+ const int mb_xy = h->mb_xy;
+ int i, header;
+
+ header = get_bits(&s->gb, 8);
+
+ if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
+ /* TODO: what? */
+ av_log(h->s.avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
+ return -1;
+ } else {
+ int length = (header >> 5) & 3;
+
+ h->next_slice_index = get_bits_count(&s->gb) + 8*show_bits(&s->gb, 8*length) + 8*length;
+
+ if (h->next_slice_index > s->gb.size_in_bits) {
+ av_log(h->s.avctx, AV_LOG_ERROR, "slice after bitstream end\n");
+ return -1;
+ }
+
+ s->gb.size_in_bits = h->next_slice_index - 8*(length - 1);
+ skip_bits(&s->gb, 8);
+
+ if (h->svq3_watermark_key) {
+ uint32_t header = AV_RL32(&s->gb.buffer[(get_bits_count(&s->gb)>>3)+1]);
+ AV_WL32(&s->gb.buffer[(get_bits_count(&s->gb)>>3)+1], header ^ h->svq3_watermark_key);
+ }
+ if (length > 0) {
+ memcpy((uint8_t *) &s->gb.buffer[get_bits_count(&s->gb) >> 3],
+ &s->gb.buffer[s->gb.size_in_bits >> 3], (length - 1));
+ }
+ skip_bits_long(&s->gb, 0);
+ }
+
+ if ((i = svq3_get_ue_golomb(&s->gb)) == INVALID_VLC || i >= 3){
+ av_log(h->s.avctx, AV_LOG_ERROR, "illegal slice type %d \n", i);
+ return -1;
+ }
+
+ h->slice_type = golomb_to_pict_type[i];
+
+ if ((header & 0x9F) == 2) {
+ i = (s->mb_num < 64) ? 6 : (1 + av_log2 (s->mb_num - 1));
+ s->mb_skip_run = get_bits(&s->gb, i) - (s->mb_x + (s->mb_y * s->mb_width));
+ } else {
+ skip_bits1(&s->gb);
+ s->mb_skip_run = 0;
+ }
+
+ h->slice_num = get_bits(&s->gb, 8);
+ s->qscale = get_bits(&s->gb, 5);
+ s->adaptive_quant = get_bits1(&s->gb);
+
+ /* unknown fields */
+ skip_bits1(&s->gb);
+
+ if (h->unknown_svq3_flag) {
+ skip_bits1(&s->gb);
+ }
+
+ skip_bits1(&s->gb);
+ skip_bits(&s->gb, 2);
+
+ while (get_bits1(&s->gb)) {
+ skip_bits(&s->gb, 8);
+ }
+
+ /* reset intra predictors and invalidate motion vector references */
+ if (s->mb_x > 0) {
+ memset(h->intra4x4_pred_mode[mb_xy - 1], -1, 4*sizeof(int8_t));
+ memset(h->intra4x4_pred_mode[mb_xy - s->mb_x], -1, 8*sizeof(int8_t)*s->mb_x);
+ }
+ if (s->mb_y > 0) {
+ memset(h->intra4x4_pred_mode[mb_xy - s->mb_stride], -1, 8*sizeof(int8_t)*(s->mb_width - s->mb_x));
+
+ if (s->mb_x > 0) {
+ h->intra4x4_pred_mode[mb_xy - s->mb_stride - 1][3] = -1;
+ }
+ }
+
+ return 0;
+}
+
+static av_cold int svq3_decode_init(AVCodecContext *avctx)
+{
+ MpegEncContext *const s = avctx->priv_data;
+ H264Context *const h = avctx->priv_data;
+ int m;
+ unsigned char *extradata;
+ unsigned int size;
+
+ if(avctx->thread_count > 1){
+ av_log(avctx, AV_LOG_ERROR, "SVQ3 does not support multithreaded decoding, patch welcome! (check latest SVN too)\n");
+ return -1;
+ }
+
+ if (ff_h264_decode_init(avctx) < 0)
+ return -1;
+
+ s->flags = avctx->flags;
+ s->flags2 = avctx->flags2;
+ s->unrestricted_mv = 1;
+ h->is_complex=1;
+
+ if (!s->context_initialized) {
+ s->width = avctx->width;
+ s->height = avctx->height;
+ h->halfpel_flag = 1;
+ h->thirdpel_flag = 1;
+ h->unknown_svq3_flag = 0;
+ h->chroma_qp[0] = h->chroma_qp[1] = 4;
+
+ if (MPV_common_init(s) < 0)
+ return -1;
+
+ h->b_stride = 4*s->mb_width;
+
+ ff_h264_alloc_tables(h);
+
+ /* prowl for the "SEQH" marker in the extradata */
+ extradata = (unsigned char *)avctx->extradata;
+ for (m = 0; m < avctx->extradata_size; m++) {
+ if (!memcmp(extradata, "SEQH", 4))
+ break;
+ extradata++;
+ }
+
+ /* if a match was found, parse the extra data */
+ if (extradata && !memcmp(extradata, "SEQH", 4)) {
+
+ GetBitContext gb;
+ int frame_size_code;
+
+ size = AV_RB32(&extradata[4]);
+ init_get_bits(&gb, extradata + 8, size*8);
+
+ /* 'frame size code' and optional 'width, height' */
+ frame_size_code = get_bits(&gb, 3);
+ switch (frame_size_code) {
+ case 0: avctx->width = 160; avctx->height = 120; break;
+ case 1: avctx->width = 128; avctx->height = 96; break;
+ case 2: avctx->width = 176; avctx->height = 144; break;
+ case 3: avctx->width = 352; avctx->height = 288; break;
+ case 4: avctx->width = 704; avctx->height = 576; break;
+ case 5: avctx->width = 240; avctx->height = 180; break;
+ case 6: avctx->width = 320; avctx->height = 240; break;
+ case 7:
+ avctx->width = get_bits(&gb, 12);
+ avctx->height = get_bits(&gb, 12);
+ break;
+ }
+
+ h->halfpel_flag = get_bits1(&gb);
+ h->thirdpel_flag = get_bits1(&gb);
+
+ /* unknown fields */
+ skip_bits1(&gb);
+ skip_bits1(&gb);
+ skip_bits1(&gb);
+ skip_bits1(&gb);
+
+ s->low_delay = get_bits1(&gb);
+
+ /* unknown field */
+ skip_bits1(&gb);
+
+ while (get_bits1(&gb)) {
+ skip_bits(&gb, 8);
+ }
+
+ h->unknown_svq3_flag = get_bits1(&gb);
+ avctx->has_b_frames = !s->low_delay;
+ if (h->unknown_svq3_flag) {
+#if CONFIG_ZLIB
+ unsigned watermark_width = svq3_get_ue_golomb(&gb);
+ unsigned watermark_height = svq3_get_ue_golomb(&gb);
+ int u1 = svq3_get_ue_golomb(&gb);
+ int u2 = get_bits(&gb, 8);
+ int u3 = get_bits(&gb, 2);
+ int u4 = svq3_get_ue_golomb(&gb);
+ unsigned buf_len = watermark_width*watermark_height*4;
+ int offset = (get_bits_count(&gb)+7)>>3;
+ uint8_t *buf;
+
+ if ((uint64_t)watermark_width*4 > UINT_MAX/watermark_height)
+ return -1;
+
+ buf = av_malloc(buf_len);
+ av_log(avctx, AV_LOG_DEBUG, "watermark size: %dx%d\n", watermark_width, watermark_height);
+ av_log(avctx, AV_LOG_DEBUG, "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n", u1, u2, u3, u4, offset);
+ if (uncompress(buf, (uLong*)&buf_len, extradata + 8 + offset, size - offset) != Z_OK) {
+ av_log(avctx, AV_LOG_ERROR, "could not uncompress watermark logo\n");
+ av_free(buf);
+ return -1;
+ }
+ h->svq3_watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
+ h->svq3_watermark_key = h->svq3_watermark_key << 16 | h->svq3_watermark_key;
+ av_log(avctx, AV_LOG_DEBUG, "watermark key %#x\n", h->svq3_watermark_key);
+ av_free(buf);
+#else
+            av_log(avctx, AV_LOG_ERROR, "this SVQ3 file contains a watermark which needs zlib support compiled in\n");
+ return -1;
+#endif
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int svq3_decode_frame(AVCodecContext *avctx,
+ void *data, int *data_size,
+ const uint8_t *buf, int buf_size)
+{
+ MpegEncContext *const s = avctx->priv_data;
+ H264Context *const h = avctx->priv_data;
+ int m, mb_type;
+
+ /* special case for last picture */
+ if (buf_size == 0) {
+ if (s->next_picture_ptr && !s->low_delay) {
+ *(AVFrame *) data = *(AVFrame *) &s->next_picture;
+ s->next_picture_ptr = NULL;
+ *data_size = sizeof(AVFrame);
+ }
+ return 0;
+ }
+
+ init_get_bits (&s->gb, buf, 8*buf_size);
+
+ s->mb_x = s->mb_y = h->mb_xy = 0;
+
+ if (svq3_decode_slice_header(h))
+ return -1;
+
+ s->pict_type = h->slice_type;
+ s->picture_number = h->slice_num;
+
+ if (avctx->debug&FF_DEBUG_PICT_INFO){
+ av_log(h->s.avctx, AV_LOG_DEBUG, "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
+ av_get_pict_type_char(s->pict_type), h->halfpel_flag, h->thirdpel_flag,
+ s->adaptive_quant, s->qscale, h->slice_num);
+ }
+
+ /* for hurry_up == 5 */
+ s->current_picture.pict_type = s->pict_type;
+ s->current_picture.key_frame = (s->pict_type == FF_I_TYPE);
+
+ /* Skip B-frames if we do not have reference frames. */
+ if (s->last_picture_ptr == NULL && s->pict_type == FF_B_TYPE)
+ return 0;
+ /* Skip B-frames if we are in a hurry. */
+ if (avctx->hurry_up && s->pict_type == FF_B_TYPE)
+ return 0;
+ /* Skip everything if we are in a hurry >= 5. */
+ if (avctx->hurry_up >= 5)
+ return 0;
+ if ( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == FF_B_TYPE)
+ ||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != FF_I_TYPE)
+ || avctx->skip_frame >= AVDISCARD_ALL)
+ return 0;
+
+ if (s->next_p_frame_damaged) {
+ if (s->pict_type == FF_B_TYPE)
+ return 0;
+ else
+ s->next_p_frame_damaged = 0;
+ }
+
+ if (ff_h264_frame_start(h) < 0)
+ return -1;
+
+ if (s->pict_type == FF_B_TYPE) {
+ h->frame_num_offset = (h->slice_num - h->prev_frame_num);
+
+ if (h->frame_num_offset < 0) {
+ h->frame_num_offset += 256;
+ }
+ if (h->frame_num_offset == 0 || h->frame_num_offset >= h->prev_frame_num_offset) {
+ av_log(h->s.avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
+ return -1;
+ }
+ } else {
+ h->prev_frame_num = h->frame_num;
+ h->frame_num = h->slice_num;
+ h->prev_frame_num_offset = (h->frame_num - h->prev_frame_num);
+
+ if (h->prev_frame_num_offset < 0) {
+ h->prev_frame_num_offset += 256;
+ }
+ }
+
+ for (m = 0; m < 2; m++){
+ int i;
+ for (i = 0; i < 4; i++){
+ int j;
+ for (j = -1; j < 4; j++)
+ h->ref_cache[m][scan8[0] + 8*i + j]= 1;
+ if (i < 3)
+ h->ref_cache[m][scan8[0] + 8*i + j]= PART_NOT_AVAILABLE;
+ }
+ }
+
+ for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
+ for (s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
+ h->mb_xy = s->mb_x + s->mb_y*s->mb_stride;
+
+ if ( (get_bits_count(&s->gb) + 7) >= s->gb.size_in_bits &&
+ ((get_bits_count(&s->gb) & 7) == 0 || show_bits(&s->gb, (-get_bits_count(&s->gb) & 7)) == 0)) {
+
+ skip_bits(&s->gb, h->next_slice_index - get_bits_count(&s->gb));
+ s->gb.size_in_bits = 8*buf_size;
+
+ if (svq3_decode_slice_header(h))
+ return -1;
+
+ /* TODO: support s->mb_skip_run */
+ }
+
+ mb_type = svq3_get_ue_golomb(&s->gb);
+
+ if (s->pict_type == FF_I_TYPE) {
+ mb_type += 8;
+ } else if (s->pict_type == FF_B_TYPE && mb_type >= 4) {
+ mb_type += 4;
+ }
+ if (mb_type > 33 || svq3_decode_mb(h, mb_type)) {
+ av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+
+ if (mb_type != 0) {
+ ff_h264_hl_decode_mb (h);
+ }
+
+ if (s->pict_type != FF_B_TYPE && !s->low_delay) {
+ s->current_picture.mb_type[s->mb_x + s->mb_y*s->mb_stride] =
+ (s->pict_type == FF_P_TYPE && mb_type < 8) ? (mb_type - 1) : -1;
+ }
+ }
+
+ ff_draw_horiz_band(s, 16*s->mb_y, 16);
+ }
+
+ MPV_frame_end(s);
+
+ if (s->pict_type == FF_B_TYPE || s->low_delay) {
+ *(AVFrame *) data = *(AVFrame *) &s->current_picture;
+ } else {
+ *(AVFrame *) data = *(AVFrame *) &s->last_picture;
+ }
+
+ /* Do not output the last pic after seeking. */
+ if (s->last_picture_ptr || s->low_delay) {
+ *data_size = sizeof(AVFrame);
+ }
+
+ return buf_size;
+}
+
+
+AVCodec svq3_decoder = {
+ "svq3",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_SVQ3,
+ sizeof(H264Context),
+ svq3_decode_init,
+ NULL,
+ ff_h264_decode_end,
+ svq3_decode_frame,
+ /*.capabilities = */CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_DELAY,
+ /*.next = */NULL,
+ /*.flush = */NULL,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name = */NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
+};
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/tableprint.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/tableprint.c
new file mode 100644
index 000000000..bca94e8d4
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/tableprint.c
@@ -0,0 +1,76 @@
+/*
+ * Generate a file for hardcoded tables
+ *
+ * Copyright (c) 2009 Reimar Döffinger <Reimar.Doeffinger@gmx.de>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdio.h>
+#include <inttypes.h>
+#include "tableprint.h"
+
+#define WRITE_1D_FUNC(name, type, fmtstr, linebrk)\
+void write_##name##_array(const void *arg, int len, int dummy)\
+{\
+ const type *data = arg;\
+ int i;\
+ printf(" ");\
+ for (i = 0; i < len - 1; i++) {\
+ printf(" "fmtstr",", data[i]);\
+ if ((i & linebrk) == linebrk) printf("\n ");\
+ }\
+ printf(" "fmtstr"\n", data[i]);\
+}
+
+WRITE_1D_FUNC(int8, int8_t, "%3"PRIi8, 15)
+WRITE_1D_FUNC(uint32, uint32_t, "0x%08"PRIx32, 7)
+WRITE_1D_FUNC(float, float, "%.18e", 3)
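+
+/* Illustration: write_uint32_array() prints the body of an array initializer, e.g. a
+ * two-element table comes out roughly as
+ *     0x00000001, 0x00000002
+ * with a line break inserted after every (linebrk + 1) values (8 in the uint32 case). */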
+
+#define WRITE_2D_FUNC(name, type)\
+void write_##name##_2d_array(const void *arg, int len, int len2)\
+{\
+ const type *data = arg;\
+ int i;\
+ printf(" {\n");\
+ for (i = 0; i < len; i++) {\
+ write_##name##_array(data + i * len2, len2, 0);\
+ printf(i == len - 1 ? " }\n" : " }, {\n");\
+ }\
+}
+
+WRITE_2D_FUNC(int8, int8_t)
+WRITE_2D_FUNC(uint32, uint32_t)
+
+// ==> Start patch MPC
+//int main(int argc, char *argv[])
+//{
+// int i;
+//
+// printf("/* This file was generated by libavcodec/tableprint */\n");
+//    printf("#include <stdint.h>\n");
+// tableinit();
+//
+// for (i = 0; tables[i].declaration; i++) {
+// printf(tables[i].declaration);
+// printf(" = {\n");
+// tables[i].printfunc(tables[i].data, tables[i].size, tables[i].size2);
+// printf("};\n");
+// }
+// return 0;
+//}
+// <== End patch MPC
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/tableprint.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/tableprint.h
new file mode 100644
index 000000000..706e29653
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/tableprint.h
@@ -0,0 +1,60 @@
+/*
+ * Generate a file for hardcoded tables
+ *
+ * Copyright (c) 2009 Reimar Döffinger <Reimar.Doeffinger@gmx.de>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_TABLEPRINT_H
+#define AVCODEC_TABLEPRINT_H
+
+#include <stdint.h>
+
+/**
+ * \defgroup printfuncs Predefined functions for printing tables
+ *
+ * \{
+ */
+void write_int8_array (const void *, int, int);
+void write_uint32_array (const void *, int, int);
+void write_float_array (const void *, int, int);
+void write_int8_2d_array (const void *, int, int);
+void write_uint32_2d_array(const void *, int, int);
+/** \} */ // end of printfuncs group
+
+struct tabledef {
+ /** String that declares the array. Adding " = { ..." after it should
+     * make a valid initializer; adding "extern" before and ";" after it should,
+     * where possible, make a valid extern declaration. */
+ const char *declaration;
+ /** Function used to print the table data (i.e. the part in {}).
+ * Should be one of the predefined write_*_array functions. */
+ void (*printfunc)(const void *, int, int);
+ /** Pointer passed to the printfunc, usually a pointer to the start
+ * of the array to be printed. */
+ const void *data;
+ int size; ///< size of the first dimension of the array
+ int size2; ///< size of the second dimension of the array if any
+};
+
+/** Initializes all the tables described in the tables array */
+void tableinit(void);
+/** Describes the tables that should be printed */
+extern const struct tabledef tables[];
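+
+/* Hypothetical example (the name "example_tab" is invented for illustration only):
+ * an entry of tables[] could look like
+ *     { "static const uint32_t example_tab[256]", write_uint32_array, example_tab, 256, 0 },
+ */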
+
+#endif /* AVCODEC_TABLEPRINT_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/unary.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/unary.h
new file mode 100644
index 000000000..bcc31ab59
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/unary.h
@@ -0,0 +1,56 @@
+/*
+ * copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_UNARY_H
+#define AVCODEC_UNARY_H
+
+#include "get_bits.h"
+
+/**
+ * Get unary code of limited length
+ * @param gb GetBitContext
+ * @param[in] stop The bitstop value (unary code of 1's or 0's)
+ * @param[in] len Maximum length
+ * @return Unary length/index
+ */
+static inline int get_unary(GetBitContext *gb, int stop, int len)
+{
+ int i;
+
+ for(i = 0; i < len && get_bits1(gb) != stop; i++);
+ return i;
+}
+
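+/* Example: with stop == 0, reading the bits 1 1 1 0 returns 3 (three 1-bits consumed
+ * before the terminating 0, which is consumed as well); if no stop bit occurs within
+ * 'len' bits, 'len' itself is returned. */
+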
+/**
+ * Get unary code terminated by a 0 with a maximum length of 33
+ * @param gb GetBitContext
+ * @return Unary length/index
+ */
+static inline int get_unary_0_33(GetBitContext *gb)
+{
+ return get_unary(gb, 0, 33);
+}
+
+static inline int get_unary_0_9(GetBitContext *gb)
+{
+ return get_unary(gb, 0, 9);
+}
+
+#endif /* AVCODEC_UNARY_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/utils.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/utils.c
new file mode 100644
index 000000000..d1a9b941f
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/utils.c
@@ -0,0 +1,825 @@
+/*
+ * utils for libavcodec
+ * Copyright (c) 2001 Fabrice Bellard
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/utils.c
+ * utils.
+ */
+
+#include "libavutil/avstring.h"
+#include "libavutil/crc.h"
+#include "avcodec.h"
+#include "dsputil.h"
+#include "imgconvert.h"
+#include "audioconvert.h"
+#include "internal.h"
+#include <stdlib.h>
+#include <stdarg.h>
+#include <limits.h>
+#include <float.h>
+#if defined(__MINGW32__) || __STDC_VERSION__ < 199901L
+#include <fcntl.h>
+#endif
+
+//static int volatile entangled_thread_counter=0; /* ffdshow custom comment out */
+int (*ff_lockmgr_cb)(void **mutex, enum AVLockOp op);
+static void *codec_mutex;
+
+void *av_fast_realloc(void *ptr, unsigned int *size, unsigned int min_size)
+{
+ if(min_size < *size)
+ return ptr;
+
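+    /* Grow to 17/16 of the requested size (~6% headroom) plus 32 bytes so that
+     * repeated small enlargements do not reallocate on every call. */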
+ *size= FFMAX(17*min_size/16 + 32, min_size);
+
+ ptr= av_realloc(ptr, *size);
+    if(!ptr) //we could set this to the unmodified min_size, but this is safer if the user lost the ptr and now uses NULL
+ *size= 0;
+
+ return ptr;
+}
+
+void av_fast_malloc(void *ptr, unsigned int *size, unsigned int min_size)
+{
+ void **p = ptr;
+ if (min_size < *size)
+ return;
+ *size= FFMAX(17*min_size/16 + 32, min_size);
+ av_free(*p);
+ *p = av_malloc(*size);
+ if (!*p) *size = 0;
+}
+
+/* ffdshow custom code (begin) */
+static unsigned int last_static = 0;
+static void** array_static = NULL;
+
+void av_free_static(void)
+{
+ while(last_static){
+ av_freep(&array_static[--last_static]);
+ }
+ av_freep(&array_static);
+}
+/* ffdshow custom code (end) */
+
+/* encoder management */
+static AVCodec *first_avcodec = NULL;
+
+AVCodec *av_codec_next(AVCodec *c){
+ if(c) return c->next;
+ else return first_avcodec;
+}
+
+void avcodec_register(AVCodec *codec)
+{
+ AVCodec **p;
+ avcodec_init();
+ p = &first_avcodec;
+ while (*p != NULL) p = &(*p)->next;
+ *p = codec;
+ codec->next = NULL;
+}
+
+void avcodec_set_dimensions(AVCodecContext *s, int width, int height){
+ s->coded_width = width;
+ s->coded_height= height;
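+    /* -((-x) >> lowres) is a ceiling division by 2^lowres (relying on arithmetic
+     * right shift of negative values), e.g. width = 101 with lowres = 1 gives 51. */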
+ s->width = -((-width )>>s->lowres);
+ s->height= -((-height)>>s->lowres);
+}
+
+typedef struct InternalBuffer{
+ int last_pic_num;
+ uint8_t *base[4];
+ uint8_t *data[4];
+ int linesize[4];
+ int width, height;
+ enum PixelFormat pix_fmt;
+}InternalBuffer;
+
+#define INTERNAL_BUFFER_SIZE 32
+
+void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height){
+ int w_align= 1;
+ int h_align= 1;
+
+ switch(s->pix_fmt){
+ case PIX_FMT_YUV420P:
+ case PIX_FMT_YUYV422:
+ case PIX_FMT_UYVY422:
+ case PIX_FMT_YUV422P:
+ case PIX_FMT_YUV440P:
+ case PIX_FMT_YUV444P:
+ case PIX_FMT_GRAY8:
+ case PIX_FMT_GRAY16BE:
+ case PIX_FMT_GRAY16LE:
+ case PIX_FMT_YUVJ420P:
+ case PIX_FMT_YUVJ422P:
+ case PIX_FMT_YUVJ440P:
+ case PIX_FMT_YUVJ444P:
+ case PIX_FMT_YUVA420P:
+ w_align= 16; //FIXME check for non mpeg style codecs and use less alignment
+ h_align= 16;
+ if(s->codec_id == CODEC_ID_MPEG2VIDEO || s->codec_id == CODEC_ID_MJPEG || s->codec_id == CODEC_ID_AMV)
+ h_align= 32; // interlaced is rounded up to 2 MBs
+ break;
+ case PIX_FMT_YUV411P:
+ case PIX_FMT_UYYVYY411:
+ w_align=32;
+ h_align=8;
+ break;
+ case PIX_FMT_YUV410P:
+ if(s->codec_id == CODEC_ID_SVQ1){
+ w_align=64;
+ h_align=64;
+ }
+ case PIX_FMT_RGB555:
+ if(s->codec_id == CODEC_ID_RPZA){
+ w_align=4;
+ h_align=4;
+ }
+ break;
+ case PIX_FMT_BGR24:
+ if((s->codec_id == CODEC_ID_MSZH) || (s->codec_id == CODEC_ID_ZLIB)){
+ w_align=4;
+ h_align=4;
+ }
+ break;
+ default:
+ w_align= 1;
+ h_align= 1;
+ break;
+ }
+
+ *width = FFALIGN(*width , w_align);
+ *height= FFALIGN(*height, h_align);
+ if(s->codec_id == CODEC_ID_H264)
+ *height+=2; // some of the optimized chroma MC reads one line too much
+}
+
+int avcodec_check_dimensions(void *av_log_ctx, unsigned int w, unsigned int h){
+ if((int)w>0 && (int)h>0 && (w+128)*(uint64_t)(h+128) < INT_MAX/8)
+ return 0;
+
+ av_log(av_log_ctx, AV_LOG_ERROR, "picture size invalid (%ux%u)\n", w, h);
+ return -1;
+}
+
+int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic){
+ int i;
+ int w= s->width;
+ int h= s->height;
+ InternalBuffer *buf;
+ int *picture_number;
+
+ if(pic->data[0]!=NULL) {
+ av_log(s, AV_LOG_ERROR, "pic->data[0]!=NULL in avcodec_default_get_buffer\n");
+ return -1;
+ }
+ if(s->internal_buffer_count >= INTERNAL_BUFFER_SIZE) {
+ av_log(s, AV_LOG_ERROR, "internal_buffer_count overflow (missing release_buffer?)\n");
+ return -1;
+ }
+
+ if(avcodec_check_dimensions(s,w,h))
+ return -1;
+
+ if(s->internal_buffer==NULL){
+ s->internal_buffer= av_mallocz((INTERNAL_BUFFER_SIZE+1)*sizeof(InternalBuffer));
+ }
+#if 0
+ s->internal_buffer= av_fast_realloc(
+ s->internal_buffer,
+ &s->internal_buffer_size,
+ sizeof(InternalBuffer)*FFMAX(99, s->internal_buffer_count+1)/*FIXME*/
+ );
+#endif
+
+ buf= &((InternalBuffer*)s->internal_buffer)[s->internal_buffer_count];
+ picture_number= &(((InternalBuffer*)s->internal_buffer)[INTERNAL_BUFFER_SIZE]).last_pic_num; //FIXME ugly hack
+ (*picture_number)++;
+
+ if(buf->base[0] && (buf->width != w || buf->height != h || buf->pix_fmt != s->pix_fmt)){
+ for(i=0; i<4; i++){
+ av_freep(&buf->base[i]);
+ buf->data[i]= NULL;
+ }
+ }
+
+ if(buf->base[0]){
+ pic->age= *picture_number - buf->last_pic_num;
+ buf->last_pic_num= *picture_number;
+ }else{
+ int h_chroma_shift, v_chroma_shift;
+ int size[4] = {0};
+ int tmpsize;
+ int unaligned;
+ AVPicture picture;
+ int stride_align[4];
+
+ avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift);
+
+ avcodec_align_dimensions(s, &w, &h);
+
+ if(!(s->flags&CODEC_FLAG_EMU_EDGE)){
+ w+= EDGE_WIDTH*2;
+ h+= EDGE_WIDTH*2;
+ }
+
+ do {
+ // NOTE: do not align linesizes individually, this breaks e.g. assumptions
+ // that linesize[0] == 2*linesize[1] in the MPEG-encoder for 4:2:2
+ ff_fill_linesize(&picture, s->pix_fmt, w);
+ // increase alignment of w for next try (rhs gives the lowest bit set in w)
+ w += w & ~(w-1);
+
+ unaligned = 0;
+ for (i=0; i<4; i++){
+//STRIDE_ALIGN is 8 for SSE* but this does not work for SVQ1 chroma planes
+//we could change STRIDE_ALIGN to 16 for x86/sse but it would increase the
+//picture size unnecessarily in some cases. The solution here is not
+//pretty and better ideas are welcome!
+#if HAVE_MMX
+ if(s->codec_id == CODEC_ID_SVQ1)
+ stride_align[i]= 16;
+ else
+#endif
+ stride_align[i] = STRIDE_ALIGN;
+ unaligned |= picture.linesize[i] % stride_align[i];
+ }
+ } while (unaligned);
+
+ tmpsize = ff_fill_pointer(&picture, NULL, s->pix_fmt, h);
+ if (tmpsize < 0)
+ return -1;
+
+ for (i=0; i<3 && picture.data[i+1]; i++)
+ size[i] = picture.data[i+1] - picture.data[i];
+ size[i] = tmpsize - (picture.data[i] - picture.data[0]);
+
+ buf->last_pic_num= -256*256*256*64;
+ memset(buf->base, 0, sizeof(buf->base));
+ memset(buf->data, 0, sizeof(buf->data));
+
+ for(i=0; i<4 && size[i]; i++){
+ const int h_shift= i==0 ? 0 : h_chroma_shift;
+ const int v_shift= i==0 ? 0 : v_chroma_shift;
+
+ buf->linesize[i]= picture.linesize[i];
+
+ buf->base[i]= av_malloc(size[i]+16); //FIXME 16
+ if(buf->base[i]==NULL) return -1;
+ memset(buf->base[i], 128, size[i]);
+
+            // no edge if the EDGE_EMU flag is set or the format is not planar YUV
+ if((s->flags&CODEC_FLAG_EMU_EDGE) || !size[2])
+ buf->data[i] = buf->base[i];
+ else
+ buf->data[i] = buf->base[i] + FFALIGN((buf->linesize[i]*EDGE_WIDTH>>v_shift) + (EDGE_WIDTH>>h_shift), stride_align[i]);
+ }
+ if(size[1] && !size[2])
+ ff_set_systematic_pal((uint32_t*)buf->data[1], s->pix_fmt);
+ buf->width = s->width;
+ buf->height = s->height;
+ buf->pix_fmt= s->pix_fmt;
+ pic->age= 256*256*256*64;
+ }
+ pic->type= FF_BUFFER_TYPE_INTERNAL;
+
+ for(i=0; i<4; i++){
+ pic->base[i]= buf->base[i];
+ pic->data[i]= buf->data[i];
+ pic->linesize[i]= buf->linesize[i];
+ }
+ s->internal_buffer_count++;
+
+ pic->reordered_opaque= s->reordered_opaque;
+ pic->reordered_opaque2= s->reordered_opaque2; /* ffdshow custom code */
+ pic->reordered_opaque3= s->reordered_opaque3; /* ffdshow custom code */
+
+ if(s->debug&FF_DEBUG_BUFFERS)
+ av_log(s, AV_LOG_DEBUG, "default_get_buffer called on pic %p, %d buffers used\n", pic, s->internal_buffer_count);
+
+ return 0;
+}
+
+void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic){
+ int i;
+ InternalBuffer *buf, *last;
+
+ assert(pic->type==FF_BUFFER_TYPE_INTERNAL);
+ assert(s->internal_buffer_count);
+
+ buf = NULL; /* avoids warning */
+    for(i=0; i<s->internal_buffer_count; i++){ //just 3-5 checks, so it is not worth optimizing
+ buf= &((InternalBuffer*)s->internal_buffer)[i];
+ if(buf->data[0] == pic->data[0])
+ break;
+ }
+ assert(i < s->internal_buffer_count);
+ s->internal_buffer_count--;
+ last = &((InternalBuffer*)s->internal_buffer)[s->internal_buffer_count];
+
+ FFSWAP(InternalBuffer, *buf, *last);
+
+ for(i=0; i<4; i++){
+ pic->data[i]=NULL;
+// pic->base[i]=NULL;
+ }
+//printf("R%X\n", pic->opaque);
+
+ if(s->debug&FF_DEBUG_BUFFERS)
+ av_log(s, AV_LOG_DEBUG, "default_release_buffer called on pic %p, %d buffers used\n", pic, s->internal_buffer_count);
+}
+
+int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic){
+ AVFrame temp_pic;
+ int i;
+
+ /* If no picture return a new buffer */
+ if(pic->data[0] == NULL) {
+ /* We will copy from buffer, so must be readable */
+ pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
+ return s->get_buffer(s, pic);
+ }
+
+ /* If internal buffer type return the same buffer */
+ if(pic->type == FF_BUFFER_TYPE_INTERNAL) {
+ pic->reordered_opaque= s->reordered_opaque;
+ return 0;
+ }
+
+ /*
+ * Not internal type and reget_buffer not overridden, emulate cr buffer
+ */
+ temp_pic = *pic;
+ for(i = 0; i < 4; i++)
+ pic->data[i] = pic->base[i] = NULL;
+ pic->opaque = NULL;
+ /* Allocate new frame */
+ if (s->get_buffer(s, pic))
+ return -1;
+ /* Copy image data from old buffer to new buffer */
+ av_picture_copy((AVPicture*)pic, (AVPicture*)&temp_pic, s->pix_fmt, s->width,
+ s->height);
+ s->release_buffer(s, &temp_pic); // Release old frame
+ return 0;
+}
+
+int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size){
+ int i;
+
+ for(i=0; i<count; i++){
+ int r= func(c, (char*)arg + i*size);
+ if(ret) ret[i]= r;
+ }
+ return 0;
+}
+
+int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int jobnr, int threadnr),void *arg, int *ret, int count){
+ int i;
+
+ for(i=0; i<count; i++){
+ int r= func(c, arg, i, 0);
+ if(ret) ret[i]= r;
+ }
+ return 0;
+}
+
+enum PixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum PixelFormat *fmt){
+ return fmt[0];
+}
+
+void avcodec_get_frame_defaults(AVFrame *pic){
+ memset(pic, 0, sizeof(AVFrame));
+
+ pic->pts= AV_NOPTS_VALUE;
+ pic->key_frame= 1;
+ pic->YCbCr_RGB_matrix_coefficients = YCbCr_RGB_coeff_Unspecified; // ffdshow custom code
+ pic->video_full_range_flag = VIDEO_FULL_RANGE_INVALID; // ffdshow custom code
+}
+
+AVFrame *avcodec_alloc_frame(void){
+ AVFrame *pic= av_malloc(sizeof(AVFrame));
+
+ if(pic==NULL) return NULL;
+
+ avcodec_get_frame_defaults(pic);
+
+ return pic;
+}
+
+int attribute_align_arg avcodec_open(AVCodecContext *avctx, AVCodec *codec)
+{
+ int ret= -1;
+
+ /* If there is a user-supplied mutex locking routine, call it. */
+ if (ff_lockmgr_cb) {
+ if ((*ff_lockmgr_cb)(&codec_mutex, AV_LOCK_OBTAIN))
+ return -1;
+ }
+
+ /* ffdshow custom comment out */
+ //entangled_thread_counter++;
+ //if(entangled_thread_counter != 1){
+ // av_log(avctx, AV_LOG_ERROR, "insufficient thread locking around avcodec_open/close()\n");
+ // goto end;
+ //}
+
+ if(avctx->codec || !codec)
+ goto end;
+
+ if (codec->priv_data_size > 0) {
+ avctx->priv_data = av_mallocz(codec->priv_data_size);
+ if (!avctx->priv_data) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+ } else {
+ avctx->priv_data = NULL;
+ }
+
+ if(avctx->coded_width && avctx->coded_height)
+ avcodec_set_dimensions(avctx, avctx->coded_width, avctx->coded_height);
+ else if(avctx->width && avctx->height)
+ avcodec_set_dimensions(avctx, avctx->width, avctx->height);
+
+#define SANE_NB_CHANNELS 128U
+ if (((avctx->coded_width || avctx->coded_height)
+ && avcodec_check_dimensions(avctx, avctx->coded_width, avctx->coded_height))
+ || avctx->channels > SANE_NB_CHANNELS) {
+ ret = AVERROR(EINVAL);
+ goto free_and_end;
+ }
+
+ avctx->codec = codec;
+ avctx->codec_id = codec->id; /* ffdshow custom code */
+ avctx->frame_number = 0;
+ if(avctx->codec->init){
+ ret = avctx->codec->init(avctx);
+ if (ret < 0) {
+ goto free_and_end;
+ }
+ }
+ ret=0;
+end:
+ //entangled_thread_counter--; /* ffdshow custom comment out */
+
+ /* Release any user-supplied mutex. */
+ if (ff_lockmgr_cb) {
+ (*ff_lockmgr_cb)(&codec_mutex, AV_LOCK_RELEASE);
+ }
+ return ret;
+free_and_end:
+ av_freep(&avctx->priv_data);
+ avctx->codec= NULL;
+ goto end;
+}
+
+int attribute_align_arg avcodec_encode_audio(AVCodecContext *avctx, uint8_t *buf, int buf_size,
+ const short *samples)
+{
+ if(buf_size < FF_MIN_BUFFER_SIZE && 0){
+ av_log(avctx, AV_LOG_ERROR, "buffer smaller than minimum size\n");
+ return -1;
+ }
+ if((avctx->codec->capabilities & CODEC_CAP_DELAY) || samples){
+ int ret = avctx->codec->encode(avctx, buf, buf_size, samples);
+ avctx->frame_number++;
+ return ret;
+ }else
+ return 0;
+}
+
+int attribute_align_arg avcodec_encode_video(AVCodecContext *avctx, uint8_t *buf, int buf_size,
+ const AVFrame *pict)
+{
+ if(buf_size < FF_MIN_BUFFER_SIZE){
+ av_log(avctx, AV_LOG_ERROR, "buffer smaller than minimum size\n");
+ return -1;
+ }
+ if(avcodec_check_dimensions(avctx,avctx->width,avctx->height))
+ return -1;
+ if((avctx->codec->capabilities & CODEC_CAP_DELAY) || pict){
+ int ret = avctx->codec->encode(avctx, buf, buf_size, pict);
+ avctx->frame_number++;
+ emms_c(); //needed to avoid an emms_c() call before every return;
+
+ return ret;
+ }else
+ return 0;
+}
+
+int attribute_align_arg avcodec_decode_video(AVCodecContext *avctx, AVFrame *picture,
+ int *got_picture_ptr,
+ const uint8_t *buf, int buf_size)
+{
+ int ret;
+
+ *got_picture_ptr= 0;
+ if((avctx->coded_width||avctx->coded_height) && avcodec_check_dimensions(avctx,avctx->coded_width,avctx->coded_height))
+ return -1;
+ if((avctx->codec->capabilities & CODEC_CAP_DELAY) || buf_size){
+ ret = avctx->codec->decode(avctx, picture, got_picture_ptr,
+ buf, buf_size);
+
+ emms_c(); //needed to avoid an emms_c() call before every return;
+
+ if (*got_picture_ptr)
+ avctx->frame_number++;
+ }else
+ ret= 0;
+
+ return ret;
+}
+
+int attribute_align_arg avcodec_decode_audio2(AVCodecContext *avctx, int16_t *samples,
+ int *frame_size_ptr,
+ const uint8_t *buf, int buf_size)
+{
+ int ret;
+
+ if((avctx->codec->capabilities & CODEC_CAP_DELAY) || buf_size){
+        //FIXME remove the check below _after_ ensuring that all audio decoders check that the available space is enough
+ if(*frame_size_ptr < AVCODEC_MAX_AUDIO_FRAME_SIZE){
+ av_log(avctx, AV_LOG_ERROR, "buffer smaller than AVCODEC_MAX_AUDIO_FRAME_SIZE\n");
+ return -1;
+ }
+ if(*frame_size_ptr < FF_MIN_BUFFER_SIZE ||
+ *frame_size_ptr < avctx->channels * avctx->frame_size * sizeof(int16_t)){
+ av_log(avctx, AV_LOG_ERROR, "buffer %d too small\n", *frame_size_ptr);
+ return -1;
+ }
+
+ ret = avctx->codec->decode(avctx, samples, frame_size_ptr,
+ buf, buf_size);
+ avctx->frame_number++;
+ }else{
+ ret= 0;
+ *frame_size_ptr=0;
+ }
+ return ret;
+}
+
+int avcodec_close(AVCodecContext *avctx)
+{
+ /* If there is a user-supplied mutex locking routine, call it. */
+ if (ff_lockmgr_cb) {
+ if ((*ff_lockmgr_cb)(&codec_mutex, AV_LOCK_OBTAIN))
+ return -1;
+ }
+
+ /* ffdshow custom comment out */
+ //entangled_thread_counter++;
+ //if(entangled_thread_counter != 1){
+ // av_log(avctx, AV_LOG_ERROR, "insufficient thread locking around avcodec_open/close()\n");
+ // entangled_thread_counter--;
+ // return -1;
+ //}
+
+ //if (HAVE_THREADS && avctx->thread_opaque)
+ // avcodec_thread_free(avctx);
+ if (avctx->codec->close)
+ avctx->codec->close(avctx);
+ avcodec_default_free_buffers(avctx);
+ av_freep(&avctx->priv_data);
+ avctx->codec = NULL;
+ //entangled_thread_counter--; /* ffdshow custom comment out */
+
+ /* Release any user-supplied mutex. */
+ if (ff_lockmgr_cb) {
+ (*ff_lockmgr_cb)(&codec_mutex, AV_LOCK_RELEASE);
+ }
+ return 0;
+}
+
+AVCodec *avcodec_find_encoder(enum CodecID id)
+{
+ AVCodec *p;
+ p = first_avcodec;
+ while (p) {
+ if (p->encode != NULL && p->id == id)
+ return p;
+ p = p->next;
+ }
+ return NULL;
+}
+
+AVCodec *avcodec_find_encoder_by_name(const char *name)
+{
+ AVCodec *p;
+ if (!name)
+ return NULL;
+ p = first_avcodec;
+ while (p) {
+ if (p->encode != NULL && strcmp(name,p->name) == 0)
+ return p;
+ p = p->next;
+ }
+ return NULL;
+}
+
+AVCodec *avcodec_find_decoder(enum CodecID id)
+{
+ AVCodec *p;
+ p = first_avcodec;
+ while (p) {
+ if (p->decode != NULL && p->id == id)
+ return p;
+ p = p->next;
+ }
+ return NULL;
+}
+
+AVCodec *avcodec_find_decoder_by_name(const char *name)
+{
+ AVCodec *p;
+ if (!name)
+ return NULL;
+ p = first_avcodec;
+ while (p) {
+ if (p->decode != NULL && strcmp(name,p->name) == 0)
+ return p;
+ p = p->next;
+ }
+ return NULL;
+}
+
+unsigned avcodec_version( void )
+{
+ return LIBAVCODEC_VERSION_INT;
+}
+
+void avcodec_init(void)
+{
+ static int initialized = 0;
+
+ if (initialized != 0)
+ return;
+ initialized = 1;
+#if __STDC_VERSION__ < 199901L
+ {
+ extern void avpicture_init_pixfmtinfo(void);
+ avpicture_init_pixfmtinfo();
+ }
+#endif
+
+ dsputil_static_init();
+}
+
+void avcodec_flush_buffers(AVCodecContext *avctx)
+{
+ if(avctx->codec->flush)
+ avctx->codec->flush(avctx);
+}
+
+void avcodec_default_free_buffers(AVCodecContext *s){
+ int i, j;
+
+ if(s->internal_buffer==NULL) return;
+
+ if (s->internal_buffer_count)
+ av_log(s, AV_LOG_WARNING, "Found %i unreleased buffers!\n", s->internal_buffer_count);
+ for(i=0; i<INTERNAL_BUFFER_SIZE; i++){
+ InternalBuffer *buf= &((InternalBuffer*)s->internal_buffer)[i];
+ for(j=0; j<4; j++){
+ av_freep(&buf->base[j]);
+ buf->data[j]= NULL;
+ }
+ }
+ av_freep(&s->internal_buffer);
+
+ s->internal_buffer_count=0;
+}
+
+char av_get_pict_type_char(int pict_type){
+ switch(pict_type){
+ case FF_I_TYPE: return 'I';
+ case FF_P_TYPE: return 'P';
+ case FF_B_TYPE: return 'B';
+ case FF_S_TYPE: return 'S';
+ case FF_SI_TYPE:return 'i';
+ case FF_SP_TYPE:return 'p';
+ case FF_BI_TYPE:return 'b';
+ default: return '?';
+ }
+}
+
+int av_get_bits_per_sample(enum CodecID codec_id){
+ switch(codec_id){
+ case CODEC_ID_ADPCM_SBPRO_2:
+ return 2;
+ case CODEC_ID_ADPCM_SBPRO_3:
+ return 3;
+ case CODEC_ID_ADPCM_SBPRO_4:
+ case CODEC_ID_ADPCM_CT:
+ case CODEC_ID_ADPCM_IMA_WAV:
+ case CODEC_ID_ADPCM_MS:
+ case CODEC_ID_ADPCM_YAMAHA:
+ return 4;
+ case CODEC_ID_PCM_ALAW:
+ case CODEC_ID_PCM_MULAW:
+ return 8;
+ default:
+ return 0;
+ }
+}
+
+int av_get_bits_per_sample_format(enum SampleFormat sample_fmt) {
+ switch (sample_fmt) {
+ case SAMPLE_FMT_U8:
+ return 8;
+ case SAMPLE_FMT_S16:
+ return 16;
+ case SAMPLE_FMT_S32:
+ case SAMPLE_FMT_FLT:
+ return 32;
+ case SAMPLE_FMT_DBL:
+ return 64;
+ default:
+ return 0;
+ }
+}
+
+#if !HAVE_THREADS
+int avcodec_thread_init(AVCodecContext *s, int thread_count){
+ s->thread_count = thread_count;
+ return -1;
+}
+#endif
+
+unsigned int av_xiphlacing(unsigned char *s, unsigned int v)
+{
+ unsigned int n = 0;
+
+ while(v >= 0xff) {
+ *s++ = 0xff;
+ v -= 0xff;
+ n++;
+ }
+ *s = v;
+ n++;
+ return n;
+}
+
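+/* Worked example: av_xiphlacing(s, 300) stores the bytes 0xFF, 0x2D (255 + 45 == 300)
+ * and returns 2, the standard Xiph/Ogg lacing encoding. */
+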
+int ff_match_2uint16(const uint16_t (*tab)[2], int size, int a, int b){
+ int i;
+ for(i=0; i<size && !(tab[i][0]==a && tab[i][1]==b); i++);
+ return i;
+}
+
+void av_log_missing_feature(void *avc, const char *feature, int want_sample)
+{
+ av_log(avc, AV_LOG_WARNING, "%s not implemented. Update your FFmpeg "
+ "version to the newest one from SVN. If the problem still "
+ "occurs, it means that your file has a feature which has not "
+ "been implemented.", feature);
+ if(want_sample)
+ av_log_ask_for_sample(avc, NULL);
+ else
+ av_log(avc, AV_LOG_WARNING, "\n");
+}
+
+void av_log_ask_for_sample(void *avc, const char *msg)
+{
+ if (msg)
+ av_log(avc, AV_LOG_WARNING, "%s ", msg);
+ av_log(avc, AV_LOG_WARNING, "If you want to help, upload a sample "
+ "of this file to ftp://upload.ffmpeg.org/MPlayer/incoming/ "
+ "and contact the ffmpeg-devel mailing list.\n");
+}
+
+int av_lockmgr_register(int (*cb)(void **mutex, enum AVLockOp op))
+{
+ if (ff_lockmgr_cb) {
+ if (ff_lockmgr_cb(&codec_mutex, AV_LOCK_DESTROY))
+ return -1;
+ }
+
+ ff_lockmgr_cb = cb;
+
+ if (ff_lockmgr_cb) {
+ if (ff_lockmgr_cb(&codec_mutex, AV_LOCK_CREATE))
+ return -1;
+ }
+ return 0;
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vc1.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vc1.c
new file mode 100644
index 000000000..ea15b4018
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vc1.c
@@ -0,0 +1,1031 @@
+/*
+ * VC-1 and WMV3 decoder common code
+ * Copyright (c) 2006-2007 Konstantin Shishkov
+ * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/vc1.c
+ * VC-1 and WMV3 decoder common code
+ *
+ */
+#include "internal.h"
+#include "dsputil.h"
+#include "avcodec.h"
+#include "mpegvideo.h"
+#include "vc1.h"
+#include "vc1data.h"
+#include "msmpeg4data.h"
+#include "unary.h"
+#include "simple_idct.h"
+
+#undef NDEBUG
+#include <assert.h>
+
+/***********************************************************************/
+/**
+ * @defgroup vc1bitplane VC-1 Bitplane decoding
+ * @see 8.7, p56
+ * @{
+ */
+
+/**
+ * Imode types
+ * @{
+ */
+enum Imode {
+ IMODE_RAW,
+ IMODE_NORM2,
+ IMODE_DIFF2,
+ IMODE_NORM6,
+ IMODE_DIFF6,
+ IMODE_ROWSKIP,
+ IMODE_COLSKIP
+};
+/** @} */ //imode defines
+
+/** Decode rows by checking if they are skipped
+ * @param plane Buffer to store decoded bits
+ * @param[in] width Width of this buffer
+ * @param[in] height Height of this buffer
+ * @param[in] stride Stride of this buffer
+ */
+static void decode_rowskip(uint8_t* plane, int width, int height, int stride, GetBitContext *gb){
+ int x, y;
+
+ for (y=0; y<height; y++){
+ if (!get_bits1(gb)) //rowskip
+ memset(plane, 0, width);
+ else
+ for (x=0; x<width; x++)
+ plane[x] = get_bits1(gb);
+ plane += stride;
+ }
+}
+
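+/* Illustration: each row is prefixed by one bit; a 0 zeroes the whole row, a 1 is
+ * followed by 'width' explicit bits, one per element of that row. */
+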
+/** Decode columns by checking if they are skipped
+ * @param plane Buffer to store decoded bits
+ * @param[in] width Width of this buffer
+ * @param[in] height Height of this buffer
+ * @param[in] stride Stride of this buffer
+ * @todo FIXME: Optimize
+ */
+static void decode_colskip(uint8_t* plane, int width, int height, int stride, GetBitContext *gb){
+ int x, y;
+
+ for (x=0; x<width; x++){
+ if (!get_bits1(gb)) //colskip
+ for (y=0; y<height; y++)
+ plane[y*stride] = 0;
+ else
+ for (y=0; y<height; y++)
+ plane[y*stride] = get_bits1(gb);
+ plane ++;
+ }
+}
+
+/** Decode a bitplane's bits
+ * @param data bitplane where to store the decoded bits
+ * @param[out] raw_flag pointer to the flag indicating that this bitplane is not coded explicitly
+ * @param v VC-1 context for bit reading and logging
+ * @return Status
+ * @todo FIXME: Optimize
+ */
+static int bitplane_decoding(uint8_t* data, int *raw_flag, VC1Context *v)
+{
+ GetBitContext *gb = &v->s.gb;
+
+ int imode, x, y, code, offset;
+ uint8_t invert, *planep = data;
+ int width, height, stride;
+
+ width = v->s.mb_width;
+ height = v->s.mb_height;
+ stride = v->s.mb_stride;
+ invert = get_bits1(gb);
+ imode = get_vlc2(gb, ff_vc1_imode_vlc.table, VC1_IMODE_VLC_BITS, 1);
+
+ *raw_flag = 0;
+ switch (imode)
+ {
+ case IMODE_RAW:
+ //Data is actually read in the MB layer (same for all tests == "raw")
+ *raw_flag = 1; //invert ignored
+ return invert;
+ case IMODE_DIFF2:
+ case IMODE_NORM2:
+ if ((height * width) & 1)
+ {
+ *planep++ = get_bits1(gb);
+ offset = 1;
+ }
+ else offset = 0;
+ // decode bitplane as one long line
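+        // each NORM-2/DIFF-2 VLC symbol carries two bits; 'offset' tracks the
+        // current column so the write pointer can hop over the stride padding
+        // whenever a row boundary is crossed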
+ for (y = offset; y < height * width; y += 2) {
+ code = get_vlc2(gb, ff_vc1_norm2_vlc.table, VC1_NORM2_VLC_BITS, 1);
+ *planep++ = code & 1;
+ offset++;
+ if(offset == width) {
+ offset = 0;
+ planep += stride - width;
+ }
+ *planep++ = code >> 1;
+ offset++;
+ if(offset == width) {
+ offset = 0;
+ planep += stride - width;
+ }
+ }
+ break;
+ case IMODE_DIFF6:
+ case IMODE_NORM6:
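+        // NORM-6/DIFF-6 code six bits per VLC symbol, filling one 2x3 (or 3x2)
+        // tile of the plane; any leftover column/row that does not fit a full
+        // tile is handled with COLSKIP/ROWSKIP below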
+ if(!(height % 3) && (width % 3)) { // use 2x3 decoding
+ for(y = 0; y < height; y+= 3) {
+ for(x = width & 1; x < width; x += 2) {
+ code = get_vlc2(gb, ff_vc1_norm6_vlc.table, VC1_NORM6_VLC_BITS, 2);
+ if(code < 0){
+ av_log(v->s.avctx, AV_LOG_DEBUG, "invalid NORM-6 VLC\n");
+ return -1;
+ }
+ planep[x + 0] = (code >> 0) & 1;
+ planep[x + 1] = (code >> 1) & 1;
+ planep[x + 0 + stride] = (code >> 2) & 1;
+ planep[x + 1 + stride] = (code >> 3) & 1;
+ planep[x + 0 + stride * 2] = (code >> 4) & 1;
+ planep[x + 1 + stride * 2] = (code >> 5) & 1;
+ }
+ planep += stride * 3;
+ }
+ if(width & 1) decode_colskip(data, 1, height, stride, &v->s.gb);
+ } else { // 3x2
+ planep += (height & 1) * stride;
+ for(y = height & 1; y < height; y += 2) {
+ for(x = width % 3; x < width; x += 3) {
+ code = get_vlc2(gb, ff_vc1_norm6_vlc.table, VC1_NORM6_VLC_BITS, 2);
+ if(code < 0){
+ av_log(v->s.avctx, AV_LOG_DEBUG, "invalid NORM-6 VLC\n");
+ return -1;
+ }
+ planep[x + 0] = (code >> 0) & 1;
+ planep[x + 1] = (code >> 1) & 1;
+ planep[x + 2] = (code >> 2) & 1;
+ planep[x + 0 + stride] = (code >> 3) & 1;
+ planep[x + 1 + stride] = (code >> 4) & 1;
+ planep[x + 2 + stride] = (code >> 5) & 1;
+ }
+ planep += stride * 2;
+ }
+ x = width % 3;
+ if(x) decode_colskip(data , x, height , stride, &v->s.gb);
+ if(height & 1) decode_rowskip(data+x, width - x, 1, stride, &v->s.gb);
+ }
+ break;
+ case IMODE_ROWSKIP:
+ decode_rowskip(data, width, height, stride, &v->s.gb);
+ break;
+ case IMODE_COLSKIP:
+ decode_colskip(data, width, height, stride, &v->s.gb);
+ break;
+ default: break;
+ }
+
+ /* Applying diff operator */
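+    // DIFF modes: each decoded bit was XORed with a predictor, namely the left
+    // neighbour (the top neighbour for the first column), or INVERT where the
+    // left and top neighbours disagree; undo that here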
+ if (imode == IMODE_DIFF2 || imode == IMODE_DIFF6)
+ {
+ planep = data;
+ planep[0] ^= invert;
+ for (x=1; x<width; x++)
+ planep[x] ^= planep[x-1];
+ for (y=1; y<height; y++)
+ {
+ planep += stride;
+ planep[0] ^= planep[-stride];
+ for (x=1; x<width; x++)
+ {
+ if (planep[x-1] != planep[x-stride]) planep[x] ^= invert;
+ else planep[x] ^= planep[x-1];
+ }
+ }
+ }
+ else if (invert)
+ {
+ planep = data;
+ for (x=0; x<stride*height; x++) planep[x] = !planep[x]; //FIXME stride
+ }
+ return (imode<<1) + invert;
+}
+
+/** @} */ //Bitplane group
+
+/***********************************************************************/
+/** VOP Dquant decoding
+ * @param v VC-1 Context
+ */
+static int vop_dquant_decoding(VC1Context *v)
+{
+ GetBitContext *gb = &v->s.gb;
+ int pqdiff;
+
+ //variable size
+ if (v->dquant == 2)
+ {
+ pqdiff = get_bits(gb, 3);
+ if (pqdiff == 7) v->altpq = get_bits(gb, 5);
+ else v->altpq = v->pq + pqdiff + 1;
+ }
+ else
+ {
+ v->dquantfrm = get_bits1(gb);
+ if ( v->dquantfrm )
+ {
+ v->dqprofile = get_bits(gb, 2);
+ switch (v->dqprofile)
+ {
+ case DQPROFILE_SINGLE_EDGE:
+ case DQPROFILE_DOUBLE_EDGES:
+ v->dqsbedge = get_bits(gb, 2);
+ break;
+ case DQPROFILE_ALL_MBS:
+ v->dqbilevel = get_bits1(gb);
+ if(!v->dqbilevel)
+ v->halfpq = 0;
+ default: break; //Forbidden ?
+ }
+ if (v->dqbilevel || v->dqprofile != DQPROFILE_ALL_MBS)
+ {
+ pqdiff = get_bits(gb, 3);
+ if (pqdiff == 7) v->altpq = get_bits(gb, 5);
+ else v->altpq = v->pq + pqdiff + 1;
+ }
+ }
+ }
+ return 0;
+}
+
+static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb);
+
+/**
+ * Decode Simple/Main Profiles sequence header
+ * @see Figure 7-8, p16-17
+ * @param avctx Codec context
+ * @param gb GetBit context initialized from Codec context extra_data
+ * @return Status
+ */
+int vc1_decode_sequence_header(AVCodecContext *avctx, VC1Context *v, GetBitContext *gb)
+{
+ av_log(avctx, AV_LOG_DEBUG, "Header: %0X\n", show_bits(gb, 32));
+ v->profile = get_bits(gb, 2);
+ if (v->profile == PROFILE_COMPLEX)
+ {
+ av_log(avctx, AV_LOG_ERROR, "WMV3 Complex Profile is not fully supported\n");
+ }
+
+ if (v->profile == PROFILE_ADVANCED)
+ {
+ v->zz_8x4 = ff_vc1_adv_progressive_8x4_zz;
+ v->zz_4x8 = ff_vc1_adv_progressive_4x8_zz;
+ return decode_sequence_header_adv(v, gb);
+ }
+ else
+ {
+ v->zz_8x4 = wmv2_scantableA;
+ v->zz_4x8 = wmv2_scantableB;
+ v->res_sm = get_bits(gb, 2); //reserved
+ if (v->res_sm)
+ {
+ av_log(avctx, AV_LOG_ERROR,
+ "Reserved RES_SM=%i is forbidden\n", v->res_sm);
+ return -1;
+ }
+ }
+
+ // (fps-2)/4 (->30)
+ v->frmrtq_postproc = get_bits(gb, 3); //common
+ // (bitrate-32kbps)/64kbps
+ v->bitrtq_postproc = get_bits(gb, 5); //common
+ v->s.loop_filter = get_bits1(gb); //common
+ if(v->s.loop_filter == 1 && v->profile == PROFILE_SIMPLE)
+ {
+ av_log(avctx, AV_LOG_ERROR,
+ "LOOPFILTER shall not be enabled in Simple Profile\n");
+ }
+ if(v->s.avctx->skip_loop_filter >= AVDISCARD_ALL)
+ v->s.loop_filter = 0;
+
+ v->res_x8 = get_bits1(gb); //reserved
+ v->multires = get_bits1(gb);
+ v->res_fasttx = get_bits1(gb);
+ if (!v->res_fasttx)
+ {
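+        // RES_FASTTX == 0 presumably marks an old WMV3 variant that uses a
+        // regular IDCT instead of the VC-1 integer transform, so route the
+        // inverse transforms through the simple IDCT implementations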
+ v->s.dsp.vc1_inv_trans_8x8 = ff_simple_idct;
+ v->s.dsp.vc1_inv_trans_8x4 = ff_simple_idct84_add;
+ v->s.dsp.vc1_inv_trans_4x8 = ff_simple_idct48_add;
+ v->s.dsp.vc1_inv_trans_4x4 = ff_simple_idct44_add;
+ v->s.dsp.vc1_inv_trans_8x8_dc = ff_simple_idct_add;
+ v->s.dsp.vc1_inv_trans_8x4_dc = ff_simple_idct84_add;
+ v->s.dsp.vc1_inv_trans_4x8_dc = ff_simple_idct48_add;
+ v->s.dsp.vc1_inv_trans_4x4_dc = ff_simple_idct44_add;
+ }
+
+ v->fastuvmc = get_bits1(gb); //common
+ if (!v->profile && !v->fastuvmc)
+ {
+ av_log(avctx, AV_LOG_ERROR,
+ "FASTUVMC unavailable in Simple Profile\n");
+ return -1;
+ }
+ v->extended_mv = get_bits1(gb); //common
+ if (!v->profile && v->extended_mv)
+ {
+ av_log(avctx, AV_LOG_ERROR,
+ "Extended MVs unavailable in Simple Profile\n");
+ return -1;
+ }
+ v->dquant = get_bits(gb, 2); //common
+ v->vstransform = get_bits1(gb); //common
+
+ v->res_transtab = get_bits1(gb);
+ if (v->res_transtab)
+ {
+ av_log(avctx, AV_LOG_ERROR,
+ "1 for reserved RES_TRANSTAB is forbidden\n");
+ return -1;
+ }
+
+ v->overlap = get_bits1(gb); //common
+
+ v->s.resync_marker = get_bits1(gb);
+ v->rangered = get_bits1(gb);
+ if (v->rangered && v->profile == PROFILE_SIMPLE)
+ {
+ av_log(avctx, AV_LOG_INFO,
+ "RANGERED should be set to 0 in Simple Profile\n");
+ }
+
+ v->s.max_b_frames = avctx->max_b_frames = get_bits(gb, 3); //common
+ v->quantizer_mode = get_bits(gb, 2); //common
+
+ v->finterpflag = get_bits1(gb); //common
+ v->res_rtm_flag = get_bits1(gb); //reserved
+ if (!v->res_rtm_flag)
+ {
+// av_log(avctx, AV_LOG_ERROR,
+// "0 for reserved RES_RTM_FLAG is forbidden\n");
+ av_log(avctx, AV_LOG_ERROR,
+ "Old WMV3 version detected, only I-frames will be decoded\n");
+ //return -1;
+ }
+ //TODO: figure out what they mean (always 0x402F)
+ if(!v->res_fasttx) skip_bits(gb, 16);
+ av_log(avctx, AV_LOG_DEBUG,
+ "Profile %i:\nfrmrtq_postproc=%i, bitrtq_postproc=%i\n"
+ "LoopFilter=%i, MultiRes=%i, FastUVMC=%i, Extended MV=%i\n"
+ "Rangered=%i, VSTransform=%i, Overlap=%i, SyncMarker=%i\n"
+ "DQuant=%i, Quantizer mode=%i, Max B frames=%i\n",
+ v->profile, v->frmrtq_postproc, v->bitrtq_postproc,
+ v->s.loop_filter, v->multires, v->fastuvmc, v->extended_mv,
+ v->rangered, v->vstransform, v->overlap, v->s.resync_marker,
+ v->dquant, v->quantizer_mode, avctx->max_b_frames
+ );
+ return 0;
+}
+
+static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb)
+{
+ v->res_rtm_flag = 1;
+ v->level = get_bits(gb, 3);
+ if(v->level >= 5)
+ {
+ av_log(v->s.avctx, AV_LOG_ERROR, "Reserved LEVEL %i\n",v->level);
+ }
+ v->chromaformat = get_bits(gb, 2);
+ if (v->chromaformat != 1)
+ {
+ av_log(v->s.avctx, AV_LOG_ERROR,
+ "Only 4:2:0 chroma format supported\n");
+ return -1;
+ }
+
+ // (fps-2)/4 (->30)
+ v->frmrtq_postproc = get_bits(gb, 3); //common
+ // (bitrate-32kbps)/64kbps
+ v->bitrtq_postproc = get_bits(gb, 5); //common
+ v->postprocflag = get_bits1(gb); //common
+
+ v->s.avctx->coded_width = (get_bits(gb, 12) + 1) << 1;
+ v->s.avctx->coded_height = (get_bits(gb, 12) + 1) << 1;
+ v->s.avctx->width = v->s.avctx->coded_width;
+ v->s.avctx->height = v->s.avctx->coded_height;
+ v->broadcast = get_bits1(gb);
+ v->interlace = get_bits1(gb);
+ v->tfcntrflag = get_bits1(gb);
+ v->finterpflag = get_bits1(gb);
+ skip_bits1(gb); // reserved
+
+ v->s.h_edge_pos = v->s.avctx->coded_width;
+ v->s.v_edge_pos = v->s.avctx->coded_height;
+
+ av_log(v->s.avctx, AV_LOG_DEBUG,
+ "Advanced Profile level %i:\nfrmrtq_postproc=%i, bitrtq_postproc=%i\n"
+ "LoopFilter=%i, ChromaFormat=%i, Pulldown=%i, Interlace: %i\n"
+ "TFCTRflag=%i, FINTERPflag=%i\n",
+ v->level, v->frmrtq_postproc, v->bitrtq_postproc,
+ v->s.loop_filter, v->chromaformat, v->broadcast, v->interlace,
+ v->tfcntrflag, v->finterpflag
+ );
+
+ v->psf = get_bits1(gb);
+ if(v->psf) { //PsF, 6.1.13
+ av_log(v->s.avctx, AV_LOG_ERROR, "Progressive Segmented Frame mode: not supported (yet)\n");
+ return -1;
+ }
+ v->s.max_b_frames = v->s.avctx->max_b_frames = 7;
+ if(get_bits1(gb)) { //Display Info - decoding is not affected by it
+ int w, h, ar = 0;
+ av_log(v->s.avctx, AV_LOG_DEBUG, "Display extended info:\n");
+ v->s.avctx->width = w = get_bits(gb, 14) + 1;
+ v->s.avctx->height = h = get_bits(gb, 14) + 1;
+ av_log(v->s.avctx, AV_LOG_DEBUG, "Display dimensions: %ix%i\n", w, h);
+ if(get_bits1(gb))
+ ar = get_bits(gb, 4);
+ if(ar && ar < 14){
+ v->s.avctx->sample_aspect_ratio = ff_vc1_pixel_aspect[ar];
+ }else if(ar == 15){
+ w = get_bits(gb, 8);
+ h = get_bits(gb, 8);
+ v->s.avctx->sample_aspect_ratio.num = w; v->s.avctx->sample_aspect_ratio.den = h;
+ }
+ av_log(v->s.avctx, AV_LOG_DEBUG, "Aspect: %i:%i\n", v->s.avctx->sample_aspect_ratio.num, v->s.avctx->sample_aspect_ratio.den);
+
+ if(get_bits1(gb)){ //framerate stuff
+ if(get_bits1(gb)) {
+ v->s.avctx->time_base.num = 32;
+ v->s.avctx->time_base.den = get_bits(gb, 16) + 1;
+ } else {
+ int nr, dr;
+ nr = get_bits(gb, 8);
+ dr = get_bits(gb, 4);
+ if(nr && nr < 8 && dr && dr < 3){
+ v->s.avctx->time_base.num = ff_vc1_fps_dr[dr - 1];
+ v->s.avctx->time_base.den = ff_vc1_fps_nr[nr - 1] * 1000;
+ }
+ }
+ }
+
+ if(get_bits1(gb)){
+ v->color_prim = get_bits(gb, 8);
+ v->transfer_char = get_bits(gb, 8);
+ v->matrix_coef = get_bits(gb, 8);
+ }
+ }
+
+ v->hrd_param_flag = get_bits1(gb);
+ if(v->hrd_param_flag) {
+ int i;
+ v->hrd_num_leaky_buckets = get_bits(gb, 5);
+ skip_bits(gb, 4); //bitrate exponent
+ skip_bits(gb, 4); //buffer size exponent
+ for(i = 0; i < v->hrd_num_leaky_buckets; i++) {
+ skip_bits(gb, 16); //hrd_rate[n]
+ skip_bits(gb, 16); //hrd_buffer[n]
+ }
+ }
+ return 0;
+}
+
+int vc1_decode_entry_point(AVCodecContext *avctx, VC1Context *v, GetBitContext *gb)
+{
+ int i;
+
+ av_log(avctx, AV_LOG_DEBUG, "Entry point: %08X\n", show_bits_long(gb, 32));
+ v->broken_link = get_bits1(gb);
+ v->closed_entry = get_bits1(gb);
+ v->panscanflag = get_bits1(gb);
+ v->refdist_flag = get_bits1(gb);
+ v->s.loop_filter = get_bits1(gb);
+ v->fastuvmc = get_bits1(gb);
+ v->extended_mv = get_bits1(gb);
+ v->dquant = get_bits(gb, 2);
+ v->vstransform = get_bits1(gb);
+ v->overlap = get_bits1(gb);
+ v->quantizer_mode = get_bits(gb, 2);
+
+ if(v->hrd_param_flag){
+ for(i = 0; i < v->hrd_num_leaky_buckets; i++) {
+ skip_bits(gb, 8); //hrd_full[n]
+ }
+ }
+
+ if(get_bits1(gb)){
+ avctx->coded_width = (get_bits(gb, 12)+1)<<1;
+ avctx->coded_height = (get_bits(gb, 12)+1)<<1;
+ }
+ if(v->extended_mv)
+ v->extended_dmv = get_bits1(gb);
+ if((v->range_mapy_flag = get_bits1(gb))) {
+ av_log(avctx, AV_LOG_ERROR, "Luma scaling is not supported, expect wrong picture\n");
+ v->range_mapy = get_bits(gb, 3);
+ }
+ if((v->range_mapuv_flag = get_bits1(gb))) {
+ av_log(avctx, AV_LOG_ERROR, "Chroma scaling is not supported, expect wrong picture\n");
+ v->range_mapuv = get_bits(gb, 3);
+ }
+
+ av_log(avctx, AV_LOG_DEBUG, "Entry point info:\n"
+ "BrokenLink=%i, ClosedEntry=%i, PanscanFlag=%i\n"
+ "RefDist=%i, Postproc=%i, FastUVMC=%i, ExtMV=%i\n"
+ "DQuant=%i, VSTransform=%i, Overlap=%i, Qmode=%i\n",
+ v->broken_link, v->closed_entry, v->panscanflag, v->refdist_flag, v->s.loop_filter,
+ v->fastuvmc, v->extended_mv, v->dquant, v->vstransform, v->overlap, v->quantizer_mode);
+
+ return 0;
+}
+
+int vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
+{
+ int pqindex, lowquant, status;
+
+ if(v->finterpflag) v->interpfrm = get_bits1(gb);
+ skip_bits(gb, 2); //framecnt unused
+ v->rangeredfrm = 0;
+ if (v->rangered) v->rangeredfrm = get_bits1(gb);
+ v->s.pict_type = get_bits1(gb);
+ if (v->s.avctx->max_b_frames) {
+ if (!v->s.pict_type) {
+ if (get_bits1(gb)) v->s.pict_type = FF_I_TYPE;
+ else v->s.pict_type = FF_B_TYPE;
+ } else v->s.pict_type = FF_P_TYPE;
+ } else v->s.pict_type = v->s.pict_type ? FF_P_TYPE : FF_I_TYPE;
+
+ v->bi_type = 0;
+ if(v->s.pict_type == FF_B_TYPE) {
+ v->bfraction_lut_index = get_vlc2(gb, ff_vc1_bfraction_vlc.table, VC1_BFRACTION_VLC_BITS, 1);
+ v->bfraction = ff_vc1_bfraction_lut[v->bfraction_lut_index];
+ if(v->bfraction == 0) {
+ v->s.pict_type = FF_BI_TYPE;
+ }
+ }
+ if(v->s.pict_type == FF_I_TYPE || v->s.pict_type == FF_BI_TYPE)
+ skip_bits(gb, 7); // skip buffer fullness
+
+ if(v->parse_only)
+ return 0;
+
+ /* calculate RND */
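+    // RND is not signalled in Simple/Main profile: it is 1 for I/BI frames and
+    // toggles on every P frame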
+ if(v->s.pict_type == FF_I_TYPE || v->s.pict_type == FF_BI_TYPE)
+ v->rnd = 1;
+ if(v->s.pict_type == FF_P_TYPE)
+ v->rnd ^= 1;
+
+ /* Quantizer stuff */
+ pqindex = get_bits(gb, 5);
+ if(!pqindex) return -1;
+ if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
+ v->pq = ff_vc1_pquant_table[0][pqindex];
+ else
+ v->pq = ff_vc1_pquant_table[1][pqindex];
+
+ v->pquantizer = 1;
+ if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
+ v->pquantizer = pqindex < 9;
+ if (v->quantizer_mode == QUANT_NON_UNIFORM)
+ v->pquantizer = 0;
+ v->pqindex = pqindex;
+ if (pqindex < 9) v->halfpq = get_bits1(gb);
+ else v->halfpq = 0;
+ if (v->quantizer_mode == QUANT_FRAME_EXPLICIT)
+ v->pquantizer = get_bits1(gb);
+ v->dquantfrm = 0;
+ if (v->extended_mv == 1) v->mvrange = get_unary(gb, 0, 3);
+ v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
+ v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
+ v->range_x = 1 << (v->k_x - 1);
+ v->range_y = 1 << (v->k_y - 1);
+ if (v->multires && v->s.pict_type != FF_B_TYPE) v->respic = get_bits(gb, 2);
+
+ if(v->res_x8 && (v->s.pict_type == FF_I_TYPE || v->s.pict_type == FF_BI_TYPE)){
+ v->x8_type = get_bits1(gb);
+ }else v->x8_type = 0;
+//av_log(v->s.avctx, AV_LOG_INFO, "%c Frame: QP=[%i]%i (+%i/2) %i\n",
+// (v->s.pict_type == FF_P_TYPE) ? 'P' : ((v->s.pict_type == FF_I_TYPE) ? 'I' : 'B'), pqindex, v->pq, v->halfpq, v->rangeredfrm);
+
+ if(v->s.pict_type == FF_I_TYPE || v->s.pict_type == FF_P_TYPE) v->use_ic = 0;
+
+ switch(v->s.pict_type) {
+ case FF_P_TYPE:
+ if (v->pq < 5) v->tt_index = 0;
+ else if(v->pq < 13) v->tt_index = 1;
+ else v->tt_index = 2;
+
+ lowquant = (v->pq > 12) ? 0 : 1;
+ v->mv_mode = ff_vc1_mv_pmode_table[lowquant][get_unary(gb, 1, 4)];
+ if (v->mv_mode == MV_PMODE_INTENSITY_COMP)
+ {
+ int scale, shift, i;
+ v->mv_mode2 = ff_vc1_mv_pmode_table2[lowquant][get_unary(gb, 1, 3)];
+ v->lumscale = get_bits(gb, 6);
+ v->lumshift = get_bits(gb, 6);
+ v->use_ic = 1;
+ /* fill lookup tables for intensity compensation */
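+            // luma: out = clip((scale*in + shift + 32) >> 6);
+            // chroma is scaled around 128 and does not use the shift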
+ if(!v->lumscale) {
+ scale = -64;
+ shift = (255 - v->lumshift * 2) << 6;
+ if(v->lumshift > 31)
+ shift += 128 << 6;
+ } else {
+ scale = v->lumscale + 32;
+ if(v->lumshift > 31)
+ shift = (v->lumshift - 64) << 6;
+ else
+ shift = v->lumshift << 6;
+ }
+ for(i = 0; i < 256; i++) {
+ v->luty[i] = av_clip_uint8((scale * i + shift + 32) >> 6);
+ v->lutuv[i] = av_clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
+ }
+ }
+ if(v->mv_mode == MV_PMODE_1MV_HPEL || v->mv_mode == MV_PMODE_1MV_HPEL_BILIN)
+ v->s.quarter_sample = 0;
+ else if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
+ if(v->mv_mode2 == MV_PMODE_1MV_HPEL || v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN)
+ v->s.quarter_sample = 0;
+ else
+ v->s.quarter_sample = 1;
+ } else
+ v->s.quarter_sample = 1;
+ v->s.mspel = !(v->mv_mode == MV_PMODE_1MV_HPEL_BILIN || (v->mv_mode == MV_PMODE_INTENSITY_COMP && v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN));
+
+ if ((v->mv_mode == MV_PMODE_INTENSITY_COMP &&
+ v->mv_mode2 == MV_PMODE_MIXED_MV)
+ || v->mv_mode == MV_PMODE_MIXED_MV)
+ {
+ status = bitplane_decoding(v->mv_type_mb_plane, &v->mv_type_is_raw, v);
+ if (status < 0) return -1;
+ av_log(v->s.avctx, AV_LOG_DEBUG, "MB MV Type plane encoding: "
+ "Imode: %i, Invert: %i\n", status>>1, status&1);
+ } else {
+ v->mv_type_is_raw = 0;
+ memset(v->mv_type_mb_plane, 0, v->s.mb_stride * v->s.mb_height);
+ }
+ status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
+ if (status < 0) return -1;
+ av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
+ "Imode: %i, Invert: %i\n", status>>1, status&1);
+
+ /* Hopefully this is correct for P frames */
+ v->s.mv_table_index = get_bits(gb, 2); //but using ff_vc1_ tables
+ v->cbpcy_vlc = &ff_vc1_cbpcy_p_vlc[get_bits(gb, 2)];
+
+ if (v->dquant)
+ {
+ av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
+ vop_dquant_decoding(v);
+ }
+
+ v->ttfrm = 0; //FIXME Is that so ?
+ if (v->vstransform)
+ {
+ v->ttmbf = get_bits1(gb);
+ if (v->ttmbf)
+ {
+ v->ttfrm = ff_vc1_ttfrm_to_tt[get_bits(gb, 2)];
+ }
+ } else {
+ v->ttmbf = 1;
+ v->ttfrm = TT_8X8;
+ }
+ break;
+ case FF_B_TYPE:
+ if (v->pq < 5) v->tt_index = 0;
+ else if(v->pq < 13) v->tt_index = 1;
+ else v->tt_index = 2;
+
+ v->mv_mode = get_bits1(gb) ? MV_PMODE_1MV : MV_PMODE_1MV_HPEL_BILIN;
+ v->s.quarter_sample = (v->mv_mode == MV_PMODE_1MV);
+ v->s.mspel = v->s.quarter_sample;
+
+ status = bitplane_decoding(v->direct_mb_plane, &v->dmb_is_raw, v);
+ if (status < 0) return -1;
+ av_log(v->s.avctx, AV_LOG_DEBUG, "MB Direct Type plane encoding: "
+ "Imode: %i, Invert: %i\n", status>>1, status&1);
+ status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
+ if (status < 0) return -1;
+ av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
+ "Imode: %i, Invert: %i\n", status>>1, status&1);
+
+ v->s.mv_table_index = get_bits(gb, 2);
+ v->cbpcy_vlc = &ff_vc1_cbpcy_p_vlc[get_bits(gb, 2)];
+
+ if (v->dquant)
+ {
+ av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
+ vop_dquant_decoding(v);
+ }
+
+ v->ttfrm = 0;
+ if (v->vstransform)
+ {
+ v->ttmbf = get_bits1(gb);
+ if (v->ttmbf)
+ {
+ v->ttfrm = ff_vc1_ttfrm_to_tt[get_bits(gb, 2)];
+ }
+ } else {
+ v->ttmbf = 1;
+ v->ttfrm = TT_8X8;
+ }
+ break;
+ }
+
+ if(!v->x8_type)
+ {
+ /* AC Syntax */
+ v->c_ac_table_index = decode012(gb);
+ if (v->s.pict_type == FF_I_TYPE || v->s.pict_type == FF_BI_TYPE)
+ {
+ v->y_ac_table_index = decode012(gb);
+ }
+ /* DC Syntax */
+ v->s.dc_table_index = get_bits1(gb);
+ }
+
+ if(v->s.pict_type == FF_BI_TYPE) {
+ v->s.pict_type = FF_B_TYPE;
+ v->bi_type = 1;
+ }
+ return 0;
+}
+
+int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
+{
+ int pqindex, lowquant;
+ int status;
+
+ v->p_frame_skipped = 0;
+
+ if(v->interlace){
+ v->fcm = decode012(gb);
+ // ==> Start patch MPC
+ if(v->fcm && !v->allow_interlaced) return -1; // interlaced frames/fields are not implemented
+ // <== End patch MPC
+ }
+ switch(get_unary(gb, 0, 4)) {
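+    // picture type is unary coded: 0 => P, 10 => B, 110 => I, 1110 => BI,
+    // 1111 => skipped P frame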
+ case 0:
+ v->s.pict_type = FF_P_TYPE;
+ break;
+ case 1:
+ v->s.pict_type = FF_B_TYPE;
+ break;
+ case 2:
+ v->s.pict_type = FF_I_TYPE;
+ break;
+ case 3:
+ v->s.pict_type = FF_BI_TYPE;
+ break;
+ case 4:
+ v->s.pict_type = FF_P_TYPE; // skipped pic
+ v->p_frame_skipped = 1;
+ return 0;
+ }
+ if(v->tfcntrflag)
+ skip_bits(gb, 8);
+ if(v->broadcast) {
+ if(!v->interlace || v->psf) {
+ v->rptfrm = get_bits(gb, 2);
+ } else {
+ v->tff = get_bits1(gb);
+ v->rptfrm = get_bits1(gb);
+ }
+ }
+ if(v->panscanflag) {
+ //...
+ }
+ v->rnd = get_bits1(gb);
+ if(v->interlace)
+ v->uvsamp = get_bits1(gb);
+ if(v->finterpflag) v->interpfrm = get_bits1(gb);
+ if(v->s.pict_type == FF_B_TYPE) {
+ v->bfraction_lut_index = get_vlc2(gb, ff_vc1_bfraction_vlc.table, VC1_BFRACTION_VLC_BITS, 1);
+ v->bfraction = ff_vc1_bfraction_lut[v->bfraction_lut_index];
+ if(v->bfraction == 0) {
+ v->s.pict_type = FF_BI_TYPE; /* XXX: should not happen here */
+ }
+ }
+ pqindex = get_bits(gb, 5);
+ if(!pqindex) return -1;
+ v->pqindex = pqindex;
+ if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
+ v->pq = ff_vc1_pquant_table[0][pqindex];
+ else
+ v->pq = ff_vc1_pquant_table[1][pqindex];
+
+ v->pquantizer = 1;
+ if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
+ v->pquantizer = pqindex < 9;
+ if (v->quantizer_mode == QUANT_NON_UNIFORM)
+ v->pquantizer = 0;
+ v->pqindex = pqindex;
+ if (pqindex < 9) v->halfpq = get_bits1(gb);
+ else v->halfpq = 0;
+ if (v->quantizer_mode == QUANT_FRAME_EXPLICIT)
+ v->pquantizer = get_bits1(gb);
+ if(v->postprocflag)
+ v->postproc = get_bits(gb, 2);
+
+ if(v->s.pict_type == FF_I_TYPE || v->s.pict_type == FF_P_TYPE) v->use_ic = 0;
+
+ if(v->parse_only)
+ return 0;
+
+ switch(v->s.pict_type) {
+ case FF_I_TYPE:
+ case FF_BI_TYPE:
+ status = bitplane_decoding(v->acpred_plane, &v->acpred_is_raw, v);
+ if (status < 0) return -1;
+ av_log(v->s.avctx, AV_LOG_DEBUG, "ACPRED plane encoding: "
+ "Imode: %i, Invert: %i\n", status>>1, status&1);
+ v->condover = CONDOVER_NONE;
+ if(v->overlap && v->pq <= 8) {
+ v->condover = decode012(gb);
+ if(v->condover == CONDOVER_SELECT) {
+ status = bitplane_decoding(v->over_flags_plane, &v->overflg_is_raw, v);
+ if (status < 0) return -1;
+ av_log(v->s.avctx, AV_LOG_DEBUG, "CONDOVER plane encoding: "
+ "Imode: %i, Invert: %i\n", status>>1, status&1);
+ }
+ }
+ break;
+ case FF_P_TYPE:
+ if (v->extended_mv) v->mvrange = get_unary(gb, 0, 3);
+ else v->mvrange = 0;
+ v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
+ v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
+ v->range_x = 1 << (v->k_x - 1);
+ v->range_y = 1 << (v->k_y - 1);
+
+ if (v->pq < 5) v->tt_index = 0;
+ else if(v->pq < 13) v->tt_index = 1;
+ else v->tt_index = 2;
+
+ lowquant = (v->pq > 12) ? 0 : 1;
+ v->mv_mode = ff_vc1_mv_pmode_table[lowquant][get_unary(gb, 1, 4)];
+ if (v->mv_mode == MV_PMODE_INTENSITY_COMP)
+ {
+ int scale, shift, i;
+ v->mv_mode2 = ff_vc1_mv_pmode_table2[lowquant][get_unary(gb, 1, 3)];
+ v->lumscale = get_bits(gb, 6);
+ v->lumshift = get_bits(gb, 6);
+ /* fill lookup tables for intensity compensation */
+ if(!v->lumscale) {
+ scale = -64;
+ shift = (255 - v->lumshift * 2) << 6;
+ if(v->lumshift > 31)
+ shift += 128 << 6;
+ } else {
+ scale = v->lumscale + 32;
+ if(v->lumshift > 31)
+ shift = (v->lumshift - 64) << 6;
+ else
+ shift = v->lumshift << 6;
+ }
+ for(i = 0; i < 256; i++) {
+ v->luty[i] = av_clip_uint8((scale * i + shift + 32) >> 6);
+ v->lutuv[i] = av_clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
+ }
+ v->use_ic = 1;
+ }
+ if(v->mv_mode == MV_PMODE_1MV_HPEL || v->mv_mode == MV_PMODE_1MV_HPEL_BILIN)
+ v->s.quarter_sample = 0;
+ else if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
+ if(v->mv_mode2 == MV_PMODE_1MV_HPEL || v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN)
+ v->s.quarter_sample = 0;
+ else
+ v->s.quarter_sample = 1;
+ } else
+ v->s.quarter_sample = 1;
+ v->s.mspel = !(v->mv_mode == MV_PMODE_1MV_HPEL_BILIN || (v->mv_mode == MV_PMODE_INTENSITY_COMP && v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN));
+
+ if ((v->mv_mode == MV_PMODE_INTENSITY_COMP &&
+ v->mv_mode2 == MV_PMODE_MIXED_MV)
+ || v->mv_mode == MV_PMODE_MIXED_MV)
+ {
+ status = bitplane_decoding(v->mv_type_mb_plane, &v->mv_type_is_raw, v);
+ if (status < 0) return -1;
+ av_log(v->s.avctx, AV_LOG_DEBUG, "MB MV Type plane encoding: "
+ "Imode: %i, Invert: %i\n", status>>1, status&1);
+ } else {
+ v->mv_type_is_raw = 0;
+ memset(v->mv_type_mb_plane, 0, v->s.mb_stride * v->s.mb_height);
+ }
+ status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
+ if (status < 0) return -1;
+ av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
+ "Imode: %i, Invert: %i\n", status>>1, status&1);
+
+ /* Hopefully this is correct for P frames */
+ v->s.mv_table_index = get_bits(gb, 2); //but using ff_vc1_ tables
+ v->cbpcy_vlc = &ff_vc1_cbpcy_p_vlc[get_bits(gb, 2)];
+ if (v->dquant)
+ {
+ av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
+ vop_dquant_decoding(v);
+ }
+
+ v->ttfrm = 0; //FIXME Is that so ?
+ if (v->vstransform)
+ {
+ v->ttmbf = get_bits1(gb);
+ if (v->ttmbf)
+ {
+ v->ttfrm = ff_vc1_ttfrm_to_tt[get_bits(gb, 2)];
+ }
+ } else {
+ v->ttmbf = 1;
+ v->ttfrm = TT_8X8;
+ }
+ break;
+ case FF_B_TYPE:
+ if (v->extended_mv) v->mvrange = get_unary(gb, 0, 3);
+ else v->mvrange = 0;
+ v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
+ v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
+ v->range_x = 1 << (v->k_x - 1);
+ v->range_y = 1 << (v->k_y - 1);
+
+ if (v->pq < 5) v->tt_index = 0;
+ else if(v->pq < 13) v->tt_index = 1;
+ else v->tt_index = 2;
+
+ v->mv_mode = get_bits1(gb) ? MV_PMODE_1MV : MV_PMODE_1MV_HPEL_BILIN;
+ v->s.quarter_sample = (v->mv_mode == MV_PMODE_1MV);
+ v->s.mspel = v->s.quarter_sample;
+
+ status = bitplane_decoding(v->direct_mb_plane, &v->dmb_is_raw, v);
+ if (status < 0) return -1;
+ av_log(v->s.avctx, AV_LOG_DEBUG, "MB Direct Type plane encoding: "
+ "Imode: %i, Invert: %i\n", status>>1, status&1);
+ status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
+ if (status < 0) return -1;
+ av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
+ "Imode: %i, Invert: %i\n", status>>1, status&1);
+
+ v->s.mv_table_index = get_bits(gb, 2);
+ v->cbpcy_vlc = &ff_vc1_cbpcy_p_vlc[get_bits(gb, 2)];
+
+ if (v->dquant)
+ {
+ av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
+ vop_dquant_decoding(v);
+ }
+
+ v->ttfrm = 0;
+ if (v->vstransform)
+ {
+ v->ttmbf = get_bits1(gb);
+ if (v->ttmbf)
+ {
+ v->ttfrm = ff_vc1_ttfrm_to_tt[get_bits(gb, 2)];
+ }
+ } else {
+ v->ttmbf = 1;
+ v->ttfrm = TT_8X8;
+ }
+ break;
+ }
+
+ /* AC Syntax */
+ v->c_ac_table_index = decode012(gb);
+ if (v->s.pict_type == FF_I_TYPE || v->s.pict_type == FF_BI_TYPE)
+ {
+ v->y_ac_table_index = decode012(gb);
+ }
+ /* DC Syntax */
+ v->s.dc_table_index = get_bits1(gb);
+ if ((v->s.pict_type == FF_I_TYPE || v->s.pict_type == FF_BI_TYPE) && v->dquant) {
+ av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
+ vop_dquant_decoding(v);
+ }
+
+ v->bi_type = 0;
+ if(v->s.pict_type == FF_BI_TYPE) {
+ v->s.pict_type = FF_B_TYPE;
+ v->bi_type = 1;
+ }
+ return 0;
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vc1.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vc1.h
new file mode 100644
index 000000000..24921dd5e
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vc1.h
@@ -0,0 +1,372 @@
+/*
+ * VC-1 and WMV3 decoder
+ * Copyright (c) 2006-2007 Konstantin Shishkov
+ * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VC1_H
+#define AVCODEC_VC1_H
+
+#include "avcodec.h"
+#include "mpegvideo.h"
+#include "intrax8.h"
+
+/** Markers used in VC-1 AP frame data */
+//@{
+enum VC1Code{
+ VC1_CODE_RES0 = 0x00000100,
+ VC1_CODE_ENDOFSEQ = 0x0000010A,
+ VC1_CODE_SLICE,
+ VC1_CODE_FIELD,
+ VC1_CODE_FRAME,
+ VC1_CODE_ENTRYPOINT,
+ VC1_CODE_SEQHDR,
+};
+//@}
+
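+/* A marker is any 0x000001xx start code; IS_MARKER() masks off the low byte and
+ * compares against VC1_CODE_RES0 (0x00000100), so it matches every code above. */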
+#define IS_MARKER(x) (((x) & ~0xFF) == VC1_CODE_RES0)
+
+/** Available Profiles */
+//@{
+enum Profile {
+ PROFILE_SIMPLE,
+ PROFILE_MAIN,
+ PROFILE_COMPLEX, ///< TODO: WMV9 specific
+ PROFILE_ADVANCED
+};
+//@}
+
+/** Sequence quantizer mode */
+//@{
+enum QuantMode {
+ QUANT_FRAME_IMPLICIT, ///< Implicitly specified at frame level
+ QUANT_FRAME_EXPLICIT, ///< Explicitly specified at frame level
+ QUANT_NON_UNIFORM, ///< Non-uniform quant used for all frames
+ QUANT_UNIFORM ///< Uniform quant used for all frames
+};
+//@}
+
+/** Where quant can be changed */
+//@{
+enum DQProfile {
+ DQPROFILE_FOUR_EDGES,
+ DQPROFILE_DOUBLE_EDGES,
+ DQPROFILE_SINGLE_EDGE,
+ DQPROFILE_ALL_MBS
+};
+//@}
+
+/** @name Which edge is quantized with ALTPQUANT
+ */
+//@{
+enum DQSingleEdge {
+ DQSINGLE_BEDGE_LEFT,
+ DQSINGLE_BEDGE_TOP,
+ DQSINGLE_BEDGE_RIGHT,
+ DQSINGLE_BEDGE_BOTTOM
+};
+//@}
+
+/** Which pair of edges is quantized with ALTPQUANT */
+//@{
+enum DQDoubleEdge {
+ DQDOUBLE_BEDGE_TOPLEFT,
+ DQDOUBLE_BEDGE_TOPRIGHT,
+ DQDOUBLE_BEDGE_BOTTOMRIGHT,
+ DQDOUBLE_BEDGE_BOTTOMLEFT
+};
+//@}
+
+/** MV modes for P frames */
+//@{
+enum MVModes {
+ MV_PMODE_1MV_HPEL_BILIN,
+ MV_PMODE_1MV,
+ MV_PMODE_1MV_HPEL,
+ MV_PMODE_MIXED_MV,
+ MV_PMODE_INTENSITY_COMP
+};
+//@}
+
+/** @name MV types for B frames */
+//@{
+enum BMVTypes {
+ BMV_TYPE_BACKWARD,
+ BMV_TYPE_FORWARD,
+ BMV_TYPE_INTERPOLATED
+};
+//@}
+
+/** @name Block types for P/B frames */
+//@{
+enum TransformTypes {
+ TT_8X8,
+ TT_8X4_BOTTOM,
+ TT_8X4_TOP,
+ TT_8X4, //Both halves
+ TT_4X8_RIGHT,
+ TT_4X8_LEFT,
+ TT_4X8, //Both halves
+ TT_4X4
+};
+//@}
+
+enum CodingSet {
+ CS_HIGH_MOT_INTRA = 0,
+ CS_HIGH_MOT_INTER,
+ CS_LOW_MOT_INTRA,
+ CS_LOW_MOT_INTER,
+ CS_MID_RATE_INTRA,
+ CS_MID_RATE_INTER,
+ CS_HIGH_RATE_INTRA,
+ CS_HIGH_RATE_INTER
+};
+
+/** @name Overlap conditions for Advanced Profile */
+//@{
+enum COTypes {
+ CONDOVER_NONE = 0,
+ CONDOVER_ALL,
+ CONDOVER_SELECT
+};
+//@}
+
+
+/** The VC1 Context
+ * @todo Change size wherever another size is more efficient
+ * Many members are only used for Advanced Profile
+ */
+typedef struct VC1Context{
+ MpegEncContext s;
+ IntraX8Context x8;
+
+ int bits;
+
+ /** Simple/Main Profile sequence header */
+ //@{
+ int res_sm; ///< reserved, 2b
+ int res_x8; ///< reserved
+ int multires; ///< frame-level RESPIC syntax element present
+ int res_fasttx; ///< reserved, always 1
+ int res_transtab; ///< reserved, always 0
+ int rangered; ///< RANGEREDFRM (range reduction) syntax element present
+ ///< at frame level
+ int res_rtm_flag; ///< reserved, set to 1
+ int reserved; ///< reserved
+ //@}
+
+ /** Advanced Profile */
+ //@{
+ int level; ///< 3bits, for Advanced/Simple Profile, provided by TS layer
+    int chromaformat;     ///< 2 bits, 1 = 4:2:0 is the only supported format
+ int postprocflag; ///< Per-frame processing suggestion flag present
+ int broadcast; ///< TFF/RFF present
+    int interlace;        ///< Progressive/interlaced (INTERLACE syntax element)
+ int tfcntrflag; ///< TFCNTR present
+ int panscanflag; ///< NUMPANSCANWIN, TOPLEFT{X,Y}, BOTRIGHT{X,Y} present
+ int refdist_flag; ///< REFDIST syntax element present in II, IP, PI or PP field picture headers
+ int extended_dmv; ///< Additional extended dmv range at P/B frame-level
+ int color_prim; ///< 8bits, chroma coordinates of the color primaries
+ int transfer_char; ///< 8bits, Opto-electronic transfer characteristics
+ int matrix_coef; ///< 8bits, Color primaries->YCbCr transform matrix
+ int hrd_param_flag; ///< Presence of Hypothetical Reference
+ ///< Decoder parameters
+ int psf; ///< Progressive Segmented Frame
+ //@}
+
+ /** Sequence header data for all Profiles
+ * TODO: choose between ints, uint8_ts and monobit flags
+ */
+ //@{
+ int profile; ///< 2bits, Profile
+    int frmrtq_postproc;  ///< 3 bits, quantized frame-rate-based postprocessing strength
+    int bitrtq_postproc;  ///< 5 bits, quantized bit-rate-based postprocessing strength
+ int fastuvmc; ///< Rounding of qpel vector to hpel ? (not in Simple)
+ int extended_mv; ///< Ext MV in P/B (not in Simple)
+ int dquant; ///< How qscale varies with MBs, 2bits (not in Simple)
+    int vstransform;      ///< variable-size (8x8, 8x4, 4x8, 4x4) transform type + info
+ int overlap; ///< overlapped transforms in use
+ int quantizer_mode; ///< 2bits, quantizer mode used for sequence, see QUANT_*
+ int finterpflag; ///< INTERPFRM present
+ //@}
+
+ /** Frame decoding info for all profiles */
+ //@{
+    uint8_t mv_mode;      ///< MV coding mode
+ uint8_t mv_mode2; ///< Secondary MV coding mode (B frames)
+ int k_x; ///< Number of bits for MVs (depends on MV range)
+ int k_y; ///< Number of bits for MVs (depends on MV range)
+ int range_x, range_y; ///< MV range
+ uint8_t pq, altpq; ///< Current/alternate frame quantizer scale
+ const uint8_t* zz_8x4;///< Zigzag scan table for TT_8x4 coding mode
+ const uint8_t* zz_4x8;///< Zigzag scan table for TT_4x8 coding mode
+ /** pquant parameters */
+ //@{
+ uint8_t dquantfrm;
+ uint8_t dqprofile;
+ uint8_t dqsbedge;
+ uint8_t dqbilevel;
+ //@}
+ /** AC coding set indexes
+ * @see 8.1.1.10, p(1)10
+ */
+ //@{
+ int c_ac_table_index; ///< Chroma index from ACFRM element
+ int y_ac_table_index; ///< Luma index from AC2FRM element
+ //@}
+ int ttfrm; ///< Transform type info present at frame level
+ uint8_t ttmbf; ///< Transform type flag
+ uint8_t ttblk4x4; ///< Value of ttblk which indicates a 4x4 transform
+ int codingset; ///< index of current table set from 11.8 to use for luma block decoding
+ int codingset2; ///< index of current table set from 11.8 to use for chroma block decoding
+ int pqindex; ///< raw pqindex used in coding set selection
+ int a_avail, c_avail;
+ uint8_t *mb_type_base, *mb_type[3];
+
+
+ /** Luma compensation parameters */
+ //@{
+ uint8_t lumscale;
+ uint8_t lumshift;
+ //@}
+    int16_t bfraction;    ///< Relative position between anchor frames => how to scale MVs
+ uint8_t halfpq; ///< Uniform quant over image and qp+.5
+ uint8_t respic; ///< Frame-level flag for resized images
+ int buffer_fullness; ///< HRD info
+ /** Ranges:
+     * -# 0 -> [-64, 63.f] x [-32, 31.f]
+ * -# 1 -> [-128, 127.f] x [-64, 63.f]
+ * -# 2 -> [-512, 511.f] x [-128, 127.f]
+ * -# 3 -> [-1024, 1023.f] x [-256, 255.f]
+ */
+ uint8_t mvrange;
+ uint8_t pquantizer; ///< Uniform (over sequence) quantizer in use
+ VLC *cbpcy_vlc; ///< CBPCY VLC table
+ int tt_index; ///< Index for Transform Type tables
+ uint8_t* mv_type_mb_plane; ///< bitplane for mv_type == (4MV)
+ uint8_t* direct_mb_plane; ///< bitplane for "direct" MBs
+ int mv_type_is_raw; ///< mv type mb plane is not coded
+ int dmb_is_raw; ///< direct mb plane is raw
+ int skip_is_raw; ///< skip mb plane is not coded
+ uint8_t luty[256], lutuv[256]; // lookup tables used for intensity compensation
+ int use_ic; ///< use intensity compensation in B-frames
+ int rnd; ///< rounding control
+
+ /** Frame decoding info for S/M profiles only */
+ //@{
+ uint8_t rangeredfrm; ///< out_sample = CLIP((in_sample-128)*2+128)
+ uint8_t interpfrm;
+ //@}
+
+ /** Frame decoding info for Advanced profile */
+ //@{
+ uint8_t fcm; ///< 0->Progressive, 2->Frame-Interlace, 3->Field-Interlace
+ uint8_t numpanscanwin;
+ uint8_t tfcntr;
+ uint8_t rptfrm, tff, rff;
+ uint16_t topleftx;
+ uint16_t toplefty;
+ uint16_t bottomrightx;
+ uint16_t bottomrighty;
+ uint8_t uvsamp;
+ uint8_t postproc;
+ int hrd_num_leaky_buckets;
+ uint8_t bit_rate_exponent;
+ uint8_t buffer_size_exponent;
+ uint8_t* acpred_plane; ///< AC prediction flags bitplane
+ int acpred_is_raw;
+ uint8_t* over_flags_plane; ///< Overflags bitplane
+ int overflg_is_raw;
+ uint8_t condover;
+ uint16_t *hrd_rate, *hrd_buffer;
+ uint8_t *hrd_fullness;
+ uint8_t range_mapy_flag;
+ uint8_t range_mapuv_flag;
+ uint8_t range_mapy;
+ uint8_t range_mapuv;
+ //@}
+
+ int p_frame_skipped;
+ int bi_type;
+ int x8_type;
+
+ uint32_t *cbp_base, *cbp;
+ uint8_t bfraction_lut_index;///< Index for BFRACTION value (see Table 40, reproduced into ff_vc1_bfraction_lut[])
+ uint8_t broken_link; ///< Broken link flag (BROKEN_LINK syntax element)
+ uint8_t closed_entry; ///< Closed entry point flag (CLOSED_ENTRY syntax element)
+
+ int parse_only; ///< Context is used within parser
+
+ int warn_interlaced;
+
+ // ==> Start patch MPC
+ int allow_interlaced;
+ // <== End patch MPC
+} VC1Context;
+
+/** Find VC-1 marker in buffer
+ * @return position where next marker starts or end of buffer if no marker found
+ */
+static av_always_inline const uint8_t* find_next_marker(const uint8_t *src, const uint8_t *end)
+{
+ uint32_t mrk = 0xFFFFFFFF;
+
+ if(end-src < 4) return end;
+ while(src < end){
+ mrk = (mrk << 8) | *src++;
+ if(IS_MARKER(mrk))
+ return src-4;
+ }
+ return end;
+}
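+
+/* Illustrative sketch of the intended use (it mirrors the chunk loop in
+ * vc1_dxva.c later in this patch):
+ *     for (start = buf; start < end; start = next) {
+ *         next = find_next_marker(start + 4, end);
+ *         // AV_RB32(start) now holds the VC1_CODE_* of this chunk
+ *     }
+ */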
+
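+/** Undo start-code emulation prevention in a chunk of escaped bitstream data
+ * (drops the 0x03 byte inserted after two zero bytes when the following byte is < 4)
+ * @return number of bytes written to dst
+ */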
+static av_always_inline int vc1_unescape_buffer(const uint8_t *src, int size, uint8_t *dst)
+{
+ int dsize = 0, i;
+
+ if(size < 4){
+ for(dsize = 0; dsize < size; dsize++) *dst++ = *src++;
+ return size;
+ }
+ for(i = 0; i < size; i++, src++) {
+ if(src[0] == 3 && i >= 2 && !src[-1] && !src[-2] && i < size-1 && src[1] < 4) {
+ dst[dsize++] = src[1];
+ src++;
+ i++;
+ } else
+ dst[dsize++] = *src;
+ }
+ return dsize;
+}
+
+/**
+ * Decode Simple/Main Profiles sequence header
+ * @see Figure 7-8, p16-17
+ * @param avctx Codec context
+ * @param gb GetBit context initialized from Codec context extra_data
+ * @return Status
+ */
+int vc1_decode_sequence_header(AVCodecContext *avctx, VC1Context *v, GetBitContext *gb);
+
+int vc1_decode_entry_point(AVCodecContext *avctx, VC1Context *v, GetBitContext *gb);
+
+int vc1_parse_frame_header (VC1Context *v, GetBitContext *gb);
+int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext *gb);
+
+#endif /* AVCODEC_VC1_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vc1_dxva.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vc1_dxva.c
new file mode 100644
index 000000000..c482e7f0a
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vc1_dxva.c
@@ -0,0 +1,103 @@
+/*
+ * This file is part of Media Player Classic HomeCinema.
+ *
+ * MPC-HC is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * MPC-HC is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+//static int vc1_decode_frame(AVCodecContext *avctx,
+// void *data, int *data_size,
+// uint8_t *buf, int buf_size)
+int av_vc1_decode_frame(AVCodecContext *avctx,
+ uint8_t *buf, int buf_size)
+{
+ VC1Context *v = avctx->priv_data;
+ MpegEncContext *s = &v->s;
+ //AVFrame *pict = data;
+ uint8_t *buf2 = NULL;
+
+ v->allow_interlaced = 1;
+ v->lumshift = 0;
+ v->lumscale = 32;
+
+ /* We need to set current_picture_ptr before reading the header,
+ * otherwise we cannot store anything in there. */
+ if(s->current_picture_ptr==NULL || s->current_picture_ptr->data[0]){
+ int i= ff_find_unused_picture(s, 0);
+ s->current_picture_ptr= &s->picture[i];
+ }
+
+ //for advanced profile we may need to parse and unescape data
+ if (avctx->codec_id == CODEC_ID_VC1) {
+ int buf_size2 = 0;
+ buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
+
+ if(IS_MARKER(AV_RB32(buf))){ /* frame starts with marker and needs to be parsed */
+ const uint8_t *start, *end, *next;
+ int size;
+
+ next = buf;
+ for(start = buf, end = buf + buf_size; next < end; start = next){
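+                // each chunk begins at a 0x000001xx start code; only FRAME and
+                // ENTRYPOINT payloads are unescaped into buf2, slices are rejected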
+ next = find_next_marker(start + 4, end);
+ size = next - start - 4;
+ if(size <= 0) continue;
+ switch(AV_RB32(start)){
+ case VC1_CODE_FRAME:
+ buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
+ break;
+ case VC1_CODE_ENTRYPOINT: /* it should be before frame data */
+ buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
+ init_get_bits(&s->gb, buf2, buf_size2*8);
+ vc1_decode_entry_point(avctx, v, &s->gb);
+ break;
+ case VC1_CODE_SLICE:
+ av_log(avctx, AV_LOG_ERROR, "Sliced decoding is not implemented (yet)\n");
+ av_free(buf2);
+ return -1;
+ }
+ }
+ }else if(v->interlace && ((buf[0] & 0xC0) == 0xC0)){ /* WVC1 interlaced stores both fields divided by marker */
+ const uint8_t *divider;
+
+ divider = find_next_marker(buf, buf + buf_size);
+ if((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD){
+ av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
+ av_free(buf2);
+ return -1;
+ }
+
+ buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2);
+ // TODO
+            av_free(buf2);
+            return -1;
+ }else{
+ buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2);
+ }
+ init_get_bits(&s->gb, buf2, buf_size2*8);
+ } else
+ init_get_bits(&s->gb, buf, buf_size*8);
+ // do parse frame header
+ if(v->profile < PROFILE_ADVANCED) {
+ if(vc1_parse_frame_header(v, &s->gb) == -1) {
+ av_free(buf2);
+ return -1;
+ }
+ } else {
+ if(vc1_parse_frame_header_adv(v, &s->gb) == -1) {
+ av_free(buf2);
+ return -1;
+ }
+ }
+ av_free(buf2);
+ return 0;
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vc1acdata.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vc1acdata.h
new file mode 100644
index 000000000..78de0f9a2
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vc1acdata.h
@@ -0,0 +1,592 @@
+/*
+ * VC-1 and WMV3 decoder
+ * copyright (c) 2006 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VC1ACDATA_H
+#define AVCODEC_VC1ACDATA_H
+
+#include <stdint.h>
+
+#define AC_MODES 8
+
+static const int vc1_ac_sizes[AC_MODES] = {
+ 186, 169, 133, 149, 103, 103, 163, 175
+};
+
+static const uint32_t vc1_ac_tables[AC_MODES][186][2] = {
+{
+{ 0x0001, 2}, { 0x0005, 3}, { 0x000D, 4}, { 0x0012, 5}, { 0x000E, 6}, { 0x0015, 7},
+{ 0x0013, 8}, { 0x003F, 8}, { 0x004B, 9}, { 0x011F, 9}, { 0x00B8, 10}, { 0x03E3, 10},
+{ 0x0172, 11}, { 0x024D, 12}, { 0x03DA, 12}, { 0x02DD, 13}, { 0x1F55, 13}, { 0x05B9, 14},
+{ 0x3EAE, 14}, { 0x0000, 4}, { 0x0010, 5}, { 0x0008, 7}, { 0x0020, 8}, { 0x0029, 9},
+{ 0x01F4, 9}, { 0x0233, 10}, { 0x01E0, 11}, { 0x012A, 12}, { 0x03DD, 12}, { 0x050A, 13},
+{ 0x1F29, 13}, { 0x0A42, 14}, { 0x1272, 15}, { 0x1737, 15}, { 0x0003, 5}, { 0x0011, 7},
+{ 0x00C4, 8}, { 0x004B, 10}, { 0x00B4, 11}, { 0x07D4, 11}, { 0x0345, 12}, { 0x02D7, 13},
+{ 0x07BF, 13}, { 0x0938, 14}, { 0x0BBB, 14}, { 0x095E, 15}, { 0x0013, 5}, { 0x0078, 7},
+{ 0x0069, 9}, { 0x0232, 10}, { 0x0461, 11}, { 0x03EC, 12}, { 0x0520, 13}, { 0x1F2A, 13},
+{ 0x3E50, 14}, { 0x3E51, 14}, { 0x1486, 15}, { 0x000C, 6}, { 0x0024, 9}, { 0x0094, 11},
+{ 0x08C0, 12}, { 0x0F09, 14}, { 0x1EF0, 15}, { 0x003D, 6}, { 0x0053, 9}, { 0x01A0, 11},
+{ 0x02D6, 13}, { 0x0F08, 14}, { 0x0013, 7}, { 0x007C, 9}, { 0x07C1, 11}, { 0x04AC, 14},
+{ 0x001B, 7}, { 0x00A0, 10}, { 0x0344, 12}, { 0x0F79, 14}, { 0x0079, 7}, { 0x03E1, 10},
+{ 0x02D4, 13}, { 0x2306, 14}, { 0x0021, 8}, { 0x023C, 10}, { 0x0FAE, 12}, { 0x23DE, 14},
+{ 0x0035, 8}, { 0x0175, 11}, { 0x07B3, 13}, { 0x00C5, 8}, { 0x0174, 11}, { 0x0785, 13},
+{ 0x0048, 9}, { 0x01A3, 11}, { 0x049E, 13}, { 0x002C, 9}, { 0x00FA, 10}, { 0x07D6, 11},
+{ 0x0092, 10}, { 0x05CC, 13}, { 0x1EF1, 15}, { 0x00A3, 10}, { 0x03ED, 12}, { 0x093E, 14},
+{ 0x01E2, 11}, { 0x1273, 15}, { 0x07C4, 11}, { 0x1487, 15}, { 0x0291, 12}, { 0x0293, 12},
+{ 0x0F8A, 12}, { 0x0509, 13}, { 0x0508, 13}, { 0x078D, 13}, { 0x07BE, 13}, { 0x078C, 13},
+{ 0x04AE, 14}, { 0x0BBA, 14}, { 0x2307, 14}, { 0x0B9A, 14}, { 0x1736, 15}, { 0x000E, 4},
+{ 0x0045, 7}, { 0x01F3, 9}, { 0x047A, 11}, { 0x05DC, 13}, { 0x23DF, 14}, { 0x0019, 5},
+{ 0x0028, 9}, { 0x0176, 11}, { 0x049D, 13}, { 0x23DD, 14}, { 0x0030, 6}, { 0x00A2, 10},
+{ 0x02EF, 12}, { 0x05B8, 14}, { 0x003F, 6}, { 0x00A5, 10}, { 0x03DB, 12}, { 0x093F, 14},
+{ 0x0044, 7}, { 0x07CB, 11}, { 0x095F, 15}, { 0x0063, 7}, { 0x03C3, 12}, { 0x0015, 8},
+{ 0x08F6, 12}, { 0x0017, 8}, { 0x0498, 13}, { 0x002C, 8}, { 0x07B2, 13}, { 0x002F, 8},
+{ 0x1F54, 13}, { 0x008D, 8}, { 0x07BD, 13}, { 0x008E, 8}, { 0x1182, 13}, { 0x00FB, 8},
+{ 0x050B, 13}, { 0x002D, 8}, { 0x07C0, 11}, { 0x0079, 9}, { 0x1F5F, 13}, { 0x007A, 9},
+{ 0x1F56, 13}, { 0x0231, 10}, { 0x03E4, 10}, { 0x01A1, 11}, { 0x0143, 11}, { 0x01F7, 11},
+{ 0x016F, 12}, { 0x0292, 12}, { 0x02E7, 12}, { 0x016C, 12}, { 0x016D, 12}, { 0x03DC, 12},
+{ 0x0F8B, 12}, { 0x0499, 13}, { 0x03D8, 12}, { 0x078E, 13}, { 0x02D5, 13}, { 0x1F5E, 13},
+{ 0x1F2B, 13}, { 0x078F, 13}, { 0x04AD, 14}, { 0x3EAF, 14}, { 0x23DC, 14}, { 0x004A, 9}
+},
+{
+{ 0x0000, 3}, { 0x0003, 4}, { 0x000B, 5}, { 0x0014, 6}, { 0x003F, 6}, { 0x005D, 7},
+{ 0x00A2, 8}, { 0x00AC, 9}, { 0x016E, 9}, { 0x020A, 10}, { 0x02E2, 10}, { 0x0432, 11},
+{ 0x05C9, 11}, { 0x0827, 12}, { 0x0B54, 12}, { 0x04E6, 13}, { 0x105F, 13}, { 0x172A, 13},
+{ 0x20B2, 14}, { 0x2D4E, 14}, { 0x39F0, 14}, { 0x4175, 15}, { 0x5A9E, 15}, { 0x0004, 4},
+{ 0x001E, 5}, { 0x0042, 7}, { 0x00B6, 8}, { 0x0173, 9}, { 0x0395, 10}, { 0x072E, 11},
+{ 0x0B94, 12}, { 0x16A4, 13}, { 0x20B3, 14}, { 0x2E45, 14}, { 0x0005, 5}, { 0x0040, 7},
+{ 0x0049, 9}, { 0x028F, 10}, { 0x05CB, 11}, { 0x048A, 13}, { 0x09DD, 14}, { 0x73E2, 15},
+{ 0x0018, 5}, { 0x0025, 8}, { 0x008A, 10}, { 0x051B, 11}, { 0x0E5F, 12}, { 0x09C9, 14},
+{ 0x139C, 15}, { 0x0029, 6}, { 0x004F, 9}, { 0x0412, 11}, { 0x048D, 13}, { 0x2E41, 14},
+{ 0x0038, 6}, { 0x010E, 9}, { 0x05A8, 11}, { 0x105C, 13}, { 0x39F2, 14}, { 0x0058, 7},
+{ 0x021F, 10}, { 0x0E7E, 12}, { 0x39FF, 14}, { 0x0023, 8}, { 0x02E3, 10}, { 0x04E5, 13},
+{ 0x2E40, 14}, { 0x00A1, 8}, { 0x05BE, 11}, { 0x09C8, 14}, { 0x0083, 8}, { 0x013A, 11},
+{ 0x1721, 13}, { 0x0044, 9}, { 0x0276, 12}, { 0x39F6, 14}, { 0x008B, 10}, { 0x04EF, 13},
+{ 0x5A9B, 15}, { 0x0208, 10}, { 0x1CFE, 13}, { 0x0399, 10}, { 0x1CB4, 13}, { 0x039E, 10},
+{ 0x39F3, 14}, { 0x05AB, 11}, { 0x73E3, 15}, { 0x0737, 11}, { 0x5A9F, 15}, { 0x082D, 12},
+{ 0x0E69, 12}, { 0x0E68, 12}, { 0x0433, 11}, { 0x0B7B, 12}, { 0x2DF8, 14}, { 0x2E56, 14},
+{ 0x2E57, 14}, { 0x39F7, 14}, { 0x51A5, 15}, { 0x0003, 3}, { 0x002A, 6}, { 0x00E4, 8},
+{ 0x028E, 10}, { 0x0735, 11}, { 0x1058, 13}, { 0x1CFA, 13}, { 0x2DF9, 14}, { 0x4174, 15},
+{ 0x0009, 4}, { 0x0054, 8}, { 0x0398, 10}, { 0x048B, 13}, { 0x139D, 15}, { 0x000D, 4},
+{ 0x00AD, 9}, { 0x0826, 12}, { 0x2D4C, 14}, { 0x0011, 5}, { 0x016B, 9}, { 0x0B7F, 12},
+{ 0x51A4, 15}, { 0x0019, 5}, { 0x021B, 10}, { 0x16FD, 13}, { 0x001D, 5}, { 0x0394, 10},
+{ 0x28D3, 14}, { 0x002B, 6}, { 0x05BC, 11}, { 0x5A9A, 15}, { 0x002F, 6}, { 0x0247, 12},
+{ 0x0010, 7}, { 0x0A35, 12}, { 0x003E, 6}, { 0x0B7A, 12}, { 0x0059, 7}, { 0x105E, 13},
+{ 0x0026, 8}, { 0x09CF, 14}, { 0x0055, 8}, { 0x1CB5, 13}, { 0x0057, 8}, { 0x0E5B, 12},
+{ 0x00A0, 8}, { 0x1468, 13}, { 0x0170, 9}, { 0x0090, 10}, { 0x01CE, 9}, { 0x021A, 10},
+{ 0x0218, 10}, { 0x0168, 9}, { 0x021E, 10}, { 0x0244, 12}, { 0x0736, 11}, { 0x0138, 11},
+{ 0x0519, 11}, { 0x0E5E, 12}, { 0x072C, 11}, { 0x0B55, 12}, { 0x09DC, 14}, { 0x20BB, 14},
+{ 0x048C, 13}, { 0x1723, 13}, { 0x2E44, 14}, { 0x16A5, 13}, { 0x0518, 11}, { 0x39FE, 14},
+{ 0x0169, 9}
+},
+{
+{ 0x0001, 2}, { 0x0006, 3}, { 0x000F, 4}, { 0x0016, 5}, { 0x0020, 6}, { 0x0018, 7},
+{ 0x0008, 8}, { 0x009A, 8}, { 0x0056, 9}, { 0x013E, 9}, { 0x00F0, 10}, { 0x03A5, 10},
+{ 0x0077, 11}, { 0x01EF, 11}, { 0x009A, 12}, { 0x005D, 13}, { 0x0001, 4}, { 0x0011, 5},
+{ 0x0002, 7}, { 0x000B, 8}, { 0x0012, 9}, { 0x01D6, 9}, { 0x027E, 10}, { 0x0191, 11},
+{ 0x00EA, 12}, { 0x03DC, 12}, { 0x013B, 13}, { 0x0004, 5}, { 0x0014, 7}, { 0x009E, 8},
+{ 0x0009, 10}, { 0x01AC, 11}, { 0x01E2, 11}, { 0x03CA, 12}, { 0x005F, 13}, { 0x0017, 5},
+{ 0x004E, 7}, { 0x005E, 9}, { 0x00F3, 10}, { 0x01AD, 11}, { 0x00EC, 12}, { 0x05F0, 13},
+{ 0x000E, 6}, { 0x00E1, 8}, { 0x03A4, 10}, { 0x009C, 12}, { 0x013D, 13}, { 0x003B, 6},
+{ 0x001C, 9}, { 0x0014, 11}, { 0x09BE, 12}, { 0x0006, 7}, { 0x007A, 9}, { 0x0190, 11},
+{ 0x0137, 13}, { 0x001B, 7}, { 0x0008, 10}, { 0x075C, 11}, { 0x0071, 7}, { 0x00D7, 10},
+{ 0x09BF, 12}, { 0x0007, 8}, { 0x00AF, 10}, { 0x04CC, 11}, { 0x0034, 8}, { 0x0265, 10},
+{ 0x009F, 12}, { 0x00E0, 8}, { 0x0016, 11}, { 0x0327, 12}, { 0x0015, 9}, { 0x017D, 11},
+{ 0x0EBB, 12}, { 0x0014, 9}, { 0x00F6, 10}, { 0x01E4, 11}, { 0x00CB, 10}, { 0x099D, 12},
+{ 0x00CA, 10}, { 0x02FC, 12}, { 0x017F, 11}, { 0x04CD, 11}, { 0x02FD, 12}, { 0x04FE, 11},
+{ 0x013A, 13}, { 0x000A, 4}, { 0x0042, 7}, { 0x01D3, 9}, { 0x04DD, 11}, { 0x0012, 5},
+{ 0x00E8, 8}, { 0x004C, 11}, { 0x0136, 13}, { 0x0039, 6}, { 0x0264, 10}, { 0x0EBA, 12},
+{ 0x0000, 7}, { 0x00AE, 10}, { 0x099C, 12}, { 0x001F, 7}, { 0x04DE, 11}, { 0x0043, 7},
+{ 0x04DC, 11}, { 0x0003, 8}, { 0x03CB, 12}, { 0x0006, 8}, { 0x099E, 12}, { 0x002A, 8},
+{ 0x05F1, 13}, { 0x000F, 8}, { 0x09FE, 12}, { 0x0033, 8}, { 0x09FF, 12}, { 0x0098, 8},
+{ 0x099F, 12}, { 0x00EA, 8}, { 0x013C, 13}, { 0x002E, 8}, { 0x0192, 11}, { 0x0136, 9},
+{ 0x006A, 9}, { 0x0015, 11}, { 0x03AF, 10}, { 0x01E3, 11}, { 0x0074, 11}, { 0x00EB, 12},
+{ 0x02F9, 12}, { 0x005C, 13}, { 0x00ED, 12}, { 0x03DD, 12}, { 0x0326, 12}, { 0x005E, 13},
+{ 0x0016, 7}
+},
+{
+{ 0x0004, 3}, { 0x0014, 5}, { 0x0017, 7}, { 0x007F, 8}, { 0x0154, 9}, { 0x01F2, 10},
+{ 0x00BF, 11}, { 0x0065, 12}, { 0x0AAA, 12}, { 0x0630, 13}, { 0x1597, 13}, { 0x03B7, 14},
+{ 0x2B22, 14}, { 0x0BE6, 15}, { 0x000B, 4}, { 0x0037, 7}, { 0x0062, 9}, { 0x0007, 11},
+{ 0x0166, 12}, { 0x00CE, 13}, { 0x1590, 13}, { 0x05F6, 14}, { 0x0BE7, 15}, { 0x0007, 5},
+{ 0x006D, 8}, { 0x0003, 11}, { 0x031F, 12}, { 0x05F2, 14}, { 0x0002, 6}, { 0x0061, 9},
+{ 0x0055, 12}, { 0x01DF, 14}, { 0x001A, 6}, { 0x001E, 10}, { 0x0AC9, 12}, { 0x2B23, 14},
+{ 0x001E, 6}, { 0x001F, 10}, { 0x0AC3, 12}, { 0x2B2B, 14}, { 0x0006, 7}, { 0x0004, 11},
+{ 0x02F8, 13}, { 0x0019, 7}, { 0x0006, 11}, { 0x063D, 13}, { 0x0057, 7}, { 0x0182, 11},
+{ 0x2AA2, 14}, { 0x0004, 8}, { 0x0180, 11}, { 0x059C, 14}, { 0x007D, 8}, { 0x0164, 12},
+{ 0x076D, 15}, { 0x0002, 9}, { 0x018D, 11}, { 0x1581, 13}, { 0x00AD, 8}, { 0x0060, 12},
+{ 0x0C67, 14}, { 0x001C, 9}, { 0x00EE, 13}, { 0x0003, 9}, { 0x02CF, 13}, { 0x00D9, 9},
+{ 0x1580, 13}, { 0x0002, 11}, { 0x0183, 11}, { 0x0057, 12}, { 0x0061, 12}, { 0x0031, 11},
+{ 0x0066, 12}, { 0x0631, 13}, { 0x0632, 13}, { 0x00AC, 13}, { 0x031D, 12}, { 0x0076, 12},
+{ 0x003A, 11}, { 0x0165, 12}, { 0x0C66, 14}, { 0x0003, 2}, { 0x0054, 7}, { 0x02AB, 10},
+{ 0x0016, 13}, { 0x05F7, 14}, { 0x0005, 4}, { 0x00F8, 9}, { 0x0AA9, 12}, { 0x005F, 15},
+{ 0x0004, 4}, { 0x001C, 10}, { 0x1550, 13}, { 0x0004, 5}, { 0x0077, 11}, { 0x076C, 15},
+{ 0x000E, 5}, { 0x000A, 12}, { 0x000C, 5}, { 0x0562, 11}, { 0x0004, 6}, { 0x031C, 12},
+{ 0x0006, 6}, { 0x00C8, 13}, { 0x000D, 6}, { 0x01DA, 13}, { 0x0007, 6}, { 0x00C9, 13},
+{ 0x0001, 7}, { 0x002E, 14}, { 0x0014, 7}, { 0x1596, 13}, { 0x000A, 7}, { 0x0AC2, 12},
+{ 0x0016, 7}, { 0x015B, 14}, { 0x0015, 7}, { 0x015A, 14}, { 0x000F, 8}, { 0x005E, 15},
+{ 0x007E, 8}, { 0x00AB, 8}, { 0x002D, 9}, { 0x00D8, 9}, { 0x000B, 9}, { 0x0014, 10},
+{ 0x02B3, 10}, { 0x01F3, 10}, { 0x003A, 10}, { 0x0000, 10}, { 0x0058, 10}, { 0x002E, 9},
+{ 0x005E, 10}, { 0x0563, 11}, { 0x00EC, 12}, { 0x0054, 12}, { 0x0AC1, 12}, { 0x1556, 13},
+{ 0x02FA, 13}, { 0x0181, 11}, { 0x1557, 13}, { 0x059D, 14}, { 0x2AA3, 14}, { 0x2B2A, 14},
+{ 0x01DE, 14}, { 0x063C, 13}, { 0x00CF, 13}, { 0x1594, 13}, { 0x000D, 9}
+},
+{
+{ 0x0002, 2}, { 0x0006, 3}, { 0x000F, 4}, { 0x000D, 5}, { 0x000C, 5}, { 0x0015, 6},
+{ 0x0013, 6}, { 0x0012, 6}, { 0x0017, 7}, { 0x001F, 8}, { 0x001E, 8}, { 0x001D, 8},
+{ 0x0025, 9}, { 0x0024, 9}, { 0x0023, 9}, { 0x0021, 9}, { 0x0021, 10}, { 0x0020, 10},
+{ 0x000F, 10}, { 0x000E, 10}, { 0x0007, 11}, { 0x0006, 11}, { 0x0020, 11}, { 0x0021, 11},
+{ 0x0050, 12}, { 0x0051, 12}, { 0x0052, 12}, { 0x000E, 4}, { 0x0014, 6}, { 0x0016, 7},
+{ 0x001C, 8}, { 0x0020, 9}, { 0x001F, 9}, { 0x000D, 10}, { 0x0022, 11}, { 0x0053, 12},
+{ 0x0055, 12}, { 0x000B, 5}, { 0x0015, 7}, { 0x001E, 9}, { 0x000C, 10}, { 0x0056, 12},
+{ 0x0011, 6}, { 0x001B, 8}, { 0x001D, 9}, { 0x000B, 10}, { 0x0010, 6}, { 0x0022, 9},
+{ 0x000A, 10}, { 0x000D, 6}, { 0x001C, 9}, { 0x0008, 10}, { 0x0012, 7}, { 0x001B, 9},
+{ 0x0054, 12}, { 0x0014, 7}, { 0x001A, 9}, { 0x0057, 12}, { 0x0019, 8}, { 0x0009, 10},
+{ 0x0018, 8}, { 0x0023, 11}, { 0x0017, 8}, { 0x0019, 9}, { 0x0018, 9}, { 0x0007, 10},
+{ 0x0058, 12}, { 0x0007, 4}, { 0x000C, 6}, { 0x0016, 8}, { 0x0017, 9}, { 0x0006, 10},
+{ 0x0005, 11}, { 0x0004, 11}, { 0x0059, 12}, { 0x000F, 6}, { 0x0016, 9}, { 0x0005, 10},
+{ 0x000E, 6}, { 0x0004, 10}, { 0x0011, 7}, { 0x0024, 11}, { 0x0010, 7}, { 0x0025, 11},
+{ 0x0013, 7}, { 0x005A, 12}, { 0x0015, 8}, { 0x005B, 12}, { 0x0014, 8}, { 0x0013, 8},
+{ 0x001A, 8}, { 0x0015, 9}, { 0x0014, 9}, { 0x0013, 9}, { 0x0012, 9}, { 0x0011, 9},
+{ 0x0026, 11}, { 0x0027, 11}, { 0x005C, 12}, { 0x005D, 12}, { 0x005E, 12}, { 0x005F, 12},
+{ 0x0003, 7}
+},
+{
+{ 0x0002, 2}, { 0x000F, 4}, { 0x0015, 6}, { 0x0017, 7}, { 0x001F, 8}, { 0x0025, 9},
+{ 0x0024, 9}, { 0x0021, 10}, { 0x0020, 10}, { 0x0007, 11}, { 0x0006, 11}, { 0x0020, 11},
+{ 0x0006, 3}, { 0x0014, 6}, { 0x001E, 8}, { 0x000F, 10}, { 0x0021, 11}, { 0x0050, 12},
+{ 0x000E, 4}, { 0x001D, 8}, { 0x000E, 10}, { 0x0051, 12}, { 0x000D, 5}, { 0x0023, 9},
+{ 0x000D, 10}, { 0x000C, 5}, { 0x0022, 9}, { 0x0052, 12}, { 0x000B, 5}, { 0x000C, 10},
+{ 0x0053, 12}, { 0x0013, 6}, { 0x000B, 10}, { 0x0054, 12}, { 0x0012, 6}, { 0x000A, 10},
+{ 0x0011, 6}, { 0x0009, 10}, { 0x0010, 6}, { 0x0008, 10}, { 0x0016, 7}, { 0x0055, 12},
+{ 0x0015, 7}, { 0x0014, 7}, { 0x001C, 8}, { 0x001B, 8}, { 0x0021, 9}, { 0x0020, 9},
+{ 0x001F, 9}, { 0x001E, 9}, { 0x001D, 9}, { 0x001C, 9}, { 0x001B, 9}, { 0x001A, 9},
+{ 0x0022, 11}, { 0x0023, 11}, { 0x0056, 12}, { 0x0057, 12}, { 0x0007, 4}, { 0x0019, 9},
+{ 0x0005, 11}, { 0x000F, 6}, { 0x0004, 11}, { 0x000E, 6}, { 0x000D, 6}, { 0x000C, 6},
+{ 0x0013, 7}, { 0x0012, 7}, { 0x0011, 7}, { 0x0010, 7}, { 0x001A, 8}, { 0x0019, 8},
+{ 0x0018, 8}, { 0x0017, 8}, { 0x0016, 8}, { 0x0015, 8}, { 0x0014, 8}, { 0x0013, 8},
+{ 0x0018, 9}, { 0x0017, 9}, { 0x0016, 9}, { 0x0015, 9}, { 0x0014, 9}, { 0x0013, 9},
+{ 0x0012, 9}, { 0x0011, 9}, { 0x0007, 10}, { 0x0006, 10}, { 0x0005, 10}, { 0x0004, 10},
+{ 0x0024, 11}, { 0x0025, 11}, { 0x0026, 11}, { 0x0027, 11}, { 0x0058, 12}, { 0x0059, 12},
+{ 0x005A, 12}, { 0x005B, 12}, { 0x005C, 12}, { 0x005D, 12}, { 0x005E, 12}, { 0x005F, 12},
+{ 0x0003, 7}
+},
+{
+{ 0x0000, 2}, { 0x0003, 3}, { 0x000D, 4}, { 0x0005, 4}, { 0x001C, 5}, { 0x0016, 5},
+{ 0x003F, 6}, { 0x003A, 6}, { 0x002E, 6}, { 0x0022, 6}, { 0x007B, 7}, { 0x0067, 7},
+{ 0x005F, 7}, { 0x0047, 7}, { 0x0026, 7}, { 0x00EF, 8}, { 0x00CD, 8}, { 0x00C1, 8},
+{ 0x00A9, 8}, { 0x004F, 8}, { 0x01F2, 9}, { 0x01DD, 9}, { 0x0199, 9}, { 0x0185, 9},
+{ 0x015D, 9}, { 0x011B, 9}, { 0x03EF, 10}, { 0x03E1, 10}, { 0x03C8, 10}, { 0x0331, 10},
+{ 0x0303, 10}, { 0x02F1, 10}, { 0x02A0, 10}, { 0x0233, 10}, { 0x0126, 10}, { 0x07C0, 11},
+{ 0x076F, 11}, { 0x076C, 11}, { 0x0661, 11}, { 0x0604, 11}, { 0x0572, 11}, { 0x0551, 11},
+{ 0x046A, 11}, { 0x0274, 11}, { 0x0F27, 12}, { 0x0F24, 12}, { 0x0EDB, 12}, { 0x0C8E, 12},
+{ 0x0C0B, 12}, { 0x0C0A, 12}, { 0x0AE3, 12}, { 0x08D6, 12}, { 0x0490, 12}, { 0x0495, 12},
+{ 0x1F19, 13}, { 0x1DB5, 13}, { 0x0009, 4}, { 0x0010, 5}, { 0x0029, 6}, { 0x0062, 7},
+{ 0x00F3, 8}, { 0x00AD, 8}, { 0x01E5, 9}, { 0x0179, 9}, { 0x009C, 9}, { 0x03B1, 10},
+{ 0x02AE, 10}, { 0x0127, 10}, { 0x076E, 11}, { 0x0570, 11}, { 0x0275, 11}, { 0x0F25, 12},
+{ 0x0EC0, 12}, { 0x0AA0, 12}, { 0x08D7, 12}, { 0x1E4C, 13}, { 0x0008, 5}, { 0x0063, 7},
+{ 0x00AF, 8}, { 0x017B, 9}, { 0x03B3, 10}, { 0x07DD, 11}, { 0x0640, 11}, { 0x0F8D, 12},
+{ 0x0BC1, 12}, { 0x0491, 12}, { 0x0028, 6}, { 0x00C3, 8}, { 0x0151, 9}, { 0x02A1, 10},
+{ 0x0573, 11}, { 0x0EC3, 12}, { 0x1F35, 13}, { 0x0065, 7}, { 0x01DA, 9}, { 0x02AF, 10},
+{ 0x0277, 11}, { 0x08C9, 12}, { 0x1781, 13}, { 0x0025, 7}, { 0x0118, 9}, { 0x0646, 11},
+{ 0x0AA6, 12}, { 0x1780, 13}, { 0x00C9, 8}, { 0x0321, 10}, { 0x0F9B, 12}, { 0x191E, 13},
+{ 0x0048, 8}, { 0x07CC, 11}, { 0x0AA1, 12}, { 0x0180, 9}, { 0x0465, 11}, { 0x1905, 13},
+{ 0x03E2, 10}, { 0x0EC1, 12}, { 0x3C9B, 14}, { 0x02F4, 10}, { 0x08C8, 12}, { 0x07C1, 11},
+{ 0x0928, 13}, { 0x05E1, 11}, { 0x320D, 14}, { 0x0EC2, 12}, { 0x6418, 15}, { 0x1F34, 13},
+{ 0x0078, 7}, { 0x0155, 9}, { 0x0552, 11}, { 0x191F, 13}, { 0x00FA, 8}, { 0x07DC, 11},
+{ 0x1907, 13}, { 0x00AC, 8}, { 0x0249, 11}, { 0x13B1, 14}, { 0x01F6, 9}, { 0x0AE2, 12},
+{ 0x01DC, 9}, { 0x04ED, 12}, { 0x0184, 9}, { 0x1904, 13}, { 0x0156, 9}, { 0x09D9, 13},
+{ 0x03E7, 10}, { 0x0929, 13}, { 0x03B2, 10}, { 0x3B68, 14}, { 0x02F5, 10}, { 0x13B0, 14},
+{ 0x0322, 10}, { 0x3B69, 14}, { 0x0234, 10}, { 0x7935, 15}, { 0x07C7, 11}, { 0xC833, 16},
+{ 0x0660, 11}, { 0x7934, 15}, { 0x024B, 11}, { 0xC832, 16}, { 0x0AA7, 12}, { 0x1F18, 13},
+{ 0x007A, 7}
+},
+{
+{ 0x0002, 2}, { 0x0000, 3}, { 0x001E, 5}, { 0x0004, 5}, { 0x0012, 6}, { 0x0070, 7},
+{ 0x001A, 7}, { 0x005F, 8}, { 0x0047, 8}, { 0x01D3, 9}, { 0x00B5, 9}, { 0x0057, 9},
+{ 0x03B5, 10}, { 0x016D, 10}, { 0x0162, 10}, { 0x07CE, 11}, { 0x0719, 11}, { 0x0691, 11},
+{ 0x02C6, 11}, { 0x0156, 11}, { 0x0F92, 12}, { 0x0D2E, 12}, { 0x0D20, 12}, { 0x059E, 12},
+{ 0x0468, 12}, { 0x02A6, 12}, { 0x1DA2, 13}, { 0x1C60, 13}, { 0x1A43, 13}, { 0x0B1D, 13},
+{ 0x08C0, 13}, { 0x055D, 13}, { 0x0003, 3}, { 0x000A, 5}, { 0x0077, 7}, { 0x00E5, 8},
+{ 0x01D9, 9}, { 0x03E5, 10}, { 0x0166, 10}, { 0x0694, 11}, { 0x0152, 11}, { 0x059F, 12},
+{ 0x1F3C, 13}, { 0x1A4B, 13}, { 0x055E, 13}, { 0x000C, 4}, { 0x007D, 7}, { 0x0044, 8},
+{ 0x03E0, 10}, { 0x0769, 11}, { 0x0E31, 12}, { 0x1F26, 13}, { 0x055C, 13}, { 0x001B, 5},
+{ 0x00E2, 8}, { 0x03A5, 10}, { 0x02C9, 11}, { 0x1F23, 13}, { 0x3B47, 14}, { 0x0007, 5},
+{ 0x01D8, 9}, { 0x02D8, 11}, { 0x1F27, 13}, { 0x3494, 14}, { 0x0035, 6}, { 0x03E1, 10},
+{ 0x059C, 12}, { 0x38C3, 14}, { 0x000C, 6}, { 0x0165, 10}, { 0x1D23, 13}, { 0x1638, 14},
+{ 0x0068, 7}, { 0x0693, 11}, { 0x3A45, 14}, { 0x0020, 7}, { 0x0F90, 12}, { 0x7CF6, 15},
+{ 0x00E8, 8}, { 0x058F, 12}, { 0x2CEF, 15}, { 0x0045, 8}, { 0x0B3A, 13}, { 0x01F1, 9},
+{ 0x3B46, 14}, { 0x01A7, 9}, { 0x1676, 14}, { 0x0056, 9}, { 0x692A, 15}, { 0x038D, 10},
+{ 0xE309, 16}, { 0x00AA, 10}, { 0x1C611, 17}, { 0x02DF, 11}, { 0xB3B9, 17}, { 0x02C8, 11},
+{ 0x38C20, 18}, { 0x01B0, 11}, { 0x16390, 18}, { 0x0F9F, 12}, { 0x16771, 18}, { 0x0ED0, 12},
+{ 0x71843, 19}, { 0x0D2A, 12}, { 0xF9E8C, 20}, { 0x0461, 12}, { 0xF9E8E, 20}, { 0x0B67, 13},
+{ 0x055F, 13}, { 0x003F, 6}, { 0x006D, 9}, { 0x0E90, 12}, { 0x054E, 13}, { 0x0013, 6},
+{ 0x0119, 10}, { 0x0B66, 13}, { 0x000B, 6}, { 0x0235, 11}, { 0x7CF5, 15}, { 0x0075, 7},
+{ 0x0D24, 12}, { 0xF9E9, 16}, { 0x002E, 7}, { 0x1F22, 13}, { 0x0021, 7}, { 0x054F, 13},
+{ 0x0014, 7}, { 0x3A44, 14}, { 0x00E4, 8}, { 0x7CF7, 15}, { 0x005E, 8}, { 0x7185, 15},
+{ 0x0037, 8}, { 0x2C73, 15}, { 0x01DB, 9}, { 0x59DD, 16}, { 0x01C7, 9}, { 0x692B, 15},
+{ 0x01A6, 9}, { 0x58E5, 16}, { 0x00B4, 9}, { 0x1F3D0, 17}, { 0x00B0, 9}, { 0xB1C9, 17},
+{ 0x03E6, 10}, { 0x16770, 18}, { 0x016E, 10}, { 0x3E7A2, 18}, { 0x011B, 10}, { 0xF9E8D, 20},
+{ 0x00D9, 10}, { 0xF9E8F, 20}, { 0x00A8, 10}, { 0x2C723, 19}, { 0x0749, 11}, { 0xE3084, 20},
+{ 0x0696, 11}, { 0x58E45, 20}, { 0x02DE, 11}, { 0xB1C88, 21}, { 0x0231, 11}, { 0x1C610A, 21},
+{ 0x01B1, 11}, { 0x71842D, 23}, { 0x0D2B, 12}, { 0x38C217, 22}, { 0x0D2F, 12}, { 0x163913, 22},
+{ 0x05B2, 12}, { 0x163912, 22}, { 0x0469, 12}, { 0x71842C, 23}, { 0x1A42, 13}, { 0x08C1, 13},
+{ 0x0073, 7}
+}
+};
+
+/* index of the first last=1 entry in each of the tables below */
+static const int vc1_last_decode_table[AC_MODES] = {
+ 119, 99, 85, 81, 67, 58, 126, 109
+};
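+/* For example, vc1_last_decode_table[0] == 119: in the first table of
+ * vc1_index_decode_table below, the entries restart from { 0, 1} at index 119,
+ * so indices 0..118 decode the last=0 codes and the remaining indices decode
+ * the last=1 codes (the same pattern holds for the other AC modes). */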
+
+static const uint8_t vc1_index_decode_table[AC_MODES][185][2] = {
+{
+{ 0, 1}, { 0, 2}, { 0, 3}, { 0, 4}, { 0, 5}, { 0, 6}, { 0, 7}, { 0, 8},
+{ 0, 9}, { 0, 10}, { 0, 11}, { 0, 12}, { 0, 13}, { 0, 14}, { 0, 15}, { 0, 16},
+{ 0, 17}, { 0, 18}, { 0, 19}, { 1, 1}, { 1, 2}, { 1, 3}, { 1, 4}, { 1, 5},
+{ 1, 6}, { 1, 7}, { 1, 8}, { 1, 9}, { 1, 10}, { 1, 11}, { 1, 12}, { 1, 13},
+{ 1, 14}, { 1, 15}, { 2, 1}, { 2, 2}, { 2, 3}, { 2, 4}, { 2, 5}, { 2, 6},
+{ 2, 7}, { 2, 8}, { 2, 9}, { 2, 10}, { 2, 11}, { 2, 12}, { 3, 1}, { 3, 2},
+{ 3, 3}, { 3, 4}, { 3, 5}, { 3, 6}, { 3, 7}, { 3, 8}, { 3, 9}, { 3, 10},
+{ 3, 11}, { 4, 1}, { 4, 2}, { 4, 3}, { 4, 4}, { 4, 5}, { 4, 6}, { 5, 1},
+{ 5, 2}, { 5, 3}, { 5, 4}, { 5, 5}, { 6, 1}, { 6, 2}, { 6, 3}, { 6, 4},
+{ 7, 1}, { 7, 2}, { 7, 3}, { 7, 4}, { 8, 1}, { 8, 2}, { 8, 3}, { 8, 4},
+{ 9, 1}, { 9, 2}, { 9, 3}, { 9, 4}, { 10, 1}, { 10, 2}, { 10, 3}, { 11, 1},
+{ 11, 2}, { 11, 3}, { 12, 1}, { 12, 2}, { 12, 3}, { 13, 1}, { 13, 2}, { 13, 3},
+{ 14, 1}, { 14, 2}, { 14, 3}, { 15, 1}, { 15, 2}, { 15, 3}, { 16, 1}, { 16, 2},
+{ 17, 1}, { 17, 2}, { 18, 1}, { 19, 1}, { 20, 1}, { 21, 1}, { 22, 1}, { 23, 1},
+{ 24, 1}, { 25, 1}, { 26, 1}, { 27, 1}, { 28, 1}, { 29, 1}, { 30, 1}, { 0, 1},
+{ 0, 2}, { 0, 3}, { 0, 4}, { 0, 5}, { 0, 6}, { 1, 1}, { 1, 2}, { 1, 3},
+{ 1, 4}, { 1, 5}, { 2, 1}, { 2, 2}, { 2, 3}, { 2, 4}, { 3, 1}, { 3, 2},
+{ 3, 3}, { 3, 4}, { 4, 1}, { 4, 2}, { 4, 3}, { 5, 1}, { 5, 2}, { 6, 1},
+{ 6, 2}, { 7, 1}, { 7, 2}, { 8, 1}, { 8, 2}, { 9, 1}, { 9, 2}, { 10, 1},
+{ 10, 2}, { 11, 1}, { 11, 2}, { 12, 1}, { 12, 2}, { 13, 1}, { 13, 2}, { 14, 1},
+{ 14, 2}, { 15, 1}, { 15, 2}, { 16, 1}, { 17, 1}, { 18, 1}, { 19, 1}, { 20, 1},
+{ 21, 1}, { 22, 1}, { 23, 1}, { 24, 1}, { 25, 1}, { 26, 1}, { 27, 1}, { 28, 1},
+{ 29, 1}, { 30, 1}, { 31, 1}, { 32, 1}, { 33, 1}, { 34, 1}, { 35, 1}, { 36, 1},
+{ 37, 1}
+},
+{
+{ 0, 1}, { 0, 2}, { 0, 3}, { 0, 4}, { 0, 5}, { 0, 6}, { 0, 7}, { 0, 8},
+{ 0, 9}, { 0, 10}, { 0, 11}, { 0, 12}, { 0, 13}, { 0, 14}, { 0, 15}, { 0, 16},
+{ 0, 17}, { 0, 18}, { 0, 19}, { 0, 20}, { 0, 21}, { 0, 22}, { 0, 23}, { 1, 1},
+{ 1, 2}, { 1, 3}, { 1, 4}, { 1, 5}, { 1, 6}, { 1, 7}, { 1, 8}, { 1, 9},
+{ 1, 10}, { 1, 11}, { 2, 1}, { 2, 2}, { 2, 3}, { 2, 4}, { 2, 5}, { 2, 6},
+{ 2, 7}, { 2, 8}, { 3, 1}, { 3, 2}, { 3, 3}, { 3, 4}, { 3, 5}, { 3, 6},
+{ 3, 7}, { 4, 1}, { 4, 2}, { 4, 3}, { 4, 4}, { 4, 5}, { 5, 1}, { 5, 2},
+{ 5, 3}, { 5, 4}, { 5, 5}, { 6, 1}, { 6, 2}, { 6, 3}, { 6, 4}, { 7, 1},
+{ 7, 2}, { 7, 3}, { 7, 4}, { 8, 1}, { 8, 2}, { 8, 3}, { 9, 1}, { 9, 2},
+{ 9, 3}, { 10, 1}, { 10, 2}, { 10, 3}, { 11, 1}, { 11, 2}, { 11, 3}, { 12, 1},
+{ 12, 2}, { 13, 1}, { 13, 2}, { 14, 1}, { 14, 2}, { 15, 1}, { 15, 2}, { 16, 1},
+{ 16, 2}, { 17, 1}, { 18, 1}, { 19, 1}, { 20, 1}, { 21, 1}, { 22, 1}, { 23, 1},
+{ 24, 1}, { 25, 1}, { 26, 1}, { 0, 1}, { 0, 2}, { 0, 3}, { 0, 4}, { 0, 5},
+{ 0, 6}, { 0, 7}, { 0, 8}, { 0, 9}, { 1, 1}, { 1, 2}, { 1, 3}, { 1, 4},
+{ 1, 5}, { 2, 1}, { 2, 2}, { 2, 3}, { 2, 4}, { 3, 1}, { 3, 2}, { 3, 3},
+{ 3, 4}, { 4, 1}, { 4, 2}, { 4, 3}, { 5, 1}, { 5, 2}, { 5, 3}, { 6, 1},
+{ 6, 2}, { 6, 3}, { 7, 1}, { 7, 2}, { 8, 1}, { 8, 2}, { 9, 1}, { 9, 2},
+{ 10, 1}, { 10, 2}, { 11, 1}, { 11, 2}, { 12, 1}, { 12, 2}, { 13, 1}, { 13, 2},
+{ 14, 1}, { 14, 2}, { 15, 1}, { 16, 1}, { 17, 1}, { 18, 1}, { 19, 1}, { 20, 1},
+{ 21, 1}, { 22, 1}, { 23, 1}, { 24, 1}, { 25, 1}, { 26, 1}, { 27, 1}, { 28, 1},
+{ 29, 1}, { 30, 1}, { 31, 1}, { 32, 1}, { 33, 1}, { 34, 1}, { 35, 1}, { 36, 1}
+},
+{
+{ 0, 1}, { 0, 2}, { 0, 3}, { 0, 4}, { 0, 5}, { 0, 6}, { 0, 7}, { 0, 8},
+{ 0, 9}, { 0, 10}, { 0, 11}, { 0, 12}, { 0, 13}, { 0, 14}, { 0, 15}, { 0, 16},
+{ 1, 1}, { 1, 2}, { 1, 3}, { 1, 4}, { 1, 5}, { 1, 6}, { 1, 7}, { 1, 8},
+{ 1, 9}, { 1, 10}, { 1, 11}, { 2, 1}, { 2, 2}, { 2, 3}, { 2, 4}, { 2, 5},
+{ 2, 6}, { 2, 7}, { 2, 8}, { 3, 1}, { 3, 2}, { 3, 3}, { 3, 4}, { 3, 5},
+{ 3, 6}, { 3, 7}, { 4, 1}, { 4, 2}, { 4, 3}, { 4, 4}, { 4, 5}, { 5, 1},
+{ 5, 2}, { 5, 3}, { 5, 4}, { 6, 1}, { 6, 2}, { 6, 3}, { 6, 4}, { 7, 1},
+{ 7, 2}, { 7, 3}, { 8, 1}, { 8, 2}, { 8, 3}, { 9, 1}, { 9, 2}, { 9, 3},
+{ 10, 1}, { 10, 2}, { 10, 3}, { 11, 1}, { 11, 2}, { 11, 3}, { 12, 1}, { 12, 2},
+{ 12, 3}, { 13, 1}, { 13, 2}, { 13, 3}, { 14, 1}, { 14, 2}, { 15, 1}, { 15, 2},
+{ 16, 1}, { 17, 1}, { 18, 1}, { 19, 1}, { 20, 1}, { 0, 1}, { 0, 2}, { 0, 3},
+{ 0, 4}, { 1, 1}, { 1, 2}, { 1, 3}, { 1, 4}, { 2, 1}, { 2, 2}, { 2, 3},
+{ 3, 1}, { 3, 2}, { 3, 3}, { 4, 1}, { 4, 2}, { 5, 1}, { 5, 2}, { 6, 1},
+{ 6, 2}, { 7, 1}, { 7, 2}, { 8, 1}, { 8, 2}, { 9, 1}, { 9, 2}, { 10, 1},
+{ 10, 2}, { 11, 1}, { 11, 2}, { 12, 1}, { 12, 2}, { 13, 1}, { 13, 2}, { 14, 1},
+{ 15, 1}, { 16, 1}, { 17, 1}, { 18, 1}, { 19, 1}, { 20, 1}, { 21, 1}, { 22, 1},
+{ 23, 1}, { 24, 1}, { 25, 1}, { 26, 1}
+},
+{
+{ 0, 1}, { 0, 2}, { 0, 3}, { 0, 4}, { 0, 5}, { 0, 6}, { 0, 7}, { 0, 8},
+{ 0, 9}, { 0, 10}, { 0, 11}, { 0, 12}, { 0, 13}, { 0, 14}, { 1, 1}, { 1, 2},
+{ 1, 3}, { 1, 4}, { 1, 5}, { 1, 6}, { 1, 7}, { 1, 8}, { 1, 9}, { 2, 1},
+{ 2, 2}, { 2, 3}, { 2, 4}, { 2, 5}, { 3, 1}, { 3, 2}, { 3, 3}, { 3, 4},
+{ 4, 1}, { 4, 2}, { 4, 3}, { 4, 4}, { 5, 1}, { 5, 2}, { 5, 3}, { 5, 4},
+{ 6, 1}, { 6, 2}, { 6, 3}, { 7, 1}, { 7, 2}, { 7, 3}, { 8, 1}, { 8, 2},
+{ 8, 3}, { 9, 1}, { 9, 2}, { 9, 3}, { 10, 1}, { 10, 2}, { 10, 3}, { 11, 1},
+{ 11, 2}, { 11, 3}, { 12, 1}, { 12, 2}, { 12, 3}, { 13, 1}, { 13, 2}, { 14, 1},
+{ 14, 2}, { 15, 1}, { 15, 2}, { 16, 1}, { 17, 1}, { 18, 1}, { 19, 1}, { 20, 1},
+{ 21, 1}, { 22, 1}, { 23, 1}, { 24, 1}, { 25, 1}, { 26, 1}, { 27, 1}, { 28, 1},
+{ 29, 1}, { 0, 1}, { 0, 2}, { 0, 3}, { 0, 4}, { 0, 5}, { 1, 1}, { 1, 2},
+{ 1, 3}, { 1, 4}, { 2, 1}, { 2, 2}, { 2, 3}, { 3, 1}, { 3, 2}, { 3, 3},
+{ 4, 1}, { 4, 2}, { 5, 1}, { 5, 2}, { 6, 1}, { 6, 2}, { 7, 1}, { 7, 2},
+{ 8, 1}, { 8, 2}, { 9, 1}, { 9, 2}, { 10, 1}, { 10, 2}, { 11, 1}, { 11, 2},
+{ 12, 1}, { 12, 2}, { 13, 1}, { 13, 2}, { 14, 1}, { 14, 2}, { 15, 1}, { 15, 2},
+{ 16, 1}, { 17, 1}, { 18, 1}, { 19, 1}, { 20, 1}, { 21, 1}, { 22, 1}, { 23, 1},
+{ 24, 1}, { 25, 1}, { 26, 1}, { 27, 1}, { 28, 1}, { 29, 1}, { 30, 1}, { 31, 1},
+{ 32, 1}, { 33, 1}, { 34, 1}, { 35, 1}, { 36, 1}, { 37, 1}, { 38, 1}, { 39, 1},
+{ 40, 1}, { 41, 1}, { 42, 1}, { 43, 1}
+},
+{
+{ 0, 1}, { 0, 2}, { 0, 3}, { 0, 4}, { 0, 5}, { 0, 6}, { 0, 7}, { 0, 8},
+{ 0, 9}, { 0, 10}, { 0, 11}, { 0, 12}, { 0, 13}, { 0, 14}, { 0, 15}, { 0, 16},
+{ 0, 17}, { 0, 18}, { 0, 19}, { 0, 20}, { 0, 21}, { 0, 22}, { 0, 23}, { 0, 24},
+{ 0, 25}, { 0, 26}, { 0, 27}, { 1, 1}, { 1, 2}, { 1, 3}, { 1, 4}, { 1, 5},
+{ 1, 6}, { 1, 7}, { 1, 8}, { 1, 9}, { 1, 10}, { 2, 1}, { 2, 2}, { 2, 3},
+{ 2, 4}, { 2, 5}, { 3, 1}, { 3, 2}, { 3, 3}, { 3, 4}, { 4, 1}, { 4, 2},
+{ 4, 3}, { 5, 1}, { 5, 2}, { 5, 3}, { 6, 1}, { 6, 2}, { 6, 3}, { 7, 1},
+{ 7, 2}, { 7, 3}, { 8, 1}, { 8, 2}, { 9, 1}, { 9, 2}, { 10, 1}, { 11, 1},
+{ 12, 1}, { 13, 1}, { 14, 1}, { 0, 1}, { 0, 2}, { 0, 3}, { 0, 4}, { 0, 5},
+{ 0, 6}, { 0, 7}, { 0, 8}, { 1, 1}, { 1, 2}, { 1, 3}, { 2, 1}, { 2, 2},
+{ 3, 1}, { 3, 2}, { 4, 1}, { 4, 2}, { 5, 1}, { 5, 2}, { 6, 1}, { 6, 2},
+{ 7, 1}, { 8, 1}, { 9, 1}, { 10, 1}, { 11, 1}, { 12, 1}, { 13, 1}, { 14, 1},
+{ 15, 1}, { 16, 1}, { 17, 1}, { 18, 1}, { 19, 1}, { 20, 1}
+},
+{
+{ 0, 1}, { 0, 2}, { 0, 3}, { 0, 4}, { 0, 5}, { 0, 6}, { 0, 7}, { 0, 8},
+{ 0, 9}, { 0, 10}, { 0, 11}, { 0, 12}, { 1, 1}, { 1, 2}, { 1, 3}, { 1, 4},
+{ 1, 5}, { 1, 6}, { 2, 1}, { 2, 2}, { 2, 3}, { 2, 4}, { 3, 1}, { 3, 2},
+{ 3, 3}, { 4, 1}, { 4, 2}, { 4, 3}, { 5, 1}, { 5, 2}, { 5, 3}, { 6, 1},
+{ 6, 2}, { 6, 3}, { 7, 1}, { 7, 2}, { 8, 1}, { 8, 2}, { 9, 1}, { 9, 2},
+{ 10, 1}, { 10, 2}, { 11, 1}, { 12, 1}, { 13, 1}, { 14, 1}, { 15, 1}, { 16, 1},
+{ 17, 1}, { 18, 1}, { 19, 1}, { 20, 1}, { 21, 1}, { 22, 1}, { 23, 1}, { 24, 1},
+{ 25, 1}, { 26, 1}, { 0, 1}, { 0, 2}, { 0, 3}, { 1, 1}, { 1, 2}, { 2, 1},
+{ 3, 1}, { 4, 1}, { 5, 1}, { 6, 1}, { 7, 1}, { 8, 1}, { 9, 1}, { 10, 1},
+{ 11, 1}, { 12, 1}, { 13, 1}, { 14, 1}, { 15, 1}, { 16, 1}, { 17, 1}, { 18, 1},
+{ 19, 1}, { 20, 1}, { 21, 1}, { 22, 1}, { 23, 1}, { 24, 1}, { 25, 1}, { 26, 1},
+{ 27, 1}, { 28, 1}, { 29, 1}, { 30, 1}, { 31, 1}, { 32, 1}, { 33, 1}, { 34, 1},
+{ 35, 1}, { 36, 1}, { 37, 1}, { 38, 1}, { 39, 1}, { 40, 1}
+},
+{
+{ 0, 1}, { 0, 2}, { 0, 3}, { 0, 4}, { 0, 5}, { 0, 6}, { 0, 7}, { 0, 8},
+{ 0, 9}, { 0, 10}, { 0, 11}, { 0, 12}, { 0, 13}, { 0, 14}, { 0, 15}, { 0, 16},
+{ 0, 17}, { 0, 18}, { 0, 19}, { 0, 20}, { 0, 21}, { 0, 22}, { 0, 23}, { 0, 24},
+{ 0, 25}, { 0, 26}, { 0, 27}, { 0, 28}, { 0, 29}, { 0, 30}, { 0, 31}, { 0, 32},
+{ 0, 33}, { 0, 34}, { 0, 35}, { 0, 36}, { 0, 37}, { 0, 38}, { 0, 39}, { 0, 40},
+{ 0, 41}, { 0, 42}, { 0, 43}, { 0, 44}, { 0, 45}, { 0, 46}, { 0, 47}, { 0, 48},
+{ 0, 49}, { 0, 50}, { 0, 51}, { 0, 52}, { 0, 53}, { 0, 54}, { 0, 55}, { 0, 56},
+{ 1, 1}, { 1, 2}, { 1, 3}, { 1, 4}, { 1, 5}, { 1, 6}, { 1, 7}, { 1, 8},
+{ 1, 9}, { 1, 10}, { 1, 11}, { 1, 12}, { 1, 13}, { 1, 14}, { 1, 15}, { 1, 16},
+{ 1, 17}, { 1, 18}, { 1, 19}, { 1, 20}, { 2, 1}, { 2, 2}, { 2, 3}, { 2, 4},
+{ 2, 5}, { 2, 6}, { 2, 7}, { 2, 8}, { 2, 9}, { 2, 10}, { 3, 1}, { 3, 2},
+{ 3, 3}, { 3, 4}, { 3, 5}, { 3, 6}, { 3, 7}, { 4, 1}, { 4, 2}, { 4, 3},
+{ 4, 4}, { 4, 5}, { 4, 6}, { 5, 1}, { 5, 2}, { 5, 3}, { 5, 4}, { 5, 5},
+{ 6, 1}, { 6, 2}, { 6, 3}, { 6, 4}, { 7, 1}, { 7, 2}, { 7, 3}, { 8, 1},
+{ 8, 2}, { 8, 3}, { 9, 1}, { 9, 2}, { 9, 3}, { 10, 1}, { 10, 2}, { 11, 1},
+{ 11, 2}, { 12, 1}, { 12, 2}, { 13, 1}, { 13, 2}, { 14, 1}, { 0, 1}, { 0, 2},
+{ 0, 3}, { 0, 4}, { 1, 1}, { 1, 2}, { 1, 3}, { 2, 1}, { 2, 2}, { 2, 3},
+{ 3, 1}, { 3, 2}, { 4, 1}, { 4, 2}, { 5, 1}, { 5, 2}, { 6, 1}, { 6, 2},
+{ 7, 1}, { 7, 2}, { 8, 1}, { 8, 2}, { 9, 1}, { 9, 2}, { 10, 1}, { 10, 2},
+{ 11, 1}, { 11, 2}, { 12, 1}, { 12, 2}, { 13, 1}, { 13, 2}, { 14, 1}, { 14, 2},
+{ 15, 1}, { 16, 1}
+},
+{
+{ 0, 1}, { 0, 2}, { 0, 3}, { 0, 4}, { 0, 5}, { 0, 6}, { 0, 7}, { 0, 8},
+{ 0, 9}, { 0, 10}, { 0, 11}, { 0, 12}, { 0, 13}, { 0, 14}, { 0, 15}, { 0, 16},
+{ 0, 17}, { 0, 18}, { 0, 19}, { 0, 20}, { 0, 21}, { 0, 22}, { 0, 23}, { 0, 24},
+{ 0, 25}, { 0, 26}, { 0, 27}, { 0, 28}, { 0, 29}, { 0, 30}, { 0, 31}, { 0, 32},
+{ 1, 1}, { 1, 2}, { 1, 3}, { 1, 4}, { 1, 5}, { 1, 6}, { 1, 7}, { 1, 8},
+{ 1, 9}, { 1, 10}, { 1, 11}, { 1, 12}, { 1, 13}, { 2, 1}, { 2, 2}, { 2, 3},
+{ 2, 4}, { 2, 5}, { 2, 6}, { 2, 7}, { 2, 8}, { 3, 1}, { 3, 2}, { 3, 3},
+{ 3, 4}, { 3, 5}, { 3, 6}, { 4, 1}, { 4, 2}, { 4, 3}, { 4, 4}, { 4, 5},
+{ 5, 1}, { 5, 2}, { 5, 3}, { 5, 4}, { 6, 1}, { 6, 2}, { 6, 3}, { 6, 4},
+{ 7, 1}, { 7, 2}, { 7, 3}, { 8, 1}, { 8, 2}, { 8, 3}, { 9, 1}, { 9, 2},
+{ 9, 3}, { 10, 1}, { 10, 2}, { 11, 1}, { 11, 2}, { 12, 1}, { 12, 2}, { 13, 1},
+{ 13, 2}, { 14, 1}, { 14, 2}, { 15, 1}, { 15, 2}, { 16, 1}, { 16, 2}, { 17, 1},
+{ 17, 2}, { 18, 1}, { 18, 2}, { 19, 1}, { 19, 2}, { 20, 1}, { 20, 2}, { 21, 1},
+{ 21, 2}, { 22, 1}, { 22, 2}, { 23, 1}, { 24, 1}, { 0, 1}, { 0, 2}, { 0, 3},
+{ 0, 4}, { 1, 1}, { 1, 2}, { 1, 3}, { 2, 1}, { 2, 2}, { 2, 3}, { 3, 1},
+{ 3, 2}, { 3, 3}, { 4, 1}, { 4, 2}, { 5, 1}, { 5, 2}, { 6, 1}, { 6, 2},
+{ 7, 1}, { 7, 2}, { 8, 1}, { 8, 2}, { 9, 1}, { 9, 2}, { 10, 1}, { 10, 2},
+{ 11, 1}, { 11, 2}, { 12, 1}, { 12, 2}, { 13, 1}, { 13, 2}, { 14, 1}, { 14, 2},
+{ 15, 1}, { 15, 2}, { 16, 1}, { 16, 2}, { 17, 1}, { 17, 2}, { 18, 1}, { 18, 2},
+{ 19, 1}, { 19, 2}, { 20, 1}, { 20, 2}, { 21, 1}, { 21, 2}, { 22, 1}, { 22, 2},
+{ 23, 1}, { 23, 2}, { 24, 1}, { 24, 2}, { 25, 1}, { 25, 2}, { 26, 1}, { 26, 2},
+{ 27, 1}, { 27, 2}, { 28, 1}, { 28, 2}, { 29, 1}, { 30, 1}
+}
+};
+
+static const uint8_t vc1_delta_level_table[AC_MODES][31] = {
+{
+ 19, 15, 12, 11, 6, 5, 4, 4, 4, 4,
+ 3, 3, 3, 3, 3, 3, 2, 2, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1
+},
+{
+ 23, 11, 8, 7, 5, 5, 4, 4, 3, 3,
+ 3, 3, 2, 2, 2, 2, 2, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1
+},
+{
+ 16, 11, 8, 7, 5, 4, 4, 3, 3, 3,
+ 3, 3, 3, 3, 2, 2, 1, 1, 1, 1,
+ 1
+},
+{
+ 14, 9, 5, 4, 4, 4, 3, 3, 3, 3,
+ 3, 3, 3, 2, 2, 2, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1
+},
+{
+ 27, 10, 5, 4, 3, 3, 3, 3, 2, 2,
+ 1, 1, 1, 1, 1
+},
+{
+ 12, 6, 4, 3, 3, 3, 3, 2, 2, 2,
+ 2, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1
+},
+{
+ 56, 20, 10, 7, 6, 5, 4, 3, 3, 3,
+ 2, 2, 2, 2, 1
+},
+{
+ 32, 13, 8, 6, 5, 4, 4, 3, 3, 3,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 1, 1
+}
+};
+
+static const uint8_t vc1_last_delta_level_table[AC_MODES][44] = {
+{
+ 6, 5, 4, 4, 3, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1
+},
+{
+ 9, 5, 4, 4, 3, 3, 3, 2, 2, 2,
+ 2, 2, 2, 2, 2, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1
+},
+{
+ 4, 4, 3, 3, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1
+},
+{
+ 5, 4, 3, 3, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1
+},
+{
+ 8, 3, 2, 2, 2, 2, 2, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1
+},
+{
+ 3, 2, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1
+},
+{
+ 4, 3, 3, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 1, 1
+},
+{
+ 4, 3, 3, 3, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 1,
+ 1
+}
+};
+
+static const int8_t vc1_delta_run_table[AC_MODES][57] = {
+{
+ -1, 30, 17, 15, 9, 5, 4, 3, 3, 3,
+ 3, 3, 2, 1, 1, 1, 0, 0, 0,
+ 0
+},
+{
+ -1, 26, 16, 11, 7, 5, 3, 3, 2, 1,
+ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0
+},
+{
+ -1, 20, 15, 13, 6, 4, 3, 3, 2, 1,
+ 1, 1, 0, 0, 0, 0, 0
+},
+{
+ -1, 29, 15, 12, 5, 2, 1, 1, 1, 1,
+ 0, 0, 0, 0, 0
+},
+{
+ -1, 14, 9, 7, 3, 2, 1, 1, 1, 1,
+ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0
+},
+{
+ -1, 26, 10, 6, 2, 1, 1, 0, 0, 0,
+ 0, 0, 0
+},
+{
+ -1, 14, 13, 9, 6, 5, 4, 3, 2, 2,
+ 2, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0
+},
+{
+ -1, 24, 22, 9, 6, 4, 3, 2, 2, 1,
+ 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0
+}
+};
+
+static const int8_t vc1_last_delta_run_table[AC_MODES][10] = {
+{
+ -1, 37, 15, 4, 3, 1, 0
+},
+{
+ -1, 36, 14, 6, 3, 1, 0, 0, 0,
+ 0
+},
+{
+ -1, 26, 13, 3, 1
+},
+{
+ -1, 43, 15, 3, 1, 0
+},
+{
+ -1, 20, 6, 1, 0, 0, 0, 0, 0
+},
+{
+ -1, 40, 1, 0
+},
+{
+ -1, 16, 14, 2, 0
+},
+{
+ -1, 30, 28, 3, 0
+}
+};
+
+#endif /* AVCODEC_VC1ACDATA_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vc1data.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vc1data.c
new file mode 100644
index 000000000..dd2978517
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vc1data.c
@@ -0,0 +1,645 @@
+/*
+ * VC-1 and WMV3 decoder
+ * copyright (c) 2006 Konstantin Shishkov
+ * (c) 2005 anonymous, Alex Beregszaszi, Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/vc1data.c
+ * VC-1 tables.
+ */
+
+#include "avcodec.h"
+#include "vc1.h"
+#include "vc1data.h"
+
+/** Table for conversion between TTBLK and TTMB */
+const int ff_vc1_ttblk_to_tt[3][8] = {
+ { TT_8X4, TT_4X8, TT_8X8, TT_4X4, TT_8X4_TOP, TT_8X4_BOTTOM, TT_4X8_RIGHT, TT_4X8_LEFT },
+ { TT_8X8, TT_4X8_RIGHT, TT_4X8_LEFT, TT_4X4, TT_8X4, TT_4X8, TT_8X4_BOTTOM, TT_8X4_TOP },
+ { TT_8X8, TT_4X8, TT_4X4, TT_8X4_BOTTOM, TT_4X8_RIGHT, TT_4X8_LEFT, TT_8X4, TT_8X4_TOP }
+};
+
+const int ff_vc1_ttfrm_to_tt[4] = { TT_8X8, TT_8X4, TT_4X8, TT_4X4 };
+
+/** MV P mode - the 5th element is only used for mode 1 */
+const uint8_t ff_vc1_mv_pmode_table[2][5] = {
+ { MV_PMODE_1MV_HPEL_BILIN, MV_PMODE_1MV, MV_PMODE_1MV_HPEL, MV_PMODE_INTENSITY_COMP, MV_PMODE_MIXED_MV },
+ { MV_PMODE_1MV, MV_PMODE_MIXED_MV, MV_PMODE_1MV_HPEL, MV_PMODE_INTENSITY_COMP, MV_PMODE_1MV_HPEL_BILIN }
+};
+const uint8_t ff_vc1_mv_pmode_table2[2][4] = {
+ { MV_PMODE_1MV_HPEL_BILIN, MV_PMODE_1MV, MV_PMODE_1MV_HPEL, MV_PMODE_MIXED_MV },
+ { MV_PMODE_1MV, MV_PMODE_MIXED_MV, MV_PMODE_1MV_HPEL, MV_PMODE_1MV_HPEL_BILIN }
+};
+
+const int ff_vc1_fps_nr[5] = { 24, 25, 30, 50, 60 },
+ ff_vc1_fps_dr[2] = { 1000, 1001 };
+const uint8_t ff_vc1_pquant_table[3][32] = {
+ { /* Implicit quantizer */
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 29, 31
+ },
+ { /* Explicit quantizer, pquantizer uniform */
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
+ },
+ { /* Explicit quantizer, pquantizer non-uniform */
+ 0, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 29, 31
+ }
+};
+
+/** @name VC-1 VLC tables and defines
+ * @todo Move this into the context
+ */
+//@{
+#define VC1_BFRACTION_VLC_BITS 7
+VLC ff_vc1_bfraction_vlc;
+#define VC1_IMODE_VLC_BITS 4
+VLC ff_vc1_imode_vlc;
+#define VC1_NORM2_VLC_BITS 3
+VLC ff_vc1_norm2_vlc;
+#define VC1_NORM6_VLC_BITS 9
+VLC ff_vc1_norm6_vlc;
+/* Could be optimized, one table only needs 8 bits */
+#define VC1_TTMB_VLC_BITS 9 //12
+VLC ff_vc1_ttmb_vlc[3];
+#define VC1_MV_DIFF_VLC_BITS 9 //15
+VLC ff_vc1_mv_diff_vlc[4];
+#define VC1_CBPCY_P_VLC_BITS 9 //14
+VLC ff_vc1_cbpcy_p_vlc[4];
+#define VC1_4MV_BLOCK_PATTERN_VLC_BITS 6
+VLC ff_vc1_4mv_block_pattern_vlc[4];
+#define VC1_TTBLK_VLC_BITS 5
+VLC ff_vc1_ttblk_vlc[3];
+#define VC1_SUBBLKPAT_VLC_BITS 6
+VLC ff_vc1_subblkpat_vlc[3];
+
+VLC ff_vc1_ac_coeff_table[8];
+//@}
+
+
+#if B_FRACTION_DEN==840 //original bfraction from vc9data.h, not conforming to standard
+/* bfraction is fractional, we scale to the GCD 3*5*7*8 = 840 */
+const int16_t ff_vc1_bfraction_lut[23] = {
+ 420 /*1/2*/, 280 /*1/3*/, 560 /*2/3*/, 210 /*1/4*/,
+ 630 /*3/4*/, 168 /*1/5*/, 336 /*2/5*/,
+ 504 /*3/5*/, 672 /*4/5*/, 140 /*1/6*/, 700 /*5/6*/,
+ 120 /*1/7*/, 240 /*2/7*/, 360 /*3/7*/, 480 /*4/7*/,
+ 600 /*5/7*/, 720 /*6/7*/, 105 /*1/8*/, 315 /*3/8*/,
+ 525 /*5/8*/, 735 /*7/8*/,
+ -1 /*inv.*/, 0 /*BI fm*/
+};
+#else
+/* pre-computed scales for all bfractions and base=256 */
+const int16_t ff_vc1_bfraction_lut[23] = {
+ 128 /*1/2*/, 85 /*1/3*/, 170 /*2/3*/, 64 /*1/4*/,
+ 192 /*3/4*/, 51 /*1/5*/, 102 /*2/5*/,
+ 153 /*3/5*/, 204 /*4/5*/, 43 /*1/6*/, 215 /*5/6*/,
+ 37 /*1/7*/, 74 /*2/7*/, 111 /*3/7*/, 148 /*4/7*/,
+ 185 /*5/7*/, 222 /*6/7*/, 32 /*1/8*/, 96 /*3/8*/,
+ 160 /*5/8*/, 224 /*7/8*/,
+ -1 /*inv.*/, 0 /*BI fm*/
+};
+#endif
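+/* Sketch of how the base=256 entries above appear to be derived (an assumed
+ * reading of the table; the decoder only ever indexes it): each fraction
+ * num/den is scaled by a per-denominator step of (256 + den/2) / den, e.g.
+ *
+ *     step(7) = (256 + 3) / 7 = 37,  so 5/7 maps to 5 * 37 = 185
+ *     step(6) = (256 + 3) / 6 = 43,  so 5/6 maps to 5 * 43 = 215
+ *
+ * whereas the B_FRACTION_DEN==840 variant is exact, e.g. 840 * 5 / 6 = 700. */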
+
+const uint8_t ff_vc1_bfraction_bits[23] = {
+ 3, 3, 3, 3,
+ 3, 3, 3,
+ 7, 7, 7, 7,
+ 7, 7, 7, 7,
+ 7, 7, 7, 7,
+ 7, 7,
+ 7, 7
+};
+const uint8_t ff_vc1_bfraction_codes[23] = {
+ 0, 1, 2, 3,
+ 4, 5, 6,
+ 112, 113, 114, 115,
+ 116, 117, 118, 119,
+ 120, 121, 122, 123,
+ 124, 125,
+ 126, 127
+};
+
+//Same as H.264
+const AVRational ff_vc1_pixel_aspect[16]={
+ {0, 1},
+ {1, 1},
+ {12, 11},
+ {10, 11},
+ {16, 11},
+ {40, 33},
+ {24, 11},
+ {20, 11},
+ {32, 11},
+ {80, 33},
+ {18, 11},
+ {15, 11},
+ {64, 33},
+ {160, 99},
+ {0, 1},
+ {0, 1}
+};
+
+/* BitPlane IMODE - such a small table... */
+const uint8_t ff_vc1_imode_codes[7] = {
+ 0, 2, 1, 3, 1, 2, 3
+};
+const uint8_t ff_vc1_imode_bits[7] = {
+ 4, 2, 3, 2, 4, 3, 3
+};
+
+/* Normal-2 imode */
+const uint8_t ff_vc1_norm2_codes[4] = {
+ 0, 4, 5, 3
+};
+const uint8_t ff_vc1_norm2_bits[4] = {
+ 1, 3, 3, 2
+};
+
+const uint16_t ff_vc1_norm6_codes[64] = {
+0x001, 0x002, 0x003, 0x000, 0x004, 0x001, 0x002, 0x047, 0x005, 0x003, 0x004, 0x04B, 0x005, 0x04D, 0x04E, 0x30E,
+0x006, 0x006, 0x007, 0x053, 0x008, 0x055, 0x056, 0x30D, 0x009, 0x059, 0x05A, 0x30C, 0x05C, 0x30B, 0x30A, 0x037,
+0x007, 0x00A, 0x00B, 0x043, 0x00C, 0x045, 0x046, 0x309, 0x00D, 0x049, 0x04A, 0x308, 0x04C, 0x307, 0x306, 0x036,
+0x00E, 0x051, 0x052, 0x305, 0x054, 0x304, 0x303, 0x035, 0x058, 0x302, 0x301, 0x034, 0x300, 0x033, 0x032, 0x007,
+};
+
+const uint8_t ff_vc1_norm6_bits[64] = {
+ 1, 4, 4, 8, 4, 8, 8, 10, 4, 8, 8, 10, 8, 10, 10, 13,
+ 4, 8, 8, 10, 8, 10, 10, 13, 8, 10, 10, 13, 10, 13, 13, 9,
+ 4, 8, 8, 10, 8, 10, 10, 13, 8, 10, 10, 13, 10, 13, 13, 9,
+ 8, 10, 10, 13, 10, 13, 13, 9, 10, 13, 13, 9, 13, 9, 9, 6,
+};
+#if 0
+/* Normal-6 imode */
+const uint8_t ff_vc1_norm6_spec[64][5] = {
+{ 0, 1, 1 },
+{ 1, 2, 4 },
+{ 2, 3, 4 },
+{ 3, 0, 8 },
+{ 4, 4, 4 },
+{ 5, 1, 8 },
+{ 6, 2, 8 },
+{ 7, 2, 5, 7, 5 },
+{ 8, 5, 4 },
+{ 9, 3, 8 },
+{10, 4, 8 },
+{11, 2, 5, 11, 5 },
+{12, 5, 8 },
+{13, 2, 5, 13, 5 },
+{14, 2, 5, 14, 5 },
+{15, 3, 5, 14, 8 },
+{16, 6, 4 },
+{17, 6, 8 },
+{18, 7, 8 },
+{19, 2, 5, 19, 5 },
+{20, 8, 8 },
+{21, 2, 5, 21, 5 },
+{22, 2, 5, 22, 5 },
+{23, 3, 5, 13, 8 },
+{24, 9, 8 },
+{25, 2, 5, 25, 5 },
+{26, 2, 5, 26, 5 },
+{27, 3, 5, 12, 8 },
+{28, 2, 5, 28, 5 },
+{29, 3, 5, 11, 8 },
+{30, 3, 5, 10, 8 },
+{31, 3, 5, 7, 4 },
+{32, 7, 4 },
+{33, 10, 8 },
+{34, 11, 8 },
+{35, 2, 5, 3, 5 },
+{36, 12, 8 },
+{37, 2, 5, 5, 5 },
+{38, 2, 5, 6, 5 },
+{39, 3, 5, 9, 8 },
+{40, 13, 8 },
+{41, 2, 5, 9, 5 },
+{42, 2, 5, 10, 5 },
+{43, 3, 5, 8, 8 },
+{44, 2, 5, 12, 5 },
+{45, 3, 5, 7, 8 },
+{46, 3, 5, 6, 8 },
+{47, 3, 5, 6, 4 },
+{48, 14, 8 },
+{49, 2, 5, 17, 5 },
+{50, 2, 5, 18, 5 },
+{51, 3, 5, 5, 8 },
+{52, 2, 5, 20, 5 },
+{53, 3, 5, 4, 8 },
+{54, 3, 5, 3, 8 },
+{55, 3, 5, 5, 4 },
+{56, 2, 5, 24, 5 },
+{57, 3, 5, 2, 8 },
+{58, 3, 5, 1, 8 },
+{59, 3, 5, 4, 4 },
+{60, 3, 5, 0, 8 },
+{61, 3, 5, 3, 4 },
+{62, 3, 5, 2, 4 },
+{63, 3, 5, 1, 1 },
+};
+#endif
+
+/* 4MV Block pattern VLC tables */
+const uint8_t ff_vc1_4mv_block_pattern_codes[4][16] = {
+ { 14, 58, 59, 25, 12, 26, 15, 15, 13, 24, 27, 0, 28, 1, 2, 2},
+ { 8, 18, 19, 4, 20, 5, 30, 11, 21, 31, 6, 12, 7, 13, 14, 0},
+ { 15, 6, 7, 2, 8, 3, 28, 9, 10, 29, 4, 11, 5, 12, 13, 0},
+ { 0, 11, 12, 4, 13, 5, 30, 16, 14, 31, 6, 17, 7, 18, 19, 10}
+};
+const uint8_t ff_vc1_4mv_block_pattern_bits[4][16] = {
+ { 5, 6, 6, 5, 5, 5, 5, 4, 5, 5, 5, 3, 5, 3, 3, 2},
+ { 4, 5, 5, 4, 5, 4, 5, 4, 5, 5, 4, 4, 4, 4, 4, 2},
+ { 4, 4, 4, 4, 4, 4, 5, 4, 4, 5, 4, 4, 4, 4, 4, 3},
+ { 2, 4, 4, 4, 4, 4, 5, 5, 4, 5, 4, 5, 4, 5, 5, 4}
+};
+
+const uint8_t wmv3_dc_scale_table[32]={
+ 0, 2, 4, 8, 8, 8, 9, 9,10,10,11,11,12,12,13,13,14,14,15,15,16,16,17,17,18,18,19,19,20,20,21,21
+};
+
+/* P-Picture CBPCY VLC tables */
+#if 1 // Looks like the original tables do not conform to the standard at all. Are they used for old WMV?
+const uint16_t ff_vc1_cbpcy_p_codes[4][64] = {
+ {
+ 0, 6, 15, 13, 13, 11, 3, 13, 5, 8, 49, 10, 12, 114, 102, 119,
+ 1, 54, 96, 8, 10, 111, 5, 15, 12, 10, 2, 12, 13, 115, 53, 63,
+ 1, 7, 1, 7, 14, 12, 4, 14, 1, 9, 97, 11, 7, 58, 52, 62,
+ 4, 103, 1, 9, 11, 56, 101, 118, 4, 110, 100, 30, 2, 5, 4, 3
+ },
+ {
+ 0, 9, 1, 18, 5, 14, 237, 26, 3, 121, 3, 22, 13, 16, 6, 30,
+ 2, 10, 1, 20, 12, 241, 5, 28, 16, 12, 3, 24, 28, 124, 239, 247,
+ 1, 240, 1, 19, 18, 15, 4, 27, 1, 122, 2, 23, 1, 17, 7, 31,
+ 1, 11, 2, 21, 19, 246, 238, 29, 17, 13, 236, 25, 58, 63, 8, 125
+ },
+ {
+ 0, 201, 25, 231, 5, 221, 1, 3, 2, 414, 2, 241, 16, 225, 195, 492,
+ 2, 412, 1, 240, 7, 224, 98, 245, 1, 220, 96, 5, 9, 230, 101, 247,
+ 1, 102, 1, 415, 24, 3, 2, 244, 3, 54, 3, 484, 17, 114, 200, 493,
+ 3, 413, 1, 4, 13, 113, 99, 485, 4, 111, 194, 243, 5, 29, 26, 31
+ },
+ {
+ 0, 28, 12, 44, 3, 36, 20, 52, 2, 32, 16, 48, 8, 40, 24, 28,
+ 1, 30, 14, 46, 6, 38, 22, 54, 3, 34, 18, 50, 10, 42, 26, 30,
+ 1, 29, 13, 45, 5, 37, 21, 53, 2, 33, 17, 49, 9, 41, 25, 29,
+ 1, 31, 15, 47, 7, 39, 23, 55, 4, 35, 19, 51, 11, 43, 27, 31
+ }
+};
+
+const uint8_t ff_vc1_cbpcy_p_bits[4][64] = {
+ {
+ 13, 13, 7, 13, 7, 13, 13, 12, 6, 13, 7, 12, 6, 8, 8, 8,
+ 5, 7, 8, 12, 6, 8, 13, 12, 7, 13, 13, 12, 6, 8, 7, 7,
+ 6, 13, 8, 12, 7, 13, 13, 12, 7, 13, 8, 12, 5, 7, 7, 7,
+ 6, 8, 13, 12, 6, 7, 8, 8, 5, 8, 8, 6, 3, 3, 3, 2
+ },
+ {
+ 14, 13, 8, 13, 3, 13, 8, 13, 3, 7, 8, 13, 4, 13, 13, 13,
+ 3, 13, 13, 13, 4, 8, 13, 13, 5, 13, 13, 13, 5, 7, 8, 8,
+ 3, 8, 14, 13, 5, 13, 13, 13, 4, 7, 13, 13, 6, 13, 13, 13,
+ 5, 13, 8, 13, 5, 8, 8, 13, 5, 13, 8, 13, 6, 6, 13, 7
+ },
+ {
+ 13, 8, 6, 8, 4, 8, 13, 12, 4, 9, 8, 8, 5, 8, 8, 9,
+ 5, 9, 10, 8, 4, 8, 7, 8, 6, 8, 7, 13, 4, 8, 7, 8,
+ 5, 7, 8, 9, 6, 13, 13, 8, 4, 6, 8, 9, 5, 7, 8, 9,
+ 5, 9, 9, 13, 5, 7, 7, 9, 4, 7, 8, 8, 3, 5, 5, 5
+ },
+ {
+ 9, 9, 9, 9, 2, 9, 9, 9, 2, 9, 9, 9, 9, 9, 9, 8,
+ 3, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8,
+ 2, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8
+ }
+};
+#else
+const uint16_t ff_vc1_cbpcy_p_codes[4][64] = {
+ {
+ 0, 1, 1, 4, 5, 1, 12, 4, 13, 14, 10, 11, 12, 7, 13, 2,
+ 15, 1, 96, 1, 49, 97, 2, 100, 3, 4, 5, 101, 102, 52, 53, 4,
+ 6, 7, 54, 103, 8, 9, 10, 110, 11, 12, 111, 56, 114, 58, 115, 5,
+ 13, 7, 8, 9, 10, 11, 12, 30, 13, 14, 15, 118, 119, 62, 63, 3
+ },
+ {
+ 0, 1, 2, 1, 3, 1, 16, 17, 5, 18, 12, 19, 13, 1, 28, 58,
+ 1, 1, 1, 2, 3, 2, 3, 236, 237, 4, 5, 238, 6, 7, 239, 8,
+ 9, 240, 10, 11, 121, 122, 12, 13, 14, 15, 241, 246, 16, 17, 124, 63,
+ 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 247, 125
+ },
+ {
+ 0, 1, 2, 3, 2, 3, 1, 4, 5, 24, 7, 13, 16, 17, 9, 5,
+ 25, 1, 1, 1, 2, 3, 96, 194, 1, 2, 98, 99, 195, 200, 101, 26,
+ 201, 102, 412, 413, 414, 54, 220, 111, 221, 3, 224, 113, 225, 114, 230, 29,
+ 231, 415, 240, 4, 241, 484, 5, 243, 3, 244, 245, 485, 492, 493, 247, 31
+ },
+ {
+ 0, 1, 1, 1, 2, 2, 3, 4, 3, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
+ 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 28, 29, 30, 31
+ }
+};
+const uint8_t ff_vc1_cbpcy_p_bits[4][64] = {
+ {
+ 13, 6, 5, 6, 6, 7, 7, 5, 7, 7, 6, 6, 6, 5, 6, 3,
+ 7, 8, 8, 13, 7, 8, 13, 8, 13, 13, 13, 8, 8, 7, 7, 3,
+ 13, 13, 7, 8, 13, 13, 13, 8, 13, 13, 8, 7, 8, 7, 8, 3,
+ 13, 12, 12, 12, 12, 12, 12, 6, 12, 12, 12, 8, 8, 7, 7, 2
+ },
+ {
+ 14, 3, 3, 5, 3, 4, 5, 5, 3, 5, 4, 5, 4, 6, 5, 6,
+ 8, 14, 13, 8, 8, 13, 13, 8, 8, 13, 13, 8, 13, 13, 8, 13,
+ 13, 8, 13, 13, 7, 7, 13, 13, 13, 13, 8, 8, 13, 13, 7, 6,
+ 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 8, 7
+ },
+ {
+ 13, 5, 5, 5, 4, 4, 6, 4, 4, 6, 4, 5, 5, 5, 4, 3,
+ 6, 8, 10, 9, 8, 8, 7, 8, 13, 13, 7, 7, 8, 8, 7, 5,
+ 8, 7, 9, 9, 9, 6, 8, 7, 8, 13, 8, 7, 8, 7, 8, 5,
+ 8, 9, 8, 13, 8, 9, 13, 8, 12, 8, 8, 9, 9, 9, 8, 5
+ },
+ {
+ 9, 2, 3, 9, 2, 9, 9, 9, 2, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8
+ }
+};
+#endif
+
+/* MacroBlock Transform Type: 7.1.3.11, p89
+ * 8x8:B
+ * 8x4:B:btm 8x4:B:top 8x4:B:both
+ * 4x8:B:right 4x8:B:left 4x8:B:both
+ * 4x4:B 8x8:MB
+ * 8x4:MB:btm 8x4:MB:top 8x4:MB:both
+ * 4x8:MB:right 4x8:MB:left
+ * 4x4:MB */
+const uint16_t ff_vc1_ttmb_codes[3][16] = {
+ {
+ 0x0003,
+ 0x002E, 0x005F, 0x0000,
+ 0x0016, 0x0015, 0x0001,
+ 0x0004, 0x0014,
+ 0x02F1, 0x0179, 0x017B,
+ 0x0BC0, 0x0BC1, 0x05E1,
+ 0x017A
+ },
+ {
+ 0x0006,
+ 0x0006, 0x0003, 0x0007,
+ 0x000F, 0x000E, 0x0000,
+ 0x0002, 0x0002,
+ 0x0014, 0x0011, 0x000B,
+ 0x0009, 0x0021, 0x0015,
+ 0x0020
+ },
+ {
+ 0x0006,
+ 0x0000, 0x000E, 0x0005,
+ 0x0002, 0x0003, 0x0003,
+ 0x000F, 0x0002,
+ 0x0081, 0x0021, 0x0009,
+ 0x0101, 0x0041, 0x0011,
+ 0x0100
+ }
+};
+
+const uint8_t ff_vc1_ttmb_bits[3][16] = {
+ {
+ 2,
+ 6, 7, 2,
+ 5, 5, 2,
+ 3, 5,
+ 10, 9, 9,
+ 12, 12, 11,
+ 9
+ },
+ {
+ 3,
+ 4, 4, 4,
+ 4, 4, 3,
+ 3, 2,
+ 7, 7, 6,
+ 6, 8, 7,
+ 8
+ },
+ {
+ 3,
+ 3, 4, 5,
+ 3, 3, 4,
+ 4, 2,
+ 10, 8, 6,
+ 11, 9, 7,
+ 11
+ }
+};
+
+/* TTBLK (Transform Type per Block) tables */
+const uint8_t ff_vc1_ttblk_codes[3][8] = {
+ { 0, 1, 3, 5, 16, 17, 18, 19},
+ { 3, 0, 1, 2, 3, 5, 8, 9},
+ { 1, 0, 1, 4, 6, 7, 10, 11}
+};
+const uint8_t ff_vc1_ttblk_bits[3][8] = {
+ { 2, 2, 2, 3, 5, 5, 5, 5},
+ { 2, 3, 3, 3, 3, 3, 4, 4},
+ { 2, 3, 3, 3, 3, 3, 4, 4}
+};
+
+/* SUBBLKPAT tables, p93-94, reordered */
+const uint8_t ff_vc1_subblkpat_codes[3][15] = {
+ { 14, 12, 7, 11, 9, 26, 2, 10, 27, 8, 0, 6, 1, 15, 1},
+ { 14, 0, 8, 15, 10, 4, 23, 13, 5, 9, 25, 3, 24, 22, 1},
+ { 5, 6, 2, 2, 8, 0, 28, 3, 1, 3, 29, 1, 19, 18, 15}
+};
+const uint8_t ff_vc1_subblkpat_bits[3][15] = {
+ { 5, 5, 5, 5, 5, 6, 4, 5, 6, 5, 4, 5, 4, 5, 1},
+ { 4, 3, 4, 4, 4, 5, 5, 4, 5, 4, 5, 4, 5, 5, 2},
+ { 3, 3, 4, 3, 4, 5, 5, 3, 5, 4, 5, 4, 5, 5, 4}
+};
+
+/* MV differential tables, p265 */
+const uint16_t ff_vc1_mv_diff_codes[4][73] = {
+ {
+ 0, 2, 3, 8, 576, 3, 2, 6,
+ 5, 577, 578, 7, 8, 9, 40, 19,
+ 37, 82, 21, 22, 23, 579, 580, 166,
+ 96, 167, 49, 194, 195, 581, 582, 583,
+ 292, 293, 294, 13, 2, 7, 24, 50,
+ 102, 295, 13, 7, 8, 18, 50, 103,
+ 38, 20, 21, 22, 39, 204, 103, 23,
+ 24, 25, 104, 410, 105, 106, 107, 108,
+ 109, 220, 411, 442, 222, 443, 446, 447,
+ 7 /* 73 elements */
+ },
+ {
+ 0, 4, 5, 3, 4, 3, 4, 5,
+ 20, 6, 21, 44, 45, 46, 3008, 95,
+ 112, 113, 57, 3009, 3010, 116, 117, 3011,
+ 118, 3012, 3013, 3014, 3015, 3016, 3017, 3018,
+ 3019, 3020, 3021, 3022, 1, 4, 15, 160,
+ 161, 41, 6, 11, 42, 162, 43, 119,
+ 56, 57, 58, 163, 236, 237, 3023, 119,
+ 120, 242, 122, 486, 1512, 487, 246, 494,
+ 1513, 495, 1514, 1515, 1516, 1517, 1518, 1519,
+ 31 /* 73 elements */
+ },
+ {
+ 0, 512, 513, 514, 515, 2, 3, 258,
+ 259, 260, 261, 262, 263, 264, 265, 266,
+ 267, 268, 269, 270, 271, 272, 273, 274,
+ 275, 276, 277, 278, 279, 280, 281, 282,
+ 283, 284, 285, 286, 1, 5, 287, 288,
+ 289, 290, 6, 7, 291, 292, 293, 294,
+ 295, 296, 297, 298, 299, 300, 301, 302,
+ 303, 304, 305, 306, 307, 308, 309, 310,
+ 311, 312, 313, 314, 315, 316, 317, 318,
+ 319 /* 73 elements */
+ },
+ {
+ 0, 1, 1, 2, 3, 4, 1, 5,
+ 4, 3, 5, 8, 6, 9, 10, 11,
+ 12, 7, 104, 14, 105, 4, 10, 15,
+ 11, 6, 14, 8, 106, 107, 108, 15,
+ 109, 9, 55, 10, 1, 2, 1, 2,
+ 3, 12, 6, 2, 6, 7, 28, 7,
+ 15, 8, 5, 18, 29, 152, 77, 24,
+ 25, 26, 39, 108, 13, 109, 55, 56,
+ 57, 116, 11, 153, 234, 235, 118, 119,
+ 15 /* 73 elements */
+ }
+};
+const uint8_t ff_vc1_mv_diff_bits[4][73] = {
+ {
+ 6, 7, 7, 8, 14, 6, 5, 6, 7, 14, 14, 6, 6, 6, 8, 9,
+ 10, 9, 7, 7, 7, 14, 14, 10, 9, 10, 8, 10, 10, 14, 14, 14,
+ 13, 13, 13, 6, 3, 5, 6, 8, 9, 13, 5, 4, 4, 5, 7, 9,
+ 6, 5, 5, 5, 6, 9, 8, 5, 5, 5, 7, 10, 7, 7, 7, 7,
+ 7, 8, 10, 9, 8, 9, 9, 9, 3 /* 73 elements */
+ },
+ {
+ 5, 7, 7, 6, 6, 5, 5, 6, 7, 5, 7, 8, 8, 8, 14, 9,
+ 9, 9, 8, 14, 14, 9, 9, 14, 9, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 2, 3, 6, 8, 8, 6, 3, 4, 6, 8, 6, 9,
+ 6, 6, 6, 8, 8, 8, 14, 7, 7, 8, 7, 9, 13, 9, 8, 9,
+ 13, 9, 13, 13, 13, 13, 13, 13, 5 /* 73 elements */
+
+ },
+ {
+ 3, 12, 12, 12, 12, 3, 4, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 11, 1, 5, 11, 11, 11, 11, 4, 4, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11, 11 /* 73 elements */
+ },
+ {
+ 15, 11, 15, 15, 15, 15, 12, 15, 12, 11, 12, 12, 15, 12, 12, 12,
+ 12, 15, 15, 12, 15, 10, 11, 12, 11, 10, 11, 10, 15, 15, 15, 11,
+ 15, 10, 14, 10, 4, 4, 5, 7, 8, 9, 5, 3, 4, 5, 6, 8,
+ 5, 4, 3, 5, 6, 8, 7, 5, 5, 5, 6, 7, 9, 7, 6, 6,
+ 6, 7, 10, 8, 8, 8, 7, 7, 4 /* 73 elements */
+ }
+};
+
+/* DC differentials low+hi-mo, p217; they are the same as in msmpeg4data.h */
+
+/* Table 232 */
+const int8_t ff_vc1_simple_progressive_4x4_zz [16] =
+{
+ 0, 8, 16, 1,
+ 9, 24, 17, 2,
+ 10, 18, 25, 3,
+ 11, 26, 19, 27
+};
+
+const int8_t ff_vc1_adv_progressive_8x4_zz [32] = /* Table 233 */
+{
+ 0, 8, 1, 16, 2, 9, 10, 3,
+ 24, 17, 4, 11, 18, 12, 5, 19,
+ 25, 13, 20, 26, 27, 6, 21, 28,
+ 14, 22, 29, 7, 30, 15, 23, 31
+};
+
+const int8_t ff_vc1_adv_progressive_4x8_zz [32] = /* Table 234 */
+{
+ 0, 1, 8, 2,
+ 9, 16, 17, 24,
+ 10, 32, 25, 18,
+ 40, 3, 33, 26,
+ 48, 11, 56, 41,
+ 34, 49, 57, 42,
+ 19, 50, 27, 58,
+ 35, 43, 51, 59
+};
+
+const int8_t ff_vc1_adv_interlaced_8x8_zz [64] = /* Table 235 */
+{
+ 0, 8, 1, 16, 24, 9, 2, 32,
+ 40, 48, 56, 17, 10, 3, 25, 18,
+ 11, 4, 33, 41, 49, 57, 26, 34,
+ 42, 50, 58, 19, 12, 5, 27, 20,
+ 13, 6, 35, 28, 21, 14, 7, 15,
+ 22, 29, 36, 43, 51, 59, 60, 52,
+ 44, 37, 30, 23, 31, 38, 45, 53,
+ 61, 62, 54, 46, 39, 47, 55, 63
+};
+
+const int8_t ff_vc1_adv_interlaced_8x4_zz [32] = /* Table 236 */
+{
+ 0, 8, 16, 24, 1, 9, 2, 17,
+ 25, 10, 3, 18, 26, 4, 11, 19,
+ 12, 5, 13, 20, 27, 6, 21, 28,
+ 14, 22, 29, 7, 30, 15, 23, 31
+};
+
+const int8_t ff_vc1_adv_interlaced_4x8_zz [32] = /* Table 237 */
+{
+ 0, 1, 2, 8,
+ 16, 9, 24, 17,
+ 10, 3, 32, 40,
+ 48, 56, 25, 18,
+ 33, 26, 41, 34,
+ 49, 57, 11, 42,
+ 19, 50, 27, 58,
+ 35, 43, 51, 59
+};
+
+const int8_t ff_vc1_adv_interlaced_4x4_zz [16] = /* Table 238 */
+{
+ 0, 8, 16, 24,
+ 1, 9, 17, 2,
+ 25, 10, 18, 3,
+ 26, 11, 19, 27
+};
+
+
+/* DQScale as specified in 8.1.3.9 - almost identical to 0x40000/i */
+const int32_t ff_vc1_dqscale[63] = {
+0x40000, 0x20000, 0x15555, 0x10000, 0xCCCD, 0xAAAB, 0x9249, 0x8000,
+ 0x71C7, 0x6666, 0x5D17, 0x5555, 0x4EC5, 0x4925, 0x4444, 0x4000,
+ 0x3C3C, 0x38E4, 0x35E5, 0x3333, 0x30C3, 0x2E8C, 0x2C86, 0x2AAB,
+ 0x28F6, 0x2762, 0x25ED, 0x2492, 0x234F, 0x2222, 0x2108, 0x2000,
+ 0x1F08, 0x1E1E, 0x1D42, 0x1C72, 0x1BAD, 0x1AF3, 0x1A42, 0x199A,
+ 0x18FA, 0x1862, 0x17D0, 0x1746, 0x16C1, 0x1643, 0x15CA, 0x1555,
+ 0x14E6, 0x147B, 0x1414, 0x13B1, 0x1352, 0x12F7, 0x129E, 0x1249,
+ 0x11F7, 0x11A8, 0x115B, 0x1111, 0x10C9, 0x1084, 0x1000
+};
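+/* A sketch of how the table above can be regenerated (an assumption for
+ * illustration; the decoder only indexes it): every entry but the last is
+ * 0x40000/i rounded to the nearest integer,
+ *
+ *     for (i = 1; i <= 62; i++)
+ *         dqscale[i - 1] = (0x40000 + i / 2) / i;   // e.g. i = 5 gives 0xCCCD
+ *
+ * while the final entry, 0x1000, matches 0x40000/64 rather than 0x40000/63. */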
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vc1data.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vc1data.h
new file mode 100644
index 000000000..d40befdaa
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vc1data.h
@@ -0,0 +1,157 @@
+/*
+ * VC-1 and WMV3 decoder
+ * copyright (c) 2006 Konstantin Shishkov
+ * (c) 2005 anonymous, Alex Beregszaszi, Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/vc1data.h
+ * VC-1 tables.
+ */
+
+#ifndef AVCODEC_VC1DATA_H
+#define AVCODEC_VC1DATA_H
+
+#include <stdint.h>
+#include "libavutil/rational.h"
+#include "get_bits.h"
+
+/** Table for conversion between TTBLK and TTMB */
+extern const int ff_vc1_ttblk_to_tt[3][8];
+
+extern const int ff_vc1_ttfrm_to_tt[4];
+
+/** MV P mode - the 5th element is only used for mode 1 */
+extern const uint8_t ff_vc1_mv_pmode_table[2][5];
+extern const uint8_t ff_vc1_mv_pmode_table2[2][4];
+
+extern const int ff_vc1_fps_nr[5], ff_vc1_fps_dr[2];
+extern const uint8_t ff_vc1_pquant_table[3][32];
+
+/** @name VC-1 VLC tables and defines
+ * @todo Move this into the context
+ */
+//@{
+#define VC1_BFRACTION_VLC_BITS 7
+extern VLC ff_vc1_bfraction_vlc;
+#define VC1_IMODE_VLC_BITS 4
+extern VLC ff_vc1_imode_vlc;
+#define VC1_NORM2_VLC_BITS 3
+extern VLC ff_vc1_norm2_vlc;
+#define VC1_NORM6_VLC_BITS 9
+extern VLC ff_vc1_norm6_vlc;
+/* Could be optimized, one table only needs 8 bits */
+#define VC1_TTMB_VLC_BITS 9 //12
+extern VLC ff_vc1_ttmb_vlc[3];
+#define VC1_MV_DIFF_VLC_BITS 9 //15
+extern VLC ff_vc1_mv_diff_vlc[4];
+#define VC1_CBPCY_P_VLC_BITS 9 //14
+extern VLC ff_vc1_cbpcy_p_vlc[4];
+#define VC1_4MV_BLOCK_PATTERN_VLC_BITS 6
+extern VLC ff_vc1_4mv_block_pattern_vlc[4];
+#define VC1_TTBLK_VLC_BITS 5
+extern VLC ff_vc1_ttblk_vlc[3];
+#define VC1_SUBBLKPAT_VLC_BITS 6
+extern VLC ff_vc1_subblkpat_vlc[3];
+
+extern VLC ff_vc1_ac_coeff_table[8];
+//@}
+
+
+#if 0 //original bfraction from vc9data.h, not conforming to standard
+/* Denominator used for ff_vc1_bfraction_lut */
+#define B_FRACTION_DEN 840
+
+/* bfraction is fractional, we scale to the GCD 3*5*7*8 = 840 */
+extern const int16_t ff_vc1_bfraction_lut[23];
+#else
+/* Denominator used for ff_vc1_bfraction_lut */
+#define B_FRACTION_DEN 256
+
+/* pre-computed scales for all bfractions and base=256 */
+extern const int16_t ff_vc1_bfraction_lut[23];
+#endif
+
+extern const uint8_t ff_vc1_bfraction_bits[23];
+extern const uint8_t ff_vc1_bfraction_codes[23];
+
+//Same as H.264
+extern const AVRational ff_vc1_pixel_aspect[16];
+
+/* BitPlane IMODE - such a small table... */
+extern const uint8_t ff_vc1_imode_codes[7];
+extern const uint8_t ff_vc1_imode_bits[7];
+
+/* Normal-2 imode */
+extern const uint8_t ff_vc1_norm2_codes[4];
+extern const uint8_t ff_vc1_norm2_bits[4];
+extern const uint16_t ff_vc1_norm6_codes[64];
+extern const uint8_t ff_vc1_norm6_bits[64];
+/* Normal-6 imode */
+extern const uint8_t ff_vc1_norm6_spec[64][5];
+
+/* 4MV Block pattern VLC tables */
+extern const uint8_t ff_vc1_4mv_block_pattern_codes[4][16];
+extern const uint8_t ff_vc1_4mv_block_pattern_bits[4][16];
+
+extern const uint8_t wmv3_dc_scale_table[32];
+
+/* P-Picture CBPCY VLC tables */
+extern const uint16_t ff_vc1_cbpcy_p_codes[4][64];
+extern const uint8_t ff_vc1_cbpcy_p_bits[4][64];
+
+/* MacroBlock Transform Type: 7.1.3.11, p89
+ * 8x8:B
+ * 8x4:B:btm 8x4:B:top 8x4:B:both
+ * 4x8:B:right 4x8:B:left 4x8:B:both
+ * 4x4:B 8x8:MB
+ * 8x4:MB:btm 8x4:MB:top 8x4:MB:both
+ * 4x8:MB:right 4x8:MB:left
+ * 4x4:MB */
+extern const uint16_t ff_vc1_ttmb_codes[3][16];
+
+extern const uint8_t ff_vc1_ttmb_bits[3][16];
+
+/* TTBLK (Transform Type per Block) tables */
+extern const uint8_t ff_vc1_ttblk_codes[3][8];
+extern const uint8_t ff_vc1_ttblk_bits[3][8];
+
+/* SUBBLKPAT tables, p93-94, reordered */
+extern const uint8_t ff_vc1_subblkpat_codes[3][15];
+extern const uint8_t ff_vc1_subblkpat_bits[3][15];
+
+/* MV differential tables, p265 */
+extern const uint16_t ff_vc1_mv_diff_codes[4][73];
+extern const uint8_t ff_vc1_mv_diff_bits[4][73];
+
+/* DC differentials low+hi-mo, p217; they are the same as in msmpeg4data.h */
+
+/* Scantables/ZZ scan are at 11.9 (p262) and 8.1.1.12 (p10) */
+extern const int8_t ff_vc1_simple_progressive_4x4_zz [16];
+extern const int8_t ff_vc1_adv_progressive_8x4_zz [32];
+extern const int8_t ff_vc1_adv_progressive_4x8_zz [32];
+extern const int8_t ff_vc1_adv_interlaced_8x8_zz [64];
+extern const int8_t ff_vc1_adv_interlaced_8x4_zz [32];
+extern const int8_t ff_vc1_adv_interlaced_4x8_zz [32];
+extern const int8_t ff_vc1_adv_interlaced_4x4_zz [16];
+
+/* DQScale as specified in 8.1.3.9 - almost identical to 0x40000/i */
+extern const int32_t ff_vc1_dqscale[63];
+
+#endif /* AVCODEC_VC1DATA_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vc1dec.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vc1dec.c
new file mode 100644
index 000000000..c38c494c1
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vc1dec.c
@@ -0,0 +1,3331 @@
+/*
+ * VC-1 and WMV3 decoder
+ * Copyright (c) 2006-2007 Konstantin Shishkov
+ * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/vc1dec.c
+ * VC-1 and WMV3 decoder
+ *
+ */
+#include "internal.h"
+#include "dsputil.h"
+#include "avcodec.h"
+#include "mpegvideo.h"
+#include "vc1.h"
+#include "vc1data.h"
+#include "vc1acdata.h"
+#include "msmpeg4data.h"
+#include "unary.h"
+#include "simple_idct.h"
+#include "mathops.h"
+#include "h263.h"
+
+#undef NDEBUG
+#include <assert.h>
+
+#define MB_INTRA_VLC_BITS 9
+#define DC_VLC_BITS 9
+#define AC_VLC_BITS 9
+static const uint16_t table_mb_intra[64][2];
+
+
+static const uint16_t vlc_offs[] = {
+ 0, 520, 552, 616, 1128, 1160, 1224, 1740, 1772, 1836, 1900, 2436,
+ 2986, 3050, 3610, 4154, 4218, 4746, 5326, 5390, 5902, 6554, 7658, 8620,
+ 9262, 10202, 10756, 11310, 12228, 15078
+};
+
+/**
+ * Init VC-1 specific tables and VC1Context members
+ * @param v The VC1Context to initialize
+ * @return Always 0
+ */
+static int vc1_init_common(VC1Context *v)
+{
+ static int done = 0;
+ int i = 0;
+ static VLC_TYPE vlc_table[15078][2];
+
+ v->hrd_rate = v->hrd_buffer = NULL;
+
+ /* VLC tables */
+ if(!done)
+ {
+ INIT_VLC_STATIC(&ff_vc1_bfraction_vlc, VC1_BFRACTION_VLC_BITS, 23,
+ ff_vc1_bfraction_bits, 1, 1,
+ ff_vc1_bfraction_codes, 1, 1, 1 << VC1_BFRACTION_VLC_BITS);
+ INIT_VLC_STATIC(&ff_vc1_norm2_vlc, VC1_NORM2_VLC_BITS, 4,
+ ff_vc1_norm2_bits, 1, 1,
+ ff_vc1_norm2_codes, 1, 1, 1 << VC1_NORM2_VLC_BITS);
+ INIT_VLC_STATIC(&ff_vc1_norm6_vlc, VC1_NORM6_VLC_BITS, 64,
+ ff_vc1_norm6_bits, 1, 1,
+ ff_vc1_norm6_codes, 2, 2, 556);
+ INIT_VLC_STATIC(&ff_vc1_imode_vlc, VC1_IMODE_VLC_BITS, 7,
+ ff_vc1_imode_bits, 1, 1,
+ ff_vc1_imode_codes, 1, 1, 1 << VC1_IMODE_VLC_BITS);
+ for (i=0; i<3; i++)
+ {
+ ff_vc1_ttmb_vlc[i].table = &vlc_table[vlc_offs[i*3+0]];
+ ff_vc1_ttmb_vlc[i].table_allocated = vlc_offs[i*3+1] - vlc_offs[i*3+0];
+ init_vlc(&ff_vc1_ttmb_vlc[i], VC1_TTMB_VLC_BITS, 16,
+ ff_vc1_ttmb_bits[i], 1, 1,
+ ff_vc1_ttmb_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
+ ff_vc1_ttblk_vlc[i].table = &vlc_table[vlc_offs[i*3+1]];
+ ff_vc1_ttblk_vlc[i].table_allocated = vlc_offs[i*3+2] - vlc_offs[i*3+1];
+ init_vlc(&ff_vc1_ttblk_vlc[i], VC1_TTBLK_VLC_BITS, 8,
+ ff_vc1_ttblk_bits[i], 1, 1,
+ ff_vc1_ttblk_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
+ ff_vc1_subblkpat_vlc[i].table = &vlc_table[vlc_offs[i*3+2]];
+ ff_vc1_subblkpat_vlc[i].table_allocated = vlc_offs[i*3+3] - vlc_offs[i*3+2];
+ init_vlc(&ff_vc1_subblkpat_vlc[i], VC1_SUBBLKPAT_VLC_BITS, 15,
+ ff_vc1_subblkpat_bits[i], 1, 1,
+ ff_vc1_subblkpat_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
+ }
+ for(i=0; i<4; i++)
+ {
+ ff_vc1_4mv_block_pattern_vlc[i].table = &vlc_table[vlc_offs[i*3+9]];
+ ff_vc1_4mv_block_pattern_vlc[i].table_allocated = vlc_offs[i*3+10] - vlc_offs[i*3+9];
+ init_vlc(&ff_vc1_4mv_block_pattern_vlc[i], VC1_4MV_BLOCK_PATTERN_VLC_BITS, 16,
+ ff_vc1_4mv_block_pattern_bits[i], 1, 1,
+ ff_vc1_4mv_block_pattern_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
+ ff_vc1_cbpcy_p_vlc[i].table = &vlc_table[vlc_offs[i*3+10]];
+ ff_vc1_cbpcy_p_vlc[i].table_allocated = vlc_offs[i*3+11] - vlc_offs[i*3+10];
+ init_vlc(&ff_vc1_cbpcy_p_vlc[i], VC1_CBPCY_P_VLC_BITS, 64,
+ ff_vc1_cbpcy_p_bits[i], 1, 1,
+ ff_vc1_cbpcy_p_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
+ ff_vc1_mv_diff_vlc[i].table = &vlc_table[vlc_offs[i*3+11]];
+ ff_vc1_mv_diff_vlc[i].table_allocated = vlc_offs[i*3+12] - vlc_offs[i*3+11];
+ init_vlc(&ff_vc1_mv_diff_vlc[i], VC1_MV_DIFF_VLC_BITS, 73,
+ ff_vc1_mv_diff_bits[i], 1, 1,
+ ff_vc1_mv_diff_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
+ }
+ for(i=0; i<8; i++){
+ ff_vc1_ac_coeff_table[i].table = &vlc_table[vlc_offs[i+21]];
+ ff_vc1_ac_coeff_table[i].table_allocated = vlc_offs[i+22] - vlc_offs[i+21];
+ init_vlc(&ff_vc1_ac_coeff_table[i], AC_VLC_BITS, vc1_ac_sizes[i],
+ &vc1_ac_tables[i][0][1], 8, 4,
+ &vc1_ac_tables[i][0][0], 8, 4, INIT_VLC_USE_NEW_STATIC);
+ }
+ done = 1;
+ }
+
+ /* Other defaults */
+ v->pq = -1;
+ v->mvrange = 0; /* 7.1.1.18, p80 */
+
+ return 0;
+}
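+/* Note on the static VLC storage in vc1_init_common(): vlc_offs[] slices the
+ * single vlc_table[15078] buffer into one region per VLC, e.g.
+ * ff_vc1_ttmb_vlc[0] gets entries [0, 520) and ff_vc1_ttblk_vlc[0] gets
+ * [520, 552), so the offsets have to match the sizes that the
+ * INIT_VLC_USE_NEW_STATIC builds actually require. */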
+
+/***********************************************************************/
+/**
+ * @defgroup vc1bitplane VC-1 Bitplane decoding
+ * @see 8.7, p56
+ * @{
+ */
+
+/**
+ * Imode types
+ * @{
+ */
+enum Imode {
+ IMODE_RAW,
+ IMODE_NORM2,
+ IMODE_DIFF2,
+ IMODE_NORM6,
+ IMODE_DIFF6,
+ IMODE_ROWSKIP,
+ IMODE_COLSKIP
+};
+/** @} */ //imode defines
+
+
+/** @} */ //Bitplane group
+
+static void vc1_loop_filter_iblk(MpegEncContext *s, int pq)
+{
+ int i, j;
+ if(!s->first_slice_line)
+ s->dsp.vc1_v_loop_filter16(s->dest[0], s->linesize, pq);
+ s->dsp.vc1_v_loop_filter16(s->dest[0] + 8*s->linesize, s->linesize, pq);
+ for(i = !s->mb_x*8; i < 16; i += 8)
+ s->dsp.vc1_h_loop_filter16(s->dest[0] + i, s->linesize, pq);
+ for(j = 0; j < 2; j++){
+ if(!s->first_slice_line)
+ s->dsp.vc1_v_loop_filter8(s->dest[j+1], s->uvlinesize, pq);
+ if(s->mb_x)
+ s->dsp.vc1_h_loop_filter8(s->dest[j+1], s->uvlinesize, pq);
+ }
+}
+
+/** Put block onto picture
+ */
+static void vc1_put_block(VC1Context *v, DCTELEM block[6][64])
+{
+ uint8_t *Y;
+ int ys, us, vs;
+ DSPContext *dsp = &v->s.dsp;
+
+ if(v->rangeredfrm) {
+ int i, j, k;
+ for(k = 0; k < 6; k++)
+ for(j = 0; j < 8; j++)
+ for(i = 0; i < 8; i++)
+ block[k][i + j*8] = ((block[k][i + j*8] - 128) << 1) + 128;
+
+ }
+ ys = v->s.current_picture.linesize[0];
+ us = v->s.current_picture.linesize[1];
+ vs = v->s.current_picture.linesize[2];
+ Y = v->s.dest[0];
+
+ dsp->put_pixels_clamped(block[0], Y, ys);
+ dsp->put_pixels_clamped(block[1], Y + 8, ys);
+ Y += ys * 8;
+ dsp->put_pixels_clamped(block[2], Y, ys);
+ dsp->put_pixels_clamped(block[3], Y + 8, ys);
+
+ if(!(v->s.flags & CODEC_FLAG_GRAY)) {
+ dsp->put_pixels_clamped(block[4], v->s.dest[1], us);
+ dsp->put_pixels_clamped(block[5], v->s.dest[2], vs);
+ }
+}
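+/* The range-reduction step in vc1_put_block() doubles each value's distance
+ * from 128: ((x - 128) << 1) + 128, so e.g. 100 becomes 72 and 160 becomes
+ * 192. */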
+
+/** Do motion compensation over 1 macroblock
+ * Mostly adapted from hpel_motion() and qpel_motion() in mpegvideo.c
+ */
+static void vc1_mc_1mv(VC1Context *v, int dir)
+{
+ MpegEncContext *s = &v->s;
+ DSPContext *dsp = &v->s.dsp;
+ uint8_t *srcY, *srcU, *srcV;
+ int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
+
+ if(!v->s.last_picture.data[0])return;
+
+ mx = s->mv[dir][0][0];
+ my = s->mv[dir][0][1];
+
+ // store motion vectors for further use in B frames
+ if(s->pict_type == FF_P_TYPE) {
+ s->current_picture.motion_val[1][s->block_index[0]][0] = mx;
+ s->current_picture.motion_val[1][s->block_index[0]][1] = my;
+ }
+ uvmx = (mx + ((mx & 3) == 3)) >> 1;
+ uvmy = (my + ((my & 3) == 3)) >> 1;
+ if(v->fastuvmc) {
+ uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
+ uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
+ }
+ if(!dir) {
+ srcY = s->last_picture.data[0];
+ srcU = s->last_picture.data[1];
+ srcV = s->last_picture.data[2];
+ } else {
+ srcY = s->next_picture.data[0];
+ srcU = s->next_picture.data[1];
+ srcV = s->next_picture.data[2];
+ }
+
+ src_x = s->mb_x * 16 + (mx >> 2);
+ src_y = s->mb_y * 16 + (my >> 2);
+ uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
+ uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
+
+ if(v->profile != PROFILE_ADVANCED){
+ src_x = av_clip( src_x, -16, s->mb_width * 16);
+ src_y = av_clip( src_y, -16, s->mb_height * 16);
+ uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
+ uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
+ }else{
+ src_x = av_clip( src_x, -17, s->avctx->coded_width);
+ src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
+ uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
+ uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
+ }
+
+ srcY += src_y * s->linesize + src_x;
+ srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
+ srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
+
+ /* for grayscale we should not try to read from unknown area */
+ if(s->flags & CODEC_FLAG_GRAY) {
+ srcU = s->edge_emu_buffer + 18 * s->linesize;
+ srcV = s->edge_emu_buffer + 18 * s->linesize;
+ }
+
+ if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
+ || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel*3
+ || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 16 - s->mspel*3){
+ uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize;
+
+ srcY -= s->mspel * (1 + s->linesize);
+ ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17+s->mspel*2, 17+s->mspel*2,
+ src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
+ srcY = s->edge_emu_buffer;
+ ff_emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8+1, 8+1,
+ uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
+ ff_emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8+1, 8+1,
+ uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
+ srcU = uvbuf;
+ srcV = uvbuf + 16;
+ /* if we deal with range reduction we need to scale source blocks */
+ if(v->rangeredfrm) {
+ int i, j;
+ uint8_t *src, *src2;
+
+ src = srcY;
+ for(j = 0; j < 17 + s->mspel*2; j++) {
+ for(i = 0; i < 17 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
+ src += s->linesize;
+ }
+ src = srcU; src2 = srcV;
+ for(j = 0; j < 9; j++) {
+ for(i = 0; i < 9; i++) {
+ src[i] = ((src[i] - 128) >> 1) + 128;
+ src2[i] = ((src2[i] - 128) >> 1) + 128;
+ }
+ src += s->uvlinesize;
+ src2 += s->uvlinesize;
+ }
+ }
+ /* if we deal with intensity compensation we need to scale source blocks */
+ if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
+ int i, j;
+ uint8_t *src, *src2;
+
+ src = srcY;
+ for(j = 0; j < 17 + s->mspel*2; j++) {
+ for(i = 0; i < 17 + s->mspel*2; i++) src[i] = v->luty[src[i]];
+ src += s->linesize;
+ }
+ src = srcU; src2 = srcV;
+ for(j = 0; j < 9; j++) {
+ for(i = 0; i < 9; i++) {
+ src[i] = v->lutuv[src[i]];
+ src2[i] = v->lutuv[src2[i]];
+ }
+ src += s->uvlinesize;
+ src2 += s->uvlinesize;
+ }
+ }
+ srcY += s->mspel * (1 + s->linesize);
+ }
+
+ if(s->mspel) {
+ dxy = ((my & 3) << 2) | (mx & 3);
+ dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] , srcY , s->linesize, v->rnd);
+ dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8, srcY + 8, s->linesize, v->rnd);
+ srcY += s->linesize * 8;
+ dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize , srcY , s->linesize, v->rnd);
+ dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
+ } else { // hpel mc - always used for luma
+ dxy = (my & 2) | ((mx & 2) >> 1);
+
+ if(!v->rnd)
+ dsp->put_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
+ else
+ dsp->put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
+ }
+
+ if(s->flags & CODEC_FLAG_GRAY) return;
+ /* Chroma MC always uses qpel bilinear */
+ uvmx = (uvmx&3)<<1;
+ uvmy = (uvmy&3)<<1;
+ if(!v->rnd){
+ dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
+ dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
+ }else{
+ dsp->put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
+ dsp->put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
+ }
+}
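+/* Worked example for the chroma MV derivation in vc1_mc_1mv(): for a luma
+ * component mx = 7 (quarter-pel), (mx & 3) == 3, so uvmx = (7 + 1) >> 1 = 4.
+ * With fastuvmc set, odd chroma components are then rounded towards zero to
+ * half-pel positions, e.g. uvmx = 5 becomes 4 and uvmx = -5 becomes -4. */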
+
+/** Do motion compensation for 4-MV macroblock - luminance block
+ */
+static void vc1_mc_4mv_luma(VC1Context *v, int n)
+{
+ MpegEncContext *s = &v->s;
+ DSPContext *dsp = &v->s.dsp;
+ uint8_t *srcY;
+ int dxy, mx, my, src_x, src_y;
+ int off;
+
+ if(!v->s.last_picture.data[0])return;
+ mx = s->mv[0][n][0];
+ my = s->mv[0][n][1];
+ srcY = s->last_picture.data[0];
+
+ off = s->linesize * 4 * (n&2) + (n&1) * 8;
+
+ src_x = s->mb_x * 16 + (n&1) * 8 + (mx >> 2);
+ src_y = s->mb_y * 16 + (n&2) * 4 + (my >> 2);
+
+ if(v->profile != PROFILE_ADVANCED){
+ src_x = av_clip( src_x, -16, s->mb_width * 16);
+ src_y = av_clip( src_y, -16, s->mb_height * 16);
+ }else{
+ src_x = av_clip( src_x, -17, s->avctx->coded_width);
+ src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
+ }
+
+ srcY += src_y * s->linesize + src_x;
+
+ if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
+ || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 8 - s->mspel*2
+ || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 8 - s->mspel*2){
+ srcY -= s->mspel * (1 + s->linesize);
+ ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 9+s->mspel*2, 9+s->mspel*2,
+ src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
+ srcY = s->edge_emu_buffer;
+ /* if we deal with range reduction we need to scale source blocks */
+ if(v->rangeredfrm) {
+ int i, j;
+ uint8_t *src;
+
+ src = srcY;
+ for(j = 0; j < 9 + s->mspel*2; j++) {
+ for(i = 0; i < 9 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
+ src += s->linesize;
+ }
+ }
+ /* if we deal with intensity compensation we need to scale source blocks */
+ if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
+ int i, j;
+ uint8_t *src;
+
+ src = srcY;
+ for(j = 0; j < 9 + s->mspel*2; j++) {
+ for(i = 0; i < 9 + s->mspel*2; i++) src[i] = v->luty[src[i]];
+ src += s->linesize;
+ }
+ }
+ srcY += s->mspel * (1 + s->linesize);
+ }
+
+ if(s->mspel) {
+ dxy = ((my & 3) << 2) | (mx & 3);
+ dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, v->rnd);
+ } else { // hpel mc - always used for luma
+ dxy = (my & 2) | ((mx & 2) >> 1);
+ if(!v->rnd)
+ dsp->put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
+ else
+ dsp->put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
+ }
+}
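+/* In vc1_mc_4mv_luma() above, 'off' places luma sub-block n inside the 16x16
+ * destination: (n & 1) * 8 selects the left/right half and
+ * s->linesize * 4 * (n & 2) (i.e. 8 rows for n >= 2) the top/bottom half,
+ * so n == 3 ends up at s->dest[0] + 8 * s->linesize + 8. */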
+
+static inline int median4(int a, int b, int c, int d)
+{
+ if(a < b) {
+ if(c < d) return (FFMIN(b, d) + FFMAX(a, c)) / 2;
+ else return (FFMIN(b, c) + FFMAX(a, d)) / 2;
+ } else {
+ if(c < d) return (FFMIN(a, d) + FFMAX(b, c)) / 2;
+ else return (FFMIN(a, c) + FFMAX(b, d)) / 2;
+ }
+}
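+/* median4() above returns the average of the two middle values of its four
+ * arguments (minimum and maximum are discarded).  For example
+ * median4(1, 5, 2, 9): a < b and c < d, so the result is
+ * (FFMIN(5, 9) + FFMAX(1, 2)) / 2 = (5 + 2) / 2 = 3. */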
+
+
+/** Do motion compensation for 4-MV macroblock - both chroma blocks
+ */
+static void vc1_mc_4mv_chroma(VC1Context *v)
+{
+ MpegEncContext *s = &v->s;
+ DSPContext *dsp = &v->s.dsp;
+ uint8_t *srcU, *srcV;
+ int uvmx, uvmy, uvsrc_x, uvsrc_y;
+ int i, idx, tx = 0, ty = 0;
+ int mvx[4], mvy[4], intra[4];
+ static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
+
+ if(!v->s.last_picture.data[0])return;
+ if(s->flags & CODEC_FLAG_GRAY) return;
+
+ for(i = 0; i < 4; i++) {
+ mvx[i] = s->mv[0][i][0];
+ mvy[i] = s->mv[0][i][1];
+ intra[i] = v->mb_type[0][s->block_index[i]];
+ }
+
+ /* calculate chroma MV vector from four luma MVs */
+ idx = (intra[3] << 3) | (intra[2] << 2) | (intra[1] << 1) | intra[0];
+ if(!idx) { // all blocks are inter
+ tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
+ ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
+ } else if(count[idx] == 1) { // 3 inter blocks
+ switch(idx) {
+ case 0x1:
+ tx = mid_pred(mvx[1], mvx[2], mvx[3]);
+ ty = mid_pred(mvy[1], mvy[2], mvy[3]);
+ break;
+ case 0x2:
+ tx = mid_pred(mvx[0], mvx[2], mvx[3]);
+ ty = mid_pred(mvy[0], mvy[2], mvy[3]);
+ break;
+ case 0x4:
+ tx = mid_pred(mvx[0], mvx[1], mvx[3]);
+ ty = mid_pred(mvy[0], mvy[1], mvy[3]);
+ break;
+ case 0x8:
+ tx = mid_pred(mvx[0], mvx[1], mvx[2]);
+ ty = mid_pred(mvy[0], mvy[1], mvy[2]);
+ break;
+ }
+ } else if(count[idx] == 2) {
+ int t1 = 0, t2 = 0;
+ for(i=0; i<3;i++) if(!intra[i]) {t1 = i; break;}
+ for(i= t1+1; i<4; i++)if(!intra[i]) {t2 = i; break;}
+ tx = (mvx[t1] + mvx[t2]) / 2;
+ ty = (mvy[t1] + mvy[t2]) / 2;
+ } else {
+ s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
+ s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
+ return; //no need to do MC for inter blocks
+ }
+
+ s->current_picture.motion_val[1][s->block_index[0]][0] = tx;
+ s->current_picture.motion_val[1][s->block_index[0]][1] = ty;
+ uvmx = (tx + ((tx&3) == 3)) >> 1;
+ uvmy = (ty + ((ty&3) == 3)) >> 1;
+ if(v->fastuvmc) {
+ uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
+ uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
+ }
+
+ uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
+ uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
+
+ if(v->profile != PROFILE_ADVANCED){
+ uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
+ uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
+ }else{
+ uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
+ uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
+ }
+
+ srcU = s->last_picture.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
+ srcV = s->last_picture.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
+ if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
+ || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
+ || (unsigned)uvsrc_y > (s->v_edge_pos >> 1) - 9){
+ ff_emulated_edge_mc(s->edge_emu_buffer , srcU, s->uvlinesize, 8+1, 8+1,
+ uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
+ ff_emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize, 8+1, 8+1,
+ uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
+ srcU = s->edge_emu_buffer;
+ srcV = s->edge_emu_buffer + 16;
+
+ /* if we deal with range reduction we need to scale source blocks */
+ if(v->rangeredfrm) {
+ int i, j;
+ uint8_t *src, *src2;
+
+ src = srcU; src2 = srcV;
+ for(j = 0; j < 9; j++) {
+ for(i = 0; i < 9; i++) {
+ src[i] = ((src[i] - 128) >> 1) + 128;
+ src2[i] = ((src2[i] - 128) >> 1) + 128;
+ }
+ src += s->uvlinesize;
+ src2 += s->uvlinesize;
+ }
+ }
+ /* if we deal with intensity compensation we need to scale source blocks */
+ if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
+ int i, j;
+ uint8_t *src, *src2;
+
+ src = srcU; src2 = srcV;
+ for(j = 0; j < 9; j++) {
+ for(i = 0; i < 9; i++) {
+ src[i] = v->lutuv[src[i]];
+ src2[i] = v->lutuv[src2[i]];
+ }
+ src += s->uvlinesize;
+ src2 += s->uvlinesize;
+ }
+ }
+ }
+
+ /* Chroma MC always uses qpel bilinear */
+ uvmx = (uvmx&3)<<1;
+ uvmy = (uvmy&3)<<1;
+ if(!v->rnd){
+ dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
+ dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
+ }else{
+ dsp->put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
+ dsp->put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
+ }
+}
+
+/***********************************************************************/
+/**
+ * @defgroup vc1block VC-1 Block-level functions
+ * @see 7.1.4, p91 and 8.1.1.7, p(1)04
+ * @{
+ */
+
+/**
+ * @def GET_MQUANT
+ * @brief Get macroblock-level quantizer scale
+ */
+#define GET_MQUANT() \
+ if (v->dquantfrm) \
+ { \
+ int edges = 0; \
+ if (v->dqprofile == DQPROFILE_ALL_MBS) \
+ { \
+ if (v->dqbilevel) \
+ { \
+ mquant = (get_bits1(gb)) ? v->altpq : v->pq; \
+ } \
+ else \
+ { \
+ mqdiff = get_bits(gb, 3); \
+ if (mqdiff != 7) mquant = v->pq + mqdiff; \
+ else mquant = get_bits(gb, 5); \
+ } \
+ } \
+ if(v->dqprofile == DQPROFILE_SINGLE_EDGE) \
+ edges = 1 << v->dqsbedge; \
+ else if(v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
+ edges = (3 << v->dqsbedge) % 15; \
+ else if(v->dqprofile == DQPROFILE_FOUR_EDGES) \
+ edges = 15; \
+ if((edges&1) && !s->mb_x) \
+ mquant = v->altpq; \
+ if((edges&2) && s->first_slice_line) \
+ mquant = v->altpq; \
+ if((edges&4) && s->mb_x == (s->mb_width - 1)) \
+ mquant = v->altpq; \
+ if((edges&8) && s->mb_y == (s->mb_height - 1)) \
+ mquant = v->altpq; \
+ }
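+/* In GET_MQUANT above, 'edges' is a bitmask of macroblock borders where the
+ * alternative quantizer v->altpq applies: bit 0 = leftmost MB column,
+ * bit 1 = top slice row, bit 2 = rightmost column, bit 3 = bottom row.
+ * DQPROFILE_SINGLE_EDGE sets one of these bits from dqsbedge,
+ * DQPROFILE_DOUBLE_EDGES sets two adjacent ones ((3 << dqsbedge) % 15 wraps
+ * bit 4 back onto bit 0), and DQPROFILE_FOUR_EDGES selects all four. */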
+
+/**
+ * @def GET_MVDATA(_dmv_x, _dmv_y)
+ * @brief Get MV differentials
+ * @see MVDATA decoding from 8.3.5.2, p(1)20
+ * @param _dmv_x Horizontal differential for decoded MV
+ * @param _dmv_y Vertical differential for decoded MV
+ */
+#define GET_MVDATA(_dmv_x, _dmv_y) \
+ index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table,\
+ VC1_MV_DIFF_VLC_BITS, 2); \
+ if (index > 36) \
+ { \
+ mb_has_coeffs = 1; \
+ index -= 37; \
+ } \
+ else mb_has_coeffs = 0; \
+ s->mb_intra = 0; \
+ if (!index) { _dmv_x = _dmv_y = 0; } \
+ else if (index == 35) \
+ { \
+ _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
+ _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
+ } \
+ else if (index == 36) \
+ { \
+ _dmv_x = 0; \
+ _dmv_y = 0; \
+ s->mb_intra = 1; \
+ } \
+ else \
+ { \
+ index1 = index%6; \
+ if (!s->quarter_sample && index1 == 5) val = 1; \
+ else val = 0; \
+ if(size_table[index1] - val > 0) \
+ val = get_bits(gb, size_table[index1] - val); \
+ else val = 0; \
+ sign = 0 - (val&1); \
+ _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
+ \
+ index1 = index/6; \
+ if (!s->quarter_sample && index1 == 5) val = 1; \
+ else val = 0; \
+ if(size_table[index1] - val > 0) \
+ val = get_bits(gb, size_table[index1] - val); \
+ else val = 0; \
+ sign = 0 - (val&1); \
+ _dmv_y = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
+ }
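+/* The sign handling at the end of GET_MVDATA is a branchless trick:
+ * sign = 0 - (val & 1) is either 0 or -1 (all bits set), and
+ * (sign ^ magnitude) - sign then yields +magnitude or -magnitude, so the low
+ * bit of 'val' carries the sign while (val >> 1) + offset_table[index1]
+ * forms the magnitude. */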
+
+/** Predict and set motion vector
+ */
+static inline void vc1_pred_mv(MpegEncContext *s, int n, int dmv_x, int dmv_y, int mv1, int r_x, int r_y, uint8_t* is_intra)
+{
+ int xy, wrap, off = 0;
+ int16_t *A, *B, *C;
+ int px, py;
+ int sum;
+
+ /* scale MV difference to be quad-pel */
+ dmv_x <<= 1 - s->quarter_sample;
+ dmv_y <<= 1 - s->quarter_sample;
+
+ wrap = s->b8_stride;
+ xy = s->block_index[n];
+
+ if(s->mb_intra){
+ s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
+ s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
+ s->current_picture.motion_val[1][xy][0] = 0;
+ s->current_picture.motion_val[1][xy][1] = 0;
+ if(mv1) { /* duplicate motion data for 1-MV block */
+ s->current_picture.motion_val[0][xy + 1][0] = 0;
+ s->current_picture.motion_val[0][xy + 1][1] = 0;
+ s->current_picture.motion_val[0][xy + wrap][0] = 0;
+ s->current_picture.motion_val[0][xy + wrap][1] = 0;
+ s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
+ s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
+ s->current_picture.motion_val[1][xy + 1][0] = 0;
+ s->current_picture.motion_val[1][xy + 1][1] = 0;
+ s->current_picture.motion_val[1][xy + wrap][0] = 0;
+ s->current_picture.motion_val[1][xy + wrap][1] = 0;
+ s->current_picture.motion_val[1][xy + wrap + 1][0] = 0;
+ s->current_picture.motion_val[1][xy + wrap + 1][1] = 0;
+ }
+ return;
+ }
+
+ C = s->current_picture.motion_val[0][xy - 1];
+ A = s->current_picture.motion_val[0][xy - wrap];
+ if(mv1)
+ off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
+ else {
+ //in 4-MV mode different blocks have different B predictor position
+ switch(n){
+ case 0:
+ off = (s->mb_x > 0) ? -1 : 1;
+ break;
+ case 1:
+ off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
+ break;
+ case 2:
+ off = 1;
+ break;
+ case 3:
+ off = -1;
+ }
+ }
+ B = s->current_picture.motion_val[0][xy - wrap + off];
+
+ if(!s->first_slice_line || (n==2 || n==3)) { // predictor A is not out of bounds
+ if(s->mb_width == 1) {
+ px = A[0];
+ py = A[1];
+ } else {
+ px = mid_pred(A[0], B[0], C[0]);
+ py = mid_pred(A[1], B[1], C[1]);
+ }
+ } else if(s->mb_x || (n==1 || n==3)) { // predictor C is not out of bounds
+ px = C[0];
+ py = C[1];
+ } else {
+ px = py = 0;
+ }
+ /* Pullback MV as specified in 8.3.5.3.4 */
+ {
+ int qx, qy, X, Y;
+ qx = (s->mb_x << 6) + ((n==1 || n==3) ? 32 : 0);
+ qy = (s->mb_y << 6) + ((n==2 || n==3) ? 32 : 0);
+ X = (s->mb_width << 6) - 4;
+ Y = (s->mb_height << 6) - 4;
+ if(mv1) {
+ if(qx + px < -60) px = -60 - qx;
+ if(qy + py < -60) py = -60 - qy;
+ } else {
+ if(qx + px < -28) px = -28 - qx;
+ if(qy + py < -28) py = -28 - qy;
+ }
+ if(qx + px > X) px = X - qx;
+ if(qy + py > Y) py = Y - qy;
+ }
+ /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
+ if((!s->first_slice_line || (n==2 || n==3)) && (s->mb_x || (n==1 || n==3))) {
+ if(is_intra[xy - wrap])
+ sum = FFABS(px) + FFABS(py);
+ else
+ sum = FFABS(px - A[0]) + FFABS(py - A[1]);
+ if(sum > 32) {
+ if(get_bits1(&s->gb)) {
+ px = A[0];
+ py = A[1];
+ } else {
+ px = C[0];
+ py = C[1];
+ }
+ } else {
+ if(is_intra[xy - 1])
+ sum = FFABS(px) + FFABS(py);
+ else
+ sum = FFABS(px - C[0]) + FFABS(py - C[1]);
+ if(sum > 32) {
+ if(get_bits1(&s->gb)) {
+ px = A[0];
+ py = A[1];
+ } else {
+ px = C[0];
+ py = C[1];
+ }
+ }
+ }
+ }
+ /* store MV using signed modulus of MV range defined in 4.11 */
+ s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
+ s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
+ if(mv1) { /* duplicate motion data for 1-MV block */
+ s->current_picture.motion_val[0][xy + 1][0] = s->current_picture.motion_val[0][xy][0];
+ s->current_picture.motion_val[0][xy + 1][1] = s->current_picture.motion_val[0][xy][1];
+ s->current_picture.motion_val[0][xy + wrap][0] = s->current_picture.motion_val[0][xy][0];
+ s->current_picture.motion_val[0][xy + wrap][1] = s->current_picture.motion_val[0][xy][1];
+ s->current_picture.motion_val[0][xy + wrap + 1][0] = s->current_picture.motion_val[0][xy][0];
+ s->current_picture.motion_val[0][xy + wrap + 1][1] = s->current_picture.motion_val[0][xy][1];
+ }
+}
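+/* The final store in vc1_pred_mv() folds predictor + differential into the
+ * signed MV range with ((x + r) & ((r << 1) - 1)) - r, i.e. a modulo-2r wrap
+ * into [-r, r-1]; this relies on r_x/r_y (v->range_x/range_y) being powers
+ * of two so that the AND works as the "signed modulus" of section 4.11. */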
+
+/** Motion compensation for direct or interpolated blocks in B-frames
+ */
+static void vc1_interp_mc(VC1Context *v)
+{
+ MpegEncContext *s = &v->s;
+ DSPContext *dsp = &v->s.dsp;
+ uint8_t *srcY, *srcU, *srcV;
+ int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
+
+ if(!v->s.next_picture.data[0])return;
+
+ mx = s->mv[1][0][0];
+ my = s->mv[1][0][1];
+ uvmx = (mx + ((mx & 3) == 3)) >> 1;
+ uvmy = (my + ((my & 3) == 3)) >> 1;
+ if(v->fastuvmc) {
+ uvmx = uvmx + ((uvmx<0)?-(uvmx&1):(uvmx&1));
+ uvmy = uvmy + ((uvmy<0)?-(uvmy&1):(uvmy&1));
+ }
+ srcY = s->next_picture.data[0];
+ srcU = s->next_picture.data[1];
+ srcV = s->next_picture.data[2];
+
+ src_x = s->mb_x * 16 + (mx >> 2);
+ src_y = s->mb_y * 16 + (my >> 2);
+ uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
+ uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
+
+ if(v->profile != PROFILE_ADVANCED){
+ src_x = av_clip( src_x, -16, s->mb_width * 16);
+ src_y = av_clip( src_y, -16, s->mb_height * 16);
+ uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
+ uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
+ }else{
+ src_x = av_clip( src_x, -17, s->avctx->coded_width);
+ src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
+ uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
+ uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
+ }
+
+ srcY += src_y * s->linesize + src_x;
+ srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
+ srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
+
+ /* for grayscale we should not try to read from unknown area */
+ if(s->flags & CODEC_FLAG_GRAY) {
+ srcU = s->edge_emu_buffer + 18 * s->linesize;
+ srcV = s->edge_emu_buffer + 18 * s->linesize;
+ }
+
+ if(v->rangeredfrm
+ || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel*3
+ || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 16 - s->mspel*3){
+ uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize;
+
+ srcY -= s->mspel * (1 + s->linesize);
+ ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17+s->mspel*2, 17+s->mspel*2,
+ src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
+ srcY = s->edge_emu_buffer;
+ ff_emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8+1, 8+1,
+ uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
+ ff_emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8+1, 8+1,
+ uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
+ srcU = uvbuf;
+ srcV = uvbuf + 16;
+ /* if we deal with range reduction we need to scale source blocks */
+ if(v->rangeredfrm) {
+ int i, j;
+ uint8_t *src, *src2;
+
+ src = srcY;
+ for(j = 0; j < 17 + s->mspel*2; j++) {
+ for(i = 0; i < 17 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
+ src += s->linesize;
+ }
+ src = srcU; src2 = srcV;
+ for(j = 0; j < 9; j++) {
+ for(i = 0; i < 9; i++) {
+ src[i] = ((src[i] - 128) >> 1) + 128;
+ src2[i] = ((src2[i] - 128) >> 1) + 128;
+ }
+ src += s->uvlinesize;
+ src2 += s->uvlinesize;
+ }
+ }
+ srcY += s->mspel * (1 + s->linesize);
+ }
+
+ if(s->mspel) {
+ dxy = ((my & 3) << 2) | (mx & 3);
+ dsp->avg_vc1_mspel_pixels_tab[dxy](s->dest[0] , srcY , s->linesize, v->rnd);
+ dsp->avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8, srcY + 8, s->linesize, v->rnd);
+ srcY += s->linesize * 8;
+ dsp->avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize , srcY , s->linesize, v->rnd);
+ dsp->avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
+ } else { // hpel mc
+ dxy = (my & 2) | ((mx & 2) >> 1);
+
+ if(!v->rnd)
+ dsp->avg_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
+ else
+ dsp->avg_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
+ }
+
+ if(s->flags & CODEC_FLAG_GRAY) return;
+ /* Chroma MC always uses qpel bilinear */
+ uvmx = (uvmx&3)<<1;
+ uvmy = (uvmy&3)<<1;
+ if(!v->rnd){
+ dsp->avg_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
+ dsp->avg_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
+ }else{
+ dsp->avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
+ dsp->avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
+ }
+}
+
+static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
+{
+ int n = bfrac;
+
+#if B_FRACTION_DEN==256
+ if(inv)
+ n -= 256;
+ if(!qs)
+ return 2 * ((value * n + 255) >> 9);
+ return (value * n + 128) >> 8;
+#else
+ if(inv)
+ n -= B_FRACTION_DEN;
+ if(!qs)
+ return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
+ return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
+#endif
+}
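+/* scale_mv() above scales a co-located MV of the next reference picture by
+ * the B-frame position: roughly value * bfrac / B_FRACTION_DEN for the
+ * forward component and value * (bfrac - B_FRACTION_DEN) / B_FRACTION_DEN
+ * for the backward one (inv != 0).  When quarter_sample is off, the result
+ * is forced to an even quarter-pel value, i.e. half-pel precision. */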
+
+/** Reconstruct motion vector for B-frame and do motion compensation
+ */
+static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mode)
+{
+ if(v->use_ic) {
+ v->mv_mode2 = v->mv_mode;
+ v->mv_mode = MV_PMODE_INTENSITY_COMP;
+ }
+ if(direct) {
+ vc1_mc_1mv(v, 0);
+ vc1_interp_mc(v);
+ if(v->use_ic) v->mv_mode = v->mv_mode2;
+ return;
+ }
+ if(mode == BMV_TYPE_INTERPOLATED) {
+ vc1_mc_1mv(v, 0);
+ vc1_interp_mc(v);
+ if(v->use_ic) v->mv_mode = v->mv_mode2;
+ return;
+ }
+
+ if(v->use_ic && (mode == BMV_TYPE_BACKWARD)) v->mv_mode = v->mv_mode2;
+ vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
+ if(v->use_ic) v->mv_mode = v->mv_mode2;
+}
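+/* vc1_b_mc() above: direct and interpolated macroblocks are built as the
+ * average of a forward prediction (vc1_mc_1mv, put_* routines) and a
+ * backward prediction (vc1_interp_mc, avg_* routines), while plain
+ * forward/backward modes use a single vc1_mc_1mv() call.  The mv_mode
+ * switching around the calls makes intensity compensation apply only to
+ * predictions taken from the previous (forward) reference. */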
+
+static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mvtype)
+{
+ MpegEncContext *s = &v->s;
+ int xy, wrap, off = 0;
+ int16_t *A, *B, *C;
+ int px, py;
+ int sum;
+ int r_x, r_y;
+ const uint8_t *is_intra = v->mb_type[0];
+
+ r_x = v->range_x;
+ r_y = v->range_y;
+ /* scale MV difference to be quad-pel */
+ dmv_x[0] <<= 1 - s->quarter_sample;
+ dmv_y[0] <<= 1 - s->quarter_sample;
+ dmv_x[1] <<= 1 - s->quarter_sample;
+ dmv_y[1] <<= 1 - s->quarter_sample;
+
+ wrap = s->b8_stride;
+ xy = s->block_index[0];
+
+ if(s->mb_intra) {
+ s->current_picture.motion_val[0][xy][0] =
+ s->current_picture.motion_val[0][xy][1] =
+ s->current_picture.motion_val[1][xy][0] =
+ s->current_picture.motion_val[1][xy][1] = 0;
+ return;
+ }
+ s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
+ s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
+ s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
+ s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
+
+ /* Pullback predicted motion vectors as specified in 8.4.5.4 */
+ s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
+ s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
+ s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
+ s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
+ if(direct) {
+ s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
+ s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
+ s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
+ s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
+ return;
+ }
+
+ if((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
+ C = s->current_picture.motion_val[0][xy - 2];
+ A = s->current_picture.motion_val[0][xy - wrap*2];
+ off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
+ B = s->current_picture.motion_val[0][xy - wrap*2 + off];
+
+ if(!s->mb_x) C[0] = C[1] = 0;
+ if(!s->first_slice_line) { // predictor A is not out of bounds
+ if(s->mb_width == 1) {
+ px = A[0];
+ py = A[1];
+ } else {
+ px = mid_pred(A[0], B[0], C[0]);
+ py = mid_pred(A[1], B[1], C[1]);
+ }
+ } else if(s->mb_x) { // predictor C is not out of bounds
+ px = C[0];
+ py = C[1];
+ } else {
+ px = py = 0;
+ }
+ /* Pullback MV as specified in 8.3.5.3.4 */
+ {
+ int qx, qy, X, Y;
+ if(v->profile < PROFILE_ADVANCED) {
+ qx = (s->mb_x << 5);
+ qy = (s->mb_y << 5);
+ X = (s->mb_width << 5) - 4;
+ Y = (s->mb_height << 5) - 4;
+ if(qx + px < -28) px = -28 - qx;
+ if(qy + py < -28) py = -28 - qy;
+ if(qx + px > X) px = X - qx;
+ if(qy + py > Y) py = Y - qy;
+ } else {
+ qx = (s->mb_x << 6);
+ qy = (s->mb_y << 6);
+ X = (s->mb_width << 6) - 4;
+ Y = (s->mb_height << 6) - 4;
+ if(qx + px < -60) px = -60 - qx;
+ if(qy + py < -60) py = -60 - qy;
+ if(qx + px > X) px = X - qx;
+ if(qy + py > Y) py = Y - qy;
+ }
+ }
+ /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
+ if(0 && !s->first_slice_line && s->mb_x) {
+ if(is_intra[xy - wrap])
+ sum = FFABS(px) + FFABS(py);
+ else
+ sum = FFABS(px - A[0]) + FFABS(py - A[1]);
+ if(sum > 32) {
+ if(get_bits1(&s->gb)) {
+ px = A[0];
+ py = A[1];
+ } else {
+ px = C[0];
+ py = C[1];
+ }
+ } else {
+ if(is_intra[xy - 2])
+ sum = FFABS(px) + FFABS(py);
+ else
+ sum = FFABS(px - C[0]) + FFABS(py - C[1]);
+ if(sum > 32) {
+ if(get_bits1(&s->gb)) {
+ px = A[0];
+ py = A[1];
+ } else {
+ px = C[0];
+ py = C[1];
+ }
+ }
+ }
+ }
+ /* store MV using signed modulus of MV range defined in 4.11 */
+ s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
+ s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
+ }
+ if((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
+ C = s->current_picture.motion_val[1][xy - 2];
+ A = s->current_picture.motion_val[1][xy - wrap*2];
+ off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
+ B = s->current_picture.motion_val[1][xy - wrap*2 + off];
+
+ if(!s->mb_x) C[0] = C[1] = 0;
+ if(!s->first_slice_line) { // predictor A is not out of bounds
+ if(s->mb_width == 1) {
+ px = A[0];
+ py = A[1];
+ } else {
+ px = mid_pred(A[0], B[0], C[0]);
+ py = mid_pred(A[1], B[1], C[1]);
+ }
+ } else if(s->mb_x) { // predictor C is not out of bounds
+ px = C[0];
+ py = C[1];
+ } else {
+ px = py = 0;
+ }
+ /* Pullback MV as specified in 8.3.5.3.4 */
+ {
+ int qx, qy, X, Y;
+ if(v->profile < PROFILE_ADVANCED) {
+ qx = (s->mb_x << 5);
+ qy = (s->mb_y << 5);
+ X = (s->mb_width << 5) - 4;
+ Y = (s->mb_height << 5) - 4;
+ if(qx + px < -28) px = -28 - qx;
+ if(qy + py < -28) py = -28 - qy;
+ if(qx + px > X) px = X - qx;
+ if(qy + py > Y) py = Y - qy;
+ } else {
+ qx = (s->mb_x << 6);
+ qy = (s->mb_y << 6);
+ X = (s->mb_width << 6) - 4;
+ Y = (s->mb_height << 6) - 4;
+ if(qx + px < -60) px = -60 - qx;
+ if(qy + py < -60) py = -60 - qy;
+ if(qx + px > X) px = X - qx;
+ if(qy + py > Y) py = Y - qy;
+ }
+ }
+ /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
+ if(0 && !s->first_slice_line && s->mb_x) {
+ if(is_intra[xy - wrap])
+ sum = FFABS(px) + FFABS(py);
+ else
+ sum = FFABS(px - A[0]) + FFABS(py - A[1]);
+ if(sum > 32) {
+ if(get_bits1(&s->gb)) {
+ px = A[0];
+ py = A[1];
+ } else {
+ px = C[0];
+ py = C[1];
+ }
+ } else {
+ if(is_intra[xy - 2])
+ sum = FFABS(px) + FFABS(py);
+ else
+ sum = FFABS(px - C[0]) + FFABS(py - C[1]);
+ if(sum > 32) {
+ if(get_bits1(&s->gb)) {
+ px = A[0];
+ py = A[1];
+ } else {
+ px = C[0];
+ py = C[1];
+ }
+ }
+ }
+ }
+ /* store MV using signed modulus of MV range defined in 4.11 */
+
+ s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
+ s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
+ }
+ s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
+ s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
+ s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
+ s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
+}
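+/* vc1_pred_b_mv() above first derives the direct-mode MV pair by scaling the
+ * co-located MVs of the next picture with scale_mv() and clipping them back
+ * into the frame; for explicit modes it then repeats the median/pullback
+ * prediction of vc1_pred_mv() independently for the forward and backward
+ * lists.  The hybrid-prediction branches here are compiled out
+ * ("if(0 && ...)"). */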
+
+/** Get predicted DC value for I-frames only
+ * prediction dir: left=0, top=1
+ * @param s MpegEncContext
+ * @param overlap flag indicating that overlap filtering is used
+ * @param pq integer part of picture quantizer
+ * @param[in] n block index in the current MB
+ * @param dc_val_ptr Pointer to DC predictor
+ * @param dir_ptr Prediction direction for use in AC prediction
+ */
+static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
+ int16_t **dc_val_ptr, int *dir_ptr)
+{
+ int a, b, c, wrap, pred, scale;
+ int16_t *dc_val;
+ static const uint16_t dcpred[32] = {
+ -1, 1024, 512, 341, 256, 205, 171, 146, 128,
+ 114, 102, 93, 85, 79, 73, 68, 64,
+ 60, 57, 54, 51, 49, 47, 45, 43,
+ 41, 39, 38, 37, 35, 34, 33
+ };
+
+ /* find prediction - wmv3_dc_scale always used here in fact */
+ if (n < 4) scale = s->y_dc_scale;
+ else scale = s->c_dc_scale;
+
+ wrap = s->block_wrap[n];
+ dc_val= s->dc_val[0] + s->block_index[n];
+
+ /* B A
+ * C X
+ */
+ c = dc_val[ - 1];
+ b = dc_val[ - 1 - wrap];
+ a = dc_val[ - wrap];
+
+ if (pq < 9 || !overlap)
+ {
+ /* Set outer values */
+ if (s->first_slice_line && (n!=2 && n!=3)) b=a=dcpred[scale];
+ if (s->mb_x == 0 && (n!=1 && n!=3)) b=c=dcpred[scale];
+ }
+ else
+ {
+ /* Set outer values */
+ if (s->first_slice_line && (n!=2 && n!=3)) b=a=0;
+ if (s->mb_x == 0 && (n!=1 && n!=3)) b=c=0;
+ }
+
+ if (abs(a - b) <= abs(b - c)) {
+ pred = c;
+ *dir_ptr = 1;//left
+ } else {
+ pred = a;
+ *dir_ptr = 0;//top
+ }
+
+ /* update predictor */
+ *dc_val_ptr = &dc_val[0];
+ return pred;
+}
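+/* DC prediction above uses the gradient rule: with neighbours A (top),
+ * B (top-left) and C (left), the predictor is C when |A - B| <= |B - C|
+ * and A otherwise; *dir_ptr records the chosen direction (1 = left,
+ * 0 = top) so that AC prediction can follow the same neighbour. */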
+
+
+/** Get predicted DC value
+ * prediction dir: left=0, top=1
+ * @param s MpegEncContext
+ * @param overlap flag indicating that overlap filtering is used
+ * @param pq integer part of picture quantizer
+ * @param[in] n block index in the current MB
+ * @param a_avail flag indicating top block availability
+ * @param c_avail flag indicating left block availability
+ * @param dc_val_ptr Pointer to DC predictor
+ * @param dir_ptr Prediction direction for use in AC prediction
+ */
+static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
+ int a_avail, int c_avail,
+ int16_t **dc_val_ptr, int *dir_ptr)
+{
+ int a, b, c, wrap, pred;
+ int16_t *dc_val;
+ int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
+ int q1, q2 = 0;
+
+ wrap = s->block_wrap[n];
+ dc_val= s->dc_val[0] + s->block_index[n];
+
+ /* B A
+ * C X
+ */
+ c = dc_val[ - 1];
+ b = dc_val[ - 1 - wrap];
+ a = dc_val[ - wrap];
+ /* scale predictors if needed */
+ q1 = s->current_picture.qscale_table[mb_pos];
+ if(c_avail && (n!= 1 && n!=3)) {
+ q2 = s->current_picture.qscale_table[mb_pos - 1];
+ if(q2 && q2 != q1)
+ c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
+ }
+ if(a_avail && (n!= 2 && n!=3)) {
+ q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
+ if(q2 && q2 != q1)
+ a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
+ }
+ if(a_avail && c_avail && (n!=3)) {
+ int off = mb_pos;
+ if(n != 1) off--;
+ if(n != 2) off -= s->mb_stride;
+ q2 = s->current_picture.qscale_table[off];
+ if(q2 && q2 != q1)
+ b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
+ }
+
+ if(a_avail && c_avail) {
+ if(abs(a - b) <= abs(b - c)) {
+ pred = c;
+ *dir_ptr = 1;//left
+ } else {
+ pred = a;
+ *dir_ptr = 0;//top
+ }
+ } else if(a_avail) {
+ pred = a;
+ *dir_ptr = 0;//top
+ } else if(c_avail) {
+ pred = c;
+ *dir_ptr = 1;//left
+ } else {
+ pred = 0;
+ *dir_ptr = 1;//left
+ }
+
+ /* update predictor */
+ *dc_val_ptr = &dc_val[0];
+ return pred;
+}
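+/* vc1_pred_dc() additionally rescales each neighbouring DC value when that
+ * neighbour used a different quantizer: the expression
+ * (dc * scale(q2) * ff_vc1_dqscale[scale(q1) - 1] + 0x20000) >> 18
+ * approximates dc * scale(q2) / scale(q1) with rounding, ff_vc1_dqscale[]
+ * holding fixed-point reciprocals. */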
+
+/** @} */ // Block group
+
+/**
+ * @defgroup vc1_std_mb VC1 Macroblock-level functions in Simple/Main Profiles
+ * @see 7.1.4, p91 and 8.1.1.7, p(1)04
+ * @{
+ */
+
+static inline int vc1_coded_block_pred(MpegEncContext * s, int n, uint8_t **coded_block_ptr)
+{
+ int xy, wrap, pred, a, b, c;
+
+ xy = s->block_index[n];
+ wrap = s->b8_stride;
+
+ /* B C
+ * A X
+ */
+ a = s->coded_block[xy - 1 ];
+ b = s->coded_block[xy - 1 - wrap];
+ c = s->coded_block[xy - wrap];
+
+ if (b == c) {
+ pred = a;
+ } else {
+ pred = c;
+ }
+
+ /* store value */
+ *coded_block_ptr = &s->coded_block[xy];
+
+ return pred;
+}
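+/* vc1_coded_block_pred() predicts the "coded" flag of a luma block from its
+ * neighbours: if the top-left (b) and top (c) flags agree, the left flag (a)
+ * is used as prediction, otherwise the top one; the pointer returned through
+ * coded_block_ptr lets the caller store the final flag for later blocks. */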
+
+/**
+ * Decode one AC coefficient
+ * @param v The VC1 context
+ * @param last Last coefficient
+ * @param skip How many zero coefficients to skip
+ * @param value Decoded AC coefficient value
+ * @param codingset set of VLC to decode data
+ * @see 8.1.3.4
+ */
+static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip, int *value, int codingset)
+{
+ GetBitContext *gb = &v->s.gb;
+ int index, escape, run = 0, level = 0, lst = 0;
+
+ index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
+ if (index != vc1_ac_sizes[codingset] - 1) {
+ run = vc1_index_decode_table[codingset][index][0];
+ level = vc1_index_decode_table[codingset][index][1];
+ lst = index >= vc1_last_decode_table[codingset];
+ if(get_bits1(gb))
+ level = -level;
+ } else {
+ escape = decode210(gb);
+ if (escape != 2) {
+ index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
+ run = vc1_index_decode_table[codingset][index][0];
+ level = vc1_index_decode_table[codingset][index][1];
+ lst = index >= vc1_last_decode_table[codingset];
+ if(escape == 0) {
+ if(lst)
+ level += vc1_last_delta_level_table[codingset][run];
+ else
+ level += vc1_delta_level_table[codingset][run];
+ } else {
+ if(lst)
+ run += vc1_last_delta_run_table[codingset][level] + 1;
+ else
+ run += vc1_delta_run_table[codingset][level] + 1;
+ }
+ if(get_bits1(gb))
+ level = -level;
+ } else {
+ int sign;
+ lst = get_bits1(gb);
+ if(v->s.esc3_level_length == 0) {
+ if(v->pq < 8 || v->dquantfrm) { // table 59
+ v->s.esc3_level_length = get_bits(gb, 3);
+ if(!v->s.esc3_level_length)
+ v->s.esc3_level_length = get_bits(gb, 2) + 8;
+ } else { //table 60
+ v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
+ }
+ v->s.esc3_run_length = 3 + get_bits(gb, 2);
+ }
+ run = get_bits(gb, v->s.esc3_run_length);
+ sign = get_bits1(gb);
+ level = get_bits(gb, v->s.esc3_level_length);
+ if(sign)
+ level = -level;
+ }
+ }
+
+ *last = lst;
+ *skip = run;
+ *value = level;
+}
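+/* AC coefficient decoding above has three escape modes (decode210):
+ * escape 0 and 1 decode another run/level pair and add a table-driven delta
+ * to the level (escape 0) or to the run (escape 1), while escape 2 reads run
+ * and level as fixed-length fields whose widths (esc3_run_length and
+ * esc3_level_length) are parsed on first use and then reused. */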
+
+/** Decode intra block in intra frames - should be faster than decode_intra_block
+ * @param v VC1Context
+ * @param block block to decode
+ * @param[in] n subblock index
+ * @param coded are AC coeffs present or not
+ * @param codingset set of VLC to decode data
+ */
+static int vc1_decode_i_block(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset)
+{
+ GetBitContext *gb = &v->s.gb;
+ MpegEncContext *s = &v->s;
+ int dc_pred_dir = 0; /* Direction of the DC prediction used */
+ int i;
+ int16_t *dc_val;
+ int16_t *ac_val, *ac_val2;
+ int dcdiff;
+
+ /* Get DC differential */
+ if (n < 4) {
+ dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
+ } else {
+ dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
+ }
+ if (dcdiff < 0){
+ av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
+ return -1;
+ }
+ if (dcdiff)
+ {
+ if (dcdiff == 119 /* ESC index value */)
+ {
+ /* TODO: Optimize */
+ if (v->pq == 1) dcdiff = get_bits(gb, 10);
+ else if (v->pq == 2) dcdiff = get_bits(gb, 9);
+ else dcdiff = get_bits(gb, 8);
+ }
+ else
+ {
+ if (v->pq == 1)
+ dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
+ else if (v->pq == 2)
+ dcdiff = (dcdiff<<1) + get_bits1(gb) - 1;
+ }
+ if (get_bits1(gb))
+ dcdiff = -dcdiff;
+ }
+
+ /* Prediction */
+ dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
+ *dc_val = dcdiff;
+
+ /* Store the quantized DC coeff, used for prediction */
+ if (n < 4) {
+ block[0] = dcdiff * s->y_dc_scale;
+ } else {
+ block[0] = dcdiff * s->c_dc_scale;
+ }
+ /* Skip ? */
+ if (!coded) {
+ goto not_coded;
+ }
+
+ //AC Decoding
+ i = 1;
+
+ {
+ int last = 0, skip, value;
+ const int8_t *zz_table;
+ int scale;
+ int k;
+
+ scale = v->pq * 2 + v->halfpq;
+
+ if(v->s.ac_pred) {
+ if(!dc_pred_dir)
+ zz_table = wmv1_scantable[2];
+ else
+ zz_table = wmv1_scantable[3];
+ } else
+ zz_table = wmv1_scantable[1];
+
+ ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
+ ac_val2 = ac_val;
+ if(dc_pred_dir) //left
+ ac_val -= 16;
+ else //top
+ ac_val -= 16 * s->block_wrap[n];
+
+ while (!last) {
+ vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
+ i += skip;
+ if(i > 63)
+ break;
+ block[zz_table[i++]] = value;
+ }
+
+ /* apply AC prediction if needed */
+ if(s->ac_pred) {
+ if(dc_pred_dir) { //left
+ for(k = 1; k < 8; k++)
+ block[k << 3] += ac_val[k];
+ } else { //top
+ for(k = 1; k < 8; k++)
+ block[k] += ac_val[k + 8];
+ }
+ }
+ /* save AC coeffs for further prediction */
+ for(k = 1; k < 8; k++) {
+ ac_val2[k] = block[k << 3];
+ ac_val2[k + 8] = block[k];
+ }
+
+ /* scale AC coeffs */
+ for(k = 1; k < 64; k++)
+ if(block[k]) {
+ block[k] *= scale;
+ if(!v->pquantizer)
+ block[k] += (block[k] < 0) ? -v->pq : v->pq;
+ }
+
+ if(s->ac_pred) i = 63;
+ }
+
+not_coded:
+ if(!coded) {
+ int k, scale;
+ ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
+ ac_val2 = ac_val;
+
+ i = 0;
+ scale = v->pq * 2 + v->halfpq;
+ memset(ac_val2, 0, 16 * 2);
+ if(dc_pred_dir) {//left
+ ac_val -= 16;
+ if(s->ac_pred)
+ memcpy(ac_val2, ac_val, 8 * 2);
+ } else {//top
+ ac_val -= 16 * s->block_wrap[n];
+ if(s->ac_pred)
+ memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
+ }
+
+ /* apply AC prediction if needed */
+ if(s->ac_pred) {
+ if(dc_pred_dir) { //left
+ for(k = 1; k < 8; k++) {
+ block[k << 3] = ac_val[k] * scale;
+ if(!v->pquantizer && block[k << 3])
+ block[k << 3] += (block[k << 3] < 0) ? -v->pq : v->pq;
+ }
+ } else { //top
+ for(k = 1; k < 8; k++) {
+ block[k] = ac_val[k + 8] * scale;
+ if(!v->pquantizer && block[k])
+ block[k] += (block[k] < 0) ? -v->pq : v->pq;
+ }
+ }
+ i = 63;
+ }
+ }
+ s->block_last_index[n] = i;
+
+ return 0;
+}
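+/* AC prediction in vc1_decode_i_block() (and in the two variants below)
+ * operates on the first row and first column only: each block keeps 16
+ * predictor values, entries [1..7] holding the first column and [9..15] the
+ * first row, saved before the final scaling step; dc_pred_dir selects
+ * whether the left or the top neighbour supplies the prediction. */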
+
+/** Decode intra block in intra frames - should be faster than decode_intra_block
+ * @param v VC1Context
+ * @param block block to decode
+ * @param[in] n subblock number
+ * @param coded are AC coeffs present or not
+ * @param codingset set of VLC to decode data
+ * @param mquant quantizer value for this macroblock
+ */
+static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset, int mquant)
+{
+ GetBitContext *gb = &v->s.gb;
+ MpegEncContext *s = &v->s;
+ int dc_pred_dir = 0; /* Direction of the DC prediction used */
+ int i;
+ int16_t *dc_val;
+ int16_t *ac_val, *ac_val2;
+ int dcdiff;
+ int a_avail = v->a_avail, c_avail = v->c_avail;
+ int use_pred = s->ac_pred;
+ int scale;
+ int q1, q2 = 0;
+ int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
+
+ /* Get DC differential */
+ if (n < 4) {
+ dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
+ } else {
+ dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
+ }
+ if (dcdiff < 0){
+ av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
+ return -1;
+ }
+ if (dcdiff)
+ {
+ if (dcdiff == 119 /* ESC index value */)
+ {
+ /* TODO: Optimize */
+ if (mquant == 1) dcdiff = get_bits(gb, 10);
+ else if (mquant == 2) dcdiff = get_bits(gb, 9);
+ else dcdiff = get_bits(gb, 8);
+ }
+ else
+ {
+ if (mquant == 1)
+ dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
+ else if (mquant == 2)
+ dcdiff = (dcdiff<<1) + get_bits1(gb) - 1;
+ }
+ if (get_bits1(gb))
+ dcdiff = -dcdiff;
+ }
+
+ /* Prediction */
+ dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
+ *dc_val = dcdiff;
+
+ /* Store the quantized DC coeff, used for prediction */
+ if (n < 4) {
+ block[0] = dcdiff * s->y_dc_scale;
+ } else {
+ block[0] = dcdiff * s->c_dc_scale;
+ }
+
+ //AC Decoding
+ i = 1;
+
+ /* check if AC is needed at all */
+ if(!a_avail && !c_avail) use_pred = 0;
+ ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
+ ac_val2 = ac_val;
+
+ scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);
+
+ if(dc_pred_dir) //left
+ ac_val -= 16;
+ else //top
+ ac_val -= 16 * s->block_wrap[n];
+
+ q1 = s->current_picture.qscale_table[mb_pos];
+ if(dc_pred_dir && c_avail && mb_pos) q2 = s->current_picture.qscale_table[mb_pos - 1];
+ if(!dc_pred_dir && a_avail && mb_pos >= s->mb_stride) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
+ if(dc_pred_dir && n==1) q2 = q1;
+ if(!dc_pred_dir && n==2) q2 = q1;
+ if(n==3) q2 = q1;
+
+ if(coded) {
+ int last = 0, skip, value;
+ const int8_t *zz_table;
+ int k;
+
+ if(v->s.ac_pred) {
+ if(!dc_pred_dir)
+ zz_table = wmv1_scantable[2];
+ else
+ zz_table = wmv1_scantable[3];
+ } else
+ zz_table = wmv1_scantable[1];
+
+ while (!last) {
+ vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
+ i += skip;
+ if(i > 63)
+ break;
+ block[zz_table[i++]] = value;
+ }
+
+ /* apply AC prediction if needed */
+ if(use_pred) {
+ /* scale predictors if needed*/
+ if(q2 && q1!=q2) {
+ q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
+ q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
+
+ if(dc_pred_dir) { //left
+ for(k = 1; k < 8; k++)
+ block[k << 3] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
+ } else { //top
+ for(k = 1; k < 8; k++)
+ block[k] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
+ }
+ } else {
+ if(dc_pred_dir) { //left
+ for(k = 1; k < 8; k++)
+ block[k << 3] += ac_val[k];
+ } else { //top
+ for(k = 1; k < 8; k++)
+ block[k] += ac_val[k + 8];
+ }
+ }
+ }
+ /* save AC coeffs for further prediction */
+ for(k = 1; k < 8; k++) {
+ ac_val2[k] = block[k << 3];
+ ac_val2[k + 8] = block[k];
+ }
+
+ /* scale AC coeffs */
+ for(k = 1; k < 64; k++)
+ if(block[k]) {
+ block[k] *= scale;
+ if(!v->pquantizer)
+ block[k] += (block[k] < 0) ? -mquant : mquant;
+ }
+
+ if(use_pred) i = 63;
+ } else { // no AC coeffs
+ int k;
+
+ memset(ac_val2, 0, 16 * 2);
+ if(dc_pred_dir) {//left
+ if(use_pred) {
+ memcpy(ac_val2, ac_val, 8 * 2);
+ if(q2 && q1!=q2) {
+ q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
+ q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
+ for(k = 1; k < 8; k++)
+ ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
+ }
+ }
+ } else {//top
+ if(use_pred) {
+ memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
+ if(q2 && q1!=q2) {
+ q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
+ q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
+ for(k = 1; k < 8; k++)
+ ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
+ }
+ }
+ }
+
+ /* apply AC prediction if needed */
+ if(use_pred) {
+ if(dc_pred_dir) { //left
+ for(k = 1; k < 8; k++) {
+ block[k << 3] = ac_val2[k] * scale;
+ if(!v->pquantizer && block[k << 3])
+ block[k << 3] += (block[k << 3] < 0) ? -mquant : mquant;
+ }
+ } else { //top
+ for(k = 1; k < 8; k++) {
+ block[k] = ac_val2[k + 8] * scale;
+ if(!v->pquantizer && block[k])
+ block[k] += (block[k] < 0) ? -mquant : mquant;
+ }
+ }
+ i = 63;
+ }
+ }
+ s->block_last_index[n] = i;
+
+ return 0;
+}
+
+/** Decode intra block in inter frames - more generic version than vc1_decode_i_block
+ * @param v VC1Context
+ * @param block block to decode
+ * @param[in] n subblock index
+ * @param coded are AC coeffs present or not
+ * @param mquant block quantizer
+ * @param codingset set of VLC to decode data
+ */
+static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n, int coded, int mquant, int codingset)
+{
+ GetBitContext *gb = &v->s.gb;
+ MpegEncContext *s = &v->s;
+ int dc_pred_dir = 0; /* Direction of the DC prediction used */
+ int i;
+ int16_t *dc_val;
+ int16_t *ac_val, *ac_val2;
+ int dcdiff;
+ int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
+ int a_avail = v->a_avail, c_avail = v->c_avail;
+ int use_pred = s->ac_pred;
+ int scale;
+ int q1, q2 = 0;
+
+ s->dsp.clear_block(block);
+
+ /* XXX: Guard against dumb values of mquant */
+ mquant = (mquant < 1) ? 0 : ( (mquant>31) ? 31 : mquant );
+
+ /* Set DC scale - y and c use the same */
+ s->y_dc_scale = s->y_dc_scale_table[mquant];
+ s->c_dc_scale = s->c_dc_scale_table[mquant];
+
+ /* Get DC differential */
+ if (n < 4) {
+ dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
+ } else {
+ dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
+ }
+ if (dcdiff < 0){
+ av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
+ return -1;
+ }
+ if (dcdiff)
+ {
+ if (dcdiff == 119 /* ESC index value */)
+ {
+ /* TODO: Optimize */
+ if (mquant == 1) dcdiff = get_bits(gb, 10);
+ else if (mquant == 2) dcdiff = get_bits(gb, 9);
+ else dcdiff = get_bits(gb, 8);
+ }
+ else
+ {
+ if (mquant == 1)
+ dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
+ else if (mquant == 2)
+ dcdiff = (dcdiff<<1) + get_bits1(gb) - 1;
+ }
+ if (get_bits1(gb))
+ dcdiff = -dcdiff;
+ }
+
+ /* Prediction */
+ dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
+ *dc_val = dcdiff;
+
+ /* Store the quantized DC coeff, used for prediction */
+
+ if (n < 4) {
+ block[0] = dcdiff * s->y_dc_scale;
+ } else {
+ block[0] = dcdiff * s->c_dc_scale;
+ }
+
+ //AC Decoding
+ i = 1;
+
+ /* check if AC is needed at all and adjust direction if needed */
+ if(!a_avail) dc_pred_dir = 1;
+ if(!c_avail) dc_pred_dir = 0;
+ if(!a_avail && !c_avail) use_pred = 0;
+ ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
+ ac_val2 = ac_val;
+
+ scale = mquant * 2 + v->halfpq;
+
+ if(dc_pred_dir) //left
+ ac_val -= 16;
+ else //top
+ ac_val -= 16 * s->block_wrap[n];
+
+ q1 = s->current_picture.qscale_table[mb_pos];
+ if(dc_pred_dir && c_avail && mb_pos) q2 = s->current_picture.qscale_table[mb_pos - 1];
+ if(!dc_pred_dir && a_avail && mb_pos >= s->mb_stride) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
+ if(dc_pred_dir && n==1) q2 = q1;
+ if(!dc_pred_dir && n==2) q2 = q1;
+ if(n==3) q2 = q1;
+
+ if(coded) {
+ int last = 0, skip, value;
+ const int8_t *zz_table;
+ int k;
+
+ zz_table = wmv1_scantable[0];
+
+ while (!last) {
+ vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
+ i += skip;
+ if(i > 63)
+ break;
+ block[zz_table[i++]] = value;
+ }
+
+ /* apply AC prediction if needed */
+ if(use_pred) {
+ /* scale predictors if needed*/
+ if(q2 && q1!=q2) {
+ q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
+ q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
+
+ if(dc_pred_dir) { //left
+ for(k = 1; k < 8; k++)
+ block[k << 3] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
+ } else { //top
+ for(k = 1; k < 8; k++)
+ block[k] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
+ }
+ } else {
+ if(dc_pred_dir) { //left
+ for(k = 1; k < 8; k++)
+ block[k << 3] += ac_val[k];
+ } else { //top
+ for(k = 1; k < 8; k++)
+ block[k] += ac_val[k + 8];
+ }
+ }
+ }
+ /* save AC coeffs for further prediction */
+ for(k = 1; k < 8; k++) {
+ ac_val2[k] = block[k << 3];
+ ac_val2[k + 8] = block[k];
+ }
+
+ /* scale AC coeffs */
+ for(k = 1; k < 64; k++)
+ if(block[k]) {
+ block[k] *= scale;
+ if(!v->pquantizer)
+ block[k] += (block[k] < 0) ? -mquant : mquant;
+ }
+
+ if(use_pred) i = 63;
+ } else { // no AC coeffs
+ int k;
+
+ memset(ac_val2, 0, 16 * 2);
+ if(dc_pred_dir) {//left
+ if(use_pred) {
+ memcpy(ac_val2, ac_val, 8 * 2);
+ if(q2 && q1!=q2) {
+ q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
+ q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
+ for(k = 1; k < 8; k++)
+ ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
+ }
+ }
+ } else {//top
+ if(use_pred) {
+ memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
+ if(q2 && q1!=q2) {
+ q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
+ q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
+ for(k = 1; k < 8; k++)
+ ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
+ }
+ }
+ }
+
+ /* apply AC prediction if needed */
+ if(use_pred) {
+ if(dc_pred_dir) { //left
+ for(k = 1; k < 8; k++) {
+ block[k << 3] = ac_val2[k] * scale;
+ if(!v->pquantizer && block[k << 3])
+ block[k << 3] += (block[k << 3] < 0) ? -mquant : mquant;
+ }
+ } else { //top
+ for(k = 1; k < 8; k++) {
+ block[k] = ac_val2[k + 8] * scale;
+ if(!v->pquantizer && block[k])
+ block[k] += (block[k] < 0) ? -mquant : mquant;
+ }
+ }
+ i = 63;
+ }
+ }
+ s->block_last_index[n] = i;
+
+ return 0;
+}
+
+/** Decode P block
+ */
+static int vc1_decode_p_block(VC1Context *v, DCTELEM block[64], int n, int mquant, int ttmb, int first_block,
+ uint8_t *dst, int linesize, int skip_block, int apply_filter, int cbp_top, int cbp_left)
+{
+ MpegEncContext *s = &v->s;
+ GetBitContext *gb = &s->gb;
+ int i, j;
+ int subblkpat = 0;
+ int scale, off, idx, last, skip, value;
+ int ttblk = ttmb & 7;
+ int pat = 0;
+
+ s->dsp.clear_block(block);
+
+ if(ttmb == -1) {
+ ttblk = ff_vc1_ttblk_to_tt[v->tt_index][get_vlc2(gb, ff_vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
+ }
+ if(ttblk == TT_4X4) {
+ subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
+ }
+ if((ttblk != TT_8X8 && ttblk != TT_4X4) && (v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))) {
+ subblkpat = decode012(gb);
+ if(subblkpat) subblkpat ^= 3; //swap decoded pattern bits
+ if(ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) ttblk = TT_8X4;
+ if(ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) ttblk = TT_4X8;
+ }
+ scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);
+
+ // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
+ if(ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
+ subblkpat = 2 - (ttblk == TT_8X4_TOP);
+ ttblk = TT_8X4;
+ }
+ if(ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
+ subblkpat = 2 - (ttblk == TT_4X8_LEFT);
+ ttblk = TT_4X8;
+ }
+ switch(ttblk) {
+ case TT_8X8:
+ pat = 0xF;
+ i = 0;
+ last = 0;
+ while (!last) {
+ vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
+ i += skip;
+ if(i > 63)
+ break;
+ idx = wmv1_scantable[0][i++];
+ block[idx] = value * scale;
+ if(!v->pquantizer)
+ block[idx] += (block[idx] < 0) ? -mquant : mquant;
+ }
+ if(!skip_block){
+ if(i==1)
+ s->dsp.vc1_inv_trans_8x8_dc(dst, linesize, block);
+ else{
+ s->dsp.vc1_inv_trans_8x8(block);
+ s->dsp.add_pixels_clamped(block, dst, linesize);
+ }
+ if(apply_filter && cbp_top & 0xC)
+ s->dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
+ if(apply_filter && cbp_left & 0xA)
+ s->dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
+ }
+ break;
+ case TT_4X4:
+ pat = ~subblkpat & 0xF;
+ for(j = 0; j < 4; j++) {
+ last = subblkpat & (1 << (3 - j));
+ i = 0;
+ off = (j & 1) * 4 + (j & 2) * 16;
+ while (!last) {
+ vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
+ i += skip;
+ if(i > 15)
+ break;
+ idx = ff_vc1_simple_progressive_4x4_zz[i++];
+ block[idx + off] = value * scale;
+ if(!v->pquantizer)
+ block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
+ }
+ if(!(subblkpat & (1 << (3 - j))) && !skip_block){
+ if(i==1)
+ s->dsp.vc1_inv_trans_4x4_dc(dst + (j&1)*4 + (j&2)*2*linesize, linesize, block + off);
+ else
+ s->dsp.vc1_inv_trans_4x4(dst + (j&1)*4 + (j&2)*2*linesize, linesize, block + off);
+ if(apply_filter && (j&2 ? pat & (1<<(j-2)) : (cbp_top & (1 << (j + 2)))))
+ s->dsp.vc1_v_loop_filter4(dst + (j&1)*4 + (j&2)*2*linesize, linesize, v->pq);
+ if(apply_filter && (j&1 ? pat & (1<<(j-1)) : (cbp_left & (1 << (j + 1)))))
+ s->dsp.vc1_h_loop_filter4(dst + (j&1)*4 + (j&2)*2*linesize, linesize, v->pq);
+ }
+ }
+ break;
+ case TT_8X4:
+ pat = ~((subblkpat & 2)*6 + (subblkpat & 1)*3) & 0xF;
+ for(j = 0; j < 2; j++) {
+ last = subblkpat & (1 << (1 - j));
+ i = 0;
+ off = j * 32;
+ while (!last) {
+ vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
+ i += skip;
+ if(i > 31)
+ break;
+ idx = v->zz_8x4[i++]+off;
+ block[idx] = value * scale;
+ if(!v->pquantizer)
+ block[idx] += (block[idx] < 0) ? -mquant : mquant;
+ }
+ if(!(subblkpat & (1 << (1 - j))) && !skip_block){
+ if(i==1)
+ s->dsp.vc1_inv_trans_8x4_dc(dst + j*4*linesize, linesize, block + off);
+ else
+ s->dsp.vc1_inv_trans_8x4(dst + j*4*linesize, linesize, block + off);
+ if(apply_filter && j ? pat & 0x3 : (cbp_top & 0xC))
+ s->dsp.vc1_v_loop_filter8(dst + j*4*linesize, linesize, v->pq);
+ if(apply_filter && cbp_left & (2 << j))
+ s->dsp.vc1_h_loop_filter4(dst + j*4*linesize, linesize, v->pq);
+ }
+ }
+ break;
+ case TT_4X8:
+ pat = ~(subblkpat*5) & 0xF;
+ for(j = 0; j < 2; j++) {
+ last = subblkpat & (1 << (1 - j));
+ i = 0;
+ off = j * 4;
+ while (!last) {
+ vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
+ i += skip;
+ if(i > 31)
+ break;
+ idx = v->zz_4x8[i++]+off;
+ block[idx] = value * scale;
+ if(!v->pquantizer)
+ block[idx] += (block[idx] < 0) ? -mquant : mquant;
+ }
+ if(!(subblkpat & (1 << (1 - j))) && !skip_block){
+ if(i==1)
+ s->dsp.vc1_inv_trans_4x8_dc(dst + j*4, linesize, block + off);
+ else
+ s->dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off);
+ if(apply_filter && cbp_top & (2 << j))
+ s->dsp.vc1_v_loop_filter4(dst + j*4, linesize, v->pq);
+ if(apply_filter && j ? pat & 0x5 : (cbp_left & 0xA))
+ s->dsp.vc1_h_loop_filter8(dst + j*4, linesize, v->pq);
+ }
+ }
+ break;
+ }
+ return pat;
+}
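+/* vc1_decode_p_block() above chooses one of four transform layouts (8x8,
+ * two 8x4, two 4x8 or four 4x4 sub-blocks), decodes the coefficients of each
+ * coded sub-block, applies the matching inverse transform (with a DC-only
+ * fast path when only one coefficient was read) and returns 'pat', a 4-bit
+ * mask of coded 4x4 quadrants that the callers accumulate into v->cbp[] and
+ * feed back as cbp_top/cbp_left for in-loop filtering. */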
+
+/** @} */ // Macroblock group
+
+static const int size_table [6] = { 0, 2, 3, 4, 5, 8 };
+static const int offset_table[6] = { 0, 1, 3, 7, 15, 31 };
+
+/** Decode one P-frame MB (in Simple/Main profile)
+ */
+static int vc1_decode_p_mb(VC1Context *v)
+{
+ MpegEncContext *s = &v->s;
+ GetBitContext *gb = &s->gb;
+ int i, j;
+ int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
+ int cbp; /* cbp decoding stuff */
+ int mqdiff, mquant; /* MB quantization */
+ int ttmb = v->ttfrm; /* MB Transform type */
+
+ int mb_has_coeffs = 1; /* last_flag */
+ int dmv_x, dmv_y; /* Differential MV components */
+ int index, index1; /* LUT indexes */
+ int val, sign; /* temp values */
+ int first_block = 1;
+ int dst_idx, off;
+ int skipped, fourmv;
+ int block_cbp = 0, pat;
+ int apply_loop_filter;
+
+ mquant = v->pq; /* Lossy initialization */
+
+ if (v->mv_type_is_raw)
+ fourmv = get_bits1(gb);
+ else
+ fourmv = v->mv_type_mb_plane[mb_pos];
+ if (v->skip_is_raw)
+ skipped = get_bits1(gb);
+ else
+ skipped = v->s.mbskip_table[mb_pos];
+
+ apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY);
+ if (!fourmv) /* 1MV mode */
+ {
+ if (!skipped)
+ {
+ GET_MVDATA(dmv_x, dmv_y);
+
+ if (s->mb_intra) {
+ s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
+ s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
+ }
+ s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
+ vc1_pred_mv(s, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0]);
+
+ /* FIXME Set DC val for inter block ? */
+ if (s->mb_intra && !mb_has_coeffs)
+ {
+ GET_MQUANT();
+ s->ac_pred = get_bits1(gb);
+ cbp = 0;
+ }
+ else if (mb_has_coeffs)
+ {
+ if (s->mb_intra) s->ac_pred = get_bits1(gb);
+ cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
+ GET_MQUANT();
+ }
+ else
+ {
+ mquant = v->pq;
+ cbp = 0;
+ }
+ s->current_picture.qscale_table[mb_pos] = mquant;
+
+ if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
+ ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
+ VC1_TTMB_VLC_BITS, 2);
+ if(!s->mb_intra) vc1_mc_1mv(v, 0);
+ dst_idx = 0;
+ for (i=0; i<6; i++)
+ {
+ s->dc_val[0][s->block_index[i]] = 0;
+ dst_idx += i >> 2;
+ val = ((cbp >> (5 - i)) & 1);
+ off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
+ v->mb_type[0][s->block_index[i]] = s->mb_intra;
+ if(s->mb_intra) {
+ /* check if prediction blocks A and C are available */
+ v->a_avail = v->c_avail = 0;
+ if(i == 2 || i == 3 || !s->first_slice_line)
+ v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
+ if(i == 1 || i == 3 || s->mb_x)
+ v->c_avail = v->mb_type[0][s->block_index[i] - 1];
+
+ vc1_decode_intra_block(v, s->block[i], i, val, mquant, (i&4)?v->codingset2:v->codingset);
+ if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
+ s->dsp.vc1_inv_trans_8x8(s->block[i]);
+ if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
+ s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
+ if(v->pq >= 9 && v->overlap) {
+ if(v->c_avail)
+ s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
+ if(v->a_avail)
+ s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
+ }
+ if(apply_loop_filter && s->mb_x && s->mb_x != (s->mb_width - 1) && s->mb_y && s->mb_y != (s->mb_height - 1)){
+ int left_cbp, top_cbp;
+ if(i & 4){
+ left_cbp = v->cbp[s->mb_x - 1] >> (i * 4);
+ top_cbp = v->cbp[s->mb_x - s->mb_stride] >> (i * 4);
+ }else{
+ left_cbp = (i & 1) ? (cbp >> ((i-1)*4)) : (v->cbp[s->mb_x - 1] >> ((i+1)*4));
+ top_cbp = (i & 2) ? (cbp >> ((i-2)*4)) : (v->cbp[s->mb_x - s->mb_stride] >> ((i+2)*4));
+ }
+ if(left_cbp & 0xC)
+ s->dsp.vc1_v_loop_filter8(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize, v->pq);
+ if(top_cbp & 0xA)
+ s->dsp.vc1_h_loop_filter8(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize, v->pq);
+ }
+ block_cbp |= 0xF << (i << 2);
+ } else if(val) {
+ int left_cbp = 0, top_cbp = 0, filter = 0;
+ if(apply_loop_filter && s->mb_x && s->mb_x != (s->mb_width - 1) && s->mb_y && s->mb_y != (s->mb_height - 1)){
+ filter = 1;
+ if(i & 4){
+ left_cbp = v->cbp[s->mb_x - 1] >> (i * 4);
+ top_cbp = v->cbp[s->mb_x - s->mb_stride] >> (i * 4);
+ }else{
+ left_cbp = (i & 1) ? (cbp >> ((i-1)*4)) : (v->cbp[s->mb_x - 1] >> ((i+1)*4));
+ top_cbp = (i & 2) ? (cbp >> ((i-2)*4)) : (v->cbp[s->mb_x - s->mb_stride] >> ((i+2)*4));
+ }
+ if(left_cbp & 0xC)
+ s->dsp.vc1_v_loop_filter8(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize, v->pq);
+ if(top_cbp & 0xA)
+ s->dsp.vc1_h_loop_filter8(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize, v->pq);
+ }
+ pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block, s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize, (i&4) && (s->flags & CODEC_FLAG_GRAY), filter, left_cbp, top_cbp);
+ block_cbp |= pat << (i << 2);
+ if(!v->ttmbf && ttmb < 8) ttmb = -1;
+ first_block = 0;
+ }
+ }
+ }
+ else //Skipped
+ {
+ s->mb_intra = 0;
+ for(i = 0; i < 6; i++) {
+ v->mb_type[0][s->block_index[i]] = 0;
+ s->dc_val[0][s->block_index[i]] = 0;
+ }
+ s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
+ s->current_picture.qscale_table[mb_pos] = 0;
+ vc1_pred_mv(s, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0]);
+ vc1_mc_1mv(v, 0);
+ return 0;
+ }
+ } //1MV mode
+ else //4MV mode
+ {
+ if (!skipped /* unskipped MB */)
+ {
+ int intra_count = 0, coded_inter = 0;
+ int is_intra[6], is_coded[6];
+ /* Get CBPCY */
+ cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
+ for (i=0; i<6; i++)
+ {
+ val = ((cbp >> (5 - i)) & 1);
+ s->dc_val[0][s->block_index[i]] = 0;
+ s->mb_intra = 0;
+ if(i < 4) {
+ dmv_x = dmv_y = 0;
+ s->mb_intra = 0;
+ mb_has_coeffs = 0;
+ if(val) {
+ GET_MVDATA(dmv_x, dmv_y);
+ }
+ vc1_pred_mv(s, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0]);
+ if(!s->mb_intra) vc1_mc_4mv_luma(v, i);
+ intra_count += s->mb_intra;
+ is_intra[i] = s->mb_intra;
+ is_coded[i] = mb_has_coeffs;
+ }
+ if(i&4){
+ is_intra[i] = (intra_count >= 3);
+ is_coded[i] = val;
+ }
+ if(i == 4) vc1_mc_4mv_chroma(v);
+ v->mb_type[0][s->block_index[i]] = is_intra[i];
+ if(!coded_inter) coded_inter = !is_intra[i] & is_coded[i];
+ }
+ // if there are no coded blocks then don't do anything more
+ if(!intra_count && !coded_inter) return 0;
+ dst_idx = 0;
+ GET_MQUANT();
+ s->current_picture.qscale_table[mb_pos] = mquant;
+ /* test if block is intra and has pred */
+ {
+ int intrapred = 0;
+ for(i=0; i<6; i++)
+ if(is_intra[i]) {
+ if(((!s->first_slice_line || (i==2 || i==3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
+ || ((s->mb_x || (i==1 || i==3)) && v->mb_type[0][s->block_index[i] - 1])) {
+ intrapred = 1;
+ break;
+ }
+ }
+ if(intrapred)s->ac_pred = get_bits1(gb);
+ else s->ac_pred = 0;
+ }
+ if (!v->ttmbf && coded_inter)
+ ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
+ for (i=0; i<6; i++)
+ {
+ dst_idx += i >> 2;
+ off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
+ s->mb_intra = is_intra[i];
+ if (is_intra[i]) {
+ /* check if prediction blocks A and C are available */
+ v->a_avail = v->c_avail = 0;
+ if(i == 2 || i == 3 || !s->first_slice_line)
+ v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
+ if(i == 1 || i == 3 || s->mb_x)
+ v->c_avail = v->mb_type[0][s->block_index[i] - 1];
+
+ vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant, (i&4)?v->codingset2:v->codingset);
+ if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
+ s->dsp.vc1_inv_trans_8x8(s->block[i]);
+ if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
+ s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
+ if(v->pq >= 9 && v->overlap) {
+ if(v->c_avail)
+ s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
+ if(v->a_avail)
+ s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
+ }
+ if(v->s.loop_filter && s->mb_x && s->mb_x != (s->mb_width - 1) && s->mb_y && s->mb_y != (s->mb_height - 1)){
+ int left_cbp, top_cbp;
+ if(i & 4){
+ left_cbp = v->cbp[s->mb_x - 1] >> (i * 4);
+ top_cbp = v->cbp[s->mb_x - s->mb_stride] >> (i * 4);
+ }else{
+ left_cbp = (i & 1) ? (cbp >> ((i-1)*4)) : (v->cbp[s->mb_x - 1] >> ((i+1)*4));
+ top_cbp = (i & 2) ? (cbp >> ((i-2)*4)) : (v->cbp[s->mb_x - s->mb_stride] >> ((i+2)*4));
+ }
+ if(left_cbp & 0xC)
+ s->dsp.vc1_v_loop_filter8(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize, v->pq);
+ if(top_cbp & 0xA)
+ s->dsp.vc1_h_loop_filter8(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize, v->pq);
+ }
+ block_cbp |= 0xF << (i << 2);
+ } else if(is_coded[i]) {
+ int left_cbp = 0, top_cbp = 0, filter = 0;
+ if(v->s.loop_filter && s->mb_x && s->mb_x != (s->mb_width - 1) && s->mb_y && s->mb_y != (s->mb_height - 1)){
+ filter = 1;
+ if(i & 4){
+ left_cbp = v->cbp[s->mb_x - 1] >> (i * 4);
+ top_cbp = v->cbp[s->mb_x - s->mb_stride] >> (i * 4);
+ }else{
+ left_cbp = (i & 1) ? (cbp >> ((i-1)*4)) : (v->cbp[s->mb_x - 1] >> ((i+1)*4));
+ top_cbp = (i & 2) ? (cbp >> ((i-2)*4)) : (v->cbp[s->mb_x - s->mb_stride] >> ((i+2)*4));
+ }
+ if(left_cbp & 0xC)
+ s->dsp.vc1_v_loop_filter8(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize, v->pq);
+ if(top_cbp & 0xA)
+ s->dsp.vc1_h_loop_filter8(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize, v->pq);
+ }
+ pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block, s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize, (i&4) && (s->flags & CODEC_FLAG_GRAY), filter, left_cbp, top_cbp);
+ block_cbp |= pat << (i << 2);
+ if(!v->ttmbf && ttmb < 8) ttmb = -1;
+ first_block = 0;
+ }
+ }
+ return 0;
+ }
+ else //Skipped MB
+ {
+ s->mb_intra = 0;
+ s->current_picture.qscale_table[mb_pos] = 0;
+ for (i=0; i<6; i++) {
+ v->mb_type[0][s->block_index[i]] = 0;
+ s->dc_val[0][s->block_index[i]] = 0;
+ }
+ for (i=0; i<4; i++)
+ {
+ vc1_pred_mv(s, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0]);
+ vc1_mc_4mv_luma(v, i);
+ }
+ vc1_mc_4mv_chroma(v);
+ s->current_picture.qscale_table[mb_pos] = 0;
+ return 0;
+ }
+ }
+ v->cbp[s->mb_x] = block_cbp;
+
+ /* Should never happen */
+ return -1;
+}
+
+/** Decode one B-frame MB (in Main profile)
+ */
+static void vc1_decode_b_mb(VC1Context *v)
+{
+ MpegEncContext *s = &v->s;
+ GetBitContext *gb = &s->gb;
+ int i, j;
+ int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
+ int cbp = 0; /* cbp decoding stuff */
+ int mqdiff, mquant; /* MB quantization */
+ int ttmb = v->ttfrm; /* MB Transform type */
+ int mb_has_coeffs = 0; /* last_flag */
+ int index, index1; /* LUT indexes */
+ int val, sign; /* temp values */
+ int first_block = 1;
+ int dst_idx, off;
+ int skipped, direct;
+ int dmv_x[2], dmv_y[2];
+ int bmvtype = BMV_TYPE_BACKWARD;
+
+ mquant = v->pq; /* lousy initialization; the real value is set by GET_MQUANT() where needed */
+ s->mb_intra = 0;
+
+ if (v->dmb_is_raw)
+ direct = get_bits1(gb);
+ else
+ direct = v->direct_mb_plane[mb_pos];
+ if (v->skip_is_raw)
+ skipped = get_bits1(gb);
+ else
+ skipped = v->s.mbskip_table[mb_pos];
+
+ dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
+ for(i = 0; i < 6; i++) {
+ v->mb_type[0][s->block_index[i]] = 0;
+ s->dc_val[0][s->block_index[i]] = 0;
+ }
+ s->current_picture.qscale_table[mb_pos] = 0;
+
+ if (!direct) {
+ if (!skipped) {
+ GET_MVDATA(dmv_x[0], dmv_y[0]);
+ dmv_x[1] = dmv_x[0];
+ dmv_y[1] = dmv_y[0];
+ }
+ if(skipped || !s->mb_intra) {
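+ /* B MV type: which direction the shortest code maps to depends on BFRACTION */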
+ bmvtype = decode012(gb);
+ switch(bmvtype) {
+ case 0:
+ bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
+ break;
+ case 1:
+ bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
+ break;
+ case 2:
+ bmvtype = BMV_TYPE_INTERPOLATED;
+ dmv_x[0] = dmv_y[0] = 0;
+ }
+ }
+ }
+ for(i = 0; i < 6; i++)
+ v->mb_type[0][s->block_index[i]] = s->mb_intra;
+
+ if (skipped) {
+ if(direct) bmvtype = BMV_TYPE_INTERPOLATED;
+ vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
+ vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
+ return;
+ }
+ if (direct) {
+ cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
+ GET_MQUANT();
+ s->mb_intra = 0;
+ s->current_picture.qscale_table[mb_pos] = mquant;
+ if(!v->ttmbf)
+ ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
+ dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
+ vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
+ vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
+ } else {
+ if(!mb_has_coeffs && !s->mb_intra) {
+ /* no coded blocks - effectively skipped */
+ vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
+ vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
+ return;
+ }
+ if(s->mb_intra && !mb_has_coeffs) {
+ GET_MQUANT();
+ s->current_picture.qscale_table[mb_pos] = mquant;
+ s->ac_pred = get_bits1(gb);
+ cbp = 0;
+ vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
+ } else {
+ if(bmvtype == BMV_TYPE_INTERPOLATED) {
+ GET_MVDATA(dmv_x[0], dmv_y[0]);
+ if(!mb_has_coeffs) {
+ /* interpolated skipped block */
+ vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
+ vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
+ return;
+ }
+ }
+ vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
+ if(!s->mb_intra) {
+ vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
+ }
+ if(s->mb_intra)
+ s->ac_pred = get_bits1(gb);
+ cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
+ GET_MQUANT();
+ s->current_picture.qscale_table[mb_pos] = mquant;
+ if(!v->ttmbf && !s->mb_intra && mb_has_coeffs)
+ ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
+ }
+ }
+ dst_idx = 0;
+ for (i=0; i<6; i++)
+ {
+ s->dc_val[0][s->block_index[i]] = 0;
+ dst_idx += i >> 2;
+ val = ((cbp >> (5 - i)) & 1);
+ off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
+ v->mb_type[0][s->block_index[i]] = s->mb_intra;
+ if(s->mb_intra) {
+ /* check if prediction blocks A and C are available */
+ v->a_avail = v->c_avail = 0;
+ if(i == 2 || i == 3 || !s->first_slice_line)
+ v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
+ if(i == 1 || i == 3 || s->mb_x)
+ v->c_avail = v->mb_type[0][s->block_index[i] - 1];
+
+ vc1_decode_intra_block(v, s->block[i], i, val, mquant, (i&4)?v->codingset2:v->codingset);
+ if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
+ s->dsp.vc1_inv_trans_8x8(s->block[i]);
+ if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
+ s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
+ } else if(val) {
+ vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block, s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize, (i&4) && (s->flags & CODEC_FLAG_GRAY), 0, 0, 0);
+ if(!v->ttmbf && ttmb < 8) ttmb = -1;
+ first_block = 0;
+ }
+ }
+}
+
+/** Decode blocks of I-frame
+ */
+static void vc1_decode_i_blocks(VC1Context *v)
+{
+ int k, j;
+ MpegEncContext *s = &v->s;
+ int cbp, val;
+ uint8_t *coded_val;
+ int mb_pos;
+
+ /* select coding mode used for VLC table selection */
+ switch(v->y_ac_table_index){
+ case 0:
+ v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
+ break;
+ case 1:
+ v->codingset = CS_HIGH_MOT_INTRA;
+ break;
+ case 2:
+ v->codingset = CS_MID_RATE_INTRA;
+ break;
+ }
+
+ switch(v->c_ac_table_index){
+ case 0:
+ v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
+ break;
+ case 1:
+ v->codingset2 = CS_HIGH_MOT_INTER;
+ break;
+ case 2:
+ v->codingset2 = CS_MID_RATE_INTER;
+ break;
+ }
+
+ /* Set DC scale - y and c use the same */
+ s->y_dc_scale = s->y_dc_scale_table[v->pq];
+ s->c_dc_scale = s->c_dc_scale_table[v->pq];
+
+ //do frame decode
+ s->mb_x = s->mb_y = 0;
+ s->mb_intra = 1;
+ s->first_slice_line = 1;
+ for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
+ s->mb_x = 0;
+ ff_init_block_index(s);
+ for(; s->mb_x < s->mb_width; s->mb_x++) {
+ ff_update_block_index(s);
+ s->dsp.clear_blocks(s->block[0]);
+ mb_pos = s->mb_x + s->mb_y * s->mb_width;
+ s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
+ s->current_picture.qscale_table[mb_pos] = v->pq;
+ s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
+ s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
+
+ // do actual MB decoding and displaying
+ cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
+ v->s.ac_pred = get_bits1(&v->s.gb);
+
+ for(k = 0; k < 6; k++) {
+ val = ((cbp >> (5 - k)) & 1);
+
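+ /* luma CBP bits are coded as the difference from a predicted bit; chroma bits are used directly */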
+ if (k < 4) {
+ int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
+ val = val ^ pred;
+ *coded_val = val;
+ }
+ cbp |= val << (5 - k);
+
+ vc1_decode_i_block(v, s->block[k], k, val, (k<4)? v->codingset : v->codingset2);
+
+ s->dsp.vc1_inv_trans_8x8(s->block[k]);
+ if(v->pq >= 9 && v->overlap) {
+ for(j = 0; j < 64; j++) s->block[k][j] += 128;
+ }
+ }
+
+ vc1_put_block(v, s->block);
+ if(v->pq >= 9 && v->overlap) {
+ if(s->mb_x) {
+ s->dsp.vc1_h_overlap(s->dest[0], s->linesize);
+ s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
+ if(!(s->flags & CODEC_FLAG_GRAY)) {
+ s->dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
+ s->dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
+ }
+ }
+ s->dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
+ s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
+ if(!s->first_slice_line) {
+ s->dsp.vc1_v_overlap(s->dest[0], s->linesize);
+ s->dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
+ if(!(s->flags & CODEC_FLAG_GRAY)) {
+ s->dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
+ s->dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
+ }
+ }
+ s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
+ s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
+ }
+ if(v->s.loop_filter) vc1_loop_filter_iblk(s, v->pq);
+
+ if(get_bits_count(&s->gb) > v->bits) {
+ ff_er_add_slice(s, 0, 0, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
+ av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n", get_bits_count(&s->gb), v->bits);
+ return;
+ }
+ }
+ ff_draw_horiz_band(s, s->mb_y * 16, 16);
+ s->first_slice_line = 0;
+ }
+ ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
+}
+
+/** Decode blocks of I-frame for advanced profile
+ */
+static void vc1_decode_i_blocks_adv(VC1Context *v)
+{
+ int k, j;
+ MpegEncContext *s = &v->s;
+ int cbp, val;
+ uint8_t *coded_val;
+ int mb_pos;
+ int mquant = v->pq;
+ int mqdiff;
+ int overlap;
+ GetBitContext *gb = &s->gb;
+
+ /* select coding mode used for VLC table selection */
+ switch(v->y_ac_table_index){
+ case 0:
+ v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
+ break;
+ case 1:
+ v->codingset = CS_HIGH_MOT_INTRA;
+ break;
+ case 2:
+ v->codingset = CS_MID_RATE_INTRA;
+ break;
+ }
+
+ switch(v->c_ac_table_index){
+ case 0:
+ v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
+ break;
+ case 1:
+ v->codingset2 = CS_HIGH_MOT_INTER;
+ break;
+ case 2:
+ v->codingset2 = CS_MID_RATE_INTER;
+ break;
+ }
+
+ //do frame decode
+ s->mb_x = s->mb_y = 0;
+ s->mb_intra = 1;
+ s->first_slice_line = 1;
+ for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
+ s->mb_x = 0;
+ ff_init_block_index(s);
+ for(;s->mb_x < s->mb_width; s->mb_x++) {
+ ff_update_block_index(s);
+ s->dsp.clear_blocks(s->block[0]);
+ mb_pos = s->mb_x + s->mb_y * s->mb_stride;
+ s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
+ s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
+ s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
+
+ // do actual MB decoding and displaying
+ cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
+ if(v->acpred_is_raw)
+ v->s.ac_pred = get_bits1(&v->s.gb);
+ else
+ v->s.ac_pred = v->acpred_plane[mb_pos];
+
+ if(v->condover == CONDOVER_SELECT) {
+ if(v->overflg_is_raw)
+ overlap = get_bits1(&v->s.gb);
+ else
+ overlap = v->over_flags_plane[mb_pos];
+ } else
+ overlap = (v->condover == CONDOVER_ALL);
+
+ GET_MQUANT();
+
+ s->current_picture.qscale_table[mb_pos] = mquant;
+ /* Set DC scale - y and c use the same */
+ s->y_dc_scale = s->y_dc_scale_table[mquant];
+ s->c_dc_scale = s->c_dc_scale_table[mquant];
+
+ for(k = 0; k < 6; k++) {
+ val = ((cbp >> (5 - k)) & 1);
+
+ if (k < 4) {
+ int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
+ val = val ^ pred;
+ *coded_val = val;
+ }
+ cbp |= val << (5 - k);
+
+ v->a_avail = !s->first_slice_line || (k==2 || k==3);
+ v->c_avail = !!s->mb_x || (k==1 || k==3);
+
+ vc1_decode_i_block_adv(v, s->block[k], k, val, (k<4)? v->codingset : v->codingset2, mquant);
+
+ s->dsp.vc1_inv_trans_8x8(s->block[k]);
+ for(j = 0; j < 64; j++) s->block[k][j] += 128;
+ }
+
+ vc1_put_block(v, s->block);
+ if(overlap) {
+ if(s->mb_x) {
+ s->dsp.vc1_h_overlap(s->dest[0], s->linesize);
+ s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
+ if(!(s->flags & CODEC_FLAG_GRAY)) {
+ s->dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
+ s->dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
+ }
+ }
+ s->dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
+ s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
+ if(!s->first_slice_line) {
+ s->dsp.vc1_v_overlap(s->dest[0], s->linesize);
+ s->dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
+ if(!(s->flags & CODEC_FLAG_GRAY)) {
+ s->dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
+ s->dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
+ }
+ }
+ s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
+ s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
+ }
+ if(v->s.loop_filter) vc1_loop_filter_iblk(s, v->pq);
+
+ if(get_bits_count(&s->gb) > v->bits) {
+ ff_er_add_slice(s, 0, 0, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
+ av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n", get_bits_count(&s->gb), v->bits);
+ return;
+ }
+ }
+ ff_draw_horiz_band(s, s->mb_y * 16, 16);
+ s->first_slice_line = 0;
+ }
+ ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
+}
+
+static void vc1_decode_p_blocks(VC1Context *v)
+{
+ MpegEncContext *s = &v->s;
+
+ /* select coding mode used for VLC table selection */
+ switch(v->c_ac_table_index){
+ case 0:
+ v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
+ break;
+ case 1:
+ v->codingset = CS_HIGH_MOT_INTRA;
+ break;
+ case 2:
+ v->codingset = CS_MID_RATE_INTRA;
+ break;
+ }
+
+ switch(v->c_ac_table_index){
+ case 0:
+ v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
+ break;
+ case 1:
+ v->codingset2 = CS_HIGH_MOT_INTER;
+ break;
+ case 2:
+ v->codingset2 = CS_MID_RATE_INTER;
+ break;
+ }
+
+ s->first_slice_line = 1;
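+ /* cbp_base holds two rows of per-MB CBP values: the previous MB row (top neighbours for the loop filter) and the current one */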
+ memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride);
+ for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
+ s->mb_x = 0;
+ ff_init_block_index(s);
+ for(; s->mb_x < s->mb_width; s->mb_x++) {
+ ff_update_block_index(s);
+
+ vc1_decode_p_mb(v);
+ if(get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
+ ff_er_add_slice(s, 0, 0, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
+ av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n", get_bits_count(&s->gb), v->bits,s->mb_x,s->mb_y);
+ return;
+ }
+ }
+ memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0])*s->mb_stride);
+ ff_draw_horiz_band(s, s->mb_y * 16, 16);
+ s->first_slice_line = 0;
+ }
+ ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
+}
+
+static void vc1_decode_b_blocks(VC1Context *v)
+{
+ MpegEncContext *s = &v->s;
+
+ /* select coding mode used for VLC table selection */
+ switch(v->c_ac_table_index){
+ case 0:
+ v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
+ break;
+ case 1:
+ v->codingset = CS_HIGH_MOT_INTRA;
+ break;
+ case 2:
+ v->codingset = CS_MID_RATE_INTRA;
+ break;
+ }
+
+ switch(v->c_ac_table_index){
+ case 0:
+ v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
+ break;
+ case 1:
+ v->codingset2 = CS_HIGH_MOT_INTER;
+ break;
+ case 2:
+ v->codingset2 = CS_MID_RATE_INTER;
+ break;
+ }
+
+ s->first_slice_line = 1;
+ for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
+ s->mb_x = 0;
+ ff_init_block_index(s);
+ for(; s->mb_x < s->mb_width; s->mb_x++) {
+ ff_update_block_index(s);
+
+ vc1_decode_b_mb(v);
+ if(get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
+ ff_er_add_slice(s, 0, 0, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
+ av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n", get_bits_count(&s->gb), v->bits,s->mb_x,s->mb_y);
+ return;
+ }
+ if(v->s.loop_filter) vc1_loop_filter_iblk(s, v->pq);
+ }
+ ff_draw_horiz_band(s, s->mb_y * 16, 16);
+ s->first_slice_line = 0;
+ }
+ ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
+}
+
+static void vc1_decode_skip_blocks(VC1Context *v)
+{
+ MpegEncContext *s = &v->s;
+
+ ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
+ s->first_slice_line = 1;
+ for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
+ s->mb_x = 0;
+ ff_init_block_index(s);
+ ff_update_block_index(s);
+ memcpy(s->dest[0], s->last_picture.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
+ memcpy(s->dest[1], s->last_picture.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
+ memcpy(s->dest[2], s->last_picture.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
+ ff_draw_horiz_band(s, s->mb_y * 16, 16);
+ s->first_slice_line = 0;
+ }
+ s->pict_type = FF_P_TYPE;
+}
+
+static void vc1_decode_blocks(VC1Context *v)
+{
+
+ v->s.esc3_level_length = 0;
+ if(v->x8_type){
+ ff_intrax8_decode_picture(&v->x8, 2*v->pq+v->halfpq, v->pq*(!v->pquantizer) );
+ }else{
+
+ switch(v->s.pict_type) {
+ case FF_I_TYPE:
+ if(v->profile == PROFILE_ADVANCED)
+ vc1_decode_i_blocks_adv(v);
+ else
+ vc1_decode_i_blocks(v);
+ break;
+ case FF_P_TYPE:
+ if(v->p_frame_skipped)
+ vc1_decode_skip_blocks(v);
+ else
+ vc1_decode_p_blocks(v);
+ break;
+ case FF_B_TYPE:
+ if(v->bi_type){
+ if(v->profile == PROFILE_ADVANCED)
+ vc1_decode_i_blocks_adv(v);
+ else
+ vc1_decode_i_blocks(v);
+ }else
+ vc1_decode_b_blocks(v);
+ break;
+ }
+ }
+}
+
+/** Initialize a VC1/WMV3 decoder
+ * @todo TODO: Handle VC-1 IDUs (Transport level?)
+ * @todo TODO: Decipher remaining bits in extra_data
+ */
+static av_cold int vc1_decode_init(AVCodecContext *avctx)
+{
+ VC1Context *v = avctx->priv_data;
+ MpegEncContext *s = &v->s;
+ GetBitContext gb;
+
+ if (!avctx->extradata_size || !avctx->extradata) return -1;
+ if (!(avctx->flags & CODEC_FLAG_GRAY))
+ avctx->pix_fmt = PIX_FMT_YUV420P; /* ffdshow custom code */
+ else
+ avctx->pix_fmt = PIX_FMT_GRAY8;
+ v->s.avctx = avctx;
+ avctx->flags |= CODEC_FLAG_EMU_EDGE;
+ v->s.flags |= CODEC_FLAG_EMU_EDGE;
+
+ if(avctx->idct_algo==FF_IDCT_AUTO){
+ avctx->idct_algo=FF_IDCT_WMV2;
+ }
+
+ if(ff_h263_decode_init(avctx) < 0)
+ return -1;
+ if (vc1_init_common(v) < 0) return -1;
+ // only for ff_msmp4_mb_i_table
+ if (ff_msmpeg4_decode_init(avctx) < 0) return -1;
+
+ avctx->coded_width = avctx->width;
+ avctx->coded_height = avctx->height;
+ if (avctx->codec_id == CODEC_ID_WMV3)
+ {
+ int count = 0;
+
+ // looks like WMV3 has a sequence header stored in the extradata
+ // advanced sequence header may be before the first frame
+ // the last byte of the extradata is a version number, 1 for the
+ // samples we can decode
+
+ init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
+
+ if (vc1_decode_sequence_header(avctx, v, &gb) < 0)
+ return -1;
+
+ count = avctx->extradata_size*8 - get_bits_count(&gb);
+ if (count>0)
+ {
+ av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
+ count, get_bits(&gb, count));
+ }
+ else if (count < 0)
+ {
+ av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
+ }
+ } else { // VC1/WVC1
+ const uint8_t *start = avctx->extradata;
+ uint8_t *end = avctx->extradata + avctx->extradata_size;
+ const uint8_t *next;
+ int size, buf2_size;
+ uint8_t *buf2 = NULL;
+ int seq_initialized = 0, ep_initialized = 0;
+
+ if(avctx->extradata_size < 16) {
+ av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", avctx->extradata_size);
+ return -1;
+ }
+
+ buf2 = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ start = find_next_marker(start, end); // in WVC1 extradata first byte is its size, but can be 0 in mkv
+ next = start;
+ for(; next < end; start = next){
+ next = find_next_marker(start + 4, end);
+ size = next - start - 4;
+ if(size <= 0) continue;
+ buf2_size = vc1_unescape_buffer(start + 4, size, buf2);
+ init_get_bits(&gb, buf2, buf2_size * 8);
+ switch(AV_RB32(start)){
+ case VC1_CODE_SEQHDR:
+ if(vc1_decode_sequence_header(avctx, v, &gb) < 0){
+ av_free(buf2);
+ return -1;
+ }
+ seq_initialized = 1;
+ break;
+ case VC1_CODE_ENTRYPOINT:
+ if(vc1_decode_entry_point(avctx, v, &gb) < 0){
+ av_free(buf2);
+ return -1;
+ }
+ ep_initialized = 1;
+ break;
+ }
+ }
+ av_free(buf2);
+ if(!seq_initialized || !ep_initialized){
+ av_log(avctx, AV_LOG_ERROR, "Incomplete extradata\n");
+ return -1;
+ }
+ }
+ avctx->has_b_frames= !!(avctx->max_b_frames);
+ s->low_delay = !avctx->has_b_frames;
+
+ s->mb_width = (avctx->coded_width+15)>>4;
+ s->mb_height = (avctx->coded_height+15)>>4;
+
+ /* Allocate mb bitplanes */
+ v->mv_type_mb_plane = av_malloc(s->mb_stride * s->mb_height);
+ v->direct_mb_plane = av_malloc(s->mb_stride * s->mb_height);
+ v->acpred_plane = av_malloc(s->mb_stride * s->mb_height);
+ v->over_flags_plane = av_malloc(s->mb_stride * s->mb_height);
+
+ v->cbp_base = av_malloc(sizeof(v->cbp_base[0]) * 2 * s->mb_stride);
+ v->cbp = v->cbp_base + s->mb_stride;
+
+ /* allocate block type info this way so it can be used with s->block_index[] */
+ v->mb_type_base = av_malloc(s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
+ v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
+ v->mb_type[1] = v->mb_type_base + s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride + 1;
+ v->mb_type[2] = v->mb_type[1] + s->mb_stride * (s->mb_height + 1);
+
+ /* Init coded blocks info */
+ if (v->profile == PROFILE_ADVANCED)
+ {
+// if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
+// return -1;
+// if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
+// return -1;
+ }
+
+ ff_intrax8_common_init(&v->x8,s);
+ return 0;
+}
+
+
+/** Decode a VC1/WMV3 frame
+ * @todo TODO: Handle VC-1 IDUs (Transport level?)
+ */
+static int vc1_decode_frame(AVCodecContext *avctx,
+ void *data, int *data_size,
+ const uint8_t *buf, int buf_size)
+{
+ VC1Context *v = avctx->priv_data;
+ MpegEncContext *s = &v->s;
+ AVFrame *pict = data;
+ uint8_t *buf2 = NULL;
+
+ /* no supplementary picture */
+ if (buf_size == 0) {
+ /* special case for last picture */
+ if (s->low_delay==0 && s->next_picture_ptr) {
+ *pict= *(AVFrame*)s->next_picture_ptr;
+ s->next_picture_ptr= NULL;
+
+ *data_size = sizeof(AVFrame);
+ }
+
+ return 0;
+ }
+
+ /* We need to set current_picture_ptr before reading the header,
+ * otherwise we cannot store anything in there. */
+ if(s->current_picture_ptr==NULL || s->current_picture_ptr->data[0]){
+ int i= ff_find_unused_picture(s, 0);
+ s->current_picture_ptr= &s->picture[i];
+ }
+
+ //for advanced profile we may need to parse and unescape data
+ if (avctx->codec_id == CODEC_ID_VC1) {
+ int buf_size2 = 0;
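+ /* buf2 will hold the unescaped bitstream (startcode emulation prevention bytes removed) */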
+ buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
+
+ if(IS_MARKER(AV_RB32(buf))){ /* frame starts with marker and needs to be parsed */
+ const uint8_t *start, *end, *next;
+ int size;
+
+ next = buf;
+ for(start = buf, end = buf + buf_size; next < end; start = next){
+ next = find_next_marker(start + 4, end);
+ size = next - start - 4;
+ if(size <= 0) continue;
+ switch(AV_RB32(start)){
+ case VC1_CODE_FRAME:
+ buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
+ break;
+ case VC1_CODE_ENTRYPOINT: /* it should be before frame data */
+ buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
+ init_get_bits(&s->gb, buf2, buf_size2*8);
+ vc1_decode_entry_point(avctx, v, &s->gb);
+ break;
+ case VC1_CODE_SLICE:
+ av_log(avctx, AV_LOG_ERROR, "Sliced decoding is not implemented (yet)\n");
+ av_free(buf2);
+ return -1;
+ }
+ }
+ }else if(v->interlace && ((buf[0] & 0xC0) == 0xC0)){ /* WVC1 interlaced stores both fields divided by marker */
+ const uint8_t *divider;
+
+ divider = find_next_marker(buf, buf + buf_size);
+ if((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD){
+ av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
+ av_free(buf2);
+ return -1;
+ }
+
+ buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2);
+ // TODO
+ if(!v->warn_interlaced++)
+ av_log(v->s.avctx, AV_LOG_ERROR, "Interlaced WVC1 support is not implemented\n");
+ av_free(buf2);
+ return -1;
+ }else{
+ buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2);
+ }
+ init_get_bits(&s->gb, buf2, buf_size2*8);
+ } else
+ init_get_bits(&s->gb, buf, buf_size*8);
+ // do parse frame header
+ if(v->profile < PROFILE_ADVANCED) {
+ if(vc1_parse_frame_header(v, &s->gb) == -1) {
+ av_free(buf2);
+ return -1;
+ }
+ } else {
+ if(vc1_parse_frame_header_adv(v, &s->gb) == -1) {
+ av_free(buf2);
+ return -1;
+ }
+ }
+
+ if(s->pict_type != FF_I_TYPE && !v->res_rtm_flag){
+ av_free(buf2);
+ return -1;
+ }
+
+ // for hurry_up==5
+ s->current_picture.pict_type= s->pict_type;
+ s->current_picture.key_frame= s->pict_type == FF_I_TYPE;
+
+ /* skip B-frames if we don't have reference frames */
+ if(s->last_picture_ptr==NULL && (s->pict_type==FF_B_TYPE || s->dropable)){
+ av_free(buf2);
+ return -1;//buf_size;
+ }
+ /* skip b frames if we are in a hurry */
+ if(avctx->hurry_up && s->pict_type==FF_B_TYPE) return -1;//buf_size;
+ if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==FF_B_TYPE)
+ || (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=FF_I_TYPE)
+ || avctx->skip_frame >= AVDISCARD_ALL) {
+ av_free(buf2);
+ return buf_size;
+ }
+ /* skip everything if we are in a hurry>=5 */
+ if(avctx->hurry_up>=5) {
+ av_free(buf2);
+ return -1;//buf_size;
+ }
+
+ if(s->next_p_frame_damaged){
+ if(s->pict_type==FF_B_TYPE)
+ return buf_size;
+ else
+ s->next_p_frame_damaged=0;
+ }
+
+ if(MPV_frame_start(s, avctx) < 0) {
+ av_free(buf2);
+ return -1;
+ }
+
+ s->me.qpel_put= s->dsp.put_qpel_pixels_tab;
+ s->me.qpel_avg= s->dsp.avg_qpel_pixels_tab;
+
+ ff_er_frame_start(s);
+
+ v->bits = buf_size * 8;
+ vc1_decode_blocks(v);
+//av_log(s->avctx, AV_LOG_INFO, "Consumed %i/%i bits\n", get_bits_count(&s->gb), buf_size*8);
+// if(get_bits_count(&s->gb) > buf_size * 8)
+// return -1;
+ ff_er_frame_end(s);
+
+ MPV_frame_end(s);
+
+assert(s->current_picture.pict_type == s->current_picture_ptr->pict_type);
+assert(s->current_picture.pict_type == s->pict_type);
+ if (s->pict_type == FF_B_TYPE || s->low_delay) {
+ *pict= *(AVFrame*)s->current_picture_ptr;
+ } else if (s->last_picture_ptr != NULL) {
+ *pict= *(AVFrame*)s->last_picture_ptr;
+ }
+
+ if(s->last_picture_ptr || s->low_delay){
+ *data_size = sizeof(AVFrame);
+ ff_print_debug_info(s, pict);
+ }
+
+ av_free(buf2);
+ return buf_size;
+}
+
+
+/** Close a VC1/WMV3 decoder
+ * @warning Initial try at using MpegEncContext stuff
+ */
+static av_cold int vc1_decode_end(AVCodecContext *avctx)
+{
+ VC1Context *v = avctx->priv_data;
+
+ av_freep(&v->hrd_rate);
+ av_freep(&v->hrd_buffer);
+ MPV_common_end(&v->s);
+ av_freep(&v->mv_type_mb_plane);
+ av_freep(&v->direct_mb_plane);
+ av_freep(&v->acpred_plane);
+ av_freep(&v->over_flags_plane);
+ av_freep(&v->mb_type_base);
+ av_freep(&v->cbp_base);
+ ff_intrax8_common_end(&v->x8);
+ return 0;
+}
+
+
+AVCodec vc1_decoder = {
+ "vc1",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_VC1,
+ sizeof(VC1Context),
+ vc1_decode_init,
+ NULL,
+ vc1_decode_end,
+ vc1_decode_frame,
+ CODEC_CAP_DR1 | CODEC_CAP_DELAY,
+ /*.next = */NULL,
+ /*.flush = */NULL,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name = */NULL_IF_CONFIG_SMALL("SMPTE VC-1"),
+};
+
+#if CONFIG_WMV3_DECODER
+AVCodec wmv3_decoder = {
+ "wmv3",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_WMV3,
+ sizeof(VC1Context),
+ vc1_decode_init,
+ NULL,
+ vc1_decode_end,
+ vc1_decode_frame,
+ CODEC_CAP_DR1 | CODEC_CAP_DELAY,
+ /*.next = */NULL,
+ /*.flush = */NULL,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name = */NULL_IF_CONFIG_SMALL("Windows Media Video 9"),
+};
+#endif
+
+#include "vc1_dxva.c"
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vc1dsp.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vc1dsp.c
new file mode 100644
index 000000000..85896dbdd
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vc1dsp.c
@@ -0,0 +1,666 @@
+/*
+ * VC-1 and WMV3 decoder - DSP functions
+ * Copyright (c) 2006 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/vc1dsp.c
+ * VC-1 and WMV3 decoder
+ *
+ */
+
+#include "dsputil.h"
+
+
+/** Apply overlap transform to horizontal edge
+*/
+static void vc1_v_overlap_c(uint8_t* src, int stride)
+{
+ int i;
+ int a, b, c, d;
+ int d1, d2;
+ int rnd = 1;
+ for(i = 0; i < 8; i++) {
+ a = src[-2*stride];
+ b = src[-stride];
+ c = src[0];
+ d = src[stride];
+ d1 = (a - d + 3 + rnd) >> 3;
+ d2 = (a - d + b - c + 4 - rnd) >> 3;
+
+ src[-2*stride] = a - d1;
+ src[-stride] = av_clip_uint8(b - d2);
+ src[0] = av_clip_uint8(c + d2);
+ src[stride] = d + d1;
+ src++;
+ rnd = !rnd;
+ }
+}
+
+/** Apply overlap transform to vertical edge
+*/
+static void vc1_h_overlap_c(uint8_t* src, int stride)
+{
+ int i;
+ int a, b, c, d;
+ int d1, d2;
+ int rnd = 1;
+ for(i = 0; i < 8; i++) {
+ a = src[-2];
+ b = src[-1];
+ c = src[0];
+ d = src[1];
+ d1 = (a - d + 3 + rnd) >> 3;
+ d2 = (a - d + b - c + 4 - rnd) >> 3;
+
+ src[-2] = a - d1;
+ src[-1] = av_clip_uint8(b - d2);
+ src[0] = av_clip_uint8(c + d2);
+ src[1] = d + d1;
+ src += stride;
+ rnd = !rnd;
+ }
+}
+
+/**
+ * VC-1 in-loop deblocking filter for one line
+ * @param src pointer to the pixels to filter; the edge lies between src[-stride] and src[0]
+ * @param stride block stride
+ * @param pq block quantizer
+ * @return whether other 3 pairs should be filtered or not
+ * @see 8.6
+ */
+static av_always_inline int vc1_filter_line(uint8_t* src, int stride, int pq){
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+
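+ /* a0 measures the filtered difference across the edge; a1 and a2 are the same metric computed entirely on the pixels above and below the edge, respectively */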
+ int a0 = (2*(src[-2*stride] - src[ 1*stride]) - 5*(src[-1*stride] - src[ 0*stride]) + 4) >> 3;
+ int a0_sign = a0 >> 31; /* Store sign */
+ a0 = (a0 ^ a0_sign) - a0_sign; /* a0 = FFABS(a0); */
+ if(a0 < pq){
+ int a1 = FFABS((2*(src[-4*stride] - src[-1*stride]) - 5*(src[-3*stride] - src[-2*stride]) + 4) >> 3);
+ int a2 = FFABS((2*(src[ 0*stride] - src[ 3*stride]) - 5*(src[ 1*stride] - src[ 2*stride]) + 4) >> 3);
+ if(a1 < a0 || a2 < a0){
+ int clip = src[-1*stride] - src[ 0*stride];
+ int clip_sign = clip >> 31;
+ clip = ((clip ^ clip_sign) - clip_sign)>>1;
+ if(clip){
+ int a3 = FFMIN(a1, a2);
+ int d = 5 * (a3 - a0);
+ int d_sign = (d >> 31);
+ d = ((d ^ d_sign) - d_sign) >> 3;
+ d_sign ^= a0_sign;
+
+ if( d_sign ^ clip_sign )
+ d = 0;
+ else{
+ d = FFMIN(d, clip);
+ d = (d ^ d_sign) - d_sign; /* Restore sign */
+ src[-1*stride] = cm[src[-1*stride] - d];
+ src[ 0*stride] = cm[src[ 0*stride] + d];
+ }
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+/**
+ * VC-1 in-loop deblocking filter
+ * @param src pointer to the start of the edge to filter
+ * @param step distance between horizontally adjacent elements
+ * @param stride distance between vertically adjacent elements
+ * @param len edge length to filter (4 or 8 pixels)
+ * @param pq block quantizer
+ * @see 8.6
+ */
+static inline void vc1_loop_filter(uint8_t* src, int step, int stride, int len, int pq)
+{
+ int i;
+ int filt3;
+
+ for(i = 0; i < len; i += 4){
+ filt3 = vc1_filter_line(src + 2*step, stride, pq);
+ if(filt3){
+ vc1_filter_line(src + 0*step, stride, pq);
+ vc1_filter_line(src + 1*step, stride, pq);
+ vc1_filter_line(src + 3*step, stride, pq);
+ }
+ src += step * 4;
+ }
+}
+
+static void vc1_v_loop_filter4_c(uint8_t *src, int stride, int pq)
+{
+ vc1_loop_filter(src, 1, stride, 4, pq);
+}
+
+static void vc1_h_loop_filter4_c(uint8_t *src, int stride, int pq)
+{
+ vc1_loop_filter(src, stride, 1, 4, pq);
+}
+
+static void vc1_v_loop_filter8_c(uint8_t *src, int stride, int pq)
+{
+ vc1_loop_filter(src, 1, stride, 8, pq);
+}
+
+static void vc1_h_loop_filter8_c(uint8_t *src, int stride, int pq)
+{
+ vc1_loop_filter(src, stride, 1, 8, pq);
+}
+
+static void vc1_v_loop_filter16_c(uint8_t *src, int stride, int pq)
+{
+ vc1_loop_filter(src, 1, stride, 16, pq);
+}
+
+static void vc1_h_loop_filter16_c(uint8_t *src, int stride, int pq)
+{
+ vc1_loop_filter(src, stride, 1, 16, pq);
+}
+
+/** Do inverse transform on 8x8 block
+*/
+static void vc1_inv_trans_8x8_dc_c(uint8_t *dest, int linesize, DCTELEM *block)
+{
+ int i;
+ int dc = block[0];
+ const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
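+ /* apply the row and column transform gains to the lone DC coefficient, then add the result to every pixel of the 8x8 block */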
+ dc = (3 * dc + 1) >> 1;
+ dc = (3 * dc + 16) >> 5;
+ for(i = 0; i < 8; i++){
+ dest[0] = cm[dest[0]+dc];
+ dest[1] = cm[dest[1]+dc];
+ dest[2] = cm[dest[2]+dc];
+ dest[3] = cm[dest[3]+dc];
+ dest[4] = cm[dest[4]+dc];
+ dest[5] = cm[dest[5]+dc];
+ dest[6] = cm[dest[6]+dc];
+ dest[7] = cm[dest[7]+dc];
+ dest += linesize;
+ }
+}
+
+static void vc1_inv_trans_8x8_c(DCTELEM block[64])
+{
+ int i;
+ register int t1,t2,t3,t4,t5,t6,t7,t8;
+ DCTELEM *src, *dst;
+
+ src = block;
+ dst = block;
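+ /* first pass: 1-D inverse transform along each row */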
+ for(i = 0; i < 8; i++){
+ t1 = 12 * (src[0] + src[4]) + 4;
+ t2 = 12 * (src[0] - src[4]) + 4;
+ t3 = 16 * src[2] + 6 * src[6];
+ t4 = 6 * src[2] - 16 * src[6];
+
+ t5 = t1 + t3;
+ t6 = t2 + t4;
+ t7 = t2 - t4;
+ t8 = t1 - t3;
+
+ t1 = 16 * src[1] + 15 * src[3] + 9 * src[5] + 4 * src[7];
+ t2 = 15 * src[1] - 4 * src[3] - 16 * src[5] - 9 * src[7];
+ t3 = 9 * src[1] - 16 * src[3] + 4 * src[5] + 15 * src[7];
+ t4 = 4 * src[1] - 9 * src[3] + 15 * src[5] - 16 * src[7];
+
+ dst[0] = (t5 + t1) >> 3;
+ dst[1] = (t6 + t2) >> 3;
+ dst[2] = (t7 + t3) >> 3;
+ dst[3] = (t8 + t4) >> 3;
+ dst[4] = (t8 - t4) >> 3;
+ dst[5] = (t7 - t3) >> 3;
+ dst[6] = (t6 - t2) >> 3;
+ dst[7] = (t5 - t1) >> 3;
+
+ src += 8;
+ dst += 8;
+ }
+
+ src = block;
+ dst = block;
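+ /* second pass: 1-D inverse transform down each column, with final rounding and scaling */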
+ for(i = 0; i < 8; i++){
+ t1 = 12 * (src[ 0] + src[32]) + 64;
+ t2 = 12 * (src[ 0] - src[32]) + 64;
+ t3 = 16 * src[16] + 6 * src[48];
+ t4 = 6 * src[16] - 16 * src[48];
+
+ t5 = t1 + t3;
+ t6 = t2 + t4;
+ t7 = t2 - t4;
+ t8 = t1 - t3;
+
+ t1 = 16 * src[ 8] + 15 * src[24] + 9 * src[40] + 4 * src[56];
+ t2 = 15 * src[ 8] - 4 * src[24] - 16 * src[40] - 9 * src[56];
+ t3 = 9 * src[ 8] - 16 * src[24] + 4 * src[40] + 15 * src[56];
+ t4 = 4 * src[ 8] - 9 * src[24] + 15 * src[40] - 16 * src[56];
+
+ dst[ 0] = (t5 + t1) >> 7;
+ dst[ 8] = (t6 + t2) >> 7;
+ dst[16] = (t7 + t3) >> 7;
+ dst[24] = (t8 + t4) >> 7;
+ dst[32] = (t8 - t4 + 1) >> 7;
+ dst[40] = (t7 - t3 + 1) >> 7;
+ dst[48] = (t6 - t2 + 1) >> 7;
+ dst[56] = (t5 - t1 + 1) >> 7;
+
+ src++;
+ dst++;
+ }
+}
+
+/** Do inverse transform on 8x4 part of block
+*/
+static void vc1_inv_trans_8x4_dc_c(uint8_t *dest, int linesize, DCTELEM *block)
+{
+ int i;
+ int dc = block[0];
+ const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+ dc = ( 3 * dc + 1) >> 1;
+ dc = (17 * dc + 64) >> 7;
+ for(i = 0; i < 4; i++){
+ dest[0] = cm[dest[0]+dc];
+ dest[1] = cm[dest[1]+dc];
+ dest[2] = cm[dest[2]+dc];
+ dest[3] = cm[dest[3]+dc];
+ dest[4] = cm[dest[4]+dc];
+ dest[5] = cm[dest[5]+dc];
+ dest[6] = cm[dest[6]+dc];
+ dest[7] = cm[dest[7]+dc];
+ dest += linesize;
+ }
+}
+
+static void vc1_inv_trans_8x4_c(uint8_t *dest, int linesize, DCTELEM *block)
+{
+ int i;
+ register int t1,t2,t3,t4,t5,t6,t7,t8;
+ DCTELEM *src, *dst;
+ const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+
+ src = block;
+ dst = block;
+ for(i = 0; i < 4; i++){
+ t1 = 12 * (src[0] + src[4]) + 4;
+ t2 = 12 * (src[0] - src[4]) + 4;
+ t3 = 16 * src[2] + 6 * src[6];
+ t4 = 6 * src[2] - 16 * src[6];
+
+ t5 = t1 + t3;
+ t6 = t2 + t4;
+ t7 = t2 - t4;
+ t8 = t1 - t3;
+
+ t1 = 16 * src[1] + 15 * src[3] + 9 * src[5] + 4 * src[7];
+ t2 = 15 * src[1] - 4 * src[3] - 16 * src[5] - 9 * src[7];
+ t3 = 9 * src[1] - 16 * src[3] + 4 * src[5] + 15 * src[7];
+ t4 = 4 * src[1] - 9 * src[3] + 15 * src[5] - 16 * src[7];
+
+ dst[0] = (t5 + t1) >> 3;
+ dst[1] = (t6 + t2) >> 3;
+ dst[2] = (t7 + t3) >> 3;
+ dst[3] = (t8 + t4) >> 3;
+ dst[4] = (t8 - t4) >> 3;
+ dst[5] = (t7 - t3) >> 3;
+ dst[6] = (t6 - t2) >> 3;
+ dst[7] = (t5 - t1) >> 3;
+
+ src += 8;
+ dst += 8;
+ }
+
+ src = block;
+ for(i = 0; i < 8; i++){
+ t1 = 17 * (src[ 0] + src[16]) + 64;
+ t2 = 17 * (src[ 0] - src[16]) + 64;
+ t3 = 22 * src[ 8] + 10 * src[24];
+ t4 = 22 * src[24] - 10 * src[ 8];
+
+ dest[0*linesize] = cm[dest[0*linesize] + ((t1 + t3) >> 7)];
+ dest[1*linesize] = cm[dest[1*linesize] + ((t2 - t4) >> 7)];
+ dest[2*linesize] = cm[dest[2*linesize] + ((t2 + t4) >> 7)];
+ dest[3*linesize] = cm[dest[3*linesize] + ((t1 - t3) >> 7)];
+
+ src ++;
+ dest++;
+ }
+}
+
+/** Do inverse transform on 4x8 parts of block
+*/
+static void vc1_inv_trans_4x8_dc_c(uint8_t *dest, int linesize, DCTELEM *block)
+{
+ int i;
+ int dc = block[0];
+ const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+ dc = (17 * dc + 4) >> 3;
+ dc = (12 * dc + 64) >> 7;
+ for(i = 0; i < 8; i++){
+ dest[0] = cm[dest[0]+dc];
+ dest[1] = cm[dest[1]+dc];
+ dest[2] = cm[dest[2]+dc];
+ dest[3] = cm[dest[3]+dc];
+ dest += linesize;
+ }
+}
+
+static void vc1_inv_trans_4x8_c(uint8_t *dest, int linesize, DCTELEM *block)
+{
+ int i;
+ register int t1,t2,t3,t4,t5,t6,t7,t8;
+ DCTELEM *src, *dst;
+ const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+
+ src = block;
+ dst = block;
+ for(i = 0; i < 8; i++){
+ t1 = 17 * (src[0] + src[2]) + 4;
+ t2 = 17 * (src[0] - src[2]) + 4;
+ t3 = 22 * src[1] + 10 * src[3];
+ t4 = 22 * src[3] - 10 * src[1];
+
+ dst[0] = (t1 + t3) >> 3;
+ dst[1] = (t2 - t4) >> 3;
+ dst[2] = (t2 + t4) >> 3;
+ dst[3] = (t1 - t3) >> 3;
+
+ src += 8;
+ dst += 8;
+ }
+
+ src = block;
+ for(i = 0; i < 4; i++){
+ t1 = 12 * (src[ 0] + src[32]) + 64;
+ t2 = 12 * (src[ 0] - src[32]) + 64;
+ t3 = 16 * src[16] + 6 * src[48];
+ t4 = 6 * src[16] - 16 * src[48];
+
+ t5 = t1 + t3;
+ t6 = t2 + t4;
+ t7 = t2 - t4;
+ t8 = t1 - t3;
+
+ t1 = 16 * src[ 8] + 15 * src[24] + 9 * src[40] + 4 * src[56];
+ t2 = 15 * src[ 8] - 4 * src[24] - 16 * src[40] - 9 * src[56];
+ t3 = 9 * src[ 8] - 16 * src[24] + 4 * src[40] + 15 * src[56];
+ t4 = 4 * src[ 8] - 9 * src[24] + 15 * src[40] - 16 * src[56];
+
+ dest[0*linesize] = cm[dest[0*linesize] + ((t5 + t1) >> 7)];
+ dest[1*linesize] = cm[dest[1*linesize] + ((t6 + t2) >> 7)];
+ dest[2*linesize] = cm[dest[2*linesize] + ((t7 + t3) >> 7)];
+ dest[3*linesize] = cm[dest[3*linesize] + ((t8 + t4) >> 7)];
+ dest[4*linesize] = cm[dest[4*linesize] + ((t8 - t4 + 1) >> 7)];
+ dest[5*linesize] = cm[dest[5*linesize] + ((t7 - t3 + 1) >> 7)];
+ dest[6*linesize] = cm[dest[6*linesize] + ((t6 - t2 + 1) >> 7)];
+ dest[7*linesize] = cm[dest[7*linesize] + ((t5 - t1 + 1) >> 7)];
+
+ src ++;
+ dest++;
+ }
+}
+
+/** Do inverse transform on 4x4 part of block
+*/
+static void vc1_inv_trans_4x4_dc_c(uint8_t *dest, int linesize, DCTELEM *block)
+{
+ int i;
+ int dc = block[0];
+ const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+ dc = (17 * dc + 4) >> 3;
+ dc = (17 * dc + 64) >> 7;
+ for(i = 0; i < 4; i++){
+ dest[0] = cm[dest[0]+dc];
+ dest[1] = cm[dest[1]+dc];
+ dest[2] = cm[dest[2]+dc];
+ dest[3] = cm[dest[3]+dc];
+ dest += linesize;
+ }
+}
+
+static void vc1_inv_trans_4x4_c(uint8_t *dest, int linesize, DCTELEM *block)
+{
+ int i;
+ register int t1,t2,t3,t4;
+ DCTELEM *src, *dst;
+ const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+
+ src = block;
+ dst = block;
+ for(i = 0; i < 4; i++){
+ t1 = 17 * (src[0] + src[2]) + 4;
+ t2 = 17 * (src[0] - src[2]) + 4;
+ t3 = 22 * src[1] + 10 * src[3];
+ t4 = 22 * src[3] - 10 * src[1];
+
+ dst[0] = (t1 + t3) >> 3;
+ dst[1] = (t2 - t4) >> 3;
+ dst[2] = (t2 + t4) >> 3;
+ dst[3] = (t1 - t3) >> 3;
+
+ src += 8;
+ dst += 8;
+ }
+
+ src = block;
+ for(i = 0; i < 4; i++){
+ t1 = 17 * (src[ 0] + src[16]) + 64;
+ t2 = 17 * (src[ 0] - src[16]) + 64;
+ t3 = 22 * src[ 8] + 10 * src[24];
+ t4 = 22 * src[24] - 10 * src[ 8];
+
+ dest[0*linesize] = cm[dest[0*linesize] + ((t1 + t3) >> 7)];
+ dest[1*linesize] = cm[dest[1*linesize] + ((t2 - t4) >> 7)];
+ dest[2*linesize] = cm[dest[2*linesize] + ((t2 + t4) >> 7)];
+ dest[3*linesize] = cm[dest[3*linesize] + ((t1 - t3) >> 7)];
+
+ src ++;
+ dest++;
+ }
+}
+
+/* motion compensation functions */
+/** 16-bit intermediate filter, used when both horizontal and vertical interpolation are applied */
+#define VC1_MSPEL_FILTER_16B(DIR, TYPE) \
+static av_always_inline int vc1_mspel_ ## DIR ## _filter_16bits(const TYPE *src, int stride, int mode) \
+{ \
+ switch(mode){ \
+ case 0: /* no shift - should not occur */ \
+ return 0; \
+ case 1: /* 1/4 shift */ \
+ return -4*src[-stride] + 53*src[0] + 18*src[stride] - 3*src[stride*2]; \
+ case 2: /* 1/2 shift */ \
+ return -src[-stride] + 9*src[0] + 9*src[stride] - src[stride*2]; \
+ case 3: /* 3/4 shift */ \
+ return -3*src[-stride] + 18*src[0] + 53*src[stride] - 4*src[stride*2]; \
+ } \
+ return 0; /* should not occur */ \
+}
+
+VC1_MSPEL_FILTER_16B(ver, uint8_t);
+VC1_MSPEL_FILTER_16B(hor, int16_t);
+
+
+/** Filter used to interpolate fractional pel values
+ */
+static av_always_inline int vc1_mspel_filter(const uint8_t *src, int stride, int mode, int r)
+{
+ switch(mode){
+ case 0: //no shift
+ return src[0];
+ case 1: // 1/4 shift
+ return (-4*src[-stride] + 53*src[0] + 18*src[stride] - 3*src[stride*2] + 32 - r) >> 6;
+ case 2: // 1/2 shift
+ return (-src[-stride] + 9*src[0] + 9*src[stride] - src[stride*2] + 8 - r) >> 4;
+ case 3: // 3/4 shift
+ return (-3*src[-stride] + 18*src[0] + 53*src[stride] - 4*src[stride*2] + 32 - r) >> 6;
+ }
+ return 0; //should not occur
+}
+
+/** Function used to do motion compensation with bicubic interpolation
+ */
+#define VC1_MSPEL_MC(OP, OPNAME)\
+static void OPNAME ## vc1_mspel_mc(uint8_t *dst, const uint8_t *src, int stride, int hmode, int vmode, int rnd)\
+{\
+ int i, j;\
+\
+ if (vmode) { /* Vertical interpolation needed */\
+ int r;\
+\
+ if (hmode) { /* Horizontal interpolation also needed: do the vertical pass into tmp first */\
+ static const int shift_value[] = { 0, 5, 1, 5 };\
+ int shift = (shift_value[hmode]+shift_value[vmode])>>1;\
+ int16_t tmp[11*8], *tptr = tmp;\
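+ /* tmp: 8 vertically filtered rows of 11 samples each (8 output columns plus 3 extra samples for the 4-tap horizontal pass) */\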
+\
+ r = (1<<(shift-1)) + rnd-1;\
+\
+ src -= 1;\
+ for(j = 0; j < 8; j++) {\
+ for(i = 0; i < 11; i++)\
+ tptr[i] = (vc1_mspel_ver_filter_16bits(src + i, stride, vmode)+r)>>shift;\
+ src += stride;\
+ tptr += 11;\
+ }\
+\
+ r = 64-rnd;\
+ tptr = tmp+1;\
+ for(j = 0; j < 8; j++) {\
+ for(i = 0; i < 8; i++)\
+ OP(dst[i], (vc1_mspel_hor_filter_16bits(tptr + i, 1, hmode)+r)>>7);\
+ dst += stride;\
+ tptr += 11;\
+ }\
+\
+ return;\
+ }\
+ else { /* No horizontal filter, output 8 lines to dst */\
+ r = 1-rnd;\
+\
+ for(j = 0; j < 8; j++) {\
+ for(i = 0; i < 8; i++)\
+ OP(dst[i], vc1_mspel_filter(src + i, stride, vmode, r));\
+ src += stride;\
+ dst += stride;\
+ }\
+ return;\
+ }\
+ }\
+\
+ /* Horizontal mode with no vertical mode */\
+ for(j = 0; j < 8; j++) {\
+ for(i = 0; i < 8; i++)\
+ OP(dst[i], vc1_mspel_filter(src + i, 1, hmode, rnd));\
+ dst += stride;\
+ src += stride;\
+ }\
+}
+
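+/* write-back operators: op_put stores the clipped value, op_avg averages it into the existing pixel with upward rounding */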
+#define op_put(a, b) a = av_clip_uint8(b)
+#define op_avg(a, b) a = (a + av_clip_uint8(b) + 1) >> 1
+
+VC1_MSPEL_MC(op_put, put_)
+VC1_MSPEL_MC(op_avg, avg_)
+
+/* pixel functions - really are entry points to vc1_mspel_mc */
+
+/* these two are defined in dsputil.c */
+void ff_put_vc1_mspel_mc00_c(uint8_t *dst, const uint8_t *src, int stride, int rnd);
+void ff_avg_vc1_mspel_mc00_c(uint8_t *dst, const uint8_t *src, int stride, int rnd);
+
+#define PUT_VC1_MSPEL(a, b)\
+static void put_vc1_mspel_mc ## a ## b ##_c(uint8_t *dst, const uint8_t *src, int stride, int rnd) { \
+ put_vc1_mspel_mc(dst, src, stride, a, b, rnd); \
+}\
+static void avg_vc1_mspel_mc ## a ## b ##_c(uint8_t *dst, const uint8_t *src, int stride, int rnd) { \
+ avg_vc1_mspel_mc(dst, src, stride, a, b, rnd); \
+}
+
+PUT_VC1_MSPEL(1, 0)
+PUT_VC1_MSPEL(2, 0)
+PUT_VC1_MSPEL(3, 0)
+
+PUT_VC1_MSPEL(0, 1)
+PUT_VC1_MSPEL(1, 1)
+PUT_VC1_MSPEL(2, 1)
+PUT_VC1_MSPEL(3, 1)
+
+PUT_VC1_MSPEL(0, 2)
+PUT_VC1_MSPEL(1, 2)
+PUT_VC1_MSPEL(2, 2)
+PUT_VC1_MSPEL(3, 2)
+
+PUT_VC1_MSPEL(0, 3)
+PUT_VC1_MSPEL(1, 3)
+PUT_VC1_MSPEL(2, 3)
+PUT_VC1_MSPEL(3, 3)
+
+av_cold void ff_vc1dsp_init(DSPContext* dsp, AVCodecContext *avctx) {
+ dsp->vc1_inv_trans_8x8 = vc1_inv_trans_8x8_c;
+ dsp->vc1_inv_trans_4x8 = vc1_inv_trans_4x8_c;
+ dsp->vc1_inv_trans_8x4 = vc1_inv_trans_8x4_c;
+ dsp->vc1_inv_trans_4x4 = vc1_inv_trans_4x4_c;
+ dsp->vc1_inv_trans_8x8_dc = vc1_inv_trans_8x8_dc_c;
+ dsp->vc1_inv_trans_4x8_dc = vc1_inv_trans_4x8_dc_c;
+ dsp->vc1_inv_trans_8x4_dc = vc1_inv_trans_8x4_dc_c;
+ dsp->vc1_inv_trans_4x4_dc = vc1_inv_trans_4x4_dc_c;
+ dsp->vc1_h_overlap = vc1_h_overlap_c;
+ dsp->vc1_v_overlap = vc1_v_overlap_c;
+ dsp->vc1_v_loop_filter4 = vc1_v_loop_filter4_c;
+ dsp->vc1_h_loop_filter4 = vc1_h_loop_filter4_c;
+ dsp->vc1_v_loop_filter8 = vc1_v_loop_filter8_c;
+ dsp->vc1_h_loop_filter8 = vc1_h_loop_filter8_c;
+ dsp->vc1_v_loop_filter16 = vc1_v_loop_filter16_c;
+ dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_c;
+
+ dsp->put_vc1_mspel_pixels_tab[ 0] = ff_put_vc1_mspel_mc00_c;
+ dsp->put_vc1_mspel_pixels_tab[ 1] = put_vc1_mspel_mc10_c;
+ dsp->put_vc1_mspel_pixels_tab[ 2] = put_vc1_mspel_mc20_c;
+ dsp->put_vc1_mspel_pixels_tab[ 3] = put_vc1_mspel_mc30_c;
+ dsp->put_vc1_mspel_pixels_tab[ 4] = put_vc1_mspel_mc01_c;
+ dsp->put_vc1_mspel_pixels_tab[ 5] = put_vc1_mspel_mc11_c;
+ dsp->put_vc1_mspel_pixels_tab[ 6] = put_vc1_mspel_mc21_c;
+ dsp->put_vc1_mspel_pixels_tab[ 7] = put_vc1_mspel_mc31_c;
+ dsp->put_vc1_mspel_pixels_tab[ 8] = put_vc1_mspel_mc02_c;
+ dsp->put_vc1_mspel_pixels_tab[ 9] = put_vc1_mspel_mc12_c;
+ dsp->put_vc1_mspel_pixels_tab[10] = put_vc1_mspel_mc22_c;
+ dsp->put_vc1_mspel_pixels_tab[11] = put_vc1_mspel_mc32_c;
+ dsp->put_vc1_mspel_pixels_tab[12] = put_vc1_mspel_mc03_c;
+ dsp->put_vc1_mspel_pixels_tab[13] = put_vc1_mspel_mc13_c;
+ dsp->put_vc1_mspel_pixels_tab[14] = put_vc1_mspel_mc23_c;
+ dsp->put_vc1_mspel_pixels_tab[15] = put_vc1_mspel_mc33_c;
+
+ dsp->avg_vc1_mspel_pixels_tab[ 0] = ff_avg_vc1_mspel_mc00_c;
+ dsp->avg_vc1_mspel_pixels_tab[ 1] = avg_vc1_mspel_mc10_c;
+ dsp->avg_vc1_mspel_pixels_tab[ 2] = avg_vc1_mspel_mc20_c;
+ dsp->avg_vc1_mspel_pixels_tab[ 3] = avg_vc1_mspel_mc30_c;
+ dsp->avg_vc1_mspel_pixels_tab[ 4] = avg_vc1_mspel_mc01_c;
+ dsp->avg_vc1_mspel_pixels_tab[ 5] = avg_vc1_mspel_mc11_c;
+ dsp->avg_vc1_mspel_pixels_tab[ 6] = avg_vc1_mspel_mc21_c;
+ dsp->avg_vc1_mspel_pixels_tab[ 7] = avg_vc1_mspel_mc31_c;
+ dsp->avg_vc1_mspel_pixels_tab[ 8] = avg_vc1_mspel_mc02_c;
+ dsp->avg_vc1_mspel_pixels_tab[ 9] = avg_vc1_mspel_mc12_c;
+ dsp->avg_vc1_mspel_pixels_tab[10] = avg_vc1_mspel_mc22_c;
+ dsp->avg_vc1_mspel_pixels_tab[11] = avg_vc1_mspel_mc32_c;
+ dsp->avg_vc1_mspel_pixels_tab[12] = avg_vc1_mspel_mc03_c;
+ dsp->avg_vc1_mspel_pixels_tab[13] = avg_vc1_mspel_mc13_c;
+ dsp->avg_vc1_mspel_pixels_tab[14] = avg_vc1_mspel_mc23_c;
+ dsp->avg_vc1_mspel_pixels_tab[15] = avg_vc1_mspel_mc33_c;
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp3.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp3.c
new file mode 100644
index 000000000..e3595e718
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp3.c
@@ -0,0 +1,2436 @@
+/*
+ * Copyright (C) 2003-2004 the ffmpeg project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/vp3.c
+ * On2 VP3 Video Decoder
+ *
+ * VP3 Video Decoder by Mike Melanson (mike at multimedia.cx)
+ * For more information about the VP3 coding process, visit:
+ * http://wiki.multimedia.cx/index.php?title=On2_VP3
+ *
+ * Theora decoder by Alex Beregszaszi
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "avcodec.h"
+#include "dsputil.h"
+#include "get_bits.h"
+
+#include "vp3data.h"
+#include "xiph.h"
+
+#define FRAGMENT_PIXELS 8
+
+static av_cold int vp3_decode_end(AVCodecContext *avctx);
+
+typedef struct Coeff {
+ struct Coeff *next;
+ DCTELEM coeff;
+ uint8_t index;
+} Coeff;
+
+//FIXME split things out into their own arrays
+typedef struct Vp3Fragment {
+ Coeff *next_coeff;
+ /* address of first pixel taking into account which plane the fragment
+ * lives on as well as the plane stride */
+ int first_pixel;
+ /* this is the macroblock that the fragment belongs to */
+ uint16_t macroblock;
+ uint8_t coding_method;
+ int8_t motion_x;
+ int8_t motion_y;
+ uint8_t qpi;
+} Vp3Fragment;
+
+#define _ilog(i) av_log2(2*(i)) /* ffdshow custom code */
+
+#define SB_NOT_CODED 0
+#define SB_PARTIALLY_CODED 1
+#define SB_FULLY_CODED 2
+
+#define MODE_INTER_NO_MV 0
+#define MODE_INTRA 1
+#define MODE_INTER_PLUS_MV 2
+#define MODE_INTER_LAST_MV 3
+#define MODE_INTER_PRIOR_LAST 4
+#define MODE_USING_GOLDEN 5
+#define MODE_GOLDEN_MV 6
+#define MODE_INTER_FOURMV 7
+#define CODING_MODE_COUNT 8
+
+/* special internal mode */
+#define MODE_COPY 8
+
+/* There are 6 preset schemes, plus a free-form scheme */
+static const int ModeAlphabet[6][CODING_MODE_COUNT] =
+{
+ /* scheme 1: Last motion vector dominates */
+ { MODE_INTER_LAST_MV, MODE_INTER_PRIOR_LAST,
+ MODE_INTER_PLUS_MV, MODE_INTER_NO_MV,
+ MODE_INTRA, MODE_USING_GOLDEN,
+ MODE_GOLDEN_MV, MODE_INTER_FOURMV },
+
+ /* scheme 2 */
+ { MODE_INTER_LAST_MV, MODE_INTER_PRIOR_LAST,
+ MODE_INTER_NO_MV, MODE_INTER_PLUS_MV,
+ MODE_INTRA, MODE_USING_GOLDEN,
+ MODE_GOLDEN_MV, MODE_INTER_FOURMV },
+
+ /* scheme 3 */
+ { MODE_INTER_LAST_MV, MODE_INTER_PLUS_MV,
+ MODE_INTER_PRIOR_LAST, MODE_INTER_NO_MV,
+ MODE_INTRA, MODE_USING_GOLDEN,
+ MODE_GOLDEN_MV, MODE_INTER_FOURMV },
+
+ /* scheme 4 */
+ { MODE_INTER_LAST_MV, MODE_INTER_PLUS_MV,
+ MODE_INTER_NO_MV, MODE_INTER_PRIOR_LAST,
+ MODE_INTRA, MODE_USING_GOLDEN,
+ MODE_GOLDEN_MV, MODE_INTER_FOURMV },
+
+ /* scheme 5: No motion vector dominates */
+ { MODE_INTER_NO_MV, MODE_INTER_LAST_MV,
+ MODE_INTER_PRIOR_LAST, MODE_INTER_PLUS_MV,
+ MODE_INTRA, MODE_USING_GOLDEN,
+ MODE_GOLDEN_MV, MODE_INTER_FOURMV },
+
+ /* scheme 6 */
+ { MODE_INTER_NO_MV, MODE_USING_GOLDEN,
+ MODE_INTER_LAST_MV, MODE_INTER_PRIOR_LAST,
+ MODE_INTER_PLUS_MV, MODE_INTRA,
+ MODE_GOLDEN_MV, MODE_INTER_FOURMV },
+
+};
+
+#define MIN_DEQUANT_VAL 2
+
+typedef struct Vp3DecodeContext {
+ AVCodecContext *avctx;
+ int theora, theora_tables;
+ int version;
+ int width, height;
+ AVFrame golden_frame;
+ AVFrame last_frame;
+ AVFrame current_frame;
+ int keyframe;
+ DSPContext dsp;
+ int flipped_image;
+
+ int qps[3];
+ int nqps;
+ int last_qps[3];
+
+ int superblock_count;
+ int y_superblock_width;
+ int y_superblock_height;
+ int c_superblock_width;
+ int c_superblock_height;
+ int u_superblock_start;
+ int v_superblock_start;
+ unsigned char *superblock_coding;
+
+ int macroblock_count;
+ int macroblock_width;
+ int macroblock_height;
+
+ int fragment_count;
+ int fragment_width;
+ int fragment_height;
+
+ Vp3Fragment *all_fragments;
+ uint8_t *coeff_counts;
+ Coeff *coeffs;
+ Coeff *next_coeff;
+ int fragment_start[3];
+
+ ScanTable scantable;
+
+ /* tables */
+ uint16_t coded_dc_scale_factor[64];
+ uint32_t coded_ac_scale_factor[64];
+ uint8_t base_matrix[384][64];
+ uint8_t qr_count[2][3];
+ uint8_t qr_size [2][3][64];
+ uint16_t qr_base[2][3][64];
+
+ /* this is a list of indexes into the all_fragments array indicating
+ * which of the fragments are coded */
+ int *coded_fragment_list;
+ int coded_fragment_list_index;
+ int pixel_addresses_initialized;
+
+ /* track which fragments have already been decoded; called 'fast'
+ * because this data structure avoids having to iterate through every
+ * fragment in coded_fragment_list; once a fragment has been fully
+ * decoded, it is removed from this list */
+ int *fast_fragment_list;
+ int fragment_list_y_head;
+ int fragment_list_c_head;
+
+ VLC dc_vlc[16];
+ VLC ac_vlc_1[16];
+ VLC ac_vlc_2[16];
+ VLC ac_vlc_3[16];
+ VLC ac_vlc_4[16];
+
+ VLC superblock_run_length_vlc;
+ VLC fragment_run_length_vlc;
+ VLC mode_code_vlc;
+ VLC motion_vector_vlc;
+
+ /* these arrays need to be on 16-byte boundaries since SSE2 operations
+ * index into them */
+ DECLARE_ALIGNED_16(int16_t, qmat)[3][2][3][64]; ///< qmat[qpi][is_inter][plane]
+
+ /* This table contains superblock_count * 16 entries. Each set of 16
+ * numbers corresponds to the fragment indexes 0..15 of the superblock.
+ * An entry will be -1 to indicate that no entry corresponds to that
+ * index. */
+ int *superblock_fragments;
+
+ /* This table contains superblock_count * 4 entries. Each set of 4
+ * numbers corresponds to the macroblock indexes 0..3 of the superblock.
+ * An entry will be -1 to indicate that no entry corresponds to that
+ * index. */
+ int *superblock_macroblocks;
+
+ /* This table contains macroblock_count * 6 entries. Each set of 6
+ * numbers corresponds to the fragment indexes 0..5 which comprise
+ * the macroblock (4 Y fragments and 2 C fragments). */
+ int *macroblock_fragments;
+ /* This is an array that indicates how a particular macroblock
+ * is coded. */
+ unsigned char *macroblock_coding;
+
+ int first_coded_y_fragment;
+ int first_coded_c_fragment;
+ int last_coded_y_fragment;
+ int last_coded_c_fragment;
+
+ uint8_t edge_emu_buffer[9*2048]; //FIXME dynamic alloc
+ int8_t qscale_table[2048]; //FIXME dynamic alloc (width+15)/16
+
+ /* Huffman decode */
+ int hti;
+ unsigned int hbits;
+ int entries;
+ int huff_code_size;
+ uint16_t huffman_table[80][32][2];
+
+ uint8_t filter_limit_values[64];
+ DECLARE_ALIGNED_8(int, bounding_values_array)[256+2];
+
+ /* ffdshow custom stuffs (begin) */
+ int fps_numerator,fps_denumerator;
+ int64_t granulepos;
+ int keyframe_granule_shift,keyframe_frequency_force;
+ /* ffdshow custom stuffs (end) */
+} Vp3DecodeContext;
+
+/************************************************************************
+ * VP3 specific functions
+ ************************************************************************/
+
+/*
+ * This function sets up all of the various block mappings:
+ * superblocks <-> fragments, macroblocks <-> fragments,
+ * superblocks <-> macroblocks
+ *
+ * Returns 0 if successful; returns 1 if *anything* went wrong.
+ */
+static int init_block_mapping(Vp3DecodeContext *s)
+{
+ int i, j;
+ signed int hilbert_walk_mb[4];
+
+ int current_fragment = 0;
+ int current_width = 0;
+ int current_height = 0;
+ int right_edge = 0;
+ int bottom_edge = 0;
+ int superblock_row_inc = 0;
+ int mapping_index = 0;
+
+ int current_macroblock;
+ int c_fragment;
+
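+ /* per-step x/y offsets for walking the 16 fragments of a superblock in coding order */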
+ static const signed char travel_width[16] = {
+ 1, 1, 0, -1,
+ 0, 0, 1, 0,
+ 1, 0, 1, 0,
+ 0, -1, 0, 1
+ };
+
+ static const signed char travel_height[16] = {
+ 0, 0, 1, 0,
+ 1, 1, 0, -1,
+ 0, 1, 0, -1,
+ -1, 0, -1, 0
+ };
+
+ static const signed char travel_width_mb[4] = {
+ 1, 0, 1, 0
+ };
+
+ static const signed char travel_height_mb[4] = {
+ 0, 1, 0, -1
+ };
+
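+ /* macroblock-index deltas for visiting the 4 macroblocks of a superblock in Hilbert order */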
+ hilbert_walk_mb[0] = 1;
+ hilbert_walk_mb[1] = s->macroblock_width;
+ hilbert_walk_mb[2] = 1;
+ hilbert_walk_mb[3] = -s->macroblock_width;
+
+ /* iterate through each superblock (all planes) and map the fragments */
+ for (i = 0; i < s->superblock_count; i++) {
+ /* time to re-assign the limits? */
+ if (i == 0) {
+
+ /* start of Y superblocks */
+ right_edge = s->fragment_width;
+ bottom_edge = s->fragment_height;
+ current_width = -1;
+ current_height = 0;
+ superblock_row_inc = 3 * s->fragment_width -
+ (s->y_superblock_width * 4 - s->fragment_width);
+
+ /* the first operation for this variable is to advance by 1 */
+ current_fragment = -1;
+
+ } else if (i == s->u_superblock_start) {
+
+ /* start of U superblocks */
+ right_edge = s->fragment_width / 2;
+ bottom_edge = s->fragment_height / 2;
+ current_width = -1;
+ current_height = 0;
+ superblock_row_inc = 3 * (s->fragment_width / 2) -
+ (s->c_superblock_width * 4 - s->fragment_width / 2);
+
+ /* the first operation for this variable is to advance by 1 */
+ current_fragment = s->fragment_start[1] - 1;
+
+ } else if (i == s->v_superblock_start) {
+
+ /* start of V superblocks */
+ right_edge = s->fragment_width / 2;
+ bottom_edge = s->fragment_height / 2;
+ current_width = -1;
+ current_height = 0;
+ superblock_row_inc = 3 * (s->fragment_width / 2) -
+ (s->c_superblock_width * 4 - s->fragment_width / 2);
+
+ /* the first operation for this variable is to advance by 1 */
+ current_fragment = s->fragment_start[2] - 1;
+
+ }
+
+ if (current_width >= right_edge - 1) {
+ /* reset width and move to next superblock row */
+ current_width = -1;
+ current_height += 4;
+
+ /* fragment is now at the start of a new superblock row */
+ current_fragment += superblock_row_inc;
+ }
+
+ /* iterate through all 16 fragments in a superblock */
+ for (j = 0; j < 16; j++) {
+ current_fragment += travel_width[j] + right_edge * travel_height[j];
+ current_width += travel_width[j];
+ current_height += travel_height[j];
+
+ /* check if the fragment is in bounds */
+ if ((current_width < right_edge) &&
+ (current_height < bottom_edge)) {
+ s->superblock_fragments[mapping_index] = current_fragment;
+ } else {
+ s->superblock_fragments[mapping_index] = -1;
+ }
+
+ mapping_index++;
+ }
+ }
+
+ /* initialize the superblock <-> macroblock mapping; iterate through
+ * all of the Y plane superblocks to build this mapping */
+ right_edge = s->macroblock_width;
+ bottom_edge = s->macroblock_height;
+ current_width = -1;
+ current_height = 0;
+ superblock_row_inc = s->macroblock_width -
+ (s->y_superblock_width * 2 - s->macroblock_width);
+ mapping_index = 0;
+ current_macroblock = -1;
+ for (i = 0; i < s->u_superblock_start; i++) {
+
+ if (current_width >= right_edge - 1) {
+ /* reset width and move to next superblock row */
+ current_width = -1;
+ current_height += 2;
+
+ /* macroblock is now at the start of a new superblock row */
+ current_macroblock += superblock_row_inc;
+ }
+
+ /* iterate through each potential macroblock in the superblock */
+ for (j = 0; j < 4; j++) {
+ current_macroblock += hilbert_walk_mb[j];
+ current_width += travel_width_mb[j];
+ current_height += travel_height_mb[j];
+
+ /* check if the macroblock is in bounds */
+ if ((current_width < right_edge) &&
+ (current_height < bottom_edge)) {
+ s->superblock_macroblocks[mapping_index] = current_macroblock;
+ } else {
+ s->superblock_macroblocks[mapping_index] = -1;
+ }
+
+ mapping_index++;
+ }
+ }
+
+ /* initialize the macroblock <-> fragment mapping */
+ current_fragment = 0;
+ current_macroblock = 0;
+ mapping_index = 0;
+ for (i = 0; i < s->fragment_height; i += 2) {
+
+ for (j = 0; j < s->fragment_width; j += 2) {
+
+ s->all_fragments[current_fragment].macroblock = current_macroblock;
+ s->macroblock_fragments[mapping_index++] = current_fragment;
+
+ if (j + 1 < s->fragment_width) {
+ s->all_fragments[current_fragment + 1].macroblock = current_macroblock;
+ s->macroblock_fragments[mapping_index++] = current_fragment + 1;
+ } else
+ s->macroblock_fragments[mapping_index++] = -1;
+
+ if (i + 1 < s->fragment_height) {
+ s->all_fragments[current_fragment + s->fragment_width].macroblock =
+ current_macroblock;
+ s->macroblock_fragments[mapping_index++] =
+ current_fragment + s->fragment_width;
+ } else
+ s->macroblock_fragments[mapping_index++] = -1;
+
+ if ((j + 1 < s->fragment_width) && (i + 1 < s->fragment_height)) {
+ s->all_fragments[current_fragment + s->fragment_width + 1].macroblock =
+ current_macroblock;
+ s->macroblock_fragments[mapping_index++] =
+ current_fragment + s->fragment_width + 1;
+ } else
+ s->macroblock_fragments[mapping_index++] = -1;
+
+ /* C planes */
+ c_fragment = s->fragment_start[1] +
+ (i * s->fragment_width / 4) + (j / 2);
+ s->all_fragments[c_fragment].macroblock = s->macroblock_count;
+ s->macroblock_fragments[mapping_index++] = c_fragment;
+
+ c_fragment = s->fragment_start[2] +
+ (i * s->fragment_width / 4) + (j / 2);
+ s->all_fragments[c_fragment].macroblock = s->macroblock_count;
+ s->macroblock_fragments[mapping_index++] = c_fragment;
+
+ if (j + 2 <= s->fragment_width)
+ current_fragment += 2;
+ else
+ current_fragment++;
+ current_macroblock++;
+ }
+
+ current_fragment += s->fragment_width;
+ }
+
+ return 0; /* successful path out */
+}
+
+/*
+ * This function wipes out all of the fragment data.
+ */
+static void init_frame(Vp3DecodeContext *s, GetBitContext *gb)
+{
+ int i;
+
+ /* zero out all of the fragment information */
+ s->coded_fragment_list_index = 0;
+ for (i = 0; i < s->fragment_count; i++) {
+ s->coeff_counts[i] = 0;
+ s->all_fragments[i].motion_x = 127;
+ s->all_fragments[i].motion_y = 127;
+ s->all_fragments[i].next_coeff= NULL;
+ s->all_fragments[i].qpi = 0;
+ s->coeffs[i].index=
+ s->coeffs[i].coeff=0;
+ s->coeffs[i].next= NULL;
+ }
+}
+
+/*
+ * This function sets up the dequantization tables used for a particular
+ * frame.
+ */
+static void init_dequantizer(Vp3DecodeContext *s, int qpi)
+{
+ int ac_scale_factor = s->coded_ac_scale_factor[s->qps[qpi]];
+ int dc_scale_factor = s->coded_dc_scale_factor[s->qps[qpi]];
+ int i, plane, inter, qri, bmi, bmj, qistart;
+
+ for(inter=0; inter<2; inter++){
+ for(plane=0; plane<3; plane++){
+ int sum=0;
+ for(qri=0; qri<s->qr_count[inter][plane]; qri++){
+ sum+= s->qr_size[inter][plane][qri];
+ if(s->qps[qpi] <= sum)
+ break;
+ }
+ qistart= sum - s->qr_size[inter][plane][qri];
+ bmi= s->qr_base[inter][plane][qri ];
+ bmj= s->qr_base[inter][plane][qri+1];
+ for(i=0; i<64; i++){
+ int coeff= ( 2*(sum -s->qps[qpi])*s->base_matrix[bmi][i]
+ - 2*(qistart-s->qps[qpi])*s->base_matrix[bmj][i]
+ + s->qr_size[inter][plane][qri])
+ / (2*s->qr_size[inter][plane][qri]);
+
+ int qmin= 8<<(inter + !i);
+ int qscale= i ? ac_scale_factor : dc_scale_factor;
+
+ s->qmat[qpi][inter][plane][s->dsp.idct_permutation[i]]= av_clip((qscale * coeff)/100 * 4, qmin, 4096);
+ }
+ // all DC coefficients use the same quant so as not to interfere with DC prediction
+ s->qmat[qpi][inter][plane][0] = s->qmat[0][inter][plane][0];
+ }
+ }
+
+ memset(s->qscale_table, (FFMAX(s->qmat[0][0][0][1], s->qmat[0][0][1][1])+8)/16, 512); //FIXME finetune
+}
+
+/*
+ * This function initializes the loop filter boundary limits if the frame's
+ * quality index is different from the previous frame's.
+ *
+ * The filter_limit_values may not be larger than 127.
+ */
+static void init_loop_filter(Vp3DecodeContext *s)
+{
+ int *bounding_values= s->bounding_values_array+127;
+ int filter_limit;
+ int x;
+ int value;
+
+ filter_limit = s->filter_limit_values[s->qps[0]];
+
+ /* set up the bounding values */
+ memset(s->bounding_values_array, 0, 256 * sizeof(int));
+ for (x = 0; x < filter_limit; x++) {
+ bounding_values[-x] = -x;
+ bounding_values[x] = x;
+ }
+ for (x = value = filter_limit; x < 128 && value; x++, value--) {
+ bounding_values[ x] = value;
+ bounding_values[-x] = -value;
+ }
+ if (value)
+ bounding_values[128] = value;
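+ /* entries 129/130 hold the filter limit replicated into each byte, presumably read by the SIMD loop-filter implementations */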
+ bounding_values[129] = bounding_values[130] = filter_limit * 0x02020202;
+}
+
+/*
+ * This function unpacks all of the superblock/macroblock/fragment coding
+ * information from the bitstream.
+ */
+static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
+{
+ int bit = 0;
+ int current_superblock = 0;
+ int current_run = 0;
+ int decode_fully_flags = 0;
+ int decode_partial_blocks = 0;
+ int first_c_fragment_seen;
+
+ int i, j;
+ int current_fragment;
+
+ if (s->keyframe) {
+ memset(s->superblock_coding, SB_FULLY_CODED, s->superblock_count);
+
+ } else {
+
+ /* unpack the list of partially-coded superblocks */
+ bit = get_bits1(gb);
+ /* toggle the bit because as soon as the first run length is
+ * fetched the bit will be toggled again */
+ bit ^= 1;
+ while (current_superblock < s->superblock_count) {
+ if (current_run-- == 0) {
+ bit ^= 1;
+ current_run = get_vlc2(gb,
+ s->superblock_run_length_vlc.table, 6, 2);
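+ /* a run length of 33 is an escape code: the run is extended by an extra 12-bit value */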
+ if (current_run == 33)
+ current_run += get_bits(gb, 12);
+
+ /* if any of the superblocks are not partially coded, flag
+ * a boolean to decode the list of fully-coded superblocks */
+ if (bit == 0) {
+ decode_fully_flags = 1;
+ } else {
+
+ /* make a note of the fact that there are partially coded
+ * superblocks */
+ decode_partial_blocks = 1;
+ }
+ }
+ s->superblock_coding[current_superblock++] = bit;
+ }
+
+ /* unpack the list of fully coded superblocks if any of the blocks were
+ * not marked as partially coded in the previous step */
+ if (decode_fully_flags) {
+
+ current_superblock = 0;
+ current_run = 0;
+ bit = get_bits1(gb);
+ /* toggle the bit because as soon as the first run length is
+ * fetched the bit will be toggled again */
+ bit ^= 1;
+ while (current_superblock < s->superblock_count) {
+
+ /* skip any superblocks already marked as partially coded */
+ if (s->superblock_coding[current_superblock] == SB_NOT_CODED) {
+
+ if (current_run-- == 0) {
+ bit ^= 1;
+ current_run = get_vlc2(gb,
+ s->superblock_run_length_vlc.table, 6, 2);
+ if (current_run == 33)
+ current_run += get_bits(gb, 12);
+ }
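+ /* bit set -> 2*bit == SB_FULLY_CODED; bit clear leaves the superblock SB_NOT_CODED */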
+ s->superblock_coding[current_superblock] = 2*bit;
+ }
+ current_superblock++;
+ }
+ }
+
+ /* if there were partial blocks, initialize bitstream for
+ * unpacking fragment codings */
+ if (decode_partial_blocks) {
+
+ current_run = 0;
+ bit = get_bits1(gb);
+ /* toggle the bit because as soon as the first run length is
+ * fetched the bit will be toggled again */
+ bit ^= 1;
+ }
+ }
+
+ /* figure out which fragments are coded; iterate through each
+ * superblock (all planes) */
+ s->coded_fragment_list_index = 0;
+ s->next_coeff= s->coeffs + s->fragment_count;
+ s->first_coded_y_fragment = s->first_coded_c_fragment = 0;
+ s->last_coded_y_fragment = s->last_coded_c_fragment = -1;
+ first_c_fragment_seen = 0;
+ memset(s->macroblock_coding, MODE_COPY, s->macroblock_count);
+ for (i = 0; i < s->superblock_count; i++) {
+
+ /* iterate through all 16 fragments in a superblock */
+ for (j = 0; j < 16; j++) {
+
+ /* if the fragment is in bounds, check its coding status */
+ current_fragment = s->superblock_fragments[i * 16 + j];
+ if (current_fragment >= s->fragment_count) {
+ av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_superblocks(): bad fragment number (%d >= %d)\n",
+ current_fragment, s->fragment_count);
+ return 1;
+ }
+ if (current_fragment != -1) {
+ if (s->superblock_coding[i] == SB_NOT_CODED) {
+
+ /* copy all the fragments from the prior frame */
+ s->all_fragments[current_fragment].coding_method =
+ MODE_COPY;
+
+ } else if (s->superblock_coding[i] == SB_PARTIALLY_CODED) {
+
+ /* fragment may or may not be coded; this is the case
+ * that cares about the fragment coding runs */
+ if (current_run-- == 0) {
+ bit ^= 1;
+ current_run = get_vlc2(gb,
+ s->fragment_run_length_vlc.table, 5, 2);
+ }
+
+ if (bit) {
+ /* default mode; actual mode will be decoded in
+ * the next phase */
+ s->all_fragments[current_fragment].coding_method =
+ MODE_INTER_NO_MV;
+ s->all_fragments[current_fragment].next_coeff= s->coeffs + current_fragment;
+ s->coded_fragment_list[s->coded_fragment_list_index] =
+ current_fragment;
+ if ((current_fragment >= s->fragment_start[1]) &&
+ (s->last_coded_y_fragment == -1) &&
+ (!first_c_fragment_seen)) {
+ s->first_coded_c_fragment = s->coded_fragment_list_index;
+ s->last_coded_y_fragment = s->first_coded_c_fragment - 1;
+ first_c_fragment_seen = 1;
+ }
+ s->coded_fragment_list_index++;
+ s->macroblock_coding[s->all_fragments[current_fragment].macroblock] = MODE_INTER_NO_MV;
+ } else {
+ /* not coded; copy this fragment from the prior frame */
+ s->all_fragments[current_fragment].coding_method =
+ MODE_COPY;
+ }
+
+ } else {
+
+ /* fragments are fully coded in this superblock; actual
+ * coding will be determined in next step */
+ s->all_fragments[current_fragment].coding_method =
+ MODE_INTER_NO_MV;
+ s->all_fragments[current_fragment].next_coeff= s->coeffs + current_fragment;
+ s->coded_fragment_list[s->coded_fragment_list_index] =
+ current_fragment;
+ if ((current_fragment >= s->fragment_start[1]) &&
+ (s->last_coded_y_fragment == -1) &&
+ (!first_c_fragment_seen)) {
+ s->first_coded_c_fragment = s->coded_fragment_list_index;
+ s->last_coded_y_fragment = s->first_coded_c_fragment - 1;
+ first_c_fragment_seen = 1;
+ }
+ s->coded_fragment_list_index++;
+ s->macroblock_coding[s->all_fragments[current_fragment].macroblock] = MODE_INTER_NO_MV;
+ }
+ }
+ }
+ }
+
+ if (!first_c_fragment_seen)
+ /* only Y fragments coded in this frame */
+ s->last_coded_y_fragment = s->coded_fragment_list_index - 1;
+ else
+ /* end the list of coded C fragments */
+ s->last_coded_c_fragment = s->coded_fragment_list_index - 1;
+
+ for (i = 0; i < s->fragment_count - 1; i++) {
+ s->fast_fragment_list[i] = i + 1;
+ }
+ s->fast_fragment_list[s->fragment_count - 1] = -1;
+
+ if (s->last_coded_y_fragment == -1)
+ s->fragment_list_y_head = -1;
+ else {
+ s->fragment_list_y_head = s->first_coded_y_fragment;
+ s->fast_fragment_list[s->last_coded_y_fragment] = -1;
+ }
+
+ if (s->last_coded_c_fragment == -1)
+ s->fragment_list_c_head = -1;
+ else {
+ s->fragment_list_c_head = s->first_coded_c_fragment;
+ s->fast_fragment_list[s->last_coded_c_fragment] = -1;
+ }
+
+ return 0;
+}
+
+/*
+ * This function unpacks all the coding mode data for individual macroblocks
+ * from the bitstream.
+ */
+static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb)
+{
+ int i, j, k;
+ int scheme;
+ int current_macroblock;
+ int current_fragment;
+ int coding_mode;
+ int custom_mode_alphabet[CODING_MODE_COUNT];
+
+ if (s->keyframe) {
+ for (i = 0; i < s->fragment_count; i++)
+ s->all_fragments[i].coding_method = MODE_INTRA;
+
+ } else {
+
+ /* fetch the mode coding scheme for this frame */
+ scheme = get_bits(gb, 3);
+
+ /* is it a custom coding scheme? */
+ if (scheme == 0) {
+ for (i = 0; i < 8; i++)
+ custom_mode_alphabet[i] = MODE_INTER_NO_MV;
+ for (i = 0; i < 8; i++)
+ custom_mode_alphabet[get_bits(gb, 3)] = i;
+ }
+
+ /* iterate through all of the macroblocks that contain 1 or more
+ * coded fragments */
+ for (i = 0; i < s->u_superblock_start; i++) {
+
+ for (j = 0; j < 4; j++) {
+ current_macroblock = s->superblock_macroblocks[i * 4 + j];
+ if ((current_macroblock == -1) ||
+ (s->macroblock_coding[current_macroblock] == MODE_COPY))
+ continue;
+ if (current_macroblock >= s->macroblock_count) {
+ av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_modes(): bad macroblock number (%d >= %d)\n",
+ current_macroblock, s->macroblock_count);
+ return 1;
+ }
+
+ /* mode 7 means get 3 bits for each coding mode */
+ if (scheme == 7)
+ coding_mode = get_bits(gb, 3);
+ else if(scheme == 0)
+ coding_mode = custom_mode_alphabet
+ [get_vlc2(gb, s->mode_code_vlc.table, 3, 3)];
+ else
+ coding_mode = ModeAlphabet[scheme-1]
+ [get_vlc2(gb, s->mode_code_vlc.table, 3, 3)];
+
+ s->macroblock_coding[current_macroblock] = coding_mode;
+ for (k = 0; k < 6; k++) {
+ current_fragment =
+ s->macroblock_fragments[current_macroblock * 6 + k];
+ if (current_fragment == -1)
+ continue;
+ if (current_fragment >= s->fragment_count) {
+ av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_modes(): bad fragment number (%d >= %d)\n",
+ current_fragment, s->fragment_count);
+ return 1;
+ }
+ if (s->all_fragments[current_fragment].coding_method !=
+ MODE_COPY)
+ s->all_fragments[current_fragment].coding_method =
+ coding_mode;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * This function unpacks all the motion vectors for the individual
+ * macroblocks from the bitstream.
+ */
+static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb)
+{
+ int i, j, k, l;
+ int coding_mode;
+ int motion_x[6];
+ int motion_y[6];
+ int last_motion_x = 0;
+ int last_motion_y = 0;
+ int prior_last_motion_x = 0;
+ int prior_last_motion_y = 0;
+ int current_macroblock;
+ int current_fragment;
+
+ if (s->keyframe)
+ return 0;
+
+ memset(motion_x, 0, 6 * sizeof(int));
+ memset(motion_y, 0, 6 * sizeof(int));
+
+ /* coding mode 0 is the VLC scheme; 1 is the fixed code scheme */
+ coding_mode = get_bits1(gb);
+
+ /* iterate through all of the macroblocks that contain 1 or more
+ * coded fragments */
+ for (i = 0; i < s->u_superblock_start; i++) {
+
+ for (j = 0; j < 4; j++) {
+ current_macroblock = s->superblock_macroblocks[i * 4 + j];
+ if ((current_macroblock == -1) ||
+ (s->macroblock_coding[current_macroblock] == MODE_COPY))
+ continue;
+ if (current_macroblock >= s->macroblock_count) {
+ av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_vectors(): bad macroblock number (%d >= %d)\n",
+ current_macroblock, s->macroblock_count);
+ return 1;
+ }
+
+ current_fragment = s->macroblock_fragments[current_macroblock * 6];
+ if (current_fragment >= s->fragment_count) {
+ av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_vectors(): bad fragment number (%d >= %d\n",
+ current_fragment, s->fragment_count);
+ return 1;
+ }
+ switch (s->macroblock_coding[current_macroblock]) {
+
+ case MODE_INTER_PLUS_MV:
+ case MODE_GOLDEN_MV:
+ /* all 6 fragments use the same motion vector */
+ if (coding_mode == 0) {
+ motion_x[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
+ motion_y[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
+ } else {
+ motion_x[0] = fixed_motion_vector_table[get_bits(gb, 6)];
+ motion_y[0] = fixed_motion_vector_table[get_bits(gb, 6)];
+ }
+
+ /* vector maintenance, only on MODE_INTER_PLUS_MV */
+ if (s->macroblock_coding[current_macroblock] ==
+ MODE_INTER_PLUS_MV) {
+ prior_last_motion_x = last_motion_x;
+ prior_last_motion_y = last_motion_y;
+ last_motion_x = motion_x[0];
+ last_motion_y = motion_y[0];
+ }
+ break;
+
+ case MODE_INTER_FOURMV:
+ /* vector maintenance */
+ prior_last_motion_x = last_motion_x;
+ prior_last_motion_y = last_motion_y;
+
+ /* fetch 4 vectors from the bitstream, one for each
+ * Y fragment, then average for the C fragment vectors */
+ motion_x[4] = motion_y[4] = 0;
+ for (k = 0; k < 4; k++) {
+ for (l = 0; l < s->coded_fragment_list_index; l++)
+ if (s->coded_fragment_list[l] == s->macroblock_fragments[6*current_macroblock + k])
+ break;
+ if (l < s->coded_fragment_list_index) {
+ if (coding_mode == 0) {
+ motion_x[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
+ motion_y[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
+ } else {
+ motion_x[k] = fixed_motion_vector_table[get_bits(gb, 6)];
+ motion_y[k] = fixed_motion_vector_table[get_bits(gb, 6)];
+ }
+ last_motion_x = motion_x[k];
+ last_motion_y = motion_y[k];
+ } else {
+ motion_x[k] = 0;
+ motion_y[k] = 0;
+ }
+ motion_x[4] += motion_x[k];
+ motion_y[4] += motion_y[k];
+ }
+
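+ /* the two chroma fragments share one vector: the rounded average of the four luma vectors */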
+ motion_x[5]=
+ motion_x[4]= RSHIFT(motion_x[4], 2);
+ motion_y[5]=
+ motion_y[4]= RSHIFT(motion_y[4], 2);
+ break;
+
+ case MODE_INTER_LAST_MV:
+ /* all 6 fragments use the last motion vector */
+ motion_x[0] = last_motion_x;
+ motion_y[0] = last_motion_y;
+
+ /* no vector maintenance (last vector remains the
+ * last vector) */
+ break;
+
+ case MODE_INTER_PRIOR_LAST:
+ /* all 6 fragments use the motion vector prior to the
+ * last motion vector */
+ motion_x[0] = prior_last_motion_x;
+ motion_y[0] = prior_last_motion_y;
+
+ /* vector maintenance */
+ prior_last_motion_x = last_motion_x;
+ prior_last_motion_y = last_motion_y;
+ last_motion_x = motion_x[0];
+ last_motion_y = motion_y[0];
+ break;
+
+ default:
+ /* covers intra, inter without MV, golden without MV */
+ motion_x[0] = 0;
+ motion_y[0] = 0;
+
+ /* no vector maintenance */
+ break;
+ }
+
+ /* assign the motion vectors to the correct fragments */
+ for (k = 0; k < 6; k++) {
+ current_fragment =
+ s->macroblock_fragments[current_macroblock * 6 + k];
+ if (current_fragment == -1)
+ continue;
+ if (current_fragment >= s->fragment_count) {
+ av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_vectors(): bad fragment number (%d >= %d)\n",
+ current_fragment, s->fragment_count);
+ return 1;
+ }
+ if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
+ s->all_fragments[current_fragment].motion_x = motion_x[k];
+ s->all_fragments[current_fragment].motion_y = motion_y[k];
+ } else {
+ s->all_fragments[current_fragment].motion_x = motion_x[0];
+ s->all_fragments[current_fragment].motion_y = motion_y[0];
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int unpack_block_qpis(Vp3DecodeContext *s, GetBitContext *gb)
+{
+ int qpi, i, j, bit, run_length, blocks_decoded, num_blocks_at_qpi;
+ int num_blocks = s->coded_fragment_list_index;
+
+ for (qpi = 0; qpi < s->nqps-1 && num_blocks > 0; qpi++) {
+ i = blocks_decoded = num_blocks_at_qpi = 0;
+
+ bit = get_bits1(gb);
+
+ do {
+ run_length = get_vlc2(gb, s->superblock_run_length_vlc.table, 6, 2) + 1;
+ if (run_length == 34)
+ run_length += get_bits(gb, 12);
+ blocks_decoded += run_length;
+
+ if (!bit)
+ num_blocks_at_qpi += run_length;
+
+ for (j = 0; j < run_length; i++) {
+ if (i >= s->coded_fragment_list_index)
+ return -1;
+
+ if (s->all_fragments[s->coded_fragment_list[i]].qpi == qpi) {
+ s->all_fragments[s->coded_fragment_list[i]].qpi += bit;
+ j++;
+ }
+ }
+
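+ /* 4129 (= 34 + 4095) is the longest possible run; after a maximal run the flag bit is read explicitly instead of toggled */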
+ if (run_length == 4129)
+ bit = get_bits1(gb);
+ else
+ bit ^= 1;
+ } while (blocks_decoded < num_blocks);
+
+ num_blocks -= num_blocks_at_qpi;
+ }
+
+ return 0;
+}
+
+/*
+ * This function is called by unpack_dct_coeffs() to extract the VLCs from
+ * the bitstream. The VLCs encode tokens which are used to unpack DCT
+ * data. This function unpacks all the VLCs for either the Y plane or both
+ * C planes, and is called for DC coefficients or different AC coefficient
+ * levels (since different coefficient types require different VLC tables).
+ *
+ * This function returns a residual eob run. E.g., if a particular token gave
+ * instructions to EOB the next 5 fragments and there were only 2 fragments
+ * left in the current fragment range, 3 would be returned so that it could
+ * be passed into the next call to this same function.
+ */
+static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
+ VLC *table, int coeff_index,
+ int y_plane,
+ int eob_run)
+{
+ int i;
+ int token;
+ int zero_run = 0;
+ DCTELEM coeff = 0;
+ Vp3Fragment *fragment;
+ int bits_to_get;
+ int next_fragment;
+ int previous_fragment;
+ int fragment_num;
+ int *list_head;
+
+ /* local references to structure members to avoid repeated dereferences */
+ uint8_t *perm= s->scantable.permutated;
+ int *coded_fragment_list = s->coded_fragment_list;
+ Vp3Fragment *all_fragments = s->all_fragments;
+ uint8_t *coeff_counts = s->coeff_counts;
+ VLC_TYPE (*vlc_table)[2] = table->table;
+ int *fast_fragment_list = s->fast_fragment_list;
+
+ if (y_plane) {
+ next_fragment = s->fragment_list_y_head;
+ list_head = &s->fragment_list_y_head;
+ } else {
+ next_fragment = s->fragment_list_c_head;
+ list_head = &s->fragment_list_c_head;
+ }
+
+ i = next_fragment;
+ previous_fragment = -1; /* this indicates that the previous fragment is actually the list head */
+ while (i != -1) {
+ fragment_num = coded_fragment_list[i];
+
+ if (coeff_counts[fragment_num] > coeff_index) {
+ previous_fragment = i;
+ i = fast_fragment_list[i];
+ continue;
+ }
+ fragment = &all_fragments[fragment_num];
+
+ if (!eob_run) {
+ /* decode a VLC into a token */
+ token = get_vlc2(gb, vlc_table, 5, 3);
+ /* use the token to get a zero run, a coefficient, and an eob run */
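+ /* tokens 0-6 signal end-of-block runs; larger tokens carry a zero run plus a coefficient value */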
+ if (token <= 6) {
+ eob_run = eob_run_base[token];
+ if (eob_run_get_bits[token])
+ eob_run += get_bits(gb, eob_run_get_bits[token]);
+ coeff = zero_run = 0;
+ } else {
+ bits_to_get = coeff_get_bits[token];
+ if (bits_to_get)
+ bits_to_get = get_bits(gb, bits_to_get);
+ coeff = coeff_tables[token][bits_to_get];
+
+ zero_run = zero_run_base[token];
+ if (zero_run_get_bits[token])
+ zero_run += get_bits(gb, zero_run_get_bits[token]);
+ }
+ }
+
+ if (!eob_run) {
+ coeff_counts[fragment_num] += zero_run;
+ if (coeff_counts[fragment_num] < 64){
+ fragment->next_coeff->coeff= coeff;
+ fragment->next_coeff->index= perm[coeff_counts[fragment_num]++]; //FIXME perm here already?
+ fragment->next_coeff->next= s->next_coeff;
+ s->next_coeff->next=NULL;
+ fragment->next_coeff= s->next_coeff++;
+ }
+ /* previous fragment is now this fragment */
+ previous_fragment = i;
+ } else {
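+ /* this fragment is covered by an EOB run; bit 7 marks it as finished so later coefficient passes skip it */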
+ coeff_counts[fragment_num] |= 128;
+ eob_run--;
+ /* remove this fragment from the list */
+ if (previous_fragment != -1)
+ fast_fragment_list[previous_fragment] = fast_fragment_list[i];
+ else
+ *list_head = fast_fragment_list[i];
+ /* previous fragment remains unchanged */
+ }
+
+ i = fast_fragment_list[i];
+ }
+
+ return eob_run;
+}
+
+static void reverse_dc_prediction(Vp3DecodeContext *s,
+ int first_fragment,
+ int fragment_width,
+ int fragment_height);
+/*
+ * This function unpacks all of the DCT coefficient data from the
+ * bitstream.
+ */
+static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
+{
+ int i;
+ int dc_y_table;
+ int dc_c_table;
+ int ac_y_table;
+ int ac_c_table;
+ int residual_eob_run = 0;
+ VLC *y_tables[64];
+ VLC *c_tables[64];
+
+ /* fetch the DC table indexes */
+ dc_y_table = get_bits(gb, 4);
+ dc_c_table = get_bits(gb, 4);
+
+ /* unpack the Y plane DC coefficients */
+ residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_y_table], 0,
+ 1, residual_eob_run);
+
+ /* reverse prediction of the Y-plane DC coefficients */
+ reverse_dc_prediction(s, 0, s->fragment_width, s->fragment_height);
+
+ /* unpack the C plane DC coefficients */
+ residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0,
+ 0, residual_eob_run);
+
+ /* reverse prediction of the C-plane DC coefficients */
+ if (!(s->avctx->flags & CODEC_FLAG_GRAY))
+ {
+ reverse_dc_prediction(s, s->fragment_start[1],
+ s->fragment_width / 2, s->fragment_height / 2);
+ reverse_dc_prediction(s, s->fragment_start[2],
+ s->fragment_width / 2, s->fragment_height / 2);
+ }
+
+ /* fetch the AC table indexes */
+ ac_y_table = get_bits(gb, 4);
+ ac_c_table = get_bits(gb, 4);
+
+ /* build tables of AC VLC tables */
+ for (i = 1; i <= 5; i++) {
+ y_tables[i] = &s->ac_vlc_1[ac_y_table];
+ c_tables[i] = &s->ac_vlc_1[ac_c_table];
+ }
+ for (i = 6; i <= 14; i++) {
+ y_tables[i] = &s->ac_vlc_2[ac_y_table];
+ c_tables[i] = &s->ac_vlc_2[ac_c_table];
+ }
+ for (i = 15; i <= 27; i++) {
+ y_tables[i] = &s->ac_vlc_3[ac_y_table];
+ c_tables[i] = &s->ac_vlc_3[ac_c_table];
+ }
+ for (i = 28; i <= 63; i++) {
+ y_tables[i] = &s->ac_vlc_4[ac_y_table];
+ c_tables[i] = &s->ac_vlc_4[ac_c_table];
+ }
+
+ /* decode all AC coefficients */
+ for (i = 1; i <= 63; i++) {
+ if (s->fragment_list_y_head != -1)
+ residual_eob_run = unpack_vlcs(s, gb, y_tables[i], i,
+ 1, residual_eob_run);
+
+ if (s->fragment_list_c_head != -1)
+ residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
+ 0, residual_eob_run);
+ }
+
+ return 0;
+}
+
+/*
+ * This function reverses the DC prediction for each coded fragment in
+ * the frame. Much of this function is adapted directly from the original
+ * VP3 source code.
+ */
+#define COMPATIBLE_FRAME(x) \
+ (compatible_frame[s->all_fragments[x].coding_method] == current_frame_type)
+#define DC_COEFF(u) (s->coeffs[u].index ? 0 : s->coeffs[u].coeff) //FIXME do something to simplify this
+
+static void reverse_dc_prediction(Vp3DecodeContext *s,
+ int first_fragment,
+ int fragment_width,
+ int fragment_height)
+{
+
+#define PUL 8
+#define PU 4
+#define PUR 2
+#define PL 1
+
+ int x, y;
+ int i = first_fragment;
+
+ int predicted_dc;
+
+ /* DC values for the left, up-left, up, and up-right fragments */
+ int vl, vul, vu, vur;
+
+ /* indexes for the left, up-left, up, and up-right fragments */
+ int l, ul, u, ur;
+
+ /*
+ * The 4 fields mean:
+ * 0: up-left multiplier
+ * 1: up multiplier
+ * 2: up-right multiplier
+ * 3: left multiplier
+ */
+ static const int predictor_transform[16][4] = {
+ { 0, 0, 0, 0},
+ { 0, 0, 0,128}, // PL
+ { 0, 0,128, 0}, // PUR
+ { 0, 0, 53, 75}, // PUR|PL
+ { 0,128, 0, 0}, // PU
+ { 0, 64, 0, 64}, // PU|PL
+ { 0,128, 0, 0}, // PU|PUR
+ { 0, 0, 53, 75}, // PU|PUR|PL
+ {128, 0, 0, 0}, // PUL
+ { 0, 0, 0,128}, // PUL|PL
+ { 64, 0, 64, 0}, // PUL|PUR
+ { 0, 0, 53, 75}, // PUL|PUR|PL
+ { 0,128, 0, 0}, // PUL|PU
+ {-104,116, 0,116}, // PUL|PU|PL
+ { 24, 80, 24, 0}, // PUL|PU|PUR
+ {-104,116, 0,116} // PUL|PU|PUR|PL
+ };
+
+ /* This table shows which types of blocks can use other blocks for
+ * prediction. For example, INTRA is the only mode in this table to
+ * have a frame number of 0. That means INTRA blocks can only predict
+ * from other INTRA blocks. There are 2 golden frame coding types;
+ * blocks encoded in these modes can only predict from other blocks
+ * that were encoded with one of these 2 modes. */
+ static const unsigned char compatible_frame[9] = {
+ 1, /* MODE_INTER_NO_MV */
+ 0, /* MODE_INTRA */
+ 1, /* MODE_INTER_PLUS_MV */
+ 1, /* MODE_INTER_LAST_MV */
+ 1, /* MODE_INTER_PRIOR_LAST */
+ 2, /* MODE_USING_GOLDEN */
+ 2, /* MODE_GOLDEN_MV */
+ 1, /* MODE_INTER_FOURMV */
+ 3 /* MODE_COPY */
+ };
+ int current_frame_type;
+
+ /* there is a last DC predictor for each of the 3 frame types */
+ short last_dc[3];
+
+ int transform = 0;
+
+ vul = vu = vur = vl = 0;
+ last_dc[0] = last_dc[1] = last_dc[2] = 0;
+
+ /* for each fragment row... */
+ for (y = 0; y < fragment_height; y++) {
+
+ /* for each fragment in a row... */
+ for (x = 0; x < fragment_width; x++, i++) {
+
+ /* reverse prediction if this block was coded */
+ if (s->all_fragments[i].coding_method != MODE_COPY) {
+
+ current_frame_type =
+ compatible_frame[s->all_fragments[i].coding_method];
+
+ transform= 0;
+ if(x){
+ l= i-1;
+ vl = DC_COEFF(l);
+ if(COMPATIBLE_FRAME(l))
+ transform |= PL;
+ }
+ if(y){
+ u= i-fragment_width;
+ vu = DC_COEFF(u);
+ if(COMPATIBLE_FRAME(u))
+ transform |= PU;
+ if(x){
+ ul= i-fragment_width-1;
+ vul = DC_COEFF(ul);
+ if(COMPATIBLE_FRAME(ul))
+ transform |= PUL;
+ }
+ if(x + 1 < fragment_width){
+ ur= i-fragment_width+1;
+ vur = DC_COEFF(ur);
+ if(COMPATIBLE_FRAME(ur))
+ transform |= PUR;
+ }
+ }
+
+ if (transform == 0) {
+
+ /* if there were no fragments to predict from, use last
+ * DC saved */
+ predicted_dc = last_dc[current_frame_type];
+ } else {
+
+ /* apply the appropriate predictor transform */
+ predicted_dc =
+ (predictor_transform[transform][0] * vul) +
+ (predictor_transform[transform][1] * vu) +
+ (predictor_transform[transform][2] * vur) +
+ (predictor_transform[transform][3] * vl);
+
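+ /* every usable row of predictor_transform sums to 128, so this restores unit scale */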
+ predicted_dc /= 128;
+
+ /* check for outranging on the [ul u l] and
+ * [ul u ur l] predictors */
+ if ((transform == 15) || (transform == 13)) {
+ if (FFABS(predicted_dc - vu) > 128)
+ predicted_dc = vu;
+ else if (FFABS(predicted_dc - vl) > 128)
+ predicted_dc = vl;
+ else if (FFABS(predicted_dc - vul) > 128)
+ predicted_dc = vul;
+ }
+ }
+
+ /* at long last, apply the predictor */
+ if(s->coeffs[i].index){
+ *s->next_coeff= s->coeffs[i];
+ s->coeffs[i].index=0;
+ s->coeffs[i].coeff=0;
+ s->coeffs[i].next= s->next_coeff++;
+ }
+ s->coeffs[i].coeff += predicted_dc;
+ /* save the DC */
+ last_dc[current_frame_type] = DC_COEFF(i);
+ if(DC_COEFF(i) && !(s->coeff_counts[i]&127)){
+ s->coeff_counts[i]= 129;
+// s->all_fragments[i].next_coeff= s->next_coeff;
+ s->coeffs[i].next= s->next_coeff;
+ (s->next_coeff++)->next=NULL;
+ }
+ }
+ }
+ }
+}
+
+/*
+ * Perform the final rendering for a particular slice of data.
+ * The slice number ranges from 0..(macroblock_height - 1).
+ */
+static void render_slice(Vp3DecodeContext *s, int slice)
+{
+ int x;
+ int16_t *dequantizer;
+ DECLARE_ALIGNED_16(DCTELEM, block)[64];
+ int motion_x = 0xdeadbeef, motion_y = 0xdeadbeef;
+ int motion_halfpel_index;
+ uint8_t *motion_source;
+ int plane;
+ int current_macroblock_entry = slice * s->macroblock_width * 6;
+
+ if (slice >= s->macroblock_height)
+ return;
+
+ for (plane = 0; plane < 3; plane++) {
+ uint8_t *output_plane = s->current_frame.data [plane];
+ uint8_t * last_plane = s-> last_frame.data [plane];
+ uint8_t *golden_plane = s-> golden_frame.data [plane];
+ int stride = s->current_frame.linesize[plane];
+ int plane_width = s->width >> !!plane;
+ int plane_height = s->height >> !!plane;
+ int y = slice * FRAGMENT_PIXELS << !plane ;
+ int slice_height = y + (FRAGMENT_PIXELS << !plane);
+ int i = s->macroblock_fragments[current_macroblock_entry + plane + 3*!!plane];
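+ /* one slice is one macroblock row: 16 pixel rows (two fragment rows) of luma and 8 rows (one fragment row) of each chroma plane */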
+
+ if (!s->flipped_image) stride = -stride;
+
+
+ if(FFABS(stride) > 2048)
+ return; //various tables are fixed size
+
+ /* for each fragment row in the slice (both of them)... */
+ for (; y < slice_height; y += 8) {
+
+ /* for each fragment in a row... */
+ for (x = 0; x < plane_width; x += 8, i++) {
+
+ if ((i < 0) || (i >= s->fragment_count)) {
+ av_log(s->avctx, AV_LOG_ERROR, " vp3:render_slice(): bad fragment number (%d)\n", i);
+ return;
+ }
+
+ /* transform if this block was coded */
+ if ((s->all_fragments[i].coding_method != MODE_COPY) &&
+ !((s->avctx->flags & CODEC_FLAG_GRAY) && plane)) {
+
+ if ((s->all_fragments[i].coding_method == MODE_USING_GOLDEN) ||
+ (s->all_fragments[i].coding_method == MODE_GOLDEN_MV))
+ motion_source= golden_plane;
+ else
+ motion_source= last_plane;
+
+ motion_source += s->all_fragments[i].first_pixel;
+ motion_halfpel_index = 0;
+
+ /* sort out the motion vector if this fragment is coded
+ * using a motion vector method */
+ if ((s->all_fragments[i].coding_method > MODE_INTRA) &&
+ (s->all_fragments[i].coding_method != MODE_USING_GOLDEN)) {
+ int src_x, src_y;
+ motion_x = s->all_fragments[i].motion_x;
+ motion_y = s->all_fragments[i].motion_y;
+ if(plane){
+ motion_x= (motion_x>>1) | (motion_x&1);
+ motion_y= (motion_y>>1) | (motion_y&1);
+ }
+
+ src_x= (motion_x>>1) + x;
+ src_y= (motion_y>>1) + y;
+ if ((motion_x == 127) || (motion_y == 127))
+ av_log(s->avctx, AV_LOG_ERROR, " help! got invalid motion vector! (%X, %X)\n", motion_x, motion_y);
+
+ motion_halfpel_index = motion_x & 0x01;
+ motion_source += (motion_x >> 1);
+
+ motion_halfpel_index |= (motion_y & 0x01) << 1;
+ motion_source += ((motion_y >> 1) * stride);
+
+ if(src_x<0 || src_y<0 || src_x + 9 >= plane_width || src_y + 9 >= plane_height){
+ uint8_t *temp= s->edge_emu_buffer;
+ if(stride<0) temp -= 9*stride;
+ else temp += 9*stride;
+
+ ff_emulated_edge_mc(temp, motion_source, stride, 9, 9, src_x, src_y, plane_width, plane_height);
+ motion_source= temp;
+ }
+ }
+
+
+ /* first, take care of copying a block from either the
+ * previous or the golden frame */
+ if (s->all_fragments[i].coding_method != MODE_INTRA) {
+ /* Note: it is possible to implement all MC cases with
+ put_no_rnd_pixels_l2, which would look more like the
+ VP3 source, but this would be slower because
+ put_no_rnd_pixels_tab is better optimized */
+ if(motion_halfpel_index != 3){
+ s->dsp.put_no_rnd_pixels_tab[1][motion_halfpel_index](
+ output_plane + s->all_fragments[i].first_pixel,
+ motion_source, stride, 8);
+ }else{
+ int d= (motion_x ^ motion_y)>>31; // d is 0 if motion_x and _y have the same sign, else -1
+ s->dsp.put_no_rnd_pixels_l2[1](
+ output_plane + s->all_fragments[i].first_pixel,
+ motion_source - d,
+ motion_source + stride + 1 + d,
+ stride, 8);
+ }
+ dequantizer = s->qmat[s->all_fragments[i].qpi][1][plane];
+ }else{
+ dequantizer = s->qmat[s->all_fragments[i].qpi][0][plane];
+ }
+
+ /* dequantize the DCT coefficients */
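+ /* the quant matrices carry an extra factor of 4 (see init_dequantizer); the VP3 IDCT works at that precision, while other IDCTs get the coefficients scaled back down with rounding */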
+ if(s->avctx->idct_algo==FF_IDCT_VP3){
+ Coeff *coeff= s->coeffs + i;
+ s->dsp.clear_block(block);
+ while(coeff->next){
+ block[coeff->index]= coeff->coeff * dequantizer[coeff->index];
+ coeff= coeff->next;
+ }
+ }else{
+ Coeff *coeff= s->coeffs + i;
+ s->dsp.clear_block(block);
+ while(coeff->next){
+ block[coeff->index]= (coeff->coeff * dequantizer[coeff->index] + 2)>>2;
+ coeff= coeff->next;
+ }
+ }
+
+ /* invert DCT and place (or add) in final output */
+
+ if (s->all_fragments[i].coding_method == MODE_INTRA) {
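+ /* generic IDCTs need the +128 pixel bias added to the DC term (scaled by the 8x DC gain); the VP3 IDCT presumably applies that offset internally */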
+ if(s->avctx->idct_algo!=FF_IDCT_VP3)
+ block[0] += 128<<3;
+ s->dsp.idct_put(
+ output_plane + s->all_fragments[i].first_pixel,
+ stride,
+ block);
+ } else {
+ s->dsp.idct_add(
+ output_plane + s->all_fragments[i].first_pixel,
+ stride,
+ block);
+ }
+ } else {
+
+ /* copy directly from the previous frame */
+ s->dsp.put_pixels_tab[1][0](
+ output_plane + s->all_fragments[i].first_pixel,
+ last_plane + s->all_fragments[i].first_pixel,
+ stride, 8);
+
+ }
+ }
+ }
+ }
+
+ /* this looks like a good place for slice dispatch... */
+ /* algorithm:
+ * if (slice == s->macroblock_height - 1)
+ * dispatch (both last slice & 2nd-to-last slice);
+ * else if (slice > 0)
+ * dispatch (slice - 1);
+ */
+
+ emms_c();
+}
+
+static void apply_loop_filter(Vp3DecodeContext *s)
+{
+ int plane;
+ int x, y;
+ int *bounding_values= s->bounding_values_array+127;
+
+ for (plane = 0; plane < 3; plane++) {
+ int width = s->fragment_width >> !!plane;
+ int height = s->fragment_height >> !!plane;
+ int fragment = s->fragment_start [plane];
+ int stride = s->current_frame.linesize[plane];
+ uint8_t *plane_data = s->current_frame.data [plane];
+ if (!s->flipped_image) stride = -stride;
+
+ for (y = 0; y < height; y++) {
+
+ for (x = 0; x < width; x++) {
+ /* This code basically just deblocks on the edges of coded blocks.
+ * However, it has to be much more complicated because of the
+ * braindamaged deblock ordering used in VP3/Theora. Order matters
+ * because some pixels get filtered twice. */
+ if( s->all_fragments[fragment].coding_method != MODE_COPY )
+ {
+ /* do not perform left edge filter for left columns frags */
+ if (x > 0) {
+ s->dsp.vp3_h_loop_filter(
+ plane_data + s->all_fragments[fragment].first_pixel,
+ stride, bounding_values);
+ }
+
+ /* do not perform top edge filter for top row fragments */
+ if (y > 0) {
+ s->dsp.vp3_v_loop_filter(
+ plane_data + s->all_fragments[fragment].first_pixel,
+ stride, bounding_values);
+ }
+
+ /* do not perform right edge filter for right column
+ * fragments or if right fragment neighbor is also coded
+ * in this frame (it will be filtered in next iteration) */
+ if ((x < width - 1) &&
+ (s->all_fragments[fragment + 1].coding_method == MODE_COPY)) {
+ s->dsp.vp3_h_loop_filter(
+ plane_data + s->all_fragments[fragment + 1].first_pixel,
+ stride, bounding_values);
+ }
+
+ /* do not perform bottom edge filter for bottom row
+ * fragments or if bottom fragment neighbor is also coded
+ * in this frame (it will be filtered in the next row) */
+ if ((y < height - 1) &&
+ (s->all_fragments[fragment + width].coding_method == MODE_COPY)) {
+ s->dsp.vp3_v_loop_filter(
+ plane_data + s->all_fragments[fragment + width].first_pixel,
+ stride, bounding_values);
+ }
+ }
+
+ fragment++;
+ }
+ }
+ }
+}
+
+/*
+ * This function computes the first pixel addresses for each fragment.
+ * This function needs to be invoked after the first frame is allocated
+ * so that it has access to the plane strides.
+ */
+static void vp3_calculate_pixel_addresses(Vp3DecodeContext *s)
+{
+#define Y_INITIAL(chroma_shift) s->flipped_image ? 1 : s->fragment_height >> chroma_shift
+#define Y_FINISHED(chroma_shift) s->flipped_image ? y <= s->fragment_height >> chroma_shift : y > 0
+
+ int i, x, y;
+ const int y_inc = s->flipped_image ? 1 : -1;
+
+ /* figure out the first pixel addresses for each of the fragments */
+ /* Y plane */
+ i = 0;
+ for (y = Y_INITIAL(0); Y_FINISHED(0); y += y_inc) {
+ for (x = 0; x < s->fragment_width; x++) {
+ s->all_fragments[i++].first_pixel =
+ s->golden_frame.linesize[0] * y * FRAGMENT_PIXELS -
+ s->golden_frame.linesize[0] +
+ x * FRAGMENT_PIXELS;
+ }
+ }
+
+ /* U plane */
+ i = s->fragment_start[1];
+ for (y = Y_INITIAL(1); Y_FINISHED(1); y += y_inc) {
+ for (x = 0; x < s->fragment_width / 2; x++) {
+ s->all_fragments[i++].first_pixel =
+ s->golden_frame.linesize[1] * y * FRAGMENT_PIXELS -
+ s->golden_frame.linesize[1] +
+ x * FRAGMENT_PIXELS;
+ }
+ }
+
+ /* V plane */
+ i = s->fragment_start[2];
+ for (y = Y_INITIAL(1); Y_FINISHED(1); y += y_inc) {
+ for (x = 0; x < s->fragment_width / 2; x++) {
+ s->all_fragments[i++].first_pixel =
+ s->golden_frame.linesize[2] * y * FRAGMENT_PIXELS -
+ s->golden_frame.linesize[2] +
+ x * FRAGMENT_PIXELS;
+ }
+ }
+}
+
+/*
+ * This is the ffmpeg/libavcodec API init function.
+ */
+static av_cold int vp3_decode_init(AVCodecContext *avctx)
+{
+ Vp3DecodeContext *s = avctx->priv_data;
+ int i, inter, plane;
+ int c_width;
+ int c_height;
+ int y_superblock_count;
+ int c_superblock_count;
+
+ if (avctx->codec_tag == MKTAG('V','P','3','0'))
+ s->version = 0;
+ else
+ s->version = 1;
+
+ s->avctx = avctx;
+ s->width = FFALIGN(avctx->width, 16);
+ s->height = FFALIGN(avctx->height, 16);
+ avctx->pix_fmt = PIX_FMT_YUV420P;
+ avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
+ //if(avctx->idct_algo==FF_IDCT_AUTO)
+ avctx->idct_algo=FF_IDCT_VP3;
+ dsputil_init(&s->dsp, avctx);
+
+ ff_init_scantable(s->dsp.idct_permutation, &s->scantable, ff_zigzag_direct);
+
+ /* initialize to an impossible value which will force a recalculation
+ * in the first frame decode */
+ for (i = 0; i < 3; i++)
+ s->qps[i] = -1;
+
+ s->y_superblock_width = (s->width + 31) / 32;
+ s->y_superblock_height = (s->height + 31) / 32;
+ y_superblock_count = s->y_superblock_width * s->y_superblock_height;
+
+ /* work out the dimensions for the C planes */
+ c_width = s->width / 2;
+ c_height = s->height / 2;
+ s->c_superblock_width = (c_width + 31) / 32;
+ s->c_superblock_height = (c_height + 31) / 32;
+ c_superblock_count = s->c_superblock_width * s->c_superblock_height;
+
+ s->superblock_count = y_superblock_count + (c_superblock_count * 2);
+ s->u_superblock_start = y_superblock_count;
+ s->v_superblock_start = s->u_superblock_start + c_superblock_count;
+ s->superblock_coding = av_malloc(s->superblock_count);
+
+ s->macroblock_width = (s->width + 15) / 16;
+ s->macroblock_height = (s->height + 15) / 16;
+ s->macroblock_count = s->macroblock_width * s->macroblock_height;
+
+ s->fragment_width = s->width / FRAGMENT_PIXELS;
+ s->fragment_height = s->height / FRAGMENT_PIXELS;
+
+ /* fragment count covers all 8x8 blocks for all 3 planes */
+ s->fragment_count = s->fragment_width * s->fragment_height * 3 / 2;
+ s->fragment_start[1] = s->fragment_width * s->fragment_height;
+ s->fragment_start[2] = s->fragment_width * s->fragment_height * 5 / 4;
+
+ s->all_fragments = av_malloc(s->fragment_count * sizeof(Vp3Fragment));
+ s->coeff_counts = av_malloc(s->fragment_count * sizeof(*s->coeff_counts));
+ s->coeffs = av_malloc(s->fragment_count * sizeof(Coeff) * 65);
+ s->coded_fragment_list = av_malloc(s->fragment_count * sizeof(int));
+ s->fast_fragment_list = av_malloc(s->fragment_count * sizeof(int));
+ s->pixel_addresses_initialized = 0;
+ if (!s->superblock_coding || !s->all_fragments || !s->coeff_counts ||
+ !s->coeffs || !s->coded_fragment_list || !s->fast_fragment_list) {
+ vp3_decode_end(avctx);
+ return -1;
+ }
+
+ if (!s->theora_tables)
+ {
+ for (i = 0; i < 64; i++) {
+ s->coded_dc_scale_factor[i] = vp31_dc_scale_factor[i];
+ s->coded_ac_scale_factor[i] = vp31_ac_scale_factor[i];
+ s->base_matrix[0][i] = vp31_intra_y_dequant[i];
+ s->base_matrix[1][i] = vp31_intra_c_dequant[i];
+ s->base_matrix[2][i] = vp31_inter_dequant[i];
+ s->filter_limit_values[i] = vp31_filter_limit_values[i];
+ }
+
+ for(inter=0; inter<2; inter++){
+ for(plane=0; plane<3; plane++){
+ s->qr_count[inter][plane]= 1;
+ s->qr_size [inter][plane][0]= 63;
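+ /* VP31 base matrices: 0 = intra luma, 1 = intra chroma, 2 = all inter blocks */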
+ s->qr_base [inter][plane][0]=
+ s->qr_base [inter][plane][1]= 2*inter + (!!plane)*!inter;
+ }
+ }
+
+ /* init VLC tables */
+ for (i = 0; i < 16; i++) {
+
+ /* DC histograms */
+ init_vlc(&s->dc_vlc[i], 5, 32,
+ &dc_bias[i][0][1], 4, 2,
+ &dc_bias[i][0][0], 4, 2, 0);
+
+ /* group 1 AC histograms */
+ init_vlc(&s->ac_vlc_1[i], 5, 32,
+ &ac_bias_0[i][0][1], 4, 2,
+ &ac_bias_0[i][0][0], 4, 2, 0);
+
+ /* group 2 AC histograms */
+ init_vlc(&s->ac_vlc_2[i], 5, 32,
+ &ac_bias_1[i][0][1], 4, 2,
+ &ac_bias_1[i][0][0], 4, 2, 0);
+
+ /* group 3 AC histograms */
+ init_vlc(&s->ac_vlc_3[i], 5, 32,
+ &ac_bias_2[i][0][1], 4, 2,
+ &ac_bias_2[i][0][0], 4, 2, 0);
+
+ /* group 4 AC histograms */
+ init_vlc(&s->ac_vlc_4[i], 5, 32,
+ &ac_bias_3[i][0][1], 4, 2,
+ &ac_bias_3[i][0][0], 4, 2, 0);
+ }
+ } else {
+ for (i = 0; i < 16; i++) {
+
+ /* DC histograms */
+ if (init_vlc(&s->dc_vlc[i], 5, 32,
+ &s->huffman_table[i][0][1], 4, 2,
+ &s->huffman_table[i][0][0], 4, 2, 0) < 0)
+ goto vlc_fail;
+
+ /* group 1 AC histograms */
+ if (init_vlc(&s->ac_vlc_1[i], 5, 32,
+ &s->huffman_table[i+16][0][1], 4, 2,
+ &s->huffman_table[i+16][0][0], 4, 2, 0) < 0)
+ goto vlc_fail;
+
+ /* group 2 AC histograms */
+ if (init_vlc(&s->ac_vlc_2[i], 5, 32,
+ &s->huffman_table[i+16*2][0][1], 4, 2,
+ &s->huffman_table[i+16*2][0][0], 4, 2, 0) < 0)
+ goto vlc_fail;
+
+ /* group 3 AC histograms */
+ if (init_vlc(&s->ac_vlc_3[i], 5, 32,
+ &s->huffman_table[i+16*3][0][1], 4, 2,
+ &s->huffman_table[i+16*3][0][0], 4, 2, 0) < 0)
+ goto vlc_fail;
+
+ /* group 4 AC histograms */
+ if (init_vlc(&s->ac_vlc_4[i], 5, 32,
+ &s->huffman_table[i+16*4][0][1], 4, 2,
+ &s->huffman_table[i+16*4][0][0], 4, 2, 0) < 0)
+ goto vlc_fail;
+ }
+ }
+
+ init_vlc(&s->superblock_run_length_vlc, 6, 34,
+ &superblock_run_length_vlc_table[0][1], 4, 2,
+ &superblock_run_length_vlc_table[0][0], 4, 2, 0);
+
+ init_vlc(&s->fragment_run_length_vlc, 5, 30,
+ &fragment_run_length_vlc_table[0][1], 4, 2,
+ &fragment_run_length_vlc_table[0][0], 4, 2, 0);
+
+ init_vlc(&s->mode_code_vlc, 3, 8,
+ &mode_code_vlc_table[0][1], 2, 1,
+ &mode_code_vlc_table[0][0], 2, 1, 0);
+
+ init_vlc(&s->motion_vector_vlc, 6, 63,
+ &motion_vector_vlc_table[0][1], 2, 1,
+ &motion_vector_vlc_table[0][0], 2, 1, 0);
+
+ /* work out the block mapping tables */
+ s->superblock_fragments = av_malloc(s->superblock_count * 16 * sizeof(int));
+ s->superblock_macroblocks = av_malloc(s->superblock_count * 4 * sizeof(int));
+ s->macroblock_fragments = av_malloc(s->macroblock_count * 6 * sizeof(int));
+ s->macroblock_coding = av_malloc(s->macroblock_count + 1);
+ if (!s->superblock_fragments || !s->superblock_macroblocks ||
+ !s->macroblock_fragments || !s->macroblock_coding) {
+ vp3_decode_end(avctx);
+ return -1;
+ }
+ init_block_mapping(s);
+
+ for (i = 0; i < 3; i++) {
+ s->current_frame.data[i] = NULL;
+ s->last_frame.data[i] = NULL;
+ s->golden_frame.data[i] = NULL;
+ }
+
+ return 0;
+
+vlc_fail:
+ av_log(avctx, AV_LOG_FATAL, "Invalid huffman table\n");
+ return -1;
+}
+
+/* ffdshow custom code (begin) */
+static int64_t theora_granule_frame(Vp3DecodeContext *s,int64_t granulepos)
+{
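+ /* a granulepos packs the frame number of the last keyframe into the high bits and the count of frames since that keyframe into the low keyframe_granule_shift bits; their sum is the absolute frame index */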
+ if(granulepos>=0){
+ int64_t iframe=granulepos>>s->keyframe_granule_shift;
+ int64_t pframe=granulepos-(iframe<<s->keyframe_granule_shift);
+ return iframe+pframe;
+ }else{
+ return -1;
+ }
+}
+/* ffdshow custom code (end) */
+
+/*
+ * This is the ffmpeg/libavcodec API frame decode function.
+ */
+static int vp3_decode_frame(AVCodecContext *avctx,
+ void *data, int *data_size,
+ const uint8_t *buf, int buf_size)
+{
+ Vp3DecodeContext *s = avctx->priv_data;
+ GetBitContext gb;
+ static int counter = 0;
+ int i;
+
+ init_get_bits(&gb, buf, buf_size * 8);
+
+ if (s->theora && get_bits1(&gb))
+ {
+ av_log(avctx, AV_LOG_ERROR, "Header packet passed to frame decoder, skipping\n");
+ return -1;
+ }
+
+ s->keyframe = !get_bits1(&gb);
+ if (!s->theora)
+ skip_bits(&gb, 1);
+ for (i = 0; i < 3; i++)
+ s->last_qps[i] = s->qps[i];
+
+ s->nqps=0;
+ do{
+ s->qps[s->nqps++]= get_bits(&gb, 6);
+ } while(s->theora >= 0x030200 && s->nqps<3 && get_bits1(&gb));
+ for (i = s->nqps; i < 3; i++)
+ s->qps[i] = -1;
+
+ if (s->avctx->debug & FF_DEBUG_PICT_INFO)
+ av_log(s->avctx, AV_LOG_INFO, " VP3 %sframe #%d: Q index = %d\n",
+ s->keyframe?"key":"", counter, s->qps[0]);
+ counter++;
+
+ if (s->qps[0] != s->last_qps[0])
+ init_loop_filter(s);
+
+ for (i = 0; i < s->nqps; i++)
+ // reinit all dequantizers if the first one changed, because
+ // the DC of the first quantizer must be used for all matrices
+ if (s->qps[i] != s->last_qps[i] || s->qps[0] != s->last_qps[0])
+ init_dequantizer(s, i);
+
+ if (avctx->skip_frame >= AVDISCARD_NONKEY && !s->keyframe)
+ return buf_size;
+
+ if (s->keyframe) {
+ if (!s->theora)
+ {
+ skip_bits(&gb, 4); /* width code */
+ skip_bits(&gb, 4); /* height code */
+ if (s->version)
+ {
+ s->version = get_bits(&gb, 5);
+ if (counter == 1)
+ av_log(s->avctx, AV_LOG_DEBUG, "VP version: %d\n", s->version);
+ }
+ }
+ if (s->version || s->theora)
+ {
+ if (get_bits1(&gb))
+ av_log(s->avctx, AV_LOG_ERROR, "Warning, unsupported keyframe coding type?!\n");
+ skip_bits(&gb, 2); /* reserved? */
+ }
+
+ if (s->last_frame.data[0] == s->golden_frame.data[0]) {
+ if (s->golden_frame.data[0])
+ avctx->release_buffer(avctx, &s->golden_frame);
+ s->last_frame= s->golden_frame; /* ensure that we catch any access to this released frame */
+ } else {
+ if (s->golden_frame.data[0])
+ avctx->release_buffer(avctx, &s->golden_frame);
+ if (s->last_frame.data[0])
+ avctx->release_buffer(avctx, &s->last_frame);
+ }
+
+ s->golden_frame.reference = 3;
+ if(avctx->get_buffer(avctx, &s->golden_frame) < 0) {
+ av_log(s->avctx, AV_LOG_ERROR, "vp3: get_buffer() failed\n");
+ return -1;
+ }
+
+ /* golden frame is also the current frame */
+ s->current_frame= s->golden_frame;
+
+ /* time to figure out pixel addresses? */
+ if (!s->pixel_addresses_initialized)
+ {
+ vp3_calculate_pixel_addresses(s);
+ s->pixel_addresses_initialized = 1;
+ }
+ } else {
+ /* allocate a new current frame */
+ s->current_frame.reference = 3;
+ if (!s->pixel_addresses_initialized) {
+ av_log(s->avctx, AV_LOG_ERROR, "vp3: first frame not a keyframe\n");
+ return -1;
+ }
+ if(avctx->get_buffer(avctx, &s->current_frame) < 0) {
+ av_log(s->avctx, AV_LOG_ERROR, "vp3: get_buffer() failed\n");
+ return -1;
+ }
+ }
+
+ s->current_frame.qscale_table= s->qscale_table; //FIXME allocate individual tables per AVFrame
+ s->current_frame.qstride= 0;
+
+ init_frame(s, &gb);
+
+ if (unpack_superblocks(s, &gb)){
+ av_log(s->avctx, AV_LOG_ERROR, "error in unpack_superblocks\n");
+ return -1;
+ }
+ if (unpack_modes(s, &gb)){
+ av_log(s->avctx, AV_LOG_ERROR, "error in unpack_modes\n");
+ return -1;
+ }
+ if (unpack_vectors(s, &gb)){
+ av_log(s->avctx, AV_LOG_ERROR, "error in unpack_vectors\n");
+ return -1;
+ }
+ if (unpack_block_qpis(s, &gb)){
+ av_log(s->avctx, AV_LOG_ERROR, "error in unpack_block_qpis\n");
+ return -1;
+ }
+ if (unpack_dct_coeffs(s, &gb)){
+ av_log(s->avctx, AV_LOG_ERROR, "error in unpack_dct_coeffs\n");
+ return -1;
+ }
+
+ for (i = 0; i < s->macroblock_height; i++)
+ render_slice(s, i);
+
+ apply_loop_filter(s);
+
+ /* ffdshow custom code (begin) */
+ if (s->theora && s->fps_numerator){
+ if (avctx->granulepos>-1){
+ s->granulepos=avctx->granulepos;
+ }else{
+ if (s->granulepos==-1)
+ s->granulepos=0;
+ else
+ if (s->keyframe){
+ long frames= s->granulepos & ((1<<s->keyframe_granule_shift)-1);
+ s->granulepos>>=s->keyframe_granule_shift;
+ s->granulepos+=frames+1;
+ s->granulepos<<=s->keyframe_granule_shift;
+ }else{
+ s->granulepos++;
+ }
+ }
+ s->current_frame.reordered_opaque = 10000000LL * theora_granule_frame(s,s->granulepos) * s->fps_denumerator / s->fps_numerator;
+ s->current_frame.pict_type=s->keyframe?FF_I_TYPE:FF_P_TYPE;
+ }
+ /* ffdshow custom code (end) */
+
+ *data_size=sizeof(AVFrame);
+ *(AVFrame*)data= s->current_frame;
+
+ /* release the last frame, if it is allocated and if it is not the
+ * golden frame */
+ if ((s->last_frame.data[0]) &&
+ (s->last_frame.data[0] != s->golden_frame.data[0]))
+ avctx->release_buffer(avctx, &s->last_frame);
+
+ /* shuffle frames (last = current) */
+ s->last_frame= s->current_frame;
+ s->current_frame.data[0]= NULL; /* ensure that we catch any access to this released frame */
+
+ return buf_size;
+}
+
+/*
+ * This is the ffmpeg/libavcodec API module cleanup function.
+ */
+static av_cold int vp3_decode_end(AVCodecContext *avctx)
+{
+ Vp3DecodeContext *s = avctx->priv_data;
+ int i;
+
+ av_free(s->superblock_coding);
+ av_free(s->all_fragments);
+ av_free(s->coeff_counts);
+ av_free(s->coeffs);
+ av_free(s->coded_fragment_list);
+ av_free(s->fast_fragment_list);
+ av_free(s->superblock_fragments);
+ av_free(s->superblock_macroblocks);
+ av_free(s->macroblock_fragments);
+ av_free(s->macroblock_coding);
+
+ for (i = 0; i < 16; i++) {
+ free_vlc(&s->dc_vlc[i]);
+ free_vlc(&s->ac_vlc_1[i]);
+ free_vlc(&s->ac_vlc_2[i]);
+ free_vlc(&s->ac_vlc_3[i]);
+ free_vlc(&s->ac_vlc_4[i]);
+ }
+
+ free_vlc(&s->superblock_run_length_vlc);
+ free_vlc(&s->fragment_run_length_vlc);
+ free_vlc(&s->mode_code_vlc);
+ free_vlc(&s->motion_vector_vlc);
+
+ /* release all frames */
+ if (s->golden_frame.data[0] && s->golden_frame.data[0] != s->last_frame.data[0])
+ avctx->release_buffer(avctx, &s->golden_frame);
+ if (s->last_frame.data[0])
+ avctx->release_buffer(avctx, &s->last_frame);
+ /* no need to release the current_frame since it will always be pointing
+ * to the same frame as either the golden or last frame */
+
+ return 0;
+}
+
+static int read_huffman_tree(AVCodecContext *avctx, GetBitContext *gb)
+{
+ Vp3DecodeContext *s = avctx->priv_data;
+
+ if (get_bits1(gb)) {
+ int token;
+ if (s->entries >= 32) { /* overflow */
+ av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
+ return -1;
+ }
+ token = get_bits(gb, 5);
+ //av_log(avctx, AV_LOG_DEBUG, "hti %d hbits %x token %d entry : %d size %d\n", s->hti, s->hbits, token, s->entries, s->huff_code_size);
+ s->huffman_table[s->hti][token][0] = s->hbits;
+ s->huffman_table[s->hti][token][1] = s->huff_code_size;
+ s->entries++;
+ }
+ else {
+ if (s->huff_code_size >= 32) {/* overflow */
+ av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
+ return -1;
+ }
+ s->huff_code_size++;
+ s->hbits <<= 1;
+ if (read_huffman_tree(avctx, gb))
+ return -1;
+ s->hbits |= 1;
+ if (read_huffman_tree(avctx, gb))
+ return -1;
+ s->hbits >>= 1;
+ s->huff_code_size--;
+ }
+ return 0;
+}
+
+#if CONFIG_THEORA_DECODER
+static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
+{
+ Vp3DecodeContext *s = avctx->priv_data;
+ int visible_width, visible_height;
+
+ s->theora = get_bits_long(gb, 24);
+ av_log(avctx, AV_LOG_DEBUG, "Theora bitstream version %X\n", s->theora);
+
+ /* 3.2.0 aka alpha3 has the same frame orientation as original vp3 */
+ /* but previous versions have the image flipped relative to vp3 */
+ if (s->theora < 0x030200)
+ {
+ s->flipped_image = 1;
+ av_log(avctx, AV_LOG_DEBUG, "Old (<alpha3) Theora bitstream, flipped image\n");
+ }
+
+ visible_width = s->width = get_bits(gb, 16) << 4;
+ visible_height = s->height = get_bits(gb, 16) << 4;
+
+ if(avcodec_check_dimensions(avctx, s->width, s->height)){
+ av_log(avctx, AV_LOG_ERROR, "Invalid dimensions (%dx%d)\n", s->width, s->height);
+ s->width= s->height= 0;
+ return -1;
+ }
+
+ if (s->theora >= 0x030400)
+ {
+ skip_bits(gb, 32); /* total number of superblocks in a frame */
+ // FIXME: the next field is 36 bits long
+ skip_bits(gb, 32); /* total number of blocks in a frame (upper 32 bits) */
+ skip_bits(gb, 4); /* total number of blocks in a frame (remaining 4 bits) */
+ skip_bits(gb, 32); /* total number of macroblocks in a frame */
+ }
+
+ if (s->theora >= 0x030200) {
+ visible_width = get_bits_long(gb, 24);
+ visible_height = get_bits_long(gb, 24);
+
+ skip_bits(gb, 8); /* offset x */
+ skip_bits(gb, 8); /* offset y */
+ }
+
+ /* ffdshow custom code (begin) */
+ s->fps_numerator=get_bits(gb, 32); /* fps numerator */
+ s->fps_denumerator=get_bits(gb, 32); /* fps denominator */
+ avctx->sample_aspect_ratio.num = get_bits(gb, 24); /* aspect numerator */
+ avctx->sample_aspect_ratio.den = get_bits(gb, 24); /* aspect denominator */
+ /* ffdshow custom code (end) */
+
+ if (s->theora < 0x030200)
+ s->keyframe_frequency_force=1<<get_bits(gb, 5); /* keyframe frequency force */ /* ffdshow custom code */
+ skip_bits(gb, 8); /* colorspace */
+ if (s->theora >= 0x030400)
+ skip_bits(gb, 2); /* pixel format: 420,res,422,444 */
+ skip_bits(gb, 24); /* bitrate */
+
+ skip_bits(gb, 6); /* quality hint */
+
+ if (s->theora >= 0x030200)
+ {
+ s->keyframe_frequency_force=1<<get_bits(gb, 5); /* keyframe frequency force */ /* ffdshow custom code */
+
+ if (s->theora < 0x030400)
+ skip_bits(gb, 5); /* spare bits */
+ }
+ s->keyframe_granule_shift=_ilog(s->keyframe_frequency_force-1); // ffdshow custom code
+
+// align_get_bits(gb);
+
+ if ( visible_width <= s->width && visible_width > s->width-16
+ && visible_height <= s->height && visible_height > s->height-16)
+ avcodec_set_dimensions(avctx, visible_width, visible_height);
+ else
+ avcodec_set_dimensions(avctx, s->width, s->height);
+
+ return 0;
+}
+
+static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
+{
+ Vp3DecodeContext *s = avctx->priv_data;
+ int i, n, matrices, inter, plane;
+
+ if (s->theora >= 0x030200) {
+ n = get_bits(gb, 3);
+ /* loop filter limit values table */
+ for (i = 0; i < 64; i++) {
+ s->filter_limit_values[i] = get_bits(gb, n);
+ if (s->filter_limit_values[i] > 127) {
+ av_log(avctx, AV_LOG_ERROR, "filter limit value too large (%i > 127), clamping\n", s->filter_limit_values[i]);
+ s->filter_limit_values[i] = 127;
+ }
+ }
+ }
+
+ if (s->theora >= 0x030200)
+ n = get_bits(gb, 4) + 1;
+ else
+ n = 16;
+ /* quality threshold table */
+ for (i = 0; i < 64; i++)
+ s->coded_ac_scale_factor[i] = get_bits(gb, n);
+
+ if (s->theora >= 0x030200)
+ n = get_bits(gb, 4) + 1;
+ else
+ n = 16;
+ /* dc scale factor table */
+ for (i = 0; i < 64; i++)
+ s->coded_dc_scale_factor[i] = get_bits(gb, n);
+
+ if (s->theora >= 0x030200)
+ matrices = get_bits(gb, 9) + 1;
+ else
+ matrices = 3;
+
+ if(matrices > 384){
+ av_log(avctx, AV_LOG_ERROR, "invalid number of base matrixes\n");
+ return -1;
+ }
+
+ for(n=0; n<matrices; n++){
+ for (i = 0; i < 64; i++)
+ s->base_matrix[n][i]= get_bits(gb, 8);
+ }
+
+ for (inter = 0; inter <= 1; inter++) {
+ for (plane = 0; plane <= 2; plane++) {
+ int newqr= 1;
+ if (inter || plane > 0)
+ newqr = get_bits1(gb);
+ if (!newqr) {
+ int qtj, plj;
+ if(inter && get_bits1(gb)){
+ qtj = 0;
+ plj = plane;
+ }else{
+ qtj= (3*inter + plane - 1) / 3;
+ plj= (plane + 2) % 3;
+ }
+ s->qr_count[inter][plane]= s->qr_count[qtj][plj];
+ memcpy(s->qr_size[inter][plane], s->qr_size[qtj][plj], sizeof(s->qr_size[0][0]));
+ memcpy(s->qr_base[inter][plane], s->qr_base[qtj][plj], sizeof(s->qr_base[0][0]));
+ } else {
+ int qri= 0;
+ int qi = 0;
+
+ for(;;){
+ i= get_bits(gb, av_log2(matrices-1)+1);
+ if(i>= matrices){
+ av_log(avctx, AV_LOG_ERROR, "invalid base matrix index\n");
+ return -1;
+ }
+ s->qr_base[inter][plane][qri]= i;
+ if(qi >= 63)
+ break;
+ i = get_bits(gb, av_log2(63-qi)+1) + 1;
+ s->qr_size[inter][plane][qri++]= i;
+ qi += i;
+ }
+
+ if (qi > 63) {
+ av_log(avctx, AV_LOG_ERROR, "invalid qi %d > 63\n", qi);
+ return -1;
+ }
+ s->qr_count[inter][plane]= qri;
+ }
+ }
+ }
+
+ /* Huffman tables */
+ for (s->hti = 0; s->hti < 80; s->hti++) {
+ s->entries = 0;
+ s->huff_code_size = 1;
+ if (!get_bits1(gb)) {
+ s->hbits = 0;
+ if(read_huffman_tree(avctx, gb))
+ return -1;
+ s->hbits = 1;
+ if(read_huffman_tree(avctx, gb))
+ return -1;
+ }
+ }
+
+ s->theora_tables = 1;
+
+ return 0;
+}
+
+static av_cold int theora_decode_init(AVCodecContext *avctx)
+{
+ Vp3DecodeContext *s = avctx->priv_data;
+ GetBitContext gb;
+ int ptype;
+ uint8_t *header_start[3];
+ int header_len[3];
+ int i;
+
+ s->theora = 1;
+
+ if (!avctx->extradata_size)
+ {
+ av_log(avctx, AV_LOG_ERROR, "Missing extradata!\n");
+ return -1;
+ }
+
+ if (ff_split_xiph_headers(avctx->extradata, avctx->extradata_size,
+ 42, header_start, header_len) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Corrupt extradata\n");
+ return -1;
+ }
+
+ for(i=0;i<3;i++) {
+ init_get_bits(&gb, header_start[i], header_len[i] * 8);
+
+ ptype = get_bits(&gb, 8);
+
+ if (!(ptype & 0x80))
+ {
+ av_log(avctx, AV_LOG_ERROR, "Invalid extradata!\n");
+// return -1;
+ }
+
+ // FIXME: Check for this as well.
+ skip_bits_long(&gb, 6*8); /* "theora" */
+
+ switch(ptype)
+ {
+ case 0x80:
+ theora_decode_header(avctx, &gb);
+ break;
+ case 0x81:
+// FIXME: is this needed? it breaks sometimes
+// theora_decode_comments(avctx, gb);
+ break;
+ case 0x82:
+ if (theora_decode_tables(avctx, &gb))
+ return -1;
+ break;
+ default:
+ av_log(avctx, AV_LOG_ERROR, "Unknown Theora config packet: %d\n", ptype&~0x80);
+ break;
+ }
+ if(ptype != 0x81 && 8*header_len[i] != get_bits_count(&gb))
+ av_log(avctx, AV_LOG_WARNING, "%d bits left in packet %X\n", 8*header_len[i] - get_bits_count(&gb), ptype);
+ if (s->theora < 0x030200)
+ break;
+ }
+
+ s->granulepos=-1; /* ffdshow custom code */
+ return vp3_decode_init(avctx);
+}
+
+AVCodec theora_decoder = {
+ "theora",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_THEORA,
+ sizeof(Vp3DecodeContext),
+ /*.init = */theora_decode_init,
+ /*.encode = */NULL,
+ /*.close = */vp3_decode_end,
+ /*.decode = */vp3_decode_frame,
+ /*.capabilities = */CODEC_CAP_DR1,
+ /*.next = */NULL,
+ /*.flush = */NULL,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name = */NULL_IF_CONFIG_SMALL("Theora"),
+};
+#endif
+
+AVCodec vp3_decoder = {
+ "vp3",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_VP3,
+ sizeof(Vp3DecodeContext),
+ /*.init = */vp3_decode_init,
+ /*.encode = */NULL,
+ /*.close = */vp3_decode_end,
+ /*.decode = */vp3_decode_frame,
+ /*.capabilities = */CODEC_CAP_DR1,
+ /*.next = */NULL,
+ /*.flush = */NULL,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name = */NULL_IF_CONFIG_SMALL("On2 VP3"),
+};
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp3data.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp3data.h
new file mode 100644
index 000000000..c5d148e45
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp3data.h
@@ -0,0 +1,3181 @@
+/*
+ * copyright (C) 2003 the ffmpeg project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VP3DATA_H
+#define AVCODEC_VP3DATA_H
+
+#include <stdint.h>
+#include <stdlib.h>
+
+/* these coefficients dequantize intraframe Y plane coefficients
+ * (note: same as JPEG) */
+static const int16_t vp31_intra_y_dequant[64] =
+{ 16, 11, 10, 16, 24, 40, 51, 61,
+ 12, 12, 14, 19, 26, 58, 60, 55,
+ 14, 13, 16, 24, 40, 57, 69, 56,
+ 14, 17, 22, 29, 51, 87, 80, 62,
+ 18, 22, 37, 58, 68, 109, 103, 77,
+ 24, 35, 55, 64, 81, 104, 113, 92,
+ 49, 64, 78, 87, 103, 121, 120, 101,
+ 72, 92, 95, 98, 112, 100, 103, 99
+};
+
+/* these coefficients dequantize intraframe C plane coefficients
+ * (note: same as JPEG) */
+static const int16_t vp31_intra_c_dequant[64] =
+{ 17, 18, 24, 47, 99, 99, 99, 99,
+ 18, 21, 26, 66, 99, 99, 99, 99,
+ 24, 26, 56, 99, 99, 99, 99, 99,
+ 47, 66, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99
+};
+
+/* these coefficients dequantize interframe coefficients (all planes) */
+static const int16_t vp31_inter_dequant[64] =
+{ 16, 16, 16, 20, 24, 28, 32, 40,
+ 16, 16, 20, 24, 28, 32, 40, 48,
+ 16, 20, 24, 28, 32, 40, 48, 64,
+ 20, 24, 28, 32, 40, 48, 64, 64,
+ 24, 28, 32, 40, 48, 64, 64, 64,
+ 28, 32, 40, 48, 64, 64, 64, 96,
+ 32, 40, 48, 64, 64, 64, 96, 128,
+ 40, 48, 64, 64, 64, 96, 128, 128
+};
+
+static const int16_t vp31_dc_scale_factor[64] =
+{ 220, 200, 190, 180, 170, 170, 160, 160,
+ 150, 150, 140, 140, 130, 130, 120, 120,
+ 110, 110, 100, 100, 90, 90, 90, 80,
+ 80, 80, 70, 70, 70, 60, 60, 60,
+ 60, 50, 50, 50, 50, 40, 40, 40,
+ 40, 40, 30, 30, 30, 30, 30, 30,
+ 30, 20, 20, 20, 20, 20, 20, 20,
+ 20, 10, 10, 10, 10, 10, 10, 10
+};
+
+static const uint32_t vp31_ac_scale_factor[64] =
+{ 500, 450, 400, 370, 340, 310, 285, 265,
+ 245, 225, 210, 195, 185, 180, 170, 160,
+ 150, 145, 135, 130, 125, 115, 110, 107,
+ 100, 96, 93, 89, 85, 82, 75, 74,
+ 70, 68, 64, 60, 57, 56, 52, 50,
+ 49, 45, 44, 43, 40, 38, 37, 35,
+ 33, 32, 30, 29, 28, 25, 24, 22,
+ 21, 19, 18, 17, 15, 13, 12, 10
+};
+
+static const uint8_t vp31_filter_limit_values[64] =
+{ 30, 25, 20, 20, 15, 15, 14, 14,
+ 13, 13, 12, 12, 11, 11, 10, 10,
+ 9, 9, 8, 8, 7, 7, 7, 7,
+ 6, 6, 6, 6, 5, 5, 5, 5,
+ 4, 4, 4, 4, 3, 3, 3, 3,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static const uint16_t superblock_run_length_vlc_table[34][2] = {
+ { 0, 1 },
+
+ { 4, 3 }, { 5, 3 },
+
+ { 0xC, 4 }, { 0xD, 4 },
+
+ { 0x38, 6 }, { 0x39, 6 }, { 0x3A, 6 }, { 0x3B, 6 },
+
+ { 0xF0, 8 }, { 0xF1, 8 }, { 0xF2, 8 }, { 0xF3, 8 },
+ { 0xF4, 8 }, { 0xF5, 8 }, { 0xF6, 8 }, { 0xF7, 8 },
+
+ { 0x3E0, 10 }, { 0x3E1, 10 }, { 0x3E2, 10 }, { 0x3E3, 10 },
+ { 0x3E4, 10 }, { 0x3E5, 10 }, { 0x3E6, 10 }, { 0x3E7, 10 },
+ { 0x3E8, 10 }, { 0x3E9, 10 }, { 0x3EA, 10 }, { 0x3EB, 10 },
+ { 0x3EC, 10 }, { 0x3ED, 10 }, { 0x3EE, 10 }, { 0x3EF, 10 },
+
+ { 0x3F, 6 } /* this last VLC is a special case for reading 12 more
+ bits from stream and adding the value 34 */
+};
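
The entries above are {codeword, length} pairs rather than a ready-to-use lookup table. A minimal sketch of how such a table could be registered with libavcodec's VLC helpers and how the escape code noted in the comment could be expanded, assuming init_vlc()/get_vlc2()/get_bits() from the bitstream reader; the index-to-run mapping and the helper names are assumptions for illustration, not the decoder's exact code:

#include "get_bits.h"   /* GetBitContext and VLC helpers; named bitstream.h in older trees */

static VLC superblock_run_length_vlc;

static void init_superblock_run_length_vlc(void)
{
    /* column 1 of each entry is the code length, column 0 the codeword;
     * the 4-byte stride covers the two uint16_t values per entry */
    init_vlc(&superblock_run_length_vlc, 6, 34,
             &superblock_run_length_vlc_table[0][1], 4, 2,
             &superblock_run_length_vlc_table[0][0], 4, 2, 0);
}

static int read_superblock_run_length(GetBitContext *gb)
{
    /* assumed mapping: indices 0..32 stand for runs of 1..33, index 33 is the escape */
    int run = get_vlc2(gb, superblock_run_length_vlc.table, 6, 2) + 1;
    if (run == 34)                    /* escape: read 12 more bits and add them to 34 */
        run += get_bits(gb, 12);
    return run;
}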
+
+static const uint16_t fragment_run_length_vlc_table[30][2] = {
+ /* 1 -> 2 */
+ { 0x0, 2 }, { 0x1, 2 },
+
+ /* 3 -> 4 */
+ { 0x4, 3 }, { 0x5, 3 },
+
+ /* 5 -> 6 */
+ { 0xC, 4 }, { 0xD, 4 },
+
+ /* 7 -> 10 */
+ { 0x38, 6 }, { 0x39, 6 },
+ { 0x3A, 6 }, { 0x3B, 6 },
+
+ /* 11 -> 14 */
+ { 0x78, 7 }, { 0x79, 7 },
+ { 0x7A, 7 }, { 0x7B, 7 },
+
+ /* 15 -> 30 */
+ { 0x1F0, 9 }, { 0x1F1, 9 }, { 0x1F2, 9 }, { 0x1F3, 9 },
+ { 0x1F4, 9 }, { 0x1F5, 9 }, { 0x1F6, 9 }, { 0x1F7, 9 },
+ { 0x1F8, 9 }, { 0x1F9, 9 }, { 0x1FA, 9 }, { 0x1FB, 9 },
+ { 0x1FC, 9 }, { 0x1FD, 9 }, { 0x1FE, 9 }, { 0x1FF, 9 }
+};
+
+static const uint8_t mode_code_vlc_table[8][2] = {
+ { 0, 1 }, { 2, 2 },
+ { 6, 3 }, { 14, 4 },
+ { 30, 5 }, { 62, 6 },
+ { 126, 7 }, { 127, 7 }
+};
+
+static const uint8_t motion_vector_vlc_table[63][2] = {
+ { 0, 3 },
+ { 1, 3 },
+ { 2, 3 },
+
+ { 6, 4 }, { 7, 4 },
+
+ { 8, 4 }, { 9, 4 },
+
+ { 40, 6 }, { 41, 6 }, { 42, 6 }, { 43, 6 },
+ { 44, 6 }, { 45, 6 }, { 46, 6 }, { 47, 6 },
+
+ { 96, 7 }, { 97, 7 }, { 98, 7 }, { 99, 7 },
+ { 100, 7 }, { 101, 7 }, { 102, 7 }, { 103, 7 },
+ { 104, 7 }, { 105, 7 }, { 106, 7 }, { 107, 7 },
+ { 108, 7 }, { 109, 7 }, { 110, 7 }, { 111, 7 },
+
+ { 0xE0, 8 }, { 0xE1, 8 }, { 0xE2, 8 }, { 0xE3, 8 },
+ { 0xE4, 8 }, { 0xE5, 8 }, { 0xE6, 8 }, { 0xE7, 8 },
+ { 0xE8, 8 }, { 0xE9, 8 }, { 0xEA, 8 }, { 0xEB, 8 },
+ { 0xEC, 8 }, { 0xED, 8 }, { 0xEE, 8 }, { 0xEF, 8 },
+
+ { 0xF0, 8 }, { 0xF1, 8 }, { 0xF2, 8 }, { 0xF3, 8 },
+ { 0xF4, 8 }, { 0xF5, 8 }, { 0xF6, 8 }, { 0xF7, 8 },
+ { 0xF8, 8 }, { 0xF9, 8 }, { 0xFA, 8 }, { 0xFB, 8 },
+ { 0xFC, 8 }, { 0xFD, 8 }, { 0xFE, 8 }, { 0xFF, 8 }
+};
+
+static const int motion_vector_table[63] = {
+ 0, 1, -1,
+ 2, -2,
+ 3, -3,
+ 4, -4, 5, -5, 6, -6, 7, -7,
+ 8, -8, 9, -9, 10, -10, 11, -11, 12, -12, 13, -13, 14, -14, 15, -15,
+ 16, -16, 17, -17, 18, -18, 19, -19, 20, -20, 21, -21, 22, -22, 23, -23,
+ 24, -24, 25, -25, 26, -26, 27, -27, 28, -28, 29, -29, 30, -30, 31, -31
+};
+
+static const int8_t fixed_motion_vector_table[64] = {
+ 0, 0, 1, -1, 2, -2, 3, -3,
+ 4, -4, 5, -5, 6, -6, 7, -7,
+ 8, -8, 9, -9, 10, -10, 11, -11,
+ 12, -12, 13, -13, 14, -14, 15, -15,
+ 16, -16, 17, -17, 18, -18, 19, -19,
+ 20, -20, 21, -21, 22, -22, 23, -23,
+ 24, -24, 25, -25, 26, -26, 27, -27,
+ 28, -28, 29, -29, 30, -30, 31, -31
+};
+
+/* only tokens 0..6 indicate eob runs */
+static const int eob_run_base[7] = {
+ 1, 2, 3, 4, 8, 16, 0
+};
+static const int eob_run_get_bits[7] = {
+ 0, 0, 0, 2, 3, 4, 12
+};
+
+static const int zero_run_base[32] = {
+ 0, 0, 0, 0, 0, 0, 0, /* 0..6 are never used */
+ 0, 0, /* 7..8 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 9..22 */
+ 1, 2, 3, 4, 5, /* 23..27 */
+ 6, 10, 1, 2 /* 28..31 */
+};
+static const int zero_run_get_bits[32] = {
+ 0, 0, 0, 0, 0, 0, 0, /* 0..6 are never used */
+ 3, 6, /* 7..8 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 9..22 */
+ 0, 0, 0, 0, 0, /* 23..27 */
+ 2, 3, 0, 1 /* 28..31 */
+};
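
The two base/get_bits pairs above describe a simple prefix scheme: the decoded token selects a base run length, and a token-dependent number of extra bits is added on top. A minimal sketch of that combination, assuming the same GetBitContext reader as above; the helper names are illustrative only:

/* illustrative helpers only: expand an EOB-run token (0..6) or the zero-run
 * part of a coefficient token (7..31) into an actual run length */
static int expand_eob_run(GetBitContext *gb, int token)
{
    int run = eob_run_base[token];
    if (eob_run_get_bits[token])
        run += get_bits(gb, eob_run_get_bits[token]);
    return run;
}

static int expand_zero_run(GetBitContext *gb, int token)
{
    int run = zero_run_base[token];
    if (zero_run_get_bits[token])
        run += get_bits(gb, zero_run_get_bits[token]);
    return run;
}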
+
+static const int coeff_get_bits[32] = {
+ 0, 0, 0, 0, 0, 0, 0, /* 0..6 are never used */
+ 0, 0, 0, 0, 0, 0, /* 7..12 use constant coeffs */
+ 1, 1, 1, 1, /* 13..16 are constants but still need sign bit */
+ 2, 3, 4, 5, 6, 10, /* 17..22, for reading large coeffs */
+ 1, 1, 1, 1, 1, 1, 1, /* 23..29 are constants but still need sign bit */
+ 2, 2 /* 30..31 */
+};
+
+static const int16_t coeff_table_token_7_8[1] = { 0 };
+static const int16_t coeff_table_token_9[1] = { 1 };
+static const int16_t coeff_table_token_10[1] = { -1 };
+static const int16_t coeff_table_token_11[1] = { 2 };
+static const int16_t coeff_table_token_12[1] = { -2 };
+
+static const int16_t coeff_table_token_13[2] = { 3, -3 };
+static const int16_t coeff_table_token_14[2] = { 4, -4 };
+static const int16_t coeff_table_token_15[2] = { 5, -5 };
+static const int16_t coeff_table_token_16[2] = { 6, -6 };
+
+static const int16_t coeff_table_token_23_24_25_26_27_28_29[2] = { 1, -1 };
+static const int16_t coeff_table_token_30[4] = { 2, 3, -2, -3 };
+static const int16_t coeff_table_token_31[4] = { 2, 3, -2, -3 };
+
+static const int16_t coeff_table_token_17[4] = {
+ 7, 8, -7, -8
+};
+
+static const int16_t coeff_table_token_18[8] = {
+ 9, 10, 11, 12, -9, -10, -11, -12
+};
+
+static const int16_t coeff_table_token_19[16] = {
+ 13, 14, 15, 16, 17, 18, 19, 20, -13, -14, -15, -16, -17, -18, -19, -20
+};
+
+static const int16_t coeff_table_token_20[32] = {
+ 21, 22, 23, 24, 25, 26, 27, 28,
+ 29, 30, 31, 32, 33, 34, 35, 36,
+ -21, -22, -23, -24, -25, -26, -27, -28,
+ -29, -30, -31, -32, -33, -34, -35, -36
+};
+
+static const int16_t coeff_table_token_21[64] = {
+ 37, 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52,
+ 53, 54, 55, 56, 57, 58, 59, 60,
+ 61, 62, 63, 64, 65, 66, 67, 68,
+ -37, -38, -39, -40, -41, -42, -43, -44,
+ -45, -46, -47, -48, -49, -50, -51, -52,
+ -53, -54, -55, -56, -57, -58, -59, -60,
+ -61, -62, -63, -64, -65, -66, -67, -68
+};
+
+static const int16_t coeff_table_token_22[1024] = {
+ 69, 70, 71, 72, 73, 74, 75, 76,
+ 77, 78, 79, 80, 81, 82, 83, 84,
+ 85, 86, 87, 88, 89, 90, 91, 92,
+ 93, 94, 95, 96, 97, 98, 99, 100,
+ 101, 102, 103, 104, 105, 106, 107, 108,
+ 109, 110, 111, 112, 113, 114, 115, 116,
+ 117, 118, 119, 120, 121, 122, 123, 124,
+ 125, 126, 127, 128, 129, 130, 131, 132,
+ 133, 134, 135, 136, 137, 138, 139, 140,
+ 141, 142, 143, 144, 145, 146, 147, 148,
+ 149, 150, 151, 152, 153, 154, 155, 156,
+ 157, 158, 159, 160, 161, 162, 163, 164,
+ 165, 166, 167, 168, 169, 170, 171, 172,
+ 173, 174, 175, 176, 177, 178, 179, 180,
+ 181, 182, 183, 184, 185, 186, 187, 188,
+ 189, 190, 191, 192, 193, 194, 195, 196,
+ 197, 198, 199, 200, 201, 202, 203, 204,
+ 205, 206, 207, 208, 209, 210, 211, 212,
+ 213, 214, 215, 216, 217, 218, 219, 220,
+ 221, 222, 223, 224, 225, 226, 227, 228,
+ 229, 230, 231, 232, 233, 234, 235, 236,
+ 237, 238, 239, 240, 241, 242, 243, 244,
+ 245, 246, 247, 248, 249, 250, 251, 252,
+ 253, 254, 255, 256, 257, 258, 259, 260,
+ 261, 262, 263, 264, 265, 266, 267, 268,
+ 269, 270, 271, 272, 273, 274, 275, 276,
+ 277, 278, 279, 280, 281, 282, 283, 284,
+ 285, 286, 287, 288, 289, 290, 291, 292,
+ 293, 294, 295, 296, 297, 298, 299, 300,
+ 301, 302, 303, 304, 305, 306, 307, 308,
+ 309, 310, 311, 312, 313, 314, 315, 316,
+ 317, 318, 319, 320, 321, 322, 323, 324,
+ 325, 326, 327, 328, 329, 330, 331, 332,
+ 333, 334, 335, 336, 337, 338, 339, 340,
+ 341, 342, 343, 344, 345, 346, 347, 348,
+ 349, 350, 351, 352, 353, 354, 355, 356,
+ 357, 358, 359, 360, 361, 362, 363, 364,
+ 365, 366, 367, 368, 369, 370, 371, 372,
+ 373, 374, 375, 376, 377, 378, 379, 380,
+ 381, 382, 383, 384, 385, 386, 387, 388,
+ 389, 390, 391, 392, 393, 394, 395, 396,
+ 397, 398, 399, 400, 401, 402, 403, 404,
+ 405, 406, 407, 408, 409, 410, 411, 412,
+ 413, 414, 415, 416, 417, 418, 419, 420,
+ 421, 422, 423, 424, 425, 426, 427, 428,
+ 429, 430, 431, 432, 433, 434, 435, 436,
+ 437, 438, 439, 440, 441, 442, 443, 444,
+ 445, 446, 447, 448, 449, 450, 451, 452,
+ 453, 454, 455, 456, 457, 458, 459, 460,
+ 461, 462, 463, 464, 465, 466, 467, 468,
+ 469, 470, 471, 472, 473, 474, 475, 476,
+ 477, 478, 479, 480, 481, 482, 483, 484,
+ 485, 486, 487, 488, 489, 490, 491, 492,
+ 493, 494, 495, 496, 497, 498, 499, 500,
+ 501, 502, 503, 504, 505, 506, 507, 508,
+ 509, 510, 511, 512, 513, 514, 515, 516,
+ 517, 518, 519, 520, 521, 522, 523, 524,
+ 525, 526, 527, 528, 529, 530, 531, 532,
+ 533, 534, 535, 536, 537, 538, 539, 540,
+ 541, 542, 543, 544, 545, 546, 547, 548,
+ 549, 550, 551, 552, 553, 554, 555, 556,
+ 557, 558, 559, 560, 561, 562, 563, 564,
+ 565, 566, 567, 568, 569, 570, 571, 572,
+ 573, 574, 575, 576, 577, 578, 579, 580,
+ -69, -70, -71, -72, -73, -74, -75, -76,
+ -77, -78, -79, -80, -81, -82, -83, -84,
+ -85, -86, -87, -88, -89, -90, -91, -92,
+ -93, -94, -95, -96, -97, -98, -99, -100,
+ -101, -102, -103, -104, -105, -106, -107, -108,
+ -109, -110, -111, -112, -113, -114, -115, -116,
+ -117, -118, -119, -120, -121, -122, -123, -124,
+ -125, -126, -127, -128, -129, -130, -131, -132,
+ -133, -134, -135, -136, -137, -138, -139, -140,
+ -141, -142, -143, -144, -145, -146, -147, -148,
+ -149, -150, -151, -152, -153, -154, -155, -156,
+ -157, -158, -159, -160, -161, -162, -163, -164,
+ -165, -166, -167, -168, -169, -170, -171, -172,
+ -173, -174, -175, -176, -177, -178, -179, -180,
+ -181, -182, -183, -184, -185, -186, -187, -188,
+ -189, -190, -191, -192, -193, -194, -195, -196,
+ -197, -198, -199, -200, -201, -202, -203, -204,
+ -205, -206, -207, -208, -209, -210, -211, -212,
+ -213, -214, -215, -216, -217, -218, -219, -220,
+ -221, -222, -223, -224, -225, -226, -227, -228,
+ -229, -230, -231, -232, -233, -234, -235, -236,
+ -237, -238, -239, -240, -241, -242, -243, -244,
+ -245, -246, -247, -248, -249, -250, -251, -252,
+ -253, -254, -255, -256, -257, -258, -259, -260,
+ -261, -262, -263, -264, -265, -266, -267, -268,
+ -269, -270, -271, -272, -273, -274, -275, -276,
+ -277, -278, -279, -280, -281, -282, -283, -284,
+ -285, -286, -287, -288, -289, -290, -291, -292,
+ -293, -294, -295, -296, -297, -298, -299, -300,
+ -301, -302, -303, -304, -305, -306, -307, -308,
+ -309, -310, -311, -312, -313, -314, -315, -316,
+ -317, -318, -319, -320, -321, -322, -323, -324,
+ -325, -326, -327, -328, -329, -330, -331, -332,
+ -333, -334, -335, -336, -337, -338, -339, -340,
+ -341, -342, -343, -344, -345, -346, -347, -348,
+ -349, -350, -351, -352, -353, -354, -355, -356,
+ -357, -358, -359, -360, -361, -362, -363, -364,
+ -365, -366, -367, -368, -369, -370, -371, -372,
+ -373, -374, -375, -376, -377, -378, -379, -380,
+ -381, -382, -383, -384, -385, -386, -387, -388,
+ -389, -390, -391, -392, -393, -394, -395, -396,
+ -397, -398, -399, -400, -401, -402, -403, -404,
+ -405, -406, -407, -408, -409, -410, -411, -412,
+ -413, -414, -415, -416, -417, -418, -419, -420,
+ -421, -422, -423, -424, -425, -426, -427, -428,
+ -429, -430, -431, -432, -433, -434, -435, -436,
+ -437, -438, -439, -440, -441, -442, -443, -444,
+ -445, -446, -447, -448, -449, -450, -451, -452,
+ -453, -454, -455, -456, -457, -458, -459, -460,
+ -461, -462, -463, -464, -465, -466, -467, -468,
+ -469, -470, -471, -472, -473, -474, -475, -476,
+ -477, -478, -479, -480, -481, -482, -483, -484,
+ -485, -486, -487, -488, -489, -490, -491, -492,
+ -493, -494, -495, -496, -497, -498, -499, -500,
+ -501, -502, -503, -504, -505, -506, -507, -508,
+ -509, -510, -511, -512, -513, -514, -515, -516,
+ -517, -518, -519, -520, -521, -522, -523, -524,
+ -525, -526, -527, -528, -529, -530, -531, -532,
+ -533, -534, -535, -536, -537, -538, -539, -540,
+ -541, -542, -543, -544, -545, -546, -547, -548,
+ -549, -550, -551, -552, -553, -554, -555, -556,
+ -557, -558, -559, -560, -561, -562, -563, -564,
+ -565, -566, -567, -568, -569, -570, -571, -572,
+ -573, -574, -575, -576, -577, -578, -579, -580
+};
+
+static const int16_t *const coeff_tables[32] = {
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ coeff_table_token_7_8,
+
+ coeff_table_token_7_8,
+ coeff_table_token_9,
+ coeff_table_token_10,
+ coeff_table_token_11,
+ coeff_table_token_12,
+ coeff_table_token_13,
+ coeff_table_token_14,
+ coeff_table_token_15,
+
+ coeff_table_token_16,
+ coeff_table_token_17,
+ coeff_table_token_18,
+ coeff_table_token_19,
+ coeff_table_token_20,
+ coeff_table_token_21,
+ coeff_table_token_22,
+ coeff_table_token_23_24_25_26_27_28_29,
+
+ coeff_table_token_23_24_25_26_27_28_29,
+ coeff_table_token_23_24_25_26_27_28_29,
+ coeff_table_token_23_24_25_26_27_28_29,
+ coeff_table_token_23_24_25_26_27_28_29,
+ coeff_table_token_23_24_25_26_27_28_29,
+ coeff_table_token_23_24_25_26_27_28_29,
+ coeff_table_token_30,
+ coeff_table_token_31
+};
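
Taken together, coeff_get_bits[] and the coeff_tables[] pointers above resolve a coefficient token into a signed value: the extra bits, if any, index into the per-token table, which already encodes the sign. A minimal sketch of that lookup, with the helper name purely illustrative and the zero-run part of tokens 23..31 handled as in the earlier run-length sketch:

/* illustrative helper only: map a coefficient token in the range 7..31 to its
 * signed value; tokens 0..6 are EOB runs and have no entry in coeff_tables[] */
static int expand_coeff_token(GetBitContext *gb, int token)
{
    int index = 0;
    if (coeff_get_bits[token])
        index = get_bits(gb, coeff_get_bits[token]);
    return coeff_tables[token][index];
}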
+
+static const uint16_t dc_bias[16][32][2] = {
+ { /* DC bias table 0 */
+ { 0x2D, 6 },
+ { 0x26, 7 },
+ { 0x166, 9 },
+ { 0x4E, 8 },
+ { 0x2CE, 10 },
+ { 0x59E, 11 },
+ { 0x27D, 11 },
+ { 0x8, 5 },
+ { 0x4F9, 12 },
+ { 0xF, 4 },
+ { 0xE, 4 },
+ { 0x1B, 5 },
+ { 0x6, 4 },
+ { 0x8, 4 },
+ { 0x5, 4 },
+ { 0x1A, 5 },
+ { 0x15, 5 },
+ { 0x7, 4 },
+ { 0xC, 4 },
+ { 0x1, 3 },
+ { 0x0, 3 },
+ { 0x9, 4 },
+ { 0x17, 5 },
+ { 0x29, 6 },
+ { 0x28, 6 },
+ { 0xB2, 8 },
+ { 0x4F8, 12 },
+ { 0x59F, 11 },
+ { 0x9E, 9 },
+ { 0x13F, 10 },
+ { 0x12, 6 },
+ { 0x58, 7 }
+ },
+ { /* DC bias table 1 */
+ { 0x10, 5 },
+ { 0x47, 7 },
+ { 0x1FF, 9 },
+ { 0x8C, 8 },
+ { 0x3FC, 10 },
+ { 0x46A, 11 },
+ { 0x469, 11 },
+ { 0x22, 6 },
+ { 0x11A1, 13 },
+ { 0xE, 4 },
+ { 0xD, 4 },
+ { 0x4, 4 },
+ { 0x5, 4 },
+ { 0x9, 4 },
+ { 0x6, 4 },
+ { 0x1E, 5 },
+ { 0x16, 5 },
+ { 0x7, 4 },
+ { 0xC, 4 },
+ { 0x1, 3 },
+ { 0x0, 3 },
+ { 0xA, 4 },
+ { 0x17, 5 },
+ { 0x7D, 7 },
+ { 0x7E, 7 },
+ { 0x11B, 9 },
+ { 0x8D1, 12 },
+ { 0x3FD, 10 },
+ { 0x46B, 11 },
+ { 0x11A0, 13 },
+ { 0x7C, 7 },
+ { 0xFE, 8 }
+ },
+ { /* DC bias table 2 */
+ { 0x16, 5 },
+ { 0x20, 6 },
+ { 0x86, 8 },
+ { 0x87, 8 },
+ { 0x367, 10 },
+ { 0x6CC, 11 },
+ { 0x6CB, 11 },
+ { 0x6E, 7 },
+ { 0x366D, 14 },
+ { 0xF, 4 },
+ { 0xE, 4 },
+ { 0x4, 4 },
+ { 0x5, 4 },
+ { 0xA, 4 },
+ { 0x6, 4 },
+ { 0x1A, 5 },
+ { 0x11, 5 },
+ { 0x7, 4 },
+ { 0xC, 4 },
+ { 0x1, 3 },
+ { 0x0, 3 },
+ { 0x9, 4 },
+ { 0x17, 5 },
+ { 0x6F, 7 },
+ { 0x6D, 7 },
+ { 0x364, 10 },
+ { 0xD9A, 12 },
+ { 0x6CA, 11 },
+ { 0x1B37, 13 },
+ { 0x366C, 14 },
+ { 0x42, 7 },
+ { 0xD8, 8 }
+ },
+ { /* DC bias table 3 */
+ { 0x0, 4 },
+ { 0x2D, 6 },
+ { 0xF7, 8 },
+ { 0x58, 7 },
+ { 0x167, 9 },
+ { 0x2CB, 10 },
+ { 0x2CA, 10 },
+ { 0xE, 6 },
+ { 0x1661, 13 },
+ { 0x3, 3 },
+ { 0x2, 3 },
+ { 0x8, 4 },
+ { 0x9, 4 },
+ { 0xD, 4 },
+ { 0x2, 4 },
+ { 0x1F, 5 },
+ { 0x17, 5 },
+ { 0x1, 4 },
+ { 0xC, 4 },
+ { 0xE, 4 },
+ { 0xA, 4 },
+ { 0x6, 5 },
+ { 0x78, 7 },
+ { 0xF, 6 },
+ { 0x7A, 7 },
+ { 0x164, 9 },
+ { 0x599, 11 },
+ { 0x2CD, 10 },
+ { 0xB31, 12 },
+ { 0x1660, 13 },
+ { 0x79, 7 },
+ { 0xF6, 8 }
+ },
+ { /* DC bias table 4 */
+ { 0x3, 4 },
+ { 0x3C, 6 },
+ { 0xF, 7 },
+ { 0x7A, 7 },
+ { 0x1D, 8 },
+ { 0x20, 9 },
+ { 0x72, 10 },
+ { 0x6, 6 },
+ { 0x399, 13 },
+ { 0x4, 3 },
+ { 0x5, 3 },
+ { 0x5, 4 },
+ { 0x6, 4 },
+ { 0xE, 4 },
+ { 0x4, 4 },
+ { 0x0, 4 },
+ { 0x19, 5 },
+ { 0x2, 4 },
+ { 0xD, 4 },
+ { 0x7, 4 },
+ { 0x1F, 5 },
+ { 0x30, 6 },
+ { 0x11, 8 },
+ { 0x31, 6 },
+ { 0x5, 6 },
+ { 0x21, 9 },
+ { 0xE7, 11 },
+ { 0x38, 9 },
+ { 0x1CD, 12 },
+ { 0x398, 13 },
+ { 0x7B, 7 },
+ { 0x9, 7 }
+ },
+ { /* DC bias table 5 */
+ { 0x9, 4 },
+ { 0x2, 5 },
+ { 0x74, 7 },
+ { 0x7, 6 },
+ { 0xEC, 8 },
+ { 0xD1, 9 },
+ { 0x1A6, 10 },
+ { 0x6, 6 },
+ { 0xD21, 13 },
+ { 0x5, 3 },
+ { 0x6, 3 },
+ { 0x8, 4 },
+ { 0x7, 4 },
+ { 0xF, 4 },
+ { 0x4, 4 },
+ { 0x0, 4 },
+ { 0x1C, 5 },
+ { 0x2, 4 },
+ { 0x5, 4 },
+ { 0x3, 4 },
+ { 0xC, 5 },
+ { 0x35, 7 },
+ { 0x1A7, 10 },
+ { 0x1B, 6 },
+ { 0x77, 7 },
+ { 0x1A5, 10 },
+ { 0x349, 11 },
+ { 0xD0, 9 },
+ { 0x691, 12 },
+ { 0xD20, 13 },
+ { 0x75, 7 },
+ { 0xED, 8 }
+ },
+ { /* DC bias table 6 */
+ { 0xA, 4 },
+ { 0xC, 5 },
+ { 0x12, 6 },
+ { 0x1B, 6 },
+ { 0xB7, 8 },
+ { 0x16C, 9 },
+ { 0x99, 9 },
+ { 0x5A, 7 },
+ { 0x16D8, 13 },
+ { 0x7, 3 },
+ { 0x6, 3 },
+ { 0x9, 4 },
+ { 0x8, 4 },
+ { 0x0, 3 },
+ { 0x5, 4 },
+ { 0x17, 5 },
+ { 0xE, 5 },
+ { 0x2, 4 },
+ { 0x3, 4 },
+ { 0xF, 5 },
+ { 0x1A, 6 },
+ { 0x4D, 8 },
+ { 0x2DB3, 14 },
+ { 0x2C, 6 },
+ { 0x11, 6 },
+ { 0x2DA, 10 },
+ { 0x5B7, 11 },
+ { 0x98, 9 },
+ { 0xB6D, 12 },
+ { 0x2DB2, 14 },
+ { 0x10, 6 },
+ { 0x27, 7 }
+ },
+ { /* DC bias table 7 */
+ { 0xD, 4 },
+ { 0xF, 5 },
+ { 0x1D, 6 },
+ { 0x8, 5 },
+ { 0x51, 7 },
+ { 0x56, 8 },
+ { 0xAF, 9 },
+ { 0x2A, 7 },
+ { 0x148A, 13 },
+ { 0x7, 3 },
+ { 0x0, 2 },
+ { 0x8, 4 },
+ { 0x9, 4 },
+ { 0xC, 4 },
+ { 0x6, 4 },
+ { 0x17, 5 },
+ { 0xB, 5 },
+ { 0x16, 5 },
+ { 0x15, 5 },
+ { 0x9, 5 },
+ { 0x50, 7 },
+ { 0xAE, 9 },
+ { 0x2917, 14 },
+ { 0x1C, 6 },
+ { 0x14, 6 },
+ { 0x290, 10 },
+ { 0x523, 11 },
+ { 0x149, 9 },
+ { 0xA44, 12 },
+ { 0x2916, 14 },
+ { 0x53, 7 },
+ { 0xA5, 8 }
+ },
+ { /* DC bias table 8 */
+ { 0x1, 4 },
+ { 0x1D, 6 },
+ { 0xF5, 8 },
+ { 0xF4, 8 },
+ { 0x24D, 10 },
+ { 0x499, 11 },
+ { 0x498, 11 },
+ { 0x1, 5 },
+ { 0x21, 6 },
+ { 0x6, 3 },
+ { 0x5, 3 },
+ { 0x6, 4 },
+ { 0x5, 4 },
+ { 0x2, 4 },
+ { 0x7, 5 },
+ { 0x25, 6 },
+ { 0x7B, 7 },
+ { 0x1C, 6 },
+ { 0x20, 6 },
+ { 0xD, 6 },
+ { 0x48, 7 },
+ { 0x92, 8 },
+ { 0x127, 9 },
+ { 0xE, 4 },
+ { 0x4, 4 },
+ { 0x11, 5 },
+ { 0xC, 6 },
+ { 0x3C, 6 },
+ { 0xF, 5 },
+ { 0x0, 5 },
+ { 0x1F, 5 },
+ { 0x13, 5 }
+ },
+ { /* DC bias table 9 */
+ { 0x5, 4 },
+ { 0x3C, 6 },
+ { 0x40, 7 },
+ { 0xD, 7 },
+ { 0x31, 9 },
+ { 0x61, 10 },
+ { 0x60, 10 },
+ { 0x2, 5 },
+ { 0xF5, 8 },
+ { 0x6, 3 },
+ { 0x5, 3 },
+ { 0x7, 4 },
+ { 0x6, 4 },
+ { 0x2, 4 },
+ { 0x9, 5 },
+ { 0x25, 6 },
+ { 0x7, 6 },
+ { 0x21, 6 },
+ { 0x24, 6 },
+ { 0x10, 6 },
+ { 0x41, 7 },
+ { 0xF4, 8 },
+ { 0x19, 8 },
+ { 0xE, 4 },
+ { 0x3, 4 },
+ { 0x11, 5 },
+ { 0x11, 6 },
+ { 0x3F, 6 },
+ { 0x3E, 6 },
+ { 0x7B, 7 },
+ { 0x0, 4 },
+ { 0x13, 5 }
+ },
+ { /* DC bias table 10 */
+ { 0xA, 4 },
+ { 0x7, 5 },
+ { 0x1, 6 },
+ { 0x9, 6 },
+ { 0x131, 9 },
+ { 0x261, 10 },
+ { 0x260, 10 },
+ { 0x15, 6 },
+ { 0x1, 7 },
+ { 0x7, 3 },
+ { 0x6, 3 },
+ { 0x8, 4 },
+ { 0x7, 4 },
+ { 0x6, 4 },
+ { 0x12, 5 },
+ { 0x2F, 6 },
+ { 0x14, 6 },
+ { 0x27, 6 },
+ { 0x2D, 6 },
+ { 0x16, 6 },
+ { 0x4D, 7 },
+ { 0x99, 8 },
+ { 0x0, 7 },
+ { 0x4, 4 },
+ { 0x1, 4 },
+ { 0x5, 5 },
+ { 0x17, 6 },
+ { 0x2E, 6 },
+ { 0x2C, 6 },
+ { 0x8, 6 },
+ { 0x6, 5 },
+ { 0x1, 5 }
+ },
+ { /* DC bias table 11 */
+ { 0x0, 3 },
+ { 0xE, 5 },
+ { 0x17, 6 },
+ { 0x2A, 6 },
+ { 0x10, 7 },
+ { 0xF9, 10 },
+ { 0xF8, 10 },
+ { 0x1E, 7 },
+ { 0x3F, 8 },
+ { 0x7, 3 },
+ { 0x6, 3 },
+ { 0x9, 4 },
+ { 0x8, 4 },
+ { 0x6, 4 },
+ { 0xF, 5 },
+ { 0x5, 5 },
+ { 0x16, 6 },
+ { 0x29, 6 },
+ { 0x2B, 6 },
+ { 0x15, 6 },
+ { 0x50, 7 },
+ { 0x11, 7 },
+ { 0x7D, 9 },
+ { 0x4, 4 },
+ { 0x17, 5 },
+ { 0x6, 5 },
+ { 0x14, 6 },
+ { 0x2C, 6 },
+ { 0x2D, 6 },
+ { 0xE, 6 },
+ { 0x9, 6 },
+ { 0x51, 7 }
+ },
+ { /* DC bias table 12 */
+ { 0x2, 3 },
+ { 0x18, 5 },
+ { 0x2F, 6 },
+ { 0xD, 5 },
+ { 0x53, 7 },
+ { 0x295, 10 },
+ { 0x294, 10 },
+ { 0xA4, 8 },
+ { 0x7C, 8 },
+ { 0x0, 2 },
+ { 0x7, 3 },
+ { 0x9, 4 },
+ { 0x8, 4 },
+ { 0x1B, 5 },
+ { 0xC, 5 },
+ { 0x28, 6 },
+ { 0x6A, 7 },
+ { 0x1E, 6 },
+ { 0x1D, 6 },
+ { 0x69, 7 },
+ { 0xD7, 8 },
+ { 0x7D, 8 },
+ { 0x14B, 9 },
+ { 0x19, 5 },
+ { 0x16, 5 },
+ { 0x2E, 6 },
+ { 0x1C, 6 },
+ { 0x2B, 6 },
+ { 0x2A, 6 },
+ { 0x68, 7 },
+ { 0x3F, 7 },
+ { 0xD6, 8 }
+ },
+ { /* DC bias table 13 */
+ { 0x2, 3 },
+ { 0x1B, 5 },
+ { 0xC, 5 },
+ { 0x18, 5 },
+ { 0x29, 6 },
+ { 0x7F, 8 },
+ { 0x2F0, 10 },
+ { 0x198, 9 },
+ { 0x179, 9 },
+ { 0x0, 2 },
+ { 0x7, 3 },
+ { 0x9, 4 },
+ { 0x8, 4 },
+ { 0x1A, 5 },
+ { 0xD, 5 },
+ { 0x2A, 6 },
+ { 0x64, 7 },
+ { 0x1E, 6 },
+ { 0x67, 7 },
+ { 0x5F, 7 },
+ { 0xCD, 8 },
+ { 0x7E, 8 },
+ { 0x2F1, 10 },
+ { 0x16, 5 },
+ { 0xE, 5 },
+ { 0x2E, 6 },
+ { 0x65, 7 },
+ { 0x2B, 6 },
+ { 0x28, 6 },
+ { 0x3E, 7 },
+ { 0xBD, 8 },
+ { 0x199, 9 }
+ },
+ { /* DC bias table 14 */
+ { 0x2, 3 },
+ { 0x7, 4 },
+ { 0x16, 5 },
+ { 0x6, 4 },
+ { 0x36, 6 },
+ { 0x5C, 7 },
+ { 0x15D, 9 },
+ { 0x15C, 9 },
+ { 0x2BF, 10 },
+ { 0x0, 2 },
+ { 0x7, 3 },
+ { 0x9, 4 },
+ { 0x8, 4 },
+ { 0x18, 5 },
+ { 0x34, 6 },
+ { 0x2A, 6 },
+ { 0x5E, 7 },
+ { 0x6A, 7 },
+ { 0x64, 7 },
+ { 0x5D, 7 },
+ { 0xCB, 8 },
+ { 0xAD, 8 },
+ { 0x2BE, 10 },
+ { 0x14, 5 },
+ { 0x33, 6 },
+ { 0x6E, 7 },
+ { 0x5F, 7 },
+ { 0x6F, 7 },
+ { 0x6B, 7 },
+ { 0xCA, 8 },
+ { 0xAC, 8 },
+ { 0x15E, 9 }
+ },
+ { /* DC bias table 15 */
+ { 0xF, 4 },
+ { 0x1D, 5 },
+ { 0x18, 5 },
+ { 0xB, 4 },
+ { 0x19, 5 },
+ { 0x29, 6 },
+ { 0xD6, 8 },
+ { 0x551, 11 },
+ { 0xAA1, 12 },
+ { 0x1, 2 },
+ { 0x0, 2 },
+ { 0x9, 4 },
+ { 0x8, 4 },
+ { 0x1B, 5 },
+ { 0x38, 6 },
+ { 0x28, 6 },
+ { 0x57, 7 },
+ { 0x6A, 7 },
+ { 0x68, 7 },
+ { 0x56, 7 },
+ { 0xE5, 8 },
+ { 0x155, 9 },
+ { 0xAA0, 12 },
+ { 0x73, 7 },
+ { 0x69, 7 },
+ { 0xD7, 8 },
+ { 0xAB, 8 },
+ { 0xE4, 8 },
+ { 0xA9, 8 },
+ { 0x151, 9 },
+ { 0x150, 9 },
+ { 0x2A9, 10 }
+ }
+};
+
+static const uint16_t ac_bias_0[16][32][2] = {
+ { /* AC bias group 1, table 0 */
+ { 0x8, 5 },
+ { 0x25, 7 },
+ { 0x17A, 9 },
+ { 0x2F7, 10 },
+ { 0xBDB, 12 },
+ { 0x17B4, 13 },
+ { 0x2F6B, 14 },
+ { 0x1D, 5 },
+ { 0x2F6A, 14 },
+ { 0x8, 4 },
+ { 0x7, 4 },
+ { 0x1, 4 },
+ { 0x2, 4 },
+ { 0xA, 4 },
+ { 0x6, 4 },
+ { 0x0, 4 },
+ { 0x1C, 5 },
+ { 0x9, 4 },
+ { 0xD, 4 },
+ { 0xF, 4 },
+ { 0xC, 4 },
+ { 0x3, 4 },
+ { 0xA, 5 },
+ { 0x16, 5 },
+ { 0x13, 6 },
+ { 0x5D, 7 },
+ { 0x24, 7 },
+ { 0xBC, 8 },
+ { 0x5C, 7 },
+ { 0x5EC, 11 },
+ { 0xB, 5 },
+ { 0x5F, 7 }
+ },
+ { /* AC bias group 1, table 1 */
+ { 0xF, 5 },
+ { 0x10, 6 },
+ { 0x4B, 8 },
+ { 0xC6, 8 },
+ { 0x31D, 10 },
+ { 0xC71, 12 },
+ { 0xC70, 12 },
+ { 0x1, 4 },
+ { 0xC73, 12 },
+ { 0x8, 4 },
+ { 0x9, 4 },
+ { 0x2, 4 },
+ { 0x3, 4 },
+ { 0xB, 4 },
+ { 0x6, 4 },
+ { 0x0, 4 },
+ { 0x1C, 5 },
+ { 0x5, 4 },
+ { 0xD, 4 },
+ { 0xF, 4 },
+ { 0xA, 4 },
+ { 0x19, 5 },
+ { 0x13, 6 },
+ { 0x1D, 5 },
+ { 0x30, 6 },
+ { 0x62, 7 },
+ { 0x24, 7 },
+ { 0x4A, 8 },
+ { 0x18F, 9 },
+ { 0xC72, 12 },
+ { 0xE, 5 },
+ { 0x11, 6 }
+ },
+ { /* AC bias group 1, table 2 */
+ { 0x1B, 5 },
+ { 0x3, 6 },
+ { 0x8D, 8 },
+ { 0x40, 7 },
+ { 0x239, 10 },
+ { 0x471, 11 },
+ { 0x8E0, 12 },
+ { 0x3, 4 },
+ { 0x11C3, 13 },
+ { 0xA, 4 },
+ { 0x9, 4 },
+ { 0x4, 4 },
+ { 0x5, 4 },
+ { 0xE, 4 },
+ { 0x7, 4 },
+ { 0x1, 4 },
+ { 0x1E, 5 },
+ { 0x6, 4 },
+ { 0xC, 4 },
+ { 0xB, 4 },
+ { 0x2, 4 },
+ { 0x0, 5 },
+ { 0x41, 7 },
+ { 0x1F, 5 },
+ { 0x22, 6 },
+ { 0x2, 6 },
+ { 0x8F, 8 },
+ { 0x8C, 8 },
+ { 0x11D, 9 },
+ { 0x11C2, 13 },
+ { 0x1A, 5 },
+ { 0x21, 6 }
+ },
+ { /* AC bias group 1, table 3 */
+ { 0x1F, 5 },
+ { 0x3, 6 },
+ { 0x3, 7 },
+ { 0x43, 7 },
+ { 0xB, 9 },
+ { 0x15, 10 },
+ { 0x51, 12 },
+ { 0x3, 4 },
+ { 0x50, 12 },
+ { 0xD, 4 },
+ { 0xC, 4 },
+ { 0x4, 4 },
+ { 0x6, 4 },
+ { 0xE, 4 },
+ { 0xA, 4 },
+ { 0x1, 4 },
+ { 0x1E, 5 },
+ { 0x5, 4 },
+ { 0x9, 4 },
+ { 0x7, 4 },
+ { 0x11, 5 },
+ { 0x2, 6 },
+ { 0x4, 8 },
+ { 0x2, 4 },
+ { 0x2D, 6 },
+ { 0x20, 6 },
+ { 0x42, 7 },
+ { 0x1, 7 },
+ { 0x0, 7 },
+ { 0x29, 11 },
+ { 0x17, 5 },
+ { 0x2C, 6 }
+ },
+ { /* AC bias group 1, table 4 */
+ { 0x3, 4 },
+ { 0x1F, 6 },
+ { 0x3A, 7 },
+ { 0x5D, 7 },
+ { 0x173, 9 },
+ { 0x2E4, 10 },
+ { 0x172D, 13 },
+ { 0x4, 4 },
+ { 0x172C, 13 },
+ { 0xF, 4 },
+ { 0xE, 4 },
+ { 0x9, 4 },
+ { 0x8, 4 },
+ { 0xC, 4 },
+ { 0xA, 4 },
+ { 0x1, 4 },
+ { 0x16, 5 },
+ { 0x2, 4 },
+ { 0x5, 4 },
+ { 0x1A, 5 },
+ { 0x2F, 6 },
+ { 0x38, 7 },
+ { 0x5CA, 11 },
+ { 0x6, 4 },
+ { 0x37, 6 },
+ { 0x1E, 6 },
+ { 0x3B, 7 },
+ { 0x39, 7 },
+ { 0xB8, 8 },
+ { 0xB97, 12 },
+ { 0x0, 4 },
+ { 0x36, 6 }
+ },
+ { /* AC bias group 1, table 5 */
+ { 0x6, 4 },
+ { 0x37, 6 },
+ { 0x5D, 7 },
+ { 0xC, 6 },
+ { 0xB9, 8 },
+ { 0x2E3, 10 },
+ { 0x5C4, 11 },
+ { 0x4, 4 },
+ { 0x1715, 13 },
+ { 0x0, 3 },
+ { 0xF, 4 },
+ { 0x8, 4 },
+ { 0x7, 4 },
+ { 0xC, 4 },
+ { 0x9, 4 },
+ { 0x1D, 5 },
+ { 0x16, 5 },
+ { 0x1C, 5 },
+ { 0x1A, 5 },
+ { 0xB, 5 },
+ { 0x5E, 7 },
+ { 0x170, 9 },
+ { 0x1714, 13 },
+ { 0xA, 4 },
+ { 0xA, 5 },
+ { 0x36, 6 },
+ { 0x5F, 7 },
+ { 0x1B, 7 },
+ { 0x1A, 7 },
+ { 0xB8B, 12 },
+ { 0x2, 4 },
+ { 0x7, 5 }
+ },
+ { /* AC bias group 1, table 6 */
+ { 0xC, 4 },
+ { 0xB, 5 },
+ { 0x79, 7 },
+ { 0x22, 6 },
+ { 0xF0, 8 },
+ { 0x119, 9 },
+ { 0x230, 10 },
+ { 0x1D, 5 },
+ { 0x8C4, 12 },
+ { 0x1, 3 },
+ { 0x0, 3 },
+ { 0xA, 4 },
+ { 0x9, 4 },
+ { 0xB, 4 },
+ { 0x7, 4 },
+ { 0x1C, 5 },
+ { 0x3D, 6 },
+ { 0xD, 5 },
+ { 0x8, 5 },
+ { 0x15, 6 },
+ { 0x8D, 8 },
+ { 0x118B, 13 },
+ { 0x118A, 13 },
+ { 0xD, 4 },
+ { 0x10, 5 },
+ { 0x9, 5 },
+ { 0x14, 6 },
+ { 0x47, 7 },
+ { 0xF1, 8 },
+ { 0x463, 11 },
+ { 0x1F, 5 },
+ { 0xC, 5 }
+ },
+ { /* AC bias group 1, table 7 */
+ { 0x0, 3 },
+ { 0x1A, 5 },
+ { 0x33, 6 },
+ { 0xC, 5 },
+ { 0x46, 7 },
+ { 0x1E3, 9 },
+ { 0x3C5, 10 },
+ { 0x17, 5 },
+ { 0x1E21, 13 },
+ { 0x2, 3 },
+ { 0x1, 3 },
+ { 0x9, 4 },
+ { 0xA, 4 },
+ { 0x7, 4 },
+ { 0x1B, 5 },
+ { 0x3D, 6 },
+ { 0x1B, 6 },
+ { 0x22, 6 },
+ { 0x79, 7 },
+ { 0xF0, 8 },
+ { 0x1E20, 13 },
+ { 0x1E23, 13 },
+ { 0x1E22, 13 },
+ { 0xE, 4 },
+ { 0x16, 5 },
+ { 0x18, 5 },
+ { 0x32, 6 },
+ { 0x1A, 6 },
+ { 0x47, 7 },
+ { 0x789, 11 },
+ { 0x1F, 5 },
+ { 0x10, 5 }
+ },
+ { /* AC bias group 1, table 8 */
+ { 0x1D, 5 },
+ { 0x61, 7 },
+ { 0x4E, 8 },
+ { 0x9E, 9 },
+ { 0x27C, 11 },
+ { 0x9F5, 13 },
+ { 0x9F4, 13 },
+ { 0x3, 4 },
+ { 0x60, 7 },
+ { 0x0, 3 },
+ { 0xF, 4 },
+ { 0xB, 4 },
+ { 0xA, 4 },
+ { 0x9, 4 },
+ { 0x5, 4 },
+ { 0xD, 5 },
+ { 0x31, 6 },
+ { 0x8, 5 },
+ { 0x38, 6 },
+ { 0x12, 6 },
+ { 0x26, 7 },
+ { 0x13F, 10 },
+ { 0x4FB, 12 },
+ { 0xD, 4 },
+ { 0x2, 4 },
+ { 0xC, 5 },
+ { 0x39, 6 },
+ { 0x1C, 6 },
+ { 0xF, 5 },
+ { 0x1D, 6 },
+ { 0x8, 4 },
+ { 0x19, 5 }
+ },
+ { /* AC bias group 1, table 9 */
+ { 0x7, 4 },
+ { 0x19, 6 },
+ { 0xAB, 8 },
+ { 0xAA, 8 },
+ { 0x119, 10 },
+ { 0x461, 12 },
+ { 0x460, 12 },
+ { 0x1B, 5 },
+ { 0x47, 8 },
+ { 0x1, 3 },
+ { 0x0, 3 },
+ { 0xC, 4 },
+ { 0xB, 4 },
+ { 0x9, 4 },
+ { 0x5, 4 },
+ { 0xD, 5 },
+ { 0x35, 6 },
+ { 0x3D, 6 },
+ { 0x3C, 6 },
+ { 0x18, 6 },
+ { 0x22, 7 },
+ { 0x8D, 9 },
+ { 0x231, 11 },
+ { 0xE, 4 },
+ { 0x1F, 5 },
+ { 0x9, 5 },
+ { 0x2B, 6 },
+ { 0x10, 6 },
+ { 0x34, 6 },
+ { 0x54, 7 },
+ { 0x8, 4 },
+ { 0x14, 5 }
+ },
+ { /* AC bias group 1, table 10 */
+ { 0xC, 4 },
+ { 0x5, 5 },
+ { 0x8, 6 },
+ { 0x5B, 7 },
+ { 0x4D, 9 },
+ { 0x131, 11 },
+ { 0x261, 12 },
+ { 0x1A, 5 },
+ { 0x12, 7 },
+ { 0x0, 3 },
+ { 0xF, 4 },
+ { 0xA, 4 },
+ { 0x9, 4 },
+ { 0x6, 4 },
+ { 0x1B, 5 },
+ { 0x6, 5 },
+ { 0x1C, 6 },
+ { 0x2C, 6 },
+ { 0x15, 6 },
+ { 0x5A, 7 },
+ { 0x27, 8 },
+ { 0x99, 10 },
+ { 0x260, 12 },
+ { 0xE, 4 },
+ { 0x4, 4 },
+ { 0xF, 5 },
+ { 0x7, 5 },
+ { 0x1D, 6 },
+ { 0xB, 5 },
+ { 0x14, 6 },
+ { 0x8, 4 },
+ { 0x17, 5 }
+ },
+ { /* AC bias group 1, table 11 */
+ { 0xF, 4 },
+ { 0x13, 5 },
+ { 0x75, 7 },
+ { 0x24, 6 },
+ { 0x95, 8 },
+ { 0x251, 10 },
+ { 0x4A0, 11 },
+ { 0x10, 5 },
+ { 0xC8, 8 },
+ { 0x2, 3 },
+ { 0x1, 3 },
+ { 0x1, 4 },
+ { 0x0, 4 },
+ { 0x1A, 5 },
+ { 0x11, 5 },
+ { 0x2C, 6 },
+ { 0x65, 7 },
+ { 0x74, 7 },
+ { 0x4B, 7 },
+ { 0xC9, 8 },
+ { 0x129, 9 },
+ { 0x943, 12 },
+ { 0x942, 12 },
+ { 0x3, 3 },
+ { 0xA, 4 },
+ { 0x1C, 5 },
+ { 0x18, 5 },
+ { 0x33, 6 },
+ { 0x17, 5 },
+ { 0x2D, 6 },
+ { 0x1B, 5 },
+ { 0x3B, 6 }
+ },
+ { /* AC bias group 1, table 12 */
+ { 0x3, 3 },
+ { 0x1A, 5 },
+ { 0x2D, 6 },
+ { 0x38, 6 },
+ { 0x28, 7 },
+ { 0x395, 10 },
+ { 0xE51, 12 },
+ { 0x37, 6 },
+ { 0xE4, 8 },
+ { 0x1, 3 },
+ { 0x0, 3 },
+ { 0x1F, 5 },
+ { 0x1E, 5 },
+ { 0x17, 5 },
+ { 0x3A, 6 },
+ { 0x73, 7 },
+ { 0x2A, 7 },
+ { 0x2B, 7 },
+ { 0x29, 7 },
+ { 0x1CB, 9 },
+ { 0x729, 11 },
+ { 0x1CA1, 13 },
+ { 0x1CA0, 13 },
+ { 0x4, 3 },
+ { 0xA, 4 },
+ { 0x4, 4 },
+ { 0x18, 5 },
+ { 0x36, 6 },
+ { 0xB, 5 },
+ { 0x2C, 6 },
+ { 0x19, 5 },
+ { 0x3B, 6 }
+ },
+ { /* AC bias group 1, table 13 */
+ { 0x4, 3 },
+ { 0x4, 4 },
+ { 0x3F, 6 },
+ { 0x17, 5 },
+ { 0x75, 7 },
+ { 0x1F5, 9 },
+ { 0x7D1, 11 },
+ { 0x17, 6 },
+ { 0x1F6, 9 },
+ { 0x1, 3 },
+ { 0x0, 3 },
+ { 0x1B, 5 },
+ { 0x1A, 5 },
+ { 0xA, 5 },
+ { 0x32, 6 },
+ { 0x74, 7 },
+ { 0xF8, 8 },
+ { 0xF9, 8 },
+ { 0x1F7, 9 },
+ { 0x3E9, 10 },
+ { 0xFA0, 12 },
+ { 0x1F43, 13 },
+ { 0x1F42, 13 },
+ { 0x3, 3 },
+ { 0xA, 4 },
+ { 0x1E, 5 },
+ { 0x1C, 5 },
+ { 0x3B, 6 },
+ { 0x18, 5 },
+ { 0x16, 6 },
+ { 0x16, 5 },
+ { 0x33, 6 }
+ },
+ { /* AC bias group 1, table 14 */
+ { 0x4, 3 },
+ { 0x7, 4 },
+ { 0x18, 5 },
+ { 0x1E, 5 },
+ { 0x36, 6 },
+ { 0x31, 7 },
+ { 0x177, 9 },
+ { 0x77, 7 },
+ { 0x176, 9 },
+ { 0x1, 3 },
+ { 0x0, 3 },
+ { 0x1A, 5 },
+ { 0x19, 5 },
+ { 0x3A, 6 },
+ { 0x19, 6 },
+ { 0x5C, 7 },
+ { 0xBA, 8 },
+ { 0x61, 8 },
+ { 0xC1, 9 },
+ { 0x180, 10 },
+ { 0x302, 11 },
+ { 0x607, 12 },
+ { 0x606, 12 },
+ { 0x2, 3 },
+ { 0xA, 4 },
+ { 0x1F, 5 },
+ { 0x1C, 5 },
+ { 0x37, 6 },
+ { 0x16, 5 },
+ { 0x76, 7 },
+ { 0xD, 5 },
+ { 0x2F, 6 }
+ },
+ { /* AC bias group 1, table 15 */
+ { 0x0, 3 },
+ { 0xA, 4 },
+ { 0x1A, 5 },
+ { 0xC, 4 },
+ { 0x1D, 5 },
+ { 0x39, 6 },
+ { 0x78, 7 },
+ { 0x5E, 7 },
+ { 0x393, 11 },
+ { 0x2, 3 },
+ { 0x1, 3 },
+ { 0x16, 5 },
+ { 0xF, 5 },
+ { 0x2E, 6 },
+ { 0x5F, 7 },
+ { 0x73, 8 },
+ { 0xE5, 9 },
+ { 0x1C8, 10 },
+ { 0xE4A, 13 },
+ { 0x1C97, 14 },
+ { 0x1C96, 14 },
+ { 0xE49, 13 },
+ { 0xE48, 13 },
+ { 0x4, 3 },
+ { 0x6, 4 },
+ { 0x1F, 5 },
+ { 0x1B, 5 },
+ { 0x1D, 6 },
+ { 0x38, 6 },
+ { 0x38, 7 },
+ { 0x3D, 6 },
+ { 0x79, 7 }
+ }
+};
+
+static const uint16_t ac_bias_1[16][32][2] = {
+ { /* AC bias group 2, table 0 */
+ { 0xB, 5 },
+ { 0x2B, 7 },
+ { 0x54, 8 },
+ { 0x1B7, 9 },
+ { 0x6D9, 11 },
+ { 0xDB1, 12 },
+ { 0xDB0, 12 },
+ { 0x2, 4 },
+ { 0xAB, 9 },
+ { 0x9, 4 },
+ { 0xA, 4 },
+ { 0x7, 4 },
+ { 0x8, 4 },
+ { 0xF, 4 },
+ { 0xC, 4 },
+ { 0x3, 4 },
+ { 0x1D, 5 },
+ { 0x4, 4 },
+ { 0xB, 4 },
+ { 0x6, 4 },
+ { 0x1A, 5 },
+ { 0x3, 6 },
+ { 0xAA, 9 },
+ { 0x1, 4 },
+ { 0x0, 5 },
+ { 0x14, 6 },
+ { 0x6C, 7 },
+ { 0xDA, 8 },
+ { 0x2, 6 },
+ { 0x36D, 10 },
+ { 0x1C, 5 },
+ { 0x37, 6 }
+ },
+ { /* AC bias group 2, table 1 */
+ { 0x1D, 5 },
+ { 0x4, 6 },
+ { 0xB6, 8 },
+ { 0x6A, 8 },
+ { 0x5B9, 11 },
+ { 0x16E1, 13 },
+ { 0x16E0, 13 },
+ { 0x7, 4 },
+ { 0x16F, 9 },
+ { 0xC, 4 },
+ { 0xD, 4 },
+ { 0x9, 4 },
+ { 0x8, 4 },
+ { 0xF, 4 },
+ { 0xA, 4 },
+ { 0x3, 4 },
+ { 0x17, 5 },
+ { 0x2, 4 },
+ { 0x4, 4 },
+ { 0x1C, 5 },
+ { 0x2C, 6 },
+ { 0x6B, 8 },
+ { 0xB71, 12 },
+ { 0x5, 4 },
+ { 0x3, 5 },
+ { 0x1B, 6 },
+ { 0x5A, 7 },
+ { 0x34, 7 },
+ { 0x5, 6 },
+ { 0x2DD, 10 },
+ { 0x0, 4 },
+ { 0xC, 5 }
+ },
+ { /* AC bias group 2, table 2 */
+ { 0x3, 4 },
+ { 0x7F, 7 },
+ { 0xA1, 8 },
+ { 0xA0, 8 },
+ { 0x20C, 10 },
+ { 0x834, 12 },
+ { 0x106B, 13 },
+ { 0x7, 4 },
+ { 0x82, 8 },
+ { 0xE, 4 },
+ { 0xD, 4 },
+ { 0xB, 4 },
+ { 0xC, 4 },
+ { 0x0, 3 },
+ { 0x9, 4 },
+ { 0x2, 4 },
+ { 0x11, 5 },
+ { 0x1E, 5 },
+ { 0x15, 5 },
+ { 0x3E, 6 },
+ { 0x40, 7 },
+ { 0x41B, 11 },
+ { 0x106A, 13 },
+ { 0x6, 4 },
+ { 0xA, 5 },
+ { 0x29, 6 },
+ { 0x7E, 7 },
+ { 0x51, 7 },
+ { 0x21, 6 },
+ { 0x107, 9 },
+ { 0x4, 4 },
+ { 0xB, 5 }
+ },
+ { /* AC bias group 2, table 3 */
+ { 0x7, 4 },
+ { 0x1B, 6 },
+ { 0xF6, 8 },
+ { 0xE9, 8 },
+ { 0x3A1, 10 },
+ { 0x740, 11 },
+ { 0xE82, 12 },
+ { 0x1F, 5 },
+ { 0x1EF, 9 },
+ { 0x1, 3 },
+ { 0x2, 3 },
+ { 0xB, 4 },
+ { 0xC, 4 },
+ { 0xD, 4 },
+ { 0x8, 4 },
+ { 0x1C, 5 },
+ { 0x3, 5 },
+ { 0x12, 5 },
+ { 0x2, 5 },
+ { 0x75, 7 },
+ { 0x1D1, 9 },
+ { 0x1D07, 13 },
+ { 0x1D06, 13 },
+ { 0xA, 4 },
+ { 0x13, 5 },
+ { 0x3B, 6 },
+ { 0x1A, 6 },
+ { 0x7A, 7 },
+ { 0x3C, 6 },
+ { 0x1EE, 9 },
+ { 0x0, 4 },
+ { 0xC, 5 }
+ },
+ { /* AC bias group 2, table 4 */
+ { 0xD, 4 },
+ { 0x3D, 6 },
+ { 0x42, 7 },
+ { 0x37, 7 },
+ { 0xD9, 9 },
+ { 0x362, 11 },
+ { 0x6C6, 12 },
+ { 0x1F, 5 },
+ { 0x86, 8 },
+ { 0x1, 3 },
+ { 0x2, 3 },
+ { 0xC, 4 },
+ { 0xB, 4 },
+ { 0xA, 4 },
+ { 0x1, 4 },
+ { 0xF, 5 },
+ { 0x25, 6 },
+ { 0x3C, 6 },
+ { 0x1A, 6 },
+ { 0x87, 8 },
+ { 0x1B0, 10 },
+ { 0xD8F, 13 },
+ { 0xD8E, 13 },
+ { 0xE, 4 },
+ { 0x13, 5 },
+ { 0xC, 5 },
+ { 0x24, 6 },
+ { 0x20, 6 },
+ { 0x11, 5 },
+ { 0x6D, 8 },
+ { 0x0, 4 },
+ { 0xE, 5 }
+ },
+ { /* AC bias group 2, table 5 */
+ { 0x0, 3 },
+ { 0x12, 5 },
+ { 0x76, 7 },
+ { 0x77, 7 },
+ { 0x14D, 9 },
+ { 0x533, 11 },
+ { 0x14C9, 13 },
+ { 0x13, 5 },
+ { 0xA5, 8 },
+ { 0x2, 3 },
+ { 0x3, 3 },
+ { 0xB, 4 },
+ { 0xC, 4 },
+ { 0x8, 4 },
+ { 0x1A, 5 },
+ { 0x2B, 6 },
+ { 0x75, 7 },
+ { 0x74, 7 },
+ { 0xA7, 8 },
+ { 0x298, 10 },
+ { 0x14C8, 13 },
+ { 0x14CB, 13 },
+ { 0x14CA, 13 },
+ { 0xF, 4 },
+ { 0x1C, 5 },
+ { 0x7, 5 },
+ { 0x2A, 6 },
+ { 0x28, 6 },
+ { 0x1B, 5 },
+ { 0xA4, 8 },
+ { 0x2, 4 },
+ { 0x6, 5 }
+ },
+ { /* AC bias group 2, table 6 */
+ { 0x2, 3 },
+ { 0x1A, 5 },
+ { 0x2B, 6 },
+ { 0x3A, 6 },
+ { 0xED, 8 },
+ { 0x283, 10 },
+ { 0xA0A, 12 },
+ { 0x4, 5 },
+ { 0xA1, 8 },
+ { 0x4, 3 },
+ { 0x3, 3 },
+ { 0xB, 4 },
+ { 0xC, 4 },
+ { 0x1F, 5 },
+ { 0x6, 5 },
+ { 0x77, 7 },
+ { 0xA3, 8 },
+ { 0xA2, 8 },
+ { 0x140, 9 },
+ { 0x1417, 13 },
+ { 0x1416, 13 },
+ { 0xA09, 12 },
+ { 0xA08, 12 },
+ { 0x0, 3 },
+ { 0x1E, 5 },
+ { 0x7, 5 },
+ { 0x2A, 6 },
+ { 0x29, 6 },
+ { 0x1C, 5 },
+ { 0xEC, 8 },
+ { 0x1B, 5 },
+ { 0x5, 5 }
+ },
+ { /* AC bias group 2, table 7 */
+ { 0x2, 3 },
+ { 0x2, 4 },
+ { 0x18, 5 },
+ { 0x1D, 5 },
+ { 0x35, 6 },
+ { 0xE4, 8 },
+ { 0x1CF, 11 },
+ { 0x1D, 7 },
+ { 0x72, 9 },
+ { 0x4, 3 },
+ { 0x5, 3 },
+ { 0x6, 4 },
+ { 0x7, 4 },
+ { 0x6, 5 },
+ { 0x73, 7 },
+ { 0x38, 8 },
+ { 0x1CE, 11 },
+ { 0x39B, 12 },
+ { 0x398, 12 },
+ { 0x733, 13 },
+ { 0x732, 13 },
+ { 0x735, 13 },
+ { 0x734, 13 },
+ { 0x0, 3 },
+ { 0x1F, 5 },
+ { 0x1B, 5 },
+ { 0x34, 6 },
+ { 0xF, 6 },
+ { 0x1E, 5 },
+ { 0xE5, 8 },
+ { 0x19, 5 },
+ { 0x38, 6 }
+ },
+ { /* AC bias group 2, table 8 */
+ { 0x16, 5 },
+ { 0x50, 7 },
+ { 0x172, 9 },
+ { 0x2E7, 10 },
+ { 0x1732, 13 },
+ { 0x2E67, 14 },
+ { 0x2E66, 14 },
+ { 0x6, 4 },
+ { 0x51, 7 },
+ { 0x1, 3 },
+ { 0x0, 3 },
+ { 0xD, 4 },
+ { 0xC, 4 },
+ { 0x9, 4 },
+ { 0x1C, 5 },
+ { 0x9, 5 },
+ { 0x1C, 6 },
+ { 0x1D, 6 },
+ { 0x5D, 7 },
+ { 0xB8, 8 },
+ { 0x5CD, 11 },
+ { 0x1731, 13 },
+ { 0x1730, 13 },
+ { 0xF, 4 },
+ { 0x5, 4 },
+ { 0xF, 5 },
+ { 0x8, 5 },
+ { 0x29, 6 },
+ { 0x1D, 5 },
+ { 0x2F, 6 },
+ { 0x8, 4 },
+ { 0x15, 5 }
+ },
+ { /* AC bias group 2, table 9 */
+ { 0x9, 4 },
+ { 0x21, 6 },
+ { 0x40, 7 },
+ { 0xAD, 8 },
+ { 0x2B0, 10 },
+ { 0x1589, 13 },
+ { 0x1588, 13 },
+ { 0x1C, 5 },
+ { 0x5F, 7 },
+ { 0x0, 3 },
+ { 0xF, 4 },
+ { 0xD, 4 },
+ { 0xC, 4 },
+ { 0x6, 4 },
+ { 0x11, 5 },
+ { 0x2A, 6 },
+ { 0x57, 7 },
+ { 0x5E, 7 },
+ { 0x41, 7 },
+ { 0x159, 9 },
+ { 0x563, 11 },
+ { 0x158B, 13 },
+ { 0x158A, 13 },
+ { 0x1, 3 },
+ { 0x5, 4 },
+ { 0x14, 5 },
+ { 0x3B, 6 },
+ { 0x2E, 6 },
+ { 0x4, 4 },
+ { 0x3A, 6 },
+ { 0x7, 4 },
+ { 0x16, 5 }
+ },
+ { /* AC bias group 2, table 10 */
+ { 0xE, 4 },
+ { 0x7, 5 },
+ { 0x46, 7 },
+ { 0x45, 7 },
+ { 0x64, 9 },
+ { 0x32A, 12 },
+ { 0x657, 13 },
+ { 0x18, 5 },
+ { 0xD, 6 },
+ { 0x0, 3 },
+ { 0xF, 4 },
+ { 0xA, 4 },
+ { 0xB, 4 },
+ { 0x1A, 5 },
+ { 0x36, 6 },
+ { 0x47, 7 },
+ { 0x44, 7 },
+ { 0x18, 7 },
+ { 0x33, 8 },
+ { 0xCB, 10 },
+ { 0x656, 13 },
+ { 0x329, 12 },
+ { 0x328, 12 },
+ { 0x2, 3 },
+ { 0x6, 4 },
+ { 0x19, 5 },
+ { 0xE, 5 },
+ { 0x37, 6 },
+ { 0x9, 4 },
+ { 0xF, 5 },
+ { 0x2, 4 },
+ { 0x10, 5 }
+ },
+ { /* AC bias group 2, table 11 */
+ { 0x3, 3 },
+ { 0x18, 5 },
+ { 0x23, 6 },
+ { 0x77, 7 },
+ { 0x194, 9 },
+ { 0x1956, 13 },
+ { 0x32AF, 14 },
+ { 0x3A, 6 },
+ { 0x76, 7 },
+ { 0x2, 3 },
+ { 0x1, 3 },
+ { 0x1F, 5 },
+ { 0x1E, 5 },
+ { 0x14, 5 },
+ { 0x22, 6 },
+ { 0x64, 7 },
+ { 0x197, 9 },
+ { 0x196, 9 },
+ { 0x32B, 10 },
+ { 0x654, 11 },
+ { 0x32AE, 14 },
+ { 0x1955, 13 },
+ { 0x1954, 13 },
+ { 0x0, 3 },
+ { 0x9, 4 },
+ { 0x1C, 5 },
+ { 0x15, 5 },
+ { 0x10, 5 },
+ { 0xD, 4 },
+ { 0x17, 5 },
+ { 0x16, 5 },
+ { 0x33, 6 }
+ },
+ { /* AC bias group 2, table 12 */
+ { 0x5, 3 },
+ { 0x6, 4 },
+ { 0x3E, 6 },
+ { 0x10, 5 },
+ { 0x48, 7 },
+ { 0x93F, 12 },
+ { 0x24FA, 14 },
+ { 0x32, 6 },
+ { 0x67, 7 },
+ { 0x2, 3 },
+ { 0x1, 3 },
+ { 0x1B, 5 },
+ { 0x1E, 5 },
+ { 0x34, 6 },
+ { 0x66, 7 },
+ { 0x92, 8 },
+ { 0x126, 9 },
+ { 0x24E, 10 },
+ { 0x49E, 11 },
+ { 0x49F7, 15 },
+ { 0x49F6, 15 },
+ { 0x24F9, 14 },
+ { 0x24F8, 14 },
+ { 0x0, 3 },
+ { 0x7, 4 },
+ { 0x18, 5 },
+ { 0x11, 5 },
+ { 0x3F, 6 },
+ { 0xE, 4 },
+ { 0x13, 5 },
+ { 0x35, 6 },
+ { 0x25, 6 }
+ },
+ { /* AC bias group 2, table 13 */
+ { 0x5, 3 },
+ { 0x8, 4 },
+ { 0x12, 5 },
+ { 0x1C, 5 },
+ { 0x1C, 6 },
+ { 0xEA, 9 },
+ { 0x1D75, 14 },
+ { 0x1E, 6 },
+ { 0x66, 7 },
+ { 0x1, 3 },
+ { 0x2, 3 },
+ { 0x1B, 5 },
+ { 0x1A, 5 },
+ { 0x1F, 6 },
+ { 0x3B, 7 },
+ { 0x74, 8 },
+ { 0x1D6, 10 },
+ { 0x3AF, 11 },
+ { 0x1D74, 14 },
+ { 0x1D77, 14 },
+ { 0x1D76, 14 },
+ { 0xEB9, 13 },
+ { 0xEB8, 13 },
+ { 0xF, 4 },
+ { 0x6, 4 },
+ { 0x13, 5 },
+ { 0x3B, 6 },
+ { 0x3A, 6 },
+ { 0x0, 3 },
+ { 0x18, 5 },
+ { 0x32, 6 },
+ { 0x67, 7 }
+ },
+ { /* AC bias group 2, table 14 */
+ { 0x4, 3 },
+ { 0xA, 4 },
+ { 0x1B, 5 },
+ { 0xC, 4 },
+ { 0xD, 5 },
+ { 0xE6, 8 },
+ { 0x684, 11 },
+ { 0x72, 7 },
+ { 0xE7, 8 },
+ { 0x2, 3 },
+ { 0x1, 3 },
+ { 0x17, 5 },
+ { 0x16, 5 },
+ { 0x18, 6 },
+ { 0xD1, 8 },
+ { 0x1A0, 9 },
+ { 0x686, 11 },
+ { 0xD0F, 12 },
+ { 0xD0A, 12 },
+ { 0x1A17, 13 },
+ { 0x1A16, 13 },
+ { 0x1A1D, 13 },
+ { 0x1A1C, 13 },
+ { 0xF, 4 },
+ { 0x1D, 5 },
+ { 0xE, 5 },
+ { 0x35, 6 },
+ { 0x38, 6 },
+ { 0x0, 3 },
+ { 0xF, 5 },
+ { 0x19, 6 },
+ { 0x69, 7 }
+ },
+ { /* AC bias group 2, table 15 */
+ { 0x3, 3 },
+ { 0xC, 4 },
+ { 0x1B, 5 },
+ { 0x0, 3 },
+ { 0x3, 4 },
+ { 0x2E, 6 },
+ { 0x51, 9 },
+ { 0xBC, 8 },
+ { 0x53, 9 },
+ { 0x4, 3 },
+ { 0x2, 3 },
+ { 0x16, 5 },
+ { 0x15, 5 },
+ { 0x15, 7 },
+ { 0x50, 9 },
+ { 0xA4, 10 },
+ { 0x294, 12 },
+ { 0x52B, 13 },
+ { 0x52A, 13 },
+ { 0x52D, 13 },
+ { 0x52C, 13 },
+ { 0x52F, 13 },
+ { 0x52E, 13 },
+ { 0xE, 4 },
+ { 0x1A, 5 },
+ { 0x4, 5 },
+ { 0x28, 6 },
+ { 0x29, 6 },
+ { 0xF, 4 },
+ { 0xB, 6 },
+ { 0x5F, 7 },
+ { 0xBD, 8 }
+ }
+};
+
+static const uint16_t ac_bias_2[16][32][2] = {
+ { /* AC bias group 3, table 0 */
+ { 0x3, 4 },
+ { 0x9, 6 },
+ { 0xD0, 8 },
+ { 0x1A3, 9 },
+ { 0x344, 10 },
+ { 0xD14, 12 },
+ { 0x1A2B, 13 },
+ { 0x4, 4 },
+ { 0x15, 7 },
+ { 0x0, 3 },
+ { 0xF, 4 },
+ { 0xB, 4 },
+ { 0xC, 4 },
+ { 0xE, 4 },
+ { 0x9, 4 },
+ { 0x1B, 5 },
+ { 0xA, 5 },
+ { 0x14, 5 },
+ { 0xD, 5 },
+ { 0x2A, 6 },
+ { 0x14, 7 },
+ { 0x68B, 11 },
+ { 0x1A2A, 13 },
+ { 0x8, 4 },
+ { 0xB, 5 },
+ { 0x2B, 6 },
+ { 0xB, 6 },
+ { 0x69, 7 },
+ { 0x35, 6 },
+ { 0x8, 6 },
+ { 0x7, 4 },
+ { 0xC, 5 }
+ },
+ { /* AC bias group 3, table 1 */
+ { 0xA, 4 },
+ { 0x3C, 6 },
+ { 0x32, 7 },
+ { 0x30, 7 },
+ { 0xC5, 9 },
+ { 0x621, 12 },
+ { 0x620, 12 },
+ { 0x1F, 5 },
+ { 0x33, 7 },
+ { 0x1, 3 },
+ { 0x0, 3 },
+ { 0xE, 4 },
+ { 0xD, 4 },
+ { 0xC, 4 },
+ { 0x4, 4 },
+ { 0xD, 5 },
+ { 0x26, 6 },
+ { 0x27, 6 },
+ { 0x14, 6 },
+ { 0x63, 8 },
+ { 0x189, 10 },
+ { 0x623, 12 },
+ { 0x622, 12 },
+ { 0xB, 4 },
+ { 0x12, 5 },
+ { 0x3D, 6 },
+ { 0x22, 6 },
+ { 0x15, 6 },
+ { 0xB, 5 },
+ { 0x23, 6 },
+ { 0x7, 4 },
+ { 0x10, 5 }
+ },
+ { /* AC bias group 3, table 2 */
+ { 0xF, 4 },
+ { 0xC, 5 },
+ { 0x43, 7 },
+ { 0x10, 6 },
+ { 0x44, 8 },
+ { 0x114, 10 },
+ { 0x455, 12 },
+ { 0x18, 5 },
+ { 0x23, 7 },
+ { 0x1, 3 },
+ { 0x0, 3 },
+ { 0xE, 4 },
+ { 0xD, 4 },
+ { 0x9, 4 },
+ { 0x19, 5 },
+ { 0x9, 5 },
+ { 0x17, 6 },
+ { 0x16, 6 },
+ { 0x42, 7 },
+ { 0x8B, 9 },
+ { 0x454, 12 },
+ { 0x457, 12 },
+ { 0x456, 12 },
+ { 0xB, 4 },
+ { 0x15, 5 },
+ { 0xA, 5 },
+ { 0x29, 6 },
+ { 0x20, 6 },
+ { 0xD, 5 },
+ { 0x28, 6 },
+ { 0x7, 4 },
+ { 0x11, 5 }
+ },
+ { /* AC bias group 3, table 3 */
+ { 0x1, 3 },
+ { 0x1A, 5 },
+ { 0x29, 6 },
+ { 0x2A, 6 },
+ { 0xA0, 8 },
+ { 0x285, 10 },
+ { 0x1425, 13 },
+ { 0x2, 5 },
+ { 0x0, 7 },
+ { 0x2, 3 },
+ { 0x3, 3 },
+ { 0xC, 4 },
+ { 0xB, 4 },
+ { 0x8, 4 },
+ { 0x12, 5 },
+ { 0x1, 6 },
+ { 0x51, 7 },
+ { 0x1, 7 },
+ { 0x143, 9 },
+ { 0x508, 11 },
+ { 0x1424, 13 },
+ { 0x1427, 13 },
+ { 0x1426, 13 },
+ { 0xF, 4 },
+ { 0x1C, 5 },
+ { 0x3, 5 },
+ { 0x37, 6 },
+ { 0x2B, 6 },
+ { 0x13, 5 },
+ { 0x36, 6 },
+ { 0x1D, 5 },
+ { 0x1, 5 }
+ },
+ { /* AC bias group 3, table 4 */
+ { 0x4, 3 },
+ { 0x1F, 5 },
+ { 0x3D, 6 },
+ { 0x6, 5 },
+ { 0x16, 7 },
+ { 0x53, 9 },
+ { 0x14A, 11 },
+ { 0x34, 6 },
+ { 0x2A, 8 },
+ { 0x2, 3 },
+ { 0x3, 3 },
+ { 0xB, 4 },
+ { 0xC, 4 },
+ { 0x1C, 5 },
+ { 0x37, 6 },
+ { 0x17, 7 },
+ { 0x2B, 8 },
+ { 0x28, 8 },
+ { 0xA4, 10 },
+ { 0x52D, 13 },
+ { 0x52C, 13 },
+ { 0x52F, 13 },
+ { 0x52E, 13 },
+ { 0x0, 3 },
+ { 0x1D, 5 },
+ { 0x7, 5 },
+ { 0x4, 5 },
+ { 0x35, 6 },
+ { 0x14, 5 },
+ { 0x36, 6 },
+ { 0x15, 5 },
+ { 0x3C, 6 }
+ },
+ { /* AC bias group 3, table 5 */
+ { 0x4, 3 },
+ { 0xA, 4 },
+ { 0x7, 5 },
+ { 0x1D, 5 },
+ { 0x9, 6 },
+ { 0x1F3, 9 },
+ { 0x7C7, 11 },
+ { 0x8, 6 },
+ { 0x1F0, 9 },
+ { 0x3, 3 },
+ { 0x2, 3 },
+ { 0xD, 4 },
+ { 0xC, 4 },
+ { 0x17, 5 },
+ { 0x7D, 7 },
+ { 0x1F2, 9 },
+ { 0x7C6, 11 },
+ { 0x7C5, 11 },
+ { 0x1F12, 13 },
+ { 0x3E27, 14 },
+ { 0x3E26, 14 },
+ { 0x1F11, 13 },
+ { 0x1F10, 13 },
+ { 0x0, 3 },
+ { 0x1E, 5 },
+ { 0x6, 5 },
+ { 0x39, 6 },
+ { 0x38, 6 },
+ { 0x3F, 6 },
+ { 0x2C, 6 },
+ { 0x5, 5 },
+ { 0x2D, 6 }
+ },
+ { /* AC bias group 3, table 6 */
+ { 0x2, 3 },
+ { 0x7, 4 },
+ { 0x18, 5 },
+ { 0x3, 4 },
+ { 0x5, 5 },
+ { 0x35, 7 },
+ { 0x4F, 9 },
+ { 0x12, 7 },
+ { 0x4E5, 13 },
+ { 0x5, 3 },
+ { 0x4, 3 },
+ { 0xD, 4 },
+ { 0xE, 4 },
+ { 0x33, 6 },
+ { 0x26, 8 },
+ { 0x9D, 10 },
+ { 0x4E4, 13 },
+ { 0x4E7, 13 },
+ { 0x4E6, 13 },
+ { 0x4E1, 13 },
+ { 0x4E0, 13 },
+ { 0x4E3, 13 },
+ { 0x4E2, 13 },
+ { 0x0, 3 },
+ { 0x1F, 5 },
+ { 0xC, 5 },
+ { 0x3D, 6 },
+ { 0x3C, 6 },
+ { 0x32, 6 },
+ { 0x34, 7 },
+ { 0x1B, 6 },
+ { 0x8, 6 }
+ },
+ { /* AC bias group 3, table 7 */
+ { 0x0, 3 },
+ { 0x4, 4 },
+ { 0x1C, 5 },
+ { 0xF, 4 },
+ { 0x2, 4 },
+ { 0x7, 5 },
+ { 0x75, 7 },
+ { 0xE8, 8 },
+ { 0x1D2A, 13 },
+ { 0x5, 3 },
+ { 0x4, 3 },
+ { 0xD, 4 },
+ { 0xC, 4 },
+ { 0x77, 7 },
+ { 0xE96, 12 },
+ { 0x3A57, 14 },
+ { 0x3A56, 14 },
+ { 0x3A5D, 14 },
+ { 0x3A5C, 14 },
+ { 0x3A5F, 14 },
+ { 0x3A5E, 14 },
+ { 0x1D29, 13 },
+ { 0x1D28, 13 },
+ { 0x3, 3 },
+ { 0x6, 5 },
+ { 0xA, 5 },
+ { 0x2C, 7 },
+ { 0x17, 6 },
+ { 0x76, 7 },
+ { 0x1D3, 9 },
+ { 0x3A4, 10 },
+ { 0x2D, 7 }
+ },
+ { /* AC bias group 3, table 8 */
+ { 0xA, 4 },
+ { 0x24, 6 },
+ { 0xBF, 8 },
+ { 0x85, 8 },
+ { 0x211, 10 },
+ { 0x842, 12 },
+ { 0x1087, 13 },
+ { 0x18, 5 },
+ { 0x20, 6 },
+ { 0x1, 3 },
+ { 0x2, 3 },
+ { 0xE, 4 },
+ { 0xD, 4 },
+ { 0x7, 4 },
+ { 0x13, 5 },
+ { 0x25, 6 },
+ { 0x5E, 7 },
+ { 0x43, 7 },
+ { 0xBE, 8 },
+ { 0x109, 9 },
+ { 0x1086, 13 },
+ { 0x841, 12 },
+ { 0x840, 12 },
+ { 0xF, 4 },
+ { 0x1, 4 },
+ { 0x11, 5 },
+ { 0x0, 5 },
+ { 0x2E, 6 },
+ { 0x19, 5 },
+ { 0x1, 5 },
+ { 0x6, 4 },
+ { 0x16, 5 }
+ },
+ { /* AC bias group 3, table 9 */
+ { 0x2, 3 },
+ { 0xF, 5 },
+ { 0x6F, 7 },
+ { 0x61, 7 },
+ { 0x374, 10 },
+ { 0x1BA8, 13 },
+ { 0x3753, 14 },
+ { 0x12, 5 },
+ { 0x36, 6 },
+ { 0x0, 3 },
+ { 0x1, 3 },
+ { 0xA, 4 },
+ { 0xB, 4 },
+ { 0x1A, 5 },
+ { 0x31, 6 },
+ { 0x60, 7 },
+ { 0xDC, 8 },
+ { 0x1BB, 9 },
+ { 0x6EB, 11 },
+ { 0x1BAB, 13 },
+ { 0x3752, 14 },
+ { 0x3755, 14 },
+ { 0x3754, 14 },
+ { 0xE, 4 },
+ { 0x6, 4 },
+ { 0x13, 5 },
+ { 0xE, 5 },
+ { 0x3E, 6 },
+ { 0x8, 4 },
+ { 0x1E, 5 },
+ { 0x19, 5 },
+ { 0x3F, 6 }
+ },
+ { /* AC bias group 3, table 10 */
+ { 0x3, 3 },
+ { 0x1C, 5 },
+ { 0x25, 6 },
+ { 0x24, 6 },
+ { 0x1DA, 9 },
+ { 0x1DBD, 13 },
+ { 0x3B7C, 14 },
+ { 0x3C, 6 },
+ { 0x3D, 6 },
+ { 0x0, 3 },
+ { 0x1, 3 },
+ { 0xB, 4 },
+ { 0xA, 4 },
+ { 0xB, 5 },
+ { 0x77, 7 },
+ { 0xEC, 8 },
+ { 0x3B6, 10 },
+ { 0x76E, 11 },
+ { 0x1DBF, 13 },
+ { 0x76FB, 15 },
+ { 0x76FA, 15 },
+ { 0x3B79, 14 },
+ { 0x3B78, 14 },
+ { 0xD, 4 },
+ { 0x1F, 5 },
+ { 0x13, 5 },
+ { 0xA, 5 },
+ { 0x8, 5 },
+ { 0xC, 4 },
+ { 0x8, 4 },
+ { 0x9, 5 },
+ { 0x3A, 6 }
+ },
+ { /* AC bias group 3, table 11 */
+ { 0x5, 3 },
+ { 0x3, 4 },
+ { 0x4, 5 },
+ { 0x10, 5 },
+ { 0x8F, 8 },
+ { 0x475, 11 },
+ { 0x11D1, 13 },
+ { 0x79, 7 },
+ { 0x27, 6 },
+ { 0x2, 3 },
+ { 0x3, 3 },
+ { 0x1, 4 },
+ { 0x0, 4 },
+ { 0x26, 6 },
+ { 0x46, 7 },
+ { 0x11C, 9 },
+ { 0x477, 11 },
+ { 0x8ED, 12 },
+ { 0x11D0, 13 },
+ { 0x11D3, 13 },
+ { 0x11D2, 13 },
+ { 0x11D9, 13 },
+ { 0x11D8, 13 },
+ { 0xD, 4 },
+ { 0x1F, 5 },
+ { 0x12, 5 },
+ { 0x5, 5 },
+ { 0x3D, 6 },
+ { 0xC, 4 },
+ { 0xE, 4 },
+ { 0x22, 6 },
+ { 0x78, 7 }
+ },
+ { /* AC bias group 3, table 12 */
+ { 0x5, 3 },
+ { 0xC, 4 },
+ { 0x1B, 5 },
+ { 0x0, 4 },
+ { 0x6, 6 },
+ { 0x3E2, 10 },
+ { 0x3E3D, 14 },
+ { 0xF, 7 },
+ { 0x34, 6 },
+ { 0x3, 3 },
+ { 0x2, 3 },
+ { 0x1E, 5 },
+ { 0x1D, 5 },
+ { 0x7D, 7 },
+ { 0x1F0, 9 },
+ { 0x7C6, 11 },
+ { 0x3E3C, 14 },
+ { 0x3E3F, 14 },
+ { 0x3E3E, 14 },
+ { 0x3E39, 14 },
+ { 0x3E38, 14 },
+ { 0x3E3B, 14 },
+ { 0x3E3A, 14 },
+ { 0x8, 4 },
+ { 0x1C, 5 },
+ { 0x2, 5 },
+ { 0x3F, 6 },
+ { 0x35, 6 },
+ { 0x9, 4 },
+ { 0x1, 3 },
+ { 0xE, 7 },
+ { 0xF9, 8 }
+ },
+ { /* AC bias group 3, table 13 */
+ { 0x4, 3 },
+ { 0xB, 4 },
+ { 0x1, 4 },
+ { 0xA, 4 },
+ { 0x1E, 6 },
+ { 0xE0, 9 },
+ { 0xE1E, 13 },
+ { 0x71, 8 },
+ { 0x39, 7 },
+ { 0x7, 3 },
+ { 0x6, 3 },
+ { 0xD, 5 },
+ { 0xC, 5 },
+ { 0x20, 7 },
+ { 0x1C2, 10 },
+ { 0x1C3F, 14 },
+ { 0x1C3E, 14 },
+ { 0xE19, 13 },
+ { 0xE18, 13 },
+ { 0xE1B, 13 },
+ { 0xE1A, 13 },
+ { 0xE1D, 13 },
+ { 0xE1C, 13 },
+ { 0x0, 4 },
+ { 0x9, 5 },
+ { 0x1D, 6 },
+ { 0x1F, 6 },
+ { 0x11, 6 },
+ { 0x5, 4 },
+ { 0x1, 3 },
+ { 0x43, 8 },
+ { 0x42, 8 }
+ },
+ { /* AC bias group 3, table 14 */
+ { 0x4, 3 },
+ { 0xD, 4 },
+ { 0x7, 4 },
+ { 0x2, 3 },
+ { 0x14, 5 },
+ { 0x16C, 9 },
+ { 0x16D1, 13 },
+ { 0x2DF, 10 },
+ { 0x16E, 9 },
+ { 0x0, 2 },
+ { 0x7, 3 },
+ { 0x2C, 6 },
+ { 0x2B, 6 },
+ { 0x2DE, 10 },
+ { 0x16D0, 13 },
+ { 0x16D3, 13 },
+ { 0x16D2, 13 },
+ { 0x2DB5, 14 },
+ { 0x2DB4, 14 },
+ { 0x2DB7, 14 },
+ { 0x2DB6, 14 },
+ { 0x16D9, 13 },
+ { 0x16D8, 13 },
+ { 0xC, 5 },
+ { 0x2A, 6 },
+ { 0x5A, 7 },
+ { 0x1B, 6 },
+ { 0x1A, 6 },
+ { 0x17, 5 },
+ { 0xC, 4 },
+ { 0x5B7, 11 },
+ { 0x5B5, 11 }
+ },
+ { /* AC bias group 3, table 15 */
+ { 0x2, 2 },
+ { 0xF, 4 },
+ { 0x1C, 5 },
+ { 0xC, 4 },
+ { 0x3B, 6 },
+ { 0x1AC, 9 },
+ { 0x1AD8, 13 },
+ { 0x35B3, 14 },
+ { 0x35B2, 14 },
+ { 0x1, 2 },
+ { 0x0, 2 },
+ { 0x69, 7 },
+ { 0x68, 7 },
+ { 0x35BD, 14 },
+ { 0x35BC, 14 },
+ { 0x35BF, 14 },
+ { 0x35BE, 14 },
+ { 0x35B9, 14 },
+ { 0x35B8, 14 },
+ { 0x35BB, 14 },
+ { 0x35BA, 14 },
+ { 0x35B5, 14 },
+ { 0x35B4, 14 },
+ { 0x1A9, 9 },
+ { 0x1A8, 9 },
+ { 0x35A, 10 },
+ { 0xD7, 8 },
+ { 0xD5, 8 },
+ { 0x3A, 6 },
+ { 0x1B, 5 },
+ { 0x35B7, 14 },
+ { 0x35B6, 14 }
+ }
+};
+
+static const uint16_t ac_bias_3[16][32][2] = {
+ { /* AC bias group 4, table 0 */
+ { 0x0, 3 },
+ { 0x10, 5 },
+ { 0x72, 7 },
+ { 0x71, 7 },
+ { 0x154, 9 },
+ { 0xAAB, 12 },
+ { 0xAA8, 12 },
+ { 0x14, 5 },
+ { 0x70, 7 },
+ { 0x2, 3 },
+ { 0x3, 3 },
+ { 0xC, 4 },
+ { 0xB, 4 },
+ { 0x3, 4 },
+ { 0x11, 5 },
+ { 0x73, 7 },
+ { 0x54, 7 },
+ { 0xAB, 8 },
+ { 0x2AB, 10 },
+ { 0x1553, 13 },
+ { 0x1552, 13 },
+ { 0x1555, 13 },
+ { 0x1554, 13 },
+ { 0xD, 4 },
+ { 0x1E, 5 },
+ { 0x12, 5 },
+ { 0x3E, 6 },
+ { 0x2B, 6 },
+ { 0x2, 4 },
+ { 0x3F, 6 },
+ { 0x1D, 5 },
+ { 0x13, 5 }
+ },
+ { /* AC bias group 4, table 1 */
+ { 0x3, 3 },
+ { 0x1F, 5 },
+ { 0x29, 6 },
+ { 0x3D, 6 },
+ { 0xC, 7 },
+ { 0x69, 10 },
+ { 0x345, 13 },
+ { 0x2, 5 },
+ { 0x28, 6 },
+ { 0x2, 3 },
+ { 0x1, 3 },
+ { 0xE, 4 },
+ { 0xC, 4 },
+ { 0x15, 5 },
+ { 0x7, 6 },
+ { 0x1B, 8 },
+ { 0x6B, 10 },
+ { 0x6A, 10 },
+ { 0x344, 13 },
+ { 0x347, 13 },
+ { 0x346, 13 },
+ { 0x1A1, 12 },
+ { 0x1A0, 12 },
+ { 0xB, 4 },
+ { 0x1A, 5 },
+ { 0x12, 5 },
+ { 0x0, 5 },
+ { 0x3C, 6 },
+ { 0x8, 4 },
+ { 0x1B, 5 },
+ { 0x13, 5 },
+ { 0x1, 5 }
+ },
+ { /* AC bias group 4, table 2 */
+ { 0x4, 3 },
+ { 0x4, 4 },
+ { 0x3F, 6 },
+ { 0x14, 5 },
+ { 0x56, 7 },
+ { 0x15C, 9 },
+ { 0x15D5, 13 },
+ { 0x3C, 6 },
+ { 0x2A, 6 },
+ { 0x0, 3 },
+ { 0x1, 3 },
+ { 0xE, 4 },
+ { 0xD, 4 },
+ { 0xC, 5 },
+ { 0xAF, 8 },
+ { 0x2BB, 10 },
+ { 0x15D4, 13 },
+ { 0x15D7, 13 },
+ { 0x15D6, 13 },
+ { 0x15D1, 13 },
+ { 0x15D0, 13 },
+ { 0x15D3, 13 },
+ { 0x15D2, 13 },
+ { 0xB, 4 },
+ { 0x19, 5 },
+ { 0xD, 5 },
+ { 0x3E, 6 },
+ { 0x31, 6 },
+ { 0x7, 4 },
+ { 0x5, 4 },
+ { 0x3D, 6 },
+ { 0x30, 6 }
+ },
+ { /* AC bias group 4, table 3 */
+ { 0x5, 3 },
+ { 0x8, 4 },
+ { 0x1A, 5 },
+ { 0x0, 4 },
+ { 0x36, 6 },
+ { 0x11, 8 },
+ { 0x106, 12 },
+ { 0xA, 7 },
+ { 0x6E, 7 },
+ { 0x2, 3 },
+ { 0x3, 3 },
+ { 0x3, 4 },
+ { 0x2, 4 },
+ { 0x6F, 7 },
+ { 0x21, 9 },
+ { 0x20F, 13 },
+ { 0x20E, 13 },
+ { 0x101, 12 },
+ { 0x100, 12 },
+ { 0x103, 12 },
+ { 0x102, 12 },
+ { 0x105, 12 },
+ { 0x104, 12 },
+ { 0xC, 4 },
+ { 0x1E, 5 },
+ { 0x3, 5 },
+ { 0x3E, 6 },
+ { 0x3F, 6 },
+ { 0x9, 4 },
+ { 0xE, 4 },
+ { 0xB, 7 },
+ { 0x9, 7 }
+ },
+ { /* AC bias group 4, table 4 */
+ { 0x2, 3 },
+ { 0xE, 4 },
+ { 0x1E, 5 },
+ { 0xC, 4 },
+ { 0x1F, 5 },
+ { 0x6E, 7 },
+ { 0xAD, 10 },
+ { 0xAF, 10 },
+ { 0x14, 7 },
+ { 0x4, 3 },
+ { 0x3, 3 },
+ { 0x1A, 5 },
+ { 0x17, 5 },
+ { 0x2A, 8 },
+ { 0x576, 13 },
+ { 0xAEF, 14 },
+ { 0xAEE, 14 },
+ { 0x571, 13 },
+ { 0x570, 13 },
+ { 0x573, 13 },
+ { 0x572, 13 },
+ { 0x575, 13 },
+ { 0x574, 13 },
+ { 0x3, 4 },
+ { 0x16, 5 },
+ { 0x4, 5 },
+ { 0x36, 6 },
+ { 0xB, 6 },
+ { 0xA, 4 },
+ { 0x0, 3 },
+ { 0x6F, 7 },
+ { 0xAC, 10 }
+ },
+ { /* AC bias group 4, table 5 */
+ { 0x4, 3 },
+ { 0x5, 4 },
+ { 0x3, 3 },
+ { 0x1, 3 },
+ { 0x4, 4 },
+ { 0x2F, 6 },
+ { 0x526, 11 },
+ { 0x1495, 13 },
+ { 0xA6, 8 },
+ { 0x7, 3 },
+ { 0x6, 3 },
+ { 0x2D, 6 },
+ { 0x2C, 6 },
+ { 0x1494, 13 },
+ { 0x1497, 13 },
+ { 0x1496, 13 },
+ { 0x1491, 13 },
+ { 0x1490, 13 },
+ { 0x1493, 13 },
+ { 0x1492, 13 },
+ { 0x293D, 14 },
+ { 0x293C, 14 },
+ { 0x293F, 14 },
+ { 0x0, 3 },
+ { 0x28, 6 },
+ { 0xA5, 8 },
+ { 0x148, 9 },
+ { 0xA7, 8 },
+ { 0x2E, 6 },
+ { 0x15, 5 },
+ { 0xA4E, 12 },
+ { 0x293E, 14 }
+ },
+ { /* AC bias group 4, table 6 */
+ { 0x4, 3 },
+ { 0x5, 4 },
+ { 0x3, 3 },
+ { 0x1, 3 },
+ { 0x4, 4 },
+ { 0x2F, 6 },
+ { 0x526, 11 },
+ { 0x1495, 13 },
+ { 0xA6, 8 },
+ { 0x7, 3 },
+ { 0x6, 3 },
+ { 0x2D, 6 },
+ { 0x2C, 6 },
+ { 0x1494, 13 },
+ { 0x1497, 13 },
+ { 0x1496, 13 },
+ { 0x1491, 13 },
+ { 0x1490, 13 },
+ { 0x1493, 13 },
+ { 0x1492, 13 },
+ { 0x293D, 14 },
+ { 0x293C, 14 },
+ { 0x293F, 14 },
+ { 0x0, 3 },
+ { 0x28, 6 },
+ { 0xA5, 8 },
+ { 0x148, 9 },
+ { 0xA7, 8 },
+ { 0x2E, 6 },
+ { 0x15, 5 },
+ { 0xA4E, 12 },
+ { 0x293E, 14 }
+ },
+ { /* AC bias group 4, table 7 */
+ { 0x4, 3 },
+ { 0x5, 4 },
+ { 0x3, 3 },
+ { 0x1, 3 },
+ { 0x4, 4 },
+ { 0x2F, 6 },
+ { 0x526, 11 },
+ { 0x1495, 13 },
+ { 0xA6, 8 },
+ { 0x7, 3 },
+ { 0x6, 3 },
+ { 0x2D, 6 },
+ { 0x2C, 6 },
+ { 0x1494, 13 },
+ { 0x1497, 13 },
+ { 0x1496, 13 },
+ { 0x1491, 13 },
+ { 0x1490, 13 },
+ { 0x1493, 13 },
+ { 0x1492, 13 },
+ { 0x293D, 14 },
+ { 0x293C, 14 },
+ { 0x293F, 14 },
+ { 0x0, 3 },
+ { 0x28, 6 },
+ { 0xA5, 8 },
+ { 0x148, 9 },
+ { 0xA7, 8 },
+ { 0x2E, 6 },
+ { 0x15, 5 },
+ { 0xA4E, 12 },
+ { 0x293E, 14 }
+ },
+ { /* AC bias group 4, table 8 */
+ { 0x3, 3 },
+ { 0x11, 5 },
+ { 0x20, 6 },
+ { 0x74, 7 },
+ { 0x10D, 9 },
+ { 0x863, 12 },
+ { 0x860, 12 },
+ { 0xA, 5 },
+ { 0x75, 7 },
+ { 0x1, 3 },
+ { 0x0, 3 },
+ { 0xB, 4 },
+ { 0xA, 4 },
+ { 0x18, 5 },
+ { 0x38, 6 },
+ { 0x42, 7 },
+ { 0x10F, 9 },
+ { 0x10E, 9 },
+ { 0x219, 10 },
+ { 0x10C3, 13 },
+ { 0x10C2, 13 },
+ { 0x10C5, 13 },
+ { 0x10C4, 13 },
+ { 0xF, 4 },
+ { 0x4, 4 },
+ { 0x19, 5 },
+ { 0xB, 5 },
+ { 0x39, 6 },
+ { 0x9, 4 },
+ { 0x1B, 5 },
+ { 0x1A, 5 },
+ { 0x3B, 6 }
+ },
+ { /* AC bias group 4, table 9 */
+ { 0x5, 3 },
+ { 0x1, 4 },
+ { 0x3E, 6 },
+ { 0x1, 5 },
+ { 0xE2, 8 },
+ { 0x1C6F, 13 },
+ { 0x38D9, 14 },
+ { 0x39, 6 },
+ { 0x1F, 6 },
+ { 0x2, 3 },
+ { 0x1, 3 },
+ { 0x9, 4 },
+ { 0x8, 4 },
+ { 0x0, 5 },
+ { 0x70, 7 },
+ { 0x1C7, 9 },
+ { 0x38C, 10 },
+ { 0x71A, 11 },
+ { 0x38D8, 14 },
+ { 0x38DB, 14 },
+ { 0x38DA, 14 },
+ { 0x38DD, 14 },
+ { 0x38DC, 14 },
+ { 0xD, 4 },
+ { 0x1D, 5 },
+ { 0xE, 5 },
+ { 0x3F, 6 },
+ { 0x3C, 6 },
+ { 0xC, 4 },
+ { 0x6, 4 },
+ { 0x3D, 6 },
+ { 0x1E, 6 }
+ },
+ { /* AC bias group 4, table 10 */
+ { 0x6, 3 },
+ { 0xB, 4 },
+ { 0x11, 5 },
+ { 0x1E, 5 },
+ { 0x74, 7 },
+ { 0x3AA, 10 },
+ { 0x1D5C, 13 },
+ { 0x1, 6 },
+ { 0x21, 6 },
+ { 0x1, 3 },
+ { 0x2, 3 },
+ { 0x7, 4 },
+ { 0x6, 4 },
+ { 0x3E, 6 },
+ { 0xEB, 8 },
+ { 0x1D4, 9 },
+ { 0xEAF, 12 },
+ { 0x3ABB, 14 },
+ { 0x3ABA, 14 },
+ { 0x1D59, 13 },
+ { 0x1D58, 13 },
+ { 0x1D5B, 13 },
+ { 0x1D5A, 13 },
+ { 0xA, 4 },
+ { 0x1C, 5 },
+ { 0x1, 5 },
+ { 0x3F, 6 },
+ { 0x3B, 6 },
+ { 0x1, 4 },
+ { 0x9, 4 },
+ { 0x20, 6 },
+ { 0x0, 6 }
+ },
+ { /* AC bias group 4, table 11 */
+ { 0x4, 3 },
+ { 0xA, 4 },
+ { 0x17, 5 },
+ { 0x4, 4 },
+ { 0x16, 6 },
+ { 0x16A, 9 },
+ { 0x16B1, 13 },
+ { 0x17, 7 },
+ { 0x5B, 7 },
+ { 0x6, 3 },
+ { 0x7, 3 },
+ { 0x1, 4 },
+ { 0x0, 4 },
+ { 0xA, 6 },
+ { 0x2D7, 10 },
+ { 0xB5A, 12 },
+ { 0x16B0, 13 },
+ { 0x16B3, 13 },
+ { 0x16B2, 13 },
+ { 0x2D6D, 14 },
+ { 0x2D6C, 14 },
+ { 0x2D6F, 14 },
+ { 0x2D6E, 14 },
+ { 0x6, 4 },
+ { 0xA, 5 },
+ { 0x4, 5 },
+ { 0x2C, 6 },
+ { 0x17, 6 },
+ { 0x3, 4 },
+ { 0x7, 4 },
+ { 0x16, 7 },
+ { 0xB4, 8 }
+ },
+ { /* AC bias group 4, table 12 */
+ { 0x5, 3 },
+ { 0xD, 4 },
+ { 0x5, 4 },
+ { 0x9, 4 },
+ { 0x33, 6 },
+ { 0x193, 9 },
+ { 0x192C, 13 },
+ { 0x61, 8 },
+ { 0x31, 7 },
+ { 0x0, 2 },
+ { 0x7, 3 },
+ { 0x10, 5 },
+ { 0x11, 5 },
+ { 0xC8, 8 },
+ { 0x192F, 13 },
+ { 0x325B, 14 },
+ { 0x325A, 14 },
+ { 0x1929, 13 },
+ { 0x1928, 13 },
+ { 0x192B, 13 },
+ { 0x192A, 13 },
+ { 0x325D, 14 },
+ { 0x325C, 14 },
+ { 0x18, 5 },
+ { 0x1A, 6 },
+ { 0x1B, 6 },
+ { 0x65, 7 },
+ { 0x19, 6 },
+ { 0x4, 4 },
+ { 0x7, 4 },
+ { 0x60, 8 },
+ { 0x324, 10 }
+ },
+ { /* AC bias group 4, table 13 */
+ { 0x6, 3 },
+ { 0x0, 3 },
+ { 0x2, 4 },
+ { 0xF, 4 },
+ { 0x39, 6 },
+ { 0x1D9, 9 },
+ { 0x1D82, 13 },
+ { 0x761, 11 },
+ { 0x3BE, 10 },
+ { 0x1, 2 },
+ { 0x2, 2 },
+ { 0xF, 6 },
+ { 0xE, 6 },
+ { 0x762, 11 },
+ { 0x3B07, 14 },
+ { 0x3B06, 14 },
+ { 0x3B1D, 14 },
+ { 0x3B1C, 14 },
+ { 0x3B1F, 14 },
+ { 0x3B1E, 14 },
+ { 0x3B19, 14 },
+ { 0x3B18, 14 },
+ { 0x3B1B, 14 },
+ { 0x38, 6 },
+ { 0x1DE, 9 },
+ { 0xED, 8 },
+ { 0x3BF, 10 },
+ { 0xEE, 8 },
+ { 0x3A, 6 },
+ { 0x6, 5 },
+ { 0xEC0, 12 },
+ { 0x3B1A, 14 }
+ },
+ { /* AC bias group 4, table 14 */
+ { 0x0, 2 },
+ { 0x2, 3 },
+ { 0xF, 5 },
+ { 0x6, 4 },
+ { 0x1C, 6 },
+ { 0x1D0, 10 },
+ { 0xE8C, 13 },
+ { 0x1D1B, 14 },
+ { 0x1D1A, 14 },
+ { 0x3, 2 },
+ { 0x2, 2 },
+ { 0xEA, 9 },
+ { 0xE9, 9 },
+ { 0xE89, 13 },
+ { 0xE88, 13 },
+ { 0xE8B, 13 },
+ { 0xE8A, 13 },
+ { 0x1D65, 14 },
+ { 0x1D64, 14 },
+ { 0x1D67, 14 },
+ { 0x1D66, 14 },
+ { 0x1D61, 14 },
+ { 0x1D60, 14 },
+ { 0x3AD, 11 },
+ { 0x1D63, 14 },
+ { 0x1D62, 14 },
+ { 0x1D1D, 14 },
+ { 0x1D1C, 14 },
+ { 0x3B, 7 },
+ { 0x1D7, 10 },
+ { 0x1D1F, 14 },
+ { 0x1D1E, 14 }
+ },
+ { /* AC bias group 4, table 15 */
+ { 0x2, 2 },
+ { 0xF, 4 },
+ { 0x1C, 5 },
+ { 0xC, 4 },
+ { 0x3B, 6 },
+ { 0x1AC, 9 },
+ { 0x1AD8, 13 },
+ { 0x35B3, 14 },
+ { 0x35B2, 14 },
+ { 0x1, 2 },
+ { 0x0, 2 },
+ { 0x69, 7 },
+ { 0x68, 7 },
+ { 0x35BD, 14 },
+ { 0x35BC, 14 },
+ { 0x35BF, 14 },
+ { 0x35BE, 14 },
+ { 0x35B9, 14 },
+ { 0x35B8, 14 },
+ { 0x35BB, 14 },
+ { 0x35BA, 14 },
+ { 0x35B5, 14 },
+ { 0x35B4, 14 },
+ { 0x1A9, 9 },
+ { 0x1A8, 9 },
+ { 0x35A, 10 },
+ { 0xD7, 8 },
+ { 0xD5, 8 },
+ { 0x3A, 6 },
+ { 0x1B, 5 },
+ { 0x35B7, 14 },
+ { 0x35B6, 14 }
+ }
+};
+
+#endif /* AVCODEC_VP3DATA_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp3dsp.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp3dsp.c
new file mode 100644
index 000000000..83d46e97f
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp3dsp.c
@@ -0,0 +1,255 @@
+/*
+ * Copyright (C) 2004 the ffmpeg project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file vp3dsp.c
+ * Standard C DSP-oriented functions cribbed from the original VP3
+ * source code.
+ */
+
+#include "avcodec.h"
+#include "dsputil.h"
+
+#define IdctAdjustBeforeShift 8
+#define xC1S7 64277
+#define xC2S6 60547
+#define xC3S5 54491
+#define xC4S4 46341
+#define xC5S3 36410
+#define xC6S2 25080
+#define xC7S1 12785
+
+#define M(a,b) (((a) * (b))>>16)
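+/* The xC*S* constants above are cos(k*pi/16) (== sin((8-k)*pi/16)) scaled by 2^16;
+ * M() performs the matching 16.16 fixed-point multiplication. */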
+
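+/* type selects the output mode: 0 = write the transformed coefficients back into
+ * the input block (ff_vp3_idct_c), 1 = store clamped samples to dst
+ * (ff_vp3_idct_put_c), 2 = add to dst and clamp (ff_vp3_idct_add_c). */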
+static av_always_inline void idct(uint8_t *dst, int stride, int16_t *input, int type)
+{
+ int16_t *ip = input;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+
+ int A, B, C, D, Ad, Bd, Cd, Dd, E, F, G, H;
+ int Ed, Gd, Add, Bdd, Fd, Hd;
+
+ int i;
+
+ /* Inverse DCT on the rows now */
+ for (i = 0; i < 8; i++) {
+ /* Check for non-zero values */
+ if ( ip[0] | ip[1] | ip[2] | ip[3] | ip[4] | ip[5] | ip[6] | ip[7] ) {
+ A = M(xC1S7, ip[1]) + M(xC7S1, ip[7]);
+ B = M(xC7S1, ip[1]) - M(xC1S7, ip[7]);
+ C = M(xC3S5, ip[3]) + M(xC5S3, ip[5]);
+ D = M(xC3S5, ip[5]) - M(xC5S3, ip[3]);
+
+ Ad = M(xC4S4, (A - C));
+ Bd = M(xC4S4, (B - D));
+
+ Cd = A + C;
+ Dd = B + D;
+
+ E = M(xC4S4, (ip[0] + ip[4]));
+ F = M(xC4S4, (ip[0] - ip[4]));
+
+ G = M(xC2S6, ip[2]) + M(xC6S2, ip[6]);
+ H = M(xC6S2, ip[2]) - M(xC2S6, ip[6]);
+
+ Ed = E - G;
+ Gd = E + G;
+
+ Add = F + Ad;
+ Bdd = Bd - H;
+
+ Fd = F - Ad;
+ Hd = Bd + H;
+
+            /* The final sequence of operations overwrites the original inputs. */
+ ip[0] = Gd + Cd ;
+ ip[7] = Gd - Cd ;
+
+ ip[1] = Add + Hd;
+ ip[2] = Add - Hd;
+
+ ip[3] = Ed + Dd ;
+ ip[4] = Ed - Dd ;
+
+ ip[5] = Fd + Bdd;
+ ip[6] = Fd - Bdd;
+ }
+
+ ip += 8; /* next row */
+ }
+
+ ip = input;
+
+ for ( i = 0; i < 8; i++) {
+        /* Check for non-zero values (bitwise OR is faster than ||) */
+ if ( ip[1 * 8] | ip[2 * 8] | ip[3 * 8] |
+ ip[4 * 8] | ip[5 * 8] | ip[6 * 8] | ip[7 * 8] ) {
+
+ A = M(xC1S7, ip[1*8]) + M(xC7S1, ip[7*8]);
+ B = M(xC7S1, ip[1*8]) - M(xC1S7, ip[7*8]);
+ C = M(xC3S5, ip[3*8]) + M(xC5S3, ip[5*8]);
+ D = M(xC3S5, ip[5*8]) - M(xC5S3, ip[3*8]);
+
+ Ad = M(xC4S4, (A - C));
+ Bd = M(xC4S4, (B - D));
+
+ Cd = A + C;
+ Dd = B + D;
+
+ E = M(xC4S4, (ip[0*8] + ip[4*8])) + 8;
+ F = M(xC4S4, (ip[0*8] - ip[4*8])) + 8;
+
+            if(type==1){ //HACK: bias E/F so every output sample gains +128 after the final >>4 (needed by the idct_put path)
+ E += 16*128;
+ F += 16*128;
+ }
+
+ G = M(xC2S6, ip[2*8]) + M(xC6S2, ip[6*8]);
+ H = M(xC6S2, ip[2*8]) - M(xC2S6, ip[6*8]);
+
+ Ed = E - G;
+ Gd = E + G;
+
+ Add = F + Ad;
+ Bdd = Bd - H;
+
+ Fd = F - Ad;
+ Hd = Bd + H;
+
+            /* The final sequence of operations overwrites the original inputs. */
+ if(type==0){
+ ip[0*8] = (Gd + Cd ) >> 4;
+ ip[7*8] = (Gd - Cd ) >> 4;
+
+ ip[1*8] = (Add + Hd ) >> 4;
+ ip[2*8] = (Add - Hd ) >> 4;
+
+ ip[3*8] = (Ed + Dd ) >> 4;
+ ip[4*8] = (Ed - Dd ) >> 4;
+
+ ip[5*8] = (Fd + Bdd ) >> 4;
+ ip[6*8] = (Fd - Bdd ) >> 4;
+ }else if(type==1){
+ dst[0*stride] = cm[(Gd + Cd ) >> 4];
+ dst[7*stride] = cm[(Gd - Cd ) >> 4];
+
+ dst[1*stride] = cm[(Add + Hd ) >> 4];
+ dst[2*stride] = cm[(Add - Hd ) >> 4];
+
+ dst[3*stride] = cm[(Ed + Dd ) >> 4];
+ dst[4*stride] = cm[(Ed - Dd ) >> 4];
+
+ dst[5*stride] = cm[(Fd + Bdd ) >> 4];
+ dst[6*stride] = cm[(Fd - Bdd ) >> 4];
+ }else{
+ dst[0*stride] = cm[dst[0*stride] + ((Gd + Cd ) >> 4)];
+ dst[7*stride] = cm[dst[7*stride] + ((Gd - Cd ) >> 4)];
+
+ dst[1*stride] = cm[dst[1*stride] + ((Add + Hd ) >> 4)];
+ dst[2*stride] = cm[dst[2*stride] + ((Add - Hd ) >> 4)];
+
+ dst[3*stride] = cm[dst[3*stride] + ((Ed + Dd ) >> 4)];
+ dst[4*stride] = cm[dst[4*stride] + ((Ed - Dd ) >> 4)];
+
+ dst[5*stride] = cm[dst[5*stride] + ((Fd + Bdd ) >> 4)];
+ dst[6*stride] = cm[dst[6*stride] + ((Fd - Bdd ) >> 4)];
+ }
+
+ } else {
+ if(type==0){
+ ip[0*8] =
+ ip[1*8] =
+ ip[2*8] =
+ ip[3*8] =
+ ip[4*8] =
+ ip[5*8] =
+ ip[6*8] =
+ ip[7*8] = ((xC4S4 * ip[0*8] + (IdctAdjustBeforeShift<<16))>>20);
+ }else if(type==1){
+ dst[0*stride]=
+ dst[1*stride]=
+ dst[2*stride]=
+ dst[3*stride]=
+ dst[4*stride]=
+ dst[5*stride]=
+ dst[6*stride]=
+ dst[7*stride]= cm[128 + ((xC4S4 * ip[0*8] + (IdctAdjustBeforeShift<<16))>>20)];
+ }else{
+ if(ip[0*8]){
+ int v= ((xC4S4 * ip[0*8] + (IdctAdjustBeforeShift<<16))>>20);
+ dst[0*stride] = cm[dst[0*stride] + v];
+ dst[1*stride] = cm[dst[1*stride] + v];
+ dst[2*stride] = cm[dst[2*stride] + v];
+ dst[3*stride] = cm[dst[3*stride] + v];
+ dst[4*stride] = cm[dst[4*stride] + v];
+ dst[5*stride] = cm[dst[5*stride] + v];
+ dst[6*stride] = cm[dst[6*stride] + v];
+ dst[7*stride] = cm[dst[7*stride] + v];
+ }
+ }
+ }
+
+ ip++; /* next column */
+ dst++;
+ }
+}
+
+void ff_vp3_idct_c(DCTELEM *block/* align 16*/){
+ idct(NULL, 0, block, 0);
+}
+
+void ff_vp3_idct_put_c(uint8_t *dest/*align 8*/, int line_size, DCTELEM *block/*align 16*/){
+ idct(dest, line_size, block, 1);
+}
+
+void ff_vp3_idct_add_c(uint8_t *dest/*align 8*/, int line_size, DCTELEM *block/*align 16*/){
+ idct(dest, line_size, block, 2);
+}
+
+void ff_vp3_v_loop_filter_c(uint8_t *first_pixel, int stride, int *bounding_values)
+{
+ unsigned char *end;
+ int filter_value;
+ const int nstride= -stride;
+
+ for (end= first_pixel + 8; first_pixel < end; first_pixel++) {
+ filter_value =
+ (first_pixel[2 * nstride] - first_pixel[ stride])
+ +3*(first_pixel[0 ] - first_pixel[nstride]);
+ filter_value = bounding_values[(filter_value + 4) >> 3];
+ first_pixel[nstride] = av_clip_uint8(first_pixel[nstride] + filter_value);
+ first_pixel[0] = av_clip_uint8(first_pixel[0] - filter_value);
+ }
+}
+
+void ff_vp3_h_loop_filter_c(uint8_t *first_pixel, int stride, int *bounding_values)
+{
+ unsigned char *end;
+ int filter_value;
+
+ for (end= first_pixel + 8*stride; first_pixel != end; first_pixel += stride) {
+ filter_value =
+ (first_pixel[-2] - first_pixel[ 1])
+ +3*(first_pixel[ 0] - first_pixel[-1]);
+ filter_value = bounding_values[(filter_value + 4) >> 3];
+ first_pixel[-1] = av_clip_uint8(first_pixel[-1] + filter_value);
+ first_pixel[ 0] = av_clip_uint8(first_pixel[ 0] - filter_value);
+ }
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp5.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp5.c
new file mode 100644
index 000000000..b8b7f9718
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp5.c
@@ -0,0 +1,302 @@
+/**
+ * @file libavcodec/vp5.c
+ * VP5 compatible video decoder
+ *
+ * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "avcodec.h"
+#include "dsputil.h"
+#include "get_bits.h"
+
+#include "vp56.h"
+#include "vp56data.h"
+#include "vp5data.h"
+
+
+static int vp5_parse_header(VP56Context *s, const uint8_t *buf, int buf_size,
+ int *golden_frame)
+{
+ VP56RangeCoder *c = &s->c;
+ int rows, cols;
+
+ vp56_init_range_decoder(&s->c, buf, buf_size);
+ s->framep[VP56_FRAME_CURRENT]->key_frame = !vp56_rac_get(c);
+ vp56_rac_get(c);
+ vp56_init_dequant(s, vp56_rac_gets(c, 6));
+ if (s->framep[VP56_FRAME_CURRENT]->key_frame)
+ {
+ vp56_rac_gets(c, 8);
+ if(vp56_rac_gets(c, 5) > 5)
+ return 0;
+ vp56_rac_gets(c, 2);
+ if (vp56_rac_get(c)) {
+ av_log(s->avctx, AV_LOG_ERROR, "interlacing not supported\n");
+ return 0;
+ }
+ rows = vp56_rac_gets(c, 8); /* number of stored macroblock rows */
+ cols = vp56_rac_gets(c, 8); /* number of stored macroblock cols */
+ vp56_rac_gets(c, 8); /* number of displayed macroblock rows */
+ vp56_rac_gets(c, 8); /* number of displayed macroblock cols */
+ vp56_rac_gets(c, 2);
+ if (!s->macroblocks || /* first frame */
+ 16*cols != s->avctx->coded_width ||
+ 16*rows != s->avctx->coded_height) {
+ avcodec_set_dimensions(s->avctx, 16*cols, 16*rows);
+ return 2;
+ }
+ } else if (!s->macroblocks)
+ return 0;
+ return 1;
+}
+
+/* Gives very similar results to the vp6 version, except in a few cases */
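+/* Branch-free triangular response: the returned adjustment keeps the sign of v,
+ * grows from 0 to t as |v| goes from 0 to t, falls back to 0 at |v| = 2*t,
+ * and is 0 for any larger |v|. */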
+static int vp5_adjust(int v, int t)
+{
+ int s2, s1 = v >> 31;
+ v ^= s1;
+ v -= s1;
+ v *= v < 2*t;
+ v -= t;
+ s2 = v >> 31;
+ v ^= s2;
+ v -= s2;
+ v = t - v;
+ v += s1;
+ v ^= s1;
+ return v;
+}
+
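+/* Decode one motion-vector delta per component: when present, its magnitude is a
+ * tree-coded high part (<<2) combined with two probability-coded low bits, after
+ * which the sign is applied. */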
+static void vp5_parse_vector_adjustment(VP56Context *s, VP56mv *vect)
+{
+ VP56RangeCoder *c = &s->c;
+ VP56Model *model = s->modelp;
+ int comp, di;
+
+ for (comp=0; comp<2; comp++) {
+ int delta = 0;
+ if (vp56_rac_get_prob(c, model->vector_dct[comp])) {
+ int sign = vp56_rac_get_prob(c, model->vector_sig[comp]);
+ di = vp56_rac_get_prob(c, model->vector_pdi[comp][0]);
+ di |= vp56_rac_get_prob(c, model->vector_pdi[comp][1]) << 1;
+ delta = vp56_rac_get_tree(c, vp56_pva_tree,
+ model->vector_pdv[comp]);
+ delta = di | (delta << 2);
+ delta = (delta ^ -sign) + sign;
+ }
+ if (!comp)
+ vect->x = delta;
+ else
+ vect->y = delta;
+ }
+}
+
+static void vp5_parse_vector_models(VP56Context *s)
+{
+ VP56RangeCoder *c = &s->c;
+ VP56Model *model = s->modelp;
+ int comp, node;
+
+ for (comp=0; comp<2; comp++) {
+ if (vp56_rac_get_prob(c, vp5_vmc_pct[comp][0]))
+ model->vector_dct[comp] = vp56_rac_gets_nn(c, 7);
+ if (vp56_rac_get_prob(c, vp5_vmc_pct[comp][1]))
+ model->vector_sig[comp] = vp56_rac_gets_nn(c, 7);
+ if (vp56_rac_get_prob(c, vp5_vmc_pct[comp][2]))
+ model->vector_pdi[comp][0] = vp56_rac_gets_nn(c, 7);
+ if (vp56_rac_get_prob(c, vp5_vmc_pct[comp][3]))
+ model->vector_pdi[comp][1] = vp56_rac_gets_nn(c, 7);
+ }
+
+ for (comp=0; comp<2; comp++)
+ for (node=0; node<7; node++)
+ if (vp56_rac_get_prob(c, vp5_vmc_pct[comp][4 + node]))
+ model->vector_pdv[comp][node] = vp56_rac_gets_nn(c, 7);
+}
+
+static void vp5_parse_coeff_models(VP56Context *s)
+{
+ VP56RangeCoder *c = &s->c;
+ VP56Model *model = s->modelp;
+ uint8_t def_prob[11];
+ int node, cg, ctx;
+ int ct; /* code type */
+ int pt; /* plane type (0 for Y, 1 for U or V) */
+
+ memset(def_prob, 0x80, sizeof(def_prob));
+
+ for (pt=0; pt<2; pt++)
+ for (node=0; node<11; node++)
+ if (vp56_rac_get_prob(c, vp5_dccv_pct[pt][node])) {
+ def_prob[node] = vp56_rac_gets_nn(c, 7);
+ model->coeff_dccv[pt][node] = def_prob[node];
+ } else if (s->framep[VP56_FRAME_CURRENT]->key_frame) {
+ model->coeff_dccv[pt][node] = def_prob[node];
+ }
+
+ for (ct=0; ct<3; ct++)
+ for (pt=0; pt<2; pt++)
+ for (cg=0; cg<6; cg++)
+ for (node=0; node<11; node++)
+ if (vp56_rac_get_prob(c, vp5_ract_pct[ct][pt][cg][node])) {
+ def_prob[node] = vp56_rac_gets_nn(c, 7);
+ model->coeff_ract[pt][ct][cg][node] = def_prob[node];
+ } else if (s->framep[VP56_FRAME_CURRENT]->key_frame) {
+ model->coeff_ract[pt][ct][cg][node] = def_prob[node];
+ }
+
+ /* coeff_dcct is a linear combination of coeff_dccv */
+ for (pt=0; pt<2; pt++)
+ for (ctx=0; ctx<36; ctx++)
+ for (node=0; node<5; node++)
+ model->coeff_dcct[pt][ctx][node] = av_clip(((model->coeff_dccv[pt][node] * vp5_dccv_lc[node][ctx][0] + 128) >> 8) + vp5_dccv_lc[node][ctx][1], 1, 254);
+
+ /* coeff_acct is a linear combination of coeff_ract */
+ for (ct=0; ct<3; ct++)
+ for (pt=0; pt<2; pt++)
+ for (cg=0; cg<3; cg++)
+ for (ctx=0; ctx<6; ctx++)
+ for (node=0; node<5; node++)
+ model->coeff_acct[pt][ct][cg][ctx][node] = av_clip(((model->coeff_ract[pt][ct][cg][node] * vp5_ract_lc[ct][cg][node][ctx][0] + 128) >> 8) + vp5_ract_lc[ct][cg][node][ctx][1], 1, 254);
+}
+
+static void vp5_parse_coeff(VP56Context *s)
+{
+ VP56RangeCoder *c = &s->c;
+ VP56Model *model = s->modelp;
+ uint8_t *permute = s->scantable.permutated;
+ uint8_t *model1, *model2;
+ int coeff, sign, coeff_idx;
+ int b, i, cg, idx, ctx, ctx_last;
+ int pt = 0; /* plane type (0 for Y, 1 for U or V) */
+
+ for (b=0; b<6; b++) {
+ int ct = 1; /* code type */
+
+ if (b > 3) pt = 1;
+
+ ctx = 6*s->coeff_ctx[vp56_b6to4[b]][0]
+ + s->above_blocks[s->above_block_idx[b]].not_null_dc;
+ model1 = model->coeff_dccv[pt];
+ model2 = model->coeff_dcct[pt][ctx];
+
+ for (coeff_idx=0; coeff_idx<64; ) {
+ if (vp56_rac_get_prob(c, model2[0])) {
+ if (vp56_rac_get_prob(c, model2[2])) {
+ if (vp56_rac_get_prob(c, model2[3])) {
+ s->coeff_ctx[vp56_b6to4[b]][coeff_idx] = 4;
+ idx = vp56_rac_get_tree(c, vp56_pc_tree, model1);
+ sign = vp56_rac_get(c);
+ coeff = vp56_coeff_bias[idx+5];
+ for (i=vp56_coeff_bit_length[idx]; i>=0; i--)
+ coeff += vp56_rac_get_prob(c, vp56_coeff_parse_table[idx][i]) << i;
+ } else {
+ if (vp56_rac_get_prob(c, model2[4])) {
+ coeff = 3 + vp56_rac_get_prob(c, model1[5]);
+ s->coeff_ctx[vp56_b6to4[b]][coeff_idx] = 3;
+ } else {
+ coeff = 2;
+ s->coeff_ctx[vp56_b6to4[b]][coeff_idx] = 2;
+ }
+ sign = vp56_rac_get(c);
+ }
+ ct = 2;
+ } else {
+ ct = 1;
+ s->coeff_ctx[vp56_b6to4[b]][coeff_idx] = 1;
+ sign = vp56_rac_get(c);
+ coeff = 1;
+ }
+ coeff = (coeff ^ -sign) + sign;
+ if (coeff_idx)
+ coeff *= s->dequant_ac;
+ s->block_coeff[b][permute[coeff_idx]] = coeff;
+ } else {
+ if (ct && !vp56_rac_get_prob(c, model2[1]))
+ break;
+ ct = 0;
+ s->coeff_ctx[vp56_b6to4[b]][coeff_idx] = 0;
+ }
+
+ cg = vp5_coeff_groups[++coeff_idx];
+ ctx = s->coeff_ctx[vp56_b6to4[b]][coeff_idx];
+ model1 = model->coeff_ract[pt][ct][cg];
+ model2 = cg > 2 ? model1 : model->coeff_acct[pt][ct][cg][ctx];
+ }
+
+ ctx_last = FFMIN(s->coeff_ctx_last[vp56_b6to4[b]], 24);
+ s->coeff_ctx_last[vp56_b6to4[b]] = coeff_idx;
+ if (coeff_idx < ctx_last)
+ for (i=coeff_idx; i<=ctx_last; i++)
+ s->coeff_ctx[vp56_b6to4[b]][i] = 5;
+ s->above_blocks[s->above_block_idx[b]].not_null_dc = s->coeff_ctx[vp56_b6to4[b]][0];
+ }
+}
+
+static void vp5_default_models_init(VP56Context *s)
+{
+ VP56Model *model = s->modelp;
+ int i;
+
+ for (i=0; i<2; i++) {
+ model->vector_sig[i] = 0x80;
+ model->vector_dct[i] = 0x80;
+ model->vector_pdi[i][0] = 0x55;
+ model->vector_pdi[i][1] = 0x80;
+ }
+ memcpy(model->mb_types_stats, vp56_def_mb_types_stats, sizeof(model->mb_types_stats));
+ memset(model->vector_pdv, 0x80, sizeof(model->vector_pdv));
+}
+
+static av_cold int vp5_decode_init(AVCodecContext *avctx)
+{
+ VP56Context *s = avctx->priv_data;
+
+ vp56_init(avctx, 1, 0);
+ s->vp56_coord_div = vp5_coord_div;
+ s->parse_vector_adjustment = vp5_parse_vector_adjustment;
+ s->adjust = vp5_adjust;
+ s->parse_coeff = vp5_parse_coeff;
+ s->default_models_init = vp5_default_models_init;
+ s->parse_vector_models = vp5_parse_vector_models;
+ s->parse_coeff_models = vp5_parse_coeff_models;
+ s->parse_header = vp5_parse_header;
+
+ return 0;
+}
+
+AVCodec vp5_decoder = {
+ "vp5",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_VP5,
+ sizeof(VP56Context),
+ vp5_decode_init,
+ NULL,
+ vp56_free,
+ vp56_decode_frame,
+ /*.capabilities = */CODEC_CAP_DR1,
+ /*.next = */NULL,
+ /*.flush = */NULL,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name = */NULL_IF_CONFIG_SMALL("On2 VP5"),
+};
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp56.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp56.c
new file mode 100644
index 000000000..a4baf23d2
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp56.c
@@ -0,0 +1,699 @@
+/**
+ * @file libavcodec/vp56.c
+ * VP5 and VP6 compatible video decoder (common features)
+ *
+ * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avcodec.h"
+#include "bytestream.h"
+
+#include "vp56.h"
+#include "vp56data.h"
+
+
+void vp56_init_dequant(VP56Context *s, int quantizer)
+{
+ s->quantizer = quantizer;
+ s->dequant_dc = vp56_dc_dequant[quantizer] << 2;
+ s->dequant_ac = vp56_ac_dequant[quantizer] << 2;
+}
+
+static int vp56_get_vectors_predictors(VP56Context *s, int row, int col,
+ VP56Frame ref_frame)
+{
+ int nb_pred = 0;
+ VP56mv vect[2] = {{0,0}, {0,0}};
+ int pos, offset;
+ VP56mv mvp;
+
+ for (pos=0; pos<12; pos++) {
+ mvp.x = col + vp56_candidate_predictor_pos[pos][0];
+ mvp.y = row + vp56_candidate_predictor_pos[pos][1];
+ if (mvp.x < 0 || mvp.x >= s->mb_width ||
+ mvp.y < 0 || mvp.y >= s->mb_height)
+ continue;
+ offset = mvp.x + s->mb_width*mvp.y;
+
+ if (vp56_reference_frame[s->macroblocks[offset].type] != ref_frame)
+ continue;
+ if ((s->macroblocks[offset].mv.x == vect[0].x &&
+ s->macroblocks[offset].mv.y == vect[0].y) ||
+ (s->macroblocks[offset].mv.x == 0 &&
+ s->macroblocks[offset].mv.y == 0))
+ continue;
+
+ vect[nb_pred++] = s->macroblocks[offset].mv;
+ if (nb_pred > 1) {
+ nb_pred = -1;
+ break;
+ }
+ s->vector_candidate_pos = pos;
+ }
+
+ s->vector_candidate[0] = vect[0];
+ s->vector_candidate[1] = vect[1];
+
+ return nb_pred+1;
+}
+
+static void vp56_parse_mb_type_models(VP56Context *s)
+{
+ VP56RangeCoder *c = &s->c;
+ VP56Model *model = s->modelp;
+ int i, ctx, type;
+
+ for (ctx=0; ctx<3; ctx++) {
+ if (vp56_rac_get_prob(c, 174)) {
+ int idx = vp56_rac_gets(c, 4);
+ memcpy(model->mb_types_stats[ctx],
+ vp56_pre_def_mb_type_stats[idx][ctx],
+ sizeof(model->mb_types_stats[ctx]));
+ }
+ if (vp56_rac_get_prob(c, 254)) {
+ for (type=0; type<10; type++) {
+ for(i=0; i<2; i++) {
+ if (vp56_rac_get_prob(c, 205)) {
+ int delta, sign = vp56_rac_get(c);
+
+ delta = vp56_rac_get_tree(c, vp56_pmbtm_tree,
+ vp56_mb_type_model_model);
+ if (!delta)
+ delta = 4 * vp56_rac_gets(c, 7);
+ model->mb_types_stats[ctx][type][i] += (delta ^ -sign) + sign;
+ }
+ }
+ }
+ }
+ }
+
+ /* compute MB type probability tables based on previous MB type */
+ for (ctx=0; ctx<3; ctx++) {
+ int p[10];
+
+ for (type=0; type<10; type++)
+ p[type] = 100 * model->mb_types_stats[ctx][type][1];
+
+ for (type=0; type<10; type++) {
+ int p02, p34, p0234, p17, p56, p89, p5689, p156789;
+
+ /* conservative MB type probability */
+ model->mb_type[ctx][type][0] = 255 - (255 * model->mb_types_stats[ctx][type][0]) / (1 + model->mb_types_stats[ctx][type][0] + model->mb_types_stats[ctx][type][1]);
+
+            p[type] = 0; /* same MB type => zero weight */
+
+ /* binary tree parsing probabilities */
+ p02 = p[0] + p[2];
+ p34 = p[3] + p[4];
+ p0234 = p02 + p34;
+ p17 = p[1] + p[7];
+ p56 = p[5] + p[6];
+ p89 = p[8] + p[9];
+ p5689 = p56 + p89;
+ p156789 = p17 + p5689;
+
+ model->mb_type[ctx][type][1] = 1 + 255 * p0234/(1+p0234+p156789);
+ model->mb_type[ctx][type][2] = 1 + 255 * p02 / (1+p0234);
+ model->mb_type[ctx][type][3] = 1 + 255 * p17 / (1+p156789);
+ model->mb_type[ctx][type][4] = 1 + 255 * p[0] / (1+p02);
+ model->mb_type[ctx][type][5] = 1 + 255 * p[3] / (1+p34);
+ model->mb_type[ctx][type][6] = 1 + 255 * p[1] / (1+p17);
+ model->mb_type[ctx][type][7] = 1 + 255 * p56 / (1+p5689);
+ model->mb_type[ctx][type][8] = 1 + 255 * p[5] / (1+p56);
+ model->mb_type[ctx][type][9] = 1 + 255 * p[8] / (1+p89);
+
+ /* restore initial value */
+ p[type] = 100 * model->mb_types_stats[ctx][type][1];
+ }
+ }
+}
+
+static VP56mb vp56_parse_mb_type(VP56Context *s,
+ VP56mb prev_type, int ctx)
+{
+ uint8_t *mb_type_model = s->modelp->mb_type[ctx][prev_type];
+ VP56RangeCoder *c = &s->c;
+
+ if (vp56_rac_get_prob(c, mb_type_model[0]))
+ return prev_type;
+ else
+ return vp56_rac_get_tree(c, vp56_pmbt_tree, mb_type_model);
+}
+
+static void vp56_decode_4mv(VP56Context *s, int row, int col)
+{
+ VP56mv mv = {0,0};
+ int type[4];
+ int b;
+
+ /* parse each block type */
+ for (b=0; b<4; b++) {
+ type[b] = vp56_rac_gets(&s->c, 2);
+ if (type[b])
+ type[b]++; /* only returns 0, 2, 3 or 4 (all INTER_PF) */
+ }
+
+ /* get vectors */
+ for (b=0; b<4; b++) {
+ switch (type[b]) {
+ case VP56_MB_INTER_NOVEC_PF:
+ s->mv[b].x=0;s->mv[b].y=0;
+ break;
+ case VP56_MB_INTER_DELTA_PF:
+ s->parse_vector_adjustment(s, &s->mv[b]);
+ break;
+ case VP56_MB_INTER_V1_PF:
+ s->mv[b] = s->vector_candidate[0];
+ break;
+ case VP56_MB_INTER_V2_PF:
+ s->mv[b] = s->vector_candidate[1];
+ break;
+ }
+ mv.x += s->mv[b].x;
+ mv.y += s->mv[b].y;
+ }
+
+    /* the vector of the last (4th) block is kept as the whole MB's vector for later prediction */
+ s->macroblocks[row * s->mb_width + col].mv = s->mv[3];
+
+    /* chroma vectors are the average of the four luma vectors */
+ if (s->avctx->codec->id == CODEC_ID_VP5) {
+ s->mv[4].x = s->mv[5].x = RSHIFT(mv.x,2);
+ s->mv[4].y = s->mv[5].y = RSHIFT(mv.y,2);
+ } else {
+ s->mv[4].x = s->mv[5].x = mv.x/4; s->mv[4].y = s->mv[5].y = mv.y/4;
+ }
+}
+
+static VP56mb vp56_decode_mv(VP56Context *s, int row, int col)
+{
+ VP56mv *mv, vect = {0,0};
+ int ctx, b;
+
+ ctx = vp56_get_vectors_predictors(s, row, col, VP56_FRAME_PREVIOUS);
+ s->mb_type = vp56_parse_mb_type(s, s->mb_type, ctx);
+ s->macroblocks[row * s->mb_width + col].type = s->mb_type;
+
+ switch (s->mb_type) {
+ case VP56_MB_INTER_V1_PF:
+ mv = &s->vector_candidate[0];
+ break;
+
+ case VP56_MB_INTER_V2_PF:
+ mv = &s->vector_candidate[1];
+ break;
+
+ case VP56_MB_INTER_V1_GF:
+ vp56_get_vectors_predictors(s, row, col, VP56_FRAME_GOLDEN);
+ mv = &s->vector_candidate[0];
+ break;
+
+ case VP56_MB_INTER_V2_GF:
+ vp56_get_vectors_predictors(s, row, col, VP56_FRAME_GOLDEN);
+ mv = &s->vector_candidate[1];
+ break;
+
+ case VP56_MB_INTER_DELTA_PF:
+ s->parse_vector_adjustment(s, &vect);
+ mv = &vect;
+ break;
+
+ case VP56_MB_INTER_DELTA_GF:
+ vp56_get_vectors_predictors(s, row, col, VP56_FRAME_GOLDEN);
+ s->parse_vector_adjustment(s, &vect);
+ mv = &vect;
+ break;
+
+ case VP56_MB_INTER_4V:
+ vp56_decode_4mv(s, row, col);
+ return s->mb_type;
+
+ default:
+ mv = &vect;
+ break;
+ }
+
+ s->macroblocks[row*s->mb_width + col].mv = *mv;
+
+ /* same vector for all blocks */
+ for (b=0; b<6; b++)
+ s->mv[b] = *mv;
+
+ return s->mb_type;
+}
+
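+/* DC prediction: average the DC of the left/above neighbours coded from the same
+ * reference frame (falling back to the per-plane, per-reference previous DC), add
+ * it to the block's DC coefficient, update the predictors, then dequantize. */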
+static void vp56_add_predictors_dc(VP56Context *s, VP56Frame ref_frame)
+{
+ int idx = s->scantable.permutated[0];
+ int b;
+
+ for (b=0; b<6; b++) {
+ VP56RefDc *ab = &s->above_blocks[s->above_block_idx[b]];
+ VP56RefDc *lb = &s->left_block[vp56_b6to4[b]];
+ int count = 0;
+ int dc = 0;
+ int i;
+
+ if (ref_frame == lb->ref_frame) {
+ dc += lb->dc_coeff;
+ count++;
+ }
+ if (ref_frame == ab->ref_frame) {
+ dc += ab->dc_coeff;
+ count++;
+ }
+ if (s->avctx->codec->id == CODEC_ID_VP5)
+ for (i=0; i<2; i++)
+ if (count < 2 && ref_frame == ab[-1+2*i].ref_frame) {
+ dc += ab[-1+2*i].dc_coeff;
+ count++;
+ }
+ if (count == 0)
+ dc = s->prev_dc[vp56_b2p[b]][ref_frame];
+ else if (count == 2)
+ dc /= 2;
+
+ s->block_coeff[b][idx] += dc;
+ s->prev_dc[vp56_b2p[b]][ref_frame] = s->block_coeff[b][idx];
+ ab->dc_coeff = s->block_coeff[b][idx];
+ ab->ref_frame = ref_frame;
+ lb->dc_coeff = s->block_coeff[b][idx];
+ lb->ref_frame = ref_frame;
+ s->block_coeff[b][idx] *= s->dequant_dc;
+ }
+}
+
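+/* Filter the 12 positions along one block edge: derive a filter value from the
+ * four pixels around the edge, pass it through the codec-specific adjust()
+ * response, and apply it symmetrically to the two pixels next to the edge. */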
+static void vp56_edge_filter(VP56Context *s, uint8_t *yuv,
+ int pix_inc, int line_inc, int t)
+{
+ int pix2_inc = 2 * pix_inc;
+ int i, v;
+
+ for (i=0; i<12; i++) {
+ v = (yuv[-pix2_inc] + 3*(yuv[0]-yuv[-pix_inc]) - yuv[pix_inc] + 4) >>3;
+ v = s->adjust(v, t);
+ yuv[-pix_inc] = av_clip_uint8(yuv[-pix_inc] + v);
+ yuv[0] = av_clip_uint8(yuv[0] - v);
+ yuv += line_inc;
+ }
+}
+
+static void vp56_deblock_filter(VP56Context *s, uint8_t *yuv,
+ int stride, int dx, int dy)
+{
+ int t = vp56_filter_threshold[s->quantizer];
+ if (dx) vp56_edge_filter(s, yuv + 10-dx , 1, stride, t);
+ if (dy) vp56_edge_filter(s, yuv + stride*(10-dy), stride, 1, t);
+}
+
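+/* Motion compensation of one 8x8 block: fetch a 12x12 reference area (using edge
+ * emulation near plane borders), optionally deblock it, then apply the codec's
+ * sub-pel/overlap filter or a plain 8x8 copy. */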
+static void vp56_mc(VP56Context *s, int b, int plane, uint8_t *src,
+ int stride, int x, int y)
+{
+ uint8_t *dst=s->framep[VP56_FRAME_CURRENT]->data[plane]+s->block_offset[b];
+ uint8_t *src_block;
+ int src_offset;
+ int overlap_offset = 0;
+ int mask = s->vp56_coord_div[b] - 1;
+ int deblock_filtering = s->deblock_filtering;
+ int dx;
+ int dy;
+
+ if (s->avctx->skip_loop_filter >= AVDISCARD_ALL ||
+ (s->avctx->skip_loop_filter >= AVDISCARD_NONKEY
+ && !s->framep[VP56_FRAME_CURRENT]->key_frame))
+ deblock_filtering = 0;
+
+ dx = s->mv[b].x / s->vp56_coord_div[b];
+ dy = s->mv[b].y / s->vp56_coord_div[b];
+
+ if (b >= 4) {
+ x /= 2;
+ y /= 2;
+ }
+ x += dx - 2;
+ y += dy - 2;
+
+ if (x<0 || x+12>=s->plane_width[plane] ||
+ y<0 || y+12>=s->plane_height[plane]) {
+ ff_emulated_edge_mc(s->edge_emu_buffer,
+ src + s->block_offset[b] + (dy-2)*stride + (dx-2),
+ stride, 12, 12, x, y,
+ s->plane_width[plane],
+ s->plane_height[plane]);
+ src_block = s->edge_emu_buffer;
+ src_offset = 2 + 2*stride;
+ } else if (deblock_filtering) {
+ /* only need a 12x12 block, but there is no such dsp function, */
+ /* so copy a 16x12 block */
+ s->dsp.put_pixels_tab[0][0](s->edge_emu_buffer,
+ src + s->block_offset[b] + (dy-2)*stride + (dx-2),
+ stride, 12);
+ src_block = s->edge_emu_buffer;
+ src_offset = 2 + 2*stride;
+ } else {
+ src_block = src;
+ src_offset = s->block_offset[b] + dy*stride + dx;
+ }
+
+ if (deblock_filtering)
+ vp56_deblock_filter(s, src_block, stride, dx&7, dy&7);
+
+ if (s->mv[b].x & mask)
+ overlap_offset += (s->mv[b].x > 0) ? 1 : -1;
+ if (s->mv[b].y & mask)
+ overlap_offset += (s->mv[b].y > 0) ? stride : -stride;
+
+ if (overlap_offset) {
+ if (s->filter)
+ s->filter(s, dst, src_block, src_offset, src_offset+overlap_offset,
+ stride, s->mv[b], mask, s->filter_selection, b<4);
+ else
+ s->dsp.put_no_rnd_pixels_l2[1](dst, src_block+src_offset,
+ src_block+src_offset+overlap_offset,
+ stride, 8);
+ } else {
+ s->dsp.put_pixels_tab[1][0](dst, src_block+src_offset, stride, 8);
+ }
+}
+
+static void vp56_decode_mb(VP56Context *s, int row, int col, int is_alpha)
+{
+ AVFrame *frame_current, *frame_ref;
+ VP56mb mb_type;
+ VP56Frame ref_frame;
+ int b, ab, b_max, plane, off;
+
+ if (s->framep[VP56_FRAME_CURRENT]->key_frame)
+ mb_type = VP56_MB_INTRA;
+ else
+ mb_type = vp56_decode_mv(s, row, col);
+ ref_frame = vp56_reference_frame[mb_type];
+
+ s->dsp.clear_blocks(*s->block_coeff);
+
+ s->parse_coeff(s);
+
+ vp56_add_predictors_dc(s, ref_frame);
+
+ frame_current = s->framep[VP56_FRAME_CURRENT];
+ frame_ref = s->framep[ref_frame];
+
+ ab = 6*is_alpha;
+ b_max = 6 - 2*is_alpha;
+
+ switch (mb_type) {
+ case VP56_MB_INTRA:
+ for (b=0; b<b_max; b++) {
+ plane = vp56_b2p[b+ab];
+ s->dsp.idct_put(frame_current->data[plane] + s->block_offset[b],
+ s->stride[plane], s->block_coeff[b]);
+ }
+ break;
+
+ case VP56_MB_INTER_NOVEC_PF:
+ case VP56_MB_INTER_NOVEC_GF:
+ for (b=0; b<b_max; b++) {
+ plane = vp56_b2p[b+ab];
+ off = s->block_offset[b];
+ s->dsp.put_pixels_tab[1][0](frame_current->data[plane] + off,
+ frame_ref->data[plane] + off,
+ s->stride[plane], 8);
+ s->dsp.idct_add(frame_current->data[plane] + off,
+ s->stride[plane], s->block_coeff[b]);
+ }
+ break;
+
+ case VP56_MB_INTER_DELTA_PF:
+ case VP56_MB_INTER_V1_PF:
+ case VP56_MB_INTER_V2_PF:
+ case VP56_MB_INTER_DELTA_GF:
+ case VP56_MB_INTER_4V:
+ case VP56_MB_INTER_V1_GF:
+ case VP56_MB_INTER_V2_GF:
+ for (b=0; b<b_max; b++) {
+ int x_off = b==1 || b==3 ? 8 : 0;
+ int y_off = b==2 || b==3 ? 8 : 0;
+ plane = vp56_b2p[b+ab];
+ vp56_mc(s, b, plane, frame_ref->data[plane], s->stride[plane],
+ 16*col+x_off, 16*row+y_off);
+ s->dsp.idct_add(frame_current->data[plane] + s->block_offset[b],
+ s->stride[plane], s->block_coeff[b]);
+ }
+ break;
+ }
+}
+
+static int vp56_size_changed(AVCodecContext *avctx)
+{
+ VP56Context *s = avctx->priv_data;
+ int stride = s->framep[VP56_FRAME_CURRENT]->linesize[0];
+ int i;
+
+ s->plane_width[0] = s->plane_width[3] = avctx->coded_width;
+ s->plane_width[1] = s->plane_width[2] = avctx->coded_width/2;
+ s->plane_height[0] = s->plane_height[3] = avctx->coded_height;
+ s->plane_height[1] = s->plane_height[2] = avctx->coded_height/2;
+
+ for (i=0; i<4; i++)
+ s->stride[i] = s->flip * s->framep[VP56_FRAME_CURRENT]->linesize[i];
+
+ s->mb_width = (avctx->coded_width +15) / 16;
+ s->mb_height = (avctx->coded_height+15) / 16;
+
+ if (s->mb_width > 1000 || s->mb_height > 1000) {
+ av_log(avctx, AV_LOG_ERROR, "picture too big\n");
+ return -1;
+ }
+
+ s->above_blocks = av_realloc(s->above_blocks,
+ (4*s->mb_width+6) * sizeof(*s->above_blocks));
+ s->macroblocks = av_realloc(s->macroblocks,
+ s->mb_width*s->mb_height*sizeof(*s->macroblocks));
+ av_free(s->edge_emu_buffer_alloc);
+ s->edge_emu_buffer_alloc = av_malloc(16*stride);
+ s->edge_emu_buffer = s->edge_emu_buffer_alloc;
+ if (s->flip < 0)
+ s->edge_emu_buffer += 15 * stride;
+
+ return 0;
+}
+
+int vp56_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
+ const uint8_t *buf, int buf_size)
+{
+ VP56Context *s = avctx->priv_data;
+ AVFrame *const p = s->framep[VP56_FRAME_CURRENT];
+ int remaining_buf_size = buf_size;
+ int is_alpha, av_uninit(alpha_offset);
+
+ if (s->has_alpha) {
+ alpha_offset = bytestream_get_be24(&buf);
+ remaining_buf_size -= 3;
+ }
+
+ for (is_alpha=0; is_alpha < 1+s->has_alpha; is_alpha++) {
+ int mb_row, mb_col, mb_row_flip, mb_offset = 0;
+ int block, y, uv, stride_y, stride_uv;
+ int golden_frame = 0;
+ int res;
+
+ s->modelp = &s->models[is_alpha];
+
+ res = s->parse_header(s, buf, remaining_buf_size, &golden_frame);
+ if (!res)
+ return -1;
+
+ if (!is_alpha) {
+ p->reference = 1;
+ if (avctx->get_buffer(avctx, p) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return -1;
+ }
+
+ if (res == 2)
+ if (vp56_size_changed(avctx)) {
+ avctx->release_buffer(avctx, p);
+ return -1;
+ }
+ }
+
+ if (p->key_frame) {
+ p->pict_type = FF_I_TYPE;
+ s->default_models_init(s);
+ for (block=0; block<s->mb_height*s->mb_width; block++)
+ s->macroblocks[block].type = VP56_MB_INTRA;
+ } else {
+ p->pict_type = FF_P_TYPE;
+ vp56_parse_mb_type_models(s);
+ s->parse_vector_models(s);
+ s->mb_type = VP56_MB_INTER_NOVEC_PF;
+ }
+
+ s->parse_coeff_models(s);
+
+ memset(s->prev_dc, 0, sizeof(s->prev_dc));
+ s->prev_dc[1][VP56_FRAME_CURRENT] = 128;
+ s->prev_dc[2][VP56_FRAME_CURRENT] = 128;
+
+ for (block=0; block < 4*s->mb_width+6; block++) {
+ s->above_blocks[block].ref_frame = VP56_FRAME_NONE;
+ s->above_blocks[block].dc_coeff = 0;
+ s->above_blocks[block].not_null_dc = 0;
+ }
+ s->above_blocks[2*s->mb_width + 2].ref_frame = VP56_FRAME_CURRENT;
+ s->above_blocks[3*s->mb_width + 4].ref_frame = VP56_FRAME_CURRENT;
+
+ stride_y = p->linesize[0];
+ stride_uv = p->linesize[1];
+
+ if (s->flip < 0)
+ mb_offset = 7;
+
+ /* main macroblocks loop */
+ for (mb_row=0; mb_row<s->mb_height; mb_row++) {
+ if (s->flip < 0)
+ mb_row_flip = s->mb_height - mb_row - 1;
+ else
+ mb_row_flip = mb_row;
+
+ for (block=0; block<4; block++) {
+ s->left_block[block].ref_frame = VP56_FRAME_NONE;
+ s->left_block[block].dc_coeff = 0;
+ s->left_block[block].not_null_dc = 0;
+ }
+ memset(s->coeff_ctx, 0, sizeof(s->coeff_ctx));
+ memset(s->coeff_ctx_last, 24, sizeof(s->coeff_ctx_last));
+
+ s->above_block_idx[0] = 1;
+ s->above_block_idx[1] = 2;
+ s->above_block_idx[2] = 1;
+ s->above_block_idx[3] = 2;
+ s->above_block_idx[4] = 2*s->mb_width + 2 + 1;
+ s->above_block_idx[5] = 3*s->mb_width + 4 + 1;
+
+ s->block_offset[s->frbi] = (mb_row_flip*16 + mb_offset) * stride_y;
+ s->block_offset[s->srbi] = s->block_offset[s->frbi] + 8*stride_y;
+ s->block_offset[1] = s->block_offset[0] + 8;
+ s->block_offset[3] = s->block_offset[2] + 8;
+ s->block_offset[4] = (mb_row_flip*8 + mb_offset) * stride_uv;
+ s->block_offset[5] = s->block_offset[4];
+
+ for (mb_col=0; mb_col<s->mb_width; mb_col++) {
+ vp56_decode_mb(s, mb_row, mb_col, is_alpha);
+
+ for (y=0; y<4; y++) {
+ s->above_block_idx[y] += 2;
+ s->block_offset[y] += 16;
+ }
+
+ for (uv=4; uv<6; uv++) {
+ s->above_block_idx[uv] += 1;
+ s->block_offset[uv] += 8;
+ }
+ }
+ }
+
+ if (p->key_frame || golden_frame) {
+ if (s->framep[VP56_FRAME_GOLDEN]->data[0] &&
+ s->framep[VP56_FRAME_GOLDEN] != s->framep[VP56_FRAME_GOLDEN2])
+ avctx->release_buffer(avctx, s->framep[VP56_FRAME_GOLDEN]);
+ s->framep[VP56_FRAME_GOLDEN] = p;
+ }
+
+ if (s->has_alpha) {
+ FFSWAP(AVFrame *, s->framep[VP56_FRAME_GOLDEN],
+ s->framep[VP56_FRAME_GOLDEN2]);
+ buf += alpha_offset;
+ remaining_buf_size -= alpha_offset;
+ }
+ }
+
+ if (s->framep[VP56_FRAME_PREVIOUS] == s->framep[VP56_FRAME_GOLDEN] ||
+ s->framep[VP56_FRAME_PREVIOUS] == s->framep[VP56_FRAME_GOLDEN2]) {
+ if (s->framep[VP56_FRAME_UNUSED] != s->framep[VP56_FRAME_GOLDEN] &&
+ s->framep[VP56_FRAME_UNUSED] != s->framep[VP56_FRAME_GOLDEN2])
+ FFSWAP(AVFrame *, s->framep[VP56_FRAME_PREVIOUS],
+ s->framep[VP56_FRAME_UNUSED]);
+ else
+ FFSWAP(AVFrame *, s->framep[VP56_FRAME_PREVIOUS],
+ s->framep[VP56_FRAME_UNUSED2]);
+ } else if (s->framep[VP56_FRAME_PREVIOUS]->data[0])
+ avctx->release_buffer(avctx, s->framep[VP56_FRAME_PREVIOUS]);
+ FFSWAP(AVFrame *, s->framep[VP56_FRAME_CURRENT],
+ s->framep[VP56_FRAME_PREVIOUS]);
+
+ *(AVFrame*)data = *p;
+ *data_size = sizeof(AVFrame);
+
+ return buf_size;
+}
+
+av_cold void vp56_init(AVCodecContext *avctx, int flip, int has_alpha)
+{
+ VP56Context *s = avctx->priv_data;
+ int i;
+
+ s->avctx = avctx;
+ avctx->pix_fmt = has_alpha ? PIX_FMT_YUVA420P : PIX_FMT_YUV420P;
+
+ /* always use the VP3 IDCT */
+ avctx->idct_algo = FF_IDCT_VP3;
+ dsputil_init(&s->dsp, avctx);
+ ff_init_scantable(s->dsp.idct_permutation, &s->scantable,ff_zigzag_direct);
+
+ for (i=0; i<4; i++)
+ s->framep[i] = &s->frames[i];
+ s->framep[VP56_FRAME_UNUSED] = s->framep[VP56_FRAME_GOLDEN];
+ s->framep[VP56_FRAME_UNUSED2] = s->framep[VP56_FRAME_GOLDEN2];
+ s->edge_emu_buffer_alloc = NULL;
+
+ s->above_blocks = NULL;
+ s->macroblocks = NULL;
+ s->quantizer = -1;
+ s->deblock_filtering = 1;
+
+ s->filter = NULL;
+
+ s->has_alpha = has_alpha;
+ if (flip) {
+ s->flip = -1;
+ s->frbi = 2;
+ s->srbi = 0;
+ } else {
+ s->flip = 1;
+ s->frbi = 0;
+ s->srbi = 2;
+ }
+}
+
+av_cold int vp56_free(AVCodecContext *avctx)
+{
+ VP56Context *s = avctx->priv_data;
+
+ av_freep(&s->above_blocks);
+ av_freep(&s->macroblocks);
+ av_freep(&s->edge_emu_buffer_alloc);
+ if (s->framep[VP56_FRAME_GOLDEN]->data[0])
+ avctx->release_buffer(avctx, s->framep[VP56_FRAME_GOLDEN]);
+ if (s->framep[VP56_FRAME_GOLDEN2]->data[0])
+ avctx->release_buffer(avctx, s->framep[VP56_FRAME_GOLDEN2]);
+ if (s->framep[VP56_FRAME_PREVIOUS]->data[0])
+ avctx->release_buffer(avctx, s->framep[VP56_FRAME_PREVIOUS]);
+ return 0;
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp56.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp56.h
new file mode 100644
index 000000000..53da77812
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp56.h
@@ -0,0 +1,270 @@
+/**
+ * @file libavcodec/vp56.h
+ * VP5 and VP6 compatible video decoder (common features)
+ *
+ * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VP56_H
+#define AVCODEC_VP56_H
+
+#include "vp56data.h"
+#include "dsputil.h"
+#include "get_bits.h"
+#include "bytestream.h"
+
+
+typedef struct vp56_context VP56Context;
+typedef struct vp56_mv VP56mv;
+
+typedef void (*VP56ParseVectorAdjustment)(VP56Context *s,
+ VP56mv *vect);
+typedef int (*VP56Adjust)(int v, int t);
+typedef void (*VP56Filter)(VP56Context *s, uint8_t *dst, uint8_t *src,
+ int offset1, int offset2, int stride,
+ VP56mv mv, int mask, int select, int luma);
+typedef void (*VP56ParseCoeff)(VP56Context *s);
+typedef void (*VP56DefaultModelsInit)(VP56Context *s);
+typedef void (*VP56ParseVectorModels)(VP56Context *s);
+typedef void (*VP56ParseCoeffModels)(VP56Context *s);
+typedef int (*VP56ParseHeader)(VP56Context *s, const uint8_t *buf,
+ int buf_size, int *golden_frame);
+
+typedef struct {
+    int high;                /* size of the current coding interval (renormalized to stay >= 128) */
+    int bits;                /* renormalization shifts left before the next input byte is needed */
+    const uint8_t *buffer;
+    const uint8_t *end;
+    unsigned long code_word; /* bitstream value compared against the scaled interval split */
+} VP56RangeCoder;
+
+typedef struct {
+ uint8_t not_null_dc;
+ VP56Frame ref_frame;
+ DCTELEM dc_coeff;
+} VP56RefDc;
+
+struct vp56_mv {
+ int x;
+ int y;
+};
+
+typedef struct {
+ uint8_t type;
+ VP56mv mv;
+} VP56Macroblock;
+
+typedef struct {
+ uint8_t coeff_reorder[64]; /* used in vp6 only */
+ uint8_t coeff_index_to_pos[64]; /* used in vp6 only */
+ uint8_t vector_sig[2]; /* delta sign */
+ uint8_t vector_dct[2]; /* delta coding types */
+ uint8_t vector_pdi[2][2]; /* predefined delta init */
+ uint8_t vector_pdv[2][7]; /* predefined delta values */
+ uint8_t vector_fdv[2][8]; /* 8 bit delta value definition */
+ uint8_t coeff_dccv[2][11]; /* DC coeff value */
+ uint8_t coeff_ract[2][3][6][11]; /* Run/AC coding type and AC coeff value */
+    uint8_t coeff_acct[2][3][3][6][5]; /* vp5 only: AC coding type for coding groups < 3 */
+ uint8_t coeff_dcct[2][36][5]; /* DC coeff coding type */
+ uint8_t coeff_runv[2][14]; /* run value (vp6 only) */
+ uint8_t mb_type[3][10][10]; /* model for decoding MB type */
+    uint8_t mb_types_stats[3][10][2]; /* per-context MB type statistics used to build the mb_type model */
+} VP56Model;
+
+struct vp56_context {
+ AVCodecContext *avctx;
+ DSPContext dsp;
+ ScanTable scantable;
+ AVFrame frames[4];
+ AVFrame *framep[6];
+ uint8_t *edge_emu_buffer_alloc;
+ uint8_t *edge_emu_buffer;
+ VP56RangeCoder c;
+ VP56RangeCoder cc;
+ VP56RangeCoder *ccp;
+ int sub_version;
+
+ /* frame info */
+ int plane_width[4];
+ int plane_height[4];
+ int mb_width; /* number of horizontal MB */
+ int mb_height; /* number of vertical MB */
+ int block_offset[6];
+
+ int quantizer;
+ uint16_t dequant_dc;
+ uint16_t dequant_ac;
+
+ /* DC predictors management */
+ VP56RefDc *above_blocks;
+ VP56RefDc left_block[4];
+ int above_block_idx[6];
+    DCTELEM prev_dc[3][3];    /* [plane][ref_frame] */
+
+ /* blocks / macroblock */
+ VP56mb mb_type;
+ VP56Macroblock *macroblocks;
+ DECLARE_ALIGNED_16(DCTELEM, block_coeff)[6][64];
+
+ /* motion vectors */
+ VP56mv mv[6]; /* vectors for each block in MB */
+ VP56mv vector_candidate[2];
+ int vector_candidate_pos;
+
+ /* filtering hints */
+ int filter_header; /* used in vp6 only */
+ int deblock_filtering;
+ int filter_selection;
+ int filter_mode;
+ int max_vector_length;
+ int sample_variance_threshold;
+
+ uint8_t coeff_ctx[4][64]; /* used in vp5 only */
+ uint8_t coeff_ctx_last[4]; /* used in vp5 only */
+
+ int has_alpha;
+
+ /* upside-down flipping hints */
+    int flip;            /* are we flipping? */
+    int frbi;            /* first row block index in MB */
+    int srbi;            /* second row block index in MB */
+    int stride[4];       /* stride for each plane */
+
+ const uint8_t *vp56_coord_div;
+ VP56ParseVectorAdjustment parse_vector_adjustment;
+ VP56Adjust adjust;
+ VP56Filter filter;
+ VP56ParseCoeff parse_coeff;
+ VP56DefaultModelsInit default_models_init;
+ VP56ParseVectorModels parse_vector_models;
+ VP56ParseCoeffModels parse_coeff_models;
+ VP56ParseHeader parse_header;
+
+ VP56Model *modelp;
+ VP56Model models[2];
+
+ /* huffman decoding */
+ int use_huffman;
+ GetBitContext gb;
+ VLC dccv_vlc[2];
+ VLC runv_vlc[2];
+ VLC ract_vlc[2][3][6];
+ unsigned int nb_null[2][2]; /* number of consecutive NULL DC/AC */
+};
+
+
+void vp56_init(AVCodecContext *avctx, int flip, int has_alpha);
+int vp56_free(AVCodecContext *avctx);
+void vp56_init_dequant(VP56Context *s, int quantizer);
+int vp56_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
+ const uint8_t *buf, int buf_size);
+
+
+/**
+ * vp56 specific range coder implementation
+ */
+
+static inline void vp56_init_range_decoder(VP56RangeCoder *c,
+ const uint8_t *buf, int buf_size)
+{
+ c->high = 255;
+ c->bits = 8;
+ c->buffer = buf;
+ c->end = buf + buf_size;
+ c->code_word = bytestream_get_be16(&c->buffer);
+}
+
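+/* Decode one binary symbol: prob/256 is (approximately) the probability that the
+ * decoded bit is 0.  The interval is split at low, the bit chosen by comparing
+ * the code word against it, and the state renormalized until high >= 128 again. */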
+static inline int vp56_rac_get_prob(VP56RangeCoder *c, uint8_t prob)
+{
+ unsigned int low = 1 + (((c->high - 1) * prob) / 256);
+ unsigned int low_shift = low << 8;
+ int bit = c->code_word >= low_shift;
+
+ if (bit) {
+ c->high -= low;
+ c->code_word -= low_shift;
+ } else {
+ c->high = low;
+ }
+
+ /* normalize */
+ while (c->high < 128) {
+ c->high <<= 1;
+ c->code_word <<= 1;
+ if (--c->bits == 0 && c->buffer < c->end) {
+ c->bits = 8;
+ c->code_word |= *c->buffer++;
+ }
+ }
+ return bit;
+}
+
+static inline int vp56_rac_get(VP56RangeCoder *c)
+{
+ /* equiprobable */
+ int low = (c->high + 1) >> 1;
+ unsigned int low_shift = low << 8;
+ int bit = c->code_word >= low_shift;
+ if (bit) {
+ c->high = (c->high - low) << 1;
+ c->code_word -= low_shift;
+ } else {
+ c->high = low << 1;
+ }
+
+ /* normalize */
+ c->code_word <<= 1;
+ if (--c->bits == 0 && c->buffer < c->end) {
+ c->bits = 8;
+ c->code_word |= *c->buffer++;
+ }
+ return bit;
+}
+
+static inline int vp56_rac_gets(VP56RangeCoder *c, int bits)
+{
+ int value = 0;
+
+ while (bits--) {
+ value = (value << 1) | vp56_rac_get(c);
+ }
+
+ return value;
+}
+
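+/* "nn" = non-null: reads 7 bits (the bits argument is ignored), doubles the
+ * result, and returns 1 instead of 0 so the value is never zero. */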
+static inline int vp56_rac_gets_nn(VP56RangeCoder *c, int bits)
+{
+ int v = vp56_rac_gets(c, 7) << 1;
+ return v + !v;
+}
+
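+/* Walk a VP56Tree: internal nodes have val > 0, which is the forward offset taken
+ * when the decoded bit is 1 (a 0 bit moves to the next entry); leaves store the
+ * symbol negated, so -val is returned. */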
+static inline int vp56_rac_get_tree(VP56RangeCoder *c,
+ const VP56Tree *tree,
+ const uint8_t *probs)
+{
+ while (tree->val > 0) {
+ if (vp56_rac_get_prob(c, probs[tree->prob_idx]))
+ tree += tree->val;
+ else
+ tree++;
+ }
+ return -tree->val;
+}
+
+#endif /* AVCODEC_VP56_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp56data.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp56data.c
new file mode 100644
index 000000000..9b98014e4
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp56data.c
@@ -0,0 +1,66 @@
+/**
+ * @file vp56data.c
+ * VP5 and VP6 compatible video decoder (common data)
+ *
+ * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "vp56data.h"
+
+const uint8_t vp56_b2p[] = { 0, 0, 0, 0, 1, 2, 3, 3, 3, 3 };
+const uint8_t vp56_b6to4[] = { 0, 0, 1, 1, 2, 3 };
+
+const uint8_t vp56_coeff_parse_table[6][11] = {
+ { 159, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 145, 165, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 140, 148, 173, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 135, 140, 155, 176, 0, 0, 0, 0, 0, 0, 0 },
+ { 130, 134, 141, 157, 180, 0, 0, 0, 0, 0, 0 },
+ { 129, 130, 133, 140, 153, 177, 196, 230, 243, 254, 254 },
+};
+
+const uint8_t vp56_def_mb_types_stats[3][10][2] = {
+ { { 69, 42 }, { 1, 2 }, { 1, 7 }, { 44, 42 }, { 6, 22 },
+ { 1, 3 }, { 0, 2 }, { 1, 5 }, { 0, 1 }, { 0, 0 }, },
+ { { 229, 8 }, { 1, 1 }, { 0, 8 }, { 0, 0 }, { 0, 0 },
+ { 1, 2 }, { 0, 1 }, { 0, 0 }, { 1, 1 }, { 0, 0 }, },
+ { { 122, 35 }, { 1, 1 }, { 1, 6 }, { 46, 34 }, { 0, 0 },
+ { 1, 2 }, { 0, 1 }, { 0, 1 }, { 1, 1 }, { 0, 0 }, },
+};
+
+const VP56Tree vp56_pva_tree[] = {
+ { 8, 0},
+ { 4, 1},
+ { 2, 2}, {-0}, {-1},
+ { 2, 3}, {-2}, {-3},
+ { 4, 4},
+ { 2, 5}, {-4}, {-5},
+ { 2, 6}, {-6}, {-7},
+};
+
+const VP56Tree vp56_pc_tree[] = {
+ { 4, 6},
+ { 2, 7}, {-0}, {-1},
+ { 4, 8},
+ { 2, 9}, {-2}, {-3},
+ { 2,10}, {-4}, {-5},
+};
+
+const uint8_t vp56_coeff_bias[] = { 0, 1, 2, 3, 4, 5, 7, 11, 19, 35, 67 };
+const uint8_t vp56_coeff_bit_length[] = { 0, 1, 2, 3, 4, 10 };
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp56data.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp56data.h
new file mode 100644
index 000000000..92f2512a3
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp56data.h
@@ -0,0 +1,252 @@
+/**
+ * @file vp56data.h
+ * VP5 and VP6 compatible video decoder (common data)
+ *
+ * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VP56DATA_H
+#define AVCODEC_VP56DATA_H
+
+#include "libavutil/common.h"
+
+typedef enum {
+ VP56_FRAME_NONE =-1,
+ VP56_FRAME_CURRENT = 0,
+ VP56_FRAME_PREVIOUS = 1,
+ VP56_FRAME_GOLDEN = 2,
+ VP56_FRAME_GOLDEN2 = 3,
+ VP56_FRAME_UNUSED = 4,
+ VP56_FRAME_UNUSED2 = 5,
+} VP56Frame;
+
+typedef enum {
+ VP56_MB_INTER_NOVEC_PF = 0, /**< Inter MB, no vector, from previous frame */
+ VP56_MB_INTRA = 1, /**< Intra MB */
+ VP56_MB_INTER_DELTA_PF = 2, /**< Inter MB, above/left vector + delta, from previous frame */
+ VP56_MB_INTER_V1_PF = 3, /**< Inter MB, first vector, from previous frame */
+ VP56_MB_INTER_V2_PF = 4, /**< Inter MB, second vector, from previous frame */
+ VP56_MB_INTER_NOVEC_GF = 5, /**< Inter MB, no vector, from golden frame */
+ VP56_MB_INTER_DELTA_GF = 6, /**< Inter MB, above/left vector + delta, from golden frame */
+ VP56_MB_INTER_4V = 7, /**< Inter MB, 4 vectors, from previous frame */
+ VP56_MB_INTER_V1_GF = 8, /**< Inter MB, first vector, from golden frame */
+ VP56_MB_INTER_V2_GF = 9, /**< Inter MB, second vector, from golden frame */
+} VP56mb;
+
+typedef struct {
+ int8_t val;
+ int8_t prob_idx;
+} VP56Tree;
+
+extern const uint8_t vp56_b2p[];
+extern const uint8_t vp56_b6to4[];
+extern const uint8_t vp56_coeff_parse_table[6][11];
+extern const uint8_t vp56_def_mb_types_stats[3][10][2];
+extern const VP56Tree vp56_pva_tree[];
+extern const VP56Tree vp56_pc_tree[];
+extern const uint8_t vp56_coeff_bias[];
+extern const uint8_t vp56_coeff_bit_length[];
+
+static const VP56Frame vp56_reference_frame[] = {
+ VP56_FRAME_PREVIOUS, /* VP56_MB_INTER_NOVEC_PF */
+ VP56_FRAME_CURRENT, /* VP56_MB_INTRA */
+ VP56_FRAME_PREVIOUS, /* VP56_MB_INTER_DELTA_PF */
+ VP56_FRAME_PREVIOUS, /* VP56_MB_INTER_V1_PF */
+ VP56_FRAME_PREVIOUS, /* VP56_MB_INTER_V2_PF */
+ VP56_FRAME_GOLDEN, /* VP56_MB_INTER_NOVEC_GF */
+ VP56_FRAME_GOLDEN, /* VP56_MB_INTER_DELTA_GF */
+ VP56_FRAME_PREVIOUS, /* VP56_MB_INTER_4V */
+ VP56_FRAME_GOLDEN, /* VP56_MB_INTER_V1_GF */
+ VP56_FRAME_GOLDEN, /* VP56_MB_INTER_V2_GF */
+};
+
+static const uint8_t vp56_ac_dequant[64] = {
+ 94, 92, 90, 88, 86, 82, 78, 74,
+ 70, 66, 62, 58, 54, 53, 52, 51,
+ 50, 49, 48, 47, 46, 45, 44, 43,
+ 42, 40, 39, 37, 36, 35, 34, 33,
+ 32, 31, 30, 29, 28, 27, 26, 25,
+ 24, 23, 22, 21, 20, 19, 18, 17,
+ 16, 15, 14, 13, 12, 11, 10, 9,
+ 8, 7, 6, 5, 4, 3, 2, 1,
+};
+
+static const uint8_t vp56_dc_dequant[64] = {
+ 47, 47, 47, 47, 45, 43, 43, 43,
+ 43, 43, 42, 41, 41, 40, 40, 40,
+ 40, 35, 35, 35, 35, 33, 33, 33,
+ 33, 32, 32, 32, 27, 27, 26, 26,
+ 25, 25, 24, 24, 23, 23, 19, 19,
+ 19, 19, 18, 18, 17, 16, 16, 16,
+ 16, 16, 15, 11, 11, 11, 10, 10,
+ 9, 8, 7, 5, 3, 3, 2, 2,
+};
+
+static const uint8_t vp56_pre_def_mb_type_stats[16][3][10][2] = {
+ { { { 9, 15 }, { 32, 25 }, { 7, 19 }, { 9, 21 }, { 1, 12 },
+ { 14, 12 }, { 3, 18 }, { 14, 23 }, { 3, 10 }, { 0, 4 }, },
+ { { 41, 22 }, { 1, 0 }, { 1, 31 }, { 0, 0 }, { 0, 0 },
+ { 0, 1 }, { 1, 7 }, { 0, 1 }, { 98, 25 }, { 4, 10 }, },
+ { { 2, 3 }, { 2, 3 }, { 0, 2 }, { 0, 2 }, { 0, 0 },
+ { 11, 4 }, { 1, 4 }, { 0, 2 }, { 3, 2 }, { 0, 4 }, }, },
+ { { { 48, 39 }, { 1, 2 }, { 11, 27 }, { 29, 44 }, { 7, 27 },
+ { 1, 4 }, { 0, 3 }, { 1, 6 }, { 1, 2 }, { 0, 0 }, },
+ { { 123, 37 }, { 6, 4 }, { 1, 27 }, { 0, 0 }, { 0, 0 },
+ { 5, 8 }, { 1, 7 }, { 0, 1 }, { 12, 10 }, { 0, 2 }, },
+ { { 49, 46 }, { 3, 4 }, { 7, 31 }, { 42, 41 }, { 0, 0 },
+ { 2, 6 }, { 1, 7 }, { 1, 4 }, { 2, 4 }, { 0, 1 }, }, },
+ { { { 21, 32 }, { 1, 2 }, { 4, 10 }, { 32, 43 }, { 6, 23 },
+ { 2, 3 }, { 1, 19 }, { 1, 6 }, { 12, 21 }, { 0, 7 }, },
+ { { 26, 14 }, { 14, 12 }, { 0, 24 }, { 0, 0 }, { 0, 0 },
+ { 55, 17 }, { 1, 9 }, { 0, 36 }, { 5, 7 }, { 1, 3 }, },
+ { { 26, 25 }, { 1, 1 }, { 2, 10 }, { 67, 39 }, { 0, 0 },
+ { 1, 1 }, { 0, 14 }, { 0, 2 }, { 31, 26 }, { 1, 6 }, }, },
+ { { { 69, 83 }, { 0, 0 }, { 0, 2 }, { 10, 29 }, { 3, 12 },
+ { 0, 1 }, { 0, 3 }, { 0, 3 }, { 2, 2 }, { 0, 0 }, },
+ { { 209, 5 }, { 0, 0 }, { 0, 27 }, { 0, 0 }, { 0, 0 },
+ { 0, 1 }, { 0, 1 }, { 0, 1 }, { 0, 0 }, { 0, 0 }, },
+ { { 103, 46 }, { 1, 2 }, { 2, 10 }, { 33, 42 }, { 0, 0 },
+ { 1, 4 }, { 0, 3 }, { 0, 1 }, { 1, 3 }, { 0, 0 }, }, },
+ { { { 11, 20 }, { 1, 4 }, { 18, 36 }, { 43, 48 }, { 13, 35 },
+ { 0, 2 }, { 0, 5 }, { 3, 12 }, { 1, 2 }, { 0, 0 }, },
+ { { 2, 5 }, { 4, 5 }, { 0, 121 }, { 0, 0 }, { 0, 0 },
+ { 0, 3 }, { 2, 4 }, { 1, 4 }, { 2, 2 }, { 0, 1 }, },
+ { { 14, 31 }, { 9, 13 }, { 14, 54 }, { 22, 29 }, { 0, 0 },
+ { 2, 6 }, { 4, 18 }, { 6, 13 }, { 1, 5 }, { 0, 1 }, }, },
+ { { { 70, 44 }, { 0, 1 }, { 2, 10 }, { 37, 46 }, { 8, 26 },
+ { 0, 2 }, { 0, 2 }, { 0, 2 }, { 0, 1 }, { 0, 0 }, },
+ { { 175, 5 }, { 0, 1 }, { 0, 48 }, { 0, 0 }, { 0, 0 },
+ { 0, 2 }, { 0, 1 }, { 0, 2 }, { 0, 1 }, { 0, 0 }, },
+ { { 85, 39 }, { 0, 0 }, { 1, 9 }, { 69, 40 }, { 0, 0 },
+ { 0, 1 }, { 0, 3 }, { 0, 1 }, { 2, 3 }, { 0, 0 }, }, },
+ { { { 8, 15 }, { 0, 1 }, { 8, 21 }, { 74, 53 }, { 22, 42 },
+ { 0, 1 }, { 0, 2 }, { 0, 3 }, { 1, 2 }, { 0, 0 }, },
+ { { 83, 5 }, { 2, 3 }, { 0, 102 }, { 0, 0 }, { 0, 0 },
+ { 1, 3 }, { 0, 2 }, { 0, 1 }, { 0, 0 }, { 0, 0 }, },
+ { { 31, 28 }, { 0, 0 }, { 3, 14 }, { 130, 34 }, { 0, 0 },
+ { 0, 1 }, { 0, 3 }, { 0, 1 }, { 3, 3 }, { 0, 1 }, }, },
+ { { { 141, 42 }, { 0, 0 }, { 1, 4 }, { 11, 24 }, { 1, 11 },
+ { 0, 1 }, { 0, 1 }, { 0, 2 }, { 0, 0 }, { 0, 0 }, },
+ { { 233, 6 }, { 0, 0 }, { 0, 8 }, { 0, 0 }, { 0, 0 },
+ { 0, 1 }, { 0, 1 }, { 0, 0 }, { 0, 1 }, { 0, 0 }, },
+ { { 171, 25 }, { 0, 0 }, { 1, 5 }, { 25, 21 }, { 0, 0 },
+ { 0, 1 }, { 0, 1 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, }, },
+ { { { 8, 19 }, { 4, 10 }, { 24, 45 }, { 21, 37 }, { 9, 29 },
+ { 0, 3 }, { 1, 7 }, { 11, 25 }, { 0, 2 }, { 0, 1 }, },
+ { { 34, 16 }, { 112, 21 }, { 1, 28 }, { 0, 0 }, { 0, 0 },
+ { 6, 8 }, { 1, 7 }, { 0, 3 }, { 2, 5 }, { 0, 2 }, },
+ { { 17, 21 }, { 68, 29 }, { 6, 15 }, { 13, 22 }, { 0, 0 },
+ { 6, 12 }, { 3, 14 }, { 4, 10 }, { 1, 7 }, { 0, 3 }, }, },
+ { { { 46, 42 }, { 0, 1 }, { 2, 10 }, { 54, 51 }, { 10, 30 },
+ { 0, 2 }, { 0, 2 }, { 0, 1 }, { 0, 1 }, { 0, 0 }, },
+ { { 159, 35 }, { 2, 2 }, { 0, 25 }, { 0, 0 }, { 0, 0 },
+ { 3, 6 }, { 0, 5 }, { 0, 1 }, { 4, 4 }, { 0, 1 }, },
+ { { 51, 39 }, { 0, 1 }, { 2, 12 }, { 91, 44 }, { 0, 0 },
+ { 0, 2 }, { 0, 3 }, { 0, 1 }, { 2, 3 }, { 0, 1 }, }, },
+ { { { 28, 32 }, { 0, 0 }, { 3, 10 }, { 75, 51 }, { 14, 33 },
+ { 0, 1 }, { 0, 2 }, { 0, 1 }, { 1, 2 }, { 0, 0 }, },
+ { { 75, 39 }, { 5, 7 }, { 2, 48 }, { 0, 0 }, { 0, 0 },
+ { 3, 11 }, { 2, 16 }, { 1, 4 }, { 7, 10 }, { 0, 2 }, },
+ { { 81, 25 }, { 0, 0 }, { 2, 9 }, { 106, 26 }, { 0, 0 },
+ { 0, 1 }, { 0, 1 }, { 0, 1 }, { 1, 1 }, { 0, 0 }, }, },
+ { { { 100, 46 }, { 0, 1 }, { 3, 9 }, { 21, 37 }, { 5, 20 },
+ { 0, 1 }, { 0, 2 }, { 1, 2 }, { 0, 1 }, { 0, 0 }, },
+ { { 212, 21 }, { 0, 1 }, { 0, 9 }, { 0, 0 }, { 0, 0 },
+ { 1, 2 }, { 0, 2 }, { 0, 0 }, { 2, 2 }, { 0, 0 }, },
+ { { 140, 37 }, { 0, 1 }, { 1, 8 }, { 24, 33 }, { 0, 0 },
+ { 1, 2 }, { 0, 2 }, { 0, 1 }, { 1, 2 }, { 0, 0 }, }, },
+ { { { 27, 29 }, { 0, 1 }, { 9, 25 }, { 53, 51 }, { 12, 34 },
+ { 0, 1 }, { 0, 3 }, { 1, 5 }, { 0, 2 }, { 0, 0 }, },
+ { { 4, 2 }, { 0, 0 }, { 0, 172 }, { 0, 0 }, { 0, 0 },
+ { 0, 1 }, { 0, 2 }, { 0, 0 }, { 2, 0 }, { 0, 0 }, },
+ { { 14, 23 }, { 1, 3 }, { 11, 53 }, { 90, 31 }, { 0, 0 },
+ { 0, 3 }, { 1, 5 }, { 2, 6 }, { 1, 2 }, { 0, 0 }, }, },
+ { { { 80, 38 }, { 0, 0 }, { 1, 4 }, { 69, 33 }, { 5, 16 },
+ { 0, 1 }, { 0, 1 }, { 0, 0 }, { 0, 1 }, { 0, 0 }, },
+ { { 187, 22 }, { 1, 1 }, { 0, 17 }, { 0, 0 }, { 0, 0 },
+ { 3, 6 }, { 0, 4 }, { 0, 1 }, { 4, 4 }, { 0, 1 }, },
+ { { 123, 29 }, { 0, 0 }, { 1, 7 }, { 57, 30 }, { 0, 0 },
+ { 0, 1 }, { 0, 1 }, { 0, 1 }, { 0, 1 }, { 0, 0 }, }, },
+ { { { 16, 20 }, { 0, 0 }, { 2, 8 }, { 104, 49 }, { 15, 33 },
+ { 0, 1 }, { 0, 1 }, { 0, 1 }, { 1, 1 }, { 0, 0 }, },
+ { { 133, 6 }, { 1, 2 }, { 1, 70 }, { 0, 0 }, { 0, 0 },
+ { 0, 2 }, { 0, 4 }, { 0, 3 }, { 1, 1 }, { 0, 0 }, },
+ { { 13, 14 }, { 0, 0 }, { 4, 20 }, { 175, 20 }, { 0, 0 },
+ { 0, 1 }, { 0, 1 }, { 0, 1 }, { 1, 1 }, { 0, 0 }, }, },
+ { { { 194, 16 }, { 0, 0 }, { 1, 1 }, { 1, 9 }, { 1, 3 },
+ { 0, 0 }, { 0, 1 }, { 0, 1 }, { 0, 0 }, { 0, 0 }, },
+ { { 251, 1 }, { 0, 0 }, { 0, 2 }, { 0, 0 }, { 0, 0 },
+ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, },
+ { { 202, 23 }, { 0, 0 }, { 1, 3 }, { 2, 9 }, { 0, 0 },
+ { 0, 1 }, { 0, 1 }, { 0, 1 }, { 0, 0 }, { 0, 0 }, }, },
+};
+
+static const uint8_t vp56_filter_threshold[] = {
+ 14, 14, 13, 13, 12, 12, 10, 10,
+ 10, 10, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 7, 7, 7, 7,
+ 7, 7, 6, 6, 6, 6, 6, 6,
+ 5, 5, 5, 5, 4, 4, 4, 4,
+ 4, 4, 4, 3, 3, 3, 3, 2,
+};
+
+static const uint8_t vp56_mb_type_model_model[] = {
+ 171, 83, 199, 140, 125, 104,
+};
+
+static const VP56Tree vp56_pmbtm_tree[] = {
+ { 4, 0},
+ { 2, 1}, {-8}, {-4},
+ { 8, 2},
+ { 6, 3},
+ { 4, 4},
+ { 2, 5}, {-24}, {-20}, {-16}, {-12}, {-0},
+};
+
+static const VP56Tree vp56_pmbt_tree[] = {
+ { 8, 1},
+ { 4, 2},
+ { 2, 4}, {-VP56_MB_INTER_NOVEC_PF}, {-VP56_MB_INTER_DELTA_PF},
+ { 2, 5}, {-VP56_MB_INTER_V1_PF}, {-VP56_MB_INTER_V2_PF},
+ { 4, 3},
+ { 2, 6}, {-VP56_MB_INTRA}, {-VP56_MB_INTER_4V},
+ { 4, 7},
+ { 2, 8}, {-VP56_MB_INTER_NOVEC_GF}, {-VP56_MB_INTER_DELTA_GF},
+ { 2, 9}, {-VP56_MB_INTER_V1_GF}, {-VP56_MB_INTER_V2_GF},
+};
+
+/* relative pos of surrounding blocks, from closest to farthest */
+static const int8_t vp56_candidate_predictor_pos[12][2] = {
+ { 0, -1 },
+ { -1, 0 },
+ { -1, -1 },
+ { 1, -1 },
+ { 0, -2 },
+ { -2, 0 },
+ { -2, -1 },
+ { -1, -2 },
+ { 1, -2 },
+ { 2, -1 },
+ { -2, -2 },
+ { 2, -2 },
+};
+
+#endif /* AVCODEC_VP56DATA_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp5data.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp5data.h
new file mode 100644
index 000000000..64ecb78b8
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp5data.h
@@ -0,0 +1,175 @@
+/**
+ * @file vp5data.h
+ * VP5 compatible video decoder
+ *
+ * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VP5DATA_H
+#define AVCODEC_VP5DATA_H
+
+#include <stdint.h>
+
+static const uint8_t vp5_coeff_groups[] = {
+ -1, 0, 1, 1, 2, 1, 1, 2,
+ 2, 1, 1, 2, 2, 2, 1, 2,
+ 2, 2, 2, 2, 1, 1, 2, 2,
+ 3, 3, 4, 3, 4, 4, 4, 3,
+ 3, 3, 3, 3, 4, 3, 3, 3,
+ 4, 4, 4, 4, 4, 3, 3, 4,
+ 4, 4, 3, 4, 4, 4, 4, 4,
+ 4, 4, 5, 5, 5, 5, 5, 5,
+};
+
+static const uint8_t vp5_vmc_pct[2][11] = {
+ { 243, 220, 251, 253, 237, 232, 241, 245, 247, 251, 253 },
+ { 235, 211, 246, 249, 234, 231, 248, 249, 252, 252, 254 },
+};
+
+static const uint8_t vp5_dccv_pct[2][11] = {
+ { 146, 197, 181, 207, 232, 243, 238, 251, 244, 250, 249 },
+ { 179, 219, 214, 240, 250, 254, 244, 254, 254, 254, 254 },
+};
+
+static const uint8_t vp5_ract_pct[3][2][6][11] = {
+ { { { 227, 246, 230, 247, 244, 254, 254, 254, 254, 254, 254 },
+ { 202, 254, 209, 231, 231, 249, 249, 253, 254, 254, 254 },
+ { 206, 254, 225, 242, 241, 251, 253, 254, 254, 254, 254 },
+ { 235, 254, 241, 253, 252, 254, 254, 254, 254, 254, 254 },
+ { 234, 254, 248, 254, 254, 254, 254, 254, 254, 254, 254 },
+ { 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254 } },
+ { { 240, 254, 248, 254, 254, 254, 254, 254, 254, 254, 254 },
+ { 238, 254, 240, 253, 254, 254, 254, 254, 254, 254, 254 },
+ { 244, 254, 251, 254, 254, 254, 254, 254, 254, 254, 254 },
+ { 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254 },
+ { 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254 },
+ { 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254 } } },
+ { { { 206, 203, 227, 239, 247, 254, 253, 254, 254, 254, 254 },
+ { 207, 199, 220, 236, 243, 252, 252, 254, 254, 254, 254 },
+ { 212, 219, 230, 243, 244, 253, 252, 254, 254, 254, 254 },
+ { 236, 237, 247, 252, 253, 254, 254, 254, 254, 254, 254 },
+ { 240, 240, 248, 254, 254, 254, 254, 254, 254, 254, 254 },
+ { 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254 } },
+ { { 230, 233, 249, 254, 254, 254, 254, 254, 254, 254, 254 },
+ { 238, 238, 250, 254, 254, 254, 254, 254, 254, 254, 254 },
+ { 248, 251, 254, 254, 254, 254, 254, 254, 254, 254, 254 },
+ { 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254 },
+ { 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254 },
+ { 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254 } } },
+ { { { 225, 239, 227, 231, 244, 253, 243, 254, 254, 253, 254 },
+ { 232, 234, 224, 228, 242, 249, 242, 252, 251, 251, 254 },
+ { 235, 249, 238, 240, 251, 254, 249, 254, 253, 253, 254 },
+ { 249, 253, 251, 250, 254, 254, 254, 254, 254, 254, 254 },
+ { 251, 250, 249, 254, 254, 254, 254, 254, 254, 254, 254 },
+ { 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254 } },
+ { { 243, 244, 250, 250, 254, 254, 254, 254, 254, 254, 254 },
+ { 249, 248, 250, 253, 254, 254, 254, 254, 254, 254, 254 },
+ { 253, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254 },
+ { 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254 },
+ { 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254 },
+ { 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254 } } },
+};
+
+static const int16_t vp5_dccv_lc[5][36][2] = {
+ { {154, 61}, {141, 54}, { 90, 45}, { 54, 34}, { 54, 13}, {128, 109},
+ {136, 54}, {148, 45}, { 92, 41}, { 54, 33}, { 51, 15}, { 87, 113},
+ { 87, 44}, { 97, 40}, { 67, 36}, { 46, 29}, { 41, 15}, { 64, 80},
+ { 59, 33}, { 61, 31}, { 51, 28}, { 44, 22}, { 33, 12}, { 49, 63},
+ { 69, 12}, { 59, 16}, { 46, 14}, { 31, 13}, { 26, 6}, { 92, 26},
+ {128, 108}, { 77, 119}, { 54, 84}, { 26, 71}, { 87, 19}, { 95, 155} },
+ { {154, 4}, {182, 0}, {159, -8}, {128, -5}, {143, -5}, {187, 55},
+ {182, 0}, {228, -3}, {187, -7}, {174, -9}, {189, -11}, {169, 79},
+ {161, -9}, {192, -8}, {187, -9}, {169, -10}, {136, -9}, {184, 40},
+ {164, -11}, {179, -10}, {174, -10}, {161, -10}, {115, -7}, {197, 20},
+ {195, -11}, {195, -11}, {146, -10}, {110, -6}, { 95, -4}, {195, 39},
+ {182, 55}, {172, 77}, {177, 37}, {169, 29}, {172, 52}, { 92, 162} },
+ { {174, 80}, {164, 80}, { 95, 80}, { 46, 66}, { 56, 24}, { 36, 193},
+ {164, 80}, {166, 77}, {105, 76}, { 49, 68}, { 46, 31}, { 49, 186},
+ { 97, 78}, {110, 74}, { 72, 72}, { 44, 60}, { 33, 30}, { 69, 131},
+ { 61, 61}, { 69, 63}, { 51, 57}, { 31, 48}, { 26, 27}, { 64, 89},
+ { 67, 23}, { 51, 32}, { 36, 33}, { 26, 28}, { 20, 12}, { 44, 68},
+ { 26, 197}, { 41, 189}, { 61, 129}, { 28, 103}, { 49, 52}, {-12, 245} },
+ { {102, 141}, { 79, 166}, { 72, 162}, { 97, 125}, {179, 4}, {307, 0},
+ { 72, 168}, { 69, 175}, { 84, 160}, {105, 127}, {148, 34}, {310, 0},
+ { 84, 151}, { 82, 161}, { 87, 153}, { 87, 135}, {115, 51}, {317, 0},
+ { 97, 125}, {102, 131}, {105, 125}, { 87, 122}, { 84, 64}, { 54, 184},
+ {166, 18}, {146, 43}, {125, 51}, { 90, 64}, { 95, 7}, { 38, 154},
+ {294, 0}, { 13, 225}, { 10, 225}, { 67, 168}, { 0, 167}, {161, 94} },
+ { {172, 76}, {172, 75}, {136, 80}, { 64, 98}, { 74, 67}, {315, 0},
+ {169, 76}, {207, 56}, {164, 66}, { 97, 80}, { 67, 72}, {328, 0},
+ {136, 80}, {187, 53}, {154, 62}, { 72, 85}, { -2, 105}, {305, 0},
+ { 74, 91}, {128, 64}, {113, 64}, { 61, 77}, { 41, 75}, {259, 0},
+ { 46, 84}, { 51, 81}, { 28, 89}, { 31, 78}, { 23, 77}, {202, 0},
+ {323, 0}, {323, 0}, {300, 0}, {236, 0}, {195, 0}, {328, 0} },
+};
+
+static const int16_t vp5_ract_lc[3][3][5][6][2] = {
+ { { { {276, 0}, {238, 0}, {195, 0}, {156, 0}, {113, 0}, {274, 0} },
+ { { 0, 1}, { 0, 1}, { 0, 1}, { 0, 1}, { 0, 1}, { 0, 1} },
+ { {192, 59}, {182, 50}, {141, 48}, {110, 40}, { 92, 19}, {125,128} },
+ { {169, 87}, {169, 83}, {184, 62}, {220, 16}, {184, 0}, {264, 0} },
+ { {212, 40}, {212, 36}, {169, 49}, {174, 27}, { 8,120}, {182, 71} } },
+ { { {259, 10}, {197, 19}, {143, 22}, {123, 16}, {110, 8}, {133, 88} },
+ { { 0, 1}, {256, 0}, { 0, 1}, { 0, 1}, { 0, 1}, { 0, 1} },
+ { {207, 46}, {187, 50}, { 97, 83}, { 23,100}, { 41, 56}, { 56,188} },
+ { {166, 90}, {146,108}, {161, 88}, {136, 95}, {174, 0}, {266, 0} },
+ { {264, 7}, {243, 18}, {184, 43}, {-14,154}, { 20,112}, { 20,199} } },
+ { { {230, 26}, {197, 22}, {159, 20}, {146, 12}, {136, 4}, { 54,162} },
+ { { 0, 1}, { 0, 1}, { 0, 1}, { 0, 1}, { 0, 1}, { 0, 1} },
+ { {192, 59}, {156, 72}, { 84,101}, { 49,101}, { 79, 47}, { 79,167} },
+ { {138,115}, {136,116}, {166, 80}, {238, 0}, {195, 0}, {261, 0} },
+ { {225, 33}, {205, 42}, {159, 61}, { 79, 96}, { 92, 66}, { 28,195} } },
+ }, {
+ { { {200, 37}, {197, 18}, {159, 13}, {143, 7}, {102, 5}, {123,126} },
+ { {197, 3}, {220, -9}, {210,-12}, {187, -6}, {151, -2}, {174, 80} },
+ { {200, 53}, {187, 47}, {159, 40}, {118, 38}, {100, 18}, {141,111} },
+ { {179, 78}, {166, 86}, {197, 50}, {207, 27}, {187, 0}, {115,139} },
+ { {218, 34}, {220, 29}, {174, 46}, {128, 61}, { 54, 89}, {187, 65} } },
+ { { {238, 14}, {197, 18}, {125, 26}, { 90, 25}, { 82, 13}, {161, 86} },
+ { {189, 1}, {205, -2}, {156, -4}, {143, -4}, {146, -4}, {172, 72} },
+ { {230, 31}, {192, 45}, {102, 76}, { 38, 85}, { 56, 41}, { 64,173} },
+ { {166, 91}, {141,111}, {128,116}, {118,109}, {177, 0}, { 23,222} },
+ { {253, 14}, {236, 21}, {174, 49}, { 33,118}, { 44, 93}, { 23,187} } },
+ { { {218, 28}, {179, 28}, {118, 35}, { 95, 30}, { 72, 24}, {128,108} },
+ { {187, 1}, {174, -1}, {125, -1}, {110, -1}, {108, -1}, {202, 52} },
+ { {197, 53}, {146, 75}, { 46,118}, { 33,103}, { 64, 50}, {118,126} },
+ { {138,114}, {128,122}, {161, 86}, {243, -6}, {195, 0}, { 38,210} },
+ { {215, 39}, {179, 58}, { 97,101}, { 95, 85}, { 87, 70}, { 69,152} } },
+ }, {
+ { { {236, 24}, {205, 18}, {172, 12}, {154, 6}, {125, 1}, {169, 75} },
+ { {187, 4}, {230, -2}, {228, -4}, {236, -4}, {241, -2}, {192, 66} },
+ { {200, 46}, {187, 42}, {159, 34}, {136, 25}, {105, 10}, {179, 62} },
+ { {207, 55}, {192, 63}, {192, 54}, {195, 36}, {177, 1}, {143, 98} },
+ { {225, 27}, {207, 34}, {200, 30}, {131, 57}, { 97, 60}, {197, 45} } },
+ { { {271, 8}, {218, 13}, {133, 19}, { 90, 19}, { 72, 7}, {182, 51} },
+ { {179, 1}, {225, -1}, {154, -2}, {110, -1}, { 92, 0}, {195, 41} },
+ { {241, 26}, {189, 40}, { 82, 64}, { 33, 60}, { 67, 17}, {120, 94} },
+ { {192, 68}, {151, 94}, {146, 90}, {143, 72}, {161, 0}, {113,128} },
+ { {256, 12}, {218, 29}, {166, 48}, { 44, 99}, { 31, 87}, {148, 78} } },
+ { { {238, 20}, {184, 22}, {113, 27}, { 90, 22}, { 74, 9}, {192, 37} },
+ { {184, 0}, {215, -1}, {141, -1}, { 97, 0}, { 49, 0}, {264, 13} },
+ { {182, 51}, {138, 61}, { 95, 63}, { 54, 59}, { 64, 25}, {200, 45} },
+ { {179, 75}, {156, 87}, {174, 65}, {177, 44}, {174, 0}, {164, 85} },
+ { {195, 45}, {148, 65}, {105, 79}, { 95, 72}, { 87, 60}, {169, 63} } },
+ }
+};
+
+static const uint8_t vp5_coord_div[] = { 2, 2, 2, 2, 4, 4 };
+
+#endif /* AVCODEC_VP5DATA_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp6.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp6.c
new file mode 100644
index 000000000..519833bdc
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp6.c
@@ -0,0 +1,662 @@
+/**
+ * @file libavcodec/vp6.c
+ * VP6 compatible video decoder
+ *
+ * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
+ *
+ * The VP6F decoder accepts an optional 1 byte extradata. It is composed of:
+ * - upper 4bits: difference between encoded width and visible width
+ * - lower 4bits: difference between encoded height and visible height
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdlib.h>
+
+#include "avcodec.h"
+#include "dsputil.h"
+#include "get_bits.h"
+#include "huffman.h"
+
+#include "vp56.h"
+#include "vp56data.h"
+#include "vp6data.h"
+
+#ifndef __GNUC__
+#include <malloc.h>
+#endif
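+
+/* Illustration of the 1-byte extradata described in the header comment above
+ * (a hypothetical helper, not used by the decoder): the two nibbles shrink
+ * the coded dimensions down to the visible ones, exactly as
+ * vp6_parse_header() does further below. */
+static inline void vp6f_visible_size(uint8_t extradata,
+                                     int coded_width, int coded_height,
+                                     int *width, int *height)
+{
+    *width  = coded_width  - (extradata >> 4);   /* upper 4 bits */
+    *height = coded_height - (extradata & 0x0F); /* lower 4 bits */
+}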
+
+static void vp6_parse_coeff(VP56Context *s);
+static void vp6_parse_coeff_huffman(VP56Context *s);
+
+static int vp6_parse_header(VP56Context *s, const uint8_t *buf, int buf_size,
+ int *golden_frame)
+{
+ VP56RangeCoder *c = &s->c;
+ int parse_filter_info = 0;
+ int coeff_offset = 0;
+ int vrt_shift = 0;
+ int sub_version;
+ int rows, cols;
+ int res = 1;
+ int separated_coeff = buf[0] & 1;
+
+ s->framep[VP56_FRAME_CURRENT]->key_frame = !(buf[0] & 0x80);
+ vp56_init_dequant(s, (buf[0] >> 1) & 0x3F);
+
+ if (s->framep[VP56_FRAME_CURRENT]->key_frame) {
+ sub_version = buf[1] >> 3;
+ if (sub_version > 8)
+ return 0;
+ s->filter_header = buf[1] & 0x06;
+ if (buf[1] & 1) {
+ av_log(s->avctx, AV_LOG_ERROR, "interlacing not supported\n");
+ return 0;
+ }
+ if (separated_coeff || !s->filter_header) {
+ coeff_offset = AV_RB16(buf+2) - 2;
+ buf += 2;
+ buf_size -= 2;
+ }
+
+ rows = buf[2]; /* number of stored macroblock rows */
+ cols = buf[3]; /* number of stored macroblock cols */
+ /* buf[4] is number of displayed macroblock rows */
+ /* buf[5] is number of displayed macroblock cols */
+
+ if (!s->macroblocks || /* first frame */
+ 16*cols != s->avctx->coded_width ||
+ 16*rows != s->avctx->coded_height) {
+ avcodec_set_dimensions(s->avctx, 16*cols, 16*rows);
+ if (s->avctx->extradata_size == 1) {
+ s->avctx->width -= s->avctx->extradata[0] >> 4;
+ s->avctx->height -= s->avctx->extradata[0] & 0x0F;
+ }
+ res = 2;
+ }
+
+ vp56_init_range_decoder(c, buf+6, buf_size-6);
+ vp56_rac_gets(c, 2);
+
+ parse_filter_info = s->filter_header;
+ if (sub_version < 8)
+ vrt_shift = 5;
+ s->sub_version = sub_version;
+ } else {
+ if (!s->sub_version)
+ return 0;
+
+ if (separated_coeff || !s->filter_header) {
+ coeff_offset = AV_RB16(buf+1) - 2;
+ buf += 2;
+ buf_size -= 2;
+ }
+ vp56_init_range_decoder(c, buf+1, buf_size-1);
+
+ *golden_frame = vp56_rac_get(c);
+ if (s->filter_header) {
+ s->deblock_filtering = vp56_rac_get(c);
+ if (s->deblock_filtering)
+ vp56_rac_get(c);
+ if (s->sub_version > 7)
+ parse_filter_info = vp56_rac_get(c);
+ }
+ }
+
+ if (parse_filter_info) {
+ if (vp56_rac_get(c)) {
+ s->filter_mode = 2;
+ s->sample_variance_threshold = vp56_rac_gets(c, 5) << vrt_shift;
+ s->max_vector_length = 2 << vp56_rac_gets(c, 3);
+ } else if (vp56_rac_get(c)) {
+ s->filter_mode = 1;
+ } else {
+ s->filter_mode = 0;
+ }
+ if (s->sub_version > 7)
+ s->filter_selection = vp56_rac_gets(c, 4);
+ else
+ s->filter_selection = 16;
+ }
+
+ s->use_huffman = vp56_rac_get(c);
+
+ s->parse_coeff = vp6_parse_coeff;
+ if (coeff_offset) {
+ buf += coeff_offset;
+ buf_size -= coeff_offset;
+ if (buf_size < 0)
+ return 0;
+ if (s->use_huffman) {
+ s->parse_coeff = vp6_parse_coeff_huffman;
+ init_get_bits(&s->gb, buf, buf_size<<3);
+ } else {
+ vp56_init_range_decoder(&s->cc, buf, buf_size);
+ s->ccp = &s->cc;
+ }
+ } else {
+ s->ccp = &s->c;
+ }
+
+ return res;
+}
+
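+/* Rebuild coeff_index_to_pos as the inverse of coeff_reorder: coefficient
+ * positions are listed by increasing reorder rank, so scan index idx maps
+ * to the idx-th position in that ordering. */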
+static void vp6_coeff_order_table_init(VP56Context *s)
+{
+ int i, pos, idx = 1;
+
+ s->modelp->coeff_index_to_pos[0] = 0;
+ for (i=0; i<16; i++)
+ for (pos=1; pos<64; pos++)
+ if (s->modelp->coeff_reorder[pos] == i)
+ s->modelp->coeff_index_to_pos[idx++] = pos;
+}
+
+static void vp6_default_models_init(VP56Context *s)
+{
+ VP56Model *model = s->modelp;
+
+ model->vector_dct[0] = 0xA2;
+ model->vector_dct[1] = 0xA4;
+ model->vector_sig[0] = 0x80;
+ model->vector_sig[1] = 0x80;
+
+ memcpy(model->mb_types_stats, vp56_def_mb_types_stats, sizeof(model->mb_types_stats));
+ memcpy(model->vector_fdv, vp6_def_fdv_vector_model, sizeof(model->vector_fdv));
+ memcpy(model->vector_pdv, vp6_def_pdv_vector_model, sizeof(model->vector_pdv));
+ memcpy(model->coeff_runv, vp6_def_runv_coeff_model, sizeof(model->coeff_runv));
+ memcpy(model->coeff_reorder, vp6_def_coeff_reorder, sizeof(model->coeff_reorder));
+
+ vp6_coeff_order_table_init(s);
+}
+
+static void vp6_parse_vector_models(VP56Context *s)
+{
+ VP56RangeCoder *c = &s->c;
+ VP56Model *model = s->modelp;
+ int comp, node;
+
+ for (comp=0; comp<2; comp++) {
+ if (vp56_rac_get_prob(c, vp6_sig_dct_pct[comp][0]))
+ model->vector_dct[comp] = vp56_rac_gets_nn(c, 7);
+ if (vp56_rac_get_prob(c, vp6_sig_dct_pct[comp][1]))
+ model->vector_sig[comp] = vp56_rac_gets_nn(c, 7);
+ }
+
+ for (comp=0; comp<2; comp++)
+ for (node=0; node<7; node++)
+ if (vp56_rac_get_prob(c, vp6_pdv_pct[comp][node]))
+ model->vector_pdv[comp][node] = vp56_rac_gets_nn(c, 7);
+
+ for (comp=0; comp<2; comp++)
+ for (node=0; node<8; node++)
+ if (vp56_rac_get_prob(c, vp6_fdv_pct[comp][node]))
+ model->vector_fdv[comp][node] = vp56_rac_gets_nn(c, 7);
+}
+
+/* nodes must ascend by count, but with descending symbol order */
+static int vp6_huff_cmp(const void *va, const void *vb)
+{
+ const Node *a = va, *b = vb;
+ return (a->count - b->count)*16 + (b->sym - a->sym);
+}
+
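+/* Turn the binary probability model into node weights for the Huffman
+ * builder: starting from 256 at the root, each model entry splits its
+ * node's weight between the two children given by 'map'; the floor of 1
+ * keeps every symbol representable. */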
+static void vp6_build_huff_tree(VP56Context *s, uint8_t coeff_model[],
+ const uint8_t *map, unsigned size, VLC *vlc)
+{
+ #if __STDC_VERSION__ >= 199901L
+ Node nodes[2*size], *tmp = &nodes[size];
+ #else
+ Node *nodes=(Node *)alloca(2*size*sizeof(Node)), *tmp = &nodes[size];
+ #endif
+ int a, b, i;
+
+ /* first compute probabilities from model */
+ tmp[0].count = 256;
+ for (i=0; i<size-1; i++) {
+ a = tmp[i].count * coeff_model[i] >> 8;
+ b = tmp[i].count * (255 - coeff_model[i]) >> 8;
+ nodes[map[2*i ]].count = a + !a;
+ nodes[map[2*i+1]].count = b + !b;
+ }
+
+ /* then build the Huffman tree according to probabilities */
+ ff_huff_build_tree(s->avctx, vlc, size, nodes, vp6_huff_cmp,
+ FF_HUFFMAN_FLAG_HNODE_FIRST);
+}
+
+static void vp6_parse_coeff_models(VP56Context *s)
+{
+ VP56RangeCoder *c = &s->c;
+ VP56Model *model = s->modelp;
+ int def_prob[11];
+ int node, cg, ctx, pos;
+ int ct; /* code type */
+ int pt; /* plane type (0 for Y, 1 for U or V) */
+
+ memset(def_prob, 0x80, sizeof(def_prob));
+
+ for (pt=0; pt<2; pt++)
+ for (node=0; node<11; node++)
+ if (vp56_rac_get_prob(c, vp6_dccv_pct[pt][node])) {
+ def_prob[node] = vp56_rac_gets_nn(c, 7);
+ model->coeff_dccv[pt][node] = def_prob[node];
+ } else if (s->framep[VP56_FRAME_CURRENT]->key_frame) {
+ model->coeff_dccv[pt][node] = def_prob[node];
+ }
+
+ if (vp56_rac_get(c)) {
+ for (pos=1; pos<64; pos++)
+ if (vp56_rac_get_prob(c, vp6_coeff_reorder_pct[pos]))
+ model->coeff_reorder[pos] = vp56_rac_gets(c, 4);
+ vp6_coeff_order_table_init(s);
+ }
+
+ for (cg=0; cg<2; cg++)
+ for (node=0; node<14; node++)
+ if (vp56_rac_get_prob(c, vp6_runv_pct[cg][node]))
+ model->coeff_runv[cg][node] = vp56_rac_gets_nn(c, 7);
+
+ for (ct=0; ct<3; ct++)
+ for (pt=0; pt<2; pt++)
+ for (cg=0; cg<6; cg++)
+ for (node=0; node<11; node++)
+ if (vp56_rac_get_prob(c, vp6_ract_pct[ct][pt][cg][node])) {
+ def_prob[node] = vp56_rac_gets_nn(c, 7);
+ model->coeff_ract[pt][ct][cg][node] = def_prob[node];
+ } else if (s->framep[VP56_FRAME_CURRENT]->key_frame) {
+ model->coeff_ract[pt][ct][cg][node] = def_prob[node];
+ }
+
+ if (s->use_huffman) {
+ for (pt=0; pt<2; pt++) {
+ vp6_build_huff_tree(s, model->coeff_dccv[pt],
+ vp6_huff_coeff_map, 12, &s->dccv_vlc[pt]);
+ vp6_build_huff_tree(s, model->coeff_runv[pt],
+ vp6_huff_run_map, 9, &s->runv_vlc[pt]);
+ for (ct=0; ct<3; ct++)
+ for (cg = 0; cg < 6; cg++)
+ vp6_build_huff_tree(s, model->coeff_ract[pt][ct][cg],
+ vp6_huff_coeff_map, 12,
+ &s->ract_vlc[pt][ct][cg]);
+ }
+ memset(s->nb_null, 0, sizeof(s->nb_null));
+ } else {
+ /* coeff_dcct is a linear combination of coeff_dccv */
+ for (pt=0; pt<2; pt++)
+ for (ctx=0; ctx<3; ctx++)
+ for (node=0; node<5; node++)
+ model->coeff_dcct[pt][ctx][node] = av_clip(((model->coeff_dccv[pt][node] * vp6_dccv_lc[ctx][node][0] + 128) >> 8) + vp6_dccv_lc[ctx][node][1], 1, 255);
+ }
+}
+
+static void vp6_parse_vector_adjustment(VP56Context *s, VP56mv *vect)
+{
+ VP56RangeCoder *c = &s->c;
+ VP56Model *model = s->modelp;
+ int comp;
+
+ vect->x = 0;
+ vect->y = 0;
+ if (s->vector_candidate_pos < 2)
+ *vect = s->vector_candidate[0];
+
+ for (comp=0; comp<2; comp++) {
+ int i, delta = 0;
+
+ if (vp56_rac_get_prob(c, model->vector_dct[comp])) {
+ static const uint8_t prob_order[] = {0, 1, 2, 7, 6, 5, 4};
+ for (i=0; i<sizeof(prob_order); i++) {
+ int j = prob_order[i];
+ delta |= vp56_rac_get_prob(c, model->vector_fdv[comp][j])<<j;
+ }
+ if (delta & 0xF0)
+ delta |= vp56_rac_get_prob(c, model->vector_fdv[comp][3])<<3;
+ else
+ delta |= 8;
+ } else {
+ delta = vp56_rac_get_tree(c, vp56_pva_tree,
+ model->vector_pdv[comp]);
+ }
+
+ if (delta && vp56_rac_get_prob(c, model->vector_sig[comp]))
+ delta = -delta;
+
+ if (!comp)
+ vect->x += delta;
+ else
+ vect->y += delta;
+ }
+}
+
+/**
+ * Read number of consecutive blocks with null DC or AC.
+ * The coding below yields 0-1 directly, then the ranges 2-5, 6-9 or 10-73,
+ * so this value is always < 74.
+ */
+static unsigned vp6_get_nb_null(VP56Context *s)
+{
+ unsigned val = get_bits(&s->gb, 2);
+ if (val == 2)
+ val += get_bits(&s->gb, 2);
+ else if (val == 3) {
+ val = get_bits1(&s->gb) << 2;
+ val = 6+val + get_bits(&s->gb, 2+val);
+ }
+ return val;
+}
+
+static void vp6_parse_coeff_huffman(VP56Context *s)
+{
+ VP56Model *model = s->modelp;
+ uint8_t *permute = s->scantable.permutated;
+ VLC *vlc_coeff;
+ int coeff, sign, coeff_idx;
+ int b, cg, idx;
+ int pt = 0; /* plane type (0 for Y, 1 for U or V) */
+
+ for (b=0; b<6; b++) {
+ int ct = 0; /* code type */
+ if (b > 3) pt = 1;
+ vlc_coeff = &s->dccv_vlc[pt];
+
+ for (coeff_idx=0; coeff_idx<64; ) {
+ int run = 1;
+ if (coeff_idx<2 && s->nb_null[coeff_idx][pt]) {
+ s->nb_null[coeff_idx][pt]--;
+ if (coeff_idx)
+ break;
+ } else {
+ if (get_bits_count(&s->gb) >= s->gb.size_in_bits)
+ return;
+ coeff = get_vlc2(&s->gb, vlc_coeff->table, 9, 3);
+ if (coeff == 0) {
+ if (coeff_idx) {
+ int pt = (coeff_idx >= 6);
+ run += get_vlc2(&s->gb, s->runv_vlc[pt].table, 9, 3);
+ if (run >= 9)
+ run += get_bits(&s->gb, 6);
+ } else
+ s->nb_null[0][pt] = vp6_get_nb_null(s);
+ ct = 0;
+ } else if (coeff == 11) { /* end of block */
+ if (coeff_idx == 1) /* first AC coeff ? */
+ s->nb_null[1][pt] = vp6_get_nb_null(s);
+ break;
+ } else {
+ int coeff2 = vp56_coeff_bias[coeff];
+ if (coeff > 4)
+ coeff2 += get_bits(&s->gb, coeff <= 9 ? coeff - 4 : 11);
+ ct = 1 + (coeff2 > 1);
+ sign = get_bits1(&s->gb);
+ coeff2 = (coeff2 ^ -sign) + sign;
+ if (coeff_idx)
+ coeff2 *= s->dequant_ac;
+ idx = model->coeff_index_to_pos[coeff_idx];
+ s->block_coeff[b][permute[idx]] = coeff2;
+ }
+ }
+ coeff_idx+=run;
+ cg = FFMIN(vp6_coeff_groups[coeff_idx], 3);
+ vlc_coeff = &s->ract_vlc[pt][ct][cg];
+ }
+ }
+}
+
+static void vp6_parse_coeff(VP56Context *s)
+{
+ VP56RangeCoder *c = s->ccp;
+ VP56Model *model = s->modelp;
+ uint8_t *permute = s->scantable.permutated;
+ uint8_t *model1, *model2, *model3;
+ int coeff, sign, coeff_idx;
+ int b, i, cg, idx, ctx;
+ int pt = 0; /* plane type (0 for Y, 1 for U or V) */
+
+ for (b=0; b<6; b++) {
+ int ct = 1; /* code type */
+ int run = 1;
+
+ if (b > 3) pt = 1;
+
+ ctx = s->left_block[vp56_b6to4[b]].not_null_dc
+ + s->above_blocks[s->above_block_idx[b]].not_null_dc;
+ model1 = model->coeff_dccv[pt];
+ model2 = model->coeff_dcct[pt][ctx];
+
+ for (coeff_idx=0; coeff_idx<64; ) {
+ if ((coeff_idx>1 && ct==0) || vp56_rac_get_prob(c, model2[0])) {
+ /* parse a coeff */
+ if (vp56_rac_get_prob(c, model2[2])) {
+ if (vp56_rac_get_prob(c, model2[3])) {
+ idx = vp56_rac_get_tree(c, vp56_pc_tree, model1);
+ coeff = vp56_coeff_bias[idx+5];
+ for (i=vp56_coeff_bit_length[idx]; i>=0; i--)
+ coeff += vp56_rac_get_prob(c, vp56_coeff_parse_table[idx][i]) << i;
+ } else {
+ if (vp56_rac_get_prob(c, model2[4]))
+ coeff = 3 + vp56_rac_get_prob(c, model1[5]);
+ else
+ coeff = 2;
+ }
+ ct = 2;
+ } else {
+ ct = 1;
+ coeff = 1;
+ }
+ sign = vp56_rac_get(c);
+ coeff = (coeff ^ -sign) + sign;
+ if (coeff_idx)
+ coeff *= s->dequant_ac;
+ idx = model->coeff_index_to_pos[coeff_idx];
+ s->block_coeff[b][permute[idx]] = coeff;
+ run = 1;
+ } else {
+ /* parse a run */
+ ct = 0;
+ if (coeff_idx > 0) {
+ if (!vp56_rac_get_prob(c, model2[1]))
+ break;
+
+ model3 = model->coeff_runv[coeff_idx >= 6];
+ run = vp56_rac_get_tree(c, vp6_pcr_tree, model3);
+ if (!run)
+ for (run=9, i=0; i<6; i++)
+ run += vp56_rac_get_prob(c, model3[i+8]) << i;
+ }
+ }
+
+ cg = vp6_coeff_groups[coeff_idx+=run];
+ model1 = model2 = model->coeff_ract[pt][ct][cg];
+ }
+
+ s->left_block[vp56_b6to4[b]].not_null_dc =
+ s->above_blocks[s->above_block_idx[b]].not_null_dc = !!s->block_coeff[b][0];
+ }
+}
+
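+/* Reflect values whose magnitude lies strictly between t and 2*t back
+ * around t (the sign is preserved); magnitudes <= t or >= 2*t pass through
+ * unchanged. */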
+static int vp6_adjust(int v, int t)
+{
+ int V = v, s = v >> 31;
+ V ^= s;
+ V -= s;
+ if (V-t-1 >= (unsigned)(t-1))
+ return v;
+ V = 2*t - V;
+ V += s;
+ V ^= s;
+ return V;
+}
+
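+/* Integer variance of the 16 pixels sampled on a 2x2 grid within the 8x8
+ * block: (16*sum(x^2) - sum(x)^2) >> 8 == E[x^2] - E[x]^2. */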
+static int vp6_block_variance(uint8_t *src, int stride)
+{
+ int sum = 0, square_sum = 0;
+ int y, x;
+
+ for (y=0; y<8; y+=2) {
+ for (x=0; x<8; x+=2) {
+ sum += src[x];
+ square_sum += src[x]*src[x];
+ }
+ src += 2*stride;
+ }
+ return (16*square_sum - sum*sum) >> 8;
+}
+
+static void vp6_filter_hv4(uint8_t *dst, uint8_t *src, int stride,
+ int delta, const int16_t *weights)
+{
+ int x, y;
+
+ for (y=0; y<8; y++) {
+ for (x=0; x<8; x++) {
+ dst[x] = av_clip_uint8(( src[x-delta ] * weights[0]
+ + src[x ] * weights[1]
+ + src[x+delta ] * weights[2]
+ + src[x+2*delta] * weights[3] + 64) >> 7);
+ }
+ src += stride;
+ dst += stride;
+ }
+}
+
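+/* Diagonal bilinear filtering done as two passes of the H.264 chroma MC:
+ * a horizontal pass over 9 rows into a scratch buffer, then a vertical
+ * pass producing the final 8x8 block. */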
+static void vp6_filter_diag2(VP56Context *s, uint8_t *dst, uint8_t *src,
+ int stride, int h_weight, int v_weight)
+{
+ uint8_t *tmp = s->edge_emu_buffer+16;
+ s->dsp.put_h264_chroma_pixels_tab[0](tmp, src, stride, 9, h_weight, 0);
+ s->dsp.put_h264_chroma_pixels_tab[0](dst, tmp, stride, 8, 0, v_weight);
+}
+
+static void vp6_filter(VP56Context *s, uint8_t *dst, uint8_t *src,
+ int offset1, int offset2, int stride,
+ VP56mv mv, int mask, int select, int luma)
+{
+ int filter4 = 0;
+ int x8 = mv.x & mask;
+ int y8 = mv.y & mask;
+
+ if (luma) {
+ x8 *= 2;
+ y8 *= 2;
+ filter4 = s->filter_mode;
+ if (filter4 == 2) {
+ if (s->max_vector_length &&
+ (FFABS(mv.x) > s->max_vector_length ||
+ FFABS(mv.y) > s->max_vector_length)) {
+ filter4 = 0;
+ } else if (s->sample_variance_threshold
+ && (vp6_block_variance(src+offset1, stride)
+ < s->sample_variance_threshold)) {
+ filter4 = 0;
+ }
+ }
+ }
+
+ if ((y8 && (offset2-offset1)*s->flip<0) || (!y8 && offset1 > offset2)) {
+ offset1 = offset2;
+ }
+
+ if (filter4) {
+ if (!y8) { /* left or right combine */
+ vp6_filter_hv4(dst, src+offset1, stride, 1,
+ vp6_block_copy_filter[select][x8]);
+ } else if (!x8) { /* above or below combine */
+ vp6_filter_hv4(dst, src+offset1, stride, stride,
+ vp6_block_copy_filter[select][y8]);
+ } else {
+ s->dsp.vp6_filter_diag4(dst, src+offset1+((mv.x^mv.y)>>31), stride,
+ vp6_block_copy_filter[select][x8],
+ vp6_block_copy_filter[select][y8]);
+ }
+ } else {
+ if (!x8 || !y8) {
+ s->dsp.put_h264_chroma_pixels_tab[0](dst, src+offset1, stride, 8, x8, y8);
+ } else {
+ vp6_filter_diag2(s, dst, src+offset1 + ((mv.x^mv.y)>>31), stride, x8, y8);
+ }
+ }
+}
+
+static av_cold int vp6_decode_init(AVCodecContext *avctx)
+{
+ VP56Context *s = avctx->priv_data;
+
+ vp56_init(avctx, avctx->codec->id == CODEC_ID_VP6,
+ avctx->codec->id == CODEC_ID_VP6A);
+ s->vp56_coord_div = vp6_coord_div;
+ s->parse_vector_adjustment = vp6_parse_vector_adjustment;
+ s->adjust = vp6_adjust;
+ s->filter = vp6_filter;
+ s->default_models_init = vp6_default_models_init;
+ s->parse_vector_models = vp6_parse_vector_models;
+ s->parse_coeff_models = vp6_parse_coeff_models;
+ s->parse_header = vp6_parse_header;
+
+ return 0;
+}
+
+AVCodec vp6_decoder = {
+ "vp6",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_VP6,
+ sizeof(VP56Context),
+ vp6_decode_init,
+ NULL,
+ vp56_free,
+ vp56_decode_frame,
+ /*.capabilities = */CODEC_CAP_DR1,
+ /*.next = */NULL,
+ /*.flush = */NULL,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name = */NULL_IF_CONFIG_SMALL("On2 VP6"),
+};
+
+/* flash version, not flipped upside-down */
+AVCodec vp6f_decoder = {
+ "vp6f",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_VP6F,
+ sizeof(VP56Context),
+ vp6_decode_init,
+ NULL,
+ vp56_free,
+ vp56_decode_frame,
+ /*.capabilities = */CODEC_CAP_DR1,
+ /*.next = */NULL,
+ /*.flush = */NULL,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name = */NULL_IF_CONFIG_SMALL("On2 VP6 (Flash version)"),
+};
+
+/* flash version, not flipped upside-down, with alpha channel */
+AVCodec vp6a_decoder = {
+ "vp6a",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_VP6A,
+ sizeof(VP56Context),
+ vp6_decode_init,
+ NULL,
+ vp56_free,
+ vp56_decode_frame,
+ /*.capabilities = */CODEC_CAP_DR1,
+ /*.next = */NULL,
+ /*.flush = */NULL,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name = */NULL_IF_CONFIG_SMALL("On2 VP6 (Flash version, with alpha channel)"),
+};
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp6data.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp6data.h
new file mode 100644
index 000000000..f57115c0a
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp6data.h
@@ -0,0 +1,308 @@
+/**
+ * @file vp6data.h
+ * VP6 compatible video decoder
+ *
+ * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VP6DATA_H
+#define AVCODEC_VP6DATA_H
+
+#include "vp56data.h"
+
+static const uint8_t vp6_def_fdv_vector_model[2][8] = {
+ { 247, 210, 135, 68, 138, 220, 239, 246 },
+ { 244, 184, 201, 44, 173, 221, 239, 253 },
+};
+
+static const uint8_t vp6_def_pdv_vector_model[2][7] = {
+ { 225, 146, 172, 147, 214, 39, 156 },
+ { 204, 170, 119, 235, 140, 230, 228 },
+};
+
+static const uint8_t vp6_def_coeff_reorder[] = {
+ 0, 0, 1, 1, 1, 2, 2, 2,
+ 2, 2, 2, 3, 3, 4, 4, 4,
+ 5, 5, 5, 5, 6, 6, 7, 7,
+ 7, 7, 7, 8, 8, 9, 9, 9,
+ 9, 9, 9, 10, 10, 11, 11, 11,
+ 11, 11, 11, 12, 12, 12, 12, 12,
+ 12, 13, 13, 13, 13, 13, 14, 14,
+ 14, 14, 15, 15, 15, 15, 15, 15,
+};
+
+static const uint8_t vp6_def_runv_coeff_model[2][14] = {
+ { 198, 197, 196, 146, 198, 204, 169, 142, 130, 136, 149, 149, 191, 249 },
+ { 135, 201, 181, 154, 98, 117, 132, 126, 146, 169, 184, 240, 246, 254 },
+};
+
+static const uint8_t vp6_sig_dct_pct[2][2] = {
+ { 237, 246 },
+ { 231, 243 },
+};
+
+static const uint8_t vp6_pdv_pct[2][7] = {
+ { 253, 253, 254, 254, 254, 254, 254 },
+ { 245, 253, 254, 254, 254, 254, 254 },
+};
+
+static const uint8_t vp6_fdv_pct[2][8] = {
+ { 254, 254, 254, 254, 254, 250, 250, 252 },
+ { 254, 254, 254, 254, 254, 251, 251, 254 },
+};
+
+static const uint8_t vp6_dccv_pct[2][11] = {
+ { 146, 255, 181, 207, 232, 243, 238, 251, 244, 250, 249 },
+ { 179, 255, 214, 240, 250, 255, 244, 255, 255, 255, 255 },
+};
+
+static const uint8_t vp6_coeff_reorder_pct[] = {
+ 255, 132, 132, 159, 153, 151, 161, 170,
+ 164, 162, 136, 110, 103, 114, 129, 118,
+ 124, 125, 132, 136, 114, 110, 142, 135,
+ 134, 123, 143, 126, 153, 183, 166, 161,
+ 171, 180, 179, 164, 203, 218, 225, 217,
+ 215, 206, 203, 217, 229, 241, 248, 243,
+ 253, 255, 253, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255,
+};
+
+static const uint8_t vp6_runv_pct[2][14] = {
+ { 219, 246, 238, 249, 232, 239, 249, 255, 248, 253, 239, 244, 241, 248 },
+ { 198, 232, 251, 253, 219, 241, 253, 255, 248, 249, 244, 238, 251, 255 },
+};
+
+static const uint8_t vp6_ract_pct[3][2][6][11] = {
+ { { { 227, 246, 230, 247, 244, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 209, 231, 231, 249, 249, 253, 255, 255, 255 },
+ { 255, 255, 225, 242, 241, 251, 253, 255, 255, 255, 255 },
+ { 255, 255, 241, 253, 252, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 248, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } },
+ { { 240, 255, 248, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 240, 253, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } } },
+ { { { 206, 203, 227, 239, 247, 255, 253, 255, 255, 255, 255 },
+ { 207, 199, 220, 236, 243, 252, 252, 255, 255, 255, 255 },
+ { 212, 219, 230, 243, 244, 253, 252, 255, 255, 255, 255 },
+ { 236, 237, 247, 252, 253, 255, 255, 255, 255, 255, 255 },
+ { 240, 240, 248, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } },
+ { { 230, 233, 249, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 238, 238, 250, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 248, 251, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } } },
+ { { { 225, 239, 227, 231, 244, 253, 243, 255, 255, 253, 255 },
+ { 232, 234, 224, 228, 242, 249, 242, 252, 251, 251, 255 },
+ { 235, 249, 238, 240, 251, 255, 249, 255, 253, 253, 255 },
+ { 249, 253, 251, 250, 255, 255, 255, 255, 255, 255, 255 },
+ { 251, 250, 249, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } },
+ { { 243, 244, 250, 250, 255, 255, 255, 255, 255, 255, 255 },
+ { 249, 248, 250, 253, 255, 255, 255, 255, 255, 255, 255 },
+ { 253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } } }
+};
+
+static const int vp6_dccv_lc[3][5][2] = {
+ { { 122, 133 }, { 0, 1 }, { 78, 171 }, { 139, 117 }, { 168, 79 } },
+ { { 133, 51 }, { 0, 1 }, { 169, 71 }, { 214, 44 }, { 210, 38 } },
+ { { 142, -16 }, { 0, 1 }, { 221, -30 }, { 246, -3 }, { 203, 17 } },
+};
+
+static const uint8_t vp6_coeff_groups[] = {
+ 0, 0, 1, 1, 1, 2, 2, 2,
+ 2, 2, 2, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5,
+};
+
+static const int16_t vp6_block_copy_filter[17][8][4] = {
+ { { 0, 128, 0, 0 }, /* 0 */
+ { -3, 122, 9, 0 },
+ { -4, 109, 24, -1 },
+ { -5, 91, 45, -3 },
+ { -4, 68, 68, -4 },
+ { -3, 45, 91, -5 },
+ { -1, 24, 109, -4 },
+ { 0, 9, 122, -3 } },
+ { { 0, 128, 0, 0 }, /* 1 */
+ { -4, 124, 9, -1 },
+ { -5, 110, 25, -2 },
+ { -6, 91, 46, -3 },
+ { -5, 69, 69, -5 },
+ { -3, 46, 91, -6 },
+ { -2, 25, 110, -5 },
+ { -1, 9, 124, -4 } },
+ { { 0, 128, 0, 0 }, /* 2 */
+ { -4, 123, 10, -1 },
+ { -6, 110, 26, -2 },
+ { -7, 92, 47, -4 },
+ { -6, 70, 70, -6 },
+ { -4, 47, 92, -7 },
+ { -2, 26, 110, -6 },
+ { -1, 10, 123, -4 } },
+ { { 0, 128, 0, 0 }, /* 3 */
+ { -5, 124, 10, -1 },
+ { -7, 110, 27, -2 },
+ { -7, 91, 48, -4 },
+ { -6, 70, 70, -6 },
+ { -4, 48, 92, -8 },
+ { -2, 27, 110, -7 },
+ { -1, 10, 124, -5 } },
+ { { 0, 128, 0, 0 }, /* 4 */
+ { -6, 124, 11, -1 },
+ { -8, 111, 28, -3 },
+ { -8, 92, 49, -5 },
+ { -7, 71, 71, -7 },
+ { -5, 49, 92, -8 },
+ { -3, 28, 111, -8 },
+ { -1, 11, 124, -6 } },
+ { { 0, 128, 0, 0 }, /* 5 */
+ { -6, 123, 12, -1 },
+ { -9, 111, 29, -3 },
+ { -9, 93, 50, -6 },
+ { -8, 72, 72, -8 },
+ { -6, 50, 93, -9 },
+ { -3, 29, 111, -9 },
+ { -1, 12, 123, -6 } },
+ { { 0, 128, 0, 0 }, /* 6 */
+ { -7, 124, 12, -1 },
+ { -10, 111, 30, -3 },
+ { -10, 93, 51, -6 },
+ { -9, 73, 73, -9 },
+ { -6, 51, 93, -10 },
+ { -3, 30, 111, -10 },
+ { -1, 12, 124, -7 } },
+ { { 0, 128, 0, 0 }, /* 7 */
+ { -7, 123, 13, -1 },
+ { -11, 112, 31, -4 },
+ { -11, 94, 52, -7 },
+ { -10, 74, 74, -10 },
+ { -7, 52, 94, -11 },
+ { -4, 31, 112, -11 },
+ { -1, 13, 123, -7 } },
+ { { 0, 128, 0, 0 }, /* 8 */
+ { -8, 124, 13, -1 },
+ { -12, 112, 32, -4 },
+ { -12, 94, 53, -7 },
+ { -10, 74, 74, -10 },
+ { -7, 53, 94, -12 },
+ { -4, 32, 112, -12 },
+ { -1, 13, 124, -8 } },
+ { { 0, 128, 0, 0 }, /* 9 */
+ { -9, 124, 14, -1 },
+ { -13, 112, 33, -4 },
+ { -13, 95, 54, -8 },
+ { -11, 75, 75, -11 },
+ { -8, 54, 95, -13 },
+ { -4, 33, 112, -13 },
+ { -1, 14, 124, -9 } },
+ { { 0, 128, 0, 0 }, /* 10 */
+ { -9, 123, 15, -1 },
+ { -14, 113, 34, -5 },
+ { -14, 95, 55, -8 },
+ { -12, 76, 76, -12 },
+ { -8, 55, 95, -14 },
+ { -5, 34, 112, -13 },
+ { -1, 15, 123, -9 } },
+ { { 0, 128, 0, 0 }, /* 11 */
+ { -10, 124, 15, -1 },
+ { -14, 113, 34, -5 },
+ { -15, 96, 56, -9 },
+ { -13, 77, 77, -13 },
+ { -9, 56, 96, -15 },
+ { -5, 34, 113, -14 },
+ { -1, 15, 124, -10 } },
+ { { 0, 128, 0, 0 }, /* 12 */
+ { -10, 123, 16, -1 },
+ { -15, 113, 35, -5 },
+ { -16, 98, 56, -10 },
+ { -14, 78, 78, -14 },
+ { -10, 56, 98, -16 },
+ { -5, 35, 113, -15 },
+ { -1, 16, 123, -10 } },
+ { { 0, 128, 0, 0 }, /* 13 */
+ { -11, 124, 17, -2 },
+ { -16, 113, 36, -5 },
+ { -17, 98, 57, -10 },
+ { -14, 78, 78, -14 },
+ { -10, 57, 98, -17 },
+ { -5, 36, 113, -16 },
+ { -2, 17, 124, -11 } },
+ { { 0, 128, 0, 0 }, /* 14 */
+ { -12, 125, 17, -2 },
+ { -17, 114, 37, -6 },
+ { -18, 99, 58, -11 },
+ { -15, 79, 79, -15 },
+ { -11, 58, 99, -18 },
+ { -6, 37, 114, -17 },
+ { -2, 17, 125, -12 } },
+ { { 0, 128, 0, 0 }, /* 15 */
+ { -12, 124, 18, -2 },
+ { -18, 114, 38, -6 },
+ { -19, 99, 59, -11 },
+ { -16, 80, 80, -16 },
+ { -11, 59, 99, -19 },
+ { -6, 38, 114, -18 },
+ { -2, 18, 124, -12 } },
+ { { 0, 128, 0, 0 }, /* 16 */
+ { -4, 118, 16, -2 },
+ { -7, 106, 34, -5 },
+ { -8, 90, 53, -7 },
+ { -8, 72, 72, -8 },
+ { -7, 53, 90, -8 },
+ { -5, 34, 106, -7 },
+ { -2, 16, 118, -4 } },
+};
+
+static const VP56Tree vp6_pcr_tree[] = {
+ { 8, 0},
+ { 4, 1},
+ { 2, 2}, {-1}, {-2},
+ { 2, 3}, {-3}, {-4},
+ { 8, 4},
+ { 4, 5},
+ { 2, 6}, {-5}, {-6},
+ { 2, 7}, {-7}, {-8},
+ {-0},
+};
+
+static const uint8_t vp6_coord_div[] = { 4, 4, 4, 4, 8, 8 };
+
+static const uint8_t vp6_huff_coeff_map[] = {
+ 13, 14, 11, 0, 1, 15, 16, 18, 2, 17, 3, 4, 19, 20, 5, 6, 21, 22, 7, 8, 9, 10
+};
+
+static const uint8_t vp6_huff_run_map[] = {
+ 10, 13, 11, 12, 0, 1, 2, 3, 14, 8, 15, 16, 4, 5, 6, 7
+};
+
+#endif /* AVCODEC_VP6DATA_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp6dsp.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp6dsp.c
new file mode 100644
index 000000000..f4b7670d7
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/vp6dsp.c
@@ -0,0 +1,59 @@
+/**
+ * @file libavcodec/vp6dsp.c
+ * VP6 DSP-oriented functions
+ *
+ * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/common.h"
+#include "dsputil.h"
+
+
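+/* Separable 4-tap filter used for diagonal motion compensation: a
+ * horizontal pass over an 8x11 area (one row above the block, two below)
+ * into a temporary buffer, then a vertical pass producing the 8x8 block. */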
+void ff_vp6_filter_diag4_c(uint8_t *dst, uint8_t *src, int stride,
+ const int16_t *h_weights, const int16_t *v_weights)
+{
+ int x, y;
+ int tmp[8*11];
+ int *t = tmp;
+
+ src -= stride;
+
+ for (y=0; y<11; y++) {
+ for (x=0; x<8; x++) {
+ t[x] = av_clip_uint8(( src[x-1] * h_weights[0]
+ + src[x ] * h_weights[1]
+ + src[x+1] * h_weights[2]
+ + src[x+2] * h_weights[3] + 64) >> 7);
+ }
+ src += stride;
+ t += 8;
+ }
+
+ t = tmp + 8;
+ for (y=0; y<8; y++) {
+ for (x=0; x<8; x++) {
+ dst[x] = av_clip_uint8(( t[x-8 ] * v_weights[0]
+ + t[x ] * v_weights[1]
+ + t[x+8 ] * v_weights[2]
+ + t[x+16] * v_weights[3] + 64) >> 7);
+ }
+ dst += stride;
+ t += 8;
+ }
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/w32thread.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/w32thread.c
new file mode 100644
index 000000000..295a87a35
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/w32thread.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+//#define DEBUG
+
+#include "avcodec.h"
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#include <process.h>
+
+typedef struct ThreadContext{
+ AVCodecContext *avctx;
+ HANDLE thread;
+ HANDLE work_sem;
+ HANDLE job_sem;
+ HANDLE done_sem;
+ int (*func)(AVCodecContext *c, void *arg);
+ int (*func2)(AVCodecContext *c, void *arg, int, int);
+ void *arg;
+ int argsize;
+ int *jobnr;
+ int *ret;
+ int threadnr;
+}ThreadContext;
+
+
+static unsigned WINAPI attribute_align_arg thread_func(void *v){
+ ThreadContext *c= v;
+
+ for(;;){
+ int ret, jobnr;
+//printf("thread_func %X enter wait\n", (int)v); fflush(stdout);
+ WaitForSingleObject(c->work_sem, INFINITE);
+ // avoid trying to access jobnr if we should quit
+ if (!c->func && !c->func2)
+ break;
+ WaitForSingleObject(c->job_sem, INFINITE);
+ jobnr = (*c->jobnr)++;
+ ReleaseSemaphore(c->job_sem, 1, 0);
+//printf("thread_func %X after wait (func=%X)\n", (int)v, (int)c->func); fflush(stdout);
+ if(c->func)
+ ret= c->func(c->avctx, (uint8_t *)c->arg + jobnr*c->argsize);
+ else
+ ret= c->func2(c->avctx, c->arg, jobnr, c->threadnr);
+ if (c->ret)
+ c->ret[jobnr] = ret;
+//printf("thread_func %X signal complete\n", (int)v); fflush(stdout);
+ ReleaseSemaphore(c->done_sem, 1, 0);
+ }
+
+ return 0;
+}
+
+/**
+ * Free what has been allocated by avcodec_thread_init().
+ * Must be called after decoding has finished; in particular, do not call it while avcodec_thread_execute() is running.
+ */
+void avcodec_thread_free(AVCodecContext *s){
+ ThreadContext *c= s->thread_opaque;
+ int i;
+
+ for(i=0; i<s->thread_count; i++){
+
+ c[i].func= NULL;
+ c[i].func2= NULL;
+ }
+ ReleaseSemaphore(c[0].work_sem, s->thread_count, 0);
+ for(i=0; i<s->thread_count; i++){
+ WaitForSingleObject(c[i].thread, INFINITE);
+ if(c[i].thread) CloseHandle(c[i].thread);
+ }
+ if(c[0].work_sem) CloseHandle(c[0].work_sem);
+ if(c[0].job_sem) CloseHandle(c[0].job_sem);
+ if(c[0].done_sem) CloseHandle(c[0].done_sem);
+
+ av_freep(&s->thread_opaque);
+}
+
+int avcodec_thread_execute(AVCodecContext *s, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size){
+ ThreadContext *c= s->thread_opaque;
+ int i;
+ int jobnr = 0;
+
+ assert(s == c->avctx);
+
+ /* note, we can be certain that this is not called with the same AVCodecContext by different threads at the same time */
+
+ for(i=0; i<s->thread_count; i++){
+ c[i].arg= arg;
+ c[i].argsize= size;
+ c[i].func= func;
+ c[i].ret= ret;
+ c[i].jobnr = &jobnr;
+ }
+ ReleaseSemaphore(c[0].work_sem, count, 0);
+ for(i=0; i<count; i++)
+ WaitForSingleObject(c[0].done_sem, INFINITE);
+
+ return 0;
+}
+
+int avcodec_thread_execute2(AVCodecContext *s, int (*func)(AVCodecContext *c2, void *arg2, int, int),void *arg, int *ret, int count){
+ ThreadContext *c= s->thread_opaque;
+ int i;
+ for(i=0; i<s->thread_count; i++)
+ c[i].func2 = func;
+ return avcodec_thread_execute(s, NULL, arg, ret, count, 0);
+}
+
+int avcodec_thread_init(AVCodecContext *s, int thread_count){
+ int i;
+ ThreadContext *c;
+ uint32_t threadid;
+
+ s->thread_count= thread_count;
+
+ if (thread_count <= 1)
+ return 0;
+
+ assert(!s->thread_opaque);
+ c= av_mallocz(sizeof(ThreadContext)*thread_count);
+ s->thread_opaque= c;
+ if(!(c[0].work_sem = CreateSemaphore(NULL, 0, INT_MAX, NULL)))
+ goto fail;
+ if(!(c[0].job_sem = CreateSemaphore(NULL, 1, 1, NULL)))
+ goto fail;
+ if(!(c[0].done_sem = CreateSemaphore(NULL, 0, INT_MAX, NULL)))
+ goto fail;
+
+ for(i=0; i<thread_count; i++){
+//printf("init semaphors %d\n", i); fflush(stdout);
+ c[i].avctx= s;
+ c[i].work_sem = c[0].work_sem;
+ c[i].job_sem = c[0].job_sem;
+ c[i].done_sem = c[0].done_sem;
+ c[i].threadnr = i;
+
+//printf("create thread %d\n", i); fflush(stdout);
+ c[i].thread = (HANDLE)_beginthreadex(NULL, 0, thread_func, &c[i], 0, &threadid );
+ if( !c[i].thread ) goto fail;
+ }
+//printf("init done\n"); fflush(stdout);
+
+ s->execute= avcodec_thread_execute;
+ s->execute2= avcodec_thread_execute2;
+
+ return 0;
+fail:
+ avcodec_thread_free(s);
+ return -1;
+}
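+
+/* Usage sketch (hypothetical caller, not part of this file): a codec sets
+ * up the pool once, dispatches jobs through the execute callback installed
+ * above, and frees the pool when decoding is finished:
+ *
+ *     static int decode_slice(AVCodecContext *avctx, void *arg)
+ *     {
+ *         // decode the slice described by *arg
+ *         return 0;
+ *     }
+ *
+ *     avcodec_thread_init(avctx, 4);
+ *     avctx->execute(avctx, decode_slice, slices, rets, nb_slices,
+ *                    sizeof(*slices));
+ *     avcodec_thread_free(avctx);
+ */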
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/wmv2.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/wmv2.c
new file mode 100644
index 000000000..6cb7c8477
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/wmv2.c
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2002 The FFmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avcodec.h"
+#include "mpegvideo.h"
+#include "msmpeg4data.h"
+#include "simple_idct.h"
+#include "wmv2.h"
+
+
+av_cold void ff_wmv2_common_init(Wmv2Context * w){
+ MpegEncContext * const s= &w->s;
+
+ ff_init_scantable(s->dsp.idct_permutation, &w->abt_scantable[0], wmv2_scantableA);
+ ff_init_scantable(s->dsp.idct_permutation, &w->abt_scantable[1], wmv2_scantableB);
+}
+
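+/* Add one reconstructed block to the picture: depending on abt_type the
+ * block was coded with a full 8x8 transform, as two 8x4 halves stacked
+ * vertically, or as two 4x8 halves side by side (the second half coming
+ * from abt_block2). */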
+static void wmv2_add_block(Wmv2Context *w, DCTELEM *block1, uint8_t *dst, int stride, int n){
+ MpegEncContext * const s= &w->s;
+
+ if (s->block_last_index[n] >= 0) {
+ switch(w->abt_type_table[n]){
+ case 0:
+ s->dsp.idct_add (dst, stride, block1);
+ break;
+ case 1:
+ ff_simple_idct84_add(dst , stride, block1);
+ ff_simple_idct84_add(dst + 4*stride, stride, w->abt_block2[n]);
+ s->dsp.clear_block(w->abt_block2[n]);
+ break;
+ case 2:
+ ff_simple_idct48_add(dst , stride, block1);
+ ff_simple_idct48_add(dst + 4 , stride, w->abt_block2[n]);
+ s->dsp.clear_block(w->abt_block2[n]);
+ break;
+ default:
+ av_log(s->avctx, AV_LOG_ERROR, "internal error in WMV2 abt\n");
+ }
+ }
+}
+
+void ff_wmv2_add_mb(MpegEncContext *s, DCTELEM block1[6][64], uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr){
+ Wmv2Context * const w= (Wmv2Context*)s;
+
+ wmv2_add_block(w, block1[0], dest_y , s->linesize, 0);
+ wmv2_add_block(w, block1[1], dest_y + 8 , s->linesize, 1);
+ wmv2_add_block(w, block1[2], dest_y + 8*s->linesize, s->linesize, 2);
+ wmv2_add_block(w, block1[3], dest_y + 8 + 8*s->linesize, s->linesize, 3);
+
+ if(s->flags&CODEC_FLAG_GRAY) return;
+
+ wmv2_add_block(w, block1[4], dest_cb , s->uvlinesize, 4);
+ wmv2_add_block(w, block1[5], dest_cr , s->uvlinesize, 5);
+}
+
+void ff_mspel_motion(MpegEncContext *s,
+ uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
+ uint8_t **ref_picture, op_pixels_func (*pix_op)[4],
+ int motion_x, int motion_y, int h)
+{
+ Wmv2Context * const w= (Wmv2Context*)s;
+ uint8_t *ptr;
+ int dxy, offset, mx, my, src_x, src_y, v_edge_pos, linesize, uvlinesize;
+ int emu=0;
+
+ dxy = ((motion_y & 1) << 1) | (motion_x & 1);
+ dxy = 2*dxy + w->hshift;
+ src_x = s->mb_x * 16 + (motion_x >> 1);
+ src_y = s->mb_y * 16 + (motion_y >> 1);
+
+ /* WARNING: do not forget half pels */
+ v_edge_pos = s->v_edge_pos;
+ src_x = av_clip(src_x, -16, s->width);
+ src_y = av_clip(src_y, -16, s->height);
+
+ if(src_x<=-16 || src_x >= s->width)
+ dxy &= ~3;
+ if(src_y<=-16 || src_y >= s->height)
+ dxy &= ~4;
+
+ linesize = s->linesize;
+ uvlinesize = s->uvlinesize;
+ ptr = ref_picture[0] + (src_y * linesize) + src_x;
+
+ if(s->flags&CODEC_FLAG_EMU_EDGE){
+ if(src_x<1 || src_y<1 || src_x + 17 >= s->h_edge_pos
+ || src_y + h+1 >= v_edge_pos){
+ ff_emulated_edge_mc(s->edge_emu_buffer, ptr - 1 - s->linesize, s->linesize, 19, 19,
+ src_x-1, src_y-1, s->h_edge_pos, s->v_edge_pos);
+ ptr= s->edge_emu_buffer + 1 + s->linesize;
+ emu=1;
+ }
+ }
+
+ s->dsp.put_mspel_pixels_tab[dxy](dest_y , ptr , linesize);
+ s->dsp.put_mspel_pixels_tab[dxy](dest_y+8 , ptr+8 , linesize);
+ s->dsp.put_mspel_pixels_tab[dxy](dest_y +8*linesize, ptr +8*linesize, linesize);
+ s->dsp.put_mspel_pixels_tab[dxy](dest_y+8+8*linesize, ptr+8+8*linesize, linesize);
+
+ if(s->flags&CODEC_FLAG_GRAY) return;
+
+ if (s->out_format == FMT_H263) {
+ dxy = 0;
+ if ((motion_x & 3) != 0)
+ dxy |= 1;
+ if ((motion_y & 3) != 0)
+ dxy |= 2;
+ mx = motion_x >> 2;
+ my = motion_y >> 2;
+ } else {
+ mx = motion_x / 2;
+ my = motion_y / 2;
+ dxy = ((my & 1) << 1) | (mx & 1);
+ mx >>= 1;
+ my >>= 1;
+ }
+
+ src_x = s->mb_x * 8 + mx;
+ src_y = s->mb_y * 8 + my;
+ src_x = av_clip(src_x, -8, s->width >> 1);
+ if (src_x == (s->width >> 1))
+ dxy &= ~1;
+ src_y = av_clip(src_y, -8, s->height >> 1);
+ if (src_y == (s->height >> 1))
+ dxy &= ~2;
+ offset = (src_y * uvlinesize) + src_x;
+ ptr = ref_picture[1] + offset;
+ if(emu){
+ ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
+ src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
+ ptr= s->edge_emu_buffer;
+ }
+ pix_op[1][dxy](dest_cb, ptr, uvlinesize, h >> 1);
+
+ ptr = ref_picture[2] + offset;
+ if(emu){
+ ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
+ src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
+ ptr= s->edge_emu_buffer;
+ }
+ pix_op[1][dxy](dest_cr, ptr, uvlinesize, h >> 1);
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/wmv2.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/wmv2.h
new file mode 100644
index 000000000..8522f3a32
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/wmv2.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2002 The FFmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_WMV2_H
+#define AVCODEC_WMV2_H
+
+#include "avcodec.h"
+#include "dsputil.h"
+#include "mpegvideo.h"
+#include "intrax8.h"
+
+#define SKIP_TYPE_NONE 0
+#define SKIP_TYPE_MPEG 1
+#define SKIP_TYPE_ROW 2
+#define SKIP_TYPE_COL 3
+
+
+typedef struct Wmv2Context{
+ MpegEncContext s;
+ IntraX8Context x8;
+ int j_type_bit;
+ int j_type;
+ int abt_flag;
+ int abt_type;
+ int abt_type_table[6];
+ int per_mb_abt;
+ int per_block_abt;
+ int mspel_bit;
+ int cbp_table_index;
+ int top_left_mv_flag;
+ int per_mb_rl_bit;
+ int skip_type;
+ int hshift;
+
+ ScanTable abt_scantable[2];
+ DECLARE_ALIGNED_16(DCTELEM, abt_block2)[6][64];
+}Wmv2Context;
+
+void ff_wmv2_common_init(Wmv2Context * w);
+
+#endif /* AVCODEC_WMV2_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/wmv2dec.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/wmv2dec.c
new file mode 100644
index 000000000..232099ecd
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/wmv2dec.c
@@ -0,0 +1,502 @@
+/*
+ * Copyright (c) 2002 The FFmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avcodec.h"
+#include "dsputil.h"
+#include "mpegvideo.h"
+#include "h263.h"
+#include "mathops.h"
+#include "msmpeg4.h"
+#include "msmpeg4data.h"
+#include "intrax8.h"
+#include "wmv2.h"
+
+
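+/* Parse the per-macroblock skip information of a P frame. A 2-bit skip_type
+ * selects how the skip flags are coded: SKIP_TYPE_NONE (nothing is skipped),
+ * SKIP_TYPE_MPEG (one flag per MB), SKIP_TYPE_ROW (one flag per MB row that
+ * either skips the whole row or is followed by per-MB flags) and
+ * SKIP_TYPE_COL (the same scheme, per MB column). */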
+static void parse_mb_skip(Wmv2Context * w){
+ int mb_x, mb_y;
+ MpegEncContext * const s= &w->s;
+ uint32_t * const mb_type= s->current_picture_ptr->mb_type;
+
+ w->skip_type= get_bits(&s->gb, 2);
+ switch(w->skip_type){
+ case SKIP_TYPE_NONE:
+ for(mb_y=0; mb_y<s->mb_height; mb_y++){
+ for(mb_x=0; mb_x<s->mb_width; mb_x++){
+ mb_type[mb_y*s->mb_stride + mb_x]= MB_TYPE_16x16 | MB_TYPE_L0;
+ }
+ }
+ break;
+ case SKIP_TYPE_MPEG:
+ for(mb_y=0; mb_y<s->mb_height; mb_y++){
+ for(mb_x=0; mb_x<s->mb_width; mb_x++){
+ mb_type[mb_y*s->mb_stride + mb_x]= (get_bits1(&s->gb) ? MB_TYPE_SKIP : 0) | MB_TYPE_16x16 | MB_TYPE_L0;
+ }
+ }
+ break;
+ case SKIP_TYPE_ROW:
+ for(mb_y=0; mb_y<s->mb_height; mb_y++){
+ if(get_bits1(&s->gb)){
+ for(mb_x=0; mb_x<s->mb_width; mb_x++){
+ mb_type[mb_y*s->mb_stride + mb_x]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
+ }
+ }else{
+ for(mb_x=0; mb_x<s->mb_width; mb_x++){
+ mb_type[mb_y*s->mb_stride + mb_x]= (get_bits1(&s->gb) ? MB_TYPE_SKIP : 0) | MB_TYPE_16x16 | MB_TYPE_L0;
+ }
+ }
+ }
+ break;
+ case SKIP_TYPE_COL:
+ for(mb_x=0; mb_x<s->mb_width; mb_x++){
+ if(get_bits1(&s->gb)){
+ for(mb_y=0; mb_y<s->mb_height; mb_y++){
+ mb_type[mb_y*s->mb_stride + mb_x]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
+ }
+ }else{
+ for(mb_y=0; mb_y<s->mb_height; mb_y++){
+ mb_type[mb_y*s->mb_stride + mb_x]= (get_bits1(&s->gb) ? MB_TYPE_SKIP : 0) | MB_TYPE_16x16 | MB_TYPE_L0;
+ }
+ }
+ }
+ break;
+ }
+}
+
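+/* Parse the codec extradata (at least 4 bytes): a 5-bit frame-rate value,
+ * 11 bits of bit rate in units of 1024 bit/s, one flag each for mspel, loop
+ * filter, ABT, J-type, top-left MV and per-MB RL table, and a 3-bit code
+ * from which the slice height is derived. */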
+static int decode_ext_header(Wmv2Context *w){
+ MpegEncContext * const s= &w->s;
+ GetBitContext gb;
+ int fps;
+ int code;
+
+ if(s->avctx->extradata_size<4) return -1;
+
+ init_get_bits(&gb, s->avctx->extradata, s->avctx->extradata_size*8);
+
+ fps = get_bits(&gb, 5);
+ s->bit_rate = get_bits(&gb, 11)*1024;
+ w->mspel_bit = get_bits1(&gb);
+ s->loop_filter = get_bits1(&gb);
+ w->abt_flag = get_bits1(&gb);
+ w->j_type_bit = get_bits1(&gb);
+ w->top_left_mv_flag= get_bits1(&gb);
+ w->per_mb_rl_bit = get_bits1(&gb);
+ code = get_bits(&gb, 3);
+
+ if(code==0) return -1;
+
+ s->slice_height = s->mb_height / code;
+
+ if(s->avctx->debug&FF_DEBUG_PICT_INFO){
+ av_log(s->avctx, AV_LOG_DEBUG, "fps:%d, br:%d, qpbit:%d, abt_flag:%d, j_type_bit:%d, tl_mv_flag:%d, mbrl_bit:%d, code:%d, loop_filter:%d, slices:%d\n",
+ fps, s->bit_rate, w->mspel_bit, w->abt_flag, w->j_type_bit, w->top_left_mv_flag, w->per_mb_rl_bit, code, s->loop_filter,
+ code);
+ }
+ return 0;
+}
+
+int ff_wmv2_decode_picture_header(MpegEncContext * s)
+{
+ Wmv2Context * const w= (Wmv2Context*)s;
+ int code;
+
+#if 0
+{
+int i;
+for(i=0; i<s->gb.size*8; i++)
+ printf("%d", get_bits1(&s->gb));
+// get_bits1(&s->gb);
+printf("END\n");
+return -1;
+}
+#endif
+ if(s->picture_number==0)
+ decode_ext_header(w);
+
+ s->pict_type = get_bits1(&s->gb) + 1;
+ if(s->pict_type == FF_I_TYPE){
+ code = get_bits(&s->gb, 7);
+ av_log(s->avctx, AV_LOG_DEBUG, "I7:%X/\n", code);
+ }
+ s->chroma_qscale= s->qscale = get_bits(&s->gb, 5);
+ if(s->qscale <= 0)
+ return -1;
+
+ return 0;
+}
+
+int ff_wmv2_decode_secondary_picture_header(MpegEncContext * s)
+{
+ Wmv2Context * const w= (Wmv2Context*)s;
+
+ if (s->pict_type == FF_I_TYPE) {
+ if(w->j_type_bit) w->j_type= get_bits1(&s->gb);
+ else w->j_type= 0; //FIXME check
+
+ if(!w->j_type){
+ if(w->per_mb_rl_bit) s->per_mb_rl_table= get_bits1(&s->gb);
+ else s->per_mb_rl_table= 0;
+
+ if(!s->per_mb_rl_table){
+ s->rl_chroma_table_index = decode012(&s->gb);
+ s->rl_table_index = decode012(&s->gb);
+ }
+
+ s->dc_table_index = get_bits1(&s->gb);
+ }
+ s->inter_intra_pred= 0;
+ s->no_rounding = 1;
+ if(s->avctx->debug&FF_DEBUG_PICT_INFO){
+ av_log(s->avctx, AV_LOG_DEBUG, "qscale:%d rlc:%d rl:%d dc:%d mbrl:%d j_type:%d \n",
+ s->qscale,
+ s->rl_chroma_table_index,
+ s->rl_table_index,
+ s->dc_table_index,
+ s->per_mb_rl_table,
+ w->j_type);
+ }
+ }else{
+ int cbp_index;
+ w->j_type=0;
+
+ parse_mb_skip(w);
+ cbp_index= decode012(&s->gb);
+ if(s->qscale <= 10){
+ int map[3]= {0,2,1};
+ w->cbp_table_index= map[cbp_index];
+ }else if(s->qscale <= 20){
+ int map[3]= {1,0,2};
+ w->cbp_table_index= map[cbp_index];
+ }else{
+ int map[3]= {2,1,0};
+ w->cbp_table_index= map[cbp_index];
+ }
+
+ if(w->mspel_bit) s->mspel= get_bits1(&s->gb);
+ else s->mspel= 0; //FIXME check
+
+ if(w->abt_flag){
+ w->per_mb_abt= get_bits1(&s->gb)^1;
+ if(!w->per_mb_abt){
+ w->abt_type= decode012(&s->gb);
+ }
+ }
+
+ if(w->per_mb_rl_bit) s->per_mb_rl_table= get_bits1(&s->gb);
+ else s->per_mb_rl_table= 0;
+
+ if(!s->per_mb_rl_table){
+ s->rl_table_index = decode012(&s->gb);
+ s->rl_chroma_table_index = s->rl_table_index;
+ }
+
+ s->dc_table_index = get_bits1(&s->gb);
+ s->mv_table_index = get_bits1(&s->gb);
+
+ s->inter_intra_pred= 0;//(s->width*s->height < 320*240 && s->bit_rate<=II_BITRATE);
+ s->no_rounding ^= 1;
+
+ if(s->avctx->debug&FF_DEBUG_PICT_INFO){
+ av_log(s->avctx, AV_LOG_DEBUG, "rl:%d rlc:%d dc:%d mv:%d mbrl:%d qp:%d mspel:%d per_mb_abt:%d abt_type:%d cbp:%d ii:%d\n",
+ s->rl_table_index,
+ s->rl_chroma_table_index,
+ s->dc_table_index,
+ s->mv_table_index,
+ s->per_mb_rl_table,
+ s->qscale,
+ s->mspel,
+ w->per_mb_abt,
+ w->abt_type,
+ w->cbp_table_index,
+ s->inter_intra_pred);
+ }
+ }
+ s->esc3_level_length= 0;
+ s->esc3_run_length= 0;
+
+    s->picture_number++; //FIXME ?
+
+
+ if(w->j_type){
+ ff_intrax8_decode_picture(&w->x8, 2*s->qscale, (s->qscale-1)|1 );
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int wmv2_decode_motion(Wmv2Context *w, int *mx_ptr, int *my_ptr){
+ MpegEncContext * const s= &w->s;
+ int ret;
+
+ ret= ff_msmpeg4_decode_motion(s, mx_ptr, my_ptr);
+
+ if(ret<0) return -1;
+
+ if((((*mx_ptr)|(*my_ptr)) & 1) && s->mspel)
+ w->hshift= get_bits1(&s->gb);
+ else
+ w->hshift= 0;
+
+//printf("%d %d ", *mx_ptr, *my_ptr);
+
+ return 0;
+}
+
+static int16_t *wmv2_pred_motion(Wmv2Context *w, int *px, int *py){
+ MpegEncContext * const s= &w->s;
+ int xy, wrap, diff, type;
+ int16_t *A, *B, *C, *mot_val;
+
+ wrap = s->b8_stride;
+ xy = s->block_index[0];
+
+ mot_val = s->current_picture.motion_val[0][xy];
+
+ A = s->current_picture.motion_val[0][xy - 1];
+ B = s->current_picture.motion_val[0][xy - wrap];
+ C = s->current_picture.motion_val[0][xy + 2 - wrap];
+
+ if(s->mb_x && !s->first_slice_line && !s->mspel && w->top_left_mv_flag)
+ diff= FFMAX(FFABS(A[0] - B[0]), FFABS(A[1] - B[1]));
+ else
+ diff=0;
+
+ if(diff >= 8)
+ type= get_bits1(&s->gb);
+ else
+ type= 2;
+
+ if(type == 0){
+ *px= A[0];
+ *py= A[1];
+ }else if(type == 1){
+ *px= B[0];
+ *py= B[1];
+ }else{
+ /* special case for first (slice) line */
+ if (s->first_slice_line) {
+ *px = A[0];
+ *py = A[1];
+ } else {
+ *px = mid_pred(A[0], B[0], C[0]);
+ *py = mid_pred(A[1], B[1], C[1]);
+ }
+ }
+
+ return mot_val;
+}
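+
+/* mid_pred() (from mathops.h) returns the median of its three arguments; an
+ * equivalent scalar sketch is FFMAX(FFMIN(a, b), FFMIN(FFMAX(a, b), c)). */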
+
+static inline int wmv2_decode_inter_block(Wmv2Context *w, DCTELEM *block, int n, int cbp){
+ MpegEncContext * const s= &w->s;
+ static const int sub_cbp_table[3]= {2,3,1};
+ int sub_cbp;
+
+ if(!cbp){
+ s->block_last_index[n] = -1;
+
+ return 0;
+ }
+
+ if(w->per_block_abt)
+ w->abt_type= decode012(&s->gb);
+#if 0
+ if(w->per_block_abt)
+ printf("B%d", w->abt_type);
+#endif
+ w->abt_type_table[n]= w->abt_type;
+
+ if(w->abt_type){
+// const uint8_t *scantable= w->abt_scantable[w->abt_type-1].permutated;
+ const uint8_t *scantable= w->abt_scantable[w->abt_type-1].scantable;
+// const uint8_t *scantable= w->abt_type-1 ? w->abt_scantable[1].permutated : w->abt_scantable[0].scantable;
+
+ sub_cbp= sub_cbp_table[ decode012(&s->gb) ];
+// printf("S%d", sub_cbp);
+
+ if(sub_cbp&1){
+ if (ff_msmpeg4_decode_block(s, block, n, 1, scantable) < 0)
+ return -1;
+ }
+
+ if(sub_cbp&2){
+ if (ff_msmpeg4_decode_block(s, w->abt_block2[n], n, 1, scantable) < 0)
+ return -1;
+ }
+ s->block_last_index[n] = 63;
+
+ return 0;
+ }else{
+ return ff_msmpeg4_decode_block(s, block, n, 1, s->inter_scantable.permutated);
+ }
+}
+
+
+int ff_wmv2_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
+{
+ Wmv2Context * const w= (Wmv2Context*)s;
+ int cbp, code, i;
+ uint8_t *coded_val;
+
+ if(w->j_type) return 0;
+
+ if (s->pict_type == FF_P_TYPE) {
+ if(IS_SKIP(s->current_picture.mb_type[s->mb_y * s->mb_stride + s->mb_x])){
+ /* skip mb */
+ s->mb_intra = 0;
+ for(i=0;i<6;i++)
+ s->block_last_index[i] = -1;
+ s->mv_dir = MV_DIR_FORWARD;
+ s->mv_type = MV_TYPE_16X16;
+ s->mv[0][0][0] = 0;
+ s->mv[0][0][1] = 0;
+ s->mb_skipped = 1;
+ w->hshift=0;
+ return 0;
+ }
+
+ code = get_vlc2(&s->gb, ff_mb_non_intra_vlc[w->cbp_table_index].table, MB_NON_INTRA_VLC_BITS, 3);
+ if (code < 0)
+ return -1;
+ s->mb_intra = (~code & 0x40) >> 6;
+
+ cbp = code & 0x3f;
+ } else {
+ s->mb_intra = 1;
+ code = get_vlc2(&s->gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
+ if (code < 0){
+ av_log(s->avctx, AV_LOG_ERROR, "II-cbp illegal at %d %d\n", s->mb_x, s->mb_y);
+ return -1;
+ }
+ /* predict coded block pattern */
+ cbp = 0;
+ for(i=0;i<6;i++) {
+ int val = ((code >> (5 - i)) & 1);
+ if (i < 4) {
+ int pred = ff_msmpeg4_coded_block_pred(s, i, &coded_val);
+ val = val ^ pred;
+ *coded_val = val;
+ }
+ cbp |= val << (5 - i);
+ }
+ }
+
+ if (!s->mb_intra) {
+ int mx, my;
+//printf("P at %d %d\n", s->mb_x, s->mb_y);
+ wmv2_pred_motion(w, &mx, &my);
+
+ if(cbp){
+ s->dsp.clear_blocks(s->block[0]);
+ if(s->per_mb_rl_table){
+ s->rl_table_index = decode012(&s->gb);
+ s->rl_chroma_table_index = s->rl_table_index;
+ }
+
+ if(w->abt_flag && w->per_mb_abt){
+ w->per_block_abt= get_bits1(&s->gb);
+ if(!w->per_block_abt)
+ w->abt_type= decode012(&s->gb);
+ }else
+ w->per_block_abt=0;
+ }
+
+ if (wmv2_decode_motion(w, &mx, &my) < 0)
+ return -1;
+
+ s->mv_dir = MV_DIR_FORWARD;
+ s->mv_type = MV_TYPE_16X16;
+ s->mv[0][0][0] = mx;
+ s->mv[0][0][1] = my;
+
+ for (i = 0; i < 6; i++) {
+ if (wmv2_decode_inter_block(w, block[i], i, (cbp >> (5 - i)) & 1) < 0)
+ {
+ av_log(s->avctx, AV_LOG_ERROR, "\nerror while decoding inter block: %d x %d (%d)\n", s->mb_x, s->mb_y, i);
+ return -1;
+ }
+ }
+ } else {
+//if(s->pict_type==FF_P_TYPE)
+// printf("%d%d ", s->inter_intra_pred, cbp);
+//printf("I at %d %d %d %06X\n", s->mb_x, s->mb_y, ((cbp&3)? 1 : 0) +((cbp&0x3C)? 2 : 0), show_bits(&s->gb, 24));
+ s->ac_pred = get_bits1(&s->gb);
+ if(s->inter_intra_pred){
+ s->h263_aic_dir= get_vlc2(&s->gb, ff_inter_intra_vlc.table, INTER_INTRA_VLC_BITS, 1);
+// printf("%d%d %d %d/", s->ac_pred, s->h263_aic_dir, s->mb_x, s->mb_y);
+ }
+ if(s->per_mb_rl_table && cbp){
+ s->rl_table_index = decode012(&s->gb);
+ s->rl_chroma_table_index = s->rl_table_index;
+ }
+
+ s->dsp.clear_blocks(s->block[0]);
+ for (i = 0; i < 6; i++) {
+ if (ff_msmpeg4_decode_block(s, block[i], i, (cbp >> (5 - i)) & 1, NULL) < 0)
+ {
+ av_log(s->avctx, AV_LOG_ERROR, "\nerror while decoding intra block: %d x %d (%d)\n", s->mb_x, s->mb_y, i);
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static av_cold int wmv2_decode_init(AVCodecContext *avctx){
+ Wmv2Context * const w= avctx->priv_data;
+
+ if(avctx->idct_algo==FF_IDCT_AUTO){
+ avctx->idct_algo=FF_IDCT_WMV2;
+ }
+
+ if(ff_msmpeg4_decode_init(avctx) < 0)
+ return -1;
+
+ ff_wmv2_common_init(w);
+
+ ff_intrax8_common_init(&w->x8,&w->s);
+
+ return 0;
+}
+
+static av_cold int wmv2_decode_end(AVCodecContext *avctx)
+{
+ Wmv2Context *w = avctx->priv_data;
+
+ ff_intrax8_common_end(&w->x8);
+ return ff_h263_decode_end(avctx);
+}
+
+AVCodec wmv2_decoder = {
+ "wmv2",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_WMV2,
+ sizeof(Wmv2Context),
+ wmv2_decode_init,
+ NULL,
+ wmv2_decode_end,
+ ff_h263_decode_frame,
+ /*.capabilities = */CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
+ /*.next = */NULL,
+ /*.flush = */NULL,
+ /*.supported_framerates = */NULL,
+ /*.pix_fmts = */NULL,
+ /*.long_name = */NULL_IF_CONFIG_SMALL("Windows Media Video 8"),
+};
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/cpuid.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/cpuid.c
new file mode 100644
index 000000000..f012fb18f
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/cpuid.c
@@ -0,0 +1,125 @@
+/*
+ * CPU detection code, extracted from mmx.h
+ * (c)1997-99 by H. Dietz and R. Fisher
+ * Converted to C and improved by Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdlib.h>
+#include "libavutil/x86_cpu.h"
+#include "libavcodec/dsputil.h"
+
+#undef printf
+
+/* ebx saving is necessary for PIC. gcc seems unable to handle this on its own. */
+#define cpuid(index,eax,ebx,ecx,edx)\
+ __asm__ volatile\
+ ("mov %%"REG_b", %%"REG_S"\n\t"\
+ "cpuid\n\t"\
+ "xchg %%"REG_b", %%"REG_S\
+ : "=a" (eax), "=S" (ebx),\
+ "=c" (ecx), "=d" (edx)\
+ : "0" (index));
+
+/* Function to test if multimedia instructions are supported... */
+int mm_support(void)
+{
+ int rval = 0;
+ int eax, ebx, ecx, edx;
+ int max_std_level, max_ext_level, std_caps=0, ext_caps=0;
+
+#if ARCH_X86_32
+ x86_reg a, c;
+ __asm__ volatile (
+ /* See if CPUID instruction is supported ... */
+ /* ... Get copies of EFLAGS into eax and ecx */
+ "pushfl\n\t"
+ "pop %0\n\t"
+ "mov %0, %1\n\t"
+
+ /* ... Toggle the ID bit in one copy and store */
+ /* to the EFLAGS reg */
+ "xor $0x200000, %0\n\t"
+ "push %0\n\t"
+ "popfl\n\t"
+
+ /* ... Get the (hopefully modified) EFLAGS */
+ "pushfl\n\t"
+ "pop %0\n\t"
+ : "=a" (a), "=c" (c)
+ :
+ : "cc"
+ );
+
+ if (a == c)
+ return 0; /* CPUID not supported */
+#endif
+
+ cpuid(0, max_std_level, ebx, ecx, edx);
+
+ if(max_std_level >= 1){
+ cpuid(1, eax, ebx, ecx, std_caps);
+ if (std_caps & (1<<23))
+ rval |= FF_MM_MMX;
+ if (std_caps & (1<<25))
+ rval |= FF_MM_MMX2
+#if HAVE_SSE
+ | FF_MM_SSE;
+ if (std_caps & (1<<26))
+ rval |= FF_MM_SSE2;
+ if (ecx & 1)
+ rval |= FF_MM_SSE3;
+ if (ecx & 0x00000200 )
+ rval |= FF_MM_SSSE3;
+ if (ecx & 0x00080000 )
+ rval |= FF_MM_SSE4;
+ if (ecx & 0x00100000 )
+ rval |= FF_MM_SSE42;
+#endif
+ ;
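+        /* the lone ';' above terminates the "rval |= FF_MM_MMX2" statement
+           when HAVE_SSE is 0 */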
+ }
+
+ cpuid(0x80000000, max_ext_level, ebx, ecx, edx);
+
+ if(max_ext_level >= 0x80000001){
+ cpuid(0x80000001, eax, ebx, ecx, ext_caps);
+ if (ext_caps & (1<<31))
+ rval |= FF_MM_3DNOW;
+ if (ext_caps & (1<<30))
+ rval |= FF_MM_3DNOWEXT;
+ if (ext_caps & (1<<23))
+ rval |= FF_MM_MMX;
+ if (ext_caps & (1<<22))
+ rval |= FF_MM_MMX2;
+ }
+
+#if 0
+ av_log(NULL, AV_LOG_DEBUG, "%s%s%s%s%s%s%s%s%s%s\n",
+ (rval&FF_MM_MMX) ? "MMX ":"",
+ (rval&FF_MM_MMX2) ? "MMX2 ":"",
+ (rval&FF_MM_SSE) ? "SSE ":"",
+ (rval&FF_MM_SSE2) ? "SSE2 ":"",
+ (rval&FF_MM_SSE3) ? "SSE3 ":"",
+ (rval&FF_MM_SSSE3) ? "SSSE3 ":"",
+ (rval&FF_MM_SSE4) ? "SSE4.1 ":"",
+ (rval&FF_MM_SSE42) ? "SSE4.2 ":"",
+ (rval&FF_MM_3DNOW) ? "3DNow ":"",
+ (rval&FF_MM_3DNOWEXT) ? "3DNowExt ":"");
+#endif
+ return rval;
+}
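+
+/* Typical use of the returned flags (a sketch with assumed wiring, where c is
+ * the DSPContext being initialized; the real dispatch lives in dsputil_mmx.c):
+ *
+ *     int flags = mm_support();
+ *     if (flags & FF_MM_MMX2)
+ *         c->put_pixels_tab[0][0] = put_pixels16_mmx2;
+ *     if (flags & FF_MM_SSE2)
+ *         c->put_pixels_tab[0][0] = put_pixels16_sse2;
+ */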
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/dsputil_h264_template_mmx.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/dsputil_h264_template_mmx.c
new file mode 100644
index 000000000..9aa8a52aa
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/dsputil_h264_template_mmx.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2005 Zoltan Hidvegi <hzoli -a- hzoli -d- com>,
+ * Loren Merritt
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * MMX optimized version of (put|avg)_h264_chroma_mc8.
+ * H264_CHROMA_MC8_TMPL must be defined to the desired function name
+ * H264_CHROMA_OP must be defined to empty for put and pavgb/pavgusb for avg
+ * H264_CHROMA_MC8_MV0 must be defined to a (put|avg)_pixels8 function
+ */
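+/* Illustrative instantiation (assumed names; the real ones are chosen by the
+ * file that #includes this template):
+ *
+ *     #define H264_CHROMA_OP(a, b)
+ *     #define H264_CHROMA_OP4(a, b, tmp)
+ *     #define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_mmx
+ *     #define H264_CHROMA_MC8_MV0  put_pixels8_mmx
+ *     #include "dsputil_h264_template_mmx.c"
+ */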
+static void H264_CHROMA_MC8_TMPL(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y, const uint64_t *rnd_reg)
+{
+ DECLARE_ALIGNED_8(uint64_t, AA);
+ DECLARE_ALIGNED_8(uint64_t, DD);
+ int i;
+
+ if(y==0 && x==0) {
+ /* no filter needed */
+ H264_CHROMA_MC8_MV0(dst, src, stride, h);
+ return;
+ }
+
+ assert(x<8 && y<8 && x>=0 && y>=0);
+
+ if(y==0 || x==0)
+ {
+ /* 1 dimensional filter only */
+ const int dxy = x ? 1 : stride;
+
+ __asm__ volatile(
+ "movd %0, %%mm5\n\t"
+ "movq %1, %%mm4\n\t"
+ "movq %2, %%mm6\n\t" /* mm6 = rnd >> 3 */
+ "punpcklwd %%mm5, %%mm5\n\t"
+ "punpckldq %%mm5, %%mm5\n\t" /* mm5 = B = x */
+ "pxor %%mm7, %%mm7\n\t"
+ "psubw %%mm5, %%mm4\n\t" /* mm4 = A = 8-x */
+ :: "rm"(x+y), "m"(ff_pw_8), "m"(*(rnd_reg+1)));
+
+ for(i=0; i<h; i++) {
+ __asm__ volatile(
+            /* mm0 = src[0..7], mm2 = src[dxy..dxy+7] */
+ "movq %0, %%mm0\n\t"
+ "movq %1, %%mm2\n\t"
+ :: "m"(src[0]), "m"(src[dxy]));
+
+ __asm__ volatile(
+ /* [mm0,mm1] = A * src[0..7] */
+ /* [mm2,mm3] = B * src[1..8] */
+ "movq %%mm0, %%mm1\n\t"
+ "movq %%mm2, %%mm3\n\t"
+ "punpcklbw %%mm7, %%mm0\n\t"
+ "punpckhbw %%mm7, %%mm1\n\t"
+ "punpcklbw %%mm7, %%mm2\n\t"
+ "punpckhbw %%mm7, %%mm3\n\t"
+ "pmullw %%mm4, %%mm0\n\t"
+ "pmullw %%mm4, %%mm1\n\t"
+ "pmullw %%mm5, %%mm2\n\t"
+ "pmullw %%mm5, %%mm3\n\t"
+
+ /* dst[0..7] = (A * src[0..7] + B * src[1..8] + (rnd >> 3)) >> 3 */
+ "paddw %%mm6, %%mm0\n\t"
+ "paddw %%mm6, %%mm1\n\t"
+ "paddw %%mm2, %%mm0\n\t"
+ "paddw %%mm3, %%mm1\n\t"
+ "psrlw $3, %%mm0\n\t"
+ "psrlw $3, %%mm1\n\t"
+ "packuswb %%mm1, %%mm0\n\t"
+ H264_CHROMA_OP(%0, %%mm0)
+ "movq %%mm0, %0\n\t"
+ : "=m" (dst[0]));
+
+ src += stride;
+ dst += stride;
+ }
+ return;
+ }
+
+ /* general case, bilinear */
+ __asm__ volatile("movd %2, %%mm4\n\t"
+ "movd %3, %%mm6\n\t"
+ "punpcklwd %%mm4, %%mm4\n\t"
+ "punpcklwd %%mm6, %%mm6\n\t"
+ "punpckldq %%mm4, %%mm4\n\t" /* mm4 = x words */
+ "punpckldq %%mm6, %%mm6\n\t" /* mm6 = y words */
+ "movq %%mm4, %%mm5\n\t"
+ "pmullw %%mm6, %%mm4\n\t" /* mm4 = x * y */
+ "psllw $3, %%mm5\n\t"
+ "psllw $3, %%mm6\n\t"
+ "movq %%mm5, %%mm7\n\t"
+ "paddw %%mm6, %%mm7\n\t"
+ "movq %%mm4, %1\n\t" /* DD = x * y */
+ "psubw %%mm4, %%mm5\n\t" /* mm5 = B = 8x - xy */
+ "psubw %%mm4, %%mm6\n\t" /* mm6 = C = 8y - xy */
+ "paddw %4, %%mm4\n\t"
+ "psubw %%mm7, %%mm4\n\t" /* mm4 = A = xy - (8x+8y) + 64 */
+ "pxor %%mm7, %%mm7\n\t"
+ "movq %%mm4, %0\n\t"
+ : "=m" (AA), "=m" (DD) : "rm" (x), "rm" (y), "m" (ff_pw_64));
+
+ __asm__ volatile(
+ /* mm0 = src[0..7], mm1 = src[1..8] */
+ "movq %0, %%mm0\n\t"
+ "movq %1, %%mm1\n\t"
+ : : "m" (src[0]), "m" (src[1]));
+
+ for(i=0; i<h; i++) {
+ src += stride;
+
+ __asm__ volatile(
+ /* mm2 = A * src[0..3] + B * src[1..4] */
+ /* mm3 = A * src[4..7] + B * src[5..8] */
+ "movq %%mm0, %%mm2\n\t"
+ "movq %%mm1, %%mm3\n\t"
+ "punpckhbw %%mm7, %%mm0\n\t"
+ "punpcklbw %%mm7, %%mm1\n\t"
+ "punpcklbw %%mm7, %%mm2\n\t"
+ "punpckhbw %%mm7, %%mm3\n\t"
+ "pmullw %0, %%mm0\n\t"
+ "pmullw %0, %%mm2\n\t"
+ "pmullw %%mm5, %%mm1\n\t"
+ "pmullw %%mm5, %%mm3\n\t"
+ "paddw %%mm1, %%mm2\n\t"
+ "paddw %%mm0, %%mm3\n\t"
+ : : "m" (AA));
+
+ __asm__ volatile(
+ /* [mm2,mm3] += C * src[0..7] */
+ "movq %0, %%mm0\n\t"
+ "movq %%mm0, %%mm1\n\t"
+ "punpcklbw %%mm7, %%mm0\n\t"
+ "punpckhbw %%mm7, %%mm1\n\t"
+ "pmullw %%mm6, %%mm0\n\t"
+ "pmullw %%mm6, %%mm1\n\t"
+ "paddw %%mm0, %%mm2\n\t"
+ "paddw %%mm1, %%mm3\n\t"
+ : : "m" (src[0]));
+
+ __asm__ volatile(
+ /* [mm2,mm3] += D * src[1..8] */
+ "movq %1, %%mm1\n\t"
+ "movq %%mm1, %%mm0\n\t"
+ "movq %%mm1, %%mm4\n\t"
+ "punpcklbw %%mm7, %%mm0\n\t"
+ "punpckhbw %%mm7, %%mm4\n\t"
+ "pmullw %2, %%mm0\n\t"
+ "pmullw %2, %%mm4\n\t"
+ "paddw %%mm0, %%mm2\n\t"
+ "paddw %%mm4, %%mm3\n\t"
+ "movq %0, %%mm0\n\t"
+ : : "m" (src[0]), "m" (src[1]), "m" (DD));
+
+ __asm__ volatile(
+ /* dst[0..7] = ([mm2,mm3] + rnd) >> 6 */
+ "paddw %1, %%mm2\n\t"
+ "paddw %1, %%mm3\n\t"
+ "psrlw $6, %%mm2\n\t"
+ "psrlw $6, %%mm3\n\t"
+ "packuswb %%mm3, %%mm2\n\t"
+ H264_CHROMA_OP(%0, %%mm2)
+ "movq %%mm2, %0\n\t"
+ : "=m" (dst[0]) : "m" (*rnd_reg));
+ dst+= stride;
+ }
+}
+
+static void H264_CHROMA_MC4_TMPL(uint8_t *dst/*align 4*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y, const uint64_t *rnd_reg)
+{
+ __asm__ volatile(
+ "pxor %%mm7, %%mm7 \n\t"
+ "movd %5, %%mm2 \n\t"
+ "movd %6, %%mm3 \n\t"
+ "movq "MANGLE(ff_pw_8)", %%mm4\n\t"
+ "movq "MANGLE(ff_pw_8)", %%mm5\n\t"
+ "punpcklwd %%mm2, %%mm2 \n\t"
+ "punpcklwd %%mm3, %%mm3 \n\t"
+ "punpcklwd %%mm2, %%mm2 \n\t"
+ "punpcklwd %%mm3, %%mm3 \n\t"
+ "psubw %%mm2, %%mm4 \n\t"
+ "psubw %%mm3, %%mm5 \n\t"
+
+ "movd (%1), %%mm0 \n\t"
+ "movd 1(%1), %%mm6 \n\t"
+ "add %3, %1 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm6 \n\t"
+ "pmullw %%mm4, %%mm0 \n\t"
+ "pmullw %%mm2, %%mm6 \n\t"
+ "paddw %%mm0, %%mm6 \n\t"
+
+ "1: \n\t"
+ "movd (%1), %%mm0 \n\t"
+ "movd 1(%1), %%mm1 \n\t"
+ "add %3, %1 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+ "pmullw %%mm4, %%mm0 \n\t"
+ "pmullw %%mm2, %%mm1 \n\t"
+ "paddw %%mm0, %%mm1 \n\t"
+ "movq %%mm1, %%mm0 \n\t"
+ "pmullw %%mm5, %%mm6 \n\t"
+ "pmullw %%mm3, %%mm1 \n\t"
+ "paddw %4, %%mm6 \n\t"
+ "paddw %%mm6, %%mm1 \n\t"
+ "psrlw $6, %%mm1 \n\t"
+ "packuswb %%mm1, %%mm1 \n\t"
+ H264_CHROMA_OP4((%0), %%mm1, %%mm6)
+ "movd %%mm1, (%0) \n\t"
+ "add %3, %0 \n\t"
+ "movd (%1), %%mm6 \n\t"
+ "movd 1(%1), %%mm1 \n\t"
+ "add %3, %1 \n\t"
+ "punpcklbw %%mm7, %%mm6 \n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+ "pmullw %%mm4, %%mm6 \n\t"
+ "pmullw %%mm2, %%mm1 \n\t"
+ "paddw %%mm6, %%mm1 \n\t"
+ "movq %%mm1, %%mm6 \n\t"
+ "pmullw %%mm5, %%mm0 \n\t"
+ "pmullw %%mm3, %%mm1 \n\t"
+ "paddw %4, %%mm0 \n\t"
+ "paddw %%mm0, %%mm1 \n\t"
+ "psrlw $6, %%mm1 \n\t"
+ "packuswb %%mm1, %%mm1 \n\t"
+ H264_CHROMA_OP4((%0), %%mm1, %%mm0)
+ "movd %%mm1, (%0) \n\t"
+ "add %3, %0 \n\t"
+ "sub $2, %2 \n\t"
+ "jnz 1b \n\t"
+ : "+r"(dst), "+r"(src), "+r"(h)
+ : "r"((x86_reg)stride), "m"(*rnd_reg), "m"(x), "m"(y)
+ );
+}
+
+#ifdef H264_CHROMA_MC2_TMPL
+static void H264_CHROMA_MC2_TMPL(uint8_t *dst/*align 2*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
+{
+ int tmp = ((1<<16)-1)*x + 8;
+ int CD= tmp*y;
+ int AB= (tmp<<3) - CD;
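+    /* tmp = (x<<16) + (8-x), so AB = (B<<16) + A and CD = (D<<16) + C with
+     * the bilinear weights A=(8-x)*(8-y), B=x*(8-y), C=(8-x)*y, D=x*y;
+     * the rounding constant 32 (ff_pw_32) is added separately below. */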
+ __asm__ volatile(
+ /* mm5 = {A,B,A,B} */
+ /* mm6 = {C,D,C,D} */
+ "movd %0, %%mm5\n\t"
+ "movd %1, %%mm6\n\t"
+ "punpckldq %%mm5, %%mm5\n\t"
+ "punpckldq %%mm6, %%mm6\n\t"
+ "pxor %%mm7, %%mm7\n\t"
+ /* mm0 = src[0,1,1,2] */
+ "movd %2, %%mm2\n\t"
+ "punpcklbw %%mm7, %%mm2\n\t"
+ "pshufw $0x94, %%mm2, %%mm2\n\t"
+ :: "r"(AB), "r"(CD), "m"(src[0]));
+
+
+ __asm__ volatile(
+ "1:\n\t"
+ "add %4, %1\n\t"
+ /* mm1 = A * src[0,1] + B * src[1,2] */
+ "movq %%mm2, %%mm1\n\t"
+ "pmaddwd %%mm5, %%mm1\n\t"
+ /* mm0 = src[0,1,1,2] */
+ "movd (%1), %%mm0\n\t"
+ "punpcklbw %%mm7, %%mm0\n\t"
+ "pshufw $0x94, %%mm0, %%mm0\n\t"
+ /* mm1 += C * src[0,1] + D * src[1,2] */
+ "movq %%mm0, %%mm2\n\t"
+ "pmaddwd %%mm6, %%mm0\n\t"
+ "paddw %3, %%mm1\n\t"
+ "paddw %%mm0, %%mm1\n\t"
+ /* dst[0,1] = pack((mm1 + 32) >> 6) */
+ "psrlw $6, %%mm1\n\t"
+ "packssdw %%mm7, %%mm1\n\t"
+ "packuswb %%mm7, %%mm1\n\t"
+ H264_CHROMA_OP4((%0), %%mm1, %%mm3)
+ "movd %%mm1, %%esi\n\t"
+ "movw %%si, (%0)\n\t"
+ "add %4, %0\n\t"
+ "sub $1, %2\n\t"
+ "jnz 1b\n\t"
+ : "+r" (dst), "+r"(src), "+r"(h)
+ : "m" (ff_pw_32), "r"((x86_reg)stride)
+ : "%esi");
+
+}
+#endif
+
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/dsputil_h264_template_ssse3.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/dsputil_h264_template_ssse3.c
new file mode 100644
index 000000000..e29e05e7c
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/dsputil_h264_template_ssse3.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2008 Loren Merritt
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * SSSE3 optimized version of (put|avg)_h264_chroma_mc8.
+ * H264_CHROMA_MC8_TMPL must be defined to the desired function name
+ * H264_CHROMA_MC8_MV0 must be defined to a (put|avg)_pixels8 function
+ * AVG_OP must be defined to empty for put and the identity for avg
+ */
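+/* Illustrative instantiation (assumed names, chosen by the including file):
+ *
+ *     #define AVG_OP(X)
+ *     #define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_ssse3
+ *     #define H264_CHROMA_MC8_MV0  put_pixels8_mmx
+ *     #include "dsputil_h264_template_ssse3.c"
+ *
+ * For the avg flavour, AVG_OP(X) expands to X itself.
+ */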
+static void H264_CHROMA_MC8_TMPL(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y, int rnd)
+{
+ if(y==0 && x==0) {
+ /* no filter needed */
+ H264_CHROMA_MC8_MV0(dst, src, stride, h);
+ return;
+ }
+
+ assert(x<8 && y<8 && x>=0 && y>=0);
+
+ if(y==0 || x==0)
+ {
+ /* 1 dimensional filter only */
+ __asm__ volatile(
+ "movd %0, %%xmm7 \n\t"
+ "movq %1, %%xmm6 \n\t"
+ "pshuflw $0, %%xmm7, %%xmm7 \n\t"
+ "movlhps %%xmm6, %%xmm6 \n\t"
+ "movlhps %%xmm7, %%xmm7 \n\t"
+ :: "r"(255*(x+y)+8), "m"(*(rnd?&ff_pw_4:&ff_pw_3))
+ );
+
+ if(x) {
+ __asm__ volatile(
+ "1: \n\t"
+ "movq (%1), %%xmm0 \n\t"
+ "movq 1(%1), %%xmm1 \n\t"
+ "movq (%1,%3), %%xmm2 \n\t"
+ "movq 1(%1,%3), %%xmm3 \n\t"
+ "punpcklbw %%xmm1, %%xmm0 \n\t"
+ "punpcklbw %%xmm3, %%xmm2 \n\t"
+ "pmaddubsw %%xmm7, %%xmm0 \n\t"
+ "pmaddubsw %%xmm7, %%xmm2 \n\t"
+ AVG_OP("movq (%0), %%xmm4 \n\t")
+ AVG_OP("movhps (%0,%3), %%xmm4 \n\t")
+ "paddw %%xmm6, %%xmm0 \n\t"
+ "paddw %%xmm6, %%xmm2 \n\t"
+ "psrlw $3, %%xmm0 \n\t"
+ "psrlw $3, %%xmm2 \n\t"
+ "packuswb %%xmm2, %%xmm0 \n\t"
+ AVG_OP("pavgb %%xmm4, %%xmm0 \n\t")
+ "movq %%xmm0, (%0) \n\t"
+ "movhps %%xmm0, (%0,%3) \n\t"
+ "sub $2, %2 \n\t"
+ "lea (%1,%3,2), %1 \n\t"
+ "lea (%0,%3,2), %0 \n\t"
+ "jg 1b \n\t"
+ :"+r"(dst), "+r"(src), "+r"(h)
+ :"r"((x86_reg)stride)
+ );
+ } else {
+ __asm__ volatile(
+ "1: \n\t"
+ "movq (%1), %%xmm0 \n\t"
+ "movq (%1,%3), %%xmm1 \n\t"
+ "movdqa %%xmm1, %%xmm2 \n\t"
+ "movq (%1,%3,2), %%xmm3 \n\t"
+ "punpcklbw %%xmm1, %%xmm0 \n\t"
+ "punpcklbw %%xmm3, %%xmm2 \n\t"
+ "pmaddubsw %%xmm7, %%xmm0 \n\t"
+ "pmaddubsw %%xmm7, %%xmm2 \n\t"
+ AVG_OP("movq (%0), %%xmm4 \n\t")
+ AVG_OP("movhps (%0,%3), %%xmm4 \n\t")
+ "paddw %%xmm6, %%xmm0 \n\t"
+ "paddw %%xmm6, %%xmm2 \n\t"
+ "psrlw $3, %%xmm0 \n\t"
+ "psrlw $3, %%xmm2 \n\t"
+ "packuswb %%xmm2, %%xmm0 \n\t"
+ AVG_OP("pavgb %%xmm4, %%xmm0 \n\t")
+ "movq %%xmm0, (%0) \n\t"
+ "movhps %%xmm0, (%0,%3) \n\t"
+ "sub $2, %2 \n\t"
+ "lea (%1,%3,2), %1 \n\t"
+ "lea (%0,%3,2), %0 \n\t"
+ "jg 1b \n\t"
+ :"+r"(dst), "+r"(src), "+r"(h)
+ :"r"((x86_reg)stride)
+ );
+ }
+ return;
+ }
+
+ /* general case, bilinear */
+ __asm__ volatile(
+ "movd %0, %%xmm7 \n\t"
+ "movd %1, %%xmm6 \n\t"
+ "movdqa %2, %%xmm5 \n\t"
+ "pshuflw $0, %%xmm7, %%xmm7 \n\t"
+ "pshuflw $0, %%xmm6, %%xmm6 \n\t"
+ "movlhps %%xmm7, %%xmm7 \n\t"
+ "movlhps %%xmm6, %%xmm6 \n\t"
+ :: "r"((x*255+8)*(8-y)), "r"((x*255+8)*y), "m"(*(rnd?&ff_pw_32:&ff_pw_28))
+ );
+
+ __asm__ volatile(
+ "movq (%1), %%xmm0 \n\t"
+ "movq 1(%1), %%xmm1 \n\t"
+ "punpcklbw %%xmm1, %%xmm0 \n\t"
+ "add %3, %1 \n\t"
+ "1: \n\t"
+ "movq (%1), %%xmm1 \n\t"
+ "movq 1(%1), %%xmm2 \n\t"
+ "movq (%1,%3), %%xmm3 \n\t"
+ "movq 1(%1,%3), %%xmm4 \n\t"
+ "lea (%1,%3,2), %1 \n\t"
+ "punpcklbw %%xmm2, %%xmm1 \n\t"
+ "punpcklbw %%xmm4, %%xmm3 \n\t"
+ "movdqa %%xmm1, %%xmm2 \n\t"
+ "movdqa %%xmm3, %%xmm4 \n\t"
+ "pmaddubsw %%xmm7, %%xmm0 \n\t"
+ "pmaddubsw %%xmm6, %%xmm1 \n\t"
+ "pmaddubsw %%xmm7, %%xmm2 \n\t"
+ "pmaddubsw %%xmm6, %%xmm3 \n\t"
+ "paddw %%xmm5, %%xmm0 \n\t"
+ "paddw %%xmm5, %%xmm2 \n\t"
+ "paddw %%xmm0, %%xmm1 \n\t"
+ "paddw %%xmm2, %%xmm3 \n\t"
+ "movdqa %%xmm4, %%xmm0 \n\t"
+ "psrlw $6, %%xmm1 \n\t"
+ "psrlw $6, %%xmm3 \n\t"
+ AVG_OP("movq (%0), %%xmm2 \n\t")
+ AVG_OP("movhps (%0,%3), %%xmm2 \n\t")
+ "packuswb %%xmm3, %%xmm1 \n\t"
+ AVG_OP("pavgb %%xmm2, %%xmm1 \n\t")
+ "movq %%xmm1, (%0)\n\t"
+ "movhps %%xmm1, (%0,%3)\n\t"
+ "sub $2, %2 \n\t"
+ "lea (%0,%3,2), %0 \n\t"
+ "jg 1b \n\t"
+ :"+r"(dst), "+r"(src), "+r"(h)
+ :"r"((x86_reg)stride)
+ );
+}
+
+static void H264_CHROMA_MC4_TMPL(uint8_t *dst/*align 4*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
+{
+ __asm__ volatile(
+ "movd %0, %%mm7 \n\t"
+ "movd %1, %%mm6 \n\t"
+ "movq %2, %%mm5 \n\t"
+ "pshufw $0, %%mm7, %%mm7 \n\t"
+ "pshufw $0, %%mm6, %%mm6 \n\t"
+ :: "r"((x*255+8)*(8-y)), "r"((x*255+8)*y), "m"(ff_pw_32)
+ );
+
+ __asm__ volatile(
+ "movd (%1), %%mm0 \n\t"
+ "punpcklbw 1(%1), %%mm0 \n\t"
+ "add %3, %1 \n\t"
+ "1: \n\t"
+ "movd (%1), %%mm1 \n\t"
+ "movd (%1,%3), %%mm3 \n\t"
+ "punpcklbw 1(%1), %%mm1 \n\t"
+ "punpcklbw 1(%1,%3), %%mm3 \n\t"
+ "lea (%1,%3,2), %1 \n\t"
+ "movq %%mm1, %%mm2 \n\t"
+ "movq %%mm3, %%mm4 \n\t"
+ "pmaddubsw %%mm7, %%mm0 \n\t"
+ "pmaddubsw %%mm6, %%mm1 \n\t"
+ "pmaddubsw %%mm7, %%mm2 \n\t"
+ "pmaddubsw %%mm6, %%mm3 \n\t"
+ "paddw %%mm5, %%mm0 \n\t"
+ "paddw %%mm5, %%mm2 \n\t"
+ "paddw %%mm0, %%mm1 \n\t"
+ "paddw %%mm2, %%mm3 \n\t"
+ "movq %%mm4, %%mm0 \n\t"
+ "psrlw $6, %%mm1 \n\t"
+ "psrlw $6, %%mm3 \n\t"
+ "packuswb %%mm1, %%mm1 \n\t"
+ "packuswb %%mm3, %%mm3 \n\t"
+ AVG_OP("pavgb (%0), %%mm1 \n\t")
+ AVG_OP("pavgb (%0,%3), %%mm3 \n\t")
+ "movd %%mm1, (%0)\n\t"
+ "movd %%mm3, (%0,%3)\n\t"
+ "sub $2, %2 \n\t"
+ "lea (%0,%3,2), %0 \n\t"
+ "jg 1b \n\t"
+ :"+r"(dst), "+r"(src), "+r"(h)
+ :"r"((x86_reg)stride)
+ );
+}
+
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/dsputil_mmx.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/dsputil_mmx.c
new file mode 100644
index 000000000..95cd1fe99
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/dsputil_mmx.c
@@ -0,0 +1,2938 @@
+/*
+ * MMX optimized DSP utils
+ * Copyright (c) 2000, 2001 Fabrice Bellard
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
+ */
+
+#include "libavutil/x86_cpu.h"
+#include "libavcodec/dsputil.h"
+#include "libavcodec/mpegvideo.h"
+#include "libavcodec/simple_idct.h"
+#include "dsputil_mmx.h"
+#include "vp3dsp_mmx.h"
+#include "vp3dsp_sse2.h"
+#include "vp6dsp_mmx.h"
+#include "vp6dsp_sse2.h"
+#include "idct_xvid.h"
+
+//#undef NDEBUG
+//#include <assert.h>
+
+int mm_flags; /* multimedia extension flags */
+
+/* pixel operations */
+DECLARE_ALIGNED_8 (const uint64_t, ff_bone) = 0x0101010101010101ULL;
+DECLARE_ALIGNED_8 (const uint64_t, ff_wtwo) = 0x0002000200020002ULL;
+
+DECLARE_ALIGNED_16(const uint64_t, ff_pdw_80000000)[2] =
+{0x8000000080000000ULL, 0x8000000080000000ULL};
+
+DECLARE_ALIGNED_8 (const uint64_t, ff_pw_3 ) = 0x0003000300030003ULL;
+DECLARE_ALIGNED_8 (const uint64_t, ff_pw_4 ) = 0x0004000400040004ULL;
+DECLARE_ALIGNED_16(const xmm_reg, ff_pw_5 ) = {0x0005000500050005ULL, 0x0005000500050005ULL};
+DECLARE_ALIGNED_16(const xmm_reg, ff_pw_8 ) = {0x0008000800080008ULL, 0x0008000800080008ULL};
+DECLARE_ALIGNED_8 (const uint64_t, ff_pw_15 ) = 0x000F000F000F000FULL;
+DECLARE_ALIGNED_16(const xmm_reg, ff_pw_16 ) = {0x0010001000100010ULL, 0x0010001000100010ULL};
+DECLARE_ALIGNED_8 (const uint64_t, ff_pw_20 ) = 0x0014001400140014ULL;
+DECLARE_ALIGNED_16(const xmm_reg, ff_pw_28 ) = {0x001C001C001C001CULL, 0x001C001C001C001CULL};
+DECLARE_ALIGNED_16(const xmm_reg, ff_pw_32 ) = {0x0020002000200020ULL, 0x0020002000200020ULL};
+DECLARE_ALIGNED_8 (const uint64_t, ff_pw_42 ) = 0x002A002A002A002AULL;
+DECLARE_ALIGNED_16(const xmm_reg, ff_pw_64 ) = {0x0040004000400040ULL, 0x0040004000400040ULL};
+DECLARE_ALIGNED_8 (const uint64_t, ff_pw_96 ) = 0x0060006000600060ULL;
+DECLARE_ALIGNED_8 (const uint64_t, ff_pw_128) = 0x0080008000800080ULL;
+DECLARE_ALIGNED_8 (const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL;
+
+DECLARE_ALIGNED_8 (const uint64_t, ff_pb_1 ) = 0x0101010101010101ULL;
+DECLARE_ALIGNED_8 (const uint64_t, ff_pb_3 ) = 0x0303030303030303ULL;
+DECLARE_ALIGNED_8 (const uint64_t, ff_pb_7 ) = 0x0707070707070707ULL;
+DECLARE_ALIGNED_8 (const uint64_t, ff_pb_1F ) = 0x1F1F1F1F1F1F1F1FULL;
+DECLARE_ALIGNED_8 (const uint64_t, ff_pb_3F ) = 0x3F3F3F3F3F3F3F3FULL;
+DECLARE_ALIGNED_8 (const uint64_t, ff_pb_81 ) = 0x8181818181818181ULL;
+DECLARE_ALIGNED_8 (const uint64_t, ff_pb_A1 ) = 0xA1A1A1A1A1A1A1A1ULL;
+DECLARE_ALIGNED_8 (const uint64_t, ff_pb_FC ) = 0xFCFCFCFCFCFCFCFCULL;
+
+DECLARE_ALIGNED_16(const double, ff_pd_1)[2] = { 1.0, 1.0 };
+DECLARE_ALIGNED_16(const double, ff_pd_2)[2] = { 2.0, 2.0 };
+
+#define JUMPALIGN() __asm__ volatile (ASMALIGN(3)::)
+#define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%" #regd ", %%" #regd ::)
+
+#define MOVQ_BFE(regd) \
+ __asm__ volatile ( \
+ "pcmpeqd %%" #regd ", %%" #regd " \n\t"\
+ "paddb %%" #regd ", %%" #regd " \n\t" ::)
+
+#ifndef PIC
+#define MOVQ_BONE(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_bone))
+#define MOVQ_WTWO(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_wtwo))
+#else
+// for shared libraries it is better to access constants this way
+// pcmpeqd -> -1
+#define MOVQ_BONE(regd) \
+ __asm__ volatile ( \
+ "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
+ "psrlw $15, %%" #regd " \n\t" \
+ "packuswb %%" #regd ", %%" #regd " \n\t" ::)
+
+#define MOVQ_WTWO(regd) \
+ __asm__ volatile ( \
+ "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
+ "psrlw $15, %%" #regd " \n\t" \
+ "psllw $1, %%" #regd " \n\t"::)
+
+#endif
+
+// using regr as temporary and for the output result
+// the first argument is left unmodified and the second is trashed
+// regfe is supposed to contain 0xfefefefefefefefe
+#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
+ "movq " #rega ", " #regr " \n\t"\
+ "pand " #regb ", " #regr " \n\t"\
+ "pxor " #rega ", " #regb " \n\t"\
+ "pand " #regfe "," #regb " \n\t"\
+ "psrlq $1, " #regb " \n\t"\
+ "paddb " #regb ", " #regr " \n\t"
+
+#define PAVGB_MMX(rega, regb, regr, regfe) \
+ "movq " #rega ", " #regr " \n\t"\
+ "por " #regb ", " #regr " \n\t"\
+ "pxor " #rega ", " #regb " \n\t"\
+ "pand " #regfe "," #regb " \n\t"\
+ "psrlq $1, " #regb " \n\t"\
+ "psubb " #regb ", " #regr " \n\t"
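+
+/* The two macros above are the classic packed-byte average identities,
+ * as a scalar sketch:
+ *   rounded down (NO_RND): (a & b) + (((a ^ b) & 0xfe) >> 1)
+ *   rounded up:            (a | b) - (((a ^ b) & 0xfe) >> 1)
+ * Masking with 0xfe (regfe) before the 64-bit shift keeps the LSB of each
+ * byte from bleeding into the MSB of its lower neighbour. */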
+
+// mm6 is supposed to contain 0xfefefefefefefefe
+#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
+ "movq " #rega ", " #regr " \n\t"\
+ "movq " #regc ", " #regp " \n\t"\
+ "pand " #regb ", " #regr " \n\t"\
+ "pand " #regd ", " #regp " \n\t"\
+ "pxor " #rega ", " #regb " \n\t"\
+ "pxor " #regc ", " #regd " \n\t"\
+ "pand %%mm6, " #regb " \n\t"\
+ "pand %%mm6, " #regd " \n\t"\
+ "psrlq $1, " #regb " \n\t"\
+ "psrlq $1, " #regd " \n\t"\
+ "paddb " #regb ", " #regr " \n\t"\
+ "paddb " #regd ", " #regp " \n\t"
+
+#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
+ "movq " #rega ", " #regr " \n\t"\
+ "movq " #regc ", " #regp " \n\t"\
+ "por " #regb ", " #regr " \n\t"\
+ "por " #regd ", " #regp " \n\t"\
+ "pxor " #rega ", " #regb " \n\t"\
+ "pxor " #regc ", " #regd " \n\t"\
+ "pand %%mm6, " #regb " \n\t"\
+ "pand %%mm6, " #regd " \n\t"\
+ "psrlq $1, " #regd " \n\t"\
+ "psrlq $1, " #regb " \n\t"\
+ "psubb " #regb ", " #regr " \n\t"\
+ "psubb " #regd ", " #regp " \n\t"
+
+/***********************************/
+/* MMX no rounding */
+#define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
+#define SET_RND MOVQ_WONE
+#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
+#define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e)
+#define OP_AVG(a, b, c, e) PAVGB_MMX(a, b, c, e)
+
+#include "dsputil_mmx_rnd_template.c"
+
+#undef DEF
+#undef SET_RND
+#undef PAVGBP
+#undef PAVGB
+/***********************************/
+/* MMX rounding */
+
+#define DEF(x, y) x ## _ ## y ##_mmx
+#define SET_RND MOVQ_WTWO
+#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
+#define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e)
+
+#include "dsputil_mmx_rnd_template.c"
+
+#undef DEF
+#undef SET_RND
+#undef PAVGBP
+#undef PAVGB
+#undef OP_AVG
+
+/***********************************/
+/* 3Dnow specific */
+
+#define DEF(x) x ## _3dnow
+#define PAVGB "pavgusb"
+#define OP_AVG PAVGB
+
+#include "dsputil_mmx_avg_template.c"
+
+#undef DEF
+#undef PAVGB
+#undef OP_AVG
+
+/***********************************/
+/* MMX2 specific */
+
+#define DEF(x) x ## _mmx2
+
+/* Introduced only in MMX2 set */
+#define PAVGB "pavgb"
+#define OP_AVG PAVGB
+
+#include "dsputil_mmx_avg_template.c"
+
+#undef DEF
+#undef PAVGB
+#undef OP_AVG
+
+#define put_no_rnd_pixels16_mmx put_pixels16_mmx
+#define put_no_rnd_pixels8_mmx put_pixels8_mmx
+#define put_pixels16_mmx2 put_pixels16_mmx
+#define put_pixels8_mmx2 put_pixels8_mmx
+#define put_pixels4_mmx2 put_pixels4_mmx
+#define put_no_rnd_pixels16_mmx2 put_no_rnd_pixels16_mmx
+#define put_no_rnd_pixels8_mmx2 put_no_rnd_pixels8_mmx
+#define put_pixels16_3dnow put_pixels16_mmx
+#define put_pixels8_3dnow put_pixels8_mmx
+#define put_pixels4_3dnow put_pixels4_mmx
+#define put_no_rnd_pixels16_3dnow put_no_rnd_pixels16_mmx
+#define put_no_rnd_pixels8_3dnow put_no_rnd_pixels8_mmx
+
+/***********************************/
+/* standard MMX */
+
+void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
+{
+ const DCTELEM *p;
+ uint8_t *pix;
+
+ /* read the pixels */
+ p = block;
+ pix = pixels;
+ /* unrolled loop */
+ __asm__ volatile(
+ "movq %3, %%mm0 \n\t"
+ "movq 8%3, %%mm1 \n\t"
+ "movq 16%3, %%mm2 \n\t"
+ "movq 24%3, %%mm3 \n\t"
+ "movq 32%3, %%mm4 \n\t"
+ "movq 40%3, %%mm5 \n\t"
+ "movq 48%3, %%mm6 \n\t"
+ "movq 56%3, %%mm7 \n\t"
+ "packuswb %%mm1, %%mm0 \n\t"
+ "packuswb %%mm3, %%mm2 \n\t"
+ "packuswb %%mm5, %%mm4 \n\t"
+ "packuswb %%mm7, %%mm6 \n\t"
+ "movq %%mm0, (%0) \n\t"
+ "movq %%mm2, (%0, %1) \n\t"
+ "movq %%mm4, (%0, %1, 2) \n\t"
+ "movq %%mm6, (%0, %2) \n\t"
+ ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "m"(*p)
+ :"memory");
+ pix += line_size*4;
+ p += 32;
+
+    // if this were an exact copy of the code above, the compiler
+    // would generate some very strange code, thus we use "r" here
+ __asm__ volatile(
+ "movq (%3), %%mm0 \n\t"
+ "movq 8(%3), %%mm1 \n\t"
+ "movq 16(%3), %%mm2 \n\t"
+ "movq 24(%3), %%mm3 \n\t"
+ "movq 32(%3), %%mm4 \n\t"
+ "movq 40(%3), %%mm5 \n\t"
+ "movq 48(%3), %%mm6 \n\t"
+ "movq 56(%3), %%mm7 \n\t"
+ "packuswb %%mm1, %%mm0 \n\t"
+ "packuswb %%mm3, %%mm2 \n\t"
+ "packuswb %%mm5, %%mm4 \n\t"
+ "packuswb %%mm7, %%mm6 \n\t"
+ "movq %%mm0, (%0) \n\t"
+ "movq %%mm2, (%0, %1) \n\t"
+ "movq %%mm4, (%0, %1, 2) \n\t"
+ "movq %%mm6, (%0, %2) \n\t"
+ ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "r"(p)
+ :"memory");
+}
+
+DECLARE_ASM_CONST(8, uint8_t, ff_vector128)[8] =
+ { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 };
+
+#define put_signed_pixels_clamped_mmx_half(off) \
+ "movq "#off"(%2), %%mm1 \n\t"\
+ "movq 16+"#off"(%2), %%mm2 \n\t"\
+ "movq 32+"#off"(%2), %%mm3 \n\t"\
+ "movq 48+"#off"(%2), %%mm4 \n\t"\
+ "packsswb 8+"#off"(%2), %%mm1 \n\t"\
+ "packsswb 24+"#off"(%2), %%mm2 \n\t"\
+ "packsswb 40+"#off"(%2), %%mm3 \n\t"\
+ "packsswb 56+"#off"(%2), %%mm4 \n\t"\
+ "paddb %%mm0, %%mm1 \n\t"\
+ "paddb %%mm0, %%mm2 \n\t"\
+ "paddb %%mm0, %%mm3 \n\t"\
+ "paddb %%mm0, %%mm4 \n\t"\
+ "movq %%mm1, (%0) \n\t"\
+ "movq %%mm2, (%0, %3) \n\t"\
+ "movq %%mm3, (%0, %3, 2) \n\t"\
+ "movq %%mm4, (%0, %1) \n\t"
+
+void put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
+{
+ x86_reg line_skip = line_size;
+ x86_reg line_skip3;
+
+ __asm__ volatile (
+ "movq "MANGLE(ff_vector128)", %%mm0 \n\t"
+ "lea (%3, %3, 2), %1 \n\t"
+ put_signed_pixels_clamped_mmx_half(0)
+ "lea (%0, %3, 4), %0 \n\t"
+ put_signed_pixels_clamped_mmx_half(64)
+ :"+&r" (pixels), "=&r" (line_skip3)
+ :"r" (block), "r"(line_skip)
+ :"memory");
+}
+
+void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
+{
+ const DCTELEM *p;
+ uint8_t *pix;
+ int i;
+
+ /* read the pixels */
+ p = block;
+ pix = pixels;
+ MOVQ_ZERO(mm7);
+ i = 4;
+ do {
+ __asm__ volatile(
+ "movq (%2), %%mm0 \n\t"
+ "movq 8(%2), %%mm1 \n\t"
+ "movq 16(%2), %%mm2 \n\t"
+ "movq 24(%2), %%mm3 \n\t"
+ "movq %0, %%mm4 \n\t"
+ "movq %1, %%mm6 \n\t"
+ "movq %%mm4, %%mm5 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "punpckhbw %%mm7, %%mm5 \n\t"
+ "paddsw %%mm4, %%mm0 \n\t"
+ "paddsw %%mm5, %%mm1 \n\t"
+ "movq %%mm6, %%mm5 \n\t"
+ "punpcklbw %%mm7, %%mm6 \n\t"
+ "punpckhbw %%mm7, %%mm5 \n\t"
+ "paddsw %%mm6, %%mm2 \n\t"
+ "paddsw %%mm5, %%mm3 \n\t"
+ "packuswb %%mm1, %%mm0 \n\t"
+ "packuswb %%mm3, %%mm2 \n\t"
+ "movq %%mm0, %0 \n\t"
+ "movq %%mm2, %1 \n\t"
+ :"+m"(*pix), "+m"(*(pix+line_size))
+ :"r"(p)
+ :"memory");
+ pix += line_size*2;
+ p += 16;
+ } while (--i);
+}
+
+static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+{
+ __asm__ volatile(
+ "lea (%3, %3), %%"REG_a" \n\t"
+ ASMALIGN(3)
+ "1: \n\t"
+ "movd (%1), %%mm0 \n\t"
+ "movd (%1, %3), %%mm1 \n\t"
+ "movd %%mm0, (%2) \n\t"
+ "movd %%mm1, (%2, %3) \n\t"
+ "add %%"REG_a", %1 \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "movd (%1), %%mm0 \n\t"
+ "movd (%1, %3), %%mm1 \n\t"
+ "movd %%mm0, (%2) \n\t"
+ "movd %%mm1, (%2, %3) \n\t"
+ "add %%"REG_a", %1 \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+ : "+g"(h), "+r" (pixels), "+r" (block)
+ : "r"((x86_reg)line_size)
+ : "%"REG_a, "memory"
+ );
+}
+
+static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+{
+ __asm__ volatile(
+ "lea (%3, %3), %%"REG_a" \n\t"
+ ASMALIGN(3)
+ "1: \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm1, (%2, %3) \n\t"
+ "add %%"REG_a", %1 \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm1, (%2, %3) \n\t"
+ "add %%"REG_a", %1 \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+ : "+g"(h), "+r" (pixels), "+r" (block)
+ : "r"((x86_reg)line_size)
+ : "%"REG_a, "memory"
+ );
+}
+
+static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+{
+ __asm__ volatile(
+ "lea (%3, %3), %%"REG_a" \n\t"
+ ASMALIGN(3)
+ "1: \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq 8(%1), %%mm4 \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq 8(%1, %3), %%mm5 \n\t"
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm4, 8(%2) \n\t"
+ "movq %%mm1, (%2, %3) \n\t"
+ "movq %%mm5, 8(%2, %3) \n\t"
+ "add %%"REG_a", %1 \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq 8(%1), %%mm4 \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq 8(%1, %3), %%mm5 \n\t"
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm4, 8(%2) \n\t"
+ "movq %%mm1, (%2, %3) \n\t"
+ "movq %%mm5, 8(%2, %3) \n\t"
+ "add %%"REG_a", %1 \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+ : "+g"(h), "+r" (pixels), "+r" (block)
+ : "r"((x86_reg)line_size)
+ : "%"REG_a, "memory"
+ );
+}
+
+static void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+{
+ __asm__ volatile(
+ "1: \n\t"
+ "movdqu (%1), %%xmm0 \n\t"
+ "movdqu (%1,%3), %%xmm1 \n\t"
+ "movdqu (%1,%3,2), %%xmm2 \n\t"
+ "movdqu (%1,%4), %%xmm3 \n\t"
+ "movdqa %%xmm0, (%2) \n\t"
+ "movdqa %%xmm1, (%2,%3) \n\t"
+ "movdqa %%xmm2, (%2,%3,2) \n\t"
+ "movdqa %%xmm3, (%2,%4) \n\t"
+ "subl $4, %0 \n\t"
+ "lea (%1,%3,4), %1 \n\t"
+ "lea (%2,%3,4), %2 \n\t"
+ "jnz 1b \n\t"
+ : "+g"(h), "+r" (pixels), "+r" (block)
+ : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
+ : "memory"
+ );
+}
+
+static void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+{
+ __asm__ volatile(
+ "1: \n\t"
+ "movdqu (%1), %%xmm0 \n\t"
+ "movdqu (%1,%3), %%xmm1 \n\t"
+ "movdqu (%1,%3,2), %%xmm2 \n\t"
+ "movdqu (%1,%4), %%xmm3 \n\t"
+ "pavgb (%2), %%xmm0 \n\t"
+ "pavgb (%2,%3), %%xmm1 \n\t"
+ "pavgb (%2,%3,2), %%xmm2 \n\t"
+ "pavgb (%2,%4), %%xmm3 \n\t"
+ "movdqa %%xmm0, (%2) \n\t"
+ "movdqa %%xmm1, (%2,%3) \n\t"
+ "movdqa %%xmm2, (%2,%3,2) \n\t"
+ "movdqa %%xmm3, (%2,%4) \n\t"
+ "subl $4, %0 \n\t"
+ "lea (%1,%3,4), %1 \n\t"
+ "lea (%2,%3,4), %2 \n\t"
+ "jnz 1b \n\t"
+ : "+g"(h), "+r" (pixels), "+r" (block)
+ : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
+ : "memory"
+ );
+}
+
+#define CLEAR_BLOCKS(name,n) \
+static void name(DCTELEM *blocks)\
+{\
+ __asm__ volatile(\
+ "pxor %%mm7, %%mm7 \n\t"\
+ "mov %1, %%"REG_a" \n\t"\
+ "1: \n\t"\
+ "movq %%mm7, (%0, %%"REG_a") \n\t"\
+ "movq %%mm7, 8(%0, %%"REG_a") \n\t"\
+ "movq %%mm7, 16(%0, %%"REG_a") \n\t"\
+ "movq %%mm7, 24(%0, %%"REG_a") \n\t"\
+ "add $32, %%"REG_a" \n\t"\
+ " js 1b \n\t"\
+ : : "r" (((uint8_t *)blocks)+128*n),\
+ "i" (-128*n)\
+ : "%"REG_a\
+ );\
+}
+CLEAR_BLOCKS(clear_blocks_mmx, 6)
+CLEAR_BLOCKS(clear_block_mmx, 1)
+
+static void clear_block_sse(DCTELEM *block)
+{
+ __asm__ volatile(
+ "xorps %%xmm0, %%xmm0 \n"
+ "movaps %%xmm0, (%0) \n"
+ "movaps %%xmm0, 16(%0) \n"
+ "movaps %%xmm0, 32(%0) \n"
+ "movaps %%xmm0, 48(%0) \n"
+ "movaps %%xmm0, 64(%0) \n"
+ "movaps %%xmm0, 80(%0) \n"
+ "movaps %%xmm0, 96(%0) \n"
+ "movaps %%xmm0, 112(%0) \n"
+ :: "r"(block)
+ : "memory"
+ );
+}
+
+static void clear_blocks_sse(DCTELEM *blocks)
+{
+ __asm__ volatile(
+ "xorps %%xmm0, %%xmm0 \n"
+ "mov %1, %%"REG_a" \n"
+ "1: \n"
+ "movaps %%xmm0, (%0, %%"REG_a") \n"
+ "movaps %%xmm0, 16(%0, %%"REG_a") \n"
+ "movaps %%xmm0, 32(%0, %%"REG_a") \n"
+ "movaps %%xmm0, 48(%0, %%"REG_a") \n"
+ "movaps %%xmm0, 64(%0, %%"REG_a") \n"
+ "movaps %%xmm0, 80(%0, %%"REG_a") \n"
+ "movaps %%xmm0, 96(%0, %%"REG_a") \n"
+ "movaps %%xmm0, 112(%0, %%"REG_a") \n"
+ "add $128, %%"REG_a" \n"
+ " js 1b \n"
+ : : "r" (((uint8_t *)blocks)+128*6),
+ "i" (-128*6)
+ : "%"REG_a
+ );
+}
+
+static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
+ x86_reg i=0;
+ __asm__ volatile(
+ "jmp 2f \n\t"
+ "1: \n\t"
+ "movq (%1, %0), %%mm0 \n\t"
+ "movq (%2, %0), %%mm1 \n\t"
+ "paddb %%mm0, %%mm1 \n\t"
+ "movq %%mm1, (%2, %0) \n\t"
+ "movq 8(%1, %0), %%mm0 \n\t"
+ "movq 8(%2, %0), %%mm1 \n\t"
+ "paddb %%mm0, %%mm1 \n\t"
+ "movq %%mm1, 8(%2, %0) \n\t"
+ "add $16, %0 \n\t"
+ "2: \n\t"
+ "cmp %3, %0 \n\t"
+ " js 1b \n\t"
+ : "+r" (i)
+ : "r"(src), "r"(dst), "r"((x86_reg)w-15)
+ );
+ for(; i<w; i++)
+ dst[i+0] += src[i+0];
+}
+
+static void add_bytes_l2_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
+ x86_reg i=0;
+ __asm__ volatile(
+ "jmp 2f \n\t"
+ "1: \n\t"
+ "movq (%2, %0), %%mm0 \n\t"
+ "movq 8(%2, %0), %%mm1 \n\t"
+ "paddb (%3, %0), %%mm0 \n\t"
+ "paddb 8(%3, %0), %%mm1 \n\t"
+ "movq %%mm0, (%1, %0) \n\t"
+ "movq %%mm1, 8(%1, %0) \n\t"
+ "add $16, %0 \n\t"
+ "2: \n\t"
+ "cmp %4, %0 \n\t"
+ " js 1b \n\t"
+ : "+r" (i)
+ : "r"(dst), "r"(src1), "r"(src2), "r"((x86_reg)w-15)
+ );
+ for(; i<w; i++)
+ dst[i] = src1[i] + src2[i];
+}
+
+#if HAVE_7REGS && HAVE_TEN_OPERANDS
+static void add_hfyu_median_prediction_cmov(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top) {
+ x86_reg w2 = -w;
+ x86_reg x;
+ int l = *left & 0xff;
+ int tl = *left_top & 0xff;
+ int t;
+ __asm__ volatile(
+ "mov %7, %3 \n"
+ "1: \n"
+ "movzx (%3,%4), %2 \n"
+ "mov %2, %k3 \n"
+ "sub %b1, %b3 \n"
+ "add %b0, %b3 \n"
+ "mov %2, %1 \n"
+ "cmp %0, %2 \n"
+ "cmovg %0, %2 \n"
+ "cmovg %1, %0 \n"
+ "cmp %k3, %0 \n"
+ "cmovg %k3, %0 \n"
+ "mov %7, %3 \n"
+ "cmp %2, %0 \n"
+ "cmovl %2, %0 \n"
+ "add (%6,%4), %b0 \n"
+ "mov %b0, (%5,%4) \n"
+ "inc %4 \n"
+ "jl 1b \n"
+ :"+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
+ :"r"(dst+w), "r"(diff+w), "rm"(top+w)
+ );
+ *left = l;
+ *left_top = tl;
+}
+#endif
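+
+/* Scalar reference of the same median prediction (a sketch, close to the
+ * generic C version in dsputil.c):
+ *
+ *     for (i = 0; i < w; i++) {
+ *         l      = mid_pred(l, top[i], (l + top[i] - tl) & 0xFF) + diff[i];
+ *         tl     = top[i];
+ *         dst[i] = l;
+ *     }
+ */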
+
+#define H263_LOOP_FILTER \
+ "pxor %%mm7, %%mm7 \n\t"\
+ "movq %0, %%mm0 \n\t"\
+ "movq %0, %%mm1 \n\t"\
+ "movq %3, %%mm2 \n\t"\
+ "movq %3, %%mm3 \n\t"\
+ "punpcklbw %%mm7, %%mm0 \n\t"\
+ "punpckhbw %%mm7, %%mm1 \n\t"\
+ "punpcklbw %%mm7, %%mm2 \n\t"\
+ "punpckhbw %%mm7, %%mm3 \n\t"\
+ "psubw %%mm2, %%mm0 \n\t"\
+ "psubw %%mm3, %%mm1 \n\t"\
+ "movq %1, %%mm2 \n\t"\
+ "movq %1, %%mm3 \n\t"\
+ "movq %2, %%mm4 \n\t"\
+ "movq %2, %%mm5 \n\t"\
+ "punpcklbw %%mm7, %%mm2 \n\t"\
+ "punpckhbw %%mm7, %%mm3 \n\t"\
+ "punpcklbw %%mm7, %%mm4 \n\t"\
+ "punpckhbw %%mm7, %%mm5 \n\t"\
+ "psubw %%mm2, %%mm4 \n\t"\
+ "psubw %%mm3, %%mm5 \n\t"\
+ "psllw $2, %%mm4 \n\t"\
+ "psllw $2, %%mm5 \n\t"\
+ "paddw %%mm0, %%mm4 \n\t"\
+ "paddw %%mm1, %%mm5 \n\t"\
+ "pxor %%mm6, %%mm6 \n\t"\
+ "pcmpgtw %%mm4, %%mm6 \n\t"\
+ "pcmpgtw %%mm5, %%mm7 \n\t"\
+ "pxor %%mm6, %%mm4 \n\t"\
+ "pxor %%mm7, %%mm5 \n\t"\
+ "psubw %%mm6, %%mm4 \n\t"\
+ "psubw %%mm7, %%mm5 \n\t"\
+ "psrlw $3, %%mm4 \n\t"\
+ "psrlw $3, %%mm5 \n\t"\
+ "packuswb %%mm5, %%mm4 \n\t"\
+ "packsswb %%mm7, %%mm6 \n\t"\
+ "pxor %%mm7, %%mm7 \n\t"\
+ "movd %4, %%mm2 \n\t"\
+ "punpcklbw %%mm2, %%mm2 \n\t"\
+ "punpcklbw %%mm2, %%mm2 \n\t"\
+ "punpcklbw %%mm2, %%mm2 \n\t"\
+ "psubusb %%mm4, %%mm2 \n\t"\
+ "movq %%mm2, %%mm3 \n\t"\
+ "psubusb %%mm4, %%mm3 \n\t"\
+ "psubb %%mm3, %%mm2 \n\t"\
+ "movq %1, %%mm3 \n\t"\
+ "movq %2, %%mm4 \n\t"\
+ "pxor %%mm6, %%mm3 \n\t"\
+ "pxor %%mm6, %%mm4 \n\t"\
+ "paddusb %%mm2, %%mm3 \n\t"\
+ "psubusb %%mm2, %%mm4 \n\t"\
+ "pxor %%mm6, %%mm3 \n\t"\
+ "pxor %%mm6, %%mm4 \n\t"\
+ "paddusb %%mm2, %%mm2 \n\t"\
+ "packsswb %%mm1, %%mm0 \n\t"\
+ "pcmpgtb %%mm0, %%mm7 \n\t"\
+ "pxor %%mm7, %%mm0 \n\t"\
+ "psubb %%mm7, %%mm0 \n\t"\
+ "movq %%mm0, %%mm1 \n\t"\
+ "psubusb %%mm2, %%mm0 \n\t"\
+ "psubb %%mm0, %%mm1 \n\t"\
+ "pand %5, %%mm1 \n\t"\
+ "psrlw $2, %%mm1 \n\t"\
+ "pxor %%mm7, %%mm1 \n\t"\
+ "psubb %%mm7, %%mm1 \n\t"\
+ "movq %0, %%mm5 \n\t"\
+ "movq %3, %%mm6 \n\t"\
+ "psubb %%mm1, %%mm5 \n\t"\
+ "paddb %%mm1, %%mm6 \n\t"
+
+static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
+ if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
+ const int strength= ff_h263_loop_filter_strength[qscale];
+
+ __asm__ volatile(
+
+ H263_LOOP_FILTER
+
+ "movq %%mm3, %1 \n\t"
+ "movq %%mm4, %2 \n\t"
+ "movq %%mm5, %0 \n\t"
+ "movq %%mm6, %3 \n\t"
+ : "+m" (*(uint64_t*)(src - 2*stride)),
+ "+m" (*(uint64_t*)(src - 1*stride)),
+ "+m" (*(uint64_t*)(src + 0*stride)),
+ "+m" (*(uint64_t*)(src + 1*stride))
+ : "g" (2*strength), "m"(ff_pb_FC)
+ );
+ }
+}
+
+static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){
+ __asm__ volatile( //FIXME could save 1 instruction if done as 8x4 ...
+ "movd %4, %%mm0 \n\t"
+ "movd %5, %%mm1 \n\t"
+ "movd %6, %%mm2 \n\t"
+ "movd %7, %%mm3 \n\t"
+ "punpcklbw %%mm1, %%mm0 \n\t"
+ "punpcklbw %%mm3, %%mm2 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "punpcklwd %%mm2, %%mm0 \n\t"
+ "punpckhwd %%mm2, %%mm1 \n\t"
+ "movd %%mm0, %0 \n\t"
+ "punpckhdq %%mm0, %%mm0 \n\t"
+ "movd %%mm0, %1 \n\t"
+ "movd %%mm1, %2 \n\t"
+ "punpckhdq %%mm1, %%mm1 \n\t"
+ "movd %%mm1, %3 \n\t"
+
+ : "=m" (*(uint32_t*)(dst + 0*dst_stride)),
+ "=m" (*(uint32_t*)(dst + 1*dst_stride)),
+ "=m" (*(uint32_t*)(dst + 2*dst_stride)),
+ "=m" (*(uint32_t*)(dst + 3*dst_stride))
+ : "m" (*(uint32_t*)(src + 0*src_stride)),
+ "m" (*(uint32_t*)(src + 1*src_stride)),
+ "m" (*(uint32_t*)(src + 2*src_stride)),
+ "m" (*(uint32_t*)(src + 3*src_stride))
+ );
+}
+
+static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
+ if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
+ const int strength= ff_h263_loop_filter_strength[qscale];
+ DECLARE_ALIGNED(8, uint64_t, temp)[4];
+ uint8_t *btemp= (uint8_t*)temp;
+
+ src -= 2;
+
+ transpose4x4(btemp , src , 8, stride);
+ transpose4x4(btemp+4, src + 4*stride, 8, stride);
+ __asm__ volatile(
+ H263_LOOP_FILTER // 5 3 4 6
+
+ : "+m" (temp[0]),
+ "+m" (temp[1]),
+ "+m" (temp[2]),
+ "+m" (temp[3])
+ : "g" (2*strength), "m"(ff_pb_FC)
+ );
+
+ __asm__ volatile(
+ "movq %%mm5, %%mm1 \n\t"
+ "movq %%mm4, %%mm0 \n\t"
+ "punpcklbw %%mm3, %%mm5 \n\t"
+ "punpcklbw %%mm6, %%mm4 \n\t"
+ "punpckhbw %%mm3, %%mm1 \n\t"
+ "punpckhbw %%mm6, %%mm0 \n\t"
+ "movq %%mm5, %%mm3 \n\t"
+ "movq %%mm1, %%mm6 \n\t"
+ "punpcklwd %%mm4, %%mm5 \n\t"
+ "punpcklwd %%mm0, %%mm1 \n\t"
+ "punpckhwd %%mm4, %%mm3 \n\t"
+ "punpckhwd %%mm0, %%mm6 \n\t"
+ "movd %%mm5, (%0) \n\t"
+ "punpckhdq %%mm5, %%mm5 \n\t"
+ "movd %%mm5, (%0,%2) \n\t"
+ "movd %%mm3, (%0,%2,2) \n\t"
+ "punpckhdq %%mm3, %%mm3 \n\t"
+ "movd %%mm3, (%0,%3) \n\t"
+ "movd %%mm1, (%1) \n\t"
+ "punpckhdq %%mm1, %%mm1 \n\t"
+ "movd %%mm1, (%1,%2) \n\t"
+ "movd %%mm6, (%1,%2,2) \n\t"
+ "punpckhdq %%mm6, %%mm6 \n\t"
+ "movd %%mm6, (%1,%3) \n\t"
+ :: "r" (src),
+ "r" (src + 4*stride),
+ "r" ((x86_reg) stride ),
+ "r" ((x86_reg)(3*stride))
+ );
+ }
+}
+
+/* Draw the edges of width 'w' of an image of size width x height.
+   This MMX version can only handle w==8 or w==16. */
+static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height, int w)
+{
+ uint8_t *ptr, *last_line;
+ int i;
+
+ last_line = buf + (height - 1) * wrap;
+ /* left and right */
+ ptr = buf;
+ if(w==8)
+ {
+ __asm__ volatile(
+ "1: \n\t"
+ "movd (%0), %%mm0 \n\t"
+ "punpcklbw %%mm0, %%mm0 \n\t"
+ "punpcklwd %%mm0, %%mm0 \n\t"
+ "punpckldq %%mm0, %%mm0 \n\t"
+ "movq %%mm0, -8(%0) \n\t"
+ "movq -8(%0, %2), %%mm1 \n\t"
+ "punpckhbw %%mm1, %%mm1 \n\t"
+ "punpckhwd %%mm1, %%mm1 \n\t"
+ "punpckhdq %%mm1, %%mm1 \n\t"
+ "movq %%mm1, (%0, %2) \n\t"
+ "add %1, %0 \n\t"
+ "cmp %3, %0 \n\t"
+ " jb 1b \n\t"
+ : "+r" (ptr)
+ : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
+ );
+ }
+ else
+ {
+ __asm__ volatile(
+ "1: \n\t"
+ "movd (%0), %%mm0 \n\t"
+ "punpcklbw %%mm0, %%mm0 \n\t"
+ "punpcklwd %%mm0, %%mm0 \n\t"
+ "punpckldq %%mm0, %%mm0 \n\t"
+ "movq %%mm0, -8(%0) \n\t"
+ "movq %%mm0, -16(%0) \n\t"
+ "movq -8(%0, %2), %%mm1 \n\t"
+ "punpckhbw %%mm1, %%mm1 \n\t"
+ "punpckhwd %%mm1, %%mm1 \n\t"
+ "punpckhdq %%mm1, %%mm1 \n\t"
+ "movq %%mm1, (%0, %2) \n\t"
+ "movq %%mm1, 8(%0, %2) \n\t"
+ "add %1, %0 \n\t"
+ "cmp %3, %0 \n\t"
+ " jb 1b \n\t"
+ : "+r" (ptr)
+ : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
+ );
+ }
+
+ for(i=0;i<w;i+=4) {
+ /* top and bottom (and hopefully also the corners) */
+ ptr= buf - (i + 1) * wrap - w;
+ __asm__ volatile(
+ "1: \n\t"
+ "movq (%1, %0), %%mm0 \n\t"
+ "movq %%mm0, (%0) \n\t"
+ "movq %%mm0, (%0, %2) \n\t"
+ "movq %%mm0, (%0, %2, 2) \n\t"
+ "movq %%mm0, (%0, %3) \n\t"
+ "add $8, %0 \n\t"
+ "cmp %4, %0 \n\t"
+ " jb 1b \n\t"
+ : "+r" (ptr)
+ : "r" ((x86_reg)buf - (x86_reg)ptr - w), "r" ((x86_reg)-wrap), "r" ((x86_reg)-wrap*3), "r" (ptr+width+2*w)
+ );
+ ptr= last_line + (i + 1) * wrap - w;
+ __asm__ volatile(
+ "1: \n\t"
+ "movq (%1, %0), %%mm0 \n\t"
+ "movq %%mm0, (%0) \n\t"
+ "movq %%mm0, (%0, %2) \n\t"
+ "movq %%mm0, (%0, %2, 2) \n\t"
+ "movq %%mm0, (%0, %3) \n\t"
+ "add $8, %0 \n\t"
+ "cmp %4, %0 \n\t"
+ " jb 1b \n\t"
+ : "+r" (ptr)
+ : "r" ((x86_reg)last_line - (x86_reg)ptr - w), "r" ((x86_reg)wrap), "r" ((x86_reg)wrap*3), "r" (ptr+width+2*w)
+ );
+ }
+}
+
+#define PAETH(cpu, abs3)\
+static void add_png_paeth_prediction_##cpu(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)\
+{\
+ x86_reg i = -bpp;\
+ x86_reg end = w-3;\
+ __asm__ volatile(\
+ "pxor %%mm7, %%mm7 \n"\
+ "movd (%1,%0), %%mm0 \n"\
+ "movd (%2,%0), %%mm1 \n"\
+ "punpcklbw %%mm7, %%mm0 \n"\
+ "punpcklbw %%mm7, %%mm1 \n"\
+ "add %4, %0 \n"\
+ "1: \n"\
+ "movq %%mm1, %%mm2 \n"\
+ "movd (%2,%0), %%mm1 \n"\
+ "movq %%mm2, %%mm3 \n"\
+ "punpcklbw %%mm7, %%mm1 \n"\
+ "movq %%mm2, %%mm4 \n"\
+ "psubw %%mm1, %%mm3 \n"\
+ "psubw %%mm0, %%mm4 \n"\
+ "movq %%mm3, %%mm5 \n"\
+ "paddw %%mm4, %%mm5 \n"\
+ abs3\
+ "movq %%mm4, %%mm6 \n"\
+ "pminsw %%mm5, %%mm6 \n"\
+ "pcmpgtw %%mm6, %%mm3 \n"\
+ "pcmpgtw %%mm5, %%mm4 \n"\
+ "movq %%mm4, %%mm6 \n"\
+ "pand %%mm3, %%mm4 \n"\
+ "pandn %%mm3, %%mm6 \n"\
+ "pandn %%mm0, %%mm3 \n"\
+ "movd (%3,%0), %%mm0 \n"\
+ "pand %%mm1, %%mm6 \n"\
+ "pand %%mm4, %%mm2 \n"\
+ "punpcklbw %%mm7, %%mm0 \n"\
+ "movq %6, %%mm5 \n"\
+ "paddw %%mm6, %%mm0 \n"\
+ "paddw %%mm2, %%mm3 \n"\
+ "paddw %%mm3, %%mm0 \n"\
+ "pand %%mm5, %%mm0 \n"\
+ "movq %%mm0, %%mm3 \n"\
+ "packuswb %%mm3, %%mm3 \n"\
+ "movd %%mm3, (%1,%0) \n"\
+ "add %4, %0 \n"\
+ "cmp %5, %0 \n"\
+ "jle 1b \n"\
+ :"+r"(i)\
+ :"r"(dst), "r"(top), "r"(src), "r"((x86_reg)bpp), "g"(end),\
+ "m"(ff_pw_255)\
+ :"memory"\
+ );\
+}
+
+#define ABS3_MMX2\
+ "psubw %%mm5, %%mm7 \n"\
+ "pmaxsw %%mm7, %%mm5 \n"\
+ "pxor %%mm6, %%mm6 \n"\
+ "pxor %%mm7, %%mm7 \n"\
+ "psubw %%mm3, %%mm6 \n"\
+ "psubw %%mm4, %%mm7 \n"\
+ "pmaxsw %%mm6, %%mm3 \n"\
+ "pmaxsw %%mm7, %%mm4 \n"\
+ "pxor %%mm7, %%mm7 \n"
+
+#define ABS3_SSSE3\
+ "pabsw %%mm3, %%mm3 \n"\
+ "pabsw %%mm4, %%mm4 \n"\
+ "pabsw %%mm5, %%mm5 \n"
+
+PAETH(mmx2, ABS3_MMX2)
+#if HAVE_SSSE3
+PAETH(ssse3, ABS3_SSSE3)
+#endif
+
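+/* MPEG-4 quarter-pel lowpass filtering: each output pixel is
+   (20*x1 - 6*x2 + 3*x3 - x4 + rounder) >> 5, where x1..x4 are the sums of the
+   symmetric input pairs.  QPEL_V_LOW emits one such column step; QPEL_BASE
+   expands into the horizontal lowpass routines (MMX2 and 3DNow variants) and
+   QPEL_OP into the vertical lowpass plus the _mcXY_ sub-pel wrappers. */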
+#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
+ "paddw " #m4 ", " #m3 " \n\t" /* x1 */\
+ "movq "MANGLE(ff_pw_20)", %%mm4 \n\t" /* 20 */\
+ "pmullw " #m3 ", %%mm4 \n\t" /* 20x1 */\
+ "movq "#in7", " #m3 " \n\t" /* d */\
+ "movq "#in0", %%mm5 \n\t" /* D */\
+ "paddw " #m3 ", %%mm5 \n\t" /* x4 */\
+ "psubw %%mm5, %%mm4 \n\t" /* 20x1 - x4 */\
+ "movq "#in1", %%mm5 \n\t" /* C */\
+ "movq "#in2", %%mm6 \n\t" /* B */\
+ "paddw " #m6 ", %%mm5 \n\t" /* x3 */\
+ "paddw " #m5 ", %%mm6 \n\t" /* x2 */\
+ "paddw %%mm6, %%mm6 \n\t" /* 2x2 */\
+ "psubw %%mm6, %%mm5 \n\t" /* -2x2 + x3 */\
+ "pmullw "MANGLE(ff_pw_3)", %%mm5 \n\t" /* -6x2 + 3x3 */\
+ "paddw " #rnd ", %%mm4 \n\t" /* x2 */\
+ "paddw %%mm4, %%mm5 \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
+ "psraw $5, %%mm5 \n\t"\
+ "packuswb %%mm5, %%mm5 \n\t"\
+ OP(%%mm5, out, %%mm7, d)
+
+#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
+static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
+ uint64_t temp;\
+\
+ __asm__ volatile(\
+ "pxor %%mm7, %%mm7 \n\t"\
+ "1: \n\t"\
+ "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
+ "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
+ "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
+ "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
+ "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
+ "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
+ "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
+ "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
+ "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
+ "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
+ "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
+ "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
+ "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
+ "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
+ "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
+ "paddw %%mm3, %%mm5 \n\t" /* b */\
+ "paddw %%mm2, %%mm6 \n\t" /* c */\
+ "paddw %%mm5, %%mm5 \n\t" /* 2b */\
+ "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
+ "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
+ "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
+ "paddw %%mm4, %%mm0 \n\t" /* a */\
+ "paddw %%mm1, %%mm5 \n\t" /* d */\
+ "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
+ "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
+ "paddw %6, %%mm6 \n\t"\
+ "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
+ "psraw $5, %%mm0 \n\t"\
+ "movq %%mm0, %5 \n\t"\
+ /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
+ \
+ "movq 5(%0), %%mm0 \n\t" /* FGHIJKLM */\
+ "movq %%mm0, %%mm5 \n\t" /* FGHIJKLM */\
+ "movq %%mm0, %%mm6 \n\t" /* FGHIJKLM */\
+ "psrlq $8, %%mm0 \n\t" /* GHIJKLM0 */\
+ "psrlq $16, %%mm5 \n\t" /* HIJKLM00 */\
+ "punpcklbw %%mm7, %%mm0 \n\t" /* 0G0H0I0J */\
+ "punpcklbw %%mm7, %%mm5 \n\t" /* 0H0I0J0K */\
+ "paddw %%mm0, %%mm2 \n\t" /* b */\
+ "paddw %%mm5, %%mm3 \n\t" /* c */\
+ "paddw %%mm2, %%mm2 \n\t" /* 2b */\
+ "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
+ "movq %%mm6, %%mm2 \n\t" /* FGHIJKLM */\
+ "psrlq $24, %%mm6 \n\t" /* IJKLM000 */\
+ "punpcklbw %%mm7, %%mm2 \n\t" /* 0F0G0H0I */\
+ "punpcklbw %%mm7, %%mm6 \n\t" /* 0I0J0K0L */\
+ "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
+ "paddw %%mm2, %%mm1 \n\t" /* a */\
+ "paddw %%mm6, %%mm4 \n\t" /* d */\
+ "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
+ "psubw %%mm4, %%mm3 \n\t" /* - 6b +3c - d */\
+ "paddw %6, %%mm1 \n\t"\
+ "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b +3c - d */\
+ "psraw $5, %%mm3 \n\t"\
+ "movq %5, %%mm1 \n\t"\
+ "packuswb %%mm3, %%mm1 \n\t"\
+ OP_MMX2(%%mm1, (%1),%%mm4, q)\
+ /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\
+ \
+ "movq 9(%0), %%mm1 \n\t" /* JKLMNOPQ */\
+ "movq %%mm1, %%mm4 \n\t" /* JKLMNOPQ */\
+ "movq %%mm1, %%mm3 \n\t" /* JKLMNOPQ */\
+ "psrlq $8, %%mm1 \n\t" /* KLMNOPQ0 */\
+ "psrlq $16, %%mm4 \n\t" /* LMNOPQ00 */\
+ "punpcklbw %%mm7, %%mm1 \n\t" /* 0K0L0M0N */\
+ "punpcklbw %%mm7, %%mm4 \n\t" /* 0L0M0N0O */\
+ "paddw %%mm1, %%mm5 \n\t" /* b */\
+ "paddw %%mm4, %%mm0 \n\t" /* c */\
+ "paddw %%mm5, %%mm5 \n\t" /* 2b */\
+ "psubw %%mm5, %%mm0 \n\t" /* c - 2b */\
+ "movq %%mm3, %%mm5 \n\t" /* JKLMNOPQ */\
+ "psrlq $24, %%mm3 \n\t" /* MNOPQ000 */\
+ "pmullw "MANGLE(ff_pw_3)", %%mm0 \n\t" /* 3c - 6b */\
+ "punpcklbw %%mm7, %%mm3 \n\t" /* 0M0N0O0P */\
+ "paddw %%mm3, %%mm2 \n\t" /* d */\
+ "psubw %%mm2, %%mm0 \n\t" /* -6b + 3c - d */\
+ "movq %%mm5, %%mm2 \n\t" /* JKLMNOPQ */\
+ "punpcklbw %%mm7, %%mm2 \n\t" /* 0J0K0L0M */\
+ "punpckhbw %%mm7, %%mm5 \n\t" /* 0N0O0P0Q */\
+ "paddw %%mm2, %%mm6 \n\t" /* a */\
+ "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\
+ "paddw %6, %%mm0 \n\t"\
+ "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
+ "psraw $5, %%mm0 \n\t"\
+ /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\
+ \
+ "paddw %%mm5, %%mm3 \n\t" /* a */\
+ "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0O0P0Q0Q */\
+ "paddw %%mm4, %%mm6 \n\t" /* b */\
+ "pshufw $0xBE, %%mm5, %%mm4 \n\t" /* 0P0Q0Q0P */\
+ "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0Q0Q0P0O */\
+ "paddw %%mm1, %%mm4 \n\t" /* c */\
+ "paddw %%mm2, %%mm5 \n\t" /* d */\
+ "paddw %%mm6, %%mm6 \n\t" /* 2b */\
+ "psubw %%mm6, %%mm4 \n\t" /* c - 2b */\
+ "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\
+ "pmullw "MANGLE(ff_pw_3)", %%mm4 \n\t" /* 3c - 6b */\
+ "psubw %%mm5, %%mm3 \n\t" /* -6b + 3c - d */\
+ "paddw %6, %%mm4 \n\t"\
+ "paddw %%mm3, %%mm4 \n\t" /* 20a - 6b + 3c - d */\
+ "psraw $5, %%mm4 \n\t"\
+ "packuswb %%mm4, %%mm0 \n\t"\
+ OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
+ \
+ "add %3, %0 \n\t"\
+ "add %4, %1 \n\t"\
+ "decl %2 \n\t"\
+ " jnz 1b \n\t"\
+ : "+a"(src), "+c"(dst), "+D"(h)\
+ : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
+ : "memory"\
+ );\
+}\
+\
+static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
+ int i;\
+ int16_t temp[16];\
+ /* quick HACK, XXX FIXME MUST be optimized */\
+ for(i=0; i<h; i++)\
+ {\
+ temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
+ temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
+ temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
+ temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
+ temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
+ temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
+ temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
+ temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
+ temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
+ temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
+ temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
+ temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
+ temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
+ temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
+ temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
+ temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
+ __asm__ volatile(\
+ "movq (%0), %%mm0 \n\t"\
+ "movq 8(%0), %%mm1 \n\t"\
+ "paddw %2, %%mm0 \n\t"\
+ "paddw %2, %%mm1 \n\t"\
+ "psraw $5, %%mm0 \n\t"\
+ "psraw $5, %%mm1 \n\t"\
+ "packuswb %%mm1, %%mm0 \n\t"\
+ OP_3DNOW(%%mm0, (%1), %%mm1, q)\
+ "movq 16(%0), %%mm0 \n\t"\
+ "movq 24(%0), %%mm1 \n\t"\
+ "paddw %2, %%mm0 \n\t"\
+ "paddw %2, %%mm1 \n\t"\
+ "psraw $5, %%mm0 \n\t"\
+ "psraw $5, %%mm1 \n\t"\
+ "packuswb %%mm1, %%mm0 \n\t"\
+ OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
+ :: "r"(temp), "r"(dst), "m"(ROUNDER)\
+ : "memory"\
+ );\
+ dst+=dstStride;\
+ src+=srcStride;\
+ }\
+}\
+\
+static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
+ __asm__ volatile(\
+ "pxor %%mm7, %%mm7 \n\t"\
+ "1: \n\t"\
+ "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
+ "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
+ "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
+ "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
+ "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
+ "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
+ "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
+ "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
+ "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
+ "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
+ "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
+ "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
+ "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
+ "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
+ "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
+ "paddw %%mm3, %%mm5 \n\t" /* b */\
+ "paddw %%mm2, %%mm6 \n\t" /* c */\
+ "paddw %%mm5, %%mm5 \n\t" /* 2b */\
+ "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
+ "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
+ "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
+ "paddw %%mm4, %%mm0 \n\t" /* a */\
+ "paddw %%mm1, %%mm5 \n\t" /* d */\
+ "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
+ "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
+ "paddw %5, %%mm6 \n\t"\
+ "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
+ "psraw $5, %%mm0 \n\t"\
+ /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
+ \
+ "movd 5(%0), %%mm5 \n\t" /* FGHI */\
+ "punpcklbw %%mm7, %%mm5 \n\t" /* 0F0G0H0I */\
+ "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0G0H0I0I */\
+ "paddw %%mm5, %%mm1 \n\t" /* a */\
+ "paddw %%mm6, %%mm2 \n\t" /* b */\
+ "pshufw $0xBE, %%mm5, %%mm6 \n\t" /* 0H0I0I0H */\
+ "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0I0I0H0G */\
+ "paddw %%mm6, %%mm3 \n\t" /* c */\
+ "paddw %%mm5, %%mm4 \n\t" /* d */\
+ "paddw %%mm2, %%mm2 \n\t" /* 2b */\
+ "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
+ "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
+ "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
+ "psubw %%mm4, %%mm3 \n\t" /* -6b + 3c - d */\
+ "paddw %5, %%mm1 \n\t"\
+ "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b + 3c - d */\
+ "psraw $5, %%mm3 \n\t"\
+ "packuswb %%mm3, %%mm0 \n\t"\
+ OP_MMX2(%%mm0, (%1), %%mm4, q)\
+ \
+ "add %3, %0 \n\t"\
+ "add %4, %1 \n\t"\
+ "decl %2 \n\t"\
+ " jnz 1b \n\t"\
+ : "+a"(src), "+c"(dst), "+d"(h)\
+ : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER)\
+ : "memory"\
+ );\
+}\
+\
+static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
+ int i;\
+ int16_t temp[8];\
+ /* quick HACK, XXX FIXME MUST be optimized */\
+ for(i=0; i<h; i++)\
+ {\
+ temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
+ temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
+ temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
+ temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
+ temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
+ temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
+ temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
+ temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
+ __asm__ volatile(\
+ "movq (%0), %%mm0 \n\t"\
+ "movq 8(%0), %%mm1 \n\t"\
+ "paddw %2, %%mm0 \n\t"\
+ "paddw %2, %%mm1 \n\t"\
+ "psraw $5, %%mm0 \n\t"\
+ "psraw $5, %%mm1 \n\t"\
+ "packuswb %%mm1, %%mm0 \n\t"\
+ OP_3DNOW(%%mm0, (%1), %%mm1, q)\
+ :: "r"(temp), "r"(dst), "m"(ROUNDER)\
+ :"memory"\
+ );\
+ dst+=dstStride;\
+ src+=srcStride;\
+ }\
+}
+
+#define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
+\
+static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ uint64_t temp[17*4];\
+ uint64_t *temp_ptr= temp;\
+ int count= 17;\
+\
+ /*FIXME unroll */\
+ __asm__ volatile(\
+ "pxor %%mm7, %%mm7 \n\t"\
+ "1: \n\t"\
+ "movq (%0), %%mm0 \n\t"\
+ "movq (%0), %%mm1 \n\t"\
+ "movq 8(%0), %%mm2 \n\t"\
+ "movq 8(%0), %%mm3 \n\t"\
+ "punpcklbw %%mm7, %%mm0 \n\t"\
+ "punpckhbw %%mm7, %%mm1 \n\t"\
+ "punpcklbw %%mm7, %%mm2 \n\t"\
+ "punpckhbw %%mm7, %%mm3 \n\t"\
+ "movq %%mm0, (%1) \n\t"\
+ "movq %%mm1, 17*8(%1) \n\t"\
+ "movq %%mm2, 2*17*8(%1) \n\t"\
+ "movq %%mm3, 3*17*8(%1) \n\t"\
+ "add $8, %1 \n\t"\
+ "add %3, %0 \n\t"\
+ "decl %2 \n\t"\
+ " jnz 1b \n\t"\
+ : "+r" (src), "+r" (temp_ptr), "+r"(count)\
+ : "r" ((x86_reg)srcStride)\
+ : "memory"\
+ );\
+ \
+ temp_ptr= temp;\
+ count=4;\
+ \
+/*FIXME reorder for speed */\
+ __asm__ volatile(\
+ /*"pxor %%mm7, %%mm7 \n\t"*/\
+ "1: \n\t"\
+ "movq (%0), %%mm0 \n\t"\
+ "movq 8(%0), %%mm1 \n\t"\
+ "movq 16(%0), %%mm2 \n\t"\
+ "movq 24(%0), %%mm3 \n\t"\
+ QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
+ QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
+ "add %4, %1 \n\t"\
+ QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
+ \
+ QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
+ "add %4, %1 \n\t"\
+ QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
+ QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
+ "add %4, %1 \n\t"\
+ QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
+ QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
+ "add %4, %1 \n\t"\
+ QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
+ QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
+ "add %4, %1 \n\t"\
+ QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
+ QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
+ "add %4, %1 \n\t"\
+ QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
+ \
+ QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
+ "add %4, %1 \n\t" \
+ QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
+ QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
+ \
+ "add $136, %0 \n\t"\
+ "add %6, %1 \n\t"\
+ "decl %2 \n\t"\
+ " jnz 1b \n\t"\
+ \
+ : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
+ : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*(x86_reg)dstStride)\
+ :"memory"\
+ );\
+}\
+\
+static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ uint64_t temp[9*2];\
+ uint64_t *temp_ptr= temp;\
+ int count= 9;\
+\
+ /*FIXME unroll */\
+ __asm__ volatile(\
+ "pxor %%mm7, %%mm7 \n\t"\
+ "1: \n\t"\
+ "movq (%0), %%mm0 \n\t"\
+ "movq (%0), %%mm1 \n\t"\
+ "punpcklbw %%mm7, %%mm0 \n\t"\
+ "punpckhbw %%mm7, %%mm1 \n\t"\
+ "movq %%mm0, (%1) \n\t"\
+ "movq %%mm1, 9*8(%1) \n\t"\
+ "add $8, %1 \n\t"\
+ "add %3, %0 \n\t"\
+ "decl %2 \n\t"\
+ " jnz 1b \n\t"\
+ : "+r" (src), "+r" (temp_ptr), "+r"(count)\
+ : "r" ((x86_reg)srcStride)\
+ : "memory"\
+ );\
+ \
+ temp_ptr= temp;\
+ count=2;\
+ \
+/*FIXME reorder for speed */\
+ __asm__ volatile(\
+ /*"pxor %%mm7, %%mm7 \n\t"*/\
+ "1: \n\t"\
+ "movq (%0), %%mm0 \n\t"\
+ "movq 8(%0), %%mm1 \n\t"\
+ "movq 16(%0), %%mm2 \n\t"\
+ "movq 24(%0), %%mm3 \n\t"\
+ QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
+ QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
+ "add %4, %1 \n\t"\
+ QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
+ \
+ QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
+ "add %4, %1 \n\t"\
+ QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
+ \
+ QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
+ "add %4, %1 \n\t"\
+ QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
+ QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
+ \
+ "add $72, %0 \n\t"\
+ "add %6, %1 \n\t"\
+ "decl %2 \n\t"\
+ " jnz 1b \n\t"\
+ \
+ : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
+ : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*(x86_reg)dstStride)\
+ : "memory"\
+ );\
+}\
+\
+static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);\
+}\
+\
+static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ uint64_t temp[8];\
+ uint8_t * const half= (uint8_t*)temp;\
+ put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
+ OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
+}\
+\
+static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
+}\
+\
+static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ uint64_t temp[8];\
+ uint8_t * const half= (uint8_t*)temp;\
+ put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
+ OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\
+}\
+\
+static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ uint64_t temp[8];\
+ uint8_t * const half= (uint8_t*)temp;\
+ put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
+ OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
+}\
+\
+static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
+}\
+\
+static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ uint64_t temp[8];\
+ uint8_t * const half= (uint8_t*)temp;\
+ put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
+ OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\
+}\
+static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ uint64_t half[8 + 9];\
+ uint8_t * const halfH= ((uint8_t*)half) + 64;\
+ uint8_t * const halfHV= ((uint8_t*)half);\
+ put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
+ put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
+ OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
+}\
+static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ uint64_t half[8 + 9];\
+ uint8_t * const halfH= ((uint8_t*)half) + 64;\
+ uint8_t * const halfHV= ((uint8_t*)half);\
+ put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
+ put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
+ OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
+}\
+static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ uint64_t half[8 + 9];\
+ uint8_t * const halfH= ((uint8_t*)half) + 64;\
+ uint8_t * const halfHV= ((uint8_t*)half);\
+ put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
+ put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
+ OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
+}\
+static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ uint64_t half[8 + 9];\
+ uint8_t * const halfH= ((uint8_t*)half) + 64;\
+ uint8_t * const halfHV= ((uint8_t*)half);\
+ put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
+ put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
+ OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
+}\
+static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ uint64_t half[8 + 9];\
+ uint8_t * const halfH= ((uint8_t*)half) + 64;\
+ uint8_t * const halfHV= ((uint8_t*)half);\
+ put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
+ OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
+}\
+static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ uint64_t half[8 + 9];\
+ uint8_t * const halfH= ((uint8_t*)half) + 64;\
+ uint8_t * const halfHV= ((uint8_t*)half);\
+ put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
+ OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
+}\
+static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ uint64_t half[8 + 9];\
+ uint8_t * const halfH= ((uint8_t*)half);\
+ put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
+ put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
+ OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
+}\
+static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ uint64_t half[8 + 9];\
+ uint8_t * const halfH= ((uint8_t*)half);\
+ put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
+ put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
+ OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
+}\
+static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ uint64_t half[9];\
+ uint8_t * const halfH= ((uint8_t*)half);\
+ put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
+ OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
+}\
+static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);\
+}\
+\
+static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ uint64_t temp[32];\
+ uint8_t * const half= (uint8_t*)temp;\
+ put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
+ OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
+}\
+\
+static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
+}\
+\
+static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ uint64_t temp[32];\
+ uint8_t * const half= (uint8_t*)temp;\
+ put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
+ OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\
+}\
+\
+static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ uint64_t temp[32];\
+ uint8_t * const half= (uint8_t*)temp;\
+ put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
+ OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
+}\
+\
+static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
+}\
+\
+static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ uint64_t temp[32];\
+ uint8_t * const half= (uint8_t*)temp;\
+ put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
+ OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\
+}\
+static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ uint64_t half[16*2 + 17*2];\
+ uint8_t * const halfH= ((uint8_t*)half) + 256;\
+ uint8_t * const halfHV= ((uint8_t*)half);\
+ put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
+ put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
+ OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
+}\
+static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ uint64_t half[16*2 + 17*2];\
+ uint8_t * const halfH= ((uint8_t*)half) + 256;\
+ uint8_t * const halfHV= ((uint8_t*)half);\
+ put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
+ put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
+ OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
+}\
+static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ uint64_t half[16*2 + 17*2];\
+ uint8_t * const halfH= ((uint8_t*)half) + 256;\
+ uint8_t * const halfHV= ((uint8_t*)half);\
+ put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
+ put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
+ OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
+}\
+static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ uint64_t half[16*2 + 17*2];\
+ uint8_t * const halfH= ((uint8_t*)half) + 256;\
+ uint8_t * const halfHV= ((uint8_t*)half);\
+ put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
+ put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
+ OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
+}\
+static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ uint64_t half[16*2 + 17*2];\
+ uint8_t * const halfH= ((uint8_t*)half) + 256;\
+ uint8_t * const halfHV= ((uint8_t*)half);\
+ put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
+ OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
+}\
+static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ uint64_t half[16*2 + 17*2];\
+ uint8_t * const halfH= ((uint8_t*)half) + 256;\
+ uint8_t * const halfHV= ((uint8_t*)half);\
+ put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
+ OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
+}\
+static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ uint64_t half[17*2];\
+ uint8_t * const halfH= ((uint8_t*)half);\
+ put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
+ put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
+ OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
+}\
+static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ uint64_t half[17*2];\
+ uint8_t * const halfH= ((uint8_t*)half);\
+ put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
+ put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
+ OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
+}\
+static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ uint64_t half[17*2];\
+ uint8_t * const halfH= ((uint8_t*)half);\
+ put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
+ OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
+}
+
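+/* store macros plugged into the qpel templates: PUT_OP writes the result,
+   the AVG variants first average it with the destination (pavgusb on 3DNow,
+   pavgb on MMX2). */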
+#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"
+#define AVG_3DNOW_OP(a,b,temp, size) \
+"mov" #size " " #b ", " #temp " \n\t"\
+"pavgusb " #temp ", " #a " \n\t"\
+"mov" #size " " #a ", " #b " \n\t"
+#define AVG_MMX2_OP(a,b,temp, size) \
+"mov" #size " " #b ", " #temp " \n\t"\
+"pavgb " #temp ", " #a " \n\t"\
+"mov" #size " " #a ", " #b " \n\t"
+
+QPEL_BASE(put_ , ff_pw_16, _ , PUT_OP, PUT_OP)
+QPEL_BASE(avg_ , ff_pw_16, _ , AVG_MMX2_OP, AVG_3DNOW_OP)
+QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
+QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, 3dnow)
+QPEL_OP(avg_ , ff_pw_16, _ , AVG_3DNOW_OP, 3dnow)
+QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
+QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, mmx2)
+QPEL_OP(avg_ , ff_pw_16, _ , AVG_MMX2_OP, mmx2)
+QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)
+
+/***********************************/
+/* bilinear qpel: not compliant with any spec, only used with -lavdopts fast */
+
+#define QPEL_2TAP_XY(OPNAME, SIZE, MMX, XY, HPEL)\
+static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## pixels ## SIZE ## HPEL(dst, src, stride, SIZE);\
+}
+#define QPEL_2TAP_L3(OPNAME, SIZE, MMX, XY, S0, S1, S2)\
+static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## 2tap_qpel ## SIZE ## _l3_ ## MMX(dst, src+S0, stride, SIZE, S1, S2);\
+}
+
+#define QPEL_2TAP(OPNAME, SIZE, MMX)\
+QPEL_2TAP_XY(OPNAME, SIZE, MMX, 20, _x2_ ## MMX)\
+QPEL_2TAP_XY(OPNAME, SIZE, MMX, 02, _y2_ ## MMX)\
+QPEL_2TAP_XY(OPNAME, SIZE, MMX, 22, _xy2_mmx)\
+static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc00_ ## MMX =\
+ OPNAME ## qpel ## SIZE ## _mc00_ ## MMX;\
+static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc21_ ## MMX =\
+ OPNAME ## 2tap_qpel ## SIZE ## _mc20_ ## MMX;\
+static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc12_ ## MMX =\
+ OPNAME ## 2tap_qpel ## SIZE ## _mc02_ ## MMX;\
+static void OPNAME ## 2tap_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## pixels ## SIZE ## _y2_ ## MMX(dst, src+1, stride, SIZE);\
+}\
+static void OPNAME ## 2tap_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## pixels ## SIZE ## _x2_ ## MMX(dst, src+stride, stride, SIZE);\
+}\
+QPEL_2TAP_L3(OPNAME, SIZE, MMX, 10, 0, 1, 0)\
+QPEL_2TAP_L3(OPNAME, SIZE, MMX, 30, 1, -1, 0)\
+QPEL_2TAP_L3(OPNAME, SIZE, MMX, 01, 0, stride, 0)\
+QPEL_2TAP_L3(OPNAME, SIZE, MMX, 03, stride, -stride, 0)\
+QPEL_2TAP_L3(OPNAME, SIZE, MMX, 11, 0, stride, 1)\
+QPEL_2TAP_L3(OPNAME, SIZE, MMX, 31, 1, stride, -1)\
+QPEL_2TAP_L3(OPNAME, SIZE, MMX, 13, stride, -stride, 1)\
+QPEL_2TAP_L3(OPNAME, SIZE, MMX, 33, stride+1, -stride, -1)\
+
+QPEL_2TAP(put_, 16, mmx2)
+QPEL_2TAP(avg_, 16, mmx2)
+QPEL_2TAP(put_, 8, mmx2)
+QPEL_2TAP(avg_, 8, mmx2)
+QPEL_2TAP(put_, 16, 3dnow)
+QPEL_2TAP(avg_, 16, 3dnow)
+QPEL_2TAP(put_, 8, 3dnow)
+QPEL_2TAP(avg_, 8, 3dnow)
+
+
+#if 0
+static void just_return(void) { return; }
+#endif
+
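+/* global motion compensation for one 8x8 block: falls back to the C version
+   for non-constant full-pel offsets or >16-bit sub-pel vectors, pads the
+   source via ff_emulated_edge_mc() when it crosses the picture border, and
+   otherwise does the bilinear interpolation with MMX. */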
+static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
+ int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height){
+ const int w = 8;
+ const int ix = ox>>(16+shift);
+ const int iy = oy>>(16+shift);
+ const int oxs = ox>>4;
+ const int oys = oy>>4;
+ const int dxxs = dxx>>4;
+ const int dxys = dxy>>4;
+ const int dyxs = dyx>>4;
+ const int dyys = dyy>>4;
+ const uint16_t r4[4] = {r,r,r,r};
+ const uint16_t dxy4[4] = {dxys,dxys,dxys,dxys};
+ const uint16_t dyy4[4] = {dyys,dyys,dyys,dyys};
+ const uint64_t shift2 = 2*shift;
+ uint8_t edge_buf[(h+1)*stride];
+ int x, y;
+
+ const int dxw = (dxx-(1<<(16+shift)))*(w-1);
+ const int dyh = (dyy-(1<<(16+shift)))*(h-1);
+ const int dxh = dxy*(h-1);
+ const int dyw = dyx*(w-1);
+ if( // non-constant fullpel offset (3% of blocks)
+ ((ox^(ox+dxw)) | (ox^(ox+dxh)) | (ox^(ox+dxw+dxh)) |
+ (oy^(oy+dyw)) | (oy^(oy+dyh)) | (oy^(oy+dyw+dyh))) >> (16+shift)
+ // uses more than 16 bits of subpel mv (only at huge resolution)
+ || (dxx|dxy|dyx|dyy)&15 )
+ {
+ //FIXME could still use mmx for some of the rows
+ ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r, width, height);
+ return;
+ }
+
+ src += ix + iy*stride;
+ if( (unsigned)ix >= width-w ||
+ (unsigned)iy >= height-h )
+ {
+ ff_emulated_edge_mc(edge_buf, src, stride, w+1, h+1, ix, iy, width, height);
+ src = edge_buf;
+ }
+
+ __asm__ volatile(
+ "movd %0, %%mm6 \n\t"
+ "pxor %%mm7, %%mm7 \n\t"
+ "punpcklwd %%mm6, %%mm6 \n\t"
+ "punpcklwd %%mm6, %%mm6 \n\t"
+ :: "r"(1<<shift)
+ );
+
+ for(x=0; x<w; x+=4){
+ uint16_t dx4[4] = { oxs - dxys + dxxs*(x+0),
+ oxs - dxys + dxxs*(x+1),
+ oxs - dxys + dxxs*(x+2),
+ oxs - dxys + dxxs*(x+3) };
+ uint16_t dy4[4] = { oys - dyys + dyxs*(x+0),
+ oys - dyys + dyxs*(x+1),
+ oys - dyys + dyxs*(x+2),
+ oys - dyys + dyxs*(x+3) };
+
+ for(y=0; y<h; y++){
+ __asm__ volatile(
+ "movq %0, %%mm4 \n\t"
+ "movq %1, %%mm5 \n\t"
+ "paddw %2, %%mm4 \n\t"
+ "paddw %3, %%mm5 \n\t"
+ "movq %%mm4, %0 \n\t"
+ "movq %%mm5, %1 \n\t"
+ "psrlw $12, %%mm4 \n\t"
+ "psrlw $12, %%mm5 \n\t"
+ : "+m"(*dx4), "+m"(*dy4)
+ : "m"(*dxy4), "m"(*dyy4)
+ );
+
+ __asm__ volatile(
+ "movq %%mm6, %%mm2 \n\t"
+ "movq %%mm6, %%mm1 \n\t"
+ "psubw %%mm4, %%mm2 \n\t"
+ "psubw %%mm5, %%mm1 \n\t"
+ "movq %%mm2, %%mm0 \n\t"
+ "movq %%mm4, %%mm3 \n\t"
+ "pmullw %%mm1, %%mm0 \n\t" // (s-dx)*(s-dy)
+ "pmullw %%mm5, %%mm3 \n\t" // dx*dy
+ "pmullw %%mm5, %%mm2 \n\t" // (s-dx)*dy
+ "pmullw %%mm4, %%mm1 \n\t" // dx*(s-dy)
+
+ "movd %4, %%mm5 \n\t"
+ "movd %3, %%mm4 \n\t"
+ "punpcklbw %%mm7, %%mm5 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "pmullw %%mm5, %%mm3 \n\t" // src[1,1] * dx*dy
+ "pmullw %%mm4, %%mm2 \n\t" // src[0,1] * (s-dx)*dy
+
+ "movd %2, %%mm5 \n\t"
+ "movd %1, %%mm4 \n\t"
+ "punpcklbw %%mm7, %%mm5 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "pmullw %%mm5, %%mm1 \n\t" // src[1,0] * dx*(s-dy)
+ "pmullw %%mm4, %%mm0 \n\t" // src[0,0] * (s-dx)*(s-dy)
+ "paddw %5, %%mm1 \n\t"
+ "paddw %%mm3, %%mm2 \n\t"
+ "paddw %%mm1, %%mm0 \n\t"
+ "paddw %%mm2, %%mm0 \n\t"
+
+ "psrlw %6, %%mm0 \n\t"
+ "packuswb %%mm0, %%mm0 \n\t"
+ "movd %%mm0, %0 \n\t"
+
+ : "=m"(dst[x+y*stride])
+ : "m"(src[0]), "m"(src[1]),
+ "m"(src[stride]), "m"(src[stride+1]),
+ "m"(*r4), "m"(shift2)
+ );
+ src += stride;
+ }
+ src += 4-h*stride;
+ }
+}
+
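+/* generate prefetch_mmx2/prefetch_3dnow: issue one cache prefetch per line
+   of the block to warm the cache before the data is read. */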
+#define PREFETCH(name, op) \
+static void name(void *mem, int stride, int h){\
+ const uint8_t *p= mem;\
+ do{\
+ __asm__ volatile(#op" %0" :: "m"(*p));\
+ p+= stride;\
+ }while(--h);\
+}
+PREFETCH(prefetch_mmx2, prefetcht0)
+PREFETCH(prefetch_3dnow, prefetch)
+#undef PREFETCH
+
+#include "h264dsp_mmx.c"
+#include "rv40dsp_mmx.c"
+
+/* CAVS specific */
+void ff_cavsdsp_init_mmx2(DSPContext* c, AVCodecContext *avctx);
+void ff_cavsdsp_init_3dnow(DSPContext* c, AVCodecContext *avctx);
+
+void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
+ put_pixels8_mmx(dst, src, stride, 8);
+}
+void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
+ avg_pixels8_mmx(dst, src, stride, 8);
+}
+void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
+ put_pixels16_mmx(dst, src, stride, 16);
+}
+void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
+ avg_pixels16_mmx(dst, src, stride, 16);
+}
+
+/* VC1 specific */
+void ff_vc1dsp_init_mmx(DSPContext* dsp, AVCodecContext *avctx);
+
+void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
+ put_pixels8_mmx(dst, src, stride, 8);
+}
+void ff_avg_vc1_mspel_mc00_mmx2(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
+ avg_pixels8_mmx2(dst, src, stride, 8);
+}
+
+/* external functions, from idct_mmx.c */
+void ff_mmx_idct(DCTELEM *block);
+void ff_mmxext_idct(DCTELEM *block);
+
+/* XXX: these functions should be removed as soon as all IDCTs are
+   converted */
+#if CONFIG_GPL
+static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ ff_mmx_idct (block);
+ put_pixels_clamped_mmx(block, dest, line_size);
+}
+static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ ff_mmx_idct (block);
+ add_pixels_clamped_mmx(block, dest, line_size);
+}
+static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ ff_mmxext_idct (block);
+ put_pixels_clamped_mmx(block, dest, line_size);
+}
+static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ ff_mmxext_idct (block);
+ add_pixels_clamped_mmx(block, dest, line_size);
+}
+#endif
+static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ ff_idct_xvid_mmx (block);
+ put_pixels_clamped_mmx(block, dest, line_size);
+}
+static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ ff_idct_xvid_mmx (block);
+ add_pixels_clamped_mmx(block, dest, line_size);
+}
+static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ ff_idct_xvid_mmx2 (block);
+ put_pixels_clamped_mmx(block, dest, line_size);
+}
+static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ ff_idct_xvid_mmx2 (block);
+ add_pixels_clamped_mmx(block, dest, line_size);
+}
+
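+/* Vorbis inverse channel coupling: rebuild the two channel spectra from the
+   (magnitude, angle) pair; the per-instruction comments below spell out the
+   sign logic. */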
+static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize)
+{
+ int i;
+ __asm__ volatile("pxor %%mm7, %%mm7":);
+ for(i=0; i<blocksize; i+=2) {
+ __asm__ volatile(
+ "movq %0, %%mm0 \n\t"
+ "movq %1, %%mm1 \n\t"
+ "movq %%mm0, %%mm2 \n\t"
+ "movq %%mm1, %%mm3 \n\t"
+ "pfcmpge %%mm7, %%mm2 \n\t" // m <= 0.0
+ "pfcmpge %%mm7, %%mm3 \n\t" // a <= 0.0
+ "pslld $31, %%mm2 \n\t" // keep only the sign bit
+ "pxor %%mm2, %%mm1 \n\t"
+ "movq %%mm3, %%mm4 \n\t"
+ "pand %%mm1, %%mm3 \n\t"
+ "pandn %%mm1, %%mm4 \n\t"
+ "pfadd %%mm0, %%mm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
+ "pfsub %%mm4, %%mm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
+ "movq %%mm3, %1 \n\t"
+ "movq %%mm0, %0 \n\t"
+ :"+m"(mag[i]), "+m"(ang[i])
+ ::"memory"
+ );
+ }
+ __asm__ volatile("femms");
+}
+static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize)
+{
+ int i;
+
+ __asm__ volatile(
+ "movaps %0, %%xmm5 \n\t"
+ ::"m"(ff_pdw_80000000[0])
+ );
+ for(i=0; i<blocksize; i+=4) {
+ __asm__ volatile(
+ "movaps %0, %%xmm0 \n\t"
+ "movaps %1, %%xmm1 \n\t"
+ "xorps %%xmm2, %%xmm2 \n\t"
+ "xorps %%xmm3, %%xmm3 \n\t"
+ "cmpleps %%xmm0, %%xmm2 \n\t" // m <= 0.0
+ "cmpleps %%xmm1, %%xmm3 \n\t" // a <= 0.0
+ "andps %%xmm5, %%xmm2 \n\t" // keep only the sign bit
+ "xorps %%xmm2, %%xmm1 \n\t"
+ "movaps %%xmm3, %%xmm4 \n\t"
+ "andps %%xmm1, %%xmm3 \n\t"
+ "andnps %%xmm1, %%xmm4 \n\t"
+ "addps %%xmm0, %%xmm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
+ "subps %%xmm4, %%xmm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
+ "movaps %%xmm3, %1 \n\t"
+ "movaps %%xmm0, %0 \n\t"
+ :"+m"(mag[i]), "+m"(ang[i])
+ ::"memory"
+ );
+ }
+}
+
+#define IF1(x) x
+#define IF0(x)
+
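+/* AC-3 downmix kernels used by ac3_downmix_sse() below: MIX5 is a fast path
+   for the common 5-channel to stereo/mono matrices, MIX_MISC handles an
+   arbitrary mix matrix via the splatted matrix_simd coefficients. */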
+#define MIX5(mono,stereo)\
+ __asm__ volatile(\
+ "movss 0(%2), %%xmm5 \n"\
+ "movss 8(%2), %%xmm6 \n"\
+ "movss 24(%2), %%xmm7 \n"\
+ "shufps $0, %%xmm5, %%xmm5 \n"\
+ "shufps $0, %%xmm6, %%xmm6 \n"\
+ "shufps $0, %%xmm7, %%xmm7 \n"\
+ "1: \n"\
+ "movaps (%0,%1), %%xmm0 \n"\
+ "movaps 0x400(%0,%1), %%xmm1 \n"\
+ "movaps 0x800(%0,%1), %%xmm2 \n"\
+ "movaps 0xc00(%0,%1), %%xmm3 \n"\
+ "movaps 0x1000(%0,%1), %%xmm4 \n"\
+ "mulps %%xmm5, %%xmm0 \n"\
+ "mulps %%xmm6, %%xmm1 \n"\
+ "mulps %%xmm5, %%xmm2 \n"\
+ "mulps %%xmm7, %%xmm3 \n"\
+ "mulps %%xmm7, %%xmm4 \n"\
+ stereo("addps %%xmm1, %%xmm0 \n")\
+ "addps %%xmm1, %%xmm2 \n"\
+ "addps %%xmm3, %%xmm0 \n"\
+ "addps %%xmm4, %%xmm2 \n"\
+ mono("addps %%xmm2, %%xmm0 \n")\
+ "movaps %%xmm0, (%0,%1) \n"\
+ stereo("movaps %%xmm2, 0x400(%0,%1) \n")\
+ "add $16, %0 \n"\
+ "jl 1b \n"\
+ :"+&r"(i)\
+ :"r"(samples[0]+len), "r"(matrix)\
+ :"memory"\
+ );
+
+#define MIX_MISC(stereo)\
+ __asm__ volatile(\
+ "1: \n"\
+ "movaps (%3,%0), %%xmm0 \n"\
+ stereo("movaps %%xmm0, %%xmm1 \n")\
+ "mulps %%xmm6, %%xmm0 \n"\
+ stereo("mulps %%xmm7, %%xmm1 \n")\
+ "lea 1024(%3,%0), %1 \n"\
+ "mov %5, %2 \n"\
+ "2: \n"\
+ "movaps (%1), %%xmm2 \n"\
+ stereo("movaps %%xmm2, %%xmm3 \n")\
+ "mulps (%4,%2), %%xmm2 \n"\
+ stereo("mulps 16(%4,%2), %%xmm3 \n")\
+ "addps %%xmm2, %%xmm0 \n"\
+ stereo("addps %%xmm3, %%xmm1 \n")\
+ "add $1024, %1 \n"\
+ "add $32, %2 \n"\
+ "jl 2b \n"\
+ "movaps %%xmm0, (%3,%0) \n"\
+ stereo("movaps %%xmm1, 1024(%3,%0) \n")\
+ "add $16, %0 \n"\
+ "jl 1b \n"\
+ :"+&r"(i), "=&r"(j), "=&r"(k)\
+ :"r"(samples[0]+len), "r"(matrix_simd+in_ch), "g"((intptr_t)-32*(in_ch-1))\
+ :"memory"\
+ );
+
+static void ac3_downmix_sse(float (*samples)[256], float (*matrix)[2], int out_ch, int in_ch, int len)
+{
+ int (*matrix_cmp)[2] = (int(*)[2])matrix;
+ intptr_t i,j,k;
+
+ i = -len*sizeof(float);
+ if(in_ch == 5 && out_ch == 2 && !(matrix_cmp[0][1]|matrix_cmp[2][0]|matrix_cmp[3][1]|matrix_cmp[4][0]|(matrix_cmp[1][0]^matrix_cmp[1][1])|(matrix_cmp[0][0]^matrix_cmp[2][1]))) {
+ MIX5(IF0,IF1);
+ } else if(in_ch == 5 && out_ch == 1 && matrix_cmp[0][0]==matrix_cmp[2][0] && matrix_cmp[3][0]==matrix_cmp[4][0]) {
+ MIX5(IF1,IF0);
+ } else {
+ DECLARE_ALIGNED_16(float, matrix_simd)[in_ch][2][4];
+ j = 2*in_ch*sizeof(float);
+ __asm__ volatile(
+ "1: \n"
+ "sub $8, %0 \n"
+ "movss (%2,%0), %%xmm6 \n"
+ "movss 4(%2,%0), %%xmm7 \n"
+ "shufps $0, %%xmm6, %%xmm6 \n"
+ "shufps $0, %%xmm7, %%xmm7 \n"
+ "movaps %%xmm6, (%1,%0,4) \n"
+ "movaps %%xmm7, 16(%1,%0,4) \n"
+ "jg 1b \n"
+ :"+&r"(j)
+ :"r"(matrix_simd), "r"(matrix)
+ :"memory"
+ );
+ if(out_ch == 2) {
+ MIX_MISC(IF1);
+ } else {
+ MIX_MISC(IF0);
+ }
+ }
+}
+
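+/* dst[i] *= src[i] for len floats, processed from the end of the arrays down. */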
+static void vector_fmul_3dnow(float *dst, const float *src, int len){
+ x86_reg i = (len-4)*4;
+ __asm__ volatile(
+ "1: \n\t"
+ "movq (%1,%0), %%mm0 \n\t"
+ "movq 8(%1,%0), %%mm1 \n\t"
+ "pfmul (%2,%0), %%mm0 \n\t"
+ "pfmul 8(%2,%0), %%mm1 \n\t"
+ "movq %%mm0, (%1,%0) \n\t"
+ "movq %%mm1, 8(%1,%0) \n\t"
+ "sub $16, %0 \n\t"
+ "jge 1b \n\t"
+ "femms \n\t"
+ :"+r"(i)
+ :"r"(dst), "r"(src)
+ :"memory"
+ );
+}
+static void vector_fmul_sse(float *dst, const float *src, int len){
+ x86_reg i = (len-8)*4;
+ __asm__ volatile(
+ "1: \n\t"
+ "movaps (%1,%0), %%xmm0 \n\t"
+ "movaps 16(%1,%0), %%xmm1 \n\t"
+ "mulps (%2,%0), %%xmm0 \n\t"
+ "mulps 16(%2,%0), %%xmm1 \n\t"
+ "movaps %%xmm0, (%1,%0) \n\t"
+ "movaps %%xmm1, 16(%1,%0) \n\t"
+ "sub $32, %0 \n\t"
+ "jge 1b \n\t"
+ :"+r"(i)
+ :"r"(dst), "r"(src)
+ :"memory"
+ );
+}
+
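+/* dst[i] = src0[i] * src1[len-1-i]: multiply by the second operand read in
+   reverse order (pswapd / shufps $0x1b do the in-register reversal). */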
+static void vector_fmul_reverse_3dnow2(float *dst, const float *src0, const float *src1, int len){
+ x86_reg i = len*4-16;
+ __asm__ volatile(
+ "1: \n\t"
+ "pswapd 8(%1), %%mm0 \n\t"
+ "pswapd (%1), %%mm1 \n\t"
+ "pfmul (%3,%0), %%mm0 \n\t"
+ "pfmul 8(%3,%0), %%mm1 \n\t"
+ "movq %%mm0, (%2,%0) \n\t"
+ "movq %%mm1, 8(%2,%0) \n\t"
+ "add $16, %1 \n\t"
+ "sub $16, %0 \n\t"
+ "jge 1b \n\t"
+ :"+r"(i), "+r"(src1)
+ :"r"(dst), "r"(src0)
+ );
+ __asm__ volatile("femms");
+}
+static void vector_fmul_reverse_sse(float *dst, const float *src0, const float *src1, int len){
+ x86_reg i = len*4-32;
+ __asm__ volatile(
+ "1: \n\t"
+ "movaps 16(%1), %%xmm0 \n\t"
+ "movaps (%1), %%xmm1 \n\t"
+ "shufps $0x1b, %%xmm0, %%xmm0 \n\t"
+ "shufps $0x1b, %%xmm1, %%xmm1 \n\t"
+ "mulps (%3,%0), %%xmm0 \n\t"
+ "mulps 16(%3,%0), %%xmm1 \n\t"
+ "movaps %%xmm0, (%2,%0) \n\t"
+ "movaps %%xmm1, 16(%2,%0) \n\t"
+ "add $32, %1 \n\t"
+ "sub $32, %0 \n\t"
+ "jge 1b \n\t"
+ :"+r"(i), "+r"(src1)
+ :"r"(dst), "r"(src0)
+ );
+}
+
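+/* dst[i] = src0[i]*src1[i] + src2[i] for len floats. */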
+static void vector_fmul_add_3dnow(float *dst, const float *src0, const float *src1,
+ const float *src2, int len){
+ x86_reg i = (len-4)*4;
+ __asm__ volatile(
+ "1: \n\t"
+ "movq (%2,%0), %%mm0 \n\t"
+ "movq 8(%2,%0), %%mm1 \n\t"
+ "pfmul (%3,%0), %%mm0 \n\t"
+ "pfmul 8(%3,%0), %%mm1 \n\t"
+ "pfadd (%4,%0), %%mm0 \n\t"
+ "pfadd 8(%4,%0), %%mm1 \n\t"
+ "movq %%mm0, (%1,%0) \n\t"
+ "movq %%mm1, 8(%1,%0) \n\t"
+ "sub $16, %0 \n\t"
+ "jge 1b \n\t"
+ :"+r"(i)
+ :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
+ :"memory"
+ );
+ __asm__ volatile("femms");
+}
+static void vector_fmul_add_sse(float *dst, const float *src0, const float *src1,
+ const float *src2, int len){
+ x86_reg i = (len-8)*4;
+ __asm__ volatile(
+ "1: \n\t"
+ "movaps (%2,%0), %%xmm0 \n\t"
+ "movaps 16(%2,%0), %%xmm1 \n\t"
+ "mulps (%3,%0), %%xmm0 \n\t"
+ "mulps 16(%3,%0), %%xmm1 \n\t"
+ "addps (%4,%0), %%xmm0 \n\t"
+ "addps 16(%4,%0), %%xmm1 \n\t"
+ "movaps %%xmm0, (%1,%0) \n\t"
+ "movaps %%xmm1, 16(%1,%0) \n\t"
+ "sub $32, %0 \n\t"
+ "jge 1b \n\t"
+ :"+r"(i)
+ :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
+ :"memory"
+ );
+}
+
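+/* windowed overlap-add as used by MDCT-based audio decoders; only the
+   add_bias==0 case (and only with six free GPRs) is done in assembly,
+   everything else falls back to ff_vector_fmul_window_c(). */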
+static void vector_fmul_window_3dnow2(float *dst, const float *src0, const float *src1,
+ const float *win, float add_bias, int len){
+#if HAVE_6REGS
+ if(add_bias == 0){
+ x86_reg i = -len*4;
+ x86_reg j = len*4-8;
+ __asm__ volatile(
+ "1: \n"
+ "pswapd (%5,%1), %%mm1 \n"
+ "movq (%5,%0), %%mm0 \n"
+ "pswapd (%4,%1), %%mm5 \n"
+ "movq (%3,%0), %%mm4 \n"
+ "movq %%mm0, %%mm2 \n"
+ "movq %%mm1, %%mm3 \n"
+ "pfmul %%mm4, %%mm2 \n" // src0[len+i]*win[len+i]
+ "pfmul %%mm5, %%mm3 \n" // src1[ j]*win[len+j]
+ "pfmul %%mm4, %%mm1 \n" // src0[len+i]*win[len+j]
+ "pfmul %%mm5, %%mm0 \n" // src1[ j]*win[len+i]
+ "pfadd %%mm3, %%mm2 \n"
+ "pfsub %%mm0, %%mm1 \n"
+ "pswapd %%mm2, %%mm2 \n"
+ "movq %%mm1, (%2,%0) \n"
+ "movq %%mm2, (%2,%1) \n"
+ "sub $8, %1 \n"
+ "add $8, %0 \n"
+ "jl 1b \n"
+ "femms \n"
+ :"+r"(i), "+r"(j)
+ :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
+ );
+ }else
+#endif
+ ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len);
+}
+
+static void vector_fmul_window_sse(float *dst, const float *src0, const float *src1,
+ const float *win, float add_bias, int len){
+#if HAVE_6REGS
+ if(add_bias == 0){
+ x86_reg i = -len*4;
+ x86_reg j = len*4-16;
+ __asm__ volatile(
+ "1: \n"
+ "movaps (%5,%1), %%xmm1 \n"
+ "movaps (%5,%0), %%xmm0 \n"
+ "movaps (%4,%1), %%xmm5 \n"
+ "movaps (%3,%0), %%xmm4 \n"
+ "shufps $0x1b, %%xmm1, %%xmm1 \n"
+ "shufps $0x1b, %%xmm5, %%xmm5 \n"
+ "movaps %%xmm0, %%xmm2 \n"
+ "movaps %%xmm1, %%xmm3 \n"
+ "mulps %%xmm4, %%xmm2 \n" // src0[len+i]*win[len+i]
+ "mulps %%xmm5, %%xmm3 \n" // src1[ j]*win[len+j]
+ "mulps %%xmm4, %%xmm1 \n" // src0[len+i]*win[len+j]
+ "mulps %%xmm5, %%xmm0 \n" // src1[ j]*win[len+i]
+ "addps %%xmm3, %%xmm2 \n"
+ "subps %%xmm0, %%xmm1 \n"
+ "shufps $0x1b, %%xmm2, %%xmm2 \n"
+ "movaps %%xmm1, (%2,%0) \n"
+ "movaps %%xmm2, (%2,%1) \n"
+ "sub $16, %1 \n"
+ "add $16, %0 \n"
+ "jl 1b \n"
+ :"+r"(i), "+r"(j)
+ :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
+ );
+ }else
+#endif
+ ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len);
+}
+
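+/* dst[i] = src[i] * mul: convert int32 samples to float and scale them. */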
+static void int32_to_float_fmul_scalar_sse(float *dst, const int *src, float mul, int len)
+{
+ x86_reg i = -4*len;
+ __asm__ volatile(
+ "movss %3, %%xmm4 \n"
+ "shufps $0, %%xmm4, %%xmm4 \n"
+ "1: \n"
+ "cvtpi2ps (%2,%0), %%xmm0 \n"
+ "cvtpi2ps 8(%2,%0), %%xmm1 \n"
+ "cvtpi2ps 16(%2,%0), %%xmm2 \n"
+ "cvtpi2ps 24(%2,%0), %%xmm3 \n"
+ "movlhps %%xmm1, %%xmm0 \n"
+ "movlhps %%xmm3, %%xmm2 \n"
+ "mulps %%xmm4, %%xmm0 \n"
+ "mulps %%xmm4, %%xmm2 \n"
+ "movaps %%xmm0, (%1,%0) \n"
+ "movaps %%xmm2, 16(%1,%0) \n"
+ "add $32, %0 \n"
+ "jl 1b \n"
+ :"+r"(i)
+ :"r"(dst+len), "r"(src+len), "m"(mul)
+ );
+}
+
+static void int32_to_float_fmul_scalar_sse2(float *dst, const int *src, float mul, int len)
+{
+ x86_reg i = -4*len;
+ __asm__ volatile(
+ "movss %3, %%xmm4 \n"
+ "shufps $0, %%xmm4, %%xmm4 \n"
+ "1: \n"
+ "cvtdq2ps (%2,%0), %%xmm0 \n"
+ "cvtdq2ps 16(%2,%0), %%xmm1 \n"
+ "mulps %%xmm4, %%xmm0 \n"
+ "mulps %%xmm4, %%xmm1 \n"
+ "movaps %%xmm0, (%1,%0) \n"
+ "movaps %%xmm1, 16(%1,%0) \n"
+ "add $32, %0 \n"
+ "jl 1b \n"
+ :"+r"(i)
+ :"r"(dst+len), "r"(src+len), "m"(mul)
+ );
+}
+
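+/* convert len floats to int16 with saturation (packssdw clamps the range). */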
+static void float_to_int16_3dnow(int16_t *dst, const float *src, long len){
+ x86_reg reglen = len;
+ // not bit-exact: pf2id uses different rounding than C and SSE
+ __asm__ volatile(
+ "add %0 , %0 \n\t"
+ "lea (%2,%0,2) , %2 \n\t"
+ "add %0 , %1 \n\t"
+ "neg %0 \n\t"
+ "1: \n\t"
+ "pf2id (%2,%0,2) , %%mm0 \n\t"
+ "pf2id 8(%2,%0,2) , %%mm1 \n\t"
+ "pf2id 16(%2,%0,2) , %%mm2 \n\t"
+ "pf2id 24(%2,%0,2) , %%mm3 \n\t"
+ "packssdw %%mm1 , %%mm0 \n\t"
+ "packssdw %%mm3 , %%mm2 \n\t"
+ "movq %%mm0 , (%1,%0) \n\t"
+ "movq %%mm2 , 8(%1,%0) \n\t"
+ "add $16 , %0 \n\t"
+ " js 1b \n\t"
+ "femms \n\t"
+ :"+r"(reglen), "+r"(dst), "+r"(src)
+ );
+}
+static void float_to_int16_sse(int16_t *dst, const float *src, long len){
+ x86_reg reglen = len;
+ __asm__ volatile(
+ "add %0 , %0 \n\t"
+ "lea (%2,%0,2) , %2 \n\t"
+ "add %0 , %1 \n\t"
+ "neg %0 \n\t"
+ "1: \n\t"
+ "cvtps2pi (%2,%0,2) , %%mm0 \n\t"
+ "cvtps2pi 8(%2,%0,2) , %%mm1 \n\t"
+ "cvtps2pi 16(%2,%0,2) , %%mm2 \n\t"
+ "cvtps2pi 24(%2,%0,2) , %%mm3 \n\t"
+ "packssdw %%mm1 , %%mm0 \n\t"
+ "packssdw %%mm3 , %%mm2 \n\t"
+ "movq %%mm0 , (%1,%0) \n\t"
+ "movq %%mm2 , 8(%1,%0) \n\t"
+ "add $16 , %0 \n\t"
+ " js 1b \n\t"
+ "emms \n\t"
+ :"+r"(reglen), "+r"(dst), "+r"(src)
+ );
+}
+
+static void float_to_int16_sse2(int16_t *dst, const float *src, long len){
+ x86_reg reglen = len;
+ __asm__ volatile(
+ "add %0 , %0 \n\t"
+ "lea (%2,%0,2) , %2 \n\t"
+ "add %0 , %1 \n\t"
+ "neg %0 \n\t"
+ "1: \n\t"
+ "cvtps2dq (%2,%0,2) , %%xmm0 \n\t"
+ "cvtps2dq 16(%2,%0,2) , %%xmm1 \n\t"
+ "packssdw %%xmm1 , %%xmm0 \n\t"
+ "movdqa %%xmm0 , (%1,%0) \n\t"
+ "add $16 , %0 \n\t"
+ " js 1b \n\t"
+ :"+r"(reglen), "+r"(dst), "+r"(src)
+ );
+}
+
+void ff_float_to_int16_interleave6_sse(int16_t *dst, const float **src, int len);
+void ff_float_to_int16_interleave6_3dnow(int16_t *dst, const float **src, int len);
+void ff_float_to_int16_interleave6_3dn2(int16_t *dst, const float **src, int len);
+int32_t ff_scalarproduct_int16_mmx2(int16_t *v1, int16_t *v2, int order, int shift);
+int32_t ff_scalarproduct_int16_sse2(int16_t *v1, int16_t *v2, int order, int shift);
+int32_t ff_scalarproduct_and_madd_int16_mmx2(int16_t *v1, int16_t *v2, int16_t *v3, int order, int mul);
+int32_t ff_scalarproduct_and_madd_int16_sse2(int16_t *v1, int16_t *v2, int16_t *v3, int order, int mul);
+int32_t ff_scalarproduct_and_madd_int16_ssse3(int16_t *v1, int16_t *v2, int16_t *v3, int order, int mul);
+void ff_add_hfyu_median_prediction_mmx2(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top);
+int ff_add_hfyu_left_prediction_ssse3(uint8_t *dst, const uint8_t *src, int w, int left);
+int ff_add_hfyu_left_prediction_sse4(uint8_t *dst, const uint8_t *src, int w, int left);
+void ff_x264_deblock_v_luma_sse2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0);
+void ff_x264_deblock_h_luma_sse2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0);
+void ff_x264_deblock_h_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta);
+void ff_x264_deblock_v_luma_intra_sse2(uint8_t *pix, int stride, int alpha, int beta);
+void ff_x264_deblock_h_luma_intra_sse2(uint8_t *pix, int stride, int alpha, int beta);
+
+#if HAVE_YASM && ARCH_X86_32
+void ff_x264_deblock_v8_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta);
+static void ff_x264_deblock_v_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta)
+{
+ ff_x264_deblock_v8_luma_intra_mmxext(pix+0, stride, alpha, beta);
+ ff_x264_deblock_v8_luma_intra_mmxext(pix+8, stride, alpha, beta);
+}
+#elif !HAVE_YASM
+#define ff_float_to_int16_interleave6_sse(a,b,c) float_to_int16_interleave_misc_sse(a,b,c,6)
+#define ff_float_to_int16_interleave6_3dnow(a,b,c) float_to_int16_interleave_misc_3dnow(a,b,c,6)
+#define ff_float_to_int16_interleave6_3dn2(a,b,c) float_to_int16_interleave_misc_3dnow(a,b,c,6)
+#endif
+#define ff_float_to_int16_interleave6_sse2 ff_float_to_int16_interleave6_sse
+
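+/* generate float_to_int16_interleave_{3dnow,sse,sse2}: mono converts
+   directly, stereo uses the inline-asm body, 6 channels call the external
+   interleave6 routines, anything else goes through the generic per-channel
+   helper. */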
+#define FLOAT_TO_INT16_INTERLEAVE(cpu, body) \
+/* gcc pessimizes register allocation if this is in the same function as float_to_int16_interleave_sse2 */\
+static av_noinline void float_to_int16_interleave_misc_##cpu(int16_t *dst, const float **src, long len, int channels){\
+ DECLARE_ALIGNED_16(int16_t, tmp)[len];\
+ int i,j,c;\
+ for(c=0; c<channels; c++){\
+ float_to_int16_##cpu(tmp, src[c], len);\
+ for(i=0, j=c; i<len; i++, j+=channels)\
+ dst[j] = tmp[i];\
+ }\
+}\
+\
+static void float_to_int16_interleave_##cpu(int16_t *dst, const float **src, long len, int channels){\
+ if(channels==1)\
+ float_to_int16_##cpu(dst, src[0], len);\
+ else if(channels==2){\
+ x86_reg reglen = len; \
+ const float *src0 = src[0];\
+ const float *src1 = src[1];\
+ __asm__ volatile(\
+ "shl $2, %0 \n"\
+ "add %0, %1 \n"\
+ "add %0, %2 \n"\
+ "add %0, %3 \n"\
+ "neg %0 \n"\
+ body\
+ :"+r"(reglen), "+r"(dst), "+r"(src0), "+r"(src1)\
+ );\
+ }else if(channels==6){\
+ ff_float_to_int16_interleave6_##cpu(dst, src, len);\
+ }else\
+ float_to_int16_interleave_misc_##cpu(dst, src, len, channels);\
+}
+
+FLOAT_TO_INT16_INTERLEAVE(3dnow,
+ "1: \n"
+ "pf2id (%2,%0), %%mm0 \n"
+ "pf2id 8(%2,%0), %%mm1 \n"
+ "pf2id (%3,%0), %%mm2 \n"
+ "pf2id 8(%3,%0), %%mm3 \n"
+ "packssdw %%mm1, %%mm0 \n"
+ "packssdw %%mm3, %%mm2 \n"
+ "movq %%mm0, %%mm1 \n"
+ "punpcklwd %%mm2, %%mm0 \n"
+ "punpckhwd %%mm2, %%mm1 \n"
+ "movq %%mm0, (%1,%0)\n"
+ "movq %%mm1, 8(%1,%0)\n"
+ "add $16, %0 \n"
+ "js 1b \n"
+ "femms \n"
+)
+
+FLOAT_TO_INT16_INTERLEAVE(sse,
+ "1: \n"
+ "cvtps2pi (%2,%0), %%mm0 \n"
+ "cvtps2pi 8(%2,%0), %%mm1 \n"
+ "cvtps2pi (%3,%0), %%mm2 \n"
+ "cvtps2pi 8(%3,%0), %%mm3 \n"
+ "packssdw %%mm1, %%mm0 \n"
+ "packssdw %%mm3, %%mm2 \n"
+ "movq %%mm0, %%mm1 \n"
+ "punpcklwd %%mm2, %%mm0 \n"
+ "punpckhwd %%mm2, %%mm1 \n"
+ "movq %%mm0, (%1,%0)\n"
+ "movq %%mm1, 8(%1,%0)\n"
+ "add $16, %0 \n"
+ "js 1b \n"
+ "emms \n"
+)
+
+FLOAT_TO_INT16_INTERLEAVE(sse2,
+ "1: \n"
+ "cvtps2dq (%2,%0), %%xmm0 \n"
+ "cvtps2dq (%3,%0), %%xmm1 \n"
+ "packssdw %%xmm1, %%xmm0 \n"
+ "movhlps %%xmm0, %%xmm1 \n"
+ "punpcklwd %%xmm1, %%xmm0 \n"
+ "movdqa %%xmm0, (%1,%0) \n"
+ "add $16, %0 \n"
+ "js 1b \n"
+)
+
+static void float_to_int16_interleave_3dn2(int16_t *dst, const float **src, long len, int channels){
+ if(channels==6)
+ ff_float_to_int16_interleave6_3dn2(dst, src, len);
+ else
+ float_to_int16_interleave_3dnow(dst, src, len, channels);
+}
+
+float ff_scalarproduct_float_sse(const float *v1, const float *v2, int order);
+
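+/* query the CPU capabilities (optionally overridden by avctx->dsp_mask) and
+   fill the DSPContext with the fastest available MMX/MMX2/3DNow/SSE/SSE2
+   routines, including the IDCT selected through avctx->idct_algo. */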
+void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
+{
+ mm_flags = mm_support();
+
+ if (avctx->dsp_mask) {
+ if (avctx->dsp_mask & FF_MM_FORCE)
+ mm_flags |= (avctx->dsp_mask & 0xffff);
+ else
+ mm_flags &= ~(avctx->dsp_mask & 0xffff);
+ }
+
+#if 0
+ av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
+ if (mm_flags & FF_MM_MMX)
+ av_log(avctx, AV_LOG_INFO, " mmx");
+ if (mm_flags & FF_MM_MMX2)
+ av_log(avctx, AV_LOG_INFO, " mmx2");
+ if (mm_flags & FF_MM_3DNOW)
+ av_log(avctx, AV_LOG_INFO, " 3dnow");
+ if (mm_flags & FF_MM_SSE)
+ av_log(avctx, AV_LOG_INFO, " sse");
+ if (mm_flags & FF_MM_SSE2)
+ av_log(avctx, AV_LOG_INFO, " sse2");
+ av_log(avctx, AV_LOG_INFO, "\n");
+#endif
+
+ if (mm_flags & FF_MM_MMX) {
+ const int idct_algo= avctx->idct_algo;
+
+ if(avctx->lowres==0){
+ if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
+ c->idct_put= ff_simple_idct_put_mmx;
+ c->idct_add= ff_simple_idct_add_mmx;
+ c->idct = ff_simple_idct_mmx;
+ c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
+#if CONFIG_GPL
+ }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
+ if(mm_flags & FF_MM_MMX2){
+ c->idct_put= ff_libmpeg2mmx2_idct_put;
+ c->idct_add= ff_libmpeg2mmx2_idct_add;
+ c->idct = ff_mmxext_idct;
+ }else{
+ c->idct_put= ff_libmpeg2mmx_idct_put;
+ c->idct_add= ff_libmpeg2mmx_idct_add;
+ c->idct = ff_mmx_idct;
+ }
+ c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
+#endif
+ }else if((CONFIG_VP3_DECODER || CONFIG_VP5_DECODER || CONFIG_VP6_DECODER) &&
+ idct_algo==FF_IDCT_VP3){
+ if(mm_flags & FF_MM_SSE2){
+ c->idct_put= ff_vp3_idct_put_sse2;
+ c->idct_add= ff_vp3_idct_add_sse2;
+ c->idct = ff_vp3_idct_sse2;
+ c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
+ }else{
+ c->idct_put= ff_vp3_idct_put_mmx;
+ c->idct_add= ff_vp3_idct_add_mmx;
+ c->idct = ff_vp3_idct_mmx;
+ c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM;
+ }
+ }else if(idct_algo==FF_IDCT_CAVS){
+ c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
+ }else if(idct_algo==FF_IDCT_XVIDMMX){
+ if(mm_flags & FF_MM_SSE2){
+ c->idct_put= ff_idct_xvid_sse2_put;
+ c->idct_add= ff_idct_xvid_sse2_add;
+ c->idct = ff_idct_xvid_sse2;
+ c->idct_permutation_type= FF_SSE2_IDCT_PERM;
+ }else if(mm_flags & FF_MM_MMX2){
+ c->idct_put= ff_idct_xvid_mmx2_put;
+ c->idct_add= ff_idct_xvid_mmx2_add;
+ c->idct = ff_idct_xvid_mmx2;
+ }else{
+ c->idct_put= ff_idct_xvid_mmx_put;
+ c->idct_add= ff_idct_xvid_mmx_add;
+ c->idct = ff_idct_xvid_mmx;
+ }
+ }
+ }
+
+ c->put_pixels_clamped = put_pixels_clamped_mmx;
+ c->put_signed_pixels_clamped = put_signed_pixels_clamped_mmx;
+ c->add_pixels_clamped = add_pixels_clamped_mmx;
+ c->clear_block = clear_block_mmx;
+ c->clear_blocks = clear_blocks_mmx;
+ if (mm_flags & FF_MM_SSE){
+ c->clear_block = clear_block_sse;
+ c->clear_blocks = clear_blocks_sse;
+ }
+
+#define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \
+ c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _ ## CPU; \
+ c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \
+ c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \
+ c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU
+
+ SET_HPEL_FUNCS(put, 0, 16, mmx);
+ SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx);
+ SET_HPEL_FUNCS(avg, 0, 16, mmx);
+ SET_HPEL_FUNCS(avg_no_rnd, 0, 16, mmx);
+ SET_HPEL_FUNCS(put, 1, 8, mmx);
+ SET_HPEL_FUNCS(put_no_rnd, 1, 8, mmx);
+ SET_HPEL_FUNCS(avg, 1, 8, mmx);
+ SET_HPEL_FUNCS(avg_no_rnd, 1, 8, mmx);
+
+ c->gmc= gmc_mmx;
+
+ c->add_bytes= add_bytes_mmx;
+ c->add_bytes_l2= add_bytes_l2_mmx;
+
+ c->draw_edges = draw_edges_mmx;
+
+ if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
+ c->h263_v_loop_filter= h263_v_loop_filter_mmx;
+ c->h263_h_loop_filter= h263_h_loop_filter_mmx;
+ }
+ c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx_rnd;
+ c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_mmx;
+ c->put_no_rnd_vc1_chroma_pixels_tab[0]= put_vc1_chroma_mc8_mmx_nornd;
+
+ c->put_rv40_chroma_pixels_tab[0]= put_rv40_chroma_mc8_mmx;
+ c->put_rv40_chroma_pixels_tab[1]= put_rv40_chroma_mc4_mmx;
+
+ c->h264_idct_dc_add=
+ c->h264_idct_add= ff_h264_idct_add_mmx;
+ c->h264_idct8_dc_add=
+ c->h264_idct8_add= ff_h264_idct8_add_mmx;
+
+ c->h264_idct_add16 = ff_h264_idct_add16_mmx;
+ c->h264_idct8_add4 = ff_h264_idct8_add4_mmx;
+ c->h264_idct_add8 = ff_h264_idct_add8_mmx;
+ c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx;
+
+ if (CONFIG_VP6_DECODER) {
+ c->vp6_filter_diag4 = ff_vp6_filter_diag4_mmx;
+ }
+
+ if (mm_flags & FF_MM_MMX2) {
+ c->prefetch = prefetch_mmx2;
+
+ c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
+ c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;
+
+ c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
+ c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
+ c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;
+
+ c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
+ c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;
+
+ c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
+ c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
+ c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;
+
+ c->h264_idct_dc_add= ff_h264_idct_dc_add_mmx2;
+ c->h264_idct8_dc_add= ff_h264_idct8_dc_add_mmx2;
+ c->h264_idct_add16 = ff_h264_idct_add16_mmx2;
+ c->h264_idct8_add4 = ff_h264_idct8_add4_mmx2;
+ c->h264_idct_add8 = ff_h264_idct_add8_mmx2;
+ c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx2;
+
+ if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
+ c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
+ c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
+ c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
+ c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
+ c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
+ c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;
+
+ if (CONFIG_VP3_DECODER) {
+ c->vp3_v_loop_filter= ff_vp3_v_loop_filter_mmx2;
+ c->vp3_h_loop_filter= ff_vp3_h_loop_filter_mmx2;
+ }
+ }
+
+#define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU) \
+ c->PFX ## _pixels_tab[IDX][ 0] = PFX ## SIZE ## _mc00_ ## CPU; \
+ c->PFX ## _pixels_tab[IDX][ 1] = PFX ## SIZE ## _mc10_ ## CPU; \
+ c->PFX ## _pixels_tab[IDX][ 2] = PFX ## SIZE ## _mc20_ ## CPU; \
+ c->PFX ## _pixels_tab[IDX][ 3] = PFX ## SIZE ## _mc30_ ## CPU; \
+ c->PFX ## _pixels_tab[IDX][ 4] = PFX ## SIZE ## _mc01_ ## CPU; \
+ c->PFX ## _pixels_tab[IDX][ 5] = PFX ## SIZE ## _mc11_ ## CPU; \
+ c->PFX ## _pixels_tab[IDX][ 6] = PFX ## SIZE ## _mc21_ ## CPU; \
+ c->PFX ## _pixels_tab[IDX][ 7] = PFX ## SIZE ## _mc31_ ## CPU; \
+ c->PFX ## _pixels_tab[IDX][ 8] = PFX ## SIZE ## _mc02_ ## CPU; \
+ c->PFX ## _pixels_tab[IDX][ 9] = PFX ## SIZE ## _mc12_ ## CPU; \
+ c->PFX ## _pixels_tab[IDX][10] = PFX ## SIZE ## _mc22_ ## CPU; \
+ c->PFX ## _pixels_tab[IDX][11] = PFX ## SIZE ## _mc32_ ## CPU; \
+ c->PFX ## _pixels_tab[IDX][12] = PFX ## SIZE ## _mc03_ ## CPU; \
+ c->PFX ## _pixels_tab[IDX][13] = PFX ## SIZE ## _mc13_ ## CPU; \
+ c->PFX ## _pixels_tab[IDX][14] = PFX ## SIZE ## _mc23_ ## CPU; \
+ c->PFX ## _pixels_tab[IDX][15] = PFX ## SIZE ## _mc33_ ## CPU
+
+ SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2);
+ SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2);
+ SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmx2);
+ SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmx2);
+ SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2);
+ SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2);
+
+ SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2);
+ SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2);
+ SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2);
+ SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmx2);
+ SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmx2);
+ SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmx2);
+
+ SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmx2);
+ SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmx2);
+ SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2);
+ SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2);
+
+ c->avg_rv40_chroma_pixels_tab[0]= avg_rv40_chroma_mc8_mmx2;
+ c->avg_rv40_chroma_pixels_tab[1]= avg_rv40_chroma_mc4_mmx2;
+
+ c->avg_no_rnd_vc1_chroma_pixels_tab[0]= avg_vc1_chroma_mc8_mmx2_nornd;
+
+ c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_mmx2_rnd;
+ c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_mmx2;
+ c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_mmx2;
+ c->put_h264_chroma_pixels_tab[2]= put_h264_chroma_mc2_mmx2;
+ c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_mmx2;
+ c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_mmx2;
+ c->h264_v_loop_filter_chroma= h264_v_loop_filter_chroma_mmx2;
+ c->h264_h_loop_filter_chroma= h264_h_loop_filter_chroma_mmx2;
+ c->h264_v_loop_filter_chroma_intra= h264_v_loop_filter_chroma_intra_mmx2;
+ c->h264_h_loop_filter_chroma_intra= h264_h_loop_filter_chroma_intra_mmx2;
+ c->h264_loop_filter_strength= h264_loop_filter_strength_mmx2;
+
+ c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2;
+ c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2;
+ c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_mmx2;
+ c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_mmx2;
+ c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_mmx2;
+ c->weight_h264_pixels_tab[5]= ff_h264_weight_4x8_mmx2;
+ c->weight_h264_pixels_tab[6]= ff_h264_weight_4x4_mmx2;
+ c->weight_h264_pixels_tab[7]= ff_h264_weight_4x2_mmx2;
+
+ c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_mmx2;
+ c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_mmx2;
+ c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_mmx2;
+ c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_mmx2;
+ c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_mmx2;
+ c->biweight_h264_pixels_tab[5]= ff_h264_biweight_4x8_mmx2;
+ c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2;
+ c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2;
+
+#if HAVE_YASM && ARCH_X86_32
+ c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2;
+#endif
+#if HAVE_7REGS && HAVE_TEN_OPERANDS
+ if( mm_flags&FF_MM_3DNOW )
+ c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
+#endif
+
+ if (CONFIG_CAVS_DECODER)
+ ff_cavsdsp_init_mmx2(c, avctx);
+
+ if (CONFIG_VC1_DECODER)
+ ff_vc1dsp_init_mmx(c, avctx);
+
+ c->add_png_paeth_prediction= add_png_paeth_prediction_mmx2;
+ } else if (mm_flags & FF_MM_3DNOW) {
+ c->prefetch = prefetch_3dnow;
+
+ c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
+ c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;
+
+ c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
+ c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
+ c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;
+
+ c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
+ c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;
+
+ c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
+ c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
+ c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;
+
+ if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
+ c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
+ c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
+ c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
+ c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
+ c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
+ c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
+ }
+
+ SET_QPEL_FUNCS(put_qpel, 0, 16, 3dnow);
+ SET_QPEL_FUNCS(put_qpel, 1, 8, 3dnow);
+ SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, 3dnow);
+ SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, 3dnow);
+ SET_QPEL_FUNCS(avg_qpel, 0, 16, 3dnow);
+ SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow);
+
+ SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow);
+ SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow);
+ SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow);
+ SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 3dnow);
+ SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 3dnow);
+ SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 3dnow);
+
+ SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, 3dnow);
+ SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, 3dnow);
+ SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow);
+ SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow);
+
+ c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_3dnow_rnd;
+ c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_3dnow;
+
+ c->avg_rv40_chroma_pixels_tab[0]= avg_rv40_chroma_mc8_3dnow;
+ c->avg_rv40_chroma_pixels_tab[1]= avg_rv40_chroma_mc4_3dnow;
+
+ if (CONFIG_CAVS_DECODER)
+ ff_cavsdsp_init_3dnow(c, avctx);
+ }
+
+
+#define H264_QPEL_FUNCS(x, y, CPU)\
+ c->put_h264_qpel_pixels_tab[0][x+y*4] = put_h264_qpel16_mc##x##y##_##CPU;\
+ c->put_h264_qpel_pixels_tab[1][x+y*4] = put_h264_qpel8_mc##x##y##_##CPU;\
+ c->avg_h264_qpel_pixels_tab[0][x+y*4] = avg_h264_qpel16_mc##x##y##_##CPU;\
+ c->avg_h264_qpel_pixels_tab[1][x+y*4] = avg_h264_qpel8_mc##x##y##_##CPU;
+ if((mm_flags & FF_MM_SSE2) && !(mm_flags & FF_MM_3DNOW)){
+ // these functions are slower than mmx on AMD, but faster on Intel
+/* FIXME works in most codecs, but crashes svq1 due to unaligned chroma
+ c->put_pixels_tab[0][0] = put_pixels16_sse2;
+ c->avg_pixels_tab[0][0] = avg_pixels16_sse2;
+*/
+ H264_QPEL_FUNCS(0, 0, sse2);
+ }
+ if(mm_flags & FF_MM_SSE2){
+ c->h264_idct8_add = ff_h264_idct8_add_sse2;
+ c->h264_idct8_add4= ff_h264_idct8_add4_sse2;
+
+ H264_QPEL_FUNCS(0, 1, sse2);
+ H264_QPEL_FUNCS(0, 2, sse2);
+ H264_QPEL_FUNCS(0, 3, sse2);
+ H264_QPEL_FUNCS(1, 1, sse2);
+ H264_QPEL_FUNCS(1, 2, sse2);
+ H264_QPEL_FUNCS(1, 3, sse2);
+ H264_QPEL_FUNCS(2, 1, sse2);
+ H264_QPEL_FUNCS(2, 2, sse2);
+ H264_QPEL_FUNCS(2, 3, sse2);
+ H264_QPEL_FUNCS(3, 1, sse2);
+ H264_QPEL_FUNCS(3, 2, sse2);
+ H264_QPEL_FUNCS(3, 3, sse2);
+
+ if (CONFIG_VP6_DECODER) {
+ c->vp6_filter_diag4 = ff_vp6_filter_diag4_sse2;
+ }
+ }
+#if HAVE_SSSE3
+ if(mm_flags & FF_MM_SSSE3){
+ H264_QPEL_FUNCS(1, 0, ssse3);
+ H264_QPEL_FUNCS(1, 1, ssse3);
+ H264_QPEL_FUNCS(1, 2, ssse3);
+ H264_QPEL_FUNCS(1, 3, ssse3);
+ H264_QPEL_FUNCS(2, 0, ssse3);
+ H264_QPEL_FUNCS(2, 1, ssse3);
+ H264_QPEL_FUNCS(2, 2, ssse3);
+ H264_QPEL_FUNCS(2, 3, ssse3);
+ H264_QPEL_FUNCS(3, 0, ssse3);
+ H264_QPEL_FUNCS(3, 1, ssse3);
+ H264_QPEL_FUNCS(3, 2, ssse3);
+ H264_QPEL_FUNCS(3, 3, ssse3);
+ c->put_no_rnd_vc1_chroma_pixels_tab[0]= put_vc1_chroma_mc8_ssse3_nornd;
+ c->avg_no_rnd_vc1_chroma_pixels_tab[0]= avg_vc1_chroma_mc8_ssse3_nornd;
+ c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_ssse3_rnd;
+ c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_ssse3_rnd;
+ c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_ssse3;
+ c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_ssse3;
+ c->add_png_paeth_prediction= add_png_paeth_prediction_ssse3;
+#if HAVE_YASM && ARCH_X86_32
+ c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3;
+ if (mm_flags & FF_MM_SSE4) // not really sse4, just slow on Conroe
+ c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4;
+#endif
+ }
+#endif
+
+#if CONFIG_GPL && HAVE_YASM && ARCH_X86_32
+ if (mm_flags & FF_MM_MMX2){
+#if ARCH_X86_32
+ c->h264_v_loop_filter_luma_intra = ff_x264_deblock_v_luma_intra_mmxext;
+ c->h264_h_loop_filter_luma_intra = ff_x264_deblock_h_luma_intra_mmxext;
+#endif
+ if( mm_flags&FF_MM_SSE2 ){
+#if ARCH_X86_64 || !defined(__ICC) || __ICC > 1110
+ c->h264_v_loop_filter_luma = ff_x264_deblock_v_luma_sse2;
+ c->h264_h_loop_filter_luma = ff_x264_deblock_h_luma_sse2;
+ c->h264_v_loop_filter_luma_intra = ff_x264_deblock_v_luma_intra_sse2;
+ c->h264_h_loop_filter_luma_intra = ff_x264_deblock_h_luma_intra_sse2;
+#endif
+ c->h264_idct_add16 = ff_h264_idct_add16_sse2;
+ c->h264_idct_add8 = ff_h264_idct_add8_sse2;
+ c->h264_idct_add16intra = ff_h264_idct_add16intra_sse2;
+ }
+ }
+#endif
+
+/* disable audio-related ASM for 64-bit builds */
+#if ARCH_X86_32
+ if(mm_flags & FF_MM_3DNOW){
+ c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
+ c->vector_fmul = vector_fmul_3dnow;
+ if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
+ c->float_to_int16 = float_to_int16_3dnow;
+ c->float_to_int16_interleave = float_to_int16_interleave_3dnow;
+ }
+ }
+ if(mm_flags & FF_MM_3DNOWEXT){
+ c->vector_fmul_reverse = vector_fmul_reverse_3dnow2;
+ c->vector_fmul_window = vector_fmul_window_3dnow2;
+ if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
+ c->float_to_int16_interleave = float_to_int16_interleave_3dn2;
+ }
+ }
+ if(mm_flags & FF_MM_SSE){
+ c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
+ c->ac3_downmix = ac3_downmix_sse;
+ c->vector_fmul = vector_fmul_sse;
+ c->vector_fmul_reverse = vector_fmul_reverse_sse;
+ c->vector_fmul_add = vector_fmul_add_sse;
+ c->vector_fmul_window = vector_fmul_window_sse;
+ c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse;
+ c->float_to_int16 = float_to_int16_sse;
+ c->float_to_int16_interleave = float_to_int16_interleave_sse;
+#if CONFIG_AAC_DECODER
+#if HAVE_YASM
+ c->scalarproduct_float = ff_scalarproduct_float_sse;
+#endif
+#endif
+ }
+ if(mm_flags & FF_MM_3DNOW)
+ c->vector_fmul_add = vector_fmul_add_3dnow; // faster than sse
+ if(mm_flags & FF_MM_SSE2){
+ c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse2;
+ c->float_to_int16 = float_to_int16_sse2;
+ c->float_to_int16_interleave = float_to_int16_interleave_sse2;
+ }
+#endif /* ARCH_X86_32 */
+ }
+
+ if (CONFIG_ENCODERS)
+ dsputilenc_init_mmx(c, avctx);
+}
+
+const char* avcodec_get_current_idct_mmx(AVCodecContext *avctx,DSPContext *c)
+{
+ if (c->idct_put==ff_idct_xvid_mmx_put)
+ return "Xvid (ff_idct_xvid_mmx)";
+ if (c->idct_put==ff_idct_xvid_mmx2_put)
+ return "Xvid (ff_idct_xvid_mmx2)";
+ if (c->idct_put==ff_idct_xvid_sse2_put)
+ return "Xvid (ff_idct_xvid_sse2)";
+ if (c->idct_put==ff_simple_idct_put_mmx)
+ return "Simple MMX (ff_simple_idct_mmx)";
+ if (c->idct_put==ff_libmpeg2mmx2_idct_put)
+ return "libmpeg2 (ff_libmpeg2mmx2_idct)";
+ if (c->idct_put==ff_libmpeg2mmx_idct_put)
+ return "libmpeg2 (ff_libmpeg2mmx_idct)";
+ if (c->idct_put==ff_vp3_idct_put_sse2)
+ return "VP3 (ff_vp3_idct_sse2)";
+ if (c->idct_put==ff_vp3_idct_put_mmx)
+ return "VP3 (ff_vp3_idct_mmx)";
+ return NULL;
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/dsputil_mmx.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/dsputil_mmx.h
new file mode 100644
index 000000000..c0ef49b30
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/dsputil_mmx.h
@@ -0,0 +1,159 @@
+/*
+ * MMX optimized DSP utils
+ * Copyright (c) 2007 Aurelien Jacobs <aurel@gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_X86_DSPUTIL_MMX_H
+#define AVCODEC_X86_DSPUTIL_MMX_H
+
+#include <stdint.h>
+#include "libavcodec/dsputil.h"
+
+typedef struct { uint64_t a, b; } xmm_reg;
+
+extern const uint64_t ff_bone;
+extern const uint64_t ff_wtwo;
+
+extern const uint64_t ff_pdw_80000000[2];
+
+extern const uint64_t ff_pw_3;
+extern const uint64_t ff_pw_4;
+extern const xmm_reg ff_pw_5;
+extern const xmm_reg ff_pw_8;
+extern const uint64_t ff_pw_15;
+extern const xmm_reg ff_pw_16;
+extern const uint64_t ff_pw_20;
+extern const xmm_reg ff_pw_28;
+extern const xmm_reg ff_pw_32;
+extern const uint64_t ff_pw_42;
+extern const xmm_reg ff_pw_64;
+extern const uint64_t ff_pw_96;
+extern const uint64_t ff_pw_128;
+extern const uint64_t ff_pw_255;
+
+extern const uint64_t ff_pb_1;
+extern const uint64_t ff_pb_3;
+extern const uint64_t ff_pb_7;
+extern const uint64_t ff_pb_1F;
+extern const uint64_t ff_pb_3F;
+extern const uint64_t ff_pb_81;
+extern const uint64_t ff_pb_A1;
+extern const uint64_t ff_pb_FC;
+
+extern const double ff_pd_1[2];
+extern const double ff_pd_2[2];
+
+#define LOAD4(stride,in,a,b,c,d)\
+ "movq 0*"#stride"+"#in", "#a"\n\t"\
+ "movq 1*"#stride"+"#in", "#b"\n\t"\
+ "movq 2*"#stride"+"#in", "#c"\n\t"\
+ "movq 3*"#stride"+"#in", "#d"\n\t"
+
+#define STORE4(stride,out,a,b,c,d)\
+ "movq "#a", 0*"#stride"+"#out"\n\t"\
+ "movq "#b", 1*"#stride"+"#out"\n\t"\
+ "movq "#c", 2*"#stride"+"#out"\n\t"\
+ "movq "#d", 3*"#stride"+"#out"\n\t"
+
+/* in/out: mma=mma+mmb, mmb=mmb-mma */
+#define SUMSUB_BA( a, b ) \
+ "paddw "#b", "#a" \n\t"\
+ "paddw "#b", "#b" \n\t"\
+ "psubw "#a", "#b" \n\t"
+
+#define SBUTTERFLY(a,b,t,n,m)\
+ "mov" #m " " #a ", " #t " \n\t" /* abcd */\
+ "punpckl" #n " " #b ", " #a " \n\t" /* aebf */\
+ "punpckh" #n " " #b ", " #t " \n\t" /* cgdh */\
+
+#define TRANSPOSE4(a,b,c,d,t)\
+ SBUTTERFLY(a,b,t,wd,q) /* a=aebf t=cgdh */\
+ SBUTTERFLY(c,d,b,wd,q) /* c=imjn b=kolp */\
+ SBUTTERFLY(a,c,d,dq,q) /* a=aeim d=bfjn */\
+ SBUTTERFLY(t,b,c,dq,q) /* t=cgko c=dhlp */
+
+// e,f,g,h can be memory
+// out: a,d,t,c
+#define TRANSPOSE8x4(a,b,c,d,e,f,g,h,t)\
+ "punpcklbw " #e ", " #a " \n\t" /* a0 e0 a1 e1 a2 e2 a3 e3 */\
+ "punpcklbw " #f ", " #b " \n\t" /* b0 f0 b1 f1 b2 f2 b3 f3 */\
+    "punpcklbw " #g ", " #c " \n\t" /* c0 g0 c1 g1 c2 g2 c3 g3 */\
+ "punpcklbw " #h ", " #d " \n\t" /* d0 h0 d1 h1 d2 h2 d3 h3 */\
+ SBUTTERFLY(a, b, t, bw, q) /* a= a0 b0 e0 f0 a1 b1 e1 f1 */\
+ /* t= a2 b2 e2 f2 a3 b3 e3 f3 */\
+ SBUTTERFLY(c, d, b, bw, q) /* c= c0 d0 g0 h0 c1 d1 g1 h1 */\
+ /* b= c2 d2 g2 h2 c3 d3 g3 h3 */\
+ SBUTTERFLY(a, c, d, wd, q) /* a= a0 b0 c0 d0 e0 f0 g0 h0 */\
+ /* d= a1 b1 c1 d1 e1 f1 g1 h1 */\
+ SBUTTERFLY(t, b, c, wd, q) /* t= a2 b2 c2 d2 e2 f2 g2 h2 */\
+ /* c= a3 b3 c3 d3 e3 f3 g3 h3 */
+
+#if ARCH_X86_64
+// permutes 01234567 -> 05736421
+#define TRANSPOSE8(a,b,c,d,e,f,g,h,t)\
+ SBUTTERFLY(a,b,%%xmm8,wd,dqa)\
+ SBUTTERFLY(c,d,b,wd,dqa)\
+ SBUTTERFLY(e,f,d,wd,dqa)\
+ SBUTTERFLY(g,h,f,wd,dqa)\
+ SBUTTERFLY(a,c,h,dq,dqa)\
+ SBUTTERFLY(%%xmm8,b,c,dq,dqa)\
+ SBUTTERFLY(e,g,b,dq,dqa)\
+ SBUTTERFLY(d,f,g,dq,dqa)\
+ SBUTTERFLY(a,e,f,qdq,dqa)\
+ SBUTTERFLY(%%xmm8,d,e,qdq,dqa)\
+ SBUTTERFLY(h,b,d,qdq,dqa)\
+ SBUTTERFLY(c,g,b,qdq,dqa)\
+ "movdqa %%xmm8, "#g" \n\t"
+#else
+#define TRANSPOSE8(a,b,c,d,e,f,g,h,t)\
+ "movdqa "#h", "#t" \n\t"\
+ SBUTTERFLY(a,b,h,wd,dqa)\
+ "movdqa "#h", 16"#t" \n\t"\
+ "movdqa "#t", "#h" \n\t"\
+ SBUTTERFLY(c,d,b,wd,dqa)\
+ SBUTTERFLY(e,f,d,wd,dqa)\
+ SBUTTERFLY(g,h,f,wd,dqa)\
+ SBUTTERFLY(a,c,h,dq,dqa)\
+ "movdqa "#h", "#t" \n\t"\
+ "movdqa 16"#t", "#h" \n\t"\
+ SBUTTERFLY(h,b,c,dq,dqa)\
+ SBUTTERFLY(e,g,b,dq,dqa)\
+ SBUTTERFLY(d,f,g,dq,dqa)\
+ SBUTTERFLY(a,e,f,qdq,dqa)\
+ SBUTTERFLY(h,d,e,qdq,dqa)\
+ "movdqa "#h", 16"#t" \n\t"\
+ "movdqa "#t", "#h" \n\t"\
+ SBUTTERFLY(h,b,d,qdq,dqa)\
+ SBUTTERFLY(c,g,b,qdq,dqa)\
+ "movdqa 16"#t", "#g" \n\t"
+#endif
+
+#define MOVQ_WONE(regd) \
+ __asm__ volatile ( \
+ "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
+ "psrlw $15, %%" #regd ::)
+
+void dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx);
+void dsputil_init_pix_mmx(DSPContext* c, AVCodecContext *avctx);
+
+void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size);
+void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size);
+void put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size);
+
+#endif /* AVCODEC_X86_DSPUTIL_MMX_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/dsputil_mmx_avg_template.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/dsputil_mmx_avg_template.c
new file mode 100644
index 000000000..218d5995a
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/dsputil_mmx_avg_template.c
@@ -0,0 +1,896 @@
+/*
+ * DSP utils: average functions are compiled twice for 3dnow/mmx2
+ * Copyright (c) 2000, 2001 Fabrice Bellard
+ * Copyright (c) 2002-2004 Michael Niedermayer
+ *
+ * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
+ * mostly rewritten by Michael Niedermayer <michaelni@gmx.at>
+ * and improved by Zdenek Kabelac <kabi@users.sf.net>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* XXX: we use explicit registers to avoid a gcc 2.95.2 register asm
+ clobber bug - now it will work with 2.95.2 and also with -fPIC
+ */
+static void DEF(put_pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+{
+ __asm__ volatile(
+ "lea (%3, %3), %%"REG_a" \n\t"
+ "1: \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ PAVGB" 1(%1), %%mm0 \n\t"
+ PAVGB" 1(%1, %3), %%mm1 \n\t"
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm1, (%2, %3) \n\t"
+ "add %%"REG_a", %1 \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ PAVGB" 1(%1), %%mm0 \n\t"
+ PAVGB" 1(%1, %3), %%mm1 \n\t"
+ "add %%"REG_a", %1 \n\t"
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm1, (%2, %3) \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+ :"+g"(h), "+S"(pixels), "+D"(block)
+ :"r" ((x86_reg)line_size)
+ :"%"REG_a, "memory");
+}
+
+static void DEF(put_pixels4_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
+{
+ __asm__ volatile(
+ "testl $1, %0 \n\t"
+ " jz 1f \n\t"
+ "movd (%1), %%mm0 \n\t"
+ "movd (%2), %%mm1 \n\t"
+ "add %4, %1 \n\t"
+ "add $4, %2 \n\t"
+ PAVGB" %%mm1, %%mm0 \n\t"
+ "movd %%mm0, (%3) \n\t"
+ "add %5, %3 \n\t"
+ "decl %0 \n\t"
+ "1: \n\t"
+ "movd (%1), %%mm0 \n\t"
+ "add %4, %1 \n\t"
+ "movd (%1), %%mm1 \n\t"
+ "movd (%2), %%mm2 \n\t"
+ "movd 4(%2), %%mm3 \n\t"
+ "add %4, %1 \n\t"
+ PAVGB" %%mm2, %%mm0 \n\t"
+ PAVGB" %%mm3, %%mm1 \n\t"
+ "movd %%mm0, (%3) \n\t"
+ "add %5, %3 \n\t"
+ "movd %%mm1, (%3) \n\t"
+ "add %5, %3 \n\t"
+ "movd (%1), %%mm0 \n\t"
+ "add %4, %1 \n\t"
+ "movd (%1), %%mm1 \n\t"
+ "movd 8(%2), %%mm2 \n\t"
+ "movd 12(%2), %%mm3 \n\t"
+ "add %4, %1 \n\t"
+ PAVGB" %%mm2, %%mm0 \n\t"
+ PAVGB" %%mm3, %%mm1 \n\t"
+ "movd %%mm0, (%3) \n\t"
+ "add %5, %3 \n\t"
+ "movd %%mm1, (%3) \n\t"
+ "add %5, %3 \n\t"
+ "add $16, %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used
+ :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst)
+#else
+ :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst)
+#endif
+ :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride)
+ :"memory");
+}
+
+
+static void DEF(put_pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
+{
+ __asm__ volatile(
+ "testl $1, %0 \n\t"
+ " jz 1f \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq (%2), %%mm1 \n\t"
+ "add %4, %1 \n\t"
+ "add $8, %2 \n\t"
+ PAVGB" %%mm1, %%mm0 \n\t"
+ "movq %%mm0, (%3) \n\t"
+ "add %5, %3 \n\t"
+ "decl %0 \n\t"
+ "1: \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "add %4, %1 \n\t"
+ "movq (%1), %%mm1 \n\t"
+ "add %4, %1 \n\t"
+ PAVGB" (%2), %%mm0 \n\t"
+ PAVGB" 8(%2), %%mm1 \n\t"
+ "movq %%mm0, (%3) \n\t"
+ "add %5, %3 \n\t"
+ "movq %%mm1, (%3) \n\t"
+ "add %5, %3 \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "add %4, %1 \n\t"
+ "movq (%1), %%mm1 \n\t"
+ "add %4, %1 \n\t"
+ PAVGB" 16(%2), %%mm0 \n\t"
+ PAVGB" 24(%2), %%mm1 \n\t"
+ "movq %%mm0, (%3) \n\t"
+ "add %5, %3 \n\t"
+ "movq %%mm1, (%3) \n\t"
+ "add %5, %3 \n\t"
+ "add $32, %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used
+ :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst)
+#else
+ :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst)
+#endif
+ :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride)
+ :"memory");
+//the following generic constraints would be preferable, but gcc does not handle them well ...
+/* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst)
+ :"r"(src1Stride), "r"(dstStride)
+ :"memory");*/
+}
+
+static void DEF(put_no_rnd_pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
+{
+ __asm__ volatile(
+ "pcmpeqb %%mm6, %%mm6 \n\t"
+ "testl $1, %0 \n\t"
+ " jz 1f \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq (%2), %%mm1 \n\t"
+ "add %4, %1 \n\t"
+ "add $8, %2 \n\t"
+ "pxor %%mm6, %%mm0 \n\t"
+ "pxor %%mm6, %%mm1 \n\t"
+ PAVGB" %%mm1, %%mm0 \n\t"
+ "pxor %%mm6, %%mm0 \n\t"
+ "movq %%mm0, (%3) \n\t"
+ "add %5, %3 \n\t"
+ "decl %0 \n\t"
+ "1: \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "add %4, %1 \n\t"
+ "movq (%1), %%mm1 \n\t"
+ "add %4, %1 \n\t"
+ "movq (%2), %%mm2 \n\t"
+ "movq 8(%2), %%mm3 \n\t"
+ "pxor %%mm6, %%mm0 \n\t"
+ "pxor %%mm6, %%mm1 \n\t"
+ "pxor %%mm6, %%mm2 \n\t"
+ "pxor %%mm6, %%mm3 \n\t"
+ PAVGB" %%mm2, %%mm0 \n\t"
+ PAVGB" %%mm3, %%mm1 \n\t"
+ "pxor %%mm6, %%mm0 \n\t"
+ "pxor %%mm6, %%mm1 \n\t"
+ "movq %%mm0, (%3) \n\t"
+ "add %5, %3 \n\t"
+ "movq %%mm1, (%3) \n\t"
+ "add %5, %3 \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "add %4, %1 \n\t"
+ "movq (%1), %%mm1 \n\t"
+ "add %4, %1 \n\t"
+ "movq 16(%2), %%mm2 \n\t"
+ "movq 24(%2), %%mm3 \n\t"
+ "pxor %%mm6, %%mm0 \n\t"
+ "pxor %%mm6, %%mm1 \n\t"
+ "pxor %%mm6, %%mm2 \n\t"
+ "pxor %%mm6, %%mm3 \n\t"
+ PAVGB" %%mm2, %%mm0 \n\t"
+ PAVGB" %%mm3, %%mm1 \n\t"
+ "pxor %%mm6, %%mm0 \n\t"
+ "pxor %%mm6, %%mm1 \n\t"
+ "movq %%mm0, (%3) \n\t"
+ "add %5, %3 \n\t"
+ "movq %%mm1, (%3) \n\t"
+ "add %5, %3 \n\t"
+ "add $32, %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used
+ :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst)
+#else
+ :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst)
+#endif
+ :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride)
+ :"memory");
+//the following generic constraints would be preferable, but gcc does not handle them well ...
+/* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst)
+ :"r"(src1Stride), "r"(dstStride)
+ :"memory");*/
+}
+
+static void DEF(avg_pixels4_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
+{
+ __asm__ volatile(
+ "testl $1, %0 \n\t"
+ " jz 1f \n\t"
+ "movd (%1), %%mm0 \n\t"
+ "movd (%2), %%mm1 \n\t"
+ "add %4, %1 \n\t"
+ "add $4, %2 \n\t"
+ PAVGB" %%mm1, %%mm0 \n\t"
+ PAVGB" (%3), %%mm0 \n\t"
+ "movd %%mm0, (%3) \n\t"
+ "add %5, %3 \n\t"
+ "decl %0 \n\t"
+ "1: \n\t"
+ "movd (%1), %%mm0 \n\t"
+ "add %4, %1 \n\t"
+ "movd (%1), %%mm1 \n\t"
+ "add %4, %1 \n\t"
+ PAVGB" (%2), %%mm0 \n\t"
+ PAVGB" 4(%2), %%mm1 \n\t"
+ PAVGB" (%3), %%mm0 \n\t"
+ "movd %%mm0, (%3) \n\t"
+ "add %5, %3 \n\t"
+ PAVGB" (%3), %%mm1 \n\t"
+ "movd %%mm1, (%3) \n\t"
+ "add %5, %3 \n\t"
+ "movd (%1), %%mm0 \n\t"
+ "add %4, %1 \n\t"
+ "movd (%1), %%mm1 \n\t"
+ "add %4, %1 \n\t"
+ PAVGB" 8(%2), %%mm0 \n\t"
+ PAVGB" 12(%2), %%mm1 \n\t"
+ PAVGB" (%3), %%mm0 \n\t"
+ "movd %%mm0, (%3) \n\t"
+ "add %5, %3 \n\t"
+ PAVGB" (%3), %%mm1 \n\t"
+ "movd %%mm1, (%3) \n\t"
+ "add %5, %3 \n\t"
+ "add $16, %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used
+ :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst)
+#else
+ :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst)
+#endif
+ :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride)
+ :"memory");
+}
+
+
+static void DEF(avg_pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
+{
+ __asm__ volatile(
+ "testl $1, %0 \n\t"
+ " jz 1f \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq (%2), %%mm1 \n\t"
+ "add %4, %1 \n\t"
+ "add $8, %2 \n\t"
+ PAVGB" %%mm1, %%mm0 \n\t"
+ PAVGB" (%3), %%mm0 \n\t"
+ "movq %%mm0, (%3) \n\t"
+ "add %5, %3 \n\t"
+ "decl %0 \n\t"
+ "1: \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "add %4, %1 \n\t"
+ "movq (%1), %%mm1 \n\t"
+ "add %4, %1 \n\t"
+ PAVGB" (%2), %%mm0 \n\t"
+ PAVGB" 8(%2), %%mm1 \n\t"
+ PAVGB" (%3), %%mm0 \n\t"
+ "movq %%mm0, (%3) \n\t"
+ "add %5, %3 \n\t"
+ PAVGB" (%3), %%mm1 \n\t"
+ "movq %%mm1, (%3) \n\t"
+ "add %5, %3 \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "add %4, %1 \n\t"
+ "movq (%1), %%mm1 \n\t"
+ "add %4, %1 \n\t"
+ PAVGB" 16(%2), %%mm0 \n\t"
+ PAVGB" 24(%2), %%mm1 \n\t"
+ PAVGB" (%3), %%mm0 \n\t"
+ "movq %%mm0, (%3) \n\t"
+ "add %5, %3 \n\t"
+ PAVGB" (%3), %%mm1 \n\t"
+ "movq %%mm1, (%3) \n\t"
+ "add %5, %3 \n\t"
+ "add $32, %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used
+ :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst)
+#else
+ :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst)
+#endif
+ :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride)
+ :"memory");
+//the following generic constraints would be preferable, but gcc does not handle them well ...
+/* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst)
+ :"r"(src1Stride), "r"(dstStride)
+ :"memory");*/
+}
+
+static void DEF(put_pixels16_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+{
+ __asm__ volatile(
+ "lea (%3, %3), %%"REG_a" \n\t"
+ "1: \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq 8(%1), %%mm2 \n\t"
+ "movq 8(%1, %3), %%mm3 \n\t"
+ PAVGB" 1(%1), %%mm0 \n\t"
+ PAVGB" 1(%1, %3), %%mm1 \n\t"
+ PAVGB" 9(%1), %%mm2 \n\t"
+ PAVGB" 9(%1, %3), %%mm3 \n\t"
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm1, (%2, %3) \n\t"
+ "movq %%mm2, 8(%2) \n\t"
+ "movq %%mm3, 8(%2, %3) \n\t"
+ "add %%"REG_a", %1 \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq 8(%1), %%mm2 \n\t"
+ "movq 8(%1, %3), %%mm3 \n\t"
+ PAVGB" 1(%1), %%mm0 \n\t"
+ PAVGB" 1(%1, %3), %%mm1 \n\t"
+ PAVGB" 9(%1), %%mm2 \n\t"
+ PAVGB" 9(%1, %3), %%mm3 \n\t"
+ "add %%"REG_a", %1 \n\t"
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm1, (%2, %3) \n\t"
+ "movq %%mm2, 8(%2) \n\t"
+ "movq %%mm3, 8(%2, %3) \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+ :"+g"(h), "+S"(pixels), "+D"(block)
+ :"r" ((x86_reg)line_size)
+ :"%"REG_a, "memory");
+}
+
+static void DEF(put_pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
+{
+ __asm__ volatile(
+ "testl $1, %0 \n\t"
+ " jz 1f \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq 8(%1), %%mm1 \n\t"
+ PAVGB" (%2), %%mm0 \n\t"
+ PAVGB" 8(%2), %%mm1 \n\t"
+ "add %4, %1 \n\t"
+ "add $16, %2 \n\t"
+ "movq %%mm0, (%3) \n\t"
+ "movq %%mm1, 8(%3) \n\t"
+ "add %5, %3 \n\t"
+ "decl %0 \n\t"
+ "1: \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq 8(%1), %%mm1 \n\t"
+ "add %4, %1 \n\t"
+ PAVGB" (%2), %%mm0 \n\t"
+ PAVGB" 8(%2), %%mm1 \n\t"
+ "movq %%mm0, (%3) \n\t"
+ "movq %%mm1, 8(%3) \n\t"
+ "add %5, %3 \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq 8(%1), %%mm1 \n\t"
+ "add %4, %1 \n\t"
+ PAVGB" 16(%2), %%mm0 \n\t"
+ PAVGB" 24(%2), %%mm1 \n\t"
+ "movq %%mm0, (%3) \n\t"
+ "movq %%mm1, 8(%3) \n\t"
+ "add %5, %3 \n\t"
+ "add $32, %2 \n\t"
+ "subl $2, %0 \n\t"
+ "jnz 1b \n\t"
+#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used
+ :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst)
+#else
+ :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst)
+#endif
+ :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride)
+ :"memory");
+//the following generic constraints would be preferable, but gcc does not handle them well ...
+/* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst)
+ :"r"(src1Stride), "r"(dstStride)
+ :"memory");*/
+}
+
+static void DEF(avg_pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
+{
+ __asm__ volatile(
+ "testl $1, %0 \n\t"
+ " jz 1f \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq 8(%1), %%mm1 \n\t"
+ PAVGB" (%2), %%mm0 \n\t"
+ PAVGB" 8(%2), %%mm1 \n\t"
+ "add %4, %1 \n\t"
+ "add $16, %2 \n\t"
+ PAVGB" (%3), %%mm0 \n\t"
+ PAVGB" 8(%3), %%mm1 \n\t"
+ "movq %%mm0, (%3) \n\t"
+ "movq %%mm1, 8(%3) \n\t"
+ "add %5, %3 \n\t"
+ "decl %0 \n\t"
+ "1: \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq 8(%1), %%mm1 \n\t"
+ "add %4, %1 \n\t"
+ PAVGB" (%2), %%mm0 \n\t"
+ PAVGB" 8(%2), %%mm1 \n\t"
+ PAVGB" (%3), %%mm0 \n\t"
+ PAVGB" 8(%3), %%mm1 \n\t"
+ "movq %%mm0, (%3) \n\t"
+ "movq %%mm1, 8(%3) \n\t"
+ "add %5, %3 \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq 8(%1), %%mm1 \n\t"
+ "add %4, %1 \n\t"
+ PAVGB" 16(%2), %%mm0 \n\t"
+ PAVGB" 24(%2), %%mm1 \n\t"
+ PAVGB" (%3), %%mm0 \n\t"
+ PAVGB" 8(%3), %%mm1 \n\t"
+ "movq %%mm0, (%3) \n\t"
+ "movq %%mm1, 8(%3) \n\t"
+ "add %5, %3 \n\t"
+ "add $32, %2 \n\t"
+ "subl $2, %0 \n\t"
+ "jnz 1b \n\t"
+#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used
+ :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst)
+#else
+ :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst)
+#endif
+ :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride)
+ :"memory");
+//the following generic constraints would be preferable, but gcc does not handle them well ...
+/* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst)
+ :"r"(src1Stride), "r"(dstStride)
+ :"memory");*/
+}
+
+static void DEF(put_no_rnd_pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
+{
+ __asm__ volatile(
+ "pcmpeqb %%mm6, %%mm6 \n\t"
+ "testl $1, %0 \n\t"
+ " jz 1f \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq 8(%1), %%mm1 \n\t"
+ "movq (%2), %%mm2 \n\t"
+ "movq 8(%2), %%mm3 \n\t"
+ "pxor %%mm6, %%mm0 \n\t"
+ "pxor %%mm6, %%mm1 \n\t"
+ "pxor %%mm6, %%mm2 \n\t"
+ "pxor %%mm6, %%mm3 \n\t"
+ PAVGB" %%mm2, %%mm0 \n\t"
+ PAVGB" %%mm3, %%mm1 \n\t"
+ "pxor %%mm6, %%mm0 \n\t"
+ "pxor %%mm6, %%mm1 \n\t"
+ "add %4, %1 \n\t"
+ "add $16, %2 \n\t"
+ "movq %%mm0, (%3) \n\t"
+ "movq %%mm1, 8(%3) \n\t"
+ "add %5, %3 \n\t"
+ "decl %0 \n\t"
+ "1: \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq 8(%1), %%mm1 \n\t"
+ "add %4, %1 \n\t"
+ "movq (%2), %%mm2 \n\t"
+ "movq 8(%2), %%mm3 \n\t"
+ "pxor %%mm6, %%mm0 \n\t"
+ "pxor %%mm6, %%mm1 \n\t"
+ "pxor %%mm6, %%mm2 \n\t"
+ "pxor %%mm6, %%mm3 \n\t"
+ PAVGB" %%mm2, %%mm0 \n\t"
+ PAVGB" %%mm3, %%mm1 \n\t"
+ "pxor %%mm6, %%mm0 \n\t"
+ "pxor %%mm6, %%mm1 \n\t"
+ "movq %%mm0, (%3) \n\t"
+ "movq %%mm1, 8(%3) \n\t"
+ "add %5, %3 \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq 8(%1), %%mm1 \n\t"
+ "add %4, %1 \n\t"
+ "movq 16(%2), %%mm2 \n\t"
+ "movq 24(%2), %%mm3 \n\t"
+ "pxor %%mm6, %%mm0 \n\t"
+ "pxor %%mm6, %%mm1 \n\t"
+ "pxor %%mm6, %%mm2 \n\t"
+ "pxor %%mm6, %%mm3 \n\t"
+ PAVGB" %%mm2, %%mm0 \n\t"
+ PAVGB" %%mm3, %%mm1 \n\t"
+ "pxor %%mm6, %%mm0 \n\t"
+ "pxor %%mm6, %%mm1 \n\t"
+ "movq %%mm0, (%3) \n\t"
+ "movq %%mm1, 8(%3) \n\t"
+ "add %5, %3 \n\t"
+ "add $32, %2 \n\t"
+ "subl $2, %0 \n\t"
+ "jnz 1b \n\t"
+#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used
+ :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst)
+#else
+ :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst)
+#endif
+ :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride)
+ :"memory");
+//the following generic constraints would be preferable, but gcc does not handle them well ...
+/* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst)
+ :"r"(src1Stride), "r"(dstStride)
+ :"memory");*/
+}
+
+/* GL: this function rounds incorrectly on overflow */
+static void DEF(put_no_rnd_pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+{
+ MOVQ_BONE(mm6);
+ __asm__ volatile(
+ "lea (%3, %3), %%"REG_a" \n\t"
+ "1: \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq (%1, %3), %%mm2 \n\t"
+ "movq 1(%1), %%mm1 \n\t"
+ "movq 1(%1, %3), %%mm3 \n\t"
+ "add %%"REG_a", %1 \n\t"
+ "psubusb %%mm6, %%mm0 \n\t"
+ "psubusb %%mm6, %%mm2 \n\t"
+ PAVGB" %%mm1, %%mm0 \n\t"
+ PAVGB" %%mm3, %%mm2 \n\t"
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm2, (%2, %3) \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq 1(%1), %%mm1 \n\t"
+ "movq (%1, %3), %%mm2 \n\t"
+ "movq 1(%1, %3), %%mm3 \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "add %%"REG_a", %1 \n\t"
+ "psubusb %%mm6, %%mm0 \n\t"
+ "psubusb %%mm6, %%mm2 \n\t"
+ PAVGB" %%mm1, %%mm0 \n\t"
+ PAVGB" %%mm3, %%mm2 \n\t"
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm2, (%2, %3) \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+ :"+g"(h), "+S"(pixels), "+D"(block)
+ :"r" ((x86_reg)line_size)
+ :"%"REG_a, "memory");
+}
+
+static void DEF(put_pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+{
+ __asm__ volatile(
+ "lea (%3, %3), %%"REG_a" \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "sub %3, %2 \n\t"
+ "1: \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq (%1, %%"REG_a"), %%mm2 \n\t"
+ "add %%"REG_a", %1 \n\t"
+ PAVGB" %%mm1, %%mm0 \n\t"
+ PAVGB" %%mm2, %%mm1 \n\t"
+ "movq %%mm0, (%2, %3) \n\t"
+ "movq %%mm1, (%2, %%"REG_a") \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq (%1, %%"REG_a"), %%mm0 \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "add %%"REG_a", %1 \n\t"
+ PAVGB" %%mm1, %%mm2 \n\t"
+ PAVGB" %%mm0, %%mm1 \n\t"
+ "movq %%mm2, (%2, %3) \n\t"
+ "movq %%mm1, (%2, %%"REG_a") \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+ :"+g"(h), "+S"(pixels), "+D" (block)
+ :"r" ((x86_reg)line_size)
+ :"%"REG_a, "memory");
+}
+
+/* GL: this function rounds incorrectly on overflow */
+static void DEF(put_no_rnd_pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+{
+ MOVQ_BONE(mm6);
+ __asm__ volatile(
+ "lea (%3, %3), %%"REG_a" \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "sub %3, %2 \n\t"
+ "1: \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq (%1, %%"REG_a"), %%mm2 \n\t"
+ "add %%"REG_a", %1 \n\t"
+ "psubusb %%mm6, %%mm1 \n\t"
+ PAVGB" %%mm1, %%mm0 \n\t"
+ PAVGB" %%mm2, %%mm1 \n\t"
+ "movq %%mm0, (%2, %3) \n\t"
+ "movq %%mm1, (%2, %%"REG_a") \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq (%1, %%"REG_a"), %%mm0 \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "add %%"REG_a", %1 \n\t"
+ "psubusb %%mm6, %%mm1 \n\t"
+ PAVGB" %%mm1, %%mm2 \n\t"
+ PAVGB" %%mm0, %%mm1 \n\t"
+ "movq %%mm2, (%2, %3) \n\t"
+ "movq %%mm1, (%2, %%"REG_a") \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+ :"+g"(h), "+S"(pixels), "+D" (block)
+ :"r" ((x86_reg)line_size)
+ :"%"REG_a, "memory");
+}
+
+static void DEF(avg_pixels8)(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+{
+ __asm__ volatile(
+ "lea (%3, %3), %%"REG_a" \n\t"
+ "1: \n\t"
+ "movq (%2), %%mm0 \n\t"
+ "movq (%2, %3), %%mm1 \n\t"
+ PAVGB" (%1), %%mm0 \n\t"
+ PAVGB" (%1, %3), %%mm1 \n\t"
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm1, (%2, %3) \n\t"
+ "add %%"REG_a", %1 \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "movq (%2), %%mm0 \n\t"
+ "movq (%2, %3), %%mm1 \n\t"
+ PAVGB" (%1), %%mm0 \n\t"
+ PAVGB" (%1, %3), %%mm1 \n\t"
+ "add %%"REG_a", %1 \n\t"
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm1, (%2, %3) \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+ :"+g"(h), "+S"(pixels), "+D"(block)
+ :"r" ((x86_reg)line_size)
+ :"%"REG_a, "memory");
+}
+
+static void DEF(avg_pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+{
+ __asm__ volatile(
+ "lea (%3, %3), %%"REG_a" \n\t"
+ "1: \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq (%1, %3), %%mm2 \n\t"
+ PAVGB" 1(%1), %%mm0 \n\t"
+ PAVGB" 1(%1, %3), %%mm2 \n\t"
+ PAVGB" (%2), %%mm0 \n\t"
+ PAVGB" (%2, %3), %%mm2 \n\t"
+ "add %%"REG_a", %1 \n\t"
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm2, (%2, %3) \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq (%1, %3), %%mm2 \n\t"
+ PAVGB" 1(%1), %%mm0 \n\t"
+ PAVGB" 1(%1, %3), %%mm2 \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "add %%"REG_a", %1 \n\t"
+ PAVGB" (%2), %%mm0 \n\t"
+ PAVGB" (%2, %3), %%mm2 \n\t"
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm2, (%2, %3) \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+ :"+g"(h), "+S"(pixels), "+D"(block)
+ :"r" ((x86_reg)line_size)
+ :"%"REG_a, "memory");
+}
+
+static void DEF(avg_pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+{
+ __asm__ volatile(
+ "lea (%3, %3), %%"REG_a" \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "sub %3, %2 \n\t"
+ "1: \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq (%1, %%"REG_a"), %%mm2 \n\t"
+ "add %%"REG_a", %1 \n\t"
+ PAVGB" %%mm1, %%mm0 \n\t"
+ PAVGB" %%mm2, %%mm1 \n\t"
+ "movq (%2, %3), %%mm3 \n\t"
+ "movq (%2, %%"REG_a"), %%mm4 \n\t"
+ PAVGB" %%mm3, %%mm0 \n\t"
+ PAVGB" %%mm4, %%mm1 \n\t"
+ "movq %%mm0, (%2, %3) \n\t"
+ "movq %%mm1, (%2, %%"REG_a") \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq (%1, %%"REG_a"), %%mm0 \n\t"
+ PAVGB" %%mm1, %%mm2 \n\t"
+ PAVGB" %%mm0, %%mm1 \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "add %%"REG_a", %1 \n\t"
+ "movq (%2, %3), %%mm3 \n\t"
+ "movq (%2, %%"REG_a"), %%mm4 \n\t"
+ PAVGB" %%mm3, %%mm2 \n\t"
+ PAVGB" %%mm4, %%mm1 \n\t"
+ "movq %%mm2, (%2, %3) \n\t"
+ "movq %%mm1, (%2, %%"REG_a") \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+ :"+g"(h), "+S"(pixels), "+D"(block)
+ :"r" ((x86_reg)line_size)
+ :"%"REG_a, "memory");
+}
+
+/* Note this is not correctly rounded, but this function is only
+ * used for B-frames so it does not matter. */
+static void DEF(avg_pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+{
+ MOVQ_BONE(mm6);
+ __asm__ volatile(
+ "lea (%3, %3), %%"REG_a" \n\t"
+ "movq (%1), %%mm0 \n\t"
+ PAVGB" 1(%1), %%mm0 \n\t"
+ ASMALIGN(3)
+ "1: \n\t"
+ "movq (%1, %%"REG_a"), %%mm2 \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "psubusb %%mm6, %%mm2 \n\t"
+ PAVGB" 1(%1, %3), %%mm1 \n\t"
+ PAVGB" 1(%1, %%"REG_a"), %%mm2 \n\t"
+ "add %%"REG_a", %1 \n\t"
+ PAVGB" %%mm1, %%mm0 \n\t"
+ PAVGB" %%mm2, %%mm1 \n\t"
+ PAVGB" (%2), %%mm0 \n\t"
+ PAVGB" (%2, %3), %%mm1 \n\t"
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm1, (%2, %3) \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq (%1, %%"REG_a"), %%mm0 \n\t"
+ PAVGB" 1(%1, %3), %%mm1 \n\t"
+ PAVGB" 1(%1, %%"REG_a"), %%mm0 \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "add %%"REG_a", %1 \n\t"
+ PAVGB" %%mm1, %%mm2 \n\t"
+ PAVGB" %%mm0, %%mm1 \n\t"
+ PAVGB" (%2), %%mm2 \n\t"
+ PAVGB" (%2, %3), %%mm1 \n\t"
+ "movq %%mm2, (%2) \n\t"
+ "movq %%mm1, (%2, %3) \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+ :"+g"(h), "+S"(pixels), "+D"(block)
+ :"r" ((x86_reg)line_size)
+ :"%"REG_a, "memory");
+}
+
+static void DEF(avg_pixels4)(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+{
+ do {
+ __asm__ volatile(
+ "movd (%1), %%mm0 \n\t"
+ "movd (%1, %2), %%mm1 \n\t"
+ "movd (%1, %2, 2), %%mm2 \n\t"
+ "movd (%1, %3), %%mm3 \n\t"
+ PAVGB" (%0), %%mm0 \n\t"
+ PAVGB" (%0, %2), %%mm1 \n\t"
+ PAVGB" (%0, %2, 2), %%mm2 \n\t"
+ PAVGB" (%0, %3), %%mm3 \n\t"
+ "movd %%mm0, (%1) \n\t"
+ "movd %%mm1, (%1, %2) \n\t"
+ "movd %%mm2, (%1, %2, 2) \n\t"
+ "movd %%mm3, (%1, %3) \n\t"
+ ::"S"(pixels), "D"(block),
+ "r" ((x86_reg)line_size), "r"((x86_reg)3L*line_size)
+ :"memory");
+ block += 4*line_size;
+ pixels += 4*line_size;
+ h -= 4;
+ } while(h > 0);
+}
+
+//FIXME the following could be optimized too ...
+static void DEF(put_no_rnd_pixels16_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){
+ DEF(put_no_rnd_pixels8_x2)(block , pixels , line_size, h);
+ DEF(put_no_rnd_pixels8_x2)(block+8, pixels+8, line_size, h);
+}
+static void DEF(put_pixels16_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){
+ DEF(put_pixels8_y2)(block , pixels , line_size, h);
+ DEF(put_pixels8_y2)(block+8, pixels+8, line_size, h);
+}
+static void DEF(put_no_rnd_pixels16_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){
+ DEF(put_no_rnd_pixels8_y2)(block , pixels , line_size, h);
+ DEF(put_no_rnd_pixels8_y2)(block+8, pixels+8, line_size, h);
+}
+static void DEF(avg_pixels16)(uint8_t *block, const uint8_t *pixels, int line_size, int h){
+ DEF(avg_pixels8)(block , pixels , line_size, h);
+ DEF(avg_pixels8)(block+8, pixels+8, line_size, h);
+}
+static void DEF(avg_pixels16_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){
+ DEF(avg_pixels8_x2)(block , pixels , line_size, h);
+ DEF(avg_pixels8_x2)(block+8, pixels+8, line_size, h);
+}
+static void DEF(avg_pixels16_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){
+ DEF(avg_pixels8_y2)(block , pixels , line_size, h);
+ DEF(avg_pixels8_y2)(block+8, pixels+8, line_size, h);
+}
+static void DEF(avg_pixels16_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){
+ DEF(avg_pixels8_xy2)(block , pixels , line_size, h);
+ DEF(avg_pixels8_xy2)(block+8, pixels+8, line_size, h);
+}
+
+#define QPEL_2TAP_L3(OPNAME) \
+static void DEF(OPNAME ## 2tap_qpel16_l3)(uint8_t *dst, uint8_t *src, int stride, int h, int off1, int off2){\
+ __asm__ volatile(\
+ "1: \n\t"\
+ "movq (%1,%2), %%mm0 \n\t"\
+ "movq 8(%1,%2), %%mm1 \n\t"\
+ PAVGB" (%1,%3), %%mm0 \n\t"\
+ PAVGB" 8(%1,%3), %%mm1 \n\t"\
+ PAVGB" (%1), %%mm0 \n\t"\
+ PAVGB" 8(%1), %%mm1 \n\t"\
+ STORE_OP( (%1,%4),%%mm0)\
+ STORE_OP(8(%1,%4),%%mm1)\
+ "movq %%mm0, (%1,%4) \n\t"\
+ "movq %%mm1, 8(%1,%4) \n\t"\
+ "add %5, %1 \n\t"\
+ "decl %0 \n\t"\
+ "jnz 1b \n\t"\
+ :"+g"(h), "+r"(src)\
+ :"r"((x86_reg)off1), "r"((x86_reg)off2),\
+ "r"((x86_reg)(dst-src)), "r"((x86_reg)stride)\
+ :"memory"\
+ );\
+}\
+static void DEF(OPNAME ## 2tap_qpel8_l3)(uint8_t *dst, uint8_t *src, int stride, int h, int off1, int off2){\
+ __asm__ volatile(\
+ "1: \n\t"\
+ "movq (%1,%2), %%mm0 \n\t"\
+ PAVGB" (%1,%3), %%mm0 \n\t"\
+ PAVGB" (%1), %%mm0 \n\t"\
+ STORE_OP((%1,%4),%%mm0)\
+ "movq %%mm0, (%1,%4) \n\t"\
+ "add %5, %1 \n\t"\
+ "decl %0 \n\t"\
+ "jnz 1b \n\t"\
+ :"+g"(h), "+r"(src)\
+ :"r"((x86_reg)off1), "r"((x86_reg)off2),\
+ "r"((x86_reg)(dst-src)), "r"((x86_reg)stride)\
+ :"memory"\
+ );\
+}
+
+#define STORE_OP(a,b) PAVGB" "#a","#b" \n\t"
+QPEL_2TAP_L3(avg_)
+#undef STORE_OP
+#define STORE_OP(a,b)
+QPEL_2TAP_L3(put_)
+#undef STORE_OP
+#undef QPEL_2TAP_L3
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/dsputil_mmx_qns_template.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/dsputil_mmx_qns_template.c
new file mode 100644
index 000000000..677618a90
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/dsputil_mmx_qns_template.c
@@ -0,0 +1,101 @@
+/*
+ * DSP utils: QNS functions are compiled 3 times for mmx/3dnow/ssse3
+ * Copyright (c) 2004 Michael Niedermayer
+ *
+ * MMX optimization by Michael Niedermayer <michaelni@gmx.at>
+ * 3DNow! and SSSE3 optimization by Zuxy Meng <zuxy.meng@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define MAX_ABS (512 >> (SCALE_OFFSET>0 ? SCALE_OFFSET : 0))
+
+static int DEF(try_8x8basis)(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale)
+{
+ x86_reg i=0;
+
+ assert(FFABS(scale) < MAX_ABS);
+ scale<<= 16 + SCALE_OFFSET - BASIS_SHIFT + RECON_SHIFT;
+
+ SET_RND(mm6);
+ __asm__ volatile(
+ "pxor %%mm7, %%mm7 \n\t"
+ "movd %4, %%mm5 \n\t"
+ "punpcklwd %%mm5, %%mm5 \n\t"
+ "punpcklwd %%mm5, %%mm5 \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ "movq (%1, %0), %%mm0 \n\t"
+ "movq 8(%1, %0), %%mm1 \n\t"
+ PMULHRW(%%mm0, %%mm1, %%mm5, %%mm6)
+ "paddw (%2, %0), %%mm0 \n\t"
+ "paddw 8(%2, %0), %%mm1 \n\t"
+ "psraw $6, %%mm0 \n\t"
+ "psraw $6, %%mm1 \n\t"
+ "pmullw (%3, %0), %%mm0 \n\t"
+ "pmullw 8(%3, %0), %%mm1 \n\t"
+ "pmaddwd %%mm0, %%mm0 \n\t"
+ "pmaddwd %%mm1, %%mm1 \n\t"
+ "paddd %%mm1, %%mm0 \n\t"
+ "psrld $4, %%mm0 \n\t"
+ "paddd %%mm0, %%mm7 \n\t"
+ "add $16, %0 \n\t"
+ "cmp $128, %0 \n\t" //FIXME optimize & bench
+ " jb 1b \n\t"
+ PHADDD(%%mm7, %%mm6)
+ "psrld $2, %%mm7 \n\t"
+ "movd %%mm7, %0 \n\t"
+
+ : "+r" (i)
+ : "r"(basis), "r"(rem), "r"(weight), "g"(scale)
+ );
+ return i;
+}
+
+static void DEF(add_8x8basis)(int16_t rem[64], int16_t basis[64], int scale)
+{
+ x86_reg i=0;
+
+ if(FFABS(scale) < MAX_ABS){
+ scale<<= 16 + SCALE_OFFSET - BASIS_SHIFT + RECON_SHIFT;
+ SET_RND(mm6);
+ __asm__ volatile(
+ "movd %3, %%mm5 \n\t"
+ "punpcklwd %%mm5, %%mm5 \n\t"
+ "punpcklwd %%mm5, %%mm5 \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ "movq (%1, %0), %%mm0 \n\t"
+ "movq 8(%1, %0), %%mm1 \n\t"
+ PMULHRW(%%mm0, %%mm1, %%mm5, %%mm6)
+ "paddw (%2, %0), %%mm0 \n\t"
+ "paddw 8(%2, %0), %%mm1 \n\t"
+ "movq %%mm0, (%2, %0) \n\t"
+ "movq %%mm1, 8(%2, %0) \n\t"
+ "add $16, %0 \n\t"
+ "cmp $128, %0 \n\t" // FIXME optimize & bench
+ " jb 1b \n\t"
+
+ : "+r" (i)
+ : "r"(basis), "r"(rem), "g"(scale)
+ );
+ }else{
+ for(i=0; i<8*8; i++){
+ rem[i] += (basis[i]*scale + (1<<(BASIS_SHIFT - RECON_SHIFT-1)))>>(BASIS_SHIFT - RECON_SHIFT);
+ }
+ }
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/dsputil_mmx_rnd_template.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/dsputil_mmx_rnd_template.c
new file mode 100644
index 000000000..d4dfb41e6
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/dsputil_mmx_rnd_template.c
@@ -0,0 +1,590 @@
+/*
+ * DSP utils: MMX functions are compiled twice for rnd/no_rnd
+ * Copyright (c) 2000, 2001 Fabrice Bellard
+ * Copyright (c) 2003-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
+ * mostly rewritten by Michael Niedermayer <michaelni@gmx.at>
+ * and improved by Zdenek Kabelac <kabi@users.sf.net>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+// put_pixels
+static void DEF(put, pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+{
+ MOVQ_BFE(mm6);
+ __asm__ volatile(
+ "lea (%3, %3), %%"REG_a" \n\t"
+ ASMALIGN(3)
+ "1: \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq 1(%1), %%mm1 \n\t"
+ "movq (%1, %3), %%mm2 \n\t"
+ "movq 1(%1, %3), %%mm3 \n\t"
+ PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
+ "movq %%mm4, (%2) \n\t"
+ "movq %%mm5, (%2, %3) \n\t"
+ "add %%"REG_a", %1 \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq 1(%1), %%mm1 \n\t"
+ "movq (%1, %3), %%mm2 \n\t"
+ "movq 1(%1, %3), %%mm3 \n\t"
+ PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
+ "movq %%mm4, (%2) \n\t"
+ "movq %%mm5, (%2, %3) \n\t"
+ "add %%"REG_a", %1 \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+ :"+g"(h), "+S"(pixels), "+D"(block)
+ :"r"((x86_reg)line_size)
+ :REG_a, "memory");
+}
+
+static void av_unused DEF(put, pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
+{
+ MOVQ_BFE(mm6);
+ __asm__ volatile(
+ "testl $1, %0 \n\t"
+ " jz 1f \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq (%2), %%mm1 \n\t"
+ "add %4, %1 \n\t"
+ "add $8, %2 \n\t"
+ PAVGB(%%mm0, %%mm1, %%mm4, %%mm6)
+ "movq %%mm4, (%3) \n\t"
+ "add %5, %3 \n\t"
+ "decl %0 \n\t"
+ ASMALIGN(3)
+ "1: \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq (%2), %%mm1 \n\t"
+ "add %4, %1 \n\t"
+ "movq (%1), %%mm2 \n\t"
+ "movq 8(%2), %%mm3 \n\t"
+ "add %4, %1 \n\t"
+ PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
+ "movq %%mm4, (%3) \n\t"
+ "add %5, %3 \n\t"
+ "movq %%mm5, (%3) \n\t"
+ "add %5, %3 \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq 16(%2), %%mm1 \n\t"
+ "add %4, %1 \n\t"
+ "movq (%1), %%mm2 \n\t"
+ "movq 24(%2), %%mm3 \n\t"
+ "add %4, %1 \n\t"
+ "add $32, %2 \n\t"
+ PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
+ "movq %%mm4, (%3) \n\t"
+ "add %5, %3 \n\t"
+ "movq %%mm5, (%3) \n\t"
+ "add %5, %3 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used
+ :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst)
+#else
+ :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst)
+#endif
+ :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride)
+ :"memory");
+}
+
+static void DEF(put, pixels16_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+{
+ MOVQ_BFE(mm6);
+ __asm__ volatile(
+ "lea (%3, %3), %%"REG_a" \n\t"
+ ASMALIGN(3)
+ "1: \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq 1(%1), %%mm1 \n\t"
+ "movq (%1, %3), %%mm2 \n\t"
+ "movq 1(%1, %3), %%mm3 \n\t"
+ PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
+ "movq %%mm4, (%2) \n\t"
+ "movq %%mm5, (%2, %3) \n\t"
+ "movq 8(%1), %%mm0 \n\t"
+ "movq 9(%1), %%mm1 \n\t"
+ "movq 8(%1, %3), %%mm2 \n\t"
+ "movq 9(%1, %3), %%mm3 \n\t"
+ PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
+ "movq %%mm4, 8(%2) \n\t"
+ "movq %%mm5, 8(%2, %3) \n\t"
+ "add %%"REG_a", %1 \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq 1(%1), %%mm1 \n\t"
+ "movq (%1, %3), %%mm2 \n\t"
+ "movq 1(%1, %3), %%mm3 \n\t"
+ PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
+ "movq %%mm4, (%2) \n\t"
+ "movq %%mm5, (%2, %3) \n\t"
+ "movq 8(%1), %%mm0 \n\t"
+ "movq 9(%1), %%mm1 \n\t"
+ "movq 8(%1, %3), %%mm2 \n\t"
+ "movq 9(%1, %3), %%mm3 \n\t"
+ PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
+ "movq %%mm4, 8(%2) \n\t"
+ "movq %%mm5, 8(%2, %3) \n\t"
+ "add %%"REG_a", %1 \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+ :"+g"(h), "+S"(pixels), "+D"(block)
+ :"r"((x86_reg)line_size)
+ :REG_a, "memory");
+}
+
+static void av_unused DEF(put, pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
+{
+ MOVQ_BFE(mm6);
+ __asm__ volatile(
+ "testl $1, %0 \n\t"
+ " jz 1f \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq (%2), %%mm1 \n\t"
+ "movq 8(%1), %%mm2 \n\t"
+ "movq 8(%2), %%mm3 \n\t"
+ "add %4, %1 \n\t"
+ "add $16, %2 \n\t"
+ PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
+ "movq %%mm4, (%3) \n\t"
+ "movq %%mm5, 8(%3) \n\t"
+ "add %5, %3 \n\t"
+ "decl %0 \n\t"
+ ASMALIGN(3)
+ "1: \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq (%2), %%mm1 \n\t"
+ "movq 8(%1), %%mm2 \n\t"
+ "movq 8(%2), %%mm3 \n\t"
+ "add %4, %1 \n\t"
+ PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
+ "movq %%mm4, (%3) \n\t"
+ "movq %%mm5, 8(%3) \n\t"
+ "add %5, %3 \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq 16(%2), %%mm1 \n\t"
+ "movq 8(%1), %%mm2 \n\t"
+ "movq 24(%2), %%mm3 \n\t"
+ "add %4, %1 \n\t"
+ PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
+ "movq %%mm4, (%3) \n\t"
+ "movq %%mm5, 8(%3) \n\t"
+ "add %5, %3 \n\t"
+ "add $32, %2 \n\t"
+ "subl $2, %0 \n\t"
+ "jnz 1b \n\t"
+#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used
+ :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst)
+#else
+ :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst)
+#endif
+ :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride)
+ :"memory");
+}
+
+static void DEF(put, pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+{
+ MOVQ_BFE(mm6);
+ __asm__ volatile(
+ "lea (%3, %3), %%"REG_a" \n\t"
+ "movq (%1), %%mm0 \n\t"
+ ASMALIGN(3)
+ "1: \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq (%1, %%"REG_a"),%%mm2 \n\t"
+ PAVGBP(%%mm1, %%mm0, %%mm4, %%mm2, %%mm1, %%mm5)
+ "movq %%mm4, (%2) \n\t"
+ "movq %%mm5, (%2, %3) \n\t"
+ "add %%"REG_a", %1 \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq (%1, %%"REG_a"),%%mm0 \n\t"
+ PAVGBP(%%mm1, %%mm2, %%mm4, %%mm0, %%mm1, %%mm5)
+ "movq %%mm4, (%2) \n\t"
+ "movq %%mm5, (%2, %3) \n\t"
+ "add %%"REG_a", %1 \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+ :"+g"(h), "+S"(pixels), "+D"(block)
+ :"r"((x86_reg)line_size)
+ :REG_a, "memory");
+}
+
+static void DEF(put, pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+{
+ MOVQ_ZERO(mm7);
+ SET_RND(mm6); // =2 for rnd and =1 for no_rnd version
+ __asm__ volatile(
+ "movq (%1), %%mm0 \n\t"
+ "movq 1(%1), %%mm4 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm4, %%mm5 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "punpckhbw %%mm7, %%mm1 \n\t"
+ "punpckhbw %%mm7, %%mm5 \n\t"
+ "paddusw %%mm0, %%mm4 \n\t"
+ "paddusw %%mm1, %%mm5 \n\t"
+ "xor %%"REG_a", %%"REG_a" \n\t"
+ "add %3, %1 \n\t"
+ ASMALIGN(3)
+ "1: \n\t"
+ "movq (%1, %%"REG_a"), %%mm0 \n\t"
+ "movq 1(%1, %%"REG_a"), %%mm2 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpckhbw %%mm7, %%mm1 \n\t"
+ "punpckhbw %%mm7, %%mm3 \n\t"
+ "paddusw %%mm2, %%mm0 \n\t"
+ "paddusw %%mm3, %%mm1 \n\t"
+ "paddusw %%mm6, %%mm4 \n\t"
+ "paddusw %%mm6, %%mm5 \n\t"
+ "paddusw %%mm0, %%mm4 \n\t"
+ "paddusw %%mm1, %%mm5 \n\t"
+ "psrlw $2, %%mm4 \n\t"
+ "psrlw $2, %%mm5 \n\t"
+ "packuswb %%mm5, %%mm4 \n\t"
+ "movq %%mm4, (%2, %%"REG_a") \n\t"
+ "add %3, %%"REG_a" \n\t"
+
+ "movq (%1, %%"REG_a"), %%mm2 \n\t" // 0 <-> 2 1 <-> 3
+ "movq 1(%1, %%"REG_a"), %%mm4 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "movq %%mm4, %%mm5 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "punpckhbw %%mm7, %%mm3 \n\t"
+ "punpckhbw %%mm7, %%mm5 \n\t"
+ "paddusw %%mm2, %%mm4 \n\t"
+ "paddusw %%mm3, %%mm5 \n\t"
+ "paddusw %%mm6, %%mm0 \n\t"
+ "paddusw %%mm6, %%mm1 \n\t"
+ "paddusw %%mm4, %%mm0 \n\t"
+ "paddusw %%mm5, %%mm1 \n\t"
+ "psrlw $2, %%mm0 \n\t"
+ "psrlw $2, %%mm1 \n\t"
+ "packuswb %%mm1, %%mm0 \n\t"
+ "movq %%mm0, (%2, %%"REG_a") \n\t"
+ "add %3, %%"REG_a" \n\t"
+
+ "subl $2, %0 \n\t"
+ "jnz 1b \n\t"
+ :"+g"(h), "+S"(pixels)
+ :"D"(block), "r"((x86_reg)line_size)
+ :REG_a, "memory");
+}
+
+// avg_pixels
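+// the avg_* variants blend the freshly computed prediction with the existing contents of *block via OP_AVG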
+static void av_unused DEF(avg, pixels4)(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+{
+ MOVQ_BFE(mm6);
+ JUMPALIGN();
+ do {
+ __asm__ volatile(
+ "movd %0, %%mm0 \n\t"
+ "movd %1, %%mm1 \n\t"
+ OP_AVG(%%mm0, %%mm1, %%mm2, %%mm6)
+ "movd %%mm2, %0 \n\t"
+ :"+m"(*block)
+ :"m"(*pixels)
+ :"memory");
+ pixels += line_size;
+ block += line_size;
+ }
+ while (--h);
+}
+
+// in case more speed is needed - unrolling would certainly help
+static void DEF(avg, pixels8)(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+{
+ MOVQ_BFE(mm6);
+ JUMPALIGN();
+ do {
+ __asm__ volatile(
+ "movq %0, %%mm0 \n\t"
+ "movq %1, %%mm1 \n\t"
+ OP_AVG(%%mm0, %%mm1, %%mm2, %%mm6)
+ "movq %%mm2, %0 \n\t"
+ :"+m"(*block)
+ :"m"(*pixels)
+ :"memory");
+ pixels += line_size;
+ block += line_size;
+ }
+ while (--h);
+}
+
+static void DEF(avg, pixels16)(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+{
+ MOVQ_BFE(mm6);
+ JUMPALIGN();
+ do {
+ __asm__ volatile(
+ "movq %0, %%mm0 \n\t"
+ "movq %1, %%mm1 \n\t"
+ OP_AVG(%%mm0, %%mm1, %%mm2, %%mm6)
+ "movq %%mm2, %0 \n\t"
+ "movq 8%0, %%mm0 \n\t"
+ "movq 8%1, %%mm1 \n\t"
+ OP_AVG(%%mm0, %%mm1, %%mm2, %%mm6)
+ "movq %%mm2, 8%0 \n\t"
+ :"+m"(*block)
+ :"m"(*pixels)
+ :"memory");
+ pixels += line_size;
+ block += line_size;
+ }
+ while (--h);
+}
+
+static void DEF(avg, pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+{
+ MOVQ_BFE(mm6);
+ JUMPALIGN();
+ do {
+ __asm__ volatile(
+ "movq %1, %%mm0 \n\t"
+ "movq 1%1, %%mm1 \n\t"
+ "movq %0, %%mm3 \n\t"
+ PAVGB(%%mm0, %%mm1, %%mm2, %%mm6)
+ OP_AVG(%%mm3, %%mm2, %%mm0, %%mm6)
+ "movq %%mm0, %0 \n\t"
+ :"+m"(*block)
+ :"m"(*pixels)
+ :"memory");
+ pixels += line_size;
+ block += line_size;
+ } while (--h);
+}
+
+static av_unused void DEF(avg, pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
+{
+ MOVQ_BFE(mm6);
+ JUMPALIGN();
+ do {
+ __asm__ volatile(
+ "movq %1, %%mm0 \n\t"
+ "movq %2, %%mm1 \n\t"
+ "movq %0, %%mm3 \n\t"
+ PAVGB(%%mm0, %%mm1, %%mm2, %%mm6)
+ OP_AVG(%%mm3, %%mm2, %%mm0, %%mm6)
+ "movq %%mm0, %0 \n\t"
+ :"+m"(*dst)
+ :"m"(*src1), "m"(*src2)
+ :"memory");
+ dst += dstStride;
+ src1 += src1Stride;
+ src2 += 8;
+ } while (--h);
+}
+
+static void DEF(avg, pixels16_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+{
+ MOVQ_BFE(mm6);
+ JUMPALIGN();
+ do {
+ __asm__ volatile(
+ "movq %1, %%mm0 \n\t"
+ "movq 1%1, %%mm1 \n\t"
+ "movq %0, %%mm3 \n\t"
+ PAVGB(%%mm0, %%mm1, %%mm2, %%mm6)
+ OP_AVG(%%mm3, %%mm2, %%mm0, %%mm6)
+ "movq %%mm0, %0 \n\t"
+ "movq 8%1, %%mm0 \n\t"
+ "movq 9%1, %%mm1 \n\t"
+ "movq 8%0, %%mm3 \n\t"
+ PAVGB(%%mm0, %%mm1, %%mm2, %%mm6)
+ OP_AVG(%%mm3, %%mm2, %%mm0, %%mm6)
+ "movq %%mm0, 8%0 \n\t"
+ :"+m"(*block)
+ :"m"(*pixels)
+ :"memory");
+ pixels += line_size;
+ block += line_size;
+ } while (--h);
+}
+
+static av_unused void DEF(avg, pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
+{
+ MOVQ_BFE(mm6);
+ JUMPALIGN();
+ do {
+ __asm__ volatile(
+ "movq %1, %%mm0 \n\t"
+ "movq %2, %%mm1 \n\t"
+ "movq %0, %%mm3 \n\t"
+ PAVGB(%%mm0, %%mm1, %%mm2, %%mm6)
+ OP_AVG(%%mm3, %%mm2, %%mm0, %%mm6)
+ "movq %%mm0, %0 \n\t"
+ "movq 8%1, %%mm0 \n\t"
+ "movq 8%2, %%mm1 \n\t"
+ "movq 8%0, %%mm3 \n\t"
+ PAVGB(%%mm0, %%mm1, %%mm2, %%mm6)
+ OP_AVG(%%mm3, %%mm2, %%mm0, %%mm6)
+ "movq %%mm0, 8%0 \n\t"
+ :"+m"(*dst)
+ :"m"(*src1), "m"(*src2)
+ :"memory");
+ dst += dstStride;
+ src1 += src1Stride;
+ src2 += 16;
+ } while (--h);
+}
+
+static void DEF(avg, pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+{
+ MOVQ_BFE(mm6);
+ __asm__ volatile(
+ "lea (%3, %3), %%"REG_a" \n\t"
+ "movq (%1), %%mm0 \n\t"
+ ASMALIGN(3)
+ "1: \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq (%1, %%"REG_a"), %%mm2 \n\t"
+ PAVGBP(%%mm1, %%mm0, %%mm4, %%mm2, %%mm1, %%mm5)
+ "movq (%2), %%mm3 \n\t"
+ OP_AVG(%%mm3, %%mm4, %%mm0, %%mm6)
+ "movq (%2, %3), %%mm3 \n\t"
+ OP_AVG(%%mm3, %%mm5, %%mm1, %%mm6)
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm1, (%2, %3) \n\t"
+ "add %%"REG_a", %1 \n\t"
+ "add %%"REG_a", %2 \n\t"
+
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq (%1, %%"REG_a"), %%mm0 \n\t"
+ PAVGBP(%%mm1, %%mm2, %%mm4, %%mm0, %%mm1, %%mm5)
+ "movq (%2), %%mm3 \n\t"
+ OP_AVG(%%mm3, %%mm4, %%mm2, %%mm6)
+ "movq (%2, %3), %%mm3 \n\t"
+ OP_AVG(%%mm3, %%mm5, %%mm1, %%mm6)
+ "movq %%mm2, (%2) \n\t"
+ "movq %%mm1, (%2, %3) \n\t"
+ "add %%"REG_a", %1 \n\t"
+ "add %%"REG_a", %2 \n\t"
+
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+ :"+g"(h), "+S"(pixels), "+D"(block)
+ :"r"((x86_reg)line_size)
+ :REG_a, "memory");
+}
+
+// this routine is 'slightly' suboptimal but mostly unused
+static void DEF(avg, pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+{
+ MOVQ_ZERO(mm7);
+ SET_RND(mm6); // =2 for rnd and =1 for no_rnd version
+ __asm__ volatile(
+ "movq (%1), %%mm0 \n\t"
+ "movq 1(%1), %%mm4 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm4, %%mm5 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "punpckhbw %%mm7, %%mm1 \n\t"
+ "punpckhbw %%mm7, %%mm5 \n\t"
+ "paddusw %%mm0, %%mm4 \n\t"
+ "paddusw %%mm1, %%mm5 \n\t"
+ "xor %%"REG_a", %%"REG_a" \n\t"
+ "add %3, %1 \n\t"
+ ASMALIGN(3)
+ "1: \n\t"
+ "movq (%1, %%"REG_a"), %%mm0 \n\t"
+ "movq 1(%1, %%"REG_a"), %%mm2 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpckhbw %%mm7, %%mm1 \n\t"
+ "punpckhbw %%mm7, %%mm3 \n\t"
+ "paddusw %%mm2, %%mm0 \n\t"
+ "paddusw %%mm3, %%mm1 \n\t"
+ "paddusw %%mm6, %%mm4 \n\t"
+ "paddusw %%mm6, %%mm5 \n\t"
+ "paddusw %%mm0, %%mm4 \n\t"
+ "paddusw %%mm1, %%mm5 \n\t"
+ "psrlw $2, %%mm4 \n\t"
+ "psrlw $2, %%mm5 \n\t"
+ "movq (%2, %%"REG_a"), %%mm3 \n\t"
+ "packuswb %%mm5, %%mm4 \n\t"
+ "pcmpeqd %%mm2, %%mm2 \n\t"
+ "paddb %%mm2, %%mm2 \n\t"
+ OP_AVG(%%mm3, %%mm4, %%mm5, %%mm2)
+ "movq %%mm5, (%2, %%"REG_a") \n\t"
+ "add %3, %%"REG_a" \n\t"
+
+ "movq (%1, %%"REG_a"), %%mm2 \n\t" // 0 <-> 2 1 <-> 3
+ "movq 1(%1, %%"REG_a"), %%mm4 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "movq %%mm4, %%mm5 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "punpckhbw %%mm7, %%mm3 \n\t"
+ "punpckhbw %%mm7, %%mm5 \n\t"
+ "paddusw %%mm2, %%mm4 \n\t"
+ "paddusw %%mm3, %%mm5 \n\t"
+ "paddusw %%mm6, %%mm0 \n\t"
+ "paddusw %%mm6, %%mm1 \n\t"
+ "paddusw %%mm4, %%mm0 \n\t"
+ "paddusw %%mm5, %%mm1 \n\t"
+ "psrlw $2, %%mm0 \n\t"
+ "psrlw $2, %%mm1 \n\t"
+ "movq (%2, %%"REG_a"), %%mm3 \n\t"
+ "packuswb %%mm1, %%mm0 \n\t"
+ "pcmpeqd %%mm2, %%mm2 \n\t"
+ "paddb %%mm2, %%mm2 \n\t"
+ OP_AVG(%%mm3, %%mm0, %%mm1, %%mm2)
+ "movq %%mm1, (%2, %%"REG_a") \n\t"
+ "add %3, %%"REG_a" \n\t"
+
+ "subl $2, %0 \n\t"
+ "jnz 1b \n\t"
+ :"+g"(h), "+S"(pixels)
+ :"D"(block), "r"((x86_reg)line_size)
+ :REG_a, "memory");
+}
+
+//FIXME optimize
+static void DEF(put, pixels16_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){
+ DEF(put, pixels8_y2)(block , pixels , line_size, h);
+ DEF(put, pixels8_y2)(block+8, pixels+8, line_size, h);
+}
+
+static void DEF(put, pixels16_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){
+ DEF(put, pixels8_xy2)(block , pixels , line_size, h);
+ DEF(put, pixels8_xy2)(block+8, pixels+8, line_size, h);
+}
+
+static void DEF(avg, pixels16_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){
+ DEF(avg, pixels8_y2)(block , pixels , line_size, h);
+ DEF(avg, pixels8_y2)(block+8, pixels+8, line_size, h);
+}
+
+static void DEF(avg, pixels16_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){
+ DEF(avg, pixels8_xy2)(block , pixels , line_size, h);
+ DEF(avg, pixels8_xy2)(block+8, pixels+8, line_size, h);
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/dsputil_yasm.asm b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/dsputil_yasm.asm
new file mode 100644
index 000000000..e2478a484
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/dsputil_yasm.asm
@@ -0,0 +1,423 @@
+;******************************************************************************
+;* MMX optimized DSP utils
+;* Copyright (c) 2008 Loren Merritt
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "x86inc.asm"
+
+SECTION_RODATA
+pb_f: times 16 db 15
+pb_zzzzzzzz77777777: times 8 db -1
+pb_7: times 8 db 7
+pb_zzzz3333zzzzbbbb: db -1,-1,-1,-1,3,3,3,3,-1,-1,-1,-1,11,11,11,11
+pb_zz11zz55zz99zzdd: db -1,-1,1,1,-1,-1,5,5,-1,-1,9,9,-1,-1,13,13
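+; pb_f / pb_7 are pshufb masks that broadcast the last byte of an xmm / mmx register.
+; pb_zzzzzzzz77777777 declares only its zeroing half; the pb_7 bytes that follow it supply
+; the 0x07 half when the constant is read as a full 16 bytes.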
+
+section .text align=16
+
+%macro PSWAPD_SSE 2
+ pshufw %1, %2, 0x4e
+%endmacro
+%macro PSWAPD_3DN1 2
+ movq %1, %2
+ psrlq %1, 32
+ punpckldq %1, %2
+%endmacro
+
+%macro FLOAT_TO_INT16_INTERLEAVE6 1
+; void ff_float_to_int16_interleave6_sse(int16_t *dst, const float **src, int len)
+cglobal float_to_int16_interleave6_%1, 2,7,0, dst, src, src1, src2, src3, src4, src5
+%ifdef ARCH_X86_64
+ %define lend r10d
+ mov lend, r2d
+%else
+ %define lend dword r2m
+%endif
+ mov src1q, [srcq+1*gprsize]
+ mov src2q, [srcq+2*gprsize]
+ mov src3q, [srcq+3*gprsize]
+ mov src4q, [srcq+4*gprsize]
+ mov src5q, [srcq+5*gprsize]
+ mov srcq, [srcq]
+ sub src1q, srcq
+ sub src2q, srcq
+ sub src3q, srcq
+ sub src4q, srcq
+ sub src5q, srcq
+.loop:
+ cvtps2pi mm0, [srcq]
+ cvtps2pi mm1, [srcq+src1q]
+ cvtps2pi mm2, [srcq+src2q]
+ cvtps2pi mm3, [srcq+src3q]
+ cvtps2pi mm4, [srcq+src4q]
+ cvtps2pi mm5, [srcq+src5q]
+ packssdw mm0, mm3
+ packssdw mm1, mm4
+ packssdw mm2, mm5
+ pswapd mm3, mm0
+ punpcklwd mm0, mm1
+ punpckhwd mm1, mm2
+ punpcklwd mm2, mm3
+ pswapd mm3, mm0
+ punpckldq mm0, mm2
+ punpckhdq mm2, mm1
+ punpckldq mm1, mm3
+ movq [dstq ], mm0
+ movq [dstq+16], mm2
+ movq [dstq+ 8], mm1
+ add srcq, 8
+ add dstq, 24
+ sub lend, 2
+ jg .loop
+ emms
+ RET
+%endmacro ; FLOAT_TO_INT16_INTERLEAVE6
+
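+; three variants are generated below: _sse pairs cvtps2pi with pshufw, _3dnow substitutes pf2id
+; and the three-instruction pswapd emulation, and _3dn2 keeps pf2id but uses the native pswapd.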
+%define pswapd PSWAPD_SSE
+FLOAT_TO_INT16_INTERLEAVE6 sse
+%define cvtps2pi pf2id
+%define pswapd PSWAPD_3DN1
+FLOAT_TO_INT16_INTERLEAVE6 3dnow
+%undef pswapd
+FLOAT_TO_INT16_INTERLEAVE6 3dn2
+%undef cvtps2pi
+
+
+
+%macro SCALARPRODUCT 1
+; int scalarproduct_int16(int16_t *v1, int16_t *v2, int order, int shift)
+cglobal scalarproduct_int16_%1, 3,3,4, v1, v2, order, shift
+ shl orderq, 1
+ add v1q, orderq
+ add v2q, orderq
+ neg orderq
+ movd m3, shiftm
+ pxor m2, m2
+.loop:
+ movu m0, [v1q + orderq]
+ movu m1, [v1q + orderq + mmsize]
+ pmaddwd m0, [v2q + orderq]
+ pmaddwd m1, [v2q + orderq + mmsize]
+ paddd m2, m0
+ paddd m2, m1
+ add orderq, mmsize*2
+ jl .loop
+%if mmsize == 16
+ movhlps m0, m2
+ paddd m2, m0
+ psrad m2, m3
+ pshuflw m0, m2, 0x4e
+%else
+ psrad m2, m3
+ pshufw m0, m2, 0x4e
+%endif
+ paddd m2, m0
+ movd eax, m2
+ RET
+
+; int scalarproduct_and_madd_int16(int16_t *v1, int16_t *v2, int16_t *v3, int order, int mul)
+cglobal scalarproduct_and_madd_int16_%1, 4,4,8, v1, v2, v3, order, mul
+ shl orderq, 1
+ movd m7, mulm
+%if mmsize == 16
+ pshuflw m7, m7, 0
+ punpcklqdq m7, m7
+%else
+ pshufw m7, m7, 0
+%endif
+ pxor m6, m6
+ add v1q, orderq
+ add v2q, orderq
+ add v3q, orderq
+ neg orderq
+.loop:
+ movu m0, [v2q + orderq]
+ movu m1, [v2q + orderq + mmsize]
+ mova m4, [v1q + orderq]
+ mova m5, [v1q + orderq + mmsize]
+ movu m2, [v3q + orderq]
+ movu m3, [v3q + orderq + mmsize]
+ pmaddwd m0, m4
+ pmaddwd m1, m5
+ pmullw m2, m7
+ pmullw m3, m7
+ paddd m6, m0
+ paddd m6, m1
+ paddw m2, m4
+ paddw m3, m5
+ mova [v1q + orderq], m2
+ mova [v1q + orderq + mmsize], m3
+ add orderq, mmsize*2
+ jl .loop
+%if mmsize == 16
+ movhlps m0, m6
+ paddd m6, m0
+ pshuflw m0, m6, 0x4e
+%else
+ pshufw m0, m6, 0x4e
+%endif
+ paddd m6, m0
+ movd eax, m6
+ RET
+%endmacro
+
+INIT_MMX
+SCALARPRODUCT mmx2
+INIT_XMM
+SCALARPRODUCT sse2
+
+%macro SCALARPRODUCT_LOOP 1
+align 16
+.loop%1:
+ sub orderq, mmsize*2
+%if %1
+ mova m1, m4
+ mova m4, [v2q + orderq]
+ mova m0, [v2q + orderq + mmsize]
+ palignr m1, m0, %1
+ palignr m0, m4, %1
+ mova m3, m5
+ mova m5, [v3q + orderq]
+ mova m2, [v3q + orderq + mmsize]
+ palignr m3, m2, %1
+ palignr m2, m5, %1
+%else
+ mova m0, [v2q + orderq]
+ mova m1, [v2q + orderq + mmsize]
+ mova m2, [v3q + orderq]
+ mova m3, [v3q + orderq + mmsize]
+%endif
+ %define t0 [v1q + orderq]
+ %define t1 [v1q + orderq + mmsize]
+%ifdef ARCH_X86_64
+ mova m8, t0
+ mova m9, t1
+ %define t0 m8
+ %define t1 m9
+%endif
+ pmaddwd m0, t0
+ pmaddwd m1, t1
+ pmullw m2, m7
+ pmullw m3, m7
+ paddw m2, t0
+ paddw m3, t1
+ paddd m6, m0
+ paddd m6, m1
+ mova [v1q + orderq], m2
+ mova [v1q + orderq + mmsize], m3
+ jg .loop%1
+%if %1
+ jmp .end
+%endif
+%endmacro
+
+; int scalarproduct_and_madd_int16(int16_t *v1, int16_t *v2, int16_t *v3, int order, int mul)
+cglobal scalarproduct_and_madd_int16_ssse3, 4,5,10, v1, v2, v3, order, mul
+ shl orderq, 1
+ movd m7, mulm
+ pshuflw m7, m7, 0
+ punpcklqdq m7, m7
+ pxor m6, m6
+ mov r4d, v2d
+ and r4d, 15
+ and v2q, ~15
+ and v3q, ~15
+ mova m4, [v2q + orderq]
+ mova m5, [v3q + orderq]
+ ; a linear chain of compares is faster than a branch tree or a jump table, because the branches taken are cyclic (i.e. predictable)
+ cmp r4d, 0
+ je .loop0
+ cmp r4d, 2
+ je .loop2
+ cmp r4d, 4
+ je .loop4
+ cmp r4d, 6
+ je .loop6
+ cmp r4d, 8
+ je .loop8
+ cmp r4d, 10
+ je .loop10
+ cmp r4d, 12
+ je .loop12
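+; each SCALARPRODUCT_LOOP %1 below handles v2/v3 data that starts %1 bytes into the aligned block, realigning it with palignr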
+SCALARPRODUCT_LOOP 14
+SCALARPRODUCT_LOOP 12
+SCALARPRODUCT_LOOP 10
+SCALARPRODUCT_LOOP 8
+SCALARPRODUCT_LOOP 6
+SCALARPRODUCT_LOOP 4
+SCALARPRODUCT_LOOP 2
+SCALARPRODUCT_LOOP 0
+.end:
+ movhlps m0, m6
+ paddd m6, m0
+ pshuflw m0, m6, 0x4e
+ paddd m6, m0
+ movd eax, m6
+ RET
+
+
+
+; void ff_add_hfyu_median_prediction_mmx2(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top)
+cglobal add_hfyu_median_prediction_mmx2, 6,6,0, dst, top, diff, w, left, left_top
+ movq mm0, [topq]
+ movq mm2, mm0
+ movd mm4, [left_topq]
+ psllq mm2, 8
+ movq mm1, mm0
+ por mm4, mm2
+ movd mm3, [leftq]
+ psubb mm0, mm4 ; t-tl
+ add dstq, wq
+ add topq, wq
+ add diffq, wq
+ neg wq
+ jmp .skip
+.loop:
+ movq mm4, [topq+wq]
+ movq mm0, mm4
+ psllq mm4, 8
+ por mm4, mm1
+ movq mm1, mm0 ; t
+ psubb mm0, mm4 ; t-tl
+.skip:
+ movq mm2, [diffq+wq]
+%assign i 0
+%rep 8
+ movq mm4, mm0
+ paddb mm4, mm3 ; t-tl+l
+ movq mm5, mm3
+ pmaxub mm3, mm1
+ pminub mm5, mm1
+ pminub mm3, mm4
+ pmaxub mm3, mm5 ; median
+ paddb mm3, mm2 ; +residual
+%if i==0
+ movq mm7, mm3
+ psllq mm7, 56
+%else
+ movq mm6, mm3
+ psrlq mm7, 8
+ psllq mm6, 56
+ por mm7, mm6
+%endif
+%if i<7
+ psrlq mm0, 8
+ psrlq mm1, 8
+ psrlq mm2, 8
+%endif
+%assign i i+1
+%endrep
+ movq [dstq+wq], mm7
+ add wq, 8
+ jl .loop
+ movzx r2d, byte [dstq-1]
+ mov [leftq], r2d
+ movzx r2d, byte [topq-1]
+ mov [left_topq], r2d
+ RET
+
+
+%macro ADD_HFYU_LEFT_LOOP 1 ; %1 = is_aligned
+ add srcq, wq
+ add dstq, wq
+ neg wq
+%%.loop:
+ mova m1, [srcq+wq]
+ mova m2, m1
+ psllw m1, 8
+ paddb m1, m2
+ mova m2, m1
+ pshufb m1, m3
+ paddb m1, m2
+ pshufb m0, m5
+ mova m2, m1
+ pshufb m1, m4
+ paddb m1, m2
+%if mmsize == 16
+ mova m2, m1
+ pshufb m1, m6
+ paddb m1, m2
+%endif
+ paddb m0, m1
+%if %1
+ mova [dstq+wq], m0
+%else
+ movq [dstq+wq], m0
+ movhps [dstq+wq+8], m0
+%endif
+ add wq, mmsize
+ jl %%.loop
+ mov eax, mmsize-1
+ sub eax, wd
+ movd m1, eax
+ pshufb m0, m1
+ movd eax, m0
+ RET
+%endmacro
+
+; int ff_add_hfyu_left_prediction(uint8_t *dst, const uint8_t *src, int w, int left)
+INIT_MMX
+cglobal add_hfyu_left_prediction_ssse3, 3,3,7, dst, src, w, left
+.skip_prologue:
+ mova m5, [pb_7 GLOBAL]
+ mova m4, [pb_zzzz3333zzzzbbbb GLOBAL]
+ mova m3, [pb_zz11zz55zz99zzdd GLOBAL]
+ movd m0, leftm
+ psllq m0, 56
+ ADD_HFYU_LEFT_LOOP 1
+
+INIT_XMM
+cglobal add_hfyu_left_prediction_sse4, 3,3,7, dst, src, w, left
+ mova m5, [pb_f GLOBAL]
+ mova m6, [pb_zzzzzzzz77777777 GLOBAL]
+ mova m4, [pb_zzzz3333zzzzbbbb GLOBAL]
+ mova m3, [pb_zz11zz55zz99zzdd GLOBAL]
+ movd m0, leftm
+ pslldq m0, 15
+ test srcq, 15
+ jnz add_hfyu_left_prediction_ssse3.skip_prologue
+ test dstq, 15
+ jnz .unaligned
+ ADD_HFYU_LEFT_LOOP 1
+.unaligned:
+ ADD_HFYU_LEFT_LOOP 0
+
+
+; float ff_scalarproduct_float_sse(const float *v1, const float *v2, int len)
+cglobal scalarproduct_float_sse, 3,3,2, v1, v2, offset
+ neg offsetq
+ shl offsetq, 2
+ sub v1q, offsetq
+ sub v2q, offsetq
+ xorps xmm0, xmm0
+ .loop:
+ movaps xmm1, [v1q+offsetq]
+ mulps xmm1, [v2q+offsetq]
+ addps xmm0, xmm1
+ add offsetq, 16
+ js .loop
+ movhlps xmm1, xmm0
+ addps xmm0, xmm1
+ movss xmm1, xmm0
+ shufps xmm0, xmm0, 1
+ addss xmm0, xmm1
+%ifndef ARCH_X86_64
+ movd r0m, xmm0
+ fld dword r0m
+%endif
+ RET
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/fdct_mmx.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/fdct_mmx.c
new file mode 100644
index 000000000..319daf28b
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/fdct_mmx.c
@@ -0,0 +1,580 @@
+/*
+ * MMX optimized forward DCT
+ * The gcc porting is Copyright (c) 2001 Fabrice Bellard.
+ * cleanup/optimizations are Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ * SSE2 optimization is Copyright (c) 2004 Denes Balatoni.
+ *
+ * from fdctam32.c - AP922 MMX(3D-Now) forward-DCT
+ *
+ * Intel Application Note AP-922 - fast, precise implementation of DCT
+ * http://developer.intel.com/vtune/cbts/appnotes.htm
+ *
+ * Also of inspiration:
+ * a page about fdct at http://www.geocities.com/ssavekar/dct.htm
+ * Skal's fdct at http://skal.planet-d.net/coding/dct.html
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/common.h"
+#include "libavcodec/dsputil.h"
+
+#define ATTR_ALIGN(align) __attribute__ ((__aligned__ (align)))
+
+//////////////////////////////////////////////////////////////////////
+//
+// constants for the forward DCT
+// -----------------------------
+//
+// Be sure to check that your compiler is aligning all constants to QWORD
+// (8-byte) memory boundaries! Otherwise the unaligned memory access will
+// severely stall MMX execution.
+//
+//////////////////////////////////////////////////////////////////////
+
+#define BITS_FRW_ACC 3 // 2 or 3 for accuracy
+#define SHIFT_FRW_COL BITS_FRW_ACC
+#define SHIFT_FRW_ROW (BITS_FRW_ACC + 17 - 3)
+#define RND_FRW_ROW (1 << (SHIFT_FRW_ROW-1))
+//#define RND_FRW_COL (1 << (SHIFT_FRW_COL-1))
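+// with BITS_FRW_ACC == 3 this gives SHIFT_FRW_COL == 3, SHIFT_FRW_ROW == 17 and RND_FRW_ROW == 1 << 16 == 65536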
+
+#define X8(x) x,x,x,x,x,x,x,x
+
+// concatenated table for the forward DCT transformation
+static const int16_t fdct_tg_all_16[24] ATTR_ALIGN(16) = {
+ X8(13036), // tg * (2<<16) + 0.5
+ X8(27146), // tg * (2<<16) + 0.5
+ X8(-21746) // tg * (2<<16) + 0.5
+};
+
+static const int16_t ocos_4_16[8] ATTR_ALIGN(16) = {
+ X8(23170) //cos * (2<<15) + 0.5
+};
+
+static const int16_t fdct_one_corr[8] ATTR_ALIGN(16) = { X8(1) };
+
+static const int32_t fdct_r_row[2] ATTR_ALIGN(8) = {RND_FRW_ROW, RND_FRW_ROW };
+
+static struct
+{
+ const int32_t fdct_r_row_sse2[4] ATTR_ALIGN(16);
+} fdct_r_row_sse2 ATTR_ALIGN(16)=
+{{
+ RND_FRW_ROW, RND_FRW_ROW, RND_FRW_ROW, RND_FRW_ROW
+}};
+//static const long fdct_r_row_sse2[4] ATTR_ALIGN(16) = {RND_FRW_ROW, RND_FRW_ROW, RND_FRW_ROW, RND_FRW_ROW};
+
+static const int16_t tab_frw_01234567[] ATTR_ALIGN(8) = { // forward_dct coeff table
+ 16384, 16384, 22725, 19266,
+ 16384, 16384, 12873, 4520,
+ 21407, 8867, 19266, -4520,
+ -8867, -21407, -22725, -12873,
+ 16384, -16384, 12873, -22725,
+ -16384, 16384, 4520, 19266,
+ 8867, -21407, 4520, -12873,
+ 21407, -8867, 19266, -22725,
+
+ 22725, 22725, 31521, 26722,
+ 22725, 22725, 17855, 6270,
+ 29692, 12299, 26722, -6270,
+ -12299, -29692, -31521, -17855,
+ 22725, -22725, 17855, -31521,
+ -22725, 22725, 6270, 26722,
+ 12299, -29692, 6270, -17855,
+ 29692, -12299, 26722, -31521,
+
+ 21407, 21407, 29692, 25172,
+ 21407, 21407, 16819, 5906,
+ 27969, 11585, 25172, -5906,
+ -11585, -27969, -29692, -16819,
+ 21407, -21407, 16819, -29692,
+ -21407, 21407, 5906, 25172,
+ 11585, -27969, 5906, -16819,
+ 27969, -11585, 25172, -29692,
+
+ 19266, 19266, 26722, 22654,
+ 19266, 19266, 15137, 5315,
+ 25172, 10426, 22654, -5315,
+ -10426, -25172, -26722, -15137,
+ 19266, -19266, 15137, -26722,
+ -19266, 19266, 5315, 22654,
+ 10426, -25172, 5315, -15137,
+ 25172, -10426, 22654, -26722,
+
+ 16384, 16384, 22725, 19266,
+ 16384, 16384, 12873, 4520,
+ 21407, 8867, 19266, -4520,
+ -8867, -21407, -22725, -12873,
+ 16384, -16384, 12873, -22725,
+ -16384, 16384, 4520, 19266,
+ 8867, -21407, 4520, -12873,
+ 21407, -8867, 19266, -22725,
+
+ 19266, 19266, 26722, 22654,
+ 19266, 19266, 15137, 5315,
+ 25172, 10426, 22654, -5315,
+ -10426, -25172, -26722, -15137,
+ 19266, -19266, 15137, -26722,
+ -19266, 19266, 5315, 22654,
+ 10426, -25172, 5315, -15137,
+ 25172, -10426, 22654, -26722,
+
+ 21407, 21407, 29692, 25172,
+ 21407, 21407, 16819, 5906,
+ 27969, 11585, 25172, -5906,
+ -11585, -27969, -29692, -16819,
+ 21407, -21407, 16819, -29692,
+ -21407, 21407, 5906, 25172,
+ 11585, -27969, 5906, -16819,
+ 27969, -11585, 25172, -29692,
+
+ 22725, 22725, 31521, 26722,
+ 22725, 22725, 17855, 6270,
+ 29692, 12299, 26722, -6270,
+ -12299, -29692, -31521, -17855,
+ 22725, -22725, 17855, -31521,
+ -22725, 22725, 6270, 26722,
+ 12299, -29692, 6270, -17855,
+ 29692, -12299, 26722, -31521,
+};
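+// tab_frw_01234567 holds 8 groups of 32 coefficients, one group per row of the 8x8 block;
+// ff_fdct_mmx()/ff_fdct_mmx2() advance the table pointer by 32 entries per row.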
+
+static struct
+{
+ const int16_t tab_frw_01234567_sse2[256] ATTR_ALIGN(16);
+} tab_frw_01234567_sse2 ATTR_ALIGN(16) =
+{{
+//static const int16_t tab_frw_01234567_sse2[] ATTR_ALIGN(16) = { // forward_dct coeff table
+#define TABLE_SSE2 C4, C4, C1, C3, -C6, -C2, -C1, -C5, \
+ C4, C4, C5, C7, C2, C6, C3, -C7, \
+ -C4, C4, C7, C3, C6, -C2, C7, -C5, \
+ C4, -C4, C5, -C1, C2, -C6, C3, -C1,
+// c1..c7 * cos(pi/4) * 2^15
+#define C1 22725
+#define C2 21407
+#define C3 19266
+#define C4 16384
+#define C5 12873
+#define C6 8867
+#define C7 4520
+TABLE_SSE2
+
+#undef C1
+#undef C2
+#undef C3
+#undef C4
+#undef C5
+#undef C6
+#undef C7
+#define C1 31521
+#define C2 29692
+#define C3 26722
+#define C4 22725
+#define C5 17855
+#define C6 12299
+#define C7 6270
+TABLE_SSE2
+
+#undef C1
+#undef C2
+#undef C3
+#undef C4
+#undef C5
+#undef C6
+#undef C7
+#define C1 29692
+#define C2 27969
+#define C3 25172
+#define C4 21407
+#define C5 16819
+#define C6 11585
+#define C7 5906
+TABLE_SSE2
+
+#undef C1
+#undef C2
+#undef C3
+#undef C4
+#undef C5
+#undef C6
+#undef C7
+#define C1 26722
+#define C2 25172
+#define C3 22654
+#define C4 19266
+#define C5 15137
+#define C6 10426
+#define C7 5315
+TABLE_SSE2
+
+#undef C1
+#undef C2
+#undef C3
+#undef C4
+#undef C5
+#undef C6
+#undef C7
+#define C1 22725
+#define C2 21407
+#define C3 19266
+#define C4 16384
+#define C5 12873
+#define C6 8867
+#define C7 4520
+TABLE_SSE2
+
+#undef C1
+#undef C2
+#undef C3
+#undef C4
+#undef C5
+#undef C6
+#undef C7
+#define C1 26722
+#define C2 25172
+#define C3 22654
+#define C4 19266
+#define C5 15137
+#define C6 10426
+#define C7 5315
+TABLE_SSE2
+
+#undef C1
+#undef C2
+#undef C3
+#undef C4
+#undef C5
+#undef C6
+#undef C7
+#define C1 29692
+#define C2 27969
+#define C3 25172
+#define C4 21407
+#define C5 16819
+#define C6 11585
+#define C7 5906
+TABLE_SSE2
+
+#undef C1
+#undef C2
+#undef C3
+#undef C4
+#undef C5
+#undef C6
+#undef C7
+#define C1 31521
+#define C2 29692
+#define C3 26722
+#define C4 22725
+#define C5 17855
+#define C6 12299
+#define C7 6270
+TABLE_SSE2
+}};
+
+#define S(s) AV_TOSTRING(s) //AV_STRINGIFY is too long
+
+#define FDCT_COL(cpu, mm, mov)\
+static av_always_inline void fdct_col_##cpu(const int16_t *in, int16_t *out, int offset)\
+{\
+ __asm__ volatile (\
+ #mov" 16(%0), %%"#mm"0 \n\t" \
+ #mov" 96(%0), %%"#mm"1 \n\t" \
+ #mov" %%"#mm"0, %%"#mm"2 \n\t" \
+ #mov" 32(%0), %%"#mm"3 \n\t" \
+ "paddsw %%"#mm"1, %%"#mm"0 \n\t" \
+ #mov" 80(%0), %%"#mm"4 \n\t" \
+ "psllw $"S(SHIFT_FRW_COL)", %%"#mm"0 \n\t" \
+ #mov" (%0), %%"#mm"5 \n\t" \
+ "paddsw %%"#mm"3, %%"#mm"4 \n\t" \
+ "paddsw 112(%0), %%"#mm"5 \n\t" \
+ "psllw $"S(SHIFT_FRW_COL)", %%"#mm"4 \n\t" \
+ #mov" %%"#mm"0, %%"#mm"6 \n\t" \
+ "psubsw %%"#mm"1, %%"#mm"2 \n\t" \
+ #mov" 16(%1), %%"#mm"1 \n\t" \
+ "psubsw %%"#mm"4, %%"#mm"0 \n\t" \
+ #mov" 48(%0), %%"#mm"7 \n\t" \
+ "pmulhw %%"#mm"0, %%"#mm"1 \n\t" \
+ "paddsw 64(%0), %%"#mm"7 \n\t" \
+ "psllw $"S(SHIFT_FRW_COL)", %%"#mm"5 \n\t" \
+ "paddsw %%"#mm"4, %%"#mm"6 \n\t" \
+ "psllw $"S(SHIFT_FRW_COL)", %%"#mm"7 \n\t" \
+ #mov" %%"#mm"5, %%"#mm"4 \n\t" \
+ "psubsw %%"#mm"7, %%"#mm"5 \n\t" \
+ "paddsw %%"#mm"5, %%"#mm"1 \n\t" \
+ "paddsw %%"#mm"7, %%"#mm"4 \n\t" \
+ "por (%2), %%"#mm"1 \n\t" \
+ "psllw $"S(SHIFT_FRW_COL)"+1, %%"#mm"2 \n\t" \
+ "pmulhw 16(%1), %%"#mm"5 \n\t" \
+ #mov" %%"#mm"4, %%"#mm"7 \n\t" \
+ "psubsw 80(%0), %%"#mm"3 \n\t" \
+ "psubsw %%"#mm"6, %%"#mm"4 \n\t" \
+ #mov" %%"#mm"1, 32(%3) \n\t" \
+ "paddsw %%"#mm"6, %%"#mm"7 \n\t" \
+ #mov" 48(%0), %%"#mm"1 \n\t" \
+ "psllw $"S(SHIFT_FRW_COL)"+1, %%"#mm"3 \n\t" \
+ "psubsw 64(%0), %%"#mm"1 \n\t" \
+ #mov" %%"#mm"2, %%"#mm"6 \n\t" \
+ #mov" %%"#mm"4, 64(%3) \n\t" \
+ "paddsw %%"#mm"3, %%"#mm"2 \n\t" \
+ "pmulhw (%4), %%"#mm"2 \n\t" \
+ "psubsw %%"#mm"3, %%"#mm"6 \n\t" \
+ "pmulhw (%4), %%"#mm"6 \n\t" \
+ "psubsw %%"#mm"0, %%"#mm"5 \n\t" \
+ "por (%2), %%"#mm"5 \n\t" \
+ "psllw $"S(SHIFT_FRW_COL)", %%"#mm"1 \n\t" \
+ "por (%2), %%"#mm"2 \n\t" \
+ #mov" %%"#mm"1, %%"#mm"4 \n\t" \
+ #mov" (%0), %%"#mm"3 \n\t" \
+ "paddsw %%"#mm"6, %%"#mm"1 \n\t" \
+ "psubsw 112(%0), %%"#mm"3 \n\t" \
+ "psubsw %%"#mm"6, %%"#mm"4 \n\t" \
+ #mov" (%1), %%"#mm"0 \n\t" \
+ "psllw $"S(SHIFT_FRW_COL)", %%"#mm"3 \n\t" \
+ #mov" 32(%1), %%"#mm"6 \n\t" \
+ "pmulhw %%"#mm"1, %%"#mm"0 \n\t" \
+ #mov" %%"#mm"7, (%3) \n\t" \
+ "pmulhw %%"#mm"4, %%"#mm"6 \n\t" \
+ #mov" %%"#mm"5, 96(%3) \n\t" \
+ #mov" %%"#mm"3, %%"#mm"7 \n\t" \
+ #mov" 32(%1), %%"#mm"5 \n\t" \
+ "psubsw %%"#mm"2, %%"#mm"7 \n\t" \
+ "paddsw %%"#mm"2, %%"#mm"3 \n\t" \
+ "pmulhw %%"#mm"7, %%"#mm"5 \n\t" \
+ "paddsw %%"#mm"3, %%"#mm"0 \n\t" \
+ "paddsw %%"#mm"4, %%"#mm"6 \n\t" \
+ "pmulhw (%1), %%"#mm"3 \n\t" \
+ "por (%2), %%"#mm"0 \n\t" \
+ "paddsw %%"#mm"7, %%"#mm"5 \n\t" \
+ "psubsw %%"#mm"6, %%"#mm"7 \n\t" \
+ #mov" %%"#mm"0, 16(%3) \n\t" \
+ "paddsw %%"#mm"4, %%"#mm"5 \n\t" \
+ #mov" %%"#mm"7, 48(%3) \n\t" \
+ "psubsw %%"#mm"1, %%"#mm"3 \n\t" \
+ #mov" %%"#mm"5, 80(%3) \n\t" \
+ #mov" %%"#mm"3, 112(%3) \n\t" \
+ : \
+ : "r" (in + offset), "r" (fdct_tg_all_16), "r" (fdct_one_corr), \
+ "r" (out + offset), "r" (ocos_4_16)); \
+}
+
+FDCT_COL(mmx, mm, movq)
+FDCT_COL(sse2, xmm, movdqa)
+
+static av_always_inline void fdct_row_sse2(const int16_t *in, int16_t *out)
+{
+ __asm__ volatile(
+#define FDCT_ROW_SSE2_H1(i,t) \
+ "movq " #i "(%0), %%xmm2 \n\t" \
+ "movq " #i "+8(%0), %%xmm0 \n\t" \
+ "movdqa " #t "+32(%1), %%xmm3 \n\t" \
+ "movdqa " #t "+48(%1), %%xmm7 \n\t" \
+ "movdqa " #t "(%1), %%xmm4 \n\t" \
+ "movdqa " #t "+16(%1), %%xmm5 \n\t"
+
+#define FDCT_ROW_SSE2_H2(i,t) \
+ "movq " #i "(%0), %%xmm2 \n\t" \
+ "movq " #i "+8(%0), %%xmm0 \n\t" \
+ "movdqa " #t "+32(%1), %%xmm3 \n\t" \
+ "movdqa " #t "+48(%1), %%xmm7 \n\t"
+
+#define FDCT_ROW_SSE2(i) \
+ "movq %%xmm2, %%xmm1 \n\t" \
+ "pshuflw $27, %%xmm0, %%xmm0 \n\t" \
+ "paddsw %%xmm0, %%xmm1 \n\t" \
+ "psubsw %%xmm0, %%xmm2 \n\t" \
+ "punpckldq %%xmm2, %%xmm1 \n\t" \
+ "pshufd $78, %%xmm1, %%xmm2 \n\t" \
+ "pmaddwd %%xmm2, %%xmm3 \n\t" \
+ "pmaddwd %%xmm1, %%xmm7 \n\t" \
+ "pmaddwd %%xmm5, %%xmm2 \n\t" \
+ "pmaddwd %%xmm4, %%xmm1 \n\t" \
+ "paddd %%xmm7, %%xmm3 \n\t" \
+ "paddd %%xmm2, %%xmm1 \n\t" \
+ "paddd %%xmm6, %%xmm3 \n\t" \
+ "paddd %%xmm6, %%xmm1 \n\t" \
+ "psrad %3, %%xmm3 \n\t" \
+ "psrad %3, %%xmm1 \n\t" \
+ "packssdw %%xmm3, %%xmm1 \n\t" \
+ "movdqa %%xmm1, " #i "(%4) \n\t"
+
+ "movdqa (%2), %%xmm6 \n\t"
+ FDCT_ROW_SSE2_H1(0,0)
+ FDCT_ROW_SSE2(0)
+ FDCT_ROW_SSE2_H2(64,0)
+ FDCT_ROW_SSE2(64)
+
+ FDCT_ROW_SSE2_H1(16,64)
+ FDCT_ROW_SSE2(16)
+ FDCT_ROW_SSE2_H2(112,64)
+ FDCT_ROW_SSE2(112)
+
+ FDCT_ROW_SSE2_H1(32,128)
+ FDCT_ROW_SSE2(32)
+ FDCT_ROW_SSE2_H2(96,128)
+ FDCT_ROW_SSE2(96)
+
+ FDCT_ROW_SSE2_H1(48,192)
+ FDCT_ROW_SSE2(48)
+ FDCT_ROW_SSE2_H2(80,192)
+ FDCT_ROW_SSE2(80)
+ :
+ : "r" (in), "r" (tab_frw_01234567_sse2.tab_frw_01234567_sse2), "r" (fdct_r_row_sse2.fdct_r_row_sse2), "i" (SHIFT_FRW_ROW), "r" (out)
+ );
+}
+
+static av_always_inline void fdct_row_mmx2(const int16_t *in, int16_t *out, const int16_t *table)
+{
+ __asm__ volatile (
+ "pshufw $0x1B, 8(%0), %%mm5 \n\t"
+ "movq (%0), %%mm0 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "paddsw %%mm5, %%mm0 \n\t"
+ "psubsw %%mm5, %%mm1 \n\t"
+ "movq %%mm0, %%mm2 \n\t"
+ "punpckldq %%mm1, %%mm0 \n\t"
+ "punpckhdq %%mm1, %%mm2 \n\t"
+ "movq (%1), %%mm1 \n\t"
+ "movq 8(%1), %%mm3 \n\t"
+ "movq 16(%1), %%mm4 \n\t"
+ "movq 24(%1), %%mm5 \n\t"
+ "movq 32(%1), %%mm6 \n\t"
+ "movq 40(%1), %%mm7 \n\t"
+ "pmaddwd %%mm0, %%mm1 \n\t"
+ "pmaddwd %%mm2, %%mm3 \n\t"
+ "pmaddwd %%mm0, %%mm4 \n\t"
+ "pmaddwd %%mm2, %%mm5 \n\t"
+ "pmaddwd %%mm0, %%mm6 \n\t"
+ "pmaddwd %%mm2, %%mm7 \n\t"
+ "pmaddwd 48(%1), %%mm0 \n\t"
+ "pmaddwd 56(%1), %%mm2 \n\t"
+ "paddd %%mm1, %%mm3 \n\t"
+ "paddd %%mm4, %%mm5 \n\t"
+ "paddd %%mm6, %%mm7 \n\t"
+ "paddd %%mm0, %%mm2 \n\t"
+ "movq (%2), %%mm0 \n\t"
+ "paddd %%mm0, %%mm3 \n\t"
+ "paddd %%mm0, %%mm5 \n\t"
+ "paddd %%mm0, %%mm7 \n\t"
+ "paddd %%mm0, %%mm2 \n\t"
+ "psrad $"S(SHIFT_FRW_ROW)", %%mm3 \n\t"
+ "psrad $"S(SHIFT_FRW_ROW)", %%mm5 \n\t"
+ "psrad $"S(SHIFT_FRW_ROW)", %%mm7 \n\t"
+ "psrad $"S(SHIFT_FRW_ROW)", %%mm2 \n\t"
+ "packssdw %%mm5, %%mm3 \n\t"
+ "packssdw %%mm2, %%mm7 \n\t"
+ "movq %%mm3, (%3) \n\t"
+ "movq %%mm7, 8(%3) \n\t"
+ :
+ : "r" (in), "r" (table), "r" (fdct_r_row), "r" (out));
+}
+
+static av_always_inline void fdct_row_mmx(const int16_t *in, int16_t *out, const int16_t *table)
+{
+ //FIXME reorder (I do not have an old MMX-only CPU here to benchmark ...)
+ __asm__ volatile(
+ "movd 12(%0), %%mm1 \n\t"
+ "punpcklwd 8(%0), %%mm1 \n\t"
+ "movq %%mm1, %%mm2 \n\t"
+ "psrlq $0x20, %%mm1 \n\t"
+ "movq 0(%0), %%mm0 \n\t"
+ "punpcklwd %%mm2, %%mm1 \n\t"
+ "movq %%mm0, %%mm5 \n\t"
+ "paddsw %%mm1, %%mm0 \n\t"
+ "psubsw %%mm1, %%mm5 \n\t"
+ "movq %%mm0, %%mm2 \n\t"
+ "punpckldq %%mm5, %%mm0 \n\t"
+ "punpckhdq %%mm5, %%mm2 \n\t"
+ "movq 0(%1), %%mm1 \n\t"
+ "movq 8(%1), %%mm3 \n\t"
+ "movq 16(%1), %%mm4 \n\t"
+ "movq 24(%1), %%mm5 \n\t"
+ "movq 32(%1), %%mm6 \n\t"
+ "movq 40(%1), %%mm7 \n\t"
+ "pmaddwd %%mm0, %%mm1 \n\t"
+ "pmaddwd %%mm2, %%mm3 \n\t"
+ "pmaddwd %%mm0, %%mm4 \n\t"
+ "pmaddwd %%mm2, %%mm5 \n\t"
+ "pmaddwd %%mm0, %%mm6 \n\t"
+ "pmaddwd %%mm2, %%mm7 \n\t"
+ "pmaddwd 48(%1), %%mm0 \n\t"
+ "pmaddwd 56(%1), %%mm2 \n\t"
+ "paddd %%mm1, %%mm3 \n\t"
+ "paddd %%mm4, %%mm5 \n\t"
+ "paddd %%mm6, %%mm7 \n\t"
+ "paddd %%mm0, %%mm2 \n\t"
+ "movq (%2), %%mm0 \n\t"
+ "paddd %%mm0, %%mm3 \n\t"
+ "paddd %%mm0, %%mm5 \n\t"
+ "paddd %%mm0, %%mm7 \n\t"
+ "paddd %%mm0, %%mm2 \n\t"
+ "psrad $"S(SHIFT_FRW_ROW)", %%mm3 \n\t"
+ "psrad $"S(SHIFT_FRW_ROW)", %%mm5 \n\t"
+ "psrad $"S(SHIFT_FRW_ROW)", %%mm7 \n\t"
+ "psrad $"S(SHIFT_FRW_ROW)", %%mm2 \n\t"
+ "packssdw %%mm5, %%mm3 \n\t"
+ "packssdw %%mm2, %%mm7 \n\t"
+ "movq %%mm3, 0(%3) \n\t"
+ "movq %%mm7, 8(%3) \n\t"
+ :
+ : "r" (in), "r" (table), "r" (fdct_r_row), "r" (out));
+}
+
+void ff_fdct_mmx(int16_t *block)
+{
+ int64_t align_tmp[16] ATTR_ALIGN(8);
+ int16_t * block1= (int16_t*)align_tmp;
+ const int16_t *table= tab_frw_01234567;
+ int i;
+
+ fdct_col_mmx(block, block1, 0);
+ fdct_col_mmx(block, block1, 4);
+
+ for(i=8;i>0;i--) {
+ fdct_row_mmx(block1, block, table);
+ block1 += 8;
+ table += 32;
+ block += 8;
+ }
+}
+
+void ff_fdct_mmx2(int16_t *block)
+{
+ int64_t align_tmp[16] ATTR_ALIGN(8);
+ int16_t *block1= (int16_t*)align_tmp;
+ const int16_t *table= tab_frw_01234567;
+ int i;
+
+ fdct_col_mmx(block, block1, 0);
+ fdct_col_mmx(block, block1, 4);
+
+ for(i=8;i>0;i--) {
+ fdct_row_mmx2(block1, block, table);
+ block1 += 8;
+ table += 32;
+ block += 8;
+ }
+}
+
+void ff_fdct_sse2(int16_t *block)
+{
+ int64_t align_tmp[16] ATTR_ALIGN(16);
+ int16_t * const block1= (int16_t*)align_tmp;
+
+ fdct_col_sse2(block, block1, 0);
+ fdct_row_sse2(block1, block);
+}
+
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/fft.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/fft.c
new file mode 100644
index 000000000..ad395991e
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/fft.c
@@ -0,0 +1,48 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavcodec/dsputil.h"
+#include "fft.h"
+
+av_cold void ff_fft_init_mmx(FFTContext *s)
+{
+/* Crashes on 64-bit
+ * ToDo: verify if that is still the case with the current code and with GCC 4.3.3 and above
+ */
+#if HAVE_YASM && ARCH_X86_32
+ int has_vectors = mm_support();
+ if (has_vectors & FF_MM_SSE && HAVE_SSE) {
+ /* SSE for P3/P4/K8 */
+ s->imdct_calc = ff_imdct_calc_sse;
+ /* crashes DTS decoder */
+ //s->imdct_half = ff_imdct_half_sse;
+ s->fft_permute = ff_fft_permute_sse;
+ s->fft_calc = ff_fft_calc_sse;
+ } else if (has_vectors & FF_MM_3DNOWEXT && HAVE_AMD3DNOWEXT) {
+ /* 3DNowEx for K7 */
+ s->imdct_calc = ff_imdct_calc_3dn2;
+ s->imdct_half = ff_imdct_half_3dn2;
+ s->fft_calc = ff_fft_calc_3dn2;
+ } else if (has_vectors & FF_MM_3DNOW && HAVE_AMD3DNOW) {
+ /* 3DNow! for K6-2/3 */
+ s->imdct_calc = ff_imdct_calc_3dn;
+ s->imdct_half = ff_imdct_half_3dn;
+ s->fft_calc = ff_fft_calc_3dn;
+ }
+#endif
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/fft.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/fft.h
new file mode 100644
index 000000000..b0fff1b12
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/fft.h
@@ -0,0 +1,36 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_X86_FFT_H
+#define AVCODEC_X86_FFT_H
+
+#include "libavcodec/dsputil.h"
+
+void ff_fft_permute_sse(FFTContext *s, FFTComplex *z);
+void ff_fft_calc_sse(FFTContext *s, FFTComplex *z);
+void ff_fft_calc_3dn(FFTContext *s, FFTComplex *z);
+void ff_fft_calc_3dn2(FFTContext *s, FFTComplex *z);
+
+void ff_imdct_calc_3dn(FFTContext *s, FFTSample *output, const FFTSample *input);
+void ff_imdct_half_3dn(FFTContext *s, FFTSample *output, const FFTSample *input);
+void ff_imdct_calc_3dn2(FFTContext *s, FFTSample *output, const FFTSample *input);
+void ff_imdct_half_3dn2(FFTContext *s, FFTSample *output, const FFTSample *input);
+void ff_imdct_calc_sse(FFTContext *s, FFTSample *output, const FFTSample *input);
+void ff_imdct_half_sse(FFTContext *s, FFTSample *output, const FFTSample *input);
+
+#endif
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/fft_3dn.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/fft_3dn.c
new file mode 100644
index 000000000..f8103e871
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/fft_3dn.c
@@ -0,0 +1,23 @@
+/*
+ * FFT/MDCT transform with 3DNow! optimizations
+ * Copyright (c) 2008 Loren Merritt
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define EMULATE_3DNOWEXT
+#include "fft_3dn2.c"
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/fft_3dn2.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/fft_3dn2.c
new file mode 100644
index 000000000..160caeecf
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/fft_3dn2.c
@@ -0,0 +1,174 @@
+/*
+ * FFT/MDCT transform with Extended 3DNow! optimizations
+ * Copyright (c) 2006-2008 Zuxy MENG Jie, Loren Merritt
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/x86_cpu.h"
+#include "libavcodec/dsputil.h"
+#include "fft.h"
+
+DECLARE_ALIGNED_8(static const int, m1m1)[2] = { 1<<31, 1<<31 };
+
+#ifdef EMULATE_3DNOWEXT
+#define PSWAPD(s,d)\
+ "movq "#s","#d"\n"\
+ "psrlq $32,"#d"\n"\
+ "punpckldq "#s","#d"\n"
+#define ff_fft_calc_3dn2 ff_fft_calc_3dn
+#define ff_fft_dispatch_3dn2 ff_fft_dispatch_3dn
+#define ff_fft_dispatch_interleave_3dn2 ff_fft_dispatch_interleave_3dn
+#define ff_imdct_calc_3dn2 ff_imdct_calc_3dn
+#define ff_imdct_half_3dn2 ff_imdct_half_3dn
+#else
+#define PSWAPD(s,d) "pswapd "#s","#d"\n"
+#endif
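+// PSWAPD(s,d) sets d to s with its two 32-bit halves swapped; when EMULATE_3DNOWEXT is defined
+// the pswapd instruction is emulated with a movq/psrlq/punpckldq sequence.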
+
+void ff_fft_dispatch_3dn2(FFTComplex *z, int nbits);
+void ff_fft_dispatch_interleave_3dn2(FFTComplex *z, int nbits);
+
+void ff_fft_calc_3dn2(FFTContext *s, FFTComplex *z)
+{
+ int n = 1<<s->nbits;
+ int i;
+ ff_fft_dispatch_interleave_3dn2(z, s->nbits);
+ __asm__ volatile("femms");
+ if(n <= 8)
+ for(i=0; i<n; i+=2)
+ FFSWAP(FFTSample, z[i].im, z[i+1].re);
+}
+
+void ff_imdct_half_3dn2(FFTContext *s, FFTSample *output, const FFTSample *input)
+{
+ x86_reg j, k;
+ long n = 1 << s->mdct_bits;
+ long n2 = n >> 1;
+ long n4 = n >> 2;
+ long n8 = n >> 3;
+ const uint16_t *revtab = s->revtab;
+ const FFTSample *tcos = s->tcos;
+ const FFTSample *tsin = s->tsin;
+ const FFTSample *in1, *in2;
+ FFTComplex *z = (FFTComplex *)output;
+
+ /* pre rotation */
+ in1 = input;
+ in2 = input + n2 - 1;
+#ifdef EMULATE_3DNOWEXT
+ __asm__ volatile("movd %0, %%mm7" ::"r"(1<<31));
+#endif
+ for(k = 0; k < n4; k++) {
+ // FIXME a single block is faster, but gcc 2.95 and 3.4.x on 32bit can't compile it
+ __asm__ volatile(
+ "movd %0, %%mm0 \n"
+ "movd %2, %%mm1 \n"
+ "punpckldq %1, %%mm0 \n"
+ "punpckldq %3, %%mm1 \n"
+ "movq %%mm0, %%mm2 \n"
+ PSWAPD( %%mm1, %%mm3 )
+ "pfmul %%mm1, %%mm0 \n"
+ "pfmul %%mm3, %%mm2 \n"
+#ifdef EMULATE_3DNOWEXT
+ "movq %%mm0, %%mm1 \n"
+ "punpckhdq %%mm2, %%mm0 \n"
+ "punpckldq %%mm2, %%mm1 \n"
+ "pxor %%mm7, %%mm0 \n"
+ "pfadd %%mm1, %%mm0 \n"
+#else
+ "pfpnacc %%mm2, %%mm0 \n"
+#endif
+ ::"m"(in2[-2*k]), "m"(in1[2*k]),
+ "m"(tcos[k]), "m"(tsin[k])
+ );
+ __asm__ volatile(
+ "movq %%mm0, %0 \n\t"
+ :"=m"(z[revtab[k]])
+ );
+ }
+
+ ff_fft_dispatch_3dn2(z, s->nbits);
+
+#define CMUL(j,mm0,mm1)\
+ "movq (%2,"#j",2), %%mm6 \n"\
+ "movq 8(%2,"#j",2), "#mm0"\n"\
+ "movq %%mm6, "#mm1"\n"\
+ "movq "#mm0",%%mm7 \n"\
+ "pfmul (%3,"#j"), %%mm6 \n"\
+ "pfmul (%4,"#j"), "#mm0"\n"\
+ "pfmul (%4,"#j"), "#mm1"\n"\
+ "pfmul (%3,"#j"), %%mm7 \n"\
+ "pfsub %%mm6, "#mm0"\n"\
+ "pfadd %%mm7, "#mm1"\n"
+
+ /* post rotation */
+ j = -n2;
+ k = n2-8;
+ __asm__ volatile(
+ "1: \n"
+ CMUL(%0, %%mm0, %%mm1)
+ CMUL(%1, %%mm2, %%mm3)
+ "movd %%mm0, (%2,%0,2) \n"
+ "movd %%mm1,12(%2,%1,2) \n"
+ "movd %%mm2, (%2,%1,2) \n"
+ "movd %%mm3,12(%2,%0,2) \n"
+ "psrlq $32, %%mm0 \n"
+ "psrlq $32, %%mm1 \n"
+ "psrlq $32, %%mm2 \n"
+ "psrlq $32, %%mm3 \n"
+ "movd %%mm0, 8(%2,%0,2) \n"
+ "movd %%mm1, 4(%2,%1,2) \n"
+ "movd %%mm2, 8(%2,%1,2) \n"
+ "movd %%mm3, 4(%2,%0,2) \n"
+ "sub $8, %1 \n"
+ "add $8, %0 \n"
+ "jl 1b \n"
+ :"+r"(j), "+r"(k)
+ :"r"(z+n8), "r"(tcos+n8), "r"(tsin+n8)
+ :"memory"
+ );
+ __asm__ volatile("femms");
+}
+
+void ff_imdct_calc_3dn2(FFTContext *s, FFTSample *output, const FFTSample *input)
+{
+ x86_reg j, k;
+ long n = 1 << s->mdct_bits;
+ long n4 = n >> 2;
+
+ ff_imdct_half_3dn2(s, output+n4, input);
+
+ j = -n;
+ k = n-8;
+ __asm__ volatile(
+ "movq %4, %%mm7 \n"
+ "1: \n"
+ PSWAPD((%2,%1), %%mm0)
+ PSWAPD((%3,%0), %%mm1)
+ "pxor %%mm7, %%mm0 \n"
+ "movq %%mm1, (%3,%1) \n"
+ "movq %%mm0, (%2,%0) \n"
+ "sub $8, %1 \n"
+ "add $8, %0 \n"
+ "jl 1b \n"
+ :"+r"(j), "+r"(k)
+ :"r"(output+n4), "r"(output+n4*3),
+ "m"(*m1m1)
+ );
+ __asm__ volatile("femms");
+}
+
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/fft_mmx.asm b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/fft_mmx.asm
new file mode 100644
index 000000000..26bd748ae
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/fft_mmx.asm
@@ -0,0 +1,475 @@
+;******************************************************************************
+;* FFT transform with SSE/3DNow optimizations
+;* Copyright (c) 2008 Loren Merritt
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+; These functions are not individually interchangeable with the C versions.
+; While C takes arrays of FFTComplex, SSE/3DNow leave intermediate results
+; in blocks as convenient to the vector size.
+; i.e. {4x real, 4x imaginary, 4x real, ...} (or 2x respectively)
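+; e.g. an SSE pass stores {z0.re, z1.re, z2.re, z3.re} in one xmm-sized block and
+; {z0.im, z1.im, z2.im, z3.im} in the next.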
+
+%include "x86inc.asm"
+
+SECTION_RODATA
+
+%define M_SQRT1_2 0.70710678118654752440
+ps_root2: times 4 dd M_SQRT1_2
+ps_root2mppm: dd -M_SQRT1_2, M_SQRT1_2, M_SQRT1_2, -M_SQRT1_2
+ps_m1p1: dd 1<<31, 0
+
+%assign i 16
+%rep 13
+cextern ff_cos_ %+ i
+%assign i i<<1
+%endrep
+
+%ifdef ARCH_X86_64
+ %define pointer dq
+%else
+ %define pointer dd
+%endif
+
+%macro IF0 1+
+%endmacro
+%macro IF1 1+
+ %1
+%endmacro
+
+section .text align=16
+
+%macro T2_3DN 4 ; z0, z1, mem0, mem1
+ mova %1, %3
+ mova %2, %1
+ pfadd %1, %4
+ pfsub %2, %4
+%endmacro
+
+%macro T4_3DN 6 ; z0, z1, z2, z3, tmp0, tmp1
+ mova %5, %3
+ pfsub %3, %4
+ pfadd %5, %4 ; {t6,t5}
+ pxor %3, [ps_m1p1 GLOBAL] ; {t8,t7}
+ mova %6, %1
+ pswapd %3, %3
+ pfadd %1, %5 ; {r0,i0}
+ pfsub %6, %5 ; {r2,i2}
+ mova %4, %2
+ pfadd %2, %3 ; {r1,i1}
+ pfsub %4, %3 ; {r3,i3}
+ SWAP %3, %6
+%endmacro
+
+; in: %1={r0,i0,r1,i1} %2={r2,i2,r3,i3}
+; out: %1={r0,r1,r2,r3} %2={i0,i1,i2,i3}
+%macro T4_SSE 3
+ mova %3, %1
+ shufps %1, %2, 0x64 ; {r0,i0,r3,i2}
+ shufps %3, %2, 0xce ; {r1,i1,r2,i3}
+ mova %2, %1
+ addps %1, %3 ; {t1,t2,t6,t5}
+ subps %2, %3 ; {t3,t4,t8,t7}
+ mova %3, %1
+ shufps %1, %2, 0x44 ; {t1,t2,t3,t4}
+ shufps %3, %2, 0xbe ; {t6,t5,t7,t8}
+ mova %2, %1
+ addps %1, %3 ; {r0,i0,r1,i1}
+ subps %2, %3 ; {r2,i2,r3,i3}
+ mova %3, %1
+ shufps %1, %2, 0x88 ; {r0,r1,r2,r3}
+ shufps %3, %2, 0xdd ; {i0,i1,i2,i3}
+ SWAP %2, %3
+%endmacro
+
+%macro T8_SSE 6 ; r0,i0,r1,i1,t0,t1
+ mova %5, %3
+ shufps %3, %4, 0x44 ; {r4,i4,r6,i6}
+ shufps %5, %4, 0xee ; {r5,i5,r7,i7}
+ mova %6, %3
+ subps %3, %5 ; {r5,i5,r7,i7}
+ addps %6, %5 ; {t1,t2,t3,t4}
+ mova %5, %3
+ shufps %5, %5, 0xb1 ; {i5,r5,i7,r7}
+ mulps %3, [ps_root2mppm GLOBAL] ; {-r5,i5,r7,-i7}
+ mulps %5, [ps_root2 GLOBAL]
+ addps %3, %5 ; {t8,t7,ta,t9}
+ mova %5, %6
+ shufps %6, %3, 0x36 ; {t3,t2,t9,t8}
+ shufps %5, %3, 0x9c ; {t1,t4,t7,ta}
+ mova %3, %6
+ addps %6, %5 ; {t1,t2,t9,ta}
+ subps %3, %5 ; {t6,t5,tc,tb}
+ mova %5, %6
+ shufps %6, %3, 0xd8 ; {t1,t9,t5,tb}
+ shufps %5, %3, 0x8d ; {t2,ta,t6,tc}
+ mova %3, %1
+ mova %4, %2
+ addps %1, %6 ; {r0,r1,r2,r3}
+ addps %2, %5 ; {i0,i1,i2,i3}
+ subps %3, %6 ; {r4,r5,r6,r7}
+ subps %4, %5 ; {i4,i5,i6,i7}
+%endmacro
+
+; scheduled for cpu-bound sizes
+%macro PASS_SMALL 3 ; (to load m4-m7), wre, wim
+IF%1 mova m4, Z(4)
+IF%1 mova m5, Z(5)
+ mova m0, %2 ; wre
+ mova m2, m4
+ mova m1, %3 ; wim
+ mova m3, m5
+ mulps m2, m0 ; r2*wre
+IF%1 mova m6, Z(6)
+ mulps m3, m1 ; i2*wim
+IF%1 mova m7, Z(7)
+ mulps m4, m1 ; r2*wim
+ mulps m5, m0 ; i2*wre
+ addps m2, m3 ; r2*wre + i2*wim
+ mova m3, m1
+ mulps m1, m6 ; r3*wim
+ subps m5, m4 ; i2*wre - r2*wim
+ mova m4, m0
+ mulps m3, m7 ; i3*wim
+ mulps m4, m6 ; r3*wre
+ mulps m0, m7 ; i3*wre
+ subps m4, m3 ; r3*wre - i3*wim
+ mova m3, Z(0)
+ addps m0, m1 ; i3*wre + r3*wim
+ mova m1, m4
+ addps m4, m2 ; t5
+ subps m1, m2 ; t3
+ subps m3, m4 ; r2
+ addps m4, Z(0) ; r0
+ mova m6, Z(2)
+ mova Z(4), m3
+ mova Z(0), m4
+ mova m3, m5
+ subps m5, m0 ; t4
+ mova m4, m6
+ subps m6, m5 ; r3
+ addps m5, m4 ; r1
+ mova Z(6), m6
+ mova Z(2), m5
+ mova m2, Z(3)
+ addps m3, m0 ; t6
+ subps m2, m1 ; i3
+ mova m7, Z(1)
+ addps m1, Z(3) ; i1
+ mova Z(7), m2
+ mova Z(3), m1
+ mova m4, m7
+ subps m7, m3 ; i2
+ addps m3, m4 ; i0
+ mova Z(5), m7
+ mova Z(1), m3
+%endmacro
+
+; scheduled to avoid store->load aliasing
+%macro PASS_BIG 1 ; (!interleave)
+ mova m4, Z(4) ; r2
+ mova m5, Z(5) ; i2
+ mova m2, m4
+ mova m0, [wq] ; wre
+ mova m3, m5
+ mova m1, [wq+o1q] ; wim
+ mulps m2, m0 ; r2*wre
+ mova m6, Z(6) ; r3
+ mulps m3, m1 ; i2*wim
+ mova m7, Z(7) ; i3
+ mulps m4, m1 ; r2*wim
+ mulps m5, m0 ; i2*wre
+ addps m2, m3 ; r2*wre + i2*wim
+ mova m3, m1
+ mulps m1, m6 ; r3*wim
+ subps m5, m4 ; i2*wre - r2*wim
+ mova m4, m0
+ mulps m3, m7 ; i3*wim
+ mulps m4, m6 ; r3*wre
+ mulps m0, m7 ; i3*wre
+ subps m4, m3 ; r3*wre - i3*wim
+ mova m3, Z(0)
+ addps m0, m1 ; i3*wre + r3*wim
+ mova m1, m4
+ addps m4, m2 ; t5
+ subps m1, m2 ; t3
+ subps m3, m4 ; r2
+ addps m4, Z(0) ; r0
+ mova m6, Z(2)
+ mova Z(4), m3
+ mova Z(0), m4
+ mova m3, m5
+ subps m5, m0 ; t4
+ mova m4, m6
+ subps m6, m5 ; r3
+ addps m5, m4 ; r1
+IF%1 mova Z(6), m6
+IF%1 mova Z(2), m5
+ mova m2, Z(3)
+ addps m3, m0 ; t6
+ subps m2, m1 ; i3
+ mova m7, Z(1)
+ addps m1, Z(3) ; i1
+IF%1 mova Z(7), m2
+IF%1 mova Z(3), m1
+ mova m4, m7
+ subps m7, m3 ; i2
+ addps m3, m4 ; i0
+IF%1 mova Z(5), m7
+IF%1 mova Z(1), m3
+%if %1==0
+ mova m4, m5 ; r1
+ mova m0, m6 ; r3
+ unpcklps m5, m1
+ unpckhps m4, m1
+ unpcklps m6, m2
+ unpckhps m0, m2
+ mova m1, Z(0)
+ mova m2, Z(4)
+ mova Z(2), m5
+ mova Z(3), m4
+ mova Z(6), m6
+ mova Z(7), m0
+ mova m5, m1 ; r0
+ mova m4, m2 ; r2
+ unpcklps m1, m3
+ unpckhps m5, m3
+ unpcklps m2, m7
+ unpckhps m4, m7
+ mova Z(0), m1
+ mova Z(1), m5
+ mova Z(4), m2
+ mova Z(5), m4
+%endif
+%endmacro
+
+%macro PUNPCK 3
+ mova %3, %1
+ punpckldq %1, %2
+ punpckhdq %3, %2
+%endmacro
+
+INIT_XMM
+%define mova movaps
+
+%define Z(x) [r0+mmsize*x]
+
+align 16
+fft4_sse:
+ mova m0, Z(0)
+ mova m1, Z(1)
+ T4_SSE m0, m1, m2
+ mova Z(0), m0
+ mova Z(1), m1
+ ret
+
+align 16
+fft8_sse:
+ mova m0, Z(0)
+ mova m1, Z(1)
+ T4_SSE m0, m1, m2
+ mova m2, Z(2)
+ mova m3, Z(3)
+ T8_SSE m0, m1, m2, m3, m4, m5
+ mova Z(0), m0
+ mova Z(1), m1
+ mova Z(2), m2
+ mova Z(3), m3
+ ret
+
+align 16
+fft16_sse:
+ mova m0, Z(0)
+ mova m1, Z(1)
+ T4_SSE m0, m1, m2
+ mova m2, Z(2)
+ mova m3, Z(3)
+ T8_SSE m0, m1, m2, m3, m4, m5
+ mova m4, Z(4)
+ mova m5, Z(5)
+ mova Z(0), m0
+ mova Z(1), m1
+ mova Z(2), m2
+ mova Z(3), m3
+ T4_SSE m4, m5, m6
+ mova m6, Z(6)
+ mova m7, Z(7)
+ T4_SSE m6, m7, m0
+ PASS_SMALL 0, [ff_cos_16 GLOBAL], [ff_cos_16+16 GLOBAL]
+ ret
+
+
+INIT_MMX
+
+%macro FFT48_3DN 1
+align 16
+fft4%1:
+ T2_3DN m0, m1, Z(0), Z(1)
+ mova m2, Z(2)
+ mova m3, Z(3)
+ T4_3DN m0, m1, m2, m3, m4, m5
+ PUNPCK m0, m1, m4
+ PUNPCK m2, m3, m5
+ mova Z(0), m0
+ mova Z(1), m4
+ mova Z(2), m2
+ mova Z(3), m5
+ ret
+
+align 16
+fft8%1:
+ T2_3DN m0, m1, Z(0), Z(1)
+ mova m2, Z(2)
+ mova m3, Z(3)
+ T4_3DN m0, m1, m2, m3, m4, m5
+ mova Z(0), m0
+ mova Z(2), m2
+ T2_3DN m4, m5, Z(4), Z(5)
+ T2_3DN m6, m7, Z(6), Z(7)
+ pswapd m0, m5
+ pswapd m2, m7
+ pxor m0, [ps_m1p1 GLOBAL]
+ pxor m2, [ps_m1p1 GLOBAL]
+ pfsub m5, m0
+ pfadd m7, m2
+ pfmul m5, [ps_root2 GLOBAL]
+ pfmul m7, [ps_root2 GLOBAL]
+ T4_3DN m1, m3, m5, m7, m0, m2
+ mova Z(5), m5
+ mova Z(7), m7
+ mova m0, Z(0)
+ mova m2, Z(2)
+ T4_3DN m0, m2, m4, m6, m5, m7
+ PUNPCK m0, m1, m5
+ PUNPCK m2, m3, m7
+ mova Z(0), m0
+ mova Z(1), m5
+ mova Z(2), m2
+ mova Z(3), m7
+ PUNPCK m4, Z(5), m5
+ PUNPCK m6, Z(7), m7
+ mova Z(4), m4
+ mova Z(5), m5
+ mova Z(6), m6
+ mova Z(7), m7
+ ret
+%endmacro
+
+FFT48_3DN _3dn2
+
+%macro pswapd 2
+%ifidn %1, %2
+ movd [r0+12], %1
+ punpckhdq %1, [r0+8]
+%else
+ movq %1, %2
+ psrlq %1, 32
+ punpckldq %1, %2
+%endif
+%endmacro
+
+FFT48_3DN _3dn
+
+
+%define Z(x) [zq + o1q*(x&6)*((x/6)^1) + o3q*(x/6) + mmsize*(x&1)]
+
+%macro DECL_PASS 2+ ; name, payload
+align 16
+%1:
+DEFINE_ARGS z, w, n, o1, o3
+ lea o3q, [nq*3]
+ lea o1q, [nq*8]
+ shl o3q, 4
+.loop:
+ %2
+ add zq, mmsize*2
+ add wq, mmsize
+ sub nd, mmsize/8
+ jg .loop
+ rep ret
+%endmacro
+
+INIT_XMM
+%define mova movaps
+DECL_PASS pass_sse, PASS_BIG 1
+DECL_PASS pass_interleave_sse, PASS_BIG 0
+
+INIT_MMX
+%define mulps pfmul
+%define addps pfadd
+%define subps pfsub
+%define unpcklps punpckldq
+%define unpckhps punpckhdq
+DECL_PASS pass_3dn, PASS_SMALL 1, [wq], [wq+o1q]
+DECL_PASS pass_interleave_3dn, PASS_BIG 0
+%define pass_3dn2 pass_3dn
+%define pass_interleave_3dn2 pass_interleave_3dn
+
+
+%macro DECL_FFT 2-3 ; nbits, cpu, suffix
+%xdefine list_of_fft fft4%2, fft8%2
+%if %1==5
+%xdefine list_of_fft list_of_fft, fft16%2
+%endif
+
+%assign n 1<<%1
+%rep 17-%1
+%assign n2 n/2
+%assign n4 n/4
+%xdefine list_of_fft list_of_fft, fft %+ n %+ %3%2
+
+align 16
+fft %+ n %+ %3%2:
+ call fft %+ n2 %+ %2
+ add r0, n*4 - (n&(-2<<%1))
+ call fft %+ n4 %+ %2
+ add r0, n*2 - (n2&(-2<<%1))
+ call fft %+ n4 %+ %2
+ sub r0, n*6 + (n2&(-2<<%1))
+ lea r1, [ff_cos_ %+ n GLOBAL]
+ mov r2d, n4/2
+ jmp pass%3%2
+
+%assign n n*2
+%endrep
+%undef n
+
+%ifidn __OUTPUT_FORMAT__,macho64
+section .rodata
+%endif
+
+align 8
+dispatch_tab%3%2: pointer list_of_fft
+
+section .text
+
+; On x86_32, this function does the register saving and restoring for all of fft.
+; The others pass args in registers and don't spill anything.
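+; dispatch_tab is indexed by nbits-2, so entry 0 is the 4-point transform.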
+cglobal fft_dispatch%3%2, 2,5,8, z, nbits
+ lea r2, [dispatch_tab%3%2 GLOBAL]
+ mov r2, [r2 + (nbitsq-2)*gprsize]
+ call r2
+ RET
+%endmacro ; DECL_FFT
+
+DECL_FFT 5, _sse
+DECL_FFT 5, _sse, _interleave
+DECL_FFT 4, _3dn
+DECL_FFT 4, _3dn, _interleave
+DECL_FFT 4, _3dn2
+DECL_FFT 4, _3dn2, _interleave
+
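For reference, the dispatch scheme above (a table of fft entry points indexed by the transform size, reached through fft_dispatch as the comment notes) boils down to the C sketch below; all names here (Cplx, fft4, dispatch_tab, fft_dispatch) are illustrative stand-ins rather than the actual symbols.

    #include <stdio.h>

    typedef struct { float re, im; } Cplx;          /* stand-in for FFTComplex */
    typedef void (*fft_fn)(Cplx *z);

    /* stubs standing in for fft4_sse / fft8_sse / fft16_sse above */
    static void fft4 (Cplx *z) { (void)z; puts("4-point");  }
    static void fft8 (Cplx *z) { (void)z; puts("8-point");  }
    static void fft16(Cplx *z) { (void)z; puts("16-point"); }

    static fft_fn const dispatch_tab[] = { fft4, fft8, fft16 /* , fft32, ... */ };

    /* mirrors fft_dispatch: look up the entry for this transform size and call it */
    static void fft_dispatch(Cplx *z, int nbits)
    {
        dispatch_tab[nbits - 2](z);                  /* nbits==2 -> 4-point, 3 -> 8-point, ... */
    }

    int main(void) { fft_dispatch(0, 3); return 0; } /* prints "8-point" */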
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/fft_sse.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/fft_sse.c
new file mode 100644
index 000000000..726e186b5
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/fft_sse.c
@@ -0,0 +1,203 @@
+/*
+ * FFT/MDCT transform with SSE optimizations
+ * Copyright (c) 2008 Loren Merritt
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/x86_cpu.h"
+#include "libavcodec/dsputil.h"
+#include "fft.h"
+
+DECLARE_ALIGNED(16, static const int, m1m1m1m1)[4] =
+ { 1 << 31, 1 << 31, 1 << 31, 1 << 31 };
+
+void ff_fft_dispatch_sse(FFTComplex *z, int nbits);
+void ff_fft_dispatch_interleave_sse(FFTComplex *z, int nbits);
+
+void ff_fft_calc_sse(FFTContext *s, FFTComplex *z)
+{
+ int n = 1 << s->nbits;
+
+ ff_fft_dispatch_interleave_sse(z, s->nbits);
+
+ if(n <= 16) {
+ x86_reg i = -8*n;
+ __asm__ volatile(
+ "1: \n"
+ "movaps (%0,%1), %%xmm0 \n"
+ "movaps %%xmm0, %%xmm1 \n"
+ "unpcklps 16(%0,%1), %%xmm0 \n"
+ "unpckhps 16(%0,%1), %%xmm1 \n"
+ "movaps %%xmm0, (%0,%1) \n"
+ "movaps %%xmm1, 16(%0,%1) \n"
+ "add $32, %0 \n"
+ "jl 1b \n"
+ :"+r"(i)
+ :"r"(z+n)
+ :"memory"
+ );
+ }
+}
+
+void ff_fft_permute_sse(FFTContext *s, FFTComplex *z)
+{
+ int n = 1 << s->nbits;
+ int i;
+ for(i=0; i<n; i+=2) {
+ __asm__ volatile(
+ "movaps %2, %%xmm0 \n"
+ "movlps %%xmm0, %0 \n"
+ "movhps %%xmm0, %1 \n"
+ :"=m"(s->tmp_buf[s->revtab[i]]),
+ "=m"(s->tmp_buf[s->revtab[i+1]])
+ :"m"(z[i])
+ );
+ }
+ memcpy(z, s->tmp_buf, n*sizeof(FFTComplex));
+}
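The scatter above is a bit-reversal permutation done two complex values at a time; a plain C version of the same function (same FFTContext fields, no asm) would look roughly like this:

    #include <string.h>
    #include "fft.h"                        /* FFTContext / FFTComplex, as included above */

    static void fft_permute_scalar(FFTContext *s, FFTComplex *z)
    {
        int n = 1 << s->nbits, i;
        for (i = 0; i < n; i += 2) {
            s->tmp_buf[s->revtab[i]]   = z[i];     /* low half of the xmm load  */
            s->tmp_buf[s->revtab[i+1]] = z[i+1];   /* high half of the xmm load */
        }
        memcpy(z, s->tmp_buf, n * sizeof(FFTComplex));
    }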
+
+void ff_imdct_half_sse(FFTContext *s, FFTSample *output, const FFTSample *input)
+{
+ av_unused x86_reg i, j, k, l;
+ long n = 1 << s->mdct_bits;
+ long n2 = n >> 1;
+ long n4 = n >> 2;
+ long n8 = n >> 3;
+ const uint16_t *revtab = s->revtab + n8;
+ const FFTSample *tcos = s->tcos;
+ const FFTSample *tsin = s->tsin;
+ FFTComplex *z = (FFTComplex *)output;
+
+ /* pre rotation */
+ for(k=n8-2; k>=0; k-=2) {
+ __asm__ volatile(
+ "movaps (%2,%1,2), %%xmm0 \n" // { z[k].re, z[k].im, z[k+1].re, z[k+1].im }
+ "movaps -16(%2,%0,2), %%xmm1 \n" // { z[-k-2].re, z[-k-2].im, z[-k-1].re, z[-k-1].im }
+ "movaps %%xmm0, %%xmm2 \n"
+ "shufps $0x88, %%xmm1, %%xmm0 \n" // { z[k].re, z[k+1].re, z[-k-2].re, z[-k-1].re }
+ "shufps $0x77, %%xmm2, %%xmm1 \n" // { z[-k-1].im, z[-k-2].im, z[k+1].im, z[k].im }
+ "movlps (%3,%1), %%xmm4 \n"
+ "movlps (%4,%1), %%xmm5 \n"
+ "movhps -8(%3,%0), %%xmm4 \n" // { cos[k], cos[k+1], cos[-k-2], cos[-k-1] }
+ "movhps -8(%4,%0), %%xmm5 \n" // { sin[k], sin[k+1], sin[-k-2], sin[-k-1] }
+ "movaps %%xmm0, %%xmm2 \n"
+ "movaps %%xmm1, %%xmm3 \n"
+ "mulps %%xmm5, %%xmm0 \n" // re*sin
+ "mulps %%xmm4, %%xmm1 \n" // im*cos
+ "mulps %%xmm4, %%xmm2 \n" // re*cos
+ "mulps %%xmm5, %%xmm3 \n" // im*sin
+ "subps %%xmm0, %%xmm1 \n" // -> re
+ "addps %%xmm3, %%xmm2 \n" // -> im
+ "movaps %%xmm1, %%xmm0 \n"
+ "unpcklps %%xmm2, %%xmm1 \n" // { z[k], z[k+1] }
+ "unpckhps %%xmm2, %%xmm0 \n" // { z[-k-2], z[-k-1] }
+ ::"r"(-4*k), "r"(4*k),
+ "r"(input+n4), "r"(tcos+n8), "r"(tsin+n8)
+ );
+#if ARCH_X86_64
+ // if we have enough regs, don't let gcc make the luts latency-bound
+ // but if not, latency is faster than spilling
+ __asm__("movlps %%xmm0, %0 \n"
+ "movhps %%xmm0, %1 \n"
+ "movlps %%xmm1, %2 \n"
+ "movhps %%xmm1, %3 \n"
+ :"=m"(z[revtab[-k-2]]),
+ "=m"(z[revtab[-k-1]]),
+ "=m"(z[revtab[ k ]]),
+ "=m"(z[revtab[ k+1]])
+ );
+#else
+ __asm__("movlps %%xmm0, %0" :"=m"(z[revtab[-k-2]]));
+ __asm__("movhps %%xmm0, %0" :"=m"(z[revtab[-k-1]]));
+ __asm__("movlps %%xmm1, %0" :"=m"(z[revtab[ k ]]));
+ __asm__("movhps %%xmm1, %0" :"=m"(z[revtab[ k+1]]));
+#endif
+ }
+
+ ff_fft_dispatch_sse(z, s->nbits);
+
+ /* post rotation + reinterleave + reorder */
+
+#define CMUL(j,xmm0,xmm1)\
+ "movaps (%2,"#j",2), %%xmm6 \n"\
+ "movaps 16(%2,"#j",2), "#xmm0"\n"\
+ "movaps %%xmm6, "#xmm1"\n"\
+ "movaps "#xmm0",%%xmm7 \n"\
+ "mulps (%3,"#j"), %%xmm6 \n"\
+ "mulps (%4,"#j"), "#xmm0"\n"\
+ "mulps (%4,"#j"), "#xmm1"\n"\
+ "mulps (%3,"#j"), %%xmm7 \n"\
+ "subps %%xmm6, "#xmm0"\n"\
+ "addps %%xmm7, "#xmm1"\n"
+
+ j = -n2;
+ k = n2-16;
+ __asm__ volatile(
+ "1: \n"
+ CMUL(%0, %%xmm0, %%xmm1)
+ CMUL(%1, %%xmm4, %%xmm5)
+ "shufps $0x1b, %%xmm1, %%xmm1 \n"
+ "shufps $0x1b, %%xmm5, %%xmm5 \n"
+ "movaps %%xmm4, %%xmm6 \n"
+ "unpckhps %%xmm1, %%xmm4 \n"
+ "unpcklps %%xmm1, %%xmm6 \n"
+ "movaps %%xmm0, %%xmm2 \n"
+ "unpcklps %%xmm5, %%xmm0 \n"
+ "unpckhps %%xmm5, %%xmm2 \n"
+ "movaps %%xmm6, (%2,%1,2) \n"
+ "movaps %%xmm4, 16(%2,%1,2) \n"
+ "movaps %%xmm0, (%2,%0,2) \n"
+ "movaps %%xmm2, 16(%2,%0,2) \n"
+ "sub $16, %1 \n"
+ "add $16, %0 \n"
+ "jl 1b \n"
+ :"+&r"(j), "+&r"(k)
+ :"r"(z+n8), "r"(tcos+n8), "r"(tsin+n8)
+ :"memory"
+ );
+}
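The pre-rotation loop above performs, per pair, the complex multiply spelled out in the register comments (re*sin, im*cos, re*cos, im*sin); isolated in scalar C it is the following, where cmul_pre is a hypothetical helper and the index shuffling through revtab stays in the asm:

    /* rotated.re = im*cos - re*sin, rotated.im = re*cos + im*sin, as annotated above */
    static void cmul_pre(float re, float im, float c, float s,
                         float *out_re, float *out_im)
    {
        *out_re = im * c - re * s;   /* "subps ... -> re" */
        *out_im = re * c + im * s;   /* "addps ... -> im" */
    }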
+
+void ff_imdct_calc_sse(FFTContext *s, FFTSample *output, const FFTSample *input)
+{
+ x86_reg j, k;
+ long n = 1 << s->mdct_bits;
+ long n4 = n >> 2;
+
+ ff_imdct_half_sse(s, output+n4, input);
+
+ j = -n;
+ k = n-16;
+ __asm__ volatile(
+ "movaps %4, %%xmm7 \n"
+ "1: \n"
+ "movaps (%2,%1), %%xmm0 \n"
+ "movaps (%3,%0), %%xmm1 \n"
+ "shufps $0x1b, %%xmm0, %%xmm0 \n"
+ "shufps $0x1b, %%xmm1, %%xmm1 \n"
+ "xorps %%xmm7, %%xmm0 \n"
+ "movaps %%xmm1, (%3,%1) \n"
+ "movaps %%xmm0, (%2,%0) \n"
+ "sub $16, %1 \n"
+ "add $16, %0 \n"
+ "jl 1b \n"
+ :"+r"(j), "+r"(k)
+ :"r"(output+n4), "r"(output+n4*3),
+ "m"(*m1m1m1m1)
+ );
+}
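ff_imdct_calc_sse builds the full-length IMDCT from the half transform purely by mirroring and sign-flipping, which is what the shufps/xorps loop above does sixteen bytes at a time; a scalar sketch of that expansion (imdct_expand_scalar is a hypothetical name):

    /* output[n4 .. n4+n2-1] is assumed to already hold the imdct_half result */
    static void imdct_expand_scalar(float *output, long n)
    {
        long k, n2 = n >> 1, n4 = n >> 2;
        for (k = 0; k < n4; k++) {
            output[k]         = -output[n2 - k - 1];  /* mirror + sign flip (the xorps) */
            output[n - k - 1] =  output[n2 + k];      /* plain mirror */
        }
    }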
+
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/h264_deblock_sse2.asm b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/h264_deblock_sse2.asm
new file mode 100644
index 000000000..bf45c7ea6
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/h264_deblock_sse2.asm
@@ -0,0 +1,759 @@
+;*****************************************************************************
+;* MMX/SSE2-optimized H.264 deblocking code
+;*****************************************************************************
+;* Copyright (C) 2005-2008 x264 project
+;*
+;* Authors: Loren Merritt <lorenm@u.washington.edu>
+;*
+;* This program is free software; you can redistribute it and/or modify
+;* it under the terms of the GNU General Public License as published by
+;* the Free Software Foundation; either version 2 of the License, or
+;* (at your option) any later version.
+;*
+;* This program is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;* GNU General Public License for more details.
+;*
+;* You should have received a copy of the GNU General Public License
+;* along with this program; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
+;*****************************************************************************
+
+%include "x86inc.asm"
+
+SECTION_RODATA
+pb_00: times 16 db 0x00
+pb_01: times 16 db 0x01
+pb_03: times 16 db 0x03
+pb_a1: times 16 db 0xa1
+
+SECTION .text
+
+; expands to [base],...,[base+7*stride]
+%define PASS8ROWS(base, base3, stride, stride3) \
+ [base], [base+stride], [base+stride*2], [base3], \
+ [base3+stride], [base3+stride*2], [base3+stride3], [base3+stride*4]
+
+; in: 8 rows of 4 bytes in %1..%8
+; out: 4 rows of 8 bytes in m0..m3
+%macro TRANSPOSE4x8_LOAD 8
+ movd m0, %1
+ movd m2, %2
+ movd m1, %3
+ movd m3, %4
+ punpcklbw m0, m2
+ punpcklbw m1, m3
+ movq m2, m0
+ punpcklwd m0, m1
+ punpckhwd m2, m1
+
+ movd m4, %5
+ movd m6, %6
+ movd m5, %7
+ movd m7, %8
+ punpcklbw m4, m6
+ punpcklbw m5, m7
+ movq m6, m4
+ punpcklwd m4, m5
+ punpckhwd m6, m5
+
+ movq m1, m0
+ movq m3, m2
+ punpckldq m0, m4
+ punpckhdq m1, m4
+ punpckldq m2, m6
+ punpckhdq m3, m6
+%endmacro
+
+; in: 4 rows of 8 bytes in m0..m3
+; out: 8 rows of 4 bytes in %1..%8
+%macro TRANSPOSE8x4_STORE 8
+ movq m4, m0
+ movq m5, m1
+ movq m6, m2
+ punpckhdq m4, m4
+ punpckhdq m5, m5
+ punpckhdq m6, m6
+
+ punpcklbw m0, m1
+ punpcklbw m2, m3
+ movq m1, m0
+ punpcklwd m0, m2
+ punpckhwd m1, m2
+ movd %1, m0
+ punpckhdq m0, m0
+ movd %2, m0
+ movd %3, m1
+ punpckhdq m1, m1
+ movd %4, m1
+
+ punpckhdq m3, m3
+ punpcklbw m4, m5
+ punpcklbw m6, m3
+ movq m5, m4
+ punpcklwd m4, m6
+ punpckhwd m5, m6
+ movd %5, m4
+ punpckhdq m4, m4
+ movd %6, m4
+ movd %7, m5
+ punpckhdq m5, m5
+ movd %8, m5
+%endmacro
+
+%macro SBUTTERFLY 4
+ movq %4, %2
+ punpckl%1 %2, %3
+ punpckh%1 %4, %3
+%endmacro
+
+; in: 8 rows of 8 (only the middle 6 pels are used) in %1..%8
+; out: 6 rows of 8 in [%9+0*16] .. [%9+5*16]
+%macro TRANSPOSE6x8_MEM 9
+ movq m0, %1
+ movq m1, %2
+ movq m2, %3
+ movq m3, %4
+ movq m4, %5
+ movq m5, %6
+ movq m6, %7
+ SBUTTERFLY bw, m0, m1, m7
+ SBUTTERFLY bw, m2, m3, m1
+ SBUTTERFLY bw, m4, m5, m3
+ movq [%9+0x10], m1
+ SBUTTERFLY bw, m6, %8, m5
+ SBUTTERFLY wd, m0, m2, m1
+ SBUTTERFLY wd, m4, m6, m2
+ punpckhdq m0, m4
+ movq [%9+0x00], m0
+ SBUTTERFLY wd, m7, [%9+0x10], m6
+ SBUTTERFLY wd, m3, m5, m4
+ SBUTTERFLY dq, m7, m3, m0
+ SBUTTERFLY dq, m1, m2, m5
+ punpckldq m6, m4
+ movq [%9+0x10], m1
+ movq [%9+0x20], m5
+ movq [%9+0x30], m7
+ movq [%9+0x40], m0
+ movq [%9+0x50], m6
+%endmacro
+
+; in: 8 rows of 8 in %1..%8
+; out: 8 rows of 8 in %9..%16
+%macro TRANSPOSE8x8_MEM 16
+ movq m0, %1
+ movq m1, %2
+ movq m2, %3
+ movq m3, %4
+ movq m4, %5
+ movq m5, %6
+ movq m6, %7
+ SBUTTERFLY bw, m0, m1, m7
+ SBUTTERFLY bw, m2, m3, m1
+ SBUTTERFLY bw, m4, m5, m3
+ SBUTTERFLY bw, m6, %8, m5
+ movq %9, m3
+ SBUTTERFLY wd, m0, m2, m3
+ SBUTTERFLY wd, m4, m6, m2
+ SBUTTERFLY wd, m7, m1, m6
+ movq %11, m2
+ movq m2, %9
+ SBUTTERFLY wd, m2, m5, m1
+ SBUTTERFLY dq, m0, m4, m5
+ SBUTTERFLY dq, m7, m2, m4
+ movq %9, m0
+ movq %10, m5
+ movq %13, m7
+ movq %14, m4
+ SBUTTERFLY dq, m3, %11, m0
+ SBUTTERFLY dq, m6, m1, m5
+ movq %11, m3
+ movq %12, m0
+ movq %15, m6
+ movq %16, m5
+%endmacro
+
+; out: %4 = |%1-%2|>%3
+; clobbers: %5
+%macro DIFF_GT 5
+ mova %5, %2
+ mova %4, %1
+ psubusb %5, %1
+ psubusb %4, %2
+ por %4, %5
+ psubusb %4, %3
+%endmacro
+
+; out: %4 = |%1-%2|>%3
+; clobbers: %5
+%macro DIFF_GT2 5
+ mova %5, %2
+ mova %4, %1
+ psubusb %5, %1
+ psubusb %4, %2
+ psubusb %5, %3
+ psubusb %4, %3
+ pcmpeqb %4, %5
+%endmacro
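Both macros lean on the unsigned-saturation trick: |a-b| for packed bytes is the OR of the two saturating differences, and comparing against the threshold is one more saturating subtract. A small SSE2-intrinsics sketch of DIFF_GT (diff_gt_bytes is an illustrative helper):

    #include <emmintrin.h>

    /* nonzero byte lanes exactly where |a - b| > t, like the DIFF_GT macro above */
    static __m128i diff_gt_bytes(__m128i a, __m128i b, __m128i t)
    {
        __m128i d = _mm_or_si128(_mm_subs_epu8(a, b),   /* max(a-b, 0)           */
                                 _mm_subs_epu8(b, a));  /* max(b-a, 0) -> |a-b|  */
        return _mm_subs_epu8(d, t);                     /* nonzero iff |a-b| > t */
    }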
+
+%macro SPLATW 1
+%ifidn m0, xmm0
+ pshuflw %1, %1, 0
+ punpcklqdq %1, %1
+%else
+ pshufw %1, %1, 0
+%endif
+%endmacro
+
+; in: m0=p1 m1=p0 m2=q0 m3=q1 %1=alpha-1 %2=beta-1
+; out: m5=beta-1, m7=mask, %3=alpha-1
+; clobbers: m4,m6
+%macro LOAD_MASK 2-3
+ movd m4, %1
+ movd m5, %2
+ SPLATW m4
+ SPLATW m5
+ packuswb m4, m4 ; 16x alpha-1
+ packuswb m5, m5 ; 16x beta-1
+%if %0>2
+ mova %3, m4
+%endif
+ DIFF_GT m1, m2, m4, m7, m6 ; |p0-q0| > alpha-1
+ DIFF_GT m0, m1, m5, m4, m6 ; |p1-p0| > beta-1
+ por m7, m4
+ DIFF_GT m3, m2, m5, m4, m6 ; |q1-q0| > beta-1
+ por m7, m4
+ pxor m6, m6
+ pcmpeqb m7, m6
+%endmacro
+
+; in: m0=p1 m1=p0 m2=q0 m3=q1 m7=(tc&mask)
+; out: m1=p0' m2=q0'
+; clobbers: m0,3-6
+%macro DEBLOCK_P0_Q0 0
+ mova m5, m1
+ pxor m5, m2 ; p0^q0
+ pand m5, [pb_01 GLOBAL] ; (p0^q0)&1
+ pcmpeqb m4, m4
+ pxor m3, m4
+ pavgb m3, m0 ; (p1 - q1 + 256)>>1
+ pavgb m3, [pb_03 GLOBAL] ; (((p1 - q1 + 256)>>1)+4)>>1 = 64+2+(p1-q1)>>2
+ pxor m4, m1
+ pavgb m4, m2 ; (q0 - p0 + 256)>>1
+ pavgb m3, m5
+ paddusb m3, m4 ; d+128+33
+ mova m6, [pb_a1 GLOBAL]
+ psubusb m6, m3
+ psubusb m3, [pb_a1 GLOBAL]
+ pminub m6, m7
+ pminub m3, m7
+ psubusb m1, m6
+ psubusb m2, m3
+ paddusb m1, m3
+ paddusb m2, m6
+%endmacro
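DEBLOCK_P0_Q0 computes the standard H.264 p0/q0 update with pavgb tricks (the "d+128+33" bias noted in the comments); the arithmetic it approximates, written plainly in scalar C (deblock_p0_q0 is a hypothetical helper):

    #include <stdint.h>

    static void deblock_p0_q0(uint8_t *p0, uint8_t *q0, int p1, int q1, int tc)
    {
        int d, np0, nq0;
        d = ((*q0 - *p0) * 4 + (p1 - q1) + 4) >> 3;    /* raw delta */
        if (d < -tc) d = -tc;                          /* clip to +-tc */
        if (d >  tc) d =  tc;
        np0 = *p0 + d;
        nq0 = *q0 - d;
        *p0 = np0 < 0 ? 0 : np0 > 255 ? 255 : np0;     /* final byte clip */
        *q0 = nq0 < 0 ? 0 : nq0 > 255 ? 255 : nq0;
    }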
+
+; in: m1=p0 m2=q0
+; %1=p1 %2=q2 %3=[q2] %4=[q1] %5=tc0 %6=tmp
+; out: [q1] = clip( (q2+((p0+q0+1)>>1))>>1, q1-tc0, q1+tc0 )
+; clobbers: q2, tmp, tc0
+%macro LUMA_Q1 6
+ mova %6, m1
+ pavgb %6, m2
+ pavgb %2, %6 ; avg(p2,avg(p0,q0))
+ pxor %6, %3
+ pand %6, [pb_01 GLOBAL] ; (p2^avg(p0,q0))&1
+ psubusb %2, %6 ; (p2+((p0+q0+1)>>1))>>1
+ mova %6, %1
+ psubusb %6, %5
+ paddusb %5, %1
+ pmaxub %2, %6
+ pminub %2, %5
+ mova %4, %2
+%endmacro
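LUMA_Q1's output comment already states the formula; as a scalar helper (luma_p1q1 is hypothetical) the same clip reads:

    #include <stdint.h>

    /* new p1 (or q1): the average-based value clipped to +-tc0 around the old one */
    static uint8_t luma_p1q1(int p1, int p2, int p0, int q0, int tc0)
    {
        int v = (p2 + ((p0 + q0 + 1) >> 1)) >> 1;
        if (v < p1 - tc0) v = p1 - tc0;
        if (v > p1 + tc0) v = p1 + tc0;
        return (uint8_t)v;
    }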
+
+%ifdef ARCH_X86_64
+;-----------------------------------------------------------------------------
+; void x264_deblock_v_luma_sse2( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
+;-----------------------------------------------------------------------------
+INIT_XMM
+cglobal x264_deblock_v_luma_sse2, 5,5,10
+ movd m8, [r4] ; tc0
+ lea r4, [r1*3]
+ dec r2d ; alpha-1
+ neg r4
+ dec r3d ; beta-1
+ add r4, r0 ; pix-3*stride
+
+ mova m0, [r4+r1] ; p1
+ mova m1, [r4+2*r1] ; p0
+ mova m2, [r0] ; q0
+ mova m3, [r0+r1] ; q1
+ LOAD_MASK r2d, r3d
+
+ punpcklbw m8, m8
+ punpcklbw m8, m8 ; tc = 4x tc0[3], 4x tc0[2], 4x tc0[1], 4x tc0[0]
+ pcmpeqb m9, m9
+ pcmpeqb m9, m8
+ pandn m9, m7
+ pand m8, m9
+
+ movdqa m3, [r4] ; p2
+ DIFF_GT2 m1, m3, m5, m6, m7 ; |p2-p0| > beta-1
+ pand m6, m9
+ mova m7, m8
+ psubb m7, m6
+ pand m6, m8
+ LUMA_Q1 m0, m3, [r4], [r4+r1], m6, m4
+
+ movdqa m4, [r0+2*r1] ; q2
+ DIFF_GT2 m2, m4, m5, m6, m3 ; |q2-q0| > beta-1
+ pand m6, m9
+ pand m8, m6
+ psubb m7, m6
+ mova m3, [r0+r1]
+ LUMA_Q1 m3, m4, [r0+2*r1], [r0+r1], m8, m6
+
+ DEBLOCK_P0_Q0
+ mova [r4+2*r1], m1
+ mova [r0], m2
+ RET
+
+;-----------------------------------------------------------------------------
+; void x264_deblock_h_luma_sse2( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
+;-----------------------------------------------------------------------------
+INIT_MMX
+cglobal x264_deblock_h_luma_sse2, 5,7
+ movsxd r10, r1d
+ lea r11, [r10+r10*2]
+ lea r6, [r0-4]
+ lea r5, [r0-4+r11]
+%ifdef WIN64
+ sub rsp, 0x98
+ %define pix_tmp rsp+0x30
+%else
+ sub rsp, 0x68
+ %define pix_tmp rsp
+%endif
+
+ ; transpose 6x16 -> tmp space
+ TRANSPOSE6x8_MEM PASS8ROWS(r6, r5, r10, r11), pix_tmp
+ lea r6, [r6+r10*8]
+ lea r5, [r5+r10*8]
+ TRANSPOSE6x8_MEM PASS8ROWS(r6, r5, r10, r11), pix_tmp+8
+
+ ; vertical filter
+ ; alpha, beta, tc0 are still in r2d, r3d, r4
+ ; don't backup r6, r5, r10, r11 because x264_deblock_v_luma_sse2 doesn't use them
+ lea r0, [pix_tmp+0x30]
+ mov r1d, 0x10
+%ifdef WIN64
+ mov [rsp+0x20], r4
+%endif
+ call x264_deblock_v_luma_sse2
+
+ ; transpose 16x4 -> original space (only the middle 4 rows were changed by the filter)
+ add r6, 2
+ add r5, 2
+ movq m0, [pix_tmp+0x18]
+ movq m1, [pix_tmp+0x28]
+ movq m2, [pix_tmp+0x38]
+ movq m3, [pix_tmp+0x48]
+ TRANSPOSE8x4_STORE PASS8ROWS(r6, r5, r10, r11)
+
+ shl r10, 3
+ sub r6, r10
+ sub r5, r10
+ shr r10, 3
+ movq m0, [pix_tmp+0x10]
+ movq m1, [pix_tmp+0x20]
+ movq m2, [pix_tmp+0x30]
+ movq m3, [pix_tmp+0x40]
+ TRANSPOSE8x4_STORE PASS8ROWS(r6, r5, r10, r11)
+
+%ifdef WIN64
+ add rsp, 0x98
+%else
+ add rsp, 0x68
+%endif
+ RET
+
+%else
+
+%macro DEBLOCK_LUMA 3
+;-----------------------------------------------------------------------------
+; void x264_deblock_v8_luma_mmxext( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
+;-----------------------------------------------------------------------------
+cglobal x264_deblock_%2_luma_%1, 5,5
+ lea r4, [r1*3]
+ dec r2 ; alpha-1
+ neg r4
+ dec r3 ; beta-1
+ add r4, r0 ; pix-3*stride
+ %assign pad 2*%3+12-(stack_offset&15)
+ SUB esp, pad
+
+ mova m0, [r4+r1] ; p1
+ mova m1, [r4+2*r1] ; p0
+ mova m2, [r0] ; q0
+ mova m3, [r0+r1] ; q1
+ LOAD_MASK r2, r3
+
+ mov r3, r4mp
+ movd m4, [r3] ; tc0
+ punpcklbw m4, m4
+ punpcklbw m4, m4 ; tc = 4x tc0[3], 4x tc0[2], 4x tc0[1], 4x tc0[0]
+ mova [esp+%3], m4 ; tc
+ pcmpeqb m3, m3
+ pcmpgtb m4, m3
+ pand m4, m7
+ mova [esp], m4 ; mask
+
+ mova m3, [r4] ; p2
+ DIFF_GT2 m1, m3, m5, m6, m7 ; |p2-p0| > beta-1
+ pand m6, m4
+ pand m4, [esp+%3] ; tc
+ mova m7, m4
+ psubb m7, m6
+ pand m6, m4
+ LUMA_Q1 m0, m3, [r4], [r4+r1], m6, m4
+
+ mova m4, [r0+2*r1] ; q2
+ DIFF_GT2 m2, m4, m5, m6, m3 ; |q2-q0| > beta-1
+ mova m5, [esp] ; mask
+ pand m6, m5
+ mova m5, [esp+%3] ; tc
+ pand m5, m6
+ psubb m7, m6
+ mova m3, [r0+r1]
+ LUMA_Q1 m3, m4, [r0+2*r1], [r0+r1], m5, m6
+
+ DEBLOCK_P0_Q0
+ mova [r4+2*r1], m1
+ mova [r0], m2
+ ADD esp, pad
+ RET
+
+;-----------------------------------------------------------------------------
+; void x264_deblock_h_luma_mmxext( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
+;-----------------------------------------------------------------------------
+INIT_MMX
+cglobal x264_deblock_h_luma_%1, 0,5
+ mov r0, r0mp
+ mov r3, r1m
+ lea r4, [r3*3]
+ sub r0, 4
+ lea r1, [r0+r4]
+ %assign pad 0x78-(stack_offset&15)
+ SUB esp, pad
+%define pix_tmp esp+12
+
+ ; transpose 6x16 -> tmp space
+ TRANSPOSE6x8_MEM PASS8ROWS(r0, r1, r3, r4), pix_tmp
+ lea r0, [r0+r3*8]
+ lea r1, [r1+r3*8]
+ TRANSPOSE6x8_MEM PASS8ROWS(r0, r1, r3, r4), pix_tmp+8
+
+ ; vertical filter
+ lea r0, [pix_tmp+0x30]
+ PUSH dword r4m
+ PUSH dword r3m
+ PUSH dword r2m
+ PUSH dword 16
+ PUSH dword r0
+ call x264_deblock_%2_luma_%1
+%ifidn %2, v8
+ add dword [esp ], 8 ; pix_tmp+0x38
+ add dword [esp+16], 2 ; tc0+2
+ call x264_deblock_%2_luma_%1
+%endif
+ ADD esp, 20
+
+ ; transpose 16x4 -> original space (only the middle 4 rows were changed by the filter)
+ mov r0, r0mp
+ sub r0, 2
+ lea r1, [r0+r4]
+
+ movq m0, [pix_tmp+0x10]
+ movq m1, [pix_tmp+0x20]
+ movq m2, [pix_tmp+0x30]
+ movq m3, [pix_tmp+0x40]
+ TRANSPOSE8x4_STORE PASS8ROWS(r0, r1, r3, r4)
+
+ lea r0, [r0+r3*8]
+ lea r1, [r1+r3*8]
+ movq m0, [pix_tmp+0x18]
+ movq m1, [pix_tmp+0x28]
+ movq m2, [pix_tmp+0x38]
+ movq m3, [pix_tmp+0x48]
+ TRANSPOSE8x4_STORE PASS8ROWS(r0, r1, r3, r4)
+
+ ADD esp, pad
+ RET
+%endmacro ; DEBLOCK_LUMA
+
+INIT_XMM
+DEBLOCK_LUMA sse2, v, 16
+
+%endif ; ARCH
+
+
+
+%macro LUMA_INTRA_P012 4 ; p0..p3 in memory
+ mova t0, p2
+ mova t1, p0
+ pavgb t0, p1
+ pavgb t1, q0
+ pavgb t0, t1 ; ((p2+p1+1)/2 + (p0+q0+1)/2 + 1)/2
+ mova t5, t1
+ mova t2, p2
+ mova t3, p0
+ paddb t2, p1
+ paddb t3, q0
+ paddb t2, t3
+ mova t3, t2
+ mova t4, t2
+ psrlw t2, 1
+ pavgb t2, mpb_00
+ pxor t2, t0
+ pand t2, mpb_01
+ psubb t0, t2 ; p1' = (p2+p1+p0+q0+2)/4;
+
+ mova t1, p2
+ mova t2, p2
+ pavgb t1, q1
+ psubb t2, q1
+ paddb t3, t3
+ psubb t3, t2 ; p2+2*p1+2*p0+2*q0+q1
+ pand t2, mpb_01
+ psubb t1, t2
+ pavgb t1, p1
+ pavgb t1, t5 ; (((p2+q1)/2 + p1+1)/2 + (p0+q0+1)/2 + 1)/2
+ psrlw t3, 2
+ pavgb t3, mpb_00
+ pxor t3, t1
+ pand t3, mpb_01
+ psubb t1, t3 ; p0'a = (p2+2*p1+2*p0+2*q0+q1+4)/8
+
+ mova t3, p0
+ mova t2, p0
+ pxor t3, q1
+ pavgb t2, q1
+ pand t3, mpb_01
+ psubb t2, t3
+ pavgb t2, p1 ; p0'b = (2*p1+p0+q0+2)/4
+
+ pxor t1, t2
+ pxor t2, p0
+ pand t1, mask1p
+ pand t2, mask0
+ pxor t1, t2
+ pxor t1, p0
+ mova %1, t1 ; store p0
+
+ mova t1, %4 ; p3
+ mova t2, t1
+ pavgb t1, p2
+ paddb t2, p2
+ pavgb t1, t0 ; (p3+p2+1)/2 + (p2+p1+p0+q0+2)/4
+ paddb t2, t2
+ paddb t2, t4 ; 2*p3+3*p2+p1+p0+q0
+ psrlw t2, 2
+ pavgb t2, mpb_00
+ pxor t2, t1
+ pand t2, mpb_01
+ psubb t1, t2 ; p2' = (2*p3+3*p2+p1+p0+q0+4)/8
+
+ pxor t0, p1
+ pxor t1, p2
+ pand t0, mask1p
+ pand t1, mask1p
+ pxor t0, p1
+ pxor t1, p2
+ mova %2, t0 ; store p1
+ mova %3, t1 ; store p2
+%endmacro
+
+%macro LUMA_INTRA_SWAP_PQ 0
+ %define q1 m0
+ %define q0 m1
+ %define p0 m2
+ %define p1 m3
+ %define p2 q2
+ %define mask1p mask1q
+%endmacro
+
+%macro DEBLOCK_LUMA_INTRA 2
+ %define p1 m0
+ %define p0 m1
+ %define q0 m2
+ %define q1 m3
+ %define t0 m4
+ %define t1 m5
+ %define t2 m6
+ %define t3 m7
+%ifdef ARCH_X86_64
+ %define p2 m8
+ %define q2 m9
+ %define t4 m10
+ %define t5 m11
+ %define mask0 m12
+ %define mask1p m13
+ %define mask1q [rsp-24]
+ %define mpb_00 m14
+ %define mpb_01 m15
+%else
+ %define spill(x) [esp+16*x+((stack_offset+4)&15)]
+ %define p2 [r4+r1]
+ %define q2 [r0+2*r1]
+ %define t4 spill(0)
+ %define t5 spill(1)
+ %define mask0 spill(2)
+ %define mask1p spill(3)
+ %define mask1q spill(4)
+ %define mpb_00 [pb_00 GLOBAL]
+ %define mpb_01 [pb_01 GLOBAL]
+%endif
+
+;-----------------------------------------------------------------------------
+; void x264_deblock_v_luma_intra_sse2( uint8_t *pix, int stride, int alpha, int beta )
+;-----------------------------------------------------------------------------
+cglobal x264_deblock_%2_luma_intra_%1, 4,6,16
+%ifndef ARCH_X86_64
+ sub esp, 0x60
+%endif
+ lea r4, [r1*4]
+ lea r5, [r1*3] ; 3*stride
+ dec r2d ; alpha-1
+ jl .end
+ neg r4
+ dec r3d ; beta-1
+ jl .end
+ add r4, r0 ; pix-4*stride
+ mova p1, [r4+2*r1]
+ mova p0, [r4+r5]
+ mova q0, [r0]
+ mova q1, [r0+r1]
+%ifdef ARCH_X86_64
+ pxor mpb_00, mpb_00
+ mova mpb_01, [pb_01 GLOBAL]
+ LOAD_MASK r2d, r3d, t5 ; m5=beta-1, t5=alpha-1, m7=mask0
+ SWAP 7, 12 ; m12=mask0
+ pavgb t5, mpb_00
+ pavgb t5, mpb_01 ; alpha/4+1
+ movdqa p2, [r4+r1]
+ movdqa q2, [r0+2*r1]
+ DIFF_GT2 p0, q0, t5, t0, t3 ; t0 = |p0-q0| > alpha/4+1
+ DIFF_GT2 p0, p2, m5, t2, t5 ; mask1 = |p2-p0| > beta-1
+ DIFF_GT2 q0, q2, m5, t4, t5 ; t4 = |q2-q0| > beta-1
+ pand t0, mask0
+ pand t4, t0
+ pand t2, t0
+ mova mask1q, t4
+ mova mask1p, t2
+%else
+ LOAD_MASK r2d, r3d, t5 ; m5=beta-1, t5=alpha-1, m7=mask0
+ mova m4, t5
+ mova mask0, m7
+ pavgb m4, [pb_00 GLOBAL]
+ pavgb m4, [pb_01 GLOBAL] ; alpha/4+1
+ DIFF_GT2 p0, q0, m4, m6, m7 ; m6 = |p0-q0| > alpha/4+1
+ pand m6, mask0
+ DIFF_GT2 p0, p2, m5, m4, m7 ; m4 = |p2-p0| > beta-1
+ pand m4, m6
+ mova mask1p, m4
+ DIFF_GT2 q0, q2, m5, m4, m7 ; m4 = |q2-q0| > beta-1
+ pand m4, m6
+ mova mask1q, m4
+%endif
+ LUMA_INTRA_P012 [r4+r5], [r4+2*r1], [r4+r1], [r4]
+ LUMA_INTRA_SWAP_PQ
+ LUMA_INTRA_P012 [r0], [r0+r1], [r0+2*r1], [r0+r5]
+.end:
+%ifndef ARCH_X86_64
+ add esp, 0x60
+%endif
+ RET
+
+INIT_MMX
+%ifdef ARCH_X86_64
+;-----------------------------------------------------------------------------
+; void x264_deblock_h_luma_intra_sse2( uint8_t *pix, int stride, int alpha, int beta )
+;-----------------------------------------------------------------------------
+cglobal x264_deblock_h_luma_intra_%1, 4,7
+ movsxd r10, r1d
+ lea r11, [r10*3]
+ lea r6, [r0-4]
+ lea r5, [r0-4+r11]
+ sub rsp, 0x88
+ %define pix_tmp rsp
+
+ ; transpose 8x16 -> tmp space
+ TRANSPOSE8x8_MEM PASS8ROWS(r6, r5, r10, r11), PASS8ROWS(pix_tmp, pix_tmp+0x30, 0x10, 0x30)
+ lea r6, [r6+r10*8]
+ lea r5, [r5+r10*8]
+ TRANSPOSE8x8_MEM PASS8ROWS(r6, r5, r10, r11), PASS8ROWS(pix_tmp+8, pix_tmp+0x38, 0x10, 0x30)
+
+ lea r0, [pix_tmp+0x40]
+ mov r1, 0x10
+ call x264_deblock_v_luma_intra_%1
+
+ ; transpose 16x6 -> original space (but we can't write only 6 pixels, so really 16x8)
+ lea r5, [r6+r11]
+ TRANSPOSE8x8_MEM PASS8ROWS(pix_tmp+8, pix_tmp+0x38, 0x10, 0x30), PASS8ROWS(r6, r5, r10, r11)
+ shl r10, 3
+ sub r6, r10
+ sub r5, r10
+ shr r10, 3
+ TRANSPOSE8x8_MEM PASS8ROWS(pix_tmp, pix_tmp+0x30, 0x10, 0x30), PASS8ROWS(r6, r5, r10, r11)
+ add rsp, 0x88
+ RET
+%else
+cglobal x264_deblock_h_luma_intra_%1, 2,4
+ lea r3, [r1*3]
+ sub r0, 4
+ lea r2, [r0+r3]
+%assign pad 0x8c-(stack_offset&15)
+ SUB rsp, pad
+ %define pix_tmp rsp
+
+ ; transpose 8x16 -> tmp space
+ TRANSPOSE8x8_MEM PASS8ROWS(r0, r2, r1, r3), PASS8ROWS(pix_tmp, pix_tmp+0x30, 0x10, 0x30)
+ lea r0, [r0+r1*8]
+ lea r2, [r2+r1*8]
+ TRANSPOSE8x8_MEM PASS8ROWS(r0, r2, r1, r3), PASS8ROWS(pix_tmp+8, pix_tmp+0x38, 0x10, 0x30)
+
+ lea r0, [pix_tmp+0x40]
+ PUSH dword r3m
+ PUSH dword r2m
+ PUSH dword 16
+ PUSH r0
+ call x264_deblock_%2_luma_intra_%1
+%ifidn %2, v8
+ add dword [rsp], 8 ; pix_tmp+8
+ call x264_deblock_%2_luma_intra_%1
+%endif
+ ADD esp, 16
+
+ mov r1, r1m
+ mov r0, r0mp
+ lea r3, [r1*3]
+ sub r0, 4
+ lea r2, [r0+r3]
+ ; transpose 16x6 -> original space (but we can't write only 6 pixels, so really 16x8)
+ TRANSPOSE8x8_MEM PASS8ROWS(pix_tmp, pix_tmp+0x30, 0x10, 0x30), PASS8ROWS(r0, r2, r1, r3)
+ lea r0, [r0+r1*8]
+ lea r2, [r2+r1*8]
+ TRANSPOSE8x8_MEM PASS8ROWS(pix_tmp+8, pix_tmp+0x38, 0x10, 0x30), PASS8ROWS(r0, r2, r1, r3)
+ ADD rsp, pad
+ RET
+%endif ; ARCH_X86_64
+%endmacro ; DEBLOCK_LUMA_INTRA
+
+INIT_XMM
+DEBLOCK_LUMA_INTRA sse2, v
+%ifndef ARCH_X86_64
+INIT_MMX
+DEBLOCK_LUMA_INTRA mmxext, v8
+%endif
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/h264_i386.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/h264_i386.h
new file mode 100644
index 000000000..d16e633f2
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/h264_i386.h
@@ -0,0 +1,153 @@
+/*
+ * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file h264_i386.h
+ * H.264 / AVC / MPEG4 part10 codec.
+ * non-MMX i386-specific optimizations for H.264
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#ifndef AVCODEC_X86_H264_I386_H
+#define AVCODEC_X86_H264_I386_H
+
+#include "libavcodec/cabac.h"
+
+//FIXME use some macros to avoid duplicating get_cabac (cannot be done yet
+//as that would make the optimization work harder)

+#if ARCH_X86 && HAVE_7REGS && HAVE_EBX_AVAILABLE && !defined(BROKEN_RELOCATIONS)
+static int decode_significance_x86(CABACContext *c, int max_coeff,
+ uint8_t *significant_coeff_ctx_base,
+ int *index){
+ void *end= significant_coeff_ctx_base + max_coeff - 1;
+ int minusstart= -(int)significant_coeff_ctx_base;
+ int minusindex= 4-(int)index;
+ int coeff_count;
+ __asm__ volatile(
+ "movl "RANGE "(%3), %%esi \n\t"
+ "movl "LOW "(%3), %%ebx \n\t"
+
+ "2: \n\t"
+
+ BRANCHLESS_GET_CABAC("%%edx", "%3", "(%1)", "%%ebx",
+ "%%bx", "%%esi", "%%eax", "%%al")
+
+ "test $1, %%edx \n\t"
+ " jz 3f \n\t"
+
+ BRANCHLESS_GET_CABAC("%%edx", "%3", "61(%1)", "%%ebx",
+ "%%bx", "%%esi", "%%eax", "%%al")
+
+ "mov %2, %%"REG_a" \n\t"
+ "movl %4, %%ecx \n\t"
+ "add %1, %%"REG_c" \n\t"
+ "movl %%ecx, (%%"REG_a") \n\t"
+
+ "test $1, %%edx \n\t"
+ " jnz 4f \n\t"
+
+ "add $4, %%"REG_a" \n\t"
+ "mov %%"REG_a", %2 \n\t"
+
+ "3: \n\t"
+ "add $1, %1 \n\t"
+ "cmp %5, %1 \n\t"
+ " jb 2b \n\t"
+ "mov %2, %%"REG_a" \n\t"
+ "movl %4, %%ecx \n\t"
+ "add %1, %%"REG_c" \n\t"
+ "movl %%ecx, (%%"REG_a") \n\t"
+ "4: \n\t"
+ "add %6, %%eax \n\t"
+ "shr $2, %%eax \n\t"
+
+ "movl %%esi, "RANGE "(%3) \n\t"
+ "movl %%ebx, "LOW "(%3) \n\t"
+ :"=&a"(coeff_count), "+r"(significant_coeff_ctx_base), "+m"(index)
+ :"r"(c), "m"(minusstart), "m"(end), "m"(minusindex)
+ : "%"REG_c, "%ebx", "%edx", "%esi", "memory"
+ );
+ return coeff_count;
+}
+
+static int decode_significance_8x8_x86(CABACContext *c,
+ uint8_t *significant_coeff_ctx_base,
+ int *index, const uint8_t *sig_off){
+ int minusindex= 4-(int)index;
+ int coeff_count;
+ x86_reg last=0;
+ __asm__ volatile(
+ "movl "RANGE "(%3), %%esi \n\t"
+ "movl "LOW "(%3), %%ebx \n\t"
+
+ "mov %1, %%"REG_D" \n\t"
+ "2: \n\t"
+
+ "mov %6, %%"REG_a" \n\t"
+ "movzbl (%%"REG_a", %%"REG_D"), %%edi \n\t"
+ "add %5, %%"REG_D" \n\t"
+
+ BRANCHLESS_GET_CABAC("%%edx", "%3", "(%%"REG_D")", "%%ebx",
+ "%%bx", "%%esi", "%%eax", "%%al")
+
+ "mov %1, %%edi \n\t"
+ "test $1, %%edx \n\t"
+ " jz 3f \n\t"
+
+ "movzbl "MANGLE(last_coeff_flag_offset_8x8)"(%%edi), %%edi\n\t"
+ "add %5, %%"REG_D" \n\t"
+
+ BRANCHLESS_GET_CABAC("%%edx", "%3", "15(%%"REG_D")", "%%ebx",
+ "%%bx", "%%esi", "%%eax", "%%al")
+
+ "mov %2, %%"REG_a" \n\t"
+ "mov %1, %%edi \n\t"
+ "movl %%edi, (%%"REG_a") \n\t"
+
+ "test $1, %%edx \n\t"
+ " jnz 4f \n\t"
+
+ "add $4, %%"REG_a" \n\t"
+ "mov %%"REG_a", %2 \n\t"
+
+ "3: \n\t"
+ "addl $1, %%edi \n\t"
+ "mov %%edi, %1 \n\t"
+ "cmpl $63, %%edi \n\t"
+ " jb 2b \n\t"
+ "mov %2, %%"REG_a" \n\t"
+ "movl %%edi, (%%"REG_a") \n\t"
+ "4: \n\t"
+ "addl %4, %%eax \n\t"
+ "shr $2, %%eax \n\t"
+
+ "movl %%esi, "RANGE "(%3) \n\t"
+ "movl %%ebx, "LOW "(%3) \n\t"
+ :"=&a"(coeff_count),"+m"(last), "+m"(index)
+ :"r"(c), "m"(minusindex), "m"(significant_coeff_ctx_base), "m"(sig_off)
+ : "%"REG_c, "%ebx", "%edx", "%esi", "%"REG_D, "memory"
+ );
+ return coeff_count;
+}
+#endif /* ARCH_X86 && HAVE_7REGS && HAVE_EBX_AVAILABLE */
+ /* !defined(BROKEN_RELOCATIONS) */
+
+#endif /* AVCODEC_X86_H264_I386_H */
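For orientation, the control flow that decode_significance_x86 (and its 8x8 variant) implements in asm is the usual CABAC significance-map walk; a rough scalar sketch, where get_sig_flag and get_last_flag are hypothetical stand-ins for the bit decodes done by BRANCHLESS_GET_CABAC:

    /* walk the significance map, recording the index of each significant coefficient
       until the "last coefficient" flag fires; returns the number recorded */
    static int decode_significance_sketch(int max_coeff, int *index,
                                          int (*get_sig_flag)(int i),
                                          int (*get_last_flag)(int i))
    {
        int i, count = 0;
        for (i = 0; i < max_coeff - 1; i++) {
            if (get_sig_flag(i)) {
                index[count++] = i;
                if (get_last_flag(i))
                    return count;
            }
        }
        index[count++] = max_coeff - 1;   /* fell through: the final coefficient is significant */
        return count;
    }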
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/h264_idct_sse2.asm b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/h264_idct_sse2.asm
new file mode 100644
index 000000000..f8ee2b628
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/h264_idct_sse2.asm
@@ -0,0 +1,54 @@
+;*****************************************************************************
+;* SSE2-optimized H.264 iDCT
+;*****************************************************************************
+;* Copyright (C) 2003-2008 x264 project
+;*
+;* Authors: Laurent Aimar <fenrir@via.ecp.fr>
+;* Loren Merritt <lorenm@u.washington.edu>
+;* Holger Lubitz <hal@duncan.ol.sub.de>
+;* Min Chen <chenm001.163.com>
+;*
+;* This program is free software; you can redistribute it and/or modify
+;* it under the terms of the GNU General Public License as published by
+;* the Free Software Foundation; either version 2 of the License, or
+;* (at your option) any later version.
+;*
+;* This program is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;* GNU General Public License for more details.
+;*
+;* You should have received a copy of the GNU General Public License
+;* along with this program; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
+;*****************************************************************************
+
+%include "x86inc.asm"
+%include "x86util.asm"
+
+SECTION_RODATA
+pw_32: times 8 dw 32
+
+SECTION .text
+
+INIT_XMM
+cglobal x264_add8x4_idct_sse2, 3,3,8
+ movq m0, [r1+ 0]
+ movq m1, [r1+ 8]
+ movq m2, [r1+16]
+ movq m3, [r1+24]
+ movhps m0, [r1+32]
+ movhps m1, [r1+40]
+ movhps m2, [r1+48]
+ movhps m3, [r1+56]
+ IDCT4_1D 0,1,2,3,4,5
+ TRANSPOSE2x4x4W 0,1,2,3,4
+ paddw m0, [pw_32 GLOBAL]
+ IDCT4_1D 0,1,2,3,4,5
+ pxor m7, m7
+ STORE_DIFF m0, m4, m7, [r0]
+ STORE_DIFF m1, m4, m7, [r0+r2]
+ lea r0, [r0+r2*2]
+ STORE_DIFF m2, m4, m7, [r0]
+ STORE_DIFF m3, m4, m7, [r0+r2]
+ RET
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/h264dsp_mmx.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/h264dsp_mmx.c
new file mode 100644
index 000000000..cfe3429aa
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/h264dsp_mmx.c
@@ -0,0 +1,2325 @@
+/*
+ * Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "dsputil_mmx.h"
+
+DECLARE_ALIGNED_8 (static const uint64_t, ff_pb_3_1 ) = 0x0103010301030103ULL;
+DECLARE_ALIGNED_8 (static const uint64_t, ff_pb_7_3 ) = 0x0307030703070307ULL;
+
+/***********************************/
+/* IDCT */
+
+#define SUMSUB_BADC( a, b, c, d ) \
+ "paddw "#b", "#a" \n\t"\
+ "paddw "#d", "#c" \n\t"\
+ "paddw "#b", "#b" \n\t"\
+ "paddw "#d", "#d" \n\t"\
+ "psubw "#a", "#b" \n\t"\
+ "psubw "#c", "#d" \n\t"
+
+#define SUMSUBD2_AB( a, b, t ) \
+ "movq "#b", "#t" \n\t"\
+ "psraw $1 , "#b" \n\t"\
+ "paddw "#a", "#b" \n\t"\
+ "psraw $1 , "#a" \n\t"\
+ "psubw "#t", "#a" \n\t"
+
+#define IDCT4_1D( s02, s13, d02, d13, t ) \
+ SUMSUB_BA ( s02, d02 )\
+ SUMSUBD2_AB( s13, d13, t )\
+ SUMSUB_BADC( d13, s02, s13, d02 )
+
+#define STORE_DIFF_4P( p, t, z ) \
+ "psraw $6, "#p" \n\t"\
+ "movd (%0), "#t" \n\t"\
+ "punpcklbw "#z", "#t" \n\t"\
+ "paddsw "#t", "#p" \n\t"\
+ "packuswb "#z", "#p" \n\t"\
+ "movd "#p", (%0) \n\t"
+
+static void ff_h264_idct_add_mmx(uint8_t *dst, int16_t *block, int stride)
+{
+ /* Load dct coeffs */
+ __asm__ volatile(
+ "movq (%0), %%mm0 \n\t"
+ "movq 8(%0), %%mm1 \n\t"
+ "movq 16(%0), %%mm2 \n\t"
+ "movq 24(%0), %%mm3 \n\t"
+ :: "r"(block) );
+
+ __asm__ volatile(
+ /* mm1=s02+s13 mm2=s02-s13 mm4=d02+d13 mm0=d02-d13 */
+ IDCT4_1D( %%mm2, %%mm1, %%mm0, %%mm3, %%mm4 )
+
+ "movq %0, %%mm6 \n\t"
+ /* in: 1,4,0,2 out: 1,2,3,0 */
+ TRANSPOSE4( %%mm3, %%mm1, %%mm0, %%mm2, %%mm4 )
+
+ "paddw %%mm6, %%mm3 \n\t"
+
+ /* mm2=s02+s13 mm3=s02-s13 mm4=d02+d13 mm1=d02-d13 */
+ IDCT4_1D( %%mm4, %%mm2, %%mm3, %%mm0, %%mm1 )
+
+ "pxor %%mm7, %%mm7 \n\t"
+ :: "m"(ff_pw_32));
+
+ __asm__ volatile(
+ STORE_DIFF_4P( %%mm0, %%mm1, %%mm7)
+ "add %1, %0 \n\t"
+ STORE_DIFF_4P( %%mm2, %%mm1, %%mm7)
+ "add %1, %0 \n\t"
+ STORE_DIFF_4P( %%mm3, %%mm1, %%mm7)
+ "add %1, %0 \n\t"
+ STORE_DIFF_4P( %%mm4, %%mm1, %%mm7)
+ : "+r"(dst)
+ : "r" ((x86_reg)stride)
+ );
+}
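ff_h264_idct_add_mmx is the 4x4 H.264 inverse transform plus reconstruction; the scalar transform it vectorizes (the same butterflies as the IDCT4_1D/SUMSUB macros, with the +32 rounding folded into the final shift) looks roughly like this, clip_u8 being a hypothetical helper:

    #include <stdint.h>

    static uint8_t clip_u8(int v) { return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v; }

    static void h264_idct4_add_scalar(uint8_t *dst, const int16_t *block, int stride)
    {
        int i, tmp[16];

        for (i = 0; i < 4; i++) {                       /* vertical butterflies */
            int z0 =  block[i]           +  block[i +  8];
            int z1 =  block[i]           -  block[i +  8];
            int z2 = (block[i + 4] >> 1) -  block[i + 12];
            int z3 =  block[i + 4]       + (block[i + 12] >> 1);
            tmp[i]      = z0 + z3;
            tmp[i +  4] = z1 + z2;
            tmp[i +  8] = z1 - z2;
            tmp[i + 12] = z0 - z3;
        }
        for (i = 0; i < 4; i++) {                       /* horizontal butterflies + add to dst */
            int z0 =  tmp[4*i]           +  tmp[4*i + 2];
            int z1 =  tmp[4*i]           -  tmp[4*i + 2];
            int z2 = (tmp[4*i + 1] >> 1) -  tmp[4*i + 3];
            int z3 =  tmp[4*i + 1]       + (tmp[4*i + 3] >> 1);
            dst[i + 0*stride] = clip_u8(dst[i + 0*stride] + ((z0 + z3 + 32) >> 6));
            dst[i + 1*stride] = clip_u8(dst[i + 1*stride] + ((z1 + z2 + 32) >> 6));
            dst[i + 2*stride] = clip_u8(dst[i + 2*stride] + ((z1 - z2 + 32) >> 6));
            dst[i + 3*stride] = clip_u8(dst[i + 3*stride] + ((z0 - z3 + 32) >> 6));
        }
    }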
+
+static inline void h264_idct8_1d(int16_t *block)
+{
+ __asm__ volatile(
+ "movq 112(%0), %%mm7 \n\t"
+ "movq 80(%0), %%mm0 \n\t"
+ "movq 48(%0), %%mm3 \n\t"
+ "movq 16(%0), %%mm5 \n\t"
+
+ "movq %%mm0, %%mm4 \n\t"
+ "movq %%mm5, %%mm1 \n\t"
+ "psraw $1, %%mm4 \n\t"
+ "psraw $1, %%mm1 \n\t"
+ "paddw %%mm0, %%mm4 \n\t"
+ "paddw %%mm5, %%mm1 \n\t"
+ "paddw %%mm7, %%mm4 \n\t"
+ "paddw %%mm0, %%mm1 \n\t"
+ "psubw %%mm5, %%mm4 \n\t"
+ "paddw %%mm3, %%mm1 \n\t"
+
+ "psubw %%mm3, %%mm5 \n\t"
+ "psubw %%mm3, %%mm0 \n\t"
+ "paddw %%mm7, %%mm5 \n\t"
+ "psubw %%mm7, %%mm0 \n\t"
+ "psraw $1, %%mm3 \n\t"
+ "psraw $1, %%mm7 \n\t"
+ "psubw %%mm3, %%mm5 \n\t"
+ "psubw %%mm7, %%mm0 \n\t"
+
+ "movq %%mm4, %%mm3 \n\t"
+ "movq %%mm1, %%mm7 \n\t"
+ "psraw $2, %%mm1 \n\t"
+ "psraw $2, %%mm3 \n\t"
+ "paddw %%mm5, %%mm3 \n\t"
+ "psraw $2, %%mm5 \n\t"
+ "paddw %%mm0, %%mm1 \n\t"
+ "psraw $2, %%mm0 \n\t"
+ "psubw %%mm4, %%mm5 \n\t"
+ "psubw %%mm0, %%mm7 \n\t"
+
+ "movq 32(%0), %%mm2 \n\t"
+ "movq 96(%0), %%mm6 \n\t"
+ "movq %%mm2, %%mm4 \n\t"
+ "movq %%mm6, %%mm0 \n\t"
+ "psraw $1, %%mm4 \n\t"
+ "psraw $1, %%mm6 \n\t"
+ "psubw %%mm0, %%mm4 \n\t"
+ "paddw %%mm2, %%mm6 \n\t"
+
+ "movq (%0), %%mm2 \n\t"
+ "movq 64(%0), %%mm0 \n\t"
+ SUMSUB_BA( %%mm0, %%mm2 )
+ SUMSUB_BA( %%mm6, %%mm0 )
+ SUMSUB_BA( %%mm4, %%mm2 )
+ SUMSUB_BA( %%mm7, %%mm6 )
+ SUMSUB_BA( %%mm5, %%mm4 )
+ SUMSUB_BA( %%mm3, %%mm2 )
+ SUMSUB_BA( %%mm1, %%mm0 )
+ :: "r"(block)
+ );
+}
+
+static void ff_h264_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride)
+{
+ int i;
+ DECLARE_ALIGNED_8(int16_t, b2)[64];
+
+ block[0] += 32;
+
+ for(i=0; i<2; i++){
+ DECLARE_ALIGNED_8(uint64_t, tmp);
+
+ h264_idct8_1d(block+4*i);
+
+ __asm__ volatile(
+ "movq %%mm7, %0 \n\t"
+ TRANSPOSE4( %%mm0, %%mm2, %%mm4, %%mm6, %%mm7 )
+ "movq %%mm0, 8(%1) \n\t"
+ "movq %%mm6, 24(%1) \n\t"
+ "movq %%mm7, 40(%1) \n\t"
+ "movq %%mm4, 56(%1) \n\t"
+ "movq %0, %%mm7 \n\t"
+ TRANSPOSE4( %%mm7, %%mm5, %%mm3, %%mm1, %%mm0 )
+ "movq %%mm7, (%1) \n\t"
+ "movq %%mm1, 16(%1) \n\t"
+ "movq %%mm0, 32(%1) \n\t"
+ "movq %%mm3, 48(%1) \n\t"
+ : "=m"(tmp)
+ : "r"(b2+32*i)
+ : "memory"
+ );
+ }
+
+ for(i=0; i<2; i++){
+ h264_idct8_1d(b2+4*i);
+
+ __asm__ volatile(
+ "psraw $6, %%mm7 \n\t"
+ "psraw $6, %%mm6 \n\t"
+ "psraw $6, %%mm5 \n\t"
+ "psraw $6, %%mm4 \n\t"
+ "psraw $6, %%mm3 \n\t"
+ "psraw $6, %%mm2 \n\t"
+ "psraw $6, %%mm1 \n\t"
+ "psraw $6, %%mm0 \n\t"
+
+ "movq %%mm7, (%0) \n\t"
+ "movq %%mm5, 16(%0) \n\t"
+ "movq %%mm3, 32(%0) \n\t"
+ "movq %%mm1, 48(%0) \n\t"
+ "movq %%mm0, 64(%0) \n\t"
+ "movq %%mm2, 80(%0) \n\t"
+ "movq %%mm4, 96(%0) \n\t"
+ "movq %%mm6, 112(%0) \n\t"
+ :: "r"(b2+4*i)
+ : "memory"
+ );
+ }
+
+ add_pixels_clamped_mmx(b2, dst, stride);
+}
+
+#define STORE_DIFF_8P( p, d, t, z )\
+ "movq "#d", "#t" \n"\
+ "psraw $6, "#p" \n"\
+ "punpcklbw "#z", "#t" \n"\
+ "paddsw "#t", "#p" \n"\
+ "packuswb "#p", "#p" \n"\
+ "movq "#p", "#d" \n"
+
+#define H264_IDCT8_1D_SSE2(a,b,c,d,e,f,g,h)\
+ "movdqa "#c", "#a" \n"\
+ "movdqa "#g", "#e" \n"\
+ "psraw $1, "#c" \n"\
+ "psraw $1, "#g" \n"\
+ "psubw "#e", "#c" \n"\
+ "paddw "#a", "#g" \n"\
+ "movdqa "#b", "#e" \n"\
+ "psraw $1, "#e" \n"\
+ "paddw "#b", "#e" \n"\
+ "paddw "#d", "#e" \n"\
+ "paddw "#f", "#e" \n"\
+ "movdqa "#f", "#a" \n"\
+ "psraw $1, "#a" \n"\
+ "paddw "#f", "#a" \n"\
+ "paddw "#h", "#a" \n"\
+ "psubw "#b", "#a" \n"\
+ "psubw "#d", "#b" \n"\
+ "psubw "#d", "#f" \n"\
+ "paddw "#h", "#b" \n"\
+ "psubw "#h", "#f" \n"\
+ "psraw $1, "#d" \n"\
+ "psraw $1, "#h" \n"\
+ "psubw "#d", "#b" \n"\
+ "psubw "#h", "#f" \n"\
+ "movdqa "#e", "#d" \n"\
+ "movdqa "#a", "#h" \n"\
+ "psraw $2, "#d" \n"\
+ "psraw $2, "#h" \n"\
+ "paddw "#f", "#d" \n"\
+ "paddw "#b", "#h" \n"\
+ "psraw $2, "#f" \n"\
+ "psraw $2, "#b" \n"\
+ "psubw "#f", "#e" \n"\
+ "psubw "#a", "#b" \n"\
+ "movdqa 0x00(%1), "#a" \n"\
+ "movdqa 0x40(%1), "#f" \n"\
+ SUMSUB_BA(f, a)\
+ SUMSUB_BA(g, f)\
+ SUMSUB_BA(c, a)\
+ SUMSUB_BA(e, g)\
+ SUMSUB_BA(b, c)\
+ SUMSUB_BA(h, a)\
+ SUMSUB_BA(d, f)
+
+static void ff_h264_idct8_add_sse2(uint8_t *dst, int16_t *block, int stride)
+{
+ __asm__ volatile(
+ "movdqa 0x10(%1), %%xmm1 \n"
+ "movdqa 0x20(%1), %%xmm2 \n"
+ "movdqa 0x30(%1), %%xmm3 \n"
+ "movdqa 0x50(%1), %%xmm5 \n"
+ "movdqa 0x60(%1), %%xmm6 \n"
+ "movdqa 0x70(%1), %%xmm7 \n"
+ H264_IDCT8_1D_SSE2(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm6, %%xmm7)
+ TRANSPOSE8(%%xmm4, %%xmm1, %%xmm7, %%xmm3, %%xmm5, %%xmm0, %%xmm2, %%xmm6, (%1))
+ "paddw %4, %%xmm4 \n"
+ "movdqa %%xmm4, 0x00(%1) \n"
+ "movdqa %%xmm2, 0x40(%1) \n"
+ H264_IDCT8_1D_SSE2(%%xmm4, %%xmm0, %%xmm6, %%xmm3, %%xmm2, %%xmm5, %%xmm7, %%xmm1)
+ "movdqa %%xmm6, 0x60(%1) \n"
+ "movdqa %%xmm7, 0x70(%1) \n"
+ "pxor %%xmm7, %%xmm7 \n"
+ STORE_DIFF_8P(%%xmm2, (%0), %%xmm6, %%xmm7)
+ STORE_DIFF_8P(%%xmm0, (%0,%2), %%xmm6, %%xmm7)
+ STORE_DIFF_8P(%%xmm1, (%0,%2,2), %%xmm6, %%xmm7)
+ STORE_DIFF_8P(%%xmm3, (%0,%3), %%xmm6, %%xmm7)
+ "lea (%0,%2,4), %0 \n"
+ STORE_DIFF_8P(%%xmm5, (%0), %%xmm6, %%xmm7)
+ STORE_DIFF_8P(%%xmm4, (%0,%2), %%xmm6, %%xmm7)
+ "movdqa 0x60(%1), %%xmm0 \n"
+ "movdqa 0x70(%1), %%xmm1 \n"
+ STORE_DIFF_8P(%%xmm0, (%0,%2,2), %%xmm6, %%xmm7)
+ STORE_DIFF_8P(%%xmm1, (%0,%3), %%xmm6, %%xmm7)
+ :"+r"(dst)
+ :"r"(block), "r"((x86_reg)stride), "r"((x86_reg)3L*stride), "m"(ff_pw_32)
+ );
+}
+
+static void ff_h264_idct_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
+{
+ int dc = (block[0] + 32) >> 6;
+ __asm__ volatile(
+ "movd %0, %%mm0 \n\t"
+ "pshufw $0, %%mm0, %%mm0 \n\t"
+ "pxor %%mm1, %%mm1 \n\t"
+ "psubw %%mm0, %%mm1 \n\t"
+ "packuswb %%mm0, %%mm0 \n\t"
+ "packuswb %%mm1, %%mm1 \n\t"
+ ::"r"(dc)
+ );
+ __asm__ volatile(
+ "movd %0, %%mm2 \n\t"
+ "movd %1, %%mm3 \n\t"
+ "movd %2, %%mm4 \n\t"
+ "movd %3, %%mm5 \n\t"
+ "paddusb %%mm0, %%mm2 \n\t"
+ "paddusb %%mm0, %%mm3 \n\t"
+ "paddusb %%mm0, %%mm4 \n\t"
+ "paddusb %%mm0, %%mm5 \n\t"
+ "psubusb %%mm1, %%mm2 \n\t"
+ "psubusb %%mm1, %%mm3 \n\t"
+ "psubusb %%mm1, %%mm4 \n\t"
+ "psubusb %%mm1, %%mm5 \n\t"
+ "movd %%mm2, %0 \n\t"
+ "movd %%mm3, %1 \n\t"
+ "movd %%mm4, %2 \n\t"
+ "movd %%mm5, %3 \n\t"
+ :"+m"(*(uint32_t*)(dst+0*stride)),
+ "+m"(*(uint32_t*)(dst+1*stride)),
+ "+m"(*(uint32_t*)(dst+2*stride)),
+ "+m"(*(uint32_t*)(dst+3*stride))
+ );
+}
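The DC-only path above splats a single rounded DC value across the block and adds it with saturation; the scalar equivalent for the 4x4 case (idct_dc_add_scalar is a hypothetical name) is simply:

    #include <stdint.h>

    static void idct_dc_add_scalar(uint8_t *dst, const int16_t *block, int stride)
    {
        int i, j, dc = (block[0] + 32) >> 6;            /* same rounding as above */
        for (j = 0; j < 4; j++, dst += stride)
            for (i = 0; i < 4; i++) {
                int v = dst[i] + dc;
                dst[i] = v < 0 ? 0 : v > 255 ? 255 : v; /* the paddusb/psubusb pair */
            }
    }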
+
+static void ff_h264_idct8_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
+{
+ int dc = (block[0] + 32) >> 6;
+ int y;
+ __asm__ volatile(
+ "movd %0, %%mm0 \n\t"
+ "pshufw $0, %%mm0, %%mm0 \n\t"
+ "pxor %%mm1, %%mm1 \n\t"
+ "psubw %%mm0, %%mm1 \n\t"
+ "packuswb %%mm0, %%mm0 \n\t"
+ "packuswb %%mm1, %%mm1 \n\t"
+ ::"r"(dc)
+ );
+ for(y=2; y--; dst += 4*stride){
+ __asm__ volatile(
+ "movq %0, %%mm2 \n\t"
+ "movq %1, %%mm3 \n\t"
+ "movq %2, %%mm4 \n\t"
+ "movq %3, %%mm5 \n\t"
+ "paddusb %%mm0, %%mm2 \n\t"
+ "paddusb %%mm0, %%mm3 \n\t"
+ "paddusb %%mm0, %%mm4 \n\t"
+ "paddusb %%mm0, %%mm5 \n\t"
+ "psubusb %%mm1, %%mm2 \n\t"
+ "psubusb %%mm1, %%mm3 \n\t"
+ "psubusb %%mm1, %%mm4 \n\t"
+ "psubusb %%mm1, %%mm5 \n\t"
+ "movq %%mm2, %0 \n\t"
+ "movq %%mm3, %1 \n\t"
+ "movq %%mm4, %2 \n\t"
+ "movq %%mm5, %3 \n\t"
+ :"+m"(*(uint64_t*)(dst+0*stride)),
+ "+m"(*(uint64_t*)(dst+1*stride)),
+ "+m"(*(uint64_t*)(dst+2*stride)),
+ "+m"(*(uint64_t*)(dst+3*stride))
+ );
+ }
+}
+
+//FIXME this table is a duplicate from h264data.h, and will be removed once the tables from h264 have been split
+static const uint8_t scan8[16 + 2*4]={
+ 4+1*8, 5+1*8, 4+2*8, 5+2*8,
+ 6+1*8, 7+1*8, 6+2*8, 7+2*8,
+ 4+3*8, 5+3*8, 4+4*8, 5+4*8,
+ 6+3*8, 7+3*8, 6+4*8, 7+4*8,
+ 1+1*8, 2+1*8,
+ 1+2*8, 2+2*8,
+ 1+4*8, 2+4*8,
+ 1+5*8, 2+5*8,
+};
+
+static void ff_h264_idct_add16_mmx(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
+ int i;
+ for(i=0; i<16; i++){
+ if(nnzc[ scan8[i] ])
+ ff_h264_idct_add_mmx(dst + block_offset[i], block + i*16, stride);
+ }
+}
+
+static void ff_h264_idct8_add4_mmx(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
+ int i;
+ for(i=0; i<16; i+=4){
+ if(nnzc[ scan8[i] ])
+ ff_h264_idct8_add_mmx(dst + block_offset[i], block + i*16, stride);
+ }
+}
+
+
+static void ff_h264_idct_add16_mmx2(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
+ int i;
+ for(i=0; i<16; i++){
+ int nnz = nnzc[ scan8[i] ];
+ if(nnz){
+ if(nnz==1 && block[i*16]) ff_h264_idct_dc_add_mmx2(dst + block_offset[i], block + i*16, stride);
+ else ff_h264_idct_add_mmx (dst + block_offset[i], block + i*16, stride);
+ }
+ }
+}
+
+static void ff_h264_idct_add16intra_mmx(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
+ int i;
+ for(i=0; i<16; i++){
+ if(nnzc[ scan8[i] ] || block[i*16])
+ ff_h264_idct_add_mmx(dst + block_offset[i], block + i*16, stride);
+ }
+}
+
+static void ff_h264_idct_add16intra_mmx2(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
+ int i;
+ for(i=0; i<16; i++){
+ if(nnzc[ scan8[i] ]) ff_h264_idct_add_mmx (dst + block_offset[i], block + i*16, stride);
+ else if(block[i*16]) ff_h264_idct_dc_add_mmx2(dst + block_offset[i], block + i*16, stride);
+ }
+}
+
+static void ff_h264_idct8_add4_mmx2(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
+ int i;
+ for(i=0; i<16; i+=4){
+ int nnz = nnzc[ scan8[i] ];
+ if(nnz){
+ if(nnz==1 && block[i*16]) ff_h264_idct8_dc_add_mmx2(dst + block_offset[i], block + i*16, stride);
+ else ff_h264_idct8_add_mmx (dst + block_offset[i], block + i*16, stride);
+ }
+ }
+}
+
+static void ff_h264_idct8_add4_sse2(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
+ int i;
+ for(i=0; i<16; i+=4){
+ int nnz = nnzc[ scan8[i] ];
+ if(nnz){
+ if(nnz==1 && block[i*16]) ff_h264_idct8_dc_add_mmx2(dst + block_offset[i], block + i*16, stride);
+ else ff_h264_idct8_add_sse2 (dst + block_offset[i], block + i*16, stride);
+ }
+ }
+}
+
+static void ff_h264_idct_add8_mmx(uint8_t **dest, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
+ int i;
+ for(i=16; i<16+8; i++){
+ if(nnzc[ scan8[i] ] || block[i*16])
+ ff_h264_idct_add_mmx (dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
+ }
+}
+
+static void ff_h264_idct_add8_mmx2(uint8_t **dest, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
+ int i;
+ for(i=16; i<16+8; i++){
+ if(nnzc[ scan8[i] ])
+ ff_h264_idct_add_mmx (dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
+ else if(block[i*16])
+ ff_h264_idct_dc_add_mmx2(dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
+ }
+}
+
+#if CONFIG_GPL && HAVE_YASM
+static void ff_h264_idct_dc_add8_mmx2(uint8_t *dst, int16_t *block, int stride)
+{
+ __asm__ volatile(
+ "movd %0, %%mm0 \n\t" // 0 0 X D
+ "punpcklwd %1, %%mm0 \n\t" // x X d D
+ "paddsw %2, %%mm0 \n\t"
+ "psraw $6, %%mm0 \n\t"
+ "punpcklwd %%mm0, %%mm0 \n\t" // d d D D
+ "pxor %%mm1, %%mm1 \n\t" // 0 0 0 0
+ "psubw %%mm0, %%mm1 \n\t" // -d-d-D-D
+ "packuswb %%mm1, %%mm0 \n\t" // -d-d-D-D d d D D
+ "pshufw $0xFA, %%mm0, %%mm1 \n\t" // -d-d-d-d-D-D-D-D
+ "punpcklwd %%mm0, %%mm0 \n\t" // d d d d D D D D
+ ::"m"(block[ 0]),
+ "m"(block[16]),
+ "m"(ff_pw_32)
+ );
+ __asm__ volatile(
+ "movq %0, %%mm2 \n\t"
+ "movq %1, %%mm3 \n\t"
+ "movq %2, %%mm4 \n\t"
+ "movq %3, %%mm5 \n\t"
+ "paddusb %%mm0, %%mm2 \n\t"
+ "paddusb %%mm0, %%mm3 \n\t"
+ "paddusb %%mm0, %%mm4 \n\t"
+ "paddusb %%mm0, %%mm5 \n\t"
+ "psubusb %%mm1, %%mm2 \n\t"
+ "psubusb %%mm1, %%mm3 \n\t"
+ "psubusb %%mm1, %%mm4 \n\t"
+ "psubusb %%mm1, %%mm5 \n\t"
+ "movq %%mm2, %0 \n\t"
+ "movq %%mm3, %1 \n\t"
+ "movq %%mm4, %2 \n\t"
+ "movq %%mm5, %3 \n\t"
+ :"+m"(*(uint64_t*)(dst+0*stride)),
+ "+m"(*(uint64_t*)(dst+1*stride)),
+ "+m"(*(uint64_t*)(dst+2*stride)),
+ "+m"(*(uint64_t*)(dst+3*stride))
+ );
+}
+
+extern void ff_x264_add8x4_idct_sse2(uint8_t *dst, int16_t *block, int stride);
+
+static void ff_h264_idct_add16_sse2(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
+ int i;
+ for(i=0; i<16; i+=2)
+ if(nnzc[ scan8[i+0] ]|nnzc[ scan8[i+1] ])
+ ff_x264_add8x4_idct_sse2 (dst + block_offset[i], block + i*16, stride);
+}
+
+static void ff_h264_idct_add16intra_sse2(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
+ int i;
+ for(i=0; i<16; i+=2){
+ if(nnzc[ scan8[i+0] ]|nnzc[ scan8[i+1] ])
+ ff_x264_add8x4_idct_sse2 (dst + block_offset[i], block + i*16, stride);
+ else if(block[i*16]|block[i*16+16])
+ ff_h264_idct_dc_add8_mmx2(dst + block_offset[i], block + i*16, stride);
+ }
+}
+
+static void ff_h264_idct_add8_sse2(uint8_t **dest, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
+ int i;
+ for(i=16; i<16+8; i+=2){
+ if(nnzc[ scan8[i+0] ]|nnzc[ scan8[i+1] ])
+ ff_x264_add8x4_idct_sse2 (dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
+ else if(block[i*16]|block[i*16+16])
+ ff_h264_idct_dc_add8_mmx2(dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
+ }
+}
+#endif
+
+/***********************************/
+/* deblocking */
+
+// out: o = |x-y|>a
+// clobbers: t
+#define DIFF_GT_MMX(x,y,a,o,t)\
+ "movq "#y", "#t" \n\t"\
+ "movq "#x", "#o" \n\t"\
+ "psubusb "#x", "#t" \n\t"\
+ "psubusb "#y", "#o" \n\t"\
+ "por "#t", "#o" \n\t"\
+ "psubusb "#a", "#o" \n\t"
+
+// out: o = |x-y|>a
+// clobbers: t
+#define DIFF_GT2_MMX(x,y,a,o,t)\
+ "movq "#y", "#t" \n\t"\
+ "movq "#x", "#o" \n\t"\
+ "psubusb "#x", "#t" \n\t"\
+ "psubusb "#y", "#o" \n\t"\
+ "psubusb "#a", "#t" \n\t"\
+ "psubusb "#a", "#o" \n\t"\
+ "pcmpeqb "#t", "#o" \n\t"\
+
+// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1
+// out: mm5=beta-1, mm7=mask
+// clobbers: mm4,mm6
+#define H264_DEBLOCK_MASK(alpha1, beta1) \
+ "pshufw $0, "#alpha1", %%mm4 \n\t"\
+ "pshufw $0, "#beta1 ", %%mm5 \n\t"\
+ "packuswb %%mm4, %%mm4 \n\t"\
+ "packuswb %%mm5, %%mm5 \n\t"\
+ DIFF_GT_MMX(%%mm1, %%mm2, %%mm4, %%mm7, %%mm6) /* |p0-q0| > alpha-1 */\
+ DIFF_GT_MMX(%%mm0, %%mm1, %%mm5, %%mm4, %%mm6) /* |p1-p0| > beta-1 */\
+ "por %%mm4, %%mm7 \n\t"\
+ DIFF_GT_MMX(%%mm3, %%mm2, %%mm5, %%mm4, %%mm6) /* |q1-q0| > beta-1 */\
+ "por %%mm4, %%mm7 \n\t"\
+ "pxor %%mm6, %%mm6 \n\t"\
+ "pcmpeqb %%mm6, %%mm7 \n\t"
+
+// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask)
+// out: mm1=p0' mm2=q0'
+// clobbers: mm0,3-6
+#define H264_DEBLOCK_P0_Q0(pb_01, pb_3f)\
+ "movq %%mm1 , %%mm5 \n\t"\
+ "pxor %%mm2 , %%mm5 \n\t" /* p0^q0*/\
+ "pand "#pb_01" , %%mm5 \n\t" /* (p0^q0)&1*/\
+ "pcmpeqb %%mm4 , %%mm4 \n\t"\
+ "pxor %%mm4 , %%mm3 \n\t"\
+ "pavgb %%mm0 , %%mm3 \n\t" /* (p1 - q1 + 256)>>1*/\
+ "pavgb "MANGLE(ff_pb_3)" , %%mm3 \n\t" /*(((p1 - q1 + 256)>>1)+4)>>1 = 64+2+(p1-q1)>>2*/\
+ "pxor %%mm1 , %%mm4 \n\t"\
+ "pavgb %%mm2 , %%mm4 \n\t" /* (q0 - p0 + 256)>>1*/\
+ "pavgb %%mm5 , %%mm3 \n\t"\
+ "paddusb %%mm4 , %%mm3 \n\t" /* d+128+33*/\
+ "movq "MANGLE(ff_pb_A1)" , %%mm6 \n\t"\
+ "psubusb %%mm3 , %%mm6 \n\t"\
+ "psubusb "MANGLE(ff_pb_A1)" , %%mm3 \n\t"\
+ "pminub %%mm7 , %%mm6 \n\t"\
+ "pminub %%mm7 , %%mm3 \n\t"\
+ "psubusb %%mm6 , %%mm1 \n\t"\
+ "psubusb %%mm3 , %%mm2 \n\t"\
+ "paddusb %%mm3 , %%mm1 \n\t"\
+ "paddusb %%mm6 , %%mm2 \n\t"
+
+// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask) %8=ff_bone
+// out: (q1addr) = av_clip( (q2+((p0+q0+1)>>1))>>1, q1-tc0, q1+tc0 )
+// clobbers: q2, tmp, tc0
+#define H264_DEBLOCK_Q1(p1, q2, q2addr, q1addr, tc0, tmp)\
+ "movq %%mm1, "#tmp" \n\t"\
+ "pavgb %%mm2, "#tmp" \n\t"\
+ "pavgb "#tmp", "#q2" \n\t" /* avg(p2,avg(p0,q0)) */\
+ "pxor "q2addr", "#tmp" \n\t"\
+ "pand %9, "#tmp" \n\t" /* (p2^avg(p0,q0))&1 */\
+ "psubusb "#tmp", "#q2" \n\t" /* (p2+((p0+q0+1)>>1))>>1 */\
+ "movq "#p1", "#tmp" \n\t"\
+ "psubusb "#tc0", "#tmp" \n\t"\
+ "paddusb "#p1", "#tc0" \n\t"\
+ "pmaxub "#tmp", "#q2" \n\t"\
+ "pminub "#tc0", "#q2" \n\t"\
+ "movq "#q2", "q1addr" \n\t"
+
+static inline void h264_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0)
+{
+ DECLARE_ALIGNED_8(uint64_t, tmp0)[2];
+
+ __asm__ volatile(
+ "movq (%2,%4), %%mm0 \n\t" //p1
+ "movq (%2,%4,2), %%mm1 \n\t" //p0
+ "movq (%3), %%mm2 \n\t" //q0
+ "movq (%3,%4), %%mm3 \n\t" //q1
+ H264_DEBLOCK_MASK(%7, %8)
+
+ "movd %6, %%mm4 \n\t"
+ "punpcklbw %%mm4, %%mm4 \n\t"
+ "punpcklwd %%mm4, %%mm4 \n\t"
+ "pcmpeqb %%mm3, %%mm3 \n\t"
+ "movq %%mm4, %%mm6 \n\t"
+ "pcmpgtb %%mm3, %%mm4 \n\t"
+ "movq %%mm6, %1 \n\t"
+ "pand %%mm4, %%mm7 \n\t"
+ "movq %%mm7, %0 \n\t"
+
+ /* filter p1 */
+ "movq (%2), %%mm3 \n\t" //p2
+ DIFF_GT2_MMX(%%mm1, %%mm3, %%mm5, %%mm6, %%mm4) // |p2-p0|>beta-1
+ "pand %%mm7, %%mm6 \n\t" // mask & |p2-p0|<beta
+ "pand %1, %%mm7 \n\t" // mask & tc0
+ "movq %%mm7, %%mm4 \n\t"
+ "psubb %%mm6, %%mm7 \n\t"
+ "pand %%mm4, %%mm6 \n\t" // mask & |p2-p0|<beta & tc0
+ H264_DEBLOCK_Q1(%%mm0, %%mm3, "(%2)", "(%2,%4)", %%mm6, %%mm4)
+
+ /* filter q1 */
+ "movq (%3,%4,2), %%mm4 \n\t" //q2
+ DIFF_GT2_MMX(%%mm2, %%mm4, %%mm5, %%mm6, %%mm3) // |q2-q0|>beta-1
+ "pand %0, %%mm6 \n\t"
+        "movq    %1, %%mm5              \n\t" // can be merged with the and below but is slower than
+ "pand %%mm6, %%mm5 \n\t"
+ "psubb %%mm6, %%mm7 \n\t"
+ "movq (%3,%4), %%mm3 \n\t"
+ H264_DEBLOCK_Q1(%%mm3, %%mm4, "(%3,%4,2)", "(%3,%4)", %%mm5, %%mm6)
+
+ /* filter p0, q0 */
+ H264_DEBLOCK_P0_Q0(%9, unused)
+ "movq %%mm1, (%2,%4,2) \n\t"
+ "movq %%mm2, (%3) \n\t"
+
+ : "=m"(tmp0[0]), "=m"(tmp0[1])
+ : "r"(pix-3*stride), "r"(pix), "r"((x86_reg)stride),
+ "m"(*tmp0/*unused*/), "m"(*(uint32_t*)tc0), "m"(alpha1), "m"(beta1),
+ "m"(ff_bone)
+ );
+}
+
+static void h264_v_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
+{
+ if((tc0[0] & tc0[1]) >= 0)
+ h264_loop_filter_luma_mmx2(pix, stride, alpha-1, beta-1, tc0);
+ if((tc0[2] & tc0[3]) >= 0)
+ h264_loop_filter_luma_mmx2(pix+8, stride, alpha-1, beta-1, tc0+2);
+}
+static void h264_h_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
+{
+ //FIXME: could cut some load/stores by merging transpose with filter
+ // also, it only needs to transpose 6x8
+ DECLARE_ALIGNED_8(uint8_t, trans)[8*8];
+ int i;
+ for(i=0; i<2; i++, pix+=8*stride, tc0+=2) {
+ if((tc0[0] & tc0[1]) < 0)
+ continue;
+ transpose4x4(trans, pix-4, 8, stride);
+ transpose4x4(trans +4*8, pix, 8, stride);
+ transpose4x4(trans+4, pix-4+4*stride, 8, stride);
+ transpose4x4(trans+4+4*8, pix +4*stride, 8, stride);
+ h264_loop_filter_luma_mmx2(trans+4*8, 8, alpha-1, beta-1, tc0);
+ transpose4x4(pix-2, trans +2*8, stride, 8);
+ transpose4x4(pix-2+4*stride, trans+4+2*8, stride, 8);
+ }
+}
+
+static inline void h264_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0)
+{
+ __asm__ volatile(
+ "movq (%0), %%mm0 \n\t" //p1
+ "movq (%0,%2), %%mm1 \n\t" //p0
+ "movq (%1), %%mm2 \n\t" //q0
+ "movq (%1,%2), %%mm3 \n\t" //q1
+ H264_DEBLOCK_MASK(%4, %5)
+ "movd %3, %%mm6 \n\t"
+ "punpcklbw %%mm6, %%mm6 \n\t"
+ "pand %%mm6, %%mm7 \n\t" // mm7 = tc&mask
+ H264_DEBLOCK_P0_Q0(%6, %7)
+ "movq %%mm1, (%0,%2) \n\t"
+ "movq %%mm2, (%1) \n\t"
+
+ :: "r"(pix-2*stride), "r"(pix), "r"((x86_reg)stride),
+ "r"(*(uint32_t*)tc0),
+ "m"(alpha1), "m"(beta1), "m"(ff_bone), "m"(ff_pb_3F)
+ );
+}
+
+static void h264_v_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
+{
+ h264_loop_filter_chroma_mmx2(pix, stride, alpha-1, beta-1, tc0);
+}
+
+static void h264_h_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
+{
+ //FIXME: could cut some load/stores by merging transpose with filter
+ DECLARE_ALIGNED_8(uint8_t, trans)[8*4];
+ transpose4x4(trans, pix-2, 8, stride);
+ transpose4x4(trans+4, pix-2+4*stride, 8, stride);
+ h264_loop_filter_chroma_mmx2(trans+2*8, 8, alpha-1, beta-1, tc0);
+ transpose4x4(pix-2, trans, stride, 8);
+ transpose4x4(pix-2+4*stride, trans+4, stride, 8);
+}
+
+// p0 = (p0 + q1 + 2*p1 + 2) >> 2
+#define H264_FILTER_CHROMA4(p0, p1, q1, one) \
+ "movq "#p0", %%mm4 \n\t"\
+ "pxor "#q1", %%mm4 \n\t"\
+ "pand "#one", %%mm4 \n\t" /* mm4 = (p0^q1)&1 */\
+ "pavgb "#q1", "#p0" \n\t"\
+ "psubusb %%mm4, "#p0" \n\t"\
+ "pavgb "#p1", "#p0" \n\t" /* dst = avg(p1, avg(p0,q1) - ((p0^q1)&1)) */\
+
+static inline void h264_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha1, int beta1)
+{
+ __asm__ volatile(
+ "movq (%0), %%mm0 \n\t"
+ "movq (%0,%2), %%mm1 \n\t"
+ "movq (%1), %%mm2 \n\t"
+ "movq (%1,%2), %%mm3 \n\t"
+ H264_DEBLOCK_MASK(%3, %4)
+ "movq %%mm1, %%mm5 \n\t"
+ "movq %%mm2, %%mm6 \n\t"
+ H264_FILTER_CHROMA4(%%mm1, %%mm0, %%mm3, %5) //p0'
+ H264_FILTER_CHROMA4(%%mm2, %%mm3, %%mm0, %5) //q0'
+ "psubb %%mm5, %%mm1 \n\t"
+ "psubb %%mm6, %%mm2 \n\t"
+ "pand %%mm7, %%mm1 \n\t"
+ "pand %%mm7, %%mm2 \n\t"
+ "paddb %%mm5, %%mm1 \n\t"
+ "paddb %%mm6, %%mm2 \n\t"
+ "movq %%mm1, (%0,%2) \n\t"
+ "movq %%mm2, (%1) \n\t"
+ :: "r"(pix-2*stride), "r"(pix), "r"((x86_reg)stride),
+ "m"(alpha1), "m"(beta1), "m"(ff_bone)
+ );
+}
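H264_FILTER_CHROMA4 already states its result in the comment; the scalar form of the strong (intra) chroma filter applied above to both edge pixels, with the threshold mask handled separately in mm7, is (chroma_intra_filter is a hypothetical helper):

    #include <stdint.h>

    /* p0' = (2*p1 + p0 + q1 + 2) >> 2 and symmetrically for q0'; no clip needed,
       the average of byte values stays in [0,255] */
    static void chroma_intra_filter(uint8_t *p0, uint8_t *q0, int p1, int q1)
    {
        int np0 = (2*p1 + *p0 + q1 + 2) >> 2;
        int nq0 = (2*q1 + *q0 + p1 + 2) >> 2;
        *p0 = (uint8_t)np0;
        *q0 = (uint8_t)nq0;
    }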
+
+static void h264_v_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
+{
+ h264_loop_filter_chroma_intra_mmx2(pix, stride, alpha-1, beta-1);
+}
+
+static void h264_h_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
+{
+ //FIXME: could cut some load/stores by merging transpose with filter
+ DECLARE_ALIGNED_8(uint8_t, trans)[8*4];
+ transpose4x4(trans, pix-2, 8, stride);
+ transpose4x4(trans+4, pix-2+4*stride, 8, stride);
+ h264_loop_filter_chroma_intra_mmx2(trans+2*8, 8, alpha-1, beta-1);
+ transpose4x4(pix-2, trans, stride, 8);
+ transpose4x4(pix-2+4*stride, trans+4, stride, 8);
+}
+
+static void h264_loop_filter_strength_mmx2( int16_t bS[2][4][4], uint8_t nnz[40], int8_t ref[2][40], int16_t mv[2][40][2],
+ int bidir, int edges, int step, int mask_mv0, int mask_mv1, int field ) {
+ int dir;
+ __asm__ volatile(
+ "movq %0, %%mm7 \n"
+ "movq %1, %%mm6 \n"
+ ::"m"(ff_pb_1), "m"(ff_pb_3)
+ );
+ if(field)
+ __asm__ volatile(
+ "movq %0, %%mm6 \n"
+ ::"m"(ff_pb_3_1)
+ );
+ __asm__ volatile(
+ "movq %%mm6, %%mm5 \n"
+ "paddb %%mm5, %%mm5 \n"
+ :);
+
+ // could do a special case for dir==0 && edges==1, but it only reduces the
+ // average filter time by 1.2%
+ for( dir=1; dir>=0; dir-- ) {
+ const x86_reg d_idx = dir ? -8 : -1;
+ const int mask_mv = dir ? mask_mv1 : mask_mv0;
+ DECLARE_ALIGNED_8(const uint64_t, mask_dir) = dir ? 0 : 0xffffffffffffffffULL;
+ int b_idx, edge;
+ for( b_idx=12, edge=0; edge<edges; edge+=step, b_idx+=8*step ) {
+ __asm__ volatile(
+ "pand %0, %%mm0 \n\t"
+ ::"m"(mask_dir)
+ );
+ if(!(mask_mv & edge)) {
+ if(bidir) {
+ __asm__ volatile(
+ "movd (%1,%0), %%mm2 \n"
+ "punpckldq 40(%1,%0), %%mm2 \n" // { ref0[bn], ref1[bn] }
+ "pshufw $0x44, (%1), %%mm0 \n" // { ref0[b], ref0[b] }
+ "pshufw $0x44, 40(%1), %%mm1 \n" // { ref1[b], ref1[b] }
+ "pshufw $0x4E, %%mm2, %%mm3 \n"
+ "psubb %%mm2, %%mm0 \n" // { ref0[b]!=ref0[bn], ref0[b]!=ref1[bn] }
+ "psubb %%mm3, %%mm1 \n" // { ref1[b]!=ref1[bn], ref1[b]!=ref0[bn] }
+ "1: \n"
+ "por %%mm1, %%mm0 \n"
+ "movq (%2,%0,4), %%mm1 \n"
+ "movq 8(%2,%0,4), %%mm2 \n"
+ "movq %%mm1, %%mm3 \n"
+ "movq %%mm2, %%mm4 \n"
+ "psubw (%2), %%mm1 \n"
+ "psubw 8(%2), %%mm2 \n"
+ "psubw 160(%2), %%mm3 \n"
+ "psubw 168(%2), %%mm4 \n"
+ "packsswb %%mm2, %%mm1 \n"
+ "packsswb %%mm4, %%mm3 \n"
+ "paddb %%mm6, %%mm1 \n"
+ "paddb %%mm6, %%mm3 \n"
+ "psubusb %%mm5, %%mm1 \n" // abs(mv[b] - mv[bn]) >= limit
+ "psubusb %%mm5, %%mm3 \n"
+ "packsswb %%mm3, %%mm1 \n"
+ "add $40, %0 \n"
+ "cmp $40, %0 \n"
+ "jl 1b \n"
+ "sub $80, %0 \n"
+ "pshufw $0x4E, %%mm1, %%mm1 \n"
+ "por %%mm1, %%mm0 \n"
+ "pshufw $0x4E, %%mm0, %%mm1 \n"
+ "pminub %%mm1, %%mm0 \n"
+ ::"r"(d_idx),
+ "r"(ref[0]+b_idx),
+ "r"(mv[0]+b_idx)
+ );
+ } else {
+ __asm__ volatile(
+ "movd (%1), %%mm0 \n"
+ "psubb (%1,%0), %%mm0 \n" // ref[b] != ref[bn]
+ "movq (%2), %%mm1 \n"
+ "movq 8(%2), %%mm2 \n"
+ "psubw (%2,%0,4), %%mm1 \n"
+ "psubw 8(%2,%0,4), %%mm2 \n"
+ "packsswb %%mm2, %%mm1 \n"
+ "paddb %%mm6, %%mm1 \n"
+ "psubusb %%mm5, %%mm1 \n" // abs(mv[b] - mv[bn]) >= limit
+ "packsswb %%mm1, %%mm1 \n"
+ "por %%mm1, %%mm0 \n"
+ ::"r"(d_idx),
+ "r"(ref[0]+b_idx),
+ "r"(mv[0]+b_idx)
+ );
+ }
+ }
+ __asm__ volatile(
+ "movd %0, %%mm1 \n"
+ "por %1, %%mm1 \n" // nnz[b] || nnz[bn]
+ ::"m"(nnz[b_idx]),
+ "m"(nnz[b_idx+d_idx])
+ );
+ __asm__ volatile(
+ "pminub %%mm7, %%mm1 \n"
+ "pminub %%mm7, %%mm0 \n"
+ "psllw $1, %%mm1 \n"
+ "pxor %%mm2, %%mm2 \n"
+ "pmaxub %%mm0, %%mm1 \n"
+ "punpcklbw %%mm2, %%mm1 \n"
+ "movq %%mm1, %0 \n"
+ :"=m"(*bS[dir][edge])
+ ::"memory"
+ );
+ }
+ edges = 4;
+ step = 1;
+ }
+ __asm__ volatile(
+ "movq (%0), %%mm0 \n\t"
+ "movq 8(%0), %%mm1 \n\t"
+ "movq 16(%0), %%mm2 \n\t"
+ "movq 24(%0), %%mm3 \n\t"
+ TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4)
+ "movq %%mm0, (%0) \n\t"
+ "movq %%mm3, 8(%0) \n\t"
+ "movq %%mm4, 16(%0) \n\t"
+ "movq %%mm2, 24(%0) \n\t"
+ ::"r"(bS[0])
+ :"memory"
+ );
+}
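
What this routine vectorizes is, per 4x4 block edge, the usual non-intra boundary-strength decision: a nonzero coefficient count on either side gives bS = 2, otherwise a reference or motion-vector mismatch gives bS = 1, else 0 (intra edges, which force bS 3 or 4, are handled by the caller). A scalar sketch of that rule, with the motion-vector limit assumed to be 4 quarter-pel units, and the vertical limit halved to 2 in field mode as the ff_pb_3_1 constant suggests:

#include <stdint.h>
#include <stdlib.h>

/* Scalar sketch of the boundary-strength rule for one edge between 4x4
 * blocks b and bn (single reference list shown). */
static int loop_filter_bs_ref(int nnz_b, int nnz_bn, int ref_b, int ref_bn,
                              const int16_t mv_b[2], const int16_t mv_bn[2],
                              int mv_limit_x, int mv_limit_y)
{
    if (nnz_b || nnz_bn)
        return 2;                       /* coded residual on either side */
    if (ref_b != ref_bn ||
        abs(mv_b[0] - mv_bn[0]) >= mv_limit_x ||
        abs(mv_b[1] - mv_bn[1]) >= mv_limit_y)
        return 1;                       /* reference or motion mismatch */
    return 0;
}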
+
+/***********************************/
+/* motion compensation */
+
+#define QPEL_H264V_MM(A,B,C,D,E,F,OP,T,Z,d,q)\
+ "mov"#q" "#C", "#T" \n\t"\
+ "mov"#d" (%0), "#F" \n\t"\
+ "paddw "#D", "#T" \n\t"\
+ "psllw $2, "#T" \n\t"\
+ "psubw "#B", "#T" \n\t"\
+ "psubw "#E", "#T" \n\t"\
+ "punpcklbw "#Z", "#F" \n\t"\
+ "pmullw %4, "#T" \n\t"\
+ "paddw %5, "#A" \n\t"\
+ "add %2, %0 \n\t"\
+ "paddw "#F", "#A" \n\t"\
+ "paddw "#A", "#T" \n\t"\
+ "psraw $5, "#T" \n\t"\
+ "packuswb "#T", "#T" \n\t"\
+ OP(T, (%1), A, d)\
+ "add %3, %1 \n\t"
+
+#define QPEL_H264HV_MM(A,B,C,D,E,F,OF,T,Z,d,q)\
+ "mov"#q" "#C", "#T" \n\t"\
+ "mov"#d" (%0), "#F" \n\t"\
+ "paddw "#D", "#T" \n\t"\
+ "psllw $2, "#T" \n\t"\
+ "paddw %4, "#A" \n\t"\
+ "psubw "#B", "#T" \n\t"\
+ "psubw "#E", "#T" \n\t"\
+ "punpcklbw "#Z", "#F" \n\t"\
+ "pmullw %3, "#T" \n\t"\
+ "paddw "#F", "#A" \n\t"\
+ "add %2, %0 \n\t"\
+ "paddw "#A", "#T" \n\t"\
+ "mov"#q" "#T", "#OF"(%1) \n\t"
+
+#define QPEL_H264V(A,B,C,D,E,F,OP) QPEL_H264V_MM(A,B,C,D,E,F,OP,%%mm6,%%mm7,d,q)
+#define QPEL_H264HV(A,B,C,D,E,F,OF) QPEL_H264HV_MM(A,B,C,D,E,F,OF,%%mm6,%%mm7,d,q)
+#define QPEL_H264V_XMM(A,B,C,D,E,F,OP) QPEL_H264V_MM(A,B,C,D,E,F,OP,%%xmm6,%%xmm7,q,dqa)
+#define QPEL_H264HV_XMM(A,B,C,D,E,F,OF) QPEL_H264HV_MM(A,B,C,D,E,F,OF,%%xmm6,%%xmm7,q,dqa)
+
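These macros compute one output of the H.264 six-tap half-sample filter (taps 1, -5, 20, 20, -5, 1): QPEL_H264V produces finished pixels with rounding and a 5-bit shift, while QPEL_H264HV stores the unshifted 16-bit sums for a second pass. A plain-C sketch of the vertical case they implement:

#include <stdint.h>

/* Scalar reference of the vertical 6-tap lowpass: A..F in the macros are six
 * vertically adjacent samples, and the finished value is
 * clip((A - 5*B + 20*C + 20*D - 5*E + F + 16) >> 5).
 * The caller must supply two valid rows above and three below the block. */
static void h264_qpel_v_lowpass_ref(uint8_t *dst, const uint8_t *src,
                                    int dstStride, int srcStride, int w, int h)
{
    int x, y, v;
    for (y = 0; y < h; y++, dst += dstStride, src += srcStride)
        for (x = 0; x < w; x++) {
            v = src[x - 2*srcStride] - 5*src[x - srcStride] + 20*src[x]
              + 20*src[x + srcStride] - 5*src[x + 2*srcStride] + src[x + 3*srcStride];
            v = (v + 16) >> 5;
            dst[x] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
}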
+
+#define QPEL_H264(OPNAME, OP, MMX)\
+static av_noinline void OPNAME ## h264_qpel4_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ int h=4;\
+\
+ __asm__ volatile(\
+ "pxor %%mm7, %%mm7 \n\t"\
+ "movq %5, %%mm4 \n\t"\
+ "movq %6, %%mm5 \n\t"\
+ "1: \n\t"\
+ "movd -1(%0), %%mm1 \n\t"\
+ "movd (%0), %%mm2 \n\t"\
+ "movd 1(%0), %%mm3 \n\t"\
+ "movd 2(%0), %%mm0 \n\t"\
+ "punpcklbw %%mm7, %%mm1 \n\t"\
+ "punpcklbw %%mm7, %%mm2 \n\t"\
+ "punpcklbw %%mm7, %%mm3 \n\t"\
+ "punpcklbw %%mm7, %%mm0 \n\t"\
+ "paddw %%mm0, %%mm1 \n\t"\
+ "paddw %%mm3, %%mm2 \n\t"\
+ "movd -2(%0), %%mm0 \n\t"\
+ "movd 3(%0), %%mm3 \n\t"\
+ "punpcklbw %%mm7, %%mm0 \n\t"\
+ "punpcklbw %%mm7, %%mm3 \n\t"\
+ "paddw %%mm3, %%mm0 \n\t"\
+ "psllw $2, %%mm2 \n\t"\
+ "psubw %%mm1, %%mm2 \n\t"\
+ "pmullw %%mm4, %%mm2 \n\t"\
+ "paddw %%mm5, %%mm0 \n\t"\
+ "paddw %%mm2, %%mm0 \n\t"\
+ "psraw $5, %%mm0 \n\t"\
+ "packuswb %%mm0, %%mm0 \n\t"\
+ OP(%%mm0, (%1),%%mm6, d)\
+ "add %3, %0 \n\t"\
+ "add %4, %1 \n\t"\
+ "decl %2 \n\t"\
+ " jnz 1b \n\t"\
+ : "+a"(src), "+c"(dst), "+g"(h)\
+ : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
+ : "memory"\
+ );\
+}\
+static av_noinline void OPNAME ## h264_qpel4_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
+ int h=4;\
+ __asm__ volatile(\
+ "pxor %%mm7, %%mm7 \n\t"\
+ "movq %0, %%mm4 \n\t"\
+ "movq %1, %%mm5 \n\t"\
+ :: "m"(ff_pw_5), "m"(ff_pw_16)\
+ );\
+ do{\
+ __asm__ volatile(\
+ "movd -1(%0), %%mm1 \n\t"\
+ "movd (%0), %%mm2 \n\t"\
+ "movd 1(%0), %%mm3 \n\t"\
+ "movd 2(%0), %%mm0 \n\t"\
+ "punpcklbw %%mm7, %%mm1 \n\t"\
+ "punpcklbw %%mm7, %%mm2 \n\t"\
+ "punpcklbw %%mm7, %%mm3 \n\t"\
+ "punpcklbw %%mm7, %%mm0 \n\t"\
+ "paddw %%mm0, %%mm1 \n\t"\
+ "paddw %%mm3, %%mm2 \n\t"\
+ "movd -2(%0), %%mm0 \n\t"\
+ "movd 3(%0), %%mm3 \n\t"\
+ "punpcklbw %%mm7, %%mm0 \n\t"\
+ "punpcklbw %%mm7, %%mm3 \n\t"\
+ "paddw %%mm3, %%mm0 \n\t"\
+ "psllw $2, %%mm2 \n\t"\
+ "psubw %%mm1, %%mm2 \n\t"\
+ "pmullw %%mm4, %%mm2 \n\t"\
+ "paddw %%mm5, %%mm0 \n\t"\
+ "paddw %%mm2, %%mm0 \n\t"\
+ "movd (%2), %%mm3 \n\t"\
+ "psraw $5, %%mm0 \n\t"\
+ "packuswb %%mm0, %%mm0 \n\t"\
+ PAVGB" %%mm3, %%mm0 \n\t"\
+ OP(%%mm0, (%1),%%mm6, d)\
+ "add %4, %0 \n\t"\
+ "add %4, %1 \n\t"\
+ "add %3, %2 \n\t"\
+ : "+a"(src), "+c"(dst), "+d"(src2)\
+ : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride)\
+ : "memory"\
+ );\
+ }while(--h);\
+}\
+static av_noinline void OPNAME ## h264_qpel4_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ src -= 2*srcStride;\
+ __asm__ volatile(\
+ "pxor %%mm7, %%mm7 \n\t"\
+ "movd (%0), %%mm0 \n\t"\
+ "add %2, %0 \n\t"\
+ "movd (%0), %%mm1 \n\t"\
+ "add %2, %0 \n\t"\
+ "movd (%0), %%mm2 \n\t"\
+ "add %2, %0 \n\t"\
+ "movd (%0), %%mm3 \n\t"\
+ "add %2, %0 \n\t"\
+ "movd (%0), %%mm4 \n\t"\
+ "add %2, %0 \n\t"\
+ "punpcklbw %%mm7, %%mm0 \n\t"\
+ "punpcklbw %%mm7, %%mm1 \n\t"\
+ "punpcklbw %%mm7, %%mm2 \n\t"\
+ "punpcklbw %%mm7, %%mm3 \n\t"\
+ "punpcklbw %%mm7, %%mm4 \n\t"\
+ QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
+ QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
+ QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
+ QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
+ \
+ : "+a"(src), "+c"(dst)\
+ : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
+ : "memory"\
+ );\
+}\
+static av_noinline void OPNAME ## h264_qpel4_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
+ int h=4;\
+ int w=3;\
+ src -= 2*srcStride+2;\
+ while(w--){\
+ __asm__ volatile(\
+ "pxor %%mm7, %%mm7 \n\t"\
+ "movd (%0), %%mm0 \n\t"\
+ "add %2, %0 \n\t"\
+ "movd (%0), %%mm1 \n\t"\
+ "add %2, %0 \n\t"\
+ "movd (%0), %%mm2 \n\t"\
+ "add %2, %0 \n\t"\
+ "movd (%0), %%mm3 \n\t"\
+ "add %2, %0 \n\t"\
+ "movd (%0), %%mm4 \n\t"\
+ "add %2, %0 \n\t"\
+ "punpcklbw %%mm7, %%mm0 \n\t"\
+ "punpcklbw %%mm7, %%mm1 \n\t"\
+ "punpcklbw %%mm7, %%mm2 \n\t"\
+ "punpcklbw %%mm7, %%mm3 \n\t"\
+ "punpcklbw %%mm7, %%mm4 \n\t"\
+ QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*8*3)\
+ QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*8*3)\
+ QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*8*3)\
+ QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*8*3)\
+ \
+ : "+a"(src)\
+ : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)\
+ : "memory"\
+ );\
+ tmp += 4;\
+ src += 4 - 9*srcStride;\
+ }\
+ tmp -= 3*4;\
+ __asm__ volatile(\
+ "1: \n\t"\
+ "movq (%0), %%mm0 \n\t"\
+ "paddw 10(%0), %%mm0 \n\t"\
+ "movq 2(%0), %%mm1 \n\t"\
+ "paddw 8(%0), %%mm1 \n\t"\
+ "movq 4(%0), %%mm2 \n\t"\
+ "paddw 6(%0), %%mm2 \n\t"\
+ "psubw %%mm1, %%mm0 \n\t"/*a-b (abccba)*/\
+ "psraw $2, %%mm0 \n\t"/*(a-b)/4 */\
+ "psubw %%mm1, %%mm0 \n\t"/*(a-b)/4-b */\
+ "paddsw %%mm2, %%mm0 \n\t"\
+ "psraw $2, %%mm0 \n\t"/*((a-b)/4-b+c)/4 */\
+ "paddw %%mm2, %%mm0 \n\t"/*(a-5*b+20*c)/16 */\
+ "psraw $6, %%mm0 \n\t"\
+ "packuswb %%mm0, %%mm0 \n\t"\
+ OP(%%mm0, (%1),%%mm7, d)\
+ "add $24, %0 \n\t"\
+ "add %3, %1 \n\t"\
+ "decl %2 \n\t"\
+ " jnz 1b \n\t"\
+ : "+a"(tmp), "+c"(dst), "+g"(h)\
+ : "S"((x86_reg)dstStride)\
+ : "memory"\
+ );\
+}\
+\
+static av_noinline void OPNAME ## h264_qpel8_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ int h=8;\
+ __asm__ volatile(\
+ "pxor %%mm7, %%mm7 \n\t"\
+ "movq %5, %%mm6 \n\t"\
+ "1: \n\t"\
+ "movq (%0), %%mm0 \n\t"\
+ "movq 1(%0), %%mm2 \n\t"\
+ "movq %%mm0, %%mm1 \n\t"\
+ "movq %%mm2, %%mm3 \n\t"\
+ "punpcklbw %%mm7, %%mm0 \n\t"\
+ "punpckhbw %%mm7, %%mm1 \n\t"\
+ "punpcklbw %%mm7, %%mm2 \n\t"\
+ "punpckhbw %%mm7, %%mm3 \n\t"\
+ "paddw %%mm2, %%mm0 \n\t"\
+ "paddw %%mm3, %%mm1 \n\t"\
+ "psllw $2, %%mm0 \n\t"\
+ "psllw $2, %%mm1 \n\t"\
+ "movq -1(%0), %%mm2 \n\t"\
+ "movq 2(%0), %%mm4 \n\t"\
+ "movq %%mm2, %%mm3 \n\t"\
+ "movq %%mm4, %%mm5 \n\t"\
+ "punpcklbw %%mm7, %%mm2 \n\t"\
+ "punpckhbw %%mm7, %%mm3 \n\t"\
+ "punpcklbw %%mm7, %%mm4 \n\t"\
+ "punpckhbw %%mm7, %%mm5 \n\t"\
+ "paddw %%mm4, %%mm2 \n\t"\
+ "paddw %%mm3, %%mm5 \n\t"\
+ "psubw %%mm2, %%mm0 \n\t"\
+ "psubw %%mm5, %%mm1 \n\t"\
+ "pmullw %%mm6, %%mm0 \n\t"\
+ "pmullw %%mm6, %%mm1 \n\t"\
+ "movd -2(%0), %%mm2 \n\t"\
+ "movd 7(%0), %%mm5 \n\t"\
+ "punpcklbw %%mm7, %%mm2 \n\t"\
+ "punpcklbw %%mm7, %%mm5 \n\t"\
+ "paddw %%mm3, %%mm2 \n\t"\
+ "paddw %%mm5, %%mm4 \n\t"\
+ "movq %6, %%mm5 \n\t"\
+ "paddw %%mm5, %%mm2 \n\t"\
+ "paddw %%mm5, %%mm4 \n\t"\
+ "paddw %%mm2, %%mm0 \n\t"\
+ "paddw %%mm4, %%mm1 \n\t"\
+ "psraw $5, %%mm0 \n\t"\
+ "psraw $5, %%mm1 \n\t"\
+ "packuswb %%mm1, %%mm0 \n\t"\
+ OP(%%mm0, (%1),%%mm5, q)\
+ "add %3, %0 \n\t"\
+ "add %4, %1 \n\t"\
+ "decl %2 \n\t"\
+ " jnz 1b \n\t"\
+ : "+a"(src), "+c"(dst), "+g"(h)\
+ : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
+ : "memory"\
+ );\
+}\
+\
+static av_noinline void OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
+ int h=8;\
+ __asm__ volatile(\
+ "pxor %%mm7, %%mm7 \n\t"\
+ "movq %0, %%mm6 \n\t"\
+ :: "m"(ff_pw_5)\
+ );\
+ do{\
+ __asm__ volatile(\
+ "movq (%0), %%mm0 \n\t"\
+ "movq 1(%0), %%mm2 \n\t"\
+ "movq %%mm0, %%mm1 \n\t"\
+ "movq %%mm2, %%mm3 \n\t"\
+ "punpcklbw %%mm7, %%mm0 \n\t"\
+ "punpckhbw %%mm7, %%mm1 \n\t"\
+ "punpcklbw %%mm7, %%mm2 \n\t"\
+ "punpckhbw %%mm7, %%mm3 \n\t"\
+ "paddw %%mm2, %%mm0 \n\t"\
+ "paddw %%mm3, %%mm1 \n\t"\
+ "psllw $2, %%mm0 \n\t"\
+ "psllw $2, %%mm1 \n\t"\
+ "movq -1(%0), %%mm2 \n\t"\
+ "movq 2(%0), %%mm4 \n\t"\
+ "movq %%mm2, %%mm3 \n\t"\
+ "movq %%mm4, %%mm5 \n\t"\
+ "punpcklbw %%mm7, %%mm2 \n\t"\
+ "punpckhbw %%mm7, %%mm3 \n\t"\
+ "punpcklbw %%mm7, %%mm4 \n\t"\
+ "punpckhbw %%mm7, %%mm5 \n\t"\
+ "paddw %%mm4, %%mm2 \n\t"\
+ "paddw %%mm3, %%mm5 \n\t"\
+ "psubw %%mm2, %%mm0 \n\t"\
+ "psubw %%mm5, %%mm1 \n\t"\
+ "pmullw %%mm6, %%mm0 \n\t"\
+ "pmullw %%mm6, %%mm1 \n\t"\
+ "movd -2(%0), %%mm2 \n\t"\
+ "movd 7(%0), %%mm5 \n\t"\
+ "punpcklbw %%mm7, %%mm2 \n\t"\
+ "punpcklbw %%mm7, %%mm5 \n\t"\
+ "paddw %%mm3, %%mm2 \n\t"\
+ "paddw %%mm5, %%mm4 \n\t"\
+ "movq %5, %%mm5 \n\t"\
+ "paddw %%mm5, %%mm2 \n\t"\
+ "paddw %%mm5, %%mm4 \n\t"\
+ "paddw %%mm2, %%mm0 \n\t"\
+ "paddw %%mm4, %%mm1 \n\t"\
+ "psraw $5, %%mm0 \n\t"\
+ "psraw $5, %%mm1 \n\t"\
+ "movq (%2), %%mm4 \n\t"\
+ "packuswb %%mm1, %%mm0 \n\t"\
+ PAVGB" %%mm4, %%mm0 \n\t"\
+ OP(%%mm0, (%1),%%mm5, q)\
+ "add %4, %0 \n\t"\
+ "add %4, %1 \n\t"\
+ "add %3, %2 \n\t"\
+ : "+a"(src), "+c"(dst), "+d"(src2)\
+ : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride),\
+ "m"(ff_pw_16)\
+ : "memory"\
+ );\
+ }while(--h);\
+}\
+\
+static av_noinline void OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
+ int w= 2;\
+ src -= 2*srcStride;\
+ \
+ while(w--){\
+ __asm__ volatile(\
+ "pxor %%mm7, %%mm7 \n\t"\
+ "movd (%0), %%mm0 \n\t"\
+ "add %2, %0 \n\t"\
+ "movd (%0), %%mm1 \n\t"\
+ "add %2, %0 \n\t"\
+ "movd (%0), %%mm2 \n\t"\
+ "add %2, %0 \n\t"\
+ "movd (%0), %%mm3 \n\t"\
+ "add %2, %0 \n\t"\
+ "movd (%0), %%mm4 \n\t"\
+ "add %2, %0 \n\t"\
+ "punpcklbw %%mm7, %%mm0 \n\t"\
+ "punpcklbw %%mm7, %%mm1 \n\t"\
+ "punpcklbw %%mm7, %%mm2 \n\t"\
+ "punpcklbw %%mm7, %%mm3 \n\t"\
+ "punpcklbw %%mm7, %%mm4 \n\t"\
+ QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
+ QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
+ QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
+ QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
+ QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
+ QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
+ QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
+ QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
+ \
+ : "+a"(src), "+c"(dst)\
+ : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
+ : "memory"\
+ );\
+ if(h==16){\
+ __asm__ volatile(\
+ QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
+ QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
+ QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
+ QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
+ QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
+ QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
+ QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
+ QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
+ \
+ : "+a"(src), "+c"(dst)\
+ : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
+ : "memory"\
+ );\
+ }\
+ src += 4-(h+5)*srcStride;\
+ dst += 4-h*dstStride;\
+ }\
+}\
+static av_always_inline void OPNAME ## h264_qpel8or16_hv1_lowpass_ ## MMX(int16_t *tmp, uint8_t *src, int tmpStride, int srcStride, int size){\
+ int w = (size+8)>>2;\
+ src -= 2*srcStride+2;\
+ while(w--){\
+ __asm__ volatile(\
+ "pxor %%mm7, %%mm7 \n\t"\
+ "movd (%0), %%mm0 \n\t"\
+ "add %2, %0 \n\t"\
+ "movd (%0), %%mm1 \n\t"\
+ "add %2, %0 \n\t"\
+ "movd (%0), %%mm2 \n\t"\
+ "add %2, %0 \n\t"\
+ "movd (%0), %%mm3 \n\t"\
+ "add %2, %0 \n\t"\
+ "movd (%0), %%mm4 \n\t"\
+ "add %2, %0 \n\t"\
+ "punpcklbw %%mm7, %%mm0 \n\t"\
+ "punpcklbw %%mm7, %%mm1 \n\t"\
+ "punpcklbw %%mm7, %%mm2 \n\t"\
+ "punpcklbw %%mm7, %%mm3 \n\t"\
+ "punpcklbw %%mm7, %%mm4 \n\t"\
+ QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*48)\
+ QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*48)\
+ QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*48)\
+ QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*48)\
+ QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 4*48)\
+ QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 5*48)\
+ QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 6*48)\
+ QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 7*48)\
+ : "+a"(src)\
+ : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)\
+ : "memory"\
+ );\
+ if(size==16){\
+ __asm__ volatile(\
+ QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 8*48)\
+ QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 9*48)\
+ QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 10*48)\
+ QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 11*48)\
+ QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 12*48)\
+ QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 13*48)\
+ QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 14*48)\
+ QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 15*48)\
+ : "+a"(src)\
+ : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)\
+ : "memory"\
+ );\
+ }\
+ tmp += 4;\
+ src += 4 - (size+5)*srcStride;\
+ }\
+}\
+static av_always_inline void OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, int dstStride, int tmpStride, int size){\
+ int w = size>>4;\
+ do{\
+ int h = size;\
+ __asm__ volatile(\
+ "1: \n\t"\
+ "movq (%0), %%mm0 \n\t"\
+ "movq 8(%0), %%mm3 \n\t"\
+ "movq 2(%0), %%mm1 \n\t"\
+ "movq 10(%0), %%mm4 \n\t"\
+ "paddw %%mm4, %%mm0 \n\t"\
+ "paddw %%mm3, %%mm1 \n\t"\
+ "paddw 18(%0), %%mm3 \n\t"\
+ "paddw 16(%0), %%mm4 \n\t"\
+ "movq 4(%0), %%mm2 \n\t"\
+ "movq 12(%0), %%mm5 \n\t"\
+ "paddw 6(%0), %%mm2 \n\t"\
+ "paddw 14(%0), %%mm5 \n\t"\
+ "psubw %%mm1, %%mm0 \n\t"\
+ "psubw %%mm4, %%mm3 \n\t"\
+ "psraw $2, %%mm0 \n\t"\
+ "psraw $2, %%mm3 \n\t"\
+ "psubw %%mm1, %%mm0 \n\t"\
+ "psubw %%mm4, %%mm3 \n\t"\
+ "paddsw %%mm2, %%mm0 \n\t"\
+ "paddsw %%mm5, %%mm3 \n\t"\
+ "psraw $2, %%mm0 \n\t"\
+ "psraw $2, %%mm3 \n\t"\
+ "paddw %%mm2, %%mm0 \n\t"\
+ "paddw %%mm5, %%mm3 \n\t"\
+ "psraw $6, %%mm0 \n\t"\
+ "psraw $6, %%mm3 \n\t"\
+ "packuswb %%mm3, %%mm0 \n\t"\
+ OP(%%mm0, (%1),%%mm7, q)\
+ "add $48, %0 \n\t"\
+ "add %3, %1 \n\t"\
+ "decl %2 \n\t"\
+ " jnz 1b \n\t"\
+ : "+a"(tmp), "+c"(dst), "+g"(h)\
+ : "S"((x86_reg)dstStride)\
+ : "memory"\
+ );\
+ tmp += 8 - size*24;\
+ dst += 8 - size*dstStride;\
+ }while(w--);\
+}\
+\
+static void OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 8);\
+}\
+static av_noinline void OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 16);\
+ OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
+}\
+\
+static void OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
+ OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
+ src += 8*srcStride;\
+ dst += 8*dstStride;\
+ OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
+ OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
+}\
+\
+static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
+ OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\
+ OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
+ src += 8*dstStride;\
+ dst += 8*dstStride;\
+ src2 += 8*src2Stride;\
+ OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\
+ OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
+}\
+\
+static av_noinline void OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride, int size){\
+ put_h264_qpel8or16_hv1_lowpass_ ## MMX(tmp, src, tmpStride, srcStride, size);\
+ OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(dst, tmp, dstStride, tmpStride, size);\
+}\
+static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
+ OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride, 8);\
+}\
+\
+static void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
+ OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride, 16);\
+}\
+\
+static av_noinline void OPNAME ## pixels4_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
+{\
+ __asm__ volatile(\
+ "movq (%1), %%mm0 \n\t"\
+ "movq 24(%1), %%mm1 \n\t"\
+ "psraw $5, %%mm0 \n\t"\
+ "psraw $5, %%mm1 \n\t"\
+ "packuswb %%mm0, %%mm0 \n\t"\
+ "packuswb %%mm1, %%mm1 \n\t"\
+ PAVGB" (%0), %%mm0 \n\t"\
+ PAVGB" (%0,%3), %%mm1 \n\t"\
+ OP(%%mm0, (%2), %%mm4, d)\
+ OP(%%mm1, (%2,%4), %%mm5, d)\
+ "lea (%0,%3,2), %0 \n\t"\
+ "lea (%2,%4,2), %2 \n\t"\
+ "movq 48(%1), %%mm0 \n\t"\
+ "movq 72(%1), %%mm1 \n\t"\
+ "psraw $5, %%mm0 \n\t"\
+ "psraw $5, %%mm1 \n\t"\
+ "packuswb %%mm0, %%mm0 \n\t"\
+ "packuswb %%mm1, %%mm1 \n\t"\
+ PAVGB" (%0), %%mm0 \n\t"\
+ PAVGB" (%0,%3), %%mm1 \n\t"\
+ OP(%%mm0, (%2), %%mm4, d)\
+ OP(%%mm1, (%2,%4), %%mm5, d)\
+ :"+a"(src8), "+c"(src16), "+d"(dst)\
+ :"S"((x86_reg)src8Stride), "D"((x86_reg)dstStride)\
+ :"memory");\
+}\
+static av_noinline void OPNAME ## pixels8_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
+{\
+ do{\
+ __asm__ volatile(\
+ "movq (%1), %%mm0 \n\t"\
+ "movq 8(%1), %%mm1 \n\t"\
+ "movq 48(%1), %%mm2 \n\t"\
+ "movq 8+48(%1), %%mm3 \n\t"\
+ "psraw $5, %%mm0 \n\t"\
+ "psraw $5, %%mm1 \n\t"\
+ "psraw $5, %%mm2 \n\t"\
+ "psraw $5, %%mm3 \n\t"\
+ "packuswb %%mm1, %%mm0 \n\t"\
+ "packuswb %%mm3, %%mm2 \n\t"\
+ PAVGB" (%0), %%mm0 \n\t"\
+ PAVGB" (%0,%3), %%mm2 \n\t"\
+ OP(%%mm0, (%2), %%mm5, q)\
+ OP(%%mm2, (%2,%4), %%mm5, q)\
+ ::"a"(src8), "c"(src16), "d"(dst),\
+ "r"((x86_reg)src8Stride), "r"((x86_reg)dstStride)\
+ :"memory");\
+ src8 += 2L*src8Stride;\
+ src16 += 48;\
+ dst += 2L*dstStride;\
+ }while(h-=2);\
+}\
+static void OPNAME ## pixels16_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
+{\
+ OPNAME ## pixels8_l2_shift5_ ## MMX(dst , src16 , src8 , dstStride, src8Stride, h);\
+ OPNAME ## pixels8_l2_shift5_ ## MMX(dst+8, src16+8, src8+8, dstStride, src8Stride, h);\
+}\
+
+
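The *_hv_lowpass functions generated above produce the centre ("j") quarter-pel position in two passes: a vertical 6-tap pass whose unshifted 16-bit sums (with the +16 rounding folded in) go to tmp, then a horizontal 6-tap pass over tmp with a final shift by 10. A plain-C sketch of the combined result, assuming the merged rounding works out to (sum + 512) >> 10:

#include <stdint.h>

/* Scalar reference of the two-pass HV lowpass (centre quarter-pel sample):
 * vertical 6-tap kept at full precision, then horizontal 6-tap and
 * (sum + 512) >> 10 with clipping. Sized for blocks up to 16x16; the caller
 * must supply a 2-sample margin left/above and 3 right/below. */
static void h264_qpel_hv_lowpass_ref(uint8_t *dst, const uint8_t *src,
                                     int dstStride, int srcStride, int w, int h)
{
    int tmp[16][16 + 5];
    int x, y, v;
    for (y = 0; y < h; y++)
        for (x = -2; x < w + 3; x++) {
            const uint8_t *s = src + y*srcStride + x;
            tmp[y][x + 2] = s[-2*srcStride] - 5*s[-srcStride] + 20*s[0]
                          + 20*s[srcStride] - 5*s[2*srcStride] + s[3*srcStride];
        }
    for (y = 0; y < h; y++)
        for (x = 0; x < w; x++) {
            const int *t = &tmp[y][x + 2];
            v = t[-2] - 5*t[-1] + 20*t[0] + 20*t[1] - 5*t[2] + t[3];
            v = (v + 512) >> 10;
            dst[y*dstStride + x] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
}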
+#if ARCH_X86_64
+#define QPEL_H264_H16_XMM(OPNAME, OP, MMX)\
+static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
+ int h=16;\
+ __asm__ volatile(\
+ "pxor %%xmm15, %%xmm15 \n\t"\
+ "movdqa %6, %%xmm14 \n\t"\
+ "movdqa %7, %%xmm13 \n\t"\
+ "1: \n\t"\
+ "lddqu 6(%0), %%xmm1 \n\t"\
+ "lddqu -2(%0), %%xmm7 \n\t"\
+ "movdqa %%xmm1, %%xmm0 \n\t"\
+ "punpckhbw %%xmm15, %%xmm1 \n\t"\
+ "punpcklbw %%xmm15, %%xmm0 \n\t"\
+ "punpcklbw %%xmm15, %%xmm7 \n\t"\
+ "movdqa %%xmm1, %%xmm2 \n\t"\
+ "movdqa %%xmm0, %%xmm6 \n\t"\
+ "movdqa %%xmm1, %%xmm3 \n\t"\
+ "movdqa %%xmm0, %%xmm8 \n\t"\
+ "movdqa %%xmm1, %%xmm4 \n\t"\
+ "movdqa %%xmm0, %%xmm9 \n\t"\
+ "movdqa %%xmm0, %%xmm12 \n\t"\
+ "movdqa %%xmm1, %%xmm11 \n\t"\
+ "palignr $10,%%xmm0, %%xmm11\n\t"\
+ "palignr $10,%%xmm7, %%xmm12\n\t"\
+ "palignr $2, %%xmm0, %%xmm4 \n\t"\
+ "palignr $2, %%xmm7, %%xmm9 \n\t"\
+ "palignr $4, %%xmm0, %%xmm3 \n\t"\
+ "palignr $4, %%xmm7, %%xmm8 \n\t"\
+ "palignr $6, %%xmm0, %%xmm2 \n\t"\
+ "palignr $6, %%xmm7, %%xmm6 \n\t"\
+ "paddw %%xmm0 ,%%xmm11 \n\t"\
+ "palignr $8, %%xmm0, %%xmm1 \n\t"\
+ "palignr $8, %%xmm7, %%xmm0 \n\t"\
+ "paddw %%xmm12,%%xmm7 \n\t"\
+ "paddw %%xmm3, %%xmm2 \n\t"\
+ "paddw %%xmm8, %%xmm6 \n\t"\
+ "paddw %%xmm4, %%xmm1 \n\t"\
+ "paddw %%xmm9, %%xmm0 \n\t"\
+ "psllw $2, %%xmm2 \n\t"\
+ "psllw $2, %%xmm6 \n\t"\
+ "psubw %%xmm1, %%xmm2 \n\t"\
+ "psubw %%xmm0, %%xmm6 \n\t"\
+ "paddw %%xmm13,%%xmm11 \n\t"\
+ "paddw %%xmm13,%%xmm7 \n\t"\
+ "pmullw %%xmm14,%%xmm2 \n\t"\
+ "pmullw %%xmm14,%%xmm6 \n\t"\
+ "lddqu (%2), %%xmm3 \n\t"\
+ "paddw %%xmm11,%%xmm2 \n\t"\
+ "paddw %%xmm7, %%xmm6 \n\t"\
+ "psraw $5, %%xmm2 \n\t"\
+ "psraw $5, %%xmm6 \n\t"\
+ "packuswb %%xmm2,%%xmm6 \n\t"\
+ "pavgb %%xmm3, %%xmm6 \n\t"\
+ OP(%%xmm6, (%1), %%xmm4, dqa)\
+ "add %5, %0 \n\t"\
+ "add %5, %1 \n\t"\
+ "add %4, %2 \n\t"\
+ "decl %3 \n\t"\
+ "jg 1b \n\t"\
+ : "+a"(src), "+c"(dst), "+d"(src2), "+g"(h)\
+ : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride),\
+ "m"(ff_pw_5), "m"(ff_pw_16)\
+ : "memory"\
+ );\
+}
+#else // ARCH_X86_64
+#define QPEL_H264_H16_XMM(OPNAME, OP, MMX)\
+static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
+ OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\
+ OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
+ src += 8*dstStride;\
+ dst += 8*dstStride;\
+ src2 += 8*src2Stride;\
+ OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\
+ OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
+}
+#endif // ARCH_X86_64
+
+#define QPEL_H264_H_XMM(OPNAME, OP, MMX)\
+static av_noinline void OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
+ int h=8;\
+ __asm__ volatile(\
+ "pxor %%xmm7, %%xmm7 \n\t"\
+ "movdqa %0, %%xmm6 \n\t"\
+ :: "m"(ff_pw_5)\
+ );\
+ do{\
+ __asm__ volatile(\
+ "lddqu -2(%0), %%xmm1 \n\t"\
+ "movdqa %%xmm1, %%xmm0 \n\t"\
+ "punpckhbw %%xmm7, %%xmm1 \n\t"\
+ "punpcklbw %%xmm7, %%xmm0 \n\t"\
+ "movdqa %%xmm1, %%xmm2 \n\t"\
+ "movdqa %%xmm1, %%xmm3 \n\t"\
+ "movdqa %%xmm1, %%xmm4 \n\t"\
+ "movdqa %%xmm1, %%xmm5 \n\t"\
+ "palignr $2, %%xmm0, %%xmm4 \n\t"\
+ "palignr $4, %%xmm0, %%xmm3 \n\t"\
+ "palignr $6, %%xmm0, %%xmm2 \n\t"\
+ "palignr $8, %%xmm0, %%xmm1 \n\t"\
+ "palignr $10,%%xmm0, %%xmm5 \n\t"\
+ "paddw %%xmm5, %%xmm0 \n\t"\
+ "paddw %%xmm3, %%xmm2 \n\t"\
+ "paddw %%xmm4, %%xmm1 \n\t"\
+ "psllw $2, %%xmm2 \n\t"\
+ "movq (%2), %%xmm3 \n\t"\
+ "psubw %%xmm1, %%xmm2 \n\t"\
+ "paddw %5, %%xmm0 \n\t"\
+ "pmullw %%xmm6, %%xmm2 \n\t"\
+ "paddw %%xmm0, %%xmm2 \n\t"\
+ "psraw $5, %%xmm2 \n\t"\
+ "packuswb %%xmm2, %%xmm2 \n\t"\
+ "pavgb %%xmm3, %%xmm2 \n\t"\
+ OP(%%xmm2, (%1), %%xmm4, q)\
+ "add %4, %0 \n\t"\
+ "add %4, %1 \n\t"\
+ "add %3, %2 \n\t"\
+ : "+a"(src), "+c"(dst), "+d"(src2)\
+ : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride),\
+ "m"(ff_pw_16)\
+ : "memory"\
+ );\
+ }while(--h);\
+}\
+QPEL_H264_H16_XMM(OPNAME, OP, MMX)\
+\
+static av_noinline void OPNAME ## h264_qpel8_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ int h=8;\
+ __asm__ volatile(\
+ "pxor %%xmm7, %%xmm7 \n\t"\
+ "movdqa %5, %%xmm6 \n\t"\
+ "1: \n\t"\
+ "lddqu -2(%0), %%xmm1 \n\t"\
+ "movdqa %%xmm1, %%xmm0 \n\t"\
+ "punpckhbw %%xmm7, %%xmm1 \n\t"\
+ "punpcklbw %%xmm7, %%xmm0 \n\t"\
+ "movdqa %%xmm1, %%xmm2 \n\t"\
+ "movdqa %%xmm1, %%xmm3 \n\t"\
+ "movdqa %%xmm1, %%xmm4 \n\t"\
+ "movdqa %%xmm1, %%xmm5 \n\t"\
+ "palignr $2, %%xmm0, %%xmm4 \n\t"\
+ "palignr $4, %%xmm0, %%xmm3 \n\t"\
+ "palignr $6, %%xmm0, %%xmm2 \n\t"\
+ "palignr $8, %%xmm0, %%xmm1 \n\t"\
+ "palignr $10,%%xmm0, %%xmm5 \n\t"\
+ "paddw %%xmm5, %%xmm0 \n\t"\
+ "paddw %%xmm3, %%xmm2 \n\t"\
+ "paddw %%xmm4, %%xmm1 \n\t"\
+ "psllw $2, %%xmm2 \n\t"\
+ "psubw %%xmm1, %%xmm2 \n\t"\
+ "paddw %6, %%xmm0 \n\t"\
+ "pmullw %%xmm6, %%xmm2 \n\t"\
+ "paddw %%xmm0, %%xmm2 \n\t"\
+ "psraw $5, %%xmm2 \n\t"\
+ "packuswb %%xmm2, %%xmm2 \n\t"\
+ OP(%%xmm2, (%1), %%xmm4, q)\
+ "add %3, %0 \n\t"\
+ "add %4, %1 \n\t"\
+ "decl %2 \n\t"\
+ " jnz 1b \n\t"\
+ : "+a"(src), "+c"(dst), "+g"(h)\
+ : "D"((x86_reg)srcStride), "S"((x86_reg)dstStride),\
+ "m"(ff_pw_5), "m"(ff_pw_16)\
+ : "memory"\
+ );\
+}\
+static void OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
+ OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
+ src += 8*srcStride;\
+ dst += 8*dstStride;\
+ OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
+ OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
+}\
+
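The SSSE3 routines above evaluate the same horizontal six-tap filter as the earlier MMX2 loops; they merely build the shifted copies of each row with palignr instead of separate unaligned loads. A plain-C reference of the horizontal lowpass:

#include <stdint.h>

/* Scalar reference of the horizontal 6-tap lowpass ('b' half-sample position):
 * out = clip((s[-2] - 5*s[-1] + 20*s[0] + 20*s[1] - 5*s[2] + s[3] + 16) >> 5). */
static void h264_qpel_h_lowpass_ref(uint8_t *dst, const uint8_t *src,
                                    int dstStride, int srcStride, int w, int h)
{
    int x, y, v;
    for (y = 0; y < h; y++, dst += dstStride, src += srcStride)
        for (x = 0; x < w; x++) {
            v = src[x-2] - 5*src[x-1] + 20*src[x] + 20*src[x+1]
              - 5*src[x+2] + src[x+3];
            v = (v + 16) >> 5;
            dst[x] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
}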
+#define QPEL_H264_V_XMM(OPNAME, OP, MMX)\
+static av_noinline void OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
+ src -= 2*srcStride;\
+ \
+ __asm__ volatile(\
+ "pxor %%xmm7, %%xmm7 \n\t"\
+ "movq (%0), %%xmm0 \n\t"\
+ "add %2, %0 \n\t"\
+ "movq (%0), %%xmm1 \n\t"\
+ "add %2, %0 \n\t"\
+ "movq (%0), %%xmm2 \n\t"\
+ "add %2, %0 \n\t"\
+ "movq (%0), %%xmm3 \n\t"\
+ "add %2, %0 \n\t"\
+ "movq (%0), %%xmm4 \n\t"\
+ "add %2, %0 \n\t"\
+ "punpcklbw %%xmm7, %%xmm0 \n\t"\
+ "punpcklbw %%xmm7, %%xmm1 \n\t"\
+ "punpcklbw %%xmm7, %%xmm2 \n\t"\
+ "punpcklbw %%xmm7, %%xmm3 \n\t"\
+ "punpcklbw %%xmm7, %%xmm4 \n\t"\
+ QPEL_H264V_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, OP)\
+ QPEL_H264V_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, OP)\
+ QPEL_H264V_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, OP)\
+ QPEL_H264V_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, OP)\
+ QPEL_H264V_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, OP)\
+ QPEL_H264V_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, OP)\
+ QPEL_H264V_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, OP)\
+ QPEL_H264V_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, OP)\
+ \
+ : "+a"(src), "+c"(dst)\
+ : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
+ : "memory"\
+ );\
+ if(h==16){\
+ __asm__ volatile(\
+ QPEL_H264V_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, OP)\
+ QPEL_H264V_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, OP)\
+ QPEL_H264V_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, OP)\
+ QPEL_H264V_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, OP)\
+ QPEL_H264V_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, OP)\
+ QPEL_H264V_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, OP)\
+ QPEL_H264V_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, OP)\
+ QPEL_H264V_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, OP)\
+ \
+ : "+a"(src), "+c"(dst)\
+ : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
+ : "memory"\
+ );\
+ }\
+}\
+static void OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 8);\
+}\
+static av_noinline void OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 16);\
+ OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
+}
+
+static av_always_inline void put_h264_qpel8or16_hv1_lowpass_sse2(int16_t *tmp, uint8_t *src, int tmpStride, int srcStride, int size){
+ int w = (size+8)>>3;
+ src -= 2*srcStride+2;
+ while(w--){
+ __asm__ volatile(
+ "pxor %%xmm7, %%xmm7 \n\t"
+ "movq (%0), %%xmm0 \n\t"
+ "add %2, %0 \n\t"
+ "movq (%0), %%xmm1 \n\t"
+ "add %2, %0 \n\t"
+ "movq (%0), %%xmm2 \n\t"
+ "add %2, %0 \n\t"
+ "movq (%0), %%xmm3 \n\t"
+ "add %2, %0 \n\t"
+ "movq (%0), %%xmm4 \n\t"
+ "add %2, %0 \n\t"
+ "punpcklbw %%xmm7, %%xmm0 \n\t"
+ "punpcklbw %%xmm7, %%xmm1 \n\t"
+ "punpcklbw %%xmm7, %%xmm2 \n\t"
+ "punpcklbw %%xmm7, %%xmm3 \n\t"
+ "punpcklbw %%xmm7, %%xmm4 \n\t"
+ QPEL_H264HV_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, 0*48)
+ QPEL_H264HV_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, 1*48)
+ QPEL_H264HV_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, 2*48)
+ QPEL_H264HV_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, 3*48)
+ QPEL_H264HV_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, 4*48)
+ QPEL_H264HV_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, 5*48)
+ QPEL_H264HV_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, 6*48)
+ QPEL_H264HV_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, 7*48)
+ : "+a"(src)
+ : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)
+ : "memory"
+ );
+ if(size==16){
+ __asm__ volatile(
+ QPEL_H264HV_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, 8*48)
+ QPEL_H264HV_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, 9*48)
+ QPEL_H264HV_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, 10*48)
+ QPEL_H264HV_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, 11*48)
+ QPEL_H264HV_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, 12*48)
+ QPEL_H264HV_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, 13*48)
+ QPEL_H264HV_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, 14*48)
+ QPEL_H264HV_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, 15*48)
+ : "+a"(src)
+ : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)
+ : "memory"
+ );
+ }
+ tmp += 8;
+ src += 8 - (size+5)*srcStride;
+ }
+}
+
+#define QPEL_H264_HV2_XMM(OPNAME, OP, MMX)\
+static av_always_inline void OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, int dstStride, int tmpStride, int size){\
+ int h = size;\
+ if(size == 16){\
+ __asm__ volatile(\
+ "1: \n\t"\
+ "movdqa 32(%0), %%xmm4 \n\t"\
+ "movdqa 16(%0), %%xmm5 \n\t"\
+ "movdqa (%0), %%xmm7 \n\t"\
+ "movdqa %%xmm4, %%xmm3 \n\t"\
+ "movdqa %%xmm4, %%xmm2 \n\t"\
+ "movdqa %%xmm4, %%xmm1 \n\t"\
+ "movdqa %%xmm4, %%xmm0 \n\t"\
+ "palignr $10, %%xmm5, %%xmm0 \n\t"\
+ "palignr $8, %%xmm5, %%xmm1 \n\t"\
+ "palignr $6, %%xmm5, %%xmm2 \n\t"\
+ "palignr $4, %%xmm5, %%xmm3 \n\t"\
+ "palignr $2, %%xmm5, %%xmm4 \n\t"\
+ "paddw %%xmm5, %%xmm0 \n\t"\
+ "paddw %%xmm4, %%xmm1 \n\t"\
+ "paddw %%xmm3, %%xmm2 \n\t"\
+ "movdqa %%xmm5, %%xmm6 \n\t"\
+ "movdqa %%xmm5, %%xmm4 \n\t"\
+ "movdqa %%xmm5, %%xmm3 \n\t"\
+ "palignr $8, %%xmm7, %%xmm4 \n\t"\
+ "palignr $2, %%xmm7, %%xmm6 \n\t"\
+ "palignr $10, %%xmm7, %%xmm3 \n\t"\
+ "paddw %%xmm6, %%xmm4 \n\t"\
+ "movdqa %%xmm5, %%xmm6 \n\t"\
+ "palignr $6, %%xmm7, %%xmm5 \n\t"\
+ "palignr $4, %%xmm7, %%xmm6 \n\t"\
+ "paddw %%xmm7, %%xmm3 \n\t"\
+ "paddw %%xmm6, %%xmm5 \n\t"\
+ \
+ "psubw %%xmm1, %%xmm0 \n\t"\
+ "psubw %%xmm4, %%xmm3 \n\t"\
+ "psraw $2, %%xmm0 \n\t"\
+ "psraw $2, %%xmm3 \n\t"\
+ "psubw %%xmm1, %%xmm0 \n\t"\
+ "psubw %%xmm4, %%xmm3 \n\t"\
+ "paddw %%xmm2, %%xmm0 \n\t"\
+ "paddw %%xmm5, %%xmm3 \n\t"\
+ "psraw $2, %%xmm0 \n\t"\
+ "psraw $2, %%xmm3 \n\t"\
+ "paddw %%xmm2, %%xmm0 \n\t"\
+ "paddw %%xmm5, %%xmm3 \n\t"\
+ "psraw $6, %%xmm0 \n\t"\
+ "psraw $6, %%xmm3 \n\t"\
+ "packuswb %%xmm0, %%xmm3 \n\t"\
+ OP(%%xmm3, (%1), %%xmm7, dqa)\
+ "add $48, %0 \n\t"\
+ "add %3, %1 \n\t"\
+ "decl %2 \n\t"\
+ " jnz 1b \n\t"\
+ : "+a"(tmp), "+c"(dst), "+g"(h)\
+ : "S"((x86_reg)dstStride)\
+ : "memory"\
+ );\
+ }else{\
+ __asm__ volatile(\
+ "1: \n\t"\
+ "movdqa 16(%0), %%xmm1 \n\t"\
+ "movdqa (%0), %%xmm0 \n\t"\
+ "movdqa %%xmm1, %%xmm2 \n\t"\
+ "movdqa %%xmm1, %%xmm3 \n\t"\
+ "movdqa %%xmm1, %%xmm4 \n\t"\
+ "movdqa %%xmm1, %%xmm5 \n\t"\
+ "palignr $10, %%xmm0, %%xmm5 \n\t"\
+ "palignr $8, %%xmm0, %%xmm4 \n\t"\
+ "palignr $6, %%xmm0, %%xmm3 \n\t"\
+ "palignr $4, %%xmm0, %%xmm2 \n\t"\
+ "palignr $2, %%xmm0, %%xmm1 \n\t"\
+ "paddw %%xmm5, %%xmm0 \n\t"\
+ "paddw %%xmm4, %%xmm1 \n\t"\
+ "paddw %%xmm3, %%xmm2 \n\t"\
+ "psubw %%xmm1, %%xmm0 \n\t"\
+ "psraw $2, %%xmm0 \n\t"\
+ "psubw %%xmm1, %%xmm0 \n\t"\
+ "paddw %%xmm2, %%xmm0 \n\t"\
+ "psraw $2, %%xmm0 \n\t"\
+ "paddw %%xmm2, %%xmm0 \n\t"\
+ "psraw $6, %%xmm0 \n\t"\
+ "packuswb %%xmm0, %%xmm0 \n\t"\
+ OP(%%xmm0, (%1), %%xmm7, q)\
+ "add $48, %0 \n\t"\
+ "add %3, %1 \n\t"\
+ "decl %2 \n\t"\
+ " jnz 1b \n\t"\
+ : "+a"(tmp), "+c"(dst), "+g"(h)\
+ : "S"((x86_reg)dstStride)\
+ : "memory"\
+ );\
+ }\
+}
+
+#define QPEL_H264_HV_XMM(OPNAME, OP, MMX)\
+static av_noinline void OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride, int size){\
+ put_h264_qpel8or16_hv1_lowpass_sse2(tmp, src, tmpStride, srcStride, size);\
+ OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(dst, tmp, dstStride, tmpStride, size);\
+}\
+static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
+ OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst, tmp, src, dstStride, tmpStride, srcStride, 8);\
+}\
+static void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
+ OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst, tmp, src, dstStride, tmpStride, srcStride, 16);\
+}\
+
+#define put_pixels8_l2_sse2 put_pixels8_l2_mmx2
+#define avg_pixels8_l2_sse2 avg_pixels8_l2_mmx2
+#define put_pixels16_l2_sse2 put_pixels16_l2_mmx2
+#define avg_pixels16_l2_sse2 avg_pixels16_l2_mmx2
+#define put_pixels8_l2_ssse3 put_pixels8_l2_mmx2
+#define avg_pixels8_l2_ssse3 avg_pixels8_l2_mmx2
+#define put_pixels16_l2_ssse3 put_pixels16_l2_mmx2
+#define avg_pixels16_l2_ssse3 avg_pixels16_l2_mmx2
+
+#define put_pixels8_l2_shift5_sse2 put_pixels8_l2_shift5_mmx2
+#define avg_pixels8_l2_shift5_sse2 avg_pixels8_l2_shift5_mmx2
+#define put_pixels16_l2_shift5_sse2 put_pixels16_l2_shift5_mmx2
+#define avg_pixels16_l2_shift5_sse2 avg_pixels16_l2_shift5_mmx2
+#define put_pixels8_l2_shift5_ssse3 put_pixels8_l2_shift5_mmx2
+#define avg_pixels8_l2_shift5_ssse3 avg_pixels8_l2_shift5_mmx2
+#define put_pixels16_l2_shift5_ssse3 put_pixels16_l2_shift5_mmx2
+#define avg_pixels16_l2_shift5_ssse3 avg_pixels16_l2_shift5_mmx2
+
+#define put_h264_qpel8_h_lowpass_l2_sse2 put_h264_qpel8_h_lowpass_l2_mmx2
+#define avg_h264_qpel8_h_lowpass_l2_sse2 avg_h264_qpel8_h_lowpass_l2_mmx2
+#define put_h264_qpel16_h_lowpass_l2_sse2 put_h264_qpel16_h_lowpass_l2_mmx2
+#define avg_h264_qpel16_h_lowpass_l2_sse2 avg_h264_qpel16_h_lowpass_l2_mmx2
+
+#define put_h264_qpel8_v_lowpass_ssse3 put_h264_qpel8_v_lowpass_sse2
+#define avg_h264_qpel8_v_lowpass_ssse3 avg_h264_qpel8_v_lowpass_sse2
+#define put_h264_qpel16_v_lowpass_ssse3 put_h264_qpel16_v_lowpass_sse2
+#define avg_h264_qpel16_v_lowpass_ssse3 avg_h264_qpel16_v_lowpass_sse2
+
+#define put_h264_qpel8or16_hv2_lowpass_sse2 put_h264_qpel8or16_hv2_lowpass_mmx2
+#define avg_h264_qpel8or16_hv2_lowpass_sse2 avg_h264_qpel8or16_hv2_lowpass_mmx2
+
+#define H264_MC(OPNAME, SIZE, MMX, ALIGN) \
+H264_MC_C(OPNAME, SIZE, MMX, ALIGN)\
+H264_MC_V(OPNAME, SIZE, MMX, ALIGN)\
+H264_MC_H(OPNAME, SIZE, MMX, ALIGN)\
+H264_MC_HV(OPNAME, SIZE, MMX, ALIGN)\
+
+static void put_h264_qpel16_mc00_sse2 (uint8_t *dst, uint8_t *src, int stride){
+ put_pixels16_sse2(dst, src, stride, 16);
+}
+static void avg_h264_qpel16_mc00_sse2 (uint8_t *dst, uint8_t *src, int stride){
+ avg_pixels16_sse2(dst, src, stride, 16);
+}
+#define put_h264_qpel8_mc00_sse2 put_h264_qpel8_mc00_mmx2
+#define avg_h264_qpel8_mc00_sse2 avg_h264_qpel8_mc00_mmx2
+
+#define H264_MC_C(OPNAME, SIZE, MMX, ALIGN) \
+static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## pixels ## SIZE ## _ ## MMX(dst, src, stride, SIZE);\
+}\
+
+#define H264_MC_H(OPNAME, SIZE, MMX, ALIGN) \
+static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src, stride, stride);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## MMX(dst, src, stride, stride);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src+1, stride, stride);\
+}\
+
+#define H264_MC_V(OPNAME, SIZE, MMX, ALIGN) \
+static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
+ put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src, temp, stride, stride, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## MMX(dst, src, stride, stride);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
+ put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src+stride, temp, stride, stride, SIZE);\
+}\
+
+#define H264_MC_HV(OPNAME, SIZE, MMX, ALIGN) \
+static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
+ put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
+ OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, temp, stride, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
+ put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src+1, SIZE, stride);\
+ OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, temp, stride, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
+ put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
+ OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, temp, stride, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
+ put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src+1, SIZE, stride);\
+ OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, temp, stride, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ DECLARE_ALIGNED(ALIGN, uint16_t, temp)[SIZE*(SIZE<8?12:24)];\
+ OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(dst, temp, src, stride, SIZE, stride);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE];\
+ uint8_t * const halfHV= temp;\
+ int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
+ assert(((int)temp & 7) == 0);\
+ put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
+ OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, halfHV, stride, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE];\
+ uint8_t * const halfHV= temp;\
+ int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
+ assert(((int)temp & 7) == 0);\
+ put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
+ OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, halfHV, stride, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE];\
+ uint8_t * const halfHV= temp;\
+ int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
+ assert(((int)temp & 7) == 0);\
+ put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+2, halfHV, stride, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE];\
+ uint8_t * const halfHV= temp;\
+ int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
+ assert(((int)temp & 7) == 0);\
+ put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+3, halfHV, stride, SIZE, SIZE);\
+}\
+
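In the H264_MC_* macros the mcXY suffix encodes the quarter-sample position: X is the horizontal and Y the vertical offset in quarter pels (mc00 is a plain copy, mc20/mc02 are the half-sample positions, mc22 the centre, and the remaining positions are averages of two of those). As a sketch of how an odd position is formed, using the hypothetical scalar helper h264_qpel_h_lowpass_ref() shown earlier:

#include <stdint.h>

/* Sketch of the mc10 position (quarter-pel left of the horizontal half-sample)
 * for an 8x8 block: average the integer sample with the half-sample result,
 * rounding up, exactly as the _h_lowpass_l2_ routines do with pavgb. */
static void put_qpel8_mc10_ref(uint8_t *dst, const uint8_t *src, int stride)
{
    uint8_t half[8*8];
    int x, y;
    h264_qpel_h_lowpass_ref(half, src, 8, stride, 8, 8);
    for (y = 0; y < 8; y++)
        for (x = 0; x < 8; x++)
            dst[y*stride + x] = (src[y*stride + x] + half[y*8 + x] + 1) >> 1;
}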
+#define H264_MC_4816(MMX)\
+H264_MC(put_, 4, MMX, 8)\
+H264_MC(put_, 8, MMX, 8)\
+H264_MC(put_, 16,MMX, 8)\
+H264_MC(avg_, 4, MMX, 8)\
+H264_MC(avg_, 8, MMX, 8)\
+H264_MC(avg_, 16,MMX, 8)\
+
+#define H264_MC_816(QPEL, XMM)\
+QPEL(put_, 8, XMM, 16)\
+QPEL(put_, 16,XMM, 16)\
+QPEL(avg_, 8, XMM, 16)\
+QPEL(avg_, 16,XMM, 16)\
+
+
+#define AVG_3DNOW_OP(a,b,temp, size) \
+"mov" #size " " #b ", " #temp " \n\t"\
+"pavgusb " #temp ", " #a " \n\t"\
+"mov" #size " " #a ", " #b " \n\t"
+#define AVG_MMX2_OP(a,b,temp, size) \
+"mov" #size " " #b ", " #temp " \n\t"\
+"pavgb " #temp ", " #a " \n\t"\
+"mov" #size " " #a ", " #b " \n\t"
+
+#define PAVGB "pavgusb"
+QPEL_H264(put_, PUT_OP, 3dnow)
+QPEL_H264(avg_, AVG_3DNOW_OP, 3dnow)
+#undef PAVGB
+#define PAVGB "pavgb"
+QPEL_H264(put_, PUT_OP, mmx2)
+QPEL_H264(avg_, AVG_MMX2_OP, mmx2)
+QPEL_H264_V_XMM(put_, PUT_OP, sse2)
+QPEL_H264_V_XMM(avg_, AVG_MMX2_OP, sse2)
+QPEL_H264_HV_XMM(put_, PUT_OP, sse2)
+QPEL_H264_HV_XMM(avg_, AVG_MMX2_OP, sse2)
+#if HAVE_SSSE3
+QPEL_H264_H_XMM(put_, PUT_OP, ssse3)
+QPEL_H264_H_XMM(avg_, AVG_MMX2_OP, ssse3)
+QPEL_H264_HV2_XMM(put_, PUT_OP, ssse3)
+QPEL_H264_HV2_XMM(avg_, AVG_MMX2_OP, ssse3)
+QPEL_H264_HV_XMM(put_, PUT_OP, ssse3)
+QPEL_H264_HV_XMM(avg_, AVG_MMX2_OP, ssse3)
+#endif
+#undef PAVGB
+
+H264_MC_4816(3dnow)
+H264_MC_4816(mmx2)
+H264_MC_816(H264_MC_V, sse2)
+H264_MC_816(H264_MC_HV, sse2)
+#if HAVE_SSSE3
+H264_MC_816(H264_MC_H, ssse3)
+H264_MC_816(H264_MC_HV, ssse3)
+#endif
+
+/* rnd interleaved with rnd div 8, use p+1 to access rnd div 8 */
+DECLARE_ALIGNED_8(static const uint64_t, h264_rnd_reg)[4] = {
+ 0x0020002000200020ULL, 0x0004000400040004ULL, 0x001C001C001C001CULL, 0x0003000300030003ULL
+};
+
+#define H264_CHROMA_OP(S,D)
+#define H264_CHROMA_OP4(S,D,T)
+#define H264_CHROMA_MC8_TMPL put_h264_chroma_generic_mc8_mmx
+#define H264_CHROMA_MC4_TMPL put_h264_chroma_generic_mc4_mmx
+#define H264_CHROMA_MC2_TMPL put_h264_chroma_mc2_mmx2
+#define H264_CHROMA_MC8_MV0 put_pixels8_mmx
+#include "dsputil_h264_template_mmx.c"
+
+static void put_h264_chroma_mc8_mmx_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
+{
+ put_h264_chroma_generic_mc8_mmx(dst, src, stride, h, x, y, h264_rnd_reg);
+}
+static void put_vc1_chroma_mc8_mmx_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
+{
+ put_h264_chroma_generic_mc8_mmx(dst, src, stride, h, x, y, h264_rnd_reg+2);
+}
+static void put_h264_chroma_mc4_mmx(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
+{
+ put_h264_chroma_generic_mc4_mmx(dst, src, stride, h, x, y, h264_rnd_reg);
+}
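
The included template (not shown here) generates the 1/8-pel bilinear chroma interpolation dst = ((8-x)*(8-y)*A + x*(8-y)*B + (8-x)*y*C + x*y*D + r) >> 6, with h264_rnd_reg supplying r = 32 for the H.264 rounding and r = 28 for the VC-1 "nornd" variant; the interleaved 4 and 3 are the corresponding "/8" rounders the template uses for its one-dimensional special case when x or y is zero. A scalar sketch of the 8-wide case:

#include <stdint.h>

/* Scalar sketch of 8-wide chroma motion compensation: A..D weight the four
 * neighbouring samples according to the 1/8-pel fractional position (x, y). */
static void chroma_mc8_ref(uint8_t *dst, const uint8_t *src, int stride,
                           int h, int x, int y, int rnd)
{
    const int A = (8 - x) * (8 - y), B = x * (8 - y);
    const int C = (8 - x) * y,       D = x * y;
    int i, j;
    for (i = 0; i < h; i++, dst += stride, src += stride)
        for (j = 0; j < 8; j++)
            dst[j] = (A*src[j] + B*src[j+1] + C*src[j+stride]
                    + D*src[j+stride+1] + rnd) >> 6;
}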
+
+#undef H264_CHROMA_OP
+#undef H264_CHROMA_OP4
+#undef H264_CHROMA_MC8_TMPL
+#undef H264_CHROMA_MC4_TMPL
+#undef H264_CHROMA_MC2_TMPL
+#undef H264_CHROMA_MC8_MV0
+
+#define H264_CHROMA_OP(S,D) "pavgb " #S ", " #D " \n\t"
+#define H264_CHROMA_OP4(S,D,T) "movd " #S ", " #T " \n\t"\
+ "pavgb " #T ", " #D " \n\t"
+#define H264_CHROMA_MC8_TMPL avg_h264_chroma_generic_mc8_mmx2
+#define H264_CHROMA_MC4_TMPL avg_h264_chroma_generic_mc4_mmx2
+#define H264_CHROMA_MC2_TMPL avg_h264_chroma_mc2_mmx2
+#define H264_CHROMA_MC8_MV0 avg_pixels8_mmx2
+#include "dsputil_h264_template_mmx.c"
+static void avg_h264_chroma_mc8_mmx2_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
+{
+ avg_h264_chroma_generic_mc8_mmx2(dst, src, stride, h, x, y, h264_rnd_reg);
+}
+static void avg_vc1_chroma_mc8_mmx2_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
+{
+ avg_h264_chroma_generic_mc8_mmx2(dst, src, stride, h, x, y, h264_rnd_reg+2);
+}
+static void avg_h264_chroma_mc4_mmx2(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
+{
+ avg_h264_chroma_generic_mc4_mmx2(dst, src, stride, h, x, y, h264_rnd_reg);
+}
+#undef H264_CHROMA_OP
+#undef H264_CHROMA_OP4
+#undef H264_CHROMA_MC8_TMPL
+#undef H264_CHROMA_MC4_TMPL
+#undef H264_CHROMA_MC2_TMPL
+#undef H264_CHROMA_MC8_MV0
+
+#define H264_CHROMA_OP(S,D) "pavgusb " #S ", " #D " \n\t"
+#define H264_CHROMA_OP4(S,D,T) "movd " #S ", " #T " \n\t"\
+ "pavgusb " #T ", " #D " \n\t"
+#define H264_CHROMA_MC8_TMPL avg_h264_chroma_generic_mc8_3dnow
+#define H264_CHROMA_MC4_TMPL avg_h264_chroma_generic_mc4_3dnow
+#define H264_CHROMA_MC8_MV0 avg_pixels8_3dnow
+#include "dsputil_h264_template_mmx.c"
+static void avg_h264_chroma_mc8_3dnow_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
+{
+ avg_h264_chroma_generic_mc8_3dnow(dst, src, stride, h, x, y, h264_rnd_reg);
+}
+static void avg_h264_chroma_mc4_3dnow(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
+{
+ avg_h264_chroma_generic_mc4_3dnow(dst, src, stride, h, x, y, h264_rnd_reg);
+}
+#undef H264_CHROMA_OP
+#undef H264_CHROMA_OP4
+#undef H264_CHROMA_MC8_TMPL
+#undef H264_CHROMA_MC4_TMPL
+#undef H264_CHROMA_MC8_MV0
+
+#if HAVE_SSSE3
+#define AVG_OP(X)
+#undef H264_CHROMA_MC8_TMPL
+#undef H264_CHROMA_MC4_TMPL
+#define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_ssse3
+#define H264_CHROMA_MC4_TMPL put_h264_chroma_mc4_ssse3
+#define H264_CHROMA_MC8_MV0 put_pixels8_mmx
+#include "dsputil_h264_template_ssse3.c"
+static void put_h264_chroma_mc8_ssse3_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
+{
+ put_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 1);
+}
+static void put_vc1_chroma_mc8_ssse3_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
+{
+ put_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 0);
+}
+
+#undef AVG_OP
+#undef H264_CHROMA_MC8_TMPL
+#undef H264_CHROMA_MC4_TMPL
+#undef H264_CHROMA_MC8_MV0
+#define AVG_OP(X) X
+#define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_ssse3
+#define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_ssse3
+#define H264_CHROMA_MC8_MV0 avg_pixels8_mmx2
+#include "dsputil_h264_template_ssse3.c"
+static void avg_h264_chroma_mc8_ssse3_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
+{
+ avg_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 1);
+}
+static void avg_vc1_chroma_mc8_ssse3_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
+{
+ avg_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 0);
+}
+#undef AVG_OP
+#undef H264_CHROMA_MC8_TMPL
+#undef H264_CHROMA_MC4_TMPL
+#undef H264_CHROMA_MC8_MV0
+#endif
+
+/***********************************/
+/* weighted prediction */
+
+static inline void ff_h264_weight_WxH_mmx2(uint8_t *dst, int stride, int log2_denom, int weight, int offset, int w, int h)
+{
+ int x, y;
+ offset <<= log2_denom;
+ offset += (1 << log2_denom) >> 1;
+ __asm__ volatile(
+ "movd %0, %%mm4 \n\t"
+ "movd %1, %%mm5 \n\t"
+ "movd %2, %%mm6 \n\t"
+ "pshufw $0, %%mm4, %%mm4 \n\t"
+ "pshufw $0, %%mm5, %%mm5 \n\t"
+ "pxor %%mm7, %%mm7 \n\t"
+ :: "g"(weight), "g"(offset), "g"(log2_denom)
+ );
+ for(y=0; y<h; y+=2){
+ for(x=0; x<w; x+=4){
+ __asm__ volatile(
+ "movd %0, %%mm0 \n\t"
+ "movd %1, %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+ "pmullw %%mm4, %%mm0 \n\t"
+ "pmullw %%mm4, %%mm1 \n\t"
+ "paddsw %%mm5, %%mm0 \n\t"
+ "paddsw %%mm5, %%mm1 \n\t"
+ "psraw %%mm6, %%mm0 \n\t"
+ "psraw %%mm6, %%mm1 \n\t"
+ "packuswb %%mm7, %%mm0 \n\t"
+ "packuswb %%mm7, %%mm1 \n\t"
+ "movd %%mm0, %0 \n\t"
+ "movd %%mm1, %1 \n\t"
+ : "+m"(*(uint32_t*)(dst+x)),
+ "+m"(*(uint32_t*)(dst+x+stride))
+ );
+ }
+ dst += 2*stride;
+ }
+}
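
This is the H.264 explicit unidirectional weighting, dst = clip(((dst*weight + 2^(log2_denom-1)) >> log2_denom) + offset); the code above merely pre-scales the offset so the whole expression becomes one multiply-add and shift per pixel. A scalar reference:

#include <stdint.h>

/* Scalar reference of unidirectional weighted prediction (in-place on dst). */
static void h264_weight_ref(uint8_t *dst, int stride, int log2_denom,
                            int weight, int offset, int w, int h)
{
    int x, y, v;
    for (y = 0; y < h; y++, dst += stride)
        for (x = 0; x < w; x++) {
            v = dst[x] * weight;
            if (log2_denom)
                v = (v + (1 << (log2_denom - 1))) >> log2_denom;
            v += offset;
            dst[x] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
}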
+
+static inline void ff_h264_biweight_WxH_mmx2(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset, int w, int h)
+{
+ int x, y;
+ offset = ((offset + 1) | 1) << log2_denom;
+ __asm__ volatile(
+ "movd %0, %%mm3 \n\t"
+ "movd %1, %%mm4 \n\t"
+ "movd %2, %%mm5 \n\t"
+ "movd %3, %%mm6 \n\t"
+ "pshufw $0, %%mm3, %%mm3 \n\t"
+ "pshufw $0, %%mm4, %%mm4 \n\t"
+ "pshufw $0, %%mm5, %%mm5 \n\t"
+ "pxor %%mm7, %%mm7 \n\t"
+ :: "g"(weightd), "g"(weights), "g"(offset), "g"(log2_denom+1)
+ );
+ for(y=0; y<h; y++){
+ for(x=0; x<w; x+=4){
+ __asm__ volatile(
+ "movd %0, %%mm0 \n\t"
+ "movd %1, %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+ "pmullw %%mm3, %%mm0 \n\t"
+ "pmullw %%mm4, %%mm1 \n\t"
+ "paddsw %%mm1, %%mm0 \n\t"
+ "paddsw %%mm5, %%mm0 \n\t"
+ "psraw %%mm6, %%mm0 \n\t"
+ "packuswb %%mm0, %%mm0 \n\t"
+ "movd %%mm0, %0 \n\t"
+ : "+m"(*(uint32_t*)(dst+x))
+ : "m"(*(uint32_t*)(src+x))
+ );
+ }
+ src += stride;
+ dst += stride;
+ }
+}
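
For the bidirectional case the spec result is clip(((dst*weightd + src*weights + 2^log2_denom) >> (log2_denom+1)) + o) with o = (o0 + o1 + 1) >> 1; the ((offset + 1) | 1) << log2_denom pre-scaling above suggests the single offset argument is assumed to hold the raw sum o0 + o1, so that the additive term becomes (2*o + 1) << log2_denom. A scalar sketch under that assumption:

#include <stdint.h>

/* Scalar sketch of bidirectional weighted prediction; `offset` is assumed to
 * be the sum of the two per-reference offsets, as in the MMX2 code above. */
static void h264_biweight_ref(uint8_t *dst, const uint8_t *src, int stride,
                              int log2_denom, int weightd, int weights,
                              int offset, int w, int h)
{
    int x, y, v;
    const int add = ((offset + 1) | 1) << log2_denom;
    for (y = 0; y < h; y++, dst += stride, src += stride)
        for (x = 0; x < w; x++) {
            v = (dst[x]*weightd + src[x]*weights + add) >> (log2_denom + 1);
            dst[x] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
}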
+
+#define H264_WEIGHT(W,H) \
+static void ff_h264_biweight_ ## W ## x ## H ## _mmx2(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset){ \
+ ff_h264_biweight_WxH_mmx2(dst, src, stride, log2_denom, weightd, weights, offset, W, H); \
+} \
+static void ff_h264_weight_ ## W ## x ## H ## _mmx2(uint8_t *dst, int stride, int log2_denom, int weight, int offset){ \
+ ff_h264_weight_WxH_mmx2(dst, stride, log2_denom, weight, offset, W, H); \
+}
+
+H264_WEIGHT(16,16)
+H264_WEIGHT(16, 8)
+H264_WEIGHT( 8,16)
+H264_WEIGHT( 8, 8)
+H264_WEIGHT( 8, 4)
+H264_WEIGHT( 4, 8)
+H264_WEIGHT( 4, 4)
+H264_WEIGHT( 4, 2)
+
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/idct_mmx.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/idct_mmx.c
new file mode 100644
index 000000000..a39f54b3f
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/idct_mmx.c
@@ -0,0 +1,605 @@
+/*
+ * idct_mmx.c
+ * Copyright (C) 1999-2001 Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
+ *
+ * This file is part of mpeg2dec, a free MPEG-2 video stream decoder.
+ * See http://libmpeg2.sourceforge.net/ for updates.
+ *
+ * mpeg2dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * mpeg2dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with mpeg2dec; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/common.h"
+#include "libavcodec/dsputil.h"
+
+#include "mmx.h"
+
+#define ATTR_ALIGN(align) __attribute__ ((__aligned__ (align)))
+
+#define ROW_SHIFT 11
+#define COL_SHIFT 6
+
+#define round(bias) ((int)(((bias)+0.5) * (1<<ROW_SHIFT)))
+#define rounder(bias) {round (bias), round (bias)}
+
+
+#if 0
+/* C row IDCT - it is just here to document the MMXEXT and MMX versions */
+static inline void idct_row (int16_t * row, int offset,
+ int16_t * table, int32_t * rounder)
+{
+ int C1, C2, C3, C4, C5, C6, C7;
+ int a0, a1, a2, a3, b0, b1, b2, b3;
+
+ row += offset;
+
+ C1 = table[1];
+ C2 = table[2];
+ C3 = table[3];
+ C4 = table[4];
+ C5 = table[5];
+ C6 = table[6];
+ C7 = table[7];
+
+ a0 = C4*row[0] + C2*row[2] + C4*row[4] + C6*row[6] + *rounder;
+ a1 = C4*row[0] + C6*row[2] - C4*row[4] - C2*row[6] + *rounder;
+ a2 = C4*row[0] - C6*row[2] - C4*row[4] + C2*row[6] + *rounder;
+ a3 = C4*row[0] - C2*row[2] + C4*row[4] - C6*row[6] + *rounder;
+
+ b0 = C1*row[1] + C3*row[3] + C5*row[5] + C7*row[7];
+ b1 = C3*row[1] - C7*row[3] - C1*row[5] - C5*row[7];
+ b2 = C5*row[1] - C1*row[3] + C7*row[5] + C3*row[7];
+ b3 = C7*row[1] - C5*row[3] + C3*row[5] - C1*row[7];
+
+ row[0] = (a0 + b0) >> ROW_SHIFT;
+ row[1] = (a1 + b1) >> ROW_SHIFT;
+ row[2] = (a2 + b2) >> ROW_SHIFT;
+ row[3] = (a3 + b3) >> ROW_SHIFT;
+ row[4] = (a3 - b3) >> ROW_SHIFT;
+ row[5] = (a2 - b2) >> ROW_SHIFT;
+ row[6] = (a1 - b1) >> ROW_SHIFT;
+ row[7] = (a0 - b0) >> ROW_SHIFT;
+}
+#endif
+
+
+/* MMXEXT row IDCT */
+
+#define mmxext_table(c1,c2,c3,c4,c5,c6,c7) { c4, c2, -c4, -c2, \
+ c4, c6, c4, c6, \
+ c1, c3, -c1, -c5, \
+ c5, c7, c3, -c7, \
+ c4, -c6, c4, -c6, \
+ -c4, c2, c4, -c2, \
+ c5, -c1, c3, -c1, \
+ c7, c3, c7, -c5 }
+
+static inline void mmxext_row_head (int16_t * const row, const int offset,
+ const int16_t * const table)
+{
+ movq_m2r (*(row+offset), mm2); /* mm2 = x6 x4 x2 x0 */
+
+ movq_m2r (*(row+offset+4), mm5); /* mm5 = x7 x5 x3 x1 */
+ movq_r2r (mm2, mm0); /* mm0 = x6 x4 x2 x0 */
+
+ movq_m2r (*table, mm3); /* mm3 = -C2 -C4 C2 C4 */
+ movq_r2r (mm5, mm6); /* mm6 = x7 x5 x3 x1 */
+
+ movq_m2r (*(table+4), mm4); /* mm4 = C6 C4 C6 C4 */
+ pmaddwd_r2r (mm0, mm3); /* mm3 = -C4*x4-C2*x6 C4*x0+C2*x2 */
+
+ pshufw_r2r (mm2, mm2, 0x4e); /* mm2 = x2 x0 x6 x4 */
+}
+
+static inline void mmxext_row (const int16_t * const table,
+ const int32_t * const rounder)
+{
+ movq_m2r (*(table+8), mm1); /* mm1 = -C5 -C1 C3 C1 */
+ pmaddwd_r2r (mm2, mm4); /* mm4 = C4*x0+C6*x2 C4*x4+C6*x6 */
+
+ pmaddwd_m2r (*(table+16), mm0); /* mm0 = C4*x4-C6*x6 C4*x0-C6*x2 */
+ pshufw_r2r (mm6, mm6, 0x4e); /* mm6 = x3 x1 x7 x5 */
+
+ movq_m2r (*(table+12), mm7); /* mm7 = -C7 C3 C7 C5 */
+ pmaddwd_r2r (mm5, mm1); /* mm1 = -C1*x5-C5*x7 C1*x1+C3*x3 */
+
+ paddd_m2r (*rounder, mm3); /* mm3 += rounder */
+ pmaddwd_r2r (mm6, mm7); /* mm7 = C3*x1-C7*x3 C5*x5+C7*x7 */
+
+ pmaddwd_m2r (*(table+20), mm2); /* mm2 = C4*x0-C2*x2 -C4*x4+C2*x6 */
+ paddd_r2r (mm4, mm3); /* mm3 = a1 a0 + rounder */
+
+ pmaddwd_m2r (*(table+24), mm5); /* mm5 = C3*x5-C1*x7 C5*x1-C1*x3 */
+ movq_r2r (mm3, mm4); /* mm4 = a1 a0 + rounder */
+
+ pmaddwd_m2r (*(table+28), mm6); /* mm6 = C7*x1-C5*x3 C7*x5+C3*x7 */
+ paddd_r2r (mm7, mm1); /* mm1 = b1 b0 */
+
+ paddd_m2r (*rounder, mm0); /* mm0 += rounder */
+ psubd_r2r (mm1, mm3); /* mm3 = a1-b1 a0-b0 + rounder */
+
+ psrad_i2r (ROW_SHIFT, mm3); /* mm3 = y6 y7 */
+ paddd_r2r (mm4, mm1); /* mm1 = a1+b1 a0+b0 + rounder */
+
+ paddd_r2r (mm2, mm0); /* mm0 = a3 a2 + rounder */
+ psrad_i2r (ROW_SHIFT, mm1); /* mm1 = y1 y0 */
+
+ paddd_r2r (mm6, mm5); /* mm5 = b3 b2 */
+ movq_r2r (mm0, mm4); /* mm4 = a3 a2 + rounder */
+
+ paddd_r2r (mm5, mm0); /* mm0 = a3+b3 a2+b2 + rounder */
+ psubd_r2r (mm5, mm4); /* mm4 = a3-b3 a2-b2 + rounder */
+}
+
+static inline void mmxext_row_tail (int16_t * const row, const int store)
+{
+ psrad_i2r (ROW_SHIFT, mm0); /* mm0 = y3 y2 */
+
+ psrad_i2r (ROW_SHIFT, mm4); /* mm4 = y4 y5 */
+
+ packssdw_r2r (mm0, mm1); /* mm1 = y3 y2 y1 y0 */
+
+ packssdw_r2r (mm3, mm4); /* mm4 = y6 y7 y4 y5 */
+
+ movq_r2m (mm1, *(row+store)); /* save y3 y2 y1 y0 */
+ pshufw_r2r (mm4, mm4, 0xb1); /* mm4 = y7 y6 y5 y4 */
+
+ /* slot */
+
+ movq_r2m (mm4, *(row+store+4)); /* save y7 y6 y5 y4 */
+}
+
+static inline void mmxext_row_mid (int16_t * const row, const int store,
+ const int offset,
+ const int16_t * const table)
+{
+ movq_m2r (*(row+offset), mm2); /* mm2 = x6 x4 x2 x0 */
+ psrad_i2r (ROW_SHIFT, mm0); /* mm0 = y3 y2 */
+
+ movq_m2r (*(row+offset+4), mm5); /* mm5 = x7 x5 x3 x1 */
+ psrad_i2r (ROW_SHIFT, mm4); /* mm4 = y4 y5 */
+
+ packssdw_r2r (mm0, mm1); /* mm1 = y3 y2 y1 y0 */
+ movq_r2r (mm5, mm6); /* mm6 = x7 x5 x3 x1 */
+
+ packssdw_r2r (mm3, mm4); /* mm4 = y6 y7 y4 y5 */
+ movq_r2r (mm2, mm0); /* mm0 = x6 x4 x2 x0 */
+
+ movq_r2m (mm1, *(row+store)); /* save y3 y2 y1 y0 */
+ pshufw_r2r (mm4, mm4, 0xb1); /* mm4 = y7 y6 y5 y4 */
+
+ movq_m2r (*table, mm3); /* mm3 = -C2 -C4 C2 C4 */
+ movq_r2m (mm4, *(row+store+4)); /* save y7 y6 y5 y4 */
+
+ pmaddwd_r2r (mm0, mm3); /* mm3 = -C4*x4-C2*x6 C4*x0+C2*x2 */
+
+ movq_m2r (*(table+4), mm4); /* mm4 = C6 C4 C6 C4 */
+ pshufw_r2r (mm2, mm2, 0x4e); /* mm2 = x2 x0 x6 x4 */
+}
+
+
+/* MMX row IDCT */
+
+#define mmx_table(c1,c2,c3,c4,c5,c6,c7) { c4, c2, c4, c6, \
+ c4, c6, -c4, -c2, \
+ c1, c3, c3, -c7, \
+ c5, c7, -c1, -c5, \
+ c4, -c6, c4, -c2, \
+ -c4, c2, c4, -c6, \
+ c5, -c1, c7, -c5, \
+ c7, c3, c3, -c1 }
+
+static inline void mmx_row_head (int16_t * const row, const int offset,
+ const int16_t * const table)
+{
+ movq_m2r (*(row+offset), mm2); /* mm2 = x6 x4 x2 x0 */
+
+ movq_m2r (*(row+offset+4), mm5); /* mm5 = x7 x5 x3 x1 */
+ movq_r2r (mm2, mm0); /* mm0 = x6 x4 x2 x0 */
+
+ movq_m2r (*table, mm3); /* mm3 = C6 C4 C2 C4 */
+ movq_r2r (mm5, mm6); /* mm6 = x7 x5 x3 x1 */
+
+ punpckldq_r2r (mm0, mm0); /* mm0 = x2 x0 x2 x0 */
+
+ movq_m2r (*(table+4), mm4); /* mm4 = -C2 -C4 C6 C4 */
+ pmaddwd_r2r (mm0, mm3); /* mm3 = C4*x0+C6*x2 C4*x0+C2*x2 */
+
+ movq_m2r (*(table+8), mm1); /* mm1 = -C7 C3 C3 C1 */
+ punpckhdq_r2r (mm2, mm2); /* mm2 = x6 x4 x6 x4 */
+}
+
+static inline void mmx_row (const int16_t * const table,
+ const int32_t * const rounder)
+{
+ pmaddwd_r2r (mm2, mm4); /* mm4 = -C4*x4-C2*x6 C4*x4+C6*x6 */
+ punpckldq_r2r (mm5, mm5); /* mm5 = x3 x1 x3 x1 */
+
+ pmaddwd_m2r (*(table+16), mm0); /* mm0 = C4*x0-C2*x2 C4*x0-C6*x2 */
+ punpckhdq_r2r (mm6, mm6); /* mm6 = x7 x5 x7 x5 */
+
+ movq_m2r (*(table+12), mm7); /* mm7 = -C5 -C1 C7 C5 */
+ pmaddwd_r2r (mm5, mm1); /* mm1 = C3*x1-C7*x3 C1*x1+C3*x3 */
+
+ paddd_m2r (*rounder, mm3); /* mm3 += rounder */
+ pmaddwd_r2r (mm6, mm7); /* mm7 = -C1*x5-C5*x7 C5*x5+C7*x7 */
+
+ pmaddwd_m2r (*(table+20), mm2); /* mm2 = C4*x4-C6*x6 -C4*x4+C2*x6 */
+ paddd_r2r (mm4, mm3); /* mm3 = a1 a0 + rounder */
+
+ pmaddwd_m2r (*(table+24), mm5); /* mm5 = C7*x1-C5*x3 C5*x1-C1*x3 */
+ movq_r2r (mm3, mm4); /* mm4 = a1 a0 + rounder */
+
+ pmaddwd_m2r (*(table+28), mm6); /* mm6 = C3*x5-C1*x7 C7*x5+C3*x7 */
+ paddd_r2r (mm7, mm1); /* mm1 = b1 b0 */
+
+ paddd_m2r (*rounder, mm0); /* mm0 += rounder */
+ psubd_r2r (mm1, mm3); /* mm3 = a1-b1 a0-b0 + rounder */
+
+ psrad_i2r (ROW_SHIFT, mm3); /* mm3 = y6 y7 */
+ paddd_r2r (mm4, mm1); /* mm1 = a1+b1 a0+b0 + rounder */
+
+ paddd_r2r (mm2, mm0); /* mm0 = a3 a2 + rounder */
+ psrad_i2r (ROW_SHIFT, mm1); /* mm1 = y1 y0 */
+
+ paddd_r2r (mm6, mm5); /* mm5 = b3 b2 */
+ movq_r2r (mm0, mm7); /* mm7 = a3 a2 + rounder */
+
+ paddd_r2r (mm5, mm0); /* mm0 = a3+b3 a2+b2 + rounder */
+ psubd_r2r (mm5, mm7); /* mm7 = a3-b3 a2-b2 + rounder */
+}
+
+static inline void mmx_row_tail (int16_t * const row, const int store)
+{
+ psrad_i2r (ROW_SHIFT, mm0); /* mm0 = y3 y2 */
+
+ psrad_i2r (ROW_SHIFT, mm7); /* mm7 = y4 y5 */
+
+ packssdw_r2r (mm0, mm1); /* mm1 = y3 y2 y1 y0 */
+
+ packssdw_r2r (mm3, mm7); /* mm7 = y6 y7 y4 y5 */
+
+ movq_r2m (mm1, *(row+store)); /* save y3 y2 y1 y0 */
+ movq_r2r (mm7, mm4); /* mm4 = y6 y7 y4 y5 */
+
+ pslld_i2r (16, mm7); /* mm7 = y7 0 y5 0 */
+
+ psrld_i2r (16, mm4); /* mm4 = 0 y6 0 y4 */
+
+ por_r2r (mm4, mm7); /* mm7 = y7 y6 y5 y4 */
+
+ /* slot */
+
+ movq_r2m (mm7, *(row+store+4)); /* save y7 y6 y5 y4 */
+}
+
+static inline void mmx_row_mid (int16_t * const row, const int store,
+ const int offset, const int16_t * const table)
+{
+ movq_m2r (*(row+offset), mm2); /* mm2 = x6 x4 x2 x0 */
+ psrad_i2r (ROW_SHIFT, mm0); /* mm0 = y3 y2 */
+
+ movq_m2r (*(row+offset+4), mm5); /* mm5 = x7 x5 x3 x1 */
+ psrad_i2r (ROW_SHIFT, mm7); /* mm7 = y4 y5 */
+
+ packssdw_r2r (mm0, mm1); /* mm1 = y3 y2 y1 y0 */
+ movq_r2r (mm5, mm6); /* mm6 = x7 x5 x3 x1 */
+
+ packssdw_r2r (mm3, mm7); /* mm7 = y6 y7 y4 y5 */
+ movq_r2r (mm2, mm0); /* mm0 = x6 x4 x2 x0 */
+
+ movq_r2m (mm1, *(row+store)); /* save y3 y2 y1 y0 */
+ movq_r2r (mm7, mm1); /* mm1 = y6 y7 y4 y5 */
+
+ punpckldq_r2r (mm0, mm0); /* mm0 = x2 x0 x2 x0 */
+ psrld_i2r (16, mm7); /* mm7 = 0 y6 0 y4 */
+
+ movq_m2r (*table, mm3); /* mm3 = C6 C4 C2 C4 */
+ pslld_i2r (16, mm1); /* mm1 = y7 0 y5 0 */
+
+ movq_m2r (*(table+4), mm4); /* mm4 = -C2 -C4 C6 C4 */
+ por_r2r (mm1, mm7); /* mm7 = y7 y6 y5 y4 */
+
+ movq_m2r (*(table+8), mm1); /* mm1 = -C7 C3 C3 C1 */
+ punpckhdq_r2r (mm2, mm2); /* mm2 = x6 x4 x6 x4 */
+
+ movq_r2m (mm7, *(row+store+4)); /* save y7 y6 y5 y4 */
+ pmaddwd_r2r (mm0, mm3); /* mm3 = C4*x0+C6*x2 C4*x0+C2*x2 */
+}
+
+
+#if 0
+/* C column IDCT - it is just here to document the MMXEXT and MMX versions */
+static inline void idct_col (int16_t * col, int offset)
+{
+/* multiplication - as implemented on mmx */
+#define F(c,x) (((c) * (x)) >> 16)
+
+/* saturation - it helps us handle torture test cases */
+#define S(x) (((x)>32767) ? 32767 : ((x)<-32768) ? -32768 : (x))
+
+ int16_t x0, x1, x2, x3, x4, x5, x6, x7;
+ int16_t y0, y1, y2, y3, y4, y5, y6, y7;
+ int16_t a0, a1, a2, a3, b0, b1, b2, b3;
+ int16_t u04, v04, u26, v26, u17, v17, u35, v35, u12, v12;
+
+ col += offset;
+
+ x0 = col[0*8];
+ x1 = col[1*8];
+ x2 = col[2*8];
+ x3 = col[3*8];
+ x4 = col[4*8];
+ x5 = col[5*8];
+ x6 = col[6*8];
+ x7 = col[7*8];
+
+ u04 = S (x0 + x4);
+ v04 = S (x0 - x4);
+ u26 = S (F (T2, x6) + x2);
+ v26 = S (F (T2, x2) - x6);
+
+ a0 = S (u04 + u26);
+ a1 = S (v04 + v26);
+ a2 = S (v04 - v26);
+ a3 = S (u04 - u26);
+
+ u17 = S (F (T1, x7) + x1);
+ v17 = S (F (T1, x1) - x7);
+ u35 = S (F (T3, x5) + x3);
+ v35 = S (F (T3, x3) - x5);
+
+ b0 = S (u17 + u35);
+ b3 = S (v17 - v35);
+ u12 = S (u17 - u35);
+ v12 = S (v17 + v35);
+ u12 = S (2 * F (C4, u12));
+ v12 = S (2 * F (C4, v12));
+ b1 = S (u12 + v12);
+ b2 = S (u12 - v12);
+
+ y0 = S (a0 + b0) >> COL_SHIFT;
+ y1 = S (a1 + b1) >> COL_SHIFT;
+ y2 = S (a2 + b2) >> COL_SHIFT;
+ y3 = S (a3 + b3) >> COL_SHIFT;
+
+ y4 = S (a3 - b3) >> COL_SHIFT;
+ y5 = S (a2 - b2) >> COL_SHIFT;
+ y6 = S (a1 - b1) >> COL_SHIFT;
+ y7 = S (a0 - b0) >> COL_SHIFT;
+
+ col[0*8] = y0;
+ col[1*8] = y1;
+ col[2*8] = y2;
+ col[3*8] = y3;
+ col[4*8] = y4;
+ col[5*8] = y5;
+ col[6*8] = y6;
+ col[7*8] = y7;
+}
+#endif
+
+
+/* MMX column IDCT */
+static inline void idct_col (int16_t * const col, const int offset)
+{
+#define T1 13036
+#define T2 27146
+#define T3 43790
+#define C4 23170
+
+ static const short t1_vector[] ATTR_ALIGN(8) = {T1,T1,T1,T1};
+ static const short t2_vector[] ATTR_ALIGN(8) = {T2,T2,T2,T2};
+ static const short t3_vector[] ATTR_ALIGN(8) = {T3,T3,T3,T3};
+ static const short c4_vector[] ATTR_ALIGN(8) = {C4,C4,C4,C4};
+
+ /* column code adapted from Peter Gubanov */
+ /* http://www.elecard.com/peter/idct.shtml */
+
+ movq_m2r (*t1_vector, mm0); /* mm0 = T1 */
+
+ movq_m2r (*(col+offset+1*8), mm1); /* mm1 = x1 */
+ movq_r2r (mm0, mm2); /* mm2 = T1 */
+
+ movq_m2r (*(col+offset+7*8), mm4); /* mm4 = x7 */
+ pmulhw_r2r (mm1, mm0); /* mm0 = T1*x1 */
+
+ movq_m2r (*t3_vector, mm5); /* mm5 = T3 */
+ pmulhw_r2r (mm4, mm2); /* mm2 = T1*x7 */
+
+ movq_m2r (*(col+offset+5*8), mm6); /* mm6 = x5 */
+ movq_r2r (mm5, mm7); /* mm7 = T3-1 */
+
+ movq_m2r (*(col+offset+3*8), mm3); /* mm3 = x3 */
+ psubsw_r2r (mm4, mm0); /* mm0 = v17 */
+
+ movq_m2r (*t2_vector, mm4); /* mm4 = T2 */
+ pmulhw_r2r (mm3, mm5); /* mm5 = (T3-1)*x3 */
+
+ paddsw_r2r (mm2, mm1); /* mm1 = u17 */
+ pmulhw_r2r (mm6, mm7); /* mm7 = (T3-1)*x5 */
+
+ /* slot */
+
+ movq_r2r (mm4, mm2); /* mm2 = T2 */
+ paddsw_r2r (mm3, mm5); /* mm5 = T3*x3 */
+
+ pmulhw_m2r (*(col+offset+2*8), mm4);/* mm4 = T2*x2 */
+ paddsw_r2r (mm6, mm7); /* mm7 = T3*x5 */
+
+ psubsw_r2r (mm6, mm5); /* mm5 = v35 */
+ paddsw_r2r (mm3, mm7); /* mm7 = u35 */
+
+ movq_m2r (*(col+offset+6*8), mm3); /* mm3 = x6 */
+ movq_r2r (mm0, mm6); /* mm6 = v17 */
+
+ pmulhw_r2r (mm3, mm2); /* mm2 = T2*x6 */
+ psubsw_r2r (mm5, mm0); /* mm0 = b3 */
+
+ psubsw_r2r (mm3, mm4); /* mm4 = v26 */
+ paddsw_r2r (mm6, mm5); /* mm5 = v12 */
+
+ movq_r2m (mm0, *(col+offset+3*8)); /* save b3 in scratch0 */
+ movq_r2r (mm1, mm6); /* mm6 = u17 */
+
+ paddsw_m2r (*(col+offset+2*8), mm2);/* mm2 = u26 */
+ paddsw_r2r (mm7, mm6); /* mm6 = b0 */
+
+ psubsw_r2r (mm7, mm1); /* mm1 = u12 */
+ movq_r2r (mm1, mm7); /* mm7 = u12 */
+
+ movq_m2r (*(col+offset+0*8), mm3); /* mm3 = x0 */
+ paddsw_r2r (mm5, mm1); /* mm1 = u12+v12 */
+
+ movq_m2r (*c4_vector, mm0); /* mm0 = C4/2 */
+ psubsw_r2r (mm5, mm7); /* mm7 = u12-v12 */
+
+ movq_r2m (mm6, *(col+offset+5*8)); /* save b0 in scratch1 */
+ pmulhw_r2r (mm0, mm1); /* mm1 = b1/2 */
+
+ movq_r2r (mm4, mm6); /* mm6 = v26 */
+ pmulhw_r2r (mm0, mm7); /* mm7 = b2/2 */
+
+ movq_m2r (*(col+offset+4*8), mm5); /* mm5 = x4 */
+ movq_r2r (mm3, mm0); /* mm0 = x0 */
+
+ psubsw_r2r (mm5, mm3); /* mm3 = v04 */
+ paddsw_r2r (mm5, mm0); /* mm0 = u04 */
+
+ paddsw_r2r (mm3, mm4); /* mm4 = a1 */
+ movq_r2r (mm0, mm5); /* mm5 = u04 */
+
+ psubsw_r2r (mm6, mm3); /* mm3 = a2 */
+ paddsw_r2r (mm2, mm5); /* mm5 = a0 */
+
+ paddsw_r2r (mm1, mm1); /* mm1 = b1 */
+ psubsw_r2r (mm2, mm0); /* mm0 = a3 */
+
+ paddsw_r2r (mm7, mm7); /* mm7 = b2 */
+ movq_r2r (mm3, mm2); /* mm2 = a2 */
+
+ movq_r2r (mm4, mm6); /* mm6 = a1 */
+ paddsw_r2r (mm7, mm3); /* mm3 = a2+b2 */
+
+ psraw_i2r (COL_SHIFT, mm3); /* mm3 = y2 */
+ paddsw_r2r (mm1, mm4); /* mm4 = a1+b1 */
+
+ psraw_i2r (COL_SHIFT, mm4); /* mm4 = y1 */
+ psubsw_r2r (mm1, mm6); /* mm6 = a1-b1 */
+
+ movq_m2r (*(col+offset+5*8), mm1); /* mm1 = b0 */
+ psubsw_r2r (mm7, mm2); /* mm2 = a2-b2 */
+
+ psraw_i2r (COL_SHIFT, mm6); /* mm6 = y6 */
+ movq_r2r (mm5, mm7); /* mm7 = a0 */
+
+ movq_r2m (mm4, *(col+offset+1*8)); /* save y1 */
+ psraw_i2r (COL_SHIFT, mm2); /* mm2 = y5 */
+
+ movq_r2m (mm3, *(col+offset+2*8)); /* save y2 */
+ paddsw_r2r (mm1, mm5); /* mm5 = a0+b0 */
+
+ movq_m2r (*(col+offset+3*8), mm4); /* mm4 = b3 */
+ psubsw_r2r (mm1, mm7); /* mm7 = a0-b0 */
+
+ psraw_i2r (COL_SHIFT, mm5); /* mm5 = y0 */
+ movq_r2r (mm0, mm3); /* mm3 = a3 */
+
+ movq_r2m (mm2, *(col+offset+5*8)); /* save y5 */
+ psubsw_r2r (mm4, mm3); /* mm3 = a3-b3 */
+
+ psraw_i2r (COL_SHIFT, mm7); /* mm7 = y7 */
+ paddsw_r2r (mm0, mm4); /* mm4 = a3+b3 */
+
+ movq_r2m (mm5, *(col+offset+0*8)); /* save y0 */
+ psraw_i2r (COL_SHIFT, mm3); /* mm3 = y4 */
+
+ movq_r2m (mm6, *(col+offset+6*8)); /* save y6 */
+ psraw_i2r (COL_SHIFT, mm4); /* mm4 = y3 */
+
+ movq_r2m (mm7, *(col+offset+7*8)); /* save y7 */
+
+ movq_r2m (mm3, *(col+offset+4*8)); /* save y4 */
+
+ movq_r2m (mm4, *(col+offset+3*8)); /* save y3 */
+
+#undef T1
+#undef T2
+#undef T3
+#undef C4
+}
+
+
+static const int32_t rounder0[] ATTR_ALIGN(8) =
+ rounder ((1 << (COL_SHIFT - 1)) - 0.5);
+static const int32_t rounder4[] ATTR_ALIGN(8) = rounder (0);
+static const int32_t rounder1[] ATTR_ALIGN(8) =
+ rounder (1.25683487303); /* C1*(C1/C4+C1+C7)/2 */
+static const int32_t rounder7[] ATTR_ALIGN(8) =
+ rounder (-0.25); /* C1*(C7/C4+C7-C1)/2 */
+static const int32_t rounder2[] ATTR_ALIGN(8) =
+ rounder (0.60355339059); /* C2 * (C6+C2)/2 */
+static const int32_t rounder6[] ATTR_ALIGN(8) =
+ rounder (-0.25); /* C2 * (C6-C2)/2 */
+static const int32_t rounder3[] ATTR_ALIGN(8) =
+ rounder (0.087788325588); /* C3*(-C3/C4+C3+C5)/2 */
+static const int32_t rounder5[] ATTR_ALIGN(8) =
+ rounder (-0.441341716183); /* C3*(-C5/C4+C5-C3)/2 */
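For reference, each bias above is just the cosine expression quoted in its comment pushed through the round() macro defined earlier (ROW_SHIFT = 11, so the scale factor is 2048). A throwaway self-check, illustrative only and not part of the patch (M_PI assumed available from math.h), reproduces two of the entries:

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        const double C1 = cos(1 * M_PI / 16), C3 = cos(3 * M_PI / 16);
        const double C4 = cos(4 * M_PI / 16), C5 = cos(5 * M_PI / 16);
        const double C7 = cos(7 * M_PI / 16);

        /* same arithmetic as round(bias): (int)((bias + 0.5) * (1 << 11)) */
        printf("%d\n", (int)((C1 * ( C1 / C4 + C1 + C7) / 2 + 0.5) * 2048)); /* prints 3597 (rounder1) */
        printf("%d\n", (int)((C3 * (-C3 / C4 + C3 + C5) / 2 + 0.5) * 2048)); /* prints 1203 (rounder3) */
        return 0;
    }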
+
+#undef COL_SHIFT
+#undef ROW_SHIFT
+
+#define declare_idct(idct,table,idct_row_head,idct_row,idct_row_tail,idct_row_mid) \
+void idct (int16_t * const block) \
+{ \
+ static const int16_t table04[] ATTR_ALIGN(16) = \
+ table (22725, 21407, 19266, 16384, 12873, 8867, 4520); \
+ static const int16_t table17[] ATTR_ALIGN(16) = \
+ table (31521, 29692, 26722, 22725, 17855, 12299, 6270); \
+ static const int16_t table26[] ATTR_ALIGN(16) = \
+ table (29692, 27969, 25172, 21407, 16819, 11585, 5906); \
+ static const int16_t table35[] ATTR_ALIGN(16) = \
+ table (26722, 25172, 22654, 19266, 15137, 10426, 5315); \
+ \
+ idct_row_head (block, 0*8, table04); \
+ idct_row (table04, rounder0); \
+ idct_row_mid (block, 0*8, 4*8, table04); \
+ idct_row (table04, rounder4); \
+ idct_row_mid (block, 4*8, 1*8, table17); \
+ idct_row (table17, rounder1); \
+ idct_row_mid (block, 1*8, 7*8, table17); \
+ idct_row (table17, rounder7); \
+ idct_row_mid (block, 7*8, 2*8, table26); \
+ idct_row (table26, rounder2); \
+ idct_row_mid (block, 2*8, 6*8, table26); \
+ idct_row (table26, rounder6); \
+ idct_row_mid (block, 6*8, 3*8, table35); \
+ idct_row (table35, rounder3); \
+ idct_row_mid (block, 3*8, 5*8, table35); \
+ idct_row (table35, rounder5); \
+ idct_row_tail (block, 5*8); \
+ \
+ idct_col (block, 0); \
+ idct_col (block, 4); \
+}
+
+void ff_mmx_idct(DCTELEM *block);
+void ff_mmxext_idct(DCTELEM *block);
+
+declare_idct (ff_mmxext_idct, mmxext_table,
+ mmxext_row_head, mmxext_row, mmxext_row_tail, mmxext_row_mid)
+
+declare_idct (ff_mmx_idct, mmx_table,
+ mmx_row_head, mmx_row, mmx_row_tail, mmx_row_mid)
+
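For orientation, both entry points generated above transform an 8x8 block of 16-bit coefficients in place (rows first, then columns). A minimal caller might look like the sketch below; it assumes DCTELEM is the usual 16-bit type and that MMX/MMXEXT support has already been checked, and it is not part of the patch:

    #include <stdint.h>

    void ff_mmx_idct(int16_t *block);      /* prototypes as declared above; DCTELEM is 16-bit here */
    void ff_mmxext_idct(int16_t *block);

    static void idct_dc_only_example(void)
    {
        int16_t block[64] = { 0 };   /* row-major 8x8 coefficient block */
        block[0] = 1024;             /* DC-only input */
        ff_mmx_idct(block);          /* in-place inverse transform */
    }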
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/idct_mmx_xvid.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/idct_mmx_xvid.c
new file mode 100644
index 000000000..22cd50019
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/idct_mmx_xvid.c
@@ -0,0 +1,525 @@
+/*
+ * XVID MPEG-4 VIDEO CODEC
+ * - MMX and XMM inverse discrete cosine transform -

+ *
+ * Copyright(C) 2001 Peter Ross <pross@xvid.org>
+ *
+ * Originally provided by Intel at AP-922
+ * http://developer.intel.com/vtune/cbts/strmsimd/922down.htm
+ * (See more app notes at http://developer.intel.com/vtune/cbts/strmsimd/appnotes.htm)
+ * but in a limited form.
+ * New macro implements a column part for precise iDCT
+ * The routine precision now satisfies IEEE standard 1180-1990.
+ *
+ * Copyright(C) 2000-2001 Peter Gubanov <peter@elecard.net.ru>
+ * Rounding trick Copyright(C) 2000 Michel Lespinasse <walken@zoy.org>
+ *
+ * http://www.elecard.com/peter/idct.html
+ * http://www.linuxvideo.org/mpeg2dec/
+ *
+ * These examples contain code fragments for first stage iDCT 8x8
+ * (for rows) and first stage DCT 8x8 (for columns)
+ *
+ * conversion to gcc syntax by Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with FFmpeg; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <inttypes.h>
+#include "libavcodec/avcodec.h"
+
+//=============================================================================
+// Macros and other preprocessor constants
+//=============================================================================
+
+#define BITS_INV_ACC 5 // 4 or 5 for IEEE
+#define SHIFT_INV_ROW (16 - BITS_INV_ACC) //11
+#define SHIFT_INV_COL (1 + BITS_INV_ACC) //6
+#define RND_INV_ROW (1024 * (6 - BITS_INV_ACC))
+#define RND_INV_COL (16 * (BITS_INV_ACC - 3))
+#define RND_INV_CORR (RND_INV_COL - 1)
+
+#define BITS_FRW_ACC 3 // 2 or 3 for accuracy
+#define SHIFT_FRW_COL BITS_FRW_ACC
+#define SHIFT_FRW_ROW (BITS_FRW_ACC + 17)
+#define RND_FRW_ROW (262144*(BITS_FRW_ACC - 1))
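For reference, with BITS_INV_ACC fixed at 5 the derived constants above work out as follows (a quick expansion, not part of the patch):

    /* SHIFT_INV_ROW = 16 - 5         = 11
     * SHIFT_INV_COL =  1 + 5         =  6
     * RND_INV_ROW   = 1024 * (6 - 5) = 1024
     * RND_INV_COL   = 16 * (5 - 3)   = 32
     * RND_INV_CORR  = 32 - 1         = 31
     */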
+
+
+//-----------------------------------------------------------------------------
+// Various memory constants (trigonometric values or rounding values)
+//-----------------------------------------------------------------------------
+
+
+DECLARE_ALIGNED(8, static const int16_t, tg_1_16)[4*4] = {
+ 13036,13036,13036,13036, // tan( pi/16) * (1<<16)
+ 27146,27146,27146,27146, // tan(2pi/16) * (1<<16)
+ -21746,-21746,-21746,-21746, // (tan(3pi/16) - 1) * (1<<16)
+ 23170,23170,23170,23170}; // cos( pi/4) * (1<<15)
+
+DECLARE_ALIGNED(8, static const int32_t, rounder_0)[2*8] = {
+ 65536,65536,
+ 3597,3597,
+ 2260,2260,
+ 1203,1203,
+ 0,0,
+ 120,120,
+ 512,512,
+ 512,512};
+
+//-----------------------------------------------------------------------------
+//
+// The first stage iDCT 8x8 - inverse DCTs of rows
+//
+//-----------------------------------------------------------------------------
+// The 8-point inverse DCT direct algorithm
+//-----------------------------------------------------------------------------
+//
+// static const short w[32] = {
+// FIX(cos_4_16), FIX(cos_2_16), FIX(cos_4_16), FIX(cos_6_16),
+// FIX(cos_4_16), FIX(cos_6_16), -FIX(cos_4_16), -FIX(cos_2_16),
+// FIX(cos_4_16), -FIX(cos_6_16), -FIX(cos_4_16), FIX(cos_2_16),
+// FIX(cos_4_16), -FIX(cos_2_16), FIX(cos_4_16), -FIX(cos_6_16),
+// FIX(cos_1_16), FIX(cos_3_16), FIX(cos_5_16), FIX(cos_7_16),
+// FIX(cos_3_16), -FIX(cos_7_16), -FIX(cos_1_16), -FIX(cos_5_16),
+// FIX(cos_5_16), -FIX(cos_1_16), FIX(cos_7_16), FIX(cos_3_16),
+// FIX(cos_7_16), -FIX(cos_5_16), FIX(cos_3_16), -FIX(cos_1_16) };
+//
+// #define DCT_8_INV_ROW(x, y)
+// {
+// int a0, a1, a2, a3, b0, b1, b2, b3;
+//
+// a0 =x[0]*w[0]+x[2]*w[1]+x[4]*w[2]+x[6]*w[3];
+// a1 =x[0]*w[4]+x[2]*w[5]+x[4]*w[6]+x[6]*w[7];
+// a2 = x[0] * w[ 8] + x[2] * w[ 9] + x[4] * w[10] + x[6] * w[11];
+// a3 = x[0] * w[12] + x[2] * w[13] + x[4] * w[14] + x[6] * w[15];
+// b0 = x[1] * w[16] + x[3] * w[17] + x[5] * w[18] + x[7] * w[19];
+// b1 = x[1] * w[20] + x[3] * w[21] + x[5] * w[22] + x[7] * w[23];
+// b2 = x[1] * w[24] + x[3] * w[25] + x[5] * w[26] + x[7] * w[27];
+// b3 = x[1] * w[28] + x[3] * w[29] + x[5] * w[30] + x[7] * w[31];
+//
+// y[0] = SHIFT_ROUND ( a0 + b0 );
+// y[1] = SHIFT_ROUND ( a1 + b1 );
+// y[2] = SHIFT_ROUND ( a2 + b2 );
+// y[3] = SHIFT_ROUND ( a3 + b3 );
+// y[4] = SHIFT_ROUND ( a3 - b3 );
+// y[5] = SHIFT_ROUND ( a2 - b2 );
+// y[6] = SHIFT_ROUND ( a1 - b1 );
+// y[7] = SHIFT_ROUND ( a0 - b0 );
+// }
+//
+//-----------------------------------------------------------------------------
+//
+// In this implementation the outputs of the iDCT-1D are multiplied
+// for rows 0,4 - by cos_4_16,
+// for rows 1,7 - by cos_1_16,
+// for rows 2,6 - by cos_2_16,
+// for rows 3,5 - by cos_3_16
+// and are shifted to the left for better accuracy
+//
+// For the constants used,
+// FIX(float_const) = (short) (float_const * (1<<15) + 0.5)
+//
+//-----------------------------------------------------------------------------
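As a concrete check of the scaling just described (illustrative only): with FIX(x) = (short)(x * (1<<15) + 0.5) and cos_k_16 = cos(k*pi/16), the leading entries of the tables below follow directly:

    /* rows 0,4 table:  FIX(cos_4_16 * cos_4_16) = FIX(0.70711 * 0.70711) = FIX(0.50000) = 16384
     *                  FIX(cos_4_16 * cos_2_16) = FIX(0.70711 * 0.92388) = FIX(0.65328) = 21407
     * rows 1,7 table:  FIX(cos_4_16 * cos_1_16) = FIX(0.70711 * 0.98079) = FIX(0.69352) = 22725
     */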
+
+//-----------------------------------------------------------------------------
+// Tables for mmx processors
+//-----------------------------------------------------------------------------
+
+// Table for rows 0,4 - constants are multiplied by cos_4_16
+DECLARE_ALIGNED(8, static const int16_t, tab_i_04_mmx)[32*4] = {
+ 16384,16384,16384,-16384, // movq-> w06 w04 w02 w00
+ 21407,8867,8867,-21407, // w07 w05 w03 w01
+ 16384,-16384,16384,16384, // w14 w12 w10 w08
+ -8867,21407,-21407,-8867, // w15 w13 w11 w09
+ 22725,12873,19266,-22725, // w22 w20 w18 w16
+ 19266,4520,-4520,-12873, // w23 w21 w19 w17
+ 12873,4520,4520,19266, // w30 w28 w26 w24
+ -22725,19266,-12873,-22725, // w31 w29 w27 w25
+// Table for rows 1,7 - constants are multiplied by cos_1_16
+ 22725,22725,22725,-22725, // movq-> w06 w04 w02 w00
+ 29692,12299,12299,-29692, // w07 w05 w03 w01
+ 22725,-22725,22725,22725, // w14 w12 w10 w08
+ -12299,29692,-29692,-12299, // w15 w13 w11 w09
+ 31521,17855,26722,-31521, // w22 w20 w18 w16
+ 26722,6270,-6270,-17855, // w23 w21 w19 w17
+ 17855,6270,6270,26722, // w30 w28 w26 w24
+ -31521,26722,-17855,-31521, // w31 w29 w27 w25
+// Table for rows 2,6 - constants are multiplied by cos_2_16
+ 21407,21407,21407,-21407, // movq-> w06 w04 w02 w00
+ 27969,11585,11585,-27969, // w07 w05 w03 w01
+ 21407,-21407,21407,21407, // w14 w12 w10 w08
+ -11585,27969,-27969,-11585, // w15 w13 w11 w09
+ 29692,16819,25172,-29692, // w22 w20 w18 w16
+ 25172,5906,-5906,-16819, // w23 w21 w19 w17
+ 16819,5906,5906,25172, // w30 w28 w26 w24
+ -29692,25172,-16819,-29692, // w31 w29 w27 w25
+// Table for rows 3,5 - constants are multiplied by cos_3_16
+ 19266,19266,19266,-19266, // movq-> w06 w04 w02 w00
+ 25172,10426,10426,-25172, // w07 w05 w03 w01
+ 19266,-19266,19266,19266, // w14 w12 w10 w08
+ -10426,25172,-25172,-10426, // w15 w13 w11 w09
+ 26722,15137,22654,-26722, // w22 w20 w18 w16
+ 22654,5315,-5315,-15137, // w23 w21 w19 w17
+ 15137,5315,5315,22654, // w30 w28 w26 w24
+ -26722,22654,-15137,-26722, // w31 w29 w27 w25
+};
+//-----------------------------------------------------------------------------
+// Tables for xmm processors
+//-----------------------------------------------------------------------------
+
+// %3 for rows 0,4 - constants are multiplied by cos_4_16
+DECLARE_ALIGNED(8, static const int16_t, tab_i_04_xmm)[32*4] = {
+ 16384,21407,16384,8867, // movq-> w05 w04 w01 w00
+ 16384,8867,-16384,-21407, // w07 w06 w03 w02
+ 16384,-8867,16384,-21407, // w13 w12 w09 w08
+ -16384,21407,16384,-8867, // w15 w14 w11 w10
+ 22725,19266,19266,-4520, // w21 w20 w17 w16
+ 12873,4520,-22725,-12873, // w23 w22 w19 w18
+ 12873,-22725,4520,-12873, // w29 w28 w25 w24
+ 4520,19266,19266,-22725, // w31 w30 w27 w26
+// %3 for rows 1,7 - constants are multiplied by cos_1_16
+ 22725,29692,22725,12299, // movq-> w05 w04 w01 w00
+ 22725,12299,-22725,-29692, // w07 w06 w03 w02
+ 22725,-12299,22725,-29692, // w13 w12 w09 w08
+ -22725,29692,22725,-12299, // w15 w14 w11 w10
+ 31521,26722,26722,-6270, // w21 w20 w17 w16
+ 17855,6270,-31521,-17855, // w23 w22 w19 w18
+ 17855,-31521,6270,-17855, // w29 w28 w25 w24
+ 6270,26722,26722,-31521, // w31 w30 w27 w26
+// %3 for rows 2,6 - constants are multiplied by cos_2_16
+ 21407,27969,21407,11585, // movq-> w05 w04 w01 w00
+ 21407,11585,-21407,-27969, // w07 w06 w03 w02
+ 21407,-11585,21407,-27969, // w13 w12 w09 w08
+ -21407,27969,21407,-11585, // w15 w14 w11 w10
+ 29692,25172,25172,-5906, // w21 w20 w17 w16
+ 16819,5906,-29692,-16819, // w23 w22 w19 w18
+ 16819,-29692,5906,-16819, // w29 w28 w25 w24
+ 5906,25172,25172,-29692, // w31 w30 w27 w26
+// %3 for rows 3,5 - constants are multiplied by cos_3_16
+ 19266,25172,19266,10426, // movq-> w05 w04 w01 w00
+ 19266,10426,-19266,-25172, // w07 w06 w03 w02
+ 19266,-10426,19266,-25172, // w13 w12 w09 w08
+ -19266,25172,19266,-10426, // w15 w14 w11 w10
+ 26722,22654,22654,-5315, // w21 w20 w17 w16
+ 15137,5315,-26722,-15137, // w23 w22 w19 w18
+ 15137,-26722,5315,-15137, // w29 w28 w25 w24
+ 5315,22654,22654,-26722, // w31 w30 w27 w26
+};
+//=============================================================================
+// Helper macros for the code
+//=============================================================================
+
+//-----------------------------------------------------------------------------
+// DCT_8_INV_ROW_MMX( INP, OUT, TABLE, ROUNDER )
+//-----------------------------------------------------------------------------
+
+#define DCT_8_INV_ROW_MMX(A1,A2,A3,A4)\
+ "movq " #A1 ",%%mm0 \n\t"/* 0 ; x3 x2 x1 x0*/\
+ "movq 8+" #A1 ",%%mm1 \n\t"/* 1 ; x7 x6 x5 x4*/\
+ "movq %%mm0,%%mm2 \n\t"/* 2 ; x3 x2 x1 x0*/\
+ "movq " #A3 ",%%mm3 \n\t"/* 3 ; w06 w04 w02 w00*/\
+ "punpcklwd %%mm1,%%mm0 \n\t"/* x5 x1 x4 x0*/\
+ "movq %%mm0,%%mm5 \n\t"/* 5 ; x5 x1 x4 x0*/\
+ "punpckldq %%mm0,%%mm0 \n\t"/* x4 x0 x4 x0*/\
+ "movq 8+" #A3 ",%%mm4 \n\t"/* 4 ; w07 w05 w03 w01*/\
+ "punpckhwd %%mm1,%%mm2 \n\t"/* 1 ; x7 x3 x6 x2*/\
+ "pmaddwd %%mm0,%%mm3 \n\t"/* x4*w06+x0*w04 x4*w02+x0*w00*/\
+ "movq %%mm2,%%mm6 \n\t"/* 6 ; x7 x3 x6 x2*/\
+ "movq 32+" #A3 ",%%mm1 \n\t"/* 1 ; w22 w20 w18 w16*/\
+ "punpckldq %%mm2,%%mm2 \n\t"/* x6 x2 x6 x2*/\
+ "pmaddwd %%mm2,%%mm4 \n\t"/* x6*w07+x2*w05 x6*w03+x2*w01*/\
+ "punpckhdq %%mm5,%%mm5 \n\t"/* x5 x1 x5 x1*/\
+ "pmaddwd 16+" #A3 ",%%mm0 \n\t"/* x4*w14+x0*w12 x4*w10+x0*w08*/\
+ "punpckhdq %%mm6,%%mm6 \n\t"/* x7 x3 x7 x3*/\
+ "movq 40+" #A3 ",%%mm7 \n\t"/* 7 ; w23 w21 w19 w17*/\
+ "pmaddwd %%mm5,%%mm1 \n\t"/* x5*w22+x1*w20 x5*w18+x1*w16*/\
+ "paddd " #A4 ",%%mm3 \n\t"/* +%4*/\
+ "pmaddwd %%mm6,%%mm7 \n\t"/* x7*w23+x3*w21 x7*w19+x3*w17*/\
+ "pmaddwd 24+" #A3 ",%%mm2 \n\t"/* x6*w15+x2*w13 x6*w11+x2*w09*/\
+ "paddd %%mm4,%%mm3 \n\t"/* 4 ; a1=sum(even1) a0=sum(even0)*/\
+ "pmaddwd 48+" #A3 ",%%mm5 \n\t"/* x5*w30+x1*w28 x5*w26+x1*w24*/\
+ "movq %%mm3,%%mm4 \n\t"/* 4 ; a1 a0*/\
+ "pmaddwd 56+" #A3 ",%%mm6 \n\t"/* x7*w31+x3*w29 x7*w27+x3*w25*/\
+ "paddd %%mm7,%%mm1 \n\t"/* 7 ; b1=sum(odd1) b0=sum(odd0)*/\
+ "paddd " #A4 ",%%mm0 \n\t"/* +%4*/\
+ "psubd %%mm1,%%mm3 \n\t"/* a1-b1 a0-b0*/\
+ "psrad $11,%%mm3 \n\t"/* y6=a1-b1 y7=a0-b0*/\
+ "paddd %%mm4,%%mm1 \n\t"/* 4 ; a1+b1 a0+b0*/\
+ "paddd %%mm2,%%mm0 \n\t"/* 2 ; a3=sum(even3) a2=sum(even2)*/\
+ "psrad $11,%%mm1 \n\t"/* y1=a1+b1 y0=a0+b0*/\
+ "paddd %%mm6,%%mm5 \n\t"/* 6 ; b3=sum(odd3) b2=sum(odd2)*/\
+ "movq %%mm0,%%mm4 \n\t"/* 4 ; a3 a2*/\
+ "paddd %%mm5,%%mm0 \n\t"/* a3+b3 a2+b2*/\
+ "psubd %%mm5,%%mm4 \n\t"/* 5 ; a3-b3 a2-b2*/\
+ "psrad $11,%%mm0 \n\t"/* y3=a3+b3 y2=a2+b2*/\
+ "psrad $11,%%mm4 \n\t"/* y4=a3-b3 y5=a2-b2*/\
+ "packssdw %%mm0,%%mm1 \n\t"/* 0 ; y3 y2 y1 y0*/\
+ "packssdw %%mm3,%%mm4 \n\t"/* 3 ; y6 y7 y4 y5*/\
+ "movq %%mm4,%%mm7 \n\t"/* 7 ; y6 y7 y4 y5*/\
+ "psrld $16,%%mm4 \n\t"/* 0 y6 0 y4*/\
+ "pslld $16,%%mm7 \n\t"/* y7 0 y5 0*/\
+ "movq %%mm1," #A2 " \n\t"/* 1 ; save y3 y2 y1 y0*/\
+ "por %%mm4,%%mm7 \n\t"/* 4 ; y7 y6 y5 y4*/\
+ "movq %%mm7,8 +" #A2 "\n\t"/* 7 ; save y7 y6 y5 y4*/\
+
+
+//-----------------------------------------------------------------------------
+// DCT_8_INV_ROW_XMM( INP, OUT, TABLE, ROUNDER )
+//-----------------------------------------------------------------------------
+
+#define DCT_8_INV_ROW_XMM(A1,A2,A3,A4)\
+ "movq " #A1 ",%%mm0 \n\t"/* 0 ; x3 x2 x1 x0*/\
+ "movq 8+" #A1 ",%%mm1 \n\t"/* 1 ; x7 x6 x5 x4*/\
+ "movq %%mm0,%%mm2 \n\t"/* 2 ; x3 x2 x1 x0*/\
+ "movq " #A3 ",%%mm3 \n\t"/* 3 ; w05 w04 w01 w00*/\
+ "pshufw $0x88,%%mm0,%%mm0 \n\t"/* x2 x0 x2 x0*/\
+ "movq 8+" #A3 ",%%mm4 \n\t"/* 4 ; w07 w06 w03 w02*/\
+ "movq %%mm1,%%mm5 \n\t"/* 5 ; x7 x6 x5 x4*/\
+ "pmaddwd %%mm0,%%mm3 \n\t"/* x2*w05+x0*w04 x2*w01+x0*w00*/\
+ "movq 32+" #A3 ",%%mm6 \n\t"/* 6 ; w21 w20 w17 w16*/\
+ "pshufw $0x88,%%mm1,%%mm1 \n\t"/* x6 x4 x6 x4*/\
+ "pmaddwd %%mm1,%%mm4 \n\t"/* x6*w07+x4*w06 x6*w03+x4*w02*/\
+ "movq 40+" #A3 ",%%mm7 \n\t"/* 7 ; w23 w22 w19 w18*/\
+ "pshufw $0xdd,%%mm2,%%mm2 \n\t"/* x3 x1 x3 x1*/\
+ "pmaddwd %%mm2,%%mm6 \n\t"/* x3*w21+x1*w20 x3*w17+x1*w16*/\
+ "pshufw $0xdd,%%mm5,%%mm5 \n\t"/* x7 x5 x7 x5*/\
+ "pmaddwd %%mm5,%%mm7 \n\t"/* x7*w23+x5*w22 x7*w19+x5*w18*/\
+ "paddd " #A4 ",%%mm3 \n\t"/* +%4*/\
+ "pmaddwd 16+" #A3 ",%%mm0 \n\t"/* x2*w13+x0*w12 x2*w09+x0*w08*/\
+ "paddd %%mm4,%%mm3 \n\t"/* 4 ; a1=sum(even1) a0=sum(even0)*/\
+ "pmaddwd 24+" #A3 ",%%mm1 \n\t"/* x6*w15+x4*w14 x6*w11+x4*w10*/\
+ "movq %%mm3,%%mm4 \n\t"/* 4 ; a1 a0*/\
+ "pmaddwd 48+" #A3 ",%%mm2 \n\t"/* x3*w29+x1*w28 x3*w25+x1*w24*/\
+ "paddd %%mm7,%%mm6 \n\t"/* 7 ; b1=sum(odd1) b0=sum(odd0)*/\
+ "pmaddwd 56+" #A3 ",%%mm5 \n\t"/* x7*w31+x5*w30 x7*w27+x5*w26*/\
+ "paddd %%mm6,%%mm3 \n\t"/* a1+b1 a0+b0*/\
+ "paddd " #A4 ",%%mm0 \n\t"/* +%4*/\
+ "psrad $11,%%mm3 \n\t"/* y1=a1+b1 y0=a0+b0*/\
+ "paddd %%mm1,%%mm0 \n\t"/* 1 ; a3=sum(even3) a2=sum(even2)*/\
+ "psubd %%mm6,%%mm4 \n\t"/* 6 ; a1-b1 a0-b0*/\
+ "movq %%mm0,%%mm7 \n\t"/* 7 ; a3 a2*/\
+ "paddd %%mm5,%%mm2 \n\t"/* 5 ; b3=sum(odd3) b2=sum(odd2)*/\
+ "paddd %%mm2,%%mm0 \n\t"/* a3+b3 a2+b2*/\
+ "psrad $11,%%mm4 \n\t"/* y6=a1-b1 y7=a0-b0*/\
+ "psubd %%mm2,%%mm7 \n\t"/* 2 ; a3-b3 a2-b2*/\
+ "psrad $11,%%mm0 \n\t"/* y3=a3+b3 y2=a2+b2*/\
+ "psrad $11,%%mm7 \n\t"/* y4=a3-b3 y5=a2-b2*/\
+ "packssdw %%mm0,%%mm3 \n\t"/* 0 ; y3 y2 y1 y0*/\
+ "packssdw %%mm4,%%mm7 \n\t"/* 4 ; y6 y7 y4 y5*/\
+ "movq %%mm3, " #A2 " \n\t"/* 3 ; save y3 y2 y1 y0*/\
+ "pshufw $0xb1,%%mm7,%%mm7 \n\t"/* y7 y6 y5 y4*/\
+ "movq %%mm7,8 +" #A2 "\n\t"/* 7 ; save y7 y6 y5 y4*/\
+
+
+//-----------------------------------------------------------------------------
+//
+// The first stage DCT 8x8 - forward DCTs of columns
+//
+// The inputs are multiplied
+// for rows 0,4 - by cos_4_16,
+// for rows 1,7 - by cos_1_16,
+// for rows 2,6 - by cos_2_16,
+// for rows 3,5 - by cos_3_16
+// and are shifted to the left for better accuracy
+//
+//-----------------------------------------------------------------------------
+//
+// The 8-point scaled forward DCT algorithm (26a8m)
+//
+//-----------------------------------------------------------------------------
+//
+// #define DCT_8_FRW_COL(x, y)
+//{
+// short t0, t1, t2, t3, t4, t5, t6, t7;
+// short tp03, tm03, tp12, tm12, tp65, tm65;
+// short tp465, tm465, tp765, tm765;
+//
+// t0 = LEFT_SHIFT ( x[0] + x[7] );
+// t1 = LEFT_SHIFT ( x[1] + x[6] );
+// t2 = LEFT_SHIFT ( x[2] + x[5] );
+// t3 = LEFT_SHIFT ( x[3] + x[4] );
+// t4 = LEFT_SHIFT ( x[3] - x[4] );
+// t5 = LEFT_SHIFT ( x[2] - x[5] );
+// t6 = LEFT_SHIFT ( x[1] - x[6] );
+// t7 = LEFT_SHIFT ( x[0] - x[7] );
+//
+// tp03 = t0 + t3;
+// tm03 = t0 - t3;
+// tp12 = t1 + t2;
+// tm12 = t1 - t2;
+//
+// y[0] = tp03 + tp12;
+// y[4] = tp03 - tp12;
+//
+// y[2] = tm03 + tm12 * tg_2_16;
+// y[6] = tm03 * tg_2_16 - tm12;
+//
+// tp65 =(t6 +t5 )*cos_4_16;
+// tm65 =(t6 -t5 )*cos_4_16;
+//
+// tp765 = t7 + tp65;
+// tm765 = t7 - tp65;
+// tp465 = t4 + tm65;
+// tm465 = t4 - tm65;
+//
+// y[1] = tp765 + tp465 * tg_1_16;
+// y[7] = tp765 * tg_1_16 - tp465;
+// y[5] = tm765 * tg_3_16 + tm465;
+// y[3] = tm765 - tm465 * tg_3_16;
+//}
+//
+//-----------------------------------------------------------------------------
+
+//-----------------------------------------------------------------------------
+// DCT_8_INV_COL_4 INP,OUT
+//-----------------------------------------------------------------------------
+
+#define DCT_8_INV_COL(A1,A2)\
+ "movq 2*8(%3),%%mm0\n\t"\
+ "movq 16*3+" #A1 ",%%mm3\n\t"\
+ "movq %%mm0,%%mm1 \n\t"/* tg_3_16*/\
+ "movq 16*5+" #A1 ",%%mm5\n\t"\
+ "pmulhw %%mm3,%%mm0 \n\t"/* x3*(tg_3_16-1)*/\
+ "movq (%3),%%mm4\n\t"\
+ "pmulhw %%mm5,%%mm1 \n\t"/* x5*(tg_3_16-1)*/\
+ "movq 16*7+" #A1 ",%%mm7\n\t"\
+ "movq %%mm4,%%mm2 \n\t"/* tg_1_16*/\
+ "movq 16*1+" #A1 ",%%mm6\n\t"\
+ "pmulhw %%mm7,%%mm4 \n\t"/* x7*tg_1_16*/\
+ "paddsw %%mm3,%%mm0 \n\t"/* x3*tg_3_16*/\
+ "pmulhw %%mm6,%%mm2 \n\t"/* x1*tg_1_16*/\
+ "paddsw %%mm3,%%mm1 \n\t"/* x3+x5*(tg_3_16-1)*/\
+ "psubsw %%mm5,%%mm0 \n\t"/* x3*tg_3_16-x5 = tm35*/\
+ "movq 3*8(%3),%%mm3\n\t"\
+ "paddsw %%mm5,%%mm1 \n\t"/* x3+x5*tg_3_16 = tp35*/\
+ "paddsw %%mm6,%%mm4 \n\t"/* x1+tg_1_16*x7 = tp17*/\
+ "psubsw %%mm7,%%mm2 \n\t"/* x1*tg_1_16-x7 = tm17*/\
+ "movq %%mm4,%%mm5 \n\t"/* tp17*/\
+ "movq %%mm2,%%mm6 \n\t"/* tm17*/\
+ "paddsw %%mm1,%%mm5 \n\t"/* tp17+tp35 = b0*/\
+ "psubsw %%mm0,%%mm6 \n\t"/* tm17-tm35 = b3*/\
+ "psubsw %%mm1,%%mm4 \n\t"/* tp17-tp35 = t1*/\
+ "paddsw %%mm0,%%mm2 \n\t"/* tm17+tm35 = t2*/\
+ "movq 1*8(%3),%%mm7\n\t"\
+ "movq %%mm4,%%mm1 \n\t"/* t1*/\
+ "movq %%mm5,3*16 +" #A2 "\n\t"/* save b0*/\
+ "paddsw %%mm2,%%mm1 \n\t"/* t1+t2*/\
+ "movq %%mm6,5*16 +" #A2 "\n\t"/* save b3*/\
+ "psubsw %%mm2,%%mm4 \n\t"/* t1-t2*/\
+ "movq 2*16+" #A1 ",%%mm5\n\t"\
+ "movq %%mm7,%%mm0 \n\t"/* tg_2_16*/\
+ "movq 6*16+" #A1 ",%%mm6\n\t"\
+ "pmulhw %%mm5,%%mm0 \n\t"/* x2*tg_2_16*/\
+ "pmulhw %%mm6,%%mm7 \n\t"/* x6*tg_2_16*/\
+ "pmulhw %%mm3,%%mm1 \n\t"/* ocos_4_16*(t1+t2) = b1/2*/\
+ "movq 0*16+" #A1 ",%%mm2\n\t"\
+ "pmulhw %%mm3,%%mm4 \n\t"/* ocos_4_16*(t1-t2) = b2/2*/\
+ "psubsw %%mm6,%%mm0 \n\t"/* t2*tg_2_16-x6 = tm26*/\
+ "movq %%mm2,%%mm3 \n\t"/* x0*/\
+ "movq 4*16+" #A1 ",%%mm6\n\t"\
+ "paddsw %%mm5,%%mm7 \n\t"/* x2+x6*tg_2_16 = tp26*/\
+ "paddsw %%mm6,%%mm2 \n\t"/* x0+x4 = tp04*/\
+ "psubsw %%mm6,%%mm3 \n\t"/* x0-x4 = tm04*/\
+ "movq %%mm2,%%mm5 \n\t"/* tp04*/\
+ "movq %%mm3,%%mm6 \n\t"/* tm04*/\
+ "psubsw %%mm7,%%mm2 \n\t"/* tp04-tp26 = a3*/\
+ "paddsw %%mm0,%%mm3 \n\t"/* tm04+tm26 = a1*/\
+ "paddsw %%mm1,%%mm1 \n\t"/* b1*/\
+ "paddsw %%mm4,%%mm4 \n\t"/* b2*/\
+ "paddsw %%mm7,%%mm5 \n\t"/* tp04+tp26 = a0*/\
+ "psubsw %%mm0,%%mm6 \n\t"/* tm04-tm26 = a2*/\
+ "movq %%mm3,%%mm7 \n\t"/* a1*/\
+ "movq %%mm6,%%mm0 \n\t"/* a2*/\
+ "paddsw %%mm1,%%mm3 \n\t"/* a1+b1*/\
+ "paddsw %%mm4,%%mm6 \n\t"/* a2+b2*/\
+ "psraw $6,%%mm3 \n\t"/* dst1*/\
+ "psubsw %%mm1,%%mm7 \n\t"/* a1-b1*/\
+ "psraw $6,%%mm6 \n\t"/* dst2*/\
+ "psubsw %%mm4,%%mm0 \n\t"/* a2-b2*/\
+ "movq 3*16+" #A2 ",%%mm1 \n\t"/* load b0*/\
+ "psraw $6,%%mm7 \n\t"/* dst6*/\
+ "movq %%mm5,%%mm4 \n\t"/* a0*/\
+ "psraw $6,%%mm0 \n\t"/* dst5*/\
+ "movq %%mm3,1*16+" #A2 "\n\t"\
+ "paddsw %%mm1,%%mm5 \n\t"/* a0+b0*/\
+ "movq %%mm6,2*16+" #A2 "\n\t"\
+ "psubsw %%mm1,%%mm4 \n\t"/* a0-b0*/\
+ "movq 5*16+" #A2 ",%%mm3 \n\t"/* load b3*/\
+ "psraw $6,%%mm5 \n\t"/* dst0*/\
+ "movq %%mm2,%%mm6 \n\t"/* a3*/\
+ "psraw $6,%%mm4 \n\t"/* dst7*/\
+ "movq %%mm0,5*16+" #A2 "\n\t"\
+ "paddsw %%mm3,%%mm2 \n\t"/* a3+b3*/\
+ "movq %%mm7,6*16+" #A2 "\n\t"\
+ "psubsw %%mm3,%%mm6 \n\t"/* a3-b3*/\
+ "movq %%mm5,0*16+" #A2 "\n\t"\
+ "psraw $6,%%mm2 \n\t"/* dst3*/\
+ "movq %%mm4,7*16+" #A2 "\n\t"\
+ "psraw $6,%%mm6 \n\t"/* dst4*/\
+ "movq %%mm2,3*16+" #A2 "\n\t"\
+ "movq %%mm6,4*16+" #A2 "\n\t"
+
+//=============================================================================
+// Code
+//=============================================================================
+
+//-----------------------------------------------------------------------------
+// void ff_idct_xvid_mmx(short block[64]);
+//-----------------------------------------------------------------------------
+
+
+void ff_idct_xvid_mmx(short *block){
+__asm__ volatile(
+ //# Process each row
+ DCT_8_INV_ROW_MMX(0*16(%0), 0*16(%0), 64*0(%2), 8*0(%1))
+ DCT_8_INV_ROW_MMX(1*16(%0), 1*16(%0), 64*1(%2), 8*1(%1))
+ DCT_8_INV_ROW_MMX(2*16(%0), 2*16(%0), 64*2(%2), 8*2(%1))
+ DCT_8_INV_ROW_MMX(3*16(%0), 3*16(%0), 64*3(%2), 8*3(%1))
+ DCT_8_INV_ROW_MMX(4*16(%0), 4*16(%0), 64*0(%2), 8*4(%1))
+ DCT_8_INV_ROW_MMX(5*16(%0), 5*16(%0), 64*3(%2), 8*5(%1))
+ DCT_8_INV_ROW_MMX(6*16(%0), 6*16(%0), 64*2(%2), 8*6(%1))
+ DCT_8_INV_ROW_MMX(7*16(%0), 7*16(%0), 64*1(%2), 8*7(%1))
+
+ //# Process the columns (4 at a time)
+ DCT_8_INV_COL(0(%0), 0(%0))
+ DCT_8_INV_COL(8(%0), 8(%0))
+ :: "r"(block), "r"(rounder_0), "r"(tab_i_04_mmx), "r"(tg_1_16));
+}
+
+//-----------------------------------------------------------------------------
+// void ff_idct_xvid_mmx2(short block[64]);
+//-----------------------------------------------------------------------------
+
+
+void ff_idct_xvid_mmx2(short *block){
+__asm__ volatile(
+ //# Process each row
+ DCT_8_INV_ROW_XMM(0*16(%0), 0*16(%0), 64*0(%2), 8*0(%1))
+ DCT_8_INV_ROW_XMM(1*16(%0), 1*16(%0), 64*1(%2), 8*1(%1))
+ DCT_8_INV_ROW_XMM(2*16(%0), 2*16(%0), 64*2(%2), 8*2(%1))
+ DCT_8_INV_ROW_XMM(3*16(%0), 3*16(%0), 64*3(%2), 8*3(%1))
+ DCT_8_INV_ROW_XMM(4*16(%0), 4*16(%0), 64*0(%2), 8*4(%1))
+ DCT_8_INV_ROW_XMM(5*16(%0), 5*16(%0), 64*3(%2), 8*5(%1))
+ DCT_8_INV_ROW_XMM(6*16(%0), 6*16(%0), 64*2(%2), 8*6(%1))
+ DCT_8_INV_ROW_XMM(7*16(%0), 7*16(%0), 64*1(%2), 8*7(%1))
+
+ //# Process the columns (4 at a time)
+ DCT_8_INV_COL(0(%0), 0(%0))
+ DCT_8_INV_COL(8(%0), 8(%0))
+ :: "r"(block), "r"(rounder_0), "r"(tab_i_04_xmm), "r"(tg_1_16));
+}
+
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/idct_sse2_xvid.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/idct_sse2_xvid.c
new file mode 100644
index 000000000..6c3564f24
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/idct_sse2_xvid.c
@@ -0,0 +1,395 @@
+/*
+ * XVID MPEG-4 VIDEO CODEC
+ * - SSE2 inverse discrete cosine transform -
+ *
+ * Copyright(C) 2003 Pascal Massimino <skal@planet-d.net>
+ *
+ * Conversion to gcc syntax with modifications
+ * by Alexander Strange <astrange@ithinksw.com>
+ *
+ * Originally from dct/x86_asm/fdct_sse2_skal.asm in Xvid.
+ *
+ * This file is part of FFmpeg.
+ *
+ * Vertical pass is an implementation of the scheme:
+ * Loeffler C., Ligtenberg A., and Moschytz C.S.:
+ * Practical Fast 1D DCT Algorithm with Eleven Multiplications,
+ * Proc. ICASSP 1989, 988-991.
+ *
+ * Horizontal pass is a double 4x4 vector/matrix multiplication,
+ * (see also Intel's Application Note 922:
+ * http://developer.intel.com/vtune/cbts/strmsimd/922down.htm
+ * Copyright (C) 1999 Intel Corporation)
+ *
+ * More details at http://skal.planet-d.net/coding/dct.html
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with FFmpeg; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "../dsputil.h"
+#include "idct_xvid.h"
+#include "dsputil_mmx.h"
+
+/*!
+ * @file libavcodec/x86/idct_sse2_xvid.c
+ * @brief SSE2 idct compatible with xvidmmx
+ */
+
+#define X8(x) x,x,x,x,x,x,x,x
+
+#define ROW_SHIFT 11
+#define COL_SHIFT 6
+
+DECLARE_ASM_CONST(16, int16_t, tan1)[] = {X8(13036)}; // tan( pi/16)
+DECLARE_ASM_CONST(16, int16_t, tan2)[] = {X8(27146)}; // tan(2pi/16) = sqrt(2)-1
+DECLARE_ASM_CONST(16, int16_t, tan3)[] = {X8(43790)}; // tan(3pi/16)-1
+DECLARE_ASM_CONST(16, int16_t, sqrt2)[]= {X8(23170)}; // 0.5/sqrt(2)
+DECLARE_ASM_CONST(8, uint8_t, m127)[] = {X8(127)};
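The fixed-point constant vectors above can be reproduced from the trigonometric definitions in their comments (a quick check, not part of the patch):

    /* tan( pi/16) * 65536 = 0.19891 * 65536          = 13036
     * tan(2pi/16) * 65536 = (sqrt(2)-1) * 65536      = 27146
     * 0.5/sqrt(2) * 65536 = 0.35355 * 65536          = 23170
     * tan3 holds (tan(3pi/16)-1) * 65536 = -21746, stored as its unsigned
     * 16-bit image 43790 (= 65536 - 21746).
     */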
+
+DECLARE_ASM_CONST(16, int16_t, iTab1)[] = {
+ 0x4000, 0x539f, 0xc000, 0xac61, 0x4000, 0xdd5d, 0x4000, 0xdd5d,
+ 0x4000, 0x22a3, 0x4000, 0x22a3, 0xc000, 0x539f, 0x4000, 0xac61,
+ 0x3249, 0x11a8, 0x4b42, 0xee58, 0x11a8, 0x4b42, 0x11a8, 0xcdb7,
+ 0x58c5, 0x4b42, 0xa73b, 0xcdb7, 0x3249, 0xa73b, 0x4b42, 0xa73b
+};
+
+DECLARE_ASM_CONST(16, int16_t, iTab2)[] = {
+ 0x58c5, 0x73fc, 0xa73b, 0x8c04, 0x58c5, 0xcff5, 0x58c5, 0xcff5,
+ 0x58c5, 0x300b, 0x58c5, 0x300b, 0xa73b, 0x73fc, 0x58c5, 0x8c04,
+ 0x45bf, 0x187e, 0x6862, 0xe782, 0x187e, 0x6862, 0x187e, 0xba41,
+ 0x7b21, 0x6862, 0x84df, 0xba41, 0x45bf, 0x84df, 0x6862, 0x84df
+};
+
+DECLARE_ASM_CONST(16, int16_t, iTab3)[] = {
+ 0x539f, 0x6d41, 0xac61, 0x92bf, 0x539f, 0xd2bf, 0x539f, 0xd2bf,
+ 0x539f, 0x2d41, 0x539f, 0x2d41, 0xac61, 0x6d41, 0x539f, 0x92bf,
+ 0x41b3, 0x1712, 0x6254, 0xe8ee, 0x1712, 0x6254, 0x1712, 0xbe4d,
+ 0x73fc, 0x6254, 0x8c04, 0xbe4d, 0x41b3, 0x8c04, 0x6254, 0x8c04
+};
+
+DECLARE_ASM_CONST(16, int16_t, iTab4)[] = {
+ 0x4b42, 0x6254, 0xb4be, 0x9dac, 0x4b42, 0xd746, 0x4b42, 0xd746,
+ 0x4b42, 0x28ba, 0x4b42, 0x28ba, 0xb4be, 0x6254, 0x4b42, 0x9dac,
+ 0x3b21, 0x14c3, 0x587e, 0xeb3d, 0x14c3, 0x587e, 0x14c3, 0xc4df,
+ 0x6862, 0x587e, 0x979e, 0xc4df, 0x3b21, 0x979e, 0x587e, 0x979e
+};
+
+DECLARE_ASM_CONST(16, int32_t, walkenIdctRounders)[] = {
+ 65536, 65536, 65536, 65536,
+ 3597, 3597, 3597, 3597,
+ 2260, 2260, 2260, 2260,
+ 1203, 1203, 1203, 1203,
+ 120, 120, 120, 120,
+ 512, 512, 512, 512
+};
+
+// Temporary storage before the column pass
+#define ROW1 "%%xmm6"
+#define ROW3 "%%xmm4"
+#define ROW5 "%%xmm5"
+#define ROW7 "%%xmm7"
+
+#define CLEAR_ODD(r) "pxor "r","r" \n\t"
+#define PUT_ODD(dst) "pshufhw $0x1B, %%xmm2, "dst" \n\t"
+
+#if ARCH_X86_64
+
+# define ROW0 "%%xmm8"
+# define REG0 ROW0
+# define ROW2 "%%xmm9"
+# define REG2 ROW2
+# define ROW4 "%%xmm10"
+# define REG4 ROW4
+# define ROW6 "%%xmm11"
+# define REG6 ROW6
+# define CLEAR_EVEN(r) CLEAR_ODD(r)
+# define PUT_EVEN(dst) PUT_ODD(dst)
+# define XMMS "%%xmm12"
+# define MOV_32_ONLY "#"
+# define SREG2 REG2
+# define TAN3 "%%xmm13"
+# define TAN1 "%%xmm14"
+
+#else
+
+# define ROW0 "(%0)"
+# define REG0 "%%xmm4"
+# define ROW2 "2*16(%0)"
+# define REG2 "%%xmm4"
+# define ROW4 "4*16(%0)"
+# define REG4 "%%xmm6"
+# define ROW6 "6*16(%0)"
+# define REG6 "%%xmm6"
+# define CLEAR_EVEN(r)
+# define PUT_EVEN(dst) \
+ "pshufhw $0x1B, %%xmm2, %%xmm2 \n\t" \
+ "movdqa %%xmm2, "dst" \n\t"
+# define XMMS "%%xmm2"
+# define MOV_32_ONLY "movdqa "
+# define SREG2 "%%xmm7"
+# define TAN3 "%%xmm0"
+# define TAN1 "%%xmm2"
+
+#endif
+
+#define ROUND(x) "paddd "MANGLE(x)
+
+#define JZ(reg, to) \
+ "testl "reg","reg" \n\t" \
+ "jz "to" \n\t"
+
+#define JNZ(reg, to) \
+ "testl "reg","reg" \n\t" \
+ "jnz "to" \n\t"
+
+#define TEST_ONE_ROW(src, reg, clear) \
+ clear \
+ "movq "src", %%mm1 \n\t" \
+ "por 8+"src", %%mm1 \n\t" \
+ "paddusb %%mm0, %%mm1 \n\t" \
+ "pmovmskb %%mm1, "reg" \n\t"
+
+#define TEST_TWO_ROWS(row1, row2, reg1, reg2, clear1, clear2) \
+ clear1 \
+ clear2 \
+ "movq "row1", %%mm1 \n\t" \
+ "por 8+"row1", %%mm1 \n\t" \
+ "movq "row2", %%mm2 \n\t" \
+ "por 8+"row2", %%mm2 \n\t" \
+ "paddusb %%mm0, %%mm1 \n\t" \
+ "paddusb %%mm0, %%mm2 \n\t" \
+ "pmovmskb %%mm1, "reg1" \n\t" \
+ "pmovmskb %%mm2, "reg2" \n\t"
+
+///IDCT pass on rows.
+#define iMTX_MULT(src, table, rounder, put) \
+ "movdqa "src", %%xmm3 \n\t" \
+ "movdqa %%xmm3, %%xmm0 \n\t" \
+ "pshufd $0x11, %%xmm3, %%xmm1 \n\t" /* 4602 */ \
+ "punpcklqdq %%xmm0, %%xmm0 \n\t" /* 0246 */ \
+ "pmaddwd "table", %%xmm0 \n\t" \
+ "pmaddwd 16+"table", %%xmm1 \n\t" \
+ "pshufd $0xBB, %%xmm3, %%xmm2 \n\t" /* 5713 */ \
+ "punpckhqdq %%xmm3, %%xmm3 \n\t" /* 1357 */ \
+ "pmaddwd 32+"table", %%xmm2 \n\t" \
+ "pmaddwd 48+"table", %%xmm3 \n\t" \
+ "paddd %%xmm1, %%xmm0 \n\t" \
+ "paddd %%xmm3, %%xmm2 \n\t" \
+ rounder", %%xmm0 \n\t" \
+ "movdqa %%xmm2, %%xmm3 \n\t" \
+ "paddd %%xmm0, %%xmm2 \n\t" \
+ "psubd %%xmm3, %%xmm0 \n\t" \
+ "psrad $11, %%xmm2 \n\t" \
+ "psrad $11, %%xmm0 \n\t" \
+ "packssdw %%xmm0, %%xmm2 \n\t" \
+ put \
+ "1: \n\t"
+
+#define iLLM_HEAD \
+ "movdqa "MANGLE(tan3)", "TAN3" \n\t" \
+ "movdqa "MANGLE(tan1)", "TAN1" \n\t" \
+
+///IDCT pass on columns.
+#define iLLM_PASS(dct) \
+ "movdqa "TAN3", %%xmm1 \n\t" \
+ "movdqa "TAN1", %%xmm3 \n\t" \
+ "pmulhw %%xmm4, "TAN3" \n\t" \
+ "pmulhw %%xmm5, %%xmm1 \n\t" \
+ "paddsw %%xmm4, "TAN3" \n\t" \
+ "paddsw %%xmm5, %%xmm1 \n\t" \
+ "psubsw %%xmm5, "TAN3" \n\t" \
+ "paddsw %%xmm4, %%xmm1 \n\t" \
+ "pmulhw %%xmm7, %%xmm3 \n\t" \
+ "pmulhw %%xmm6, "TAN1" \n\t" \
+ "paddsw %%xmm6, %%xmm3 \n\t" \
+ "psubsw %%xmm7, "TAN1" \n\t" \
+ "movdqa %%xmm3, %%xmm7 \n\t" \
+ "movdqa "TAN1", %%xmm6 \n\t" \
+ "psubsw %%xmm1, %%xmm3 \n\t" \
+ "psubsw "TAN3", "TAN1" \n\t" \
+ "paddsw %%xmm7, %%xmm1 \n\t" \
+ "paddsw %%xmm6, "TAN3" \n\t" \
+ "movdqa %%xmm3, %%xmm6 \n\t" \
+ "psubsw "TAN3", %%xmm3 \n\t" \
+ "paddsw %%xmm6, "TAN3" \n\t" \
+ "movdqa "MANGLE(sqrt2)", %%xmm4 \n\t" \
+ "pmulhw %%xmm4, %%xmm3 \n\t" \
+ "pmulhw %%xmm4, "TAN3" \n\t" \
+ "paddsw "TAN3", "TAN3" \n\t" \
+ "paddsw %%xmm3, %%xmm3 \n\t" \
+ "movdqa "MANGLE(tan2)", %%xmm7 \n\t" \
+ MOV_32_ONLY ROW2", "REG2" \n\t" \
+ MOV_32_ONLY ROW6", "REG6" \n\t" \
+ "movdqa %%xmm7, %%xmm5 \n\t" \
+ "pmulhw "REG6", %%xmm7 \n\t" \
+ "pmulhw "REG2", %%xmm5 \n\t" \
+ "paddsw "REG2", %%xmm7 \n\t" \
+ "psubsw "REG6", %%xmm5 \n\t" \
+ MOV_32_ONLY ROW0", "REG0" \n\t" \
+ MOV_32_ONLY ROW4", "REG4" \n\t" \
+ MOV_32_ONLY" "TAN1", (%0) \n\t" \
+ "movdqa "REG0", "XMMS" \n\t" \
+ "psubsw "REG4", "REG0" \n\t" \
+ "paddsw "XMMS", "REG4" \n\t" \
+ "movdqa "REG4", "XMMS" \n\t" \
+ "psubsw %%xmm7, "REG4" \n\t" \
+ "paddsw "XMMS", %%xmm7 \n\t" \
+ "movdqa "REG0", "XMMS" \n\t" \
+ "psubsw %%xmm5, "REG0" \n\t" \
+ "paddsw "XMMS", %%xmm5 \n\t" \
+ "movdqa %%xmm5, "XMMS" \n\t" \
+ "psubsw "TAN3", %%xmm5 \n\t" \
+ "paddsw "XMMS", "TAN3" \n\t" \
+ "movdqa "REG0", "XMMS" \n\t" \
+ "psubsw %%xmm3, "REG0" \n\t" \
+ "paddsw "XMMS", %%xmm3 \n\t" \
+ MOV_32_ONLY" (%0), "TAN1" \n\t" \
+ "psraw $6, %%xmm5 \n\t" \
+ "psraw $6, "REG0" \n\t" \
+ "psraw $6, "TAN3" \n\t" \
+ "psraw $6, %%xmm3 \n\t" \
+ "movdqa "TAN3", 1*16("dct") \n\t" \
+ "movdqa %%xmm3, 2*16("dct") \n\t" \
+ "movdqa "REG0", 5*16("dct") \n\t" \
+ "movdqa %%xmm5, 6*16("dct") \n\t" \
+ "movdqa %%xmm7, %%xmm0 \n\t" \
+ "movdqa "REG4", %%xmm4 \n\t" \
+ "psubsw %%xmm1, %%xmm7 \n\t" \
+ "psubsw "TAN1", "REG4" \n\t" \
+ "paddsw %%xmm0, %%xmm1 \n\t" \
+ "paddsw %%xmm4, "TAN1" \n\t" \
+ "psraw $6, %%xmm1 \n\t" \
+ "psraw $6, %%xmm7 \n\t" \
+ "psraw $6, "TAN1" \n\t" \
+ "psraw $6, "REG4" \n\t" \
+ "movdqa %%xmm1, ("dct") \n\t" \
+ "movdqa "TAN1", 3*16("dct") \n\t" \
+ "movdqa "REG4", 4*16("dct") \n\t" \
+ "movdqa %%xmm7, 7*16("dct") \n\t"
+
+///IDCT pass on columns, assuming rows 4-7 are zero.
+#define iLLM_PASS_SPARSE(dct) \
+ "pmulhw %%xmm4, "TAN3" \n\t" \
+ "paddsw %%xmm4, "TAN3" \n\t" \
+ "movdqa %%xmm6, %%xmm3 \n\t" \
+ "pmulhw %%xmm6, "TAN1" \n\t" \
+ "movdqa %%xmm4, %%xmm1 \n\t" \
+ "psubsw %%xmm1, %%xmm3 \n\t" \
+ "paddsw %%xmm6, %%xmm1 \n\t" \
+ "movdqa "TAN1", %%xmm6 \n\t" \
+ "psubsw "TAN3", "TAN1" \n\t" \
+ "paddsw %%xmm6, "TAN3" \n\t" \
+ "movdqa %%xmm3, %%xmm6 \n\t" \
+ "psubsw "TAN3", %%xmm3 \n\t" \
+ "paddsw %%xmm6, "TAN3" \n\t" \
+ "movdqa "MANGLE(sqrt2)", %%xmm4 \n\t" \
+ "pmulhw %%xmm4, %%xmm3 \n\t" \
+ "pmulhw %%xmm4, "TAN3" \n\t" \
+ "paddsw "TAN3", "TAN3" \n\t" \
+ "paddsw %%xmm3, %%xmm3 \n\t" \
+ "movdqa "MANGLE(tan2)", %%xmm5 \n\t" \
+ MOV_32_ONLY ROW2", "SREG2" \n\t" \
+ "pmulhw "SREG2", %%xmm5 \n\t" \
+ MOV_32_ONLY ROW0", "REG0" \n\t" \
+ "movdqa "REG0", %%xmm6 \n\t" \
+ "psubsw "SREG2", %%xmm6 \n\t" \
+ "paddsw "REG0", "SREG2" \n\t" \
+ MOV_32_ONLY" "TAN1", (%0) \n\t" \
+ "movdqa "REG0", "XMMS" \n\t" \
+ "psubsw %%xmm5, "REG0" \n\t" \
+ "paddsw "XMMS", %%xmm5 \n\t" \
+ "movdqa %%xmm5, "XMMS" \n\t" \
+ "psubsw "TAN3", %%xmm5 \n\t" \
+ "paddsw "XMMS", "TAN3" \n\t" \
+ "movdqa "REG0", "XMMS" \n\t" \
+ "psubsw %%xmm3, "REG0" \n\t" \
+ "paddsw "XMMS", %%xmm3 \n\t" \
+ MOV_32_ONLY" (%0), "TAN1" \n\t" \
+ "psraw $6, %%xmm5 \n\t" \
+ "psraw $6, "REG0" \n\t" \
+ "psraw $6, "TAN3" \n\t" \
+ "psraw $6, %%xmm3 \n\t" \
+ "movdqa "TAN3", 1*16("dct") \n\t" \
+ "movdqa %%xmm3, 2*16("dct") \n\t" \
+ "movdqa "REG0", 5*16("dct") \n\t" \
+ "movdqa %%xmm5, 6*16("dct") \n\t" \
+ "movdqa "SREG2", %%xmm0 \n\t" \
+ "movdqa %%xmm6, %%xmm4 \n\t" \
+ "psubsw %%xmm1, "SREG2" \n\t" \
+ "psubsw "TAN1", %%xmm6 \n\t" \
+ "paddsw %%xmm0, %%xmm1 \n\t" \
+ "paddsw %%xmm4, "TAN1" \n\t" \
+ "psraw $6, %%xmm1 \n\t" \
+ "psraw $6, "SREG2" \n\t" \
+ "psraw $6, "TAN1" \n\t" \
+ "psraw $6, %%xmm6 \n\t" \
+ "movdqa %%xmm1, ("dct") \n\t" \
+ "movdqa "TAN1", 3*16("dct") \n\t" \
+ "movdqa %%xmm6, 4*16("dct") \n\t" \
+ "movdqa "SREG2", 7*16("dct") \n\t"
+
+inline void ff_idct_xvid_sse2(short *block)
+{
+ __asm__ volatile(
+ "movq "MANGLE(m127)", %%mm0 \n\t"
+ iMTX_MULT("(%0)", MANGLE(iTab1), ROUND(walkenIdctRounders), PUT_EVEN(ROW0))
+ iMTX_MULT("1*16(%0)", MANGLE(iTab2), ROUND(walkenIdctRounders+1*16), PUT_ODD(ROW1))
+ iMTX_MULT("2*16(%0)", MANGLE(iTab3), ROUND(walkenIdctRounders+2*16), PUT_EVEN(ROW2))
+
+ TEST_TWO_ROWS("3*16(%0)", "4*16(%0)", "%%eax", "%%ecx", CLEAR_ODD(ROW3), CLEAR_EVEN(ROW4))
+ JZ("%%eax", "1f")
+ iMTX_MULT("3*16(%0)", MANGLE(iTab4), ROUND(walkenIdctRounders+3*16), PUT_ODD(ROW3))
+
+ TEST_TWO_ROWS("5*16(%0)", "6*16(%0)", "%%eax", "%%edx", CLEAR_ODD(ROW5), CLEAR_EVEN(ROW6))
+ TEST_ONE_ROW("7*16(%0)", "%%esi", CLEAR_ODD(ROW7))
+ iLLM_HEAD
+ ASMALIGN(4)
+ JNZ("%%ecx", "2f")
+ JNZ("%%eax", "3f")
+ JNZ("%%edx", "4f")
+ JNZ("%%esi", "5f")
+ iLLM_PASS_SPARSE("%0")
+ "jmp 6f \n\t"
+ "2: \n\t"
+ iMTX_MULT("4*16(%0)", MANGLE(iTab1), "#", PUT_EVEN(ROW4))
+ "3: \n\t"
+ iMTX_MULT("5*16(%0)", MANGLE(iTab4), ROUND(walkenIdctRounders+4*16), PUT_ODD(ROW5))
+ JZ("%%edx", "1f")
+ "4: \n\t"
+ iMTX_MULT("6*16(%0)", MANGLE(iTab3), ROUND(walkenIdctRounders+5*16), PUT_EVEN(ROW6))
+ JZ("%%esi", "1f")
+ "5: \n\t"
+ iMTX_MULT("7*16(%0)", MANGLE(iTab2), ROUND(walkenIdctRounders+5*16), PUT_ODD(ROW7))
+#if !ARCH_X86_64
+ iLLM_HEAD
+#endif
+ iLLM_PASS("%0")
+ "6: \n\t"
+ : "+r"(block)
+ :
+ : "%eax", "%ecx", "%edx", "%esi", "memory");
+}
+
+void ff_idct_xvid_sse2_put(uint8_t *dest, int line_size, short *block)
+{
+ ff_idct_xvid_sse2(block);
+ put_pixels_clamped_mmx(block, dest, line_size);
+}
+
+void ff_idct_xvid_sse2_add(uint8_t *dest, int line_size, short *block)
+{
+ ff_idct_xvid_sse2(block);
+ add_pixels_clamped_mmx(block, dest, line_size);
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/idct_xvid.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/idct_xvid.h
new file mode 100644
index 000000000..499191639
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/idct_xvid.h
@@ -0,0 +1,39 @@
+/*
+ * XVID MPEG-4 VIDEO CODEC
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*!
+ * @file idct_xvid.h
+ * header for Xvid IDCT functions
+ */
+
+#ifndef AVCODEC_X86_IDCT_XVID_H
+#define AVCODEC_X86_IDCT_XVID_H
+
+#ifdef __GNUC__
+#include <stdint.h>
+#endif
+
+void ff_idct_xvid_mmx(short *block);
+void ff_idct_xvid_mmx2(short *block);
+void ff_idct_xvid_sse2(short *block);
+void ff_idct_xvid_sse2_put(uint8_t *dest, int line_size, short *block);
+void ff_idct_xvid_sse2_add(uint8_t *dest, int line_size, short *block);
+
+#endif /* AVCODEC_X86_IDCT_XVID_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/mathops.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/mathops.h
new file mode 100644
index 000000000..691a200fd
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/mathops.h
@@ -0,0 +1,81 @@
+/*
+ * simple math operations
+ * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_X86_MATHOPS_H
+#define AVCODEC_X86_MATHOPS_H
+
+#include "config.h"
+#include "libavutil/common.h"
+
+#if ARCH_X86_32
+#define MULL(ra, rb, shift) \
+ ({ int rt, dummy; __asm__ (\
+ "imull %3 \n\t"\
+ "shrdl %4, %%edx, %%eax \n\t"\
+ : "=a"(rt), "=d"(dummy)\
+ : "a" ((int)ra), "rm" ((int)rb), "i"(shift));\
+ rt; })
+
+#define MULH(ra, rb) \
+ ({ int rt, dummy;\
+ __asm__ ("imull %3\n\t" : "=d"(rt), "=a"(dummy): "a" ((int)ra), "rm" ((int)rb));\
+ rt; })
+
+#define MUL64(ra, rb) \
+ ({ int64_t rt;\
+ __asm__ ("imull %2\n\t" : "=A"(rt) : "a" ((int)ra), "g" ((int)rb));\
+ rt; })
+#endif
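For readers not fluent in the inline assembly, the three macros compute the following portable expressions (reference sketch only, not part of the header):

    /* MULL(a, b, shift) == (int)(((int64_t)(a) * (b)) >> (shift))
     * MULH(a, b)        == (int)(((int64_t)(a) * (b)) >> 32)
     * MUL64(a, b)       ==        (int64_t)(a) * (b)
     */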
+
+#if HAVE_CMOV
+/* median of 3 */
+#define mid_pred mid_pred
+static inline av_const int mid_pred(int a, int b, int c)
+{
+ int i=b;
+ __asm__ volatile(
+ "cmp %2, %1 \n\t"
+ "cmovg %1, %0 \n\t"
+ "cmovg %2, %1 \n\t"
+ "cmp %3, %1 \n\t"
+ "cmovl %3, %1 \n\t"
+ "cmp %1, %0 \n\t"
+ "cmovg %1, %0 \n\t"
+ :"+&r"(i), "+&r"(a)
+ :"r"(b), "r"(c)
+ );
+ return i;
+}
+#endif
+
+#if HAVE_CMOV
+#define COPY3_IF_LT(x, y, a, b, c, d)\
+__asm__ volatile(\
+ "cmpl %0, %3 \n\t"\
+ "cmovl %3, %0 \n\t"\
+ "cmovl %4, %1 \n\t"\
+ "cmovl %5, %2 \n\t"\
+ : "+&r" (x), "+&r" (a), "+r" (c)\
+ : "r" (y), "r" (b), "r" (d)\
+);
+#endif
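In plain C, the two cmov-based helpers above behave like the following (reference sketch only; FFMIN/FFMAX as in libavutil/common.h):

    /* mid_pred(a, b, c)             -> median of the three values,
     *                                  i.e. FFMIN(FFMAX(a, b), FFMAX(FFMIN(a, b), c))
     * COPY3_IF_LT(x, y, a, b, c, d) -> if (y < x) { x = y; a = b; c = d; }
     */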
+
+#endif /* AVCODEC_X86_MATHOPS_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/mlpdsp.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/mlpdsp.c
new file mode 100644
index 000000000..486a927ad
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/mlpdsp.c
@@ -0,0 +1,181 @@
+/*
+ * MLP DSP functions x86-optimized
+ * Copyright (c) 2009 Ramiro Polla
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/x86_cpu.h"
+#include "libavcodec/dsputil.h"
+#include "libavcodec/mlp.h"
+
+#if HAVE_7REGS && HAVE_TEN_OPERANDS
+
+extern void ff_mlp_firorder_8;
+extern void ff_mlp_firorder_7;
+extern void ff_mlp_firorder_6;
+extern void ff_mlp_firorder_5;
+extern void ff_mlp_firorder_4;
+extern void ff_mlp_firorder_3;
+extern void ff_mlp_firorder_2;
+extern void ff_mlp_firorder_1;
+extern void ff_mlp_firorder_0;
+
+extern void ff_mlp_iirorder_4;
+extern void ff_mlp_iirorder_3;
+extern void ff_mlp_iirorder_2;
+extern void ff_mlp_iirorder_1;
+extern void ff_mlp_iirorder_0;
+
+static const void *firtable[9] = { &ff_mlp_firorder_0, &ff_mlp_firorder_1,
+ &ff_mlp_firorder_2, &ff_mlp_firorder_3,
+ &ff_mlp_firorder_4, &ff_mlp_firorder_5,
+ &ff_mlp_firorder_6, &ff_mlp_firorder_7,
+ &ff_mlp_firorder_8 };
+static const void *iirtable[5] = { &ff_mlp_iirorder_0, &ff_mlp_iirorder_1,
+ &ff_mlp_iirorder_2, &ff_mlp_iirorder_3,
+ &ff_mlp_iirorder_4 };
+
+#if ARCH_X86_64
+
+#define MLPMUL(label, offset, offs, offc) \
+ LABEL_MANGLE(label)": \n\t" \
+ "movslq "offset"+"offs"(%0), %%rax\n\t" \
+ "movslq "offset"+"offc"(%1), %%rdx\n\t" \
+ "imul %%rdx, %%rax\n\t" \
+ "add %%rax, %%rsi\n\t"
+
+#define FIRMULREG(label, offset, firc)\
+ LABEL_MANGLE(label)": \n\t" \
+ "movslq "#offset"(%0), %%rax\n\t" \
+ "imul %"#firc", %%rax\n\t" \
+ "add %%rax, %%rsi\n\t"
+
+#define CLEAR_ACCUM \
+ "xor %%rsi, %%rsi\n\t"
+
+#define SHIFT_ACCUM \
+ "shr %%cl, %%rsi\n\t"
+
+#define ACCUM "%%rdx"
+#define RESULT "%%rsi"
+#define RESULT32 "%%esi"
+
+#else /* if ARCH_X86_32 */
+
+#define MLPMUL(label, offset, offs, offc) \
+ LABEL_MANGLE(label)": \n\t" \
+ "mov "offset"+"offs"(%0), %%eax\n\t" \
+ "imull "offset"+"offc"(%1) \n\t" \
+ "add %%eax , %%esi\n\t" \
+ "adc %%edx , %%ecx\n\t"
+
+#define FIRMULREG(label, offset, firc) \
+ MLPMUL(label, #offset, "0", "0")
+
+#define CLEAR_ACCUM \
+ "xor %%esi, %%esi\n\t" \
+ "xor %%ecx, %%ecx\n\t"
+
+#define SHIFT_ACCUM \
+ "mov %%ecx, %%edx\n\t" \
+ "mov %%esi, %%eax\n\t" \
+ "movzbl %7 , %%ecx\n\t" \
+ "shrd %%cl, %%edx, %%eax\n\t" \
+
+#define ACCUM "%%edx"
+#define RESULT "%%eax"
+#define RESULT32 "%%eax"
+
+#endif /* !ARCH_X86_64 */
+
+#define BINC AV_STRINGIFY(4* MAX_CHANNELS)
+#define IOFFS AV_STRINGIFY(4*(MAX_FIR_ORDER + MAX_BLOCKSIZE))
+#define IOFFC AV_STRINGIFY(4* MAX_FIR_ORDER)
+
+#define FIRMUL(label, offset) MLPMUL(label, #offset, "0", "0")
+#define IIRMUL(label, offset) MLPMUL(label, #offset, IOFFS, IOFFC)
+
+static void mlp_filter_channel_x86(int32_t *state, const int32_t *coeff,
+ int firorder, int iirorder,
+ unsigned int filter_shift, int32_t mask,
+ int blocksize, int32_t *sample_buffer)
+{
+ const void *firjump = firtable[firorder];
+ const void *iirjump = iirtable[iirorder];
+
+ blocksize = -blocksize;
+
+ __asm__ volatile(
+ "1: \n\t"
+ CLEAR_ACCUM
+ "jmp *%5 \n\t"
+ FIRMUL (ff_mlp_firorder_8, 0x1c )
+ FIRMUL (ff_mlp_firorder_7, 0x18 )
+ FIRMUL (ff_mlp_firorder_6, 0x14 )
+ FIRMUL (ff_mlp_firorder_5, 0x10 )
+ FIRMUL (ff_mlp_firorder_4, 0x0c )
+ FIRMULREG(ff_mlp_firorder_3, 0x08,10)
+ FIRMULREG(ff_mlp_firorder_2, 0x04, 9)
+ FIRMULREG(ff_mlp_firorder_1, 0x00, 8)
+ LABEL_MANGLE(ff_mlp_firorder_0)":\n\t"
+ "jmp *%6 \n\t"
+ IIRMUL (ff_mlp_iirorder_4, 0x0c )
+ IIRMUL (ff_mlp_iirorder_3, 0x08 )
+ IIRMUL (ff_mlp_iirorder_2, 0x04 )
+ IIRMUL (ff_mlp_iirorder_1, 0x00 )
+ LABEL_MANGLE(ff_mlp_iirorder_0)":\n\t"
+ SHIFT_ACCUM
+ "mov "RESULT" ,"ACCUM" \n\t"
+ "add (%2) ,"RESULT" \n\t"
+ "and %4 ,"RESULT" \n\t"
+ "sub $4 , %0 \n\t"
+ "mov "RESULT32", (%0) \n\t"
+ "mov "RESULT32", (%2) \n\t"
+ "add $"BINC" , %2 \n\t"
+ "sub "ACCUM" ,"RESULT" \n\t"
+ "mov "RESULT32","IOFFS"(%0) \n\t"
+ "incl %3 \n\t"
+ "js 1b \n\t"
+ : /* 0*/"+r"(state),
+ /* 1*/"+r"(coeff),
+ /* 2*/"+r"(sample_buffer),
+#if ARCH_X86_64
+ /* 3*/"+r"(blocksize)
+ : /* 4*/"r"((x86_reg)mask), /* 5*/"r"(firjump),
+ /* 6*/"r"(iirjump) , /* 7*/"c"(filter_shift)
+ , /* 8*/"r"((int64_t)coeff[0])
+ , /* 9*/"r"((int64_t)coeff[1])
+ , /*10*/"r"((int64_t)coeff[2])
+ : "rax", "rdx", "rsi"
+#else /* ARCH_X86_32 */
+ /* 3*/"+m"(blocksize)
+ : /* 4*/"m"( mask), /* 5*/"m"(firjump),
+ /* 6*/"m"(iirjump) , /* 7*/"m"(filter_shift)
+ : "eax", "edx", "esi", "ecx"
+#endif /* !ARCH_X86_64 */
+ );
+}
+
+#endif /* HAVE_7REGS && HAVE_TEN_OPERANDS */
+
+void ff_mlp_init_x86(DSPContext* c, AVCodecContext *avctx)
+{
+#if HAVE_7REGS && HAVE_TEN_OPERANDS
+ c->mlp_filter_channel = mlp_filter_channel_x86;
+#endif
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/mmx.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/mmx.h
new file mode 100644
index 000000000..ef064e3e3
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/mmx.h
@@ -0,0 +1,267 @@
+/*
+ * mmx.h
+ * Copyright (C) 1997-2001 H. Dietz and R. Fisher
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef AVCODEC_X86_MMX_H
+#define AVCODEC_X86_MMX_H
+
+#warning Everything in this header is deprecated, use plain __asm__()! New code using this header will be rejected.
+
+
+#define mmx_i2r(op,imm,reg) \
+ __asm__ volatile (#op " %0, %%" #reg \
+ : /* nothing */ \
+ : "i" (imm) )
+
+#define mmx_m2r(op,mem,reg) \
+ __asm__ volatile (#op " %0, %%" #reg \
+ : /* nothing */ \
+ : "m" (mem))
+
+#define mmx_r2m(op,reg,mem) \
+ __asm__ volatile (#op " %%" #reg ", %0" \
+ : "=m" (mem) \
+ : /* nothing */ )
+
+#define mmx_r2r(op,regs,regd) \
+ __asm__ volatile (#op " %" #regs ", %" #regd)
+
+
+#define emms() __asm__ volatile ("emms")
+
+#define movd_m2r(var,reg) mmx_m2r (movd, var, reg)
+#define movd_r2m(reg,var) mmx_r2m (movd, reg, var)
+#define movd_r2r(regs,regd) mmx_r2r (movd, regs, regd)
+
+#define movq_m2r(var,reg) mmx_m2r (movq, var, reg)
+#define movq_r2m(reg,var) mmx_r2m (movq, reg, var)
+#define movq_r2r(regs,regd) mmx_r2r (movq, regs, regd)
+
+#define packssdw_m2r(var,reg) mmx_m2r (packssdw, var, reg)
+#define packssdw_r2r(regs,regd) mmx_r2r (packssdw, regs, regd)
+#define packsswb_m2r(var,reg) mmx_m2r (packsswb, var, reg)
+#define packsswb_r2r(regs,regd) mmx_r2r (packsswb, regs, regd)
+
+#define packuswb_m2r(var,reg) mmx_m2r (packuswb, var, reg)
+#define packuswb_r2r(regs,regd) mmx_r2r (packuswb, regs, regd)
+
+#define paddb_m2r(var,reg) mmx_m2r (paddb, var, reg)
+#define paddb_r2r(regs,regd) mmx_r2r (paddb, regs, regd)
+#define paddd_m2r(var,reg) mmx_m2r (paddd, var, reg)
+#define paddd_r2r(regs,regd) mmx_r2r (paddd, regs, regd)
+#define paddw_m2r(var,reg) mmx_m2r (paddw, var, reg)
+#define paddw_r2r(regs,regd) mmx_r2r (paddw, regs, regd)
+
+#define paddsb_m2r(var,reg) mmx_m2r (paddsb, var, reg)
+#define paddsb_r2r(regs,regd) mmx_r2r (paddsb, regs, regd)
+#define paddsw_m2r(var,reg) mmx_m2r (paddsw, var, reg)
+#define paddsw_r2r(regs,regd) mmx_r2r (paddsw, regs, regd)
+
+#define paddusb_m2r(var,reg) mmx_m2r (paddusb, var, reg)
+#define paddusb_r2r(regs,regd) mmx_r2r (paddusb, regs, regd)
+#define paddusw_m2r(var,reg) mmx_m2r (paddusw, var, reg)
+#define paddusw_r2r(regs,regd) mmx_r2r (paddusw, regs, regd)
+
+#define pand_m2r(var,reg) mmx_m2r (pand, var, reg)
+#define pand_r2r(regs,regd) mmx_r2r (pand, regs, regd)
+
+#define pandn_m2r(var,reg) mmx_m2r (pandn, var, reg)
+#define pandn_r2r(regs,regd) mmx_r2r (pandn, regs, regd)
+
+#define pcmpeqb_m2r(var,reg) mmx_m2r (pcmpeqb, var, reg)
+#define pcmpeqb_r2r(regs,regd) mmx_r2r (pcmpeqb, regs, regd)
+#define pcmpeqd_m2r(var,reg) mmx_m2r (pcmpeqd, var, reg)
+#define pcmpeqd_r2r(regs,regd) mmx_r2r (pcmpeqd, regs, regd)
+#define pcmpeqw_m2r(var,reg) mmx_m2r (pcmpeqw, var, reg)
+#define pcmpeqw_r2r(regs,regd) mmx_r2r (pcmpeqw, regs, regd)
+
+#define pcmpgtb_m2r(var,reg) mmx_m2r (pcmpgtb, var, reg)
+#define pcmpgtb_r2r(regs,regd) mmx_r2r (pcmpgtb, regs, regd)
+#define pcmpgtd_m2r(var,reg) mmx_m2r (pcmpgtd, var, reg)
+#define pcmpgtd_r2r(regs,regd) mmx_r2r (pcmpgtd, regs, regd)
+#define pcmpgtw_m2r(var,reg) mmx_m2r (pcmpgtw, var, reg)
+#define pcmpgtw_r2r(regs,regd) mmx_r2r (pcmpgtw, regs, regd)
+
+#define pmaddwd_m2r(var,reg) mmx_m2r (pmaddwd, var, reg)
+#define pmaddwd_r2r(regs,regd) mmx_r2r (pmaddwd, regs, regd)
+
+#define pmulhw_m2r(var,reg) mmx_m2r (pmulhw, var, reg)
+#define pmulhw_r2r(regs,regd) mmx_r2r (pmulhw, regs, regd)
+
+#define pmullw_m2r(var,reg) mmx_m2r (pmullw, var, reg)
+#define pmullw_r2r(regs,regd) mmx_r2r (pmullw, regs, regd)
+
+#define por_m2r(var,reg) mmx_m2r (por, var, reg)
+#define por_r2r(regs,regd) mmx_r2r (por, regs, regd)
+
+#define pslld_i2r(imm,reg) mmx_i2r (pslld, imm, reg)
+#define pslld_m2r(var,reg) mmx_m2r (pslld, var, reg)
+#define pslld_r2r(regs,regd) mmx_r2r (pslld, regs, regd)
+#define psllq_i2r(imm,reg) mmx_i2r (psllq, imm, reg)
+#define psllq_m2r(var,reg) mmx_m2r (psllq, var, reg)
+#define psllq_r2r(regs,regd) mmx_r2r (psllq, regs, regd)
+#define psllw_i2r(imm,reg) mmx_i2r (psllw, imm, reg)
+#define psllw_m2r(var,reg) mmx_m2r (psllw, var, reg)
+#define psllw_r2r(regs,regd) mmx_r2r (psllw, regs, regd)
+
+#define psrad_i2r(imm,reg) mmx_i2r (psrad, imm, reg)
+#define psrad_m2r(var,reg) mmx_m2r (psrad, var, reg)
+#define psrad_r2r(regs,regd) mmx_r2r (psrad, regs, regd)
+#define psraw_i2r(imm,reg) mmx_i2r (psraw, imm, reg)
+#define psraw_m2r(var,reg) mmx_m2r (psraw, var, reg)
+#define psraw_r2r(regs,regd) mmx_r2r (psraw, regs, regd)
+
+#define psrld_i2r(imm,reg) mmx_i2r (psrld, imm, reg)
+#define psrld_m2r(var,reg) mmx_m2r (psrld, var, reg)
+#define psrld_r2r(regs,regd) mmx_r2r (psrld, regs, regd)
+#define psrlq_i2r(imm,reg) mmx_i2r (psrlq, imm, reg)
+#define psrlq_m2r(var,reg) mmx_m2r (psrlq, var, reg)
+#define psrlq_r2r(regs,regd) mmx_r2r (psrlq, regs, regd)
+#define psrlw_i2r(imm,reg) mmx_i2r (psrlw, imm, reg)
+#define psrlw_m2r(var,reg) mmx_m2r (psrlw, var, reg)
+#define psrlw_r2r(regs,regd) mmx_r2r (psrlw, regs, regd)
+
+#define psubb_m2r(var,reg) mmx_m2r (psubb, var, reg)
+#define psubb_r2r(regs,regd) mmx_r2r (psubb, regs, regd)
+#define psubd_m2r(var,reg) mmx_m2r (psubd, var, reg)
+#define psubd_r2r(regs,regd) mmx_r2r (psubd, regs, regd)
+#define psubw_m2r(var,reg) mmx_m2r (psubw, var, reg)
+#define psubw_r2r(regs,regd) mmx_r2r (psubw, regs, regd)
+
+#define psubsb_m2r(var,reg) mmx_m2r (psubsb, var, reg)
+#define psubsb_r2r(regs,regd) mmx_r2r (psubsb, regs, regd)
+#define psubsw_m2r(var,reg) mmx_m2r (psubsw, var, reg)
+#define psubsw_r2r(regs,regd) mmx_r2r (psubsw, regs, regd)
+
+#define psubusb_m2r(var,reg) mmx_m2r (psubusb, var, reg)
+#define psubusb_r2r(regs,regd) mmx_r2r (psubusb, regs, regd)
+#define psubusw_m2r(var,reg) mmx_m2r (psubusw, var, reg)
+#define psubusw_r2r(regs,regd) mmx_r2r (psubusw, regs, regd)
+
+#define punpckhbw_m2r(var,reg) mmx_m2r (punpckhbw, var, reg)
+#define punpckhbw_r2r(regs,regd) mmx_r2r (punpckhbw, regs, regd)
+#define punpckhdq_m2r(var,reg) mmx_m2r (punpckhdq, var, reg)
+#define punpckhdq_r2r(regs,regd) mmx_r2r (punpckhdq, regs, regd)
+#define punpckhwd_m2r(var,reg) mmx_m2r (punpckhwd, var, reg)
+#define punpckhwd_r2r(regs,regd) mmx_r2r (punpckhwd, regs, regd)
+
+#define punpcklbw_m2r(var,reg) mmx_m2r (punpcklbw, var, reg)
+#define punpcklbw_r2r(regs,regd) mmx_r2r (punpcklbw, regs, regd)
+#define punpckldq_m2r(var,reg) mmx_m2r (punpckldq, var, reg)
+#define punpckldq_r2r(regs,regd) mmx_r2r (punpckldq, regs, regd)
+#define punpcklwd_m2r(var,reg) mmx_m2r (punpcklwd, var, reg)
+#define punpcklwd_r2r(regs,regd) mmx_r2r (punpcklwd, regs, regd)
+
+#define pxor_m2r(var,reg) mmx_m2r (pxor, var, reg)
+#define pxor_r2r(regs,regd) mmx_r2r (pxor, regs, regd)
+
+
+/* 3DNOW extensions */
+
+#define pavgusb_m2r(var,reg) mmx_m2r (pavgusb, var, reg)
+#define pavgusb_r2r(regs,regd) mmx_r2r (pavgusb, regs, regd)
+
+
+/* AMD MMX extensions - also available in Intel SSE */
+
+
+#define mmx_m2ri(op,mem,reg,imm) \
+ __asm__ volatile (#op " %1, %0, %%" #reg \
+ : /* nothing */ \
+ : "m" (mem), "i" (imm))
+#define mmx_r2ri(op,regs,regd,imm) \
+ __asm__ volatile (#op " %0, %%" #regs ", %%" #regd \
+ : /* nothing */ \
+ : "i" (imm) )
+
+#define mmx_fetch(mem,hint) \
+ __asm__ volatile ("prefetch" #hint " %0" \
+ : /* nothing */ \
+ : "m" (mem))
+
+
+#define maskmovq(regs,maskreg) mmx_r2ri (maskmovq, regs, maskreg)
+
+#define movntq_r2m(mmreg,var) mmx_r2m (movntq, mmreg, var)
+
+#define pavgb_m2r(var,reg) mmx_m2r (pavgb, var, reg)
+#define pavgb_r2r(regs,regd) mmx_r2r (pavgb, regs, regd)
+#define pavgw_m2r(var,reg) mmx_m2r (pavgw, var, reg)
+#define pavgw_r2r(regs,regd) mmx_r2r (pavgw, regs, regd)
+
+#define pextrw_r2r(mmreg,reg,imm) mmx_r2ri (pextrw, mmreg, reg, imm)
+
+#define pinsrw_r2r(reg,mmreg,imm) mmx_r2ri (pinsrw, reg, mmreg, imm)
+
+#define pmaxsw_m2r(var,reg) mmx_m2r (pmaxsw, var, reg)
+#define pmaxsw_r2r(regs,regd) mmx_r2r (pmaxsw, regs, regd)
+
+#define pmaxub_m2r(var,reg) mmx_m2r (pmaxub, var, reg)
+#define pmaxub_r2r(regs,regd) mmx_r2r (pmaxub, regs, regd)
+
+#define pminsw_m2r(var,reg) mmx_m2r (pminsw, var, reg)
+#define pminsw_r2r(regs,regd) mmx_r2r (pminsw, regs, regd)
+
+#define pminub_m2r(var,reg) mmx_m2r (pminub, var, reg)
+#define pminub_r2r(regs,regd) mmx_r2r (pminub, regs, regd)
+
+#define pmovmskb(mmreg,reg) \
+ __asm__ volatile ("movmskps %" #mmreg ", %" #reg)
+
+#define pmulhuw_m2r(var,reg) mmx_m2r (pmulhuw, var, reg)
+#define pmulhuw_r2r(regs,regd) mmx_r2r (pmulhuw, regs, regd)
+
+#define prefetcht0(mem) mmx_fetch (mem, t0)
+#define prefetcht1(mem) mmx_fetch (mem, t1)
+#define prefetcht2(mem) mmx_fetch (mem, t2)
+#define prefetchnta(mem) mmx_fetch (mem, nta)
+
+#define psadbw_m2r(var,reg) mmx_m2r (psadbw, var, reg)
+#define psadbw_r2r(regs,regd) mmx_r2r (psadbw, regs, regd)
+
+#define pshufw_m2r(var,reg,imm) mmx_m2ri(pshufw, var, reg, imm)
+#define pshufw_r2r(regs,regd,imm) mmx_r2ri(pshufw, regs, regd, imm)
+
+#define sfence() __asm__ volatile ("sfence\n\t")
+
+/* SSE2 */
+#define pshufhw_m2r(var,reg,imm) mmx_m2ri(pshufhw, var, reg, imm)
+#define pshufhw_r2r(regs,regd,imm) mmx_r2ri(pshufhw, regs, regd, imm)
+#define pshuflw_m2r(var,reg,imm) mmx_m2ri(pshuflw, var, reg, imm)
+#define pshuflw_r2r(regs,regd,imm) mmx_r2ri(pshuflw, regs, regd, imm)
+
+#define pshufd_r2r(regs,regd,imm) mmx_r2ri(pshufd, regs, regd, imm)
+
+#define movdqa_m2r(var,reg) mmx_m2r (movdqa, var, reg)
+#define movdqa_r2m(reg,var) mmx_r2m (movdqa, reg, var)
+#define movdqa_r2r(regs,regd) mmx_r2r (movdqa, regs, regd)
+#define movdqu_m2r(var,reg) mmx_m2r (movdqu, var, reg)
+#define movdqu_r2m(reg,var) mmx_r2m (movdqu, reg, var)
+#define movdqu_r2r(regs,regd) mmx_r2r (movdqu, regs, regd)
+
+#define pmullw_r2m(reg,var) mmx_r2m (pmullw, reg, var)
+
+#define pslldq_i2r(imm,reg) mmx_i2r (pslldq, imm, reg)
+#define psrldq_i2r(imm,reg) mmx_i2r (psrldq, imm, reg)
+
+#define punpcklqdq_r2r(regs,regd) mmx_r2r (punpcklqdq, regs, regd)
+#define punpckhqdq_r2r(regs,regd) mmx_r2r (punpckhqdq, regs, regd)
+
+
+#endif /* AVCODEC_X86_MMX_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/mpegvideo_mmx.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/mpegvideo_mmx.c
new file mode 100644
index 000000000..f9a8847de
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/mpegvideo_mmx.c
@@ -0,0 +1,660 @@
+/*
+ * The simplest mpeg encoder (well, it was the simplest!)
+ * Copyright (c) 2000,2001 Fabrice Bellard
+ *
+ * Optimized for ia32 CPUs by Nick Kurshev <nickols_k@mail.ru>
+ * h263, mpeg1, mpeg2 dequantizer & draw_edges by Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/x86_cpu.h"
+#include "libavcodec/avcodec.h"
+#include "libavcodec/dsputil.h"
+#include "libavcodec/mpegvideo.h"
+#include "dsputil_mmx.h"
+
+extern uint16_t inv_zigzag_direct16[64];
+
+
+static void dct_unquantize_h263_intra_mmx(MpegEncContext *s,
+ DCTELEM *block, int n, int qscale)
+{
+ x86_reg level, qmul, qadd, nCoeffs;
+
+ qmul = qscale << 1;
+
+ assert(s->block_last_index[n]>=0 || s->h263_aic);
+
+ if (!s->h263_aic) {
+ if (n < 4)
+ level = block[0] * s->y_dc_scale;
+ else
+ level = block[0] * s->c_dc_scale;
+ qadd = (qscale - 1) | 1;
+ }else{
+ qadd = 0;
+ level= block[0];
+ }
+ if(s->ac_pred)
+ nCoeffs=63;
+ else
+ nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
+//printf("%d %d ", qmul, qadd);
+__asm__ volatile(
+ "movd %1, %%mm6 \n\t" //qmul
+ "packssdw %%mm6, %%mm6 \n\t"
+ "packssdw %%mm6, %%mm6 \n\t"
+ "movd %2, %%mm5 \n\t" //qadd
+ "pxor %%mm7, %%mm7 \n\t"
+ "packssdw %%mm5, %%mm5 \n\t"
+ "packssdw %%mm5, %%mm5 \n\t"
+ "psubw %%mm5, %%mm7 \n\t"
+ "pxor %%mm4, %%mm4 \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ "movq (%0, %3), %%mm0 \n\t"
+ "movq 8(%0, %3), %%mm1 \n\t"
+
+ "pmullw %%mm6, %%mm0 \n\t"
+ "pmullw %%mm6, %%mm1 \n\t"
+
+ "movq (%0, %3), %%mm2 \n\t"
+ "movq 8(%0, %3), %%mm3 \n\t"
+
+                "pcmpgtw %%mm4, %%mm2           \n\t" // block[i] > 0 ? -1 : 0
+                "pcmpgtw %%mm4, %%mm3           \n\t" // block[i] > 0 ? -1 : 0
+
+ "pxor %%mm2, %%mm0 \n\t"
+ "pxor %%mm3, %%mm1 \n\t"
+
+ "paddw %%mm7, %%mm0 \n\t"
+ "paddw %%mm7, %%mm1 \n\t"
+
+ "pxor %%mm0, %%mm2 \n\t"
+ "pxor %%mm1, %%mm3 \n\t"
+
+ "pcmpeqw %%mm7, %%mm0 \n\t" // block[i] == 0 ? -1 : 0
+ "pcmpeqw %%mm7, %%mm1 \n\t" // block[i] == 0 ? -1 : 0
+
+ "pandn %%mm2, %%mm0 \n\t"
+ "pandn %%mm3, %%mm1 \n\t"
+
+ "movq %%mm0, (%0, %3) \n\t"
+ "movq %%mm1, 8(%0, %3) \n\t"
+
+ "add $16, %3 \n\t"
+ "jng 1b \n\t"
+ ::"r" (block+nCoeffs), "rm"(qmul), "rm" (qadd), "r" (2*(-nCoeffs))
+ : "memory"
+ );
+ block[0]= level;
+}
+
+
+static void dct_unquantize_h263_inter_mmx(MpegEncContext *s,
+ DCTELEM *block, int n, int qscale)
+{
+ x86_reg qmul, qadd, nCoeffs;
+
+ qmul = qscale << 1;
+ qadd = (qscale - 1) | 1;
+
+ assert(s->block_last_index[n]>=0 || s->h263_aic);
+
+ nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
+//printf("%d %d ", qmul, qadd);
+__asm__ volatile(
+ "movd %1, %%mm6 \n\t" //qmul
+ "packssdw %%mm6, %%mm6 \n\t"
+ "packssdw %%mm6, %%mm6 \n\t"
+ "movd %2, %%mm5 \n\t" //qadd
+ "pxor %%mm7, %%mm7 \n\t"
+ "packssdw %%mm5, %%mm5 \n\t"
+ "packssdw %%mm5, %%mm5 \n\t"
+ "psubw %%mm5, %%mm7 \n\t"
+ "pxor %%mm4, %%mm4 \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ "movq (%0, %3), %%mm0 \n\t"
+ "movq 8(%0, %3), %%mm1 \n\t"
+
+ "pmullw %%mm6, %%mm0 \n\t"
+ "pmullw %%mm6, %%mm1 \n\t"
+
+ "movq (%0, %3), %%mm2 \n\t"
+ "movq 8(%0, %3), %%mm3 \n\t"
+
+                "pcmpgtw %%mm4, %%mm2           \n\t" // block[i] > 0 ? -1 : 0
+                "pcmpgtw %%mm4, %%mm3           \n\t" // block[i] > 0 ? -1 : 0
+
+ "pxor %%mm2, %%mm0 \n\t"
+ "pxor %%mm3, %%mm1 \n\t"
+
+ "paddw %%mm7, %%mm0 \n\t"
+ "paddw %%mm7, %%mm1 \n\t"
+
+ "pxor %%mm0, %%mm2 \n\t"
+ "pxor %%mm1, %%mm3 \n\t"
+
+ "pcmpeqw %%mm7, %%mm0 \n\t" // block[i] == 0 ? -1 : 0
+ "pcmpeqw %%mm7, %%mm1 \n\t" // block[i] == 0 ? -1 : 0
+
+ "pandn %%mm2, %%mm0 \n\t"
+ "pandn %%mm3, %%mm1 \n\t"
+
+ "movq %%mm0, (%0, %3) \n\t"
+ "movq %%mm1, 8(%0, %3) \n\t"
+
+ "add $16, %3 \n\t"
+ "jng 1b \n\t"
+ ::"r" (block+nCoeffs), "rm"(qmul), "rm" (qadd), "r" (2*(-nCoeffs))
+ : "memory"
+ );
+}
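+
+/*
+ Scalar sketch of what the two H.263 dequant loops above compute per
+ coefficient (illustration only, standard H.263 inverse quantization):
+
+   if (block[i]) {
+       if (block[i] > 0) block[i] = block[i] * qmul + qadd;
+       else              block[i] = block[i] * qmul - qadd;
+   }
+
+ Zero coefficients stay zero thanks to the pcmpeqw/pandn masking.
+*/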
+
+
+/*
+  NK:
+  Note: looking at PARANOID:
+  "enable all paranoid tests for rounding, overflows, etc..."
+
+#ifdef PARANOID
+  if (level < -2048 || level > 2047)
+                  fprintf(stderr, "unquant error %d %d\n", i, level);
+#endif
+  We can assume that the result of the two multiplications cannot exceed 0xFFFF,
+  i.e. it fits in 16 bits, so only the PMULLW instruction is used here and a
+  more complex full-width multiplication can be avoided.
+=====================================================
+ Full formula for the multiplication of two integers
+ represented as high:low word pairs:
+ input:  value1 = high1:low1
+         value2 = high2:low2
+ output: value3 = value1*value2
+         value3 = high3:low3 (on overflow: modulo 2^32 wrap-around)
+ This means that for 0x123456 * 0x123456 the correct result would be 0x766cb0ce4,
+ but this algorithm computes only 0x66cb0ce4, because the operands are limited
+ to 16 bits.
+ ---------------------------------
+ tlow1 = high1*low2
+ tlow2 = high2*low1
+ tlow1 = tlow1 + tlow2
+ high3:low3 = low1*low2
+ high3 += tlow1
+*/
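+/*
+ A minimal plain-C sketch of the scheme above (illustration only; the helper
+ name mul32_via_16bit_parts is made up for this example):
+
+   static uint32_t mul32_via_16bit_parts(uint32_t value1, uint32_t value2)
+   {
+       uint16_t high1 = value1 >> 16, low1 = value1 & 0xFFFF;
+       uint16_t high2 = value2 >> 16, low2 = value2 & 0xFFFF;
+       uint32_t tlow1 = (uint32_t)high1 * low2;
+       uint32_t tlow2 = (uint32_t)high2 * low1;
+       uint32_t low   = (uint32_t)low1  * low2;   // high3:low3
+       tlow1 += tlow2;
+       return low + (tlow1 << 16);                // high3 += tlow1 (mod 2^32)
+   }
+*/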
+static void dct_unquantize_mpeg1_intra_mmx(MpegEncContext *s,
+ DCTELEM *block, int n, int qscale)
+{
+ x86_reg nCoeffs;
+ const uint16_t *quant_matrix;
+ int block0;
+
+ assert(s->block_last_index[n]>=0);
+
+ nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ]+1;
+
+ if (n < 4)
+ block0 = block[0] * s->y_dc_scale;
+ else
+ block0 = block[0] * s->c_dc_scale;
+ /* XXX: only mpeg1 */
+ quant_matrix = s->intra_matrix;
+__asm__ volatile(
+ "pcmpeqw %%mm7, %%mm7 \n\t"
+ "psrlw $15, %%mm7 \n\t"
+ "movd %2, %%mm6 \n\t"
+ "packssdw %%mm6, %%mm6 \n\t"
+ "packssdw %%mm6, %%mm6 \n\t"
+ "mov %3, %%"REG_a" \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ "movq (%0, %%"REG_a"), %%mm0 \n\t"
+ "movq 8(%0, %%"REG_a"), %%mm1 \n\t"
+ "movq (%1, %%"REG_a"), %%mm4 \n\t"
+ "movq 8(%1, %%"REG_a"), %%mm5 \n\t"
+ "pmullw %%mm6, %%mm4 \n\t" // q=qscale*quant_matrix[i]
+ "pmullw %%mm6, %%mm5 \n\t" // q=qscale*quant_matrix[i]
+ "pxor %%mm2, %%mm2 \n\t"
+ "pxor %%mm3, %%mm3 \n\t"
+ "pcmpgtw %%mm0, %%mm2 \n\t" // block[i] < 0 ? -1 : 0
+ "pcmpgtw %%mm1, %%mm3 \n\t" // block[i] < 0 ? -1 : 0
+ "pxor %%mm2, %%mm0 \n\t"
+ "pxor %%mm3, %%mm1 \n\t"
+ "psubw %%mm2, %%mm0 \n\t" // abs(block[i])
+ "psubw %%mm3, %%mm1 \n\t" // abs(block[i])
+ "pmullw %%mm4, %%mm0 \n\t" // abs(block[i])*q
+ "pmullw %%mm5, %%mm1 \n\t" // abs(block[i])*q
+ "pxor %%mm4, %%mm4 \n\t"
+ "pxor %%mm5, %%mm5 \n\t" // FIXME slow
+ "pcmpeqw (%0, %%"REG_a"), %%mm4 \n\t" // block[i] == 0 ? -1 : 0
+ "pcmpeqw 8(%0, %%"REG_a"), %%mm5\n\t" // block[i] == 0 ? -1 : 0
+ "psraw $3, %%mm0 \n\t"
+ "psraw $3, %%mm1 \n\t"
+ "psubw %%mm7, %%mm0 \n\t"
+ "psubw %%mm7, %%mm1 \n\t"
+ "por %%mm7, %%mm0 \n\t"
+ "por %%mm7, %%mm1 \n\t"
+ "pxor %%mm2, %%mm0 \n\t"
+ "pxor %%mm3, %%mm1 \n\t"
+ "psubw %%mm2, %%mm0 \n\t"
+ "psubw %%mm3, %%mm1 \n\t"
+ "pandn %%mm0, %%mm4 \n\t"
+ "pandn %%mm1, %%mm5 \n\t"
+ "movq %%mm4, (%0, %%"REG_a") \n\t"
+ "movq %%mm5, 8(%0, %%"REG_a") \n\t"
+
+ "add $16, %%"REG_a" \n\t"
+ "js 1b \n\t"
+ ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "rm" (qscale), "g" (-2*nCoeffs)
+ : "%"REG_a, "memory"
+ );
+ block[0]= block0;
+}
+
+static void dct_unquantize_mpeg1_inter_mmx(MpegEncContext *s,
+ DCTELEM *block, int n, int qscale)
+{
+ x86_reg nCoeffs;
+ const uint16_t *quant_matrix;
+
+ assert(s->block_last_index[n]>=0);
+
+ nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ]+1;
+
+ quant_matrix = s->inter_matrix;
+__asm__ volatile(
+ "pcmpeqw %%mm7, %%mm7 \n\t"
+ "psrlw $15, %%mm7 \n\t"
+ "movd %2, %%mm6 \n\t"
+ "packssdw %%mm6, %%mm6 \n\t"
+ "packssdw %%mm6, %%mm6 \n\t"
+ "mov %3, %%"REG_a" \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ "movq (%0, %%"REG_a"), %%mm0 \n\t"
+ "movq 8(%0, %%"REG_a"), %%mm1 \n\t"
+ "movq (%1, %%"REG_a"), %%mm4 \n\t"
+ "movq 8(%1, %%"REG_a"), %%mm5 \n\t"
+ "pmullw %%mm6, %%mm4 \n\t" // q=qscale*quant_matrix[i]
+ "pmullw %%mm6, %%mm5 \n\t" // q=qscale*quant_matrix[i]
+ "pxor %%mm2, %%mm2 \n\t"
+ "pxor %%mm3, %%mm3 \n\t"
+ "pcmpgtw %%mm0, %%mm2 \n\t" // block[i] < 0 ? -1 : 0
+ "pcmpgtw %%mm1, %%mm3 \n\t" // block[i] < 0 ? -1 : 0
+ "pxor %%mm2, %%mm0 \n\t"
+ "pxor %%mm3, %%mm1 \n\t"
+ "psubw %%mm2, %%mm0 \n\t" // abs(block[i])
+ "psubw %%mm3, %%mm1 \n\t" // abs(block[i])
+ "paddw %%mm0, %%mm0 \n\t" // abs(block[i])*2
+ "paddw %%mm1, %%mm1 \n\t" // abs(block[i])*2
+ "paddw %%mm7, %%mm0 \n\t" // abs(block[i])*2 + 1
+ "paddw %%mm7, %%mm1 \n\t" // abs(block[i])*2 + 1
+ "pmullw %%mm4, %%mm0 \n\t" // (abs(block[i])*2 + 1)*q
+ "pmullw %%mm5, %%mm1 \n\t" // (abs(block[i])*2 + 1)*q
+ "pxor %%mm4, %%mm4 \n\t"
+ "pxor %%mm5, %%mm5 \n\t" // FIXME slow
+ "pcmpeqw (%0, %%"REG_a"), %%mm4 \n\t" // block[i] == 0 ? -1 : 0
+ "pcmpeqw 8(%0, %%"REG_a"), %%mm5\n\t" // block[i] == 0 ? -1 : 0
+ "psraw $4, %%mm0 \n\t"
+ "psraw $4, %%mm1 \n\t"
+ "psubw %%mm7, %%mm0 \n\t"
+ "psubw %%mm7, %%mm1 \n\t"
+ "por %%mm7, %%mm0 \n\t"
+ "por %%mm7, %%mm1 \n\t"
+ "pxor %%mm2, %%mm0 \n\t"
+ "pxor %%mm3, %%mm1 \n\t"
+ "psubw %%mm2, %%mm0 \n\t"
+ "psubw %%mm3, %%mm1 \n\t"
+ "pandn %%mm0, %%mm4 \n\t"
+ "pandn %%mm1, %%mm5 \n\t"
+ "movq %%mm4, (%0, %%"REG_a") \n\t"
+ "movq %%mm5, 8(%0, %%"REG_a") \n\t"
+
+ "add $16, %%"REG_a" \n\t"
+ "js 1b \n\t"
+ ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "rm" (qscale), "g" (-2*nCoeffs)
+ : "%"REG_a, "memory"
+ );
+}
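+
+/*
+ Scalar sketch of the two MPEG-1 loops above (illustration only; all products
+ are truncated to 16 bits exactly as in the MMX code). For each non-zero
+ coefficient:
+
+   intra: level = (abs(block[i]) * qscale * quant_matrix[i]) >> 3;
+   inter: level = ((abs(block[i]) * 2 + 1) * qscale * quant_matrix[i]) >> 4;
+   level = (level - 1) | 1;              // the psubw/por with the 1s in %%mm7
+   block[i] = sign(block[i]) * level;
+
+ The DC coefficient of the intra case is recomputed separately via block0.
+*/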
+
+static void dct_unquantize_mpeg2_intra_mmx(MpegEncContext *s,
+ DCTELEM *block, int n, int qscale)
+{
+ x86_reg nCoeffs;
+ const uint16_t *quant_matrix;
+ int block0;
+
+ assert(s->block_last_index[n]>=0);
+
+ if(s->alternate_scan) nCoeffs= 63; //FIXME
+ else nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ];
+
+ if (n < 4)
+ block0 = block[0] * s->y_dc_scale;
+ else
+ block0 = block[0] * s->c_dc_scale;
+ quant_matrix = s->intra_matrix;
+__asm__ volatile(
+ "pcmpeqw %%mm7, %%mm7 \n\t"
+ "psrlw $15, %%mm7 \n\t"
+ "movd %2, %%mm6 \n\t"
+ "packssdw %%mm6, %%mm6 \n\t"
+ "packssdw %%mm6, %%mm6 \n\t"
+ "mov %3, %%"REG_a" \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ "movq (%0, %%"REG_a"), %%mm0 \n\t"
+ "movq 8(%0, %%"REG_a"), %%mm1 \n\t"
+ "movq (%1, %%"REG_a"), %%mm4 \n\t"
+ "movq 8(%1, %%"REG_a"), %%mm5 \n\t"
+ "pmullw %%mm6, %%mm4 \n\t" // q=qscale*quant_matrix[i]
+ "pmullw %%mm6, %%mm5 \n\t" // q=qscale*quant_matrix[i]
+ "pxor %%mm2, %%mm2 \n\t"
+ "pxor %%mm3, %%mm3 \n\t"
+ "pcmpgtw %%mm0, %%mm2 \n\t" // block[i] < 0 ? -1 : 0
+ "pcmpgtw %%mm1, %%mm3 \n\t" // block[i] < 0 ? -1 : 0
+ "pxor %%mm2, %%mm0 \n\t"
+ "pxor %%mm3, %%mm1 \n\t"
+ "psubw %%mm2, %%mm0 \n\t" // abs(block[i])
+ "psubw %%mm3, %%mm1 \n\t" // abs(block[i])
+ "pmullw %%mm4, %%mm0 \n\t" // abs(block[i])*q
+ "pmullw %%mm5, %%mm1 \n\t" // abs(block[i])*q
+ "pxor %%mm4, %%mm4 \n\t"
+ "pxor %%mm5, %%mm5 \n\t" // FIXME slow
+ "pcmpeqw (%0, %%"REG_a"), %%mm4 \n\t" // block[i] == 0 ? -1 : 0
+ "pcmpeqw 8(%0, %%"REG_a"), %%mm5\n\t" // block[i] == 0 ? -1 : 0
+ "psraw $3, %%mm0 \n\t"
+ "psraw $3, %%mm1 \n\t"
+ "pxor %%mm2, %%mm0 \n\t"
+ "pxor %%mm3, %%mm1 \n\t"
+ "psubw %%mm2, %%mm0 \n\t"
+ "psubw %%mm3, %%mm1 \n\t"
+ "pandn %%mm0, %%mm4 \n\t"
+ "pandn %%mm1, %%mm5 \n\t"
+ "movq %%mm4, (%0, %%"REG_a") \n\t"
+ "movq %%mm5, 8(%0, %%"REG_a") \n\t"
+
+ "add $16, %%"REG_a" \n\t"
+ "jng 1b \n\t"
+ ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "rm" (qscale), "g" (-2*nCoeffs)
+ : "%"REG_a, "memory"
+ );
+ block[0]= block0;
+ //Note, we do not do mismatch control for intra as errors cannot accumulate
+}
+
+static void dct_unquantize_mpeg2_inter_mmx(MpegEncContext *s,
+ DCTELEM *block, int n, int qscale)
+{
+ x86_reg nCoeffs;
+ const uint16_t *quant_matrix;
+
+ assert(s->block_last_index[n]>=0);
+
+ if(s->alternate_scan) nCoeffs= 63; //FIXME
+ else nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ];
+
+ quant_matrix = s->inter_matrix;
+__asm__ volatile(
+ "pcmpeqw %%mm7, %%mm7 \n\t"
+ "psrlq $48, %%mm7 \n\t"
+ "movd %2, %%mm6 \n\t"
+ "packssdw %%mm6, %%mm6 \n\t"
+ "packssdw %%mm6, %%mm6 \n\t"
+ "mov %3, %%"REG_a" \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ "movq (%0, %%"REG_a"), %%mm0 \n\t"
+ "movq 8(%0, %%"REG_a"), %%mm1 \n\t"
+ "movq (%1, %%"REG_a"), %%mm4 \n\t"
+ "movq 8(%1, %%"REG_a"), %%mm5 \n\t"
+ "pmullw %%mm6, %%mm4 \n\t" // q=qscale*quant_matrix[i]
+ "pmullw %%mm6, %%mm5 \n\t" // q=qscale*quant_matrix[i]
+ "pxor %%mm2, %%mm2 \n\t"
+ "pxor %%mm3, %%mm3 \n\t"
+ "pcmpgtw %%mm0, %%mm2 \n\t" // block[i] < 0 ? -1 : 0
+ "pcmpgtw %%mm1, %%mm3 \n\t" // block[i] < 0 ? -1 : 0
+ "pxor %%mm2, %%mm0 \n\t"
+ "pxor %%mm3, %%mm1 \n\t"
+ "psubw %%mm2, %%mm0 \n\t" // abs(block[i])
+ "psubw %%mm3, %%mm1 \n\t" // abs(block[i])
+ "paddw %%mm0, %%mm0 \n\t" // abs(block[i])*2
+ "paddw %%mm1, %%mm1 \n\t" // abs(block[i])*2
+ "pmullw %%mm4, %%mm0 \n\t" // abs(block[i])*2*q
+ "pmullw %%mm5, %%mm1 \n\t" // abs(block[i])*2*q
+ "paddw %%mm4, %%mm0 \n\t" // (abs(block[i])*2 + 1)*q
+ "paddw %%mm5, %%mm1 \n\t" // (abs(block[i])*2 + 1)*q
+ "pxor %%mm4, %%mm4 \n\t"
+ "pxor %%mm5, %%mm5 \n\t" // FIXME slow
+ "pcmpeqw (%0, %%"REG_a"), %%mm4 \n\t" // block[i] == 0 ? -1 : 0
+ "pcmpeqw 8(%0, %%"REG_a"), %%mm5\n\t" // block[i] == 0 ? -1 : 0
+ "psrlw $4, %%mm0 \n\t"
+ "psrlw $4, %%mm1 \n\t"
+ "pxor %%mm2, %%mm0 \n\t"
+ "pxor %%mm3, %%mm1 \n\t"
+ "psubw %%mm2, %%mm0 \n\t"
+ "psubw %%mm3, %%mm1 \n\t"
+ "pandn %%mm0, %%mm4 \n\t"
+ "pandn %%mm1, %%mm5 \n\t"
+ "pxor %%mm4, %%mm7 \n\t"
+ "pxor %%mm5, %%mm7 \n\t"
+ "movq %%mm4, (%0, %%"REG_a") \n\t"
+ "movq %%mm5, 8(%0, %%"REG_a") \n\t"
+
+ "add $16, %%"REG_a" \n\t"
+ "jng 1b \n\t"
+ "movd 124(%0, %3), %%mm0 \n\t"
+ "movq %%mm7, %%mm6 \n\t"
+ "psrlq $32, %%mm7 \n\t"
+ "pxor %%mm6, %%mm7 \n\t"
+ "movq %%mm7, %%mm6 \n\t"
+ "psrlq $16, %%mm7 \n\t"
+ "pxor %%mm6, %%mm7 \n\t"
+ "pslld $31, %%mm7 \n\t"
+ "psrlq $15, %%mm7 \n\t"
+ "pxor %%mm7, %%mm0 \n\t"
+ "movd %%mm0, 124(%0, %3) \n\t"
+
+ ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "rm" (qscale), "r" (-2*nCoeffs)
+ : "%"REG_a, "memory"
+ );
+}
+
+static void denoise_dct_mmx(MpegEncContext *s, DCTELEM *block){
+ const int intra= s->mb_intra;
+ int *sum= s->dct_error_sum[intra];
+ uint16_t *offset= s->dct_offset[intra];
+
+ s->dct_count[intra]++;
+
+ __asm__ volatile(
+ "pxor %%mm7, %%mm7 \n\t"
+ "1: \n\t"
+ "pxor %%mm0, %%mm0 \n\t"
+ "pxor %%mm1, %%mm1 \n\t"
+ "movq (%0), %%mm2 \n\t"
+ "movq 8(%0), %%mm3 \n\t"
+ "pcmpgtw %%mm2, %%mm0 \n\t"
+ "pcmpgtw %%mm3, %%mm1 \n\t"
+ "pxor %%mm0, %%mm2 \n\t"
+ "pxor %%mm1, %%mm3 \n\t"
+ "psubw %%mm0, %%mm2 \n\t"
+ "psubw %%mm1, %%mm3 \n\t"
+ "movq %%mm2, %%mm4 \n\t"
+ "movq %%mm3, %%mm5 \n\t"
+ "psubusw (%2), %%mm2 \n\t"
+ "psubusw 8(%2), %%mm3 \n\t"
+ "pxor %%mm0, %%mm2 \n\t"
+ "pxor %%mm1, %%mm3 \n\t"
+ "psubw %%mm0, %%mm2 \n\t"
+ "psubw %%mm1, %%mm3 \n\t"
+ "movq %%mm2, (%0) \n\t"
+ "movq %%mm3, 8(%0) \n\t"
+ "movq %%mm4, %%mm2 \n\t"
+ "movq %%mm5, %%mm3 \n\t"
+ "punpcklwd %%mm7, %%mm4 \n\t"
+ "punpckhwd %%mm7, %%mm2 \n\t"
+ "punpcklwd %%mm7, %%mm5 \n\t"
+ "punpckhwd %%mm7, %%mm3 \n\t"
+ "paddd (%1), %%mm4 \n\t"
+ "paddd 8(%1), %%mm2 \n\t"
+ "paddd 16(%1), %%mm5 \n\t"
+ "paddd 24(%1), %%mm3 \n\t"
+ "movq %%mm4, (%1) \n\t"
+ "movq %%mm2, 8(%1) \n\t"
+ "movq %%mm5, 16(%1) \n\t"
+ "movq %%mm3, 24(%1) \n\t"
+ "add $16, %0 \n\t"
+ "add $32, %1 \n\t"
+ "add $16, %2 \n\t"
+ "cmp %3, %0 \n\t"
+ " jb 1b \n\t"
+ : "+r" (block), "+r" (sum), "+r" (offset)
+ : "r"(block+64)
+ );
+}
+
+static void denoise_dct_sse2(MpegEncContext *s, DCTELEM *block){
+ const int intra= s->mb_intra;
+ int *sum= s->dct_error_sum[intra];
+ uint16_t *offset= s->dct_offset[intra];
+
+ s->dct_count[intra]++;
+
+ __asm__ volatile(
+ "pxor %%xmm7, %%xmm7 \n\t"
+ "1: \n\t"
+ "pxor %%xmm0, %%xmm0 \n\t"
+ "pxor %%xmm1, %%xmm1 \n\t"
+ "movdqa (%0), %%xmm2 \n\t"
+ "movdqa 16(%0), %%xmm3 \n\t"
+ "pcmpgtw %%xmm2, %%xmm0 \n\t"
+ "pcmpgtw %%xmm3, %%xmm1 \n\t"
+ "pxor %%xmm0, %%xmm2 \n\t"
+ "pxor %%xmm1, %%xmm3 \n\t"
+ "psubw %%xmm0, %%xmm2 \n\t"
+ "psubw %%xmm1, %%xmm3 \n\t"
+ "movdqa %%xmm2, %%xmm4 \n\t"
+ "movdqa %%xmm3, %%xmm5 \n\t"
+ "psubusw (%2), %%xmm2 \n\t"
+ "psubusw 16(%2), %%xmm3 \n\t"
+ "pxor %%xmm0, %%xmm2 \n\t"
+ "pxor %%xmm1, %%xmm3 \n\t"
+ "psubw %%xmm0, %%xmm2 \n\t"
+ "psubw %%xmm1, %%xmm3 \n\t"
+ "movdqa %%xmm2, (%0) \n\t"
+ "movdqa %%xmm3, 16(%0) \n\t"
+ "movdqa %%xmm4, %%xmm6 \n\t"
+ "movdqa %%xmm5, %%xmm0 \n\t"
+ "punpcklwd %%xmm7, %%xmm4 \n\t"
+ "punpckhwd %%xmm7, %%xmm6 \n\t"
+ "punpcklwd %%xmm7, %%xmm5 \n\t"
+ "punpckhwd %%xmm7, %%xmm0 \n\t"
+ "paddd (%1), %%xmm4 \n\t"
+ "paddd 16(%1), %%xmm6 \n\t"
+ "paddd 32(%1), %%xmm5 \n\t"
+ "paddd 48(%1), %%xmm0 \n\t"
+ "movdqa %%xmm4, (%1) \n\t"
+ "movdqa %%xmm6, 16(%1) \n\t"
+ "movdqa %%xmm5, 32(%1) \n\t"
+ "movdqa %%xmm0, 48(%1) \n\t"
+ "add $32, %0 \n\t"
+ "add $64, %1 \n\t"
+ "add $32, %2 \n\t"
+ "cmp %3, %0 \n\t"
+ " jb 1b \n\t"
+ : "+r" (block), "+r" (sum), "+r" (offset)
+ : "r"(block+64)
+ );
+}
+
+#if HAVE_SSSE3
+#define HAVE_SSSE3_BAK
+#endif
+#undef HAVE_SSSE3
+#define HAVE_SSSE3 0
+
+#undef HAVE_SSE2
+#undef HAVE_MMX2
+#define HAVE_SSE2 0
+#define HAVE_MMX2 0
+#define RENAME(a) a ## _MMX
+#define RENAMEl(a) a ## _mmx
+#include "mpegvideo_mmx_template.c"
+
+#undef HAVE_MMX2
+#define HAVE_MMX2 1
+#undef RENAME
+#undef RENAMEl
+#define RENAME(a) a ## _MMX2
+#define RENAMEl(a) a ## _mmx2
+#include "mpegvideo_mmx_template.c"
+
+#undef HAVE_SSE2
+#define HAVE_SSE2 1
+#undef RENAME
+#undef RENAMEl
+#define RENAME(a) a ## _SSE2
+#define RENAMEl(a) a ## _sse2
+#include "mpegvideo_mmx_template.c"
+
+#ifdef HAVE_SSSE3_BAK
+#undef HAVE_SSSE3
+#define HAVE_SSSE3 1
+#undef RENAME
+#undef RENAMEl
+#define RENAME(a) a ## _SSSE3
+#define RENAMEl(a) a ## _sse2
+#include "mpegvideo_mmx_template.c"
+#endif
+
+void MPV_common_init_mmx(MpegEncContext *s)
+{
+ if (mm_flags & FF_MM_MMX) {
+ const int dct_algo = s->avctx->dct_algo;
+
+ s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_mmx;
+ s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_mmx;
+ s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_mmx;
+ s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_mmx;
+ if(!(s->flags & CODEC_FLAG_BITEXACT))
+ s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_mmx;
+ s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_mmx;
+
+ if (mm_flags & FF_MM_SSE2) {
+ s->denoise_dct= denoise_dct_sse2;
+ } else {
+ s->denoise_dct= denoise_dct_mmx;
+ }
+
+ if(dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX){
+#if HAVE_SSSE3
+ if(mm_flags & FF_MM_SSSE3){
+ s->dct_quantize= dct_quantize_SSSE3;
+ } else
+#endif
+ if(mm_flags & FF_MM_SSE2){
+ s->dct_quantize= dct_quantize_SSE2;
+ } else if(mm_flags & FF_MM_MMX2){
+ s->dct_quantize= dct_quantize_MMX2;
+ } else {
+ s->dct_quantize= dct_quantize_MMX;
+ }
+ }
+ }
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/mpegvideo_mmx_template.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/mpegvideo_mmx_template.c
new file mode 100644
index 000000000..1e5cd2cb4
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/mpegvideo_mmx_template.c
@@ -0,0 +1,376 @@
+/*
+ * MPEG video MMX templates
+ *
+ * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#undef MMREG_WIDTH
+#undef MM
+#undef MOVQ
+#undef SPREADW
+#undef PMAXW
+#undef PMAX
+#undef SAVE_SIGN
+#undef RESTORE_SIGN
+
+#if HAVE_SSE2
+#define MMREG_WIDTH "16"
+#define MM "%%xmm"
+#define MOVQ "movdqa"
+#define SPREADW(a) \
+ "pshuflw $0, "a", "a" \n\t"\
+ "punpcklwd "a", "a" \n\t"
+#define PMAXW(a,b) "pmaxsw "a", "b" \n\t"
+#define PMAX(a,b) \
+ "movhlps "a", "b" \n\t"\
+ PMAXW(b, a)\
+ "pshuflw $0x0E, "a", "b" \n\t"\
+ PMAXW(b, a)\
+ "pshuflw $0x01, "a", "b" \n\t"\
+ PMAXW(b, a)
+#else
+#define MMREG_WIDTH "8"
+#define MM "%%mm"
+#define MOVQ "movq"
+#if HAVE_MMX2
+#define SPREADW(a) "pshufw $0, "a", "a" \n\t"
+#define PMAXW(a,b) "pmaxsw "a", "b" \n\t"
+#define PMAX(a,b) \
+ "pshufw $0x0E, "a", "b" \n\t"\
+ PMAXW(b, a)\
+ "pshufw $0x01, "a", "b" \n\t"\
+ PMAXW(b, a)
+#else
+#define SPREADW(a) \
+ "punpcklwd "a", "a" \n\t"\
+ "punpcklwd "a", "a" \n\t"
+#define PMAXW(a,b) \
+ "psubusw "a", "b" \n\t"\
+ "paddw "a", "b" \n\t"
+#define PMAX(a,b) \
+ "movq "a", "b" \n\t"\
+ "psrlq $32, "a" \n\t"\
+ PMAXW(b, a)\
+ "movq "a", "b" \n\t"\
+ "psrlq $16, "a" \n\t"\
+ PMAXW(b, a)
+
+#endif
+#endif
+
+#if HAVE_SSSE3
+#define SAVE_SIGN(a,b) \
+ "movdqa "b", "a" \n\t"\
+ "pabsw "b", "b" \n\t"
+#define RESTORE_SIGN(a,b) \
+ "psignw "a", "b" \n\t"
+#else
+#define SAVE_SIGN(a,b) \
+ "pxor "a", "a" \n\t"\
+ "pcmpgtw "b", "a" \n\t" /* block[i] <= 0 ? 0xFF : 0x00 */\
+ "pxor "a", "b" \n\t"\
+ "psubw "a", "b" \n\t" /* ABS(block[i]) */
+#define RESTORE_SIGN(a,b) \
+ "pxor "a", "b" \n\t"\
+ "psubw "a", "b" \n\t" // out=((ABS(block[i])*qmat[0] - bias[0]*qmat[0])>>16)*sign(block[i])
+#endif
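+/* Scalar equivalent of the non-SSSE3 SAVE_SIGN/RESTORE_SIGN fallback above
+   (illustration only; the helper names are made up for this sketch):
+
+   static inline int16_t save_sign_c(int16_t x, int16_t *sign)
+   {
+       *sign = (x < 0) ? -1 : 0;       // pcmpgtw mask
+       return (x ^ *sign) - *sign;     // pxor + psubw -> abs(x)
+   }
+   static inline int16_t restore_sign_c(int16_t sign, int16_t y)
+   {
+       return (y ^ sign) - sign;       // reapply the saved sign
+   }
+*/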
+
+static int RENAME(dct_quantize)(MpegEncContext *s,
+ DCTELEM *block, int n,
+ int qscale, int *overflow)
+{
+ x86_reg last_non_zero_p1;
+    int level=0, q; // = 0 only because gcc warns that it may be used uninitialized ...
+ const uint16_t *qmat, *bias;
+ DECLARE_ALIGNED_16(int16_t, temp_block)[64];
+
+ assert((7&(int)(&temp_block[0])) == 0); //did gcc align it correctly?
+
+ //s->fdct (block);
+ RENAMEl(ff_fdct) (block); //cannot be anything else ...
+
+ if(s->dct_error_sum)
+ s->denoise_dct(s, block);
+
+ if (s->mb_intra) {
+ int dummy;
+ if (n < 4)
+ q = s->y_dc_scale;
+ else
+ q = s->c_dc_scale;
+ /* note: block[0] is assumed to be positive */
+ if (!s->h263_aic) {
+#if 1
+ __asm__ volatile (
+ "mul %%ecx \n\t"
+ : "=d" (level), "=a"(dummy)
+ : "a" ((block[0]>>2) + q), "c" (ff_inverse[q<<1])
+ );
+#else
+ __asm__ volatile (
+ "xorl %%edx, %%edx \n\t"
+ "divw %%cx \n\t"
+ "movzwl %%ax, %%eax \n\t"
+ : "=a" (level)
+ : "a" ((block[0]>>2) + q), "c" (q<<1)
+ : "%edx"
+ );
+#endif
+ } else
+ /* For AIC we skip quant/dequant of INTRADC */
+ level = (block[0] + 4)>>3;
+
+ block[0]=0; //avoid fake overflow
+// temp_block[0] = (block[0] + (q >> 1)) / q;
+ last_non_zero_p1 = 1;
+ bias = s->q_intra_matrix16[qscale][1];
+ qmat = s->q_intra_matrix16[qscale][0];
+ } else {
+ last_non_zero_p1 = 0;
+ bias = s->q_inter_matrix16[qscale][1];
+ qmat = s->q_inter_matrix16[qscale][0];
+ }
+
+ if((s->out_format == FMT_H263 || s->out_format == FMT_H261) && s->mpeg_quant==0){
+
+ __asm__ volatile(
+ "movd %%"REG_a", "MM"3 \n\t" // last_non_zero_p1
+ SPREADW(MM"3")
+ "pxor "MM"7, "MM"7 \n\t" // 0
+ "pxor "MM"4, "MM"4 \n\t" // 0
+ MOVQ" (%2), "MM"5 \n\t" // qmat[0]
+ "pxor "MM"6, "MM"6 \n\t"
+ "psubw (%3), "MM"6 \n\t" // -bias[0]
+ "mov $-128, %%"REG_a" \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ MOVQ" (%1, %%"REG_a"), "MM"0 \n\t" // block[i]
+ SAVE_SIGN(MM"1", MM"0") // ABS(block[i])
+ "psubusw "MM"6, "MM"0 \n\t" // ABS(block[i]) + bias[0]
+ "pmulhw "MM"5, "MM"0 \n\t" // (ABS(block[i])*qmat[0] - bias[0]*qmat[0])>>16
+ "por "MM"0, "MM"4 \n\t"
+ RESTORE_SIGN(MM"1", MM"0") // out=((ABS(block[i])*qmat[0] - bias[0]*qmat[0])>>16)*sign(block[i])
+ MOVQ" "MM"0, (%5, %%"REG_a") \n\t"
+ "pcmpeqw "MM"7, "MM"0 \n\t" // out==0 ? 0xFF : 0x00
+ MOVQ" (%4, %%"REG_a"), "MM"1 \n\t"
+ MOVQ" "MM"7, (%1, %%"REG_a") \n\t" // 0
+ "pandn "MM"1, "MM"0 \n\t"
+ PMAXW(MM"0", MM"3")
+ "add $"MMREG_WIDTH", %%"REG_a" \n\t"
+ " js 1b \n\t"
+ PMAX(MM"3", MM"0")
+ "movd "MM"3, %%"REG_a" \n\t"
+ "movzb %%al, %%"REG_a" \n\t" // last_non_zero_p1
+ : "+a" (last_non_zero_p1)
+ : "r" (block+64), "r" (qmat), "r" (bias),
+ "r" (inv_zigzag_direct16+64), "r" (temp_block+64)
+ );
+ }else{ // FMT_H263
+ __asm__ volatile(
+ "movd %%"REG_a", "MM"3 \n\t" // last_non_zero_p1
+ SPREADW(MM"3")
+ "pxor "MM"7, "MM"7 \n\t" // 0
+ "pxor "MM"4, "MM"4 \n\t" // 0
+ "mov $-128, %%"REG_a" \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ MOVQ" (%1, %%"REG_a"), "MM"0 \n\t" // block[i]
+ SAVE_SIGN(MM"1", MM"0") // ABS(block[i])
+ MOVQ" (%3, %%"REG_a"), "MM"6 \n\t" // bias[0]
+ "paddusw "MM"6, "MM"0 \n\t" // ABS(block[i]) + bias[0]
+ MOVQ" (%2, %%"REG_a"), "MM"5 \n\t" // qmat[i]
+ "pmulhw "MM"5, "MM"0 \n\t" // (ABS(block[i])*qmat[0] + bias[0]*qmat[0])>>16
+ "por "MM"0, "MM"4 \n\t"
+ RESTORE_SIGN(MM"1", MM"0") // out=((ABS(block[i])*qmat[0] - bias[0]*qmat[0])>>16)*sign(block[i])
+ MOVQ" "MM"0, (%5, %%"REG_a") \n\t"
+ "pcmpeqw "MM"7, "MM"0 \n\t" // out==0 ? 0xFF : 0x00
+ MOVQ" (%4, %%"REG_a"), "MM"1 \n\t"
+ MOVQ" "MM"7, (%1, %%"REG_a") \n\t" // 0
+ "pandn "MM"1, "MM"0 \n\t"
+ PMAXW(MM"0", MM"3")
+ "add $"MMREG_WIDTH", %%"REG_a" \n\t"
+ " js 1b \n\t"
+ PMAX(MM"3", MM"0")
+ "movd "MM"3, %%"REG_a" \n\t"
+ "movzb %%al, %%"REG_a" \n\t" // last_non_zero_p1
+ : "+a" (last_non_zero_p1)
+ : "r" (block+64), "r" (qmat+64), "r" (bias+64),
+ "r" (inv_zigzag_direct16+64), "r" (temp_block+64)
+ );
+ }
+ __asm__ volatile(
+ "movd %1, "MM"1 \n\t" // max_qcoeff
+ SPREADW(MM"1")
+ "psubusw "MM"1, "MM"4 \n\t"
+ "packuswb "MM"4, "MM"4 \n\t"
+#if HAVE_SSE2
+ "packuswb "MM"4, "MM"4 \n\t"
+#endif
+ "movd "MM"4, %0 \n\t" // *overflow
+ : "=g" (*overflow)
+ : "g" (s->max_qcoeff)
+ );
+
+ if(s->mb_intra) block[0]= level;
+ else block[0]= temp_block[0];
+
+ if(s->dsp.idct_permutation_type == FF_SIMPLE_IDCT_PERM){
+ if(last_non_zero_p1 <= 1) goto end;
+ block[0x08] = temp_block[0x01]; block[0x10] = temp_block[0x08];
+ block[0x20] = temp_block[0x10];
+ if(last_non_zero_p1 <= 4) goto end;
+ block[0x18] = temp_block[0x09]; block[0x04] = temp_block[0x02];
+ block[0x09] = temp_block[0x03];
+ if(last_non_zero_p1 <= 7) goto end;
+ block[0x14] = temp_block[0x0A]; block[0x28] = temp_block[0x11];
+ block[0x12] = temp_block[0x18]; block[0x02] = temp_block[0x20];
+ if(last_non_zero_p1 <= 11) goto end;
+ block[0x1A] = temp_block[0x19]; block[0x24] = temp_block[0x12];
+ block[0x19] = temp_block[0x0B]; block[0x01] = temp_block[0x04];
+ block[0x0C] = temp_block[0x05];
+ if(last_non_zero_p1 <= 16) goto end;
+ block[0x11] = temp_block[0x0C]; block[0x29] = temp_block[0x13];
+ block[0x16] = temp_block[0x1A]; block[0x0A] = temp_block[0x21];
+ block[0x30] = temp_block[0x28]; block[0x22] = temp_block[0x30];
+ block[0x38] = temp_block[0x29]; block[0x06] = temp_block[0x22];
+ if(last_non_zero_p1 <= 24) goto end;
+ block[0x1B] = temp_block[0x1B]; block[0x21] = temp_block[0x14];
+ block[0x1C] = temp_block[0x0D]; block[0x05] = temp_block[0x06];
+ block[0x0D] = temp_block[0x07]; block[0x15] = temp_block[0x0E];
+ block[0x2C] = temp_block[0x15]; block[0x13] = temp_block[0x1C];
+ if(last_non_zero_p1 <= 32) goto end;
+ block[0x0B] = temp_block[0x23]; block[0x34] = temp_block[0x2A];
+ block[0x2A] = temp_block[0x31]; block[0x32] = temp_block[0x38];
+ block[0x3A] = temp_block[0x39]; block[0x26] = temp_block[0x32];
+ block[0x39] = temp_block[0x2B]; block[0x03] = temp_block[0x24];
+ if(last_non_zero_p1 <= 40) goto end;
+ block[0x1E] = temp_block[0x1D]; block[0x25] = temp_block[0x16];
+ block[0x1D] = temp_block[0x0F]; block[0x2D] = temp_block[0x17];
+ block[0x17] = temp_block[0x1E]; block[0x0E] = temp_block[0x25];
+ block[0x31] = temp_block[0x2C]; block[0x2B] = temp_block[0x33];
+ if(last_non_zero_p1 <= 48) goto end;
+ block[0x36] = temp_block[0x3A]; block[0x3B] = temp_block[0x3B];
+ block[0x23] = temp_block[0x34]; block[0x3C] = temp_block[0x2D];
+ block[0x07] = temp_block[0x26]; block[0x1F] = temp_block[0x1F];
+ block[0x0F] = temp_block[0x27]; block[0x35] = temp_block[0x2E];
+ if(last_non_zero_p1 <= 56) goto end;
+ block[0x2E] = temp_block[0x35]; block[0x33] = temp_block[0x3C];
+ block[0x3E] = temp_block[0x3D]; block[0x27] = temp_block[0x36];
+ block[0x3D] = temp_block[0x2F]; block[0x2F] = temp_block[0x37];
+ block[0x37] = temp_block[0x3E]; block[0x3F] = temp_block[0x3F];
+ }else if(s->dsp.idct_permutation_type == FF_LIBMPEG2_IDCT_PERM){
+ if(last_non_zero_p1 <= 1) goto end;
+ block[0x04] = temp_block[0x01];
+ block[0x08] = temp_block[0x08]; block[0x10] = temp_block[0x10];
+ if(last_non_zero_p1 <= 4) goto end;
+ block[0x0C] = temp_block[0x09]; block[0x01] = temp_block[0x02];
+ block[0x05] = temp_block[0x03];
+ if(last_non_zero_p1 <= 7) goto end;
+ block[0x09] = temp_block[0x0A]; block[0x14] = temp_block[0x11];
+ block[0x18] = temp_block[0x18]; block[0x20] = temp_block[0x20];
+ if(last_non_zero_p1 <= 11) goto end;
+ block[0x1C] = temp_block[0x19];
+ block[0x11] = temp_block[0x12]; block[0x0D] = temp_block[0x0B];
+ block[0x02] = temp_block[0x04]; block[0x06] = temp_block[0x05];
+ if(last_non_zero_p1 <= 16) goto end;
+ block[0x0A] = temp_block[0x0C]; block[0x15] = temp_block[0x13];
+ block[0x19] = temp_block[0x1A]; block[0x24] = temp_block[0x21];
+ block[0x28] = temp_block[0x28]; block[0x30] = temp_block[0x30];
+ block[0x2C] = temp_block[0x29]; block[0x21] = temp_block[0x22];
+ if(last_non_zero_p1 <= 24) goto end;
+ block[0x1D] = temp_block[0x1B]; block[0x12] = temp_block[0x14];
+ block[0x0E] = temp_block[0x0D]; block[0x03] = temp_block[0x06];
+ block[0x07] = temp_block[0x07]; block[0x0B] = temp_block[0x0E];
+ block[0x16] = temp_block[0x15]; block[0x1A] = temp_block[0x1C];
+ if(last_non_zero_p1 <= 32) goto end;
+ block[0x25] = temp_block[0x23]; block[0x29] = temp_block[0x2A];
+ block[0x34] = temp_block[0x31]; block[0x38] = temp_block[0x38];
+ block[0x3C] = temp_block[0x39]; block[0x31] = temp_block[0x32];
+ block[0x2D] = temp_block[0x2B]; block[0x22] = temp_block[0x24];
+ if(last_non_zero_p1 <= 40) goto end;
+ block[0x1E] = temp_block[0x1D]; block[0x13] = temp_block[0x16];
+ block[0x0F] = temp_block[0x0F]; block[0x17] = temp_block[0x17];
+ block[0x1B] = temp_block[0x1E]; block[0x26] = temp_block[0x25];
+ block[0x2A] = temp_block[0x2C]; block[0x35] = temp_block[0x33];
+ if(last_non_zero_p1 <= 48) goto end;
+ block[0x39] = temp_block[0x3A]; block[0x3D] = temp_block[0x3B];
+ block[0x32] = temp_block[0x34]; block[0x2E] = temp_block[0x2D];
+ block[0x23] = temp_block[0x26]; block[0x1F] = temp_block[0x1F];
+ block[0x27] = temp_block[0x27]; block[0x2B] = temp_block[0x2E];
+ if(last_non_zero_p1 <= 56) goto end;
+ block[0x36] = temp_block[0x35]; block[0x3A] = temp_block[0x3C];
+ block[0x3E] = temp_block[0x3D]; block[0x33] = temp_block[0x36];
+ block[0x2F] = temp_block[0x2F]; block[0x37] = temp_block[0x37];
+ block[0x3B] = temp_block[0x3E]; block[0x3F] = temp_block[0x3F];
+ }else{
+ if(last_non_zero_p1 <= 1) goto end;
+ block[0x01] = temp_block[0x01];
+ block[0x08] = temp_block[0x08]; block[0x10] = temp_block[0x10];
+ if(last_non_zero_p1 <= 4) goto end;
+ block[0x09] = temp_block[0x09]; block[0x02] = temp_block[0x02];
+ block[0x03] = temp_block[0x03];
+ if(last_non_zero_p1 <= 7) goto end;
+ block[0x0A] = temp_block[0x0A]; block[0x11] = temp_block[0x11];
+ block[0x18] = temp_block[0x18]; block[0x20] = temp_block[0x20];
+ if(last_non_zero_p1 <= 11) goto end;
+ block[0x19] = temp_block[0x19];
+ block[0x12] = temp_block[0x12]; block[0x0B] = temp_block[0x0B];
+ block[0x04] = temp_block[0x04]; block[0x05] = temp_block[0x05];
+ if(last_non_zero_p1 <= 16) goto end;
+ block[0x0C] = temp_block[0x0C]; block[0x13] = temp_block[0x13];
+ block[0x1A] = temp_block[0x1A]; block[0x21] = temp_block[0x21];
+ block[0x28] = temp_block[0x28]; block[0x30] = temp_block[0x30];
+ block[0x29] = temp_block[0x29]; block[0x22] = temp_block[0x22];
+ if(last_non_zero_p1 <= 24) goto end;
+ block[0x1B] = temp_block[0x1B]; block[0x14] = temp_block[0x14];
+ block[0x0D] = temp_block[0x0D]; block[0x06] = temp_block[0x06];
+ block[0x07] = temp_block[0x07]; block[0x0E] = temp_block[0x0E];
+ block[0x15] = temp_block[0x15]; block[0x1C] = temp_block[0x1C];
+ if(last_non_zero_p1 <= 32) goto end;
+ block[0x23] = temp_block[0x23]; block[0x2A] = temp_block[0x2A];
+ block[0x31] = temp_block[0x31]; block[0x38] = temp_block[0x38];
+ block[0x39] = temp_block[0x39]; block[0x32] = temp_block[0x32];
+ block[0x2B] = temp_block[0x2B]; block[0x24] = temp_block[0x24];
+ if(last_non_zero_p1 <= 40) goto end;
+ block[0x1D] = temp_block[0x1D]; block[0x16] = temp_block[0x16];
+ block[0x0F] = temp_block[0x0F]; block[0x17] = temp_block[0x17];
+ block[0x1E] = temp_block[0x1E]; block[0x25] = temp_block[0x25];
+ block[0x2C] = temp_block[0x2C]; block[0x33] = temp_block[0x33];
+ if(last_non_zero_p1 <= 48) goto end;
+ block[0x3A] = temp_block[0x3A]; block[0x3B] = temp_block[0x3B];
+ block[0x34] = temp_block[0x34]; block[0x2D] = temp_block[0x2D];
+ block[0x26] = temp_block[0x26]; block[0x1F] = temp_block[0x1F];
+ block[0x27] = temp_block[0x27]; block[0x2E] = temp_block[0x2E];
+ if(last_non_zero_p1 <= 56) goto end;
+ block[0x35] = temp_block[0x35]; block[0x3C] = temp_block[0x3C];
+ block[0x3D] = temp_block[0x3D]; block[0x36] = temp_block[0x36];
+ block[0x2F] = temp_block[0x2F]; block[0x37] = temp_block[0x37];
+ block[0x3E] = temp_block[0x3E]; block[0x3F] = temp_block[0x3F];
+ }
+ end:
+/*
+ for(i=0; i<last_non_zero_p1; i++)
+ {
+ int j= zigzag_direct_noperm[i];
+ block[block_permute_op(j)]= temp_block[j];
+ }
+*/
+
+ return last_non_zero_p1 - 1;
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/rv40dsp_mmx.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/rv40dsp_mmx.c
new file mode 100644
index 000000000..9a702084a
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/rv40dsp_mmx.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2008 Konstantin Shishkov, Mathieu Velten
+ *
+ * MMX-optimized DSP functions for RV40, based on H.264 optimizations by
+ * Michael Niedermayer and Loren Merritt
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "dsputil_mmx.h"
+
+/* bias interleaved with bias div 8, use p+1 to access bias div 8 */
+DECLARE_ALIGNED_8(static const uint64_t, rv40_bias_reg)[4][8] = {
+ { 0x0000000000000000ULL, 0x0000000000000000ULL, 0x0010001000100010ULL, 0x0002000200020002ULL,
+ 0x0020002000200020ULL, 0x0004000400040004ULL, 0x0010001000100010ULL, 0x0002000200020002ULL },
+ { 0x0020002000200020ULL, 0x0004000400040004ULL, 0x001C001C001C001CULL, 0x0003000300030003ULL,
+ 0x0020002000200020ULL, 0x0004000400040004ULL, 0x001C001C001C001CULL, 0x0003000300030003ULL },
+ { 0x0000000000000000ULL, 0x0000000000000000ULL, 0x0020002000200020ULL, 0x0004000400040004ULL,
+ 0x0010001000100010ULL, 0x0002000200020002ULL, 0x0020002000200020ULL, 0x0004000400040004ULL },
+ { 0x0020002000200020ULL, 0x0004000400040004ULL, 0x001C001C001C001CULL, 0x0003000300030003ULL,
+ 0x0020002000200020ULL, 0x0004000400040004ULL, 0x001C001C001C001CULL, 0x0003000300030003ULL }
+};
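+/* Layout note: within each row, even slots hold the packed 16-bit bias and the
+   following odd slot holds bias/8, so the [y>>1][x & ~1] indexing below always
+   points at a bias entry and the generic chroma code reads bias/8 from the
+   next slot (the "p+1" mentioned above). */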
+
+static void put_rv40_chroma_mc8_mmx(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
+{
+ put_h264_chroma_generic_mc8_mmx(dst, src, stride, h, x, y, &rv40_bias_reg[y>>1][x&(~1)]);
+}
+static void put_rv40_chroma_mc4_mmx(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
+{
+ put_h264_chroma_generic_mc4_mmx(dst, src, stride, h, x, y, &rv40_bias_reg[y>>1][x&(~1)]);
+}
+static void avg_rv40_chroma_mc8_mmx2(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
+{
+ avg_h264_chroma_generic_mc8_mmx2(dst, src, stride, h, x, y, &rv40_bias_reg[y>>1][x&(~1)]);
+}
+static void avg_rv40_chroma_mc4_mmx2(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
+{
+ avg_h264_chroma_generic_mc4_mmx2(dst, src, stride, h, x, y, &rv40_bias_reg[y>>1][x&(~1)]);
+}
+static void avg_rv40_chroma_mc8_3dnow(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
+{
+ avg_h264_chroma_generic_mc8_3dnow(dst, src, stride, h, x, y, &rv40_bias_reg[y>>1][x&(~1)]);
+}
+static void avg_rv40_chroma_mc4_3dnow(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
+{
+ avg_h264_chroma_generic_mc4_3dnow(dst, src, stride, h, x, y, &rv40_bias_reg[y>>1][x&(~1)]);
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/simple_idct_mmx.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/simple_idct_mmx.c
new file mode 100644
index 000000000..e32b8f0b4
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/simple_idct_mmx.c
@@ -0,0 +1,1296 @@
+/*
+ * Simple IDCT MMX
+ *
+ * Copyright (c) 2001, 2002 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "libavcodec/dsputil.h"
+#include "libavcodec/simple_idct.h"
+#include "dsputil_mmx.h"
+
+/*
+23170.475006
+22725.260826
+21406.727617
+19265.545870
+16384.000000
+12872.826198
+8866.956905
+4520.335430
+*/
+#define C0 23170 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+#define C1 22725 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+#define C2 21407 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+#define C3 19266 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+#if 0
+#define C4 16384 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+#else
+#define C4 16383 //cos(i*M_PI/16)*sqrt(2)*(1<<14) - 0.5
+#endif
+#define C5 12873 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+#define C6 8867 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+#define C7 4520 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+
+#define ROW_SHIFT 11
+#define COL_SHIFT 20 // 6
+
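+#if 0
+/* Hypothetical generator for the constants above (illustration only, unused):
+   each Ci is cos(i*M_PI/16)*sqrt(2)*(1<<14) rounded to nearest; the build uses
+   16383 instead of the exact 16384 for C4, per the #if above. */
+#include <math.h>
+static int idct_coeff(int i)
+{
+    return (int)(cos(i * M_PI / 16.0) * sqrt(2.0) * (1 << 14) + 0.5);
+}
+#endif
+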
+DECLARE_ASM_CONST(8, uint64_t, wm1010)= 0xFFFF0000FFFF0000ULL;
+DECLARE_ASM_CONST(8, uint64_t, d40000)= 0x0000000000040000ULL;
+
+DECLARE_ALIGNED(8, static const int16_t, coeffs)[]= {
+ 1<<(ROW_SHIFT-1), 0, 1<<(ROW_SHIFT-1), 0,
+// 1<<(COL_SHIFT-1), 0, 1<<(COL_SHIFT-1), 0,
+// 0, 1<<(COL_SHIFT-1-16), 0, 1<<(COL_SHIFT-1-16),
+ 1<<(ROW_SHIFT-1), 1, 1<<(ROW_SHIFT-1), 0,
+ // the 1 = ((1<<(COL_SHIFT-1))/C4)<<ROW_SHIFT :)
+// 0, 0, 0, 0,
+// 0, 0, 0, 0,
+
+ C4, C4, C4, C4,
+ C4, -C4, C4, -C4,
+
+ C2, C6, C2, C6,
+ C6, -C2, C6, -C2,
+
+ C1, C3, C1, C3,
+ C5, C7, C5, C7,
+
+ C3, -C7, C3, -C7,
+-C1, -C5, -C1, -C5,
+
+ C5, -C1, C5, -C1,
+ C7, C3, C7, C3,
+
+ C7, -C5, C7, -C5,
+ C3, -C1, C3, -C1
+};
+
+#if 0
+static void unused_var_killer(void)
+{
+ int a= wm1010 + d40000;
+ temp[0]=a;
+}
+
+static void inline idctCol (int16_t * col, int16_t *input)
+{
+#undef C0
+#undef C1
+#undef C2
+#undef C3
+#undef C4
+#undef C5
+#undef C6
+#undef C7
+ int a0, a1, a2, a3, b0, b1, b2, b3;
+ const int C0 = 23170; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+ const int C1 = 22725; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+ const int C2 = 21407; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+ const int C3 = 19266; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+ const int C4 = 16383; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+ const int C5 = 12873; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+ const int C6 = 8867; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+ const int C7 = 4520; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+/*
+ if( !(col[8*1] | col[8*2] |col[8*3] |col[8*4] |col[8*5] |col[8*6] | col[8*7])) {
+ col[8*0] = col[8*1] = col[8*2] = col[8*3] = col[8*4] =
+ col[8*5] = col[8*6] = col[8*7] = col[8*0]<<3;
+ return;
+ }*/
+
+col[8*0] = input[8*0 + 0];
+col[8*1] = input[8*2 + 0];
+col[8*2] = input[8*0 + 1];
+col[8*3] = input[8*2 + 1];
+col[8*4] = input[8*4 + 0];
+col[8*5] = input[8*6 + 0];
+col[8*6] = input[8*4 + 1];
+col[8*7] = input[8*6 + 1];
+
+ a0 = C4*col[8*0] + C2*col[8*2] + C4*col[8*4] + C6*col[8*6] + (1<<(COL_SHIFT-1));
+ a1 = C4*col[8*0] + C6*col[8*2] - C4*col[8*4] - C2*col[8*6] + (1<<(COL_SHIFT-1));
+ a2 = C4*col[8*0] - C6*col[8*2] - C4*col[8*4] + C2*col[8*6] + (1<<(COL_SHIFT-1));
+ a3 = C4*col[8*0] - C2*col[8*2] + C4*col[8*4] - C6*col[8*6] + (1<<(COL_SHIFT-1));
+
+ b0 = C1*col[8*1] + C3*col[8*3] + C5*col[8*5] + C7*col[8*7];
+ b1 = C3*col[8*1] - C7*col[8*3] - C1*col[8*5] - C5*col[8*7];
+ b2 = C5*col[8*1] - C1*col[8*3] + C7*col[8*5] + C3*col[8*7];
+ b3 = C7*col[8*1] - C5*col[8*3] + C3*col[8*5] - C1*col[8*7];
+
+ col[8*0] = (a0 + b0) >> COL_SHIFT;
+ col[8*1] = (a1 + b1) >> COL_SHIFT;
+ col[8*2] = (a2 + b2) >> COL_SHIFT;
+ col[8*3] = (a3 + b3) >> COL_SHIFT;
+ col[8*4] = (a3 - b3) >> COL_SHIFT;
+ col[8*5] = (a2 - b2) >> COL_SHIFT;
+ col[8*6] = (a1 - b1) >> COL_SHIFT;
+ col[8*7] = (a0 - b0) >> COL_SHIFT;
+}
+
+static void inline idctRow (int16_t * output, int16_t * input)
+{
+ int16_t row[8];
+
+ int a0, a1, a2, a3, b0, b1, b2, b3;
+ const int C0 = 23170; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+ const int C1 = 22725; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+ const int C2 = 21407; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+ const int C3 = 19266; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+ const int C4 = 16383; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+ const int C5 = 12873; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+ const int C6 = 8867; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+ const int C7 = 4520; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+
+row[0] = input[0];
+row[2] = input[1];
+row[4] = input[4];
+row[6] = input[5];
+row[1] = input[8];
+row[3] = input[9];
+row[5] = input[12];
+row[7] = input[13];
+
+ if( !(row[1] | row[2] |row[3] |row[4] |row[5] |row[6] | row[7]) ) {
+ row[0] = row[1] = row[2] = row[3] = row[4] =
+ row[5] = row[6] = row[7] = row[0]<<3;
+ output[0] = row[0];
+ output[2] = row[1];
+ output[4] = row[2];
+ output[6] = row[3];
+ output[8] = row[4];
+ output[10] = row[5];
+ output[12] = row[6];
+ output[14] = row[7];
+ return;
+ }
+
+ a0 = C4*row[0] + C2*row[2] + C4*row[4] + C6*row[6] + (1<<(ROW_SHIFT-1));
+ a1 = C4*row[0] + C6*row[2] - C4*row[4] - C2*row[6] + (1<<(ROW_SHIFT-1));
+ a2 = C4*row[0] - C6*row[2] - C4*row[4] + C2*row[6] + (1<<(ROW_SHIFT-1));
+ a3 = C4*row[0] - C2*row[2] + C4*row[4] - C6*row[6] + (1<<(ROW_SHIFT-1));
+
+ b0 = C1*row[1] + C3*row[3] + C5*row[5] + C7*row[7];
+ b1 = C3*row[1] - C7*row[3] - C1*row[5] - C5*row[7];
+ b2 = C5*row[1] - C1*row[3] + C7*row[5] + C3*row[7];
+ b3 = C7*row[1] - C5*row[3] + C3*row[5] - C1*row[7];
+
+ row[0] = (a0 + b0) >> ROW_SHIFT;
+ row[1] = (a1 + b1) >> ROW_SHIFT;
+ row[2] = (a2 + b2) >> ROW_SHIFT;
+ row[3] = (a3 + b3) >> ROW_SHIFT;
+ row[4] = (a3 - b3) >> ROW_SHIFT;
+ row[5] = (a2 - b2) >> ROW_SHIFT;
+ row[6] = (a1 - b1) >> ROW_SHIFT;
+ row[7] = (a0 - b0) >> ROW_SHIFT;
+
+ output[0] = row[0];
+ output[2] = row[1];
+ output[4] = row[2];
+ output[6] = row[3];
+ output[8] = row[4];
+ output[10] = row[5];
+ output[12] = row[6];
+ output[14] = row[7];
+}
+#endif
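+
+/* The disabled idctRow()/idctCol() above are the plain-C reference for the
+   MMX code below: the same a0..a3/b0..b3 row/column butterflies, fed in the
+   permuted order documented in the "Input"/"Temp" layout comment at the end
+   of idct(). */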
+
+static inline void idct(int16_t *block)
+{
+ DECLARE_ALIGNED(8, int64_t, align_tmp)[16];
+ int16_t * const temp= (int16_t*)align_tmp;
+
+ __asm__ volatile(
+#if 0 //Alternative, simpler variant
+
+#define ROW_IDCT(src0, src4, src1, src5, dst, rounder, shift) \
+ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\
+ "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\
+ "movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\
+ "movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\
+ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\
+ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\
+ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\
+ "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\
+ "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\
+ "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\
+ "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\
+ "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\
+ #rounder ", %%mm4 \n\t"\
+ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\
+ "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\
+ "movq 56(%2), %%mm5 \n\t" /* C7 C5 C7 C5 */\
+ "pmaddwd %%mm3, %%mm5 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\
+ #rounder ", %%mm0 \n\t"\
+ "paddd %%mm0, %%mm1 \n\t" /* A1 a1 */\
+ "paddd %%mm0, %%mm0 \n\t" \
+ "psubd %%mm1, %%mm0 \n\t" /* A2 a2 */\
+ "pmaddwd 64(%2), %%mm2 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\
+ "paddd %%mm5, %%mm7 \n\t" /* B0 b0 */\
+ "movq 72(%2), %%mm5 \n\t" /* -C5 -C1 -C5 -C1 */\
+ "pmaddwd %%mm3, %%mm5 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\
+ "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\
+ "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\
+ "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "paddd %%mm2, %%mm5 \n\t" /* B1 b1 */\
+ "psrad $" #shift ", %%mm7 \n\t"\
+ "psrad $" #shift ", %%mm4 \n\t"\
+ "movq %%mm1, %%mm2 \n\t" /* A1 a1 */\
+ "paddd %%mm5, %%mm1 \n\t" /* A1+B1 a1+b1 */\
+ "psubd %%mm5, %%mm2 \n\t" /* A1-B1 a1-b1 */\
+ "psrad $" #shift ", %%mm1 \n\t"\
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "packssdw %%mm1, %%mm7 \n\t" /* A1+B1 a1+b1 A0+B0 a0+b0 */\
+ "packssdw %%mm4, %%mm2 \n\t" /* A0-B0 a0-b0 A1-B1 a1-b1 */\
+ "movq %%mm7, " #dst " \n\t"\
+ "movq " #src1 ", %%mm1 \n\t" /* R3 R1 r3 r1 */\
+ "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\
+ "movq %%mm2, 24+" #dst " \n\t"\
+ "pmaddwd %%mm1, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\
+ "movq 88(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\
+ "pmaddwd 96(%2), %%mm1 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\
+ "pmaddwd %%mm3, %%mm7 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\
+ "movq %%mm0, %%mm2 \n\t" /* A2 a2 */\
+ "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\
+ "paddd %%mm7, %%mm4 \n\t" /* B2 b2 */\
+ "paddd %%mm4, %%mm2 \n\t" /* A2+B2 a2+b2 */\
+ "psubd %%mm4, %%mm0 \n\t" /* a2-B2 a2-b2 */\
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "psrad $" #shift ", %%mm0 \n\t"\
+ "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\
+ "paddd %%mm1, %%mm3 \n\t" /* B3 b3 */\
+ "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "psubd %%mm3, %%mm4 \n\t" /* a3-B3 a3-b3 */\
+ "psrad $" #shift ", %%mm6 \n\t"\
+ "packssdw %%mm6, %%mm2 \n\t" /* A3+B3 a3+b3 A2+B2 a2+b2 */\
+ "movq %%mm2, 8+" #dst " \n\t"\
+ "psrad $" #shift ", %%mm4 \n\t"\
+ "packssdw %%mm0, %%mm4 \n\t" /* A2-B2 a2-b2 A3-B3 a3-b3 */\
+ "movq %%mm4, 16+" #dst " \n\t"\
+
+#define COL_IDCT(src0, src4, src1, src5, dst, shift) \
+ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\
+ "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\
+ "movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\
+ "movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\
+ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\
+ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\
+ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\
+ "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\
+ "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\
+ "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\
+ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\
+ "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\
+ "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\
+ "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\
+ "movq %%mm0, %%mm5 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "paddd %%mm1, %%mm0 \n\t" /* A1 a1 */\
+ "psubd %%mm1, %%mm5 \n\t" /* A2 a2 */\
+ "movq 56(%2), %%mm1 \n\t" /* C7 C5 C7 C5 */\
+ "pmaddwd %%mm3, %%mm1 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\
+ "pmaddwd 64(%2), %%mm2 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\
+ "paddd %%mm1, %%mm7 \n\t" /* B0 b0 */\
+ "movq 72(%2), %%mm1 \n\t" /* -C5 -C1 -C5 -C1 */\
+ "pmaddwd %%mm3, %%mm1 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\
+ "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\
+ "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\
+ "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "paddd %%mm2, %%mm1 \n\t" /* B1 b1 */\
+ "psrad $" #shift ", %%mm7 \n\t"\
+ "psrad $" #shift ", %%mm4 \n\t"\
+ "movq %%mm0, %%mm2 \n\t" /* A1 a1 */\
+ "paddd %%mm1, %%mm0 \n\t" /* A1+B1 a1+b1 */\
+ "psubd %%mm1, %%mm2 \n\t" /* A1-B1 a1-b1 */\
+ "psrad $" #shift ", %%mm0 \n\t"\
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "packssdw %%mm7, %%mm7 \n\t" /* A0+B0 a0+b0 */\
+ "movd %%mm7, " #dst " \n\t"\
+ "packssdw %%mm0, %%mm0 \n\t" /* A1+B1 a1+b1 */\
+ "movd %%mm0, 16+" #dst " \n\t"\
+ "packssdw %%mm2, %%mm2 \n\t" /* A1-B1 a1-b1 */\
+ "movd %%mm2, 96+" #dst " \n\t"\
+ "packssdw %%mm4, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "movd %%mm4, 112+" #dst " \n\t"\
+ "movq " #src1 ", %%mm0 \n\t" /* R3 R1 r3 r1 */\
+ "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\
+ "pmaddwd %%mm0, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\
+ "movq 88(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\
+ "pmaddwd 96(%2), %%mm0 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\
+ "pmaddwd %%mm3, %%mm7 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\
+ "movq %%mm5, %%mm2 \n\t" /* A2 a2 */\
+ "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\
+ "paddd %%mm7, %%mm4 \n\t" /* B2 b2 */\
+ "paddd %%mm4, %%mm2 \n\t" /* A2+B2 a2+b2 */\
+ "psubd %%mm4, %%mm5 \n\t" /* a2-B2 a2-b2 */\
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "psrad $" #shift ", %%mm5 \n\t"\
+ "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\
+ "paddd %%mm0, %%mm3 \n\t" /* B3 b3 */\
+ "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "psubd %%mm3, %%mm4 \n\t" /* a3-B3 a3-b3 */\
+ "psrad $" #shift ", %%mm6 \n\t"\
+ "psrad $" #shift ", %%mm4 \n\t"\
+ "packssdw %%mm2, %%mm2 \n\t" /* A2+B2 a2+b2 */\
+ "packssdw %%mm6, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "movd %%mm2, 32+" #dst " \n\t"\
+ "packssdw %%mm4, %%mm4 \n\t" /* A3-B3 a3-b3 */\
+ "packssdw %%mm5, %%mm5 \n\t" /* A2-B2 a2-b2 */\
+ "movd %%mm6, 48+" #dst " \n\t"\
+ "movd %%mm4, 64+" #dst " \n\t"\
+ "movd %%mm5, 80+" #dst " \n\t"\
+
+
+#define DC_COND_ROW_IDCT(src0, src4, src1, src5, dst, rounder, shift) \
+ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\
+ "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\
+ "movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\
+ "movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\
+ "movq "MANGLE(wm1010)", %%mm4 \n\t"\
+ "pand %%mm0, %%mm4 \n\t"\
+ "por %%mm1, %%mm4 \n\t"\
+ "por %%mm2, %%mm4 \n\t"\
+ "por %%mm3, %%mm4 \n\t"\
+ "packssdw %%mm4,%%mm4 \n\t"\
+ "movd %%mm4, %%eax \n\t"\
+ "orl %%eax, %%eax \n\t"\
+ "jz 1f \n\t"\
+ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\
+ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\
+ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\
+ "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\
+ "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\
+ "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\
+ "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\
+ "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\
+ #rounder ", %%mm4 \n\t"\
+ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\
+ "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\
+ "movq 56(%2), %%mm5 \n\t" /* C7 C5 C7 C5 */\
+ "pmaddwd %%mm3, %%mm5 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\
+ #rounder ", %%mm0 \n\t"\
+ "paddd %%mm0, %%mm1 \n\t" /* A1 a1 */\
+ "paddd %%mm0, %%mm0 \n\t" \
+ "psubd %%mm1, %%mm0 \n\t" /* A2 a2 */\
+ "pmaddwd 64(%2), %%mm2 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\
+ "paddd %%mm5, %%mm7 \n\t" /* B0 b0 */\
+ "movq 72(%2), %%mm5 \n\t" /* -C5 -C1 -C5 -C1 */\
+ "pmaddwd %%mm3, %%mm5 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\
+ "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\
+ "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\
+ "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "paddd %%mm2, %%mm5 \n\t" /* B1 b1 */\
+ "psrad $" #shift ", %%mm7 \n\t"\
+ "psrad $" #shift ", %%mm4 \n\t"\
+ "movq %%mm1, %%mm2 \n\t" /* A1 a1 */\
+ "paddd %%mm5, %%mm1 \n\t" /* A1+B1 a1+b1 */\
+ "psubd %%mm5, %%mm2 \n\t" /* A1-B1 a1-b1 */\
+ "psrad $" #shift ", %%mm1 \n\t"\
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "packssdw %%mm1, %%mm7 \n\t" /* A1+B1 a1+b1 A0+B0 a0+b0 */\
+ "packssdw %%mm4, %%mm2 \n\t" /* A0-B0 a0-b0 A1-B1 a1-b1 */\
+ "movq %%mm7, " #dst " \n\t"\
+ "movq " #src1 ", %%mm1 \n\t" /* R3 R1 r3 r1 */\
+ "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\
+ "movq %%mm2, 24+" #dst " \n\t"\
+ "pmaddwd %%mm1, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\
+ "movq 88(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\
+ "pmaddwd 96(%2), %%mm1 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\
+ "pmaddwd %%mm3, %%mm7 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\
+ "movq %%mm0, %%mm2 \n\t" /* A2 a2 */\
+ "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\
+ "paddd %%mm7, %%mm4 \n\t" /* B2 b2 */\
+ "paddd %%mm4, %%mm2 \n\t" /* A2+B2 a2+b2 */\
+ "psubd %%mm4, %%mm0 \n\t" /* a2-B2 a2-b2 */\
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "psrad $" #shift ", %%mm0 \n\t"\
+ "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\
+ "paddd %%mm1, %%mm3 \n\t" /* B3 b3 */\
+ "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "psubd %%mm3, %%mm4 \n\t" /* a3-B3 a3-b3 */\
+ "psrad $" #shift ", %%mm6 \n\t"\
+ "packssdw %%mm6, %%mm2 \n\t" /* A3+B3 a3+b3 A2+B2 a2+b2 */\
+ "movq %%mm2, 8+" #dst " \n\t"\
+ "psrad $" #shift ", %%mm4 \n\t"\
+ "packssdw %%mm0, %%mm4 \n\t" /* A2-B2 a2-b2 A3-B3 a3-b3 */\
+ "movq %%mm4, 16+" #dst " \n\t"\
+ "jmp 2f \n\t"\
+ "1: \n\t"\
+ "pslld $16, %%mm0 \n\t"\
+ "#paddd "MANGLE(d40000)", %%mm0 \n\t"\
+ "psrad $13, %%mm0 \n\t"\
+ "packssdw %%mm0, %%mm0 \n\t"\
+ "movq %%mm0, " #dst " \n\t"\
+ "movq %%mm0, 8+" #dst " \n\t"\
+ "movq %%mm0, 16+" #dst " \n\t"\
+ "movq %%mm0, 24+" #dst " \n\t"\
+ "2: \n\t"
+
+
+//IDCT( src0, src4, src1, src5, dst, rounder, shift)
+ROW_IDCT( (%0), 8(%0), 16(%0), 24(%0), 0(%1),paddd 8(%2), 11)
+/*ROW_IDCT( 32(%0), 40(%0), 48(%0), 56(%0), 32(%1), paddd (%2), 11)
+ROW_IDCT( 64(%0), 72(%0), 80(%0), 88(%0), 64(%1), paddd (%2), 11)
+ROW_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1), paddd (%2), 11)*/
+
+DC_COND_ROW_IDCT( 32(%0), 40(%0), 48(%0), 56(%0), 32(%1),paddd (%2), 11)
+DC_COND_ROW_IDCT( 64(%0), 72(%0), 80(%0), 88(%0), 64(%1),paddd (%2), 11)
+DC_COND_ROW_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11)
+
+
+//IDCT( src0, src4, src1, src5, dst, shift)
+COL_IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0), 20)
+COL_IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0), 20)
+COL_IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20)
+COL_IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20)
+
+#else
+
+#define DC_COND_IDCT(src0, src4, src1, src5, dst, rounder, shift) \
+ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\
+ "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\
+ "movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\
+ "movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\
+ "movq "MANGLE(wm1010)", %%mm4 \n\t"\
+ "pand %%mm0, %%mm4 \n\t"\
+ "por %%mm1, %%mm4 \n\t"\
+ "por %%mm2, %%mm4 \n\t"\
+ "por %%mm3, %%mm4 \n\t"\
+ "packssdw %%mm4,%%mm4 \n\t"\
+ "movd %%mm4, %%eax \n\t"\
+ "orl %%eax, %%eax \n\t"\
+ "jz 1f \n\t"\
+ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\
+ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\
+ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\
+ "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\
+ "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\
+ "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\
+ "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\
+ "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\
+ #rounder ", %%mm4 \n\t"\
+ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\
+ "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\
+ "movq 56(%2), %%mm5 \n\t" /* C7 C5 C7 C5 */\
+ "pmaddwd %%mm3, %%mm5 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\
+ #rounder ", %%mm0 \n\t"\
+ "paddd %%mm0, %%mm1 \n\t" /* A1 a1 */\
+ "paddd %%mm0, %%mm0 \n\t" \
+ "psubd %%mm1, %%mm0 \n\t" /* A2 a2 */\
+ "pmaddwd 64(%2), %%mm2 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\
+ "paddd %%mm5, %%mm7 \n\t" /* B0 b0 */\
+ "movq 72(%2), %%mm5 \n\t" /* -C5 -C1 -C5 -C1 */\
+ "pmaddwd %%mm3, %%mm5 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\
+ "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\
+ "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\
+ "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "paddd %%mm2, %%mm5 \n\t" /* B1 b1 */\
+ "psrad $" #shift ", %%mm7 \n\t"\
+ "psrad $" #shift ", %%mm4 \n\t"\
+ "movq %%mm1, %%mm2 \n\t" /* A1 a1 */\
+ "paddd %%mm5, %%mm1 \n\t" /* A1+B1 a1+b1 */\
+ "psubd %%mm5, %%mm2 \n\t" /* A1-B1 a1-b1 */\
+ "psrad $" #shift ", %%mm1 \n\t"\
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "packssdw %%mm1, %%mm7 \n\t" /* A1+B1 a1+b1 A0+B0 a0+b0 */\
+ "packssdw %%mm4, %%mm2 \n\t" /* A0-B0 a0-b0 A1-B1 a1-b1 */\
+ "movq %%mm7, " #dst " \n\t"\
+ "movq " #src1 ", %%mm1 \n\t" /* R3 R1 r3 r1 */\
+ "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\
+ "movq %%mm2, 24+" #dst " \n\t"\
+ "pmaddwd %%mm1, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\
+ "movq 88(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\
+ "pmaddwd 96(%2), %%mm1 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\
+ "pmaddwd %%mm3, %%mm7 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\
+ "movq %%mm0, %%mm2 \n\t" /* A2 a2 */\
+ "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\
+ "paddd %%mm7, %%mm4 \n\t" /* B2 b2 */\
+ "paddd %%mm4, %%mm2 \n\t" /* A2+B2 a2+b2 */\
+ "psubd %%mm4, %%mm0 \n\t" /* a2-B2 a2-b2 */\
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "psrad $" #shift ", %%mm0 \n\t"\
+ "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\
+ "paddd %%mm1, %%mm3 \n\t" /* B3 b3 */\
+ "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "psubd %%mm3, %%mm4 \n\t" /* a3-B3 a3-b3 */\
+ "psrad $" #shift ", %%mm6 \n\t"\
+ "packssdw %%mm6, %%mm2 \n\t" /* A3+B3 a3+b3 A2+B2 a2+b2 */\
+ "movq %%mm2, 8+" #dst " \n\t"\
+ "psrad $" #shift ", %%mm4 \n\t"\
+ "packssdw %%mm0, %%mm4 \n\t" /* A2-B2 a2-b2 A3-B3 a3-b3 */\
+ "movq %%mm4, 16+" #dst " \n\t"\
+ "jmp 2f \n\t"\
+ "1: \n\t"\
+ "pslld $16, %%mm0 \n\t"\
+ "paddd "MANGLE(d40000)", %%mm0 \n\t"\
+ "psrad $13, %%mm0 \n\t"\
+ "packssdw %%mm0, %%mm0 \n\t"\
+ "movq %%mm0, " #dst " \n\t"\
+ "movq %%mm0, 8+" #dst " \n\t"\
+ "movq %%mm0, 16+" #dst " \n\t"\
+ "movq %%mm0, 24+" #dst " \n\t"\
+ "2: \n\t"
+
+#define Z_COND_IDCT(src0, src4, src1, src5, dst, rounder, shift, bt) \
+ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\
+ "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\
+ "movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\
+ "movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\
+ "movq %%mm0, %%mm4 \n\t"\
+ "por %%mm1, %%mm4 \n\t"\
+ "por %%mm2, %%mm4 \n\t"\
+ "por %%mm3, %%mm4 \n\t"\
+ "packssdw %%mm4,%%mm4 \n\t"\
+ "movd %%mm4, %%eax \n\t"\
+ "orl %%eax, %%eax \n\t"\
+ "jz " #bt " \n\t"\
+ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\
+ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\
+ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\
+ "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\
+ "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\
+ "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\
+ "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\
+ "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\
+ #rounder ", %%mm4 \n\t"\
+ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\
+ "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\
+ "movq 56(%2), %%mm5 \n\t" /* C7 C5 C7 C5 */\
+ "pmaddwd %%mm3, %%mm5 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\
+ #rounder ", %%mm0 \n\t"\
+ "paddd %%mm0, %%mm1 \n\t" /* A1 a1 */\
+ "paddd %%mm0, %%mm0 \n\t" \
+ "psubd %%mm1, %%mm0 \n\t" /* A2 a2 */\
+ "pmaddwd 64(%2), %%mm2 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\
+ "paddd %%mm5, %%mm7 \n\t" /* B0 b0 */\
+ "movq 72(%2), %%mm5 \n\t" /* -C5 -C1 -C5 -C1 */\
+ "pmaddwd %%mm3, %%mm5 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\
+ "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\
+ "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\
+ "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "paddd %%mm2, %%mm5 \n\t" /* B1 b1 */\
+ "psrad $" #shift ", %%mm7 \n\t"\
+ "psrad $" #shift ", %%mm4 \n\t"\
+ "movq %%mm1, %%mm2 \n\t" /* A1 a1 */\
+ "paddd %%mm5, %%mm1 \n\t" /* A1+B1 a1+b1 */\
+ "psubd %%mm5, %%mm2 \n\t" /* A1-B1 a1-b1 */\
+ "psrad $" #shift ", %%mm1 \n\t"\
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "packssdw %%mm1, %%mm7 \n\t" /* A1+B1 a1+b1 A0+B0 a0+b0 */\
+ "packssdw %%mm4, %%mm2 \n\t" /* A0-B0 a0-b0 A1-B1 a1-b1 */\
+ "movq %%mm7, " #dst " \n\t"\
+ "movq " #src1 ", %%mm1 \n\t" /* R3 R1 r3 r1 */\
+ "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\
+ "movq %%mm2, 24+" #dst " \n\t"\
+ "pmaddwd %%mm1, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\
+ "movq 88(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\
+ "pmaddwd 96(%2), %%mm1 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\
+ "pmaddwd %%mm3, %%mm7 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\
+ "movq %%mm0, %%mm2 \n\t" /* A2 a2 */\
+ "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\
+ "paddd %%mm7, %%mm4 \n\t" /* B2 b2 */\
+ "paddd %%mm4, %%mm2 \n\t" /* A2+B2 a2+b2 */\
+ "psubd %%mm4, %%mm0 \n\t" /* a2-B2 a2-b2 */\
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "psrad $" #shift ", %%mm0 \n\t"\
+ "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\
+ "paddd %%mm1, %%mm3 \n\t" /* B3 b3 */\
+ "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "psubd %%mm3, %%mm4 \n\t" /* a3-B3 a3-b3 */\
+ "psrad $" #shift ", %%mm6 \n\t"\
+ "packssdw %%mm6, %%mm2 \n\t" /* A3+B3 a3+b3 A2+B2 a2+b2 */\
+ "movq %%mm2, 8+" #dst " \n\t"\
+ "psrad $" #shift ", %%mm4 \n\t"\
+ "packssdw %%mm0, %%mm4 \n\t" /* A2-B2 a2-b2 A3-B3 a3-b3 */\
+ "movq %%mm4, 16+" #dst " \n\t"\
+
+#define ROW_IDCT(src0, src4, src1, src5, dst, rounder, shift) \
+ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\
+ "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\
+ "movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\
+ "movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\
+ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\
+ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\
+ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\
+ "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\
+ "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\
+ "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\
+ "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\
+ "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\
+ #rounder ", %%mm4 \n\t"\
+ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\
+ "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\
+ "movq 56(%2), %%mm5 \n\t" /* C7 C5 C7 C5 */\
+ "pmaddwd %%mm3, %%mm5 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\
+ #rounder ", %%mm0 \n\t"\
+ "paddd %%mm0, %%mm1 \n\t" /* A1 a1 */\
+ "paddd %%mm0, %%mm0 \n\t" \
+ "psubd %%mm1, %%mm0 \n\t" /* A2 a2 */\
+ "pmaddwd 64(%2), %%mm2 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\
+ "paddd %%mm5, %%mm7 \n\t" /* B0 b0 */\
+ "movq 72(%2), %%mm5 \n\t" /* -C5 -C1 -C5 -C1 */\
+ "pmaddwd %%mm3, %%mm5 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\
+ "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\
+ "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\
+ "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "paddd %%mm2, %%mm5 \n\t" /* B1 b1 */\
+ "psrad $" #shift ", %%mm7 \n\t"\
+ "psrad $" #shift ", %%mm4 \n\t"\
+ "movq %%mm1, %%mm2 \n\t" /* A1 a1 */\
+ "paddd %%mm5, %%mm1 \n\t" /* A1+B1 a1+b1 */\
+ "psubd %%mm5, %%mm2 \n\t" /* A1-B1 a1-b1 */\
+ "psrad $" #shift ", %%mm1 \n\t"\
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "packssdw %%mm1, %%mm7 \n\t" /* A1+B1 a1+b1 A0+B0 a0+b0 */\
+ "packssdw %%mm4, %%mm2 \n\t" /* A0-B0 a0-b0 A1-B1 a1-b1 */\
+ "movq %%mm7, " #dst " \n\t"\
+ "movq " #src1 ", %%mm1 \n\t" /* R3 R1 r3 r1 */\
+ "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\
+ "movq %%mm2, 24+" #dst " \n\t"\
+ "pmaddwd %%mm1, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\
+ "movq 88(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\
+ "pmaddwd 96(%2), %%mm1 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\
+ "pmaddwd %%mm3, %%mm7 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\
+ "movq %%mm0, %%mm2 \n\t" /* A2 a2 */\
+ "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\
+ "paddd %%mm7, %%mm4 \n\t" /* B2 b2 */\
+ "paddd %%mm4, %%mm2 \n\t" /* A2+B2 a2+b2 */\
+ "psubd %%mm4, %%mm0 \n\t" /* a2-B2 a2-b2 */\
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "psrad $" #shift ", %%mm0 \n\t"\
+ "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\
+ "paddd %%mm1, %%mm3 \n\t" /* B3 b3 */\
+ "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "psubd %%mm3, %%mm4 \n\t" /* a3-B3 a3-b3 */\
+ "psrad $" #shift ", %%mm6 \n\t"\
+ "packssdw %%mm6, %%mm2 \n\t" /* A3+B3 a3+b3 A2+B2 a2+b2 */\
+ "movq %%mm2, 8+" #dst " \n\t"\
+ "psrad $" #shift ", %%mm4 \n\t"\
+ "packssdw %%mm0, %%mm4 \n\t" /* A2-B2 a2-b2 A3-B3 a3-b3 */\
+ "movq %%mm4, 16+" #dst " \n\t"\
+
+//IDCT( src0, src4, src1, src5, dst, rounder, shift)
+DC_COND_IDCT( 0(%0), 8(%0), 16(%0), 24(%0), 0(%1),paddd 8(%2), 11)
+Z_COND_IDCT( 32(%0), 40(%0), 48(%0), 56(%0), 32(%1),paddd (%2), 11, 4f)
+Z_COND_IDCT( 64(%0), 72(%0), 80(%0), 88(%0), 64(%1),paddd (%2), 11, 2f)
+Z_COND_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11, 1f)
+
+#undef IDCT
+#define IDCT(src0, src4, src1, src5, dst, shift) \
+ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\
+ "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\
+ "movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\
+ "movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\
+ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\
+ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\
+ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\
+ "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\
+ "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\
+ "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\
+ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\
+ "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\
+ "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\
+ "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\
+ "movq %%mm0, %%mm5 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "paddd %%mm1, %%mm0 \n\t" /* A1 a1 */\
+ "psubd %%mm1, %%mm5 \n\t" /* A2 a2 */\
+ "movq 56(%2), %%mm1 \n\t" /* C7 C5 C7 C5 */\
+ "pmaddwd %%mm3, %%mm1 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\
+ "pmaddwd 64(%2), %%mm2 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\
+ "paddd %%mm1, %%mm7 \n\t" /* B0 b0 */\
+ "movq 72(%2), %%mm1 \n\t" /* -C5 -C1 -C5 -C1 */\
+ "pmaddwd %%mm3, %%mm1 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\
+ "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\
+ "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\
+ "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "paddd %%mm2, %%mm1 \n\t" /* B1 b1 */\
+ "psrad $" #shift ", %%mm7 \n\t"\
+ "psrad $" #shift ", %%mm4 \n\t"\
+ "movq %%mm0, %%mm2 \n\t" /* A1 a1 */\
+ "paddd %%mm1, %%mm0 \n\t" /* A1+B1 a1+b1 */\
+ "psubd %%mm1, %%mm2 \n\t" /* A1-B1 a1-b1 */\
+ "psrad $" #shift ", %%mm0 \n\t"\
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "packssdw %%mm7, %%mm7 \n\t" /* A0+B0 a0+b0 */\
+ "movd %%mm7, " #dst " \n\t"\
+ "packssdw %%mm0, %%mm0 \n\t" /* A1+B1 a1+b1 */\
+ "movd %%mm0, 16+" #dst " \n\t"\
+ "packssdw %%mm2, %%mm2 \n\t" /* A1-B1 a1-b1 */\
+ "movd %%mm2, 96+" #dst " \n\t"\
+ "packssdw %%mm4, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "movd %%mm4, 112+" #dst " \n\t"\
+ "movq " #src1 ", %%mm0 \n\t" /* R3 R1 r3 r1 */\
+ "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\
+ "pmaddwd %%mm0, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\
+ "movq 88(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\
+ "pmaddwd 96(%2), %%mm0 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\
+ "pmaddwd %%mm3, %%mm7 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\
+ "movq %%mm5, %%mm2 \n\t" /* A2 a2 */\
+ "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\
+ "paddd %%mm7, %%mm4 \n\t" /* B2 b2 */\
+ "paddd %%mm4, %%mm2 \n\t" /* A2+B2 a2+b2 */\
+ "psubd %%mm4, %%mm5 \n\t" /* a2-B2 a2-b2 */\
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "psrad $" #shift ", %%mm5 \n\t"\
+ "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\
+ "paddd %%mm0, %%mm3 \n\t" /* B3 b3 */\
+ "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "psubd %%mm3, %%mm4 \n\t" /* a3-B3 a3-b3 */\
+ "psrad $" #shift ", %%mm6 \n\t"\
+ "psrad $" #shift ", %%mm4 \n\t"\
+ "packssdw %%mm2, %%mm2 \n\t" /* A2+B2 a2+b2 */\
+ "packssdw %%mm6, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "movd %%mm2, 32+" #dst " \n\t"\
+ "packssdw %%mm4, %%mm4 \n\t" /* A3-B3 a3-b3 */\
+ "packssdw %%mm5, %%mm5 \n\t" /* A2-B2 a2-b2 */\
+ "movd %%mm6, 48+" #dst " \n\t"\
+ "movd %%mm4, 64+" #dst " \n\t"\
+ "movd %%mm5, 80+" #dst " \n\t"
+
+
+//IDCT( src0, src4, src1, src5, dst, shift)
+IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0), 20)
+IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0), 20)
+IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20)
+IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20)
+ "jmp 9f \n\t"
+
+ "#" ASMALIGN(4) \
+ "4: \n\t"
+Z_COND_IDCT( 64(%0), 72(%0), 80(%0), 88(%0), 64(%1),paddd (%2), 11, 6f)
+Z_COND_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11, 5f)
+
+#undef IDCT
+#define IDCT(src0, src4, src1, src5, dst, shift) \
+ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\
+ "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\
+ "movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\
+ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\
+ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\
+ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\
+ "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\
+ "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\
+ "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\
+ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\
+ "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\
+ "movq %%mm0, %%mm5 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "paddd %%mm1, %%mm0 \n\t" /* A1 a1 */\
+ "psubd %%mm1, %%mm5 \n\t" /* A2 a2 */\
+ "movq 56(%2), %%mm1 \n\t" /* C7 C5 C7 C5 */\
+ "pmaddwd %%mm3, %%mm1 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\
+ "movq 72(%2), %%mm7 \n\t" /* -C5 -C1 -C5 -C1 */\
+ "pmaddwd %%mm3, %%mm7 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\
+ "paddd %%mm4, %%mm1 \n\t" /* A0+B0 a0+b0 */\
+ "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\
+ "psubd %%mm1, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "psrad $" #shift ", %%mm1 \n\t"\
+ "psrad $" #shift ", %%mm4 \n\t"\
+ "movq %%mm0, %%mm2 \n\t" /* A1 a1 */\
+ "paddd %%mm7, %%mm0 \n\t" /* A1+B1 a1+b1 */\
+ "psubd %%mm7, %%mm2 \n\t" /* A1-B1 a1-b1 */\
+ "psrad $" #shift ", %%mm0 \n\t"\
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "packssdw %%mm1, %%mm1 \n\t" /* A0+B0 a0+b0 */\
+ "movd %%mm1, " #dst " \n\t"\
+ "packssdw %%mm0, %%mm0 \n\t" /* A1+B1 a1+b1 */\
+ "movd %%mm0, 16+" #dst " \n\t"\
+ "packssdw %%mm2, %%mm2 \n\t" /* A1-B1 a1-b1 */\
+ "movd %%mm2, 96+" #dst " \n\t"\
+ "packssdw %%mm4, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "movd %%mm4, 112+" #dst " \n\t"\
+ "movq 88(%2), %%mm1 \n\t" /* C3 C7 C3 C7 */\
+ "pmaddwd %%mm3, %%mm1 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\
+ "movq %%mm5, %%mm2 \n\t" /* A2 a2 */\
+ "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\
+ "paddd %%mm1, %%mm2 \n\t" /* A2+B2 a2+b2 */\
+ "psubd %%mm1, %%mm5 \n\t" /* a2-B2 a2-b2 */\
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "psrad $" #shift ", %%mm5 \n\t"\
+ "movq %%mm6, %%mm1 \n\t" /* A3 a3 */\
+ "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "psubd %%mm3, %%mm1 \n\t" /* a3-B3 a3-b3 */\
+ "psrad $" #shift ", %%mm6 \n\t"\
+ "psrad $" #shift ", %%mm1 \n\t"\
+ "packssdw %%mm2, %%mm2 \n\t" /* A2+B2 a2+b2 */\
+ "packssdw %%mm6, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "movd %%mm2, 32+" #dst " \n\t"\
+ "packssdw %%mm1, %%mm1 \n\t" /* A3-B3 a3-b3 */\
+ "packssdw %%mm5, %%mm5 \n\t" /* A2-B2 a2-b2 */\
+ "movd %%mm6, 48+" #dst " \n\t"\
+ "movd %%mm1, 64+" #dst " \n\t"\
+ "movd %%mm5, 80+" #dst " \n\t"
+
+//IDCT( src0, src4, src1, src5, dst, shift)
+IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0), 20)
+IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0), 20)
+IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20)
+IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20)
+ "jmp 9f \n\t"
+
+ "#" ASMALIGN(4) \
+ "6: \n\t"
+Z_COND_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11, 7f)
+
+#undef IDCT
+#define IDCT(src0, src4, src1, src5, dst, shift) \
+ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\
+ "movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\
+ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\
+ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\
+ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq %%mm0, %%mm5 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "movq 56(%2), %%mm1 \n\t" /* C7 C5 C7 C5 */\
+ "pmaddwd %%mm3, %%mm1 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\
+ "movq 72(%2), %%mm7 \n\t" /* -C5 -C1 -C5 -C1 */\
+ "pmaddwd %%mm3, %%mm7 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\
+ "paddd %%mm4, %%mm1 \n\t" /* A0+B0 a0+b0 */\
+ "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\
+ "psubd %%mm1, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "psrad $" #shift ", %%mm1 \n\t"\
+ "psrad $" #shift ", %%mm4 \n\t"\
+ "movq %%mm0, %%mm2 \n\t" /* A1 a1 */\
+ "paddd %%mm7, %%mm0 \n\t" /* A1+B1 a1+b1 */\
+ "psubd %%mm7, %%mm2 \n\t" /* A1-B1 a1-b1 */\
+ "psrad $" #shift ", %%mm0 \n\t"\
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "packssdw %%mm1, %%mm1 \n\t" /* A0+B0 a0+b0 */\
+ "movd %%mm1, " #dst " \n\t"\
+ "packssdw %%mm0, %%mm0 \n\t" /* A1+B1 a1+b1 */\
+ "movd %%mm0, 16+" #dst " \n\t"\
+ "packssdw %%mm2, %%mm2 \n\t" /* A1-B1 a1-b1 */\
+ "movd %%mm2, 96+" #dst " \n\t"\
+ "packssdw %%mm4, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "movd %%mm4, 112+" #dst " \n\t"\
+ "movq 88(%2), %%mm1 \n\t" /* C3 C7 C3 C7 */\
+ "pmaddwd %%mm3, %%mm1 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\
+ "movq %%mm5, %%mm2 \n\t" /* A2 a2 */\
+ "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\
+ "paddd %%mm1, %%mm2 \n\t" /* A2+B2 a2+b2 */\
+ "psubd %%mm1, %%mm5 \n\t" /* a2-B2 a2-b2 */\
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "psrad $" #shift ", %%mm5 \n\t"\
+ "movq %%mm6, %%mm1 \n\t" /* A3 a3 */\
+ "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "psubd %%mm3, %%mm1 \n\t" /* a3-B3 a3-b3 */\
+ "psrad $" #shift ", %%mm6 \n\t"\
+ "psrad $" #shift ", %%mm1 \n\t"\
+ "packssdw %%mm2, %%mm2 \n\t" /* A2+B2 a2+b2 */\
+ "packssdw %%mm6, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "movd %%mm2, 32+" #dst " \n\t"\
+ "packssdw %%mm1, %%mm1 \n\t" /* A3-B3 a3-b3 */\
+ "packssdw %%mm5, %%mm5 \n\t" /* A2-B2 a2-b2 */\
+ "movd %%mm6, 48+" #dst " \n\t"\
+ "movd %%mm1, 64+" #dst " \n\t"\
+ "movd %%mm5, 80+" #dst " \n\t"
+
+
+//IDCT( src0, src4, src1, src5, dst, shift)
+IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0), 20)
+IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0), 20)
+IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20)
+IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20)
+ "jmp 9f \n\t"
+
+ "#" ASMALIGN(4) \
+ "2: \n\t"
+Z_COND_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11, 3f)
+
+#undef IDCT
+#define IDCT(src0, src4, src1, src5, dst, shift) \
+ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\
+ "movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\
+ "movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\
+ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\
+ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\
+ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\
+ "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\
+ "movq %%mm0, %%mm5 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "movq 56(%2), %%mm1 \n\t" /* C7 C5 C7 C5 */\
+ "pmaddwd %%mm3, %%mm1 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\
+ "pmaddwd 64(%2), %%mm2 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\
+ "paddd %%mm1, %%mm7 \n\t" /* B0 b0 */\
+ "movq 72(%2), %%mm1 \n\t" /* -C5 -C1 -C5 -C1 */\
+ "pmaddwd %%mm3, %%mm1 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\
+ "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\
+ "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\
+ "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "paddd %%mm2, %%mm1 \n\t" /* B1 b1 */\
+ "psrad $" #shift ", %%mm7 \n\t"\
+ "psrad $" #shift ", %%mm4 \n\t"\
+ "movq %%mm0, %%mm2 \n\t" /* A1 a1 */\
+ "paddd %%mm1, %%mm0 \n\t" /* A1+B1 a1+b1 */\
+ "psubd %%mm1, %%mm2 \n\t" /* A1-B1 a1-b1 */\
+ "psrad $" #shift ", %%mm0 \n\t"\
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "packssdw %%mm7, %%mm7 \n\t" /* A0+B0 a0+b0 */\
+ "movd %%mm7, " #dst " \n\t"\
+ "packssdw %%mm0, %%mm0 \n\t" /* A1+B1 a1+b1 */\
+ "movd %%mm0, 16+" #dst " \n\t"\
+ "packssdw %%mm2, %%mm2 \n\t" /* A1-B1 a1-b1 */\
+ "movd %%mm2, 96+" #dst " \n\t"\
+ "packssdw %%mm4, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "movd %%mm4, 112+" #dst " \n\t"\
+ "movq " #src1 ", %%mm0 \n\t" /* R3 R1 r3 r1 */\
+ "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\
+ "pmaddwd %%mm0, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\
+ "movq 88(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\
+ "pmaddwd 96(%2), %%mm0 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\
+ "pmaddwd %%mm3, %%mm7 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\
+ "movq %%mm5, %%mm2 \n\t" /* A2 a2 */\
+ "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\
+ "paddd %%mm7, %%mm4 \n\t" /* B2 b2 */\
+ "paddd %%mm4, %%mm2 \n\t" /* A2+B2 a2+b2 */\
+ "psubd %%mm4, %%mm5 \n\t" /* a2-B2 a2-b2 */\
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "psrad $" #shift ", %%mm5 \n\t"\
+ "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\
+ "paddd %%mm0, %%mm3 \n\t" /* B3 b3 */\
+ "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "psubd %%mm3, %%mm4 \n\t" /* a3-B3 a3-b3 */\
+ "psrad $" #shift ", %%mm6 \n\t"\
+ "psrad $" #shift ", %%mm4 \n\t"\
+ "packssdw %%mm2, %%mm2 \n\t" /* A2+B2 a2+b2 */\
+ "packssdw %%mm6, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "movd %%mm2, 32+" #dst " \n\t"\
+ "packssdw %%mm4, %%mm4 \n\t" /* A3-B3 a3-b3 */\
+ "packssdw %%mm5, %%mm5 \n\t" /* A2-B2 a2-b2 */\
+ "movd %%mm6, 48+" #dst " \n\t"\
+ "movd %%mm4, 64+" #dst " \n\t"\
+ "movd %%mm5, 80+" #dst " \n\t"
+
+//IDCT( src0, src4, src1, src5, dst, shift)
+IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0), 20)
+IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0), 20)
+IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20)
+IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20)
+ "jmp 9f \n\t"
+
+ "#" ASMALIGN(4) \
+ "3: \n\t"
+#undef IDCT
+#define IDCT(src0, src4, src1, src5, dst, shift) \
+ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\
+ "movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\
+ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\
+ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\
+ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\
+ "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\
+ "movq %%mm0, %%mm5 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "movq 64(%2), %%mm3 \n\t"\
+ "pmaddwd %%mm2, %%mm3 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\
+ "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\
+ "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\
+ "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "psrad $" #shift ", %%mm7 \n\t"\
+ "psrad $" #shift ", %%mm4 \n\t"\
+ "movq %%mm0, %%mm1 \n\t" /* A1 a1 */\
+ "paddd %%mm3, %%mm0 \n\t" /* A1+B1 a1+b1 */\
+ "psubd %%mm3, %%mm1 \n\t" /* A1-B1 a1-b1 */\
+ "psrad $" #shift ", %%mm0 \n\t"\
+ "psrad $" #shift ", %%mm1 \n\t"\
+ "packssdw %%mm7, %%mm7 \n\t" /* A0+B0 a0+b0 */\
+ "movd %%mm7, " #dst " \n\t"\
+ "packssdw %%mm0, %%mm0 \n\t" /* A1+B1 a1+b1 */\
+ "movd %%mm0, 16+" #dst " \n\t"\
+ "packssdw %%mm1, %%mm1 \n\t" /* A1-B1 a1-b1 */\
+ "movd %%mm1, 96+" #dst " \n\t"\
+ "packssdw %%mm4, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "movd %%mm4, 112+" #dst " \n\t"\
+ "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\
+ "pmaddwd %%mm2, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\
+ "pmaddwd 96(%2), %%mm2 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\
+ "movq %%mm5, %%mm1 \n\t" /* A2 a2 */\
+ "paddd %%mm4, %%mm1 \n\t" /* A2+B2 a2+b2 */\
+ "psubd %%mm4, %%mm5 \n\t" /* a2-B2 a2-b2 */\
+ "psrad $" #shift ", %%mm1 \n\t"\
+ "psrad $" #shift ", %%mm5 \n\t"\
+ "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\
+ "paddd %%mm2, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "psubd %%mm2, %%mm4 \n\t" /* a3-B3 a3-b3 */\
+ "psrad $" #shift ", %%mm6 \n\t"\
+ "psrad $" #shift ", %%mm4 \n\t"\
+ "packssdw %%mm1, %%mm1 \n\t" /* A2+B2 a2+b2 */\
+ "packssdw %%mm6, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "movd %%mm1, 32+" #dst " \n\t"\
+ "packssdw %%mm4, %%mm4 \n\t" /* A3-B3 a3-b3 */\
+ "packssdw %%mm5, %%mm5 \n\t" /* A2-B2 a2-b2 */\
+ "movd %%mm6, 48+" #dst " \n\t"\
+ "movd %%mm4, 64+" #dst " \n\t"\
+ "movd %%mm5, 80+" #dst " \n\t"
+
+
+//IDCT( src0, src4, src1, src5, dst, shift)
+IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0), 20)
+IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0), 20)
+IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20)
+IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20)
+ "jmp 9f \n\t"
+
+ "#" ASMALIGN(4) \
+ "5: \n\t"
+#undef IDCT
+#define IDCT(src0, src4, src1, src5, dst, shift) \
+ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\
+ "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\
+ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\
+ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\
+ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\
+ "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\
+ "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\
+ "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\
+ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\
+ "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\
+ "movq %%mm0, %%mm5 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "paddd %%mm1, %%mm0 \n\t" /* A1 a1 */\
+ "psubd %%mm1, %%mm5 \n\t" /* A2 a2 */\
+ "movq 8+" #src0 ", %%mm2 \n\t" /* R4 R0 r4 r0 */\
+ "movq 8+" #src4 ", %%mm3 \n\t" /* R6 R2 r6 r2 */\
+ "movq 16(%2), %%mm1 \n\t" /* C4 C4 C4 C4 */\
+ "pmaddwd %%mm2, %%mm1 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 24(%2), %%mm7 \n\t" /* -C4 C4 -C4 C4 */\
+ "pmaddwd %%mm7, %%mm2 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "movq 32(%2), %%mm7 \n\t" /* C6 C2 C6 C2 */\
+ "pmaddwd %%mm3, %%mm7 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\
+ "pmaddwd 40(%2), %%mm3 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\
+ "paddd %%mm1, %%mm7 \n\t" /* A0 a0 */\
+ "paddd %%mm1, %%mm1 \n\t" /* 2C0 2c0 */\
+ "psubd %%mm7, %%mm1 \n\t" /* A3 a3 */\
+ "paddd %%mm2, %%mm3 \n\t" /* A1 a1 */\
+ "paddd %%mm2, %%mm2 \n\t" /* 2C1 2c1 */\
+ "psubd %%mm3, %%mm2 \n\t" /* A2 a2 */\
+ "psrad $" #shift ", %%mm4 \n\t"\
+ "psrad $" #shift ", %%mm7 \n\t"\
+ "psrad $" #shift ", %%mm3 \n\t"\
+ "packssdw %%mm7, %%mm4 \n\t" /* A0 a0 */\
+ "movq %%mm4, " #dst " \n\t"\
+ "psrad $" #shift ", %%mm0 \n\t"\
+ "packssdw %%mm3, %%mm0 \n\t" /* A1 a1 */\
+ "movq %%mm0, 16+" #dst " \n\t"\
+ "movq %%mm0, 96+" #dst " \n\t"\
+ "movq %%mm4, 112+" #dst " \n\t"\
+ "psrad $" #shift ", %%mm5 \n\t"\
+ "psrad $" #shift ", %%mm6 \n\t"\
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "packssdw %%mm2, %%mm5 \n\t" /* A2-B2 a2-b2 */\
+ "movq %%mm5, 32+" #dst " \n\t"\
+ "psrad $" #shift ", %%mm1 \n\t"\
+ "packssdw %%mm1, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "movq %%mm6, 48+" #dst " \n\t"\
+ "movq %%mm6, 64+" #dst " \n\t"\
+ "movq %%mm5, 80+" #dst " \n\t"
+
+
+//IDCT( src0, src4, src1, src5, dst, shift)
+IDCT( 0(%1), 64(%1), 32(%1), 96(%1), 0(%0), 20)
+//IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0), 20)
+IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20)
+//IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20)
+ "jmp 9f \n\t"
+
+
+ "#" ASMALIGN(4) \
+ "1: \n\t"
+#undef IDCT
+#define IDCT(src0, src4, src1, src5, dst, shift) \
+ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\
+ "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\
+ "movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\
+ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\
+ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\
+ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\
+ "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\
+ "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\
+ "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\
+ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\
+ "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\
+ "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\
+ "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\
+ "movq %%mm0, %%mm5 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "paddd %%mm1, %%mm0 \n\t" /* A1 a1 */\
+ "psubd %%mm1, %%mm5 \n\t" /* A2 a2 */\
+ "movq 64(%2), %%mm1 \n\t"\
+ "pmaddwd %%mm2, %%mm1 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\
+ "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\
+ "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\
+ "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "psrad $" #shift ", %%mm7 \n\t"\
+ "psrad $" #shift ", %%mm4 \n\t"\
+ "movq %%mm0, %%mm3 \n\t" /* A1 a1 */\
+ "paddd %%mm1, %%mm0 \n\t" /* A1+B1 a1+b1 */\
+ "psubd %%mm1, %%mm3 \n\t" /* A1-B1 a1-b1 */\
+ "psrad $" #shift ", %%mm0 \n\t"\
+ "psrad $" #shift ", %%mm3 \n\t"\
+ "packssdw %%mm7, %%mm7 \n\t" /* A0+B0 a0+b0 */\
+ "movd %%mm7, " #dst " \n\t"\
+ "packssdw %%mm0, %%mm0 \n\t" /* A1+B1 a1+b1 */\
+ "movd %%mm0, 16+" #dst " \n\t"\
+ "packssdw %%mm3, %%mm3 \n\t" /* A1-B1 a1-b1 */\
+ "movd %%mm3, 96+" #dst " \n\t"\
+ "packssdw %%mm4, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "movd %%mm4, 112+" #dst " \n\t"\
+ "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\
+ "pmaddwd %%mm2, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\
+ "pmaddwd 96(%2), %%mm2 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\
+ "movq %%mm5, %%mm3 \n\t" /* A2 a2 */\
+ "paddd %%mm4, %%mm3 \n\t" /* A2+B2 a2+b2 */\
+ "psubd %%mm4, %%mm5 \n\t" /* a2-B2 a2-b2 */\
+ "psrad $" #shift ", %%mm3 \n\t"\
+ "psrad $" #shift ", %%mm5 \n\t"\
+ "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\
+ "paddd %%mm2, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "psubd %%mm2, %%mm4 \n\t" /* a3-B3 a3-b3 */\
+ "psrad $" #shift ", %%mm6 \n\t"\
+ "packssdw %%mm3, %%mm3 \n\t" /* A2+B2 a2+b2 */\
+ "movd %%mm3, 32+" #dst " \n\t"\
+ "psrad $" #shift ", %%mm4 \n\t"\
+ "packssdw %%mm6, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "movd %%mm6, 48+" #dst " \n\t"\
+ "packssdw %%mm4, %%mm4 \n\t" /* A3-B3 a3-b3 */\
+ "packssdw %%mm5, %%mm5 \n\t" /* A2-B2 a2-b2 */\
+ "movd %%mm4, 64+" #dst " \n\t"\
+ "movd %%mm5, 80+" #dst " \n\t"
+
+
+//IDCT( src0, src4, src1, src5, dst, shift)
+IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0), 20)
+IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0), 20)
+IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20)
+IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20)
+ "jmp 9f \n\t"
+
+
+ "#" ASMALIGN(4)
+ "7: \n\t"
+#undef IDCT
+#define IDCT(src0, src4, src1, src5, dst, shift) \
+ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\
+ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\
+ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\
+ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "psrad $" #shift ", %%mm4 \n\t"\
+ "psrad $" #shift ", %%mm0 \n\t"\
+ "movq 8+" #src0 ", %%mm2 \n\t" /* R4 R0 r4 r0 */\
+ "movq 16(%2), %%mm1 \n\t" /* C4 C4 C4 C4 */\
+ "pmaddwd %%mm2, %%mm1 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 24(%2), %%mm7 \n\t" /* -C4 C4 -C4 C4 */\
+ "pmaddwd %%mm7, %%mm2 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "movq 32(%2), %%mm7 \n\t" /* C6 C2 C6 C2 */\
+ "psrad $" #shift ", %%mm1 \n\t"\
+ "packssdw %%mm1, %%mm4 \n\t" /* A0 a0 */\
+ "movq %%mm4, " #dst " \n\t"\
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "packssdw %%mm2, %%mm0 \n\t" /* A1 a1 */\
+ "movq %%mm0, 16+" #dst " \n\t"\
+ "movq %%mm0, 96+" #dst " \n\t"\
+ "movq %%mm4, 112+" #dst " \n\t"\
+ "movq %%mm0, 32+" #dst " \n\t"\
+ "movq %%mm4, 48+" #dst " \n\t"\
+ "movq %%mm4, 64+" #dst " \n\t"\
+ "movq %%mm0, 80+" #dst " \n\t"
+
+//IDCT( src0, src4, src1, src5, dst, shift)
+IDCT( 0(%1), 64(%1), 32(%1), 96(%1), 0(%0), 20)
+//IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0), 20)
+IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20)
+//IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20)
+
+
+#endif
+
+/*
+Input
+ 00 40 04 44 20 60 24 64
+ 10 30 14 34 50 70 54 74
+ 01 41 03 43 21 61 23 63
+ 11 31 13 33 51 71 53 73
+ 02 42 06 46 22 62 26 66
+ 12 32 16 36 52 72 56 76
+ 05 45 07 47 25 65 27 67
+ 15 35 17 37 55 75 57 77
+
+Temp
+ 00 04 10 14 20 24 30 34
+ 40 44 50 54 60 64 70 74
+ 01 03 11 13 21 23 31 33
+ 41 43 51 53 61 63 71 73
+ 02 06 12 16 22 26 32 36
+ 42 46 52 56 62 66 72 76
+ 05 07 15 17 25 27 35 37
+ 45 47 55 57 65 67 75 77
+*/
+
+"9: \n\t"
+ :: "r" (block), "r" (temp), "r" (coeffs)
+ : "%eax"
+ );
+}
+
+void ff_simple_idct_mmx(int16_t *block)
+{
+ idct(block);
+}
+
+//FIXME merge add/put into the idct
+
+void ff_simple_idct_put_mmx(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ idct(block);
+ put_pixels_clamped_mmx(block, dest, line_size);
+}
+
+void ff_simple_idct_add_mmx(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ idct(block);
+ add_pixels_clamped_mmx(block, dest, line_size);
+}
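+
+/* Rough C sketch of the two wrappers above (illustration only): run the IDCT
+   in place, then either store the clamped result or add it to the existing
+   pixels. put_pixels_clamped_mmx/add_pixels_clamped_mmx are defined
+   elsewhere in dsputil; the helper names below are not real symbols. */
+#if 0
+static void simple_idct_put_c_ref(uint8_t *dest, int line_size, DCTELEM *block)
+{
+    int x, y;
+    idct(block);
+    for (y = 0; y < 8; y++)
+        for (x = 0; x < 8; x++) {
+            int v = block[y*8 + x];
+            dest[y*line_size + x] = v < 0 ? 0 : v > 255 ? 255 : v;
+        }
+}
+
+static void simple_idct_add_c_ref(uint8_t *dest, int line_size, DCTELEM *block)
+{
+    int x, y;
+    idct(block);
+    for (y = 0; y < 8; y++)
+        for (x = 0; x < 8; x++) {
+            int v = dest[y*line_size + x] + block[y*8 + x];
+            dest[y*line_size + x] = v < 0 ? 0 : v > 255 ? 255 : v;
+        }
+}
+#endif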
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/vc1dsp_mmx.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/vc1dsp_mmx.c
new file mode 100644
index 000000000..bf96f2470
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/vc1dsp_mmx.c
@@ -0,0 +1,744 @@
+/*
+ * VC-1 and WMV3 - DSP functions MMX-optimized
+ * Copyright (c) 2007 Christophe GISQUET <christophe.gisquet@free.fr>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "libavutil/x86_cpu.h"
+#include "libavcodec/dsputil.h"
+#include "dsputil_mmx.h"
+
+#define OP_PUT(S,D)
+#define OP_AVG(S,D) "pavgb " #S ", " #D " \n\t"
+
+/** Add the rounder in mm7 to mm3 and mm4, then shift both right by SHIFT */
+#define NORMALIZE_MMX(SHIFT) \
+ "paddw %%mm7, %%mm3 \n\t" /* +bias-r */ \
+ "paddw %%mm7, %%mm4 \n\t" /* +bias-r */ \
+ "psraw "SHIFT", %%mm3 \n\t" \
+ "psraw "SHIFT", %%mm4 \n\t"
+
+#define TRANSFER_DO_PACK(OP) \
+ "packuswb %%mm4, %%mm3 \n\t" \
+ OP((%2), %%mm3) \
+ "movq %%mm3, (%2) \n\t"
+
+#define TRANSFER_DONT_PACK(OP) \
+ OP(0(%2), %%mm3) \
+ OP(8(%2), %%mm4) \
+ "movq %%mm3, 0(%2) \n\t" \
+ "movq %%mm4, 8(%2) \n\t"
+
+/** @see MSPEL_FILTER13_CORE for how these are used as the UNPACK macro */
+#define DO_UNPACK(reg) "punpcklbw %%mm0, " reg "\n\t"
+#define DONT_UNPACK(reg)
+
+/** Load the rounder 32-r or 8-r and broadcast it to all four words of mm7 */
+#define LOAD_ROUNDER_MMX(ROUND) \
+ "movd "ROUND", %%mm7 \n\t" \
+ "punpcklwd %%mm7, %%mm7 \n\t" \
+ "punpckldq %%mm7, %%mm7 \n\t"
+
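+/* A minimal C picture of LOAD_ROUNDER_MMX (illustration only): the
+   movd/punpcklwd/punpckldq sequence broadcasts one 16-bit rounding constant
+   into all four words of mm7, so a single paddw later adds the bias to four
+   samples at once. */
+#if 0
+static void load_rounder_c_ref(int16_t round, int16_t mm7[4])
+{
+    int i;
+    for (i = 0; i < 4; i++)
+        mm7[i] = round;
+}
+#endif
+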
+#define SHIFT2_LINE(OFF, R0,R1,R2,R3) \
+ "paddw %%mm"#R2", %%mm"#R1" \n\t" \
+ "movd (%0,%3), %%mm"#R0" \n\t" \
+ "pmullw %%mm6, %%mm"#R1" \n\t" \
+ "punpcklbw %%mm0, %%mm"#R0" \n\t" \
+ "movd (%0,%2), %%mm"#R3" \n\t" \
+ "psubw %%mm"#R0", %%mm"#R1" \n\t" \
+ "punpcklbw %%mm0, %%mm"#R3" \n\t" \
+ "paddw %%mm7, %%mm"#R1" \n\t" \
+ "psubw %%mm"#R3", %%mm"#R1" \n\t" \
+ "psraw %4, %%mm"#R1" \n\t" \
+ "movq %%mm"#R1", "#OFF"(%1) \n\t" \
+ "add %2, %0 \n\t"
+
+DECLARE_ALIGNED_16(const uint64_t, ff_pw_9) = 0x0009000900090009ULL;
+
+/** Sacrificing mm6 allows the loads from src to be pipelined */
+static void vc1_put_ver_16b_shift2_mmx(int16_t *dst,
+ const uint8_t *src, x86_reg stride,
+ int rnd, int64_t shift)
+{
+ __asm__ volatile(
+ "mov $3, %%"REG_c" \n\t"
+ LOAD_ROUNDER_MMX("%5")
+ "movq "MANGLE(ff_pw_9)", %%mm6 \n\t"
+ "1: \n\t"
+ "movd (%0), %%mm2 \n\t"
+ "add %2, %0 \n\t"
+ "movd (%0), %%mm3 \n\t"
+ "punpcklbw %%mm0, %%mm2 \n\t"
+ "punpcklbw %%mm0, %%mm3 \n\t"
+ SHIFT2_LINE( 0, 1, 2, 3, 4)
+ SHIFT2_LINE( 24, 2, 3, 4, 1)
+ SHIFT2_LINE( 48, 3, 4, 1, 2)
+ SHIFT2_LINE( 72, 4, 1, 2, 3)
+ SHIFT2_LINE( 96, 1, 2, 3, 4)
+ SHIFT2_LINE(120, 2, 3, 4, 1)
+ SHIFT2_LINE(144, 3, 4, 1, 2)
+ SHIFT2_LINE(168, 4, 1, 2, 3)
+ "sub %6, %0 \n\t"
+ "add $8, %1 \n\t"
+ "dec %%"REG_c" \n\t"
+ "jnz 1b \n\t"
+ : "+r"(src), "+r"(dst)
+ : "r"(stride), "r"(-2*stride),
+ "m"(shift), "m"(rnd), "r"(9*stride-4)
+ : "%"REG_c, "memory"
+ );
+}
+
+/**
+ * The data is already unpacked to 16 bits, so some operations can be
+ * performed directly from memory.
+ */
+#define VC1_HOR_16b_SHIFT2(OP, OPNAME)\
+static void OPNAME ## vc1_hor_16b_shift2_mmx(uint8_t *dst, x86_reg stride,\
+ const int16_t *src, int rnd)\
+{\
+ int h = 8;\
+\
+ src -= 1;\
+ rnd -= (-1+9+9-1)*1024; /* Add -1024 bias */\
+ __asm__ volatile(\
+ LOAD_ROUNDER_MMX("%4")\
+ "movq "MANGLE(ff_pw_128)", %%mm6\n\t"\
+ "movq "MANGLE(ff_pw_9)", %%mm5 \n\t"\
+ "1: \n\t"\
+ "movq 2*0+0(%1), %%mm1 \n\t"\
+ "movq 2*0+8(%1), %%mm2 \n\t"\
+ "movq 2*1+0(%1), %%mm3 \n\t"\
+ "movq 2*1+8(%1), %%mm4 \n\t"\
+ "paddw 2*3+0(%1), %%mm1 \n\t"\
+ "paddw 2*3+8(%1), %%mm2 \n\t"\
+ "paddw 2*2+0(%1), %%mm3 \n\t"\
+ "paddw 2*2+8(%1), %%mm4 \n\t"\
+ "pmullw %%mm5, %%mm3 \n\t"\
+ "pmullw %%mm5, %%mm4 \n\t"\
+ "psubw %%mm1, %%mm3 \n\t"\
+ "psubw %%mm2, %%mm4 \n\t"\
+ NORMALIZE_MMX("$7")\
+ /* Remove bias */\
+ "paddw %%mm6, %%mm3 \n\t"\
+ "paddw %%mm6, %%mm4 \n\t"\
+ TRANSFER_DO_PACK(OP)\
+ "add $24, %1 \n\t"\
+ "add %3, %2 \n\t"\
+ "decl %0 \n\t"\
+ "jnz 1b \n\t"\
+ : "+r"(h), "+r" (src), "+r" (dst)\
+ : "r"(stride), "m"(rnd)\
+ : "memory"\
+ );\
+}
+
+VC1_HOR_16b_SHIFT2(OP_PUT, put_)
+VC1_HOR_16b_SHIFT2(OP_AVG, avg_)
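+
+/* Together with vc1_put_ver_16b_shift2_mmx above, this forms the usual
+   two-pass 1/2-shift scheme: the vertical filter writes 16-bit
+   intermediates, and this horizontal pass consumes them and packs the
+   final 8-bit pixels. */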
+
+
+/**
+ * Purely vertical or horizontal 1/2 shift interpolation.
+ * mm6 is sacrificed to hold the *9 factor.
+ */
+#define VC1_SHIFT2(OP, OPNAME)\
+static void OPNAME ## vc1_shift2_mmx(uint8_t *dst, const uint8_t *src,\
+ x86_reg stride, int rnd, x86_reg offset)\
+{\
+ rnd = 8-rnd;\
+ __asm__ volatile(\
+ "mov $8, %%"REG_c" \n\t"\
+ LOAD_ROUNDER_MMX("%5")\
+ "movq "MANGLE(ff_pw_9)", %%mm6\n\t"\
+ "1: \n\t"\
+ "movd 0(%0 ), %%mm3 \n\t"\
+ "movd 4(%0 ), %%mm4 \n\t"\
+ "movd 0(%0,%2), %%mm1 \n\t"\
+ "movd 4(%0,%2), %%mm2 \n\t"\
+ "add %2, %0 \n\t"\
+ "punpcklbw %%mm0, %%mm3 \n\t"\
+ "punpcklbw %%mm0, %%mm4 \n\t"\
+ "punpcklbw %%mm0, %%mm1 \n\t"\
+ "punpcklbw %%mm0, %%mm2 \n\t"\
+ "paddw %%mm1, %%mm3 \n\t"\
+ "paddw %%mm2, %%mm4 \n\t"\
+ "movd 0(%0,%3), %%mm1 \n\t"\
+ "movd 4(%0,%3), %%mm2 \n\t"\
+ "pmullw %%mm6, %%mm3 \n\t" /* 0,9,9,0*/\
+ "pmullw %%mm6, %%mm4 \n\t" /* 0,9,9,0*/\
+ "punpcklbw %%mm0, %%mm1 \n\t"\
+ "punpcklbw %%mm0, %%mm2 \n\t"\
+ "psubw %%mm1, %%mm3 \n\t" /*-1,9,9,0*/\
+ "psubw %%mm2, %%mm4 \n\t" /*-1,9,9,0*/\
+ "movd 0(%0,%2), %%mm1 \n\t"\
+ "movd 4(%0,%2), %%mm2 \n\t"\
+ "punpcklbw %%mm0, %%mm1 \n\t"\
+ "punpcklbw %%mm0, %%mm2 \n\t"\
+ "psubw %%mm1, %%mm3 \n\t" /*-1,9,9,-1*/\
+ "psubw %%mm2, %%mm4 \n\t" /*-1,9,9,-1*/\
+ NORMALIZE_MMX("$4")\
+ "packuswb %%mm4, %%mm3 \n\t"\
+ OP((%1), %%mm3)\
+ "movq %%mm3, (%1) \n\t"\
+ "add %6, %0 \n\t"\
+ "add %4, %1 \n\t"\
+ "dec %%"REG_c" \n\t"\
+ "jnz 1b \n\t"\
+ : "+r"(src), "+r"(dst)\
+ : "r"(offset), "r"(-2*offset), "g"(stride), "m"(rnd),\
+ "g"(stride-offset)\
+ : "%"REG_c, "memory"\
+ );\
+}
+
+VC1_SHIFT2(OP_PUT, put_)
+VC1_SHIFT2(OP_AVG, avg_)
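+
+/* Illustrative scalar equivalent of the 1/2-pel filter implemented by
+   VC1_SHIFT2 (and, on 16-bit intermediates, by vc1_put_ver_16b_shift2_mmx):
+   the (-1, 9, 9, -1)/16 kernel along the chosen direction. The helper name
+   and clipping below are for illustration only. */
+#if 0
+static uint8_t vc1_shift2_pixel_c_ref(const uint8_t *p, int offset, int rnd)
+{
+    /* p points at the first of the two central (weight 9) taps */
+    int v = (-p[-offset] + 9*p[0] + 9*p[offset] - p[2*offset] + 8 - rnd) >> 4;
+    return v < 0 ? 0 : v > 255 ? 255 : v;
+}
+#endif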
+
+/**
+ * Filter coefficients made global so that all 1/4 and 3/4 shift
+ * interpolation functions can access them.
+ */
+DECLARE_ASM_CONST(16, uint64_t, ff_pw_53) = 0x0035003500350035ULL;
+DECLARE_ASM_CONST(16, uint64_t, ff_pw_18) = 0x0012001200120012ULL;
+
+/**
+ * Core of the 1/4 and 3/4 shift bicubic interpolation.
+ *
+ * @param UNPACK Macro unpacking the arguments from 8 to 16 bits (can be empty).
+ * @param MOVQ "movd 1" for packed 8-bit data, or "movq 2" if the data is already unpacked.
+ * @param A1 Address of the 1st tap (beware of packed vs. unpacked addressing).
+ * @param A2 Address of the 2nd tap
+ * @param A3 Address of the 3rd tap
+ * @param A4 Address of the 4th tap
+ */
+#define MSPEL_FILTER13_CORE(UNPACK, MOVQ, A1, A2, A3, A4) \
+ MOVQ "*0+"A1", %%mm1 \n\t" \
+ MOVQ "*4+"A1", %%mm2 \n\t" \
+ UNPACK("%%mm1") \
+ UNPACK("%%mm2") \
+ "pmullw "MANGLE(ff_pw_3)", %%mm1\n\t" \
+ "pmullw "MANGLE(ff_pw_3)", %%mm2\n\t" \
+ MOVQ "*0+"A2", %%mm3 \n\t" \
+ MOVQ "*4+"A2", %%mm4 \n\t" \
+ UNPACK("%%mm3") \
+ UNPACK("%%mm4") \
+ "pmullw %%mm6, %%mm3 \n\t" /* *18 */ \
+ "pmullw %%mm6, %%mm4 \n\t" /* *18 */ \
+ "psubw %%mm1, %%mm3 \n\t" /* 18,-3 */ \
+ "psubw %%mm2, %%mm4 \n\t" /* 18,-3 */ \
+ MOVQ "*0+"A4", %%mm1 \n\t" \
+ MOVQ "*4+"A4", %%mm2 \n\t" \
+ UNPACK("%%mm1") \
+ UNPACK("%%mm2") \
+ "psllw $2, %%mm1 \n\t" /* 4* */ \
+ "psllw $2, %%mm2 \n\t" /* 4* */ \
+ "psubw %%mm1, %%mm3 \n\t" /* -4,18,-3 */ \
+ "psubw %%mm2, %%mm4 \n\t" /* -4,18,-3 */ \
+ MOVQ "*0+"A3", %%mm1 \n\t" \
+ MOVQ "*4+"A3", %%mm2 \n\t" \
+ UNPACK("%%mm1") \
+ UNPACK("%%mm2") \
+ "pmullw %%mm5, %%mm1 \n\t" /* *53 */ \
+ "pmullw %%mm5, %%mm2 \n\t" /* *53 */ \
+ "paddw %%mm1, %%mm3 \n\t" /* 4,53,18,-3 */ \
+ "paddw %%mm2, %%mm4 \n\t" /* 4,53,18,-3 */
+
+/**
+ * Macro to build the vertical 16-bit version of vc1_put_shift[13].
+ * Here, offset=src_stride. Parameters A1 to A4 must use
+ * %3 (src_stride) and %4 (3*src_stride).
+ *
+ * @param NAME Either 1 or 3
+ * @see MSPEL_FILTER13_CORE for information on A1->A4
+ */
+#define MSPEL_FILTER13_VER_16B(NAME, A1, A2, A3, A4) \
+static void \
+vc1_put_ver_16b_ ## NAME ## _mmx(int16_t *dst, const uint8_t *src, \
+ x86_reg src_stride, \
+ int rnd, int64_t shift) \
+{ \
+ int h = 8; \
+ src -= src_stride; \
+ __asm__ volatile( \
+ LOAD_ROUNDER_MMX("%5") \
+ "movq "MANGLE(ff_pw_53)", %%mm5\n\t" \
+ "movq "MANGLE(ff_pw_18)", %%mm6\n\t" \
+ ASMALIGN(3) \
+ "1: \n\t" \
+ MSPEL_FILTER13_CORE(DO_UNPACK, "movd 1", A1, A2, A3, A4) \
+ NORMALIZE_MMX("%6") \
+ TRANSFER_DONT_PACK(OP_PUT) \
+ /* Last 3 (in fact 4) bytes on the line */ \
+ "movd 8+"A1", %%mm1 \n\t" \
+ DO_UNPACK("%%mm1") \
+ "movq %%mm1, %%mm3 \n\t" \
+ "paddw %%mm1, %%mm1 \n\t" \
+ "paddw %%mm3, %%mm1 \n\t" /* 3* */ \
+ "movd 8+"A2", %%mm3 \n\t" \
+ DO_UNPACK("%%mm3") \
+ "pmullw %%mm6, %%mm3 \n\t" /* *18 */ \
+ "psubw %%mm1, %%mm3 \n\t" /*18,-3 */ \
+ "movd 8+"A3", %%mm1 \n\t" \
+ DO_UNPACK("%%mm1") \
+ "pmullw %%mm5, %%mm1 \n\t" /* *53 */ \
+ "paddw %%mm1, %%mm3 \n\t" /*53,18,-3 */ \
+ "movd 8+"A4", %%mm1 \n\t" \
+ DO_UNPACK("%%mm1") \
+ "psllw $2, %%mm1 \n\t" /* 4* */ \
+ "psubw %%mm1, %%mm3 \n\t" \
+ "paddw %%mm7, %%mm3 \n\t" \
+ "psraw %6, %%mm3 \n\t" \
+ "movq %%mm3, 16(%2) \n\t" \
+ "add %3, %1 \n\t" \
+ "add $24, %2 \n\t" \
+ "decl %0 \n\t" \
+ "jnz 1b \n\t" \
+ : "+r"(h), "+r" (src), "+r" (dst) \
+ : "r"(src_stride), "r"(3*src_stride), \
+ "m"(rnd), "m"(shift) \
+ : "memory" \
+ ); \
+}
+
+/**
+ * Macro to build the horizontal 16-bit version of vc1_put_shift[13].
+ * Here the tap spacing is 16 bits (2 bytes), so the A1 to A4 addresses are
+ * simple constant offsets.
+ *
+ * @param NAME Either 1 or 3
+ * @see MSPEL_FILTER13_CORE for information on A1->A4
+ */
+#define MSPEL_FILTER13_HOR_16B(NAME, A1, A2, A3, A4, OP, OPNAME) \
+static void \
+OPNAME ## vc1_hor_16b_ ## NAME ## _mmx(uint8_t *dst, x86_reg stride, \
+ const int16_t *src, int rnd) \
+{ \
+ int h = 8; \
+ src -= 1; \
+ rnd -= (-4+58+13-3)*256; /* Add -256 bias */ \
+ __asm__ volatile( \
+ LOAD_ROUNDER_MMX("%4") \
+ "movq "MANGLE(ff_pw_18)", %%mm6 \n\t" \
+ "movq "MANGLE(ff_pw_53)", %%mm5 \n\t" \
+ ASMALIGN(3) \
+ "1: \n\t" \
+ MSPEL_FILTER13_CORE(DONT_UNPACK, "movq 2", A1, A2, A3, A4) \
+ NORMALIZE_MMX("$7") \
+ /* Remove bias */ \
+ "paddw "MANGLE(ff_pw_128)", %%mm3 \n\t" \
+ "paddw "MANGLE(ff_pw_128)", %%mm4 \n\t" \
+ TRANSFER_DO_PACK(OP) \
+ "add $24, %1 \n\t" \
+ "add %3, %2 \n\t" \
+ "decl %0 \n\t" \
+ "jnz 1b \n\t" \
+ : "+r"(h), "+r" (src), "+r" (dst) \
+ : "r"(stride), "m"(rnd) \
+ : "memory" \
+ ); \
+}
+
+/**
+ * Macro to build the 8-bit, any-direction version of vc1_put_shift[13].
+ * Here the offset is passed at run time (src_stride for vertical filtering,
+ * 1 for horizontal), so the addresses passed as A1 to A4 must use
+ * %3 (offset) and %4 (3*offset).
+ *
+ * @param NAME Either shift1 or shift3 (1/4 or 3/4 pel shift)
+ * @see MSPEL_FILTER13_CORE for information on A1->A4
+ */
+#define MSPEL_FILTER13_8B(NAME, A1, A2, A3, A4, OP, OPNAME) \
+static void \
+OPNAME ## vc1_## NAME ## _mmx(uint8_t *dst, const uint8_t *src, \
+ x86_reg stride, int rnd, x86_reg offset) \
+{ \
+ int h = 8; \
+ src -= offset; \
+ rnd = 32-rnd; \
+ __asm__ volatile ( \
+ LOAD_ROUNDER_MMX("%6") \
+ "movq "MANGLE(ff_pw_53)", %%mm5 \n\t" \
+ "movq "MANGLE(ff_pw_18)", %%mm6 \n\t" \
+ ASMALIGN(3) \
+ "1: \n\t" \
+ MSPEL_FILTER13_CORE(DO_UNPACK, "movd 1", A1, A2, A3, A4) \
+ NORMALIZE_MMX("$6") \
+ TRANSFER_DO_PACK(OP) \
+ "add %5, %1 \n\t" \
+ "add %5, %2 \n\t" \
+ "decl %0 \n\t" \
+ "jnz 1b \n\t" \
+ : "+r"(h), "+r" (src), "+r" (dst) \
+ : "r"(offset), "r"(3*offset), "g"(stride), "m"(rnd) \
+ : "memory" \
+ ); \
+}
+
+/** 1/4 shift bicubic interpolation */
+MSPEL_FILTER13_8B (shift1, "0(%1,%4 )", "0(%1,%3,2)", "0(%1,%3 )", "0(%1 )", OP_PUT, put_)
+MSPEL_FILTER13_8B (shift1, "0(%1,%4 )", "0(%1,%3,2)", "0(%1,%3 )", "0(%1 )", OP_AVG, avg_)
+MSPEL_FILTER13_VER_16B(shift1, "0(%1,%4 )", "0(%1,%3,2)", "0(%1,%3 )", "0(%1 )")
+MSPEL_FILTER13_HOR_16B(shift1, "2*3(%1)", "2*2(%1)", "2*1(%1)", "2*0(%1)", OP_PUT, put_)
+MSPEL_FILTER13_HOR_16B(shift1, "2*3(%1)", "2*2(%1)", "2*1(%1)", "2*0(%1)", OP_AVG, avg_)
+
+/** 3/4 shift bicubic interpolation */
+MSPEL_FILTER13_8B (shift3, "0(%1 )", "0(%1,%3 )", "0(%1,%3,2)", "0(%1,%4 )", OP_PUT, put_)
+MSPEL_FILTER13_8B (shift3, "0(%1 )", "0(%1,%3 )", "0(%1,%3,2)", "0(%1,%4 )", OP_AVG, avg_)
+MSPEL_FILTER13_VER_16B(shift3, "0(%1 )", "0(%1,%3 )", "0(%1,%3,2)", "0(%1,%4 )")
+MSPEL_FILTER13_HOR_16B(shift3, "2*0(%1)", "2*1(%1)", "2*2(%1)", "2*3(%1)", OP_PUT, put_)
+MSPEL_FILTER13_HOR_16B(shift3, "2*0(%1)", "2*1(%1)", "2*2(%1)", "2*3(%1)", OP_AVG, avg_)
+
+typedef void (*vc1_mspel_mc_filter_ver_16bits)(int16_t *dst, const uint8_t *src, x86_reg src_stride, int rnd, int64_t shift);
+typedef void (*vc1_mspel_mc_filter_hor_16bits)(uint8_t *dst, x86_reg dst_stride, const int16_t *src, int rnd);
+typedef void (*vc1_mspel_mc_filter_8bits)(uint8_t *dst, const uint8_t *src, x86_reg stride, int rnd, x86_reg offset);
+
+/**
+ * Interpolates fractional-pel values by applying the proper vertical then
+ * horizontal filter.
+ *
+ * @param dst Destination buffer for interpolated pels.
+ * @param src Source buffer.
+ * @param stride Stride for both src and dst buffers.
+ * @param hmode Horizontal filter (expressed in quarter pixels shift).
+ * @param vmode Vertical filter (expressed in quarter pixels shift).
+ * @param rnd Rounding bias.
+ */
+#define VC1_MSPEL_MC(OP)\
+static void OP ## vc1_mspel_mc(uint8_t *dst, const uint8_t *src, int stride,\
+ int hmode, int vmode, int rnd)\
+{\
+ static const vc1_mspel_mc_filter_ver_16bits vc1_put_shift_ver_16bits[] =\
+ { NULL, vc1_put_ver_16b_shift1_mmx, vc1_put_ver_16b_shift2_mmx, vc1_put_ver_16b_shift3_mmx };\
+ static const vc1_mspel_mc_filter_hor_16bits vc1_put_shift_hor_16bits[] =\
+ { NULL, OP ## vc1_hor_16b_shift1_mmx, OP ## vc1_hor_16b_shift2_mmx, OP ## vc1_hor_16b_shift3_mmx };\
+ static const vc1_mspel_mc_filter_8bits vc1_put_shift_8bits[] =\
+ { NULL, OP ## vc1_shift1_mmx, OP ## vc1_shift2_mmx, OP ## vc1_shift3_mmx };\
+\
+ __asm__ volatile(\
+ "pxor %%mm0, %%mm0 \n\t"\
+ ::: "memory"\
+ );\
+\
+ if (vmode) { /* Vertical filter to apply */\
+ if (hmode) { /* Horizontal filter to apply, output to tmp */\
+ static const int shift_value[] = { 0, 5, 1, 5 };\
+ int shift = (shift_value[hmode]+shift_value[vmode])>>1;\
+ int r;\
+ DECLARE_ALIGNED_16(int16_t, tmp)[12*8];\
+\
+ r = (1<<(shift-1)) + rnd-1;\
+ vc1_put_shift_ver_16bits[vmode](tmp, src-1, stride, r, shift);\
+\
+ vc1_put_shift_hor_16bits[hmode](dst, stride, tmp+1, 64-rnd);\
+ return;\
+ }\
+ else { /* No horizontal filter, output 8 lines to dst */\
+ vc1_put_shift_8bits[vmode](dst, src, stride, 1-rnd, stride);\
+ return;\
+ }\
+ }\
+\
+ /* Horizontal mode with no vertical mode */\
+ vc1_put_shift_8bits[hmode](dst, src, stride, rnd, 1);\
+}
+
+VC1_MSPEL_MC(put_)
+VC1_MSPEL_MC(avg_)
+
+void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd);
+void ff_avg_vc1_mspel_mc00_mmx2(uint8_t *dst, const uint8_t *src, int stride, int rnd);
+
+/** Macro to ease bicubic filter interpolation functions declarations */
+#define DECLARE_FUNCTION(a, b) \
+static void put_vc1_mspel_mc ## a ## b ## _mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) { \
+ put_vc1_mspel_mc(dst, src, stride, a, b, rnd); \
+}\
+static void avg_vc1_mspel_mc ## a ## b ## _mmx2(uint8_t *dst, const uint8_t *src, int stride, int rnd) { \
+ avg_vc1_mspel_mc(dst, src, stride, a, b, rnd); \
+}
+
+DECLARE_FUNCTION(0, 1)
+DECLARE_FUNCTION(0, 2)
+DECLARE_FUNCTION(0, 3)
+
+DECLARE_FUNCTION(1, 0)
+DECLARE_FUNCTION(1, 1)
+DECLARE_FUNCTION(1, 2)
+DECLARE_FUNCTION(1, 3)
+
+DECLARE_FUNCTION(2, 0)
+DECLARE_FUNCTION(2, 1)
+DECLARE_FUNCTION(2, 2)
+DECLARE_FUNCTION(2, 3)
+
+DECLARE_FUNCTION(3, 0)
+DECLARE_FUNCTION(3, 1)
+DECLARE_FUNCTION(3, 2)
+DECLARE_FUNCTION(3, 3)
+
+static void vc1_inv_trans_4x4_dc_mmx2(uint8_t *dest, int linesize, DCTELEM *block)
+{
+ int dc = block[0];
+ dc = (17 * dc + 4) >> 3;
+ dc = (17 * dc + 64) >> 7;
+ __asm__ volatile(
+ "movd %0, %%mm0 \n\t"
+ "pshufw $0, %%mm0, %%mm0 \n\t"
+ "pxor %%mm1, %%mm1 \n\t"
+ "psubw %%mm0, %%mm1 \n\t"
+ "packuswb %%mm0, %%mm0 \n\t"
+ "packuswb %%mm1, %%mm1 \n\t"
+ ::"r"(dc)
+ );
+ __asm__ volatile(
+ "movd %0, %%mm2 \n\t"
+ "movd %1, %%mm3 \n\t"
+ "movd %2, %%mm4 \n\t"
+ "movd %3, %%mm5 \n\t"
+ "paddusb %%mm0, %%mm2 \n\t"
+ "paddusb %%mm0, %%mm3 \n\t"
+ "paddusb %%mm0, %%mm4 \n\t"
+ "paddusb %%mm0, %%mm5 \n\t"
+ "psubusb %%mm1, %%mm2 \n\t"
+ "psubusb %%mm1, %%mm3 \n\t"
+ "psubusb %%mm1, %%mm4 \n\t"
+ "psubusb %%mm1, %%mm5 \n\t"
+ "movd %%mm2, %0 \n\t"
+ "movd %%mm3, %1 \n\t"
+ "movd %%mm4, %2 \n\t"
+ "movd %%mm5, %3 \n\t"
+ :"+m"(*(uint32_t*)(dest+0*linesize)),
+ "+m"(*(uint32_t*)(dest+1*linesize)),
+ "+m"(*(uint32_t*)(dest+2*linesize)),
+ "+m"(*(uint32_t*)(dest+3*linesize))
+ );
+}
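+
+/*
+ * Illustrative scalar sketch (hypothetical helper, not compiled): after the
+ * two rounding steps above, the code simply adds the constant dc to every
+ * pixel of the 4x4 block with unsigned saturation (the paddusb/psubusb pair
+ * handles negative dc).
+ */
+#if 0
+static void vc1_inv_trans_4x4_dc_ref(uint8_t *dest, int linesize, int dc)
+{
+    int x, y;
+    for (y = 0; y < 4; y++) {
+        for (x = 0; x < 4; x++) {
+            int v = dest[x] + dc;
+            dest[x] = v < 0 ? 0 : (v > 255 ? 255 : v);
+        }
+        dest += linesize;
+    }
+}
+#endif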
+
+static void vc1_inv_trans_4x8_dc_mmx2(uint8_t *dest, int linesize, DCTELEM *block)
+{
+ int dc = block[0];
+ dc = (17 * dc + 4) >> 3;
+ dc = (12 * dc + 64) >> 7;
+ __asm__ volatile(
+ "movd %0, %%mm0 \n\t"
+ "pshufw $0, %%mm0, %%mm0 \n\t"
+ "pxor %%mm1, %%mm1 \n\t"
+ "psubw %%mm0, %%mm1 \n\t"
+ "packuswb %%mm0, %%mm0 \n\t"
+ "packuswb %%mm1, %%mm1 \n\t"
+ ::"r"(dc)
+ );
+ __asm__ volatile(
+ "movd %0, %%mm2 \n\t"
+ "movd %1, %%mm3 \n\t"
+ "movd %2, %%mm4 \n\t"
+ "movd %3, %%mm5 \n\t"
+ "paddusb %%mm0, %%mm2 \n\t"
+ "paddusb %%mm0, %%mm3 \n\t"
+ "paddusb %%mm0, %%mm4 \n\t"
+ "paddusb %%mm0, %%mm5 \n\t"
+ "psubusb %%mm1, %%mm2 \n\t"
+ "psubusb %%mm1, %%mm3 \n\t"
+ "psubusb %%mm1, %%mm4 \n\t"
+ "psubusb %%mm1, %%mm5 \n\t"
+ "movd %%mm2, %0 \n\t"
+ "movd %%mm3, %1 \n\t"
+ "movd %%mm4, %2 \n\t"
+ "movd %%mm5, %3 \n\t"
+ :"+m"(*(uint32_t*)(dest+0*linesize)),
+ "+m"(*(uint32_t*)(dest+1*linesize)),
+ "+m"(*(uint32_t*)(dest+2*linesize)),
+ "+m"(*(uint32_t*)(dest+3*linesize))
+ );
+ dest += 4*linesize;
+ __asm__ volatile(
+ "movd %0, %%mm2 \n\t"
+ "movd %1, %%mm3 \n\t"
+ "movd %2, %%mm4 \n\t"
+ "movd %3, %%mm5 \n\t"
+ "paddusb %%mm0, %%mm2 \n\t"
+ "paddusb %%mm0, %%mm3 \n\t"
+ "paddusb %%mm0, %%mm4 \n\t"
+ "paddusb %%mm0, %%mm5 \n\t"
+ "psubusb %%mm1, %%mm2 \n\t"
+ "psubusb %%mm1, %%mm3 \n\t"
+ "psubusb %%mm1, %%mm4 \n\t"
+ "psubusb %%mm1, %%mm5 \n\t"
+ "movd %%mm2, %0 \n\t"
+ "movd %%mm3, %1 \n\t"
+ "movd %%mm4, %2 \n\t"
+ "movd %%mm5, %3 \n\t"
+ :"+m"(*(uint32_t*)(dest+0*linesize)),
+ "+m"(*(uint32_t*)(dest+1*linesize)),
+ "+m"(*(uint32_t*)(dest+2*linesize)),
+ "+m"(*(uint32_t*)(dest+3*linesize))
+ );
+}
+
+static void vc1_inv_trans_8x4_dc_mmx2(uint8_t *dest, int linesize, DCTELEM *block)
+{
+ int dc = block[0];
+ dc = ( 3 * dc + 1) >> 1;
+ dc = (17 * dc + 64) >> 7;
+ __asm__ volatile(
+ "movd %0, %%mm0 \n\t"
+ "pshufw $0, %%mm0, %%mm0 \n\t"
+ "pxor %%mm1, %%mm1 \n\t"
+ "psubw %%mm0, %%mm1 \n\t"
+ "packuswb %%mm0, %%mm0 \n\t"
+ "packuswb %%mm1, %%mm1 \n\t"
+ ::"r"(dc)
+ );
+ __asm__ volatile(
+ "movq %0, %%mm2 \n\t"
+ "movq %1, %%mm3 \n\t"
+ "movq %2, %%mm4 \n\t"
+ "movq %3, %%mm5 \n\t"
+ "paddusb %%mm0, %%mm2 \n\t"
+ "paddusb %%mm0, %%mm3 \n\t"
+ "paddusb %%mm0, %%mm4 \n\t"
+ "paddusb %%mm0, %%mm5 \n\t"
+ "psubusb %%mm1, %%mm2 \n\t"
+ "psubusb %%mm1, %%mm3 \n\t"
+ "psubusb %%mm1, %%mm4 \n\t"
+ "psubusb %%mm1, %%mm5 \n\t"
+ "movq %%mm2, %0 \n\t"
+ "movq %%mm3, %1 \n\t"
+ "movq %%mm4, %2 \n\t"
+ "movq %%mm5, %3 \n\t"
+ :"+m"(*(uint32_t*)(dest+0*linesize)),
+ "+m"(*(uint32_t*)(dest+1*linesize)),
+ "+m"(*(uint32_t*)(dest+2*linesize)),
+ "+m"(*(uint32_t*)(dest+3*linesize))
+ );
+}
+
+static void vc1_inv_trans_8x8_dc_mmx2(uint8_t *dest, int linesize, DCTELEM *block)
+{
+ int dc = block[0];
+ dc = (3 * dc + 1) >> 1;
+ dc = (3 * dc + 16) >> 5;
+ __asm__ volatile(
+ "movd %0, %%mm0 \n\t"
+ "pshufw $0, %%mm0, %%mm0 \n\t"
+ "pxor %%mm1, %%mm1 \n\t"
+ "psubw %%mm0, %%mm1 \n\t"
+ "packuswb %%mm0, %%mm0 \n\t"
+ "packuswb %%mm1, %%mm1 \n\t"
+ ::"r"(dc)
+ );
+ __asm__ volatile(
+ "movq %0, %%mm2 \n\t"
+ "movq %1, %%mm3 \n\t"
+ "movq %2, %%mm4 \n\t"
+ "movq %3, %%mm5 \n\t"
+ "paddusb %%mm0, %%mm2 \n\t"
+ "paddusb %%mm0, %%mm3 \n\t"
+ "paddusb %%mm0, %%mm4 \n\t"
+ "paddusb %%mm0, %%mm5 \n\t"
+ "psubusb %%mm1, %%mm2 \n\t"
+ "psubusb %%mm1, %%mm3 \n\t"
+ "psubusb %%mm1, %%mm4 \n\t"
+ "psubusb %%mm1, %%mm5 \n\t"
+ "movq %%mm2, %0 \n\t"
+ "movq %%mm3, %1 \n\t"
+ "movq %%mm4, %2 \n\t"
+ "movq %%mm5, %3 \n\t"
+ :"+m"(*(uint32_t*)(dest+0*linesize)),
+ "+m"(*(uint32_t*)(dest+1*linesize)),
+ "+m"(*(uint32_t*)(dest+2*linesize)),
+ "+m"(*(uint32_t*)(dest+3*linesize))
+ );
+ dest += 4*linesize;
+ __asm__ volatile(
+ "movq %0, %%mm2 \n\t"
+ "movq %1, %%mm3 \n\t"
+ "movq %2, %%mm4 \n\t"
+ "movq %3, %%mm5 \n\t"
+ "paddusb %%mm0, %%mm2 \n\t"
+ "paddusb %%mm0, %%mm3 \n\t"
+ "paddusb %%mm0, %%mm4 \n\t"
+ "paddusb %%mm0, %%mm5 \n\t"
+ "psubusb %%mm1, %%mm2 \n\t"
+ "psubusb %%mm1, %%mm3 \n\t"
+ "psubusb %%mm1, %%mm4 \n\t"
+ "psubusb %%mm1, %%mm5 \n\t"
+ "movq %%mm2, %0 \n\t"
+ "movq %%mm3, %1 \n\t"
+ "movq %%mm4, %2 \n\t"
+ "movq %%mm5, %3 \n\t"
+ :"+m"(*(uint32_t*)(dest+0*linesize)),
+ "+m"(*(uint32_t*)(dest+1*linesize)),
+ "+m"(*(uint32_t*)(dest+2*linesize)),
+ "+m"(*(uint32_t*)(dest+3*linesize))
+ );
+}
+
+void ff_vc1dsp_init_mmx(DSPContext* dsp, AVCodecContext *avctx) {
+ mm_flags = mm_support();
+
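+    /* The put/avg_vc1_mspel_pixels_tab entries are indexed as 4*vmode + hmode,
+       i.e. the mcXY functions declared above are registered at entry 4*Y + X. */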
+ dsp->put_vc1_mspel_pixels_tab[ 0] = ff_put_vc1_mspel_mc00_mmx;
+ dsp->put_vc1_mspel_pixels_tab[ 4] = put_vc1_mspel_mc01_mmx;
+ dsp->put_vc1_mspel_pixels_tab[ 8] = put_vc1_mspel_mc02_mmx;
+ dsp->put_vc1_mspel_pixels_tab[12] = put_vc1_mspel_mc03_mmx;
+
+ dsp->put_vc1_mspel_pixels_tab[ 1] = put_vc1_mspel_mc10_mmx;
+ dsp->put_vc1_mspel_pixels_tab[ 5] = put_vc1_mspel_mc11_mmx;
+ dsp->put_vc1_mspel_pixels_tab[ 9] = put_vc1_mspel_mc12_mmx;
+ dsp->put_vc1_mspel_pixels_tab[13] = put_vc1_mspel_mc13_mmx;
+
+ dsp->put_vc1_mspel_pixels_tab[ 2] = put_vc1_mspel_mc20_mmx;
+ dsp->put_vc1_mspel_pixels_tab[ 6] = put_vc1_mspel_mc21_mmx;
+ dsp->put_vc1_mspel_pixels_tab[10] = put_vc1_mspel_mc22_mmx;
+ dsp->put_vc1_mspel_pixels_tab[14] = put_vc1_mspel_mc23_mmx;
+
+ dsp->put_vc1_mspel_pixels_tab[ 3] = put_vc1_mspel_mc30_mmx;
+ dsp->put_vc1_mspel_pixels_tab[ 7] = put_vc1_mspel_mc31_mmx;
+ dsp->put_vc1_mspel_pixels_tab[11] = put_vc1_mspel_mc32_mmx;
+ dsp->put_vc1_mspel_pixels_tab[15] = put_vc1_mspel_mc33_mmx;
+
+ if (mm_flags & FF_MM_MMX2){
+ dsp->avg_vc1_mspel_pixels_tab[ 0] = ff_avg_vc1_mspel_mc00_mmx2;
+ dsp->avg_vc1_mspel_pixels_tab[ 4] = avg_vc1_mspel_mc01_mmx2;
+ dsp->avg_vc1_mspel_pixels_tab[ 8] = avg_vc1_mspel_mc02_mmx2;
+ dsp->avg_vc1_mspel_pixels_tab[12] = avg_vc1_mspel_mc03_mmx2;
+
+ dsp->avg_vc1_mspel_pixels_tab[ 1] = avg_vc1_mspel_mc10_mmx2;
+ dsp->avg_vc1_mspel_pixels_tab[ 5] = avg_vc1_mspel_mc11_mmx2;
+ dsp->avg_vc1_mspel_pixels_tab[ 9] = avg_vc1_mspel_mc12_mmx2;
+ dsp->avg_vc1_mspel_pixels_tab[13] = avg_vc1_mspel_mc13_mmx2;
+
+ dsp->avg_vc1_mspel_pixels_tab[ 2] = avg_vc1_mspel_mc20_mmx2;
+ dsp->avg_vc1_mspel_pixels_tab[ 6] = avg_vc1_mspel_mc21_mmx2;
+ dsp->avg_vc1_mspel_pixels_tab[10] = avg_vc1_mspel_mc22_mmx2;
+ dsp->avg_vc1_mspel_pixels_tab[14] = avg_vc1_mspel_mc23_mmx2;
+
+ dsp->avg_vc1_mspel_pixels_tab[ 3] = avg_vc1_mspel_mc30_mmx2;
+ dsp->avg_vc1_mspel_pixels_tab[ 7] = avg_vc1_mspel_mc31_mmx2;
+ dsp->avg_vc1_mspel_pixels_tab[11] = avg_vc1_mspel_mc32_mmx2;
+ dsp->avg_vc1_mspel_pixels_tab[15] = avg_vc1_mspel_mc33_mmx2;
+
+ dsp->vc1_inv_trans_8x8_dc = vc1_inv_trans_8x8_dc_mmx2;
+ dsp->vc1_inv_trans_4x8_dc = vc1_inv_trans_4x8_dc_mmx2;
+ dsp->vc1_inv_trans_8x4_dc = vc1_inv_trans_8x4_dc_mmx2;
+ dsp->vc1_inv_trans_4x4_dc = vc1_inv_trans_4x4_dc_mmx2;
+ }
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/vp3dsp_mmx.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/vp3dsp_mmx.c
new file mode 100644
index 000000000..df0fcb9f1
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/vp3dsp_mmx.c
@@ -0,0 +1,396 @@
+/*
+ * Copyright (C) 2004 the ffmpeg project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/x86/vp3dsp_mmx.c
+ * MMX-optimized functions cribbed from the original VP3 source code.
+ */
+
+#include "libavutil/x86_cpu.h"
+#include "libavcodec/dsputil.h"
+#include "dsputil_mmx.h"
+
+extern const uint16_t ff_vp3_idct_data[];
+
+// this is off by one or two for some cases when filter_limit is greater than 63
+// in: p0 in mm6, p1 in mm4, p2 in mm2, p3 in mm1
+// out: p1 in mm4, p2 in mm3
+#define VP3_LOOP_FILTER(flim) \
+ "movq %%mm6, %%mm7 \n\t" \
+ "pand "MANGLE(ff_pb_7 )", %%mm6 \n\t" /* p0&7 */ \
+ "psrlw $3, %%mm7 \n\t" \
+ "pand "MANGLE(ff_pb_1F)", %%mm7 \n\t" /* p0>>3 */ \
+ "movq %%mm2, %%mm3 \n\t" /* mm3 = p2 */ \
+ "pxor %%mm4, %%mm2 \n\t" \
+ "pand "MANGLE(ff_pb_1 )", %%mm2 \n\t" /* (p2^p1)&1 */ \
+ "movq %%mm2, %%mm5 \n\t" \
+ "paddb %%mm2, %%mm2 \n\t" \
+ "paddb %%mm5, %%mm2 \n\t" /* 3*(p2^p1)&1 */ \
+ "paddb %%mm6, %%mm2 \n\t" /* extra bits lost in shifts */ \
+ "pcmpeqb %%mm0, %%mm0 \n\t" \
+ "pxor %%mm0, %%mm1 \n\t" /* 255 - p3 */ \
+ "pavgb %%mm2, %%mm1 \n\t" /* (256 - p3 + extrabits) >> 1 */ \
+ "pxor %%mm4, %%mm0 \n\t" /* 255 - p1 */ \
+ "pavgb %%mm3, %%mm0 \n\t" /* (256 + p2-p1) >> 1 */ \
+ "paddb "MANGLE(ff_pb_3 )", %%mm1 \n\t" \
+ "pavgb %%mm0, %%mm1 \n\t" /* 128+2+( p2-p1 - p3) >> 2 */ \
+ "pavgb %%mm0, %%mm1 \n\t" /* 128+1+(3*(p2-p1) - p3) >> 3 */ \
+ "paddusb %%mm1, %%mm7 \n\t" /* d+128+1 */ \
+ "movq "MANGLE(ff_pb_81)", %%mm6 \n\t" \
+ "psubusb %%mm7, %%mm6 \n\t" \
+ "psubusb "MANGLE(ff_pb_81)", %%mm7 \n\t" \
+\
+ "movq "#flim", %%mm5 \n\t" \
+ "pminub %%mm5, %%mm6 \n\t" \
+ "pminub %%mm5, %%mm7 \n\t" \
+ "movq %%mm6, %%mm0 \n\t" \
+ "movq %%mm7, %%mm1 \n\t" \
+ "paddb %%mm6, %%mm6 \n\t" \
+ "paddb %%mm7, %%mm7 \n\t" \
+ "pminub %%mm5, %%mm6 \n\t" \
+ "pminub %%mm5, %%mm7 \n\t" \
+ "psubb %%mm0, %%mm6 \n\t" \
+ "psubb %%mm1, %%mm7 \n\t" \
+ "paddusb %%mm7, %%mm4 \n\t" \
+ "psubusb %%mm6, %%mm4 \n\t" \
+ "psubusb %%mm7, %%mm3 \n\t" \
+ "paddusb %%mm6, %%mm3 \n\t"
+
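+/*
+ * Illustrative scalar sketch of the filter above (hypothetical helper, not
+ * compiled): per pixel, the C reference computes
+ *     f  = bounding_values[((p0 - p3) + 3*(p2 - p1) + 4) >> 3];
+ *     p1 = av_clip_uint8(p1 + f);
+ *     p2 = av_clip_uint8(p2 - f);
+ * which is what the MMX code approximates (hence the off-by-one note above).
+ */
+#if 0
+static void vp3_loop_filter_ref(int *p1, int *p2, int p0, int p3,
+                                const int *bounding_values)
+{
+    int f = bounding_values[((p0 - p3) + 3 * (*p2 - *p1) + 4) >> 3];
+    *p1 = av_clip_uint8(*p1 + f);
+    *p2 = av_clip_uint8(*p2 - f);
+}
+#endif
+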
+#define STORE_4_WORDS(dst0, dst1, dst2, dst3, mm) \
+ "movd "#mm", %0 \n\t" \
+ "movw %w0, -1"#dst0" \n\t" \
+ "psrlq $32, "#mm" \n\t" \
+ "shr $16, %0 \n\t" \
+ "movw %w0, -1"#dst1" \n\t" \
+ "movd "#mm", %0 \n\t" \
+ "movw %w0, -1"#dst2" \n\t" \
+ "shr $16, %0 \n\t" \
+ "movw %w0, -1"#dst3" \n\t"
+
+void ff_vp3_v_loop_filter_mmx2(uint8_t *src, int stride, int *bounding_values)
+{
+ __asm__ volatile(
+ "movq %0, %%mm6 \n\t"
+ "movq %1, %%mm4 \n\t"
+ "movq %2, %%mm2 \n\t"
+ "movq %3, %%mm1 \n\t"
+
+ VP3_LOOP_FILTER(%4)
+
+ "movq %%mm4, %1 \n\t"
+ "movq %%mm3, %2 \n\t"
+
+ : "+m" (*(uint64_t*)(src - 2*stride)),
+ "+m" (*(uint64_t*)(src - 1*stride)),
+ "+m" (*(uint64_t*)(src + 0*stride)),
+ "+m" (*(uint64_t*)(src + 1*stride))
+ : "m"(*(uint64_t*)(bounding_values+129))
+ );
+}
+
+void ff_vp3_h_loop_filter_mmx2(uint8_t *src, int stride, int *bounding_values)
+{
+ x86_reg tmp;
+
+ __asm__ volatile(
+ "movd -2(%1), %%mm6 \n\t"
+ "movd -2(%1,%3), %%mm0 \n\t"
+ "movd -2(%1,%3,2), %%mm1 \n\t"
+ "movd -2(%1,%4), %%mm4 \n\t"
+
+ TRANSPOSE8x4(%%mm6, %%mm0, %%mm1, %%mm4, -2(%2), -2(%2,%3), -2(%2,%3,2), -2(%2,%4), %%mm2)
+ VP3_LOOP_FILTER(%5)
+ SBUTTERFLY(%%mm4, %%mm3, %%mm5, bw, q)
+
+ STORE_4_WORDS((%1), (%1,%3), (%1,%3,2), (%1,%4), %%mm4)
+ STORE_4_WORDS((%2), (%2,%3), (%2,%3,2), (%2,%4), %%mm5)
+
+ : "=&r"(tmp)
+ : "r"(src), "r"(src+4*stride), "r"((x86_reg)stride), "r"((x86_reg)3*stride),
+ "m"(*(uint64_t*)(bounding_values+129))
+ : "memory"
+ );
+}
+
+/* from original comments: the macro does an IDCT on 4 1-D DCTs */
+#define BeginIDCT() \
+ "movq "I(3)", %%mm2 \n\t" \
+ "movq "C(3)", %%mm6 \n\t" \
+ "movq %%mm2, %%mm4 \n\t" \
+ "movq "J(5)", %%mm7 \n\t" \
+ "pmulhw %%mm6, %%mm4 \n\t" /* r4 = c3*i3 - i3 */ \
+ "movq "C(5)", %%mm1 \n\t" \
+ "pmulhw %%mm7, %%mm6 \n\t" /* r6 = c3*i5 - i5 */ \
+ "movq %%mm1, %%mm5 \n\t" \
+ "pmulhw %%mm2, %%mm1 \n\t" /* r1 = c5*i3 - i3 */ \
+ "movq "I(1)", %%mm3 \n\t" \
+ "pmulhw %%mm7, %%mm5 \n\t" /* r5 = c5*i5 - i5 */ \
+ "movq "C(1)", %%mm0 \n\t" \
+ "paddw %%mm2, %%mm4 \n\t" /* r4 = c3*i3 */ \
+ "paddw %%mm7, %%mm6 \n\t" /* r6 = c3*i5 */ \
+ "paddw %%mm1, %%mm2 \n\t" /* r2 = c5*i3 */ \
+ "movq "J(7)", %%mm1 \n\t" \
+ "paddw %%mm5, %%mm7 \n\t" /* r7 = c5*i5 */ \
+ "movq %%mm0, %%mm5 \n\t" /* r5 = c1 */ \
+ "pmulhw %%mm3, %%mm0 \n\t" /* r0 = c1*i1 - i1 */ \
+ "paddsw %%mm7, %%mm4 \n\t" /* r4 = C = c3*i3 + c5*i5 */ \
+ "pmulhw %%mm1, %%mm5 \n\t" /* r5 = c1*i7 - i7 */ \
+ "movq "C(7)", %%mm7 \n\t" \
+ "psubsw %%mm2, %%mm6 \n\t" /* r6 = D = c3*i5 - c5*i3 */ \
+ "paddw %%mm3, %%mm0 \n\t" /* r0 = c1*i1 */ \
+ "pmulhw %%mm7, %%mm3 \n\t" /* r3 = c7*i1 */ \
+ "movq "I(2)", %%mm2 \n\t" \
+ "pmulhw %%mm1, %%mm7 \n\t" /* r7 = c7*i7 */ \
+ "paddw %%mm1, %%mm5 \n\t" /* r5 = c1*i7 */ \
+ "movq %%mm2, %%mm1 \n\t" /* r1 = i2 */ \
+ "pmulhw "C(2)", %%mm2 \n\t" /* r2 = c2*i2 - i2 */ \
+ "psubsw %%mm5, %%mm3 \n\t" /* r3 = B = c7*i1 - c1*i7 */ \
+ "movq "J(6)", %%mm5 \n\t" \
+ "paddsw %%mm7, %%mm0 \n\t" /* r0 = A = c1*i1 + c7*i7 */ \
+ "movq %%mm5, %%mm7 \n\t" /* r7 = i6 */ \
+ "psubsw %%mm4, %%mm0 \n\t" /* r0 = A - C */ \
+ "pmulhw "C(2)", %%mm5 \n\t" /* r5 = c2*i6 - i6 */ \
+ "paddw %%mm1, %%mm2 \n\t" /* r2 = c2*i2 */ \
+ "pmulhw "C(6)", %%mm1 \n\t" /* r1 = c6*i2 */ \
+ "paddsw %%mm4, %%mm4 \n\t" /* r4 = C + C */ \
+ "paddsw %%mm0, %%mm4 \n\t" /* r4 = C. = A + C */ \
+ "psubsw %%mm6, %%mm3 \n\t" /* r3 = B - D */ \
+ "paddw %%mm7, %%mm5 \n\t" /* r5 = c2*i6 */ \
+ "paddsw %%mm6, %%mm6 \n\t" /* r6 = D + D */ \
+ "pmulhw "C(6)", %%mm7 \n\t" /* r7 = c6*i6 */ \
+ "paddsw %%mm3, %%mm6 \n\t" /* r6 = D. = B + D */ \
+ "movq %%mm4, "I(1)"\n\t" /* save C. at I(1) */ \
+ "psubsw %%mm5, %%mm1 \n\t" /* r1 = H = c6*i2 - c2*i6 */ \
+ "movq "C(4)", %%mm4 \n\t" \
+ "movq %%mm3, %%mm5 \n\t" /* r5 = B - D */ \
+ "pmulhw %%mm4, %%mm3 \n\t" /* r3 = (c4 - 1) * (B - D) */ \
+ "paddsw %%mm2, %%mm7 \n\t" /* r3 = (c4 - 1) * (B - D) */ \
+ "movq %%mm6, "I(2)"\n\t" /* save D. at I(2) */ \
+ "movq %%mm0, %%mm2 \n\t" /* r2 = A - C */ \
+ "movq "I(0)", %%mm6 \n\t" \
+ "pmulhw %%mm4, %%mm0 \n\t" /* r0 = (c4 - 1) * (A - C) */ \
+ "paddw %%mm3, %%mm5 \n\t" /* r5 = B. = c4 * (B - D) */ \
+ "movq "J(4)", %%mm3 \n\t" \
+ "psubsw %%mm1, %%mm5 \n\t" /* r5 = B.. = B. - H */ \
+ "paddw %%mm0, %%mm2 \n\t" /* r0 = A. = c4 * (A - C) */ \
+ "psubsw %%mm3, %%mm6 \n\t" /* r6 = i0 - i4 */ \
+ "movq %%mm6, %%mm0 \n\t" \
+ "pmulhw %%mm4, %%mm6 \n\t" /* r6 = (c4 - 1) * (i0 - i4) */ \
+ "paddsw %%mm3, %%mm3 \n\t" /* r3 = i4 + i4 */ \
+ "paddsw %%mm1, %%mm1 \n\t" /* r1 = H + H */ \
+ "paddsw %%mm0, %%mm3 \n\t" /* r3 = i0 + i4 */ \
+ "paddsw %%mm5, %%mm1 \n\t" /* r1 = H. = B + H */ \
+ "pmulhw %%mm3, %%mm4 \n\t" /* r4 = (c4 - 1) * (i0 + i4) */ \
+ "paddsw %%mm0, %%mm6 \n\t" /* r6 = F = c4 * (i0 - i4) */ \
+ "psubsw %%mm2, %%mm6 \n\t" /* r6 = F. = F - A. */ \
+ "paddsw %%mm2, %%mm2 \n\t" /* r2 = A. + A. */ \
+ "movq "I(1)", %%mm0 \n\t" /* r0 = C. */ \
+ "paddsw %%mm6, %%mm2 \n\t" /* r2 = A.. = F + A. */ \
+ "paddw %%mm3, %%mm4 \n\t" /* r4 = E = c4 * (i0 + i4) */ \
+ "psubsw %%mm1, %%mm2 \n\t" /* r2 = R2 = A.. - H. */
+
+/* RowIDCT gets ready to transpose */
+#define RowIDCT() \
+ BeginIDCT() \
+ "movq "I(2)", %%mm3 \n\t" /* r3 = D. */ \
+ "psubsw %%mm7, %%mm4 \n\t" /* r4 = E. = E - G */ \
+ "paddsw %%mm1, %%mm1 \n\t" /* r1 = H. + H. */ \
+ "paddsw %%mm7, %%mm7 \n\t" /* r7 = G + G */ \
+ "paddsw %%mm2, %%mm1 \n\t" /* r1 = R1 = A.. + H. */ \
+ "paddsw %%mm4, %%mm7 \n\t" /* r1 = R1 = A.. + H. */ \
+ "psubsw %%mm3, %%mm4 \n\t" /* r4 = R4 = E. - D. */ \
+ "paddsw %%mm3, %%mm3 \n\t" \
+ "psubsw %%mm5, %%mm6 \n\t" /* r6 = R6 = F. - B.. */ \
+ "paddsw %%mm5, %%mm5 \n\t" \
+ "paddsw %%mm4, %%mm3 \n\t" /* r3 = R3 = E. + D. */ \
+ "paddsw %%mm6, %%mm5 \n\t" /* r5 = R5 = F. + B.. */ \
+ "psubsw %%mm0, %%mm7 \n\t" /* r7 = R7 = G. - C. */ \
+ "paddsw %%mm0, %%mm0 \n\t" \
+ "movq %%mm1, "I(1)"\n\t" /* save R1 */ \
+ "paddsw %%mm7, %%mm0 \n\t" /* r0 = R0 = G. + C. */
+
+/* Column IDCT normalizes and stores final results */
+#define ColumnIDCT() \
+ BeginIDCT() \
+ "paddsw "OC_8", %%mm2 \n\t" /* adjust R2 (and R1) for shift */ \
+ "paddsw %%mm1, %%mm1 \n\t" /* r1 = H. + H. */ \
+ "paddsw %%mm2, %%mm1 \n\t" /* r1 = R1 = A.. + H. */ \
+ "psraw $4, %%mm2 \n\t" /* r2 = NR2 */ \
+ "psubsw %%mm7, %%mm4 \n\t" /* r4 = E. = E - G */ \
+ "psraw $4, %%mm1 \n\t" /* r1 = NR1 */ \
+ "movq "I(2)", %%mm3 \n\t" /* r3 = D. */ \
+ "paddsw %%mm7, %%mm7 \n\t" /* r7 = G + G */ \
+ "movq %%mm2, "I(2)"\n\t" /* store NR2 at I2 */ \
+ "paddsw %%mm4, %%mm7 \n\t" /* r7 = G. = E + G */ \
+ "movq %%mm1, "I(1)"\n\t" /* store NR1 at I1 */ \
+ "psubsw %%mm3, %%mm4 \n\t" /* r4 = R4 = E. - D. */ \
+ "paddsw "OC_8", %%mm4 \n\t" /* adjust R4 (and R3) for shift */ \
+ "paddsw %%mm3, %%mm3 \n\t" /* r3 = D. + D. */ \
+ "paddsw %%mm4, %%mm3 \n\t" /* r3 = R3 = E. + D. */ \
+ "psraw $4, %%mm4 \n\t" /* r4 = NR4 */ \
+ "psubsw %%mm5, %%mm6 \n\t" /* r6 = R6 = F. - B.. */ \
+ "psraw $4, %%mm3 \n\t" /* r3 = NR3 */ \
+ "paddsw "OC_8", %%mm6 \n\t" /* adjust R6 (and R5) for shift */ \
+ "paddsw %%mm5, %%mm5 \n\t" /* r5 = B.. + B.. */ \
+ "paddsw %%mm6, %%mm5 \n\t" /* r5 = R5 = F. + B.. */ \
+ "psraw $4, %%mm6 \n\t" /* r6 = NR6 */ \
+ "movq %%mm4, "J(4)"\n\t" /* store NR4 at J4 */ \
+ "psraw $4, %%mm5 \n\t" /* r5 = NR5 */ \
+ "movq %%mm3, "I(3)"\n\t" /* store NR3 at I3 */ \
+ "psubsw %%mm0, %%mm7 \n\t" /* r7 = R7 = G. - C. */ \
+ "paddsw "OC_8", %%mm7 \n\t" /* adjust R7 (and R0) for shift */ \
+ "paddsw %%mm0, %%mm0 \n\t" /* r0 = C. + C. */ \
+ "paddsw %%mm7, %%mm0 \n\t" /* r0 = R0 = G. + C. */ \
+ "psraw $4, %%mm7 \n\t" /* r7 = NR7 */ \
+ "movq %%mm6, "J(6)"\n\t" /* store NR6 at J6 */ \
+ "psraw $4, %%mm0 \n\t" /* r0 = NR0 */ \
+ "movq %%mm5, "J(5)"\n\t" /* store NR5 at J5 */ \
+ "movq %%mm7, "J(7)"\n\t" /* store NR7 at J7 */ \
+ "movq %%mm0, "I(0)"\n\t" /* store NR0 at I0 */
+
+/* Following macro does two 4x4 transposes in place.
+
+ At entry (we assume):
+
+ r0 = a3 a2 a1 a0
+ I(1) = b3 b2 b1 b0
+ r2 = c3 c2 c1 c0
+ r3 = d3 d2 d1 d0
+
+ r4 = e3 e2 e1 e0
+ r5 = f3 f2 f1 f0
+ r6 = g3 g2 g1 g0
+ r7 = h3 h2 h1 h0
+
+ At exit, we have:
+
+ I(0) = d0 c0 b0 a0
+ I(1) = d1 c1 b1 a1
+ I(2) = d2 c2 b2 a2
+ I(3) = d3 c3 b3 a3
+
+ J(4) = h0 g0 f0 e0
+ J(5) = h1 g1 f1 e1
+ J(6) = h2 g2 f2 e2
+ J(7) = h3 g3 f3 e3
+
+ I(0) I(1) I(2) I(3) is the transpose of r0 I(1) r2 r3.
+ J(4) J(5) J(6) J(7) is the transpose of r4 r5 r6 r7.
+
+ Since r1 is free at entry, we calculate the Js first. */
+#define Transpose() \
+ "movq %%mm4, %%mm1 \n\t" /* r1 = e3 e2 e1 e0 */ \
+ "punpcklwd %%mm5, %%mm4 \n\t" /* r4 = f1 e1 f0 e0 */ \
+ "movq %%mm0, "I(0)"\n\t" /* save a3 a2 a1 a0 */ \
+ "punpckhwd %%mm5, %%mm1 \n\t" /* r1 = f3 e3 f2 e2 */ \
+ "movq %%mm6, %%mm0 \n\t" /* r0 = g3 g2 g1 g0 */ \
+ "punpcklwd %%mm7, %%mm6 \n\t" /* r6 = h1 g1 h0 g0 */ \
+ "movq %%mm4, %%mm5 \n\t" /* r5 = f1 e1 f0 e0 */ \
+ "punpckldq %%mm6, %%mm4 \n\t" /* r4 = h0 g0 f0 e0 = R4 */ \
+ "punpckhdq %%mm6, %%mm5 \n\t" /* r5 = h1 g1 f1 e1 = R5 */ \
+ "movq %%mm1, %%mm6 \n\t" /* r6 = f3 e3 f2 e2 */ \
+ "movq %%mm4, "J(4)"\n\t" \
+ "punpckhwd %%mm7, %%mm0 \n\t" /* r0 = h3 g3 h2 g2 */ \
+ "movq %%mm5, "J(5)"\n\t" \
+ "punpckhdq %%mm0, %%mm6 \n\t" /* r6 = h3 g3 f3 e3 = R7 */ \
+ "movq "I(0)", %%mm4 \n\t" /* r4 = a3 a2 a1 a0 */ \
+ "punpckldq %%mm0, %%mm1 \n\t" /* r1 = h2 g2 f2 e2 = R6 */ \
+ "movq "I(1)", %%mm5 \n\t" /* r5 = b3 b2 b1 b0 */ \
+ "movq %%mm4, %%mm0 \n\t" /* r0 = a3 a2 a1 a0 */ \
+ "movq %%mm6, "J(7)"\n\t" \
+ "punpcklwd %%mm5, %%mm0 \n\t" /* r0 = b1 a1 b0 a0 */ \
+ "movq %%mm1, "J(6)"\n\t" \
+ "punpckhwd %%mm5, %%mm4 \n\t" /* r4 = b3 a3 b2 a2 */ \
+ "movq %%mm2, %%mm5 \n\t" /* r5 = c3 c2 c1 c0 */ \
+ "punpcklwd %%mm3, %%mm2 \n\t" /* r2 = d1 c1 d0 c0 */ \
+ "movq %%mm0, %%mm1 \n\t" /* r1 = b1 a1 b0 a0 */ \
+ "punpckldq %%mm2, %%mm0 \n\t" /* r0 = d0 c0 b0 a0 = R0 */ \
+ "punpckhdq %%mm2, %%mm1 \n\t" /* r1 = d1 c1 b1 a1 = R1 */ \
+ "movq %%mm4, %%mm2 \n\t" /* r2 = b3 a3 b2 a2 */ \
+ "movq %%mm0, "I(0)"\n\t" \
+ "punpckhwd %%mm3, %%mm5 \n\t" /* r5 = d3 c3 d2 c2 */ \
+ "movq %%mm1, "I(1)"\n\t" \
+ "punpckhdq %%mm5, %%mm4 \n\t" /* r4 = d3 c3 b3 a3 = R3 */ \
+ "punpckldq %%mm5, %%mm2 \n\t" /* r2 = d2 c2 b2 a2 = R2 */ \
+ "movq %%mm4, "I(3)"\n\t" \
+ "movq %%mm2, "I(2)"\n\t"
+
+void ff_vp3_idct_mmx(int16_t *output_data)
+{
+ /* eax = quantized input
+ * ebx = dequantizer matrix
+ * ecx = IDCT constants
+ * M(I) = ecx + MaskOffset(0) + I * 8
+ * C(I) = ecx + CosineOffset(32) + (I-1) * 8
+ * edx = output
+ * r0..r7 = mm0..mm7
+ */
+
+#define C(x) AV_STRINGIFY(16*(x-1))"(%1)"
+#define OC_8 "%2"
+
+ /* at this point, function has completed dequantization + dezigzag +
+ * partial transposition; now do the idct itself */
+#define I(x) AV_STRINGIFY(16* x )"(%0)"
+#define J(x) AV_STRINGIFY(16*(x-4) + 8)"(%0)"
+
+ __asm__ volatile (
+ RowIDCT()
+ Transpose()
+
+#undef I
+#undef J
+#define I(x) AV_STRINGIFY(16* x + 64)"(%0)"
+#define J(x) AV_STRINGIFY(16*(x-4) + 72)"(%0)"
+
+ RowIDCT()
+ Transpose()
+
+#undef I
+#undef J
+#define I(x) AV_STRINGIFY(16*x)"(%0)"
+#define J(x) AV_STRINGIFY(16*x)"(%0)"
+
+ ColumnIDCT()
+
+#undef I
+#undef J
+#define I(x) AV_STRINGIFY(16*x + 8)"(%0)"
+#define J(x) AV_STRINGIFY(16*x + 8)"(%0)"
+
+ ColumnIDCT()
+ :: "r"(output_data), "r"(ff_vp3_idct_data), "m"(ff_pw_8)
+ );
+#undef I
+#undef J
+
+}
+
+void ff_vp3_idct_put_mmx(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ ff_vp3_idct_mmx(block);
+ put_signed_pixels_clamped_mmx(block, dest, line_size);
+}
+
+void ff_vp3_idct_add_mmx(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ ff_vp3_idct_mmx(block);
+ add_pixels_clamped_mmx(block, dest, line_size);
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/vp3dsp_mmx.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/vp3dsp_mmx.h
new file mode 100644
index 000000000..e565a3302
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/vp3dsp_mmx.h
@@ -0,0 +1,35 @@
+/*
+ * vp3dsp MMX function declarations
+ * Copyright (c) 2007 Aurelien Jacobs <aurel@gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_X86_VP3DSP_MMX_H
+#define AVCODEC_X86_VP3DSP_MMX_H
+
+#include <stdint.h>
+#include "libavcodec/dsputil.h"
+
+void ff_vp3_idct_mmx(int16_t *data);
+void ff_vp3_idct_put_mmx(uint8_t *dest, int line_size, DCTELEM *block);
+void ff_vp3_idct_add_mmx(uint8_t *dest, int line_size, DCTELEM *block);
+
+void ff_vp3_v_loop_filter_mmx2(uint8_t *src, int stride, int *bounding_values);
+void ff_vp3_h_loop_filter_mmx2(uint8_t *src, int stride, int *bounding_values);
+
+#endif /* AVCODEC_X86_VP3DSP_MMX_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/vp3dsp_sse2.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/vp3dsp_sse2.c
new file mode 100644
index 000000000..e7745b372
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/vp3dsp_sse2.c
@@ -0,0 +1,186 @@
+/*
+ * Copyright (C) 2004 the ffmpeg project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/x86/vp3dsp_sse2.c
+ * SSE2-optimized functions cribbed from the original VP3 source code.
+ */
+
+#include "libavcodec/dsputil.h"
+#include "dsputil_mmx.h"
+
+DECLARE_ALIGNED_16(const uint16_t, ff_vp3_idct_data)[7 * 8] =
+{
+ 64277,64277,64277,64277,64277,64277,64277,64277,
+ 60547,60547,60547,60547,60547,60547,60547,60547,
+ 54491,54491,54491,54491,54491,54491,54491,54491,
+ 46341,46341,46341,46341,46341,46341,46341,46341,
+ 36410,36410,36410,36410,36410,36410,36410,36410,
+ 25080,25080,25080,25080,25080,25080,25080,25080,
+ 12785,12785,12785,12785,12785,12785,12785,12785
+};
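+/* The seven rows above appear to be cos(k*PI/16) for k = 1..7, scaled by 2^16
+   (e.g. 46341 ~= cos(4*PI/16) * 65536) and replicated across each SIMD row. */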
+
+
+#define VP3_1D_IDCT_SSE2(ADD, SHIFT) \
+ "movdqa "I(3)", %%xmm2 \n\t" /* xmm2 = i3 */ \
+ "movdqa "C(3)", %%xmm6 \n\t" /* xmm6 = c3 */ \
+ "movdqa %%xmm2, %%xmm4 \n\t" /* xmm4 = i3 */ \
+ "movdqa "I(5)", %%xmm7 \n\t" /* xmm7 = i5 */ \
+ "pmulhw %%xmm6, %%xmm4 \n\t" /* xmm4 = c3 * i3 - i3 */ \
+ "movdqa "C(5)", %%xmm1 \n\t" /* xmm1 = c5 */ \
+ "pmulhw %%xmm7, %%xmm6 \n\t" /* xmm6 = c3 * i5 - i5 */ \
+ "movdqa %%xmm1, %%xmm5 \n\t" /* xmm5 = c5 */ \
+ "pmulhw %%xmm2, %%xmm1 \n\t" /* xmm1 = c5 * i3 - i3 */ \
+ "movdqa "I(1)", %%xmm3 \n\t" /* xmm3 = i1 */ \
+ "pmulhw %%xmm7, %%xmm5 \n\t" /* xmm5 = c5 * i5 - i5 */ \
+ "movdqa "C(1)", %%xmm0 \n\t" /* xmm0 = c1 */ \
+ "paddw %%xmm2, %%xmm4 \n\t" /* xmm4 = c3 * i3 */ \
+ "paddw %%xmm7, %%xmm6 \n\t" /* xmm6 = c3 * i5 */ \
+ "paddw %%xmm1, %%xmm2 \n\t" /* xmm2 = c5 * i3 */ \
+ "movdqa "I(7)", %%xmm1 \n\t" /* xmm1 = i7 */ \
+ "paddw %%xmm5, %%xmm7 \n\t" /* xmm7 = c5 * i5 */ \
+ "movdqa %%xmm0, %%xmm5 \n\t" /* xmm5 = c1 */ \
+ "pmulhw %%xmm3, %%xmm0 \n\t" /* xmm0 = c1 * i1 - i1 */ \
+ "paddsw %%xmm7, %%xmm4 \n\t" /* xmm4 = c3 * i3 + c5 * i5 = C */ \
+ "pmulhw %%xmm1, %%xmm5 \n\t" /* xmm5 = c1 * i7 - i7 */ \
+ "movdqa "C(7)", %%xmm7 \n\t" /* xmm7 = c7 */ \
+ "psubsw %%xmm2, %%xmm6 \n\t" /* xmm6 = c3 * i5 - c5 * i3 = D */ \
+ "paddw %%xmm3, %%xmm0 \n\t" /* xmm0 = c1 * i1 */ \
+ "pmulhw %%xmm7, %%xmm3 \n\t" /* xmm3 = c7 * i1 */ \
+ "movdqa "I(2)", %%xmm2 \n\t" /* xmm2 = i2 */ \
+ "pmulhw %%xmm1, %%xmm7 \n\t" /* xmm7 = c7 * i7 */ \
+ "paddw %%xmm1, %%xmm5 \n\t" /* xmm5 = c1 * i7 */ \
+ "movdqa %%xmm2, %%xmm1 \n\t" /* xmm1 = i2 */ \
+ "pmulhw "C(2)", %%xmm2 \n\t" /* xmm2 = i2 * c2 -i2 */ \
+ "psubsw %%xmm5, %%xmm3 \n\t" /* xmm3 = c7 * i1 - c1 * i7 = B */ \
+ "movdqa "I(6)", %%xmm5 \n\t" /* xmm5 = i6 */ \
+ "paddsw %%xmm7, %%xmm0 \n\t" /* xmm0 = c1 * i1 + c7 * i7 = A */ \
+ "movdqa %%xmm5, %%xmm7 \n\t" /* xmm7 = i6 */ \
+ "psubsw %%xmm4, %%xmm0 \n\t" /* xmm0 = A - C */ \
+ "pmulhw "C(2)", %%xmm5 \n\t" /* xmm5 = c2 * i6 - i6 */ \
+ "paddw %%xmm1, %%xmm2 \n\t" /* xmm2 = i2 * c2 */ \
+ "pmulhw "C(6)", %%xmm1 \n\t" /* xmm1 = c6 * i2 */ \
+ "paddsw %%xmm4, %%xmm4 \n\t" /* xmm4 = C + C */ \
+ "paddsw %%xmm0, %%xmm4 \n\t" /* xmm4 = A + C = C. */ \
+ "psubsw %%xmm6, %%xmm3 \n\t" /* xmm3 = B - D */ \
+ "paddw %%xmm7, %%xmm5 \n\t" /* xmm5 = c2 * i6 */ \
+ "paddsw %%xmm6, %%xmm6 \n\t" /* xmm6 = D + D */ \
+ "pmulhw "C(6)", %%xmm7 \n\t" /* xmm7 = c6 * i6 */ \
+ "paddsw %%xmm3, %%xmm6 \n\t" /* xmm6 = B + D = D. */ \
+ "movdqa %%xmm4, "I(1)" \n\t" /* Save C. at I(1) */ \
+ "psubsw %%xmm5, %%xmm1 \n\t" /* xmm1 = c6 * i2 - c2 * i6 = H */ \
+ "movdqa "C(4)", %%xmm4 \n\t" /* xmm4 = c4 */ \
+ "movdqa %%xmm3, %%xmm5 \n\t" /* xmm5 = B - D */ \
+ "pmulhw %%xmm4, %%xmm3 \n\t" /* xmm3 = ( c4 -1 ) * ( B - D ) */ \
+ "paddsw %%xmm2, %%xmm7 \n\t" /* xmm7 = c2 * i2 + c6 * i6 = G */ \
+ "movdqa %%xmm6, "I(2)" \n\t" /* Save D. at I(2) */ \
+ "movdqa %%xmm0, %%xmm2 \n\t" /* xmm2 = A - C */ \
+ "movdqa "I(0)", %%xmm6 \n\t" /* xmm6 = i0 */ \
+ "pmulhw %%xmm4, %%xmm0 \n\t" /* xmm0 = ( c4 - 1 ) * ( A - C ) = A. */ \
+ "paddw %%xmm3, %%xmm5 \n\t" /* xmm5 = c4 * ( B - D ) = B. */ \
+ "movdqa "I(4)", %%xmm3 \n\t" /* xmm3 = i4 */ \
+ "psubsw %%xmm1, %%xmm5 \n\t" /* xmm5 = B. - H = B.. */ \
+ "paddw %%xmm0, %%xmm2 \n\t" /* xmm2 = c4 * ( A - C) = A. */ \
+ "psubsw %%xmm3, %%xmm6 \n\t" /* xmm6 = i0 - i4 */ \
+ "movdqa %%xmm6, %%xmm0 \n\t" /* xmm0 = i0 - i4 */ \
+ "pmulhw %%xmm4, %%xmm6 \n\t" /* xmm6 = (c4 - 1) * (i0 - i4) = F */ \
+ "paddsw %%xmm3, %%xmm3 \n\t" /* xmm3 = i4 + i4 */ \
+ "paddsw %%xmm1, %%xmm1 \n\t" /* xmm1 = H + H */ \
+ "paddsw %%xmm0, %%xmm3 \n\t" /* xmm3 = i0 + i4 */ \
+ "paddsw %%xmm5, %%xmm1 \n\t" /* xmm1 = B. + H = H. */ \
+ "pmulhw %%xmm3, %%xmm4 \n\t" /* xmm4 = ( c4 - 1 ) * ( i0 + i4 ) */ \
+ "paddw %%xmm0, %%xmm6 \n\t" /* xmm6 = c4 * ( i0 - i4 ) */ \
+ "psubsw %%xmm2, %%xmm6 \n\t" /* xmm6 = F - A. = F. */ \
+ "paddsw %%xmm2, %%xmm2 \n\t" /* xmm2 = A. + A. */ \
+ "movdqa "I(1)", %%xmm0 \n\t" /* Load C. from I(1) */ \
+ "paddsw %%xmm6, %%xmm2 \n\t" /* xmm2 = F + A. = A.. */ \
+ "paddw %%xmm3, %%xmm4 \n\t" /* xmm4 = c4 * ( i0 + i4 ) = 3 */ \
+ "psubsw %%xmm1, %%xmm2 \n\t" /* xmm2 = A.. - H. = R2 */ \
+ ADD(%%xmm2) /* Adjust R2 and R1 before shifting */ \
+ "paddsw %%xmm1, %%xmm1 \n\t" /* xmm1 = H. + H. */ \
+ "paddsw %%xmm2, %%xmm1 \n\t" /* xmm1 = A.. + H. = R1 */ \
+ SHIFT(%%xmm2) /* xmm2 = op2 */ \
+ "psubsw %%xmm7, %%xmm4 \n\t" /* xmm4 = E - G = E. */ \
+ SHIFT(%%xmm1) /* xmm1 = op1 */ \
+ "movdqa "I(2)", %%xmm3 \n\t" /* Load D. from I(2) */ \
+ "paddsw %%xmm7, %%xmm7 \n\t" /* xmm7 = G + G */ \
+ "paddsw %%xmm4, %%xmm7 \n\t" /* xmm7 = E + G = G. */ \
+ "psubsw %%xmm3, %%xmm4 \n\t" /* xmm4 = E. - D. = R4 */ \
+ ADD(%%xmm4) /* Adjust R4 and R3 before shifting */ \
+ "paddsw %%xmm3, %%xmm3 \n\t" /* xmm3 = D. + D. */ \
+ "paddsw %%xmm4, %%xmm3 \n\t" /* xmm3 = E. + D. = R3 */ \
+ SHIFT(%%xmm4) /* xmm4 = op4 */ \
+ "psubsw %%xmm5, %%xmm6 \n\t" /* xmm6 = F. - B..= R6 */ \
+ SHIFT(%%xmm3) /* xmm3 = op3 */ \
+ ADD(%%xmm6) /* Adjust R6 and R5 before shifting */ \
+ "paddsw %%xmm5, %%xmm5 \n\t" /* xmm5 = B.. + B.. */ \
+ "paddsw %%xmm6, %%xmm5 \n\t" /* xmm5 = F. + B.. = R5 */ \
+ SHIFT(%%xmm6) /* xmm6 = op6 */ \
+ SHIFT(%%xmm5) /* xmm5 = op5 */ \
+ "psubsw %%xmm0, %%xmm7 \n\t" /* xmm7 = G. - C. = R7 */ \
+ ADD(%%xmm7) /* Adjust R7 and R0 before shifting */ \
+ "paddsw %%xmm0, %%xmm0 \n\t" /* xmm0 = C. + C. */ \
+ "paddsw %%xmm7, %%xmm0 \n\t" /* xmm0 = G. + C. */ \
+ SHIFT(%%xmm7) /* xmm7 = op7 */ \
+ SHIFT(%%xmm0) /* xmm0 = op0 */
+
+#define PUT_BLOCK(r0, r1, r2, r3, r4, r5, r6, r7) \
+ "movdqa " #r0 ", " O(0) "\n\t" \
+ "movdqa " #r1 ", " O(1) "\n\t" \
+ "movdqa " #r2 ", " O(2) "\n\t" \
+ "movdqa " #r3 ", " O(3) "\n\t" \
+ "movdqa " #r4 ", " O(4) "\n\t" \
+ "movdqa " #r5 ", " O(5) "\n\t" \
+ "movdqa " #r6 ", " O(6) "\n\t" \
+ "movdqa " #r7 ", " O(7) "\n\t"
+
+#define NOP(xmm)
+#define SHIFT4(xmm) "psraw $4, "#xmm"\n\t"
+#define ADD8(xmm) "paddsw %2, "#xmm"\n\t"
+
+void ff_vp3_idct_sse2(int16_t *input_data)
+{
+#define I(x) AV_STRINGIFY(16*x)"(%0)"
+#define O(x) I(x)
+#define C(x) AV_STRINGIFY(16*(x-1))"(%1)"
+
+ __asm__ volatile (
+ VP3_1D_IDCT_SSE2(NOP, NOP)
+
+ TRANSPOSE8(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm6, %%xmm7, (%0))
+ PUT_BLOCK(%%xmm0, %%xmm5, %%xmm7, %%xmm3, %%xmm6, %%xmm4, %%xmm2, %%xmm1)
+
+ VP3_1D_IDCT_SSE2(ADD8, SHIFT4)
+ PUT_BLOCK(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm6, %%xmm7)
+ :: "r"(input_data), "r"(ff_vp3_idct_data), "m"(ff_pw_8)
+ );
+}
+
+void ff_vp3_idct_put_sse2(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ ff_vp3_idct_sse2(block);
+ put_signed_pixels_clamped_mmx(block, dest, line_size);
+}
+
+void ff_vp3_idct_add_sse2(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ ff_vp3_idct_sse2(block);
+ add_pixels_clamped_mmx(block, dest, line_size);
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/vp3dsp_sse2.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/vp3dsp_sse2.h
new file mode 100644
index 000000000..9094620eb
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/vp3dsp_sse2.h
@@ -0,0 +1,31 @@
+/*
+ * vp3dsp SSE2 function declarations
+ * Copyright (c) 2007 Aurelien Jacobs <aurel@gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_X86_VP3DSP_SSE2_H
+#define AVCODEC_X86_VP3DSP_SSE2_H
+
+#include "libavcodec/dsputil.h"
+
+void ff_vp3_idct_sse2(int16_t *input_data);
+void ff_vp3_idct_put_sse2(uint8_t *dest, int line_size, DCTELEM *block);
+void ff_vp3_idct_add_sse2(uint8_t *dest, int line_size, DCTELEM *block);
+
+#endif /* AVCODEC_X86_VP3DSP_SSE2_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/vp6dsp_mmx.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/vp6dsp_mmx.c
new file mode 100644
index 000000000..39e40d568
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/vp6dsp_mmx.c
@@ -0,0 +1,108 @@
+/**
+ * @file libavcodec/x86/vp6dsp_mmx.c
+ * MMX-optimized functions for the VP6 decoder
+ *
+ * Copyright (C) 2009 Sebastien Lucas <sebastien.lucas@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/x86_cpu.h"
+#include "libavcodec/dsputil.h"
+#include "dsputil_mmx.h"
+#include "vp6dsp_mmx.h"
+
+
+#define DIAG4_MMX(in1,in2,in3,in4) \
+ "movq "#in1"(%0), %%mm0 \n\t" \
+ "movq "#in2"(%0), %%mm1 \n\t" \
+ "movq %%mm0, %%mm3 \n\t" \
+ "movq %%mm1, %%mm4 \n\t" \
+ "punpcklbw %%mm7, %%mm0 \n\t" \
+ "punpcklbw %%mm7, %%mm1 \n\t" \
+ "punpckhbw %%mm7, %%mm3 \n\t" \
+ "punpckhbw %%mm7, %%mm4 \n\t" \
+ "pmullw 0(%2), %%mm0 \n\t" /* src[x-8 ] * biweight [0] */ \
+ "pmullw 8(%2), %%mm1 \n\t" /* src[x ] * biweight [1] */ \
+ "pmullw 0(%2), %%mm3 \n\t" /* src[x-8 ] * biweight [0] */ \
+ "pmullw 8(%2), %%mm4 \n\t" /* src[x ] * biweight [1] */ \
+ "paddw %%mm1, %%mm0 \n\t" \
+ "paddw %%mm4, %%mm3 \n\t" \
+ "movq "#in3"(%0), %%mm1 \n\t" \
+ "movq "#in4"(%0), %%mm2 \n\t" \
+ "movq %%mm1, %%mm4 \n\t" \
+ "movq %%mm2, %%mm5 \n\t" \
+ "punpcklbw %%mm7, %%mm1 \n\t" \
+ "punpcklbw %%mm7, %%mm2 \n\t" \
+ "punpckhbw %%mm7, %%mm4 \n\t" \
+ "punpckhbw %%mm7, %%mm5 \n\t" \
+ "pmullw 16(%2), %%mm1 \n\t" /* src[x+8 ] * biweight [2] */ \
+ "pmullw 24(%2), %%mm2 \n\t" /* src[x+16] * biweight [3] */ \
+ "pmullw 16(%2), %%mm4 \n\t" /* src[x+8 ] * biweight [2] */ \
+ "pmullw 24(%2), %%mm5 \n\t" /* src[x+16] * biweight [3] */ \
+ "paddw %%mm2, %%mm1 \n\t" \
+ "paddw %%mm5, %%mm4 \n\t" \
+ "paddsw %%mm1, %%mm0 \n\t" \
+ "paddsw %%mm4, %%mm3 \n\t" \
+ "paddsw %%mm6, %%mm0 \n\t" /* Add 64 */ \
+ "paddsw %%mm6, %%mm3 \n\t" /* Add 64 */ \
+ "psraw $7, %%mm0 \n\t" \
+ "psraw $7, %%mm3 \n\t" \
+ "packuswb %%mm3, %%mm0 \n\t" \
+ "movq %%mm0, (%1) \n\t"
+
+void ff_vp6_filter_diag4_mmx(uint8_t *dst, uint8_t *src, int stride,
+ const int16_t *h_weights, const int16_t *v_weights)
+{
+ uint8_t tmp[8*11], *t = tmp;
+ int16_t weights[4*4];
+ int i;
+ src -= stride;
+
+ for (i=0; i<4*4; i++)
+ weights[i] = h_weights[i>>2];
+
+ __asm__ volatile(
+ "pxor %%mm7, %%mm7 \n\t"
+ "movq "MANGLE(ff_pw_64)", %%mm6 \n\t"
+ "1: \n\t"
+ DIAG4_MMX(-1,0,1,2)
+ "add $8, %1 \n\t"
+ "add %3, %0 \n\t"
+ "decl %4 \n\t"
+ "jnz 1b \n\t"
+ : "+r"(src), "+r"(t)
+ : "r"(weights), "r"((x86_reg)stride), "r"(11)
+ : "memory");
+
+ t = tmp + 8;
+ for (i=0; i<4*4; i++)
+ weights[i] = v_weights[i>>2];
+
+ __asm__ volatile(
+ "pxor %%mm7, %%mm7 \n\t"
+ "movq "MANGLE(ff_pw_64)", %%mm6 \n\t"
+ "1: \n\t"
+ DIAG4_MMX(-8,0,8,16)
+ "add $8, %0 \n\t"
+ "add %3, %1 \n\t"
+ "decl %4 \n\t"
+ "jnz 1b \n\t"
+ : "+r"(t), "+r"(dst)
+ : "r"(weights), "r"((x86_reg)stride), "r"(8)
+ : "memory");
+}
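+
+/*
+ * Illustrative scalar sketch of the two passes above (hypothetical helper,
+ * not compiled): an 11-row horizontal 4-tap pass into an 8x11 temporary,
+ * then an 8-row vertical 4-tap pass, each rounded with +64, shifted by 7
+ * and clamped to 8 bits, matching the per-instruction comments in DIAG4_MMX.
+ */
+#if 0
+static void vp6_filter_diag4_ref(uint8_t *dst, uint8_t *src, int stride,
+                                 const int16_t *h_weights, const int16_t *v_weights)
+{
+    uint8_t tmp[8*11], *t = tmp;
+    int x, y;
+
+    src -= stride;
+    for (y = 0; y < 11; y++, src += stride, t += 8)
+        for (x = 0; x < 8; x++)
+            t[x] = av_clip_uint8((src[x-1]*h_weights[0] + src[x  ]*h_weights[1] +
+                                  src[x+1]*h_weights[2] + src[x+2]*h_weights[3] + 64) >> 7);
+
+    t = tmp + 8;
+    for (y = 0; y < 8; y++, dst += stride, t += 8)
+        for (x = 0; x < 8; x++)
+            dst[x] = av_clip_uint8((t[x-8]*v_weights[0] + t[x   ]*v_weights[1] +
+                                    t[x+8]*v_weights[2] + t[x+16]*v_weights[3] + 64) >> 7);
+}
+#endif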
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/vp6dsp_mmx.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/vp6dsp_mmx.h
new file mode 100644
index 000000000..743bc4361
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/vp6dsp_mmx.h
@@ -0,0 +1,30 @@
+/*
+ * vp6dsp MMX function declarations
+ * Copyright (c) 2009 Sebastien Lucas <sebastien.lucas@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_X86_VP6DSP_MMX_H
+#define AVCODEC_X86_VP6DSP_MMX_H
+
+#include <stdint.h>
+
+void ff_vp6_filter_diag4_mmx(uint8_t *dst, uint8_t *src, int stride,
+ const int16_t *h_weights,const int16_t *v_weights);
+
+#endif /* AVCODEC_X86_VP6DSP_MMX_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/vp6dsp_sse2.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/vp6dsp_sse2.c
new file mode 100644
index 000000000..c72a0cacb
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/vp6dsp_sse2.c
@@ -0,0 +1,98 @@
+/**
+ * @file libavcodec/x86/vp6dsp_sse2.c
+ * SSE2-optimized functions for the VP6 decoder
+ *
+ * Copyright (C) 2009 Zuxy Meng <zuxy.meng@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/x86_cpu.h"
+#include "libavcodec/dsputil.h"
+#include "dsputil_mmx.h"
+#include "vp6dsp_sse2.h"
+
+#define DIAG4_SSE2(in1,in2,in3,in4) \
+ "movq "#in1"(%0), %%xmm0 \n\t" \
+ "movq "#in2"(%0), %%xmm1 \n\t" \
+ "punpcklbw %%xmm7, %%xmm0 \n\t" \
+ "punpcklbw %%xmm7, %%xmm1 \n\t" \
+ "pmullw %%xmm4, %%xmm0 \n\t" /* src[x-8 ] * biweight [0] */ \
+ "pmullw %%xmm5, %%xmm1 \n\t" /* src[x ] * biweight [1] */ \
+ "paddw %%xmm1, %%xmm0 \n\t" \
+ "movq "#in3"(%0), %%xmm1 \n\t" \
+ "movq "#in4"(%0), %%xmm2 \n\t" \
+ "punpcklbw %%xmm7, %%xmm1 \n\t" \
+ "punpcklbw %%xmm7, %%xmm2 \n\t" \
+ "pmullw %%xmm6, %%xmm1 \n\t" /* src[x+8 ] * biweight [2] */ \
+ "pmullw %%xmm3, %%xmm2 \n\t" /* src[x+16] * biweight [3] */ \
+ "paddw %%xmm2, %%xmm1 \n\t" \
+ "paddsw %%xmm1, %%xmm0 \n\t" \
+ "paddsw "MANGLE(ff_pw_64)", %%xmm0 \n\t" /* Add 64 */ \
+ "psraw $7, %%xmm0 \n\t" \
+ "packuswb %%xmm0, %%xmm0 \n\t" \
+ "movq %%xmm0, (%1) \n\t" \
+
+void ff_vp6_filter_diag4_sse2(uint8_t *dst, uint8_t *src, int stride,
+ const int16_t *h_weights,const int16_t *v_weights)
+{
+ uint8_t tmp[8*11], *t = tmp;
+ src -= stride;
+
+ __asm__ volatile(
+ "pxor %%xmm7, %%xmm7 \n\t"
+ "movq %4, %%xmm3 \n\t"
+ "pshuflw $0, %%xmm3, %%xmm4 \n\t"
+ "punpcklqdq %%xmm4, %%xmm4 \n\t"
+ "pshuflw $85, %%xmm3, %%xmm5 \n\t"
+ "punpcklqdq %%xmm5, %%xmm5 \n\t"
+ "pshuflw $170, %%xmm3, %%xmm6 \n\t"
+ "punpcklqdq %%xmm6, %%xmm6 \n\t"
+ "pshuflw $255, %%xmm3, %%xmm3 \n\t"
+ "punpcklqdq %%xmm3, %%xmm3 \n\t"
+ "1: \n\t"
+ DIAG4_SSE2(-1,0,1,2)
+ "add $8, %1 \n\t"
+ "add %2, %0 \n\t"
+ "decl %3 \n\t"
+ "jnz 1b \n\t"
+ : "+r"(src), "+r"(t)
+ : "g"((x86_reg)stride), "r"(11), "m"(*(const int64_t*)h_weights)
+ : "memory");
+
+ t = tmp + 8;
+
+ __asm__ volatile(
+ "movq %4, %%xmm3 \n\t"
+ "pshuflw $0, %%xmm3, %%xmm4 \n\t"
+ "punpcklqdq %%xmm4, %%xmm4 \n\t"
+ "pshuflw $85, %%xmm3, %%xmm5 \n\t"
+ "punpcklqdq %%xmm5, %%xmm5 \n\t"
+ "pshuflw $170, %%xmm3, %%xmm6 \n\t"
+ "punpcklqdq %%xmm6, %%xmm6 \n\t"
+ "pshuflw $255, %%xmm3, %%xmm3 \n\t"
+ "punpcklqdq %%xmm3, %%xmm3 \n\t"
+ "1: \n\t"
+ DIAG4_SSE2(-8,0,8,16)
+ "add $8, %0 \n\t"
+ "add %2, %1 \n\t"
+ "decl %3 \n\t"
+ "jnz 1b \n\t"
+ : "+r"(t), "+r"(dst)
+ : "g"((x86_reg)stride), "r"(8), "m"(*(const int64_t*)v_weights)
+ : "memory");
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/vp6dsp_sse2.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/vp6dsp_sse2.h
new file mode 100644
index 000000000..a30089a3e
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/vp6dsp_sse2.h
@@ -0,0 +1,30 @@
+/*
+ * vp6dsp SSE2 function declarations
+ * Copyright (c) 2009 Zuxy Meng <zuxy.meng@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_X86_VP6DSP_SSE2_H
+#define AVCODEC_X86_VP6DSP_SSE2_H
+
+#include <stdint.h>
+
+void ff_vp6_filter_diag4_sse2(uint8_t *dst, uint8_t *src, int stride,
+ const int16_t *h_weights,const int16_t *v_weights);
+
+#endif /* AVCODEC_X86_VP6DSP_SSE2_H */
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/x86inc.asm b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/x86inc.asm
new file mode 100644
index 000000000..c29ef3ee3
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/x86inc.asm
@@ -0,0 +1,625 @@
+;*****************************************************************************
+;* x86inc.asm
+;*****************************************************************************
+;* Copyright (C) 2005-2008 Loren Merritt <lorenm@u.washington.edu>
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;*****************************************************************************
+
+%ifdef ARCH_X86_64
+ %ifidn __OUTPUT_FORMAT__,win32
+ %define WIN64
+ %else
+ %define UNIX64
+ %endif
+%endif
+
+; FIXME: All of the 64bit asm functions that take a stride as an argument
+; via register assume that the high dword of that register is filled with 0.
+; This is true in practice (since we never do any 64bit arithmetic on strides,
+; and x264's strides are all positive), but is not guaranteed by the ABI.
+
+; Name of the .rodata section.
+; Kludge: Something on OS X fails to align .rodata even given an align attribute,
+; so use a different read-only section.
+%macro SECTION_RODATA 0-1 16
+ %ifidn __OUTPUT_FORMAT__,macho64
+ SECTION .text align=%1
+ %elifidn __OUTPUT_FORMAT__,macho
+ SECTION .text align=%1
+ fakegot:
+ %else
+ SECTION .rodata align=%1
+ %endif
+%endmacro
+
+; PIC support macros.
+; x86_64 can't fit 64bit address literals in most instruction types,
+; so shared objects (under the assumption that they might be anywhere
+; in memory) must use an address mode that does fit.
+; So all accesses to global variables must use this macro, e.g.
+; mov eax, [foo GLOBAL]
+; instead of
+; mov eax, [foo]
+;
+; x86_32 doesn't require PIC.
+; Some distros prefer shared objects to be PIC, but nothing breaks if
+; the code contains a few textrels, so we'll skip that complexity.
+
+%ifdef WIN64
+ %define PIC
+%elifndef ARCH_X86_64
+ %undef PIC
+%endif
+%ifdef PIC
+ %define GLOBAL wrt rip
+%else
+ %define GLOBAL
+%endif
+
+; Macros to eliminate most code duplication between x86_32 and x86_64:
+; Currently this works only for leaf functions which load all their arguments
+; into registers at the start, and make no other use of the stack. Luckily that
+; covers most of x264's asm.
+
+; PROLOGUE:
+; %1 = number of arguments. loads them from stack if needed.
+; %2 = number of registers used. pushes callee-saved regs if needed.
+; %3 = number of xmm registers used. pushes callee-saved xmm regs if needed.
+; %4 = list of names to define to registers
+; PROLOGUE can also be invoked by adding the same options to cglobal
+
+; e.g.
+; cglobal foo, 2,3,0, dst, src, tmp
+; declares a function (foo), taking two args (dst and src) and one local variable (tmp)
+
+; TODO Some functions can use some args directly from the stack. If they're the
+; last args then you can just not declare them, but if they're in the middle
+; we need a more flexible macro.
+
+; RET:
+; Pops anything that was pushed by PROLOGUE
+
+; REP_RET:
+; Same, but if it doesn't pop anything it becomes a 2-byte ret, for athlons
+; which are slow when a normal ret follows a branch.
+
+; registers:
+; rN and rNq are the native-size register holding function argument N
+; rNd, rNw, rNb are dword, word, and byte size
+; rNm is the original location of arg N (a register or on the stack), dword
+; rNmp is native size
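+; e.g. with the UNIX x86_64 DECLARE_REG lines below, r0 and r0q are rdi,
+; r0d is edi, r0w is di and r0b is dil; on WIN64 the same names map to rcx/ecx/cx/cl.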
+
+%macro DECLARE_REG 6
+ %define r%1q %2
+ %define r%1d %3
+ %define r%1w %4
+ %define r%1b %5
+ %define r%1m %6
+ %ifid %6 ; i.e. it's a register
+ %define r%1mp %2
+ %elifdef ARCH_X86_64 ; memory
+ %define r%1mp qword %6
+ %else
+ %define r%1mp dword %6
+ %endif
+ %define r%1 %2
+%endmacro
+
+%macro DECLARE_REG_SIZE 2
+ %define r%1q r%1
+ %define e%1q r%1
+ %define r%1d e%1
+ %define e%1d e%1
+ %define r%1w %1
+ %define e%1w %1
+ %define r%1b %2
+ %define e%1b %2
+%ifndef ARCH_X86_64
+ %define r%1 e%1
+%endif
+%endmacro
+
+DECLARE_REG_SIZE ax, al
+DECLARE_REG_SIZE bx, bl
+DECLARE_REG_SIZE cx, cl
+DECLARE_REG_SIZE dx, dl
+DECLARE_REG_SIZE si, sil
+DECLARE_REG_SIZE di, dil
+DECLARE_REG_SIZE bp, bpl
+
+; t# defines for when per-arch register allocation is more complex than just function arguments
+
+%macro DECLARE_REG_TMP 1-*
+ %assign %%i 0
+ %rep %0
+ CAT_XDEFINE t, %%i, r%1
+ %assign %%i %%i+1
+ %rotate 1
+ %endrep
+%endmacro
+
+%macro DECLARE_REG_TMP_SIZE 0-*
+ %rep %0
+ %define t%1q t%1 %+ q
+ %define t%1d t%1 %+ d
+ %define t%1w t%1 %+ w
+ %define t%1b t%1 %+ b
+ %rotate 1
+ %endrep
+%endmacro
+
+DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7
+
+%ifdef ARCH_X86_64
+ %define gprsize 8
+%else
+ %define gprsize 4
+%endif
+
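+; PUSH/POP/SUB/ADD below wrap the plain instructions so that stack_offset
+; (used by the stack-based rNm operands and by PROLOGUE) stays in sync with
+; rsp adjustments.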
+%macro PUSH 1
+ push %1
+ %assign stack_offset stack_offset+gprsize
+%endmacro
+
+%macro POP 1
+ pop %1
+ %assign stack_offset stack_offset-gprsize
+%endmacro
+
+%macro SUB 2
+ sub %1, %2
+ %ifidn %1, rsp
+ %assign stack_offset stack_offset+(%2)
+ %endif
+%endmacro
+
+%macro ADD 2
+ add %1, %2
+ %ifidn %1, rsp
+ %assign stack_offset stack_offset-(%2)
+ %endif
+%endmacro
+
+%macro movifnidn 2
+ %ifnidn %1, %2
+ mov %1, %2
+ %endif
+%endmacro
+
+%macro movsxdifnidn 2
+ %ifnidn %1, %2
+ movsxd %1, %2
+ %endif
+%endmacro
+
+%macro ASSERT 1
+ %if (%1) == 0
+ %error assert failed
+ %endif
+%endmacro
+
+%macro DEFINE_ARGS 0-*
+ %ifdef n_arg_names
+ %assign %%i 0
+ %rep n_arg_names
+ CAT_UNDEF arg_name %+ %%i, q
+ CAT_UNDEF arg_name %+ %%i, d
+ CAT_UNDEF arg_name %+ %%i, w
+ CAT_UNDEF arg_name %+ %%i, b
+ CAT_UNDEF arg_name %+ %%i, m
+ CAT_UNDEF arg_name, %%i
+ %assign %%i %%i+1
+ %endrep
+ %endif
+
+ %assign %%i 0
+ %rep %0
+ %xdefine %1q r %+ %%i %+ q
+ %xdefine %1d r %+ %%i %+ d
+ %xdefine %1w r %+ %%i %+ w
+ %xdefine %1b r %+ %%i %+ b
+ %xdefine %1m r %+ %%i %+ m
+ CAT_XDEFINE arg_name, %%i, %1
+ %assign %%i %%i+1
+ %rotate 1
+ %endrep
+ %assign n_arg_names %%i
+%endmacro
+
+%ifdef WIN64 ; Windows x64 ;=================================================
+
+DECLARE_REG 0, rcx, ecx, cx, cl, ecx
+DECLARE_REG 1, rdx, edx, dx, dl, edx
+DECLARE_REG 2, r8, r8d, r8w, r8b, r8d
+DECLARE_REG 3, r9, r9d, r9w, r9b, r9d
+DECLARE_REG 4, rdi, edi, di, dil, [rsp + stack_offset + 40]
+DECLARE_REG 5, rsi, esi, si, sil, [rsp + stack_offset + 48]
+DECLARE_REG 6, rax, eax, ax, al, [rsp + stack_offset + 56]
+%define r7m [rsp + stack_offset + 64]
+%define r8m [rsp + stack_offset + 72]
+
+%macro LOAD_IF_USED 2 ; reg_id, number_of_args
+ %if %1 < %2
+ mov r%1, [rsp + stack_offset + 8 + %1*8]
+ %endif
+%endmacro
+
+%macro PROLOGUE 2-4+ ; #args, #regs, #xmm_regs, arg_names...
+ ASSERT %2 >= %1
+ %assign regs_used %2
+ ASSERT regs_used <= 7
+ %if %0 > 2
+ %assign xmm_regs_used %3
+ %else
+ %assign xmm_regs_used 0
+ %endif
+ ASSERT xmm_regs_used <= 16
+ %if regs_used > 4
+ push r4
+ push r5
+ %assign stack_offset stack_offset+16
+ %endif
+ %if xmm_regs_used > 6
+ sub rsp, (xmm_regs_used-6)*16+16
+ %assign stack_offset stack_offset+(xmm_regs_used-6)*16+16
+ %assign %%i xmm_regs_used
+ %rep (xmm_regs_used-6)
+ %assign %%i %%i-1
+ movdqa [rsp + (%%i-6)*16+8], xmm %+ %%i
+ %endrep
+ %endif
+ LOAD_IF_USED 4, %1
+ LOAD_IF_USED 5, %1
+ LOAD_IF_USED 6, %1
+ DEFINE_ARGS %4
+%endmacro
+
+%macro RESTORE_XMM_INTERNAL 1
+ %if xmm_regs_used > 6
+ %assign %%i xmm_regs_used
+ %rep (xmm_regs_used-6)
+ %assign %%i %%i-1
+ movdqa xmm %+ %%i, [%1 + (%%i-6)*16+8]
+ %endrep
+ add %1, (xmm_regs_used-6)*16+16
+ %endif
+%endmacro
+
+%macro RESTORE_XMM 1
+ RESTORE_XMM_INTERNAL %1
+ %if xmm_regs_used > 6
+ %assign stack_offset stack_offset-((xmm_regs_used-6)*16+16) ; undo PROLOGUE's adjustment
+ %endif
+ %assign xmm_regs_used 0
+%endmacro
+
+%macro RET 0
+ RESTORE_XMM_INTERNAL rsp
+ %if regs_used > 4
+ pop r5
+ pop r4
+ %endif
+ ret
+%endmacro
+
+%macro REP_RET 0
+ %if regs_used > 4 || xmm_regs_used > 6
+ RET
+ %else
+ rep ret
+ %endif
+%endmacro
+
+%elifdef ARCH_X86_64 ; *nix x64 ;=============================================
+
+DECLARE_REG 0, rdi, edi, di, dil, edi
+DECLARE_REG 1, rsi, esi, si, sil, esi
+DECLARE_REG 2, rdx, edx, dx, dl, edx
+DECLARE_REG 3, rcx, ecx, cx, cl, ecx
+DECLARE_REG 4, r8, r8d, r8w, r8b, r8d
+DECLARE_REG 5, r9, r9d, r9w, r9b, r9d
+DECLARE_REG 6, rax, eax, ax, al, [rsp + stack_offset + 8]
+%define r7m [rsp + stack_offset + 16]
+%define r8m [rsp + stack_offset + 24]
+
+%macro LOAD_IF_USED 2 ; reg_id, number_of_args
+ %if %1 < %2
+ mov r%1, [rsp - 40 + %1*8]
+ %endif
+%endmacro
+
+%macro PROLOGUE 2-4+ ; #args, #regs, #xmm_regs, arg_names...
+ ASSERT %2 >= %1
+ ASSERT %2 <= 7
+ LOAD_IF_USED 6, %1
+ DEFINE_ARGS %4
+%endmacro
+
+%macro RET 0
+ ret
+%endmacro
+
+%macro REP_RET 0
+ rep ret
+%endmacro
+
+%else ; X86_32 ;==============================================================
+
+DECLARE_REG 0, eax, eax, ax, al, [esp + stack_offset + 4]
+DECLARE_REG 1, ecx, ecx, cx, cl, [esp + stack_offset + 8]
+DECLARE_REG 2, edx, edx, dx, dl, [esp + stack_offset + 12]
+DECLARE_REG 3, ebx, ebx, bx, bl, [esp + stack_offset + 16]
+DECLARE_REG 4, esi, esi, si, null, [esp + stack_offset + 20]
+DECLARE_REG 5, edi, edi, di, null, [esp + stack_offset + 24]
+DECLARE_REG 6, ebp, ebp, bp, null, [esp + stack_offset + 28]
+%define r7m [esp + stack_offset + 32]
+%define r8m [esp + stack_offset + 36]
+%define rsp esp
+
+%macro PUSH_IF_USED 1 ; reg_id
+ %if %1 < regs_used
+ push r%1
+ %assign stack_offset stack_offset+4
+ %endif
+%endmacro
+
+%macro POP_IF_USED 1 ; reg_id
+ %if %1 < regs_used
+ pop r%1
+ %endif
+%endmacro
+
+%macro LOAD_IF_USED 2 ; reg_id, number_of_args
+ %if %1 < %2
+ mov r%1, [esp + stack_offset + 4 + %1*4]
+ %endif
+%endmacro
+
+%macro PROLOGUE 2-4+ ; #args, #regs, #xmm_regs (ignored on x86_32), arg_names...
+ ASSERT %2 >= %1
+ %assign regs_used %2
+ ASSERT regs_used <= 7
+ PUSH_IF_USED 3
+ PUSH_IF_USED 4
+ PUSH_IF_USED 5
+ PUSH_IF_USED 6
+ LOAD_IF_USED 0, %1
+ LOAD_IF_USED 1, %1
+ LOAD_IF_USED 2, %1
+ LOAD_IF_USED 3, %1
+ LOAD_IF_USED 4, %1
+ LOAD_IF_USED 5, %1
+ LOAD_IF_USED 6, %1
+ DEFINE_ARGS %4
+%endmacro
+
+%macro RET 0
+ POP_IF_USED 6
+ POP_IF_USED 5
+ POP_IF_USED 4
+ POP_IF_USED 3
+ ret
+%endmacro
+
+%macro REP_RET 0
+ %if regs_used > 3
+ RET
+ %else
+ rep ret
+ %endif
+%endmacro
+
+%endif ;======================================================================
+
+
+
+;=============================================================================
+; arch-independent part
+;=============================================================================
+
+%assign function_align 16
+
+; Symbol prefix for C linkage
+%macro cglobal 1-2+
+ %xdefine %1 ff_%1
+ %ifdef PREFIX
+ %xdefine %1 _ %+ %1
+ %endif
+ %xdefine %1.skip_prologue %1 %+ .skip_prologue
+ %ifidn __OUTPUT_FORMAT__,elf
+ global %1:function hidden
+ %else
+ global %1
+ %endif
+ align function_align
+ %1:
+ RESET_MM_PERMUTATION ; not really needed, but makes disassembly somewhat nicer
+ %assign stack_offset 0
+ %if %0 > 1
+ PROLOGUE %2
+ %endif
+%endmacro
+
+%macro cextern 1
+ %ifdef PREFIX
+ %xdefine %1 _%1
+ %endif
+ extern %1
+%endmacro
+
+; This is needed for ELF, otherwise the GNU linker assumes the stack is
+; executable by default.
+%ifidn __OUTPUT_FORMAT__,elf
+SECTION .note.GNU-stack noalloc noexec nowrite progbits
+%endif
+
+%assign FENC_STRIDE 16
+%assign FDEC_STRIDE 32
+
+; merge mmx and sse*
+
+%macro CAT_XDEFINE 3
+ %xdefine %1%2 %3
+%endmacro
+
+%macro CAT_UNDEF 2
+ %undef %1%2
+%endmacro
+
+%macro INIT_MMX 0
+ %define RESET_MM_PERMUTATION INIT_MMX
+ %define mmsize 8
+ %define num_mmregs 8
+ %define mova movq
+ %define movu movq
+ %define movh movd
+ %define movnt movntq
+ %assign %%i 0
+ %rep 8
+ CAT_XDEFINE m, %%i, mm %+ %%i
+ CAT_XDEFINE nmm, %%i, %%i
+ %assign %%i %%i+1
+ %endrep
+ %rep 8
+ CAT_UNDEF m, %%i
+ CAT_UNDEF nmm, %%i
+ %assign %%i %%i+1
+ %endrep
+%endmacro
+
+%macro INIT_XMM 0
+ %define RESET_MM_PERMUTATION INIT_XMM
+ %define mmsize 16
+ %define num_mmregs 8
+ %ifdef ARCH_X86_64
+ %define num_mmregs 16
+ %endif
+ %define mova movdqa
+ %define movu movdqu
+ %define movh movq
+ %define movnt movntdq
+ %assign %%i 0
+ %rep num_mmregs
+ CAT_XDEFINE m, %%i, xmm %+ %%i
+ CAT_XDEFINE nxmm, %%i, %%i
+ %assign %%i %%i+1
+ %endrep
+%endmacro
+
+INIT_MMX
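+; e.g. with the aliases above, a line such as "mova m0, [r0]" assembles to
+; "movq mm0, [...]" after INIT_MMX and to "movdqa xmm0, [...]" after INIT_XMM,
+; so one macro body can serve both instruction sets.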
+
+; I often want to use macros that permute their arguments, e.g. there's no
+; efficient way to implement a butterfly, transpose or DCT without swapping some
+; arguments.
+;
+; I would like to not have to manually keep track of the permutations:
+; If I insert a permutation in the middle of a function, it should automatically
+; change everything that follows. For more complex macros I may also have multiple
+; implementations, e.g. the SSE2 and SSSE3 versions may have different permutations.
+;
+; Hence these macros. Insert a PERMUTE or some SWAPs at the end of a macro that
+; permutes its arguments. It's equivalent to exchanging the contents of the
+; registers, except that this way you exchange the register names instead, so it
+; doesn't cost any cycles.
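+; As a purely illustrative sketch:
+;     mova  m0, [r0]
+;     mova  m1, [r1]
+;     SWAP  0, 1
+; after the SWAP, any later use of m0 refers to the register that was loaded
+; from [r1] (and vice versa), with no move instruction emitted.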
+
+%macro PERMUTE 2-* ; takes a list of pairs to swap
+%rep %0/2
+ %xdefine tmp%2 m%2
+ %xdefine ntmp%2 nm%2
+ %rotate 2
+%endrep
+%rep %0/2
+ %xdefine m%1 tmp%2
+ %xdefine nm%1 ntmp%2
+ %undef tmp%2
+ %undef ntmp%2
+ %rotate 2
+%endrep
+%endmacro
+
+%macro SWAP 2-* ; swaps a single chain (sometimes more concise than pairs)
+%rep %0-1
+%ifdef m%1
+ %xdefine tmp m%1
+ %xdefine m%1 m%2
+ %xdefine m%2 tmp
+ CAT_XDEFINE n, m%1, %1
+ CAT_XDEFINE n, m%2, %2
+%else
+ ; If we were called as "SWAP m0,m1" rather than "SWAP 0,1", infer the original numbers here.
+ ; Be careful using this mode in nested macros though, as in some cases there may be
+ ; other copies of m# that have already been dereferenced and don't get updated correctly.
+ %xdefine %%n1 n %+ %1
+ %xdefine %%n2 n %+ %2
+ %xdefine tmp m %+ %%n1
+ CAT_XDEFINE m, %%n1, m %+ %%n2
+ CAT_XDEFINE m, %%n2, tmp
+ CAT_XDEFINE n, m %+ %%n1, %%n1
+ CAT_XDEFINE n, m %+ %%n2, %%n2
+%endif
+ %undef tmp
+ %rotate 1
+%endrep
+%endmacro
+
+%macro SAVE_MM_PERMUTATION 1
+ %assign %%i 0
+ %rep num_mmregs
+ CAT_XDEFINE %1_m, %%i, m %+ %%i
+ %assign %%i %%i+1
+ %endrep
+%endmacro
+
+%macro LOAD_MM_PERMUTATION 1
+ %assign %%i 0
+ %rep num_mmregs
+ CAT_XDEFINE m, %%i, %1_m %+ %%i
+ CAT_XDEFINE n, m %+ %%i, %%i
+ %assign %%i %%i+1
+ %endrep
+%endmacro
+
+%macro call 1
+ call %1
+ %ifdef %1_m0
+ LOAD_MM_PERMUTATION %1
+ %endif
+%endmacro
+
+; Substitutions that reduce instruction size but are functionally equivalent:
+; a sign-extended 8-bit immediate covers -128..127, so e.g. "add x, 128" takes a
+; 32-bit immediate while the equivalent "sub x, -128" fits in the short form.
+%macro add 2
+ %ifnum %2
+ %if %2==128
+ sub %1, -128
+ %else
+ add %1, %2
+ %endif
+ %else
+ add %1, %2
+ %endif
+%endmacro
+
+%macro sub 2
+ %ifnum %2
+ %if %2==128
+ add %1, -128
+ %else
+ sub %1, %2
+ %endif
+ %else
+ sub %1, %2
+ %endif
+%endmacro
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/x86util.asm b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/x86util.asm
new file mode 100644
index 000000000..f3e0e2dbe
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/x86/x86util.asm
@@ -0,0 +1,515 @@
+;*****************************************************************************
+;* x86util.asm
+;*****************************************************************************
+;* Copyright (C) 2008 Loren Merritt <lorenm@u.washington.edu>
+;*
+;* This program is free software; you can redistribute it and/or modify
+;* it under the terms of the GNU General Public License as published by
+;* the Free Software Foundation; either version 2 of the License, or
+;* (at your option) any later version.
+;*
+;* This program is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;* GNU General Public License for more details.
+;*
+;* You should have received a copy of the GNU General Public License
+;* along with this program; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
+;*****************************************************************************
+
+%macro SBUTTERFLY 4
+ mova m%4, m%2
+ punpckl%1 m%2, m%3
+ punpckh%1 m%4, m%3
+ SWAP %3, %4
+%endmacro
+
+%macro TRANSPOSE4x4W 5
+ SBUTTERFLY wd, %1, %2, %5
+ SBUTTERFLY wd, %3, %4, %5
+ SBUTTERFLY dq, %1, %3, %5
+ SBUTTERFLY dq, %2, %4, %5
+ SWAP %2, %3
+%endmacro
+
+%macro TRANSPOSE2x4x4W 5
+ SBUTTERFLY wd, %1, %2, %5
+ SBUTTERFLY wd, %3, %4, %5
+ SBUTTERFLY dq, %1, %3, %5
+ SBUTTERFLY dq, %2, %4, %5
+ SBUTTERFLY qdq, %1, %2, %5
+ SBUTTERFLY qdq, %3, %4, %5
+%endmacro
+
+%macro TRANSPOSE4x4D 5
+ SBUTTERFLY dq, %1, %2, %5
+ SBUTTERFLY dq, %3, %4, %5
+ SBUTTERFLY qdq, %1, %3, %5
+ SBUTTERFLY qdq, %2, %4, %5
+ SWAP %2, %3
+%endmacro
+
+%macro TRANSPOSE8x8W 9-11
+%ifdef ARCH_X86_64
+ SBUTTERFLY wd, %1, %2, %9
+ SBUTTERFLY wd, %3, %4, %9
+ SBUTTERFLY wd, %5, %6, %9
+ SBUTTERFLY wd, %7, %8, %9
+ SBUTTERFLY dq, %1, %3, %9
+ SBUTTERFLY dq, %2, %4, %9
+ SBUTTERFLY dq, %5, %7, %9
+ SBUTTERFLY dq, %6, %8, %9
+ SBUTTERFLY qdq, %1, %5, %9
+ SBUTTERFLY qdq, %2, %6, %9
+ SBUTTERFLY qdq, %3, %7, %9
+ SBUTTERFLY qdq, %4, %8, %9
+ SWAP %2, %5
+ SWAP %4, %7
+%else
+; in: m0..m7, unless %11 in which case m6 is in %9
+; out: m0..m7, unless %11 in which case m4 is in %10
+; spills into %9 and %10
+%if %0<11
+ movdqa %9, m%7
+%endif
+ SBUTTERFLY wd, %1, %2, %7
+ movdqa %10, m%2
+ movdqa m%7, %9
+ SBUTTERFLY wd, %3, %4, %2
+ SBUTTERFLY wd, %5, %6, %2
+ SBUTTERFLY wd, %7, %8, %2
+ SBUTTERFLY dq, %1, %3, %2
+ movdqa %9, m%3
+ movdqa m%2, %10
+ SBUTTERFLY dq, %2, %4, %3
+ SBUTTERFLY dq, %5, %7, %3
+ SBUTTERFLY dq, %6, %8, %3
+ SBUTTERFLY qdq, %1, %5, %3
+ SBUTTERFLY qdq, %2, %6, %3
+ movdqa %10, m%2
+ movdqa m%3, %9
+ SBUTTERFLY qdq, %3, %7, %2
+ SBUTTERFLY qdq, %4, %8, %2
+ SWAP %2, %5
+ SWAP %4, %7
+%if %0<11
+ movdqa m%5, %10
+%endif
+%endif
+%endmacro
+
+%macro ABS1_MMX 2 ; a, tmp
+ pxor %2, %2
+ psubw %2, %1
+ pmaxsw %1, %2
+%endmacro
+
+%macro ABS2_MMX 4 ; a, b, tmp0, tmp1
+ pxor %3, %3
+ pxor %4, %4
+ psubw %3, %1
+ psubw %4, %2
+ pmaxsw %1, %3
+ pmaxsw %2, %4
+%endmacro
+
+%macro ABS1_SSSE3 2
+ pabsw %1, %1
+%endmacro
+
+%macro ABS2_SSSE3 4
+ pabsw %1, %1
+ pabsw %2, %2
+%endmacro
+
+%define ABS1 ABS1_MMX
+%define ABS2 ABS2_MMX
+
+%macro ABS4 6
+ ABS2 %1, %2, %5, %6
+ ABS2 %3, %4, %5, %6
+%endmacro
+
+%macro SPLATB_MMX 3
+ movd %1, [%2-3] ; to avoid crossing a cache line
+ punpcklbw %1, %1
+%if mmsize==16
+ pshuflw %1, %1, 0xff
+ punpcklqdq %1, %1
+%else
+ pshufw %1, %1, 0xff
+%endif
+%endmacro
+
+%macro SPLATB_SSSE3 3
+ movd %1, [%2-3]
+ pshufb %1, %3
+%endmacro
+
+%macro PALIGNR_MMX 4
+ %ifnidn %4, %2
+ mova %4, %2
+ %endif
+ %if mmsize == 8
+ psllq %1, (8-%3)*8
+ psrlq %4, %3*8
+ %else
+ pslldq %1, 16-%3
+ psrldq %4, %3
+ %endif
+ por %1, %4
+%endmacro
+
+%macro PALIGNR_SSSE3 4
+ palignr %1, %2, %3
+%endmacro
+
+%macro DEINTB 5 ; mask, reg1, mask, reg2, optional src to fill masks from
+%ifnum %5
+ mova m%1, m%5
+ mova m%3, m%5
+%else
+ mova m%1, %5
+ mova m%3, m%1
+%endif
+ pand m%1, m%2 ; dst .. y6 .. y4
+ pand m%3, m%4 ; src .. y6 .. y4
+ psrlw m%2, 8 ; dst .. y7 .. y5
+ psrlw m%4, 8 ; src .. y7 .. y5
+%endmacro
+
+%macro SUMSUB_BA 2-3
+%if %0==2
+ paddw %1, %2
+ paddw %2, %2
+ psubw %2, %1
+%else
+ mova %3, %1
+ paddw %1, %2
+ psubw %2, %3
+%endif
+%endmacro
+
+%macro SUMSUB_BADC 4-5
+%if %0==5
+ SUMSUB_BA %1, %2, %5
+ SUMSUB_BA %3, %4, %5
+%else
+ paddw %1, %2
+ paddw %3, %4
+ paddw %2, %2
+ paddw %4, %4
+ psubw %2, %1
+ psubw %4, %3
+%endif
+%endmacro
+
+%macro HADAMARD4_V 4+
+ SUMSUB_BADC %1, %2, %3, %4
+ SUMSUB_BADC %1, %3, %2, %4
+%endmacro
+
+%macro HADAMARD8_V 8+
+ SUMSUB_BADC %1, %2, %3, %4
+ SUMSUB_BADC %5, %6, %7, %8
+ SUMSUB_BADC %1, %3, %2, %4
+ SUMSUB_BADC %5, %7, %6, %8
+ SUMSUB_BADC %1, %5, %2, %6
+ SUMSUB_BADC %3, %7, %4, %8
+%endmacro
+
+%macro TRANS_SSE2 5-6
+; TRANSPOSE2x2
+; %1: transpose width (d/q) - use SBUTTERFLY qdq for dq
+; %2: ord/unord (for compat with sse4, unused)
+; %3/%4: source regs
+; %5/%6: tmp regs
+%ifidn %1, d
+%define mask [mask_10 GLOBAL]
+%define shift 16
+%elifidn %1, q
+%define mask [mask_1100 GLOBAL]
+%define shift 32
+%endif
+%if %0==6 ; less dependency if we have two tmp
+ mova m%5, mask ; ff00
+ mova m%6, m%4 ; x5x4
+ psll%1 m%4, shift ; x4..
+ pand m%6, m%5 ; x5..
+ pandn m%5, m%3 ; ..x0
+ psrl%1 m%3, shift ; ..x1
+ por m%4, m%5 ; x4x0
+ por m%3, m%6 ; x5x1
+%else ; more dependency, one insn less. sometimes faster, sometimes not
+ mova m%5, m%4 ; x5x4
+ psll%1 m%4, shift ; x4..
+ pxor m%4, m%3 ; (x4^x1)x0
+ pand m%4, mask ; (x4^x1)..
+ pxor m%3, m%4 ; x4x0
+ psrl%1 m%4, shift ; ..(x1^x4)
+ pxor m%5, m%4 ; x5x1
+ SWAP %4, %3, %5
+%endif
+%endmacro
+
+%macro TRANS_SSE4 5-6 ; see above
+%ifidn %1, d
+ mova m%5, m%3
+%ifidn %2, ord
+ psrl%1 m%3, 16
+%endif
+ pblendw m%3, m%4, 10101010b
+ psll%1 m%4, 16
+%ifidn %2, ord
+ pblendw m%4, m%5, 01010101b
+%else
+ psrl%1 m%5, 16
+ por m%4, m%5
+%endif
+%elifidn %1, q
+ mova m%5, m%3
+ shufps m%3, m%4, 10001000b
+ shufps m%5, m%4, 11011101b
+ SWAP %4, %5
+%endif
+%endmacro
+
+%macro HADAMARD 5-6
+; %1=distance in words (0 for vertical pass, 1/2/4 for horizontal passes)
+; %2=sumsub/max/amax (sum and diff / maximum / maximum of absolutes)
+; %3/%4: regs
+; %5(%6): tmpregs
+%if %1!=0 ; have to reorder stuff for horizontal op
+ %ifidn %2, sumsub
+ %define ORDER ord
+ ; sumsub needs order because a-b != b-a unless a=b
+ %else
+ %define ORDER unord
+ ; if we just max, order doesn't matter (allows pblendw+or in sse4)
+ %endif
+ %if %1==1
+ TRANS d, ORDER, %3, %4, %5, %6
+ %elif %1==2
+ %if mmsize==8
+ SBUTTERFLY dq, %3, %4, %5
+ %else
+ TRANS q, ORDER, %3, %4, %5, %6
+ %endif
+ %elif %1==4
+ SBUTTERFLY qdq, %3, %4, %5
+ %endif
+%endif
+%ifidn %2, sumsub
+ SUMSUB_BA m%3, m%4, m%5
+%else
+ %ifidn %2, amax
+ %if %0==6
+ ABS2 m%3, m%4, m%5, m%6
+ %else
+ ABS1 m%3, m%5
+ ABS1 m%4, m%5
+ %endif
+ %endif
+ pmaxsw m%3, m%4
+%endif
+%endmacro
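+; e.g. (illustrative) "HADAMARD 0, sumsub, 0, 1, 2" performs one vertical
+; butterfly: m0 becomes m0+m1 and m1 becomes m1-m0 (original values), with m2
+; used as scratch.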
+
+
+%macro HADAMARD2_2D 6-7 sumsub
+ HADAMARD 0, sumsub, %1, %2, %5
+ HADAMARD 0, sumsub, %3, %4, %5
+ SBUTTERFLY %6, %1, %2, %5
+%ifnum %7
+ HADAMARD 0, amax, %1, %2, %5, %7
+%else
+ HADAMARD 0, %7, %1, %2, %5
+%endif
+ SBUTTERFLY %6, %3, %4, %5
+%ifnum %7
+ HADAMARD 0, amax, %3, %4, %5, %7
+%else
+ HADAMARD 0, %7, %3, %4, %5
+%endif
+%endmacro
+
+%macro HADAMARD4_2D 5-6 sumsub
+ HADAMARD2_2D %1, %2, %3, %4, %5, wd
+ HADAMARD2_2D %1, %3, %2, %4, %5, dq, %6
+ SWAP %2, %3
+%endmacro
+
+%macro HADAMARD4_2D_SSE 5-6 sumsub
+ HADAMARD 0, sumsub, %1, %2, %5 ; 1st V row 0 + 1
+ HADAMARD 0, sumsub, %3, %4, %5 ; 1st V row 2 + 3
+ SBUTTERFLY wd, %1, %2, %5 ; %1: m0 1+0 %2: m1 1+0
+ SBUTTERFLY wd, %3, %4, %5 ; %3: m0 3+2 %4: m1 3+2
+ HADAMARD2_2D %1, %3, %2, %4, %5, dq
+ SBUTTERFLY qdq, %1, %2, %5
+ HADAMARD 0, %6, %1, %2, %5 ; 2nd H m1/m0 row 0+1
+ SBUTTERFLY qdq, %3, %4, %5
+ HADAMARD 0, %6, %3, %4, %5 ; 2nd H m1/m0 row 2+3
+%endmacro
+
+%macro HADAMARD8_2D 9-10 sumsub
+ HADAMARD2_2D %1, %2, %3, %4, %9, wd
+ HADAMARD2_2D %5, %6, %7, %8, %9, wd
+ HADAMARD2_2D %1, %3, %2, %4, %9, dq
+ HADAMARD2_2D %5, %7, %6, %8, %9, dq
+ HADAMARD2_2D %1, %5, %3, %7, %9, qdq, %10
+ HADAMARD2_2D %2, %6, %4, %8, %9, qdq, %10
+%ifnidn %10, amax
+ SWAP %2, %5
+ SWAP %4, %7
+%endif
+%endmacro
+
+%macro SUMSUB2_AB 3
+ mova %3, %1
+ paddw %1, %1
+ paddw %1, %2
+ psubw %3, %2
+ psubw %3, %2
+%endmacro
+
+%macro SUMSUB2_BA 3
+ mova m%3, m%1
+ paddw m%1, m%2
+ paddw m%1, m%2
+ psubw m%2, m%3
+ psubw m%2, m%3
+%endmacro
+
+%macro SUMSUBD2_AB 4
+ mova %4, %1
+ mova %3, %2
+ psraw %2, 1
+ psraw %1, 1
+ paddw %2, %4
+ psubw %1, %3
+%endmacro
+
+%macro DCT4_1D 5
+%ifnum %5
+ SUMSUB_BADC m%4, m%1, m%3, m%2; m%5
+ SUMSUB_BA m%3, m%4, m%5
+ SUMSUB2_AB m%1, m%2, m%5
+ SWAP %1, %3, %4, %5, %2
+%else
+ SUMSUB_BADC m%4, m%1, m%3, m%2
+ SUMSUB_BA m%3, m%4
+ mova [%5], m%2
+ SUMSUB2_AB m%1, [%5], m%2
+ SWAP %1, %3, %4, %2
+%endif
+%endmacro
+
+%macro IDCT4_1D 5-6
+%ifnum %5
+ SUMSUBD2_AB m%2, m%4, m%6, m%5
+ SUMSUB_BA m%3, m%1, m%6
+ SUMSUB_BADC m%4, m%3, m%2, m%1, m%6
+%else
+ SUMSUBD2_AB m%2, m%4, [%5], [%5+16]
+ SUMSUB_BA m%3, m%1
+ SUMSUB_BADC m%4, m%3, m%2, m%1
+%endif
+ SWAP %1, %4, %3
+%endmacro
+
+%macro LOAD_DIFF 5
+%ifidn %3, none
+ movh %1, %4
+ movh %2, %5
+ punpcklbw %1, %2
+ punpcklbw %2, %2
+ psubw %1, %2
+%else
+ movh %1, %4
+ punpcklbw %1, %3
+ movh %2, %5
+ punpcklbw %2, %3
+ psubw %1, %2
+%endif
+%endmacro
+
+%macro LOAD_DIFF8x4_SSE2 8
+ LOAD_DIFF m%1, m%5, m%6, [%7+%1*FENC_STRIDE], [%8+%1*FDEC_STRIDE]
+ LOAD_DIFF m%2, m%5, m%6, [%7+%2*FENC_STRIDE], [%8+%2*FDEC_STRIDE]
+ LOAD_DIFF m%3, m%5, m%6, [%7+%3*FENC_STRIDE], [%8+%3*FDEC_STRIDE]
+ LOAD_DIFF m%4, m%5, m%6, [%7+%4*FENC_STRIDE], [%8+%4*FDEC_STRIDE]
+%endmacro
+
+%macro LOAD_DIFF8x4_SSSE3 8 ; 4x dst, 1x tmp, 1x mul, 2x ptr
+ movh m%2, [%8+%1*FDEC_STRIDE]
+ movh m%1, [%7+%1*FENC_STRIDE]
+ punpcklbw m%1, m%2
+ movh m%3, [%8+%2*FDEC_STRIDE]
+ movh m%2, [%7+%2*FENC_STRIDE]
+ punpcklbw m%2, m%3
+ movh m%4, [%8+%3*FDEC_STRIDE]
+ movh m%3, [%7+%3*FENC_STRIDE]
+ punpcklbw m%3, m%4
+ movh m%5, [%8+%4*FDEC_STRIDE]
+ movh m%4, [%7+%4*FENC_STRIDE]
+ punpcklbw m%4, m%5
+ pmaddubsw m%1, m%6
+ pmaddubsw m%2, m%6
+ pmaddubsw m%3, m%6
+ pmaddubsw m%4, m%6
+%endmacro
+
+%macro STORE_DCT 6
+ movq [%5+%6+ 0], m%1
+ movq [%5+%6+ 8], m%2
+ movq [%5+%6+16], m%3
+ movq [%5+%6+24], m%4
+ movhps [%5+%6+32], m%1
+ movhps [%5+%6+40], m%2
+ movhps [%5+%6+48], m%3
+ movhps [%5+%6+56], m%4
+%endmacro
+
+%macro STORE_IDCT 4
+ movhps [r0-4*FDEC_STRIDE], %1
+ movh [r0-3*FDEC_STRIDE], %1
+ movhps [r0-2*FDEC_STRIDE], %2
+ movh [r0-1*FDEC_STRIDE], %2
+ movhps [r0+0*FDEC_STRIDE], %3
+ movh [r0+1*FDEC_STRIDE], %3
+ movhps [r0+2*FDEC_STRIDE], %4
+ movh [r0+3*FDEC_STRIDE], %4
+%endmacro
+
+%macro LOAD_DIFF_8x4P 7-10 r0,r2,0 ; 4x dest, 2x temp, 2x pointer, increment?
+ LOAD_DIFF m%1, m%5, m%7, [%8], [%9]
+ LOAD_DIFF m%2, m%6, m%7, [%8+r1], [%9+r3]
+ LOAD_DIFF m%3, m%5, m%7, [%8+2*r1], [%9+2*r3]
+ LOAD_DIFF m%4, m%6, m%7, [%8+r4], [%9+r5]
+%if %10
+ lea %8, [%8+4*r1]
+ lea %9, [%9+4*r3]
+%endif
+%endmacro
+
+%macro DIFFx2 6-7
+ movh %3, %5
+ punpcklbw %3, %4
+ psraw %1, 6
+ paddsw %1, %3
+ movh %3, %6
+ punpcklbw %3, %4
+ psraw %2, 6
+ paddsw %2, %3
+ packuswb %2, %1
+%endmacro
+
+%macro STORE_DIFF 4
+ movh %2, %4
+ punpcklbw %2, %3
+ psraw %1, 6
+ paddsw %1, %2
+ packuswb %1, %1
+ movh %4, %1
+%endmacro
+
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/xiph.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/xiph.c
new file mode 100644
index 000000000..2f4f25c92
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/xiph.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2007 FFmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/intreadwrite.h"
+#include "xiph.h"
+
+int ff_split_xiph_headers(uint8_t *extradata, int extradata_size,
+ int first_header_size, uint8_t *header_start[3],
+ int header_len[3])
+{
+ int i;
+
+ if (extradata_size >= 6 && AV_RB16(extradata) == first_header_size) {
+ int overall_len = 6;
+ for (i=0; i<3; i++) {
+ header_len[i] = AV_RB16(extradata);
+ extradata += 2;
+ header_start[i] = extradata;
+ extradata += header_len[i];
+ if (overall_len > extradata_size - header_len[i])
+ return -1;
+ overall_len += header_len[i];
+ }
+ } else if (extradata_size >= 3 && extradata_size < INT_MAX - 0x1ff && extradata[0] == 2) {
+ int overall_len = 3;
+ extradata++;
+ for (i=0; i<2; i++, extradata++) {
+ header_len[i] = 0;
+ for (; overall_len < extradata_size && *extradata==0xff; extradata++) {
+ header_len[i] += 0xff;
+ overall_len += 0xff + 1;
+ }
+ header_len[i] += *extradata;
+ overall_len += *extradata;
+ if (overall_len > extradata_size)
+ return -1;
+ }
+ header_len[2] = extradata_size - overall_len;
+ header_start[0] = extradata;
+ header_start[1] = header_start[0] + header_len[0];
+ header_start[2] = header_start[1] + header_len[1];
+ } else {
+ return -1;
+ }
+ return 0;
+}
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/xiph.h b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/xiph.h
new file mode 100644
index 000000000..60f4a95d1
--- /dev/null
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/xiph.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2007 FFmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_XIPH_H
+#define AVCODEC_XIPH_H
+
+#include "libavutil/common.h"
+
+/**
+ * Splits a single extradata buffer into the three headers that most
+ * Xiph codecs use (e.g. Theora and Vorbis).
+ * Works with both Matroska's packing and lavc's packing.
+ *
+ * @param[in] extradata The single chunk that combines all three headers
+ * @param[in] extradata_size The size of the extradata buffer
+ * @param[in] first_header_size The size of the first header, used to
+ * differentiate between the Matroska packing and lavc packing.
+ * @param[out] header_start Pointers to the start of the three separate headers.
+ * @param[out] header_len The sizes of each of the three headers.
+ * @return On error a negative value is returned, on success zero.
+ */
+int ff_split_xiph_headers(uint8_t *extradata, int extradata_size,
+ int first_header_size, uint8_t *header_start[3],
+ int header_len[3]);
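+
+/* A minimal usage sketch (hypothetical caller: avctx is an assumed
+ * AVCodecContext*, and 30 is the size of a Vorbis identification header,
+ * used here only for illustration). A negative return means the extradata
+ * is malformed:
+ *
+ *     uint8_t *header_start[3];
+ *     int header_len[3];
+ *     if (ff_split_xiph_headers(avctx->extradata, avctx->extradata_size,
+ *                               30, header_start, header_len) < 0)
+ *         return -1;
+ */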
+
+#endif /* AVCODEC_XIPH_H */