
github.com/mpc-hc/mpc-hc.git
author     tetsuo55 <tetsuo55@users.sourceforge.net>  2010-04-09 01:14:58 +0400
committer  tetsuo55 <tetsuo55@users.sourceforge.net>  2010-04-09 01:14:58 +0400
commit     a9b7bf3fb3e1334d8defd05ca4cfae870b4912e5 (patch)
tree       2dab453d94d5e003379a6cc895eceb84c80e23ec /src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h263.c
parent     aafd49a91f7c2fa9c7103971c16fa6e1b29e8bfd (diff)
astyle formatting cleanup to make the source code more accessible
switches used: astyle --style=ansi --min-conditional-indent=0 --pad=oper --unpad=paren (http://astyle.sourceforge.net/)
git-svn-id: https://mpc-hc.svn.sourceforge.net/svnroot/mpc-hc/trunk@1783 10f7b99b-c216-0410-bff0-8a66a9350fd8
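
For reference, a sketch of how the formatting pass described above could be reproduced on this one file (the original run presumably covered the whole tree, and the exact output may vary with the astyle version; the switches are the older spellings accepted by astyle releases of that era):

    # reformat the file in place using the switches listed in the commit message
    astyle --style=ansi --min-conditional-indent=0 --pad=oper --unpad=paren \
        src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h263.c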
Diffstat (limited to 'src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h263.c')
-rw-r--r--  src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h263.c  268
1 file changed, 163 insertions(+), 105 deletions(-)
diff --git a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h263.c b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h263.c
index a07c654e1..06a7ddd28 100644
--- a/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h263.c
+++ b/src/filters/transform/MPCVideoDec/ffmpeg/libavcodec/h263.c
@@ -46,35 +46,43 @@
uint8_t ff_h263_static_rl_table_store[2][2][2*MAX_RUN + MAX_LEVEL + 3];
-void ff_h263_update_motion_val(MpegEncContext * s){
+void ff_h263_update_motion_val(MpegEncContext * s)
+{
const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
- //FIXME a lot of that is only needed for !low_delay
+ //FIXME a lot of that is only needed for !low_delay
const int wrap = s->b8_stride;
const int xy = s->block_index[0];
- s->current_picture.mbskip_table[mb_xy]= s->mb_skipped;
+ s->current_picture.mbskip_table[mb_xy] = s->mb_skipped;
- if(s->mv_type != MV_TYPE_8X8){
+ if(s->mv_type != MV_TYPE_8X8)
+ {
int motion_x, motion_y;
- if (s->mb_intra) {
+ if(s->mb_intra)
+ {
motion_x = 0;
motion_y = 0;
- } else if (s->mv_type == MV_TYPE_16X16) {
+ }
+ else if(s->mv_type == MV_TYPE_16X16)
+ {
motion_x = s->mv[0][0][0];
motion_y = s->mv[0][0][1];
- } else /*if (s->mv_type == MV_TYPE_FIELD)*/ {
+ }
+ else /*if (s->mv_type == MV_TYPE_FIELD)*/
+ {
int i;
motion_x = s->mv[0][0][0] + s->mv[0][1][0];
motion_y = s->mv[0][0][1] + s->mv[0][1][1];
- motion_x = (motion_x>>1) | (motion_x&1);
- for(i=0; i<2; i++){
- s->p_field_mv_table[i][0][mb_xy][0]= s->mv[0][i][0];
- s->p_field_mv_table[i][0][mb_xy][1]= s->mv[0][i][1];
+ motion_x = (motion_x >> 1) | (motion_x & 1);
+ for(i = 0; i < 2; i++)
+ {
+ s->p_field_mv_table[i][0][mb_xy][0] = s->mv[0][i][0];
+ s->p_field_mv_table[i][0][mb_xy][1] = s->mv[0][i][1];
}
- s->current_picture.ref_index[0][xy ]=
- s->current_picture.ref_index[0][xy + 1]= s->field_select[0][0];
- s->current_picture.ref_index[0][xy + wrap ]=
- s->current_picture.ref_index[0][xy + wrap + 1]= s->field_select[0][1];
+ s->current_picture.ref_index[0][xy ] =
+ s->current_picture.ref_index[0][xy + 1] = s->field_select[0][0];
+ s->current_picture.ref_index[0][xy + wrap ] =
+ s->current_picture.ref_index[0][xy + wrap + 1] = s->field_select[0][1];
}
/* no update if 8X8 because it has been done during parsing */
@@ -88,13 +96,14 @@ void ff_h263_update_motion_val(MpegEncContext * s){
s->current_picture.motion_val[0][xy + 1 + wrap][1] = motion_y;
}
- if(s->encoding){ //FIXME encoding MUST be cleaned up
- if (s->mv_type == MV_TYPE_8X8)
- s->current_picture.mb_type[mb_xy]= MB_TYPE_L0 | MB_TYPE_8x8;
+ if(s->encoding) //FIXME encoding MUST be cleaned up
+ {
+ if(s->mv_type == MV_TYPE_8X8)
+ s->current_picture.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_8x8;
else if(s->mb_intra)
- s->current_picture.mb_type[mb_xy]= MB_TYPE_INTRA;
+ s->current_picture.mb_type[mb_xy] = MB_TYPE_INTRA;
else
- s->current_picture.mb_type[mb_xy]= MB_TYPE_L0 | MB_TYPE_16x16;
+ s->current_picture.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_16x16;
}
}
@@ -104,12 +113,15 @@ int h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr)
int16_t *dc_val;
/* find prediction */
- if (n < 4) {
+ if(n < 4)
+ {
x = 2 * s->mb_x + (n & 1);
y = 2 * s->mb_y + ((n & 2) >> 1);
wrap = s->b8_stride;
dc_val = s->dc_val[0];
- } else {
+ }
+ else
+ {
x = s->mb_x;
y = s->mb_y;
wrap = s->mb_stride;
@@ -122,14 +134,15 @@ int h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr)
c = dc_val[(x) + (y - 1) * wrap];
/* No prediction outside GOB boundary */
- if(s->first_slice_line && n!=3){
- if(n!=2) c= 1024;
- if(n!=1 && s->mb_x == s->resync_mb_x) a= 1024;
+ if(s->first_slice_line && n != 3)
+ {
+ if(n != 2) c = 1024;
+ if(n != 1 && s->mb_x == s->resync_mb_x) a = 1024;
}
/* just DC prediction */
- if (a != 1024 && c != 1024)
+ if(a != 1024 && c != 1024)
pred_dc = (a + c) >> 1;
- else if (a != 1024)
+ else if(a != 1024)
pred_dc = a;
else
pred_dc = c;
@@ -139,14 +152,15 @@ int h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr)
return pred_dc;
}
-void ff_h263_loop_filter(MpegEncContext * s){
+void ff_h263_loop_filter(MpegEncContext * s)
+{
int qp_c;
const int linesize = s->linesize;
- const int uvlinesize= s->uvlinesize;
+ const int uvlinesize = s->uvlinesize;
const int xy = s->mb_y * s->mb_stride + s->mb_x;
uint8_t *dest_y = s->dest[0];
- uint8_t *dest_cb= s->dest[1];
- uint8_t *dest_cr= s->dest[2];
+ uint8_t *dest_cb = s->dest[1];
+ uint8_t *dest_cr = s->dest[2];
// if(s->pict_type==FF_B_TYPE && !s->readable) return;
@@ -154,71 +168,81 @@ void ff_h263_loop_filter(MpegEncContext * s){
Diag Top
Left Center
*/
- if(!IS_SKIP(s->current_picture.mb_type[xy])){
- qp_c= s->qscale;
- s->dsp.h263_v_loop_filter(dest_y+8*linesize , linesize, qp_c);
- s->dsp.h263_v_loop_filter(dest_y+8*linesize+8, linesize, qp_c);
- }else
- qp_c= 0;
-
- if(s->mb_y){
+ if(!IS_SKIP(s->current_picture.mb_type[xy]))
+ {
+ qp_c = s->qscale;
+ s->dsp.h263_v_loop_filter(dest_y + 8 * linesize , linesize, qp_c);
+ s->dsp.h263_v_loop_filter(dest_y + 8 * linesize + 8, linesize, qp_c);
+ }
+ else
+ qp_c = 0;
+
+ if(s->mb_y)
+ {
int qp_dt, qp_tt, qp_tc;
if(IS_SKIP(s->current_picture.mb_type[xy-s->mb_stride]))
- qp_tt=0;
+ qp_tt = 0;
else
- qp_tt= s->current_picture.qscale_table[xy-s->mb_stride];
+ qp_tt = s->current_picture.qscale_table[xy-s->mb_stride];
if(qp_c)
- qp_tc= qp_c;
+ qp_tc = qp_c;
else
- qp_tc= qp_tt;
+ qp_tc = qp_tt;
- if(qp_tc){
- const int chroma_qp= s->chroma_qscale_table[qp_tc];
+ if(qp_tc)
+ {
+ const int chroma_qp = s->chroma_qscale_table[qp_tc];
s->dsp.h263_v_loop_filter(dest_y , linesize, qp_tc);
- s->dsp.h263_v_loop_filter(dest_y+8, linesize, qp_tc);
+ s->dsp.h263_v_loop_filter(dest_y + 8, linesize, qp_tc);
s->dsp.h263_v_loop_filter(dest_cb , uvlinesize, chroma_qp);
s->dsp.h263_v_loop_filter(dest_cr , uvlinesize, chroma_qp);
}
if(qp_tt)
- s->dsp.h263_h_loop_filter(dest_y-8*linesize+8 , linesize, qp_tt);
+ s->dsp.h263_h_loop_filter(dest_y - 8 * linesize + 8 , linesize, qp_tt);
- if(s->mb_x){
+ if(s->mb_x)
+ {
if(qp_tt || IS_SKIP(s->current_picture.mb_type[xy-1-s->mb_stride]))
- qp_dt= qp_tt;
+ qp_dt = qp_tt;
else
- qp_dt= s->current_picture.qscale_table[xy-1-s->mb_stride];
-
- if(qp_dt){
- const int chroma_qp= s->chroma_qscale_table[qp_dt];
- s->dsp.h263_h_loop_filter(dest_y -8*linesize , linesize, qp_dt);
- s->dsp.h263_h_loop_filter(dest_cb-8*uvlinesize, uvlinesize, chroma_qp);
- s->dsp.h263_h_loop_filter(dest_cr-8*uvlinesize, uvlinesize, chroma_qp);
+ qp_dt = s->current_picture.qscale_table[xy-1-s->mb_stride];
+
+ if(qp_dt)
+ {
+ const int chroma_qp = s->chroma_qscale_table[qp_dt];
+ s->dsp.h263_h_loop_filter(dest_y - 8 * linesize , linesize, qp_dt);
+ s->dsp.h263_h_loop_filter(dest_cb - 8 * uvlinesize, uvlinesize, chroma_qp);
+ s->dsp.h263_h_loop_filter(dest_cr - 8 * uvlinesize, uvlinesize, chroma_qp);
}
}
}
- if(qp_c){
- s->dsp.h263_h_loop_filter(dest_y +8, linesize, qp_c);
+ if(qp_c)
+ {
+ s->dsp.h263_h_loop_filter(dest_y + 8, linesize, qp_c);
if(s->mb_y + 1 == s->mb_height)
- s->dsp.h263_h_loop_filter(dest_y+8*linesize+8, linesize, qp_c);
+ s->dsp.h263_h_loop_filter(dest_y + 8 * linesize + 8, linesize, qp_c);
}
- if(s->mb_x){
+ if(s->mb_x)
+ {
int qp_lc;
if(qp_c || IS_SKIP(s->current_picture.mb_type[xy-1]))
- qp_lc= qp_c;
+ qp_lc = qp_c;
else
- qp_lc= s->current_picture.qscale_table[xy-1];
+ qp_lc = s->current_picture.qscale_table[xy-1];
- if(qp_lc){
+ if(qp_lc)
+ {
s->dsp.h263_h_loop_filter(dest_y, linesize, qp_lc);
- if(s->mb_y + 1 == s->mb_height){
- const int chroma_qp= s->chroma_qscale_table[qp_lc];
- s->dsp.h263_h_loop_filter(dest_y +8* linesize, linesize, qp_lc);
+ if(s->mb_y + 1 == s->mb_height)
+ {
+ const int chroma_qp = s->chroma_qscale_table[qp_lc];
+ s->dsp.h263_h_loop_filter(dest_y + 8 * linesize, linesize, qp_lc);
s->dsp.h263_h_loop_filter(dest_cb , uvlinesize, chroma_qp);
s->dsp.h263_h_loop_filter(dest_cr , uvlinesize, chroma_qp);
}
@@ -232,14 +256,17 @@ void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n)
int16_t *dc_val, *ac_val, *ac_val1;
/* find prediction */
- if (n < 4) {
+ if(n < 4)
+ {
x = 2 * s->mb_x + (n & 1);
- y = 2 * s->mb_y + (n>> 1);
+ y = 2 * s->mb_y + (n >> 1);
wrap = s->b8_stride;
dc_val = s->dc_val[0];
ac_val = s->ac_val[0][0];
scale = s->y_dc_scale;
- } else {
+ }
+ else
+ {
x = s->mb_x;
y = s->mb_y;
wrap = s->mb_stride;
@@ -258,46 +285,57 @@ void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n)
c = dc_val[(x) + (y - 1) * wrap];
/* No prediction outside GOB boundary */
- if(s->first_slice_line && n!=3){
- if(n!=2) c= 1024;
- if(n!=1 && s->mb_x == s->resync_mb_x) a= 1024;
+ if(s->first_slice_line && n != 3)
+ {
+ if(n != 2) c = 1024;
+ if(n != 1 && s->mb_x == s->resync_mb_x) a = 1024;
}
- if (s->ac_pred) {
+ if(s->ac_pred)
+ {
pred_dc = 1024;
- if (s->h263_aic_dir) {
+ if(s->h263_aic_dir)
+ {
/* left prediction */
- if (a != 1024) {
+ if(a != 1024)
+ {
ac_val -= 16;
- for(i=1;i<8;i++) {
+ for(i = 1; i < 8; i++)
+ {
block[s->dsp.idct_permutation[i<<3]] += ac_val[i];
}
pred_dc = a;
}
- } else {
+ }
+ else
+ {
/* top prediction */
- if (c != 1024) {
+ if(c != 1024)
+ {
ac_val -= 16 * wrap;
- for(i=1;i<8;i++) {
+ for(i = 1; i < 8; i++)
+ {
block[s->dsp.idct_permutation[i ]] += ac_val[i + 8];
}
pred_dc = c;
}
}
- } else {
+ }
+ else
+ {
/* just DC prediction */
- if (a != 1024 && c != 1024)
+ if(a != 1024 && c != 1024)
pred_dc = (a + c) >> 1;
- else if (a != 1024)
+ else if(a != 1024)
pred_dc = a;
else
pred_dc = c;
}
/* we assume pred is positive */
- block[0]=block[0]*scale + pred_dc;
+ block[0] = block[0] * scale + pred_dc;
- if (block[0] < 0)
+ if(block[0] < 0)
block[0] = 0;
else
block[0] |= 1;
@@ -306,63 +344,82 @@ void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n)
dc_val[(x) + (y) * wrap] = block[0];
/* left copy */
- for(i=1;i<8;i++)
+ for(i = 1; i < 8; i++)
ac_val1[i ] = block[s->dsp.idct_permutation[i<<3]];
/* top copy */
- for(i=1;i<8;i++)
+ for(i = 1; i < 8; i++)
ac_val1[8 + i] = block[s->dsp.idct_permutation[i ]];
}
int16_t *h263_pred_motion(MpegEncContext * s, int block, int dir,
- int *px, int *py)
+ int *px, int *py)
{
int wrap;
int16_t *A, *B, *C, (*mot_val)[2];
- static const int off[4]= {2, 1, 1, -1};
+ static const int off[4] = {2, 1, 1, -1};
wrap = s->b8_stride;
mot_val = s->current_picture.motion_val[dir] + s->block_index[block];
A = mot_val[ - 1];
/* special case for first (slice) line */
- if (s->first_slice_line && block<3) {
+ if(s->first_slice_line && block < 3)
+ {
// we can't just change some MVs to simulate that as we need them for the B frames (and ME)
// and if we ever support non rectangular objects than we need to do a few ifs here anyway :(
- if(block==0){ //most common case
- if(s->mb_x == s->resync_mb_x){ //rare
- *px= *py = 0;
- }else if(s->mb_x + 1 == s->resync_mb_x && s->h263_pred){ //rare
+ if(block == 0) //most common case
+ {
+ if(s->mb_x == s->resync_mb_x) //rare
+ {
+ *px = *py = 0;
+ }
+ else if(s->mb_x + 1 == s->resync_mb_x && s->h263_pred) //rare
+ {
C = mot_val[off[block] - wrap];
- if(s->mb_x==0){
+ if(s->mb_x == 0)
+ {
*px = C[0];
*py = C[1];
- }else{
+ }
+ else
+ {
*px = mid_pred(A[0], 0, C[0]);
*py = mid_pred(A[1], 0, C[1]);
}
- }else{
+ }
+ else
+ {
*px = A[0];
*py = A[1];
}
- }else if(block==1){
- if(s->mb_x + 1 == s->resync_mb_x && s->h263_pred){ //rare
+ }
+ else if(block == 1)
+ {
+ if(s->mb_x + 1 == s->resync_mb_x && s->h263_pred) //rare
+ {
C = mot_val[off[block] - wrap];
*px = mid_pred(A[0], 0, C[0]);
*py = mid_pred(A[1], 0, C[1]);
- }else{
+ }
+ else
+ {
*px = A[0];
*py = A[1];
}
- }else{ /* block==2*/
+ }
+ else /* block==2*/
+ {
B = mot_val[ - wrap];
C = mot_val[off[block] - wrap];
if(s->mb_x == s->resync_mb_x) //rare
- A[0]=A[1]=0;
+ A[0] = A[1] = 0;
*px = mid_pred(A[0], B[0], C[0]);
*py = mid_pred(A[1], B[1], C[1]);
}
- } else {
+ }
+ else
+ {
B = mot_val[ - wrap];
C = mot_val[off[block] - wrap];
*px = mid_pred(A[0], B[0], C[0]);
@@ -375,10 +432,11 @@ int16_t *h263_pred_motion(MpegEncContext * s, int block, int dir,
/**
* Get the GOB height based on picture height.
*/
-int ff_h263_get_gob_height(MpegEncContext *s){
- if (s->height <= 400)
+int ff_h263_get_gob_height(MpegEncContext *s)
+{
+ if(s->height <= 400)
return 1;
- else if (s->height <= 800)
+ else if(s->height <= 800)
return 2;
else
return 4;